author	Wanpeng Li <liwp@linux.vnet.ibm.com>	2012-08-01 03:43:26 +0400
committer	Linus Torvalds <torvalds@linux-foundation.org>	2012-08-01 05:42:44 +0400
commit	da92c47d069890106484cb6605df701a54d24499 (patch)
tree	374915962f0e8071e27156f2dad39bc408572ff5
parent	aaad153e3408a4b8784de4c8446a40e70d57481f (diff)
download	linux-da92c47d069890106484cb6605df701a54d24499.tar.xz
mm/memcg: replace nonexistent move_lock_page_cgroup() with move_lock_mem_cgroup() in comment
Signed-off-by: Wanpeng Li <liwp.linux@gmail.com>
Acked-by: KAMEZAWA Hiroyuki <kamezawa.hiroyu@jp.fujitsu.com>
Signed-off-by: Andrew Morton <akpm@linux-foundation.org>
Signed-off-by: Linus Torvalds <torvalds@linux-foundation.org>
-rw-r--r--	mm/memcontrol.c	| 4
1 file changed, 2 insertions(+), 2 deletions(-)
diff --git a/mm/memcontrol.c b/mm/memcontrol.c
index ac87e79b00d1..4f73c823c59f 100644
--- a/mm/memcontrol.c
+++ b/mm/memcontrol.c
@@ -1898,7 +1898,7 @@ again:
return;
/*
* If this memory cgroup is not under account moving, we don't
- * need to take move_lock_page_cgroup(). Because we already hold
+ * need to take move_lock_mem_cgroup(). Because we already hold
* rcu_read_lock(), any calls to move_account will be delayed until
* rcu_read_unlock() if mem_cgroup_stolen() == true.
*/
@@ -1920,7 +1920,7 @@ void __mem_cgroup_end_update_page_stat(struct page *page, unsigned long *flags)
/*
* It's guaranteed that pc->mem_cgroup never changes while
* lock is held because a routine modifies pc->mem_cgroup
- * should take move_lock_page_cgroup().
+ * should take move_lock_mem_cgroup().
*/
move_unlock_mem_cgroup(pc->mem_cgroup, flags);
}
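
For context, the comments corrected here describe the page-stat locking protocol: a caller brackets its statistics update with mem_cgroup_begin_update_page_stat()/mem_cgroup_end_update_page_stat(), and those helpers take move_lock_mem_cgroup() only while the memcg is under account moving; otherwise rcu_read_lock() alone is enough to delay move_account(). A minimal caller-side sketch, modeled on page_add_file_rmap() from the same kernel era (the surrounding names reflect the ~v3.5 tree and are illustrative only, not part of this patch):

	/* Illustrative caller of the begin/end page-stat helpers (not in this patch). */
	void page_add_file_rmap(struct page *page)
	{
		bool locked;
		unsigned long flags;

		/* Takes rcu_read_lock(); takes move_lock_mem_cgroup() only under account moving. */
		mem_cgroup_begin_update_page_stat(page, &locked, &flags);
		if (atomic_inc_and_test(&page->_mapcount)) {
			__inc_zone_page_state(page, NR_FILE_MAPPED);
			mem_cgroup_inc_page_stat(page, MEMCG_NR_FILE_MAPPED);
		}
		/* Releases move_lock_mem_cgroup() if it was taken, then rcu_read_unlock(). */
		mem_cgroup_end_update_page_stat(page, &locked, &flags);
	}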