Diffstat (limited to 'mm/migrate.c')
-rw-r--r--  mm/migrate.c  61
1 file changed, 32 insertions(+), 29 deletions(-)
diff --git a/mm/migrate.c b/mm/migrate.c
index a0aeb3fe46a7..a6a7743ee98f 100644
--- a/mm/migrate.c
+++ b/mm/migrate.c
@@ -960,7 +960,7 @@ static int __unmap_and_move(struct page *page, struct page *newpage,
int force, enum migrate_mode mode)
{
int rc = -EAGAIN;
- int page_was_mapped = 0;
+ bool page_was_mapped = false;
struct anon_vma *anon_vma = NULL;
bool is_lru = !__PageMovable(page);
@@ -1008,7 +1008,7 @@ static int __unmap_and_move(struct page *page, struct page *newpage,
}
/*
- * By try_to_unmap(), page->mapcount goes down to 0 here. In this case,
+ * By try_to_migrate(), page->mapcount goes down to 0 here. In this case,
* we cannot notice that anon_vma is freed while we migrate a page.
* This get_anon_vma() delays freeing anon_vma pointer until the end
* of migration. File cache pages are no problem because of page_lock()
@@ -1063,7 +1063,7 @@ static int __unmap_and_move(struct page *page, struct page *newpage,
VM_BUG_ON_PAGE(PageAnon(page) && !PageKsm(page) && !anon_vma,
page);
try_to_migrate(page, 0);
- page_was_mapped = 1;
+ page_was_mapped = true;
}
if (!page_mapped(page))
@@ -1900,6 +1900,23 @@ set_status:
mmap_read_unlock(mm);
}
+static int get_compat_pages_array(const void __user *chunk_pages[],
+ const void __user * __user *pages,
+ unsigned long chunk_nr)
+{
+ compat_uptr_t __user *pages32 = (compat_uptr_t __user *)pages;
+ compat_uptr_t p;
+ int i;
+
+ for (i = 0; i < chunk_nr; i++) {
+ if (get_user(p, pages32 + i))
+ return -EFAULT;
+ chunk_pages[i] = compat_ptr(p);
+ }
+
+ return 0;
+}
+
/*
* Determine the nodes of a user array of pages and store it in
* a user array of status.
@@ -1919,8 +1936,15 @@ static int do_pages_stat(struct mm_struct *mm, unsigned long nr_pages,
if (chunk_nr > DO_PAGES_STAT_CHUNK_NR)
chunk_nr = DO_PAGES_STAT_CHUNK_NR;
- if (copy_from_user(chunk_pages, pages, chunk_nr * sizeof(*chunk_pages)))
- break;
+ if (in_compat_syscall()) {
+ if (get_compat_pages_array(chunk_pages, pages,
+ chunk_nr))
+ break;
+ } else {
+ if (copy_from_user(chunk_pages, pages,
+ chunk_nr * sizeof(*chunk_pages)))
+ break;
+ }
do_pages_stat_array(mm, chunk_nr, chunk_pages, chunk_status);
@@ -2023,28 +2047,6 @@ SYSCALL_DEFINE6(move_pages, pid_t, pid, unsigned long, nr_pages,
return kernel_move_pages(pid, nr_pages, pages, nodes, status, flags);
}
-#ifdef CONFIG_COMPAT
-COMPAT_SYSCALL_DEFINE6(move_pages, pid_t, pid, compat_ulong_t, nr_pages,
- compat_uptr_t __user *, pages32,
- const int __user *, nodes,
- int __user *, status,
- int, flags)
-{
- const void __user * __user *pages;
- int i;
-
- pages = compat_alloc_user_space(nr_pages * sizeof(void *));
- for (i = 0; i < nr_pages; i++) {
- compat_uptr_t p;
-
- if (get_user(p, pages32 + i) ||
- put_user(compat_ptr(p), pages + i))
- return -EFAULT;
- }
- return kernel_move_pages(pid, nr_pages, pages, nodes, status, flags);
-}
-#endif /* CONFIG_COMPAT */
-
#ifdef CONFIG_NUMA_BALANCING
/*
* Returns true if this is a safe migration target node for misplaced NUMA
@@ -2107,6 +2109,7 @@ out:
static int numamigrate_isolate_page(pg_data_t *pgdat, struct page *page)
{
int page_lru;
+ int nr_pages = thp_nr_pages(page);
VM_BUG_ON_PAGE(compound_order(page) && !PageTransHuge(page), page);
@@ -2115,7 +2118,7 @@ static int numamigrate_isolate_page(pg_data_t *pgdat, struct page *page)
return 0;
/* Avoid migrating to a node that is nearly full */
- if (!migrate_balanced_pgdat(pgdat, compound_nr(page)))
+ if (!migrate_balanced_pgdat(pgdat, nr_pages))
return 0;
if (isolate_lru_page(page))
@@ -2123,7 +2126,7 @@ static int numamigrate_isolate_page(pg_data_t *pgdat, struct page *page)
page_lru = page_is_file_lru(page);
mod_node_page_state(page_pgdat(page), NR_ISOLATED_ANON + page_lru,
- thp_nr_pages(page));
+ nr_pages);
/*
* Isolating the page has taken another reference, so the
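For reference, a minimal user-space sketch (not the kernel code above) of the pointer-widening idea behind the new get_compat_pages_array() helper: a 32-bit caller hands over an array of 32-bit pointer values, and the 64-bit side widens each entry into a native pointer before using it, much as compat_ptr() does after get_user() in the diff. The names widen_ptr() and widen_compat_array() are invented for this illustration, and the real helper must go through get_user() because its source array lives in user memory.

/*
 * Stand-alone illustration only; compile with any C compiler.
 * compat_uptr_t here is just "a user pointer stored in 32 bits".
 */
#include <stdint.h>
#include <stdio.h>

typedef uint32_t compat_uptr_t;

/* Widen one 32-bit pointer value to a native pointer (cf. compat_ptr()). */
static const void *widen_ptr(compat_uptr_t p)
{
	return (const void *)(uintptr_t)p;
}

/* Convert a chunk of 32-bit entries into an array of native pointers. */
static int widen_compat_array(const void *chunk_pages[],
			      const compat_uptr_t *pages32,
			      unsigned long chunk_nr)
{
	unsigned long i;

	for (i = 0; i < chunk_nr; i++)
		chunk_pages[i] = widen_ptr(pages32[i]);

	return 0;
}

int main(void)
{
	compat_uptr_t pages32[3] = { 0x1000, 0x2000, 0x3000 };
	const void *chunk_pages[3];
	unsigned long i;

	widen_compat_array(chunk_pages, pages32, 3);
	for (i = 0; i < 3; i++)
		printf("entry %lu -> %p\n", i, (void *)chunk_pages[i]);

	return 0;
}

Seen against the removed COMPAT_SYSCALL_DEFINE6 wrapper, the gain in the diff is that the compat path no longer bounces every entry through a compat_alloc_user_space() scratch buffer with a get_user()/put_user() pair; do_pages_stat() now detects a compat caller with in_compat_syscall(), reads each 32-bit entry once, and converts it in place.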