Diffstat (limited to 'mm')
-rw-r--r--  mm/kasan/hw_tags.c  |  14
-rw-r--r--  mm/kasan/sw_tags.c  |   2
-rw-r--r--  mm/madvise.c        |  15
-rw-r--r--  mm/memblock.c       |   9
-rw-r--r--  mm/memcontrol.c     |   4
-rw-r--r--  mm/migrate.c        | 151
-rw-r--r--  mm/oom_kill.c       |  15
-rw-r--r--  mm/page_owner.c     |   6
-rw-r--r--  mm/shmem.c          |  24
9 files changed, 58 insertions, 182 deletions
diff --git a/mm/kasan/hw_tags.c b/mm/kasan/hw_tags.c
index dc892119e88f..7355cb534e4f 100644
--- a/mm/kasan/hw_tags.c
+++ b/mm/kasan/hw_tags.c
@@ -106,6 +106,16 @@ static int __init early_kasan_flag_stacktrace(char *arg)
}
early_param("kasan.stacktrace", early_kasan_flag_stacktrace);
+static inline const char *kasan_mode_info(void)
+{
+ if (kasan_mode == KASAN_MODE_ASYNC)
+ return "async";
+ else if (kasan_mode == KASAN_MODE_ASYMM)
+ return "asymm";
+ else
+ return "sync";
+}
+
/* kasan_init_hw_tags_cpu() is called for each CPU. */
void kasan_init_hw_tags_cpu(void)
{
@@ -177,7 +187,9 @@ void __init kasan_init_hw_tags(void)
break;
}
- pr_info("KernelAddressSanitizer initialized\n");
+ pr_info("KernelAddressSanitizer initialized (hw-tags, mode=%s, stacktrace=%s)\n",
+ kasan_mode_info(),
+ kasan_stack_collection_enabled() ? "on" : "off");
}
void kasan_alloc_pages(struct page *page, unsigned int order, gfp_t flags)
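
For illustration: with the extended banner above, a boot using kasan.mode=async and stack collection disabled (hypothetical boot parameters, not part of this diff) would log

    KernelAddressSanitizer initialized (hw-tags, mode=async, stacktrace=off)

where the old message said only "KernelAddressSanitizer initialized".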
diff --git a/mm/kasan/sw_tags.c b/mm/kasan/sw_tags.c
index bd3f540feb47..77f13f391b57 100644
--- a/mm/kasan/sw_tags.c
+++ b/mm/kasan/sw_tags.c
@@ -42,7 +42,7 @@ void __init kasan_init_sw_tags(void)
for_each_possible_cpu(cpu)
per_cpu(prng_state, cpu) = (u32)get_cycles();
- pr_info("KernelAddressSanitizer initialized\n");
+ pr_info("KernelAddressSanitizer initialized (sw-tags)\n");
}
/*
diff --git a/mm/madvise.c b/mm/madvise.c
index 0734db8d53a7..8c927202bbe6 100644
--- a/mm/madvise.c
+++ b/mm/madvise.c
@@ -1235,7 +1235,6 @@ SYSCALL_DEFINE5(process_madvise, int, pidfd, const struct iovec __user *, vec,
struct iovec iovstack[UIO_FASTIOV], iovec;
struct iovec *iov = iovstack;
struct iov_iter iter;
- struct pid *pid;
struct task_struct *task;
struct mm_struct *mm;
size_t total_len;
@@ -1250,18 +1249,12 @@ SYSCALL_DEFINE5(process_madvise, int, pidfd, const struct iovec __user *, vec,
if (ret < 0)
goto out;
- pid = pidfd_get_pid(pidfd, &f_flags);
- if (IS_ERR(pid)) {
- ret = PTR_ERR(pid);
+ task = pidfd_get_task(pidfd, &f_flags);
+ if (IS_ERR(task)) {
+ ret = PTR_ERR(task);
goto free_iov;
}
- task = get_pid_task(pid, PIDTYPE_PID);
- if (!task) {
- ret = -ESRCH;
- goto put_pid;
- }
-
if (!process_madvise_behavior_valid(behavior)) {
ret = -EINVAL;
goto release_task;
@@ -1301,8 +1294,6 @@ release_mm:
mmput(mm);
release_task:
put_task_struct(task);
-put_pid:
- put_pid(pid);
free_iov:
kfree(iov);
out:
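
This hunk and the matching mm/oom_kill.c hunk below collapse the two-step pidfd_get_pid() + get_pid_task() dance into a single helper call. A minimal sketch of the resulting pattern, extracted from the hunks (the surrounding function is omitted):

    struct task_struct *task;
    unsigned int f_flags;

    /* One call replaces pidfd_get_pid() + get_pid_task(). */
    task = pidfd_get_task(pidfd, &f_flags);
    if (IS_ERR(task))
            return PTR_ERR(task);

    /* ... operate on the target task ... */

    put_task_struct(task);  /* drop the reference pidfd_get_task() took */

The error paths shrink accordingly: there is no longer a separate struct pid reference to drop.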
diff --git a/mm/memblock.c b/mm/memblock.c
index 659bf0ffb086..1018e50566f3 100644
--- a/mm/memblock.c
+++ b/mm/memblock.c
@@ -287,7 +287,7 @@ static phys_addr_t __init_memblock memblock_find_in_range_node(phys_addr_t size,
{
/* pump up @end */
if (end == MEMBLOCK_ALLOC_ACCESSIBLE ||
- end == MEMBLOCK_ALLOC_KASAN)
+ end == MEMBLOCK_ALLOC_NOLEAKTRACE)
end = memblock.current_limit;
/* avoid allocating the first page */
@@ -1387,8 +1387,11 @@ again:
return 0;
done:
- /* Skip kmemleak for kasan_init() due to high volume. */
- if (end != MEMBLOCK_ALLOC_KASAN)
+ /*
+ * Skip kmemleak for those places like kasan_init() and
+ * early_pgtable_alloc() due to high volume.
+ */
+ if (end != MEMBLOCK_ALLOC_NOLEAKTRACE)
/*
* The min_count is set to 0 so that memblock allocated
* blocks are never reported as leaks. This is because many
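
A sketch of the caller side, under the assumption (not shown in this diff) that early users such as early_pgtable_alloc() pass the renamed constant as the @end limit, which memblock_find_in_range_node() above widens to memblock.current_limit:

    /* Hypothetical early allocation that skips kmemleak registration. */
    phys_addr_t pa = memblock_phys_alloc_range(PAGE_SIZE, PAGE_SIZE, 0,
                                               MEMBLOCK_ALLOC_NOLEAKTRACE);
    if (!pa)
            panic("early pagetable allocation failed");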
diff --git a/mm/memcontrol.c b/mm/memcontrol.c
index 508bcea7df56..781605e92015 100644
--- a/mm/memcontrol.c
+++ b/mm/memcontrol.c
@@ -2058,13 +2058,11 @@ again:
memcg->move_lock_task = current;
memcg->move_lock_flags = flags;
}
-EXPORT_SYMBOL(folio_memcg_lock);
void lock_page_memcg(struct page *page)
{
folio_memcg_lock(page_folio(page));
}
-EXPORT_SYMBOL(lock_page_memcg);
static void __folio_memcg_unlock(struct mem_cgroup *memcg)
{
@@ -2092,13 +2090,11 @@ void folio_memcg_unlock(struct folio *folio)
{
__folio_memcg_unlock(folio_memcg(folio));
}
-EXPORT_SYMBOL(folio_memcg_unlock);
void unlock_page_memcg(struct page *page)
{
folio_memcg_unlock(page_folio(page));
}
-EXPORT_SYMBOL(unlock_page_memcg);
struct obj_stock {
#ifdef CONFIG_MEMCG_KMEM
diff --git a/mm/migrate.c b/mm/migrate.c
index a11e948593df..cf25b00f03c8 100644
--- a/mm/migrate.c
+++ b/mm/migrate.c
@@ -404,12 +404,6 @@ int folio_migrate_mapping(struct address_space *mapping,
newzone = folio_zone(newfolio);
xas_lock_irq(&xas);
- if (folio_ref_count(folio) != expected_count ||
- xas_load(&xas) != folio) {
- xas_unlock_irq(&xas);
- return -EAGAIN;
- }
-
if (!folio_ref_freeze(folio, expected_count)) {
xas_unlock_irq(&xas);
return -EAGAIN;
@@ -2368,7 +2362,6 @@ again:
* can't be dropped from it).
*/
get_page(page);
- migrate->cpages++;
/*
* Optimize for the common case where page is only mapped once
@@ -2378,7 +2371,7 @@ again:
if (trylock_page(page)) {
pte_t swp_pte;
- mpfn |= MIGRATE_PFN_LOCKED;
+ migrate->cpages++;
ptep_get_and_clear(mm, addr, ptep);
/* Setup special migration page table entry */
@@ -2412,6 +2405,9 @@ again:
if (pte_present(pte))
unmapped++;
+ } else {
+ put_page(page);
+ mpfn = 0;
}
next:
@@ -2516,15 +2512,17 @@ static bool migrate_vma_check_page(struct page *page)
}
/*
- * migrate_vma_prepare() - lock pages and isolate them from the lru
+ * migrate_vma_unmap() - replace page mapping with special migration pte entry
* @migrate: migrate struct containing all migration information
*
- * This locks pages that have been collected by migrate_vma_collect(). Once each
- * page is locked it is isolated from the lru (for non-device pages). Finally,
- * the ref taken by migrate_vma_collect() is dropped, as locked pages cannot be
- * migrated by concurrent kernel threads.
+ * Isolate pages from the LRU and replace mappings (CPU page table pte) with a
+ * special migration pte entry and check if it has been pinned. Pinned pages are
+ * restored because we cannot migrate them.
+ *
+ * This is the last step before we call the device driver callback to allocate
+ * destination memory and copy contents of original page over to new page.
*/
-static void migrate_vma_prepare(struct migrate_vma *migrate)
+static void migrate_vma_unmap(struct migrate_vma *migrate)
{
const unsigned long npages = migrate->npages;
const unsigned long start = migrate->start;
@@ -2533,32 +2531,12 @@ static void migrate_vma_prepare(struct migrate_vma *migrate)
lru_add_drain();
- for (i = 0; (i < npages) && migrate->cpages; i++) {
+ for (i = 0; i < npages; i++) {
struct page *page = migrate_pfn_to_page(migrate->src[i]);
- bool remap = true;
if (!page)
continue;
- if (!(migrate->src[i] & MIGRATE_PFN_LOCKED)) {
- /*
- * Because we are migrating several pages there can be
- * a deadlock between 2 concurrent migration where each
- * are waiting on each other page lock.
- *
- * Make migrate_vma() a best effort thing and backoff
- * for any page we can not lock right away.
- */
- if (!trylock_page(page)) {
- migrate->src[i] = 0;
- migrate->cpages--;
- put_page(page);
- continue;
- }
- remap = false;
- migrate->src[i] |= MIGRATE_PFN_LOCKED;
- }
-
/* ZONE_DEVICE pages are not on LRU */
if (!is_zone_device_page(page)) {
if (!PageLRU(page) && allow_drain) {
@@ -2568,16 +2546,9 @@ static void migrate_vma_prepare(struct migrate_vma *migrate)
}
if (isolate_lru_page(page)) {
- if (remap) {
- migrate->src[i] &= ~MIGRATE_PFN_MIGRATE;
- migrate->cpages--;
- restore++;
- } else {
- migrate->src[i] = 0;
- unlock_page(page);
- migrate->cpages--;
- put_page(page);
- }
+ migrate->src[i] &= ~MIGRATE_PFN_MIGRATE;
+ migrate->cpages--;
+ restore++;
continue;
}
@@ -2585,80 +2556,20 @@ static void migrate_vma_prepare(struct migrate_vma *migrate)
put_page(page);
}
- if (!migrate_vma_check_page(page)) {
- if (remap) {
- migrate->src[i] &= ~MIGRATE_PFN_MIGRATE;
- migrate->cpages--;
- restore++;
-
- if (!is_zone_device_page(page)) {
- get_page(page);
- putback_lru_page(page);
- }
- } else {
- migrate->src[i] = 0;
- unlock_page(page);
- migrate->cpages--;
+ if (page_mapped(page))
+ try_to_migrate(page, 0);
- if (!is_zone_device_page(page))
- putback_lru_page(page);
- else
- put_page(page);
+ if (page_mapped(page) || !migrate_vma_check_page(page)) {
+ if (!is_zone_device_page(page)) {
+ get_page(page);
+ putback_lru_page(page);
}
- }
- }
-
- for (i = 0, addr = start; i < npages && restore; i++, addr += PAGE_SIZE) {
- struct page *page = migrate_pfn_to_page(migrate->src[i]);
-
- if (!page || (migrate->src[i] & MIGRATE_PFN_MIGRATE))
- continue;
-
- remove_migration_pte(page, migrate->vma, addr, page);
-
- migrate->src[i] = 0;
- unlock_page(page);
- put_page(page);
- restore--;
- }
-}
-
-/*
- * migrate_vma_unmap() - replace page mapping with special migration pte entry
- * @migrate: migrate struct containing all migration information
- *
- * Replace page mapping (CPU page table pte) with a special migration pte entry
- * and check again if it has been pinned. Pinned pages are restored because we
- * cannot migrate them.
- *
- * This is the last step before we call the device driver callback to allocate
- * destination memory and copy contents of original page over to new page.
- */
-static void migrate_vma_unmap(struct migrate_vma *migrate)
-{
- const unsigned long npages = migrate->npages;
- const unsigned long start = migrate->start;
- unsigned long addr, i, restore = 0;
-
- for (i = 0; i < npages; i++) {
- struct page *page = migrate_pfn_to_page(migrate->src[i]);
- if (!page || !(migrate->src[i] & MIGRATE_PFN_MIGRATE))
+ migrate->src[i] &= ~MIGRATE_PFN_MIGRATE;
+ migrate->cpages--;
+ restore++;
continue;
-
- if (page_mapped(page)) {
- try_to_migrate(page, 0);
- if (page_mapped(page))
- goto restore;
}
-
- if (migrate_vma_check_page(page))
- continue;
-
-restore:
- migrate->src[i] &= ~MIGRATE_PFN_MIGRATE;
- migrate->cpages--;
- restore++;
}
for (addr = start, i = 0; i < npages && restore; addr += PAGE_SIZE, i++) {
@@ -2671,12 +2582,8 @@ restore:
migrate->src[i] = 0;
unlock_page(page);
+ put_page(page);
restore--;
-
- if (is_zone_device_page(page))
- put_page(page);
- else
- putback_lru_page(page);
}
}
@@ -2699,8 +2606,8 @@ restore:
* it for all those entries (ie with MIGRATE_PFN_VALID and MIGRATE_PFN_MIGRATE
* flag set). Once these are allocated and copied, the caller must update each
* corresponding entry in the dst array with the pfn value of the destination
- * page and with the MIGRATE_PFN_VALID and MIGRATE_PFN_LOCKED flags set
- * (destination pages must have their struct pages locked, via lock_page()).
+ * page and with MIGRATE_PFN_VALID. Destination pages must be locked via
+ * lock_page().
*
* Note that the caller does not have to migrate all the pages that are marked
* with MIGRATE_PFN_MIGRATE flag in src array unless this is a migration from
@@ -2770,8 +2677,6 @@ int migrate_vma_setup(struct migrate_vma *args)
migrate_vma_collect(args);
if (args->cpages)
- migrate_vma_prepare(args);
- if (args->cpages)
migrate_vma_unmap(args);
/*
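
To place the MIGRATE_PFN_LOCKED removal in context, here is a condensed sketch of the driver-side sequence the documentation above describes (vma, start, end, src_pfns, dst_pfns, and alloc_dst_page() are hypothetical caller-supplied pieces, not from this diff). migrate_vma_setup() now goes straight from collect to unmap with no prepare step, and dst entries need only migrate_pfn(), which sets MIGRATE_PFN_VALID; the destination page must still be locked via lock_page():

    struct migrate_vma args = {
            .vma    = vma,
            .start  = start,
            .end    = end,
            .src    = src_pfns,     /* caller-allocated pfn arrays */
            .dst    = dst_pfns,
    };
    unsigned long i;
    int ret;

    ret = migrate_vma_setup(&args); /* collect + unmap; no prepare step */
    if (ret)
            return ret;

    for (i = 0; i < args.npages; i++) {
            struct page *newpage;

            if (!(args.src[i] & MIGRATE_PFN_MIGRATE))
                    continue;

            newpage = alloc_dst_page();     /* placeholder for driver code */
            lock_page(newpage);             /* dst must still be locked */
            /* ... copy the source page's contents to newpage ... */
            args.dst[i] = migrate_pfn(page_to_pfn(newpage));
    }

    migrate_vma_pages(&args);
    migrate_vma_finalize(&args);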
diff --git a/mm/oom_kill.c b/mm/oom_kill.c
index 195b3661da3d..1ddabefcfb5a 100644
--- a/mm/oom_kill.c
+++ b/mm/oom_kill.c
@@ -1150,21 +1150,14 @@ SYSCALL_DEFINE2(process_mrelease, int, pidfd, unsigned int, flags)
struct task_struct *p;
unsigned int f_flags;
bool reap = false;
- struct pid *pid;
long ret = 0;
if (flags)
return -EINVAL;
- pid = pidfd_get_pid(pidfd, &f_flags);
- if (IS_ERR(pid))
- return PTR_ERR(pid);
-
- task = get_pid_task(pid, PIDTYPE_TGID);
- if (!task) {
- ret = -ESRCH;
- goto put_pid;
- }
+ task = pidfd_get_task(pidfd, &f_flags);
+ if (IS_ERR(task))
+ return PTR_ERR(task);
/*
* Make sure to choose a thread which still has a reference to mm
@@ -1204,8 +1197,6 @@ drop_mm:
mmput(mm);
put_task:
put_task_struct(task);
-put_pid:
- put_pid(pid);
return ret;
#else
return -ENOSYS;
diff --git a/mm/page_owner.c b/mm/page_owner.c
index 79936db59859..4f924957ce7a 100644
--- a/mm/page_owner.c
+++ b/mm/page_owner.c
@@ -125,7 +125,7 @@ static noinline depot_stack_handle_t save_stack(gfp_t flags)
return handle;
}
-void __reset_page_owner(struct page *page, unsigned int order)
+void __reset_page_owner(struct page *page, unsigned short order)
{
int i;
struct page_ext *page_ext;
@@ -149,7 +149,7 @@ void __reset_page_owner(struct page *page, unsigned int order)
static inline void __set_page_owner_handle(struct page_ext *page_ext,
depot_stack_handle_t handle,
- unsigned int order, gfp_t gfp_mask)
+ unsigned short order, gfp_t gfp_mask)
{
struct page_owner *page_owner;
int i;
@@ -169,7 +169,7 @@ static inline void __set_page_owner_handle(struct page_ext *page_ext,
}
}
-noinline void __set_page_owner(struct page *page, unsigned int order,
+noinline void __set_page_owner(struct page *page, unsigned short order,
gfp_t gfp_mask)
{
struct page_ext *page_ext = lookup_page_ext(page);
diff --git a/mm/shmem.c b/mm/shmem.c
index 23c91a8beb78..f0eee4e221a7 100644
--- a/mm/shmem.c
+++ b/mm/shmem.c
@@ -2960,28 +2960,6 @@ static int shmem_rmdir(struct inode *dir, struct dentry *dentry)
return shmem_unlink(dir, dentry);
}
-static int shmem_exchange(struct inode *old_dir, struct dentry *old_dentry, struct inode *new_dir, struct dentry *new_dentry)
-{
- bool old_is_dir = d_is_dir(old_dentry);
- bool new_is_dir = d_is_dir(new_dentry);
-
- if (old_dir != new_dir && old_is_dir != new_is_dir) {
- if (old_is_dir) {
- drop_nlink(old_dir);
- inc_nlink(new_dir);
- } else {
- drop_nlink(new_dir);
- inc_nlink(old_dir);
- }
- }
- old_dir->i_ctime = old_dir->i_mtime =
- new_dir->i_ctime = new_dir->i_mtime =
- d_inode(old_dentry)->i_ctime =
- d_inode(new_dentry)->i_ctime = current_time(old_dir);
-
- return 0;
-}
-
static int shmem_whiteout(struct user_namespace *mnt_userns,
struct inode *old_dir, struct dentry *old_dentry)
{
@@ -3027,7 +3005,7 @@ static int shmem_rename2(struct user_namespace *mnt_userns,
return -EINVAL;
if (flags & RENAME_EXCHANGE)
- return shmem_exchange(old_dir, old_dentry, new_dir, new_dentry);
+ return simple_rename_exchange(old_dir, old_dentry, new_dir, new_dentry);
if (!simple_empty(new_dentry))
return -ENOTEMPTY;
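
For comparison, a sketch of how any other simple filesystem's ->rename could dispatch to the same libfs helper that replaces shmem_exchange() here (myfs_rename and its flag mask are hypothetical, modeled on shmem_rename2() above):

    static int myfs_rename(struct user_namespace *mnt_userns,
                           struct inode *old_dir, struct dentry *old_dentry,
                           struct inode *new_dir, struct dentry *new_dentry,
                           unsigned int flags)
    {
            if (flags & ~(RENAME_NOREPLACE | RENAME_EXCHANGE))
                    return -EINVAL;

            if (flags & RENAME_EXCHANGE)
                    return simple_rename_exchange(old_dir, old_dentry,
                                                  new_dir, new_dentry);

            /* ... ordinary rename path ... */
            return 0;
    }

Like the removed shmem_exchange(), the generic helper fixes up i_nlink when a directory and a non-directory swap across different parents and stamps ctime/mtime on all four inodes involved.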