Diffstat (limited to 'mm/memcontrol.c')
 mm/memcontrol.c | 26 +++++++++++++++++++-------
 1 file changed, 19 insertions(+), 7 deletions(-)
diff --git a/mm/memcontrol.c b/mm/memcontrol.c
index 774bd6e21e27..73692cd8c142 100644
--- a/mm/memcontrol.c
+++ b/mm/memcontrol.c
@@ -2936,7 +2936,8 @@ void mem_cgroup_commit_charge(struct folio *folio, struct mem_cgroup *memcg)
* Moreover, it should not come from DMA buffer and is not readily
* reclaimable. So those GFP bits should be masked off.
*/
-#define OBJCGS_CLEAR_MASK (__GFP_DMA | __GFP_RECLAIMABLE | __GFP_ACCOUNT)
+#define OBJCGS_CLEAR_MASK (__GFP_DMA | __GFP_RECLAIMABLE | \
+ __GFP_ACCOUNT | __GFP_NOFAIL)
/*
* mod_objcg_mlstate() may be called with irq enabled, so
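Note on the mask: the per-slab objcg vector is allocated with the gfp flags of
the slab allocation that triggered it, so flags that only make sense for the
original allocation must be stripped first. A minimal sketch of that pattern,
loosely modeled on memcg_alloc_slab_cgroups() (helper name and shape are
simplified here, not the exact upstream code):

static struct obj_cgroup **alloc_objcg_vec(unsigned int objects,
					   gfp_t gfp, int nid)
{
	/*
	 * Strip flags meant only for the originating allocation. With
	 * __GFP_NOFAIL now in the clear mask, a caller's NOFAIL no longer
	 * leaks into this internal metadata allocation.
	 */
	gfp &= ~OBJCGS_CLEAR_MASK;
	return kcalloc_node(objects, sizeof(struct obj_cgroup *), gfp, nid);
}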
@@ -3165,6 +3166,7 @@ __always_inline struct obj_cgroup *current_obj_cgroup(void)
return NULL;
from_memcg:
+ objcg = NULL;
for (; !mem_cgroup_is_root(memcg); memcg = parent_mem_cgroup(memcg)) {
/*
* Memcg pointer is protected by scope (see set_active_memcg())
@@ -3175,7 +3177,6 @@ from_memcg:
objcg = rcu_dereference_check(memcg->objcg, 1);
if (likely(objcg))
break;
- objcg = NULL;
}
return objcg;
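Why the initialization moves: when the active memcg is already the root
cgroup, the loop executes zero times, so clearing objcg only inside the loop
(the removed line) left it unset on that path and the function could return a
garbage pointer. The hazard in miniature, before the fix:

	struct obj_cgroup *objcg;		/* uninitialized local */

	for (; !mem_cgroup_is_root(memcg); memcg = parent_mem_cgroup(memcg)) {
		objcg = rcu_dereference_check(memcg->objcg, 1);
		if (likely(objcg))
			break;
		objcg = NULL;	/* only runs if the loop body is entered */
	}
	return objcg;		/* garbage when memcg started as root */

Hoisting objcg = NULL above the loop makes the zero-iteration case return
NULL, as it should for the root cgroup.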
@@ -4378,7 +4379,7 @@ static void __mem_cgroup_threshold(struct mem_cgroup *memcg, bool swap)
* only one element of the array here.
*/
for (; i >= 0 && unlikely(t->entries[i].threshold > usage); i--)
- eventfd_signal(t->entries[i].eventfd, 1);
+ eventfd_signal(t->entries[i].eventfd);
/* i = current_threshold + 1 */
i++;
@@ -4390,7 +4391,7 @@ static void __mem_cgroup_threshold(struct mem_cgroup *memcg, bool swap)
* only one element of the array here.
*/
for (; i < t->size && unlikely(t->entries[i].threshold <= usage); i++)
- eventfd_signal(t->entries[i].eventfd, 1);
+ eventfd_signal(t->entries[i].eventfd);
/* Update current_threshold */
t->current_threshold = i - 1;
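The eventfd_signal() changes here follow the tree-wide API cleanup that
dropped the counter argument: every caller passed 1, and the new
eventfd_signal(ctx) still adds exactly 1, so userspace observes no difference.
For context, a sketch of how these threshold notifications are consumed
through the cgroup v1 interface (the mount point and group name "grp" are
examples, and error handling is omitted):

#include <fcntl.h>
#include <stdint.h>
#include <stdio.h>
#include <sys/eventfd.h>
#include <unistd.h>

int main(void)
{
	uint64_t hits;
	char cmd[64];
	int efd = eventfd(0, 0);
	int ufd = open("/sys/fs/cgroup/memory/grp/memory.usage_in_bytes",
		       O_RDONLY);
	int cfd = open("/sys/fs/cgroup/memory/grp/cgroup.event_control",
		       O_WRONLY);

	/* "<event_fd> <usage_fd> <threshold>" arms a threshold notifier */
	int len = snprintf(cmd, sizeof(cmd), "%d %d %llu",
			   efd, ufd, 64ULL << 20);
	write(cfd, cmd, len);

	/* blocks until __mem_cgroup_threshold() signals the eventfd */
	read(efd, &hits, sizeof(hits));
	printf("threshold crossed %llu time(s)\n", (unsigned long long)hits);
	return 0;
}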
@@ -4430,7 +4431,7 @@ static int mem_cgroup_oom_notify_cb(struct mem_cgroup *memcg)
spin_lock(&memcg_oom_lock);
list_for_each_entry(ev, &memcg->oom_notify, list)
- eventfd_signal(ev->eventfd, 1);
+ eventfd_signal(ev->eventfd);
spin_unlock(&memcg_oom_lock);
return 0;
@@ -4649,7 +4650,7 @@ static int mem_cgroup_oom_register_event(struct mem_cgroup *memcg,
/* already in OOM ? */
if (memcg->under_oom)
- eventfd_signal(eventfd, 1);
+ eventfd_signal(eventfd);
spin_unlock(&memcg_oom_lock);
return 0;
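The same cgroup.event_control registration drives OOM notifications; the
under_oom check above signals the new eventfd immediately when the group is
already under OOM, so a listener that registers late never misses an
in-progress event. A sketch, with the same hypothetical paths as the previous
example:

	int efd = eventfd(0, 0);
	int ofd = open("/sys/fs/cgroup/memory/grp/memory.oom_control",
		       O_RDONLY);
	int cfd = open("/sys/fs/cgroup/memory/grp/cgroup.event_control",
		       O_WRONLY);

	/* "<event_fd> <oom_control_fd>" registers for OOM notifications */
	dprintf(cfd, "%d %d", efd, ofd);

	/* a read(efd, ...) now returns immediately if the group is
	 * already under OOM, thanks to the eventfd_signal() above */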
@@ -4941,7 +4942,7 @@ static void memcg_event_remove(struct work_struct *work)
event->unregister_event(memcg, event->eventfd);
/* Notify userspace the event is going away. */
- eventfd_signal(event->eventfd, 1);
+ eventfd_signal(event->eventfd);
eventfd_ctx_put(event->eventfd);
kfree(event);
@@ -7542,6 +7543,17 @@ void mem_cgroup_migrate(struct folio *old, struct folio *new)
/* Transfer the charge and the css ref */
commit_charge(new, memcg);
+	/*
+	 * If the old folio is a large folio and is in the split queue, it
+	 * needs to be removed from the split queue now, otherwise
+	 * destroy_large_folio() may look up an incorrect split queue after
+	 * the memcg of the old folio has been cleared.
+	 *
+	 * In addition, the old folio is about to be freed after migration,
+	 * so removing it from the split queue a bit earlier seems reasonable.
+	 */
+ if (folio_test_large(old) && folio_test_large_rmappable(old))
+ folio_undo_large_rmappable(old);
old->memcg_data = 0;
}
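For context on why the ordering matters: the deferred split queue of a large
folio is resolved through its memcg, roughly as below (simplified from
get_deferred_split_queue() in mm/huge_memory.c). Once old->memcg_data is
cleared, the lookup falls back to the per-node queue while the folio may still
be linked on the memcg's queue, so a later destroy_large_folio() would take
the wrong lock and corrupt the list:

static struct deferred_split *get_deferred_split_queue(struct folio *folio)
{
	struct mem_cgroup *memcg = folio_memcg(folio);
	struct pglist_data *pgdat = NODE_DATA(folio_nid(folio));

	/* after old->memcg_data = 0, this branch is no longer taken */
	if (memcg)
		return &memcg->deferred_split_queue;
	else
		return &pgdat->deferred_split_queue;
}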