Diffstat (limited to 'mm/memcontrol.c')
-rw-r--r--  mm/memcontrol.c  54
1 file changed, 44 insertions, 10 deletions
diff --git a/mm/memcontrol.c b/mm/memcontrol.c
index 78fd7cfb4f92..1f204a262054 100644
--- a/mm/memcontrol.c
+++ b/mm/memcontrol.c
@@ -671,6 +671,8 @@ static void flush_memcg_stats_dwork(struct work_struct *w)
/* Subset of vm_event_item to report for memcg event stats */
static const unsigned int memcg_vm_event_stat[] = {
+ PGPGIN,
+ PGPGOUT,
PGSCAN_KSWAPD,
PGSCAN_DIRECT,
PGSTEAL_KSWAPD,
@@ -692,14 +694,30 @@ static const unsigned int memcg_vm_event_stat[] = {
#endif
};
+#define NR_MEMCG_EVENTS ARRAY_SIZE(memcg_vm_event_stat)
+static int mem_cgroup_events_index[NR_VM_EVENT_ITEMS] __read_mostly;
+
+static void init_memcg_events(void)
+{
+ int i;
+
+ for (i = 0; i < NR_MEMCG_EVENTS; ++i)
+ mem_cgroup_events_index[memcg_vm_event_stat[i]] = i + 1;
+}
+
+static inline int memcg_events_index(enum vm_event_item idx)
+{
+ return mem_cgroup_events_index[idx] - 1;
+}
+
struct memcg_vmstats_percpu {
/* Local (CPU and cgroup) page state & events */
long state[MEMCG_NR_STAT];
- unsigned long events[NR_VM_EVENT_ITEMS];
+ unsigned long events[NR_MEMCG_EVENTS];
/* Delta calculation for lockless upward propagation */
long state_prev[MEMCG_NR_STAT];
- unsigned long events_prev[NR_VM_EVENT_ITEMS];
+ unsigned long events_prev[NR_MEMCG_EVENTS];
/* Cgroup1: threshold notifications & softlimit tree updates */
unsigned long nr_page_events;
@@ -709,11 +727,11 @@ struct memcg_vmstats_percpu {
struct memcg_vmstats {
/* Aggregated (CPU and subtree) page state & events */
long state[MEMCG_NR_STAT];
- unsigned long events[NR_VM_EVENT_ITEMS];
+ unsigned long events[NR_MEMCG_EVENTS];
/* Pending child counts during tree propagation */
long state_pending[MEMCG_NR_STAT];
- unsigned long events_pending[NR_VM_EVENT_ITEMS];
+ unsigned long events_pending[NR_MEMCG_EVENTS];
};
unsigned long memcg_page_state(struct mem_cgroup *memcg, int idx)
@@ -873,27 +891,37 @@ void __mod_lruvec_kmem_state(void *p, enum node_stat_item idx, int val)
void __count_memcg_events(struct mem_cgroup *memcg, enum vm_event_item idx,
unsigned long count)
{
- if (mem_cgroup_disabled())
+ int index = memcg_events_index(idx);
+
+ if (mem_cgroup_disabled() || index < 0)
return;
memcg_stats_lock();
- __this_cpu_add(memcg->vmstats_percpu->events[idx], count);
+ __this_cpu_add(memcg->vmstats_percpu->events[index], count);
memcg_rstat_updated(memcg, count);
memcg_stats_unlock();
}
static unsigned long memcg_events(struct mem_cgroup *memcg, int event)
{
- return READ_ONCE(memcg->vmstats->events[event]);
+ int index = memcg_events_index(event);
+
+ if (index < 0)
+ return 0;
+ return READ_ONCE(memcg->vmstats->events[index]);
}
static unsigned long memcg_events_local(struct mem_cgroup *memcg, int event)
{
long x = 0;
int cpu;
+ int index = memcg_events_index(event);
+
+ if (index < 0)
+ return 0;
for_each_possible_cpu(cpu)
- x += per_cpu(memcg->vmstats_percpu->events[event], cpu);
+ x += per_cpu(memcg->vmstats_percpu->events[index], cpu);
return x;
}
@@ -1564,10 +1592,15 @@ static void memory_stat_format(struct mem_cgroup *memcg, char *buf, int bufsize)
memcg_events(memcg, PGSTEAL_KSWAPD) +
memcg_events(memcg, PGSTEAL_DIRECT));
- for (i = 0; i < ARRAY_SIZE(memcg_vm_event_stat); i++)
+ for (i = 0; i < ARRAY_SIZE(memcg_vm_event_stat); i++) {
+ if (memcg_vm_event_stat[i] == PGPGIN ||
+ memcg_vm_event_stat[i] == PGPGOUT)
+ continue;
+
seq_buf_printf(&s, "%s %lu\n",
vm_event_name(memcg_vm_event_stat[i]),
memcg_events(memcg, memcg_vm_event_stat[i]));
+ }
/* The above should easily fit into one page */
WARN_ON_ONCE(seq_buf_has_overflowed(&s));
@@ -5309,6 +5342,7 @@ mem_cgroup_css_alloc(struct cgroup_subsys_state *parent_css)
page_counter_init(&memcg->kmem, &parent->kmem);
page_counter_init(&memcg->tcpmem, &parent->tcpmem);
} else {
+ init_memcg_events();
page_counter_init(&memcg->memory, NULL);
page_counter_init(&memcg->swap, NULL);
page_counter_init(&memcg->kmem, NULL);
@@ -5477,7 +5511,7 @@ static void mem_cgroup_css_rstat_flush(struct cgroup_subsys_state *css, int cpu)
parent->vmstats->state_pending[i] += delta;
}
- for (i = 0; i < NR_VM_EVENT_ITEMS; i++) {
+ for (i = 0; i < NR_MEMCG_EVENTS; i++) {
delta = memcg->vmstats->events_pending[i];
if (delta)
memcg->vmstats->events_pending[i] = 0;
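
For reference, below is a minimal standalone sketch of the sparse-to-dense index scheme this patch introduces: only the subset of vm_event_item values listed in memcg_vm_event_stat[] gets a slot in the per-memcg events arrays, and the +1/-1 trick lets memcg_events_index() return -1 for untracked events so callers can skip them. The enum values and the reduced subset below are illustrative placeholders, not the kernel's actual definitions.

/* Illustrative userspace sketch -- not kernel code. */
#include <stdio.h>

/* Stand-in for the kernel's vm_event_item enum (values are made up). */
enum vm_event_item { PGPGIN, PGPGOUT, PGSCAN_KSWAPD, PGSCAN_DIRECT,
		     PGFAULT, PGMAJFAULT, NR_VM_EVENT_ITEMS };

/* Subset of events actually tracked per memcg (illustrative). */
static const unsigned int memcg_vm_event_stat[] = {
	PGPGIN, PGPGOUT, PGSCAN_KSWAPD, PGSCAN_DIRECT,
};
#define NR_MEMCG_EVENTS (sizeof(memcg_vm_event_stat) / sizeof(memcg_vm_event_stat[0]))

/*
 * Sparse-to-dense lookup table: 0 means "not tracked", tracked items
 * store their dense index + 1 so memcg_events_index() can return -1
 * for anything outside the subset.
 */
static int mem_cgroup_events_index[NR_VM_EVENT_ITEMS];

static void init_memcg_events(void)
{
	unsigned int i;

	for (i = 0; i < NR_MEMCG_EVENTS; i++)
		mem_cgroup_events_index[memcg_vm_event_stat[i]] = i + 1;
}

static int memcg_events_index(enum vm_event_item idx)
{
	return mem_cgroup_events_index[idx] - 1;
}

int main(void)
{
	init_memcg_events();

	/* Tracked event: maps into the dense events[] array. */
	printf("PGSCAN_DIRECT -> %d\n", memcg_events_index(PGSCAN_DIRECT)); /* 3 */

	/* Untracked event: -1, so callers like __count_memcg_events() bail out. */
	printf("PGMAJFAULT    -> %d\n", memcg_events_index(PGMAJFAULT));    /* -1 */
	return 0;
}

With this mapping in place, the per-cpu and aggregated events arrays can be sized NR_MEMCG_EVENTS instead of NR_VM_EVENT_ITEMS, which is where the size reduction reflected in the diffstat comes from.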