author:    Kemi Wang <kemi.wang@intel.com>    2017-09-09 02:12:55 +0300
committer: Linus Torvalds <torvalds@linux-foundation.org>    2017-09-09 04:26:47 +0300
commit:    638032224ed762a29baca1fc37f1168efc2554ae (patch)
tree:      41ec4e67d4698b0c3208afd182297882f49ecc1f /include/linux/vmstat.h
parent:    1d90ca897cb05cf38bd62f36756d219e02913b7d (diff)
mm: consider the number in local CPUs when reading NUMA stats
To avoid deviation, the per-CPU counts of NUMA stats held in vm_numa_stat_diff[] are included when a user *reads* the NUMA stats. Since the NUMA stats are not read frequently by users, and the kernel does not need them to make decisions, it is not a problem to make the readers more expensive.

Link: http://lkml.kernel.org/r/1503568801-21305-4-git-send-email-kemi.wang@intel.com
Signed-off-by: Kemi Wang <kemi.wang@intel.com>
Reported-by: Jesper Dangaard Brouer <brouer@redhat.com>
Acked-by: Mel Gorman <mgorman@techsingularity.net>
Cc: Aaron Lu <aaron.lu@intel.com>
Cc: Andi Kleen <andi.kleen@intel.com>
Cc: Christopher Lameter <cl@linux.com>
Cc: Dave Hansen <dave.hansen@intel.com>
Cc: Johannes Weiner <hannes@cmpxchg.org>
Cc: Michal Hocko <mhocko@suse.com>
Cc: Tim Chen <tim.c.chen@intel.com>
Cc: Ying Huang <ying.huang@intel.com>
Signed-off-by: Andrew Morton <akpm@linux-foundation.org>
Signed-off-by: Linus Torvalds <torvalds@linux-foundation.org>
Diffstat (limited to 'include/linux/vmstat.h')
 include/linux/vmstat.h | 6 +++++-
 1 file changed, 5 insertions(+), 1 deletion(-)
diff --git a/include/linux/vmstat.h b/include/linux/vmstat.h
index 9ac82e29948f..ade7cb5f1359 100644
--- a/include/linux/vmstat.h
+++ b/include/linux/vmstat.h
@@ -125,10 +125,14 @@ static inline unsigned long global_numa_state(enum numa_stat_item item)
 	return x;
 }
 
-static inline unsigned long zone_numa_state(struct zone *zone,
+static inline unsigned long zone_numa_state_snapshot(struct zone *zone,
 					enum numa_stat_item item)
 {
 	long x = atomic_long_read(&zone->vm_numa_stat[item]);
+	int cpu;
+
+	for_each_online_cpu(cpu)
+		x += per_cpu_ptr(zone->pageset, cpu)->vm_numa_stat_diff[item];
 
 	return x;
 }
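
The pattern this patch extends is a shared global counter plus cheap per-CPU deltas that are only folded back occasionally, so an approximate read of the global value can lag behind the true count. The stand-alone, user-space C sketch below illustrates why a snapshot reader must sum the per-CPU deltas at read time; it is not kernel code, and all names in it (numa_counter, NR_FAKE_CPUS, DELTA_THRESHOLD, counter_*) are hypothetical stand-ins for illustration only.

/*
 * Illustrative user-space sketch (not kernel code): a global counter
 * with per-"CPU" deltas. Writers bump the local delta and fold it into
 * the global value only when it crosses a threshold; a snapshot reader
 * folds every delta in at read time, as zone_numa_state_snapshot() does.
 */
#include <stdio.h>

#define NR_FAKE_CPUS	4
#define DELTA_THRESHOLD	32

struct numa_counter {
	long global;			/* shared value, updated rarely */
	int  diff[NR_FAKE_CPUS];	/* cheap per-"CPU" deltas */
};

/* Fast path: bump the local delta; fold into the global value rarely. */
static void counter_inc(struct numa_counter *c, int cpu)
{
	if (++c->diff[cpu] >= DELTA_THRESHOLD) {
		c->global += c->diff[cpu];
		c->diff[cpu] = 0;
	}
}

/* Approximate read: global value only, may lag behind by the deltas. */
static long counter_read(const struct numa_counter *c)
{
	return c->global;
}

/* Snapshot read: include every per-"CPU" delta for an accurate value. */
static long counter_read_snapshot(const struct numa_counter *c)
{
	long x = c->global;
	int cpu;

	for (cpu = 0; cpu < NR_FAKE_CPUS; cpu++)
		x += c->diff[cpu];
	return x;
}

int main(void)
{
	struct numa_counter c = { 0 };
	int i;

	for (i = 0; i < 100; i++)
		counter_inc(&c, i % NR_FAKE_CPUS);

	printf("approximate: %ld\n", counter_read(&c));
	printf("snapshot:    %ld\n", counter_read_snapshot(&c));
	return 0;
}

In this toy run, 100 increments spread over 4 fake CPUs leave 25 in each delta, below the threshold of 32, so the approximate read still reports 0 while the snapshot reports 100. That gap is the deviation the patch removes for the infrequent readers of the NUMA stats.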