path: root/include/linux/cgroup-defs.h
author		Tejun Heo <tj@kernel.org>	2018-04-27 00:29:04 +0300
committer	Tejun Heo <tj@kernel.org>	2018-04-27 00:29:04 +0300
commit		d4ff749b5e0f1e2d4d69a3e4ea81cdeaeb4904d2 (patch)
tree		99a922407a9aa00dbd7441e26969e18120a027e5 /include/linux/cgroup-defs.h
parent		c58632b3631cb222da41d9dc0dd39e106c1eafd0 (diff)
download	linux-d4ff749b5e0f1e2d4d69a3e4ea81cdeaeb4904d2.tar.xz
cgroup: Distinguish base resource stat implementation from rstat
Base resource stat accounts universal (not specific to any controller)
resource consumptions on top of rstat.  Currently, its implementation is
intermixed with the rstat implementation, making the code confusing to
follow.

This patch clarifies the distinction by doing the following.

* Encapsulate base resource stat counters, currently only cputime, in
  struct cgroup_base_stat.

* Move prev_cputime into struct cgroup and initialize it with cgroup.

* Rename the related functions so that they start with cgroup_base_stat.

* Prefix the related variables and field names with b.

This patch doesn't make any functional changes.

Signed-off-by: Tejun Heo <tj@kernel.org>
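As a purely illustrative aside (not part of this patch), here is a minimal
sketch of what a hot-path update of the renamed per-cpu fields could look
like under the new naming, assuming the usual <linux/u64_stats_sync.h>
begin/end pairing; example_account_cputime() is a hypothetical name, not a
function introduced by this commit.

#include <linux/cgroup-defs.h>
#include <linux/u64_stats_sync.h>

/* Hypothetical helper, for illustration only. */
static void example_account_cputime(struct cgroup_rstat_cpu *rstatc,
				    u64 delta_exec)
{
	/* ->bsync protects ->bstat, the only fields updated in the hot path */
	u64_stats_update_begin(&rstatc->bsync);
	rstatc->bstat.cputime.sum_exec_runtime += delta_exec;
	u64_stats_update_end(&rstatc->bsync);
}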
Diffstat (limited to 'include/linux/cgroup-defs.h')
-rw-r--r--	include/linux/cgroup-defs.h	29
1 file changed, 16 insertions(+), 13 deletions(-)
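Before the hunks themselves, another illustrative sketch (again not part of
the patch) of the read side hinted at by the "snapshots at the last reading"
comment below: fetch a consistent copy of ->bstat under ->bsync, fold the
delta since ->last_bstat into the cgroup-level bstat, and refresh the
snapshot. example_flush_cpu() is a hypothetical name; the real flush path
lives elsewhere in the cgroup core.

#include <linux/cgroup-defs.h>
#include <linux/u64_stats_sync.h>

/* Hypothetical per-cpu flush, for illustration only. */
static void example_flush_cpu(struct cgroup *cgrp,
			      struct cgroup_rstat_cpu *rstatc)
{
	struct task_cputime *last = &rstatc->last_bstat.cputime;
	struct task_cputime cur;
	unsigned int seq;

	/* grab a consistent snapshot of the hot-path counters */
	do {
		seq = u64_stats_fetch_begin(&rstatc->bsync);
		cur = rstatc->bstat.cputime;
	} while (u64_stats_fetch_retry(&rstatc->bsync, seq));

	/* propagate only the delta accumulated since the last read */
	cgrp->bstat.cputime.utime += cur.utime - last->utime;
	cgrp->bstat.cputime.stime += cur.stime - last->stime;
	cgrp->bstat.cputime.sum_exec_runtime +=
		cur.sum_exec_runtime - last->sum_exec_runtime;
	*last = cur;
}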
diff --git a/include/linux/cgroup-defs.h b/include/linux/cgroup-defs.h
index 04cb42419310..60d62fe97dc3 100644
--- a/include/linux/cgroup-defs.h
+++ b/include/linux/cgroup-defs.h
@@ -258,6 +258,10 @@ struct css_set {
struct rcu_head rcu_head;
};
+struct cgroup_base_stat {
+ struct task_cputime cputime;
+};
+
/*
* rstat - cgroup scalable recursive statistics. Accounting is done
* per-cpu in cgroup_rstat_cpu which is then lazily propagated up the
@@ -273,20 +277,24 @@ struct css_set {
* aren't active and stat may be read frequently. The combination can
* become very expensive. By propagating selectively, increasing reading
* frequency decreases the cost of each read.
+ *
+ * This struct hosts both the fields which implement the above -
+ * updated_children and updated_next - and the fields which track basic
+ * resource statistics on top of it - bsync, bstat and last_bstat.
*/
struct cgroup_rstat_cpu {
/*
- * ->sync protects all the current counters. These are the only
- * fields which get updated in the hot path.
+ * ->bsync protects ->bstat. These are the only fields which get
+ * updated in the hot path.
*/
- struct u64_stats_sync sync;
- struct task_cputime cputime;
+ struct u64_stats_sync bsync;
+ struct cgroup_base_stat bstat;
/*
* Snapshots at the last reading. These are used to calculate the
* deltas to propagate to the global counters.
*/
- struct task_cputime last_cputime;
+ struct cgroup_base_stat last_bstat;
/*
* Child cgroups with stat updates on this cpu since the last read
@@ -303,12 +311,6 @@ struct cgroup_rstat_cpu {
struct cgroup *updated_next; /* NULL iff not on the list */
};
-struct cgroup_stat {
- /* per-cpu statistics are collected into the folowing global counters */
- struct task_cputime cputime;
- struct prev_cputime prev_cputime;
-};
-
struct cgroup {
/* self css with NULL ->ss, points back to this cgroup */
struct cgroup_subsys_state self;
@@ -412,8 +414,9 @@ struct cgroup {
struct cgroup_rstat_cpu __percpu *rstat_cpu;
/* cgroup basic resource statistics */
- struct cgroup_stat pending_stat; /* pending from children */
- struct cgroup_stat stat;
+ struct cgroup_base_stat pending_bstat; /* pending from children */
+ struct cgroup_base_stat bstat;
+ struct prev_cputime prev_cputime; /* for printing out cputime */
/*
* list of pidlists, up to two for each namespace (one for procs, one