author     Linus Torvalds <torvalds@linux-foundation.org>  2023-03-25 22:57:34 +0300
committer  Linus Torvalds <torvalds@linux-foundation.org>  2023-03-25 22:57:34 +0300
commit     f768b35a2371ccf85255f608444d234062a1b5c9 (patch)
tree       a22500f2422baceb3aa07ec46ee72f221c7f49a1 /include/linux/cpumask.h
parent     d704426300f2e95275267a387ffc3b8e5f4ae8b7 (diff)
parent     e9b60c7f97130795c7aa81a649ae4b93a172a277 (diff)
download   linux-f768b35a2371ccf85255f608444d234062a1b5c9.tar.xz
Merge tag 'xfs-6.3-fixes-4' of git://git.kernel.org/pub/scm/fs/xfs/xfs-linux
Pull xfs percpu counter fixes from Darrick Wong:
 "We discovered a filesystem summary counter corruption problem that
  was traced to cpu hot-remove racing with the call to
  percpu_counter_sum that sets the free block count in the superblock
  when writing it to disk. The root cause is that percpu_counter_sum
  doesn't cull from dying cpus and hence misses those counter values
  if the cpu shutdown hooks have not yet run to merge the values.

  I'm hoping this is a fairly painless fix to the problem, since the
  dying cpu mask should generally be empty. It's been in for-next for
  a week without any complaints from the bots.

   - Fix a race in the percpu counters summation code where the
     summation failed to add in the values for any CPUs that were
     dying but not yet dead. This fixes some minor discrepancies and
     incorrect assertions when running generic/650"

* tag 'xfs-6.3-fixes-4' of git://git.kernel.org/pub/scm/fs/xfs/xfs-linux:
  pcpcntr: remove percpu_counter_sum_all()
  fork: remove use of percpu_counter_sum_all
  pcpcntrs: fix dying cpu summation race
  cpumask: introduce for_each_cpu_or
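For illustration only, the sketch below models the race described above in plain userspace C; it is not the kernel's percpu_counter implementation, and the names (global_count, pcpu_delta, online_mask, dying_mask, counter_sum) are hypothetical. The idea is that a dying CPU's delta is only folded into the global count by its hotplug teardown hook, so a summation that walks only the online mask can miss it, while a summation over online-or-dying CPUs does not. Presumably this is why the merge also introduces the for_each_cpu_or iterator shown in the diff below.

/*
 * Illustrative userspace model of the summation race; all names are
 * hypothetical and this is not the kernel's percpu_counter code.
 */
#include <stdint.h>
#include <stdio.h>

#define NR_CPUS 8

static int64_t  global_count;          /* baseline, already merged        */
static int64_t  pcpu_delta[NR_CPUS];   /* per-CPU deltas, not yet merged  */
static uint64_t online_mask;           /* bit n set: CPU n is online      */
static uint64_t dying_mask;            /* bit n set: CPU n is going down  */

/* Sum the counter over every CPU whose bit is set in either mask. */
static int64_t counter_sum(uint64_t mask1, uint64_t mask2)
{
	uint64_t both = mask1 | mask2;
	int64_t sum = global_count;

	for (unsigned int cpu = 0; cpu < NR_CPUS; cpu++)
		if (both & (1ULL << cpu))
			sum += pcpu_delta[cpu];
	return sum;
}

int main(void)
{
	online_mask = 0x0f;             /* CPUs 0-3 online                 */
	pcpu_delta[3] = 100;            /* CPU 3 holds an unmerged delta   */

	/* CPU 3 starts going offline: it leaves the online mask and joins
	 * the dying mask, but its teardown hook has not yet merged the
	 * delta into global_count.                                       */
	online_mask &= ~(1ULL << 3);
	dying_mask  |=  (1ULL << 3);

	printf("online only    : %lld\n", (long long)counter_sum(online_mask, 0));
	printf("online or dying: %lld\n", (long long)counter_sum(online_mask, dying_mask));
	return 0;
}

Summing over the online mask alone reports 0 and misses CPU 3's delta; summing over online-or-dying reports 100.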
Diffstat (limited to 'include/linux/cpumask.h')
-rw-r--r--  include/linux/cpumask.h  17
1 file changed, 17 insertions, 0 deletions
diff --git a/include/linux/cpumask.h b/include/linux/cpumask.h
index d4901ca8883c..ca736b05ec7b 100644
--- a/include/linux/cpumask.h
+++ b/include/linux/cpumask.h
@@ -351,6 +351,23 @@ unsigned int __pure cpumask_next_wrap(int n, const struct cpumask *mask, int sta
for_each_andnot_bit(cpu, cpumask_bits(mask1), cpumask_bits(mask2), small_cpumask_bits)
/**
+ * for_each_cpu_or - iterate over every cpu present in either mask
+ * @cpu: the (optionally unsigned) integer iterator
+ * @mask1: the first cpumask pointer
+ * @mask2: the second cpumask pointer
+ *
+ * This saves a temporary CPU mask in many places. It is equivalent to:
+ *	struct cpumask tmp;
+ *	cpumask_or(&tmp, &mask1, &mask2);
+ *	for_each_cpu(cpu, &tmp)
+ *		...
+ *
+ * After the loop, cpu is >= nr_cpu_ids.
+ */
+#define for_each_cpu_or(cpu, mask1, mask2)				\
+	for_each_or_bit(cpu, cpumask_bits(mask1), cpumask_bits(mask2), small_cpumask_bits)
+
+/**
* cpumask_any_but - return a "random" in a cpumask, but not this one.
* @mask: the cpumask to search
* @cpu: the cpu to ignore.
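The for_each_cpu_or kerneldoc added above describes the equivalence in terms of a temporary cpumask. As a rough userspace analogue, the same pattern can be sketched over single 64-bit words; the for_each_bit_or macro here is hypothetical and does not reflect how the kernel's for_each_or_bit() is implemented over arbitrary-length bitmaps.

/*
 * Userspace analogue: visit every bit set in either of two 64-bit words,
 * lowest bit first, without materialising a temporary mask object.
 * Purely illustrative; not kernel code.
 */
#include <stdint.h>
#include <stdio.h>

#define for_each_bit_or(bit, w1, w2, nbits)				\
	for ((bit) = 0; (bit) < (nbits); (bit)++)			\
		if ((((w1) | (w2)) >> (bit)) & 1)

int main(void)
{
	uint64_t online = 0x0b;	/* bits 0, 1, 3 */
	uint64_t dying  = 0x14;	/* bits 2, 4    */
	unsigned int cpu;

	for_each_bit_or(cpu, online, dying, 64)
		printf("cpu %u\n", cpu);	/* prints 0 1 2 3 4 */

	return 0;
}

As with the kernel macro, the iterator runs past the last valid index once the loop finishes, mirroring the "After the loop, cpu is >= nr_cpu_ids" note in the kerneldoc.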