author	Greg Kroah-Hartman <gregkh@linuxfoundation.org>	2023-01-20 15:40:04 +0300
committer	Greg Kroah-Hartman <gregkh@linuxfoundation.org>	2023-01-20 15:40:04 +0300
commit	2e4a4e36285fa9739cb45a26206c958037d6b773 (patch)
tree	545b969a8dc82aab3fd5061e27b7e6d8abdb842f /drivers
parent	95b2a034784658c4512db846918475d4954901d6 (diff)
parent	198102c9103fc78d8478495971947af77edb05c1 (diff)
download	linux-2e4a4e36285fa9739cb45a26206c958037d6b773.tar.xz
Merge tag 'archtopo-cacheinfo-updates-6.3' of git://git.kernel.org/pub/scm/linux/kernel/git/sudeep.holla/linux into driver-core-next
Sudeep writes:

"cacheinfo and arch_topology updates for v6.3

The main change is to build the cache topology information for all
the CPUs from the primary CPU. Currently the cacheinfo for secondary
CPUs is created during the early boot on the respective CPU itself.
Preemption and interrupts are disabled at this stage. On PREEMPT_RT
kernels, allocating memory and even parsing the PPTT table for ACPI
based systems triggers a:
  'BUG: sleeping function called from invalid context'

To prevent this bug, the cacheinfo is now allocated from the primary
CPU when preemption and interrupts are enabled and before booting
secondary CPUs. The cache levels/leaves are computed from DT/ACPI PPTT
information only, without relying on any architecture specific
mechanism if done so early.

The other minor change included here is to handle shared caches at
different levels when not all the CPUs on the system have the same
cache hierarchy."

* tag 'archtopo-cacheinfo-updates-6.3' of git://git.kernel.org/pub/scm/linux/kernel/git/sudeep.holla/linux:
  cacheinfo: Fix shared_cpu_map to handle shared caches at different levels
  arch_topology: Build cacheinfo from primary CPU
  ACPI: PPTT: Update acpi_find_last_cache_level() to acpi_get_cache_info()
  ACPI: PPTT: Remove acpi_find_cache_levels()
  cacheinfo: Check 'cache-unified' property to count cache leaves
  cacheinfo: Return error code in init_of_cache_level()
  cacheinfo: Use RISC-V's init_cache_level() as generic OF implementation
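To make the new ordering concrete, here is a minimal sketch of the boot flow these patches produce (simplified from the arch_topology.c and cacheinfo.c hunks below; error paths and unrelated setup elided):

/* Early, on the primary CPU: preemption and interrupts are still
 * enabled, so counting cache levels/leaves (from DT or the ACPI PPTT)
 * and the kcalloc() of the cacheinfo array are safe on PREEMPT_RT.
 */
void __init init_cpu_topology(void)
{
	int cpu, ret;

	/* ... reset_cpu_topology(), parse_acpi_topology(), ... */

	for_each_possible_cpu(cpu) {
		ret = fetch_cache_info(cpu);	/* count + allocate only */
		if (ret) {
			pr_err("Early cacheinfo failed, ret = %d\n", ret);
			break;
		}
	}
}

/* Later, on each secondary CPU (preemption/interrupts disabled),
 * detect_cache_attributes() finds the memory already allocated and
 * only runs populate_cache_leaves() to fill it in.
 */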
Diffstat (limited to 'drivers')
-rw-r--r--  drivers/acpi/pptt.c           93
-rw-r--r--  drivers/base/arch_topology.c  12
-rw-r--r--  drivers/base/cacheinfo.c      161
3 files changed, 198 insertions(+), 68 deletions(-)
diff --git a/drivers/acpi/pptt.c b/drivers/acpi/pptt.c
index c91342dcbcd6..10975bb603fb 100644
--- a/drivers/acpi/pptt.c
+++ b/drivers/acpi/pptt.c
@@ -81,6 +81,7 @@ static inline bool acpi_pptt_match_type(int table_type, int type)
* acpi_pptt_walk_cache() - Attempt to find the requested acpi_pptt_cache
* @table_hdr: Pointer to the head of the PPTT table
* @local_level: passed res reflects this cache level
+ * @split_levels: Number of split cache levels (data/instruction).
* @res: cache resource in the PPTT we want to walk
* @found: returns a pointer to the requested level if found
* @level: the requested cache level
@@ -100,6 +101,7 @@ static inline bool acpi_pptt_match_type(int table_type, int type)
*/
static unsigned int acpi_pptt_walk_cache(struct acpi_table_header *table_hdr,
unsigned int local_level,
+ unsigned int *split_levels,
struct acpi_subtable_header *res,
struct acpi_pptt_cache **found,
unsigned int level, int type)
@@ -113,8 +115,17 @@ static unsigned int acpi_pptt_walk_cache(struct acpi_table_header *table_hdr,
while (cache) {
local_level++;
+ if (!(cache->flags & ACPI_PPTT_CACHE_TYPE_VALID)) {
+ cache = fetch_pptt_cache(table_hdr, cache->next_level_of_cache);
+ continue;
+ }
+
+ if (split_levels &&
+ (acpi_pptt_match_type(cache->attributes, ACPI_PPTT_CACHE_TYPE_DATA) ||
+ acpi_pptt_match_type(cache->attributes, ACPI_PPTT_CACHE_TYPE_INSTR)))
+ *split_levels = local_level;
+
if (local_level == level &&
- cache->flags & ACPI_PPTT_CACHE_TYPE_VALID &&
acpi_pptt_match_type(cache->attributes, type)) {
if (*found != NULL && cache != *found)
pr_warn("Found duplicate cache level/type unable to determine uniqueness\n");
@@ -135,8 +146,8 @@ static unsigned int acpi_pptt_walk_cache(struct acpi_table_header *table_hdr,
static struct acpi_pptt_cache *
acpi_find_cache_level(struct acpi_table_header *table_hdr,
struct acpi_pptt_processor *cpu_node,
- unsigned int *starting_level, unsigned int level,
- int type)
+ unsigned int *starting_level, unsigned int *split_levels,
+ unsigned int level, int type)
{
struct acpi_subtable_header *res;
unsigned int number_of_levels = *starting_level;
@@ -149,7 +160,8 @@ acpi_find_cache_level(struct acpi_table_header *table_hdr,
resource++;
local_level = acpi_pptt_walk_cache(table_hdr, *starting_level,
- res, &ret, level, type);
+ split_levels, res, &ret,
+ level, type);
/*
* we are looking for the max depth. Since its potentially
* possible for a given node to have resources with differing
@@ -165,29 +177,29 @@ acpi_find_cache_level(struct acpi_table_header *table_hdr,
}
/**
- * acpi_count_levels() - Given a PPTT table, and a CPU node, count the caches
+ * acpi_count_levels() - Given a PPTT table, and a CPU node, count the cache
+ * levels and split cache levels (data/instruction).
* @table_hdr: Pointer to the head of the PPTT table
* @cpu_node: processor node we wish to count caches for
+ * @levels: Number of levels on success.
+ * @split_levels: Number of split cache levels (data/instruction) on
+ * success. Can be NULL.
*
* Given a processor node containing a processing unit, walk into it and count
* how many levels exist solely for it, and then walk up each level until we hit
* the root node (ignore the package level because it may be possible to have
- * caches that exist across packages). Count the number of cache levels that
- * exist at each level on the way up.
- *
- * Return: Total number of levels found.
+ * caches that exist across packages). Count the number of cache levels and
+ * split cache levels (data/instruction) that exist at each level on the way
+ * up.
*/
-static int acpi_count_levels(struct acpi_table_header *table_hdr,
- struct acpi_pptt_processor *cpu_node)
+static void acpi_count_levels(struct acpi_table_header *table_hdr,
+ struct acpi_pptt_processor *cpu_node,
+ unsigned int *levels, unsigned int *split_levels)
{
- int total_levels = 0;
-
do {
- acpi_find_cache_level(table_hdr, cpu_node, &total_levels, 0, 0);
+ acpi_find_cache_level(table_hdr, cpu_node, levels, split_levels, 0, 0);
cpu_node = fetch_pptt_node(table_hdr, cpu_node->parent);
} while (cpu_node);
-
- return total_levels;
}
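For example, a hypothetical CPU node with a private split L1 whose parent node carries a unified L2 finishes the walk with *levels = 2 and *split_levels = 1: the first acpi_find_cache_level() pass counts the L1 and records it as split, then the walk up to the parent adds the unified level.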
/**
@@ -281,19 +293,6 @@ static struct acpi_pptt_processor *acpi_find_processor_node(struct acpi_table_he
return NULL;
}
-static int acpi_find_cache_levels(struct acpi_table_header *table_hdr,
- u32 acpi_cpu_id)
-{
- int number_of_levels = 0;
- struct acpi_pptt_processor *cpu;
-
- cpu = acpi_find_processor_node(table_hdr, acpi_cpu_id);
- if (cpu)
- number_of_levels = acpi_count_levels(table_hdr, cpu);
-
- return number_of_levels;
-}
-
static u8 acpi_cache_type(enum cache_type type)
{
switch (type) {
@@ -334,7 +333,7 @@ static struct acpi_pptt_cache *acpi_find_cache_node(struct acpi_table_header *ta
while (cpu_node && !found) {
found = acpi_find_cache_level(table_hdr, cpu_node,
- &total_levels, level, acpi_type);
+ &total_levels, NULL, level, acpi_type);
*node = cpu_node;
cpu_node = fetch_pptt_node(table_hdr, cpu_node->parent);
}
@@ -602,32 +601,48 @@ static int check_acpi_cpu_flag(unsigned int cpu, int rev, u32 flag)
}
/**
- * acpi_find_last_cache_level() - Determines the number of cache levels for a PE
+ * acpi_get_cache_info() - Determine the number of cache levels and
+ * split cache levels (data/instruction) for a PE.
* @cpu: Kernel logical CPU number
+ * @levels: Number of levels on success.
+ * @split_levels: Number of levels being split (i.e. data/instruction)
+ * on success. Can be NULL.
*
* Given a logical CPU number, this function fills @levels with the number of
* cache levels represented in the PPTT and, when @split_levels is non-NULL,
* @split_levels with the number of levels being split (i.e. data/instruction).
* A missing PPTT table or processor node leaves both counts at 0.
*
- * Return: Cache levels visible to this core.
+ * Return: -ENOENT if no PPTT table or no PPTT processor struct found.
+ * 0 on success.
*/
-int acpi_find_last_cache_level(unsigned int cpu)
+int acpi_get_cache_info(unsigned int cpu, unsigned int *levels,
+ unsigned int *split_levels)
{
- u32 acpi_cpu_id;
+ struct acpi_pptt_processor *cpu_node;
struct acpi_table_header *table;
- int number_of_levels = 0;
+ u32 acpi_cpu_id;
+
+ *levels = 0;
+ if (split_levels)
+ *split_levels = 0;
table = acpi_get_pptt();
if (!table)
return -ENOENT;
- pr_debug("Cache Setup find last level CPU=%d\n", cpu);
+ pr_debug("Cache Setup: find cache levels for CPU=%d\n", cpu);
acpi_cpu_id = get_acpi_id_for_cpu(cpu);
- number_of_levels = acpi_find_cache_levels(table, acpi_cpu_id);
- pr_debug("Cache Setup find last level level=%d\n", number_of_levels);
+ cpu_node = acpi_find_processor_node(table, acpi_cpu_id);
+ if (!cpu_node)
+ return -ENOENT;
+
+ acpi_count_levels(table, cpu_node, levels, split_levels);
- return number_of_levels;
+ pr_debug("Cache Setup: last_level=%d split_levels=%d\n",
+ *levels, split_levels ? *split_levels : -1);
+
+ return 0;
}
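For callers, migration from the old interface looks roughly like this (a hypothetical caller shown for illustration; the one real in-tree user is fetch_cache_info() in drivers/base/cacheinfo.c, further down in this diff):

/* Old: the number of levels (or 0) came back as the return value:
 *	levels = acpi_find_last_cache_level(cpu);
 * New: counts come back via out-parameters; the return value is
 *	0 on success or -ENOENT when no PPTT data exists.
 */
unsigned int levels, split_levels;

if (acpi_get_cache_info(cpu, &levels, &split_levels) < 0) {
	levels = 0;		/* no PPTT table or processor node */
	split_levels = 0;
}

/* Pass NULL for @split_levels when only the total is needed: */
if (acpi_get_cache_info(cpu, &levels, NULL) < 0)
	levels = 0;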
/**
diff --git a/drivers/base/arch_topology.c b/drivers/base/arch_topology.c
index e7d6e6657ffa..b1c1dd38ab01 100644
--- a/drivers/base/arch_topology.c
+++ b/drivers/base/arch_topology.c
@@ -736,7 +736,7 @@ void update_siblings_masks(unsigned int cpuid)
ret = detect_cache_attributes(cpuid);
if (ret && ret != -ENOENT)
- pr_info("Early cacheinfo failed, ret = %d\n", ret);
+ pr_info("Early cacheinfo allocation failed, ret = %d\n", ret);
/* update core and thread sibling masks */
for_each_online_cpu(cpu) {
@@ -825,7 +825,7 @@ __weak int __init parse_acpi_topology(void)
#if defined(CONFIG_ARM64) || defined(CONFIG_RISCV)
void __init init_cpu_topology(void)
{
- int ret;
+ int cpu, ret;
reset_cpu_topology();
ret = parse_acpi_topology();
@@ -840,6 +840,14 @@ void __init init_cpu_topology(void)
reset_cpu_topology();
return;
}
+
+ for_each_possible_cpu(cpu) {
+ ret = fetch_cache_info(cpu);
+ if (ret) {
+ pr_err("Early cacheinfo failed, ret = %d\n", ret);
+ break;
+ }
+ }
}
void store_cpu_topology(unsigned int cpuid)
diff --git a/drivers/base/cacheinfo.c b/drivers/base/cacheinfo.c
index 950b22cdb5f7..418a18acc8f9 100644
--- a/drivers/base/cacheinfo.c
+++ b/drivers/base/cacheinfo.c
@@ -229,8 +229,71 @@ static int cache_setup_of_node(unsigned int cpu)
return 0;
}
+
+static int of_count_cache_leaves(struct device_node *np)
+{
+ unsigned int leaves = 0;
+
+ if (of_property_read_bool(np, "cache-size"))
+ ++leaves;
+ if (of_property_read_bool(np, "i-cache-size"))
+ ++leaves;
+ if (of_property_read_bool(np, "d-cache-size"))
+ ++leaves;
+
+ if (!leaves) {
+ /* The '[i-|d-|]cache-size' property is required, but
+ * if absent, fall back on the 'cache-unified' property.
+ */
+ if (of_property_read_bool(np, "cache-unified"))
+ return 1;
+ else
+ return 2;
+ }
+
+ return leaves;
+}
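As a worked example of the rule above, here is a toy userspace restatement (not kernel code; the DT property lookups are reduced to booleans for illustration):

#include <stdbool.h>
#include <stdio.h>

/* One leaf per '[i-|d-|]cache-size' property present; with none
 * present, 'cache-unified' implies a single unified leaf, otherwise
 * a split cache (two leaves) is assumed.
 */
static unsigned int count_leaves(bool cache_size, bool i_cache_size,
				 bool d_cache_size, bool cache_unified)
{
	unsigned int leaves = cache_size + i_cache_size + d_cache_size;

	return leaves ? leaves : (cache_unified ? 1U : 2U);
}

int main(void)
{
	/* Split L1 ('i-cache-size' + 'd-cache-size') -> 2 leaves,
	 * L2 with only 'cache-unified' -> 1 leaf: 3 leaves in total.
	 */
	printf("%u\n", count_leaves(false, true, true, false) +
		       count_leaves(false, false, false, true));
	return 0;
}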
+
+int init_of_cache_level(unsigned int cpu)
+{
+ struct cpu_cacheinfo *this_cpu_ci = get_cpu_cacheinfo(cpu);
+ struct device_node *np = of_cpu_device_node_get(cpu);
+ struct device_node *prev = NULL;
+ unsigned int levels = 0, leaves, level;
+
+ leaves = of_count_cache_leaves(np);
+ if (leaves > 0)
+ levels = 1;
+
+ prev = np;
+ while ((np = of_find_next_cache_node(np))) {
+ of_node_put(prev);
+ prev = np;
+ if (!of_device_is_compatible(np, "cache"))
+ goto err_out;
+ if (of_property_read_u32(np, "cache-level", &level))
+ goto err_out;
+ if (level <= levels)
+ goto err_out;
+
+ leaves += of_count_cache_leaves(np);
+ levels = level;
+ }
+
+ of_node_put(np);
+ this_cpu_ci->num_levels = levels;
+ this_cpu_ci->num_leaves = leaves;
+
+ return 0;
+
+err_out:
+ of_node_put(np);
+ return -EINVAL;
+}
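Note the error path above: a next-level node that is not 'cache'-compatible, has no 'cache-level' property, or declares a level that does not increase along the chain makes the whole hierarchy count as malformed, and the function returns -EINVAL (the "Return error code in init_of_cache_level()" change listed in the tag message).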
+
#else
static inline int cache_setup_of_node(unsigned int cpu) { return 0; }
+int init_of_cache_level(unsigned int cpu) { return 0; }
#endif
int __weak cache_setup_acpi(unsigned int cpu)
@@ -256,7 +319,7 @@ static int cache_shared_cpu_map_setup(unsigned int cpu)
{
struct cpu_cacheinfo *this_cpu_ci = get_cpu_cacheinfo(cpu);
struct cacheinfo *this_leaf, *sib_leaf;
- unsigned int index;
+ unsigned int index, sib_index;
int ret = 0;
if (this_cpu_ci->cpu_map_populated)
@@ -284,11 +347,13 @@ static int cache_shared_cpu_map_setup(unsigned int cpu)
if (i == cpu || !sib_cpu_ci->info_list)
continue; /* skip if itself or no cacheinfo */
-
- sib_leaf = per_cpu_cacheinfo_idx(i, index);
- if (cache_leaves_are_shared(this_leaf, sib_leaf)) {
- cpumask_set_cpu(cpu, &sib_leaf->shared_cpu_map);
- cpumask_set_cpu(i, &this_leaf->shared_cpu_map);
+ for (sib_index = 0; sib_index < cache_leaves(i); sib_index++) {
+ sib_leaf = per_cpu_cacheinfo_idx(i, sib_index);
+ if (cache_leaves_are_shared(this_leaf, sib_leaf)) {
+ cpumask_set_cpu(cpu, &sib_leaf->shared_cpu_map);
+ cpumask_set_cpu(i, &this_leaf->shared_cpu_map);
+ break;
+ }
}
}
/* record the maximum cache line size */
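The inner scan matters when CPUs disagree on hierarchy shape: for a hypothetical CPU0 with a split L1 (leaves 0-1) and unified L2 (leaf 2) sharing that L2 with a CPU1 that has only a unified L1 (leaf 0) and L2 (leaf 1), the shared L2 sits at index 2 on one side and index 1 on the other, so the old index-for-index comparison could never match it.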
@@ -302,7 +367,7 @@ static int cache_shared_cpu_map_setup(unsigned int cpu)
static void cache_shared_cpu_map_remove(unsigned int cpu)
{
struct cacheinfo *this_leaf, *sib_leaf;
- unsigned int sibling, index;
+ unsigned int sibling, index, sib_index;
for (index = 0; index < cache_leaves(cpu); index++) {
this_leaf = per_cpu_cacheinfo_idx(cpu, index);
@@ -313,9 +378,14 @@ static void cache_shared_cpu_map_remove(unsigned int cpu)
if (sibling == cpu || !sib_cpu_ci->info_list)
continue; /* skip if itself or no cacheinfo */
- sib_leaf = per_cpu_cacheinfo_idx(sibling, index);
- cpumask_clear_cpu(cpu, &sib_leaf->shared_cpu_map);
- cpumask_clear_cpu(sibling, &this_leaf->shared_cpu_map);
+ for (sib_index = 0; sib_index < cache_leaves(sibling); sib_index++) {
+ sib_leaf = per_cpu_cacheinfo_idx(sibling, sib_index);
+ if (cache_leaves_are_shared(this_leaf, sib_leaf)) {
+ cpumask_clear_cpu(cpu, &sib_leaf->shared_cpu_map);
+ cpumask_clear_cpu(sibling, &this_leaf->shared_cpu_map);
+ break;
+ }
+ }
}
}
}
@@ -326,10 +396,6 @@ static void free_cache_attributes(unsigned int cpu)
return;
cache_shared_cpu_map_remove(cpu);
-
- kfree(per_cpu_cacheinfo(cpu));
- per_cpu_cacheinfo(cpu) = NULL;
- cache_leaves(cpu) = 0;
}
int __weak init_cache_level(unsigned int cpu)
@@ -342,29 +408,71 @@ int __weak populate_cache_leaves(unsigned int cpu)
return -ENOENT;
}
+static inline
+int allocate_cache_info(int cpu)
+{
+ per_cpu_cacheinfo(cpu) = kcalloc(cache_leaves(cpu),
+ sizeof(struct cacheinfo), GFP_ATOMIC);
+ if (!per_cpu_cacheinfo(cpu)) {
+ cache_leaves(cpu) = 0;
+ return -ENOMEM;
+ }
+
+ return 0;
+}
+
+int fetch_cache_info(unsigned int cpu)
+{
+ struct cpu_cacheinfo *this_cpu_ci;
+ unsigned int levels, split_levels;
+ int ret;
+
+ if (acpi_disabled) {
+ ret = init_of_cache_level(cpu);
+ if (ret < 0)
+ return ret;
+ } else {
+ ret = acpi_get_cache_info(cpu, &levels, &split_levels);
+ if (ret < 0)
+ return ret;
+
+ this_cpu_ci = get_cpu_cacheinfo(cpu);
+ this_cpu_ci->num_levels = levels;
+ /*
+ * This assumes that:
+ * - there cannot be any split caches (data/instruction)
+ * above a unified cache
+ * - data/instruction caches come in pairs
+ */
+ this_cpu_ci->num_leaves = levels + split_levels;
+ }
+ if (!cache_leaves(cpu))
+ return -ENOENT;
+
+ return allocate_cache_info(cpu);
+}
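Under those two assumptions the leaf count is direct arithmetic: a hypothetical PE with a split L1 and unified L2/L3 reports levels = 3 and split_levels = 1, giving num_leaves = 3 + 1 = 4 (L1i, L1d, L2, L3).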
+
int detect_cache_attributes(unsigned int cpu)
{
int ret;
- /* Since early detection of the cacheinfo is allowed via this
- * function and this also gets called as CPU hotplug callbacks via
- * cacheinfo_cpu_online, the initialisation can be skipped and only
- * CPU maps can be updated as the CPU online status would be update
- * if called via cacheinfo_cpu_online path.
+ /* Since early initialization/allocation of the cacheinfo is allowed
+ * via fetch_cache_info() and this also gets called as CPU hotplug
+ * callbacks via cacheinfo_cpu_online, the init/alloc can be skipped
+ * as it will happen only once (the cacheinfo memory is never freed).
+ * Just populate the cacheinfo.
*/
if (per_cpu_cacheinfo(cpu))
- goto update_cpu_map;
+ goto populate_leaves;
if (init_cache_level(cpu) || !cache_leaves(cpu))
return -ENOENT;
- per_cpu_cacheinfo(cpu) = kcalloc(cache_leaves(cpu),
- sizeof(struct cacheinfo), GFP_ATOMIC);
- if (per_cpu_cacheinfo(cpu) == NULL) {
- cache_leaves(cpu) = 0;
- return -ENOMEM;
- }
+ ret = allocate_cache_info(cpu);
+ if (ret)
+ return ret;
+populate_leaves:
/*
* populate_cache_leaves() may completely setup the cache leaves and
* shared_cpu_map or it may leave it partially setup.
@@ -373,7 +481,6 @@ int detect_cache_attributes(unsigned int cpu)
if (ret)
goto free_ci;
-update_cpu_map:
/*
* For systems using DT for cache hierarchy, fw_token
* and shared_cpu_map will be set up here only if they are