linux/arch/riscv/kernel/cacheinfo.c
Pierre Gondois 5944ce092b arch_topology: Build cacheinfo from primary CPU
commit 3fcbf1c77d ("arch_topology: Fix cache attributes detection
in the CPU hotplug path")
adds a call to detect_cache_attributes() to populate the cacheinfo
before updating the siblings mask. detect_cache_attributes() allocates
memory and can take the PPTT mutex (on ACPI platforms). On PREEMPT_RT
kernels, on secondary CPUs, this triggers a:
  'BUG: sleeping function called from invalid context' [1]
as the code is executed with preemption and interrupts disabled.
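
For illustration, a minimal sketch of the offending pattern (not code from
the patch; kmalloc()/GFP_KERNEL are the kernel APIs involved): a sleeping
allocation reached while preemption is disabled, which PREEMPT_RT turns
into the splat in [1] because even the page allocator's internal locks
become sleeping locks there:

  preempt_disable();                    /* e.g. secondary CPU bring-up */
  p = kmalloc(size, GFP_KERNEL);        /* may sleep: invalid in this context */
  preempt_enable();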

The primary CPU previously stored the cache information in the
now-removed (struct cpu_topology).llc_id; see
commit 5b8dc787ce ("arch_topology: Drop LLC identifier stash from
the CPU topology")

allocate_cache_info() tries to build the cacheinfo from the primary
CPU before the secondary CPUs boot, provided the DT/ACPI description
contains cache information.
If allocate_cache_info() fails, fall back to the current cacheinfo
allocation path; [1] will be triggered in that case.

When unplugging a CPU, the cacheinfo memory cannot be freed. If it
were, the memory would be allocated early by the re-plugged CPU and
would trigger [1].

Note that populate_cache_leaves() might be called multiple times
due to populate_leaves being moved up. This is required since
detect_cache_attributes() might be called with per_cpu_cacheinfo(cpu)
being allocated but not populated.

[1]:
 | BUG: sleeping function called from invalid context at kernel/locking/spinlock_rt.c:46
 | in_atomic(): 1, irqs_disabled(): 128, non_block: 0, pid: 0, name: swapper/111
 | preempt_count: 1, expected: 0
 | RCU nest depth: 1, expected: 1
 | 3 locks held by swapper/111/0:
 |  #0:  (&pcp->lock){+.+.}-{3:3}, at: get_page_from_freelist+0x218/0x12c8
 |  #1:  (rcu_read_lock){....}-{1:3}, at: rt_spin_trylock+0x48/0xf0
 |  #2:  (&zone->lock){+.+.}-{3:3}, at: rmqueue_bulk+0x64/0xa80
 | irq event stamp: 0
 | hardirqs last  enabled at (0):  0x0
 | hardirqs last disabled at (0):  copy_process+0x5dc/0x1ab8
 | softirqs last  enabled at (0):  copy_process+0x5dc/0x1ab8
 | softirqs last disabled at (0):  0x0
 | Preemption disabled at:
 |  migrate_enable+0x30/0x130
 | CPU: 111 PID: 0 Comm: swapper/111 Tainted: G        W          6.0.0-rc4-rt6-[...]
 | Call trace:
 |  __kmalloc+0xbc/0x1e8
 |  detect_cache_attributes+0x2d4/0x5f0
 |  update_siblings_masks+0x30/0x368
 |  store_cpu_topology+0x78/0xb8
 |  secondary_start_kernel+0xd0/0x198
 |  __secondary_switched+0xb0/0xb4

Signed-off-by: Pierre Gondois <pierre.gondois@arm.com>
Reviewed-by: Sudeep Holla <sudeep.holla@arm.com>
Acked-by: Palmer Dabbelt <palmer@rivosinc.com>
Link: https://lore.kernel.org/r/20230104183033.755668-7-pierre.gondois@arm.com
Signed-off-by: Sudeep Holla <sudeep.holla@arm.com>
2023-01-18 09:58:40 +00:00


// SPDX-License-Identifier: GPL-2.0-only
/*
 * Copyright (C) 2017 SiFive
 */

#include <linux/cpu.h>
#include <linux/of.h>
#include <linux/of_device.h>

#include <asm/cacheinfo.h>

static struct riscv_cacheinfo_ops *rv_cache_ops;

void riscv_set_cacheinfo_ops(struct riscv_cacheinfo_ops *ops)
{
        rv_cache_ops = ops;
}
EXPORT_SYMBOL_GPL(riscv_set_cacheinfo_ops);
const struct attribute_group *
cache_get_priv_group(struct cacheinfo *this_leaf)
{
        if (rv_cache_ops && rv_cache_ops->get_priv_group)
                return rv_cache_ops->get_priv_group(this_leaf);
        return NULL;
}
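
/*
 * Usage sketch (hypothetical, not part of this file): an SoC or erratum
 * driver can attach extra per-leaf sysfs attributes by registering ops.
 * my_l2_group and the other "my_" names below are assumed for illustration:
 *
 *      static const struct attribute_group *
 *      my_get_priv_group(struct cacheinfo *this_leaf)
 *      {
 *              return this_leaf->level == 2 ? &my_l2_group : NULL;
 *      }
 *
 *      static struct riscv_cacheinfo_ops my_ops = {
 *              .get_priv_group = my_get_priv_group,
 *      };
 *
 *      riscv_set_cacheinfo_ops(&my_ops);
 */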
static struct cacheinfo *get_cacheinfo(u32 level, enum cache_type type)
{
        /*
         * Using raw_smp_processor_id() elides a preemptability check, but this
         * is really indicative of a larger problem: the cacheinfo UABI assumes
         * that cores have a homogeneous view of the cache hierarchy. That
         * happens to be the case for the current set of RISC-V systems, but
         * likely won't be true in general. Since there's no way to provide
         * correct information for these systems via the current UABI we're
         * just eliding the check for now.
         */
        struct cpu_cacheinfo *this_cpu_ci = get_cpu_cacheinfo(raw_smp_processor_id());
        struct cacheinfo *this_leaf;
        int index;

        for (index = 0; index < this_cpu_ci->num_leaves; index++) {
                this_leaf = this_cpu_ci->info_list + index;
                if (this_leaf->level == level && this_leaf->type == type)
                        return this_leaf;
        }

        return NULL;
}
uintptr_t get_cache_size(u32 level, enum cache_type type)
{
        struct cacheinfo *this_leaf = get_cacheinfo(level, type);

        return this_leaf ? this_leaf->size : 0;
}

uintptr_t get_cache_geometry(u32 level, enum cache_type type)
{
        struct cacheinfo *this_leaf = get_cacheinfo(level, type);

        return this_leaf ? (this_leaf->ways_of_associativity << 16 |
                            this_leaf->coherency_line_size) :
                           0;
}
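
/*
 * Worked example (illustrative values, not from a real system): a 4-way
 * cache with 64-byte lines makes get_cache_geometry() return
 * (4 << 16) | 64 = 0x40040; a caller can recover the ways as
 * (geometry >> 16) and the line size as (geometry & 0xffff).
 */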
static void ci_leaf_init(struct cacheinfo *this_leaf, enum cache_type type,
                         unsigned int level, unsigned int size,
                         unsigned int sets, unsigned int line_size)
{
        this_leaf->level = level;
        this_leaf->type = type;
        this_leaf->size = size;
        this_leaf->number_of_sets = sets;
        this_leaf->coherency_line_size = line_size;

        /*
         * If the cache is fully associative, there is no need to
         * check the other properties.
         */
        if (sets == 1)
                return;

        /*
         * Set the number of ways for an n-way set-associative cache,
         * making sure all properties are greater than zero.
         */
        if (sets > 0 && size > 0 && line_size > 0)
                this_leaf->ways_of_associativity = (size / sets) / line_size;
}
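
/*
 * Worked example (illustrative values): a 32 KiB cache with 128 sets and
 * 64-byte lines yields (32768 / 128) / 64 = 4 ways of associativity.
 */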
static void fill_cacheinfo(struct cacheinfo **this_leaf,
                           struct device_node *node, unsigned int level)
{
        unsigned int size, sets, line_size;

        if (!of_property_read_u32(node, "cache-size", &size) &&
            !of_property_read_u32(node, "cache-block-size", &line_size) &&
            !of_property_read_u32(node, "cache-sets", &sets)) {
                ci_leaf_init((*this_leaf)++, CACHE_TYPE_UNIFIED, level, size, sets, line_size);
        }

        if (!of_property_read_u32(node, "i-cache-size", &size) &&
            !of_property_read_u32(node, "i-cache-sets", &sets) &&
            !of_property_read_u32(node, "i-cache-block-size", &line_size)) {
                ci_leaf_init((*this_leaf)++, CACHE_TYPE_INST, level, size, sets, line_size);
        }

        if (!of_property_read_u32(node, "d-cache-size", &size) &&
            !of_property_read_u32(node, "d-cache-sets", &sets) &&
            !of_property_read_u32(node, "d-cache-block-size", &line_size)) {
                ci_leaf_init((*this_leaf)++, CACHE_TYPE_DATA, level, size, sets, line_size);
        }
}
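
/*
 * Illustrative DT fragment (hypothetical label and values) matching the
 * unified-cache branch above; the i-cache- and d-cache- property triples
 * follow the same pattern:
 *
 *      l2: cache-controller@2010000 {
 *              compatible = "cache";
 *              cache-level = <2>;
 *              cache-size = <0x200000>;
 *              cache-sets = <2048>;
 *              cache-block-size = <64>;
 *      };
 */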
int populate_cache_leaves(unsigned int cpu)
{
        struct cpu_cacheinfo *this_cpu_ci = get_cpu_cacheinfo(cpu);
        struct cacheinfo *this_leaf = this_cpu_ci->info_list;
        struct device_node *np = of_cpu_device_node_get(cpu);
        struct device_node *prev = NULL;
        int levels = 1, level = 1;

        /* Level 1 caches in cpu node */
        fill_cacheinfo(&this_leaf, np, level);

        /* Next level caches in cache nodes */
        prev = np;
        while ((np = of_find_next_cache_node(np))) {
                of_node_put(prev);
                prev = np;
                if (!of_device_is_compatible(np, "cache"))
                        break;
                if (of_property_read_u32(np, "cache-level", &level))
                        break;
                if (level <= levels)
                        break;
                fill_cacheinfo(&this_leaf, np, level);
                levels = level;
        }
        of_node_put(np);

        return 0;
}
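
/*
 * Illustrative DT shape for the walk above (hypothetical labels): the cpu
 * node's next-level-cache phandle chains through successive cache nodes,
 * each with an increasing cache-level, until the chain runs out or one of
 * the checks in the loop stops the descent:
 *
 *      cpu@0 {
 *              next-level-cache = <&l2>;
 *      };
 *
 *      l2: cache-controller@2010000 {
 *              compatible = "cache";
 *              cache-level = <2>;
 *              next-level-cache = <&l3>;
 *      };
 */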