linux/block/blk-mq-cpumap.c
Daniel Wagner a9ae6fe1c3 blk-mq: create correct map for fallback case
The fallback code in blk_mq_map_hw_queues originally comes from
blk_mq_pci_map_queues and was added to handle the case where
pci_irq_get_affinity returns NULL for !SMP configurations.

Besides blk_mq_pci_map_queues, blk_mq_map_hw_queues also replaces
blk_mq_virtio_map_queues, which used blk_mq_map_queues for the
fallback.

It's possible to use blk_mq_map_queues for both cases, though: for
!SMP, blk_mq_map_queues creates the same map as blk_mq_clear_mq_map,
that is, CPU 0 is mapped to hctx 0.

The WARN_ON_ONCE has to be dropped for virtio, as the fallback is also
taken for certain configurations by default. There is still a WARN_ON
check in lib/group_cpus.c:

       WARN_ON(nr_present + nr_others < numgrps);

which triggers if the caller tries to create more hardware queues than
CPUs. It tests the same condition as the WARN_ON_ONCE in
blk_mq_pci_map_queues did.

Fixes: a5665c3d15 ("virtio: blk/scsi: replace blk_mq_virtio_map_queues with blk_mq_map_hw_queues")
Reported-by: Steven Rostedt <rostedt@goodmis.org>
Closes: https://lore.kernel.org/all/20250122093020.6e8a4e5b@gandalf.local.home/
Signed-off-by: Daniel Wagner <wagi@kernel.org>
Link: https://lore.kernel.org/r/20250123-fix-blk_mq_map_hw_queues-v1-1-08dbd01f2c39@kernel.org
Signed-off-by: Jens Axboe <axboe@kernel.dk>
2025-01-23 06:34:32 -07:00
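
To make the !SMP case above concrete, here is a minimal sketch (illustration
only; the function name is made up and nothing below exists in the tree) of
the map the fallback ends up producing, using the blk-mq types from the file
that follows:

#include <linux/blk-mq.h>	/* struct blk_mq_queue_map */
#include <linux/cpumask.h>	/* for_each_possible_cpu() */

/*
 * Illustration only: on a !SMP configuration CPU 0 is the only possible
 * CPU, so whichever path blk_mq_map_queues() takes, the resulting map is
 * equivalent to the loop below -- every possible CPU (i.e. CPU 0) points
 * at the first hardware queue, just as blk_mq_clear_mq_map() would leave it.
 */
static void illustrate_up_fallback_map(struct blk_mq_queue_map *qmap)
{
        unsigned int cpu;

        for_each_possible_cpu(cpu)		/* iterates over CPU 0 only */
                qmap->mq_map[cpu] = qmap->queue_offset;	/* hctx 0 for offset 0 */
}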


// SPDX-License-Identifier: GPL-2.0
/*
 * CPU <-> hardware queue mapping helpers
 *
 * Copyright (C) 2013-2014 Jens Axboe
 */
#include <linux/kernel.h>
#include <linux/threads.h>
#include <linux/module.h>
#include <linux/mm.h>
#include <linux/smp.h>
#include <linux/cpu.h>
#include <linux/group_cpus.h>
#include <linux/device/bus.h>

#include "blk.h"
#include "blk-mq.h"

void blk_mq_map_queues(struct blk_mq_queue_map *qmap)
{
        const struct cpumask *masks;
        unsigned int queue, cpu;

        masks = group_cpus_evenly(qmap->nr_queues);
        if (!masks) {
                for_each_possible_cpu(cpu)
                        qmap->mq_map[cpu] = qmap->queue_offset;
                return;
        }

        for (queue = 0; queue < qmap->nr_queues; queue++) {
                for_each_cpu(cpu, &masks[queue])
                        qmap->mq_map[cpu] = qmap->queue_offset + queue;
        }
        kfree(masks);
}
EXPORT_SYMBOL_GPL(blk_mq_map_queues);

/**
 * blk_mq_hw_queue_to_node - Look up the memory node for a hardware queue index
 * @qmap: CPU to hardware queue map.
 * @index: hardware queue index.
 *
 * We have no quick way of doing reverse lookups. This is only used at
 * queue init time, so runtime isn't important.
 */
int blk_mq_hw_queue_to_node(struct blk_mq_queue_map *qmap, unsigned int index)
{
        int i;

        for_each_possible_cpu(i) {
                if (index == qmap->mq_map[i])
                        return cpu_to_node(i);
        }

        return NUMA_NO_NODE;
}

/**
 * blk_mq_map_hw_queues - Create CPU to hardware queue mapping
 * @qmap: CPU to hardware queue map
 * @dev: The device to map queues
 * @offset: Queue offset to use for the device
 *
 * Create a CPU to hardware queue mapping in @qmap. The struct bus_type
 * irq_get_affinity callback will be used to retrieve the affinity.
 */
void blk_mq_map_hw_queues(struct blk_mq_queue_map *qmap,
                          struct device *dev, unsigned int offset)
{
        const struct cpumask *mask;
        unsigned int queue, cpu;

        if (!dev->bus->irq_get_affinity)
                goto fallback;

        for (queue = 0; queue < qmap->nr_queues; queue++) {
                mask = dev->bus->irq_get_affinity(dev, queue + offset);
                if (!mask)
                        goto fallback;

                for_each_cpu(cpu, mask)
                        qmap->mq_map[cpu] = qmap->queue_offset + queue;
        }

        return;

fallback:
        blk_mq_map_queues(qmap);
}
EXPORT_SYMBOL_GPL(blk_mq_map_hw_queues);
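
For reference, a hedged sketch of how a driver might wire blk_mq_map_hw_queues()
into its tag set; the driver structure, field names, and callback below are
hypothetical, while blk_mq_map_hw_queues(), struct blk_mq_tag_set,
HCTX_TYPE_DEFAULT, and the ->map_queues hook are the real blk-mq interfaces:

#include <linux/blk-mq.h>	/* blk-mq types; blk_mq_map_hw_queues() declaration assumed here */
#include <linux/device.h>	/* struct device */

struct example_dev {			/* hypothetical driver private data */
        struct device *dev;		/* bus device providing irq_get_affinity */
};

/*
 * Hypothetical ->map_queues callback: build the CPU -> hctx map from the
 * device's per-vector IRQ affinity.  If the bus has no irq_get_affinity
 * callback, or it returns NULL for a vector, the helper silently falls
 * back to blk_mq_map_queues().
 */
static void example_map_queues(struct blk_mq_tag_set *set)
{
        struct example_dev *edev = set->driver_data;

        blk_mq_map_hw_queues(&set->map[HCTX_TYPE_DEFAULT], edev->dev, 0);
}

static const struct blk_mq_ops example_mq_ops = {
        /* .queue_rq and friends omitted; only the mapping hook matters here */
        .map_queues	= example_map_queues,
};

With this wiring, a device that cannot report per-queue affinity simply
degrades to the even spread computed by blk_mq_map_queues(), which is the
behaviour the commit message above relies on.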