
block: simplify tag allocation policy selection

Use a plain BLK_MQ_F_* flag to select the round robin tag selection
instead of overlaying an enum with just two possible values into the
flags space.

Doing so allows adding a BLK_MQ_F_MAX sentinel for simplified overflow
checking in the messy debugfs helpers.

Signed-off-by: Christoph Hellwig <hch@lst.de>
Reviewed-by: John Garry <john.g.garry@oracle.com>
Link: https://lore.kernel.org/r/20250106083531.799976-5-hch@lst.de
Signed-off-by: Jens Axboe <axboe@kernel.dk>
Christoph Hellwig 2025-01-06 09:35:11 +01:00 committed by Jens Axboe
parent e7602bb4f3
commit ce32496ec1
14 changed files with 29 additions and 55 deletions
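The gist of the change, before the hunks: the old code packed a two-value enum into the flags word behind a pair of shift macros, while the new code simply tests one bit. A minimal userspace restatement (the constants are taken from the hunks below; the harness around them is illustrative only):

	#include <stdbool.h>
	#include <stdio.h>

	/* Old scheme: a 1-bit policy "field" overlaid on the flags word. */
	#define BLK_MQ_F_ALLOC_POLICY_START_BIT	7
	#define BLK_MQ_F_ALLOC_POLICY_BITS	1
	#define BLK_MQ_FLAG_TO_ALLOC_POLICY(flags) \
		(((flags) >> BLK_MQ_F_ALLOC_POLICY_START_BIT) & \
		 ((1 << BLK_MQ_F_ALLOC_POLICY_BITS) - 1))
	enum { BLK_TAG_ALLOC_FIFO, BLK_TAG_ALLOC_RR };

	/* New scheme: a plain flag bit. */
	#define BLK_MQ_F_TAG_RR	(1 << 5)

	int main(void)
	{
		unsigned int old_flags = 1u << BLK_MQ_F_ALLOC_POLICY_START_BIT;
		unsigned int new_flags = BLK_MQ_F_TAG_RR;

		/* Both express "round robin", but only one needs macros. */
		printf("old RR: %d\n",
		       BLK_MQ_FLAG_TO_ALLOC_POLICY(old_flags) == BLK_TAG_ALLOC_RR);
		printf("new RR: %d\n", !!(new_flags & BLK_MQ_F_TAG_RR));
		return 0;
	}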

block/blk-mq-debugfs.c

@@ -172,19 +172,13 @@ static int hctx_state_show(void *data, struct seq_file *m)
 	return 0;
 }
 
-#define BLK_TAG_ALLOC_NAME(name) [BLK_TAG_ALLOC_##name] = #name
-static const char *const alloc_policy_name[] = {
-	BLK_TAG_ALLOC_NAME(FIFO),
-	BLK_TAG_ALLOC_NAME(RR),
-};
-#undef BLK_TAG_ALLOC_NAME
-
 #define HCTX_FLAG_NAME(name) [ilog2(BLK_MQ_F_##name)] = #name
 static const char *const hctx_flag_name[] = {
 	HCTX_FLAG_NAME(TAG_QUEUE_SHARED),
 	HCTX_FLAG_NAME(STACKING),
 	HCTX_FLAG_NAME(TAG_HCTX_SHARED),
 	HCTX_FLAG_NAME(BLOCKING),
+	HCTX_FLAG_NAME(TAG_RR),
 	HCTX_FLAG_NAME(NO_SCHED_BY_DEFAULT),
 };
 #undef HCTX_FLAG_NAME
@@ -192,22 +186,11 @@ static const char *const hctx_flag_name[] = {
 static int hctx_flags_show(void *data, struct seq_file *m)
 {
 	struct blk_mq_hw_ctx *hctx = data;
-	const int alloc_policy = BLK_MQ_FLAG_TO_ALLOC_POLICY(hctx->flags);
-
-	BUILD_BUG_ON(ARRAY_SIZE(hctx_flag_name) !=
-			BLK_MQ_F_ALLOC_POLICY_START_BIT);
-	BUILD_BUG_ON(ARRAY_SIZE(alloc_policy_name) != BLK_TAG_ALLOC_MAX);
 
-	seq_puts(m, "alloc_policy=");
-	if (alloc_policy < ARRAY_SIZE(alloc_policy_name) &&
-	    alloc_policy_name[alloc_policy])
-		seq_puts(m, alloc_policy_name[alloc_policy]);
-	else
-		seq_printf(m, "%d", alloc_policy);
-	seq_puts(m, " ");
-	blk_flags_show(m,
-			hctx->flags ^ BLK_ALLOC_POLICY_TO_MQ_FLAG(alloc_policy),
-			hctx_flag_name, ARRAY_SIZE(hctx_flag_name));
+	BUILD_BUG_ON(ARRAY_SIZE(hctx_flag_name) != ilog2(BLK_MQ_F_MAX));
+
+	blk_flags_show(m, hctx->flags, hctx_flag_name,
+			ARRAY_SIZE(hctx_flag_name));
 	seq_puts(m, "\n");
 	return 0;
 }
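What the new BUILD_BUG_ON buys: with BLK_MQ_F_MAX defined as one bit past the last real flag, "every flag has a name entry" collapses to a single array-size comparison. A self-contained analogue of the trick (flag names and values here are made up; __builtin_clz stands in for the kernel's ilog2()):

	enum {
		F_A	= 1 << 1,
		F_B	= 1 << 2,
		F_C	= 1 << 3,
		F_MAX	= 1 << 4,	/* sentinel: one past the last flag */
	};

	#define ILOG2(x)	(31 - __builtin_clz(x))
	#define FLAG_NAME(f)	[ILOG2(F_##f)] = #f

	static const char *const flag_name[] = {
		FLAG_NAME(A),
		FLAG_NAME(B),
		FLAG_NAME(C),
	};

	/* Adding a flag without a matching name entry (or moving F_MAX)
	 * now fails at compile time instead of printing garbage. */
	_Static_assert(sizeof(flag_name) / sizeof(flag_name[0]) == ILOG2(F_MAX),
		       "flag_name[] out of sync with the flag enum");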

block/blk-mq-tag.c

@@ -545,11 +545,10 @@ static int bt_alloc(struct sbitmap_queue *bt, unsigned int depth,
 }
 
 struct blk_mq_tags *blk_mq_init_tags(unsigned int total_tags,
-		unsigned int reserved_tags,
-		int node, int alloc_policy)
+		unsigned int reserved_tags, unsigned int flags, int node)
 {
 	unsigned int depth = total_tags - reserved_tags;
-	bool round_robin = alloc_policy == BLK_TAG_ALLOC_RR;
+	bool round_robin = flags & BLK_MQ_F_TAG_RR;
 	struct blk_mq_tags *tags;
 
 	if (total_tags > BLK_MQ_TAG_MAX) {
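The round_robin flag is ultimately forwarded to the sbitmap allocator backing the tag map: FIFO scans from the lowest free bit every time, while round-robin resumes scanning after the most recently allocated tag. A toy model of the two policies over a 32-tag word (illustrative only, not the kernel's sbitmap):

	#include <stdbool.h>
	#include <stdint.h>

	struct toy_tags {
		uint32_t used;		/* bit i set => tag i in use */
		unsigned int hint;	/* round robin: next scan start */
		bool round_robin;
	};

	/* Returns a free tag in [0, 32), or -1 if all tags are in use. */
	static int toy_get_tag(struct toy_tags *t)
	{
		unsigned int start = t->round_robin ? t->hint : 0;

		for (unsigned int i = 0; i < 32; i++) {
			unsigned int tag = (start + i) % 32;

			if (!(t->used & (1u << tag))) {
				t->used |= 1u << tag;
				if (t->round_robin)
					t->hint = (tag + 1) % 32;
				return tag;
			}
		}
		return -1;
	}

With round_robin set, an allocate/free/allocate sequence yields tag 0 and then tag 1 instead of reusing tag 0; that spread is what the libata and hisi_sas templates below opt in to.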

block/blk-mq.c

@@ -3476,8 +3476,7 @@ static struct blk_mq_tags *blk_mq_alloc_rq_map(struct blk_mq_tag_set *set,
 	if (node == NUMA_NO_NODE)
 		node = set->numa_node;
 
-	tags = blk_mq_init_tags(nr_tags, reserved_tags, node,
-				BLK_MQ_FLAG_TO_ALLOC_POLICY(set->flags));
+	tags = blk_mq_init_tags(nr_tags, reserved_tags, set->flags, node);
 	if (!tags)
 		return NULL;
 

block/blk-mq.h

@@ -163,7 +163,7 @@ struct blk_mq_alloc_data {
 };
 
 struct blk_mq_tags *blk_mq_init_tags(unsigned int nr_tags,
-		unsigned int reserved_tags, int node, int alloc_policy);
+		unsigned int reserved_tags, unsigned int flags, int node);
 void blk_mq_free_tags(struct blk_mq_tags *tags);
 
 unsigned int blk_mq_get_tag(struct blk_mq_alloc_data *data);

drivers/ata/ahci.h

@@ -396,7 +396,7 @@ extern const struct attribute_group *ahci_sdev_groups[];
 	.shost_groups		= ahci_shost_groups,			\
 	.sdev_groups		= ahci_sdev_groups,			\
 	.change_queue_depth	= ata_scsi_change_queue_depth,		\
-	.tag_alloc_policy	= BLK_TAG_ALLOC_RR,			\
+	.tag_alloc_policy_rr	= true,					\
 	.device_configure	= ata_scsi_device_configure
 
 extern struct ata_port_operations ahci_ops;

drivers/ata/pata_macio.c

@@ -935,7 +935,7 @@ static const struct scsi_host_template pata_macio_sht = {
 	.device_configure	= pata_macio_device_configure,
 	.sdev_groups		= ata_common_sdev_groups,
 	.can_queue		= ATA_DEF_QUEUE,
-	.tag_alloc_policy	= BLK_TAG_ALLOC_RR,
+	.tag_alloc_policy_rr	= true,
 };
 
 static struct ata_port_operations pata_macio_ops = {

drivers/ata/sata_mv.c

@@ -672,7 +672,7 @@ static const struct scsi_host_template mv6_sht = {
 	.dma_boundary		= MV_DMA_BOUNDARY,
 	.sdev_groups		= ata_ncq_sdev_groups,
 	.change_queue_depth	= ata_scsi_change_queue_depth,
-	.tag_alloc_policy	= BLK_TAG_ALLOC_RR,
+	.tag_alloc_policy_rr	= true,
 	.device_configure	= ata_scsi_device_configure
 };
 

drivers/ata/sata_nv.c

@@ -385,7 +385,7 @@ static const struct scsi_host_template nv_adma_sht = {
 	.device_configure	= nv_adma_device_configure,
 	.sdev_groups		= ata_ncq_sdev_groups,
 	.change_queue_depth	= ata_scsi_change_queue_depth,
-	.tag_alloc_policy	= BLK_TAG_ALLOC_RR,
+	.tag_alloc_policy_rr	= true,
 };
 
 static const struct scsi_host_template nv_swncq_sht = {
@@ -396,7 +396,7 @@ static const struct scsi_host_template nv_swncq_sht = {
 	.device_configure	= nv_swncq_device_configure,
 	.sdev_groups		= ata_ncq_sdev_groups,
 	.change_queue_depth	= ata_scsi_change_queue_depth,
-	.tag_alloc_policy	= BLK_TAG_ALLOC_RR,
+	.tag_alloc_policy_rr	= true,
 };
 
 /*

drivers/ata/sata_sil24.c

@@ -378,7 +378,6 @@ static const struct scsi_host_template sil24_sht = {
 	.can_queue		= SIL24_MAX_CMDS,
 	.sg_tablesize		= SIL24_MAX_SGE,
 	.dma_boundary		= ATA_DMA_BOUNDARY,
-	.tag_alloc_policy	= BLK_TAG_ALLOC_FIFO,
 	.sdev_groups		= ata_ncq_sdev_groups,
 	.change_queue_depth	= ata_scsi_change_queue_depth,
 	.device_configure	= ata_scsi_device_configure

drivers/scsi/hisi_sas/hisi_sas_v3_hw.c

@@ -3345,7 +3345,7 @@ static const struct scsi_host_template sht_v3_hw = {
 	.slave_alloc		= hisi_sas_slave_alloc,
 	.shost_groups		= host_v3_hw_groups,
 	.sdev_groups		= sdev_groups_v3_hw,
-	.tag_alloc_policy	= BLK_TAG_ALLOC_RR,
+	.tag_alloc_policy_rr	= true,
 	.host_reset		= hisi_sas_host_reset,
 	.host_tagset		= 1,
 	.mq_poll		= queue_complete_v3_hw,

drivers/scsi/scsi_lib.c

@@ -2065,8 +2065,8 @@ int scsi_mq_setup_tags(struct Scsi_Host *shost)
 	tag_set->queue_depth = shost->can_queue;
 	tag_set->cmd_size = cmd_size;
 	tag_set->numa_node = dev_to_node(shost->dma_dev);
-	tag_set->flags |=
-		BLK_ALLOC_POLICY_TO_MQ_FLAG(shost->hostt->tag_alloc_policy);
+	if (shost->hostt->tag_alloc_policy_rr)
+		tag_set->flags |= BLK_MQ_F_TAG_RR;
 	if (shost->queuecommand_may_block)
 		tag_set->flags |= BLK_MQ_F_BLOCKING;
 	tag_set->driver_data = shost;
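From a driver's point of view the conversion is now a single boolean in the host template, which scsi_mq_setup_tags() translates into BLK_MQ_F_TAG_RR above. A hypothetical template showing the before/after (driver name and depth invented; the pattern matches the libata and hisi_sas hunks in this commit):

	static const struct scsi_host_template example_sht = {
		.name			= "example",
		.can_queue		= 32,
		/* was: .tag_alloc_policy = BLK_TAG_ALLOC_RR, */
		.tag_alloc_policy_rr	= true,
	};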

include/linux/blk-mq.h

@@ -296,13 +296,6 @@ enum blk_eh_timer_return {
 	BLK_EH_RESET_TIMER,
 };
 
-/* Keep alloc_policy_name[] in sync with the definitions below */
-enum {
-	BLK_TAG_ALLOC_FIFO, /* allocate starting from 0 */
-	BLK_TAG_ALLOC_RR, /* allocate starting from last allocated tag */
-	BLK_TAG_ALLOC_MAX
-};
-
 /**
  * struct blk_mq_hw_ctx - State for a hardware queue facing the hardware
  * block device
@@ -677,20 +670,19 @@ enum {
 	BLK_MQ_F_TAG_HCTX_SHARED = 1 << 3,
 	BLK_MQ_F_BLOCKING	= 1 << 4,
 
+	/*
+	 * Alloc tags on a round-robin base instead of the first available one.
+	 */
+	BLK_MQ_F_TAG_RR		= 1 << 5,
+
 	/*
 	 * Select 'none' during queue registration in case of a single hwq
 	 * or shared hwqs instead of 'mq-deadline'.
 	 */
 	BLK_MQ_F_NO_SCHED_BY_DEFAULT	= 1 << 6,
-	BLK_MQ_F_ALLOC_POLICY_START_BIT = 7,
-	BLK_MQ_F_ALLOC_POLICY_BITS = 1,
+
+	BLK_MQ_F_MAX = 1 << 7,
 };
 
-#define BLK_MQ_FLAG_TO_ALLOC_POLICY(flags) \
-	((flags >> BLK_MQ_F_ALLOC_POLICY_START_BIT) & \
-		((1 << BLK_MQ_F_ALLOC_POLICY_BITS) - 1))
-#define BLK_ALLOC_POLICY_TO_MQ_FLAG(policy) \
-	((policy & ((1 << BLK_MQ_F_ALLOC_POLICY_BITS) - 1)) \
-		<< BLK_MQ_F_ALLOC_POLICY_START_BIT)
 #define BLK_MQ_MAX_DEPTH	(10240)
 #define BLK_MQ_NO_HCTX_IDX	(-1U)
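Why the conversion loses nothing: with BLK_MQ_F_ALLOC_POLICY_BITS fixed at 1, the old macros could only ever encode a single bit, so BLK_ALLOC_POLICY_TO_MQ_FLAG(BLK_TAG_ALLOC_RR) expanded to (1 & 1) << 7 = 0x80. The enum overlay was one flag bit in disguise; the new BLK_MQ_F_TAG_RR moves it to bit 5 and frees bit 7 for the sentinel. A compile-time restatement of that arithmetic (illustrative, reusing the old constants from this hunk):

	#define OLD_START_BIT	7	/* BLK_MQ_F_ALLOC_POLICY_START_BIT */
	#define OLD_BITS	1	/* BLK_MQ_F_ALLOC_POLICY_BITS */
	#define OLD_RR		1	/* BLK_TAG_ALLOC_RR */

	_Static_assert(((OLD_RR & ((1 << OLD_BITS) - 1)) << OLD_START_BIT) == 0x80,
		       "the old RR policy occupied exactly one bit: bit 7");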

include/linux/libata.h

@@ -1467,13 +1467,13 @@ extern const struct attribute_group *ata_common_sdev_groups[];
 #define ATA_SUBBASE_SHT(drv_name)			\
 	__ATA_BASE_SHT(drv_name),			\
 	.can_queue		= ATA_DEF_QUEUE,	\
-	.tag_alloc_policy	= BLK_TAG_ALLOC_RR,	\
+	.tag_alloc_policy_rr	= true,			\
 	.device_configure	= ata_scsi_device_configure
 
 #define ATA_SUBBASE_SHT_QD(drv_name, drv_qd)		\
 	__ATA_BASE_SHT(drv_name),			\
 	.can_queue		= drv_qd,		\
-	.tag_alloc_policy	= BLK_TAG_ALLOC_RR,	\
+	.tag_alloc_policy_rr	= true,			\
 	.device_configure	= ata_scsi_device_configure
 
 #define ATA_BASE_SHT(drv_name)				\

include/scsi/scsi_host.h

@@ -438,8 +438,10 @@ struct scsi_host_template {
 	 */
 	short cmd_per_lun;
 
-	/* If use block layer to manage tags, this is tag allocation policy */
-	int tag_alloc_policy;
+	/*
+	 * Allocate tags starting from last allocated tag.
+	 */
+	bool tag_alloc_policy_rr : 1;
 
 	/*
 	 * Track QUEUE_FULL events and reduce queue depth on demand.