block: remove BLK_MQ_F_NO_SCHED
The only queues that really can't support a scheduler are those that do
not have a gendisk associated with them, and thus can't be used for
non-passthrough commands.

In addition to those, null_blk can optionally set the flag, which is a
bit odd. Replace the null_blk usage with BLK_MQ_F_NO_SCHED_BY_DEFAULT
to keep the expected semantics, and then remove BLK_MQ_F_NO_SCHED, as
the non-disk queues never call into elevator_init_mq() or
blk_register_queue(), which adds the sysfs attributes.

Signed-off-by: Christoph Hellwig <hch@lst.de>
Link: https://lore.kernel.org/r/20250106083531.799976-4-hch@lst.de
Signed-off-by: Jens Axboe <axboe@kernel.dk>
parent 68ed451222
commit e7602bb4f3
8 changed files with 3 additions and 29 deletions
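For context (not part of the commit): a minimal sketch of what the two flags mean when a driver sets up its tag set. The blk_mq_tag_set fields are the real blk-mq API, but example_ops and example_set are hypothetical names used only for illustration.

#include <linux/blk-mq.h>

/*
 * A queue without a gendisk (e.g. an NVMe admin queue) never reaches
 * elevator_init_mq() or blk_register_queue(), so after this commit it
 * needs no flag at all to stay scheduler-free.  A disk queue that only
 * wants to *start* without a scheduler opts in below instead, which
 * keeps the scheduler configurable through sysfs later on.
 */
static const struct blk_mq_ops example_ops;	/* hypothetical ops */

static struct blk_mq_tag_set example_set = {
	.ops		= &example_ops,
	.nr_hw_queues	= 1,
	.queue_depth	= 128,
	.numa_node	= NUMA_NO_NODE,
	/* was: .flags = BLK_MQ_F_NO_SCHED (scheduler configuration forbidden) */
	.flags		= BLK_MQ_F_NO_SCHED_BY_DEFAULT,
};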
--- a/block/blk-mq-debugfs.c
+++ b/block/blk-mq-debugfs.c
@@ -185,7 +185,6 @@ static const char *const hctx_flag_name[] = {
 	HCTX_FLAG_NAME(STACKING),
 	HCTX_FLAG_NAME(TAG_HCTX_SHARED),
 	HCTX_FLAG_NAME(BLOCKING),
-	HCTX_FLAG_NAME(NO_SCHED),
 	HCTX_FLAG_NAME(NO_SCHED_BY_DEFAULT),
 };
 #undef HCTX_FLAG_NAME
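One note on the debugfs hunk: the flag-name table is generated from the flag constants themselves, roughly via the pattern sketched below, so the NO_SCHED entry has to go in the same patch, or the table would reference BLK_MQ_F_NO_SCHED after its removal and break the build.

/* Sketch of the table pattern in block/blk-mq-debugfs.c: each slot is
 * placed at the flag's bit position and named after the constant, so
 * HCTX_FLAG_NAME(NO_SCHED) expands a reference to BLK_MQ_F_NO_SCHED.
 */
#define HCTX_FLAG_NAME(name) [ilog2(BLK_MQ_F_##name)] = #name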
--- a/block/bsg-lib.c
+++ b/block/bsg-lib.c
@@ -381,7 +381,7 @@ struct request_queue *bsg_setup_queue(struct device *dev, const char *name,
 	set->queue_depth = 128;
 	set->numa_node = NUMA_NO_NODE;
 	set->cmd_size = sizeof(struct bsg_job) + dd_job_size;
-	set->flags = BLK_MQ_F_NO_SCHED | BLK_MQ_F_BLOCKING;
+	set->flags = BLK_MQ_F_BLOCKING;
 	if (blk_mq_alloc_tag_set(set))
 		goto out_tag_set;
 
--- a/block/elevator.c
+++ b/block/elevator.c
@@ -547,14 +547,6 @@ void elv_unregister(struct elevator_type *e)
 }
 EXPORT_SYMBOL_GPL(elv_unregister);
 
-static inline bool elv_support_iosched(struct request_queue *q)
-{
-	if (!queue_is_mq(q) ||
-	    (q->tag_set->flags & BLK_MQ_F_NO_SCHED))
-		return false;
-	return true;
-}
-
 /*
  * For single queue devices, default to using mq-deadline. If we have multiple
  * queues or mq-deadline is not available, default to "none".
@@ -580,9 +572,6 @@ void elevator_init_mq(struct request_queue *q)
 	struct elevator_type *e;
 	int err;
 
-	if (!elv_support_iosched(q))
-		return;
-
 	WARN_ON_ONCE(blk_queue_registered(q));
 
 	if (unlikely(q->elevator))
@@ -714,9 +703,6 @@ void elv_iosched_load_module(struct gendisk *disk, const char *buf,
 	struct elevator_type *found;
 	const char *name;
 
-	if (!elv_support_iosched(disk->queue))
-		return;
-
 	strscpy(elevator_name, buf, sizeof(elevator_name));
 	name = strstrip(elevator_name);
 
@@ -734,9 +720,6 @@ ssize_t elv_iosched_store(struct gendisk *disk, const char *buf,
 	char elevator_name[ELV_NAME_MAX];
 	int ret;
 
-	if (!elv_support_iosched(disk->queue))
-		return count;
-
 	strscpy(elevator_name, buf, sizeof(elevator_name));
 	ret = elevator_change(disk->queue, strstrip(elevator_name));
 	if (!ret)
@@ -751,9 +734,6 @@ ssize_t elv_iosched_show(struct gendisk *disk, char *name)
 	struct elevator_type *cur = NULL, *e;
 	int len = 0;
 
-	if (!elv_support_iosched(q))
-		return sprintf(name, "none\n");
-
 	if (!q->elevator) {
 		len += sprintf(name+len, "[none] ");
 	} else {
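After these removals the elevator sysfs handlers and elevator_init_mq() run without a guard. That is safe for the reason the commit message gives: the scheduler sysfs attributes only exist once blk_register_queue() creates them, and only gendisk-backed queues get registered there; the queue_is_mq() half of the old check is covered as well, since those attributes sit in a blk-mq-only sysfs group and elevator_init_mq() is only invoked for blk-mq queues.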
--- a/drivers/block/null_blk/main.c
+++ b/drivers/block/null_blk/main.c
@@ -1792,7 +1792,7 @@ static int null_init_global_tag_set(void)
 	tag_set.queue_depth = g_hw_queue_depth;
 	tag_set.numa_node = g_home_node;
 	if (g_no_sched)
-		tag_set.flags |= BLK_MQ_F_NO_SCHED;
+		tag_set.flags |= BLK_MQ_F_NO_SCHED_BY_DEFAULT;
 	if (g_shared_tag_bitmap)
 		tag_set.flags |= BLK_MQ_F_TAG_HCTX_SHARED;
 	if (g_blocking)
@@ -1817,7 +1817,7 @@ static int null_setup_tagset(struct nullb *nullb)
 	nullb->tag_set->queue_depth = nullb->dev->hw_queue_depth;
 	nullb->tag_set->numa_node = nullb->dev->home_node;
 	if (nullb->dev->no_sched)
-		nullb->tag_set->flags |= BLK_MQ_F_NO_SCHED;
+		nullb->tag_set->flags |= BLK_MQ_F_NO_SCHED_BY_DEFAULT;
 	if (nullb->dev->shared_tag_bitmap)
 		nullb->tag_set->flags |= BLK_MQ_F_TAG_HCTX_SHARED;
 	if (nullb->dev->blocking)
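The behavioral intent of the null_blk switch: BLK_MQ_F_NO_SCHED_BY_DEFAULT only influences which scheduler is picked at registration time, roughly as sketched below (an approximation of the selection logic in block/elevator.c, not part of this diff; the function name here is invented and the real conditions differ by kernel version). So no_sched=1 still brings the device up with "none", but no longer forbids selecting a scheduler afterwards.

/*
 * Approximation of the default-elevator choice in block/elevator.c
 * (name and conditions simplified; exact code differs by version).
 */
static struct elevator_type *example_default_elevator(struct request_queue *q)
{
	/* opt-out: start with "none"; sysfs can still select one later */
	if (q->tag_set->flags & BLK_MQ_F_NO_SCHED_BY_DEFAULT)
		return NULL;

	/* multiqueue devices also default to "none" */
	if (q->nr_hw_queues != 1)
		return NULL;

	/* single-queue devices prefer mq-deadline when available */
	return elevator_find_get("mq-deadline");
}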
--- a/drivers/nvme/host/apple.c
+++ b/drivers/nvme/host/apple.c
@@ -1251,7 +1251,6 @@ static int apple_nvme_alloc_tagsets(struct apple_nvme *anv)
 	anv->admin_tagset.timeout = NVME_ADMIN_TIMEOUT;
 	anv->admin_tagset.numa_node = NUMA_NO_NODE;
 	anv->admin_tagset.cmd_size = sizeof(struct apple_nvme_iod);
-	anv->admin_tagset.flags = BLK_MQ_F_NO_SCHED;
 	anv->admin_tagset.driver_data = &anv->adminq;
 
 	ret = blk_mq_alloc_tag_set(&anv->admin_tagset);
--- a/drivers/nvme/host/core.c
+++ b/drivers/nvme/host/core.c
@@ -4564,7 +4564,6 @@ int nvme_alloc_admin_tag_set(struct nvme_ctrl *ctrl, struct blk_mq_tag_set *set,
 	/* Reserved for fabric connect and keep alive */
 	set->reserved_tags = 2;
 	set->numa_node = ctrl->numa_node;
-	set->flags = BLK_MQ_F_NO_SCHED;
 	if (ctrl->ops->flags & NVME_F_BLOCKING)
 		set->flags |= BLK_MQ_F_BLOCKING;
 	set->cmd_size = cmd_size;
--- a/drivers/ufs/core/ufshcd.c
+++ b/drivers/ufs/core/ufshcd.c
@@ -10412,7 +10412,6 @@ static int ufshcd_add_scsi_host(struct ufs_hba *hba)
 		.nr_hw_queues = 1,
 		.queue_depth = hba->nutmrs,
 		.ops = &ufshcd_tmf_ops,
-		.flags = BLK_MQ_F_NO_SCHED,
 	};
 	err = blk_mq_alloc_tag_set(&hba->tmf_tag_set);
 	if (err < 0)
--- a/include/linux/blk-mq.h
+++ b/include/linux/blk-mq.h
@@ -676,8 +676,6 @@ enum {
 	BLK_MQ_F_STACKING	= 1 << 2,
 	BLK_MQ_F_TAG_HCTX_SHARED = 1 << 3,
 	BLK_MQ_F_BLOCKING	= 1 << 4,
-	/* Do not allow an I/O scheduler to be configured. */
-	BLK_MQ_F_NO_SCHED	= 1 << 5,
 
 	/*
 	 * Select 'none' during queue registration in case of a single hwq