mmc: pass queue_limits to blk_mq_alloc_disk
Pass the queue limit set at initialization time directly to
blk_mq_alloc_disk instead of updating it right after the allocation.
This requires refactoring the code a bit so that what was
mmc_setup_queue before also allocates the gendisk now and actually
sets all limits.

Signed-off-by: Christoph Hellwig <hch@lst.de>
Acked-by: Ulf Hansson <ulf.hansson@linaro.org>
Link: https://lore.kernel.org/r/20240215070300.2200308-18-hch@lst.de
Signed-off-by: Jens Axboe <axboe@kernel.dk>
parent 494ea040bc
commit 616f876617
1 changed file with 56 additions and 49 deletions
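For background, the pattern this series moves drivers to is building a
struct queue_limits on the stack and handing it to blk_mq_alloc_disk(),
which applies the limits as part of disk allocation instead of the
driver updating a live queue afterwards. Below is a minimal sketch of
that pattern, independent of the mmc specifics; the example_alloc_disk
helper and the limit values are illustrative placeholders, not part of
this commit:

    #include <linux/blk-mq.h>
    #include <linux/blkdev.h>

    /* Hypothetical driver helper using the post-series allocation style. */
    static struct gendisk *example_alloc_disk(struct blk_mq_tag_set *set,
                                              void *queuedata)
    {
            struct queue_limits lim = {
                    .logical_block_size     = 512,  /* placeholder values */
                    .max_hw_sectors         = 256,
            };

            /* Limits are set once, at allocation time, not patched in later. */
            return blk_mq_alloc_disk(set, &lim, queuedata);
    }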
diff --git a/drivers/mmc/core/queue.c b/drivers/mmc/core/queue.c
--- a/drivers/mmc/core/queue.c
+++ b/drivers/mmc/core/queue.c
@@ -174,8 +174,8 @@ static struct scatterlist *mmc_alloc_sg(unsigned short sg_len, gfp_t gfp)
 	return sg;
 }
 
-static void mmc_queue_setup_discard(struct request_queue *q,
-				    struct mmc_card *card)
+static void mmc_queue_setup_discard(struct mmc_card *card,
+				    struct queue_limits *lim)
 {
 	unsigned max_discard;
 
@@ -183,15 +183,17 @@ static void mmc_queue_setup_discard(struct request_queue *q,
 	if (!max_discard)
 		return;
 
-	blk_queue_max_discard_sectors(q, max_discard);
-	q->limits.discard_granularity = card->pref_erase << 9;
+	lim->max_hw_discard_sectors = max_discard;
+	if (mmc_can_secure_erase_trim(card))
+		lim->max_secure_erase_sectors = max_discard;
+	if (mmc_can_trim(card) && card->erased_byte == 0)
+		lim->max_write_zeroes_sectors = max_discard;
+
 	/* granularity must not be greater than max. discard */
 	if (card->pref_erase > max_discard)
-		q->limits.discard_granularity = SECTOR_SIZE;
-	if (mmc_can_secure_erase_trim(card))
-		blk_queue_max_secure_erase_sectors(q, max_discard);
-	if (mmc_can_trim(card) && card->erased_byte == 0)
-		blk_queue_max_write_zeroes_sectors(q, max_discard);
+		lim->discard_granularity = SECTOR_SIZE;
+	else
+		lim->discard_granularity = card->pref_erase << 9;
 }
 
 static unsigned short mmc_get_max_segments(struct mmc_host *host)
@@ -341,40 +343,53 @@ static const struct blk_mq_ops mmc_mq_ops = {
 	.timeout	= mmc_mq_timed_out,
 };
 
-static void mmc_setup_queue(struct mmc_queue *mq, struct mmc_card *card)
+static struct gendisk *mmc_alloc_disk(struct mmc_queue *mq,
+		struct mmc_card *card)
 {
 	struct mmc_host *host = card->host;
-	unsigned block_size = 512;
+	struct queue_limits lim = { };
+	struct gendisk *disk;
 
+	if (mmc_can_erase(card))
+		mmc_queue_setup_discard(card, &lim);
+
+	if (!mmc_dev(host)->dma_mask || !*mmc_dev(host)->dma_mask)
+		lim.bounce = BLK_BOUNCE_HIGH;
+
+	lim.max_hw_sectors = min(host->max_blk_count, host->max_req_size / 512);
+
+	if (mmc_card_mmc(card) && card->ext_csd.data_sector_size)
+		lim.logical_block_size = card->ext_csd.data_sector_size;
+	else
+		lim.logical_block_size = 512;
+
+	WARN_ON_ONCE(lim.logical_block_size != 512 &&
+		     lim.logical_block_size != 4096);
+
+	/*
+	 * Setting a virt_boundary implicity sets a max_segment_size, so try
+	 * to set the hardware one here.
+	 */
+	if (host->can_dma_map_merge) {
+		lim.virt_boundary_mask = dma_get_merge_boundary(mmc_dev(host));
+		lim.max_segments = MMC_DMA_MAP_MERGE_SEGMENTS;
+	} else {
+		lim.max_segment_size =
+			round_down(host->max_seg_size, lim.logical_block_size);
+		lim.max_segments = host->max_segs;
+	}
+
+	disk = blk_mq_alloc_disk(&mq->tag_set, &lim, mq);
+	if (IS_ERR(disk))
+		return disk;
+	mq->queue = disk->queue;
+
+	if (mmc_host_is_spi(host) && host->use_spi_crc)
+		blk_queue_flag_set(QUEUE_FLAG_STABLE_WRITES, mq->queue);
+	blk_queue_rq_timeout(mq->queue, 60 * HZ);
+
 	blk_queue_flag_set(QUEUE_FLAG_NONROT, mq->queue);
 	blk_queue_flag_clear(QUEUE_FLAG_ADD_RANDOM, mq->queue);
-	if (mmc_can_erase(card))
-		mmc_queue_setup_discard(mq->queue, card);
-
-	if (!mmc_dev(host)->dma_mask || !*mmc_dev(host)->dma_mask)
-		blk_queue_bounce_limit(mq->queue, BLK_BOUNCE_HIGH);
-	blk_queue_max_hw_sectors(mq->queue,
-		min(host->max_blk_count, host->max_req_size / 512));
-	if (host->can_dma_map_merge)
-		WARN(!blk_queue_can_use_dma_map_merging(mq->queue,
-							mmc_dev(host)),
-		     "merging was advertised but not possible");
-	blk_queue_max_segments(mq->queue, mmc_get_max_segments(host));
-
-	if (mmc_card_mmc(card) && card->ext_csd.data_sector_size) {
-		block_size = card->ext_csd.data_sector_size;
-		WARN_ON(block_size != 512 && block_size != 4096);
-	}
-
-	blk_queue_logical_block_size(mq->queue, block_size);
-	/*
-	 * After blk_queue_can_use_dma_map_merging() was called with succeed,
-	 * since it calls blk_queue_virt_boundary(), the mmc should not call
-	 * both blk_queue_max_segment_size().
-	 */
-	if (!host->can_dma_map_merge)
-		blk_queue_max_segment_size(mq->queue,
-			round_down(host->max_seg_size, block_size));
 
 	dma_set_max_seg_size(mmc_dev(host), queue_max_segment_size(mq->queue));
 
@@ -386,6 +401,7 @@ static void mmc_setup_queue(struct mmc_queue *mq, struct mmc_card *card)
 	init_waitqueue_head(&mq->wait);
 
 	mmc_crypto_setup_queue(mq->queue, host);
+	return disk;
 }
 
 static inline bool mmc_merge_capable(struct mmc_host *host)
@@ -447,18 +463,9 @@ struct gendisk *mmc_init_queue(struct mmc_queue *mq, struct mmc_card *card)
 		return ERR_PTR(ret);
 
 
-	disk = blk_mq_alloc_disk(&mq->tag_set, NULL, mq);
-	if (IS_ERR(disk)) {
+	disk = mmc_alloc_disk(mq, card);
+	if (IS_ERR(disk))
 		blk_mq_free_tag_set(&mq->tag_set);
-		return disk;
-	}
-	mq->queue = disk->queue;
-
-	if (mmc_host_is_spi(host) && host->use_spi_crc)
-		blk_queue_flag_set(QUEUE_FLAG_STABLE_WRITES, mq->queue);
-	blk_queue_rq_timeout(mq->queue, 60 * HZ);
-
-	mmc_setup_queue(mq, card);
 	return disk;
 }
 