dm zoned: use GFP_NOIO in I/O path
Use GFP_NOIO for memory allocations in the I/O path. Other memory
allocations in the initialization path can use GFP_KERNEL.

Reported-by: Mikulas Patocka <mpatocka@redhat.com>
Signed-off-by: Damien Le Moal <damien.lemoal@wdc.com>
Reviewed-by: Mikulas Patocka <mpatocka@redhat.com>
Signed-off-by: Mike Snitzer <snitzer@redhat.com>
parent edbe9597ac
commit 4218a95546
3 changed files with 9 additions and 9 deletions
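For context (this snippet is not part of the commit; the struct and function names are illustrative only): GFP_NOIO tells the page allocator that it must not start any block I/O to satisfy the allocation during reclaim, which is the constraint an allocation made while servicing a BIO has to respect. GFP_KERNEL permits full reclaim, including issuing new I/O, and GFP_NOFS only forbids recursing into filesystem code. A minimal sketch of the pattern the patch applies:

#include <linux/bio.h>
#include <linux/gfp.h>
#include <linux/jiffies.h>
#include <linux/slab.h>

/* Hypothetical per-BIO bookkeeping structure (illustrative, not from dm-zoned). */
struct my_io_ctx {
	struct bio *bio;
	unsigned long start_jiffies;
};

/*
 * Hypothetical helper called while a BIO is being processed. GFP_NOIO
 * keeps the allocator from issuing new I/O during reclaim, so the
 * allocation cannot end up waiting on the very device being serviced.
 */
static struct my_io_ctx *my_alloc_io_ctx(struct bio *bio)
{
	struct my_io_ctx *ctx;

	ctx = kmalloc(sizeof(*ctx), GFP_NOIO);
	if (!ctx)
		return NULL;

	ctx->bio = bio;
	ctx->start_jiffies = jiffies;
	return ctx;
}

The diff below draws the same line: allocations on the BIO path (metadata flushes, zone report/reset, chunk work in dmz_queue_chunk_work()) move to GFP_NOIO, while the initialization-time INIT_RADIX_TREE() in dmz_ctr() uses GFP_KERNEL.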
drivers/md/dm-zoned-metadata.c

@@ -624,7 +624,7 @@ static int dmz_write_sb(struct dmz_metadata *zmd, unsigned int set)
 
 	ret = dmz_rdwr_block(zmd, REQ_OP_WRITE, block, mblk->page);
 	if (ret == 0)
-		ret = blkdev_issue_flush(zmd->dev->bdev, GFP_KERNEL, NULL);
+		ret = blkdev_issue_flush(zmd->dev->bdev, GFP_NOIO, NULL);
 
 	return ret;
 }
@@ -658,7 +658,7 @@ static int dmz_write_dirty_mblocks(struct dmz_metadata *zmd,
 
 	/* Flush drive cache (this will also sync data) */
 	if (ret == 0)
-		ret = blkdev_issue_flush(zmd->dev->bdev, GFP_KERNEL, NULL);
+		ret = blkdev_issue_flush(zmd->dev->bdev, GFP_NOIO, NULL);
 
 	return ret;
 }
@@ -722,7 +722,7 @@ int dmz_flush_metadata(struct dmz_metadata *zmd)
 
 	/* If there are no dirty metadata blocks, just flush the device cache */
 	if (list_empty(&write_list)) {
-		ret = blkdev_issue_flush(zmd->dev->bdev, GFP_KERNEL, NULL);
+		ret = blkdev_issue_flush(zmd->dev->bdev, GFP_NOIO, NULL);
 		goto out;
 	}
 
@@ -927,7 +927,7 @@ static int dmz_recover_mblocks(struct dmz_metadata *zmd, unsigned int dst_set)
 			(zmd->nr_meta_zones << zmd->dev->zone_nr_blocks_shift);
 	}
 
-	page = alloc_page(GFP_KERNEL);
+	page = alloc_page(GFP_NOIO);
 	if (!page)
 		return -ENOMEM;
 
@@ -1183,7 +1183,7 @@ static int dmz_update_zone(struct dmz_metadata *zmd, struct dm_zone *zone)
 
 	/* Get zone information from disk */
 	ret = blkdev_report_zones(zmd->dev->bdev, dmz_start_sect(zmd, zone),
-				  &blkz, &nr_blkz, GFP_KERNEL);
+				  &blkz, &nr_blkz, GFP_NOIO);
 	if (ret) {
 		dmz_dev_err(zmd->dev, "Get zone %u report failed",
 			    dmz_id(zmd, zone));
@@ -1257,7 +1257,7 @@ static int dmz_reset_zone(struct dmz_metadata *zmd, struct dm_zone *zone)
 
 		ret = blkdev_reset_zones(dev->bdev,
 					 dmz_start_sect(zmd, zone),
-					 dev->zone_nr_sectors, GFP_KERNEL);
+					 dev->zone_nr_sectors, GFP_NOIO);
 		if (ret) {
 			dmz_dev_err(dev, "Reset zone %u failed %d",
 				    dmz_id(zmd, zone), ret);
drivers/md/dm-zoned-reclaim.c

@@ -75,7 +75,7 @@ static int dmz_reclaim_align_wp(struct dmz_reclaim *zrc, struct dm_zone *zone,
 	nr_blocks = block - wp_block;
 	ret = blkdev_issue_zeroout(zrc->dev->bdev,
 				   dmz_start_sect(zmd, zone) + dmz_blk2sect(wp_block),
-				   dmz_blk2sect(nr_blocks), GFP_NOFS, false);
+				   dmz_blk2sect(nr_blocks), GFP_NOIO, 0);
 	if (ret) {
 		dmz_dev_err(zrc->dev,
 			    "Align zone %u wp %llu to %llu (wp+%u) blocks failed %d",
drivers/md/dm-zoned-target.c

@@ -541,7 +541,7 @@ static void dmz_queue_chunk_work(struct dmz_target *dmz, struct bio *bio)
 	int ret;
 
 	/* Create a new chunk work */
-	cw = kmalloc(sizeof(struct dm_chunk_work), GFP_NOFS);
+	cw = kmalloc(sizeof(struct dm_chunk_work), GFP_NOIO);
 	if (!cw)
 		goto out;
 
@@ -785,7 +785,7 @@ static int dmz_ctr(struct dm_target *ti, unsigned int argc, char **argv)
 
 	/* Chunk BIO work */
 	mutex_init(&dmz->chunk_lock);
-	INIT_RADIX_TREE(&dmz->chunk_rxtree, GFP_NOFS);
+	INIT_RADIX_TREE(&dmz->chunk_rxtree, GFP_KERNEL);
 	dmz->chunk_wq = alloc_workqueue("dmz_cwq_%s", WQ_MEM_RECLAIM | WQ_UNBOUND,
 					0, dev->name);
 	if (!dmz->chunk_wq) {