block: remove the ->rq_disk field in struct request
Just use the disk attached to the request_queue instead.

Signed-off-by: Christoph Hellwig <hch@lst.de>
Reviewed-by: Chaitanya Kulkarni <kch@nvidia.com>
Reviewed-by: Martin K. Petersen <martin.petersen@oracle.com>
Link: https://lore.kernel.org/r/20211126121802.2090656-4-hch@lst.de
Signed-off-by: Jens Axboe <axboe@kernel.dk>
parent 79bb1dbd12
commit f3fa33acca

28 changed files with 62 additions and 67 deletions
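The conversion is mechanical at nearly every call site: each reader of the removed rq->rq_disk field switches to the gendisk already reachable from the request's queue as rq->q->disk. As a rough illustration of the resulting access pattern, here is a minimal userspace sketch with simplified stand-in structs and a hypothetical request_disk_name() helper; only the field names mirror the real kernel definitions.

#include <stdio.h>

/* Simplified stand-ins for the kernel structures involved; everything
 * beyond the field names is illustrative only. */
struct gendisk {
        char disk_name[32];
        void *private_data;
};

struct request_queue {
        struct gendisk *disk;           /* NULL until a disk is attached */
};

struct request {
        struct request_queue *q;
        /* struct gendisk *rq_disk;        removed by this commit */
};

/* Old pattern: rq->rq_disk->disk_name.  New pattern: go through the
 * queue, guarding against queues that never had a disk attached. */
static const char *request_disk_name(const struct request *rq)
{
        return rq->q->disk ? rq->q->disk->disk_name : "?";
}

int main(void)
{
        struct gendisk disk = { .disk_name = "sda" };
        struct request_queue q = { .disk = &disk };
        struct request rq = { .q = &q };

        printf("%s\n", request_disk_name(&rq));  /* prints "sda" */
        return 0;
}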
@@ -145,7 +145,7 @@ static void blk_flush_queue_rq(struct request *rq, bool add_front)
 
 static void blk_account_io_flush(struct request *rq)
 {
-        struct block_device *part = rq->rq_disk->part0;
+        struct block_device *part = rq->q->disk->part0;
 
         part_stat_lock();
         part_stat_inc(part, ios[STAT_FLUSH]);

@@ -339,7 +339,6 @@ static void blk_kick_flush(struct request_queue *q, struct blk_flush_queue *fq,
         flush_rq->cmd_flags = REQ_OP_FLUSH | REQ_PREFLUSH;
         flush_rq->cmd_flags |= (flags & REQ_DRV) | (flags & REQ_FAILFAST_MASK);
         flush_rq->rq_flags |= RQF_FLUSH_SEQ;
-        flush_rq->rq_disk = first_rq->rq_disk;
         flush_rq->end_io = flush_end_io;
         /*
          * Order WRITE ->end_io and WRITE rq->ref, and its pair is the one

@@ -377,7 +377,6 @@ static struct request *blk_mq_rq_ctx_init(struct blk_mq_alloc_data *data,
                 rq->start_time_ns = ktime_get_ns();
         else
                 rq->start_time_ns = 0;
-        rq->rq_disk = NULL;
         rq->part = NULL;
 #ifdef CONFIG_BLK_RQ_ALLOC_TIME
         rq->alloc_time_ns = alloc_time_ns;

@@ -659,7 +658,7 @@ void blk_mq_free_plug_rqs(struct blk_plug *plug)
 void blk_dump_rq_flags(struct request *rq, char *msg)
 {
         printk(KERN_INFO "%s: dev %s: flags=%llx\n", msg,
-                rq->rq_disk ? rq->rq_disk->disk_name : "?",
+                rq->q->disk ? rq->q->disk->disk_name : "?",
                 (unsigned long long) rq->cmd_flags);
 
         printk(KERN_INFO " sector %llu, nr/cnr %u/%u\n",

@@ -712,7 +711,7 @@ static void blk_print_req_error(struct request *req, blk_status_t status)
                 "%s error, dev %s, sector %llu op 0x%x:(%s) flags 0x%x "
                 "phys_seg %u prio class %u\n",
                 blk_status_to_str(status),
-                req->rq_disk ? req->rq_disk->disk_name : "?",
+                req->q->disk ? req->q->disk->disk_name : "?",
                 blk_rq_pos(req), req_op(req), blk_op_str(req_op(req)),
                 req->cmd_flags & ~REQ_OP_MASK,
                 req->nr_phys_segments,

@@ -853,8 +852,8 @@ static void __blk_account_io_start(struct request *rq)
         /* passthrough requests can hold bios that do not have ->bi_bdev set */
         if (rq->bio && rq->bio->bi_bdev)
                 rq->part = rq->bio->bi_bdev;
-        else
-                rq->part = rq->rq_disk->part0;
+        else if (rq->q->disk)
+                rq->part = rq->q->disk->part0;
 
         part_stat_lock();
         update_io_ticks(rq->part, jiffies, false);
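One behavioural detail worth noting in the accounting hunk above: unlike the old per-request copy, rq->q->disk may be NULL (for example on a queue that never had a disk attached), so the fallback branch now tests the pointer before dereferencing it, and several other call sites in this patch grow the same check. A self-contained sketch of that guarded assignment follows, again with simplified stand-in structs and a hypothetical account_io_start() modeled on __blk_account_io_start(); it is illustrative only, not the kernel implementation.

#include <stddef.h>

struct block_device;

struct gendisk {
        struct block_device *part0;     /* whole-disk device */
};

struct request_queue {
        struct gendisk *disk;           /* may legitimately be NULL */
};

struct bio {
        struct block_device *bi_bdev;
};

struct request {
        struct request_queue *q;
        struct bio *bio;
        struct block_device *part;
};

/* Prefer the bio's bdev; otherwise fall back to the whole-disk device,
 * but only if the queue actually has a disk attached. */
static void account_io_start(struct request *rq)
{
        if (rq->bio && rq->bio->bi_bdev)
                rq->part = rq->bio->bi_bdev;
        else if (rq->q->disk)
                rq->part = rq->q->disk->part0;
        /* else: leave rq->part NULL for a diskless passthrough queue */
}

int main(void)
{
        struct gendisk disk = { .part0 = NULL };
        struct request_queue q = { .disk = &disk };
        struct request rq = { .q = &q, .bio = NULL, .part = NULL };

        account_io_start(&rq);          /* takes the rq->q->disk branch */
        return 0;
}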
@@ -1172,7 +1171,6 @@ void blk_execute_rq_nowait(struct gendisk *bd_disk, struct request *rq,
         WARN_ON(irqs_disabled());
         WARN_ON(!blk_rq_is_passthrough(rq));
 
-        rq->rq_disk = bd_disk;
         rq->end_io = done;
 
         blk_account_io_start(rq);

@@ -2902,8 +2900,8 @@ blk_status_t blk_insert_cloned_request(struct request_queue *q, struct request *
         if (ret != BLK_STS_OK)
                 return ret;
 
-        if (rq->rq_disk &&
-            should_fail_request(rq->rq_disk->part0, blk_rq_bytes(rq)))
+        if (rq->q->disk &&
+            should_fail_request(rq->q->disk->part0, blk_rq_bytes(rq)))
                 return BLK_STS_IOERR;
 
         if (blk_crypto_insert_cloned_request(rq))

@@ -324,7 +324,7 @@ int blk_dev_init(void);
  */
 static inline bool blk_do_io_stat(struct request *rq)
 {
-        return (rq->rq_flags & RQF_IO_STAT) && rq->rq_disk;
+        return (rq->rq_flags & RQF_IO_STAT) && rq->q->disk;
 }
 
 void update_io_ticks(struct block_device *part, unsigned long now, bool end);

@@ -1505,7 +1505,7 @@ static blk_status_t amiflop_queue_rq(struct blk_mq_hw_ctx *hctx,
                                      const struct blk_mq_queue_data *bd)
 {
         struct request *rq = bd->rq;
-        struct amiga_floppy_struct *floppy = rq->rq_disk->private_data;
+        struct amiga_floppy_struct *floppy = rq->q->disk->private_data;
         blk_status_t err;
 
         if (!spin_trylock_irq(&amiflop_lock))

@@ -1502,7 +1502,7 @@ static void setup_req_params( int drive )
 static blk_status_t ataflop_queue_rq(struct blk_mq_hw_ctx *hctx,
                                      const struct blk_mq_queue_data *bd)
 {
-        struct atari_floppy_struct *floppy = bd->rq->rq_disk->private_data;
+        struct atari_floppy_struct *floppy = bd->rq->q->disk->private_data;
         int drive = floppy - unit;
         int type = floppy->type;
 

@@ -1538,7 +1538,7 @@ static blk_status_t ataflop_queue_rq(struct blk_mq_hw_ctx *hctx,
                 if (!UDT) {
                         Probing = 1;
                         UDT = atari_disk_type + StartDiskType[DriveType];
-                        set_capacity(bd->rq->rq_disk, UDT->blocks);
+                        set_capacity(bd->rq->q->disk, UDT->blocks);
                         UD.autoprobe = 1;
                 }
         }

@@ -1558,7 +1558,7 @@ static blk_status_t ataflop_queue_rq(struct blk_mq_hw_ctx *hctx,
                 }
                 type = minor2disktype[type].index;
                 UDT = &atari_disk_type[type];
-                set_capacity(bd->rq->rq_disk, UDT->blocks);
+                set_capacity(bd->rq->q->disk, UDT->blocks);
                 UD.autoprobe = 0;
         }
 

@@ -2259,7 +2259,7 @@ static int do_format(int drive, struct format_descr *tmp_format_req)
 static void floppy_end_request(struct request *req, blk_status_t error)
 {
         unsigned int nr_sectors = current_count_sectors;
-        unsigned int drive = (unsigned long)req->rq_disk->private_data;
+        unsigned int drive = (unsigned long)req->q->disk->private_data;
 
         /* current_count_sectors can be zero if transfer failed */
         if (error)

@@ -2550,7 +2550,7 @@ static int make_raw_rw_request(void)
         if (WARN(max_buffer_sectors == 0, "VFS: Block I/O scheduled on unopened device\n"))
                 return 0;
 
-        set_fdc((long)current_req->rq_disk->private_data);
+        set_fdc((long)current_req->q->disk->private_data);
 
         raw_cmd = &default_raw_cmd;
         raw_cmd->flags = FD_RAW_SPIN | FD_RAW_NEED_DISK | FD_RAW_NEED_SEEK;

@@ -2792,7 +2792,7 @@ do_request:
                         return;
                 }
         }
-        drive = (long)current_req->rq_disk->private_data;
+        drive = (long)current_req->q->disk->private_data;
         set_fdc(drive);
         reschedule_timeout(current_drive, "redo fd request");
 
@@ -44,7 +44,7 @@ TRACE_EVENT(nullb_zone_op,
                 __entry->op = req_op(cmd->rq);
                 __entry->zone_no = zone_no;
                 __entry->zone_cond = zone_cond;
-                __assign_disk_name(__entry->disk, cmd->rq->rq_disk);
+                __assign_disk_name(__entry->disk, cmd->rq->q->disk);
         ),
         TP_printk("%s req=%-15s zone_no=%u zone_cond=%-10s",
                   __print_disk_name(__entry->disk),

@@ -690,7 +690,7 @@ static void pcd_request(void)
         if (!pcd_req && !set_next_request())
                 return;
 
-        cd = pcd_req->rq_disk->private_data;
+        cd = pcd_req->q->disk->private_data;
         if (cd != pcd_current)
                 pcd_bufblk = -1;
         pcd_current = cd;

@@ -430,7 +430,7 @@ static void run_fsm(void)
                 int stop = 0;
 
                 if (!phase) {
-                        pd_current = pd_req->rq_disk->private_data;
+                        pd_current = pd_req->q->disk->private_data;
                         pi_current = pd_current->pi;
                         phase = do_pd_io_start;
                 }

@@ -492,7 +492,7 @@ static enum action do_pd_io_start(void)
         case REQ_OP_WRITE:
                 pd_block = blk_rq_pos(pd_req);
                 pd_count = blk_rq_cur_sectors(pd_req);
-                if (pd_block + pd_count > get_capacity(pd_req->rq_disk))
+                if (pd_block + pd_count > get_capacity(pd_req->q->disk))
                         return Fail;
                 pd_run = blk_rq_sectors(pd_req);
                 pd_buf = bio_data(pd_req->bio);

@@ -746,12 +746,12 @@ repeat:
         if (!pf_req && !set_next_request())
                 return;
 
-        pf_current = pf_req->rq_disk->private_data;
+        pf_current = pf_req->q->disk->private_data;
         pf_block = blk_rq_pos(pf_req);
         pf_run = blk_rq_sectors(pf_req);
         pf_count = blk_rq_cur_sectors(pf_req);
 
-        if (pf_block + pf_count > get_capacity(pf_req->rq_disk)) {
+        if (pf_block + pf_count > get_capacity(pf_req->q->disk)) {
                 pf_end_request(BLK_STS_IOERR);
                 goto repeat;
         }

@@ -393,7 +393,7 @@ static void rnbd_put_iu(struct rnbd_clt_session *sess, struct rnbd_iu *iu)
 
 static void rnbd_softirq_done_fn(struct request *rq)
 {
-        struct rnbd_clt_dev *dev = rq->rq_disk->private_data;
+        struct rnbd_clt_dev *dev = rq->q->disk->private_data;
         struct rnbd_clt_session *sess = dev->sess;
         struct rnbd_iu *iu;
 

@@ -1133,7 +1133,7 @@ static blk_status_t rnbd_queue_rq(struct blk_mq_hw_ctx *hctx,
                                   const struct blk_mq_queue_data *bd)
 {
         struct request *rq = bd->rq;
-        struct rnbd_clt_dev *dev = rq->rq_disk->private_data;
+        struct rnbd_clt_dev *dev = rq->q->disk->private_data;
         struct rnbd_iu *iu = blk_mq_rq_to_pdu(rq);
         int err;
         blk_status_t ret = BLK_STS_IOERR;

@@ -462,7 +462,7 @@ static int __vdc_tx_trigger(struct vdc_port *port)
 
 static int __send_request(struct request *req)
 {
-        struct vdc_port *port = req->rq_disk->private_data;
+        struct vdc_port *port = req->q->disk->private_data;
         struct vio_dring_state *dr = &port->vio.drings[VIO_DRIVER_TX_RING];
         struct scatterlist sg[MAX_RING_COOKIES];
         struct vdc_req_entry *rqe;
@@ -550,7 +550,6 @@ static int multipath_clone_and_map(struct dm_target *ti, struct request *rq,
                 return DM_MAPIO_REQUEUE;
         }
         clone->bio = clone->biotail = NULL;
-        clone->rq_disk = bdev->bd_disk;
         clone->cmd_flags |= REQ_FAILFAST_TRANSPORT;
         *__clone = clone;
 

@@ -1837,7 +1837,7 @@ static void mmc_blk_mq_rw_recovery(struct mmc_queue *mq, struct request *req)
         /* Reset if the card is in a bad state */
         if (!mmc_host_is_spi(mq->card->host) &&
             err && mmc_blk_reset(md, card->host, type)) {
-                pr_err("%s: recovery failed!\n", req->rq_disk->disk_name);
+                pr_err("%s: recovery failed!\n", req->q->disk->disk_name);
                 mqrq->retries = MMC_NO_RETRIES;
                 return;
         }

@@ -56,7 +56,7 @@ void nvme_fault_inject_fini(struct nvme_fault_inject *fault_inject)
 
 void nvme_should_fail(struct request *req)
 {
-        struct gendisk *disk = req->rq_disk;
+        struct gendisk *disk = req->q->disk;
         struct nvme_fault_inject *fault_inject = NULL;
         u16 status;
 

@@ -68,7 +68,7 @@ TRACE_EVENT(nvme_setup_cmd,
                 __entry->nsid = le32_to_cpu(cmd->common.nsid);
                 __entry->metadata = !!blk_integrity_rq(req);
                 __entry->fctype = cmd->fabrics.fctype;
-                __assign_disk_name(__entry->disk, req->rq_disk);
+                __assign_disk_name(__entry->disk, req->q->disk);
                 memcpy(__entry->cdw10, &cmd->common.cdw10,
                        sizeof(__entry->cdw10));
         ),

@@ -103,7 +103,7 @@ TRACE_EVENT(nvme_complete_rq,
                 __entry->retries = nvme_req(req)->retries;
                 __entry->flags = nvme_req(req)->flags;
                 __entry->status = nvme_req(req)->status;
-                __assign_disk_name(__entry->disk, req->rq_disk);
+                __assign_disk_name(__entry->disk, req->q->disk);
         ),
         TP_printk("nvme%d: %sqid=%d, cmdid=%u, res=%#llx, retries=%u, flags=0x%x, status=%#x",
                   __entry->ctrl_id, __print_disk_name(__entry->disk),

@@ -153,7 +153,7 @@ TRACE_EVENT(nvme_sq,
         ),
         TP_fast_assign(
                 __entry->ctrl_id = nvme_req(req)->ctrl->instance;
-                __assign_disk_name(__entry->disk, req->rq_disk);
+                __assign_disk_name(__entry->disk, req->q->disk);
                 __entry->qid = nvme_req_qid(req);
                 __entry->sq_head = le16_to_cpu(sq_head);
                 __entry->sq_tail = sq_tail;
@@ -543,8 +543,9 @@ static bool scsi_end_request(struct request *req, blk_status_t error,
         if (blk_update_request(req, error, bytes))
                 return true;
 
+        // XXX:
         if (blk_queue_add_random(q))
-                add_disk_randomness(req->rq_disk);
+                add_disk_randomness(req->q->disk);
 
         if (!blk_rq_is_passthrough(req)) {
                 WARN_ON_ONCE(!(cmd->flags & SCMD_INITIALIZED));

@@ -30,7 +30,9 @@ static inline const char *scmd_name(const struct scsi_cmnd *scmd)
 {
         struct request *rq = scsi_cmd_to_rq((struct scsi_cmnd *)scmd);
 
-        return rq->rq_disk ? rq->rq_disk->disk_name : NULL;
+        if (!rq->q->disk)
+                return NULL;
+        return rq->q->disk->disk_name;
 }
 
 static size_t sdev_format_header(char *logbuf, size_t logbuf_len,

@@ -872,7 +872,7 @@ static blk_status_t sd_setup_unmap_cmnd(struct scsi_cmnd *cmd)
 {
         struct scsi_device *sdp = cmd->device;
         struct request *rq = scsi_cmd_to_rq(cmd);
-        struct scsi_disk *sdkp = scsi_disk(rq->rq_disk);
+        struct scsi_disk *sdkp = scsi_disk(rq->q->disk);
         u64 lba = sectors_to_logical(sdp, blk_rq_pos(rq));
         u32 nr_blocks = sectors_to_logical(sdp, blk_rq_sectors(rq));
         unsigned int data_len = 24;

@@ -908,7 +908,7 @@ static blk_status_t sd_setup_write_same16_cmnd(struct scsi_cmnd *cmd,
 {
         struct scsi_device *sdp = cmd->device;
         struct request *rq = scsi_cmd_to_rq(cmd);
-        struct scsi_disk *sdkp = scsi_disk(rq->rq_disk);
+        struct scsi_disk *sdkp = scsi_disk(rq->q->disk);
         u64 lba = sectors_to_logical(sdp, blk_rq_pos(rq));
         u32 nr_blocks = sectors_to_logical(sdp, blk_rq_sectors(rq));
         u32 data_len = sdp->sector_size;

@@ -940,7 +940,7 @@ static blk_status_t sd_setup_write_same10_cmnd(struct scsi_cmnd *cmd,
 {
         struct scsi_device *sdp = cmd->device;
         struct request *rq = scsi_cmd_to_rq(cmd);
-        struct scsi_disk *sdkp = scsi_disk(rq->rq_disk);
+        struct scsi_disk *sdkp = scsi_disk(rq->q->disk);
         u64 lba = sectors_to_logical(sdp, blk_rq_pos(rq));
         u32 nr_blocks = sectors_to_logical(sdp, blk_rq_sectors(rq));
         u32 data_len = sdp->sector_size;

@@ -971,7 +971,7 @@ static blk_status_t sd_setup_write_zeroes_cmnd(struct scsi_cmnd *cmd)
 {
         struct request *rq = scsi_cmd_to_rq(cmd);
         struct scsi_device *sdp = cmd->device;
-        struct scsi_disk *sdkp = scsi_disk(rq->rq_disk);
+        struct scsi_disk *sdkp = scsi_disk(rq->q->disk);
         u64 lba = sectors_to_logical(sdp, blk_rq_pos(rq));
         u32 nr_blocks = sectors_to_logical(sdp, blk_rq_sectors(rq));
 

@@ -1068,7 +1068,7 @@ static blk_status_t sd_setup_write_same_cmnd(struct scsi_cmnd *cmd)
 {
         struct request *rq = scsi_cmd_to_rq(cmd);
         struct scsi_device *sdp = cmd->device;
-        struct scsi_disk *sdkp = scsi_disk(rq->rq_disk);
+        struct scsi_disk *sdkp = scsi_disk(rq->q->disk);
         struct bio *bio = rq->bio;
         u64 lba = sectors_to_logical(sdp, blk_rq_pos(rq));
         u32 nr_blocks = sectors_to_logical(sdp, blk_rq_sectors(rq));

@@ -1116,7 +1116,7 @@ static blk_status_t sd_setup_write_same_cmnd(struct scsi_cmnd *cmd)
 static blk_status_t sd_setup_flush_cmnd(struct scsi_cmnd *cmd)
 {
         struct request *rq = scsi_cmd_to_rq(cmd);
-        struct scsi_disk *sdkp = scsi_disk(rq->rq_disk);
+        struct scsi_disk *sdkp = scsi_disk(rq->q->disk);
 
         /* flush requests don't perform I/O, zero the S/G table */
         memset(&cmd->sdb, 0, sizeof(cmd->sdb));

@@ -1215,7 +1215,7 @@ static blk_status_t sd_setup_read_write_cmnd(struct scsi_cmnd *cmd)
 {
         struct request *rq = scsi_cmd_to_rq(cmd);
         struct scsi_device *sdp = cmd->device;
-        struct scsi_disk *sdkp = scsi_disk(rq->rq_disk);
+        struct scsi_disk *sdkp = scsi_disk(rq->q->disk);
         sector_t lba = sectors_to_logical(sdp, blk_rq_pos(rq));
         sector_t threshold;
         unsigned int nr_blocks = sectors_to_logical(sdp, blk_rq_sectors(rq));

@@ -1236,7 +1236,7 @@ static blk_status_t sd_setup_read_write_cmnd(struct scsi_cmnd *cmd)
                 goto fail;
         }
 
-        if (blk_rq_pos(rq) + blk_rq_sectors(rq) > get_capacity(rq->rq_disk)) {
+        if (blk_rq_pos(rq) + blk_rq_sectors(rq) > get_capacity(rq->q->disk)) {
                 scmd_printk(KERN_ERR, cmd, "access beyond end of device\n");
                 goto fail;
         }

@@ -1331,7 +1331,7 @@ static blk_status_t sd_init_command(struct scsi_cmnd *cmd)
 
         switch (req_op(rq)) {
         case REQ_OP_DISCARD:
-                switch (scsi_disk(rq->rq_disk)->provisioning_mode) {
+                switch (scsi_disk(rq->q->disk)->provisioning_mode) {
                 case SD_LBP_UNMAP:
                         return sd_setup_unmap_cmnd(cmd);
                 case SD_LBP_WS16:

@@ -1917,7 +1917,7 @@ static const struct block_device_operations sd_fops = {
  **/
 static void sd_eh_reset(struct scsi_cmnd *scmd)
 {
-        struct scsi_disk *sdkp = scsi_disk(scsi_cmd_to_rq(scmd)->rq_disk);
+        struct scsi_disk *sdkp = scsi_disk(scsi_cmd_to_rq(scmd)->q->disk);
 
         /* New SCSI EH run, reset gate variable */
         sdkp->ignore_medium_access_errors = false;

@@ -1937,7 +1937,7 @@ static void sd_eh_reset(struct scsi_cmnd *scmd)
  **/
 static int sd_eh_action(struct scsi_cmnd *scmd, int eh_disp)
 {
-        struct scsi_disk *sdkp = scsi_disk(scsi_cmd_to_rq(scmd)->rq_disk);
+        struct scsi_disk *sdkp = scsi_disk(scsi_cmd_to_rq(scmd)->q->disk);
         struct scsi_device *sdev = scmd->device;
 
         if (!scsi_device_online(sdev) ||

@@ -2034,7 +2034,7 @@ static int sd_done(struct scsi_cmnd *SCpnt)
         unsigned int resid;
         struct scsi_sense_hdr sshdr;
         struct request *req = scsi_cmd_to_rq(SCpnt);
-        struct scsi_disk *sdkp = scsi_disk(req->rq_disk);
+        struct scsi_disk *sdkp = scsi_disk(req->q->disk);
         int sense_valid = 0;
         int sense_deferred = 0;
 

@@ -244,7 +244,7 @@ out:
 static blk_status_t sd_zbc_cmnd_checks(struct scsi_cmnd *cmd)
 {
         struct request *rq = scsi_cmd_to_rq(cmd);
-        struct scsi_disk *sdkp = scsi_disk(rq->rq_disk);
+        struct scsi_disk *sdkp = scsi_disk(rq->q->disk);
         sector_t sector = blk_rq_pos(rq);
 
         if (!sd_is_zoned(sdkp))

@@ -322,7 +322,7 @@ blk_status_t sd_zbc_prepare_zone_append(struct scsi_cmnd *cmd, sector_t *lba,
                                         unsigned int nr_blocks)
 {
         struct request *rq = scsi_cmd_to_rq(cmd);
-        struct scsi_disk *sdkp = scsi_disk(rq->rq_disk);
+        struct scsi_disk *sdkp = scsi_disk(rq->q->disk);
         unsigned int wp_offset, zno = blk_rq_zone_no(rq);
         unsigned long flags;
         blk_status_t ret;

@@ -388,7 +388,7 @@ blk_status_t sd_zbc_setup_zone_mgmt_cmnd(struct scsi_cmnd *cmd,
 {
         struct request *rq = scsi_cmd_to_rq(cmd);
         sector_t sector = blk_rq_pos(rq);
-        struct scsi_disk *sdkp = scsi_disk(rq->rq_disk);
+        struct scsi_disk *sdkp = scsi_disk(rq->q->disk);
         sector_t block = sectors_to_logical(sdkp->device, sector);
         blk_status_t ret;
 

@@ -443,7 +443,7 @@ static unsigned int sd_zbc_zone_wp_update(struct scsi_cmnd *cmd,
 {
         int result = cmd->result;
         struct request *rq = scsi_cmd_to_rq(cmd);
-        struct scsi_disk *sdkp = scsi_disk(rq->rq_disk);
+        struct scsi_disk *sdkp = scsi_disk(rq->q->disk);
         unsigned int zno = blk_rq_zone_no(rq);
         enum req_opf op = req_op(rq);
         unsigned long flags;

@@ -335,7 +335,7 @@ static int sr_done(struct scsi_cmnd *SCpnt)
         int block_sectors = 0;
         long error_sector;
         struct request *rq = scsi_cmd_to_rq(SCpnt);
-        struct scsi_cd *cd = scsi_cd(rq->rq_disk);
+        struct scsi_cd *cd = scsi_cd(rq->q->disk);
 
 #ifdef DEBUG
         scmd_printk(KERN_INFO, SCpnt, "done: %x\n", result);

@@ -402,7 +402,7 @@ static blk_status_t sr_init_command(struct scsi_cmnd *SCpnt)
         ret = scsi_alloc_sgtables(SCpnt);
         if (ret != BLK_STS_OK)
                 return ret;
-        cd = scsi_cd(rq->rq_disk);
+        cd = scsi_cd(rq->q->disk);
 
         SCSI_LOG_HLQUEUE(1, scmd_printk(KERN_INFO, SCpnt,
                 "Doing sr request, block = %d\n", block));

@@ -528,7 +528,7 @@ static void virtio_scsi_init_hdr_pi(struct virtio_device *vdev,
         if (!rq || !scsi_prot_sg_count(sc))
                 return;
 
-        bi = blk_get_integrity(rq->rq_disk);
+        bi = blk_get_integrity(rq->q->disk);
 
         if (sc->sc_data_direction == DMA_TO_DEVICE)
                 cmd_pi->pi_bytesout = cpu_to_virtio32(vdev,

@@ -551,7 +551,7 @@ static void last_sector_hacks(struct us_data *us, struct scsi_cmnd *srb)
         /* Did this command access the last sector? */
         sector = (srb->cmnd[2] << 24) | (srb->cmnd[3] << 16) |
                 (srb->cmnd[4] << 8) | (srb->cmnd[5]);
-        disk = scsi_cmd_to_rq(srb)->rq_disk;
+        disk = scsi_cmd_to_rq(srb)->q->disk;
         if (!disk)
                 goto done;
         sdkp = scsi_disk(disk);
@@ -100,7 +100,6 @@ struct request {
                 struct request *rq_next;
         };
 
-        struct gendisk *rq_disk;
         struct block_device *part;
 #ifdef CONFIG_BLK_RQ_ALLOC_TIME
         /* Time that the first bio started allocating this request. */

@@ -890,9 +889,6 @@ static inline void blk_rq_bio_prep(struct request *rq, struct bio *bio,
         rq->__data_len = bio->bi_iter.bi_size;
         rq->bio = rq->biotail = bio;
         rq->ioprio = bio_prio(bio);
-
-        if (bio->bi_bdev)
-                rq->rq_disk = bio->bi_bdev->bd_disk;
 }
 
 void blk_mq_hctx_set_fq_lock_class(struct blk_mq_hw_ctx *hctx,

@@ -164,7 +164,7 @@ static inline struct scsi_driver *scsi_cmd_to_driver(struct scsi_cmnd *cmd)
 {
         struct request *rq = scsi_cmd_to_rq(cmd);
 
-        return *(struct scsi_driver **)rq->rq_disk->private_data;
+        return *(struct scsi_driver **)rq->q->disk->private_data;
 }
 
 void scsi_done(struct scsi_cmnd *cmd);

@@ -275,9 +275,9 @@ scmd_printk(const char *, const struct scsi_cmnd *, const char *, ...);
         do {                                                    \
                 struct request *__rq = scsi_cmd_to_rq((scmd));  \
                                                                 \
-                if (__rq->rq_disk)                              \
+                if (__rq->q->disk)                              \
                         sdev_dbg((scmd)->device, "[%s] " fmt,   \
-                                 __rq->rq_disk->disk_name, ##a); \
+                                 __rq->q->disk->disk_name, ##a); \
                 else                                            \
                         sdev_dbg((scmd)->device, fmt, ##a);     \
         } while (0)

@@ -85,7 +85,7 @@ TRACE_EVENT(block_rq_requeue,
         ),
 
         TP_fast_assign(
-                __entry->dev = rq->rq_disk ? disk_devt(rq->rq_disk) : 0;
+                __entry->dev = rq->q->disk ? disk_devt(rq->q->disk) : 0;
                 __entry->sector = blk_rq_trace_sector(rq);
                 __entry->nr_sector = blk_rq_trace_nr_sectors(rq);
 

@@ -128,7 +128,7 @@ TRACE_EVENT(block_rq_complete,
         ),
 
         TP_fast_assign(
-                __entry->dev = rq->rq_disk ? disk_devt(rq->rq_disk) : 0;
+                __entry->dev = rq->q->disk ? disk_devt(rq->q->disk) : 0;
                 __entry->sector = blk_rq_pos(rq);
                 __entry->nr_sector = nr_bytes >> 9;
                 __entry->error = blk_status_to_errno(error);

@@ -161,7 +161,7 @@ DECLARE_EVENT_CLASS(block_rq,
         ),
 
         TP_fast_assign(
-                __entry->dev = rq->rq_disk ? disk_devt(rq->rq_disk) : 0;
+                __entry->dev = rq->q->disk ? disk_devt(rq->q->disk) : 0;
                 __entry->sector = blk_rq_trace_sector(rq);
                 __entry->nr_sector = blk_rq_trace_nr_sectors(rq);
                 __entry->bytes = blk_rq_bytes(rq);

@@ -512,7 +512,7 @@ TRACE_EVENT(block_rq_remap,
         ),
 
         TP_fast_assign(
-                __entry->dev = disk_devt(rq->rq_disk);
+                __entry->dev = disk_devt(rq->q->disk);
                 __entry->sector = blk_rq_pos(rq);
                 __entry->nr_sector = blk_rq_sectors(rq);
                 __entry->old_dev = dev;

@@ -1045,7 +1045,7 @@ static void blk_add_trace_rq_remap(void *ignore, struct request *rq, dev_t dev,
         }
 
         r.device_from = cpu_to_be32(dev);
-        r.device_to = cpu_to_be32(disk_devt(rq->rq_disk));
+        r.device_to = cpu_to_be32(disk_devt(rq->q->disk));
         r.sector_from = cpu_to_be64(from);
 
         __blk_add_trace(bt, blk_rq_pos(rq), blk_rq_bytes(rq),