io_uring: inline io_read()'s iovec freeing
io_read()'s control flow is not the simplest: it has a lot of jumps and is hard to read. One of them is the out_free: label, which frees iovec. However, from the middle of io_read() onward iovec is NULL'ed, so the label's kfree(iovec) is a no-op there. That leaves only two places where the freeing can be inlined, letting us drop the label and further clean up the code.

Signed-off-by: Pavel Begunkov <asml.silence@gmail.com>
Signed-off-by: Jens Axboe <axboe@kernel.dk>
parent 7335e3bf9d
commit 5ea5dd4584
1 changed file with 13 additions and 18 deletions
fs/io_uring.c
@@ -3530,14 +3530,18 @@ static int io_read(struct io_kiocb *req, bool force_nonblock,
 	}
 
 	ret = rw_verify_area(READ, req->file, io_kiocb_ppos(kiocb), io_size);
-	if (unlikely(ret))
-		goto out_free;
+	if (unlikely(ret)) {
+		kfree(iovec);
+		return ret;
+	}
 
 	ret = io_iter_do_read(req, iter);
 
 	if (ret == -EIOCBQUEUED) {
-		ret = 0;
-		goto out_free;
+		/* it's faster to check here then delegate to kfree */
+		if (iovec)
+			kfree(iovec);
+		return 0;
 	} else if (ret == -EAGAIN) {
 		/* IOPOLL retry should happen for io-wq threads */
 		if (!force_nonblock && !(req->ctx->flags & IORING_SETUP_IOPOLL))
@@ -3560,8 +3564,6 @@ static int io_read(struct io_kiocb *req, bool force_nonblock,
 		return ret2;
 
 	rw = req->async_data;
-	/* it's copied and will be cleaned with ->io */
-	iovec = NULL;
 	/* now use our persistent iterator, if we aren't already */
 	iter = &rw->iter;
 retry:
@@ -3580,21 +3582,14 @@ retry:
 	 * do, then just retry at the new offset.
 	 */
 	ret = io_iter_do_read(req, iter);
-	if (ret == -EIOCBQUEUED) {
-		ret = 0;
-		goto out_free;
-	} else if (ret > 0 && ret < io_size) {
-		/* we got some bytes, but not all. retry. */
+	if (ret == -EIOCBQUEUED)
+		return 0;
+	/* we got some bytes, but not all. retry. */
+	if (ret > 0 && ret < io_size)
 		goto retry;
-	}
 done:
 	kiocb_done(kiocb, ret, cs);
-	ret = 0;
-out_free:
-	/* it's reportedly faster than delegating the null check to kfree() */
-	if (iovec)
-		kfree(iovec);
-	return ret;
+	return 0;
 }
 
 static int io_write_prep(struct io_kiocb *req, const struct io_uring_sqe *sqe)
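The change above is a standard cleanup-label refactoring: a shared out_free: label frees a pointer that most exit paths have already NULL'ed, so the free can be inlined at the few exits that can still own memory and the label dropped. Below is a minimal userspace C sketch of that pattern; the names (setup_buf(), persistent, the fail flag) are hypothetical stand-ins for the iovec allocation, the req->async_data handoff, and the error paths. It illustrates the technique, not the kernel code itself.

	#include <stdlib.h>
	#include <string.h>

	static char *persistent;	/* stand-in for the req->async_data handoff */

	static char *setup_buf(void)
	{
		return strdup("scratch");	/* may be NULL, like the iovec */
	}

	/*
	 * Before: every exit funnels through one cleanup label, so the
	 * handoff path must NULL the pointer to make the shared free()
	 * a no-op.
	 */
	static int read_with_label(int fail)
	{
		char *buf = setup_buf();
		int ret = fail ? -1 : 0;

		if (ret)
			goto out_free;
		persistent = buf;	/* ownership moves elsewhere */
		buf = NULL;		/* so the free below does nothing */
	out_free:
		free(buf);
		return ret;
	}

	/*
	 * After: the free is inlined at the only exit that can still own
	 * the buffer; the handoff path needs no buf = NULL and no label.
	 */
	static int read_inlined(int fail)
	{
		char *buf = setup_buf();

		if (fail) {
			/* checking here skips the call when buf is NULL */
			if (buf)
				free(buf);
			return -1;
		}
		persistent = buf;	/* ownership moves; nothing to free */
		return 0;
	}

	int main(void)
	{
		read_with_label(1);	/* error path: frees its own buffer */
		free(persistent);	/* free(NULL) is a no-op */

		read_inlined(0);	/* success path: hands the buffer off */
		free(persistent);
		return 0;
	}

Note that free(NULL), like kfree(NULL), is already a defined no-op; the explicit NULL check the commit keeps is a micro-optimization that skips the function call entirely on the hot -EIOCBQUEUED completion path, as its in-line comment says.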