io_uring-6.14-20250214
-----BEGIN PGP SIGNATURE-----

iQJEBAABCAAuFiEEwPw5LcreJtl1+l5K99NY+ylx4KYFAmevfEIQHGF4Ym9lQGtl
cm5lbC5kawAKCRD301j7KXHgpojbEADB9wm0H+iYatPICnhl2tmO+PPghk9X7brt
Y5G417G5+jw7Y8Sh0f+IfLnWXLj8ce17SXmTPnDvkZebjxejfki5OOoXQ0aLN3av
KC5Uc4O/XPwPIKOzeHxmN2lSTjtKk95DCsKNuUnZ0UoAp+eoXo5+3EfPIkwS9ddW
VlxWWeN7+xQio4j7Xn9GYOwy1Yl7F+vg73o3z1vFzM5kqUxylKoK7QG9B3D+yIbM
hdLod+1hYQp/nJHwV996T0NRXKsbxWbPHShyWq8zqf2UWd6rqvLwze8pRXvQ1msP
ZZCa0od3v7CgQmuJP2DVMO0XCPDgxqWnnBENI8hXmzj6r/K/LuJtF0OO22+9avKI
PnYdY+9Lw+zGamjcShW6SFHDnSNRUImKpibehpM7+BRKe1kPnD75M9kk6zvNhSIa
fA+h9PZ0Cjrm1kfs3nQRSPAa0CxrgNRyXaCRqX4UCXD+SSQL5BBREf9CO95/SbHg
nmrRAGnbq2a2H4IGgVRqgqnn4dIeJRlB/q+I9BhJK/dJAK2w2QDgBuyWREqsRsTp
DtjGudpDyJH60+Mpmq61NWIJv/1m6yvsvgIkN5U1LIXB47ihYuO4hUYxW4WJU+YR
XMv8Y2nsX1WhhFGYZ77jFhWGI25u2v1tY8Yw4/UZrUDovJXe4cl7J1aPTB9m21la
Zf2Bb6elCA==
=+MSk
-----END PGP SIGNATURE-----

Merge tag 'io_uring-6.14-20250214' of git://git.kernel.dk/linux

Pull io_uring fixes from Jens Axboe:

 - Fix a potential data corruption issue with IORING_OP_URING_CMD, where
   not all of the SQE data is stable beyond prep. This will be revisited
   in the future; for now the SQE is always copied past prep, providing
   the same guarantees as all other opcodes.

 - Make the waitid opcode set up async data like any other opcode (no
   real fix here, just a consistency thing).

 - Fix waitid's abuse of io_tw_state.

 - When a buffer group's type is changed, do so by allocating a new
   buffer group entry and discarding the old one, rather than migrating
   the existing entry in place.

* tag 'io_uring-6.14-20250214' of git://git.kernel.dk/linux:
  io_uring/uring_cmd: unconditionally copy SQEs at prep time
  io_uring/waitid: setup async data in the prep handler
  io_uring/uring_cmd: remove dead req_has_async_data() check
  io_uring/uring_cmd: switch sqe to async_data on EAGAIN
  io_uring/uring_cmd: don't assume io_uring_cmd_data layout
  io_uring/kbuf: reallocate buf lists on upgrade
  io_uring/waitid: don't abuse io_tw_state
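As background for the first item above: the SQ ring is owned by userspace, and nothing stops an application from reusing or overwriting an SQE slot once io_uring_submit() has returned. Any SQE payload the kernel still needs later, such as the inline command data of IORING_OP_URING_CMD, therefore has to be copied during prep. A minimal liburing sketch of that submit-and-reuse pattern, purely illustrative and not part of this pull:

#include <liburing.h>
#include <stdio.h>

int main(void)
{
        struct io_uring ring;
        struct io_uring_sqe *sqe;
        struct io_uring_cqe *cqe;

        if (io_uring_queue_init(4, &ring, 0) < 0)
                return 1;

        sqe = io_uring_get_sqe(&ring);
        io_uring_prep_nop(sqe);
        sqe->user_data = 1;
        io_uring_submit(&ring);

        /*
         * SQ ring slots are recycled as the ring wraps, and the application
         * may scribble over a slot as soon as io_uring_submit() returns, so
         * anything the kernel still wants from the old SQE must already
         * have been copied out by the prep handler.
         */
        sqe = io_uring_get_sqe(&ring);
        io_uring_prep_nop(sqe);
        sqe->user_data = 2;
        io_uring_submit(&ring);

        while (io_uring_wait_cqe(&ring, &cqe) == 0) {
                unsigned long long ud = cqe->user_data;

                printf("completed user_data=%llu res=%d\n", ud, cqe->res);
                io_uring_cqe_seen(&ring, cqe);
                if (ud == 2)
                        break;
        }
        io_uring_queue_exit(&ring);
        return 0;
}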
commit ea71732474

3 changed files with 30 additions and 32 deletions
io_uring/kbuf.c

@@ -415,6 +415,13 @@ void io_destroy_buffers(struct io_ring_ctx *ctx)
 	}
 }

+static void io_destroy_bl(struct io_ring_ctx *ctx, struct io_buffer_list *bl)
+{
+	scoped_guard(mutex, &ctx->mmap_lock)
+		WARN_ON_ONCE(xa_erase(&ctx->io_bl_xa, bl->bgid) != bl);
+	io_put_bl(ctx, bl);
+}
+
 int io_remove_buffers_prep(struct io_kiocb *req, const struct io_uring_sqe *sqe)
 {
 	struct io_provide_buf *p = io_kiocb_to_cmd(req, struct io_provide_buf);
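The new io_destroy_bl() helper uses scoped_guard() from <linux/cleanup.h>, which acquires the named lock for the statement or block that follows and releases it automatically on every exit path. A small illustrative sketch of the pattern, with a made-up update_counter() function:

#include <linux/cleanup.h>
#include <linux/mutex.h>

/*
 * Illustrative only: the guard holds 'lock' for the braced scope and
 * drops it on every way out, so no explicit mutex_unlock() is needed.
 */
static void update_counter(struct mutex *lock, int *counter)
{
	scoped_guard(mutex, lock) {
		if (*counter < 0)
			return;		/* lock is still released here */
		(*counter)++;
	}
}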
@@ -636,12 +643,13 @@ int io_register_pbuf_ring(struct io_ring_ctx *ctx, void __user *arg)
 		/* if mapped buffer ring OR classic exists, don't allow */
 		if (bl->flags & IOBL_BUF_RING || !list_empty(&bl->buf_list))
 			return -EEXIST;
-	} else {
-		free_bl = bl = kzalloc(sizeof(*bl), GFP_KERNEL);
-		if (!bl)
-			return -ENOMEM;
+		io_destroy_bl(ctx, bl);
 	}

+	free_bl = bl = kzalloc(sizeof(*bl), GFP_KERNEL);
+	if (!bl)
+		return -ENOMEM;
+
 	mmap_offset = (unsigned long)reg.bgid << IORING_OFF_PBUF_SHIFT;
 	ring_size = flex_array_size(br, bufs, reg.ring_entries);

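The scenario behind this hunk, seen from the application side, is registering a ring-mapped buffer group on a group ID that previously held (now empty) classic provided buffers: the kernel used to upgrade the existing io_buffer_list in place and now discards it and allocates a fresh one. A rough liburing sketch of that sequence, with an arbitrary BGID and buffer sizes and most error handling trimmed:

#include <liburing.h>
#include <stdlib.h>

#define BGID 0

int main(void)
{
        struct io_uring ring;
        struct io_uring_buf_ring *br;
        struct io_uring_sqe *sqe;
        struct io_uring_cqe *cqe;
        char *bufs;
        int ret;

        if (io_uring_queue_init(8, &ring, 0) < 0)
                return 1;

        /* Classic provided buffers on BGID first... */
        bufs = malloc(4 * 4096);
        sqe = io_uring_get_sqe(&ring);
        io_uring_prep_provide_buffers(sqe, bufs, 4096, 4, BGID, 0);
        io_uring_submit(&ring);
        io_uring_wait_cqe(&ring, &cqe);
        io_uring_cqe_seen(&ring, cqe);

        /* ...then drop them again, leaving an empty classic group behind. */
        sqe = io_uring_get_sqe(&ring);
        io_uring_prep_remove_buffers(sqe, 4, BGID);
        io_uring_submit(&ring);
        io_uring_wait_cqe(&ring, &cqe);
        io_uring_cqe_seen(&ring, cqe);

        /*
         * Registering a mapped buffer ring on the same BGID now frees the
         * leftover list in the kernel and allocates a new one, instead of
         * converting the old entry in place.
         */
        br = io_uring_setup_buf_ring(&ring, 8, BGID, 0, &ret);
        if (!br)
                return 1;

        io_uring_free_buf_ring(&ring, br, 8, BGID);
        free(bufs);
        io_uring_queue_exit(&ring);
        return 0;
}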
io_uring/uring_cmd.c

@@ -54,9 +54,6 @@ bool io_uring_try_cancel_uring_cmd(struct io_ring_ctx *ctx,
 			continue;

 		if (cmd->flags & IORING_URING_CMD_CANCELABLE) {
-			/* ->sqe isn't available if no async data */
-			if (!req_has_async_data(req))
-				cmd->sqe = NULL;
 			file->f_op->uring_cmd(cmd, IO_URING_F_CANCEL |
 					      IO_URING_F_COMPLETE_DEFER);
 			ret = true;
@@ -179,12 +176,13 @@ static int io_uring_cmd_prep_setup(struct io_kiocb *req,
 		return -ENOMEM;
 	cache->op_data = NULL;

-	if (!(req->flags & REQ_F_FORCE_ASYNC)) {
-		/* defer memcpy until we need it */
-		ioucmd->sqe = sqe;
-		return 0;
-	}
-
+	/*
+	 * Unconditionally cache the SQE for now - this is only needed for
+	 * requests that go async, but prep handlers must ensure that any
+	 * sqe data is stable beyond prep. Since uring_cmd is special in
+	 * that it doesn't read in per-op data, play it safe and ensure that
+	 * any SQE data is stable beyond prep. This can later get relaxed.
+	 */
 	memcpy(cache->sqes, sqe, uring_sqe_size(req->ctx));
 	ioucmd->sqe = cache->sqes;
 	return 0;
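For ->uring_cmd() implementers, the practical upshot is that ioucmd->sqe now always points at the kernel-owned copy made at prep time, so the inline command payload stays readable even if the request is re-issued later from task work or io-wq. A hypothetical handler sketch, where demo_cmd, demo_uring_cmd() and the payload layout are invented for illustration:

#include <linux/errno.h>
#include <linux/types.h>
#include <linux/io_uring/cmd.h>

/* Hypothetical per-command payload carried in the SQE's 16-byte cmd area. */
struct demo_cmd {
	__u64 addr;
	__u32 len;
	__u32 flags;
};

static int demo_uring_cmd(struct io_uring_cmd *ioucmd, unsigned int issue_flags)
{
	/*
	 * ioucmd->sqe is the copy made at prep time, so this pointer stays
	 * valid even if the command is retried from a different context.
	 */
	const struct demo_cmd *cmd = io_uring_sqe_cmd(ioucmd->sqe);

	if (cmd->flags)
		return -EINVAL;

	if (issue_flags & IO_URING_F_NONBLOCK) {
		/* Pretend we would block: ask io_uring to retry from a sleepable context. */
		return -EAGAIN;
	}

	/* Complete synchronously; the return value becomes the CQE result. */
	return cmd->len;
}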
@@ -249,16 +247,8 @@ int io_uring_cmd(struct io_kiocb *req, unsigned int issue_flags)
 	}

 	ret = file->f_op->uring_cmd(ioucmd, issue_flags);
-	if (ret == -EAGAIN) {
-		struct io_uring_cmd_data *cache = req->async_data;
-
-		if (ioucmd->sqe != (void *) cache)
-			memcpy(cache->sqes, ioucmd->sqe, uring_sqe_size(req->ctx));
-		return -EAGAIN;
-	} else if (ret == -EIOCBQUEUED) {
-		return -EIOCBQUEUED;
-	}
-
+	if (ret == -EAGAIN || ret == -EIOCBQUEUED)
+		return ret;
 	if (ret < 0)
 		req_set_fail(req);
 	io_req_uring_cleanup(req, issue_flags);
io_uring/waitid.c

@@ -118,7 +118,6 @@ static int io_waitid_finish(struct io_kiocb *req, int ret)
 static void io_waitid_complete(struct io_kiocb *req, int ret)
 {
 	struct io_waitid *iw = io_kiocb_to_cmd(req, struct io_waitid);
-	struct io_tw_state ts = {};

 	/* anyone completing better be holding a reference */
 	WARN_ON_ONCE(!(atomic_read(&iw->refs) & IO_WAITID_REF_MASK));
@@ -131,7 +130,6 @@ static void io_waitid_complete(struct io_kiocb *req, int ret)
 	if (ret < 0)
 		req_set_fail(req);
 	io_req_set_res(req, ret, 0);
-	io_req_task_complete(req, &ts);
 }

 static bool __io_waitid_cancel(struct io_ring_ctx *ctx, struct io_kiocb *req)
@@ -153,6 +151,7 @@ static bool __io_waitid_cancel(struct io_ring_ctx *ctx, struct io_kiocb *req)
 	list_del_init(&iwa->wo.child_wait.entry);
 	spin_unlock_irq(&iw->head->lock);
 	io_waitid_complete(req, -ECANCELED);
+	io_req_queue_tw_complete(req, -ECANCELED);
 	return true;
 }

@@ -258,6 +257,7 @@ static void io_waitid_cb(struct io_kiocb *req, struct io_tw_state *ts)
 	}

 	io_waitid_complete(req, ret);
+	io_req_task_complete(req, ts);
 }

 static int io_waitid_wait(struct wait_queue_entry *wait, unsigned mode,
@@ -285,10 +285,16 @@ static int io_waitid_wait(struct wait_queue_entry *wait, unsigned mode,
 int io_waitid_prep(struct io_kiocb *req, const struct io_uring_sqe *sqe)
 {
 	struct io_waitid *iw = io_kiocb_to_cmd(req, struct io_waitid);
+	struct io_waitid_async *iwa;

 	if (sqe->addr || sqe->buf_index || sqe->addr3 || sqe->waitid_flags)
 		return -EINVAL;

+	iwa = io_uring_alloc_async_data(NULL, req);
+	if (!unlikely(iwa))
+		return -ENOMEM;
+	iwa->req = req;
+
 	iw->which = READ_ONCE(sqe->len);
 	iw->upid = READ_ONCE(sqe->fd);
 	iw->options = READ_ONCE(sqe->file_index);
@@ -299,16 +305,10 @@ int io_waitid_prep(struct io_kiocb *req, const struct io_uring_sqe *sqe)
 int io_waitid(struct io_kiocb *req, unsigned int issue_flags)
 {
 	struct io_waitid *iw = io_kiocb_to_cmd(req, struct io_waitid);
+	struct io_waitid_async *iwa = req->async_data;
 	struct io_ring_ctx *ctx = req->ctx;
-	struct io_waitid_async *iwa;
 	int ret;

-	iwa = io_uring_alloc_async_data(NULL, req);
-	if (!iwa)
-		return -ENOMEM;
-
-	iwa->req = req;
-
 	ret = kernel_waitid_prepare(&iwa->wo, iw->which, iw->upid, &iw->info,
 				    iw->options, NULL);
 	if (ret)
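The waitid changes are internal only; the opcode behaves the same from userspace. For reference, a minimal example of driving IORING_OP_WAITID through liburing (assuming a liburing recent enough to provide io_uring_prep_waitid(), roughly 2.5 or newer; error handling trimmed):

#include <liburing.h>
#include <stdio.h>
#include <signal.h>
#include <sys/wait.h>
#include <unistd.h>

int main(void)
{
        struct io_uring ring;
        struct io_uring_sqe *sqe;
        struct io_uring_cqe *cqe;
        siginfo_t info = { 0 };
        pid_t pid;

        if (io_uring_queue_init(4, &ring, 0) < 0)
                return 1;

        pid = fork();
        if (pid == 0)
                _exit(42);

        /* Asynchronous equivalent of waitid(P_PID, pid, &info, WEXITED). */
        sqe = io_uring_get_sqe(&ring);
        io_uring_prep_waitid(sqe, P_PID, pid, &info, WEXITED, 0);

        io_uring_submit(&ring);
        io_uring_wait_cqe(&ring, &cqe);
        printf("waitid res=%d, child exit status=%d\n", cqe->res, info.si_status);
        io_uring_cqe_seen(&ring, cqe);
        io_uring_queue_exit(&ring);
        return 0;
}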