eventfd: provide a eventfd_signal_mask() helper
This is identical to eventfd_signal(), but it allows the caller to pass
in a mask to be used for the poll wakeup key. The use case is avoiding
repeated multishot triggers if we have a dependency between eventfd and
io_uring.

If we set up an eventfd context and register that as the io_uring
eventfd, and at the same time queue a multishot poll request for the
eventfd context, then any CQE posted will repeatedly trigger the
multishot request until it terminates when the CQ ring overflows.

In preparation for io_uring detecting this circular dependency, add the
mentioned helper so that io_uring can pass in EPOLL_URING as part of the
poll wakeup key.

Cc: stable@vger.kernel.org # 6.0
[axboe: fold in !CONFIG_EVENTFD fix from Zhang Qilong]
Signed-off-by: Jens Axboe <axboe@kernel.dk>
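As a sketch of the intended call pattern (not part of this patch: the
io_uring-side change lands separately, and the EPOLL_URING bit named in
the message above is introduced there, not here):

    /*
     * Illustrative sketch only: how a subsystem could tag its eventfd
     * wakeups so its own poll handlers can recognize them later. The
     * EPOLL_URING name follows the commit message; the actual define
     * comes with the io_uring-side change.
     */
    #include <linux/eventfd.h>

    static void signal_cqe_eventfd(struct eventfd_ctx *ev)
    {
            /* eventfd_signal_mask() ORs the mask into the EPOLLIN wakeup key */
            eventfd_signal_mask(ev, 1, EPOLL_URING);
    }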
parent caf1aeaffc
commit 03e02acda8

 fs/eventfd.c            | 37 +++++++++++++++++++++----------------
 include/linux/eventfd.h |  7 +++++++
 2 files changed, 28 insertions(+), 16 deletions(-)
fs/eventfd.c
@@ -43,21 +43,7 @@ struct eventfd_ctx {
 	int id;
 };
 
-/**
- * eventfd_signal - Adds @n to the eventfd counter.
- * @ctx: [in] Pointer to the eventfd context.
- * @n: [in] Value of the counter to be added to the eventfd internal counter.
- *          The value cannot be negative.
- *
- * This function is supposed to be called by the kernel in paths that do not
- * allow sleeping. In this function we allow the counter to reach the ULLONG_MAX
- * value, and we signal this as overflow condition by returning a EPOLLERR
- * to poll(2).
- *
- * Returns the amount by which the counter was incremented. This will be less
- * than @n if the counter has overflowed.
- */
-__u64 eventfd_signal(struct eventfd_ctx *ctx, __u64 n)
+__u64 eventfd_signal_mask(struct eventfd_ctx *ctx, __u64 n, unsigned mask)
 {
 	unsigned long flags;
 
@@ -78,12 +64,31 @@ __u64 eventfd_signal(struct eventfd_ctx *ctx, __u64 n)
 		n = ULLONG_MAX - ctx->count;
 	ctx->count += n;
 	if (waitqueue_active(&ctx->wqh))
-		wake_up_locked_poll(&ctx->wqh, EPOLLIN);
+		wake_up_locked_poll(&ctx->wqh, EPOLLIN | mask);
 	current->in_eventfd = 0;
 	spin_unlock_irqrestore(&ctx->wqh.lock, flags);
 
 	return n;
 }
+
+/**
+ * eventfd_signal - Adds @n to the eventfd counter.
+ * @ctx: [in] Pointer to the eventfd context.
+ * @n: [in] Value of the counter to be added to the eventfd internal counter.
+ *          The value cannot be negative.
+ *
+ * This function is supposed to be called by the kernel in paths that do not
+ * allow sleeping. In this function we allow the counter to reach the ULLONG_MAX
+ * value, and we signal this as overflow condition by returning a EPOLLERR
+ * to poll(2).
+ *
+ * Returns the amount by which the counter was incremented. This will be less
+ * than @n if the counter has overflowed.
+ */
+__u64 eventfd_signal(struct eventfd_ctx *ctx, __u64 n)
+{
+	return eventfd_signal_mask(ctx, n, 0);
+}
 EXPORT_SYMBOL_GPL(eventfd_signal);
 
 static void eventfd_free_ctx(struct eventfd_ctx *ctx)
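For context, a hedged sketch of the consumer side (an assumed pattern,
not in this patch; the actual io_uring detection logic is a follow-up):
a wakeup callback on the eventfd waitqueue can read the poll key that
wake_up_locked_poll() now carries and skip wakeups its own subsystem
generated.

    /*
     * Assumed consumer-side pattern: filter out self-generated wakeups
     * by checking the extra bit ORed in by eventfd_signal_mask().
     * EPOLL_URING is named per the commit message; it is defined by the
     * io_uring-side change.
     */
    #include <linux/poll.h>
    #include <linux/wait.h>

    static int tagged_poll_wake(wait_queue_entry_t *wait, unsigned mode,
                                int sync, void *key)
    {
            __poll_t events = key_to_poll(key);

            /* Our own signal; do not retrigger the multishot poll request. */
            if (events & EPOLL_URING)
                    return 0;

            return default_wake_function(wait, mode, sync, key);
    }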
include/linux/eventfd.h
@@ -40,6 +40,7 @@ struct file *eventfd_fget(int fd);
 struct eventfd_ctx *eventfd_ctx_fdget(int fd);
 struct eventfd_ctx *eventfd_ctx_fileget(struct file *file);
 __u64 eventfd_signal(struct eventfd_ctx *ctx, __u64 n);
+__u64 eventfd_signal_mask(struct eventfd_ctx *ctx, __u64 n, unsigned mask);
 int eventfd_ctx_remove_wait_queue(struct eventfd_ctx *ctx, wait_queue_entry_t *wait,
 				  __u64 *cnt);
 void eventfd_ctx_do_read(struct eventfd_ctx *ctx, __u64 *cnt);
@@ -66,6 +67,12 @@ static inline int eventfd_signal(struct eventfd_ctx *ctx, int n)
 	return -ENOSYS;
 }
 
+static inline int eventfd_signal_mask(struct eventfd_ctx *ctx, __u64 n,
+				      unsigned mask)
+{
+	return -ENOSYS;
+}
+
 static inline void eventfd_ctx_put(struct eventfd_ctx *ctx)
 {
 
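A minimal equivalence sketch, based on the diff above: existing callers
are unaffected, since eventfd_signal() now just forwards a zero mask,
and the !CONFIG_EVENTFD stub mirrors the existing one by returning
-ENOSYS.

    #include <linux/eventfd.h>

    /* With a zero mask the wakeup key stays plain EPOLLIN, so after
     * this patch the two calls below behave identically (both return
     * the amount added, or -ENOSYS from the !CONFIG_EVENTFD stubs).
     */
    static void signal_both_ways(struct eventfd_ctx *ctx)
    {
            eventfd_signal(ctx, 1);          /* legacy entry point     */
            eventfd_signal_mask(ctx, 1, 0);  /* new helper, empty mask */
    }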