init_once is called when an object doesn't come from the cache, and hence needs initial clearing of certain members. While the whole struct could get cleared by memset() in that case, a few of the cached members are large enough that this may cause unnecessary overhead if the caches used aren't large enough to satisfy the workload. For those cases, some churn of kmalloc+kfree is to be expected.

Ensure that the 3 users that need clearing put the members they need cleared at the start of the struct, and wrap the rest of the struct in a struct_group() so the offset is known: a fresh allocation then only needs the bytes up to that offset cleared, rather than a memset() of the whole struct.

While at it, improve the interaction with KASAN such that when/if KASAN writes to members inside the struct that should be retained over caching, it won't trip over itself. For rw and net, the retaining of the iovec over caching is disabled if KASAN is enabled, and a helper will free and clear those members in that case.

Signed-off-by: Jens Axboe <axboe@kernel.dk>
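As a concrete illustration of the struct_group() trick, below is a minimal sketch of an allocation helper that clears only the leading members of a freshly allocated object. The cache layout, the init_clear field, and the io_cache_alloc_new() name are assumptions for illustration based on the description above, not necessarily the exact upstream API:

#include <linux/slab.h>
#include <linux/string.h>

/*
 * Sketch only: the cache records how many leading bytes must be zeroed
 * when an object is freshly allocated rather than recycled. Field and
 * helper names here are illustrative assumptions.
 */
struct io_alloc_cache {
	void		**entries;
	unsigned int	nr_cached;
	unsigned int	max_cached;
	unsigned int	elem_size;
	unsigned int	init_clear;	/* leading bytes to clear on fresh alloc */
};

static inline void *io_cache_alloc_new(struct io_alloc_cache *cache, gfp_t gfp)
{
	void *obj = kmalloc(cache->elem_size, gfp);

	/*
	 * Only the members placed before struct_group(clear, ...) need
	 * zeroing here; everything inside the group is (re)initialized
	 * per request by the prep handlers.
	 */
	if (obj && cache->init_clear)
		memset(obj, 0, cache->init_clear);
	return obj;
}

Cache setup would then pass offsetof(struct io_async_msghdr, clear) as the clear size, which is why the group in the header below sits after free_iov and free_iov_nr.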
// SPDX-License-Identifier: GPL-2.0

#include <linux/net.h>
#include <linux/uio.h>

struct io_async_msghdr {
#if defined(CONFIG_NET)
	struct iovec			*free_iov;
	/* points to an allocated iov, if NULL we use fast_iov instead */
	int				free_iov_nr;
	struct_group(clear,
		int			namelen;
		struct iovec		fast_iov;
		__kernel_size_t		controllen;
		__kernel_size_t		payloadlen;
		struct sockaddr __user	*uaddr;
		struct msghdr		msg;
		struct sockaddr_storage	addr;
	);
#else
	struct_group(clear);
#endif
};
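The KASAN side mentioned in the commit message can be pictured as a small helper that drops the retained iovec before an object is poisoned and cached. The io_alloc_cache_kasan() name and its call site are assumptions based on the description above:

#include <linux/kasan.h>
#include <linux/slab.h>
#include <linux/uio.h>

/*
 * Sketch: with KASAN enabled, don't retain the allocated iovec across a
 * trip through the cache. KASAN may write metadata into the poisoned
 * object, so a stale free_iov pointer could otherwise be corrupted and
 * later mistaken for a valid allocation.
 */
static inline void io_alloc_cache_kasan(struct iovec **iov, int *nr)
{
	if (IS_ENABLED(CONFIG_KASAN)) {
		kfree(*iov);
		*iov = NULL;
		*nr = 0;
	}
}

A recycle path would call this on free_iov/free_iov_nr just before putting the object back into the cache, so a cache hit under KASAN always starts without a retained iovec.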
#if defined(CONFIG_NET)

int io_shutdown_prep(struct io_kiocb *req, const struct io_uring_sqe *sqe);
int io_shutdown(struct io_kiocb *req, unsigned int issue_flags);

void io_sendmsg_recvmsg_cleanup(struct io_kiocb *req);
int io_sendmsg_prep(struct io_kiocb *req, const struct io_uring_sqe *sqe);
int io_sendmsg(struct io_kiocb *req, unsigned int issue_flags);

int io_send(struct io_kiocb *req, unsigned int issue_flags);

int io_recvmsg_prep(struct io_kiocb *req, const struct io_uring_sqe *sqe);
int io_recvmsg(struct io_kiocb *req, unsigned int issue_flags);
int io_recv(struct io_kiocb *req, unsigned int issue_flags);

void io_sendrecv_fail(struct io_kiocb *req);

int io_accept_prep(struct io_kiocb *req, const struct io_uring_sqe *sqe);
int io_accept(struct io_kiocb *req, unsigned int issue_flags);

int io_socket_prep(struct io_kiocb *req, const struct io_uring_sqe *sqe);
int io_socket(struct io_kiocb *req, unsigned int issue_flags);

int io_connect_prep(struct io_kiocb *req, const struct io_uring_sqe *sqe);
int io_connect(struct io_kiocb *req, unsigned int issue_flags);

int io_send_zc(struct io_kiocb *req, unsigned int issue_flags);
int io_sendmsg_zc(struct io_kiocb *req, unsigned int issue_flags);
int io_send_zc_prep(struct io_kiocb *req, const struct io_uring_sqe *sqe);
void io_send_zc_cleanup(struct io_kiocb *req);

int io_bind_prep(struct io_kiocb *req, const struct io_uring_sqe *sqe);
int io_bind(struct io_kiocb *req, unsigned int issue_flags);

int io_listen_prep(struct io_kiocb *req, const struct io_uring_sqe *sqe);
int io_listen(struct io_kiocb *req, unsigned int issue_flags);

void io_netmsg_cache_free(const void *entry);
#else
static inline void io_netmsg_cache_free(const void *entry)
{
}
#endif
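Finally, when the cache itself is torn down, a cached entry may still own a retained iovec (in the non-KASAN case), so io_netmsg_cache_free() has to release it before freeing the entry. A plausible sketch of the net.c side, assuming the layout above:

/*
 * Sketch of the cache-free callback: a cached entry may still hold an
 * allocated iovec that must be freed along with it.
 */
void io_netmsg_cache_free(const void *entry)
{
	struct io_async_msghdr *kmsg = (struct io_async_msghdr *) entry;

	kfree(kmsg->free_iov);	/* kfree(NULL) is a no-op */
	kfree(kmsg);
}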