io_uring: add alloc_cache.c
Avoid inlining everything from alloc_cache.h; move the cold bits into a new file.

Signed-off-by: Pavel Begunkov <asml.silence@gmail.com>
Reviewed-by: Gabriel Krisman Bertazi <krisman@suse.de>
Link: https://lore.kernel.org/r/06984c6cd58e703f7cfae5ab3067912f9f635a06.1738087204.git.asml.silence@gmail.com
Signed-off-by: Jens Axboe <axboe@kernel.dk>
parent 16ac51a0a7
commit d19af0e936

3 changed files with 54 additions and 36 deletions
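
Before the diffs: a rough sketch of how a caller drives this cache API once the cold paths live in alloc_cache.c. The helpers and the IO_ALLOC_CACHE_MAX constant are real (they appear in the hunks below); the owning struct, the element type, and the example_-prefixed functions are hypothetical.

#include <linux/slab.h>
#include "alloc_cache.h"

/* hypothetical owner; io_uring itself embeds its caches in struct io_ring_ctx */
static struct io_alloc_cache example_cache;

static int example_setup(void)
{
	/* inverted convention: io_alloc_cache_init() returns false on success */
	if (io_alloc_cache_init(&example_cache, IO_ALLOC_CACHE_MAX,
				sizeof(struct iovec), 0))
		return -ENOMEM;
	return 0;
}

static void example_teardown(void)
{
	/* cold path, now out of line: drain cached entries, free the array */
	io_alloc_cache_free(&example_cache, kfree);
}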
io_uring/Makefile

@@ -13,7 +13,7 @@ obj-$(CONFIG_IO_URING) += io_uring.o opdef.o kbuf.o rsrc.o notif.o \
 					sync.o msg_ring.o advise.o openclose.o \
 					epoll.o statx.o timeout.o fdinfo.o \
 					cancel.o waitid.o register.o \
-					truncate.o memmap.o
+					truncate.o memmap.o alloc_cache.o
 obj-$(CONFIG_IO_WQ) += io-wq.o
 obj-$(CONFIG_FUTEX) += futex.o
 obj-$(CONFIG_NET_RX_BUSY_POLL) += napi.o
io_uring/alloc_cache.c (new file, 44 lines)
@@ -0,0 +1,44 @@
+// SPDX-License-Identifier: GPL-2.0
+
+#include "alloc_cache.h"
+
+void io_alloc_cache_free(struct io_alloc_cache *cache,
+			 void (*free)(const void *))
+{
+	void *entry;
+
+	if (!cache->entries)
+		return;
+
+	while ((entry = io_alloc_cache_get(cache)) != NULL)
+		free(entry);
+
+	kvfree(cache->entries);
+	cache->entries = NULL;
+}
+
+/* returns false if the cache was initialized properly */
+bool io_alloc_cache_init(struct io_alloc_cache *cache,
+			 unsigned max_nr, unsigned int size,
+			 unsigned int init_bytes)
+{
+	cache->entries = kvmalloc_array(max_nr, sizeof(void *), GFP_KERNEL);
+	if (!cache->entries)
+		return true;
+
+	cache->nr_cached = 0;
+	cache->max_cached = max_nr;
+	cache->elem_size = size;
+	cache->init_clear = init_bytes;
+	return false;
+}
+
+void *io_cache_alloc_new(struct io_alloc_cache *cache, gfp_t gfp)
+{
+	void *obj;
+
+	obj = kmalloc(cache->elem_size, gfp);
+	if (obj && cache->init_clear)
+		memset(obj, 0, cache->init_clear);
+	return obj;
+}
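
One detail worth calling out in io_cache_alloc_new() above: only the first init_clear bytes of a fresh object are zeroed, so a cache can clear just the header of a larger element and leave the payload area uninitialized. An illustrative use of that knob (the struct is made up for this example; offsetof() comes from <linux/stddef.h>):

struct example_elem {
	unsigned int	flags;		/* must start zeroed */
	void		*priv;		/* must start zeroed */
	char		payload[256];	/* left uninitialized until written */
};

/* zero everything up to, but not including, 'payload' on fresh allocations */
io_alloc_cache_init(&cache, IO_ALLOC_CACHE_MAX,
		    sizeof(struct example_elem),
		    offsetof(struct example_elem, payload));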
io_uring/alloc_cache.h

@@ -8,6 +8,14 @@
  */
 #define IO_ALLOC_CACHE_MAX	128
 
+void io_alloc_cache_free(struct io_alloc_cache *cache,
+			 void (*free)(const void *));
+bool io_alloc_cache_init(struct io_alloc_cache *cache,
+			 unsigned max_nr, unsigned int size,
+			 unsigned int init_bytes);
+
+void *io_cache_alloc_new(struct io_alloc_cache *cache, gfp_t gfp);
+
 static inline void io_alloc_cache_kasan(struct iovec **iov, int *nr)
 {
 	if (IS_ENABLED(CONFIG_KASAN)) {
@@ -57,41 +65,7 @@ static inline void *io_cache_alloc(struct io_alloc_cache *cache, gfp_t gfp)
 	obj = io_alloc_cache_get(cache);
 	if (obj)
 		return obj;
-
-	obj = kmalloc(cache->elem_size, gfp);
-	if (obj && cache->init_clear)
-		memset(obj, 0, cache->init_clear);
-	return obj;
+	return io_cache_alloc_new(cache, gfp);
 }
 
-/* returns false if the cache was initialized properly */
-static inline bool io_alloc_cache_init(struct io_alloc_cache *cache,
-				       unsigned max_nr, unsigned int size,
-				       unsigned int init_bytes)
-{
-	cache->entries = kvmalloc_array(max_nr, sizeof(void *), GFP_KERNEL);
-	if (cache->entries) {
-		cache->nr_cached = 0;
-		cache->max_cached = max_nr;
-		cache->elem_size = size;
-		cache->init_clear = init_bytes;
-		return false;
-	}
-	return true;
-}
-
-static inline void io_alloc_cache_free(struct io_alloc_cache *cache,
-				       void (*free)(const void *))
-{
-	void *entry;
-
-	if (!cache->entries)
-		return;
-
-	while ((entry = io_alloc_cache_get(cache)) != NULL)
-		free(entry);
-
-	kvfree(cache->entries);
-	cache->entries = NULL;
-}
 #endif
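
The net effect of the split shows up at the call sites: the hit path in io_cache_alloc() stays inline everywhere, and only a cache miss now pays for an out-of-line call into alloc_cache.c. An end-to-end sketch; io_alloc_cache_put() is the pre-existing inline recycle helper in alloc_cache.h (not part of this diff, its bool return assumed from contemporaneous io_uring callers), and the element type is again hypothetical:

struct example_elem *elem;

/* fast path: pop a recycled entry inline; a miss falls through to
 * io_cache_alloc_new() in alloc_cache.c (kmalloc + partial memset) */
elem = io_cache_alloc(&cache, GFP_KERNEL);
if (!elem)
	return -ENOMEM;

/* ... use elem ... */

/* recycle: the put helper refuses once max_cached entries are held,
 * in which case the object is freed normally */
if (!io_alloc_cache_put(&cache, elem))
	kfree(elem);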