net: Use nested-BH locking for napi_alloc_cache.
napi_alloc_cache is a per-CPU variable and relies on disabled BH for its
locking. Without per-CPU locking in local_bh_disable() on PREEMPT_RT,
this data structure requires explicit locking.

Add a local_lock_t to the data structure and use local_lock_nested_bh()
for locking. This change adds only lockdep coverage and does not alter
the functional behaviour for !PREEMPT_RT.

Signed-off-by: Sebastian Andrzej Siewior <bigeasy@linutronix.de>
Link: https://patch.msgid.link/20240620132727.660738-5-bigeasy@linutronix.de
Signed-off-by: Jakub Kicinski <kuba@kernel.org>
This commit is contained in:
parent 43d7ca2907
commit bdacf3e349
1 changed file with 24 additions and 5 deletions
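Before the diff, a minimal sketch of the nested-BH locking idiom this patch applies. The struct and function names below (demo_cache, demo_cache_take) are illustrative only and not part of the patched file; the primitives themselves (local_lock_t, INIT_LOCAL_LOCK(), local_lock_nested_bh()) are the real kernel APIs used in the diff:

/* Sketch: a per-CPU cache protected by a nested-BH local lock.
 * On !PREEMPT_RT the lock compiles down to lockdep annotations;
 * on PREEMPT_RT it is a real per-CPU lock.
 */
#include <linux/local_lock.h>
#include <linux/percpu.h>

struct demo_cache {
	local_lock_t bh_lock;
	unsigned int count;
};

static DEFINE_PER_CPU(struct demo_cache, demo_cache) = {
	.bh_lock = INIT_LOCAL_LOCK(bh_lock),
};

/* Caller must already run with BH disabled (e.g. NAPI/softirq context). */
static unsigned int demo_cache_bump(void)
{
	struct demo_cache *dc = this_cpu_ptr(&demo_cache);
	unsigned int val;

	/* Pass the per-CPU variable itself; the helper resolves the
	 * current CPU's lock instance internally.
	 */
	local_lock_nested_bh(&demo_cache.bh_lock);
	val = dc->count++;
	local_unlock_nested_bh(&demo_cache.bh_lock);

	return val;
}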
net/core/skbuff.c
@@ -277,6 +277,7 @@ static void *page_frag_alloc_1k(struct page_frag_1k *nc, gfp_t gfp_mask)
 #endif
 
 struct napi_alloc_cache {
+	local_lock_t bh_lock;
 	struct page_frag_cache page;
 	struct page_frag_1k page_small;
 	unsigned int skb_count;
@@ -284,7 +285,9 @@ struct napi_alloc_cache {
 };
 
 static DEFINE_PER_CPU(struct page_frag_cache, netdev_alloc_cache);
-static DEFINE_PER_CPU(struct napi_alloc_cache, napi_alloc_cache);
+static DEFINE_PER_CPU(struct napi_alloc_cache, napi_alloc_cache) = {
+	.bh_lock = INIT_LOCAL_LOCK(bh_lock),
+};
 
 /* Double check that napi_get_frags() allocates skbs with
  * skb->head being backed by slab, not a page fragment.
@@ -306,11 +309,16 @@ void napi_get_frags_check(struct napi_struct *napi)
 void *__napi_alloc_frag_align(unsigned int fragsz, unsigned int align_mask)
 {
 	struct napi_alloc_cache *nc = this_cpu_ptr(&napi_alloc_cache);
+	void *data;
 
 	fragsz = SKB_DATA_ALIGN(fragsz);
 
-	return __page_frag_alloc_align(&nc->page, fragsz, GFP_ATOMIC,
+	local_lock_nested_bh(&napi_alloc_cache.bh_lock);
+	data = __page_frag_alloc_align(&nc->page, fragsz, GFP_ATOMIC,
 				       align_mask);
+	local_unlock_nested_bh(&napi_alloc_cache.bh_lock);
+
+	return data;
 }
 EXPORT_SYMBOL(__napi_alloc_frag_align);
 
@@ -338,16 +346,20 @@ static struct sk_buff *napi_skb_cache_get(void)
 	struct napi_alloc_cache *nc = this_cpu_ptr(&napi_alloc_cache);
 	struct sk_buff *skb;
 
+	local_lock_nested_bh(&napi_alloc_cache.bh_lock);
 	if (unlikely(!nc->skb_count)) {
 		nc->skb_count = kmem_cache_alloc_bulk(net_hotdata.skbuff_cache,
 						      GFP_ATOMIC,
 						      NAPI_SKB_CACHE_BULK,
 						      nc->skb_cache);
-		if (unlikely(!nc->skb_count))
+		if (unlikely(!nc->skb_count)) {
+			local_unlock_nested_bh(&napi_alloc_cache.bh_lock);
 			return NULL;
+		}
 	}
 
 	skb = nc->skb_cache[--nc->skb_count];
+	local_unlock_nested_bh(&napi_alloc_cache.bh_lock);
 	kasan_mempool_unpoison_object(skb, kmem_cache_size(net_hotdata.skbuff_cache));
 
 	return skb;
@@ -740,9 +752,13 @@ struct sk_buff *__netdev_alloc_skb(struct net_device *dev, unsigned int len,
 		pfmemalloc = nc->pfmemalloc;
 	} else {
 		local_bh_disable();
+		local_lock_nested_bh(&napi_alloc_cache.bh_lock);
+
 		nc = this_cpu_ptr(&napi_alloc_cache.page);
 		data = page_frag_alloc(nc, len, gfp_mask);
 		pfmemalloc = nc->pfmemalloc;
+
+		local_unlock_nested_bh(&napi_alloc_cache.bh_lock);
 		local_bh_enable();
 	}
 
@@ -806,11 +822,11 @@ struct sk_buff *napi_alloc_skb(struct napi_struct *napi, unsigned int len)
 		goto skb_success;
 	}
 
-	nc = this_cpu_ptr(&napi_alloc_cache);
-
 	if (sk_memalloc_socks())
 		gfp_mask |= __GFP_MEMALLOC;
 
+	local_lock_nested_bh(&napi_alloc_cache.bh_lock);
+	nc = this_cpu_ptr(&napi_alloc_cache);
 	if (NAPI_HAS_SMALL_PAGE_FRAG && len <= SKB_WITH_OVERHEAD(1024)) {
 		/* we are artificially inflating the allocation size, but
 		 * that is not as bad as it may look like, as:
@@ -832,6 +848,7 @@ struct sk_buff *napi_alloc_skb(struct napi_struct *napi, unsigned int len)
 		data = page_frag_alloc(&nc->page, len, gfp_mask);
 		pfmemalloc = nc->page.pfmemalloc;
 	}
+	local_unlock_nested_bh(&napi_alloc_cache.bh_lock);
 
 	if (unlikely(!data))
 		return NULL;
@@ -1431,6 +1448,7 @@ static void napi_skb_cache_put(struct sk_buff *skb)
 	if (!kasan_mempool_poison_object(skb))
 		return;
 
+	local_lock_nested_bh(&napi_alloc_cache.bh_lock);
 	nc->skb_cache[nc->skb_count++] = skb;
 
 	if (unlikely(nc->skb_count == NAPI_SKB_CACHE_SIZE)) {
@@ -1442,6 +1460,7 @@ static void napi_skb_cache_put(struct sk_buff *skb)
 			nc->skb_cache + NAPI_SKB_CACHE_HALF);
 		nc->skb_count = NAPI_SKB_CACHE_HALF;
 	}
+	local_unlock_nested_bh(&napi_alloc_cache.bh_lock);
 }
 
 void __napi_kfree_skb(struct sk_buff *skb, enum skb_drop_reason reason)
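A note on the locking scope above: local_lock_nested_bh() is handed the per-CPU variable itself (&napi_alloc_cache.bh_lock) rather than the already-resolved nc pointer, because the helper performs the per-CPU addressing internally. On !PREEMPT_RT these calls reduce to lockdep annotations, which is why the functional behaviour there is unchanged; on PREEMPT_RT they take a real per-CPU lock. The extra unlock in napi_skb_cache_get()'s empty-cache path keeps the lock balanced before the early return NULL.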