@@ -347,8 +347,16 @@ struct sk_buff *build_skb(void *data, unsigned int frag_size)
 }
 EXPORT_SYMBOL(build_skb);
 
+#define NAPI_SKB_CACHE_SIZE 64
+
+struct napi_alloc_cache {
+        struct page_frag_cache page;
+        size_t skb_count;
+        void *skb_cache[NAPI_SKB_CACHE_SIZE];
+};
+
 static DEFINE_PER_CPU(struct page_frag_cache, netdev_alloc_cache);
-static DEFINE_PER_CPU(struct page_frag_cache, napi_alloc_cache);
+static DEFINE_PER_CPU(struct napi_alloc_cache, napi_alloc_cache);
 
 static void *__netdev_alloc_frag(unsigned int fragsz, gfp_t gfp_mask)
 {
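The struct added above bundles the existing per-CPU page-frag cache with a small array of detached skb heads, so one per-CPU lookup reaches both the frag allocator state and the deferred-free list. For scale, the new members cost 520 bytes per CPU on a 64-bit build (8 bytes of skb_count plus 64 cached pointers). A user-space sketch of that footprint, using a hypothetical stand-in for struct page_frag_cache (not kernel code):

#include <stdio.h>

#define NAPI_SKB_CACHE_SIZE 64

/* Hypothetical stand-in for the kernel's struct page_frag_cache;
 * only the size of the *added* members matters here. */
struct page_frag_cache_model {
        void *va;
        unsigned int offset;
};

struct napi_alloc_cache_model {
        struct page_frag_cache_model page;
        size_t skb_count;
        void *skb_cache[NAPI_SKB_CACHE_SIZE];
};

int main(void)
{
        /* Prints 520 on a 64-bit build: 8 + 64 * 8 bytes. */
        printf("bytes added per CPU: %zu\n",
               sizeof(struct napi_alloc_cache_model) -
               sizeof(struct page_frag_cache_model));
        return 0;
}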
@@ -378,9 +386,9 @@ EXPORT_SYMBOL(netdev_alloc_frag);
 
 static void *__napi_alloc_frag(unsigned int fragsz, gfp_t gfp_mask)
 {
-        struct page_frag_cache *nc = this_cpu_ptr(&napi_alloc_cache);
+        struct napi_alloc_cache *nc = this_cpu_ptr(&napi_alloc_cache);
 
-        return __alloc_page_frag(nc, fragsz, gfp_mask);
+        return __alloc_page_frag(&nc->page, fragsz, gfp_mask);
 }
 
 void *napi_alloc_frag(unsigned int fragsz)
@@ -474,7 +482,7 @@ EXPORT_SYMBOL(__netdev_alloc_skb);
 struct sk_buff *__napi_alloc_skb(struct napi_struct *napi, unsigned int len,
                                  gfp_t gfp_mask)
 {
-        struct page_frag_cache *nc = this_cpu_ptr(&napi_alloc_cache);
+        struct napi_alloc_cache *nc = this_cpu_ptr(&napi_alloc_cache);
         struct sk_buff *skb;
         void *data;
 
@@ -494,7 +502,7 @@ struct sk_buff *__napi_alloc_skb(struct napi_struct *napi, unsigned int len,
         if (sk_memalloc_socks())
                 gfp_mask |= __GFP_MEMALLOC;
 
-        data = __alloc_page_frag(nc, len, gfp_mask);
+        data = __alloc_page_frag(&nc->page, len, gfp_mask);
         if (unlikely(!data))
                 return NULL;
 
@@ -505,7 +513,7 @@ struct sk_buff *__napi_alloc_skb(struct napi_struct *napi, unsigned int len,
         }
 
         /* use OR instead of assignment to avoid clearing of bits in mask */
-        if (nc->pfmemalloc)
+        if (nc->page.pfmemalloc)
                 skb->pfmemalloc = 1;
         skb->head_frag = 1;
 
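The payoff for caching skb heads is the slab bulk-free API: kmem_cache_free_bulk() returns a whole array of objects to the slab in one call, amortizing the per-object locking and bookkeeping that individual kmem_cache_free() calls would pay. The next hunk builds the defer/flush machinery on that API; reduced to a minimal sketch, with illustrative names that are not part of this patch, the pattern is:

#include <linux/slab.h>

/* Illustrative pattern only: stash objects in a fixed array and hand
 * them to the slab allocator in one batch once the array fills. */
#define BATCH_SIZE 64

struct free_batch {
        size_t count;
        void *objs[BATCH_SIZE];
};

static void batch_free(struct kmem_cache *cache, struct free_batch *b,
                       void *obj)
{
        b->objs[b->count++] = obj;
        if (b->count == BATCH_SIZE) {
                kmem_cache_free_bulk(cache, BATCH_SIZE, b->objs);
                b->count = 0;
        }
}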
@@ -747,6 +755,73 @@ void consume_skb(struct sk_buff *skb)
 }
 EXPORT_SYMBOL(consume_skb);
 
+void __kfree_skb_flush(void)
+{
+        struct napi_alloc_cache *nc = this_cpu_ptr(&napi_alloc_cache);
+
+        /* flush skb_cache if it contains objects */
+        if (nc->skb_count) {
+                kmem_cache_free_bulk(skbuff_head_cache, nc->skb_count,
+                                     nc->skb_cache);
+                nc->skb_count = 0;
+        }
+}
+
+static inline void _kfree_skb_defer(struct sk_buff *skb)
+{
+        struct napi_alloc_cache *nc = this_cpu_ptr(&napi_alloc_cache);
+
+        /* drop skb->head and call any destructors for packet */
+        skb_release_all(skb);
+
+        /* record skb to the CPU-local list */
+        nc->skb_cache[nc->skb_count++] = skb;
+
+#ifdef CONFIG_SLUB
+        /* SLUB writes into objects when freeing */
+        prefetchw(skb);
+#endif
+
+        /* flush skb_cache if it is filled */
+        if (unlikely(nc->skb_count == NAPI_SKB_CACHE_SIZE)) {
+                kmem_cache_free_bulk(skbuff_head_cache, NAPI_SKB_CACHE_SIZE,
+                                     nc->skb_cache);
+                nc->skb_count = 0;
+        }
+}
+void __kfree_skb_defer(struct sk_buff *skb)
+{
+        _kfree_skb_defer(skb);
+}
+
+void napi_consume_skb(struct sk_buff *skb, int budget)
+{
+        if (unlikely(!skb))
+                return;
+
+        /* if budget is 0, assume netpoll w/ IRQs disabled */
+        if (unlikely(!budget)) {
+                dev_consume_skb_irq(skb);
+                return;
+        }
+
+        if (likely(atomic_read(&skb->users) == 1))
+                smp_rmb();
+        else if (likely(!atomic_dec_and_test(&skb->users)))
+                return;
+        /* if we reach here, the SKB is ready to free */
+        trace_consume_skb(skb);
+
+        /* clones can't use the bulk-free path; fall back to __kfree_skb() */
+        if (unlikely(skb->fclone != SKB_FCLONE_UNAVAILABLE)) {
+                __kfree_skb(skb);
+                return;
+        }
+
+        _kfree_skb_defer(skb);
+}
+EXPORT_SYMBOL(napi_consume_skb);
+
 /* Make sure a field is enclosed inside headers_start/headers_end section */
 #define CHECK_SKB_FIELD(field)                                        \
         BUILD_BUG_ON(offsetof(struct sk_buff, field) <                \
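On the consumer side, a driver's TX completion path passes its NAPI budget through to napi_consume_skb(), which treats budget == 0 as the netpoll case (IRQs possibly disabled) and otherwise feeds the freed head into the per-CPU cache. A sketch of how a hypothetical driver might call it; the driver types and names below are illustrative, not part of this patch:

#include <linux/skbuff.h>

/* Hypothetical driver-side types, for illustration only. */
struct mydrv_tx_buffer {
        struct sk_buff *skb;
};

struct mydrv_ring {
        struct mydrv_tx_buffer *tx_buffer;
        unsigned int next_to_clean;
        unsigned int next_to_use;
        unsigned int count;
};

static void mydrv_clean_tx_ring(struct mydrv_ring *ring, int budget)
{
        while (ring->next_to_clean != ring->next_to_use) {
                struct mydrv_tx_buffer *buf =
                        &ring->tx_buffer[ring->next_to_clean];

                /* budget > 0: the skb head is deferred to the per-CPU
                 * skb_cache; budget == 0 (netpoll) falls back to
                 * dev_consume_skb_irq(). */
                napi_consume_skb(buf->skb, budget);
                buf->skb = NULL;

                ring->next_to_clean = (ring->next_to_clean + 1) %
                                      ring->count;
        }
}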
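Deferred heads only reach the slab when the cache fills or is explicitly flushed, so the core has to call __kfree_skb_flush() once polling stops; in this series that call sits in net/core/dev.c, at the end of the softirq poll loop, and is not shown in the hunks above. Roughly, the shape is as follows (paraphrased sketch, not the exact upstream code; the two helpers are hypothetical):

/* Paraphrased sketch of the core-side pattern in net_rx_action();
 * work_remaining() and poll_one_napi_instance() are hypothetical. */
static void net_rx_action_sketch(void)
{
        while (work_remaining()) {
                /* Drivers free TX skbs via napi_consume_skb() here,
                 * filling the per-CPU skb_cache. */
                poll_one_napi_instance();
        }

        /* Return any still-cached skb heads to the slab in a single
         * kmem_cache_free_bulk() call. */
        __kfree_skb_flush();
}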