@@ -336,59 +336,85 @@ struct netdev_alloc_cache {
 	unsigned int		pagecnt_bias;
 };
 static DEFINE_PER_CPU(struct netdev_alloc_cache, netdev_alloc_cache);
+static DEFINE_PER_CPU(struct netdev_alloc_cache, napi_alloc_cache);
 
-static void *__netdev_alloc_frag(unsigned int fragsz, gfp_t gfp_mask)
+static struct page *__page_frag_refill(struct netdev_alloc_cache *nc,
+				       gfp_t gfp_mask)
 {
-	struct netdev_alloc_cache *nc;
-	void *data = NULL;
-	int order;
-	unsigned long flags;
+	const unsigned int order = NETDEV_FRAG_PAGE_MAX_ORDER;
+	struct page *page = NULL;
+	gfp_t gfp = gfp_mask;
+
+	if (order) {
+		gfp_mask |= __GFP_COMP | __GFP_NOWARN | __GFP_NORETRY;
+		page = alloc_pages_node(NUMA_NO_NODE, gfp_mask, order);
+		nc->frag.size = PAGE_SIZE << (page ? order : 0);
+	}
 
-	local_irq_save(flags);
-	nc = this_cpu_ptr(&netdev_alloc_cache);
-	if (unlikely(!nc->frag.page)) {
+	if (unlikely(!page))
+		page = alloc_pages_node(NUMA_NO_NODE, gfp, 0);
+
+	nc->frag.page = page;
+
+	return page;
+}
+
+static void *__alloc_page_frag(struct netdev_alloc_cache __percpu *cache,
+			       unsigned int fragsz, gfp_t gfp_mask)
+{
+	struct netdev_alloc_cache *nc = this_cpu_ptr(cache);
+	struct page *page = nc->frag.page;
+	unsigned int size;
+	int offset;
+
+	if (unlikely(!page)) {
 refill:
-		for (order = NETDEV_FRAG_PAGE_MAX_ORDER; ;) {
-			gfp_t gfp = gfp_mask;
+		page = __page_frag_refill(nc, gfp_mask);
+		if (!page)
+			return NULL;
+
+		/* if size can vary use frag.size else just use PAGE_SIZE */
+		size = NETDEV_FRAG_PAGE_MAX_ORDER ? nc->frag.size : PAGE_SIZE;
 
-			if (order)
-				gfp |= __GFP_COMP | __GFP_NOWARN;
-			nc->frag.page = alloc_pages(gfp, order);
-			if (likely(nc->frag.page))
-				break;
-			if (--order < 0)
-				goto end;
-		}
-		nc->frag.size = PAGE_SIZE << order;
 		/* Even if we own the page, we do not use atomic_set().
 		 * This would break get_page_unless_zero() users.
 		 */
-		atomic_add(NETDEV_PAGECNT_MAX_BIAS - 1,
-			   &nc->frag.page->_count);
-		nc->pagecnt_bias = NETDEV_PAGECNT_MAX_BIAS;
-		nc->frag.offset = 0;
+		atomic_add(size - 1, &page->_count);
+
+		/* reset page count bias and offset to start of new frag */
+		nc->pagecnt_bias = size;
+		nc->frag.offset = size;
 	}
 
-	if (nc->frag.offset + fragsz > nc->frag.size) {
-		if (atomic_read(&nc->frag.page->_count) != nc->pagecnt_bias) {
-			if (!atomic_sub_and_test(nc->pagecnt_bias,
-						 &nc->frag.page->_count))
-				goto refill;
-			/* OK, page count is 0, we can safely set it */
-			atomic_set(&nc->frag.page->_count,
-				   NETDEV_PAGECNT_MAX_BIAS);
-		} else {
-			atomic_add(NETDEV_PAGECNT_MAX_BIAS - nc->pagecnt_bias,
-				   &nc->frag.page->_count);
-		}
-		nc->pagecnt_bias = NETDEV_PAGECNT_MAX_BIAS;
-		nc->frag.offset = 0;
+	offset = nc->frag.offset - fragsz;
+	if (unlikely(offset < 0)) {
+		if (!atomic_sub_and_test(nc->pagecnt_bias, &page->_count))
+			goto refill;
+
+		/* if size can vary use frag.size else just use PAGE_SIZE */
+		size = NETDEV_FRAG_PAGE_MAX_ORDER ? nc->frag.size : PAGE_SIZE;
+
+		/* OK, page count is 0, we can safely set it */
+		atomic_set(&page->_count, size);
+
+		/* reset page count bias and offset to start of new frag */
+		nc->pagecnt_bias = size;
+		offset = size - fragsz;
 	}
 
-	data = page_address(nc->frag.page) + nc->frag.offset;
-	nc->frag.offset += fragsz;
 	nc->pagecnt_bias--;
-end:
+	nc->frag.offset = offset;
+
+	return page_address(page) + offset;
+}
+
+static void *__netdev_alloc_frag(unsigned int fragsz, gfp_t gfp_mask)
+{
+	unsigned long flags;
+	void *data;
+
+	local_irq_save(flags);
+	data = __alloc_page_frag(&netdev_alloc_cache, fragsz, gfp_mask);
 	local_irq_restore(flags);
 	return data;
 }
@@ -406,6 +432,17 @@ void *netdev_alloc_frag(unsigned int fragsz)
 }
 EXPORT_SYMBOL(netdev_alloc_frag);
 
+static void *__napi_alloc_frag(unsigned int fragsz, gfp_t gfp_mask)
+{
+	return __alloc_page_frag(&napi_alloc_cache, fragsz, gfp_mask);
+}
+
+void *napi_alloc_frag(unsigned int fragsz)
+{
+	return __napi_alloc_frag(fragsz, GFP_ATOMIC | __GFP_COLD);
+}
+EXPORT_SYMBOL(napi_alloc_frag);
+
 /**
  *	__netdev_alloc_skb - allocate an skbuff for rx on a specific device
  *	@dev: network device to receive on
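
The sketch below is not part of the patch; it is a hypothetical illustration of how a driver's NAPI receive path could use the napi_alloc_frag() helper exported above. The example_napi_rx_frag() name and the buffer sizing with NET_SKB_PAD, SKB_DATA_ALIGN() and struct skb_shared_info are assumptions, chosen so the returned fragment could later be handed to build_skb(); only the napi_alloc_frag() call itself comes from this patch.

/* Hypothetical example (not from the patch): allocate an RX buffer from
 * the per-CPU napi_alloc_cache.  Intended for NAPI poll (softirq) context,
 * which is why no local_irq_save()/local_irq_restore() pair is needed,
 * unlike the netdev_alloc_frag() path shown in the diff.
 */
#include <linux/skbuff.h>

static void *example_napi_rx_frag(unsigned int payload_len)
{
	/* headroom + payload + the skb_shared_info that build_skb()
	 * expects at the end of the buffer (assumed layout)
	 */
	unsigned int truesize = SKB_DATA_ALIGN(NET_SKB_PAD + payload_len) +
				SKB_DATA_ALIGN(sizeof(struct skb_shared_info));

	/* napi_alloc_frag() passes GFP_ATOMIC | __GFP_COLD itself,
	 * so callers do not supply a gfp mask.
	 */
	return napi_alloc_frag(truesize);
}

Callers in process or hardirq context would keep using netdev_alloc_frag(), which takes the interrupt-disabling path retained in __netdev_alloc_frag() above.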