
cxgb4/cxgb4vf: Replace __skb_alloc_page with __dev_alloc_page

Drop the bloated use of __skb_alloc_page and replace it with
__dev_alloc_page.  In addition, update the one other spot that
allocates a page so that it does so with the correct flags.

Cc: Hariprasad S <hariprasad@chelsio.com>
Cc: Casey Leedom <leedom@chelsio.com>
Signed-off-by: Alexander Duyck <alexander.h.duyck@redhat.com>
Signed-off-by: David S. Miller <davem@davemloft.net>
Alexander Duyck, 10 years ago
commit aa9cd31c3f
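
For context, the helpers this patch switches to wrap the page allocator and set the Rx-specific flags themselves, which is why the hunks below drop __GFP_COLD and __GFP_COMP from the driver's gfp mask. A minimal sketch of what __dev_alloc_pages()/__dev_alloc_page() look like, assuming the flag handling implied by this change (the __GFP_MEMALLOC bit in particular is an assumption carried over from the old __skb_alloc_page() behaviour, not something visible in these hunks):

/*
 * Sketch of the new helpers (assumed shape, not the authoritative kernel
 * definition): __dev_alloc_pages() folds in the Rx-friendly flags so
 * callers only pass things like __GFP_NOWARN.
 */
static inline struct page *__dev_alloc_pages(gfp_t gfp_mask, unsigned int order)
{
	/* Rx buffers want cold, compound pages; __GFP_MEMALLOC is assumed
	 * here to preserve the old __skb_alloc_page() reserve behaviour.
	 */
	gfp_mask |= __GFP_COLD | __GFP_COMP | __GFP_MEMALLOC;

	return alloc_pages_node(NUMA_NO_NODE, gfp_mask, order);
}

static inline struct page *__dev_alloc_page(gfp_t gfp_mask)
{
	return __dev_alloc_pages(gfp_mask, 0);
}

With the flags folded into the helper, the order-0 path in both drivers reduces to a bare __dev_alloc_page(gfp) call, as the small-page hunks below show.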

+ 3 - 3
drivers/net/ethernet/chelsio/cxgb4/sge.c

@@ -576,7 +576,7 @@ static unsigned int refill_fl(struct adapter *adap, struct sge_fl *q, int n,
 	__be64 *d = &q->desc[q->pidx];
 	struct rx_sw_desc *sd = &q->sdesc[q->pidx];
 
-	gfp |= __GFP_NOWARN | __GFP_COLD;
+	gfp |= __GFP_NOWARN;
 
 	if (s->fl_pg_order == 0)
 		goto alloc_small_pages;
@@ -585,7 +585,7 @@ static unsigned int refill_fl(struct adapter *adap, struct sge_fl *q, int n,
 	 * Prefer large buffers
 	 */
 	while (n) {
-		pg = alloc_pages(gfp | __GFP_COMP, s->fl_pg_order);
+		pg = __dev_alloc_pages(gfp, s->fl_pg_order);
 		if (unlikely(!pg)) {
 			q->large_alloc_failed++;
 			break;       /* fall back to single pages */
@@ -615,7 +615,7 @@ static unsigned int refill_fl(struct adapter *adap, struct sge_fl *q, int n,
 
 alloc_small_pages:
 	while (n--) {
-		pg = __skb_alloc_page(gfp, NULL);
+		pg = __dev_alloc_page(gfp);
 		if (unlikely(!pg)) {
 			q->alloc_failed++;
 			break;

+ 4 - 3
drivers/net/ethernet/chelsio/cxgb4vf/sge.c

@@ -602,6 +602,8 @@ static unsigned int refill_fl(struct adapter *adapter, struct sge_fl *fl,
 	 */
 	BUG_ON(fl->avail + n > fl->size - FL_PER_EQ_UNIT);
 
+	gfp |= __GFP_NOWARN;
+
 	/*
 	 * If we support large pages, prefer large buffers and fail over to
 	 * small pages if we can't allocate large pages to satisfy the refill.
@@ -612,8 +614,7 @@ static unsigned int refill_fl(struct adapter *adapter, struct sge_fl *fl,
 		goto alloc_small_pages;
 
 	while (n) {
-		page = alloc_pages(gfp | __GFP_COMP | __GFP_NOWARN,
-				   FL_PG_ORDER);
+		page = __dev_alloc_pages(gfp, FL_PG_ORDER);
 		if (unlikely(!page)) {
 			/*
 			 * We've failed in our attempt to allocate a "large
@@ -657,7 +658,7 @@ static unsigned int refill_fl(struct adapter *adapter, struct sge_fl *fl,
 
 alloc_small_pages:
 	while (n--) {
-		page = __skb_alloc_page(gfp | __GFP_NOWARN, NULL);
+		page = __dev_alloc_page(gfp);
 		if (unlikely(!page)) {
 			fl->alloc_failed++;
 			break;