
/****************************************************************************
 * Driver for Solarflare network controllers and boards
 * Copyright 2005-2006 Fen Systems Ltd.
 * Copyright 2005-2013 Solarflare Communications Inc.
 *
 * This program is free software; you can redistribute it and/or modify it
 * under the terms of the GNU General Public License version 2 as published
 * by the Free Software Foundation, incorporated herein by reference.
 */

#include <linux/socket.h>
#include <linux/in.h>
#include <linux/slab.h>
#include <linux/ip.h>
#include <linux/ipv6.h>
#include <linux/tcp.h>
#include <linux/udp.h>
#include <linux/prefetch.h>
#include <linux/moduleparam.h>
#include <linux/iommu.h>
#include <net/ip.h>
#include <net/checksum.h>
#include "net_driver.h"
#include "efx.h"
#include "filter.h"
#include "nic.h"
#include "selftest.h"
#include "workarounds.h"

/* Preferred number of descriptors to fill at once */
#define EFX_RX_PREFERRED_BATCH 8U

/* Number of RX buffers to recycle pages for. When creating the RX page
 * recycle ring, this number is divided by the number of buffers per page
 * to calculate the number of pages to store in the RX page recycle ring.
 */
#define EFX_RECYCLE_RING_SIZE_IOMMU 4096
#define EFX_RECYCLE_RING_SIZE_NOIOMMU (2 * EFX_RX_PREFERRED_BATCH)

/* Size of buffer allocated for skb header area. */
#define EFX_SKB_HEADERS 128u

/* This is the percentage fill level below which new RX descriptors
 * will be added to the RX descriptor ring.
 */
static unsigned int rx_refill_threshold;

/* Each packet can consume up to ceil(max_frame_len / buffer_size) buffers */
#define EFX_RX_MAX_FRAGS DIV_ROUND_UP(EFX_MAX_FRAME_LEN(EFX_MAX_MTU), \
                                      EFX_RX_USR_BUF_SIZE)

/*
 * RX maximum head room required.
 *
 * This must be at least 1 to prevent overflow, plus one packet-worth
 * to allow pipelined receives.
 */
#define EFX_RXD_HEAD_ROOM (1 + EFX_RX_MAX_FRAGS)

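/* Return the virtual address of an RX buffer's data within its page. */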
static inline u8 *efx_rx_buf_va(struct efx_rx_buffer *buf)
{
        return page_address(buf->page) + buf->page_offset;
}

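/* Extract the RSS hash the NIC placed in the packet prefix. The
 * unaligned-access path reads it as a little-endian 32-bit word; the
 * fallback assembles the same value byte by byte.
 */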
static inline u32 efx_rx_buf_hash(struct efx_nic *efx, const u8 *eh)
{
#if defined(CONFIG_HAVE_EFFICIENT_UNALIGNED_ACCESS)
        return __le32_to_cpup((const __le32 *)(eh + efx->rx_packet_hash_offset));
#else
        const u8 *data = eh + efx->rx_packet_hash_offset;
        return (u32)data[0]       |
               (u32)data[1] << 8  |
               (u32)data[2] << 16 |
               (u32)data[3] << 24;
#endif
}

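/* Advance to the next buffer in the descriptor ring, wrapping at the end. */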
static inline struct efx_rx_buffer *
efx_rx_buf_next(struct efx_rx_queue *rx_queue, struct efx_rx_buffer *rx_buf)
{
        if (unlikely(rx_buf == efx_rx_buffer(rx_queue, rx_queue->ptr_mask)))
                return efx_rx_buffer(rx_queue, 0);
        else
                return rx_buf + 1;
}

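/* Hand ownership of the first len bytes of the buffer back to the CPU
 * after the NIC has DMAed into it.
 */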
static inline void efx_sync_rx_buffer(struct efx_nic *efx,
                                      struct efx_rx_buffer *rx_buf,
                                      unsigned int len)
{
        dma_sync_single_for_cpu(&efx->pci_dev->dev, rx_buf->dma_addr, len,
                                DMA_FROM_DEVICE);
}

void efx_rx_config_page_split(struct efx_nic *efx)
{
        efx->rx_page_buf_step = ALIGN(efx->rx_dma_len + efx->rx_ip_align,
                                      EFX_RX_BUF_ALIGNMENT);
        efx->rx_bufs_per_page = efx->rx_buffer_order ? 1 :
                ((PAGE_SIZE - sizeof(struct efx_rx_page_state)) /
                 efx->rx_page_buf_step);
        efx->rx_buffer_truesize = (PAGE_SIZE << efx->rx_buffer_order) /
                efx->rx_bufs_per_page;
        efx->rx_pages_per_batch = DIV_ROUND_UP(EFX_RX_PREFERRED_BATCH,
                                               efx->rx_bufs_per_page);
}

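/* Illustrative arithmetic only (the real values depend on MTU and
 * platform): with 4 KiB pages, an order-0 allocation and a hypothetical
 * rx_page_buf_step of 1792 bytes, rx_bufs_per_page works out to
 * (4096 - sizeof(struct efx_rx_page_state)) / 1792 = 2, so
 * rx_buffer_truesize = 4096 / 2 = 2048 and
 * rx_pages_per_batch = DIV_ROUND_UP(8, 2) = 4.
 */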
/* Check the RX page recycle ring for a page that can be reused. */
static struct page *efx_reuse_page(struct efx_rx_queue *rx_queue)
{
        struct efx_nic *efx = rx_queue->efx;
        struct page *page;
        struct efx_rx_page_state *state;
        unsigned index;

        index = rx_queue->page_remove & rx_queue->page_ptr_mask;
        page = rx_queue->page_ring[index];
        if (page == NULL)
                return NULL;

        rx_queue->page_ring[index] = NULL;
        /* page_remove cannot exceed page_add. */
        if (rx_queue->page_remove != rx_queue->page_add)
                ++rx_queue->page_remove;

        /* If page_count is 1 then we hold the only reference to this page. */
        if (page_count(page) == 1) {
                ++rx_queue->page_recycle_count;
                return page;
        } else {
                state = page_address(page);
                dma_unmap_page(&efx->pci_dev->dev, state->dma_addr,
                               PAGE_SIZE << efx->rx_buffer_order,
                               DMA_FROM_DEVICE);
                put_page(page);
                ++rx_queue->page_recycle_failed;
        }

        return NULL;
}

/**
 * efx_init_rx_buffers - create EFX_RX_PREFERRED_BATCH page-based RX buffers
 *
 * @rx_queue: Efx RX queue
 * @atomic: Perform atomic allocations (GFP_ATOMIC rather than GFP_KERNEL)
 *
 * This allocates a batch of pages, maps them for DMA, and populates
 * struct efx_rx_buffers for each one. Returns a negative error code or
 * 0 on success. If a single page can be used for multiple buffers,
 * then the page will either be inserted fully, or not at all.
 */
static int efx_init_rx_buffers(struct efx_rx_queue *rx_queue, bool atomic)
{
        struct efx_nic *efx = rx_queue->efx;
        struct efx_rx_buffer *rx_buf;
        struct page *page;
        unsigned int page_offset;
        struct efx_rx_page_state *state;
        dma_addr_t dma_addr;
        unsigned index, count;

        count = 0;
        do {
                page = efx_reuse_page(rx_queue);
                if (page == NULL) {
                        page = alloc_pages(__GFP_COLD | __GFP_COMP |
                                           (atomic ? GFP_ATOMIC : GFP_KERNEL),
                                           efx->rx_buffer_order);
                        if (unlikely(page == NULL))
                                return -ENOMEM;
                        dma_addr =
                                dma_map_page(&efx->pci_dev->dev, page, 0,
                                             PAGE_SIZE << efx->rx_buffer_order,
                                             DMA_FROM_DEVICE);
                        if (unlikely(dma_mapping_error(&efx->pci_dev->dev,
                                                       dma_addr))) {
                                __free_pages(page, efx->rx_buffer_order);
                                return -EIO;
                        }
                        state = page_address(page);
                        state->dma_addr = dma_addr;
                } else {
                        state = page_address(page);
                        dma_addr = state->dma_addr;
                }

                dma_addr += sizeof(struct efx_rx_page_state);
                page_offset = sizeof(struct efx_rx_page_state);

                do {
                        index = rx_queue->added_count & rx_queue->ptr_mask;
                        rx_buf = efx_rx_buffer(rx_queue, index);
                        rx_buf->dma_addr = dma_addr + efx->rx_ip_align;
                        rx_buf->page = page;
                        rx_buf->page_offset = page_offset + efx->rx_ip_align;
                        rx_buf->len = efx->rx_dma_len;
                        rx_buf->flags = 0;
                        ++rx_queue->added_count;
                        get_page(page);
                        dma_addr += efx->rx_page_buf_step;
                        page_offset += efx->rx_page_buf_step;
                } while (page_offset + efx->rx_page_buf_step <= PAGE_SIZE);

                rx_buf->flags = EFX_RX_BUF_LAST_IN_PAGE;
        } while (++count < efx->rx_pages_per_batch);

        return 0;
}

/* Unmap a DMA-mapped page. This function is only called for the final RX
 * buffer in a page.
 */
static void efx_unmap_rx_buffer(struct efx_nic *efx,
                                struct efx_rx_buffer *rx_buf)
{
        struct page *page = rx_buf->page;

        if (page) {
                struct efx_rx_page_state *state = page_address(page);
                dma_unmap_page(&efx->pci_dev->dev,
                               state->dma_addr,
                               PAGE_SIZE << efx->rx_buffer_order,
                               DMA_FROM_DEVICE);
        }
}

static void efx_free_rx_buffer(struct efx_rx_buffer *rx_buf)
{
        if (rx_buf->page) {
                put_page(rx_buf->page);
                rx_buf->page = NULL;
        }
}

/* Attempt to recycle the page if there is an RX recycle ring; the page can
 * only be added if this is the final RX buffer, to prevent pages being used
 * in the descriptor ring and appearing in the recycle ring simultaneously.
 */
static void efx_recycle_rx_page(struct efx_channel *channel,
                                struct efx_rx_buffer *rx_buf)
{
        struct page *page = rx_buf->page;
        struct efx_rx_queue *rx_queue = efx_channel_get_rx_queue(channel);
        struct efx_nic *efx = rx_queue->efx;
        unsigned index;

        /* Only recycle the page after processing the final buffer. */
        if (!(rx_buf->flags & EFX_RX_BUF_LAST_IN_PAGE))
                return;

        index = rx_queue->page_add & rx_queue->page_ptr_mask;
        if (rx_queue->page_ring[index] == NULL) {
                unsigned read_index = rx_queue->page_remove &
                        rx_queue->page_ptr_mask;

                /* The next slot in the recycle ring is available, but
                 * increment page_remove if the read pointer currently
                 * points here.
                 */
                if (read_index == index)
                        ++rx_queue->page_remove;
                rx_queue->page_ring[index] = page;
                ++rx_queue->page_add;
                return;
        }
        ++rx_queue->page_recycle_full;
        efx_unmap_rx_buffer(efx, rx_buf);
        put_page(rx_buf->page);
}

static void efx_fini_rx_buffer(struct efx_rx_queue *rx_queue,
                               struct efx_rx_buffer *rx_buf)
{
        /* Release the page reference we hold for the buffer. */
        if (rx_buf->page)
                put_page(rx_buf->page);

        /* If this is the last buffer in a page, unmap and free it. */
        if (rx_buf->flags & EFX_RX_BUF_LAST_IN_PAGE) {
                efx_unmap_rx_buffer(rx_queue->efx, rx_buf);
                efx_free_rx_buffer(rx_buf);
        }
        rx_buf->page = NULL;
}

/* Recycle the pages that are used by buffers that have just been received. */
static void efx_recycle_rx_pages(struct efx_channel *channel,
                                 struct efx_rx_buffer *rx_buf,
                                 unsigned int n_frags)
{
        struct efx_rx_queue *rx_queue = efx_channel_get_rx_queue(channel);

        do {
                efx_recycle_rx_page(channel, rx_buf);
                rx_buf = efx_rx_buf_next(rx_queue, rx_buf);
        } while (--n_frags);
}

static void efx_discard_rx_packet(struct efx_channel *channel,
                                  struct efx_rx_buffer *rx_buf,
                                  unsigned int n_frags)
{
        struct efx_rx_queue *rx_queue = efx_channel_get_rx_queue(channel);

        efx_recycle_rx_pages(channel, rx_buf, n_frags);

        do {
                efx_free_rx_buffer(rx_buf);
                rx_buf = efx_rx_buf_next(rx_queue, rx_buf);
        } while (--n_frags);
}

/**
 * efx_fast_push_rx_descriptors - push new RX descriptors quickly
 * @rx_queue: RX descriptor queue
 * @atomic: Perform atomic allocations (GFP_ATOMIC rather than GFP_KERNEL)
 *
 * This will aim to fill the RX descriptor queue up to
 * @rx_queue->max_fill. If there is insufficient atomic
 * memory to do so, a slow fill will be scheduled.
 *
 * The caller must provide serialisation (none is used here). In practice,
 * this means this function must run from the NAPI handler, or be called
 * when NAPI is disabled.
 */
void efx_fast_push_rx_descriptors(struct efx_rx_queue *rx_queue, bool atomic)
{
        struct efx_nic *efx = rx_queue->efx;
        unsigned int fill_level, batch_size;
        int space, rc = 0;

        if (!rx_queue->refill_enabled)
                return;

        /* Calculate current fill level, and exit if we don't need to fill */
        fill_level = (rx_queue->added_count - rx_queue->removed_count);
        EFX_BUG_ON_PARANOID(fill_level > rx_queue->efx->rxq_entries);
        if (fill_level >= rx_queue->fast_fill_trigger)
                goto out;

        /* Record minimum fill level */
        if (unlikely(fill_level < rx_queue->min_fill)) {
                if (fill_level)
                        rx_queue->min_fill = fill_level;
        }

        batch_size = efx->rx_pages_per_batch * efx->rx_bufs_per_page;
        space = rx_queue->max_fill - fill_level;
        EFX_BUG_ON_PARANOID(space < batch_size);

        netif_vdbg(rx_queue->efx, rx_status, rx_queue->efx->net_dev,
                   "RX queue %d fast-filling descriptor ring from"
                   " level %d to level %d\n",
                   efx_rx_queue_index(rx_queue), fill_level,
                   rx_queue->max_fill);

        do {
                rc = efx_init_rx_buffers(rx_queue, atomic);
                if (unlikely(rc)) {
                        /* Ensure that we don't leave the rx queue empty */
                        if (rx_queue->added_count == rx_queue->removed_count)
                                efx_schedule_slow_fill(rx_queue);
                        goto out;
                }
        } while ((space -= batch_size) >= batch_size);

        netif_vdbg(rx_queue->efx, rx_status, rx_queue->efx->net_dev,
                   "RX queue %d fast-filled descriptor ring "
                   "to level %d\n", efx_rx_queue_index(rx_queue),
                   rx_queue->added_count - rx_queue->removed_count);

 out:
        if (rx_queue->notified_count != rx_queue->added_count)
                efx_nic_notify_rx_desc(rx_queue);
}

void efx_rx_slow_fill(unsigned long context)
{
        struct efx_rx_queue *rx_queue = (struct efx_rx_queue *)context;

        /* Post an event to cause NAPI to run and refill the queue */
        efx_nic_generate_fill_event(rx_queue);
        ++rx_queue->slow_fill_count;
}

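/* Validate the length reported by the NIC against the buffer that was
 * posted, and flag the packet for discard if it does not fit.
 */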
static void efx_rx_packet__check_len(struct efx_rx_queue *rx_queue,
                                     struct efx_rx_buffer *rx_buf,
                                     int len)
{
        struct efx_nic *efx = rx_queue->efx;
        unsigned max_len = rx_buf->len - efx->type->rx_buffer_padding;

        if (likely(len <= max_len))
                return;

        /* The packet must be discarded; this is a fatal error only if
         * the length also exceeds the buffer itself, in which case the
         * hardware may have written past the end of the buffer.
         */
        rx_buf->flags |= EFX_RX_PKT_DISCARD;

        if ((len > rx_buf->len) && EFX_WORKAROUND_8071(efx)) {
                if (net_ratelimit())
                        netif_err(efx, rx_err, efx->net_dev,
                                  "RX queue %d seriously overlength "
                                  "RX event (0x%x > 0x%x+0x%x). Leaking\n",
                                  efx_rx_queue_index(rx_queue), len, max_len,
                                  efx->type->rx_buffer_padding);
                efx_schedule_reset(efx, RESET_TYPE_RX_RECOVERY);
        } else {
                if (net_ratelimit())
                        netif_err(efx, rx_err, efx->net_dev,
                                  "RX queue %d overlength RX event "
                                  "(0x%x > 0x%x)\n",
                                  efx_rx_queue_index(rx_queue), len, max_len);
        }

        efx_rx_queue_channel(rx_queue)->n_rx_overlength++;
}

/* Pass a received packet up through GRO. GRO can handle pages
 * regardless of checksum state and skbs with a good checksum.
 */
static void
efx_rx_packet_gro(struct efx_channel *channel, struct efx_rx_buffer *rx_buf,
                  unsigned int n_frags, u8 *eh)
{
        struct napi_struct *napi = &channel->napi_str;
        gro_result_t gro_result;
        struct efx_nic *efx = channel->efx;
        struct sk_buff *skb;

        skb = napi_get_frags(napi);
        if (unlikely(!skb)) {
                while (n_frags--) {
                        put_page(rx_buf->page);
                        rx_buf->page = NULL;
                        rx_buf = efx_rx_buf_next(&channel->rx_queue, rx_buf);
                }
                return;
        }

        if (efx->net_dev->features & NETIF_F_RXHASH)
                skb->rxhash = efx_rx_buf_hash(efx, eh);
        skb->ip_summed = ((rx_buf->flags & EFX_RX_PKT_CSUMMED) ?
                          CHECKSUM_UNNECESSARY : CHECKSUM_NONE);

        for (;;) {
                skb_fill_page_desc(skb, skb_shinfo(skb)->nr_frags,
                                   rx_buf->page, rx_buf->page_offset,
                                   rx_buf->len);
                rx_buf->page = NULL;
                skb->len += rx_buf->len;
                if (skb_shinfo(skb)->nr_frags == n_frags)
                        break;

                rx_buf = efx_rx_buf_next(&channel->rx_queue, rx_buf);
        }

        skb->data_len = skb->len;
        skb->truesize += n_frags * efx->rx_buffer_truesize;

        skb_record_rx_queue(skb, channel->rx_queue.core_index);

        gro_result = napi_gro_frags(napi);
        if (gro_result != GRO_DROP)
                channel->irq_mod_score += 2;
}

/* Allocate and construct an SKB around page fragments */
static struct sk_buff *efx_rx_mk_skb(struct efx_channel *channel,
                                     struct efx_rx_buffer *rx_buf,
                                     unsigned int n_frags,
                                     u8 *eh, int hdr_len)
{
        struct efx_nic *efx = channel->efx;
        struct sk_buff *skb;

        /* Allocate an SKB to store the headers */
        skb = netdev_alloc_skb(efx->net_dev,
                               efx->rx_ip_align + efx->rx_prefix_size +
                               hdr_len);
        if (unlikely(skb == NULL))
                return NULL;

        EFX_BUG_ON_PARANOID(rx_buf->len < hdr_len);

        memcpy(skb->data + efx->rx_ip_align, eh - efx->rx_prefix_size,
               efx->rx_prefix_size + hdr_len);
        skb_reserve(skb, efx->rx_ip_align + efx->rx_prefix_size);
        __skb_put(skb, hdr_len);

        /* Append the remaining page(s) onto the frag list */
        if (rx_buf->len > hdr_len) {
                rx_buf->page_offset += hdr_len;
                rx_buf->len -= hdr_len;

                for (;;) {
                        skb_fill_page_desc(skb, skb_shinfo(skb)->nr_frags,
                                           rx_buf->page, rx_buf->page_offset,
                                           rx_buf->len);
                        rx_buf->page = NULL;
                        skb->len += rx_buf->len;
                        skb->data_len += rx_buf->len;
                        if (skb_shinfo(skb)->nr_frags == n_frags)
                                break;

                        rx_buf = efx_rx_buf_next(&channel->rx_queue, rx_buf);
                }
        } else {
                __free_pages(rx_buf->page, efx->rx_buffer_order);
                rx_buf->page = NULL;
                n_frags = 0;
        }

        skb->truesize += n_frags * efx->rx_buffer_truesize;

        /* Move past the ethernet header */
        skb->protocol = eth_type_trans(skb, efx->net_dev);

        return skb;
}

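/* Handle a received packet. First half: runs from event-handling context
 * and must not touch the packet payload; it validates the completion,
 * syncs the DMA mappings, recycles the pages and queues the packet for
 * __efx_rx_packet() via the channel's pipeline slot.
 */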
void efx_rx_packet(struct efx_rx_queue *rx_queue, unsigned int index,
                   unsigned int n_frags, unsigned int len, u16 flags)
{
        struct efx_nic *efx = rx_queue->efx;
        struct efx_channel *channel = efx_rx_queue_channel(rx_queue);
        struct efx_rx_buffer *rx_buf;

        rx_buf = efx_rx_buffer(rx_queue, index);
        rx_buf->flags |= flags;

        /* Validate the number of fragments and completed length */
        if (n_frags == 1) {
                if (!(flags & EFX_RX_PKT_PREFIX_LEN))
                        efx_rx_packet__check_len(rx_queue, rx_buf, len);
        } else if (unlikely(n_frags > EFX_RX_MAX_FRAGS) ||
                   unlikely(len <= (n_frags - 1) * efx->rx_dma_len) ||
                   unlikely(len > n_frags * efx->rx_dma_len) ||
                   unlikely(!efx->rx_scatter)) {
                /* If this isn't an explicit discard request, either
                 * the hardware or the driver is broken.
                 */
                WARN_ON(!(len == 0 && rx_buf->flags & EFX_RX_PKT_DISCARD));
                rx_buf->flags |= EFX_RX_PKT_DISCARD;
        }

        netif_vdbg(efx, rx_status, efx->net_dev,
                   "RX queue %d received ids %x-%x len %d %s%s\n",
                   efx_rx_queue_index(rx_queue), index,
                   (index + n_frags - 1) & rx_queue->ptr_mask, len,
                   (rx_buf->flags & EFX_RX_PKT_CSUMMED) ? " [SUMMED]" : "",
                   (rx_buf->flags & EFX_RX_PKT_DISCARD) ? " [DISCARD]" : "");

        /* Discard packet, if instructed to do so. Process the
         * previous receive first.
         */
        if (unlikely(rx_buf->flags & EFX_RX_PKT_DISCARD)) {
                efx_rx_flush_packet(channel);
                efx_discard_rx_packet(channel, rx_buf, n_frags);
                return;
        }

        if (n_frags == 1 && !(flags & EFX_RX_PKT_PREFIX_LEN))
                rx_buf->len = len;

        /* Release and/or sync the DMA mapping - assumes all RX buffers
         * consumed in-order per RX queue.
         */
        efx_sync_rx_buffer(efx, rx_buf, rx_buf->len);

        /* Prefetch nice and early so data will (hopefully) be in cache by
         * the time we look at it.
         */
        prefetch(efx_rx_buf_va(rx_buf));

        rx_buf->page_offset += efx->rx_prefix_size;
        rx_buf->len -= efx->rx_prefix_size;

        if (n_frags > 1) {
                /* Release/sync DMA mapping for additional fragments.
                 * Fix length for last fragment.
                 */
                unsigned int tail_frags = n_frags - 1;

                for (;;) {
                        rx_buf = efx_rx_buf_next(rx_queue, rx_buf);
                        if (--tail_frags == 0)
                                break;
                        efx_sync_rx_buffer(efx, rx_buf, efx->rx_dma_len);
                }
                rx_buf->len = len - (n_frags - 1) * efx->rx_dma_len;
                efx_sync_rx_buffer(efx, rx_buf, rx_buf->len);
        }

        /* All fragments have been DMA-synced, so recycle pages. */
        rx_buf = efx_rx_buffer(rx_queue, index);
        efx_recycle_rx_pages(channel, rx_buf, n_frags);

        /* Pipeline receives so that we give time for packet headers to be
         * prefetched into cache.
         */
        efx_rx_flush_packet(channel);
        channel->rx_pkt_n_frags = n_frags;
        channel->rx_pkt_index = index;
}

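/* Deliver a packet as a normal skb (the non-GRO path): copy the headers
 * into a fresh skb, attach the remaining page fragments and hand the
 * result to the stack (or to the channel's own receive_skb handler).
 */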
static void efx_rx_deliver(struct efx_channel *channel, u8 *eh,
                           struct efx_rx_buffer *rx_buf,
                           unsigned int n_frags)
{
        struct sk_buff *skb;
        u16 hdr_len = min_t(u16, rx_buf->len, EFX_SKB_HEADERS);

        skb = efx_rx_mk_skb(channel, rx_buf, n_frags, eh, hdr_len);
        if (unlikely(skb == NULL)) {
                efx_free_rx_buffer(rx_buf);
                return;
        }
        skb_record_rx_queue(skb, channel->rx_queue.core_index);

        /* Set the SKB flags */
        skb_checksum_none_assert(skb);
        if (likely(rx_buf->flags & EFX_RX_PKT_CSUMMED))
                skb->ip_summed = CHECKSUM_UNNECESSARY;

        efx_rx_skb_attach_timestamp(channel, skb);

        if (channel->type->receive_skb)
                if (channel->type->receive_skb(channel, skb))
                        return;

        /* Pass the packet up */
        netif_receive_skb(skb);
}

/* Handle a received packet. Second half: Touches packet payload. */
void __efx_rx_packet(struct efx_channel *channel)
{
        struct efx_nic *efx = channel->efx;
        struct efx_rx_buffer *rx_buf =
                efx_rx_buffer(&channel->rx_queue, channel->rx_pkt_index);
        u8 *eh = efx_rx_buf_va(rx_buf);

        /* Read length from the prefix if necessary. This already
         * excludes the length of the prefix itself.
         */
        if (rx_buf->flags & EFX_RX_PKT_PREFIX_LEN)
                rx_buf->len = le16_to_cpup((__le16 *)
                                           (eh + efx->rx_packet_len_offset));

        /* If we're in loopback test, then pass the packet directly to the
         * loopback layer, and free the rx_buf here
         */
        if (unlikely(efx->loopback_selftest)) {
                efx_loopback_rx_packet(efx, eh, rx_buf->len);
                efx_free_rx_buffer(rx_buf);
                goto out;
        }

        if (unlikely(!(efx->net_dev->features & NETIF_F_RXCSUM)))
                rx_buf->flags &= ~EFX_RX_PKT_CSUMMED;

        if ((rx_buf->flags & EFX_RX_PKT_TCP) && !channel->type->receive_skb)
                efx_rx_packet_gro(channel, rx_buf, channel->rx_pkt_n_frags, eh);
        else
                efx_rx_deliver(channel, eh, rx_buf, channel->rx_pkt_n_frags);
 out:
        channel->rx_pkt_n_frags = 0;
}

int efx_probe_rx_queue(struct efx_rx_queue *rx_queue)
{
        struct efx_nic *efx = rx_queue->efx;
        unsigned int entries;
        int rc;

        /* Create the smallest power-of-two aligned ring */
        entries = max(roundup_pow_of_two(efx->rxq_entries), EFX_MIN_DMAQ_SIZE);
        EFX_BUG_ON_PARANOID(entries > EFX_MAX_DMAQ_SIZE);
        rx_queue->ptr_mask = entries - 1;

        netif_dbg(efx, probe, efx->net_dev,
                  "creating RX queue %d size %#x mask %#x\n",
                  efx_rx_queue_index(rx_queue), efx->rxq_entries,
                  rx_queue->ptr_mask);

        /* Allocate RX buffers */
        rx_queue->buffer = kcalloc(entries, sizeof(*rx_queue->buffer),
                                   GFP_KERNEL);
        if (!rx_queue->buffer)
                return -ENOMEM;

        rc = efx_nic_probe_rx(rx_queue);
        if (rc) {
                kfree(rx_queue->buffer);
                rx_queue->buffer = NULL;
        }

        return rc;
}

static void efx_init_rx_recycle_ring(struct efx_nic *efx,
                                     struct efx_rx_queue *rx_queue)
{
        unsigned int bufs_in_recycle_ring, page_ring_size;

        /* Set the RX recycle ring size */
#ifdef CONFIG_PPC64
        bufs_in_recycle_ring = EFX_RECYCLE_RING_SIZE_IOMMU;
#else
        if (iommu_present(&pci_bus_type))
                bufs_in_recycle_ring = EFX_RECYCLE_RING_SIZE_IOMMU;
        else
                bufs_in_recycle_ring = EFX_RECYCLE_RING_SIZE_NOIOMMU;
#endif /* CONFIG_PPC64 */

        page_ring_size = roundup_pow_of_two(bufs_in_recycle_ring /
                                            efx->rx_bufs_per_page);
        rx_queue->page_ring = kcalloc(page_ring_size,
                                      sizeof(*rx_queue->page_ring), GFP_KERNEL);
        rx_queue->page_ptr_mask = page_ring_size - 1;
}

void efx_init_rx_queue(struct efx_rx_queue *rx_queue)
{
        struct efx_nic *efx = rx_queue->efx;
        unsigned int max_fill, trigger, max_trigger;

        netif_dbg(rx_queue->efx, drv, rx_queue->efx->net_dev,
                  "initialising RX queue %d\n", efx_rx_queue_index(rx_queue));

        /* Initialise ptr fields */
        rx_queue->added_count = 0;
        rx_queue->notified_count = 0;
        rx_queue->removed_count = 0;
        rx_queue->min_fill = -1U;
        efx_init_rx_recycle_ring(efx, rx_queue);

        rx_queue->page_remove = 0;
        rx_queue->page_add = rx_queue->page_ptr_mask + 1;
        rx_queue->page_recycle_count = 0;
        rx_queue->page_recycle_failed = 0;
        rx_queue->page_recycle_full = 0;

        /* Initialise limit fields */
        max_fill = efx->rxq_entries - EFX_RXD_HEAD_ROOM;
        max_trigger =
                max_fill - efx->rx_pages_per_batch * efx->rx_bufs_per_page;
        if (rx_refill_threshold != 0) {
                trigger = max_fill * min(rx_refill_threshold, 100U) / 100U;
                if (trigger > max_trigger)
                        trigger = max_trigger;
        } else {
                trigger = max_trigger;
        }

        rx_queue->max_fill = max_fill;
        rx_queue->fast_fill_trigger = trigger;
        rx_queue->refill_enabled = true;

        /* Set up RX descriptor ring */
        efx_nic_init_rx(rx_queue);
}

void efx_fini_rx_queue(struct efx_rx_queue *rx_queue)
{
        int i;
        struct efx_nic *efx = rx_queue->efx;
        struct efx_rx_buffer *rx_buf;

        netif_dbg(rx_queue->efx, drv, rx_queue->efx->net_dev,
                  "shutting down RX queue %d\n", efx_rx_queue_index(rx_queue));

        del_timer_sync(&rx_queue->slow_fill);

        /* Release RX buffers from the current read ptr to the write ptr */
        if (rx_queue->buffer) {
                for (i = rx_queue->removed_count; i < rx_queue->added_count;
                     i++) {
                        unsigned index = i & rx_queue->ptr_mask;
                        rx_buf = efx_rx_buffer(rx_queue, index);
                        efx_fini_rx_buffer(rx_queue, rx_buf);
                }
        }

        /* Unmap and release the pages in the recycle ring. Remove the ring. */
        for (i = 0; i <= rx_queue->page_ptr_mask; i++) {
                struct page *page = rx_queue->page_ring[i];
                struct efx_rx_page_state *state;

                if (page == NULL)
                        continue;

                state = page_address(page);
                dma_unmap_page(&efx->pci_dev->dev, state->dma_addr,
                               PAGE_SIZE << efx->rx_buffer_order,
                               DMA_FROM_DEVICE);
                put_page(page);
        }
        kfree(rx_queue->page_ring);
        rx_queue->page_ring = NULL;
}

void efx_remove_rx_queue(struct efx_rx_queue *rx_queue)
{
        netif_dbg(rx_queue->efx, drv, rx_queue->efx->net_dev,
                  "destroying RX queue %d\n", efx_rx_queue_index(rx_queue));

        efx_nic_remove_rx(rx_queue);

        kfree(rx_queue->buffer);
        rx_queue->buffer = NULL;
}

module_param(rx_refill_threshold, uint, 0444);
MODULE_PARM_DESC(rx_refill_threshold,
                 "RX descriptor ring refill threshold (%)");

#ifdef CONFIG_RFS_ACCEL

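/* ndo_rx_flow_steer handler: build an IPv4/IPv6 5-tuple filter from the
 * skb headers and insert it so that future packets of this flow are
 * steered to the RX queue polled by the consuming CPU.
 */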
int efx_filter_rfs(struct net_device *net_dev, const struct sk_buff *skb,
                   u16 rxq_index, u32 flow_id)
{
        struct efx_nic *efx = netdev_priv(net_dev);
        struct efx_channel *channel;
        struct efx_filter_spec spec;
        const __be16 *ports;
        __be16 ether_type;
        int nhoff;
        int rc;

        /* The core RPS/RFS code has already parsed and validated
         * VLAN, IP and transport headers. We assume they are in the
         * header area.
         */

        if (skb->protocol == htons(ETH_P_8021Q)) {
                const struct vlan_hdr *vh =
                        (const struct vlan_hdr *)skb->data;

                /* We can't filter on the IP 5-tuple and the vlan
                 * together, so just strip the vlan header and filter
                 * on the IP part.
                 */
                EFX_BUG_ON_PARANOID(skb_headlen(skb) < sizeof(*vh));
                ether_type = vh->h_vlan_encapsulated_proto;
                nhoff = sizeof(struct vlan_hdr);
        } else {
                ether_type = skb->protocol;
                nhoff = 0;
        }

        if (ether_type != htons(ETH_P_IP) && ether_type != htons(ETH_P_IPV6))
                return -EPROTONOSUPPORT;

        efx_filter_init_rx(&spec, EFX_FILTER_PRI_HINT,
                           efx->rx_scatter ? EFX_FILTER_FLAG_RX_SCATTER : 0,
                           rxq_index);
        spec.match_flags =
                EFX_FILTER_MATCH_ETHER_TYPE | EFX_FILTER_MATCH_IP_PROTO |
                EFX_FILTER_MATCH_LOC_HOST | EFX_FILTER_MATCH_LOC_PORT |
                EFX_FILTER_MATCH_REM_HOST | EFX_FILTER_MATCH_REM_PORT;
        spec.ether_type = ether_type;

        if (ether_type == htons(ETH_P_IP)) {
                const struct iphdr *ip =
                        (const struct iphdr *)(skb->data + nhoff);

                EFX_BUG_ON_PARANOID(skb_headlen(skb) < nhoff + sizeof(*ip));
                if (ip_is_fragment(ip))
                        return -EPROTONOSUPPORT;
                spec.ip_proto = ip->protocol;
                spec.rem_host[0] = ip->saddr;
                spec.loc_host[0] = ip->daddr;
                EFX_BUG_ON_PARANOID(skb_headlen(skb) < nhoff + 4 * ip->ihl + 4);
                ports = (const __be16 *)(skb->data + nhoff + 4 * ip->ihl);
        } else {
                const struct ipv6hdr *ip6 =
                        (const struct ipv6hdr *)(skb->data + nhoff);

                EFX_BUG_ON_PARANOID(skb_headlen(skb) <
                                    nhoff + sizeof(*ip6) + 4);
                spec.ip_proto = ip6->nexthdr;
                memcpy(spec.rem_host, &ip6->saddr, sizeof(ip6->saddr));
                memcpy(spec.loc_host, &ip6->daddr, sizeof(ip6->daddr));
                ports = (const __be16 *)(ip6 + 1);
        }

        spec.rem_port = ports[0];
        spec.loc_port = ports[1];

        rc = efx->type->filter_rfs_insert(efx, &spec);
        if (rc < 0)
                return rc;

        /* Remember this so we can check whether to expire the filter later */
        efx->rps_flow_id[rc] = flow_id;
        channel = efx_get_channel(efx, skb_get_rx_queue(skb));
        ++channel->rfs_filters_added;

        if (ether_type == htons(ETH_P_IP))
                netif_info(efx, rx_status, efx->net_dev,
                           "steering %s %pI4:%u:%pI4:%u to queue %u [flow %u filter %d]\n",
                           (spec.ip_proto == IPPROTO_TCP) ? "TCP" : "UDP",
                           spec.rem_host, ntohs(ports[0]), spec.loc_host,
                           ntohs(ports[1]), rxq_index, flow_id, rc);
        else
                netif_info(efx, rx_status, efx->net_dev,
                           "steering %s [%pI6]:%u:[%pI6]:%u to queue %u [flow %u filter %d]\n",
                           (spec.ip_proto == IPPROTO_TCP) ? "TCP" : "UDP",
                           spec.rem_host, ntohs(ports[0]), spec.loc_host,
                           ntohs(ports[1]), rxq_index, flow_id, rc);

        return rc;
}

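/* Scan up to quota RFS filter slots and remove any whose flows the core
 * RFS code reports as no longer active. Returns false (without scanning)
 * if another expiry pass already holds the filter lock.
 */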
bool __efx_filter_rfs_expire(struct efx_nic *efx, unsigned int quota)
{
        bool (*expire_one)(struct efx_nic *efx, u32 flow_id, unsigned int index);
        unsigned int index, size;
        u32 flow_id;

        if (!spin_trylock_bh(&efx->filter_lock))
                return false;

        expire_one = efx->type->filter_rfs_expire_one;
        index = efx->rps_expire_index;
        size = efx->type->max_rx_ip_filters;
        while (quota--) {
                flow_id = efx->rps_flow_id[index];
                if (expire_one(efx, flow_id, index))
                        netif_info(efx, rx_status, efx->net_dev,
                                   "expired filter %d [flow %u]\n",
                                   index, flow_id);
                if (++index == size)
                        index = 0;
        }
        efx->rps_expire_index = index;

        spin_unlock_bh(&efx->filter_lock);
        return true;
}

#endif /* CONFIG_RFS_ACCEL */

/**
 * efx_filter_is_mc_recipient - test whether spec is a multicast recipient
 * @spec: Specification to test
 *
 * Return: %true if the specification is a non-drop RX filter that
 * matches a local MAC address I/G bit value of 1 or matches a local
 * IPv4 or IPv6 address value in the respective multicast address
 * range. Otherwise %false.
 */
bool efx_filter_is_mc_recipient(const struct efx_filter_spec *spec)
{
        if (!(spec->flags & EFX_FILTER_FLAG_RX) ||
            spec->dmaq_id == EFX_FILTER_RX_DMAQ_ID_DROP)
                return false;

        if (spec->match_flags &
            (EFX_FILTER_MATCH_LOC_MAC | EFX_FILTER_MATCH_LOC_MAC_IG) &&
            is_multicast_ether_addr(spec->loc_mac))
                return true;

        if ((spec->match_flags &
             (EFX_FILTER_MATCH_ETHER_TYPE | EFX_FILTER_MATCH_LOC_HOST)) ==
            (EFX_FILTER_MATCH_ETHER_TYPE | EFX_FILTER_MATCH_LOC_HOST)) {
                if (spec->ether_type == htons(ETH_P_IP) &&
                    ipv4_is_multicast(spec->loc_host[0]))
                        return true;
                if (spec->ether_type == htons(ETH_P_IPV6) &&
                    ((const u8 *)spec->loc_host)[0] == 0xff)
                        return true;
        }

        return false;
}