i40e_txrx.h

/*******************************************************************************
 *
 * Intel Ethernet Controller XL710 Family Linux Virtual Function Driver
 * Copyright(c) 2013 - 2016 Intel Corporation.
 *
 * This program is free software; you can redistribute it and/or modify it
 * under the terms and conditions of the GNU General Public License,
 * version 2, as published by the Free Software Foundation.
 *
 * This program is distributed in the hope it will be useful, but WITHOUT
 * ANY WARRANTY; without even the implied warranty of MERCHANTABILITY or
 * FITNESS FOR A PARTICULAR PURPOSE.  See the GNU General Public License for
 * more details.
 *
 * You should have received a copy of the GNU General Public License along
 * with this program.  If not, see <http://www.gnu.org/licenses/>.
 *
 * The full GNU General Public License is included in this distribution in
 * the file called "COPYING".
 *
 * Contact Information:
 * e1000-devel Mailing List <e1000-devel@lists.sourceforge.net>
 * Intel Corporation, 5200 N.E. Elam Young Parkway, Hillsboro, OR 97124-6497
 *
 ******************************************************************************/
#ifndef _I40E_TXRX_H_
#define _I40E_TXRX_H_

/* Interrupt Throttling and Rate Limiting Goodies */

#define I40E_MAX_ITR            0x0FF0  /* reg uses 2 usec resolution */
#define I40E_MIN_ITR            0x0001  /* reg uses 2 usec resolution */
#define I40E_ITR_100K           0x0005
#define I40E_ITR_50K            0x000A
#define I40E_ITR_20K            0x0019
#define I40E_ITR_18K            0x001B
#define I40E_ITR_8K             0x003E
#define I40E_ITR_4K             0x007A
#define I40E_MAX_INTRL          0x3B    /* reg uses 4 usec resolution */
#define I40E_ITR_RX_DEF         I40E_ITR_20K
#define I40E_ITR_TX_DEF         I40E_ITR_20K
#define I40E_ITR_DYNAMIC        0x8000  /* use top bit as a flag */
#define I40E_MIN_INT_RATE       250     /* ~= 1000000 / (I40E_MAX_ITR * 2) */
#define I40E_MAX_INT_RATE       500000  /* == 1000000 / (I40E_MIN_ITR * 2) */
#define I40E_DEFAULT_IRQ_WORK   256

#define ITR_TO_REG(setting) ((setting & ~I40E_ITR_DYNAMIC) >> 1)
#define ITR_IS_DYNAMIC(setting) (!!(setting & I40E_ITR_DYNAMIC))
#define ITR_REG_TO_USEC(itr_reg) (itr_reg << 1)
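
/* Worked example (illustrative, not part of the driver): a stored
 * setting of (I40E_ITR_DYNAMIC | 0x000A) is flagged dynamic and
 * programs 0x0005 into the 2 usec resolution register:
 *	ITR_IS_DYNAMIC(0x800A)  == 1
 *	ITR_TO_REG(0x800A)      == 0x0005
 *	ITR_REG_TO_USEC(0x0005) == 10
 */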
/* 0x40 is the enable bit for interrupt rate limiting, and must be set if
 * the value of the rate limit is non-zero
 */
#define INTRL_ENA               BIT(6)
#define INTRL_REG_TO_USEC(intrl) ((intrl & ~INTRL_ENA) << 2)
#define INTRL_USEC_TO_REG(set) ((set) ? ((set) >> 2) | INTRL_ENA : 0)
#define I40E_INTRL_8K           125     /* 8000 ints/sec */
#define I40E_INTRL_62K          16      /* 62500 ints/sec */
#define I40E_INTRL_83K          12      /* 83333 ints/sec */
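
/* Worked example (illustrative): I40E_INTRL_8K is 125 usec, i.e. 8000
 * interrupts/sec.  Round-tripping through the macros above:
 *	INTRL_USEC_TO_REG(125)  == (125 >> 2) | INTRL_ENA == 0x1F | 0x40 == 0x5F
 *	INTRL_REG_TO_USEC(0x5F) == (0x5F & ~0x40) << 2    == 124
 * The 4 usec register resolution drops the low two bits (125 -> 124).
 */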
#define I40E_QUEUE_END_OF_LIST 0x7FF

/* This enum matches hardware bits and is meant to be used by DYN_CTLN
 * registers and QINT registers, or more generally anywhere in the manual
 * that mentions ITR_INDX.  ITR_NONE cannot be used as an index 'n' into
 * any register; instead it is a special value meaning "don't update"
 * ITR0/1/2.
 */
enum i40e_dyn_idx_t {
	I40E_IDX_ITR0 = 0,
	I40E_IDX_ITR1 = 1,
	I40E_IDX_ITR2 = 2,
	I40E_ITR_NONE = 3	/* ITR_NONE must not be used as an index */
};

/* these are indexes into ITRN registers */
#define I40E_RX_ITR	I40E_IDX_ITR0
#define I40E_TX_ITR	I40E_IDX_ITR1
#define I40E_PE_ITR	I40E_IDX_ITR2

/* Supported RSS offloads */
#define I40E_DEFAULT_RSS_HENA ( \
	BIT_ULL(I40E_FILTER_PCTYPE_NONF_IPV4_UDP) | \
	BIT_ULL(I40E_FILTER_PCTYPE_NONF_IPV4_SCTP) | \
	BIT_ULL(I40E_FILTER_PCTYPE_NONF_IPV4_TCP) | \
	BIT_ULL(I40E_FILTER_PCTYPE_NONF_IPV4_OTHER) | \
	BIT_ULL(I40E_FILTER_PCTYPE_FRAG_IPV4) | \
	BIT_ULL(I40E_FILTER_PCTYPE_NONF_IPV6_UDP) | \
	BIT_ULL(I40E_FILTER_PCTYPE_NONF_IPV6_TCP) | \
	BIT_ULL(I40E_FILTER_PCTYPE_NONF_IPV6_SCTP) | \
	BIT_ULL(I40E_FILTER_PCTYPE_NONF_IPV6_OTHER) | \
	BIT_ULL(I40E_FILTER_PCTYPE_FRAG_IPV6) | \
	BIT_ULL(I40E_FILTER_PCTYPE_L2_PAYLOAD))

#define I40E_DEFAULT_RSS_HENA_EXPANDED (I40E_DEFAULT_RSS_HENA | \
	BIT_ULL(I40E_FILTER_PCTYPE_NONF_IPV4_TCP_SYN_NO_ACK) | \
	BIT_ULL(I40E_FILTER_PCTYPE_NONF_UNICAST_IPV4_UDP) | \
	BIT_ULL(I40E_FILTER_PCTYPE_NONF_MULTICAST_IPV4_UDP) | \
	BIT_ULL(I40E_FILTER_PCTYPE_NONF_IPV6_TCP_SYN_NO_ACK) | \
	BIT_ULL(I40E_FILTER_PCTYPE_NONF_UNICAST_IPV6_UDP) | \
	BIT_ULL(I40E_FILTER_PCTYPE_NONF_MULTICAST_IPV6_UDP))

/* Supported Rx Buffer Sizes (a multiple of 128) */
#define I40E_RXBUFFER_256   256
#define I40E_RXBUFFER_1536  1536  /* 128B aligned standard Ethernet frame */
#define I40E_RXBUFFER_2048  2048
#define I40E_RXBUFFER_3072  3072  /* Used for large frames w/ padding */
#define I40E_MAX_RXBUFFER   9728  /* largest size for single descriptor */

/* NOTE: netdev_alloc_skb reserves up to 64 bytes, NET_IP_ALIGN means we
 * reserve 2 more, and skb_shared_info adds an additional 384 bytes more,
 * this adds up to 512 bytes of extra data meaning the smallest
 * allocation we could have is 1K.
 * i.e. RXBUFFER_256 --> 960 byte skb (size-1024 slab)
 * i.e. RXBUFFER_512 --> 1216 byte skb (size-2048 slab)
 */
#define I40E_RX_HDR_SIZE I40E_RXBUFFER_256
#define I40E_PACKET_HDR_PAD (ETH_HLEN + ETH_FCS_LEN + (VLAN_HLEN * 2))
#define i40e_rx_desc i40e_32byte_rx_desc

#define I40E_RX_DMA_ATTR \
	(DMA_ATTR_SKIP_CPU_SYNC | DMA_ATTR_WEAK_ORDERING)

/* Attempt to maximize the headroom available for incoming frames.  We
 * use a 2K buffer for receives and need 1536/1534 to store the data for
 * the frame.  This leaves us with 512 bytes of room.  From that we need
 * to deduct the space needed for the shared info and the padding needed
 * to IP align the frame.
 *
 * Note: For cache line sizes 256 or larger this value is going to end
 *	 up negative.  In these cases we should fall back to the legacy
 *	 receive path.
 */
#if (PAGE_SIZE < 8192)
#define I40E_2K_TOO_SMALL_WITH_PADDING \
((NET_SKB_PAD + I40E_RXBUFFER_1536) > SKB_WITH_OVERHEAD(I40E_RXBUFFER_2048))

static inline int i40e_compute_pad(int rx_buf_len)
{
	int page_size, pad_size;

	page_size = ALIGN(rx_buf_len, PAGE_SIZE / 2);
	pad_size = SKB_WITH_OVERHEAD(page_size) - rx_buf_len;

	return pad_size;
}

static inline int i40e_skb_pad(void)
{
	int rx_buf_len;

	/* If a 2K buffer cannot handle a standard Ethernet frame then
	 * optimize padding for a 3K buffer instead of a 1.5K buffer.
	 *
	 * For a 3K buffer we need to add enough padding to allow for
	 * tailroom due to NET_IP_ALIGN possibly shifting us out of
	 * cache-line alignment.
	 */
	if (I40E_2K_TOO_SMALL_WITH_PADDING)
		rx_buf_len = I40E_RXBUFFER_3072 + SKB_DATA_ALIGN(NET_IP_ALIGN);
	else
		rx_buf_len = I40E_RXBUFFER_1536;

	/* if needed make room for NET_IP_ALIGN */
	rx_buf_len -= NET_IP_ALIGN;

	return i40e_compute_pad(rx_buf_len);
}

#define I40E_SKB_PAD i40e_skb_pad()
#else
#define I40E_2K_TOO_SMALL_WITH_PADDING false
#define I40E_SKB_PAD (NET_SKB_PAD + NET_IP_ALIGN)
#endif
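
/* Worked example (illustrative; the exact numbers depend on kernel
 * config, since NET_SKB_PAD, NET_IP_ALIGN and sizeof(skb_shared_info)
 * all vary): with 4K pages and a 64 byte cache line, NET_SKB_PAD (64)
 * plus I40E_RXBUFFER_1536 fits within SKB_WITH_OVERHEAD(2048), so
 * I40E_2K_TOO_SMALL_WITH_PADDING is false and i40e_skb_pad() computes
 * SKB_WITH_OVERHEAD(2048) - (1536 - NET_IP_ALIGN), leaving roughly
 * 190-200 bytes of headroom ahead of the received frame.
 */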
/**
 * i40e_test_staterr - tests bits in Rx descriptor status and error fields
 * @rx_desc: pointer to receive descriptor (in le64 format)
 * @stat_err_bits: value to mask
 *
 * This function does some fast chicanery in order to return the
 * value of the mask which is really only used for boolean tests.
 * The status_error_len doesn't need to be shifted because it begins
 * at offset zero.
 */
static inline bool i40e_test_staterr(union i40e_rx_desc *rx_desc,
				     const u64 stat_err_bits)
{
	return !!(rx_desc->wb.qword1.status_error_len &
		  cpu_to_le64(stat_err_bits));
}
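
/* Typical use (illustrative sketch): test the "descriptor done" bit
 * before reading the rest of the descriptor, e.g.
 *	if (!i40e_test_staterr(rx_desc,
 *			       BIT(I40E_RX_DESC_STATUS_DD_SHIFT)))
 *		break;
 */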
/* How many Rx Buffers do we bundle into one write to the hardware ? */
#define I40E_RX_BUFFER_WRITE	16	/* Must be power of 2 */
#define I40E_RX_INCREMENT(r, i) \
	do {					\
		(i)++;				\
		if ((i) == (r)->count)		\
			i = 0;			\
		r->next_to_clean = i;		\
	} while (0)

#define I40E_RX_NEXT_DESC(r, i, n)		\
	do {					\
		(i)++;				\
		if ((i) == (r)->count)		\
			i = 0;			\
		(n) = I40E_RX_DESC((r), (i));	\
	} while (0)

#define I40E_RX_NEXT_DESC_PREFETCH(r, i, n)		\
	do {						\
		I40E_RX_NEXT_DESC((r), (i), (n));	\
		prefetch((n));				\
	} while (0)

#define I40E_MAX_BUFFER_TXD	8
#define I40E_MIN_TX_LEN		17

/* The size limit for a transmit buffer in a descriptor is (16K - 1).
 * In order to align with the read requests we will align the value to
 * the nearest 4K which represents our maximum read request size.
 */
#define I40E_MAX_READ_REQ_SIZE		4096
#define I40E_MAX_DATA_PER_TXD		(16 * 1024 - 1)
#define I40E_MAX_DATA_PER_TXD_ALIGNED \
	(I40E_MAX_DATA_PER_TXD & ~(I40E_MAX_READ_REQ_SIZE - 1))
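
/* With the values above this works out to (illustrative arithmetic):
 *	I40E_MAX_DATA_PER_TXD		== 16383 (0x3FFF)
 *	I40E_MAX_DATA_PER_TXD_ALIGNED	== 16383 & ~4095 == 12288 (12K)
 */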
/**
 * i40e_txd_use_count - estimate the number of descriptors needed for Tx
 * @size: transmit request size in bytes
 *
 * Due to hardware alignment restrictions (4K alignment), we need to
 * assume that we can have no more than 12K of data per descriptor, even
 * though each descriptor can take up to 16K - 1 bytes of aligned memory.
 * Thus, we need to divide by 12K.  But division is slow!  Instead,
 * we decompose the operation into shifts and one relatively cheap
 * multiply operation.
 *
 * To divide by 12K, we first divide by 4K, then divide by 3:
 *     To divide by 4K, shift right by 12 bits
 *     To divide by 3, multiply by 85, then divide by 256
 *     (Divide by 256 is done by shifting right by 8 bits)
 * Finally, we add one to round up.  Because 256 isn't an exact multiple of
 * 3, we'll underestimate near each multiple of 12K.  This is actually more
 * accurate as we have 4K - 1 of wiggle room that we can fit into the last
 * segment.  For our purposes this is accurate out to 1M which is orders of
 * magnitude greater than our largest possible GSO size.
 *
 * This would then be implemented as:
 *     return (((size >> 12) * 85) >> 8) + 1;
 *
 * Since multiplication and division are commutative, we can reorder
 * operations into:
 *     return ((size * 85) >> 20) + 1;
 */
static inline unsigned int i40e_txd_use_count(unsigned int size)
{
	return ((size * 85) >> 20) + 1;
}
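
/* Worked example (illustrative): for a 32K send,
 *	i40e_txd_use_count(32768) == ((32768 * 85) >> 20) + 1
 *				  == (2785280 >> 20) + 1 == 2 + 1 == 3
 * which matches DIV_ROUND_UP(32768, 12288) == 3 descriptors.
 */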
/* Tx Descriptors needed, worst case */
#define DESC_NEEDED (MAX_SKB_FRAGS + 4)
#define I40E_MIN_DESC_PENDING	4

#define I40E_TX_FLAGS_HW_VLAN		BIT(1)
#define I40E_TX_FLAGS_SW_VLAN		BIT(2)
#define I40E_TX_FLAGS_TSO		BIT(3)
#define I40E_TX_FLAGS_IPV4		BIT(4)
#define I40E_TX_FLAGS_IPV6		BIT(5)
#define I40E_TX_FLAGS_FCCRC		BIT(6)
#define I40E_TX_FLAGS_FSO		BIT(7)
#define I40E_TX_FLAGS_FD_SB		BIT(9)
#define I40E_TX_FLAGS_VXLAN_TUNNEL	BIT(10)
#define I40E_TX_FLAGS_VLAN_MASK		0xffff0000
#define I40E_TX_FLAGS_VLAN_PRIO_MASK	0xe0000000
#define I40E_TX_FLAGS_VLAN_PRIO_SHIFT	29
#define I40E_TX_FLAGS_VLAN_SHIFT	16
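
/* The VLAN tag lives in the upper 16 bits of tx_flags.  A sketch of how
 * a tag would be stored and later recovered (illustrative only):
 *	tx_flags |= (vlan_tag << I40E_TX_FLAGS_VLAN_SHIFT) |
 *		    I40E_TX_FLAGS_HW_VLAN;
 *	vlan_tag  = (tx_flags & I40E_TX_FLAGS_VLAN_MASK) >>
 *		    I40E_TX_FLAGS_VLAN_SHIFT;
 */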
struct i40e_tx_buffer {
	struct i40e_tx_desc *next_to_watch;
	union {
		struct sk_buff *skb;
		void *raw_buf;
	};
	unsigned int bytecount;
	unsigned short gso_segs;

	DEFINE_DMA_UNMAP_ADDR(dma);
	DEFINE_DMA_UNMAP_LEN(len);
	u32 tx_flags;
};

struct i40e_rx_buffer {
	dma_addr_t dma;
	struct page *page;
#if (BITS_PER_LONG > 32) || (PAGE_SIZE >= 65536)
	__u32 page_offset;
#else
	__u16 page_offset;
#endif
	__u16 pagecnt_bias;
};

struct i40e_queue_stats {
	u64 packets;
	u64 bytes;
};

struct i40e_tx_queue_stats {
	u64 restart_queue;
	u64 tx_busy;
	u64 tx_done_old;
	u64 tx_linearize;
	u64 tx_force_wb;
	u64 tx_lost_interrupt;
};

struct i40e_rx_queue_stats {
	u64 non_eop_descs;
	u64 alloc_page_failed;
	u64 alloc_buff_failed;
	u64 page_reuse_count;
	u64 realloc_count;
};

enum i40e_ring_state_t {
	__I40E_TX_FDIR_INIT_DONE,
	__I40E_TX_XPS_INIT_DONE,
};

/* some useful defines for virtchannel interface, which
 * is the only remaining user of header split
 */
#define I40E_RX_DTYPE_NO_SPLIT      0
#define I40E_RX_DTYPE_HEADER_SPLIT  1
#define I40E_RX_DTYPE_SPLIT_ALWAYS  2
#define I40E_RX_SPLIT_L2      0x1
#define I40E_RX_SPLIT_IP      0x2
#define I40E_RX_SPLIT_TCP_UDP 0x4
#define I40E_RX_SPLIT_SCTP    0x8

/* struct that defines a descriptor ring, associated with a VSI */
struct i40e_ring {
	struct i40e_ring *next;		/* pointer to next ring in q_vector */
	void *desc;			/* Descriptor ring memory */
	struct device *dev;		/* Used for DMA mapping */
	struct net_device *netdev;	/* netdev ring maps to */
	union {
		struct i40e_tx_buffer *tx_bi;
		struct i40e_rx_buffer *rx_bi;
	};
	unsigned long state;
	u16 queue_index;		/* Queue number of ring */
	u8 dcb_tc;			/* Traffic class of ring */
	u8 __iomem *tail;

	/* high bit set means dynamic, use accessor routines to read/write.
	 * hardware only supports 2us resolution for the ITR registers.
	 * these values always store the USER setting, and must be converted
	 * before programming to a register.
	 */
	u16 rx_itr_setting;
	u16 tx_itr_setting;

	u16 count;			/* Number of descriptors */
	u16 reg_idx;			/* HW register index of the ring */
	u16 rx_buf_len;

	/* used in interrupt processing */
	u16 next_to_use;
	u16 next_to_clean;

	u8 atr_sample_rate;
	u8 atr_count;

	bool ring_active;		/* is ring online or not */
	bool arm_wb;			/* do something to arm write back */
	u8 packet_stride;

	u16 flags;
#define I40E_TXR_FLAGS_WB_ON_ITR		BIT(0)
#define I40E_RXR_FLAGS_BUILD_SKB_ENABLED	BIT(1)

	/* stats structs */
	struct i40e_queue_stats stats;
	struct u64_stats_sync syncp;
	union {
		struct i40e_tx_queue_stats tx_stats;
		struct i40e_rx_queue_stats rx_stats;
	};

	unsigned int size;		/* length of descriptor ring in bytes */
	dma_addr_t dma;			/* physical address of ring */

	struct i40e_vsi *vsi;		/* Backreference to associated VSI */
	struct i40e_q_vector *q_vector;	/* Backreference to associated vector */

	struct rcu_head rcu;		/* to avoid race on free */
	u16 next_to_alloc;
	struct sk_buff *skb;		/* When i40evf_clean_rx_ring_irq() must
					 * return before it sees the EOP for
					 * the current packet, we save that skb
					 * here and resume receiving this
					 * packet the next time
					 * i40evf_clean_rx_ring_irq() is called
					 * for this ring.
					 */
} ____cacheline_internodealigned_in_smp;

static inline bool ring_uses_build_skb(struct i40e_ring *ring)
{
	return !!(ring->flags & I40E_RXR_FLAGS_BUILD_SKB_ENABLED);
}

static inline void set_ring_build_skb_enabled(struct i40e_ring *ring)
{
	ring->flags |= I40E_RXR_FLAGS_BUILD_SKB_ENABLED;
}

static inline void clear_ring_build_skb_enabled(struct i40e_ring *ring)
{
	ring->flags &= ~I40E_RXR_FLAGS_BUILD_SKB_ENABLED;
}

enum i40e_latency_range {
	I40E_LOWEST_LATENCY = 0,
	I40E_LOW_LATENCY = 1,
	I40E_BULK_LATENCY = 2,
};

struct i40e_ring_container {
	/* array of pointers to rings */
	struct i40e_ring *ring;
	unsigned int total_bytes;	/* total bytes processed this int */
	unsigned int total_packets;	/* total packets processed this int */
	unsigned long last_itr_update;	/* jiffies of last ITR update */
	u16 count;
	enum i40e_latency_range latency_range;
	u16 itr;
};

/* iterator for handling rings in ring container */
#define i40e_for_each_ring(pos, head) \
	for (pos = (head).ring; pos != NULL; pos = pos->next)
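
/* Typical use (illustrative sketch; the clean function name here is
 * hypothetical): walk every Tx ring owned by a q_vector from NAPI poll:
 *	struct i40e_ring *ring;
 *	int cleaned = 0;
 *
 *	i40e_for_each_ring(ring, q_vector->tx)
 *		cleaned += clean_tx_ring_irq(ring, budget);
 */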
static inline unsigned int i40e_rx_pg_order(struct i40e_ring *ring)
{
#if (PAGE_SIZE < 8192)
	if (ring->rx_buf_len > (PAGE_SIZE / 2))
		return 1;
#endif
	return 0;
}
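
/* Illustrative values, assuming 4K pages: a 3K buffer (rx_buf_len ==
 * 3072) spills past half a page, so i40e_rx_pg_order() returns 1 and
 * i40e_rx_pg_size() becomes an order-1 (8K) allocation; a 2K buffer
 * stays within half a page and uses a single order-0 page.
 */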
#define i40e_rx_pg_size(_ring) (PAGE_SIZE << i40e_rx_pg_order(_ring))

bool i40evf_alloc_rx_buffers(struct i40e_ring *rxr, u16 cleaned_count);
netdev_tx_t i40evf_xmit_frame(struct sk_buff *skb, struct net_device *netdev);
void i40evf_clean_tx_ring(struct i40e_ring *tx_ring);
void i40evf_clean_rx_ring(struct i40e_ring *rx_ring);
int i40evf_setup_tx_descriptors(struct i40e_ring *tx_ring);
int i40evf_setup_rx_descriptors(struct i40e_ring *rx_ring);
void i40evf_free_tx_resources(struct i40e_ring *tx_ring);
void i40evf_free_rx_resources(struct i40e_ring *rx_ring);
int i40evf_napi_poll(struct napi_struct *napi, int budget);
void i40evf_force_wb(struct i40e_vsi *vsi, struct i40e_q_vector *q_vector);
u32 i40evf_get_tx_pending(struct i40e_ring *ring, bool in_sw);
int __i40evf_maybe_stop_tx(struct i40e_ring *tx_ring, int size);
bool __i40evf_chk_linearize(struct sk_buff *skb);

/**
 * i40e_xmit_descriptor_count - calculate number of Tx descriptors needed
 * @skb: send buffer
 *
 * Returns the number of data descriptors needed for this skb. Returns 0
 * to indicate there are not enough descriptors available in this ring,
 * since we need at least one descriptor.
 **/
static inline int i40e_xmit_descriptor_count(struct sk_buff *skb)
{
	const struct skb_frag_struct *frag = &skb_shinfo(skb)->frags[0];
	unsigned int nr_frags = skb_shinfo(skb)->nr_frags;
	int count = 0, size = skb_headlen(skb);

	for (;;) {
		count += i40e_txd_use_count(size);

		if (!nr_frags--)
			break;

		size = skb_frag_size(frag++);
	}

	return count;
}
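
/* Worked example (illustrative): a linear 1500 byte skb with no frags
 * makes a single pass through the loop, so
 *	count == i40e_txd_use_count(1500) == ((1500 * 85) >> 20) + 1 == 1
 */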
/**
 * i40e_maybe_stop_tx - 1st level check for Tx stop conditions
 * @tx_ring: the ring to be checked
 * @size: the number of descriptors we want to assure are available
 *
 * Returns 0 if stop is not needed
 **/
static inline int i40e_maybe_stop_tx(struct i40e_ring *tx_ring, int size)
{
	if (likely(I40E_DESC_UNUSED(tx_ring) >= size))
		return 0;
	return __i40evf_maybe_stop_tx(tx_ring, size);
}
/**
 * i40e_chk_linearize - Check if there are more than 8 fragments per packet
 * @skb: send buffer
 * @count: number of buffers used
 *
 * Note: Our HW can't scatter-gather more than 8 fragments to build
 * a packet on the wire and so we need to figure out the cases where we
 * need to linearize the skb.
 **/
static inline bool i40e_chk_linearize(struct sk_buff *skb, int count)
{
	/* Both TSO and single send will work if count is less than 8 */
	if (likely(count < I40E_MAX_BUFFER_TXD))
		return false;

	if (skb_is_gso(skb))
		return __i40evf_chk_linearize(skb);

	/* we can support up to 8 data buffers for a single send */
	return count != I40E_MAX_BUFFER_TXD;
}
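
/* Illustrative behaviour: a non-TSO skb spanning exactly 8 buffers
 * (count == I40E_MAX_BUFFER_TXD) is sent as-is, one spanning 9 buffers
 * returns true and must be linearized, and a TSO skb defers to
 * __i40evf_chk_linearize() to check the frag layout per segment.
 */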
/**
 * txring_txq - Find the netdev Tx ring based on the i40e ring
 * @ring: Tx ring to find the netdev equivalent of
 **/
static inline struct netdev_queue *txring_txq(const struct i40e_ring *ring)
{
	return netdev_get_tx_queue(ring->netdev, ring->queue_index);
}
#endif /* _I40E_TXRX_H_ */