ice_txrx.h

/* SPDX-License-Identifier: GPL-2.0 */
/* Copyright (c) 2018, Intel Corporation. */

#ifndef _ICE_TXRX_H_
#define _ICE_TXRX_H_

#define ICE_DFLT_IRQ_WORK	256
#define ICE_RXBUF_2048		2048
#define ICE_MAX_CHAINED_RX_BUFS	5
#define ICE_MAX_BUF_TXD		8
#define ICE_MIN_TX_LEN		17

/* The size limit for a transmit buffer in a descriptor is (16K - 1).
 * In order to align with the read requests we will align the value to
 * the nearest 4K, which represents our maximum read request size.
 */
#define ICE_MAX_READ_REQ_SIZE	4096
#define ICE_MAX_DATA_PER_TXD	(16 * 1024 - 1)
#define ICE_MAX_DATA_PER_TXD_ALIGNED \
	(~(ICE_MAX_READ_REQ_SIZE - 1) & ICE_MAX_DATA_PER_TXD)
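
/* For illustration: with ICE_MAX_DATA_PER_TXD = 16 * 1024 - 1 = 0x3FFF and
 * ICE_MAX_READ_REQ_SIZE = 4096, the mask ~(4096 - 1) = ~0xFFF clears the low
 * 12 bits, so ICE_MAX_DATA_PER_TXD_ALIGNED evaluates to 0x3FFF & ~0xFFF =
 * 0x3000, i.e. 12288 bytes per descriptor after rounding down to a 4K
 * boundary.
 */
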
#define ICE_RX_BUF_WRITE	16	/* Must be power of 2 */
#define ICE_MAX_TXQ_PER_TXQG	128

/* Tx Descriptors needed, worst case */
#define DESC_NEEDED (MAX_SKB_FRAGS + 4)

#define ICE_DESC_UNUSED(R) \
	((((R)->next_to_clean > (R)->next_to_use) ? 0 : (R)->count) + \
	(R)->next_to_clean - (R)->next_to_use - 1)
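
/* For illustration: on a ring with count = 512, next_to_clean = 4 and
 * next_to_use = 10, ICE_DESC_UNUSED() yields 512 + 4 - 10 - 1 = 505 free
 * descriptors; with next_to_clean = 10 and next_to_use = 4 it yields
 * 0 + 10 - 4 - 1 = 5. The trailing "- 1" keeps one descriptor permanently
 * unused so that next_to_use == next_to_clean can only mean "ring empty",
 * never "ring full".
 */
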
#define ICE_TX_FLAGS_TSO	BIT(0)
#define ICE_TX_FLAGS_HW_VLAN	BIT(1)
#define ICE_TX_FLAGS_SW_VLAN	BIT(2)
#define ICE_TX_FLAGS_VLAN_M	0xffff0000
#define ICE_TX_FLAGS_VLAN_S	16
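
/* Sketch of how the VLAN mask/shift above would typically be used (the
 * actual encode/decode lives in the driver's .c files, not in this header):
 * a tag is carried in the upper 16 bits of ice_tx_buf::tx_flags, e.g.
 *
 *	tx_flags |= (u32)vlan_tag << ICE_TX_FLAGS_VLAN_S;
 *
 * and recovered with (tx_flags & ICE_TX_FLAGS_VLAN_M) >> ICE_TX_FLAGS_VLAN_S.
 */
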
struct ice_tx_buf {
	struct ice_tx_desc *next_to_watch;
	struct sk_buff *skb;
	unsigned int bytecount;
	unsigned short gso_segs;
	u32 tx_flags;
	DEFINE_DMA_UNMAP_ADDR(dma);
	DEFINE_DMA_UNMAP_LEN(len);
};

struct ice_tx_offload_params {
	u8 header_len;
	u32 td_cmd;
	u32 td_offset;
	u32 td_l2tag1;
	u16 cd_l2tag2;
	u32 cd_tunnel_params;
	u64 cd_qw1;
	struct ice_ring *tx_ring;
};

struct ice_rx_buf {
	struct sk_buff *skb;
	dma_addr_t dma;
	struct page *page;
	unsigned int page_offset;
};

struct ice_q_stats {
	u64 pkts;
	u64 bytes;
};

struct ice_txq_stats {
	u64 restart_q;
	u64 tx_busy;
	u64 tx_linearize;
};

struct ice_rxq_stats {
	u64 non_eop_descs;
	u64 alloc_page_failed;
	u64 alloc_buf_failed;
	u64 page_reuse_count;
};
/* This enum matches hardware bits and is meant to be used by DYN_CTLN
 * registers and QINT registers, or more generally anywhere in the manual
 * that mentions ITR_INDX. ITR_NONE cannot be used as an index 'n' into any
 * register; instead it is a special value meaning "don't update" ITR0/1/2.
 */
enum ice_dyn_idx_t {
	ICE_IDX_ITR0 = 0,
	ICE_IDX_ITR1 = 1,
	ICE_IDX_ITR2 = 2,
	ICE_ITR_NONE = 3	/* ITR_NONE must not be used as an index */
};

/* Header split modes defined by DTYPE field of Rx RLAN context */
enum ice_rx_dtype {
	ICE_RX_DTYPE_NO_SPLIT		= 0,
	ICE_RX_DTYPE_HEADER_SPLIT	= 1,
	ICE_RX_DTYPE_SPLIT_ALWAYS	= 2,
};

/* indices into GLINT_ITR registers */
#define ICE_RX_ITR	ICE_IDX_ITR0
#define ICE_TX_ITR	ICE_IDX_ITR1
#define ICE_ITR_DYNAMIC	0x8000	/* use top bit as a flag */
#define ICE_ITR_8K	0x003E

/* apply ITR HW granularity translation to program the HW registers */
#define ITR_TO_REG(val, itr_gran) (((val) & ~ICE_ITR_DYNAMIC) >> (itr_gran))
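
/* For illustration, assuming itr_gran expresses the hardware ITR granularity
 * as a right-shift count (e.g. 1 when user values are in 1 us units and the
 * register counts in 2 us units): ITR_TO_REG(ICE_ITR_8K | ICE_ITR_DYNAMIC, 1)
 * first clears the ICE_ITR_DYNAMIC flag (0x803E & ~0x8000 = 0x3E) and then
 * shifts, so the value programmed into GLINT_ITR would be 0x3E >> 1 = 0x1F.
 */
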
/* Legacy or Advanced Mode Queue */
#define ICE_TX_ADVANCED	0
#define ICE_TX_LEGACY	1

/* descriptor ring, associated with a VSI */
struct ice_ring {
	struct ice_ring *next;		/* pointer to next ring in q_vector */
	void *desc;			/* Descriptor ring memory */
	struct device *dev;		/* Used for DMA mapping */
	struct net_device *netdev;	/* netdev ring maps to */
	struct ice_vsi *vsi;		/* Backreference to associated VSI */
	struct ice_q_vector *q_vector;	/* Backreference to associated vector */
	u8 __iomem *tail;
	union {
		struct ice_tx_buf *tx_buf;
		struct ice_rx_buf *rx_buf;
	};
	u16 q_index;			/* Queue number of ring */
	u32 txq_teid;			/* Added Tx queue TEID */

	/* high bit set means dynamic, use accessor routines to read/write.
	 * hardware supports 2us/1us resolution for the ITR registers.
	 * these values always store the USER setting, and must be converted
	 * before programming to a register.
	 */
	u16 rx_itr_setting;
	u16 tx_itr_setting;

	u16 count;			/* Number of descriptors */
	u16 reg_idx;			/* HW register index of the ring */

	/* used in interrupt processing */
	u16 next_to_use;
	u16 next_to_clean;

	u8 ring_active;			/* is ring online or not */

	/* stats structs */
	struct ice_q_stats stats;
	struct u64_stats_sync syncp;
	union {
		struct ice_txq_stats tx_stats;
		struct ice_rxq_stats rx_stats;
	};

	unsigned int size;		/* length of descriptor ring in bytes */
	dma_addr_t dma;			/* physical address of ring */
	struct rcu_head rcu;		/* to avoid race on free */
	u16 next_to_alloc;
} ____cacheline_internodealigned_in_smp;
enum ice_latency_range {
	ICE_LOWEST_LATENCY = 0,
	ICE_LOW_LATENCY = 1,
	ICE_BULK_LATENCY = 2,
	ICE_ULTRA_LATENCY = 3,
};

struct ice_ring_container {
	/* array of pointers to rings */
	struct ice_ring *ring;
	unsigned int total_bytes;	/* total bytes processed this int */
	unsigned int total_pkts;	/* total packets processed this int */
	enum ice_latency_range latency_range;
	u16 itr;
};

/* iterator for handling rings in ring container */
#define ice_for_each_ring(pos, head) \
	for (pos = (head).ring; pos; pos = pos->next)
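
/* Usage sketch for the iterator above (hypothetical; assumes the q_vector
 * exposes its Tx rings through an ice_ring_container member named "tx",
 * which is not defined in this header):
 *
 *	struct ice_ring *ring;
 *
 *	ice_for_each_ring(ring, q_vector->tx)
 *		ice_clean_tx_ring(ring);
 */
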
bool ice_alloc_rx_bufs(struct ice_ring *rxr, u16 cleaned_count);
netdev_tx_t ice_start_xmit(struct sk_buff *skb, struct net_device *netdev);
void ice_clean_tx_ring(struct ice_ring *tx_ring);
void ice_clean_rx_ring(struct ice_ring *rx_ring);
int ice_setup_tx_ring(struct ice_ring *tx_ring);
int ice_setup_rx_ring(struct ice_ring *rx_ring);
void ice_free_tx_ring(struct ice_ring *tx_ring);
void ice_free_rx_ring(struct ice_ring *rx_ring);
int ice_napi_poll(struct napi_struct *napi, int budget);

#endif /* _ICE_TXRX_H_ */