ice_txrx.h

/* SPDX-License-Identifier: GPL-2.0 */
/* Copyright (c) 2018, Intel Corporation. */

#ifndef _ICE_TXRX_H_
#define _ICE_TXRX_H_

#define ICE_DFLT_IRQ_WORK 256

#define ICE_RXBUF_2048 2048
#define ICE_MAX_CHAINED_RX_BUFS 5
#define ICE_MAX_BUF_TXD 8
#define ICE_MIN_TX_LEN 17

/* The size limit for a transmit buffer in a descriptor is (16K - 1).
 * In order to align with the read requests we will align the value to
 * the nearest 4K which represents our maximum read request size.
 */
#define ICE_MAX_READ_REQ_SIZE 4096
#define ICE_MAX_DATA_PER_TXD (16 * 1024 - 1)
#define ICE_MAX_DATA_PER_TXD_ALIGNED \
	(~(ICE_MAX_READ_REQ_SIZE - 1) & ICE_MAX_DATA_PER_TXD)
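
/* Worked example (added, not part of the original header): with a 4 KB
 * maximum read request, the aligned per-descriptor limit evaluates to
 *   ~(4096 - 1) & (16 * 1024 - 1) = 0xFFFFF000 & 0x3FFF = 0x3000 = 12288,
 * i.e. the largest multiple of 4 KB that still fits in the (16K - 1) field.
 */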

#define ICE_RX_BUF_WRITE 16 /* Must be power of 2 */
#define ICE_MAX_TXQ_PER_TXQG 128

/* Tx Descriptors needed, worst case */
#define DESC_NEEDED (MAX_SKB_FRAGS + 4)
#define ICE_DESC_UNUSED(R) \
	((((R)->next_to_clean > (R)->next_to_use) ? 0 : (R)->count) + \
	(R)->next_to_clean - (R)->next_to_use - 1)
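
/* Worked example (added): on a 512-entry ring with next_to_clean = 10 and
 * next_to_use = 500, ICE_DESC_UNUSED() evaluates to 512 + 10 - 500 - 1 = 21;
 * with next_to_clean = 500 and next_to_use = 10 it evaluates to
 * 0 + 500 - 10 - 1 = 489. The trailing "- 1" keeps one descriptor
 * permanently unused so a completely full ring is never confused with an
 * empty one.
 */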

#define ICE_TX_FLAGS_TSO BIT(0)
#define ICE_TX_FLAGS_HW_VLAN BIT(1)
#define ICE_TX_FLAGS_SW_VLAN BIT(2)
#define ICE_TX_FLAGS_VLAN_M 0xffff0000
#define ICE_TX_FLAGS_VLAN_S 16
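
/* Illustrative usage (added; "vlan_tci" is an example variable, not a field
 * from this header): the VLAN tag occupies the upper 16 bits of tx_flags,
 * so it can be stored and recovered with
 *   tx_flags |= (u32)vlan_tci << ICE_TX_FLAGS_VLAN_S;
 *   vlan_tci = (tx_flags & ICE_TX_FLAGS_VLAN_M) >> ICE_TX_FLAGS_VLAN_S;
 * while the low bits remain free for the TSO/VLAN offload flags above.
 */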

struct ice_tx_buf {
	struct ice_tx_desc *next_to_watch;
	struct sk_buff *skb;
	unsigned int bytecount;
	unsigned short gso_segs;
	u32 tx_flags;
	DEFINE_DMA_UNMAP_ADDR(dma);
	DEFINE_DMA_UNMAP_LEN(len);
};
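
/* Descriptive note (added): each ice_tx_buf shadows one Tx descriptor.
 * next_to_watch is typically set only in the buffer tracking a packet's
 * last descriptor; the cleanup path polls that descriptor for the hardware
 * completion indication before unmapping the DMA address/length recorded
 * via DEFINE_DMA_UNMAP_ADDR()/DEFINE_DMA_UNMAP_LEN() and freeing the skb.
 */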

struct ice_tx_offload_params {
	u8 header_len;
	u32 td_cmd;
	u32 td_offset;
	u32 td_l2tag1;
	u16 cd_l2tag2;
	u32 cd_tunnel_params;
	u64 cd_qw1;
	struct ice_ring *tx_ring;
};

struct ice_rx_buf {
	struct sk_buff *skb;
	dma_addr_t dma;
	struct page *page;
	unsigned int page_offset;
};
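
/* Descriptive note (added): as in other Intel Ethernet drivers, an Rx
 * buffer maps a page rather than a preallocated skb data area; page_offset
 * marks where within the page the active ICE_RXBUF_2048-sized buffer
 * starts, and rx_stats.page_reuse_count below counts how often a page
 * could be recycled instead of allocating a fresh one.
 */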

struct ice_q_stats {
	u64 pkts;
	u64 bytes;
};

struct ice_txq_stats {
	u64 restart_q;
	u64 tx_busy;
	u64 tx_linearize;
	int prev_pkt; /* negative if no pending Tx descriptors */
};

struct ice_rxq_stats {
	u64 non_eop_descs;
	u64 alloc_page_failed;
	u64 alloc_buf_failed;
	u64 page_reuse_count;
};

/* This enum matches hardware bits and is meant to be used by DYN_CTLN
 * registers and QINT registers, or more generally anywhere in the manual
 * that mentions ITR_INDX. ITR_NONE cannot be used as an index 'n' into any
 * register; instead it is a special value meaning "don't update" ITR0/1/2.
 */
enum ice_dyn_idx_t {
	ICE_IDX_ITR0 = 0,
	ICE_IDX_ITR1 = 1,
	ICE_IDX_ITR2 = 2,
	ICE_ITR_NONE = 3 /* ITR_NONE must not be used as an index */
};

/* Header split modes defined by DTYPE field of Rx RLAN context */
enum ice_rx_dtype {
	ICE_RX_DTYPE_NO_SPLIT = 0,
	ICE_RX_DTYPE_HEADER_SPLIT = 1,
	ICE_RX_DTYPE_SPLIT_ALWAYS = 2,
};

/* indices into GLINT_ITR registers */
#define ICE_RX_ITR ICE_IDX_ITR0
#define ICE_TX_ITR ICE_IDX_ITR1
#define ICE_ITR_DYNAMIC 0x8000 /* use top bit as a flag */
#define ICE_ITR_8K 0x003E

/* apply ITR HW granularity translation to program the HW registers */
#define ITR_TO_REG(val, itr_gran) (((val) & ~ICE_ITR_DYNAMIC) >> (itr_gran))
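
/* Worked example (added; the shift value is illustrative): a stored setting
 * of (ICE_ITR_DYNAMIC | ICE_ITR_8K) = 0x803E first has the dynamic flag
 * masked off, leaving 0x3E; with an itr_gran shift of 1 the value written
 * to the register is 0x3E >> 1 = 0x1F. The flag bit itself never reaches
 * hardware, matching the "high bit set means dynamic" note on the
 * rx_itr_setting/tx_itr_setting fields below.
 */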

/* Legacy or Advanced Mode Queue */
#define ICE_TX_ADVANCED 0
#define ICE_TX_LEGACY 1

/* descriptor ring, associated with a VSI */
struct ice_ring {
	struct ice_ring *next; /* pointer to next ring in q_vector */
	void *desc; /* Descriptor ring memory */
	struct device *dev; /* Used for DMA mapping */
	struct net_device *netdev; /* netdev ring maps to */
	struct ice_vsi *vsi; /* Backreference to associated VSI */
	struct ice_q_vector *q_vector; /* Backreference to associated vector */
	u8 __iomem *tail;
	union {
		struct ice_tx_buf *tx_buf;
		struct ice_rx_buf *rx_buf;
	};
	u16 q_index; /* Queue number of ring */
	u32 txq_teid; /* Added Tx queue TEID */

	/* high bit set means dynamic, use accessor routines to read/write.
	 * hardware supports 2us/1us resolution for the ITR registers.
	 * these values always store the USER setting, and must be converted
	 * before programming to a register.
	 */
	u16 rx_itr_setting;
	u16 tx_itr_setting;

	u16 count; /* Number of descriptors */
	u16 reg_idx; /* HW register index of the ring */

	/* used in interrupt processing */
	u16 next_to_use;
	u16 next_to_clean;

	u8 ring_active; /* is ring online or not */

	/* stats structs */
	struct ice_q_stats stats;
	struct u64_stats_sync syncp;
	union {
		struct ice_txq_stats tx_stats;
		struct ice_rxq_stats rx_stats;
	};

	unsigned int size; /* length of descriptor ring in bytes */
	dma_addr_t dma; /* physical address of ring */
	struct rcu_head rcu; /* to avoid race on free */
	u16 next_to_alloc;
} ____cacheline_internodealigned_in_smp;
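
/* Descriptive note (added): next_to_use is where software will place the
 * next descriptor handed to hardware, next_to_clean is the first descriptor
 * software has not yet reclaimed, and next_to_alloc is used on the Rx side
 * when posting or recycling buffers. Descriptors between next_to_clean and
 * next_to_use are considered owned by hardware, which is why
 * ICE_DESC_UNUSED() above excludes that range.
 */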

enum ice_latency_range {
	ICE_LOWEST_LATENCY = 0,
	ICE_LOW_LATENCY = 1,
	ICE_BULK_LATENCY = 2,
	ICE_ULTRA_LATENCY = 3,
};

struct ice_ring_container {
	/* array of pointers to rings */
	struct ice_ring *ring;
	unsigned int total_bytes; /* total bytes processed this int */
	unsigned int total_pkts; /* total packets processed this int */
	enum ice_latency_range latency_range;
	u16 itr;
};

/* iterator for handling rings in ring container */
#define ice_for_each_ring(pos, head) \
	for (pos = (head).ring; pos; pos = pos->next)
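
/* Usage sketch (added; "q_vector->tx"/"q_vector->rx" containers and the
 * service_tx_ring()/service_rx_ring() helpers are illustrative assumptions,
 * not declarations from this header):
 *
 *	struct ice_ring *ring;
 *
 *	ice_for_each_ring(ring, q_vector->tx)
 *		budget_left = service_tx_ring(ring, budget_left);
 *	ice_for_each_ring(ring, q_vector->rx)
 *		work_done += service_rx_ring(ring, budget);
 *
 * Each ice_ring_container holds the head of a singly linked list chained
 * through ice_ring::next, so the macro visits every ring serviced by the
 * vector.
 */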

bool ice_alloc_rx_bufs(struct ice_ring *rxr, u16 cleaned_count);
netdev_tx_t ice_start_xmit(struct sk_buff *skb, struct net_device *netdev);
void ice_clean_tx_ring(struct ice_ring *tx_ring);
void ice_clean_rx_ring(struct ice_ring *rx_ring);
int ice_setup_tx_ring(struct ice_ring *tx_ring);
int ice_setup_rx_ring(struct ice_ring *rx_ring);
void ice_free_tx_ring(struct ice_ring *tx_ring);
void ice_free_rx_ring(struct ice_ring *rx_ring);
int ice_napi_poll(struct napi_struct *napi, int budget);

#endif /* _ICE_TXRX_H_ */