/* SPDX-License-Identifier: GPL-2.0 */
/* Copyright (c) 2018, Intel Corporation. */

#ifndef _ICE_LAN_TX_RX_H_
#define _ICE_LAN_TX_RX_H_

/* Legacy 32-byte Rx descriptor.
 *
 * The "read" view holds the DMA buffer addresses software programs for the
 * hardware; the "wb" (writeback) view is the layout hardware writes back
 * once a packet has been received.  Both views alias the same 32 bytes.
 */
union ice_32byte_rx_desc {
	struct {
		__le64 pkt_addr; /* Packet buffer address */
		__le64 hdr_addr; /* Header buffer address */
		/* bit 0 of hdr_addr is DD bit */
		__le64 rsvd1;
		__le64 rsvd2;
	} read;
	struct {
		struct {
			struct {
				__le16 mirroring_status;
				__le16 l2tag1;
			} lo_dword;
			union {
				__le32 rss; /* RSS Hash */
				__le32 fd_id; /* Flow Director filter id */
			} hi_dword;
		} qword0;
		struct {
			/* status/error/PTYPE/length */
			__le64 status_error_len;
		} qword1;
		struct {
			__le16 ext_status; /* extended status */
			__le16 rsvd;
			__le16 l2tag2_1;
			__le16 l2tag2_2;
		} qword2;
		struct {
			__le32 reserved;
			__le32 fd_id;
		} qword3;
	} wb; /* writeback */
};
/* Decoded form of the 10-bit hardware packet type (PTYPE) carried in the
 * Rx descriptor.  Entries of ice_ptype_lkup[] (below) use this layout;
 * each field takes one of the enum values that follow.
 */
struct ice_rx_ptype_decoded {
	u32 ptype:10;		/* raw HW packet type index */
	u32 known:1;		/* 0 => unused/unknown table entry */
	u32 outer_ip:1;		/* enum ice_rx_ptype_outer_ip */
	u32 outer_ip_ver:2;	/* enum ice_rx_ptype_outer_ip_ver */
	u32 outer_frag:1;	/* enum ice_rx_ptype_outer_fragmented */
	u32 tunnel_type:3;	/* enum ice_rx_ptype_tunnel_type */
	u32 tunnel_end_prot:2;	/* enum ice_rx_ptype_tunnel_end_prot */
	u32 tunnel_end_frag:1;	/* enum ice_rx_ptype_outer_fragmented */
	u32 inner_prot:4;	/* enum ice_rx_ptype_inner_prot */
	u32 payload_layer:3;	/* enum ice_rx_ptype_payload_layer */
};

/* outer_ip field: is there an outer IP header at all? */
enum ice_rx_ptype_outer_ip {
	ICE_RX_PTYPE_OUTER_L2 = 0,
	ICE_RX_PTYPE_OUTER_IP = 1,
};

/* outer_ip_ver field: version of the outer IP header */
enum ice_rx_ptype_outer_ip_ver {
	ICE_RX_PTYPE_OUTER_NONE = 0,
	ICE_RX_PTYPE_OUTER_IPV4 = 1,
	ICE_RX_PTYPE_OUTER_IPV6 = 2,
};

/* outer_frag / tunnel_end_frag fields */
enum ice_rx_ptype_outer_fragmented {
	ICE_RX_PTYPE_NOT_FRAG = 0,
	ICE_RX_PTYPE_FRAG = 1,
};

/* tunnel_type field */
enum ice_rx_ptype_tunnel_type {
	ICE_RX_PTYPE_TUNNEL_NONE = 0,
	ICE_RX_PTYPE_TUNNEL_IP_IP = 1,
	ICE_RX_PTYPE_TUNNEL_IP_GRENAT = 2,
	ICE_RX_PTYPE_TUNNEL_IP_GRENAT_MAC = 3,
	ICE_RX_PTYPE_TUNNEL_IP_GRENAT_MAC_VLAN = 4,
};

/* tunnel_end_prot field: IP version of the tunnel's inner endpoint */
enum ice_rx_ptype_tunnel_end_prot {
	ICE_RX_PTYPE_TUNNEL_END_NONE = 0,
	ICE_RX_PTYPE_TUNNEL_END_IPV4 = 1,
	ICE_RX_PTYPE_TUNNEL_END_IPV6 = 2,
};

/* inner_prot field: innermost L4 protocol */
enum ice_rx_ptype_inner_prot {
	ICE_RX_PTYPE_INNER_PROT_NONE = 0,
	ICE_RX_PTYPE_INNER_PROT_UDP = 1,
	ICE_RX_PTYPE_INNER_PROT_TCP = 2,
	ICE_RX_PTYPE_INNER_PROT_SCTP = 3,
	ICE_RX_PTYPE_INNER_PROT_ICMP = 4,
	ICE_RX_PTYPE_INNER_PROT_TIMESYNC = 5,
};

/* payload_layer field */
enum ice_rx_ptype_payload_layer {
	ICE_RX_PTYPE_PAYLOAD_LAYER_NONE = 0,
	ICE_RX_PTYPE_PAYLOAD_LAYER_PAY2 = 1,
	ICE_RX_PTYPE_PAYLOAD_LAYER_PAY3 = 2,
	ICE_RX_PTYPE_PAYLOAD_LAYER_PAY4 = 3,
};
/* RX Flex Descriptor
 * This descriptor is used instead of the legacy version descriptor when
 * ice_rlan_ctx.adv_desc is set
 */
union ice_32b_rx_flex_desc {
	struct {
		__le64 pkt_addr; /* Packet buffer address */
		__le64 hdr_addr; /* Header buffer address */
		/* bit 0 of hdr_addr is DD bit */
		__le64 rsvd1;
		__le64 rsvd2;
	} read;
	struct {
		/* Qword 0 */
		u8 rxdid;		/* descriptor builder profile id */
		u8 mir_id_umb_cast;	/* mirror=[5:0], umb=[7:6] */
		__le16 ptype_flex_flags0;	/* ptype=[9:0], ff0=[15:10] */
		__le16 pkt_len;			/* [15:14] are reserved */
		__le16 hdr_len_sph_flex_flags1;	/* header=[10:0] */
						/* sph=[11:11] */
						/* ff1/ext=[15:12] */
		/* Qword 1 */
		__le16 status_error0;
		__le16 l2tag1;
		__le16 flex_meta0;
		__le16 flex_meta1;
		/* Qword 2 */
		__le16 status_error1;
		u8 flex_flags2;
		u8 time_stamp_low;
		__le16 l2tag2_1st;
		__le16 l2tag2_2nd;
		/* Qword 3 */
		__le16 flex_meta2;
		__le16 flex_meta3;
		/* last dword: either two more flex metadata words or the
		 * high 32 bits of the timestamp, depending on the profile
		 */
		union {
			struct {
				__le16 flex_meta4;
				__le16 flex_meta5;
			} flex;
			__le32 ts_high;
		} flex_ts;
	} wb; /* writeback */
};
/* Rx Flex Descriptor NIC Profile
 * This descriptor corresponds to RxDID 2 which contains
 * metadata fields for RSS, flow id and timestamp info
 */
struct ice_32b_rx_flex_desc_nic {
	/* Qword 0 */
	u8 rxdid;
	u8 mir_id_umb_cast;
	__le16 ptype_flexi_flags0;
	__le16 pkt_len;
	__le16 hdr_len_sph_flex_flags1;
	/* Qword 1 */
	__le16 status_error0;
	__le16 l2tag1;
	__le32 rss_hash;
	/* Qword 2 */
	__le16 status_error1;
	u8 flexi_flags2;
	u8 ts_low;
	__le16 l2tag2_1st;
	__le16 l2tag2_2nd;
	/* Qword 3 */
	__le32 flow_id;
	/* final dword carries either VLAN/IPv6 flow metadata or the high
	 * 32 bits of the timestamp
	 */
	union {
		struct {
			__le16 vlan_id;
			__le16 flow_id_ipv6;
		} flex;
		__le32 ts_high;
	} flex_ts;
};
/* Receive Flex Descriptor profile IDs: There are a total
 * of 64 profiles where profile IDs 0/1 are for legacy; and
 * profiles 2-63 are flex profiles that can be programmed
 * with a specific metadata (profile 7 reserved for HW)
 */
enum ice_rxdid {
	ICE_RXDID_LEGACY_0 = 0,
	ICE_RXDID_LEGACY_1 = 1,
	ICE_RXDID_FLEX_NIC = 2,
	ICE_RXDID_FLEX_NIC_2 = 6,
	ICE_RXDID_HW = 7,
	ICE_RXDID_LAST = 63,
};

/* Receive Flex Descriptor Rx opcode values */
#define ICE_RX_OPC_MDID 0x01

/* Receive Descriptor MDID values
 * (metadata IDs selectable into the flex_meta* descriptor words)
 */
enum ice_flex_rx_mdid {
	ICE_RX_MDID_FLOW_ID_LOWER = 5,
	ICE_RX_MDID_FLOW_ID_HIGH,
	ICE_RX_MDID_SRC_VSI = 19,
	ICE_RX_MDID_HASH_LOW = 56,
	ICE_RX_MDID_HASH_HIGH,
};

/* Rx Flag64 packet flag bits
 * Note: values are bit positions within a 64-bit flag word; gaps between
 * explicit assignments are reserved positions.
 */
enum ice_rx_flg64_bits {
	ICE_RXFLG_PKT_DSI = 0,
	ICE_RXFLG_EVLAN_x8100 = 15,
	ICE_RXFLG_EVLAN_x9100,
	ICE_RXFLG_VLAN_x8100,
	ICE_RXFLG_TNL_MAC = 22,
	ICE_RXFLG_TNL_VLAN,
	ICE_RXFLG_PKT_FRG,
	ICE_RXFLG_FIN = 32,
	ICE_RXFLG_SYN,
	ICE_RXFLG_RST,
	ICE_RXFLG_TNL0 = 38,
	ICE_RXFLG_TNL1,
	ICE_RXFLG_TNL2,
	ICE_RXFLG_UDP_GRE,
	ICE_RXFLG_RSVD = 63
};
/* for ice_32byte_rx_flex_desc.ptype_flexi_flags0 member */
#define ICE_RX_FLEX_DESC_PTYPE_M (0x3FF) /* 10-bits */

/* for ice_32byte_rx_flex_desc.pkt_length member */
#define ICE_RX_FLX_DESC_PKT_LEN_M (0x3FFF) /* 14-bits */

/* Bit positions within the flex descriptor's status_error0 word */
enum ice_rx_flex_desc_status_error_0_bits {
	/* Note: These are predefined bit offsets */
	ICE_RX_FLEX_DESC_STATUS0_DD_S = 0,
	ICE_RX_FLEX_DESC_STATUS0_EOF_S,
	ICE_RX_FLEX_DESC_STATUS0_HBO_S,
	ICE_RX_FLEX_DESC_STATUS0_L3L4P_S,
	ICE_RX_FLEX_DESC_STATUS0_XSUM_IPE_S,
	ICE_RX_FLEX_DESC_STATUS0_XSUM_L4E_S,
	ICE_RX_FLEX_DESC_STATUS0_XSUM_EIPE_S,
	ICE_RX_FLEX_DESC_STATUS0_XSUM_EUDPE_S,
	ICE_RX_FLEX_DESC_STATUS0_LPBK_S,
	ICE_RX_FLEX_DESC_STATUS0_IPV6EXADD_S,
	ICE_RX_FLEX_DESC_STATUS0_RXE_S,
	ICE_RX_FLEX_DESC_STATUS0_CRCP_S,
	ICE_RX_FLEX_DESC_STATUS0_RSS_VALID_S,
	ICE_RX_FLEX_DESC_STATUS0_L2TAG1P_S,
	ICE_RX_FLEX_DESC_STATUS0_XTRMD0_VALID_S,
	ICE_RX_FLEX_DESC_STATUS0_XTRMD1_VALID_S,
	ICE_RX_FLEX_DESC_STATUS0_LAST /* this entry must be last!!! */
};
/* Size of the Rx queue context image exchanged with hardware */
#define ICE_RXQ_CTX_SIZE_DWORDS 8
#define ICE_RXQ_CTX_SZ (ICE_RXQ_CTX_SIZE_DWORDS * sizeof(u32))

/* RLAN Rx queue context data
 *
 * The sizes of the variables may be larger than needed due to crossing byte
 * boundaries. If we do not have the width of the variable set to the correct
 * size then we could end up shifting bits off the top of the variable when the
 * variable is at the top of a byte and crosses over into the next byte.
 */
struct ice_rlan_ctx {
	u16 head;
	u16 cpuid; /* bigger than needed, see above for reason */
#define ICE_RLAN_BASE_S 7
	u64 base;
	u16 qlen;
#define ICE_RLAN_CTX_DBUF_S 7
	u16 dbuf; /* bigger than needed, see above for reason */
#define ICE_RLAN_CTX_HBUF_S 6
	u16 hbuf; /* bigger than needed, see above for reason */
	u8 dtype;
	u8 dsize;
	u8 crcstrip;
	u8 l2tsel;
	u8 hsplit_0;
	u8 hsplit_1;
	u8 showiv;
	u32 rxmax; /* bigger than needed, see above for reason */
	u8 tphrdesc_ena;
	u8 tphwdesc_ena;
	u8 tphdata_ena;
	u8 tphhead_ena;
	u16 lrxqthresh; /* bigger than needed, see above for reason */
};
/* Describes one field of a HW context image: its location in the software
 * struct (offset/size_of) and, presumably, its packed position in the HW
 * context (width/lsb) -- NOTE(review): packing direction inferred from the
 * field names; confirm against the table consumer.
 */
struct ice_ctx_ele {
	u16 offset;
	u16 size_of;
	u16 width;
	u16 lsb;
};

/* Build one ice_ctx_ele initializer for member _ele of struct _struct */
#define ICE_CTX_STORE(_struct, _ele, _width, _lsb) { \
	.offset = offsetof(struct _struct, _ele), \
	.size_of = FIELD_SIZEOF(struct _struct, _ele), \
	.width = _width, \
	.lsb = _lsb, \
}

/* for hsplit_0 field of Rx RLAN context */
enum ice_rlan_ctx_rx_hsplit_0 {
	ICE_RLAN_RX_HSPLIT_0_NO_SPLIT = 0,
	ICE_RLAN_RX_HSPLIT_0_SPLIT_L2 = 1,
	ICE_RLAN_RX_HSPLIT_0_SPLIT_IP = 2,
	ICE_RLAN_RX_HSPLIT_0_SPLIT_TCP_UDP = 4,
	ICE_RLAN_RX_HSPLIT_0_SPLIT_SCTP = 8,
};

/* for hsplit_1 field of Rx RLAN context */
enum ice_rlan_ctx_rx_hsplit_1 {
	ICE_RLAN_RX_HSPLIT_1_NO_SPLIT = 0,
	ICE_RLAN_RX_HSPLIT_1_SPLIT_L2 = 1,
	ICE_RLAN_RX_HSPLIT_1_SPLIT_ALWAYS = 2,
};
/* TX Descriptor */
struct ice_tx_desc {
	__le64 buf_addr; /* Address of descriptor's data buf */
	/* qword 1: packed DTYPE / command / offsets / buffer size / L2TAG1,
	 * see the _S (shift) and _M (mask) macros below for field positions
	 */
	__le64 cmd_type_offset_bsz;
};

/* DTYPE field values for cmd_type_offset_bsz */
enum ice_tx_desc_dtype_value {
	ICE_TX_DESC_DTYPE_DATA = 0x0,
	ICE_TX_DESC_DTYPE_CTX = 0x1,
	/* DESC_DONE - HW has completed write-back of descriptor */
	ICE_TX_DESC_DTYPE_DESC_DONE = 0xF,
};

/* Command field within qword 1 of the Tx descriptor */
#define ICE_TXD_QW1_CMD_S 4
#define ICE_TXD_QW1_CMD_M (0xFFFUL << ICE_TXD_QW1_CMD_S)

enum ice_tx_desc_cmd_bits {
	ICE_TX_DESC_CMD_EOP = 0x0001,
	ICE_TX_DESC_CMD_RS = 0x0002,
	ICE_TX_DESC_CMD_IL2TAG1 = 0x0008,
	ICE_TX_DESC_CMD_IIPT_IPV6 = 0x0020, /* 2 BITS */
	ICE_TX_DESC_CMD_IIPT_IPV4 = 0x0040, /* 2 BITS */
	ICE_TX_DESC_CMD_IIPT_IPV4_CSUM = 0x0060, /* 2 BITS */
	ICE_TX_DESC_CMD_L4T_EOFT_TCP = 0x0100, /* 2 BITS */
	ICE_TX_DESC_CMD_L4T_EOFT_UDP = 0x0300, /* 2 BITS */
};

/* Offset field within qword 1: header lengths packed per the enum below */
#define ICE_TXD_QW1_OFFSET_S 16
#define ICE_TXD_QW1_OFFSET_M (0x3FFFFULL << ICE_TXD_QW1_OFFSET_S)

enum ice_tx_desc_len_fields {
	/* Note: These are predefined bit offsets */
	ICE_TX_DESC_LEN_MACLEN_S = 0, /* 7 BITS */
	ICE_TX_DESC_LEN_IPLEN_S = 7, /* 7 BITS */
	ICE_TX_DESC_LEN_L4_LEN_S = 14 /* 4 BITS */
};

#define ICE_TXD_QW1_MACLEN_M (0x7FUL << ICE_TX_DESC_LEN_MACLEN_S)
#define ICE_TXD_QW1_IPLEN_M (0x7FUL << ICE_TX_DESC_LEN_IPLEN_S)
#define ICE_TXD_QW1_L4LEN_M (0xFUL << ICE_TX_DESC_LEN_L4_LEN_S)

/* Tx descriptor field limits in bytes
 * (MACLEN is counted in words, IPLEN/L4LEN in dwords; ICE_BYTES_PER_WORD
 * and ICE_BYTES_PER_DWORD are defined elsewhere)
 */
#define ICE_TXD_MACLEN_MAX ((ICE_TXD_QW1_MACLEN_M >> \
			     ICE_TX_DESC_LEN_MACLEN_S) * ICE_BYTES_PER_WORD)
#define ICE_TXD_IPLEN_MAX ((ICE_TXD_QW1_IPLEN_M >> \
			    ICE_TX_DESC_LEN_IPLEN_S) * ICE_BYTES_PER_DWORD)
#define ICE_TXD_L4LEN_MAX ((ICE_TXD_QW1_L4LEN_M >> \
			    ICE_TX_DESC_LEN_L4_LEN_S) * ICE_BYTES_PER_DWORD)

#define ICE_TXD_QW1_TX_BUF_SZ_S 34
#define ICE_TXD_QW1_L2TAG1_S 48
/* Context descriptors */
struct ice_tx_ctx_desc {
	__le32 tunneling_params;
	__le16 l2tag2;
	__le16 rsvd;
	/* packed command / TSO length / MSS, see the _S/_M macros below */
	__le64 qw1;
};

#define ICE_TXD_CTX_QW1_CMD_S 4
#define ICE_TXD_CTX_QW1_CMD_M (0x7FUL << ICE_TXD_CTX_QW1_CMD_S)

#define ICE_TXD_CTX_QW1_TSO_LEN_S 30
#define ICE_TXD_CTX_QW1_TSO_LEN_M \
			(0x3FFFFULL << ICE_TXD_CTX_QW1_TSO_LEN_S)

#define ICE_TXD_CTX_QW1_MSS_S 50

/* Values for the CMD field of the context descriptor's qw1.
 * SWTCH_* values share one 2-bit subfield (0x00/0x10/0x20/0x30).
 */
enum ice_tx_ctx_desc_cmd_bits {
	ICE_TX_CTX_DESC_TSO = 0x01,
	ICE_TX_CTX_DESC_TSYN = 0x02,
	ICE_TX_CTX_DESC_IL2TAG2 = 0x04,
	ICE_TX_CTX_DESC_IL2TAG2_IL2H = 0x08,
	ICE_TX_CTX_DESC_SWTCH_NOTAG = 0x00,
	ICE_TX_CTX_DESC_SWTCH_UPLINK = 0x10,
	ICE_TX_CTX_DESC_SWTCH_LOCAL = 0x20,
	ICE_TX_CTX_DESC_SWTCH_VSI = 0x30,
	ICE_TX_CTX_DESC_RESERVED = 0x40
};
/* LAN Tx queue limits */
#define ICE_LAN_TXQ_MAX_QGRPS 127
#define ICE_LAN_TXQ_MAX_QDIS 1023

/* Tx queue context data
 *
 * The sizes of the variables may be larger than needed due to crossing byte
 * boundaries. If we do not have the width of the variable set to the correct
 * size then we could end up shifting bits off the top of the variable when the
 * variable is at the top of a byte and crosses over into the next byte.
 */
struct ice_tlan_ctx {
#define ICE_TLAN_CTX_BASE_S 7
	u64 base; /* base is defined in 128-byte units */
	u8 port_num;
	u16 cgd_num; /* bigger than needed, see above for reason */
	u8 pf_num;
	u16 vmvf_num;
	u8 vmvf_type;
#define ICE_TLAN_CTX_VMVF_TYPE_VMQ 1
#define ICE_TLAN_CTX_VMVF_TYPE_PF 2
	u16 src_vsi;
	u8 tsyn_ena;
	u8 alt_vlan;
	u16 cpuid; /* bigger than needed, see above for reason */
	u8 wb_mode;
	u8 tphrd_desc;
	u8 tphrd;
	u8 tphwr_desc;
	u16 cmpq_id;
	u16 qnum_in_func;
	u8 itr_notification_mode;
	u8 adjust_prof_id;
	u32 qlen; /* bigger than needed, see above for reason */
	u8 quanta_prof_idx;
	u8 tso_ena;
	u16 tso_qnum;
	u8 legacy_int;
	u8 drop_ena;
	u8 cache_prof_idx;
	u8 pkt_shaper_prof_idx;
	u8 int_q_state; /* width not needed - internal do not write */
};
/* macro to make the table lines short: expands token fragments into the
 * fully-qualified enum values of struct ice_rx_ptype_decoded (known = 1)
 */
#define ICE_PTT(PTYPE, OUTER_IP, OUTER_IP_VER, OUTER_FRAG, T, TE, TEF, I, PL)\
	{	PTYPE, \
		1, \
		ICE_RX_PTYPE_OUTER_##OUTER_IP, \
		ICE_RX_PTYPE_OUTER_##OUTER_IP_VER, \
		ICE_RX_PTYPE_##OUTER_FRAG, \
		ICE_RX_PTYPE_TUNNEL_##T, \
		ICE_RX_PTYPE_TUNNEL_END_##TE, \
		ICE_RX_PTYPE_##TEF, \
		ICE_RX_PTYPE_INNER_PROT_##I, \
		ICE_RX_PTYPE_PAYLOAD_LAYER_##PL }

/* an all-zero entry (known = 0) for ptypes with no decoding */
#define ICE_PTT_UNUSED_ENTRY(PTYPE) { PTYPE, 0, 0, 0, 0, 0, 0, 0, 0, 0 }

/* shorter macros makes the table fit but are terse */
#define ICE_RX_PTYPE_NOF ICE_RX_PTYPE_NOT_FRAG

/* Lookup table mapping the HW PTYPE to the bit field for decoding */
static const struct ice_rx_ptype_decoded ice_ptype_lkup[] = {
	/* L2 Packet types */
	ICE_PTT_UNUSED_ENTRY(0),
	ICE_PTT(1, L2, NONE, NOF, NONE, NONE, NOF, NONE, PAY2),
	ICE_PTT(2, L2, NONE, NOF, NONE, NONE, NOF, NONE, NONE),
};
  420. static inline struct ice_rx_ptype_decoded ice_decode_rx_desc_ptype(u16 ptype)
  421. {
  422. return ice_ptype_lkup[ptype];
  423. }
  424. #endif /* _ICE_LAN_TX_RX_H_ */