bnxt.h

/* Broadcom NetXtreme-C/E network driver.
 *
 * Copyright (c) 2014-2015 Broadcom Corporation
 *
 * This program is free software; you can redistribute it and/or modify
 * it under the terms of the GNU General Public License as published by
 * the Free Software Foundation.
 */

#ifndef BNXT_H
#define BNXT_H

#define DRV_MODULE_NAME		"bnxt_en"
#define DRV_MODULE_VERSION	"1.0.0"

#define DRV_VER_MAJ	1
#define DRV_VER_MIN	0
#define DRV_VER_UPD	0

struct tx_bd {
	__le32 tx_bd_len_flags_type;
	#define TX_BD_TYPE (0x3f << 0)
	#define TX_BD_TYPE_SHORT_TX_BD (0x00 << 0)
	#define TX_BD_TYPE_LONG_TX_BD (0x10 << 0)
	#define TX_BD_FLAGS_PACKET_END (1 << 6)
	#define TX_BD_FLAGS_NO_CMPL (1 << 7)
	#define TX_BD_FLAGS_BD_CNT (0x1f << 8)
	#define TX_BD_FLAGS_BD_CNT_SHIFT 8
	#define TX_BD_FLAGS_LHINT (3 << 13)
	#define TX_BD_FLAGS_LHINT_SHIFT 13
	#define TX_BD_FLAGS_LHINT_512_AND_SMALLER (0 << 13)
	#define TX_BD_FLAGS_LHINT_512_TO_1023 (1 << 13)
	#define TX_BD_FLAGS_LHINT_1024_TO_2047 (2 << 13)
	#define TX_BD_FLAGS_LHINT_2048_AND_LARGER (3 << 13)
	#define TX_BD_FLAGS_COAL_NOW (1 << 15)
	#define TX_BD_LEN (0xffff << 16)
	#define TX_BD_LEN_SHIFT 16

	u32 tx_bd_opaque;
	__le64 tx_bd_haddr;
} __packed;

struct tx_bd_ext {
	__le32 tx_bd_hsize_lflags;
	#define TX_BD_FLAGS_TCP_UDP_CHKSUM (1 << 0)
	#define TX_BD_FLAGS_IP_CKSUM (1 << 1)
	#define TX_BD_FLAGS_NO_CRC (1 << 2)
	#define TX_BD_FLAGS_STAMP (1 << 3)
	#define TX_BD_FLAGS_T_IP_CHKSUM (1 << 4)
	#define TX_BD_FLAGS_LSO (1 << 5)
	#define TX_BD_FLAGS_IPID_FMT (1 << 6)
	#define TX_BD_FLAGS_T_IPID (1 << 7)
	#define TX_BD_HSIZE (0xff << 16)
	#define TX_BD_HSIZE_SHIFT 16

	__le32 tx_bd_mss;
	__le32 tx_bd_cfa_action;
	#define TX_BD_CFA_ACTION (0xffff << 16)
	#define TX_BD_CFA_ACTION_SHIFT 16

	__le32 tx_bd_cfa_meta;
	#define TX_BD_CFA_META_MASK 0xfffffff
	#define TX_BD_CFA_META_VID_MASK 0xfff
	#define TX_BD_CFA_META_PRI_MASK (0xf << 12)
	#define TX_BD_CFA_META_PRI_SHIFT 12
	#define TX_BD_CFA_META_TPID_MASK (3 << 16)
	#define TX_BD_CFA_META_TPID_SHIFT 16
	#define TX_BD_CFA_META_KEY (0xf << 28)
	#define TX_BD_CFA_META_KEY_SHIFT 28
	#define TX_BD_CFA_META_KEY_VLAN (1 << 28)
};
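
/* Illustrative sketch, not part of the original header: composing the
 * tx_bd_len_flags_type word of a long TX BD for a single-buffer packet,
 * using the masks above.  The helper name and its parameters are
 * hypothetical and exist only for this example.
 */
static inline __le32 bnxt_example_tx_bd_word(unsigned int len,
					     unsigned int bd_count)
{
	u32 val = TX_BD_TYPE_LONG_TX_BD |
		  (bd_count << TX_BD_FLAGS_BD_CNT_SHIFT) |
		  TX_BD_FLAGS_PACKET_END |
		  (len << TX_BD_LEN_SHIFT);

	return cpu_to_le32(val);
}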

struct rx_bd {
	__le32 rx_bd_len_flags_type;
	#define RX_BD_TYPE (0x3f << 0)
	#define RX_BD_TYPE_RX_PACKET_BD 0x4
	#define RX_BD_TYPE_RX_BUFFER_BD 0x5
	#define RX_BD_TYPE_RX_AGG_BD 0x6
	#define RX_BD_TYPE_16B_BD_SIZE (0 << 4)
	#define RX_BD_TYPE_32B_BD_SIZE (1 << 4)
	#define RX_BD_TYPE_48B_BD_SIZE (2 << 4)
	#define RX_BD_TYPE_64B_BD_SIZE (3 << 4)
	#define RX_BD_FLAGS_SOP (1 << 6)
	#define RX_BD_FLAGS_EOP (1 << 7)
	#define RX_BD_FLAGS_BUFFERS (3 << 8)
	#define RX_BD_FLAGS_1_BUFFER_PACKET (0 << 8)
	#define RX_BD_FLAGS_2_BUFFER_PACKET (1 << 8)
	#define RX_BD_FLAGS_3_BUFFER_PACKET (2 << 8)
	#define RX_BD_FLAGS_4_BUFFER_PACKET (3 << 8)
	#define RX_BD_LEN (0xffff << 16)
	#define RX_BD_LEN_SHIFT 16

	u32 rx_bd_opaque;
	__le64 rx_bd_haddr;
};

struct tx_cmp {
	__le32 tx_cmp_flags_type;
	#define CMP_TYPE (0x3f << 0)
	#define CMP_TYPE_TX_L2_CMP 0
	#define CMP_TYPE_RX_L2_CMP 17
	#define CMP_TYPE_RX_AGG_CMP 18
	#define CMP_TYPE_RX_L2_TPA_START_CMP 19
	#define CMP_TYPE_RX_L2_TPA_END_CMP 21
	#define CMP_TYPE_STATUS_CMP 32
	#define CMP_TYPE_REMOTE_DRIVER_REQ 34
	#define CMP_TYPE_REMOTE_DRIVER_RESP 36
	#define CMP_TYPE_ERROR_STATUS 48
	#define CMPL_BASE_TYPE_STAT_EJECT (0x1aUL << 0)
	#define CMPL_BASE_TYPE_HWRM_DONE (0x20UL << 0)
	#define CMPL_BASE_TYPE_HWRM_FWD_REQ (0x22UL << 0)
	#define CMPL_BASE_TYPE_HWRM_FWD_RESP (0x24UL << 0)
	#define CMPL_BASE_TYPE_HWRM_ASYNC_EVENT (0x2eUL << 0)
	#define TX_CMP_FLAGS_ERROR (1 << 6)
	#define TX_CMP_FLAGS_PUSH (1 << 7)

	u32 tx_cmp_opaque;
	__le32 tx_cmp_errors_v;
	#define TX_CMP_V (1 << 0)
	#define TX_CMP_ERRORS_BUFFER_ERROR (7 << 1)
	#define TX_CMP_ERRORS_BUFFER_ERROR_NO_ERROR 0
	#define TX_CMP_ERRORS_BUFFER_ERROR_BAD_FORMAT 2
	#define TX_CMP_ERRORS_BUFFER_ERROR_INVALID_STAG 4
	#define TX_CMP_ERRORS_BUFFER_ERROR_STAG_BOUNDS 5
	#define TX_CMP_ERRORS_ZERO_LENGTH_PKT (1 << 4)
	#define TX_CMP_ERRORS_EXCESSIVE_BD_LEN (1 << 5)
	#define TX_CMP_ERRORS_DMA_ERROR (1 << 6)
	#define TX_CMP_ERRORS_HINT_TOO_SHORT (1 << 7)

	__le32 tx_cmp_unsed_3;
};

struct rx_cmp {
	__le32 rx_cmp_len_flags_type;
	#define RX_CMP_CMP_TYPE (0x3f << 0)
	#define RX_CMP_FLAGS_ERROR (1 << 6)
	#define RX_CMP_FLAGS_PLACEMENT (7 << 7)
	#define RX_CMP_FLAGS_RSS_VALID (1 << 10)
	#define RX_CMP_FLAGS_UNUSED (1 << 11)
	#define RX_CMP_FLAGS_ITYPES_SHIFT 12
	#define RX_CMP_FLAGS_ITYPE_UNKNOWN (0 << 12)
	#define RX_CMP_FLAGS_ITYPE_IP (1 << 12)
	#define RX_CMP_FLAGS_ITYPE_TCP (2 << 12)
	#define RX_CMP_FLAGS_ITYPE_UDP (3 << 12)
	#define RX_CMP_FLAGS_ITYPE_FCOE (4 << 12)
	#define RX_CMP_FLAGS_ITYPE_ROCE (5 << 12)
	#define RX_CMP_FLAGS_ITYPE_PTP_WO_TS (8 << 12)
	#define RX_CMP_FLAGS_ITYPE_PTP_W_TS (9 << 12)
	#define RX_CMP_LEN (0xffff << 16)
	#define RX_CMP_LEN_SHIFT 16

	u32 rx_cmp_opaque;
	__le32 rx_cmp_misc_v1;
	#define RX_CMP_V1 (1 << 0)
	#define RX_CMP_AGG_BUFS (0x1f << 1)
	#define RX_CMP_AGG_BUFS_SHIFT 1
	#define RX_CMP_RSS_HASH_TYPE (0x7f << 9)
	#define RX_CMP_RSS_HASH_TYPE_SHIFT 9
	#define RX_CMP_PAYLOAD_OFFSET (0xff << 16)
	#define RX_CMP_PAYLOAD_OFFSET_SHIFT 16

	__le32 rx_cmp_rss_hash;
};

#define RX_CMP_HASH_VALID(rxcmp) \
	((rxcmp)->rx_cmp_len_flags_type & cpu_to_le32(RX_CMP_FLAGS_RSS_VALID))

#define RSS_PROFILE_ID_MASK 0x1f

#define RX_CMP_HASH_TYPE(rxcmp) \
	(((le32_to_cpu((rxcmp)->rx_cmp_misc_v1) & RX_CMP_RSS_HASH_TYPE) >> \
	  RX_CMP_RSS_HASH_TYPE_SHIFT) & RSS_PROFILE_ID_MASK)
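
/* Illustrative sketch, not in the original header: pulling the RSS hash
 * out of an RX completion once the hardware has marked it valid.  The
 * helper name is hypothetical.
 */
static inline u32 bnxt_example_rx_hash(struct rx_cmp *rxcmp)
{
	if (!RX_CMP_HASH_VALID(rxcmp))
		return 0;

	return le32_to_cpu(rxcmp->rx_cmp_rss_hash);
}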

struct rx_cmp_ext {
	__le32 rx_cmp_flags2;
	#define RX_CMP_FLAGS2_IP_CS_CALC 0x1
	#define RX_CMP_FLAGS2_L4_CS_CALC (0x1 << 1)
	#define RX_CMP_FLAGS2_T_IP_CS_CALC (0x1 << 2)
	#define RX_CMP_FLAGS2_T_L4_CS_CALC (0x1 << 3)
	#define RX_CMP_FLAGS2_META_FORMAT_VLAN (0x1 << 4)

	__le32 rx_cmp_meta_data;
	#define RX_CMP_FLAGS2_METADATA_VID_MASK 0xfff
	#define RX_CMP_FLAGS2_METADATA_TPID_MASK 0xffff0000
	#define RX_CMP_FLAGS2_METADATA_TPID_SFT 16

	__le32 rx_cmp_cfa_code_errors_v2;
	#define RX_CMP_V (1 << 0)
	#define RX_CMPL_ERRORS_MASK (0x7fff << 1)
	#define RX_CMPL_ERRORS_SFT 1
	#define RX_CMPL_ERRORS_BUFFER_ERROR_MASK (0x7 << 1)
	#define RX_CMPL_ERRORS_BUFFER_ERROR_NO_BUFFER (0x0 << 1)
	#define RX_CMPL_ERRORS_BUFFER_ERROR_DID_NOT_FIT (0x1 << 1)
	#define RX_CMPL_ERRORS_BUFFER_ERROR_NOT_ON_CHIP (0x2 << 1)
	#define RX_CMPL_ERRORS_BUFFER_ERROR_BAD_FORMAT (0x3 << 1)
	#define RX_CMPL_ERRORS_IP_CS_ERROR (0x1 << 4)
	#define RX_CMPL_ERRORS_L4_CS_ERROR (0x1 << 5)
	#define RX_CMPL_ERRORS_T_IP_CS_ERROR (0x1 << 6)
	#define RX_CMPL_ERRORS_T_L4_CS_ERROR (0x1 << 7)
	#define RX_CMPL_ERRORS_CRC_ERROR (0x1 << 8)
	#define RX_CMPL_ERRORS_T_PKT_ERROR_MASK (0x7 << 9)
	#define RX_CMPL_ERRORS_T_PKT_ERROR_NO_ERROR (0x0 << 9)
	#define RX_CMPL_ERRORS_T_PKT_ERROR_T_L3_BAD_VERSION (0x1 << 9)
	#define RX_CMPL_ERRORS_T_PKT_ERROR_T_L3_BAD_HDR_LEN (0x2 << 9)
	#define RX_CMPL_ERRORS_T_PKT_ERROR_TUNNEL_TOTAL_ERROR (0x3 << 9)
	#define RX_CMPL_ERRORS_T_PKT_ERROR_T_IP_TOTAL_ERROR (0x4 << 9)
	#define RX_CMPL_ERRORS_T_PKT_ERROR_T_UDP_TOTAL_ERROR (0x5 << 9)
	#define RX_CMPL_ERRORS_T_PKT_ERROR_T_L3_BAD_TTL (0x6 << 9)
	#define RX_CMPL_ERRORS_PKT_ERROR_MASK (0xf << 12)
	#define RX_CMPL_ERRORS_PKT_ERROR_NO_ERROR (0x0 << 12)
	#define RX_CMPL_ERRORS_PKT_ERROR_L3_BAD_VERSION (0x1 << 12)
	#define RX_CMPL_ERRORS_PKT_ERROR_L3_BAD_HDR_LEN (0x2 << 12)
	#define RX_CMPL_ERRORS_PKT_ERROR_L3_BAD_TTL (0x3 << 12)
	#define RX_CMPL_ERRORS_PKT_ERROR_IP_TOTAL_ERROR (0x4 << 12)
	#define RX_CMPL_ERRORS_PKT_ERROR_UDP_TOTAL_ERROR (0x5 << 12)
	#define RX_CMPL_ERRORS_PKT_ERROR_L4_BAD_HDR_LEN (0x6 << 12)
	#define RX_CMPL_ERRORS_PKT_ERROR_L4_BAD_HDR_LEN_TOO_SMALL (0x7 << 12)
	#define RX_CMPL_ERRORS_PKT_ERROR_L4_BAD_OPT_LEN (0x8 << 12)
	#define RX_CMPL_CFA_CODE_MASK (0xffff << 16)
	#define RX_CMPL_CFA_CODE_SFT 16

	__le32 rx_cmp_unused3;
};

#define RX_CMP_L2_ERRORS \
	cpu_to_le32(RX_CMPL_ERRORS_BUFFER_ERROR_MASK | RX_CMPL_ERRORS_CRC_ERROR)

#define RX_CMP_L4_CS_BITS \
	(cpu_to_le32(RX_CMP_FLAGS2_L4_CS_CALC | RX_CMP_FLAGS2_T_L4_CS_CALC))

#define RX_CMP_L4_CS_ERR_BITS \
	(cpu_to_le32(RX_CMPL_ERRORS_L4_CS_ERROR | RX_CMPL_ERRORS_T_L4_CS_ERROR))

#define RX_CMP_L4_CS_OK(rxcmp1) \
	(((rxcmp1)->rx_cmp_flags2 & RX_CMP_L4_CS_BITS) && \
	 !((rxcmp1)->rx_cmp_cfa_code_errors_v2 & RX_CMP_L4_CS_ERR_BITS))

#define RX_CMP_ENCAP(rxcmp1) \
	((le32_to_cpu((rxcmp1)->rx_cmp_flags2) & \
	  RX_CMP_FLAGS2_T_L4_CS_CALC) >> 3)
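
/* Illustrative sketch (hypothetical helper): deciding whether a received
 * packet's checksum can be trusted, i.e. no L2 errors and the L4 checksum
 * bits report "calculated and correct", using the macros above.
 */
static inline bool bnxt_example_rx_csum_ok(struct rx_cmp_ext *rxcmp1)
{
	if (rxcmp1->rx_cmp_cfa_code_errors_v2 & RX_CMP_L2_ERRORS)
		return false;

	return RX_CMP_L4_CS_OK(rxcmp1);
}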

struct rx_agg_cmp {
	__le32 rx_agg_cmp_len_flags_type;
	#define RX_AGG_CMP_TYPE (0x3f << 0)
	#define RX_AGG_CMP_LEN (0xffff << 16)
	#define RX_AGG_CMP_LEN_SHIFT 16

	u32 rx_agg_cmp_opaque;
	__le32 rx_agg_cmp_v;
	#define RX_AGG_CMP_V (1 << 0)

	__le32 rx_agg_cmp_unused;
};

struct rx_tpa_start_cmp {
	__le32 rx_tpa_start_cmp_len_flags_type;
	#define RX_TPA_START_CMP_TYPE (0x3f << 0)
	#define RX_TPA_START_CMP_FLAGS (0x3ff << 6)
	#define RX_TPA_START_CMP_FLAGS_SHIFT 6
	#define RX_TPA_START_CMP_FLAGS_PLACEMENT (0x7 << 7)
	#define RX_TPA_START_CMP_FLAGS_PLACEMENT_SHIFT 7
	#define RX_TPA_START_CMP_FLAGS_PLACEMENT_JUMBO (0x1 << 7)
	#define RX_TPA_START_CMP_FLAGS_PLACEMENT_HDS (0x2 << 7)
	#define RX_TPA_START_CMP_FLAGS_PLACEMENT_GRO_JUMBO (0x5 << 7)
	#define RX_TPA_START_CMP_FLAGS_PLACEMENT_GRO_HDS (0x6 << 7)
	#define RX_TPA_START_CMP_FLAGS_RSS_VALID (0x1 << 10)
	#define RX_TPA_START_CMP_FLAGS_ITYPES (0xf << 12)
	#define RX_TPA_START_CMP_FLAGS_ITYPES_SHIFT 12
	#define RX_TPA_START_CMP_FLAGS_ITYPE_TCP (0x2 << 12)
	#define RX_TPA_START_CMP_LEN (0xffff << 16)
	#define RX_TPA_START_CMP_LEN_SHIFT 16

	u32 rx_tpa_start_cmp_opaque;
	__le32 rx_tpa_start_cmp_misc_v1;
	#define RX_TPA_START_CMP_V1 (0x1 << 0)
	#define RX_TPA_START_CMP_RSS_HASH_TYPE (0x7f << 9)
	#define RX_TPA_START_CMP_RSS_HASH_TYPE_SHIFT 9
	#define RX_TPA_START_CMP_AGG_ID (0x7f << 25)
	#define RX_TPA_START_CMP_AGG_ID_SHIFT 25

	__le32 rx_tpa_start_cmp_rss_hash;
};

#define TPA_START_HASH_VALID(rx_tpa_start) \
	((rx_tpa_start)->rx_tpa_start_cmp_len_flags_type & \
	 cpu_to_le32(RX_TPA_START_CMP_FLAGS_RSS_VALID))

#define TPA_START_HASH_TYPE(rx_tpa_start) \
	(((le32_to_cpu((rx_tpa_start)->rx_tpa_start_cmp_misc_v1) & \
	   RX_TPA_START_CMP_RSS_HASH_TYPE) >> \
	  RX_TPA_START_CMP_RSS_HASH_TYPE_SHIFT) & RSS_PROFILE_ID_MASK)

#define TPA_START_AGG_ID(rx_tpa_start) \
	((le32_to_cpu((rx_tpa_start)->rx_tpa_start_cmp_misc_v1) & \
	  RX_TPA_START_CMP_AGG_ID) >> RX_TPA_START_CMP_AGG_ID_SHIFT)

struct rx_tpa_start_cmp_ext {
	__le32 rx_tpa_start_cmp_flags2;
	#define RX_TPA_START_CMP_FLAGS2_IP_CS_CALC (0x1 << 0)
	#define RX_TPA_START_CMP_FLAGS2_L4_CS_CALC (0x1 << 1)
	#define RX_TPA_START_CMP_FLAGS2_T_IP_CS_CALC (0x1 << 2)
	#define RX_TPA_START_CMP_FLAGS2_T_L4_CS_CALC (0x1 << 3)

	__le32 rx_tpa_start_cmp_metadata;
	__le32 rx_tpa_start_cmp_cfa_code_v2;
	#define RX_TPA_START_CMP_V2 (0x1 << 0)
	#define RX_TPA_START_CMP_CFA_CODE (0xffff << 16)
	#define RX_TPA_START_CMPL_CFA_CODE_SHIFT 16

	__le32 rx_tpa_start_cmp_unused5;
};

struct rx_tpa_end_cmp {
	__le32 rx_tpa_end_cmp_len_flags_type;
	#define RX_TPA_END_CMP_TYPE (0x3f << 0)
	#define RX_TPA_END_CMP_FLAGS (0x3ff << 6)
	#define RX_TPA_END_CMP_FLAGS_SHIFT 6
	#define RX_TPA_END_CMP_FLAGS_PLACEMENT (0x7 << 7)
	#define RX_TPA_END_CMP_FLAGS_PLACEMENT_SHIFT 7
	#define RX_TPA_END_CMP_FLAGS_PLACEMENT_JUMBO (0x1 << 7)
	#define RX_TPA_END_CMP_FLAGS_PLACEMENT_HDS (0x2 << 7)
	#define RX_TPA_END_CMP_FLAGS_PLACEMENT_GRO_JUMBO (0x5 << 7)
	#define RX_TPA_END_CMP_FLAGS_PLACEMENT_GRO_HDS (0x6 << 7)
	#define RX_TPA_END_CMP_FLAGS_RSS_VALID (0x1 << 10)
	#define RX_TPA_END_CMP_FLAGS_ITYPES (0xf << 12)
	#define RX_TPA_END_CMP_FLAGS_ITYPES_SHIFT 12
	#define RX_TPA_END_CMP_FLAGS_ITYPE_TCP (0x2 << 12)
	#define RX_TPA_END_CMP_LEN (0xffff << 16)
	#define RX_TPA_END_CMP_LEN_SHIFT 16

	u32 rx_tpa_end_cmp_opaque;
	__le32 rx_tpa_end_cmp_misc_v1;
	#define RX_TPA_END_CMP_V1 (0x1 << 0)
	#define RX_TPA_END_CMP_AGG_BUFS (0x3f << 1)
	#define RX_TPA_END_CMP_AGG_BUFS_SHIFT 1
	#define RX_TPA_END_CMP_TPA_SEGS (0xff << 8)
	#define RX_TPA_END_CMP_TPA_SEGS_SHIFT 8
	#define RX_TPA_END_CMP_PAYLOAD_OFFSET (0xff << 16)
	#define RX_TPA_END_CMP_PAYLOAD_OFFSET_SHIFT 16
	#define RX_TPA_END_CMP_AGG_ID (0x7f << 25)
	#define RX_TPA_END_CMP_AGG_ID_SHIFT 25

	__le32 rx_tpa_end_cmp_tsdelta;
	#define RX_TPA_END_GRO_TS (0x1 << 31)
};

#define TPA_END_AGG_ID(rx_tpa_end) \
	((le32_to_cpu((rx_tpa_end)->rx_tpa_end_cmp_misc_v1) & \
	  RX_TPA_END_CMP_AGG_ID) >> RX_TPA_END_CMP_AGG_ID_SHIFT)

#define TPA_END_TPA_SEGS(rx_tpa_end) \
	((le32_to_cpu((rx_tpa_end)->rx_tpa_end_cmp_misc_v1) & \
	  RX_TPA_END_CMP_TPA_SEGS) >> RX_TPA_END_CMP_TPA_SEGS_SHIFT)

#define RX_TPA_END_CMP_FLAGS_PLACEMENT_ANY_GRO \
	cpu_to_le32(RX_TPA_END_CMP_FLAGS_PLACEMENT_GRO_JUMBO & \
		    RX_TPA_END_CMP_FLAGS_PLACEMENT_GRO_HDS)

#define TPA_END_GRO(rx_tpa_end) \
	((rx_tpa_end)->rx_tpa_end_cmp_len_flags_type & \
	 RX_TPA_END_CMP_FLAGS_PLACEMENT_ANY_GRO)

#define TPA_END_GRO_TS(rx_tpa_end) \
	((rx_tpa_end)->rx_tpa_end_cmp_tsdelta & cpu_to_le32(RX_TPA_END_GRO_TS))
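
/* Illustrative sketch (hypothetical helper): summarizing a TPA end
 * completion with the accessors above: which aggregation slot it closes,
 * how many TCP segments were coalesced, and whether one of the GRO
 * placement variants was used.
 */
static inline void bnxt_example_tpa_end_info(struct rx_tpa_end_cmp *tpa_end,
					     u16 *agg_id, u32 *segs,
					     bool *is_gro)
{
	*agg_id = TPA_END_AGG_ID(tpa_end);
	*segs = TPA_END_TPA_SEGS(tpa_end);
	*is_gro = !!TPA_END_GRO(tpa_end);
}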

struct rx_tpa_end_cmp_ext {
	__le32 rx_tpa_end_cmp_dup_acks;
	#define RX_TPA_END_CMP_TPA_DUP_ACKS (0xf << 0)

	__le32 rx_tpa_end_cmp_seg_len;
	#define RX_TPA_END_CMP_TPA_SEG_LEN (0xffff << 0)

	__le32 rx_tpa_end_cmp_errors_v2;
	#define RX_TPA_END_CMP_V2 (0x1 << 0)
	#define RX_TPA_END_CMP_ERRORS (0x7fff << 1)
	#define RX_TPA_END_CMPL_ERRORS_SHIFT 1

	u32 rx_tpa_end_cmp_start_opaque;
};

#define DB_IDX_MASK 0xffffff
#define DB_IDX_VALID (0x1 << 26)
#define DB_IRQ_DIS (0x1 << 27)
#define DB_KEY_TX (0x0 << 28)
#define DB_KEY_RX (0x1 << 28)
#define DB_KEY_CP (0x2 << 28)
#define DB_KEY_ST (0x3 << 28)
#define DB_KEY_TX_PUSH (0x4 << 28)
#define DB_LONG_TX_PUSH (0x2 << 24)
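
/* Illustrative sketch (hypothetical helper): one plausible composition of
 * a TX doorbell word from the fields above, i.e. the key bits or'ed with
 * the producer index.  Whether DB_IDX_VALID or DB_IRQ_DIS is also set is
 * up to the caller and is not specified here.
 */
static inline u32 bnxt_example_tx_db_value(u16 prod)
{
	return DB_KEY_TX | (prod & DB_IDX_MASK);
}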

#define INVALID_HW_RING_ID ((u16)-1)

#define BNXT_RSS_HASH_TYPE_FLAG_IPV4 0x01
#define BNXT_RSS_HASH_TYPE_FLAG_TCP_IPV4 0x02
#define BNXT_RSS_HASH_TYPE_FLAG_IPV6 0x04
#define BNXT_RSS_HASH_TYPE_FLAG_TCP_IPV6 0x08

/* The hardware supports certain page sizes.  Use the supported page sizes
 * to allocate the rings.
 */
#if (PAGE_SHIFT < 12)
#define BNXT_PAGE_SHIFT 12
#elif (PAGE_SHIFT <= 13)
#define BNXT_PAGE_SHIFT PAGE_SHIFT
#elif (PAGE_SHIFT < 16)
#define BNXT_PAGE_SHIFT 13
#else
#define BNXT_PAGE_SHIFT 16
#endif

#define BNXT_PAGE_SIZE (1 << BNXT_PAGE_SHIFT)

#define BNXT_MIN_PKT_SIZE 45

#define BNXT_NUM_TESTS(bp) 0

#define BNXT_DEFAULT_RX_RING_SIZE 511
#define BNXT_DEFAULT_TX_RING_SIZE 511

#define MAX_TPA 64

#define MAX_RX_PAGES 8
#define MAX_RX_AGG_PAGES 32
#define MAX_TX_PAGES 8
#define MAX_CP_PAGES 64

#define RX_DESC_CNT (BNXT_PAGE_SIZE / sizeof(struct rx_bd))
#define TX_DESC_CNT (BNXT_PAGE_SIZE / sizeof(struct tx_bd))
#define CP_DESC_CNT (BNXT_PAGE_SIZE / sizeof(struct tx_cmp))

#define SW_RXBD_RING_SIZE (sizeof(struct bnxt_sw_rx_bd) * RX_DESC_CNT)
#define HW_RXBD_RING_SIZE (sizeof(struct rx_bd) * RX_DESC_CNT)

#define SW_RXBD_AGG_RING_SIZE (sizeof(struct bnxt_sw_rx_agg_bd) * RX_DESC_CNT)

#define SW_TXBD_RING_SIZE (sizeof(struct bnxt_sw_tx_bd) * TX_DESC_CNT)
#define HW_TXBD_RING_SIZE (sizeof(struct tx_bd) * TX_DESC_CNT)

#define HW_CMPD_RING_SIZE (sizeof(struct tx_cmp) * CP_DESC_CNT)

#define BNXT_MAX_RX_DESC_CNT (RX_DESC_CNT * MAX_RX_PAGES - 1)
#define BNXT_MAX_RX_JUM_DESC_CNT (RX_DESC_CNT * MAX_RX_AGG_PAGES - 1)
#define BNXT_MAX_TX_DESC_CNT (TX_DESC_CNT * MAX_TX_PAGES - 1)

#define RX_RING(x) (((x) & ~(RX_DESC_CNT - 1)) >> (BNXT_PAGE_SHIFT - 4))
#define RX_IDX(x) ((x) & (RX_DESC_CNT - 1))

#define TX_RING(x) (((x) & ~(TX_DESC_CNT - 1)) >> (BNXT_PAGE_SHIFT - 4))
#define TX_IDX(x) ((x) & (TX_DESC_CNT - 1))

#define CP_RING(x) (((x) & ~(CP_DESC_CNT - 1)) >> (BNXT_PAGE_SHIFT - 4))
#define CP_IDX(x) ((x) & (CP_DESC_CNT - 1))
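
/* Illustrative sketch (hypothetical helper): RX_RING()/RX_IDX() split a
 * software ring index into a page number and an offset within that page,
 * since each descriptor ring is built from BNXT_PAGE_SIZE pages.
 */
static inline struct rx_bd *bnxt_example_rx_desc(struct rx_bd *desc_pages[],
						 u16 sw_idx)
{
	return &desc_pages[RX_RING(sw_idx)][RX_IDX(sw_idx)];
}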

#define TX_CMP_VALID(txcmp, raw_cons) \
	(!!((txcmp)->tx_cmp_errors_v & cpu_to_le32(TX_CMP_V)) == \
	 !((raw_cons) & bp->cp_bit))

#define RX_CMP_VALID(rxcmp1, raw_cons) \
	(!!((rxcmp1)->rx_cmp_cfa_code_errors_v2 & cpu_to_le32(RX_CMP_V)) == \
	 !((raw_cons) & bp->cp_bit))

#define RX_AGG_CMP_VALID(agg, raw_cons) \
	(!!((agg)->rx_agg_cmp_v & cpu_to_le32(RX_AGG_CMP_V)) == \
	 !((raw_cons) & bp->cp_bit))

#define TX_CMP_TYPE(txcmp) \
	(le32_to_cpu((txcmp)->tx_cmp_flags_type) & CMP_TYPE)

#define RX_CMP_TYPE(rxcmp) \
	(le32_to_cpu((rxcmp)->rx_cmp_len_flags_type) & RX_CMP_CMP_TYPE)

#define NEXT_RX(idx) (((idx) + 1) & bp->rx_ring_mask)

#define NEXT_RX_AGG(idx) (((idx) + 1) & bp->rx_agg_ring_mask)

#define NEXT_TX(idx) (((idx) + 1) & bp->tx_ring_mask)

#define ADV_RAW_CMP(idx, n) ((idx) + (n))
#define NEXT_RAW_CMP(idx) ADV_RAW_CMP(idx, 1)
#define RING_CMP(idx) ((idx) & bp->cp_ring_mask)
#define NEXT_CMP(idx) RING_CMP(ADV_RAW_CMP(idx, 1))
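
/* Illustrative sketch (hypothetical helper): the completion ring uses a
 * toggling valid bit, so consumers compare the descriptor's V bit against
 * the phase implied by the raw consumer index.  The TX_CMP_VALID() macro
 * above expects a "bp" pointer in scope; this standalone variant takes
 * the phase bit explicitly.
 */
static inline bool bnxt_example_tx_cmp_valid(struct tx_cmp *txcmp,
					     u32 raw_cons, u32 cp_bit)
{
	return !!(txcmp->tx_cmp_errors_v & cpu_to_le32(TX_CMP_V)) ==
	       !(raw_cons & cp_bit);
}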

#define DFLT_HWRM_CMD_TIMEOUT 500
#define HWRM_CMD_TIMEOUT (bp->hwrm_cmd_timeout)
#define HWRM_RESET_TIMEOUT ((HWRM_CMD_TIMEOUT) * 4)
#define HWRM_RESP_ERR_CODE_MASK 0xffff
#define HWRM_RESP_LEN_OFFSET 4
#define HWRM_RESP_LEN_MASK 0xffff0000
#define HWRM_RESP_LEN_SFT 16
#define HWRM_RESP_VALID_MASK 0xff000000
#define HWRM_SEQ_ID_INVALID -1
#define BNXT_HWRM_REQ_MAX_SIZE 128
#define BNXT_HWRM_REQS_PER_PAGE (BNXT_PAGE_SIZE / \
				 BNXT_HWRM_REQ_MAX_SIZE)
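
/* Illustrative sketch (hypothetical helper): decoding the response length
 * from the 32-bit word found at HWRM_RESP_LEN_OFFSET in a firmware
 * response buffer, using the mask and shift above.
 */
static inline u16 bnxt_example_hwrm_resp_len(u32 len_word)
{
	return (len_word & HWRM_RESP_LEN_MASK) >> HWRM_RESP_LEN_SFT;
}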

struct bnxt_sw_tx_bd {
	struct sk_buff *skb;
	DEFINE_DMA_UNMAP_ADDR(mapping);
	u8 is_gso;
	u8 is_push;
	unsigned short nr_frags;
};

struct bnxt_sw_rx_bd {
	u8 *data;
	DEFINE_DMA_UNMAP_ADDR(mapping);
};

struct bnxt_sw_rx_agg_bd {
	struct page *page;
	dma_addr_t mapping;
};

struct bnxt_ring_struct {
	int nr_pages;
	int page_size;
	void **pg_arr;
	dma_addr_t *dma_arr;

	__le64 *pg_tbl;
	dma_addr_t pg_tbl_map;

	int vmem_size;
	void **vmem;

	u16 fw_ring_id; /* Ring id filled by Chimp FW */
	u8 queue_id;
};

struct tx_push_bd {
	__le32 doorbell;
	__le32 tx_bd_len_flags_type;
	u32 tx_bd_opaque;
	struct tx_bd_ext txbd2;
};

struct tx_push_buffer {
	struct tx_push_bd push_bd;
	u32 data[25];
};

struct bnxt_tx_ring_info {
	struct bnxt_napi *bnapi;
	u16 tx_prod;
	u16 tx_cons;
	void __iomem *tx_doorbell;

	struct tx_bd *tx_desc_ring[MAX_TX_PAGES];
	struct bnxt_sw_tx_bd *tx_buf_ring;

	dma_addr_t tx_desc_mapping[MAX_TX_PAGES];

	struct tx_push_buffer *tx_push;
	dma_addr_t tx_push_mapping;
	__le64 data_mapping;

	#define BNXT_DEV_STATE_CLOSING 0x1
	u32 dev_state;

	struct bnxt_ring_struct tx_ring_struct;
};

struct bnxt_tpa_info {
	u8 *data;
	dma_addr_t mapping;
	u16 len;
	unsigned short gso_type;
	u32 flags2;
	u32 metadata;
	enum pkt_hash_types hash_type;
	u32 rss_hash;
};

struct bnxt_rx_ring_info {
	struct bnxt_napi *bnapi;
	u16 rx_prod;
	u16 rx_agg_prod;
	u16 rx_sw_agg_prod;
	void __iomem *rx_doorbell;
	void __iomem *rx_agg_doorbell;

	struct rx_bd *rx_desc_ring[MAX_RX_PAGES];
	struct bnxt_sw_rx_bd *rx_buf_ring;

	struct rx_bd *rx_agg_desc_ring[MAX_RX_AGG_PAGES];
	struct bnxt_sw_rx_agg_bd *rx_agg_ring;

	unsigned long *rx_agg_bmap;
	u16 rx_agg_bmap_size;

	dma_addr_t rx_desc_mapping[MAX_RX_PAGES];
	dma_addr_t rx_agg_desc_mapping[MAX_RX_AGG_PAGES];

	struct bnxt_tpa_info *rx_tpa;

	struct bnxt_ring_struct rx_ring_struct;
	struct bnxt_ring_struct rx_agg_ring_struct;
};

struct bnxt_cp_ring_info {
	u32 cp_raw_cons;
	void __iomem *cp_doorbell;

	struct tx_cmp *cp_desc_ring[MAX_CP_PAGES];
	dma_addr_t cp_desc_mapping[MAX_CP_PAGES];

	struct ctx_hw_stats *hw_stats;
	dma_addr_t hw_stats_map;
	u32 hw_stats_ctx_id;
	u64 rx_l4_csum_errors;

	struct bnxt_ring_struct cp_ring_struct;
};

struct bnxt_napi {
	struct napi_struct napi;
	struct bnxt *bp;

	int index;
	struct bnxt_cp_ring_info cp_ring;
	struct bnxt_rx_ring_info *rx_ring;
	struct bnxt_tx_ring_info *tx_ring;

#ifdef CONFIG_NET_RX_BUSY_POLL
	atomic_t poll_state;
#endif
};

#ifdef CONFIG_NET_RX_BUSY_POLL
enum bnxt_poll_state_t {
	BNXT_STATE_IDLE = 0,
	BNXT_STATE_NAPI,
	BNXT_STATE_POLL,
	BNXT_STATE_DISABLE,
};
#endif

struct bnxt_irq {
	irq_handler_t handler;
	unsigned int vector;
	u8 requested;
	char name[IFNAMSIZ + 2];
};

#define HWRM_RING_ALLOC_TX 0x1
#define HWRM_RING_ALLOC_RX 0x2
#define HWRM_RING_ALLOC_AGG 0x4
#define HWRM_RING_ALLOC_CMPL 0x8

#define INVALID_STATS_CTX_ID -1

struct bnxt_ring_grp_info {
	u16 fw_stats_ctx;
	u16 fw_grp_id;
	u16 rx_fw_ring_id;
	u16 agg_fw_ring_id;
	u16 cp_fw_ring_id;
};

struct bnxt_vnic_info {
	u16 fw_vnic_id; /* returned by Chimp during alloc */
	u16 fw_rss_cos_lb_ctx;
	u16 fw_l2_ctx_id;
	#define BNXT_MAX_UC_ADDRS 4
	__le64 fw_l2_filter_id[BNXT_MAX_UC_ADDRS]; /* index 0 always dev_addr */
	u16 uc_filter_count;
	u8 *uc_list;

	u16 *fw_grp_ids;
	u16 hash_type;
	dma_addr_t rss_table_dma_addr;
	__le16 *rss_table;
	dma_addr_t rss_hash_key_dma_addr;
	u64 *rss_hash_key;
	u32 rx_mask;

	u8 *mc_list;
	int mc_list_size;
	int mc_list_count;
	dma_addr_t mc_list_mapping;
	#define BNXT_MAX_MC_ADDRS 16

	u32 flags;
	#define BNXT_VNIC_RSS_FLAG 1
	#define BNXT_VNIC_RFS_FLAG 2
	#define BNXT_VNIC_MCAST_FLAG 4
	#define BNXT_VNIC_UCAST_FLAG 8
};

#if defined(CONFIG_BNXT_SRIOV)
struct bnxt_vf_info {
	u16 fw_fid;
	u8 mac_addr[ETH_ALEN];
	u16 max_rsscos_ctxs;
	u16 max_cp_rings;
	u16 max_tx_rings;
	u16 max_rx_rings;
	u16 max_hw_ring_grps;
	u16 max_l2_ctxs;
	u16 max_irqs;
	u16 max_vnics;
	u16 max_stat_ctxs;
	u16 vlan;
	u32 flags;
	#define BNXT_VF_QOS 0x1
	#define BNXT_VF_SPOOFCHK 0x2
	#define BNXT_VF_LINK_FORCED 0x4
	#define BNXT_VF_LINK_UP 0x8
	u32 func_flags; /* func cfg flags */
	u32 min_tx_rate;
	u32 max_tx_rate;
	void *hwrm_cmd_req_addr;
	dma_addr_t hwrm_cmd_req_dma_addr;
};
#endif

struct bnxt_pf_info {
	#define BNXT_FIRST_PF_FID 1
	#define BNXT_FIRST_VF_FID 128
	u32 fw_fid;
	u8 port_id;
	u8 mac_addr[ETH_ALEN];
	u16 max_rsscos_ctxs;
	u16 max_cp_rings;
	u16 max_tx_rings; /* HW assigned max tx rings for this PF */
	u16 max_rx_rings; /* HW assigned max rx rings for this PF */
	u16 max_hw_ring_grps;
	u16 max_irqs;
	u16 max_l2_ctxs;
	u16 max_vnics;
	u16 max_stat_ctxs;
	u32 first_vf_id;
	u16 active_vfs;
	u16 max_vfs;
	u32 max_encap_records;
	u32 max_decap_records;
	u32 max_tx_em_flows;
	u32 max_tx_wm_flows;
	u32 max_rx_em_flows;
	u32 max_rx_wm_flows;
	unsigned long *vf_event_bmap;
	u16 hwrm_cmd_req_pages;
	void *hwrm_cmd_req_addr[4];
	dma_addr_t hwrm_cmd_req_dma_addr[4];
	struct bnxt_vf_info *vf;
};

struct bnxt_ntuple_filter {
	struct hlist_node hash;
	u8 src_mac_addr[ETH_ALEN];
	struct flow_keys fkeys;
	__le64 filter_id;
	u16 sw_id;
	u16 rxq;
	u32 flow_id;
	unsigned long state;
	#define BNXT_FLTR_VALID 0
	#define BNXT_FLTR_UPDATE 1
};

struct bnxt_link_info {
	u8 media_type;
	u8 transceiver;
	u8 phy_addr;
	u8 phy_link_status;
	#define BNXT_LINK_NO_LINK PORT_PHY_QCFG_RESP_LINK_NO_LINK
	#define BNXT_LINK_SIGNAL PORT_PHY_QCFG_RESP_LINK_SIGNAL
	#define BNXT_LINK_LINK PORT_PHY_QCFG_RESP_LINK_LINK
	u8 wire_speed;
	u8 loop_back;
	u8 link_up;
	u8 duplex;
	#define BNXT_LINK_DUPLEX_HALF PORT_PHY_QCFG_RESP_DUPLEX_HALF
	#define BNXT_LINK_DUPLEX_FULL PORT_PHY_QCFG_RESP_DUPLEX_FULL
	u8 pause;
	#define BNXT_LINK_PAUSE_TX PORT_PHY_QCFG_RESP_PAUSE_TX
	#define BNXT_LINK_PAUSE_RX PORT_PHY_QCFG_RESP_PAUSE_RX
	#define BNXT_LINK_PAUSE_BOTH (PORT_PHY_QCFG_RESP_PAUSE_RX | \
				      PORT_PHY_QCFG_RESP_PAUSE_TX)
	u8 lp_pause;
	u8 auto_pause_setting;
	u8 force_pause_setting;
	u8 duplex_setting;
	u8 auto_mode;
	#define BNXT_AUTO_MODE(mode) ((mode) > BNXT_LINK_AUTO_NONE && \
				      (mode) <= BNXT_LINK_AUTO_MSK)
	#define BNXT_LINK_AUTO_NONE PORT_PHY_QCFG_RESP_AUTO_MODE_NONE
	#define BNXT_LINK_AUTO_ALLSPDS PORT_PHY_QCFG_RESP_AUTO_MODE_ALL_SPEEDS
	#define BNXT_LINK_AUTO_ONESPD PORT_PHY_QCFG_RESP_AUTO_MODE_ONE_SPEED
	#define BNXT_LINK_AUTO_ONEORBELOW PORT_PHY_QCFG_RESP_AUTO_MODE_ONE_OR_BELOW
	#define BNXT_LINK_AUTO_MSK PORT_PHY_QCFG_RESP_AUTO_MODE_MASK
	#define PHY_VER_LEN 3
	u8 phy_ver[PHY_VER_LEN];
	u16 link_speed;
	#define BNXT_LINK_SPEED_100MB PORT_PHY_QCFG_RESP_LINK_SPEED_100MB
	#define BNXT_LINK_SPEED_1GB PORT_PHY_QCFG_RESP_LINK_SPEED_1GB
	#define BNXT_LINK_SPEED_2GB PORT_PHY_QCFG_RESP_LINK_SPEED_2GB
	#define BNXT_LINK_SPEED_2_5GB PORT_PHY_QCFG_RESP_LINK_SPEED_2_5GB
	#define BNXT_LINK_SPEED_10GB PORT_PHY_QCFG_RESP_LINK_SPEED_10GB
	#define BNXT_LINK_SPEED_20GB PORT_PHY_QCFG_RESP_LINK_SPEED_20GB
	#define BNXT_LINK_SPEED_25GB PORT_PHY_QCFG_RESP_LINK_SPEED_25GB
	#define BNXT_LINK_SPEED_40GB PORT_PHY_QCFG_RESP_LINK_SPEED_40GB
	#define BNXT_LINK_SPEED_50GB PORT_PHY_QCFG_RESP_LINK_SPEED_50GB
	u16 support_speeds;
	u16 auto_link_speeds;
	#define BNXT_LINK_SPEED_MSK_100MB PORT_PHY_QCFG_RESP_SUPPORT_SPEEDS_100MB
	#define BNXT_LINK_SPEED_MSK_1GB PORT_PHY_QCFG_RESP_SUPPORT_SPEEDS_1GB
	#define BNXT_LINK_SPEED_MSK_2GB PORT_PHY_QCFG_RESP_SUPPORT_SPEEDS_2GB
	#define BNXT_LINK_SPEED_MSK_10GB PORT_PHY_QCFG_RESP_SUPPORT_SPEEDS_10GB
	#define BNXT_LINK_SPEED_MSK_2_5GB PORT_PHY_QCFG_RESP_SUPPORT_SPEEDS_2_5GB
	#define BNXT_LINK_SPEED_MSK_20GB PORT_PHY_QCFG_RESP_SUPPORT_SPEEDS_20GB
	#define BNXT_LINK_SPEED_MSK_25GB PORT_PHY_QCFG_RESP_SUPPORT_SPEEDS_25GB
	#define BNXT_LINK_SPEED_MSK_40GB PORT_PHY_QCFG_RESP_SUPPORT_SPEEDS_40GB
	#define BNXT_LINK_SPEED_MSK_50GB PORT_PHY_QCFG_RESP_SUPPORT_SPEEDS_50GB
	u16 lp_auto_link_speeds;
	u16 auto_link_speed;
	u16 force_link_speed;
	u32 preemphasis;

	/* copy of requested setting from ethtool cmd */
	u8 autoneg;
	#define BNXT_AUTONEG_SPEED 1
	#define BNXT_AUTONEG_FLOW_CTRL 2
	u8 req_duplex;
	u8 req_flow_ctrl;
	u16 req_link_speed;
	u32 advertising;
	bool force_link_chng;

	/* a copy of phy_qcfg output used to report link
	 * info to VF
	 */
	struct hwrm_port_phy_qcfg_output phy_qcfg_resp;
};

#define BNXT_MAX_QUEUE 8

struct bnxt_queue_info {
	u8 queue_id;
	u8 queue_profile;
};

#define BNXT_GRCPF_REG_WINDOW_BASE_OUT 0x400
#define BNXT_CAG_REG_LEGACY_INT_STATUS 0x4014
#define BNXT_CAG_REG_BASE 0x300000

struct bnxt {
	void __iomem *bar0;
	void __iomem *bar1;
	void __iomem *bar2;

	u32 reg_base;

	struct net_device *dev;
	struct pci_dev *pdev;

	atomic_t intr_sem;

	u32 flags;
	#define BNXT_FLAG_DCB_ENABLED 0x1
	#define BNXT_FLAG_VF 0x2
	#define BNXT_FLAG_LRO 0x4
#ifdef CONFIG_INET
	#define BNXT_FLAG_GRO 0x8
#else
	/* Cannot support hardware GRO if CONFIG_INET is not set */
	#define BNXT_FLAG_GRO 0x0
#endif
	#define BNXT_FLAG_TPA (BNXT_FLAG_LRO | BNXT_FLAG_GRO)
	#define BNXT_FLAG_JUMBO 0x10
	#define BNXT_FLAG_STRIP_VLAN 0x20
	#define BNXT_FLAG_AGG_RINGS (BNXT_FLAG_JUMBO | BNXT_FLAG_GRO | \
				     BNXT_FLAG_LRO)
	#define BNXT_FLAG_USING_MSIX 0x40
	#define BNXT_FLAG_MSIX_CAP 0x80
	#define BNXT_FLAG_RFS 0x100
	#define BNXT_FLAG_SHARED_RINGS 0x200
	#define BNXT_FLAG_PORT_STATS 0x400

	#define BNXT_FLAG_ALL_CONFIG_FEATS (BNXT_FLAG_TPA | \
					    BNXT_FLAG_RFS | \
					    BNXT_FLAG_STRIP_VLAN)

	#define BNXT_PF(bp) (!((bp)->flags & BNXT_FLAG_VF))
	#define BNXT_VF(bp) ((bp)->flags & BNXT_FLAG_VF)

	struct bnxt_napi **bnapi;

	struct bnxt_rx_ring_info *rx_ring;
	struct bnxt_tx_ring_info *tx_ring;

	u32 rx_buf_size;
	u32 rx_buf_use_size; /* useable size */
	u32 rx_ring_size;
	u32 rx_agg_ring_size;
	u32 rx_copy_thresh;
	u32 rx_ring_mask;
	u32 rx_agg_ring_mask;
	int rx_nr_pages;
	int rx_agg_nr_pages;
	int rx_nr_rings;
	int rsscos_nr_ctxs;

	u32 tx_ring_size;
	u32 tx_ring_mask;
	int tx_nr_pages;
	int tx_nr_rings;
	int tx_nr_rings_per_tc;

	int tx_wake_thresh;
	int tx_push_thresh;
	int tx_push_size;

	u32 cp_ring_size;
	u32 cp_ring_mask;
	u32 cp_bit;
	int cp_nr_pages;
	int cp_nr_rings;

	int num_stat_ctxs;

	/* grp_info indexed by completion ring index */
	struct bnxt_ring_grp_info *grp_info;
	struct bnxt_vnic_info *vnic_info;
	int nr_vnics;

	u8 max_tc;
	struct bnxt_queue_info q_info[BNXT_MAX_QUEUE];

	unsigned int current_interval;
	#define BNXT_TIMER_INTERVAL HZ

	struct timer_list timer;

	unsigned long state;
	#define BNXT_STATE_OPEN 0
	#define BNXT_STATE_IN_SP_TASK 1

	struct bnxt_irq *irq_tbl;
	u8 mac_addr[ETH_ALEN];

	u32 msg_enable;

	u16 hwrm_cmd_seq;
	u32 hwrm_intr_seq_id;
	void *hwrm_cmd_resp_addr;
	dma_addr_t hwrm_cmd_resp_dma_addr;
	void *hwrm_dbg_resp_addr;
	dma_addr_t hwrm_dbg_resp_dma_addr;
	#define HWRM_DBG_REG_BUF_SIZE 128

	struct rx_port_stats *hw_rx_port_stats;
	struct tx_port_stats *hw_tx_port_stats;
	dma_addr_t hw_rx_port_stats_map;
	dma_addr_t hw_tx_port_stats_map;
	int hw_port_stats_size;

	int hwrm_cmd_timeout;
	struct mutex hwrm_cmd_lock; /* serialize hwrm messages */
	struct hwrm_ver_get_output ver_resp;
	#define FW_VER_STR_LEN 32
	#define BC_HWRM_STR_LEN 21
	#define PHY_VER_STR_LEN (FW_VER_STR_LEN - BC_HWRM_STR_LEN)
	char fw_ver_str[FW_VER_STR_LEN];

	__be16 vxlan_port;
	u8 vxlan_port_cnt;
	__le16 vxlan_fw_dst_port_id;
	u8 nge_port_cnt;
	__le16 nge_fw_dst_port_id;

	u16 rx_coal_ticks;
	u16 rx_coal_ticks_irq;
	u16 rx_coal_bufs;
	u16 rx_coal_bufs_irq;
	u16 tx_coal_ticks;
	u16 tx_coal_ticks_irq;
	u16 tx_coal_bufs;
	u16 tx_coal_bufs_irq;

	#define BNXT_USEC_TO_COAL_TIMER(x) ((x) * 25 / 2)

	struct work_struct sp_task;
	unsigned long sp_event;
	#define BNXT_RX_MASK_SP_EVENT 0
	#define BNXT_RX_NTP_FLTR_SP_EVENT 1
	#define BNXT_LINK_CHNG_SP_EVENT 2
	#define BNXT_HWRM_EXEC_FWD_REQ_SP_EVENT 3
	#define BNXT_VXLAN_ADD_PORT_SP_EVENT 4
	#define BNXT_VXLAN_DEL_PORT_SP_EVENT 5
	#define BNXT_RESET_TASK_SP_EVENT 6
	#define BNXT_RST_RING_SP_EVENT 7
	#define BNXT_HWRM_PF_UNLOAD_SP_EVENT 8
	#define BNXT_PERIODIC_STATS_SP_EVENT 9

	struct bnxt_pf_info pf;
#ifdef CONFIG_BNXT_SRIOV
	int nr_vfs;
	struct bnxt_vf_info vf;
	wait_queue_head_t sriov_cfg_wait;
	bool sriov_cfg;
	#define BNXT_SRIOV_CFG_WAIT_TMO msecs_to_jiffies(10000)
#endif

	#define BNXT_NTP_FLTR_MAX_FLTR 4096
	#define BNXT_NTP_FLTR_HASH_SIZE 512
	#define BNXT_NTP_FLTR_HASH_MASK (BNXT_NTP_FLTR_HASH_SIZE - 1)
	struct hlist_head ntp_fltr_hash_tbl[BNXT_NTP_FLTR_HASH_SIZE];
	spinlock_t ntp_fltr_lock; /* for hash table add, del */

	unsigned long *ntp_fltr_bmap;
	int ntp_fltr_count;

	struct bnxt_link_info link_info;
};
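
/* Illustrative sketch (hypothetical helper): BNXT_USEC_TO_COAL_TIMER()
 * scales microseconds by 25/2, i.e. 12.5 ticks per microsecond (an 80 ns
 * tick implied by the macro itself), e.g. 100 usec -> 1250.
 */
static inline u16 bnxt_example_coal_timer(u16 usec)
{
	return BNXT_USEC_TO_COAL_TIMER(usec);
}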

#ifdef CONFIG_NET_RX_BUSY_POLL
static inline void bnxt_enable_poll(struct bnxt_napi *bnapi)
{
	atomic_set(&bnapi->poll_state, BNXT_STATE_IDLE);
}

/* called from the NAPI poll routine to get ownership of a bnapi */
static inline bool bnxt_lock_napi(struct bnxt_napi *bnapi)
{
	int rc = atomic_cmpxchg(&bnapi->poll_state, BNXT_STATE_IDLE,
				BNXT_STATE_NAPI);

	return rc == BNXT_STATE_IDLE;
}

static inline void bnxt_unlock_napi(struct bnxt_napi *bnapi)
{
	atomic_set(&bnapi->poll_state, BNXT_STATE_IDLE);
}

/* called from the busy poll routine to get ownership of a bnapi */
static inline bool bnxt_lock_poll(struct bnxt_napi *bnapi)
{
	int rc = atomic_cmpxchg(&bnapi->poll_state, BNXT_STATE_IDLE,
				BNXT_STATE_POLL);

	return rc == BNXT_STATE_IDLE;
}

static inline void bnxt_unlock_poll(struct bnxt_napi *bnapi)
{
	atomic_set(&bnapi->poll_state, BNXT_STATE_IDLE);
}

static inline bool bnxt_busy_polling(struct bnxt_napi *bnapi)
{
	return atomic_read(&bnapi->poll_state) == BNXT_STATE_POLL;
}

static inline void bnxt_disable_poll(struct bnxt_napi *bnapi)
{
	int old;

	while (1) {
		old = atomic_cmpxchg(&bnapi->poll_state, BNXT_STATE_IDLE,
				     BNXT_STATE_DISABLE);
		if (old == BNXT_STATE_IDLE)
			break;
		usleep_range(500, 5000);
	}
}

#else

static inline void bnxt_enable_poll(struct bnxt_napi *bnapi)
{
}

static inline bool bnxt_lock_napi(struct bnxt_napi *bnapi)
{
	return true;
}

static inline void bnxt_unlock_napi(struct bnxt_napi *bnapi)
{
}

static inline bool bnxt_lock_poll(struct bnxt_napi *bnapi)
{
	return false;
}

static inline void bnxt_unlock_poll(struct bnxt_napi *bnapi)
{
}

static inline bool bnxt_busy_polling(struct bnxt_napi *bnapi)
{
	return false;
}

static inline void bnxt_disable_poll(struct bnxt_napi *bnapi)
{
}

#endif
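
/* Illustrative sketch (usage pattern, hypothetical function): a NAPI poll
 * handler is expected to take ownership of the bnapi before processing
 * completions and release it afterwards, so that busy polling and NAPI do
 * not run concurrently on the same ring.
 */
static inline int bnxt_example_poll(struct bnxt_napi *bnapi, int budget)
{
	int work_done = 0;

	if (!bnxt_lock_napi(bnapi))
		return budget;	/* busy poll owns the ring; try again later */

	/* ... process up to "budget" completions here ... */

	bnxt_unlock_napi(bnapi);
	return work_done;
}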

void bnxt_set_ring_params(struct bnxt *);
void bnxt_hwrm_cmd_hdr_init(struct bnxt *, void *, u16, u16, u16);
int _hwrm_send_message(struct bnxt *, void *, u32, int);
int hwrm_send_message(struct bnxt *, void *, u32, int);
int hwrm_send_message_silent(struct bnxt *, void *, u32, int);
int bnxt_hwrm_set_coal(struct bnxt *);
int bnxt_hwrm_func_qcaps(struct bnxt *);
int bnxt_hwrm_set_pause(struct bnxt *);
int bnxt_hwrm_set_link_setting(struct bnxt *, bool);
int bnxt_open_nic(struct bnxt *, bool, bool);
int bnxt_close_nic(struct bnxt *, bool, bool);
int bnxt_get_max_rings(struct bnxt *, int *, int *, bool);

#endif