/* QLogic qed NIC Driver
 * Copyright (c) 2015-2017 QLogic Corporation
 *
 * This software is available to you under a choice of one of two
 * licenses. You may choose to be licensed under the terms of the GNU
 * General Public License (GPL) Version 2, available from the file
 * COPYING in the main directory of this source tree, or the
 * OpenIB.org BSD license below:
 *
 * Redistribution and use in source and binary forms, with or
 * without modification, are permitted provided that the following
 * conditions are met:
 *
 * - Redistributions of source code must retain the above
 *   copyright notice, this list of conditions and the following
 *   disclaimer.
 *
 * - Redistributions in binary form must reproduce the above
 *   copyright notice, this list of conditions and the following
 *   disclaimer in the documentation and/or other materials
 *   provided with the distribution.
 *
 * THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND,
 * EXPRESS OR IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF
 * MERCHANTABILITY, FITNESS FOR A PARTICULAR PURPOSE AND
 * NONINFRINGEMENT. IN NO EVENT SHALL THE AUTHORS OR COPYRIGHT HOLDERS
 * BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER LIABILITY, WHETHER IN AN
 * ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING FROM, OUT OF OR IN
 * CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER DEALINGS IN THE
 * SOFTWARE.
 */
#ifndef _QED_L2_H
#define _QED_L2_H
#include <linux/types.h>
#include <linux/io.h>
#include <linux/kernel.h>
#include <linux/slab.h>
#include <linux/qed/qed_eth_if.h>
#include "qed.h"
#include "qed_hw.h"
#include "qed_sp.h"
  42. struct qed_rss_params {
  43. u8 update_rss_config;
  44. u8 rss_enable;
  45. u8 rss_eng_id;
  46. u8 update_rss_capabilities;
  47. u8 update_rss_ind_table;
  48. u8 update_rss_key;
  49. u8 rss_caps;
  50. u8 rss_table_size_log;
  51. /* Indirection table consist of rx queue handles */
  52. void *rss_ind_table[QED_RSS_IND_TABLE_SIZE];
  53. u32 rss_key[QED_RSS_KEY_SIZE];
  54. };
  55. struct qed_sge_tpa_params {
  56. u8 max_buffers_per_cqe;
  57. u8 update_tpa_en_flg;
  58. u8 tpa_ipv4_en_flg;
  59. u8 tpa_ipv6_en_flg;
  60. u8 tpa_ipv4_tunn_en_flg;
  61. u8 tpa_ipv6_tunn_en_flg;
  62. u8 update_tpa_param_flg;
  63. u8 tpa_pkt_split_flg;
  64. u8 tpa_hdr_data_split_flg;
  65. u8 tpa_gro_consistent_flg;
  66. u8 tpa_max_aggs_num;
  67. u16 tpa_max_size;
  68. u16 tpa_min_size_to_start;
  69. u16 tpa_min_size_to_cont;
  70. };
  71. enum qed_filter_opcode {
  72. QED_FILTER_ADD,
  73. QED_FILTER_REMOVE,
  74. QED_FILTER_MOVE,
  75. QED_FILTER_REPLACE, /* Delete all MACs and add new one instead */
  76. QED_FILTER_FLUSH, /* Removes all filters */
  77. };
  78. enum qed_filter_ucast_type {
  79. QED_FILTER_MAC,
  80. QED_FILTER_VLAN,
  81. QED_FILTER_MAC_VLAN,
  82. QED_FILTER_INNER_MAC,
  83. QED_FILTER_INNER_VLAN,
  84. QED_FILTER_INNER_PAIR,
  85. QED_FILTER_INNER_MAC_VNI_PAIR,
  86. QED_FILTER_MAC_VNI_PAIR,
  87. QED_FILTER_VNI,
  88. };
  89. struct qed_filter_ucast {
  90. enum qed_filter_opcode opcode;
  91. enum qed_filter_ucast_type type;
  92. u8 is_rx_filter;
  93. u8 is_tx_filter;
  94. u8 vport_to_add_to;
  95. u8 vport_to_remove_from;
  96. unsigned char mac[ETH_ALEN];
  97. u8 assert_on_error;
  98. u16 vlan;
  99. u32 vni;
  100. };
  101. struct qed_filter_mcast {
  102. /* MOVE is not supported for multicast */
  103. enum qed_filter_opcode opcode;
  104. u8 vport_to_add_to;
  105. u8 vport_to_remove_from;
  106. u8 num_mc_addrs;
  107. #define QED_MAX_MC_ADDRS 64
  108. unsigned char mac[QED_MAX_MC_ADDRS][ETH_ALEN];
  109. };
  110. /**
  111. * @brief qed_eth_rx_queue_stop - This ramrod closes an Rx queue
  112. *
  113. * @param p_hwfn
  114. * @param p_rxq Handler of queue to close
  115. * @param eq_completion_only If True completion will be on
  116. * EQe, if False completion will be
  117. * on EQe if p_hwfn opaque
  118. * different from the RXQ opaque
  119. * otherwise on CQe.
  120. * @param cqe_completion If True completion will be
  121. * receive on CQe.
  122. * @return int
  123. */
  124. int
  125. qed_eth_rx_queue_stop(struct qed_hwfn *p_hwfn,
  126. void *p_rxq,
  127. bool eq_completion_only, bool cqe_completion);
  128. /**
  129. * @brief qed_eth_tx_queue_stop - closes a Tx queue
  130. *
  131. * @param p_hwfn
  132. * @param p_txq - handle to Tx queue needed to be closed
  133. *
  134. * @return int
  135. */
  136. int qed_eth_tx_queue_stop(struct qed_hwfn *p_hwfn, void *p_txq);
  137. enum qed_tpa_mode {
  138. QED_TPA_MODE_NONE,
  139. QED_TPA_MODE_UNUSED,
  140. QED_TPA_MODE_GRO,
  141. QED_TPA_MODE_MAX
  142. };
  143. struct qed_sp_vport_start_params {
  144. enum qed_tpa_mode tpa_mode;
  145. bool remove_inner_vlan;
  146. bool tx_switching;
  147. bool handle_ptp_pkts;
  148. bool only_untagged;
  149. bool drop_ttl0;
  150. u8 max_buffers_per_cqe;
  151. u32 concrete_fid;
  152. u16 opaque_fid;
  153. u8 vport_id;
  154. u16 mtu;
  155. bool check_mac;
  156. bool check_ethtype;
  157. };
  158. int qed_sp_eth_vport_start(struct qed_hwfn *p_hwfn,
  159. struct qed_sp_vport_start_params *p_params);
  160. struct qed_filter_accept_flags {
  161. u8 update_rx_mode_config;
  162. u8 update_tx_mode_config;
  163. u8 rx_accept_filter;
  164. u8 tx_accept_filter;
  165. #define QED_ACCEPT_NONE 0x01
  166. #define QED_ACCEPT_UCAST_MATCHED 0x02
  167. #define QED_ACCEPT_UCAST_UNMATCHED 0x04
  168. #define QED_ACCEPT_MCAST_MATCHED 0x08
  169. #define QED_ACCEPT_MCAST_UNMATCHED 0x10
  170. #define QED_ACCEPT_BCAST 0x20
  171. };
  172. struct qed_arfs_config_params {
  173. bool tcp;
  174. bool udp;
  175. bool ipv4;
  176. bool ipv6;
  177. bool arfs_enable;
  178. };
  179. struct qed_sp_vport_update_params {
  180. u16 opaque_fid;
  181. u8 vport_id;
  182. u8 update_vport_active_rx_flg;
  183. u8 vport_active_rx_flg;
  184. u8 update_vport_active_tx_flg;
  185. u8 vport_active_tx_flg;
  186. u8 update_inner_vlan_removal_flg;
  187. u8 inner_vlan_removal_flg;
  188. u8 silent_vlan_removal_flg;
  189. u8 update_default_vlan_enable_flg;
  190. u8 default_vlan_enable_flg;
  191. u8 update_default_vlan_flg;
  192. u16 default_vlan;
  193. u8 update_tx_switching_flg;
  194. u8 tx_switching_flg;
  195. u8 update_approx_mcast_flg;
  196. u8 update_anti_spoofing_en_flg;
  197. u8 anti_spoofing_en;
  198. u8 update_accept_any_vlan_flg;
  199. u8 accept_any_vlan;
  200. unsigned long bins[8];
  201. struct qed_rss_params *rss_params;
  202. struct qed_filter_accept_flags accept_flags;
  203. struct qed_sge_tpa_params *sge_tpa_params;
  204. };
  205. int qed_sp_vport_update(struct qed_hwfn *p_hwfn,
  206. struct qed_sp_vport_update_params *p_params,
  207. enum spq_mode comp_mode,
  208. struct qed_spq_comp_cb *p_comp_data);
  209. /**
  210. * @brief qed_sp_vport_stop -
  211. *
  212. * This ramrod closes a VPort after all its RX and TX queues are terminated.
  213. * An Assert is generated if any queues are left open.
  214. *
  215. * @param p_hwfn
  216. * @param opaque_fid
  217. * @param vport_id VPort ID
  218. *
  219. * @return int
  220. */
  221. int qed_sp_vport_stop(struct qed_hwfn *p_hwfn, u16 opaque_fid, u8 vport_id);
  222. int qed_sp_eth_filter_ucast(struct qed_hwfn *p_hwfn,
  223. u16 opaque_fid,
  224. struct qed_filter_ucast *p_filter_cmd,
  225. enum spq_mode comp_mode,
  226. struct qed_spq_comp_cb *p_comp_data);
  227. /**
  228. * @brief qed_sp_rx_eth_queues_update -
  229. *
  230. * This ramrod updates an RX queue. It is used for setting the active state
  231. * of the queue and updating the TPA and SGE parameters.
  232. *
  233. * @note At the moment - only used by non-linux VFs.
  234. *
  235. * @param p_hwfn
  236. * @param pp_rxq_handlers An array of queue handlers to be updated.
  237. * @param num_rxqs number of queues to update.
  238. * @param complete_cqe_flg Post completion to the CQE Ring if set
  239. * @param complete_event_flg Post completion to the Event Ring if set
  240. * @param comp_mode
  241. * @param p_comp_data
  242. *
  243. * @return int
  244. */
  245. int
  246. qed_sp_eth_rx_queues_update(struct qed_hwfn *p_hwfn,
  247. void **pp_rxq_handlers,
  248. u8 num_rxqs,
  249. u8 complete_cqe_flg,
  250. u8 complete_event_flg,
  251. enum spq_mode comp_mode,
  252. struct qed_spq_comp_cb *p_comp_data);
  253. void qed_get_vport_stats(struct qed_dev *cdev, struct qed_eth_stats *stats);
  254. void qed_reset_vport_stats(struct qed_dev *cdev);
  255. #define MAX_QUEUES_PER_QZONE (sizeof(unsigned long) * 8)
  256. #define QED_QUEUE_CID_SELF (0xff)
  257. /* Almost identical to the qed_queue_start_common_params,
  258. * but here we maintain the SB index in IGU CAM.
  259. */
  260. struct qed_queue_cid_params {
  261. u8 vport_id;
  262. u16 queue_id;
  263. u8 stats_id;
  264. };
  265. /* Additional parameters required for initialization of the queue_cid
  266. * and are relevant only for a PF initializing one for its VFs.
  267. */
  268. struct qed_queue_cid_vf_params {
  269. /* Should match the VF's relative index */
  270. u8 vfid;
  271. /* 0-based queue index. Should reflect the relative qzone the
  272. * VF thinks is associated with it [in its range].
  273. */
  274. u8 vf_qid;
  275. /* Indicates a VF is legacy, making it differ in several things:
  276. * - Producers would be placed in a different place.
  277. * - Makes assumptions regarding the CIDs.
  278. */
  279. u8 vf_legacy;
  280. u8 qid_usage_idx;
  281. };
  282. struct qed_queue_cid {
  283. /* For stats-id, the `rel' is actually absolute as well */
  284. struct qed_queue_cid_params rel;
  285. struct qed_queue_cid_params abs;
  286. /* These have no 'relative' meaning */
  287. u16 sb_igu_id;
  288. u8 sb_idx;
  289. u32 cid;
  290. u16 opaque_fid;
  291. bool b_is_rx;
  292. /* VFs queues are mapped differently, so we need to know the
  293. * relative queue associated with them [0-based].
  294. * Notice this is relevant on the *PF* queue-cid of its VF's queues,
  295. * and not on the VF itself.
  296. */
  297. u8 vfid;
  298. u8 vf_qid;
  299. /* We need an additional index to differentiate between queues opened
  300. * for same queue-zone, as VFs would have to communicate the info
  301. * to the PF [otherwise PF has no way to differentiate].
  302. */
  303. u8 qid_usage_idx;
  304. u8 vf_legacy;
  305. #define QED_QCID_LEGACY_VF_RX_PROD (BIT(0))
  306. #define QED_QCID_LEGACY_VF_CID (BIT(1))
  307. struct qed_hwfn *p_owner;
  308. };
  309. int qed_l2_alloc(struct qed_hwfn *p_hwfn);
  310. void qed_l2_setup(struct qed_hwfn *p_hwfn);
  311. void qed_l2_free(struct qed_hwfn *p_hwfn);
  312. void qed_eth_queue_cid_release(struct qed_hwfn *p_hwfn,
  313. struct qed_queue_cid *p_cid);
  314. struct qed_queue_cid *
  315. qed_eth_queue_to_cid(struct qed_hwfn *p_hwfn,
  316. u16 opaque_fid,
  317. struct qed_queue_start_common_params *p_params,
  318. bool b_is_rx,
  319. struct qed_queue_cid_vf_params *p_vf_params);
  320. int
  321. qed_sp_eth_vport_start(struct qed_hwfn *p_hwfn,
  322. struct qed_sp_vport_start_params *p_params);
  323. /**
  324. * @brief - Starts an Rx queue, when queue_cid is already prepared
  325. *
  326. * @param p_hwfn
  327. * @param p_cid
  328. * @param bd_max_bytes
  329. * @param bd_chain_phys_addr
  330. * @param cqe_pbl_addr
  331. * @param cqe_pbl_size
  332. *
  333. * @return int
  334. */
  335. int
  336. qed_eth_rxq_start_ramrod(struct qed_hwfn *p_hwfn,
  337. struct qed_queue_cid *p_cid,
  338. u16 bd_max_bytes,
  339. dma_addr_t bd_chain_phys_addr,
  340. dma_addr_t cqe_pbl_addr, u16 cqe_pbl_size);
  341. /**
  342. * @brief - Starts a Tx queue, where queue_cid is already prepared
  343. *
  344. * @param p_hwfn
  345. * @param p_cid
  346. * @param pbl_addr
  347. * @param pbl_size
  348. * @param p_pq_params - parameters for choosing the PQ for this Tx queue
  349. *
  350. * @return int
  351. */
  352. int
  353. qed_eth_txq_start_ramrod(struct qed_hwfn *p_hwfn,
  354. struct qed_queue_cid *p_cid,
  355. dma_addr_t pbl_addr, u16 pbl_size, u16 pq_id);
  356. u8 qed_mcast_bin_from_mac(u8 *mac);
  357. int qed_set_rxq_coalesce(struct qed_hwfn *p_hwfn,
  358. struct qed_ptt *p_ptt,
  359. u16 coalesce, struct qed_queue_cid *p_cid);
  360. int qed_set_txq_coalesce(struct qed_hwfn *p_hwfn,
  361. struct qed_ptt *p_ptt,
  362. u16 coalesce, struct qed_queue_cid *p_cid);
  363. int qed_get_rxq_coalesce(struct qed_hwfn *p_hwfn,
  364. struct qed_ptt *p_ptt,
  365. struct qed_queue_cid *p_cid, u16 *p_hw_coal);
  366. int qed_get_txq_coalesce(struct qed_hwfn *p_hwfn,
  367. struct qed_ptt *p_ptt,
  368. struct qed_queue_cid *p_cid, u16 *p_hw_coal);
#endif /* _QED_L2_H */