/* QLogic qed NIC Driver
 * Copyright (c) 2015 QLogic Corporation
 *
 * This software is available under the terms of the GNU General Public License
 * (GPL) Version 2, available from the file COPYING in the main directory of
 * this source tree.
 */
#ifndef _QED_L2_H
#define _QED_L2_H
#include <linux/types.h>
#include <linux/io.h>
#include <linux/kernel.h>
#include <linux/slab.h>
#include <linux/qed/qed_eth_if.h>
#include "qed.h"
#include "qed_hw.h"
#include "qed_sp.h"

struct qed_sge_tpa_params {
        u8 max_buffers_per_cqe;

        u8 update_tpa_en_flg;
        u8 tpa_ipv4_en_flg;
        u8 tpa_ipv6_en_flg;
        u8 tpa_ipv4_tunn_en_flg;
        u8 tpa_ipv6_tunn_en_flg;

        u8 update_tpa_param_flg;
        u8 tpa_pkt_split_flg;
        u8 tpa_hdr_data_split_flg;
        u8 tpa_gro_consistent_flg;
        u8 tpa_max_aggs_num;
        u16 tpa_max_size;
        u16 tpa_min_size_to_start;
        u16 tpa_min_size_to_cont;
};
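
/* Illustrative sketch (not part of the original header): one way a caller
 * might populate qed_sge_tpa_params to enable IPv4/IPv6 aggregation and hand
 * it to qed_sp_vport_update() via sge_tpa_params.  The sizes and counts
 * below are placeholders, not values mandated by the driver, and
 * 'update_params' is a hypothetical qed_sp_vport_update_params instance.
 *
 *      struct qed_sge_tpa_params tpa = { 0 };
 *
 *      tpa.update_tpa_en_flg = 1;
 *      tpa.tpa_ipv4_en_flg = 1;
 *      tpa.tpa_ipv6_en_flg = 1;
 *      tpa.update_tpa_param_flg = 1;
 *      tpa.tpa_max_aggs_num = 4;
 *      tpa.tpa_max_size = 0xffff;
 *      tpa.tpa_min_size_to_start = 256;
 *      tpa.tpa_min_size_to_cont = 256;
 *      tpa.max_buffers_per_cqe = 4;
 *
 *      update_params.sge_tpa_params = &tpa;
 */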

enum qed_filter_opcode {
        QED_FILTER_ADD,
        QED_FILTER_REMOVE,
        QED_FILTER_MOVE,
        QED_FILTER_REPLACE,     /* Delete all MACs and add new one instead */
        QED_FILTER_FLUSH,       /* Removes all filters */
};

enum qed_filter_ucast_type {
        QED_FILTER_MAC,
        QED_FILTER_VLAN,
        QED_FILTER_MAC_VLAN,
        QED_FILTER_INNER_MAC,
        QED_FILTER_INNER_VLAN,
        QED_FILTER_INNER_PAIR,
        QED_FILTER_INNER_MAC_VNI_PAIR,
        QED_FILTER_MAC_VNI_PAIR,
        QED_FILTER_VNI,
};

struct qed_filter_ucast {
        enum qed_filter_opcode opcode;
        enum qed_filter_ucast_type type;
        u8 is_rx_filter;
        u8 is_tx_filter;
        u8 vport_to_add_to;
        u8 vport_to_remove_from;
        unsigned char mac[ETH_ALEN];
        u8 assert_on_error;
        u16 vlan;
        u32 vni;
};

struct qed_filter_mcast {
        /* MOVE is not supported for multicast */
        enum qed_filter_opcode opcode;
        u8 vport_to_add_to;
        u8 vport_to_remove_from;
        u8 num_mc_addrs;
#define QED_MAX_MC_ADDRS        64
        unsigned char mac[QED_MAX_MC_ADDRS][ETH_ALEN];
};
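
/* Illustrative sketch (not part of the original header): populating a
 * multicast filter command.  'vport_id', 'mc_list' and 'mc_count' are
 * hypothetical caller-provided values, ether_addr_copy() comes from
 * <linux/etherdevice.h>, and the populated structure is then handed to the
 * driver's multicast filter configuration path.
 *
 *      struct qed_filter_mcast mcast = { 0 };
 *      int i;
 *
 *      mcast.opcode = QED_FILTER_ADD;
 *      mcast.vport_to_add_to = vport_id;
 *      mcast.num_mc_addrs = min_t(u8, mc_count, QED_MAX_MC_ADDRS);
 *      for (i = 0; i < mcast.num_mc_addrs; i++)
 *              ether_addr_copy(mcast.mac[i], mc_list[i]);
 */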

/**
 * @brief qed_eth_rx_queue_stop - This ramrod closes an Rx queue
 *
 * @param p_hwfn
 * @param p_rxq                 Handler of queue to close
 * @param eq_completion_only    If True, the completion will always be on EQe.
 *                              If False, the completion will be on EQe when
 *                              the p_hwfn opaque differs from the RXQ opaque,
 *                              otherwise on CQe.
 * @param cqe_completion        If True, a completion will be received on CQe.
 * @return int
 */
int
qed_eth_rx_queue_stop(struct qed_hwfn *p_hwfn,
                      void *p_rxq,
                      bool eq_completion_only, bool cqe_completion);
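
/* Illustrative sketch (not part of the original header): a PF closing one of
 * its own Rx queues.  'p_rxq' is assumed to be the handle obtained when the
 * queue was started; passing false/false lets the default rule above decide
 * whether the completion arrives on the EQ or the CQ.
 *
 *      rc = qed_eth_rx_queue_stop(p_hwfn, p_rxq, false, false);
 *      if (rc)
 *              DP_NOTICE(p_hwfn, "Failed to stop Rx queue\n");
 */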

/**
 * @brief qed_eth_tx_queue_stop - closes a Tx queue
 *
 * @param p_hwfn
 * @param p_txq - handle of the Tx queue to be closed
 *
 * @return int
 */
int qed_eth_tx_queue_stop(struct qed_hwfn *p_hwfn, void *p_txq);

enum qed_tpa_mode {
        QED_TPA_MODE_NONE,
        QED_TPA_MODE_UNUSED,
        QED_TPA_MODE_GRO,
        QED_TPA_MODE_MAX
};

struct qed_sp_vport_start_params {
        enum qed_tpa_mode tpa_mode;
        bool remove_inner_vlan;
        bool tx_switching;
        bool only_untagged;
        bool drop_ttl0;
        u8 max_buffers_per_cqe;
        u32 concrete_fid;
        u16 opaque_fid;
        u8 vport_id;
        u16 mtu;
        bool check_mac;
        bool check_ethtype;
};

int qed_sp_eth_vport_start(struct qed_hwfn *p_hwfn,
                           struct qed_sp_vport_start_params *p_params);
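
/* Illustrative sketch (not part of the original header): starting a vport.
 * 'vport_id' and 'mtu' are hypothetical caller values, and the FIDs are
 * taken from the hwfn, assuming the usual hw_info layout in qed.h.
 *
 *      struct qed_sp_vport_start_params start = { 0 };
 *
 *      start.tpa_mode = QED_TPA_MODE_GRO;
 *      start.remove_inner_vlan = false;
 *      start.drop_ttl0 = true;
 *      start.max_buffers_per_cqe = 5;
 *      start.concrete_fid = p_hwfn->hw_info.concrete_fid;
 *      start.opaque_fid = p_hwfn->hw_info.opaque_fid;
 *      start.vport_id = vport_id;
 *      start.mtu = mtu;
 *
 *      rc = qed_sp_eth_vport_start(p_hwfn, &start);
 */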

struct qed_rss_params {
        u8 update_rss_config;
        u8 rss_enable;
        u8 rss_eng_id;
        u8 update_rss_capabilities;
        u8 update_rss_ind_table;
        u8 update_rss_key;
        u8 rss_caps;
        u8 rss_table_size_log;
        u16 rss_ind_table[QED_RSS_IND_TABLE_SIZE];
        u32 rss_key[QED_RSS_KEY_SIZE];
};
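
/* Illustrative sketch (not part of the original header): filling an RSS
 * configuration for a later vport update.  'num_rx_queues' and
 * 'update_params' are hypothetical caller values, the QED_RSS_* capability
 * bits are assumed to come from qed_eth_if.h, rss_table_size_log is assumed
 * to be log2 of QED_RSS_IND_TABLE_SIZE, and netdev_rss_key_fill() comes from
 * <linux/netdevice.h>.
 *
 *      struct qed_rss_params rss = { 0 };
 *      int i;
 *
 *      rss.update_rss_config = 1;
 *      rss.rss_enable = 1;
 *      rss.update_rss_capabilities = 1;
 *      rss.update_rss_ind_table = 1;
 *      rss.update_rss_key = 1;
 *      rss.rss_caps = QED_RSS_IPV4 | QED_RSS_IPV6 |
 *                     QED_RSS_IPV4_TCP | QED_RSS_IPV6_TCP;
 *      rss.rss_table_size_log = 7;
 *      for (i = 0; i < QED_RSS_IND_TABLE_SIZE; i++)
 *              rss.rss_ind_table[i] = i % num_rx_queues;
 *      netdev_rss_key_fill(rss.rss_key, sizeof(rss.rss_key));
 *
 *      update_params.rss_params = &rss;
 */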

struct qed_filter_accept_flags {
        u8 update_rx_mode_config;
        u8 update_tx_mode_config;
        u8 rx_accept_filter;
        u8 tx_accept_filter;
#define QED_ACCEPT_NONE                 0x01
#define QED_ACCEPT_UCAST_MATCHED        0x02
#define QED_ACCEPT_UCAST_UNMATCHED      0x04
#define QED_ACCEPT_MCAST_MATCHED        0x08
#define QED_ACCEPT_MCAST_UNMATCHED      0x10
#define QED_ACCEPT_BCAST                0x20
};

struct qed_sp_vport_update_params {
        u16 opaque_fid;
        u8 vport_id;
        u8 update_vport_active_rx_flg;
        u8 vport_active_rx_flg;
        u8 update_vport_active_tx_flg;
        u8 vport_active_tx_flg;
        u8 update_inner_vlan_removal_flg;
        u8 inner_vlan_removal_flg;
        u8 silent_vlan_removal_flg;
        u8 update_default_vlan_enable_flg;
        u8 default_vlan_enable_flg;
        u8 update_default_vlan_flg;
        u16 default_vlan;
        u8 update_tx_switching_flg;
        u8 tx_switching_flg;
        u8 update_approx_mcast_flg;
        u8 update_anti_spoofing_en_flg;
        u8 anti_spoofing_en;
        u8 update_accept_any_vlan_flg;
        u8 accept_any_vlan;
        unsigned long bins[8];
        struct qed_rss_params *rss_params;
        struct qed_filter_accept_flags accept_flags;
        struct qed_sge_tpa_params *sge_tpa_params;
};

int qed_sp_vport_update(struct qed_hwfn *p_hwfn,
                        struct qed_sp_vport_update_params *p_params,
                        enum spq_mode comp_mode,
                        struct qed_spq_comp_cb *p_comp_data);
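
/* Illustrative sketch (not part of the original header): using the vport
 * update ramrod to open up the Rx accept filter and accept any VLAN.
 * 'vport_id' is a hypothetical caller value and QED_SPQ_MODE_CB is assumed
 * to be one of the spq_mode values declared in qed_sp.h.
 *
 *      struct qed_sp_vport_update_params update = { 0 };
 *
 *      update.opaque_fid = p_hwfn->hw_info.opaque_fid;
 *      update.vport_id = vport_id;
 *      update.accept_flags.update_rx_mode_config = 1;
 *      update.accept_flags.rx_accept_filter = QED_ACCEPT_UCAST_MATCHED |
 *                                             QED_ACCEPT_UCAST_UNMATCHED |
 *                                             QED_ACCEPT_MCAST_MATCHED |
 *                                             QED_ACCEPT_BCAST;
 *      update.update_accept_any_vlan_flg = 1;
 *      update.accept_any_vlan = 1;
 *
 *      rc = qed_sp_vport_update(p_hwfn, &update, QED_SPQ_MODE_CB, NULL);
 */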

/**
 * @brief qed_sp_vport_stop -
 *
 * This ramrod closes a VPort after all its RX and TX queues are terminated.
 * An Assert is generated if any queues are left open.
 *
 * @param p_hwfn
 * @param opaque_fid
 * @param vport_id VPort ID
 *
 * @return int
 */
int qed_sp_vport_stop(struct qed_hwfn *p_hwfn, u16 opaque_fid, u8 vport_id);
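
/* Illustrative sketch (not part of the original header): teardown order.
 * Every Tx and Rx queue of the vport must be stopped before the vport-stop
 * ramrod is sent, otherwise the firmware asserts.  'p_txq', 'p_rxq' and
 * 'vport_id' are hypothetical values obtained when the queues and vport
 * were started.
 *
 *      rc = qed_eth_tx_queue_stop(p_hwfn, p_txq);
 *      if (!rc)
 *              rc = qed_eth_rx_queue_stop(p_hwfn, p_rxq, false, false);
 *      if (!rc)
 *              rc = qed_sp_vport_stop(p_hwfn, p_hwfn->hw_info.opaque_fid,
 *                                     vport_id);
 */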

int qed_sp_eth_filter_ucast(struct qed_hwfn *p_hwfn,
                            u16 opaque_fid,
                            struct qed_filter_ucast *p_filter_cmd,
                            enum spq_mode comp_mode,
                            struct qed_spq_comp_cb *p_comp_data);
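
/* Illustrative sketch (not part of the original header): adding a unicast
 * MAC filter on both the Rx and Tx side of a vport.  'mac_addr' and
 * 'vport_id' are hypothetical caller values; ether_addr_copy() comes from
 * <linux/etherdevice.h>.
 *
 *      struct qed_filter_ucast ucast = { 0 };
 *
 *      ucast.opcode = QED_FILTER_ADD;
 *      ucast.type = QED_FILTER_MAC;
 *      ucast.is_rx_filter = 1;
 *      ucast.is_tx_filter = 1;
 *      ucast.vport_to_add_to = vport_id;
 *      ether_addr_copy(ucast.mac, mac_addr);
 *
 *      rc = qed_sp_eth_filter_ucast(p_hwfn, p_hwfn->hw_info.opaque_fid,
 *                                   &ucast, QED_SPQ_MODE_CB, NULL);
 */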

/**
 * @brief qed_sp_eth_rx_queues_update -
 *
 * This ramrod updates an RX queue. It is used for setting the active state
 * of the queue and updating the TPA and SGE parameters.
 *
 * @note At the moment - only used by non-Linux VFs.
 *
 * @param p_hwfn
 * @param pp_rxq_handlers       An array of queue handlers to be updated.
 * @param num_rxqs              number of queues to update.
 * @param complete_cqe_flg      Post completion to the CQE Ring if set
 * @param complete_event_flg    Post completion to the Event Ring if set
 * @param comp_mode
 * @param p_comp_data
 *
 * @return int
 */
int
qed_sp_eth_rx_queues_update(struct qed_hwfn *p_hwfn,
                            void **pp_rxq_handlers,
                            u8 num_rxqs,
                            u8 complete_cqe_flg,
                            u8 complete_event_flg,
                            enum spq_mode comp_mode,
                            struct qed_spq_comp_cb *p_comp_data);
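
/* Illustrative sketch (not part of the original header): updating a single
 * Rx queue.  'p_rxq' is a hypothetical queue handle; the 0/1 pair below asks
 * for the completion to be posted on the Event Ring rather than the CQE Ring.
 *
 *      void *handlers[1] = { p_rxq };
 *
 *      rc = qed_sp_eth_rx_queues_update(p_hwfn, handlers, 1, 0, 1,
 *                                       QED_SPQ_MODE_CB, NULL);
 */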

void qed_get_vport_stats(struct qed_dev *cdev, struct qed_eth_stats *stats);

void qed_reset_vport_stats(struct qed_dev *cdev);

struct qed_queue_cid {
        /* 'Relative' is a relative term ;-). Usually the indices [not counting
         * SBs] would be PF-relative, but there are some cases where that isn't
         * the case - specifically for a PF configuring its VF indices it's
         * possible some fields [E.g., stats-id] in 'rel' would already be abs.
         */
        struct qed_queue_start_common_params rel;
        struct qed_queue_start_common_params abs;
        u32 cid;
        u16 opaque_fid;

        /* VFs queues are mapped differently, so we need to know the
         * relative queue associated with them [0-based].
         * Notice this is relevant on the *PF* queue-cid of its VF's queues,
         * and not on the VF itself.
         */
        bool is_vf;
        u8 vf_qid;

        /* Legacy VFs might have Rx producer located elsewhere */
        bool b_legacy_vf;
};

void qed_eth_queue_cid_release(struct qed_hwfn *p_hwfn,
                               struct qed_queue_cid *p_cid);

struct qed_queue_cid *_qed_eth_queue_to_cid(struct qed_hwfn *p_hwfn,
                                            u16 opaque_fid,
                                            u32 cid,
                                            u8 vf_qid,
                                            struct qed_queue_start_common_params
                                            *p_params);

/**
 * @brief - Starts an Rx queue, when queue_cid is already prepared
 *
 * @param p_hwfn
 * @param p_cid
 * @param bd_max_bytes
 * @param bd_chain_phys_addr
 * @param cqe_pbl_addr
 * @param cqe_pbl_size
 *
 * @return int
 */
int
qed_eth_rxq_start_ramrod(struct qed_hwfn *p_hwfn,
                         struct qed_queue_cid *p_cid,
                         u16 bd_max_bytes,
                         dma_addr_t bd_chain_phys_addr,
                         dma_addr_t cqe_pbl_addr, u16 cqe_pbl_size);
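
/* Illustrative sketch (not part of the original header): issuing the Rx-queue
 * start ramrod once a queue-cid has been prepared.  'p_cid' is assumed to
 * come from _qed_eth_queue_to_cid(), 'rx_buf_size', 'rx_bd_ring' and
 * 'rx_cqe_ring' are hypothetical caller-allocated values, and the chain
 * accessors are assumed to be those from qed_chain.h.
 *
 *      rc = qed_eth_rxq_start_ramrod(p_hwfn, p_cid,
 *                                    rx_buf_size,
 *                                    qed_chain_get_phys_addr(&rx_bd_ring),
 *                                    qed_chain_get_pbl_phys(&rx_cqe_ring),
 *                                    qed_chain_get_page_cnt(&rx_cqe_ring));
 */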

/**
 * @brief - Starts a Tx queue, where queue_cid is already prepared
 *
 * @param p_hwfn
 * @param p_cid
 * @param pbl_addr
 * @param pbl_size
 * @param pq_id - ID of the PQ chosen for this Tx queue
 *
 * @return int
 */
int
qed_eth_txq_start_ramrod(struct qed_hwfn *p_hwfn,
                         struct qed_queue_cid *p_cid,
                         dma_addr_t pbl_addr, u16 pbl_size, u16 pq_id);

u8 qed_mcast_bin_from_mac(u8 *mac);
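
/* Illustrative sketch (not part of the original header): using the multicast
 * bin helper together with the approximate-multicast bitmap of
 * qed_sp_vport_update_params.  'mc_mac' is a hypothetical multicast address;
 * __set_bit() comes from <linux/bitops.h>.
 *
 *      struct qed_sp_vport_update_params update = { 0 };
 *
 *      update.update_approx_mcast_flg = 1;
 *      __set_bit(qed_mcast_bin_from_mac(mc_mac), update.bins);
 */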

#endif /* _QED_L2_H */