/* QLogic qed NIC Driver
 * Copyright (c) 2015 QLogic Corporation
 *
 * This software is available under the terms of the GNU General Public License
 * (GPL) Version 2, available from the file COPYING in the main directory of
 * this source tree.
 */

#ifndef _QED_VF_H
#define _QED_VF_H

#include "qed_l2.h"
#include "qed_mcp.h"

#define T_ETH_INDIRECTION_TABLE_SIZE 128
#define T_ETH_RSS_KEY_SIZE 10

struct vf_pf_resc_request {
	u8 num_rxqs;
	u8 num_txqs;
	u8 num_sbs;
	u8 num_mac_filters;
	u8 num_vlan_filters;
	u8 num_mc_filters;
	u16 padding;
};

struct hw_sb_info {
	u16 hw_sb_id;
	u8 sb_qid;
	u8 padding[5];
};

#define TLV_BUFFER_SIZE 1024

enum {
	PFVF_STATUS_WAITING,
	PFVF_STATUS_SUCCESS,
	PFVF_STATUS_FAILURE,
	PFVF_STATUS_NOT_SUPPORTED,
	PFVF_STATUS_NO_RESOURCE,
	PFVF_STATUS_FORCED,
	PFVF_STATUS_MALICIOUS,
};

/* vf pf channel tlvs */
/* general tlv header (used for both vf->pf request and pf->vf response) */
struct channel_tlv {
	u16 type;
	u16 length;
};
/* header of first vf->pf tlv carries the offset used to calculate response
 * buffer address
 */
struct vfpf_first_tlv {
	struct channel_tlv tl;
	u32 padding;
	u64 reply_address;
};

/* header of pf->vf tlvs, carries the status of handling the request */
struct pfvf_tlv {
	struct channel_tlv tl;
	u8 status;
	u8 padding[3];
};

/* response tlv used for most tlvs */
struct pfvf_def_resp_tlv {
	struct pfvf_tlv hdr;
};

/* used to terminate and pad a tlv list */
struct channel_list_end_tlv {
	struct channel_tlv tl;
	u8 padding[4];
};
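
/* Illustrative sketch (not part of the original header): a vf->pf message is
 * a chain of channel_tlv entries laid back-to-back in the request buffer,
 * starting with a vfpf_first_tlv and terminated by a CHANNEL_TLV_LIST_END
 * entry. Walking such a chain only needs the generic header; the helper below
 * is a hypothetical example, not a driver API:
 *
 *	static struct channel_tlv *next_tlv(struct channel_tlv *tlv)
 *	{
 *		return (struct channel_tlv *)((u8 *)tlv + tlv->length);
 *	}
 *
 *	for (tlv = first; tlv->type != CHANNEL_TLV_LIST_END; tlv = next_tlv(tlv))
 *		handle_tlv(tlv);	// handle_tlv() is hypothetical
 */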
#define VFPF_ACQUIRE_OS_LINUX (0)
#define VFPF_ACQUIRE_OS_WINDOWS (1)
#define VFPF_ACQUIRE_OS_ESX (2)
#define VFPF_ACQUIRE_OS_SOLARIS (3)
#define VFPF_ACQUIRE_OS_LINUX_USERSPACE (4)

struct vfpf_acquire_tlv {
	struct vfpf_first_tlv first_tlv;

	struct vf_pf_vfdev_info {
#define VFPF_ACQUIRE_CAP_PRE_FP_HSI	(1 << 0) /* VF pre-FP hsi version */
#define VFPF_ACQUIRE_CAP_100G		(1 << 1) /* VF can support 100g */
		u64 capabilities;
		u8 fw_major;
		u8 fw_minor;
		u8 fw_revision;
		u8 fw_engineering;
		u32 driver_version;
		u16 opaque_fid;	/* ME register value */
		u8 os_type;	/* VFPF_ACQUIRE_OS_* value */
		u8 eth_fp_hsi_major;
		u8 eth_fp_hsi_minor;
		u8 padding[3];
	} vfdev_info;

	struct vf_pf_resc_request resc_request;

	u64 bulletin_addr;
	u32 bulletin_size;
	u32 padding;
};

/* receive side scaling tlv */
struct vfpf_vport_update_rss_tlv {
	struct channel_tlv tl;

	u8 update_rss_flags;
#define VFPF_UPDATE_RSS_CONFIG_FLAG	BIT(0)
#define VFPF_UPDATE_RSS_CAPS_FLAG	BIT(1)
#define VFPF_UPDATE_RSS_IND_TABLE_FLAG	BIT(2)
#define VFPF_UPDATE_RSS_KEY_FLAG	BIT(3)

	u8 rss_enable;
	u8 rss_caps;
	u8 rss_table_size_log;	/* The table size is 2 ^ rss_table_size_log */
	u16 rss_ind_table[T_ETH_INDIRECTION_TABLE_SIZE];
	u32 rss_key[T_ETH_RSS_KEY_SIZE];
};

struct pfvf_storm_stats {
	u32 address;
	u32 len;
};

struct pfvf_stats_info {
	struct pfvf_storm_stats mstats;
	struct pfvf_storm_stats pstats;
	struct pfvf_storm_stats tstats;
	struct pfvf_storm_stats ustats;
};

struct pfvf_acquire_resp_tlv {
	struct pfvf_tlv hdr;

	struct pf_vf_pfdev_info {
		u32 chip_num;
		u32 mfw_ver;

		u16 fw_major;
		u16 fw_minor;
		u16 fw_rev;
		u16 fw_eng;

		u64 capabilities;
#define PFVF_ACQUIRE_CAP_DEFAULT_UNTAGGED	BIT(0)
#define PFVF_ACQUIRE_CAP_100G			BIT(1)	/* If set, 100g PF */
/* There are old PF versions where the PF might mistakenly override the sanity
 * mechanism [version-based] and allow a VF that can't be supported to pass
 * the acquisition phase.
 * To overcome this, PFs now indicate that they're past that point and the new
 * VFs would fail probe on the older PFs that fail to do so.
 */
#define PFVF_ACQUIRE_CAP_POST_FW_OVERRIDE	BIT(2)

		u16 db_size;
		u8 indices_per_sb;
		u8 os_type;

		/* These should match the PF's qed_dev values */
		u16 chip_rev;
		u8 dev_type;

		u8 padding;

		struct pfvf_stats_info stats_info;

		u8 port_mac[ETH_ALEN];

		/* It's possible PF had to configure an older fastpath HSI
		 * [in case VF is newer than PF]. This is communicated back
		 * to the VF. It can also be used in case of error due to
		 * non-matching versions to shed light in VF about failure.
		 */
		u8 major_fp_hsi;
		u8 minor_fp_hsi;
	} pfdev_info;

	struct pf_vf_resc {
#define PFVF_MAX_QUEUES_PER_VF	16
#define PFVF_MAX_SBS_PER_VF	16
		struct hw_sb_info hw_sbs[PFVF_MAX_SBS_PER_VF];
		u8 hw_qid[PFVF_MAX_QUEUES_PER_VF];
		u8 cid[PFVF_MAX_QUEUES_PER_VF];

		u8 num_rxqs;
		u8 num_txqs;
		u8 num_sbs;
		u8 num_mac_filters;
		u8 num_vlan_filters;
		u8 num_mc_filters;
		u8 padding[2];
	} resc;

	u32 bulletin_size;
	u32 padding;
};
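
/* Illustrative sketch (not part of the original header): during acquisition
 * the VF fills a vfpf_acquire_tlv with its fastpath HSI version, OS type,
 * resource request and bulletin board location, and the PF answers with a
 * pfvf_acquire_resp_tlv describing the granted resources. Field values below
 * are examples only (ETH_HSI_VER_* are assumed to come from the qed HSI
 * headers); the real flow lives in qed_vf_hw_prepare():
 *
 *	struct vfpf_acquire_tlv *req;	// points into the vf2pf request buffer
 *
 *	req->vfdev_info.os_type = VFPF_ACQUIRE_OS_LINUX;
 *	req->vfdev_info.eth_fp_hsi_major = ETH_HSI_VER_MAJOR;
 *	req->vfdev_info.eth_fp_hsi_minor = ETH_HSI_VER_MINOR;
 *	req->resc_request.num_rxqs = 1;
 *	req->resc_request.num_txqs = 1;
 *	req->bulletin_addr = bulletin_phys;	// DMA address of the VF's bulletin
 *	req->bulletin_size = sizeof(struct qed_bulletin_content);
 */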
struct pfvf_start_queue_resp_tlv {
	struct pfvf_tlv hdr;
	u32 offset;	/* offset to consumer/producer of queue */
	u8 padding[4];
};

/* Setup Queue */
struct vfpf_start_rxq_tlv {
	struct vfpf_first_tlv first_tlv;

	/* physical addresses */
	u64 rxq_addr;
	u64 deprecated_sge_addr;
	u64 cqe_pbl_addr;

	u16 cqe_pbl_size;
	u16 hw_sb;
	u16 rx_qid;
	u16 hc_rate;	/* desired interrupts per sec. */

	u16 bd_max_bytes;
	u16 stat_id;
	u8 sb_index;
	u8 padding[3];
};

struct vfpf_start_txq_tlv {
	struct vfpf_first_tlv first_tlv;

	/* physical addresses */
	u64 pbl_addr;

	u16 pbl_size;
	u16 stat_id;
	u16 tx_qid;
	u16 hw_sb;

	u32 flags;	/* VFPF_QUEUE_FLG_X flags */
	u16 hc_rate;	/* desired interrupts per sec. */
	u8 sb_index;
	u8 padding[3];
};

/* Stop RX Queue */
struct vfpf_stop_rxqs_tlv {
	struct vfpf_first_tlv first_tlv;

	u16 rx_qid;
	u8 num_rxqs;
	u8 cqe_completion;
	u8 padding[4];
};

/* Stop TX Queues */
struct vfpf_stop_txqs_tlv {
	struct vfpf_first_tlv first_tlv;

	u16 tx_qid;
	u8 num_txqs;
	u8 padding[5];
};

struct vfpf_update_rxq_tlv {
	struct vfpf_first_tlv first_tlv;

	u64 deprecated_sge_addr[PFVF_MAX_QUEUES_PER_VF];

	u16 rx_qid;
	u8 num_rxqs;

	u8 flags;
#define VFPF_RXQ_UPD_INIT_SGE_DEPRECATE_FLAG	BIT(0)
#define VFPF_RXQ_UPD_COMPLETE_CQE_FLAG		BIT(1)
#define VFPF_RXQ_UPD_COMPLETE_EVENT_FLAG	BIT(2)

	u8 padding[4];
};

/* Set Queue Filters */
struct vfpf_q_mac_vlan_filter {
	u32 flags;
#define VFPF_Q_FILTER_DEST_MAC_VALID	0x01
#define VFPF_Q_FILTER_VLAN_TAG_VALID	0x02
#define VFPF_Q_FILTER_SET_MAC		0x100	/* set/clear */

	u8 mac[ETH_ALEN];
	u16 vlan_tag;

	u8 padding[4];
};

/* Start a vport */
struct vfpf_vport_start_tlv {
	struct vfpf_first_tlv first_tlv;

	u64 sb_addr[PFVF_MAX_SBS_PER_VF];

	u32 tpa_mode;
	u16 dep1;
	u16 mtu;

	u8 vport_id;
	u8 inner_vlan_removal;

	u8 only_untagged;
	u8 max_buffers_per_cqe;

	u8 padding[4];
};

/* Extended tlvs - need to add rss, mcast, accept mode tlvs */
struct vfpf_vport_update_activate_tlv {
	struct channel_tlv tl;
	u8 update_rx;
	u8 update_tx;
	u8 active_rx;
	u8 active_tx;
};

struct vfpf_vport_update_tx_switch_tlv {
	struct channel_tlv tl;
	u8 tx_switching;
	u8 padding[3];
};

struct vfpf_vport_update_vlan_strip_tlv {
	struct channel_tlv tl;
	u8 remove_vlan;
	u8 padding[3];
};

struct vfpf_vport_update_mcast_bin_tlv {
	struct channel_tlv tl;
	u8 padding[4];

	u64 bins[8];
};

struct vfpf_vport_update_accept_param_tlv {
	struct channel_tlv tl;
	u8 update_rx_mode;
	u8 update_tx_mode;
	u8 rx_accept_filter;
	u8 tx_accept_filter;
};

struct vfpf_vport_update_accept_any_vlan_tlv {
	struct channel_tlv tl;
	u8 update_accept_any_vlan_flg;
	u8 accept_any_vlan;

	u8 padding[2];
};

struct vfpf_vport_update_sge_tpa_tlv {
	struct channel_tlv tl;

	u16 sge_tpa_flags;
#define VFPF_TPA_IPV4_EN_FLAG		BIT(0)
#define VFPF_TPA_IPV6_EN_FLAG		BIT(1)
#define VFPF_TPA_PKT_SPLIT_FLAG		BIT(2)
#define VFPF_TPA_HDR_DATA_SPLIT_FLAG	BIT(3)
#define VFPF_TPA_GRO_CONSIST_FLAG	BIT(4)

	u8 update_sge_tpa_flags;
#define VFPF_UPDATE_SGE_DEPRECATED_FLAG	BIT(0)
#define VFPF_UPDATE_TPA_EN_FLAG		BIT(1)
#define VFPF_UPDATE_TPA_PARAM_FLAG	BIT(2)

	u8 max_buffers_per_cqe;

	u16 deprecated_sge_buff_size;
	u16 tpa_max_size;
	u16 tpa_min_size_to_start;
	u16 tpa_min_size_to_cont;

	u8 tpa_max_aggs_num;
	u8 padding[7];
};

/* Primary tlv as a header for various extended tlvs for
 * various functionalities in vport update ramrod.
 */
struct vfpf_vport_update_tlv {
	struct vfpf_first_tlv first_tlv;
};
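
/* Illustrative sketch (not part of the original header): a vport-update
 * request places the primary vfpf_vport_update_tlv first and then appends one
 * extended tlv per touched feature, each led by its own channel_tlv, before
 * the terminating list-end entry. Activating queues and updating RSS, for
 * example, would lay the request buffer out roughly as:
 *
 *	[vfpf_vport_update_tlv]          type CHANNEL_TLV_VPORT_UPDATE
 *	[vfpf_vport_update_activate_tlv] type CHANNEL_TLV_VPORT_UPDATE_ACTIVATE
 *	[vfpf_vport_update_rss_tlv]      type CHANNEL_TLV_VPORT_UPDATE_RSS
 *	[channel_list_end_tlv]           type CHANNEL_TLV_LIST_END
 */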
struct vfpf_ucast_filter_tlv {
	struct vfpf_first_tlv first_tlv;

	u8 opcode;
	u8 type;

	u8 mac[ETH_ALEN];

	u16 vlan;
	u16 padding[3];
};

struct tlv_buffer_size {
	u8 tlv_buffer[TLV_BUFFER_SIZE];
};

union vfpf_tlvs {
	struct vfpf_first_tlv first_tlv;
	struct vfpf_acquire_tlv acquire;
	struct vfpf_start_rxq_tlv start_rxq;
	struct vfpf_start_txq_tlv start_txq;
	struct vfpf_stop_rxqs_tlv stop_rxqs;
	struct vfpf_stop_txqs_tlv stop_txqs;
	struct vfpf_update_rxq_tlv update_rxq;
	struct vfpf_vport_start_tlv start_vport;
	struct vfpf_vport_update_tlv vport_update;
	struct vfpf_ucast_filter_tlv ucast_filter;
	struct channel_list_end_tlv list_end;
	struct tlv_buffer_size tlv_buf_size;
};

union pfvf_tlvs {
	struct pfvf_def_resp_tlv default_resp;
	struct pfvf_acquire_resp_tlv acquire_resp;
	struct tlv_buffer_size tlv_buf_size;
	struct pfvf_start_queue_resp_tlv queue_start;
};

enum qed_bulletin_bit {
	/* Alert the VF that a forced MAC was set by the PF */
	MAC_ADDR_FORCED = 0,

	/* Alert the VF that a forced VLAN was set by the PF */
	VLAN_ADDR_FORCED = 2,

	/* Indicate that `default_only_untagged' contains actual data */
	VFPF_BULLETIN_UNTAGGED_DEFAULT = 3,
	VFPF_BULLETIN_UNTAGGED_DEFAULT_FORCED = 4,

	/* Alert the VF that suggested mac was sent by the PF.
	 * MAC_ADDR will be disabled in case MAC_ADDR_FORCED is set.
	 */
	VFPF_BULLETIN_MAC_ADDR = 5
};

struct qed_bulletin_content {
	/* crc of structure to ensure it is not read mid-update */
	u32 crc;

	u32 version;

	/* bitmap indicating which fields hold valid values */
	u64 valid_bitmap;

	/* used for MAC_ADDR or MAC_ADDR_FORCED */
	u8 mac[ETH_ALEN];

	/* If valid, 1 => only untagged Rx if no vlan is configured */
	u8 default_only_untagged;
	u8 padding;

	/* The following is a 'copy' of qed_mcp_link_state,
	 * qed_mcp_link_params and qed_mcp_link_capabilities. Since it's
	 * possible the structs will increase further along the road we cannot
	 * have it here; Instead we need to have all of its fields.
	 */
	u8 req_autoneg;
	u8 req_autoneg_pause;
	u8 req_forced_rx;
	u8 req_forced_tx;
	u8 padding2[4];

	u32 req_adv_speed;
	u32 req_forced_speed;

	u32 req_loopback;
	u32 padding3;

	u8 link_up;
	u8 full_duplex;

	u8 autoneg;
	u8 autoneg_complete;
	u8 parallel_detection;

	u8 pfc_enabled;
	u8 partner_tx_flow_ctrl_en;
	u8 partner_rx_flow_ctrl_en;

	u8 partner_adv_pause;
	u8 sfp_tx_fault;
	u8 padding4[6];

	u32 speed;
	u32 partner_adv_speed;

	u32 capability_speed;

	/* Forced vlan */
	u16 pvid;
	u16 padding5;
};
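
/* Illustrative sketch (not part of the original header): the PF rewrites the
 * bulletin board asynchronously, so a VF copies it and only trusts the copy
 * if the CRC computed over everything past the crc field matches. A plausible
 * check, assuming crc32() from <linux/crc32.h> (the real logic lives in
 * qed_vf_read_bulletin()):
 *
 *	struct qed_bulletin_content shadow = *bulletin->p_virt;
 *	u32 crc = crc32(0, (u8 *)&shadow + sizeof(shadow.crc),
 *			bulletin->size - sizeof(shadow.crc));
 *
 *	if (crc != shadow.crc)
 *		return;	// mid-update; ignore this snapshot
 */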
struct qed_bulletin {
	dma_addr_t phys;
	struct qed_bulletin_content *p_virt;
	u32 size;
};

enum {
	CHANNEL_TLV_NONE,	/* ends tlv sequence */
	CHANNEL_TLV_ACQUIRE,
	CHANNEL_TLV_VPORT_START,
	CHANNEL_TLV_VPORT_UPDATE,
	CHANNEL_TLV_VPORT_TEARDOWN,
	CHANNEL_TLV_START_RXQ,
	CHANNEL_TLV_START_TXQ,
	CHANNEL_TLV_STOP_RXQS,
	CHANNEL_TLV_STOP_TXQS,
	CHANNEL_TLV_UPDATE_RXQ,
	CHANNEL_TLV_INT_CLEANUP,
	CHANNEL_TLV_CLOSE,
	CHANNEL_TLV_RELEASE,
	CHANNEL_TLV_LIST_END,
	CHANNEL_TLV_UCAST_FILTER,
	CHANNEL_TLV_VPORT_UPDATE_ACTIVATE,
	CHANNEL_TLV_VPORT_UPDATE_TX_SWITCH,
	CHANNEL_TLV_VPORT_UPDATE_VLAN_STRIP,
	CHANNEL_TLV_VPORT_UPDATE_MCAST,
	CHANNEL_TLV_VPORT_UPDATE_ACCEPT_PARAM,
	CHANNEL_TLV_VPORT_UPDATE_RSS,
	CHANNEL_TLV_VPORT_UPDATE_ACCEPT_ANY_VLAN,
	CHANNEL_TLV_VPORT_UPDATE_SGE_TPA,
	CHANNEL_TLV_MAX,

	/* Required for iterating over vport-update tlvs.
	 * Will break in case of non-sequential vport-update tlvs.
	 */
	CHANNEL_TLV_VPORT_UPDATE_MAX = CHANNEL_TLV_VPORT_UPDATE_SGE_TPA + 1,
};
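
/* Illustrative sketch (not part of the original header): because the
 * vport-update tlv types above are sequential, a reply parser can simply loop
 * over that range rather than listing every extended tlv by hand
 * (qed_vf_pf_vport_update() relies on this property):
 *
 *	u16 type;
 *
 *	for (type = CHANNEL_TLV_VPORT_UPDATE_ACTIVATE;
 *	     type < CHANNEL_TLV_VPORT_UPDATE_MAX; type++) {
 *		// locate the extended tlv of this type in the pf2vf reply
 *		// and check the status it carries
 *	}
 */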
/* This data is held in the qed_hwfn structure for VFs only. */
struct qed_vf_iov {
	union vfpf_tlvs *vf2pf_request;
	dma_addr_t vf2pf_request_phys;
	union pfvf_tlvs *pf2vf_reply;
	dma_addr_t pf2vf_reply_phys;

	/* Should be taken whenever the mailbox buffers are accessed */
	struct mutex mutex;
	u8 *offset;

	/* Bulletin Board */
	struct qed_bulletin bulletin;
	struct qed_bulletin_content bulletin_shadow;

	/* we set aside a copy of the acquire response */
	struct pfvf_acquire_resp_tlv acquire_resp;

	/* In case PF originates prior to the fp-hsi version comparison,
	 * this has to be propagated as it affects the fastpath.
	 */
	bool b_pre_fp_hsi;
};

#ifdef CONFIG_QED_SRIOV
/**
 * @brief Read the VF bulletin and act on it if needed
 *
 * @param p_hwfn
 * @param p_change - qed fills 1 iff bulletin board has changed, 0 otherwise.
 *
 * @return enum _qed_status
 */
int qed_vf_read_bulletin(struct qed_hwfn *p_hwfn, u8 *p_change);

/**
 * @brief Get link parameters for VF from qed
 *
 * @param p_hwfn
 * @param params - the link params structure to be filled for the VF
 */
void qed_vf_get_link_params(struct qed_hwfn *p_hwfn,
			    struct qed_mcp_link_params *params);

/**
 * @brief Get link state for VF from qed
 *
 * @param p_hwfn
 * @param link - the link state structure to be filled for the VF
 */
void qed_vf_get_link_state(struct qed_hwfn *p_hwfn,
			   struct qed_mcp_link_state *link);

/**
 * @brief Get link capabilities for VF from qed
 *
 * @param p_hwfn
 * @param p_link_caps - the link capabilities structure to be filled for the VF
 */
void qed_vf_get_link_caps(struct qed_hwfn *p_hwfn,
			  struct qed_mcp_link_capabilities *p_link_caps);

/**
 * @brief Get number of Rx queues allocated for VF by qed
 *
 * @param p_hwfn
 * @param num_rxqs - allocated RX queues
 */
void qed_vf_get_num_rxqs(struct qed_hwfn *p_hwfn, u8 *num_rxqs);

/**
 * @brief Get port mac address for VF
 *
 * @param p_hwfn
 * @param port_mac - destination location for port mac
 */
void qed_vf_get_port_mac(struct qed_hwfn *p_hwfn, u8 *port_mac);

/**
 * @brief Get number of VLAN filters allocated for VF by qed
 *
 * @param p_hwfn
 * @param num_vlan_filters - allocated VLAN filters
 */
void qed_vf_get_num_vlan_filters(struct qed_hwfn *p_hwfn,
				 u8 *num_vlan_filters);

/**
 * @brief Get number of MAC filters allocated for VF by qed
 *
 * @param p_hwfn
 * @param num_mac_filters - allocated MAC filters
 */
void qed_vf_get_num_mac_filters(struct qed_hwfn *p_hwfn, u8 *num_mac_filters);

/**
 * @brief Check if VF can set a MAC address
 *
 * @param p_hwfn
 * @param mac
 *
 * @return bool
 */
bool qed_vf_check_mac(struct qed_hwfn *p_hwfn, u8 *mac);

/**
 * @brief Set firmware version information in dev_info from the VF's acquire
 *        response tlv
 *
 * @param p_hwfn
 * @param fw_major
 * @param fw_minor
 * @param fw_rev
 * @param fw_eng
 */
void qed_vf_get_fw_version(struct qed_hwfn *p_hwfn,
			   u16 *fw_major, u16 *fw_minor,
			   u16 *fw_rev, u16 *fw_eng);

/**
 * @brief hw preparation for VF
 *        sends ACQUIRE message
 *
 * @param p_hwfn
 *
 * @return int
 */
int qed_vf_hw_prepare(struct qed_hwfn *p_hwfn);

/**
 * @brief VF - start the RX Queue by sending a message to the PF
 *
 * @param p_hwfn
 * @param p_cid - Only relative fields are relevant
 * @param bd_max_bytes - maximum number of bytes per bd
 * @param bd_chain_phys_addr - physical address of bd chain
 * @param cqe_pbl_addr - physical address of pbl
 * @param cqe_pbl_size - pbl size
 * @param pp_prod - pointer to the producer to be used in fastpath
 *
 * @return int
 */
int qed_vf_pf_rxq_start(struct qed_hwfn *p_hwfn,
			struct qed_queue_cid *p_cid,
			u16 bd_max_bytes,
			dma_addr_t bd_chain_phys_addr,
			dma_addr_t cqe_pbl_addr,
			u16 cqe_pbl_size, void __iomem **pp_prod);
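
/* Illustrative sketch (not part of the original header): a VF Rx queue setup
 * path hands the PF the BD chain and CQE PBL addresses and gets back a
 * producer pointer to use in the fastpath. Variable names here are
 * hypothetical:
 *
 *	void __iomem *prod;
 *	int rc;
 *
 *	rc = qed_vf_pf_rxq_start(p_hwfn, p_cid, rx_buf_size,
 *				 bd_chain_phys, cqe_pbl_phys,
 *				 cqe_pbl_size, &prod);
 *	if (rc)
 *		return rc;	// PF rejected the queue or the channel failed
 */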

/**
 * @brief VF - start the TX queue by sending a message to the PF.
 *
 * @param p_hwfn
 * @param p_cid
 * @param pbl_addr - physical address of the tx chain's pbl
 * @param pbl_size - pbl size
 * @param pp_doorbell - pointer to address to which to write the doorbell
 *
 * @return int
 */
int
qed_vf_pf_txq_start(struct qed_hwfn *p_hwfn,
		    struct qed_queue_cid *p_cid,
		    dma_addr_t pbl_addr,
		    u16 pbl_size, void __iomem **pp_doorbell);

/**
 * @brief VF - stop the RX queue by sending a message to the PF
 *
 * @param p_hwfn
 * @param p_cid
 * @param cqe_completion
 *
 * @return int
 */
int qed_vf_pf_rxq_stop(struct qed_hwfn *p_hwfn,
		       struct qed_queue_cid *p_cid, bool cqe_completion);

/**
 * @brief VF - stop the TX queue by sending a message to the PF
 *
 * @param p_hwfn
 * @param p_cid
 *
 * @return int
 */
int qed_vf_pf_txq_stop(struct qed_hwfn *p_hwfn, struct qed_queue_cid *p_cid);

/**
 * @brief VF - send a vport update command
 *
 * @param p_hwfn
 * @param p_params
 *
 * @return int
 */
int qed_vf_pf_vport_update(struct qed_hwfn *p_hwfn,
			   struct qed_sp_vport_update_params *p_params);

/**
 * @brief VF - send a close message to PF
 *
 * @param p_hwfn
 *
 * @return enum _qed_status
 */
int qed_vf_pf_reset(struct qed_hwfn *p_hwfn);

/**
 * @brief VF - free the VF's memories
 *
 * @param p_hwfn
 *
 * @return enum _qed_status
 */
int qed_vf_pf_release(struct qed_hwfn *p_hwfn);

/**
 * @brief qed_vf_get_igu_sb_id - Get the IGU SB ID for a given
 *        sb_id. For VFs, IGU SBs don't have to be contiguous.
 *
 * @param p_hwfn
 * @param sb_id
 *
 * @return u16
 */
u16 qed_vf_get_igu_sb_id(struct qed_hwfn *p_hwfn, u16 sb_id);

/**
 * @brief qed_vf_pf_vport_start - perform vport start for VF.
 *
 * @param p_hwfn
 * @param vport_id
 * @param mtu
 * @param inner_vlan_removal
 * @param tpa_mode
 * @param max_buffers_per_cqe
 * @param only_untagged - default behavior regarding vlan acceptance
 *
 * @return enum _qed_status
 */
int qed_vf_pf_vport_start(struct qed_hwfn *p_hwfn,
			  u8 vport_id,
			  u16 mtu,
			  u8 inner_vlan_removal,
			  enum qed_tpa_mode tpa_mode,
			  u8 max_buffers_per_cqe, u8 only_untagged);

/**
 * @brief qed_vf_pf_vport_stop - stop the VF's vport
 *
 * @param p_hwfn
 *
 * @return enum _qed_status
 */
int qed_vf_pf_vport_stop(struct qed_hwfn *p_hwfn);

int qed_vf_pf_filter_ucast(struct qed_hwfn *p_hwfn,
			   struct qed_filter_ucast *p_param);

void qed_vf_pf_filter_mcast(struct qed_hwfn *p_hwfn,
			    struct qed_filter_mcast *p_filter_cmd);

/**
 * @brief qed_vf_pf_int_cleanup - clean the SB of the VF
 *
 * @param p_hwfn
 *
 * @return enum _qed_status
 */
int qed_vf_pf_int_cleanup(struct qed_hwfn *p_hwfn);

/**
 * @brief - return the link params in a given bulletin board
 *
 * @param p_hwfn
 * @param p_params - pointer to a struct to fill with link params
 * @param p_bulletin
 */
void __qed_vf_get_link_params(struct qed_hwfn *p_hwfn,
			      struct qed_mcp_link_params *p_params,
			      struct qed_bulletin_content *p_bulletin);

/**
 * @brief - return the link state in a given bulletin board
 *
 * @param p_hwfn
 * @param p_link - pointer to a struct to fill with link state
 * @param p_bulletin
 */
void __qed_vf_get_link_state(struct qed_hwfn *p_hwfn,
			     struct qed_mcp_link_state *p_link,
			     struct qed_bulletin_content *p_bulletin);

/**
 * @brief - return the link capabilities in a given bulletin board
 *
 * @param p_hwfn
 * @param p_link_caps - pointer to a struct to fill with link capabilities
 * @param p_bulletin
 */
void __qed_vf_get_link_caps(struct qed_hwfn *p_hwfn,
			    struct qed_mcp_link_capabilities *p_link_caps,
			    struct qed_bulletin_content *p_bulletin);

void qed_iov_vf_task(struct work_struct *work);
#else
static inline void qed_vf_get_link_params(struct qed_hwfn *p_hwfn,
					  struct qed_mcp_link_params *params)
{
}

static inline void qed_vf_get_link_state(struct qed_hwfn *p_hwfn,
					 struct qed_mcp_link_state *link)
{
}

static inline void
qed_vf_get_link_caps(struct qed_hwfn *p_hwfn,
		     struct qed_mcp_link_capabilities *p_link_caps)
{
}

static inline void qed_vf_get_num_rxqs(struct qed_hwfn *p_hwfn, u8 *num_rxqs)
{
}

static inline void qed_vf_get_port_mac(struct qed_hwfn *p_hwfn, u8 *port_mac)
{
}

static inline void qed_vf_get_num_vlan_filters(struct qed_hwfn *p_hwfn,
						u8 *num_vlan_filters)
{
}

static inline void qed_vf_get_num_mac_filters(struct qed_hwfn *p_hwfn,
					      u8 *num_mac_filters)
{
}

static inline bool qed_vf_check_mac(struct qed_hwfn *p_hwfn, u8 *mac)
{
	return false;
}

static inline void qed_vf_get_fw_version(struct qed_hwfn *p_hwfn,
					 u16 *fw_major, u16 *fw_minor,
					 u16 *fw_rev, u16 *fw_eng)
{
}

static inline int qed_vf_hw_prepare(struct qed_hwfn *p_hwfn)
{
	return -EINVAL;
}

static inline int qed_vf_pf_rxq_start(struct qed_hwfn *p_hwfn,
				      struct qed_queue_cid *p_cid,
				      u16 bd_max_bytes,
				      dma_addr_t bd_chain_phys_addr,
				      dma_addr_t cqe_pbl_addr,
				      u16 cqe_pbl_size, void __iomem **pp_prod)
{
	return -EINVAL;
}

static inline int qed_vf_pf_txq_start(struct qed_hwfn *p_hwfn,
				      struct qed_queue_cid *p_cid,
				      dma_addr_t pbl_addr,
				      u16 pbl_size, void __iomem **pp_doorbell)
{
	return -EINVAL;
}

static inline int qed_vf_pf_rxq_stop(struct qed_hwfn *p_hwfn,
				     struct qed_queue_cid *p_cid,
				     bool cqe_completion)
{
	return -EINVAL;
}

static inline int qed_vf_pf_txq_stop(struct qed_hwfn *p_hwfn,
				     struct qed_queue_cid *p_cid)
{
	return -EINVAL;
}

static inline int
qed_vf_pf_vport_update(struct qed_hwfn *p_hwfn,
		       struct qed_sp_vport_update_params *p_params)
{
	return -EINVAL;
}

static inline int qed_vf_pf_reset(struct qed_hwfn *p_hwfn)
{
	return -EINVAL;
}

static inline int qed_vf_pf_release(struct qed_hwfn *p_hwfn)
{
	return -EINVAL;
}

static inline u16 qed_vf_get_igu_sb_id(struct qed_hwfn *p_hwfn, u16 sb_id)
{
	return 0;
}

static inline int qed_vf_pf_vport_start(struct qed_hwfn *p_hwfn,
					u8 vport_id,
					u16 mtu,
					u8 inner_vlan_removal,
					enum qed_tpa_mode tpa_mode,
					u8 max_buffers_per_cqe,
					u8 only_untagged)
{
	return -EINVAL;
}

static inline int qed_vf_pf_vport_stop(struct qed_hwfn *p_hwfn)
{
	return -EINVAL;
}

static inline int qed_vf_pf_filter_ucast(struct qed_hwfn *p_hwfn,
					 struct qed_filter_ucast *p_param)
{
	return -EINVAL;
}

static inline void qed_vf_pf_filter_mcast(struct qed_hwfn *p_hwfn,
					  struct qed_filter_mcast *p_filter_cmd)
{
}

static inline int qed_vf_pf_int_cleanup(struct qed_hwfn *p_hwfn)
{
	return -EINVAL;
}

static inline void __qed_vf_get_link_params(struct qed_hwfn *p_hwfn,
					    struct qed_mcp_link_params *p_params,
					    struct qed_bulletin_content *p_bulletin)
{
}

static inline void __qed_vf_get_link_state(struct qed_hwfn *p_hwfn,
					   struct qed_mcp_link_state *p_link,
					   struct qed_bulletin_content *p_bulletin)
{
}

static inline void
__qed_vf_get_link_caps(struct qed_hwfn *p_hwfn,
		       struct qed_mcp_link_capabilities *p_link_caps,
		       struct qed_bulletin_content *p_bulletin)
{
}

static inline void qed_iov_vf_task(struct work_struct *work)
{
}
#endif

#endif /* _QED_VF_H */