/* QLogic qed NIC Driver
 * Copyright (c) 2015-2017 QLogic Corporation
 *
 * This software is available to you under a choice of one of two
 * licenses. You may choose to be licensed under the terms of the GNU
 * General Public License (GPL) Version 2, available from the file
 * COPYING in the main directory of this source tree, or the
 * OpenIB.org BSD license below:
 *
 * Redistribution and use in source and binary forms, with or
 * without modification, are permitted provided that the following
 * conditions are met:
 *
 * - Redistributions of source code must retain the above
 *   copyright notice, this list of conditions and the following
 *   disclaimer.
 *
 * - Redistributions in binary form must reproduce the above
 *   copyright notice, this list of conditions and the following
 *   disclaimer in the documentation and /or other materials
 *   provided with the distribution.
 *
 * THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND,
 * EXPRESS OR IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF
 * MERCHANTABILITY, FITNESS FOR A PARTICULAR PURPOSE AND
 * NONINFRINGEMENT. IN NO EVENT SHALL THE AUTHORS OR COPYRIGHT HOLDERS
 * BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER LIABILITY, WHETHER IN AN
 * ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING FROM, OUT OF OR IN
 * CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER DEALINGS IN THE
 * SOFTWARE.
 */
#ifndef _QED_H
#define _QED_H

#include <linux/types.h>
#include <linux/io.h>
#include <linux/delay.h>
#include <linux/firmware.h>
#include <linux/interrupt.h>
#include <linux/list.h>
#include <linux/mutex.h>
#include <linux/pci.h>
#include <linux/slab.h>
#include <linux/string.h>
#include <linux/workqueue.h>
#include <linux/zlib.h>
#include <linux/hashtable.h>
#include <linux/qed/qed_if.h>
#include "qed_debug.h"
#include "qed_hsi.h"
  50. extern const struct qed_common_ops qed_common_ops_pass;
  51. #define QED_MAJOR_VERSION 8
  52. #define QED_MINOR_VERSION 10
  53. #define QED_REVISION_VERSION 10
  54. #define QED_ENGINEERING_VERSION 21
  55. #define QED_VERSION \
  56. ((QED_MAJOR_VERSION << 24) | (QED_MINOR_VERSION << 16) | \
  57. (QED_REVISION_VERSION << 8) | QED_ENGINEERING_VERSION)
  58. #define STORM_FW_VERSION \
  59. ((FW_MAJOR_VERSION << 24) | (FW_MINOR_VERSION << 16) | \
  60. (FW_REVISION_VERSION << 8) | FW_ENGINEERING_VERSION)
  61. #define MAX_HWFNS_PER_DEVICE (4)
  62. #define NAME_SIZE 16
  63. #define VER_SIZE 16
  64. #define QED_WFQ_UNIT 100
  65. #define QED_WID_SIZE (1024)
  66. #define QED_MIN_WIDS (4)
  67. #define QED_PF_DEMS_SIZE (4)
  68. /* cau states */
  69. enum qed_coalescing_mode {
  70. QED_COAL_MODE_DISABLE,
  71. QED_COAL_MODE_ENABLE
  72. };
  73. struct qed_eth_cb_ops;
  74. struct qed_dev_info;
  75. union qed_mcp_protocol_stats;
  76. enum qed_mcp_protocol_type;
  77. /* helpers */
  78. #define QED_MFW_GET_FIELD(name, field) \
  79. (((name) & (field ## _MASK)) >> (field ## _SHIFT))
  80. #define QED_MFW_SET_FIELD(name, field, value) \
  81. do { \
  82. (name) &= ~((field ## _MASK) << (field ## _SHIFT)); \
  83. (name) |= (((value) << (field ## _SHIFT)) & (field ## _MASK));\
  84. } while (0)
  85. static inline u32 qed_db_addr(u32 cid, u32 DEMS)
  86. {
  87. u32 db_addr = FIELD_VALUE(DB_LEGACY_ADDR_DEMS, DEMS) |
  88. (cid * QED_PF_DEMS_SIZE);
  89. return db_addr;
  90. }
  91. static inline u32 qed_db_addr_vf(u32 cid, u32 DEMS)
  92. {
  93. u32 db_addr = FIELD_VALUE(DB_LEGACY_ADDR_DEMS, DEMS) |
  94. FIELD_VALUE(DB_LEGACY_ADDR_ICID, cid);
  95. return db_addr;
  96. }
  97. #define ALIGNED_TYPE_SIZE(type_name, p_hwfn) \
  98. ((sizeof(type_name) + (u32)(1 << (p_hwfn->cdev->cache_shift)) - 1) & \
  99. ~((1 << (p_hwfn->cdev->cache_shift)) - 1))
  100. #define for_each_hwfn(cdev, i) for (i = 0; i < cdev->num_hwfns; i++)
  101. #define D_TRINE(val, cond1, cond2, true1, true2, def) \
  102. (val == (cond1) ? true1 : \
  103. (val == (cond2) ? true2 : def))
  104. /* forward */
  105. struct qed_ptt_pool;
  106. struct qed_spq;
  107. struct qed_sb_info;
  108. struct qed_sb_attn_info;
  109. struct qed_cxt_mngr;
  110. struct qed_sb_sp_info;
  111. struct qed_ll2_info;
  112. struct qed_mcp_info;
  113. struct qed_rt_data {
  114. u32 *init_val;
  115. bool *b_valid;
  116. };
  117. enum qed_tunn_mode {
  118. QED_MODE_L2GENEVE_TUNN,
  119. QED_MODE_IPGENEVE_TUNN,
  120. QED_MODE_L2GRE_TUNN,
  121. QED_MODE_IPGRE_TUNN,
  122. QED_MODE_VXLAN_TUNN,
  123. };
  124. enum qed_tunn_clss {
  125. QED_TUNN_CLSS_MAC_VLAN,
  126. QED_TUNN_CLSS_MAC_VNI,
  127. QED_TUNN_CLSS_INNER_MAC_VLAN,
  128. QED_TUNN_CLSS_INNER_MAC_VNI,
  129. QED_TUNN_CLSS_MAC_VLAN_DUAL_STAGE,
  130. MAX_QED_TUNN_CLSS,
  131. };
  132. struct qed_tunn_update_type {
  133. bool b_update_mode;
  134. bool b_mode_enabled;
  135. enum qed_tunn_clss tun_cls;
  136. };
  137. struct qed_tunn_update_udp_port {
  138. bool b_update_port;
  139. u16 port;
  140. };
  141. struct qed_tunnel_info {
  142. struct qed_tunn_update_type vxlan;
  143. struct qed_tunn_update_type l2_geneve;
  144. struct qed_tunn_update_type ip_geneve;
  145. struct qed_tunn_update_type l2_gre;
  146. struct qed_tunn_update_type ip_gre;
  147. struct qed_tunn_update_udp_port vxlan_port;
  148. struct qed_tunn_update_udp_port geneve_port;
  149. bool b_update_rx_cls;
  150. bool b_update_tx_cls;
  151. };
  152. struct qed_tunn_start_params {
  153. unsigned long tunn_mode;
  154. u16 vxlan_udp_port;
  155. u16 geneve_udp_port;
  156. u8 update_vxlan_udp_port;
  157. u8 update_geneve_udp_port;
  158. u8 tunn_clss_vxlan;
  159. u8 tunn_clss_l2geneve;
  160. u8 tunn_clss_ipgeneve;
  161. u8 tunn_clss_l2gre;
  162. u8 tunn_clss_ipgre;
  163. };
  164. struct qed_tunn_update_params {
  165. unsigned long tunn_mode_update_mask;
  166. unsigned long tunn_mode;
  167. u16 vxlan_udp_port;
  168. u16 geneve_udp_port;
  169. u8 update_rx_pf_clss;
  170. u8 update_tx_pf_clss;
  171. u8 update_vxlan_udp_port;
  172. u8 update_geneve_udp_port;
  173. u8 tunn_clss_vxlan;
  174. u8 tunn_clss_l2geneve;
  175. u8 tunn_clss_ipgeneve;
  176. u8 tunn_clss_l2gre;
  177. u8 tunn_clss_ipgre;
  178. };
  179. /* The PCI personality is not quite synonymous to protocol ID:
  180. * 1. All personalities need CORE connections
  181. * 2. The Ethernet personality may support also the RoCE protocol
  182. */
  183. enum qed_pci_personality {
  184. QED_PCI_ETH,
  185. QED_PCI_FCOE,
  186. QED_PCI_ISCSI,
  187. QED_PCI_ETH_ROCE,
  188. QED_PCI_DEFAULT /* default in shmem */
  189. };
  190. /* All VFs are symmetric, all counters are PF + all VFs */
  191. struct qed_qm_iids {
  192. u32 cids;
  193. u32 vf_cids;
  194. u32 tids;
  195. };
  196. /* HW / FW resources, output of features supported below, most information
  197. * is received from MFW.
  198. */
  199. enum qed_resources {
  200. QED_SB,
  201. QED_L2_QUEUE,
  202. QED_VPORT,
  203. QED_RSS_ENG,
  204. QED_PQ,
  205. QED_RL,
  206. QED_MAC,
  207. QED_VLAN,
  208. QED_RDMA_CNQ_RAM,
  209. QED_ILT,
  210. QED_LL2_QUEUE,
  211. QED_CMDQS_CQS,
  212. QED_RDMA_STATS_QUEUE,
  213. QED_BDQ,
  214. QED_MAX_RESC,
  215. };
  216. enum QED_FEATURE {
  217. QED_PF_L2_QUE,
  218. QED_VF,
  219. QED_RDMA_CNQ,
  220. QED_ISCSI_CQ,
  221. QED_FCOE_CQ,
  222. QED_VF_L2_QUE,
  223. QED_MAX_FEATURES,
  224. };
  225. enum QED_PORT_MODE {
  226. QED_PORT_MODE_DE_2X40G,
  227. QED_PORT_MODE_DE_2X50G,
  228. QED_PORT_MODE_DE_1X100G,
  229. QED_PORT_MODE_DE_4X10G_F,
  230. QED_PORT_MODE_DE_4X10G_E,
  231. QED_PORT_MODE_DE_4X20G,
  232. QED_PORT_MODE_DE_1X40G,
  233. QED_PORT_MODE_DE_2X25G,
  234. QED_PORT_MODE_DE_1X25G,
  235. QED_PORT_MODE_DE_4X25G,
  236. QED_PORT_MODE_DE_2X10G,
  237. };
  238. enum qed_dev_cap {
  239. QED_DEV_CAP_ETH,
  240. QED_DEV_CAP_FCOE,
  241. QED_DEV_CAP_ISCSI,
  242. QED_DEV_CAP_ROCE,
  243. };
  244. enum qed_wol_support {
  245. QED_WOL_SUPPORT_NONE,
  246. QED_WOL_SUPPORT_PME,
  247. };
  248. struct qed_hw_info {
  249. /* PCI personality */
  250. enum qed_pci_personality personality;
  251. /* Resource Allocation scheme results */
  252. u32 resc_start[QED_MAX_RESC];
  253. u32 resc_num[QED_MAX_RESC];
  254. u32 feat_num[QED_MAX_FEATURES];
  255. #define RESC_START(_p_hwfn, resc) ((_p_hwfn)->hw_info.resc_start[resc])
  256. #define RESC_NUM(_p_hwfn, resc) ((_p_hwfn)->hw_info.resc_num[resc])
  257. #define RESC_END(_p_hwfn, resc) (RESC_START(_p_hwfn, resc) + \
  258. RESC_NUM(_p_hwfn, resc))
  259. #define FEAT_NUM(_p_hwfn, resc) ((_p_hwfn)->hw_info.feat_num[resc])
  260. /* Amount of traffic classes HW supports */
  261. u8 num_hw_tc;
  262. /* Amount of TCs which should be active according to DCBx or upper
  263. * layer driver configuration.
  264. */
  265. u8 num_active_tc;
  266. u8 offload_tc;
  267. u32 concrete_fid;
  268. u16 opaque_fid;
  269. u16 ovlan;
  270. u32 part_num[4];
  271. unsigned char hw_mac_addr[ETH_ALEN];
  272. u64 node_wwn;
  273. u64 port_wwn;
  274. u16 num_fcoe_conns;
  275. struct qed_igu_info *p_igu_info;
  276. u32 port_mode;
  277. u32 hw_mode;
  278. unsigned long device_capabilities;
  279. u16 mtu;
  280. enum qed_wol_support b_wol_support;
  281. };
  282. /* maximun size of read/write commands (HW limit) */
  283. #define DMAE_MAX_RW_SIZE 0x2000
  284. struct qed_dmae_info {
  285. /* Mutex for synchronizing access to functions */
  286. struct mutex mutex;
  287. u8 channel;
  288. dma_addr_t completion_word_phys_addr;
  289. /* The memory location where the DMAE writes the completion
  290. * value when an operation is finished on this context.
  291. */
  292. u32 *p_completion_word;
  293. dma_addr_t intermediate_buffer_phys_addr;
  294. /* An intermediate buffer for DMAE operations that use virtual
  295. * addresses - data is DMA'd to/from this buffer and then
  296. * memcpy'd to/from the virtual address
  297. */
  298. u32 *p_intermediate_buffer;
  299. dma_addr_t dmae_cmd_phys_addr;
  300. struct dmae_cmd *p_dmae_cmd;
  301. };
  302. struct qed_wfq_data {
  303. /* when feature is configured for at least 1 vport */
  304. u32 min_speed;
  305. bool configured;
  306. };
  307. struct qed_qm_info {
  308. struct init_qm_pq_params *qm_pq_params;
  309. struct init_qm_vport_params *qm_vport_params;
  310. struct init_qm_port_params *qm_port_params;
  311. u16 start_pq;
  312. u8 start_vport;
  313. u16 pure_lb_pq;
  314. u16 offload_pq;
  315. u16 low_latency_pq;
  316. u16 pure_ack_pq;
  317. u16 ooo_pq;
  318. u16 first_vf_pq;
  319. u16 first_mcos_pq;
  320. u16 first_rl_pq;
  321. u16 num_pqs;
  322. u16 num_vf_pqs;
  323. u8 num_vports;
  324. u8 max_phys_tcs_per_port;
  325. u8 ooo_tc;
  326. bool pf_rl_en;
  327. bool pf_wfq_en;
  328. bool vport_rl_en;
  329. bool vport_wfq_en;
  330. u8 pf_wfq;
  331. u32 pf_rl;
  332. struct qed_wfq_data *wfq_data;
  333. u8 num_pf_rls;
  334. };
  335. struct storm_stats {
  336. u32 address;
  337. u32 len;
  338. };
  339. struct qed_storm_stats {
  340. struct storm_stats mstats;
  341. struct storm_stats pstats;
  342. struct storm_stats tstats;
  343. struct storm_stats ustats;
  344. };
  345. struct qed_fw_data {
  346. struct fw_ver_info *fw_ver_info;
  347. const u8 *modes_tree_buf;
  348. union init_op *init_ops;
  349. const u32 *arr_data;
  350. u32 init_ops_size;
  351. };
  352. #define DRV_MODULE_VERSION \
  353. __stringify(QED_MAJOR_VERSION) "." \
  354. __stringify(QED_MINOR_VERSION) "." \
  355. __stringify(QED_REVISION_VERSION) "." \
  356. __stringify(QED_ENGINEERING_VERSION)
  357. struct qed_simd_fp_handler {
  358. void *token;
  359. void (*func)(void *);
  360. };
  361. struct qed_hwfn {
  362. struct qed_dev *cdev;
  363. u8 my_id; /* ID inside the PF */
  364. #define IS_LEAD_HWFN(edev) (!((edev)->my_id))
  365. u8 rel_pf_id; /* Relative to engine*/
  366. u8 abs_pf_id;
  367. #define QED_PATH_ID(_p_hwfn) \
  368. (QED_IS_K2((_p_hwfn)->cdev) ? 0 : ((_p_hwfn)->abs_pf_id & 1))
  369. u8 port_id;
  370. bool b_active;
  371. u32 dp_module;
  372. u8 dp_level;
  373. char name[NAME_SIZE];
  374. bool first_on_engine;
  375. bool hw_init_done;
  376. u8 num_funcs_on_engine;
  377. u8 enabled_func_idx;
  378. /* BAR access */
  379. void __iomem *regview;
  380. void __iomem *doorbells;
  381. u64 db_phys_addr;
  382. unsigned long db_size;
  383. /* PTT pool */
  384. struct qed_ptt_pool *p_ptt_pool;
  385. /* HW info */
  386. struct qed_hw_info hw_info;
  387. /* rt_array (for init-tool) */
  388. struct qed_rt_data rt_data;
  389. /* SPQ */
  390. struct qed_spq *p_spq;
  391. /* EQ */
  392. struct qed_eq *p_eq;
  393. /* Consolidate Q*/
  394. struct qed_consq *p_consq;
  395. /* Slow-Path definitions */
  396. struct tasklet_struct *sp_dpc;
  397. bool b_sp_dpc_enabled;
  398. struct qed_ptt *p_main_ptt;
  399. struct qed_ptt *p_dpc_ptt;
  400. /* PTP will be used only by the leading function.
  401. * Usage of all PTP-apis should be synchronized as result.
  402. */
  403. struct qed_ptt *p_ptp_ptt;
  404. struct qed_sb_sp_info *p_sp_sb;
  405. struct qed_sb_attn_info *p_sb_attn;
  406. /* Protocol related */
  407. bool using_ll2;
  408. struct qed_ll2_info *p_ll2_info;
  409. struct qed_ooo_info *p_ooo_info;
  410. struct qed_rdma_info *p_rdma_info;
  411. struct qed_iscsi_info *p_iscsi_info;
  412. struct qed_fcoe_info *p_fcoe_info;
  413. struct qed_pf_params pf_params;
  414. bool b_rdma_enabled_in_prs;
  415. u32 rdma_prs_search_reg;
  416. /* Array of sb_info of all status blocks */
  417. struct qed_sb_info *sbs_info[MAX_SB_PER_PF_MIMD];
  418. u16 num_sbs;
  419. struct qed_cxt_mngr *p_cxt_mngr;
  420. /* Flag indicating whether interrupts are enabled or not*/
  421. bool b_int_enabled;
  422. bool b_int_requested;
  423. /* True if the driver requests for the link */
  424. bool b_drv_link_init;
  425. struct qed_vf_iov *vf_iov_info;
  426. struct qed_pf_iov *pf_iov_info;
  427. struct qed_mcp_info *mcp_info;
  428. struct qed_dcbx_info *p_dcbx_info;
  429. struct qed_dmae_info dmae_info;
  430. /* QM init */
  431. struct qed_qm_info qm_info;
  432. struct qed_storm_stats storm_stats;
  433. /* Buffer for unzipping firmware data */
  434. void *unzip_buf;
  435. struct dbg_tools_data dbg_info;
  436. /* PWM region specific data */
  437. u16 wid_count;
  438. u32 dpi_size;
  439. u32 dpi_count;
  440. /* This is used to calculate the doorbell address */
  441. u32 dpi_start_offset;
  442. /* If one of the following is set then EDPM shouldn't be used */
  443. u8 dcbx_no_edpm;
  444. u8 db_bar_no_edpm;
  445. struct qed_ptt *p_arfs_ptt;
  446. struct qed_simd_fp_handler simd_proto_handler[64];
  447. #ifdef CONFIG_QED_SRIOV
  448. struct workqueue_struct *iov_wq;
  449. struct delayed_work iov_task;
  450. unsigned long iov_task_flags;
  451. #endif
  452. struct z_stream_s *stream;
  453. struct qed_roce_ll2_info *ll2;
  454. };
  455. struct pci_params {
  456. int pm_cap;
  457. unsigned long mem_start;
  458. unsigned long mem_end;
  459. unsigned int irq;
  460. u8 pf_num;
  461. };
  462. struct qed_int_param {
  463. u32 int_mode;
  464. u8 num_vectors;
  465. u8 min_msix_cnt; /* for minimal functionality */
  466. };
  467. struct qed_int_params {
  468. struct qed_int_param in;
  469. struct qed_int_param out;
  470. struct msix_entry *msix_table;
  471. bool fp_initialized;
  472. u8 fp_msix_base;
  473. u8 fp_msix_cnt;
  474. u8 rdma_msix_base;
  475. u8 rdma_msix_cnt;
  476. };
  477. struct qed_dbg_feature {
  478. struct dentry *dentry;
  479. u8 *dump_buf;
  480. u32 buf_size;
  481. u32 dumped_dwords;
  482. };
  483. struct qed_dbg_params {
  484. struct qed_dbg_feature features[DBG_FEATURE_NUM];
  485. u8 engine_for_debug;
  486. bool print_data;
  487. };
  488. struct qed_dev {
  489. u32 dp_module;
  490. u8 dp_level;
  491. char name[NAME_SIZE];
  492. enum qed_dev_type type;
  493. /* Translate type/revision combo into the proper conditions */
  494. #define QED_IS_BB(dev) ((dev)->type == QED_DEV_TYPE_BB)
  495. #define QED_IS_BB_A0(dev) (QED_IS_BB(dev) && \
  496. CHIP_REV_IS_A0(dev))
  497. #define QED_IS_BB_B0(dev) (QED_IS_BB(dev) && \
  498. CHIP_REV_IS_B0(dev))
  499. #define QED_IS_AH(dev) ((dev)->type == QED_DEV_TYPE_AH)
  500. #define QED_IS_K2(dev) QED_IS_AH(dev)
  501. #define QED_GET_TYPE(dev) (QED_IS_BB_A0(dev) ? CHIP_BB_A0 : \
  502. QED_IS_BB_B0(dev) ? CHIP_BB_B0 : CHIP_K2)
  503. u16 vendor_id;
  504. u16 device_id;
  505. #define QED_DEV_ID_MASK 0xff00
  506. #define QED_DEV_ID_MASK_BB 0x1600
  507. #define QED_DEV_ID_MASK_AH 0x8000
  508. u16 chip_num;
  509. #define CHIP_NUM_MASK 0xffff
  510. #define CHIP_NUM_SHIFT 16
  511. u16 chip_rev;
  512. #define CHIP_REV_MASK 0xf
  513. #define CHIP_REV_SHIFT 12
  514. #define CHIP_REV_IS_A0(_cdev) (!(_cdev)->chip_rev)
  515. #define CHIP_REV_IS_B0(_cdev) ((_cdev)->chip_rev == 1)
  516. u16 chip_metal;
  517. #define CHIP_METAL_MASK 0xff
  518. #define CHIP_METAL_SHIFT 4
  519. u16 chip_bond_id;
  520. #define CHIP_BOND_ID_MASK 0xf
  521. #define CHIP_BOND_ID_SHIFT 0
  522. u8 num_engines;
  523. u8 num_ports_in_engines;
  524. u8 num_funcs_in_port;
  525. u8 path_id;
  526. enum qed_mf_mode mf_mode;
  527. #define IS_MF_DEFAULT(_p_hwfn) (((_p_hwfn)->cdev)->mf_mode == QED_MF_DEFAULT)
  528. #define IS_MF_SI(_p_hwfn) (((_p_hwfn)->cdev)->mf_mode == QED_MF_NPAR)
  529. #define IS_MF_SD(_p_hwfn) (((_p_hwfn)->cdev)->mf_mode == QED_MF_OVLAN)
  530. int pcie_width;
  531. int pcie_speed;
  532. u8 ver_str[VER_SIZE];
  533. /* Add MF related configuration */
  534. u8 mcp_rev;
  535. u8 boot_mode;
  536. /* WoL related configurations */
  537. u8 wol_config;
  538. u8 wol_mac[ETH_ALEN];
  539. u32 int_mode;
  540. enum qed_coalescing_mode int_coalescing_mode;
  541. u16 rx_coalesce_usecs;
  542. u16 tx_coalesce_usecs;
  543. /* Start Bar offset of first hwfn */
  544. void __iomem *regview;
  545. void __iomem *doorbells;
  546. u64 db_phys_addr;
  547. unsigned long db_size;
  548. /* PCI */
  549. u8 cache_shift;
  550. /* Init */
  551. const struct iro *iro_arr;
  552. #define IRO (p_hwfn->cdev->iro_arr)
  553. /* HW functions */
  554. u8 num_hwfns;
  555. struct qed_hwfn hwfns[MAX_HWFNS_PER_DEVICE];
  556. /* SRIOV */
  557. struct qed_hw_sriov_info *p_iov_info;
  558. #define IS_QED_SRIOV(cdev) (!!(cdev)->p_iov_info)
  559. struct qed_tunnel_info tunnel;
  560. bool b_is_vf;
  561. u32 drv_type;
  562. struct qed_eth_stats *reset_stats;
  563. struct qed_fw_data *fw_data;
  564. u32 mcp_nvm_resp;
  565. /* Linux specific here */
  566. struct qede_dev *edev;
  567. struct pci_dev *pdev;
  568. u32 flags;
  569. #define QED_FLAG_STORAGE_STARTED (BIT(0))
  570. int msg_enable;
  571. struct pci_params pci_params;
  572. struct qed_int_params int_params;
  573. u8 protocol;
  574. #define IS_QED_ETH_IF(cdev) ((cdev)->protocol == QED_PROTOCOL_ETH)
  575. #define IS_QED_FCOE_IF(cdev) ((cdev)->protocol == QED_PROTOCOL_FCOE)
  576. /* Callbacks to protocol driver */
  577. union {
  578. struct qed_common_cb_ops *common;
  579. struct qed_eth_cb_ops *eth;
  580. struct qed_fcoe_cb_ops *fcoe;
  581. struct qed_iscsi_cb_ops *iscsi;
  582. } protocol_ops;
  583. void *ops_cookie;
  584. struct qed_dbg_params dbg_params;
  585. #ifdef CONFIG_QED_LL2
  586. struct qed_cb_ll2_info *ll2;
  587. u8 ll2_mac_address[ETH_ALEN];
  588. #endif
  589. DECLARE_HASHTABLE(connections, 10);
  590. const struct firmware *firmware;
  591. u32 rdma_max_sge;
  592. u32 rdma_max_inline;
  593. u32 rdma_max_srq_sge;
  594. u16 tunn_feature_mask;
  595. };
  596. #define NUM_OF_VFS(dev) (QED_IS_BB(dev) ? MAX_NUM_VFS_BB \
  597. : MAX_NUM_VFS_K2)
  598. #define NUM_OF_L2_QUEUES(dev) (QED_IS_BB(dev) ? MAX_NUM_L2_QUEUES_BB \
  599. : MAX_NUM_L2_QUEUES_K2)
  600. #define NUM_OF_PORTS(dev) (QED_IS_BB(dev) ? MAX_NUM_PORTS_BB \
  601. : MAX_NUM_PORTS_K2)
  602. #define NUM_OF_SBS(dev) (QED_IS_BB(dev) ? MAX_SB_PER_PATH_BB \
  603. : MAX_SB_PER_PATH_K2)
  604. #define NUM_OF_ENG_PFS(dev) (QED_IS_BB(dev) ? MAX_NUM_PFS_BB \
  605. : MAX_NUM_PFS_K2)
  606. /**
  607. * @brief qed_concrete_to_sw_fid - get the sw function id from
  608. * the concrete value.
  609. *
  610. * @param concrete_fid
  611. *
  612. * @return inline u8
  613. */
  614. static inline u8 qed_concrete_to_sw_fid(struct qed_dev *cdev,
  615. u32 concrete_fid)
  616. {
  617. u8 vfid = GET_FIELD(concrete_fid, PXP_CONCRETE_FID_VFID);
  618. u8 pfid = GET_FIELD(concrete_fid, PXP_CONCRETE_FID_PFID);
  619. u8 vf_valid = GET_FIELD(concrete_fid,
  620. PXP_CONCRETE_FID_VFVALID);
  621. u8 sw_fid;
  622. if (vf_valid)
  623. sw_fid = vfid + MAX_NUM_PFS;
  624. else
  625. sw_fid = pfid;
  626. return sw_fid;
  627. }
  628. #define PURE_LB_TC 8
  629. #define OOO_LB_TC 9
  630. int qed_configure_vport_wfq(struct qed_dev *cdev, u16 vp_id, u32 rate);
  631. void qed_configure_vp_wfq_on_link_change(struct qed_dev *cdev,
  632. struct qed_ptt *p_ptt,
  633. u32 min_pf_rate);
  634. void qed_clean_wfq_db(struct qed_hwfn *p_hwfn, struct qed_ptt *p_ptt);
  635. int qed_device_num_engines(struct qed_dev *cdev);
  636. int qed_device_get_port_id(struct qed_dev *cdev);
  637. #define QED_LEADING_HWFN(dev) (&dev->hwfns[0])
  638. /* Flags for indication of required queues */
  639. #define PQ_FLAGS_RLS (BIT(0))
  640. #define PQ_FLAGS_MCOS (BIT(1))
  641. #define PQ_FLAGS_LB (BIT(2))
  642. #define PQ_FLAGS_OOO (BIT(3))
  643. #define PQ_FLAGS_ACK (BIT(4))
  644. #define PQ_FLAGS_OFLD (BIT(5))
  645. #define PQ_FLAGS_VFS (BIT(6))
  646. #define PQ_FLAGS_LLT (BIT(7))
  647. /* physical queue index for cm context intialization */
  648. u16 qed_get_cm_pq_idx(struct qed_hwfn *p_hwfn, u32 pq_flags);
  649. u16 qed_get_cm_pq_idx_mcos(struct qed_hwfn *p_hwfn, u8 tc);
  650. u16 qed_get_cm_pq_idx_vf(struct qed_hwfn *p_hwfn, u16 vf);
  651. #define QED_LEADING_HWFN(dev) (&dev->hwfns[0])
  652. /* Other Linux specific common definitions */
  653. #define DP_NAME(cdev) ((cdev)->name)
  654. #define REG_ADDR(cdev, offset) (void __iomem *)((u8 __iomem *)\
  655. (cdev->regview) + \
  656. (offset))
  657. #define REG_RD(cdev, offset) readl(REG_ADDR(cdev, offset))
  658. #define REG_WR(cdev, offset, val) writel((u32)val, REG_ADDR(cdev, offset))
  659. #define REG_WR16(cdev, offset, val) writew((u16)val, REG_ADDR(cdev, offset))
  660. #define DOORBELL(cdev, db_addr, val) \
  661. writel((u32)val, (void __iomem *)((u8 __iomem *)\
  662. (cdev->doorbells) + (db_addr)))
  663. /* Prototypes */
  664. int qed_fill_dev_info(struct qed_dev *cdev,
  665. struct qed_dev_info *dev_info);
  666. void qed_link_update(struct qed_hwfn *hwfn);
  667. u32 qed_unzip_data(struct qed_hwfn *p_hwfn,
  668. u32 input_len, u8 *input_buf,
  669. u32 max_size, u8 *unzip_buf);
  670. void qed_get_protocol_stats(struct qed_dev *cdev,
  671. enum qed_mcp_protocol_type type,
  672. union qed_mcp_protocol_stats *stats);
  673. int qed_slowpath_irq_req(struct qed_hwfn *hwfn);
  674. void qed_slowpath_irq_sync(struct qed_hwfn *p_hwfn);
  675. #endif /* _QED_H */