/* QLogic qed NIC Driver
 * Copyright (c) 2015-2017 QLogic Corporation
 *
 * This software is available to you under a choice of one of two
 * licenses. You may choose to be licensed under the terms of the GNU
 * General Public License (GPL) Version 2, available from the file
 * COPYING in the main directory of this source tree, or the
 * OpenIB.org BSD license below:
 *
 * Redistribution and use in source and binary forms, with or
 * without modification, are permitted provided that the following
 * conditions are met:
 *
 * - Redistributions of source code must retain the above
 *   copyright notice, this list of conditions and the following
 *   disclaimer.
 *
 * - Redistributions in binary form must reproduce the above
 *   copyright notice, this list of conditions and the following
 *   disclaimer in the documentation and/or other materials
 *   provided with the distribution.
 *
 * THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND,
 * EXPRESS OR IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF
 * MERCHANTABILITY, FITNESS FOR A PARTICULAR PURPOSE AND
 * NONINFRINGEMENT. IN NO EVENT SHALL THE AUTHORS OR COPYRIGHT HOLDERS
 * BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER LIABILITY, WHETHER IN AN
 * ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING FROM, OUT OF OR IN
 * CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER DEALINGS IN THE
 * SOFTWARE.
 */
#ifndef _QED_H
#define _QED_H

#include <linux/types.h>
#include <linux/io.h>
#include <linux/delay.h>
#include <linux/firmware.h>
#include <linux/interrupt.h>
#include <linux/list.h>
#include <linux/mutex.h>
#include <linux/pci.h>
#include <linux/slab.h>
#include <linux/string.h>
#include <linux/workqueue.h>
#include <linux/zlib.h>
#include <linux/hashtable.h>
#include <linux/qed/qed_if.h>
#include "qed_debug.h"
#include "qed_hsi.h"

extern const struct qed_common_ops qed_common_ops_pass;

#define QED_MAJOR_VERSION		8
#define QED_MINOR_VERSION		10
#define QED_REVISION_VERSION		10
#define QED_ENGINEERING_VERSION		21

#define QED_VERSION \
	((QED_MAJOR_VERSION << 24) | (QED_MINOR_VERSION << 16) | \
	 (QED_REVISION_VERSION << 8) | QED_ENGINEERING_VERSION)

#define STORM_FW_VERSION \
	((FW_MAJOR_VERSION << 24) | (FW_MINOR_VERSION << 16) | \
	 (FW_REVISION_VERSION << 8) | FW_ENGINEERING_VERSION)
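
/* For reference: both QED_VERSION and STORM_FW_VERSION pack four 8-bit
 * components into one 32-bit word as major[31:24] | minor[23:16] |
 * revision[15:8] | engineering[7:0]; with the defines above, QED_VERSION
 * evaluates to 0x080a0a15, i.e. 8.10.10.21.
 */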

#define MAX_HWFNS_PER_DEVICE	(4)
#define NAME_SIZE		16
#define VER_SIZE		16

#define QED_WFQ_UNIT		100

#define QED_WID_SIZE		(1024)
#define QED_PF_DEMS_SIZE	(4)

/* cau states */
enum qed_coalescing_mode {
	QED_COAL_MODE_DISABLE,
	QED_COAL_MODE_ENABLE
};

struct qed_eth_cb_ops;
struct qed_dev_info;
union qed_mcp_protocol_stats;
enum qed_mcp_protocol_type;

/* helpers */
#define QED_MFW_GET_FIELD(name, field) \
	(((name) & (field ## _MASK)) >> (field ## _SHIFT))

#define QED_MFW_SET_FIELD(name, field, value) \
	do { \
		(name) &= ~((field ## _MASK) << (field ## _SHIFT)); \
		(name) |= (((value) << (field ## _SHIFT)) & (field ## _MASK)); \
	} while (0)
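
/* The two helpers above operate on <field>_MASK/<field>_SHIFT pairs from
 * the management-FW (shmem) definitions.  Illustrative use with a
 * hypothetical FOO field:
 *
 *	u32 foo = QED_MFW_GET_FIELD(shmem_word, FOO);
 *
 * i.e. mask the field in place and shift it down to bit 0; the SET variant
 * clears the field in 'name' and ORs 'value' back in at the field's offset.
 */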

static inline u32 qed_db_addr(u32 cid, u32 DEMS)
{
	u32 db_addr = FIELD_VALUE(DB_LEGACY_ADDR_DEMS, DEMS) |
		      (cid * QED_PF_DEMS_SIZE);

	return db_addr;
}

static inline u32 qed_db_addr_vf(u32 cid, u32 DEMS)
{
	u32 db_addr = FIELD_VALUE(DB_LEGACY_ADDR_DEMS, DEMS) |
		      FIELD_VALUE(DB_LEGACY_ADDR_ICID, cid);

	return db_addr;
}
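
/* Both helpers compute an offset into the doorbell BAR for a connection:
 * the PF variant spaces doorbells by QED_PF_DEMS_SIZE per CID, while the
 * VF variant encodes the CID through the legacy-address ICID field.
 * Hypothetical caller sketch (dems is a DEMS selector from the HSI
 * definitions, db_data is protocol specific):
 *
 *	u32 db_off = qed_db_addr(cid, dems);
 *	DOORBELL(cdev, db_off, db_data);
 */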

#define ALIGNED_TYPE_SIZE(type_name, p_hwfn) \
	((sizeof(type_name) + (u32)(1 << (p_hwfn->cdev->cache_shift)) - 1) & \
	 ~((1 << (p_hwfn->cdev->cache_shift)) - 1))
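
/* i.e. sizeof(type_name) rounded up to the device cache-line size
 * (1 << cache_shift).  For example, with cache_shift == 6 (64-byte lines),
 * a 52-byte type is padded to 64 bytes.
 */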

#define for_each_hwfn(cdev, i)	for (i = 0; i < cdev->num_hwfns; i++)

#define D_TRINE(val, cond1, cond2, true1, true2, def) \
	(val == (cond1) ? true1 : \
	 (val == (cond2) ? true2 : def))

/* forward */
struct qed_ptt_pool;
struct qed_spq;
struct qed_sb_info;
struct qed_sb_attn_info;
struct qed_cxt_mngr;
struct qed_sb_sp_info;
struct qed_ll2_info;
struct qed_mcp_info;

struct qed_rt_data {
	u32 *init_val;
	bool *b_valid;
};

enum qed_tunn_mode {
	QED_MODE_L2GENEVE_TUNN,
	QED_MODE_IPGENEVE_TUNN,
	QED_MODE_L2GRE_TUNN,
	QED_MODE_IPGRE_TUNN,
	QED_MODE_VXLAN_TUNN,
};

enum qed_tunn_clss {
	QED_TUNN_CLSS_MAC_VLAN,
	QED_TUNN_CLSS_MAC_VNI,
	QED_TUNN_CLSS_INNER_MAC_VLAN,
	QED_TUNN_CLSS_INNER_MAC_VNI,
	MAX_QED_TUNN_CLSS,
};

struct qed_tunn_start_params {
	unsigned long tunn_mode;
	u16 vxlan_udp_port;
	u16 geneve_udp_port;
	u8 update_vxlan_udp_port;
	u8 update_geneve_udp_port;
	u8 tunn_clss_vxlan;
	u8 tunn_clss_l2geneve;
	u8 tunn_clss_ipgeneve;
	u8 tunn_clss_l2gre;
	u8 tunn_clss_ipgre;
};

struct qed_tunn_update_params {
	unsigned long tunn_mode_update_mask;
	unsigned long tunn_mode;
	u16 vxlan_udp_port;
	u16 geneve_udp_port;
	u8 update_rx_pf_clss;
	u8 update_tx_pf_clss;
	u8 update_vxlan_udp_port;
	u8 update_geneve_udp_port;
	u8 tunn_clss_vxlan;
	u8 tunn_clss_l2geneve;
	u8 tunn_clss_ipgeneve;
	u8 tunn_clss_l2gre;
	u8 tunn_clss_ipgre;
};

/* The PCI personality is not quite synonymous with the protocol ID:
 * 1. All personalities need CORE connections
 * 2. The Ethernet personality may also support the RoCE protocol
 */
enum qed_pci_personality {
	QED_PCI_ETH,
	QED_PCI_FCOE,
	QED_PCI_ISCSI,
	QED_PCI_ETH_ROCE,
	QED_PCI_DEFAULT /* default in shmem */
};

/* All VFs are symmetric, all counters are PF + all VFs */
struct qed_qm_iids {
	u32 cids;
	u32 vf_cids;
	u32 tids;
};

/* HW / FW resources, output of features supported below, most information
 * is received from MFW.
 */
enum qed_resources {
	QED_SB,
	QED_L2_QUEUE,
	QED_VPORT,
	QED_RSS_ENG,
	QED_PQ,
	QED_RL,
	QED_MAC,
	QED_VLAN,
	QED_RDMA_CNQ_RAM,
	QED_ILT,
	QED_LL2_QUEUE,
	QED_CMDQS_CQS,
	QED_RDMA_STATS_QUEUE,
	QED_BDQ,
	QED_MAX_RESC,
};

enum QED_FEATURE {
	QED_PF_L2_QUE,
	QED_VF,
	QED_RDMA_CNQ,
	QED_VF_L2_QUE,
	QED_FCOE_CQ,
	QED_MAX_FEATURES,
};

enum QED_PORT_MODE {
	QED_PORT_MODE_DE_2X40G,
	QED_PORT_MODE_DE_2X50G,
	QED_PORT_MODE_DE_1X100G,
	QED_PORT_MODE_DE_4X10G_F,
	QED_PORT_MODE_DE_4X10G_E,
	QED_PORT_MODE_DE_4X20G,
	QED_PORT_MODE_DE_1X40G,
	QED_PORT_MODE_DE_2X25G,
	QED_PORT_MODE_DE_1X25G,
	QED_PORT_MODE_DE_4X25G,
	QED_PORT_MODE_DE_2X10G,
};

enum qed_dev_cap {
	QED_DEV_CAP_ETH,
	QED_DEV_CAP_FCOE,
	QED_DEV_CAP_ISCSI,
	QED_DEV_CAP_ROCE,
};

enum qed_wol_support {
	QED_WOL_SUPPORT_NONE,
	QED_WOL_SUPPORT_PME,
};

struct qed_hw_info {
	/* PCI personality */
	enum qed_pci_personality personality;

	/* Resource Allocation scheme results */
	u32 resc_start[QED_MAX_RESC];
	u32 resc_num[QED_MAX_RESC];
	u32 feat_num[QED_MAX_FEATURES];

#define RESC_START(_p_hwfn, resc) ((_p_hwfn)->hw_info.resc_start[resc])
#define RESC_NUM(_p_hwfn, resc) ((_p_hwfn)->hw_info.resc_num[resc])
#define RESC_END(_p_hwfn, resc) (RESC_START(_p_hwfn, resc) + \
				 RESC_NUM(_p_hwfn, resc))
#define FEAT_NUM(_p_hwfn, resc) ((_p_hwfn)->hw_info.feat_num[resc])
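
/* Illustrative reading of the accessors above: for a resource such as
 * QED_SB, RESC_START() is this PF's first index for the resource,
 * RESC_NUM() is how many it owns, and RESC_END() is one past the last,
 * so RESC_START() <= idx < RESC_END() holds for every index assigned to it.
 */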

	u8 num_tc;
	u8 offload_tc;
	u8 non_offload_tc;

	u32 concrete_fid;
	u16 opaque_fid;
	u16 ovlan;
	u32 part_num[4];

	unsigned char hw_mac_addr[ETH_ALEN];
	u64 node_wwn;
	u64 port_wwn;

	u16 num_fcoe_conns;

	struct qed_igu_info *p_igu_info;

	u32 port_mode;
	u32 hw_mode;
	unsigned long device_capabilities;
	u16 mtu;

	enum qed_wol_support b_wol_support;
};

/* maximum size of read/write commands (HW limit) */
#define DMAE_MAX_RW_SIZE	0x2000

struct qed_dmae_info {
	/* Mutex for synchronizing access to functions */
	struct mutex mutex;

	u8 channel;

	dma_addr_t completion_word_phys_addr;

	/* The memory location where the DMAE writes the completion
	 * value when an operation is finished on this context.
	 */
	u32 *p_completion_word;

	dma_addr_t intermediate_buffer_phys_addr;

	/* An intermediate buffer for DMAE operations that use virtual
	 * addresses - data is DMA'd to/from this buffer and then
	 * memcpy'd to/from the virtual address
	 */
	u32 *p_intermediate_buffer;

	dma_addr_t dmae_cmd_phys_addr;
	struct dmae_cmd *p_dmae_cmd;
};

struct qed_wfq_data {
	/* when feature is configured for at least 1 vport */
	u32 min_speed;
	bool configured;
};

struct qed_qm_info {
	struct init_qm_pq_params *qm_pq_params;
	struct init_qm_vport_params *qm_vport_params;
	struct init_qm_port_params *qm_port_params;
	u16 start_pq;
	u8 start_vport;
	u8 pure_lb_pq;
	u8 offload_pq;
	u8 pure_ack_pq;
	u8 ooo_pq;
	u8 vf_queues_offset;
	u16 num_pqs;
	u16 num_vf_pqs;
	u8 num_vports;
	u8 max_phys_tcs_per_port;
	bool pf_rl_en;
	bool pf_wfq_en;
	bool vport_rl_en;
	bool vport_wfq_en;
	u8 pf_wfq;
	u32 pf_rl;
	struct qed_wfq_data *wfq_data;
	u8 num_pf_rls;
};

struct storm_stats {
	u32 address;
	u32 len;
};

struct qed_storm_stats {
	struct storm_stats mstats;
	struct storm_stats pstats;
	struct storm_stats tstats;
	struct storm_stats ustats;
};

struct qed_fw_data {
	struct fw_ver_info *fw_ver_info;
	const u8 *modes_tree_buf;
	union init_op *init_ops;
	const u32 *arr_data;
	u32 init_ops_size;
};

#define DRV_MODULE_VERSION \
	__stringify(QED_MAJOR_VERSION) "." \
	__stringify(QED_MINOR_VERSION) "." \
	__stringify(QED_REVISION_VERSION) "." \
	__stringify(QED_ENGINEERING_VERSION)
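
/* With the component defines at the top of this header, the string-literal
 * concatenation above expands to "8.10.10.21".
 */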

struct qed_simd_fp_handler {
	void *token;
	void (*func)(void *);
};

struct qed_hwfn {
	struct qed_dev *cdev;
	u8 my_id;		/* ID inside the PF */
#define IS_LEAD_HWFN(edev)	(!((edev)->my_id))
	u8 rel_pf_id;		/* Relative to engine */
	u8 abs_pf_id;
#define QED_PATH_ID(_p_hwfn) \
	(QED_IS_K2((_p_hwfn)->cdev) ? 0 : ((_p_hwfn)->abs_pf_id & 1))
	u8 port_id;
	bool b_active;

	u32 dp_module;
	u8 dp_level;
	char name[NAME_SIZE];

	bool first_on_engine;
	bool hw_init_done;

	u8 num_funcs_on_engine;
	u8 enabled_func_idx;

	/* BAR access */
	void __iomem *regview;
	void __iomem *doorbells;
	u64 db_phys_addr;
	unsigned long db_size;

	/* PTT pool */
	struct qed_ptt_pool *p_ptt_pool;

	/* HW info */
	struct qed_hw_info hw_info;

	/* rt_array (for init-tool) */
	struct qed_rt_data rt_data;

	/* SPQ */
	struct qed_spq *p_spq;

	/* EQ */
	struct qed_eq *p_eq;

	/* Consolidate Q */
	struct qed_consq *p_consq;

	/* Slow-Path definitions */
	struct tasklet_struct *sp_dpc;
	bool b_sp_dpc_enabled;

	struct qed_ptt *p_main_ptt;
	struct qed_ptt *p_dpc_ptt;

	struct qed_sb_sp_info *p_sp_sb;
	struct qed_sb_attn_info *p_sb_attn;

	/* Protocol related */
	bool using_ll2;
	struct qed_ll2_info *p_ll2_info;
	struct qed_ooo_info *p_ooo_info;
	struct qed_rdma_info *p_rdma_info;
	struct qed_iscsi_info *p_iscsi_info;
	struct qed_fcoe_info *p_fcoe_info;
	struct qed_pf_params pf_params;

	bool b_rdma_enabled_in_prs;
	u32 rdma_prs_search_reg;

	/* Array of sb_info of all status blocks */
	struct qed_sb_info *sbs_info[MAX_SB_PER_PF_MIMD];
	u16 num_sbs;

	struct qed_cxt_mngr *p_cxt_mngr;

	/* Flag indicating whether interrupts are enabled or not */
	bool b_int_enabled;
	bool b_int_requested;

	/* True if the driver requests for the link */
	bool b_drv_link_init;

	struct qed_vf_iov *vf_iov_info;
	struct qed_pf_iov *pf_iov_info;
	struct qed_mcp_info *mcp_info;

	struct qed_dcbx_info *p_dcbx_info;

	struct qed_dmae_info dmae_info;

	/* QM init */
	struct qed_qm_info qm_info;
	struct qed_storm_stats storm_stats;

	/* Buffer for unzipping firmware data */
	void *unzip_buf;

	struct dbg_tools_data dbg_info;

	/* PWM region specific data */
	u32 dpi_size;
	u32 dpi_count;

	/* This is used to calculate the doorbell address */
	u32 dpi_start_offset;

	/* If one of the following is set then EDPM shouldn't be used */
	u8 dcbx_no_edpm;
	u8 db_bar_no_edpm;

	/* p_ptp_ptt is valid for leading HWFN only */
	struct qed_ptt *p_ptp_ptt;

	struct qed_simd_fp_handler simd_proto_handler[64];

#ifdef CONFIG_QED_SRIOV
	struct workqueue_struct *iov_wq;
	struct delayed_work iov_task;
	unsigned long iov_task_flags;
#endif

	struct z_stream_s *stream;
	struct qed_roce_ll2_info *ll2;
};

struct pci_params {
	int pm_cap;

	unsigned long mem_start;
	unsigned long mem_end;
	unsigned int irq;
	u8 pf_num;
};

struct qed_int_param {
	u32 int_mode;
	u8 num_vectors;
	u8 min_msix_cnt; /* for minimal functionality */
};

struct qed_int_params {
	struct qed_int_param in;
	struct qed_int_param out;
	struct msix_entry *msix_table;
	bool fp_initialized;
	u8 fp_msix_base;
	u8 fp_msix_cnt;
	u8 rdma_msix_base;
	u8 rdma_msix_cnt;
};

struct qed_dbg_feature {
	struct dentry *dentry;
	u8 *dump_buf;
	u32 buf_size;
	u32 dumped_dwords;
};

struct qed_dbg_params {
	struct qed_dbg_feature features[DBG_FEATURE_NUM];
	u8 engine_for_debug;
	bool print_data;
};

struct qed_dev {
	u32 dp_module;
	u8 dp_level;
	char name[NAME_SIZE];

	enum qed_dev_type type;
	/* Translate type/revision combo into the proper conditions */
#define QED_IS_BB(dev)		((dev)->type == QED_DEV_TYPE_BB)
#define QED_IS_BB_A0(dev)	(QED_IS_BB(dev) && CHIP_REV_IS_A0(dev))
#define QED_IS_BB_B0(dev)	(QED_IS_BB(dev) && CHIP_REV_IS_B0(dev))
#define QED_IS_AH(dev)		((dev)->type == QED_DEV_TYPE_AH)
#define QED_IS_K2(dev)		QED_IS_AH(dev)
#define QED_GET_TYPE(dev)	(QED_IS_BB_A0(dev) ? CHIP_BB_A0 : \
				 QED_IS_BB_B0(dev) ? CHIP_BB_B0 : CHIP_K2)

	u16 vendor_id;
	u16 device_id;
#define QED_DEV_ID_MASK		0xff00
#define QED_DEV_ID_MASK_BB	0x1600
#define QED_DEV_ID_MASK_AH	0x8000

	u16 chip_num;
#define CHIP_NUM_MASK		0xffff
#define CHIP_NUM_SHIFT		16

	u16 chip_rev;
#define CHIP_REV_MASK		0xf
#define CHIP_REV_SHIFT		12
#define CHIP_REV_IS_A0(_cdev)	(!(_cdev)->chip_rev)
#define CHIP_REV_IS_B0(_cdev)	((_cdev)->chip_rev == 1)

	u16 chip_metal;
#define CHIP_METAL_MASK		0xff
#define CHIP_METAL_SHIFT	4

	u16 chip_bond_id;
#define CHIP_BOND_ID_MASK	0xf
#define CHIP_BOND_ID_SHIFT	0

	u8 num_engines;
	u8 num_ports_in_engines;
	u8 num_funcs_in_port;

	u8 path_id;
	enum qed_mf_mode mf_mode;
#define IS_MF_DEFAULT(_p_hwfn)	(((_p_hwfn)->cdev)->mf_mode == QED_MF_DEFAULT)
#define IS_MF_SI(_p_hwfn)	(((_p_hwfn)->cdev)->mf_mode == QED_MF_NPAR)
#define IS_MF_SD(_p_hwfn)	(((_p_hwfn)->cdev)->mf_mode == QED_MF_OVLAN)

	int pcie_width;
	int pcie_speed;
	u8 ver_str[VER_SIZE];

	/* Add MF related configuration */
	u8 mcp_rev;
	u8 boot_mode;

	/* WoL related configurations */
	u8 wol_config;
	u8 wol_mac[ETH_ALEN];

	u32 int_mode;
	enum qed_coalescing_mode int_coalescing_mode;
	u16 rx_coalesce_usecs;
	u16 tx_coalesce_usecs;

	/* Start Bar offset of first hwfn */
	void __iomem *regview;
	void __iomem *doorbells;
	u64 db_phys_addr;
	unsigned long db_size;

	/* PCI */
	u8 cache_shift;

	/* Init */
	const struct iro *iro_arr;
#define IRO (p_hwfn->cdev->iro_arr)

	/* HW functions */
	u8 num_hwfns;
	struct qed_hwfn hwfns[MAX_HWFNS_PER_DEVICE];

	/* SRIOV */
	struct qed_hw_sriov_info *p_iov_info;
#define IS_QED_SRIOV(cdev)	(!!(cdev)->p_iov_info)

	unsigned long tunn_mode;

	bool b_is_vf;
	u32 drv_type;
	struct qed_eth_stats *reset_stats;
	struct qed_fw_data *fw_data;

	u32 mcp_nvm_resp;

	/* Linux specific here */
	struct qede_dev *edev;
	struct pci_dev *pdev;
	u32 flags;
#define QED_FLAG_STORAGE_STARTED	(BIT(0))

	int msg_enable;

	struct pci_params pci_params;

	struct qed_int_params int_params;

	u8 protocol;
#define IS_QED_ETH_IF(cdev)	((cdev)->protocol == QED_PROTOCOL_ETH)
#define IS_QED_FCOE_IF(cdev)	((cdev)->protocol == QED_PROTOCOL_FCOE)

	/* Callbacks to protocol driver */
	union {
		struct qed_common_cb_ops *common;
		struct qed_eth_cb_ops *eth;
		struct qed_fcoe_cb_ops *fcoe;
		struct qed_iscsi_cb_ops *iscsi;
	} protocol_ops;
	void *ops_cookie;

	struct qed_dbg_params dbg_params;

#ifdef CONFIG_QED_LL2
	struct qed_cb_ll2_info *ll2;
	u8 ll2_mac_address[ETH_ALEN];
#endif

	DECLARE_HASHTABLE(connections, 10);
	const struct firmware *firmware;

	u32 rdma_max_sge;
	u32 rdma_max_inline;
	u32 rdma_max_srq_sge;
};

#define NUM_OF_VFS(dev)		(QED_IS_BB(dev) ? MAX_NUM_VFS_BB \
						: MAX_NUM_VFS_K2)
#define NUM_OF_L2_QUEUES(dev)	(QED_IS_BB(dev) ? MAX_NUM_L2_QUEUES_BB \
						: MAX_NUM_L2_QUEUES_K2)
#define NUM_OF_PORTS(dev)	(QED_IS_BB(dev) ? MAX_NUM_PORTS_BB \
						: MAX_NUM_PORTS_K2)
#define NUM_OF_SBS(dev)		(QED_IS_BB(dev) ? MAX_SB_PER_PATH_BB \
						: MAX_SB_PER_PATH_K2)
#define NUM_OF_ENG_PFS(dev)	(QED_IS_BB(dev) ? MAX_NUM_PFS_BB \
						: MAX_NUM_PFS_K2)
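
/* The NUM_OF_* helpers above pick the per-chip maximum (BB vs. AH/K2)
 * from the HSI limits, so callers can size arrays and loops without
 * checking the device type themselves, e.g.:
 *
 *	for (i = 0; i < NUM_OF_ENG_PFS(cdev); i++)
 *		...
 */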

/**
 * @brief qed_concrete_to_sw_fid - get the sw function id from
 *        the concrete value.
 *
 * @param concrete_fid
 *
 * @return inline u8
 */
static inline u8 qed_concrete_to_sw_fid(struct qed_dev *cdev,
					u32 concrete_fid)
{
	u8 vfid = GET_FIELD(concrete_fid, PXP_CONCRETE_FID_VFID);
	u8 pfid = GET_FIELD(concrete_fid, PXP_CONCRETE_FID_PFID);
	u8 vf_valid = GET_FIELD(concrete_fid, PXP_CONCRETE_FID_VFVALID);
	u8 sw_fid;

	if (vf_valid)
		sw_fid = vfid + MAX_NUM_PFS;
	else
		sw_fid = pfid;

	return sw_fid;
}
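
/* For example, a concrete FID with the VF-valid bit set and VFID 3 maps to
 * sw_fid == MAX_NUM_PFS + 3, while a PF-only FID simply maps to its PFID.
 */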

#define PURE_LB_TC 8
#define OOO_LB_TC 9

int qed_configure_vport_wfq(struct qed_dev *cdev, u16 vp_id, u32 rate);
void qed_configure_vp_wfq_on_link_change(struct qed_dev *cdev,
					 struct qed_ptt *p_ptt,
					 u32 min_pf_rate);

void qed_clean_wfq_db(struct qed_hwfn *p_hwfn, struct qed_ptt *p_ptt);
#define QED_LEADING_HWFN(dev)	(&dev->hwfns[0])

int qed_device_num_engines(struct qed_dev *cdev);

/* Other Linux specific common definitions */
#define DP_NAME(cdev)		((cdev)->name)

#define REG_ADDR(cdev, offset)	(void __iomem *)((u8 __iomem *) \
						 (cdev->regview) + \
						 (offset))

#define REG_RD(cdev, offset)		readl(REG_ADDR(cdev, offset))
#define REG_WR(cdev, offset, val)	writel((u32)val, REG_ADDR(cdev, offset))
#define REG_WR16(cdev, offset, val)	writew((u16)val, REG_ADDR(cdev, offset))

#define DOORBELL(cdev, db_addr, val) \
	writel((u32)val, \
	       (void __iomem *)((u8 __iomem *)(cdev->doorbells) + (db_addr)))
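
/* These wrappers are thin MMIO helpers: REG_RD()/REG_WR() do 32-bit reads
 * and writes at an offset from the register BAR mapping (cdev->regview),
 * while DOORBELL() writes into the doorbell BAR (cdev->doorbells) at an
 * offset such as the one returned by qed_db_addr().  Hypothetical example:
 *
 *	u32 v = REG_RD(cdev, some_reg_offset);
 *	REG_WR(cdev, some_reg_offset, v | some_bit);
 */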

/* Prototypes */
int qed_fill_dev_info(struct qed_dev *cdev,
		      struct qed_dev_info *dev_info);
void qed_link_update(struct qed_hwfn *hwfn);
u32 qed_unzip_data(struct qed_hwfn *p_hwfn,
		   u32 input_len, u8 *input_buf,
		   u32 max_size, u8 *unzip_buf);
void qed_get_protocol_stats(struct qed_dev *cdev,
			    enum qed_mcp_protocol_type type,
			    union qed_mcp_protocol_stats *stats);
int qed_slowpath_irq_req(struct qed_hwfn *hwfn);
void qed_slowpath_irq_sync(struct qed_hwfn *p_hwfn);

#endif /* _QED_H */