/* QLogic qed NIC Driver
 * Copyright (c) 2015-2017 QLogic Corporation
 *
 * This software is available to you under a choice of one of two
 * licenses. You may choose to be licensed under the terms of the GNU
 * General Public License (GPL) Version 2, available from the file
 * COPYING in the main directory of this source tree, or the
 * OpenIB.org BSD license below:
 *
 * Redistribution and use in source and binary forms, with or
 * without modification, are permitted provided that the following
 * conditions are met:
 *
 * - Redistributions of source code must retain the above
 *   copyright notice, this list of conditions and the following
 *   disclaimer.
 *
 * - Redistributions in binary form must reproduce the above
 *   copyright notice, this list of conditions and the following
 *   disclaimer in the documentation and/or other materials
 *   provided with the distribution.
 *
 * THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND,
 * EXPRESS OR IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF
 * MERCHANTABILITY, FITNESS FOR A PARTICULAR PURPOSE AND
 * NONINFRINGEMENT. IN NO EVENT SHALL THE AUTHORS OR COPYRIGHT HOLDERS
 * BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER LIABILITY, WHETHER IN AN
 * ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING FROM, OUT OF OR IN
 * CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER DEALINGS IN THE
 * SOFTWARE.
 */
#ifndef _QED_H
#define _QED_H

#include <linux/types.h>
#include <linux/io.h>
#include <linux/delay.h>
#include <linux/firmware.h>
#include <linux/interrupt.h>
#include <linux/list.h>
#include <linux/mutex.h>
#include <linux/pci.h>
#include <linux/slab.h>
#include <linux/string.h>
#include <linux/workqueue.h>
#include <linux/zlib.h>
#include <linux/hashtable.h>
#include <linux/qed/qed_if.h>
#include "qed_debug.h"
#include "qed_hsi.h"

extern const struct qed_common_ops qed_common_ops_pass;

/* Driver version components; packed into a single u32 by QED_VERSION */
#define QED_MAJOR_VERSION		8
#define QED_MINOR_VERSION		33
#define QED_REVISION_VERSION		0
#define QED_ENGINEERING_VERSION		20

/* major.minor.revision.engineering, one byte each, major in the MSB */
#define QED_VERSION						 \
	((QED_MAJOR_VERSION << 24) | (QED_MINOR_VERSION << 16) | \
	 (QED_REVISION_VERSION << 8) | QED_ENGINEERING_VERSION)

/* Storm firmware version packed the same way (FW_* come from qed_hsi.h) */
#define STORM_FW_VERSION				       \
	((FW_MAJOR_VERSION << 24) | (FW_MINOR_VERSION << 16) | \
	 (FW_REVISION_VERSION << 8) | FW_ENGINEERING_VERSION)

#define MAX_HWFNS_PER_DEVICE	(4)	/* sizes qed_dev::hwfns[] */
#define NAME_SIZE		16
#define VER_SIZE		16

#define QED_WFQ_UNIT		100

#define QED_WID_SIZE		(1024)
#define QED_MIN_WIDS		(4)
#define QED_PF_DEMS_SIZE	(4)	/* PF doorbell stride, see qed_db_addr() */
/* cau states - interrupt coalescing on/off */
enum qed_coalescing_mode {
	QED_COAL_MODE_DISABLE,
	QED_COAL_MODE_ENABLE
};

/* NVM commands sent to the management FW; values (except the response
 * mask) alias the DRV_MSG_CODE_* mailbox opcodes from qed_hsi.h.
 */
enum qed_nvm_cmd {
	QED_PUT_FILE_BEGIN = DRV_MSG_CODE_NVM_PUT_FILE_BEGIN,
	QED_PUT_FILE_DATA = DRV_MSG_CODE_NVM_PUT_FILE_DATA,
	QED_NVM_WRITE_NVRAM = DRV_MSG_CODE_NVM_WRITE_NVRAM,
	QED_GET_MCP_NVM_RESP = 0xFFFFFF00
};
struct qed_eth_cb_ops;
struct qed_dev_info;
union qed_mcp_protocol_stats;
enum qed_mcp_protocol_type;

/* helpers - extract/insert a bit-field given FOO_MASK/FOO_SHIFT macro pairs */
#define QED_MFW_GET_FIELD(name, field) \
	(((name) & (field ## _MASK)) >> (field ## _SHIFT))

#define QED_MFW_SET_FIELD(name, field, value)				       \
	do {								       \
		(name)	&= ~(field ## _MASK);				       \
		(name)	|= (((value) << (field ## _SHIFT)) & (field ## _MASK));\
	} while (0)
  91. static inline u32 qed_db_addr(u32 cid, u32 DEMS)
  92. {
  93. u32 db_addr = FIELD_VALUE(DB_LEGACY_ADDR_DEMS, DEMS) |
  94. (cid * QED_PF_DEMS_SIZE);
  95. return db_addr;
  96. }
  97. static inline u32 qed_db_addr_vf(u32 cid, u32 DEMS)
  98. {
  99. u32 db_addr = FIELD_VALUE(DB_LEGACY_ADDR_DEMS, DEMS) |
  100. FIELD_VALUE(DB_LEGACY_ADDR_ICID, cid);
  101. return db_addr;
  102. }
  103. #define ALIGNED_TYPE_SIZE(type_name, p_hwfn) \
  104. ((sizeof(type_name) + (u32)(1 << (p_hwfn->cdev->cache_shift)) - 1) & \
  105. ~((1 << (p_hwfn->cdev->cache_shift)) - 1))
  106. #define for_each_hwfn(cdev, i) for (i = 0; i < cdev->num_hwfns; i++)
  107. #define D_TRINE(val, cond1, cond2, true1, true2, def) \
  108. (val == (cond1) ? true1 : \
  109. (val == (cond2) ? true2 : def))
/* forward */
struct qed_ptt_pool;
struct qed_spq;
struct qed_sb_info;
struct qed_sb_attn_info;
struct qed_cxt_mngr;
struct qed_sb_sp_info;
struct qed_ll2_info;
struct qed_mcp_info;

/* Runtime-array shadow used by the init tool; b_valid marks which
 * entries of init_val hold meaningful data.
 */
struct qed_rt_data {
	u32	*init_val;
	bool	*b_valid;
};
/* Tunnel encapsulation types the device can offload */
enum qed_tunn_mode {
	QED_MODE_L2GENEVE_TUNN,
	QED_MODE_IPGENEVE_TUNN,
	QED_MODE_L2GRE_TUNN,
	QED_MODE_IPGRE_TUNN,
	QED_MODE_VXLAN_TUNN,
};

/* Tunnel traffic classification schemes */
enum qed_tunn_clss {
	QED_TUNN_CLSS_MAC_VLAN,
	QED_TUNN_CLSS_MAC_VNI,
	QED_TUNN_CLSS_INNER_MAC_VLAN,
	QED_TUNN_CLSS_INNER_MAC_VNI,
	QED_TUNN_CLSS_MAC_VLAN_DUAL_STAGE,
	MAX_QED_TUNN_CLSS,
};

/* Per-tunnel-type state/update request.
 * NOTE(review): the b_update_* flags presumably gate whether the paired
 * value is applied by the configuration path - confirm against callers.
 */
struct qed_tunn_update_type {
	bool b_update_mode;
	bool b_mode_enabled;
	enum qed_tunn_clss tun_cls;
};

/* UDP destination-port update request (VXLAN / GENEVE) */
struct qed_tunn_update_udp_port {
	bool b_update_port;
	u16 port;
};

/* Aggregate tunnel configuration kept per device (qed_dev::tunnel) */
struct qed_tunnel_info {
	struct qed_tunn_update_type vxlan;
	struct qed_tunn_update_type l2_geneve;
	struct qed_tunn_update_type ip_geneve;
	struct qed_tunn_update_type l2_gre;
	struct qed_tunn_update_type ip_gre;

	struct qed_tunn_update_udp_port vxlan_port;
	struct qed_tunn_update_udp_port geneve_port;

	bool b_update_rx_cls;
	bool b_update_tx_cls;
};
/* Tunnel configuration passed at start time */
struct qed_tunn_start_params {
	unsigned long	tunn_mode;	/* bitmask of enum qed_tunn_mode */
	u16		vxlan_udp_port;
	u16		geneve_udp_port;
	u8		update_vxlan_udp_port;
	u8		update_geneve_udp_port;
	u8		tunn_clss_vxlan;
	u8		tunn_clss_l2geneve;
	u8		tunn_clss_ipgeneve;
	u8		tunn_clss_l2gre;
	u8		tunn_clss_ipgre;
};

/* Tunnel reconfiguration request; update_* fields flag which of the
 * corresponding values should be applied.
 */
struct qed_tunn_update_params {
	unsigned long	tunn_mode_update_mask;
	unsigned long	tunn_mode;
	u16		vxlan_udp_port;
	u16		geneve_udp_port;
	u8		update_rx_pf_clss;
	u8		update_tx_pf_clss;
	u8		update_vxlan_udp_port;
	u8		update_geneve_udp_port;
	u8		tunn_clss_vxlan;
	u8		tunn_clss_l2geneve;
	u8		tunn_clss_ipgeneve;
	u8		tunn_clss_l2gre;
	u8		tunn_clss_ipgre;
};
/* The PCI personality is not quite synonymous to protocol ID:
 * 1. All personalities need CORE connections
 * 2. The Ethernet personality may support also the RoCE/iWARP protocol
 */
enum qed_pci_personality {
	QED_PCI_ETH,
	QED_PCI_FCOE,
	QED_PCI_ISCSI,
	QED_PCI_ETH_ROCE,
	QED_PCI_ETH_IWARP,
	QED_PCI_ETH_RDMA,
	QED_PCI_DEFAULT, /* default in shmem */
};
/* All VFs are symmetric, all counters are PF + all VFs */
struct qed_qm_iids {
	u32 cids;
	u32 vf_cids;
	u32 tids;
};

/* HW / FW resources, output of features supported below, most information
 * is received from MFW.
 */
enum qed_resources {
	QED_SB,
	QED_L2_QUEUE,
	QED_VPORT,
	QED_RSS_ENG,
	QED_PQ,
	QED_RL,
	QED_MAC,
	QED_VLAN,
	QED_RDMA_CNQ_RAM,
	QED_ILT,
	QED_LL2_QUEUE,
	QED_CMDQS_CQS,
	QED_RDMA_STATS_QUEUE,
	QED_BDQ,
	QED_MAX_RESC,	/* must stay last - sizes the resc_* arrays */
};
enum QED_FEATURE {
	QED_PF_L2_QUE,
	QED_VF,
	QED_RDMA_CNQ,
	QED_ISCSI_CQ,
	QED_FCOE_CQ,
	QED_VF_L2_QUE,
	QED_MAX_FEATURES,	/* must stay last - sizes feat_num[] */
};

/* Physical port configurations reported by the device */
enum QED_PORT_MODE {
	QED_PORT_MODE_DE_2X40G,
	QED_PORT_MODE_DE_2X50G,
	QED_PORT_MODE_DE_1X100G,
	QED_PORT_MODE_DE_4X10G_F,
	QED_PORT_MODE_DE_4X10G_E,
	QED_PORT_MODE_DE_4X20G,
	QED_PORT_MODE_DE_1X40G,
	QED_PORT_MODE_DE_2X25G,
	QED_PORT_MODE_DE_1X25G,
	QED_PORT_MODE_DE_4X25G,
	QED_PORT_MODE_DE_2X10G,
};
/* Protocol capabilities, kept as bits in hw_info.device_capabilities */
enum qed_dev_cap {
	QED_DEV_CAP_ETH,
	QED_DEV_CAP_FCOE,
	QED_DEV_CAP_ISCSI,
	QED_DEV_CAP_ROCE,
	QED_DEV_CAP_IWARP,
};

/* Wake-on-LAN support level */
enum qed_wol_support {
	QED_WOL_SUPPORT_NONE,
	QED_WOL_SUPPORT_PME,
};
/* Per HW-function hardware information: PCI personality and the results
 * of the resource-allocation scheme (resc_start/resc_num/feat_num).
 */
struct qed_hw_info {
	/* PCI personality */
	enum qed_pci_personality personality;
#define QED_IS_RDMA_PERSONALITY(dev)				   \
	((dev)->hw_info.personality == QED_PCI_ETH_ROCE ||	   \
	 (dev)->hw_info.personality == QED_PCI_ETH_IWARP ||	   \
	 (dev)->hw_info.personality == QED_PCI_ETH_RDMA)
#define QED_IS_ROCE_PERSONALITY(dev)				   \
	((dev)->hw_info.personality == QED_PCI_ETH_ROCE ||	   \
	 (dev)->hw_info.personality == QED_PCI_ETH_RDMA)
#define QED_IS_IWARP_PERSONALITY(dev)				   \
	((dev)->hw_info.personality == QED_PCI_ETH_IWARP ||	   \
	 (dev)->hw_info.personality == QED_PCI_ETH_RDMA)
#define QED_IS_L2_PERSONALITY(dev)				   \
	((dev)->hw_info.personality == QED_PCI_ETH ||		   \
	 QED_IS_RDMA_PERSONALITY(dev))
#define QED_IS_FCOE_PERSONALITY(dev)				   \
	((dev)->hw_info.personality == QED_PCI_FCOE)
#define QED_IS_ISCSI_PERSONALITY(dev)				   \
	((dev)->hw_info.personality == QED_PCI_ISCSI)

	/* Resource Allocation scheme results */
	u32 resc_start[QED_MAX_RESC];	/* first index of each resource */
	u32 resc_num[QED_MAX_RESC];	/* amount of each resource */
	u32 feat_num[QED_MAX_FEATURES];

#define RESC_START(_p_hwfn, resc) ((_p_hwfn)->hw_info.resc_start[resc])
#define RESC_NUM(_p_hwfn, resc) ((_p_hwfn)->hw_info.resc_num[resc])
#define RESC_END(_p_hwfn, resc) (RESC_START(_p_hwfn, resc) + \
				 RESC_NUM(_p_hwfn, resc))
#define FEAT_NUM(_p_hwfn, resc) ((_p_hwfn)->hw_info.feat_num[resc])

	/* Amount of traffic classes HW supports */
	u8 num_hw_tc;

	/* Amount of TCs which should be active according to DCBx or upper
	 * layer driver configuration.
	 */
	u8 num_active_tc;
	u8 offload_tc;

	u32 concrete_fid;
	u16 opaque_fid;
	u16 ovlan;
	u32 part_num[4];

	unsigned char hw_mac_addr[ETH_ALEN];
	u64 node_wwn;
	u64 port_wwn;
	u16 num_fcoe_conns;

	struct qed_igu_info *p_igu_info;

	u32 port_mode;
	u32 hw_mode;
	unsigned long device_capabilities;	/* bits of enum qed_dev_cap */
	u16 mtu;

	enum qed_wol_support b_wol_support;
};
/* maximum size of read/write commands (HW limit) */
#define DMAE_MAX_RW_SIZE        0x2000

/* Per HW-function DMAE engine context */
struct qed_dmae_info {
	/* Mutex for synchronizing access to functions */
	struct mutex mutex;

	u8 channel;

	dma_addr_t completion_word_phys_addr;

	/* The memory location where the DMAE writes the completion
	 * value when an operation is finished on this context.
	 */
	u32 *p_completion_word;

	dma_addr_t intermediate_buffer_phys_addr;

	/* An intermediate buffer for DMAE operations that use virtual
	 * addresses - data is DMA'd to/from this buffer and then
	 * memcpy'd to/from the virtual address
	 */
	u32 *p_intermediate_buffer;

	dma_addr_t dmae_cmd_phys_addr;
	struct dmae_cmd *p_dmae_cmd;
};
struct qed_wfq_data {
	/* when feature is configured for at least 1 vport */
	u32	min_speed;
	bool	configured;
};

/* Queue-manager (QM) configuration for a HW function: PQ/vport/port
 * parameter arrays plus the first index of each physical-queue group.
 */
struct qed_qm_info {
	struct init_qm_pq_params	*qm_pq_params;
	struct init_qm_vport_params	*qm_vport_params;
	struct init_qm_port_params	*qm_port_params;
	u16				start_pq;
	u8				start_vport;
	u16				pure_lb_pq;
	u16				offload_pq;
	u16				low_latency_pq;
	u16				pure_ack_pq;
	u16				ooo_pq;
	u16				first_vf_pq;
	u16				first_mcos_pq;
	u16				first_rl_pq;
	u16				num_pqs;
	u16				num_vf_pqs;
	u8				num_vports;
	u8				max_phys_tcs_per_port;
	u8				ooo_tc;
	bool				pf_rl_en;	/* PF rate limit enabled */
	bool				pf_wfq_en;	/* PF WFQ enabled */
	bool				vport_rl_en;
	bool				vport_wfq_en;
	u8				pf_wfq;
	u32				pf_rl;
	struct qed_wfq_data		*wfq_data;
	u8				num_pf_rls;
};
/* Address/length of a storm statistics region */
struct storm_stats {
	u32 address;
	u32 len;
};

/* Statistics regions of the four storm processors (M/P/T/U) */
struct qed_storm_stats {
	struct storm_stats mstats;
	struct storm_stats pstats;
	struct storm_stats tstats;
	struct storm_stats ustats;
};

/* Parsed firmware image: version info, init ops and their data arrays */
struct qed_fw_data {
	struct fw_ver_info	*fw_ver_info;
	const u8		*modes_tree_buf;
	union init_op		*init_ops;
	const u32		*arr_data;
	u32			init_ops_size;
};
enum BAR_ID {
	BAR_ID_0,	/* used for GRC */
	BAR_ID_1	/* Used for doorbells */
};

/* Attributes of the images found in NVM */
struct qed_nvm_image_info {
	u32 num_images;
	struct bist_nvm_image_att *image_att;
};

/* Printable "major.minor.revision.engineering" version string */
#define DRV_MODULE_VERSION		      \
	__stringify(QED_MAJOR_VERSION) "."    \
	__stringify(QED_MINOR_VERSION) "."    \
	__stringify(QED_REVISION_VERSION) "." \
	__stringify(QED_ENGINEERING_VERSION)

/* Callback + opaque token registered per SIMD protocol vector */
struct qed_simd_fp_handler {
	void	*token;
	void	(*func)(void *);
};
/* State of a single HW function; a qed_dev holds up to
 * MAX_HWFNS_PER_DEVICE of these in its hwfns[] array.
 */
struct qed_hwfn {
	struct qed_dev			*cdev;
	u8				my_id;		/* ID inside the PF */
#define IS_LEAD_HWFN(edev)		(!((edev)->my_id))
	u8				rel_pf_id;	/* Relative to engine */
	u8				abs_pf_id;
#define QED_PATH_ID(_p_hwfn) \
	(QED_IS_K2((_p_hwfn)->cdev) ? 0 : ((_p_hwfn)->abs_pf_id & 1))
	u8				port_id;
	bool				b_active;

	u32				dp_module;	/* debug-print module mask */
	u8				dp_level;	/* debug-print verbosity */
	char				name[NAME_SIZE];

	bool				first_on_engine;
	bool				hw_init_done;

	u8				num_funcs_on_engine;
	u8				enabled_func_idx;

	/* BAR access */
	void __iomem			*regview;
	void __iomem			*doorbells;
	u64				db_phys_addr;
	unsigned long			db_size;

	/* PTT pool */
	struct qed_ptt_pool		*p_ptt_pool;

	/* HW info */
	struct qed_hw_info		hw_info;

	/* rt_array (for init-tool) */
	struct qed_rt_data		rt_data;

	/* SPQ */
	struct qed_spq			*p_spq;

	/* EQ */
	struct qed_eq			*p_eq;

	/* Consolidation queue */
	struct qed_consq		*p_consq;

	/* Slow-Path definitions */
	struct tasklet_struct		*sp_dpc;
	bool				b_sp_dpc_enabled;

	struct qed_ptt			*p_main_ptt;
	struct qed_ptt			*p_dpc_ptt;

	/* PTP will be used only by the leading function.
	 * Usage of all PTP-apis should be synchronized as result.
	 */
	struct qed_ptt			*p_ptp_ptt;

	struct qed_sb_sp_info		*p_sp_sb;
	struct qed_sb_attn_info		*p_sb_attn;

	/* Protocol related */
	bool				using_ll2;
	struct qed_ll2_info		*p_ll2_info;
	struct qed_ooo_info		*p_ooo_info;
	struct qed_rdma_info		*p_rdma_info;
	struct qed_iscsi_info		*p_iscsi_info;
	struct qed_fcoe_info		*p_fcoe_info;
	struct qed_pf_params		pf_params;

	bool				b_rdma_enabled_in_prs;
	u32				rdma_prs_search_reg;

	struct qed_cxt_mngr		*p_cxt_mngr;

	/* Flag indicating whether interrupts are enabled or not */
	bool				b_int_enabled;
	bool				b_int_requested;

	/* True if the driver requests for the link */
	bool				b_drv_link_init;

	struct qed_vf_iov		*vf_iov_info;
	struct qed_pf_iov		*pf_iov_info;
	struct qed_mcp_info		*mcp_info;

	struct qed_dcbx_info		*p_dcbx_info;

	struct qed_dmae_info		dmae_info;

	/* QM init */
	struct qed_qm_info		qm_info;
	struct qed_storm_stats		storm_stats;

	/* Buffer for unzipping firmware data */
	void				*unzip_buf;

	struct dbg_tools_data		dbg_info;

	/* PWM region specific data */
	u16				wid_count;
	u32				dpi_size;
	u32				dpi_count;

	/* This is used to calculate the doorbell address */
	u32				dpi_start_offset;

	/* If one of the following is set then EDPM shouldn't be used */
	u8				dcbx_no_edpm;
	u8				db_bar_no_edpm;

	/* L2-related */
	struct qed_l2_info		*p_l2_info;

	/* Nvm images number and attributes */
	struct qed_nvm_image_info	nvm_info;

	struct qed_ptt			*p_arfs_ptt;

	struct qed_simd_fp_handler	simd_proto_handler[64];

#ifdef CONFIG_QED_SRIOV
	struct workqueue_struct		*iov_wq;
	struct delayed_work		iov_task;
	unsigned long			iov_task_flags;
#endif

	struct z_stream_s		*stream;	/* zlib stream for FW unzip */
};
/* PCI device parameters cached at probe time */
struct pci_params {
	int		pm_cap;
	unsigned long	mem_start;
	unsigned long	mem_end;
	unsigned int	irq;
	u8		pf_num;
};

struct qed_int_param {
	u32	int_mode;
	u8	num_vectors;
	u8	min_msix_cnt;	/* for minimal functionality */
};

/* Interrupt configuration: 'in' and 'out' are both qed_int_param */
struct qed_int_params {
	struct qed_int_param	in;
	struct qed_int_param	out;
	struct msix_entry	*msix_table;
	bool			fp_initialized;
	u8			fp_msix_base;
	u8			fp_msix_cnt;
	u8			rdma_msix_base;
	u8			rdma_msix_cnt;
};

/* One debug feature's dump buffer and debugfs entry */
struct qed_dbg_feature {
	struct dentry	*dentry;
	u8		*dump_buf;
	u32		buf_size;
	u32		dumped_dwords;
};

struct qed_dbg_params {
	struct qed_dbg_feature	features[DBG_FEATURE_NUM];
	u8			engine_for_debug;
	bool			print_data;
};
/* Per-PCI-device state shared by all of the device's HW functions */
struct qed_dev {
	u32				dp_module;
	u8				dp_level;
	char				name[NAME_SIZE];

	enum qed_dev_type		type;
	/* Translate type/revision combo into the proper conditions */
#define QED_IS_BB(dev)		((dev)->type == QED_DEV_TYPE_BB)
#define QED_IS_BB_B0(dev)	(QED_IS_BB(dev) && CHIP_REV_IS_B0(dev))
#define QED_IS_AH(dev)		((dev)->type == QED_DEV_TYPE_AH)
#define QED_IS_K2(dev)		QED_IS_AH(dev)

	u16				vendor_id;
	u16				device_id;
#define QED_DEV_ID_MASK		0xff00
#define QED_DEV_ID_MASK_BB	0x1600
#define QED_DEV_ID_MASK_AH	0x8000

	u16				chip_num;
#define CHIP_NUM_MASK		0xffff
#define CHIP_NUM_SHIFT		16

	u16				chip_rev;
#define CHIP_REV_MASK		0xf
#define CHIP_REV_SHIFT		12
#define CHIP_REV_IS_B0(_cdev)	((_cdev)->chip_rev == 1)

	u16				chip_metal;
#define CHIP_METAL_MASK		0xff
#define CHIP_METAL_SHIFT	4

	u16				chip_bond_id;
#define CHIP_BOND_ID_MASK	0xf
#define CHIP_BOND_ID_SHIFT	0

	u8				num_engines;
	u8				num_ports_in_engine;
	u8				num_funcs_in_port;

	u8				path_id;
	enum qed_mf_mode		mf_mode;
#define IS_MF_DEFAULT(_p_hwfn)	(((_p_hwfn)->cdev)->mf_mode == QED_MF_DEFAULT)
#define IS_MF_SI(_p_hwfn)	(((_p_hwfn)->cdev)->mf_mode == QED_MF_NPAR)
#define IS_MF_SD(_p_hwfn)	(((_p_hwfn)->cdev)->mf_mode == QED_MF_OVLAN)

	int				pcie_width;
	int				pcie_speed;

	/* Add MF related configuration */
	u8				mcp_rev;
	u8				boot_mode;

	/* WoL related configurations */
	u8				wol_config;
	u8				wol_mac[ETH_ALEN];

	u32				int_mode;
	enum qed_coalescing_mode	int_coalescing_mode;
	u16				rx_coalesce_usecs;
	u16				tx_coalesce_usecs;

	/* Start Bar offset of first hwfn */
	void __iomem			*regview;
	void __iomem			*doorbells;
	u64				db_phys_addr;
	unsigned long			db_size;

	/* PCI */
	u8				cache_shift;	/* log2 of cache-line size */

	/* Init */
	const struct iro		*iro_arr;
#define IRO (p_hwfn->cdev->iro_arr)

	/* HW functions */
	u8				num_hwfns;
	struct qed_hwfn			hwfns[MAX_HWFNS_PER_DEVICE];

	/* SRIOV */
	struct qed_hw_sriov_info	*p_iov_info;
#define IS_QED_SRIOV(cdev)	(!!(cdev)->p_iov_info)

	struct qed_tunnel_info		tunnel;
	bool				b_is_vf;
	u32				drv_type;
	struct qed_eth_stats		*reset_stats;
	struct qed_fw_data		*fw_data;

	u32				mcp_nvm_resp;

	/* Linux specific here */
	struct qede_dev			*edev;
	struct pci_dev			*pdev;
	u32				flags;
#define QED_FLAG_STORAGE_STARTED	(BIT(0))
	int				msg_enable;

	struct pci_params		pci_params;

	struct qed_int_params		int_params;

	u8				protocol;
#define IS_QED_ETH_IF(cdev)	((cdev)->protocol == QED_PROTOCOL_ETH)
#define IS_QED_FCOE_IF(cdev)	((cdev)->protocol == QED_PROTOCOL_FCOE)

	/* Callbacks to protocol driver */
	union {
		struct qed_common_cb_ops	*common;
		struct qed_eth_cb_ops		*eth;
		struct qed_fcoe_cb_ops		*fcoe;
		struct qed_iscsi_cb_ops		*iscsi;
	} protocol_ops;
	void				*ops_cookie;

	struct qed_dbg_params		dbg_params;

#ifdef CONFIG_QED_LL2
	struct qed_cb_ll2_info		*ll2;
	u8				ll2_mac_address[ETH_ALEN];
#endif
	DECLARE_HASHTABLE(connections, 10);
	const struct firmware		*firmware;

	u32				rdma_max_sge;
	u32				rdma_max_inline;
	u32				rdma_max_srq_sge;
	u16				tunn_feature_mask;
};
/* Chip-family-dependent limits: BB vs. K2/AH (constants from qed_hsi.h) */
#define NUM_OF_VFS(dev)		(QED_IS_BB(dev) ? MAX_NUM_VFS_BB \
						: MAX_NUM_VFS_K2)
#define NUM_OF_L2_QUEUES(dev)	(QED_IS_BB(dev) ? MAX_NUM_L2_QUEUES_BB \
						: MAX_NUM_L2_QUEUES_K2)
#define NUM_OF_PORTS(dev)	(QED_IS_BB(dev) ? MAX_NUM_PORTS_BB \
						: MAX_NUM_PORTS_K2)
#define NUM_OF_SBS(dev)		(QED_IS_BB(dev) ? MAX_SB_PER_PATH_BB \
						: MAX_SB_PER_PATH_K2)
#define NUM_OF_ENG_PFS(dev)	(QED_IS_BB(dev) ? MAX_NUM_PFS_BB \
						: MAX_NUM_PFS_K2)
  634. /**
  635. * @brief qed_concrete_to_sw_fid - get the sw function id from
  636. * the concrete value.
  637. *
  638. * @param concrete_fid
  639. *
  640. * @return inline u8
  641. */
  642. static inline u8 qed_concrete_to_sw_fid(struct qed_dev *cdev,
  643. u32 concrete_fid)
  644. {
  645. u8 vfid = GET_FIELD(concrete_fid, PXP_CONCRETE_FID_VFID);
  646. u8 pfid = GET_FIELD(concrete_fid, PXP_CONCRETE_FID_PFID);
  647. u8 vf_valid = GET_FIELD(concrete_fid,
  648. PXP_CONCRETE_FID_VFVALID);
  649. u8 sw_fid;
  650. if (vf_valid)
  651. sw_fid = vfid + MAX_NUM_PFS;
  652. else
  653. sw_fid = pfid;
  654. return sw_fid;
  655. }
  656. #define PKT_LB_TC 9
  657. #define MAX_NUM_VOQS_E4 20
  658. int qed_configure_vport_wfq(struct qed_dev *cdev, u16 vp_id, u32 rate);
  659. void qed_configure_vp_wfq_on_link_change(struct qed_dev *cdev,
  660. struct qed_ptt *p_ptt,
  661. u32 min_pf_rate);
  662. void qed_clean_wfq_db(struct qed_hwfn *p_hwfn, struct qed_ptt *p_ptt);
  663. int qed_device_num_engines(struct qed_dev *cdev);
  664. int qed_device_get_port_id(struct qed_dev *cdev);
  665. void qed_set_fw_mac_addr(__le16 *fw_msb,
  666. __le16 *fw_mid, __le16 *fw_lsb, u8 *mac);
  667. #define QED_LEADING_HWFN(dev) (&dev->hwfns[0])
  668. /* Flags for indication of required queues */
  669. #define PQ_FLAGS_RLS (BIT(0))
  670. #define PQ_FLAGS_MCOS (BIT(1))
  671. #define PQ_FLAGS_LB (BIT(2))
  672. #define PQ_FLAGS_OOO (BIT(3))
  673. #define PQ_FLAGS_ACK (BIT(4))
  674. #define PQ_FLAGS_OFLD (BIT(5))
  675. #define PQ_FLAGS_VFS (BIT(6))
  676. #define PQ_FLAGS_LLT (BIT(7))
  677. /* physical queue index for cm context intialization */
  678. u16 qed_get_cm_pq_idx(struct qed_hwfn *p_hwfn, u32 pq_flags);
  679. u16 qed_get_cm_pq_idx_mcos(struct qed_hwfn *p_hwfn, u8 tc);
  680. u16 qed_get_cm_pq_idx_vf(struct qed_hwfn *p_hwfn, u16 vf);
  681. #define QED_LEADING_HWFN(dev) (&dev->hwfns[0])
  682. /* Other Linux specific common definitions */
  683. #define DP_NAME(cdev) ((cdev)->name)
  684. #define REG_ADDR(cdev, offset) (void __iomem *)((u8 __iomem *)\
  685. (cdev->regview) + \
  686. (offset))
  687. #define REG_RD(cdev, offset) readl(REG_ADDR(cdev, offset))
  688. #define REG_WR(cdev, offset, val) writel((u32)val, REG_ADDR(cdev, offset))
  689. #define REG_WR16(cdev, offset, val) writew((u16)val, REG_ADDR(cdev, offset))
  690. #define DOORBELL(cdev, db_addr, val) \
  691. writel((u32)val, (void __iomem *)((u8 __iomem *)\
  692. (cdev->doorbells) + (db_addr)))
/* Prototypes */
int qed_fill_dev_info(struct qed_dev *cdev,
		      struct qed_dev_info *dev_info);
void qed_link_update(struct qed_hwfn *hwfn);
/* Decompress zlib-wrapped FW data into unzip_buf (at most max_size bytes) */
u32 qed_unzip_data(struct qed_hwfn *p_hwfn,
		   u32 input_len, u8 *input_buf,
		   u32 max_size, u8 *unzip_buf);
void qed_get_protocol_stats(struct qed_dev *cdev,
			    enum qed_mcp_protocol_type type,
			    union qed_mcp_protocol_stats *stats);
int qed_slowpath_irq_req(struct qed_hwfn *hwfn);
void qed_slowpath_irq_sync(struct qed_hwfn *p_hwfn);

#endif /* _QED_H */