qed.h 20 KB

12345678910111213141516171819202122232425262728293031323334353637383940414243444546474849505152535455565758596061626364656667686970717273747576777879808182838485868788899091929394959697989910010110210310410510610710810911011111211311411511611711811912012112212312412512612712812913013113213313413513613713813914014114214314414514614714814915015115215315415515615715815916016116216316416516616716816917017117217317417517617717817918018118218318418518618718818919019119219319419519619719819920020120220320420520620720820921021121221321421521621721821922022122222322422522622722822923023123223323423523623723823924024124224324424524624724824925025125225325425525625725825926026126226326426526626726826927027127227327427527627727827928028128228328428528628728828929029129229329429529629729829930030130230330430530630730830931031131231331431531631731831932032132232332432532632732832933033133233333433533633733833934034134234334434534634734834935035135235335435535635735835936036136236336436536636736836937037137237337437537637737837938038138238338438538638738838939039139239339439539639739839940040140240340440540640740840941041141241341441541641741841942042142242342442542642742842943043143243343443543643743843944044144244344444544644744844945045145245345445545645745845946046146246346446546646746846947047147247347447547647747847948048148248348448548648748848949049149249349449549649749849950050150250350450550650750850951051151251351451551651751851952052152252352452552652752852953053153253353453553653753853954054154254354454554654754854955055155255355455555655755855956056156256356456556656756856957057157257357457557657757857958058158258358458558658758858959059159259359459559659759859960060160260360460560660760860961061161261361461561661761861962062162262362462562662762862963063163263363463563663763863964064164264364464564664764864965065165265365465565665765865966066166266366466566666766866967067167267367467567667767867968068168268368468568668768868969069169269369469569669769869970070170270
3704705706707708709710711712713714715716717718719720721722723724725726727728729730731732733734735736737738739740741742743744745746747748749750751752753754755756757758759760761762763764765766767768769770771772773774775776777778779780781782783784785786787788789790791792793794795796797798799800801802803804805806807808809810811812813814815816817818819820821822823824825826827828829830831832833834835836837838839840841842
  1. /* QLogic qed NIC Driver
  2. * Copyright (c) 2015-2017 QLogic Corporation
  3. *
  4. * This software is available to you under a choice of one of two
  5. * licenses. You may choose to be licensed under the terms of the GNU
  6. * General Public License (GPL) Version 2, available from the file
  7. * COPYING in the main directory of this source tree, or the
  8. * OpenIB.org BSD license below:
  9. *
  10. * Redistribution and use in source and binary forms, with or
  11. * without modification, are permitted provided that the following
  12. * conditions are met:
  13. *
  14. * - Redistributions of source code must retain the above
  15. * copyright notice, this list of conditions and the following
  16. * disclaimer.
  17. *
  18. * - Redistributions in binary form must reproduce the above
  19. * copyright notice, this list of conditions and the following
  20. * disclaimer in the documentation and /or other materials
  21. * provided with the distribution.
  22. *
  23. * THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND,
  24. * EXPRESS OR IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF
  25. * MERCHANTABILITY, FITNESS FOR A PARTICULAR PURPOSE AND
  26. * NONINFRINGEMENT. IN NO EVENT SHALL THE AUTHORS OR COPYRIGHT HOLDERS
  27. * BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER LIABILITY, WHETHER IN AN
  28. * ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING FROM, OUT OF OR IN
  29. * CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER DEALINGS IN THE
  30. * SOFTWARE.
  31. */
  32. #ifndef _QED_H
  33. #define _QED_H
  34. #include <linux/types.h>
  35. #include <linux/io.h>
  36. #include <linux/delay.h>
  37. #include <linux/firmware.h>
  38. #include <linux/interrupt.h>
  39. #include <linux/list.h>
  40. #include <linux/mutex.h>
  41. #include <linux/pci.h>
  42. #include <linux/slab.h>
  43. #include <linux/string.h>
  44. #include <linux/workqueue.h>
  45. #include <linux/zlib.h>
  46. #include <linux/hashtable.h>
  47. #include <linux/qed/qed_if.h>
  48. #include "qed_debug.h"
  49. #include "qed_hsi.h"
extern const struct qed_common_ops qed_common_ops_pass;

/* Driver version 8.10.11.21, packed into one u32 by QED_VERSION below */
#define QED_MAJOR_VERSION 8
#define QED_MINOR_VERSION 10
#define QED_REVISION_VERSION 11
#define QED_ENGINEERING_VERSION 21

#define QED_VERSION						 \
	((QED_MAJOR_VERSION << 24) | (QED_MINOR_VERSION << 16) | \
	 (QED_REVISION_VERSION << 8) | QED_ENGINEERING_VERSION)

/* Firmware version packed the same way (FW_* come from qed_hsi.h) */
#define STORM_FW_VERSION				       \
	((FW_MAJOR_VERSION << 24) | (FW_MINOR_VERSION << 16) | \
	 (FW_REVISION_VERSION << 8) | FW_ENGINEERING_VERSION)

#define MAX_HWFNS_PER_DEVICE	(4)
#define NAME_SIZE 16
#define VER_SIZE 16

#define QED_WFQ_UNIT	100

#define QED_WID_SIZE	(1024)
#define QED_MIN_WIDS	(4)
#define QED_PF_DEMS_SIZE	(4)

/* cau states */
enum qed_coalescing_mode {
	QED_COAL_MODE_DISABLE,
	QED_COAL_MODE_ENABLE
};

struct qed_eth_cb_ops;
struct qed_dev_info;
union qed_mcp_protocol_stats;
enum qed_mcp_protocol_type;

/* helpers */
/* Extract the bit-field named by the FIELD_MASK/FIELD_SHIFT pair from @name */
#define QED_MFW_GET_FIELD(name, field) \
	(((name) & (field ## _MASK)) >> (field ## _SHIFT))

/* Store @value into the bit-field named by the FIELD_MASK/FIELD_SHIFT pair */
#define QED_MFW_SET_FIELD(name, field, value)				       \
	do {								       \
		(name) &= ~(field ## _MASK);				       \
		(name) |= (((value) << (field ## _SHIFT)) & (field ## _MASK));\
	} while (0)
  85. static inline u32 qed_db_addr(u32 cid, u32 DEMS)
  86. {
  87. u32 db_addr = FIELD_VALUE(DB_LEGACY_ADDR_DEMS, DEMS) |
  88. (cid * QED_PF_DEMS_SIZE);
  89. return db_addr;
  90. }
  91. static inline u32 qed_db_addr_vf(u32 cid, u32 DEMS)
  92. {
  93. u32 db_addr = FIELD_VALUE(DB_LEGACY_ADDR_DEMS, DEMS) |
  94. FIELD_VALUE(DB_LEGACY_ADDR_ICID, cid);
  95. return db_addr;
  96. }
/* sizeof(@type_name) rounded up to the device cache-line size */
#define ALIGNED_TYPE_SIZE(type_name, p_hwfn)				     \
	((sizeof(type_name) + (u32)(1 << (p_hwfn->cdev->cache_shift)) - 1) & \
	 ~((1 << (p_hwfn->cdev->cache_shift)) - 1))

/* Iterate @i over all HW-functions of @cdev */
#define for_each_hwfn(cdev, i)  for (i = 0; i < cdev->num_hwfns; i++)

/* Three-way select: true1 if val==cond1, true2 if val==cond2, else def */
#define D_TRINE(val, cond1, cond2, true1, true2, def)	\
	(val == (cond1) ? true1 :			\
	 (val == (cond2) ? true2 : def))
/* forward */
struct qed_ptt_pool;
struct qed_spq;
struct qed_sb_info;
struct qed_sb_attn_info;
struct qed_cxt_mngr;
struct qed_sb_sp_info;
struct qed_ll2_info;
struct qed_mcp_info;

/* Runtime array for the init-tool; b_valid[i] marks init_val[i] as set */
struct qed_rt_data {
	u32 *init_val;
	bool *b_valid;
};
/* Supported tunnel encapsulations; used as bit indices in tunn_mode masks */
enum qed_tunn_mode {
	QED_MODE_L2GENEVE_TUNN,
	QED_MODE_IPGENEVE_TUNN,
	QED_MODE_L2GRE_TUNN,
	QED_MODE_IPGRE_TUNN,
	QED_MODE_VXLAN_TUNN,
};

/* Inner-packet classification schemes for tunneled traffic */
enum qed_tunn_clss {
	QED_TUNN_CLSS_MAC_VLAN,
	QED_TUNN_CLSS_MAC_VNI,
	QED_TUNN_CLSS_INNER_MAC_VLAN,
	QED_TUNN_CLSS_INNER_MAC_VNI,
	QED_TUNN_CLSS_MAC_VLAN_DUAL_STAGE,
	MAX_QED_TUNN_CLSS,
};

/* Update request for one tunnel type */
struct qed_tunn_update_type {
	bool b_update_mode;	/* apply b_mode_enabled when true */
	bool b_mode_enabled;
	enum qed_tunn_clss tun_cls;
};

/* UDP destination-port update for a tunnel type */
struct qed_tunn_update_udp_port {
	bool b_update_port;	/* apply port when true */
	u16 port;
};

/* Aggregate tunnel configuration state kept per device */
struct qed_tunnel_info {
	struct qed_tunn_update_type vxlan;
	struct qed_tunn_update_type l2_geneve;
	struct qed_tunn_update_type ip_geneve;
	struct qed_tunn_update_type l2_gre;
	struct qed_tunn_update_type ip_gre;

	struct qed_tunn_update_udp_port vxlan_port;
	struct qed_tunn_update_udp_port geneve_port;

	bool b_update_rx_cls;
	bool b_update_tx_cls;
};

struct qed_tunn_start_params {
	unsigned long tunn_mode;	/* bitmask of enum qed_tunn_mode */
	u16 vxlan_udp_port;
	u16 geneve_udp_port;
	u8 update_vxlan_udp_port;
	u8 update_geneve_udp_port;
	u8 tunn_clss_vxlan;
	u8 tunn_clss_l2geneve;
	u8 tunn_clss_ipgeneve;
	u8 tunn_clss_l2gre;
	u8 tunn_clss_ipgre;
};

struct qed_tunn_update_params {
	unsigned long tunn_mode_update_mask;	/* which tunn_mode bits to apply */
	unsigned long tunn_mode;
	u16 vxlan_udp_port;
	u16 geneve_udp_port;
	u8 update_rx_pf_clss;
	u8 update_tx_pf_clss;
	u8 update_vxlan_udp_port;
	u8 update_geneve_udp_port;
	u8 tunn_clss_vxlan;
	u8 tunn_clss_l2geneve;
	u8 tunn_clss_ipgeneve;
	u8 tunn_clss_l2gre;
	u8 tunn_clss_ipgre;
};
/* The PCI personality is not quite synonymous to protocol ID:
 * 1. All personalities need CORE connections
 * 2. The Ethernet personality may support also the RoCE/iWARP protocol
 */
enum qed_pci_personality {
	QED_PCI_ETH,
	QED_PCI_FCOE,
	QED_PCI_ISCSI,
	QED_PCI_ETH_ROCE,
	QED_PCI_ETH_IWARP,
	/* Matched by both the RoCE and iWARP personality checks below */
	QED_PCI_ETH_RDMA,
	QED_PCI_DEFAULT, /* default in shmem */
};
/* All VFs are symmetric, all counters are PF + all VFs */
struct qed_qm_iids {
	u32 cids;
	u32 vf_cids;
	u32 tids;
};

/* HW / FW resources, output of features supported below, most information
 * is received from MFW.
 */
enum qed_resources {
	QED_SB,
	QED_L2_QUEUE,
	QED_VPORT,
	QED_RSS_ENG,
	QED_PQ,
	QED_RL,
	QED_MAC,
	QED_VLAN,
	QED_RDMA_CNQ_RAM,
	QED_ILT,
	QED_LL2_QUEUE,
	QED_CMDQS_CQS,
	QED_RDMA_STATS_QUEUE,
	QED_BDQ,
	QED_MAX_RESC,
};

/* Per-protocol feature counters; indices into hw_info.feat_num[] */
enum QED_FEATURE {
	QED_PF_L2_QUE,
	QED_VF,
	QED_RDMA_CNQ,
	QED_ISCSI_CQ,
	QED_FCOE_CQ,
	QED_VF_L2_QUE,
	QED_MAX_FEATURES,
};

/* Physical port breakout modes */
enum QED_PORT_MODE {
	QED_PORT_MODE_DE_2X40G,
	QED_PORT_MODE_DE_2X50G,
	QED_PORT_MODE_DE_1X100G,
	QED_PORT_MODE_DE_4X10G_F,
	QED_PORT_MODE_DE_4X10G_E,
	QED_PORT_MODE_DE_4X20G,
	QED_PORT_MODE_DE_1X40G,
	QED_PORT_MODE_DE_2X25G,
	QED_PORT_MODE_DE_1X25G,
	QED_PORT_MODE_DE_4X25G,
	QED_PORT_MODE_DE_2X10G,
};

/* Bit indices for hw_info.device_capabilities */
enum qed_dev_cap {
	QED_DEV_CAP_ETH,
	QED_DEV_CAP_FCOE,
	QED_DEV_CAP_ISCSI,
	QED_DEV_CAP_ROCE,
	QED_DEV_CAP_IWARP,
};

/* Wake-on-LAN support level */
enum qed_wol_support {
	QED_WOL_SUPPORT_NONE,
	QED_WOL_SUPPORT_PME,
};
struct qed_hw_info {
	/* PCI personality */
	enum qed_pci_personality personality;
#define QED_IS_RDMA_PERSONALITY(dev)			    \
	((dev)->hw_info.personality == QED_PCI_ETH_ROCE ||  \
	 (dev)->hw_info.personality == QED_PCI_ETH_IWARP || \
	 (dev)->hw_info.personality == QED_PCI_ETH_RDMA)
#define QED_IS_ROCE_PERSONALITY(dev)			   \
	((dev)->hw_info.personality == QED_PCI_ETH_ROCE || \
	 (dev)->hw_info.personality == QED_PCI_ETH_RDMA)
#define QED_IS_IWARP_PERSONALITY(dev)			    \
	((dev)->hw_info.personality == QED_PCI_ETH_IWARP || \
	 (dev)->hw_info.personality == QED_PCI_ETH_RDMA)
#define QED_IS_L2_PERSONALITY(dev)		      \
	((dev)->hw_info.personality == QED_PCI_ETH || \
	 QED_IS_RDMA_PERSONALITY(dev))
#define QED_IS_FCOE_PERSONALITY(dev) \
	((dev)->hw_info.personality == QED_PCI_FCOE)
#define QED_IS_ISCSI_PERSONALITY(dev) \
	((dev)->hw_info.personality == QED_PCI_ISCSI)

	/* Resource Allocation scheme results */
	u32 resc_start[QED_MAX_RESC];
	u32 resc_num[QED_MAX_RESC];
	u32 feat_num[QED_MAX_FEATURES];

#define RESC_START(_p_hwfn, resc) ((_p_hwfn)->hw_info.resc_start[resc])
#define RESC_NUM(_p_hwfn, resc) ((_p_hwfn)->hw_info.resc_num[resc])
#define RESC_END(_p_hwfn, resc) (RESC_START(_p_hwfn, resc) + \
				 RESC_NUM(_p_hwfn, resc))
#define FEAT_NUM(_p_hwfn, resc) ((_p_hwfn)->hw_info.feat_num[resc])

	/* Amount of traffic classes HW supports */
	u8 num_hw_tc;

	/* Amount of TCs which should be active according to DCBx or upper
	 * layer driver configuration.
	 */
	u8 num_active_tc;
	u8 offload_tc;

	u32 concrete_fid;
	u16 opaque_fid;
	u16 ovlan;
	u32 part_num[4];

	unsigned char hw_mac_addr[ETH_ALEN];
	u64 node_wwn;	/* world-wide node name */
	u64 port_wwn;	/* world-wide port name */

	u16 num_fcoe_conns;

	struct qed_igu_info *p_igu_info;

	u32 port_mode;
	u32 hw_mode;
	/* bitmask of enum qed_dev_cap */
	unsigned long device_capabilities;
	u16 mtu;

	enum qed_wol_support b_wol_support;
};
/* maximum size of read/write commands (HW limit) */
#define DMAE_MAX_RW_SIZE        0x2000

struct qed_dmae_info {
	/* Mutex for synchronizing access to functions */
	struct mutex mutex;

	u8 channel;

	dma_addr_t completion_word_phys_addr;

	/* The memory location where the DMAE writes the completion
	 * value when an operation is finished on this context.
	 */
	u32 *p_completion_word;

	dma_addr_t intermediate_buffer_phys_addr;

	/* An intermediate buffer for DMAE operations that use virtual
	 * addresses - data is DMA'd to/from this buffer and then
	 * memcpy'd to/from the virtual address
	 */
	u32 *p_intermediate_buffer;

	dma_addr_t dmae_cmd_phys_addr;
	struct dmae_cmd *p_dmae_cmd;
};
struct qed_wfq_data {
	/* when feature is configured for at least 1 vport */
	u32 min_speed;
	bool configured;
};

/* Queue-manager configuration computed at init time */
struct qed_qm_info {
	struct init_qm_pq_params *qm_pq_params;
	struct init_qm_vport_params *qm_vport_params;
	struct init_qm_port_params *qm_port_params;
	u16 start_pq;
	u8 start_vport;
	u16 pure_lb_pq;
	u16 offload_pq;
	u16 low_latency_pq;
	u16 pure_ack_pq;
	u16 ooo_pq;
	u16 first_vf_pq;
	u16 first_mcos_pq;
	u16 first_rl_pq;
	u16 num_pqs;
	u16 num_vf_pqs;
	u8 num_vports;
	u8 max_phys_tcs_per_port;
	u8 ooo_tc;
	bool pf_rl_en;
	bool pf_wfq_en;
	bool vport_rl_en;
	bool vport_wfq_en;
	u8 pf_wfq;
	u32 pf_rl;
	struct qed_wfq_data *wfq_data;
	u8 num_pf_rls;
};

/* Location (address/length) of one storm's statistics area */
struct storm_stats {
	u32 address;
	u32 len;
};

/* Statistics areas for the M/P/T/U storms */
struct qed_storm_stats {
	struct storm_stats mstats;
	struct storm_stats pstats;
	struct storm_stats tstats;
	struct storm_stats ustats;
};

/* Pointers into the parsed firmware image */
struct qed_fw_data {
	struct fw_ver_info *fw_ver_info;
	const u8 *modes_tree_buf;
	union init_op *init_ops;
	const u32 *arr_data;
	u32 init_ops_size;
};
enum BAR_ID {
	BAR_ID_0,	/* used for GRC */
	BAR_ID_1	/* Used for doorbells */
};

/* "major.minor.revision.engineering" version string */
#define DRV_MODULE_VERSION		      \
	__stringify(QED_MAJOR_VERSION) "."    \
	__stringify(QED_MINOR_VERSION) "."    \
	__stringify(QED_REVISION_VERSION) "." \
	__stringify(QED_ENGINEERING_VERSION)

/* Simple fastpath handler: func is invoked with token as its argument */
struct qed_simd_fp_handler {
	void *token;
	void (*func)(void *);
};
/* Per-HW-function (PF) state; a device holds up to MAX_HWFNS_PER_DEVICE */
struct qed_hwfn {
	struct qed_dev *cdev;
	u8 my_id;		/* ID inside the PF */
#define IS_LEAD_HWFN(edev)	(!((edev)->my_id))
	u8 rel_pf_id;		/* Relative to engine*/
	u8 abs_pf_id;
#define QED_PATH_ID(_p_hwfn) \
	(QED_IS_K2((_p_hwfn)->cdev) ? 0 : ((_p_hwfn)->abs_pf_id & 1))
	u8 port_id;
	bool b_active;

	u32 dp_module;
	u8 dp_level;
	char name[NAME_SIZE];

	bool first_on_engine;
	bool hw_init_done;

	u8 num_funcs_on_engine;
	u8 enabled_func_idx;

	/* BAR access */
	void __iomem *regview;
	void __iomem *doorbells;
	u64 db_phys_addr;
	unsigned long db_size;

	/* PTT pool */
	struct qed_ptt_pool *p_ptt_pool;

	/* HW info */
	struct qed_hw_info hw_info;

	/* rt_array (for init-tool) */
	struct qed_rt_data rt_data;

	/* SPQ */
	struct qed_spq *p_spq;

	/* EQ */
	struct qed_eq *p_eq;

	/* Consolidate Q*/
	struct qed_consq *p_consq;

	/* Slow-Path definitions */
	struct tasklet_struct *sp_dpc;
	bool b_sp_dpc_enabled;

	struct qed_ptt *p_main_ptt;
	struct qed_ptt *p_dpc_ptt;

	/* PTP will be used only by the leading function.
	 * Usage of all PTP-apis should be synchronized as result.
	 */
	struct qed_ptt *p_ptp_ptt;

	struct qed_sb_sp_info *p_sp_sb;
	struct qed_sb_attn_info *p_sb_attn;

	/* Protocol related */
	bool using_ll2;
	struct qed_ll2_info *p_ll2_info;
	struct qed_ooo_info *p_ooo_info;
	struct qed_rdma_info *p_rdma_info;
	struct qed_iscsi_info *p_iscsi_info;
	struct qed_fcoe_info *p_fcoe_info;
	struct qed_pf_params pf_params;

	bool b_rdma_enabled_in_prs;
	u32 rdma_prs_search_reg;

	struct qed_cxt_mngr *p_cxt_mngr;

	/* Flag indicating whether interrupts are enabled or not*/
	bool b_int_enabled;
	bool b_int_requested;

	/* True if the driver requests for the link */
	bool b_drv_link_init;

	struct qed_vf_iov *vf_iov_info;
	struct qed_pf_iov *pf_iov_info;
	struct qed_mcp_info *mcp_info;

	struct qed_dcbx_info *p_dcbx_info;

	struct qed_dmae_info dmae_info;

	/* QM init */
	struct qed_qm_info qm_info;
	struct qed_storm_stats storm_stats;

	/* Buffer for unzipping firmware data */
	void *unzip_buf;

	struct dbg_tools_data dbg_info;

	/* PWM region specific data */
	u16 wid_count;
	u32 dpi_size;
	u32 dpi_count;

	/* This is used to calculate the doorbell address */
	u32 dpi_start_offset;

	/* If one of the following is set then EDPM shouldn't be used */
	u8 dcbx_no_edpm;
	u8 db_bar_no_edpm;

	/* L2-related */
	struct qed_l2_info *p_l2_info;

	struct qed_ptt *p_arfs_ptt;

	struct qed_simd_fp_handler simd_proto_handler[64];

#ifdef CONFIG_QED_SRIOV
	struct workqueue_struct *iov_wq;
	struct delayed_work iov_task;
	unsigned long iov_task_flags;
#endif

	struct z_stream_s *stream;
};
struct pci_params {
	int pm_cap;	/* PCI power-management capability offset */

	unsigned long mem_start;
	unsigned long mem_end;
	unsigned int irq;
	u8 pf_num;
};

struct qed_int_param {
	u32 int_mode;
	u8 num_vectors;
	u8 min_msix_cnt; /* for minimal functionality */
};

/* Interrupt parameters: "in" is the request, "out" is what was granted */
struct qed_int_params {
	struct qed_int_param in;
	struct qed_int_param out;
	struct msix_entry *msix_table;
	bool fp_initialized;
	u8 fp_msix_base;
	u8 fp_msix_cnt;
	u8 rdma_msix_base;
	u8 rdma_msix_cnt;
};

/* Dump state for one debug feature */
struct qed_dbg_feature {
	struct dentry *dentry;
	u8 *dump_buf;
	u32 buf_size;
	u32 dumped_dwords;
};

struct qed_dbg_params {
	struct qed_dbg_feature features[DBG_FEATURE_NUM];
	u8 engine_for_debug;
	bool print_data;
};
/* Main adapter structure; aggregates all HW-functions (hwfns) of the device */
struct qed_dev {
	u32 dp_module;
	u8 dp_level;
	char name[NAME_SIZE];

	enum qed_dev_type type;
/* Translate type/revision combo into the proper conditions */
#define QED_IS_BB(dev)  ((dev)->type == QED_DEV_TYPE_BB)
#define QED_IS_BB_B0(dev)       (QED_IS_BB(dev) && \
				 CHIP_REV_IS_B0(dev))
#define QED_IS_AH(dev)  ((dev)->type == QED_DEV_TYPE_AH)
#define QED_IS_K2(dev)  QED_IS_AH(dev)

	u16 vendor_id;
	u16 device_id;
#define QED_DEV_ID_MASK		0xff00
#define QED_DEV_ID_MASK_BB	0x1600
#define QED_DEV_ID_MASK_AH	0x8000

	u16 chip_num;
#define CHIP_NUM_MASK		0xffff
#define CHIP_NUM_SHIFT		16

	u16 chip_rev;
#define CHIP_REV_MASK		0xf
#define CHIP_REV_SHIFT		12
#define CHIP_REV_IS_B0(_cdev)	((_cdev)->chip_rev == 1)

	u16 chip_metal;
#define CHIP_METAL_MASK		0xff
#define CHIP_METAL_SHIFT	4

	u16 chip_bond_id;
#define CHIP_BOND_ID_MASK	0xf
#define CHIP_BOND_ID_SHIFT	0

	u8 num_engines;
	u8 num_ports_in_engine;
	u8 num_funcs_in_port;

	u8 path_id;
	enum qed_mf_mode mf_mode;
#define IS_MF_DEFAULT(_p_hwfn)  (((_p_hwfn)->cdev)->mf_mode == QED_MF_DEFAULT)
#define IS_MF_SI(_p_hwfn)       (((_p_hwfn)->cdev)->mf_mode == QED_MF_NPAR)
#define IS_MF_SD(_p_hwfn)       (((_p_hwfn)->cdev)->mf_mode == QED_MF_OVLAN)

	int pcie_width;
	int pcie_speed;

	/* Add MF related configuration */
	u8 mcp_rev;
	u8 boot_mode;

	/* WoL related configurations */
	u8 wol_config;
	u8 wol_mac[ETH_ALEN];

	u32 int_mode;
	enum qed_coalescing_mode int_coalescing_mode;
	u16 rx_coalesce_usecs;
	u16 tx_coalesce_usecs;

	/* Start Bar offset of first hwfn */
	void __iomem *regview;
	void __iomem *doorbells;
	u64 db_phys_addr;
	unsigned long db_size;

	/* PCI */
	u8 cache_shift;

	/* Init */
	const struct iro *iro_arr;
#define IRO (p_hwfn->cdev->iro_arr)

	/* HW functions */
	u8 num_hwfns;
	struct qed_hwfn hwfns[MAX_HWFNS_PER_DEVICE];

	/* SRIOV */
	struct qed_hw_sriov_info *p_iov_info;
#define IS_QED_SRIOV(cdev)	(!!(cdev)->p_iov_info)
	struct qed_tunnel_info tunnel;
	bool b_is_vf;
	u32 drv_type;
	struct qed_eth_stats *reset_stats;
	struct qed_fw_data *fw_data;

	u32 mcp_nvm_resp;

	/* Linux specific here */
	struct qede_dev *edev;
	struct pci_dev *pdev;
	u32 flags;
#define QED_FLAG_STORAGE_STARTED	(BIT(0))

	int msg_enable;

	struct pci_params pci_params;

	struct qed_int_params int_params;

	u8 protocol;
#define IS_QED_ETH_IF(cdev)     ((cdev)->protocol == QED_PROTOCOL_ETH)
#define IS_QED_FCOE_IF(cdev)    ((cdev)->protocol == QED_PROTOCOL_FCOE)

	/* Callbacks to protocol driver */
	union {
		struct qed_common_cb_ops *common;
		struct qed_eth_cb_ops *eth;
		struct qed_fcoe_cb_ops *fcoe;
		struct qed_iscsi_cb_ops *iscsi;
	} protocol_ops;
	void *ops_cookie;

	struct qed_dbg_params dbg_params;

#ifdef CONFIG_QED_LL2
	struct qed_cb_ll2_info *ll2;
	u8 ll2_mac_address[ETH_ALEN];
#endif
	DECLARE_HASHTABLE(connections, 10);
	const struct firmware *firmware;

	u32 rdma_max_sge;
	u32 rdma_max_inline;
	u32 rdma_max_srq_sge;
	u16 tunn_feature_mask;
};
/* Device-type dependent limits (BB vs. K2/AH) */
#define NUM_OF_VFS(dev)         (QED_IS_BB(dev) ? MAX_NUM_VFS_BB \
						: MAX_NUM_VFS_K2)
#define NUM_OF_L2_QUEUES(dev)   (QED_IS_BB(dev) ? MAX_NUM_L2_QUEUES_BB \
						: MAX_NUM_L2_QUEUES_K2)
#define NUM_OF_PORTS(dev)       (QED_IS_BB(dev) ? MAX_NUM_PORTS_BB \
						: MAX_NUM_PORTS_K2)
#define NUM_OF_SBS(dev)         (QED_IS_BB(dev) ? MAX_SB_PER_PATH_BB \
						: MAX_SB_PER_PATH_K2)
#define NUM_OF_ENG_PFS(dev)     (QED_IS_BB(dev) ? MAX_NUM_PFS_BB \
						: MAX_NUM_PFS_K2)

/**
 * @brief qed_concrete_to_sw_fid - get the sw function id from
 *        the concrete value.
 *
 * @param concrete_fid
 *
 * @return inline u8
 */
  630. static inline u8 qed_concrete_to_sw_fid(struct qed_dev *cdev,
  631. u32 concrete_fid)
  632. {
  633. u8 vfid = GET_FIELD(concrete_fid, PXP_CONCRETE_FID_VFID);
  634. u8 pfid = GET_FIELD(concrete_fid, PXP_CONCRETE_FID_PFID);
  635. u8 vf_valid = GET_FIELD(concrete_fid,
  636. PXP_CONCRETE_FID_VFVALID);
  637. u8 sw_fid;
  638. if (vf_valid)
  639. sw_fid = vfid + MAX_NUM_PFS;
  640. else
  641. sw_fid = pfid;
  642. return sw_fid;
  643. }
/* Traffic classes used for loopback queues */
#define PURE_LB_TC 8
#define PKT_LB_TC 9

int qed_configure_vport_wfq(struct qed_dev *cdev, u16 vp_id, u32 rate);
void qed_configure_vp_wfq_on_link_change(struct qed_dev *cdev,
					 struct qed_ptt *p_ptt,
					 u32 min_pf_rate);

void qed_clean_wfq_db(struct qed_hwfn *p_hwfn, struct qed_ptt *p_ptt);
int qed_device_num_engines(struct qed_dev *cdev);
int qed_device_get_port_id(struct qed_dev *cdev);
/* Pack a 6-byte MAC into three little-endian 16-bit FW words */
void qed_set_fw_mac_addr(__le16 *fw_msb,
			 __le16 *fw_mid, __le16 *fw_lsb, u8 *mac);

#define QED_LEADING_HWFN(dev)   (&dev->hwfns[0])
  656. /* Flags for indication of required queues */
  657. #define PQ_FLAGS_RLS (BIT(0))
  658. #define PQ_FLAGS_MCOS (BIT(1))
  659. #define PQ_FLAGS_LB (BIT(2))
  660. #define PQ_FLAGS_OOO (BIT(3))
  661. #define PQ_FLAGS_ACK (BIT(4))
  662. #define PQ_FLAGS_OFLD (BIT(5))
  663. #define PQ_FLAGS_VFS (BIT(6))
  664. #define PQ_FLAGS_LLT (BIT(7))
  665. /* physical queue index for cm context intialization */
  666. u16 qed_get_cm_pq_idx(struct qed_hwfn *p_hwfn, u32 pq_flags);
  667. u16 qed_get_cm_pq_idx_mcos(struct qed_hwfn *p_hwfn, u8 tc);
  668. u16 qed_get_cm_pq_idx_vf(struct qed_hwfn *p_hwfn, u16 vf);
  669. #define QED_LEADING_HWFN(dev) (&dev->hwfns[0])
  670. /* Other Linux specific common definitions */
  671. #define DP_NAME(cdev) ((cdev)->name)
  672. #define REG_ADDR(cdev, offset) (void __iomem *)((u8 __iomem *)\
  673. (cdev->regview) + \
  674. (offset))
  675. #define REG_RD(cdev, offset) readl(REG_ADDR(cdev, offset))
  676. #define REG_WR(cdev, offset, val) writel((u32)val, REG_ADDR(cdev, offset))
  677. #define REG_WR16(cdev, offset, val) writew((u16)val, REG_ADDR(cdev, offset))
  678. #define DOORBELL(cdev, db_addr, val) \
  679. writel((u32)val, (void __iomem *)((u8 __iomem *)\
  680. (cdev->doorbells) + (db_addr)))
/* Prototypes */
int qed_fill_dev_info(struct qed_dev *cdev,
		      struct qed_dev_info *dev_info);
void qed_link_update(struct qed_hwfn *hwfn);
/* Decompress firmware data into unzip_buf; returns the unzipped length
 * (NOTE(review): presumably zlib inflate, given <linux/zlib.h> above —
 * confirm against the definition).
 */
u32 qed_unzip_data(struct qed_hwfn *p_hwfn,
		   u32 input_len, u8 *input_buf,
		   u32 max_size, u8 *unzip_buf);
void qed_get_protocol_stats(struct qed_dev *cdev,
			    enum qed_mcp_protocol_type type,
			    union qed_mcp_protocol_stats *stats);
int qed_slowpath_irq_req(struct qed_hwfn *hwfn);
void qed_slowpath_irq_sync(struct qed_hwfn *p_hwfn);

#endif /* _QED_H */