/* qed.h - QLogic qed NIC driver main header */
/* QLogic qed NIC Driver
 * Copyright (c) 2015-2017 QLogic Corporation
 *
 * This software is available to you under a choice of one of two
 * licenses. You may choose to be licensed under the terms of the GNU
 * General Public License (GPL) Version 2, available from the file
 * COPYING in the main directory of this source tree, or the
 * OpenIB.org BSD license below:
 *
 * Redistribution and use in source and binary forms, with or
 * without modification, are permitted provided that the following
 * conditions are met:
 *
 * - Redistributions of source code must retain the above
 *   copyright notice, this list of conditions and the following
 *   disclaimer.
 *
 * - Redistributions in binary form must reproduce the above
 *   copyright notice, this list of conditions and the following
 *   disclaimer in the documentation and/or other materials
 *   provided with the distribution.
 *
 * THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND,
 * EXPRESS OR IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF
 * MERCHANTABILITY, FITNESS FOR A PARTICULAR PURPOSE AND
 * NONINFRINGEMENT. IN NO EVENT SHALL THE AUTHORS OR COPYRIGHT HOLDERS
 * BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER LIABILITY, WHETHER IN AN
 * ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING FROM, OUT OF OR IN
 * CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER DEALINGS IN THE
 * SOFTWARE.
 */
  32. #ifndef _QED_H
  33. #define _QED_H
  34. #include <linux/types.h>
  35. #include <linux/io.h>
  36. #include <linux/delay.h>
  37. #include <linux/firmware.h>
  38. #include <linux/interrupt.h>
  39. #include <linux/list.h>
  40. #include <linux/mutex.h>
  41. #include <linux/pci.h>
  42. #include <linux/slab.h>
  43. #include <linux/string.h>
  44. #include <linux/workqueue.h>
  45. #include <linux/zlib.h>
  46. #include <linux/hashtable.h>
  47. #include <linux/qed/qed_if.h>
  48. #include "qed_debug.h"
  49. #include "qed_hsi.h"
  50. extern const struct qed_common_ops qed_common_ops_pass;
  51. #define QED_MAJOR_VERSION 8
  52. #define QED_MINOR_VERSION 33
  53. #define QED_REVISION_VERSION 0
  54. #define QED_ENGINEERING_VERSION 20
  55. #define QED_VERSION \
  56. ((QED_MAJOR_VERSION << 24) | (QED_MINOR_VERSION << 16) | \
  57. (QED_REVISION_VERSION << 8) | QED_ENGINEERING_VERSION)
  58. #define STORM_FW_VERSION \
  59. ((FW_MAJOR_VERSION << 24) | (FW_MINOR_VERSION << 16) | \
  60. (FW_REVISION_VERSION << 8) | FW_ENGINEERING_VERSION)
  61. #define MAX_HWFNS_PER_DEVICE (4)
  62. #define NAME_SIZE 16
  63. #define VER_SIZE 16
  64. #define QED_WFQ_UNIT 100
  65. #define QED_WID_SIZE (1024)
  66. #define QED_MIN_WIDS (4)
  67. #define QED_PF_DEMS_SIZE (4)
  68. /* cau states */
  69. enum qed_coalescing_mode {
  70. QED_COAL_MODE_DISABLE,
  71. QED_COAL_MODE_ENABLE
  72. };
  73. enum qed_nvm_cmd {
  74. QED_PUT_FILE_BEGIN = DRV_MSG_CODE_NVM_PUT_FILE_BEGIN,
  75. QED_PUT_FILE_DATA = DRV_MSG_CODE_NVM_PUT_FILE_DATA,
  76. QED_NVM_WRITE_NVRAM = DRV_MSG_CODE_NVM_WRITE_NVRAM,
  77. QED_GET_MCP_NVM_RESP = 0xFFFFFF00
  78. };
  79. struct qed_eth_cb_ops;
  80. struct qed_dev_info;
  81. union qed_mcp_protocol_stats;
  82. enum qed_mcp_protocol_type;
  83. enum qed_mfw_tlv_type;
  84. union qed_mfw_tlv_data;
  85. /* helpers */
  86. #define QED_MFW_GET_FIELD(name, field) \
  87. (((name) & (field ## _MASK)) >> (field ## _SHIFT))
  88. #define QED_MFW_SET_FIELD(name, field, value) \
  89. do { \
  90. (name) &= ~(field ## _MASK); \
  91. (name) |= (((value) << (field ## _SHIFT)) & (field ## _MASK));\
  92. } while (0)
  93. static inline u32 qed_db_addr(u32 cid, u32 DEMS)
  94. {
  95. u32 db_addr = FIELD_VALUE(DB_LEGACY_ADDR_DEMS, DEMS) |
  96. (cid * QED_PF_DEMS_SIZE);
  97. return db_addr;
  98. }
  99. static inline u32 qed_db_addr_vf(u32 cid, u32 DEMS)
  100. {
  101. u32 db_addr = FIELD_VALUE(DB_LEGACY_ADDR_DEMS, DEMS) |
  102. FIELD_VALUE(DB_LEGACY_ADDR_ICID, cid);
  103. return db_addr;
  104. }
  105. #define ALIGNED_TYPE_SIZE(type_name, p_hwfn) \
  106. ((sizeof(type_name) + (u32)(1 << (p_hwfn->cdev->cache_shift)) - 1) & \
  107. ~((1 << (p_hwfn->cdev->cache_shift)) - 1))
  108. #define for_each_hwfn(cdev, i) for (i = 0; i < cdev->num_hwfns; i++)
  109. #define D_TRINE(val, cond1, cond2, true1, true2, def) \
  110. (val == (cond1) ? true1 : \
  111. (val == (cond2) ? true2 : def))
  112. /* forward */
  113. struct qed_ptt_pool;
  114. struct qed_spq;
  115. struct qed_sb_info;
  116. struct qed_sb_attn_info;
  117. struct qed_cxt_mngr;
  118. struct qed_sb_sp_info;
  119. struct qed_ll2_info;
  120. struct qed_mcp_info;
  121. struct qed_rt_data {
  122. u32 *init_val;
  123. bool *b_valid;
  124. };
  125. enum qed_tunn_mode {
  126. QED_MODE_L2GENEVE_TUNN,
  127. QED_MODE_IPGENEVE_TUNN,
  128. QED_MODE_L2GRE_TUNN,
  129. QED_MODE_IPGRE_TUNN,
  130. QED_MODE_VXLAN_TUNN,
  131. };
  132. enum qed_tunn_clss {
  133. QED_TUNN_CLSS_MAC_VLAN,
  134. QED_TUNN_CLSS_MAC_VNI,
  135. QED_TUNN_CLSS_INNER_MAC_VLAN,
  136. QED_TUNN_CLSS_INNER_MAC_VNI,
  137. QED_TUNN_CLSS_MAC_VLAN_DUAL_STAGE,
  138. MAX_QED_TUNN_CLSS,
  139. };
  140. struct qed_tunn_update_type {
  141. bool b_update_mode;
  142. bool b_mode_enabled;
  143. enum qed_tunn_clss tun_cls;
  144. };
  145. struct qed_tunn_update_udp_port {
  146. bool b_update_port;
  147. u16 port;
  148. };
  149. struct qed_tunnel_info {
  150. struct qed_tunn_update_type vxlan;
  151. struct qed_tunn_update_type l2_geneve;
  152. struct qed_tunn_update_type ip_geneve;
  153. struct qed_tunn_update_type l2_gre;
  154. struct qed_tunn_update_type ip_gre;
  155. struct qed_tunn_update_udp_port vxlan_port;
  156. struct qed_tunn_update_udp_port geneve_port;
  157. bool b_update_rx_cls;
  158. bool b_update_tx_cls;
  159. };
  160. struct qed_tunn_start_params {
  161. unsigned long tunn_mode;
  162. u16 vxlan_udp_port;
  163. u16 geneve_udp_port;
  164. u8 update_vxlan_udp_port;
  165. u8 update_geneve_udp_port;
  166. u8 tunn_clss_vxlan;
  167. u8 tunn_clss_l2geneve;
  168. u8 tunn_clss_ipgeneve;
  169. u8 tunn_clss_l2gre;
  170. u8 tunn_clss_ipgre;
  171. };
  172. struct qed_tunn_update_params {
  173. unsigned long tunn_mode_update_mask;
  174. unsigned long tunn_mode;
  175. u16 vxlan_udp_port;
  176. u16 geneve_udp_port;
  177. u8 update_rx_pf_clss;
  178. u8 update_tx_pf_clss;
  179. u8 update_vxlan_udp_port;
  180. u8 update_geneve_udp_port;
  181. u8 tunn_clss_vxlan;
  182. u8 tunn_clss_l2geneve;
  183. u8 tunn_clss_ipgeneve;
  184. u8 tunn_clss_l2gre;
  185. u8 tunn_clss_ipgre;
  186. };
  187. /* The PCI personality is not quite synonymous to protocol ID:
  188. * 1. All personalities need CORE connections
  189. * 2. The Ethernet personality may support also the RoCE/iWARP protocol
  190. */
  191. enum qed_pci_personality {
  192. QED_PCI_ETH,
  193. QED_PCI_FCOE,
  194. QED_PCI_ISCSI,
  195. QED_PCI_ETH_ROCE,
  196. QED_PCI_ETH_IWARP,
  197. QED_PCI_ETH_RDMA,
  198. QED_PCI_DEFAULT, /* default in shmem */
  199. };
  200. /* All VFs are symmetric, all counters are PF + all VFs */
  201. struct qed_qm_iids {
  202. u32 cids;
  203. u32 vf_cids;
  204. u32 tids;
  205. };
  206. /* HW / FW resources, output of features supported below, most information
  207. * is received from MFW.
  208. */
  209. enum qed_resources {
  210. QED_SB,
  211. QED_L2_QUEUE,
  212. QED_VPORT,
  213. QED_RSS_ENG,
  214. QED_PQ,
  215. QED_RL,
  216. QED_MAC,
  217. QED_VLAN,
  218. QED_RDMA_CNQ_RAM,
  219. QED_ILT,
  220. QED_LL2_QUEUE,
  221. QED_CMDQS_CQS,
  222. QED_RDMA_STATS_QUEUE,
  223. QED_BDQ,
  224. QED_MAX_RESC,
  225. };
  226. enum QED_FEATURE {
  227. QED_PF_L2_QUE,
  228. QED_VF,
  229. QED_RDMA_CNQ,
  230. QED_ISCSI_CQ,
  231. QED_FCOE_CQ,
  232. QED_VF_L2_QUE,
  233. QED_MAX_FEATURES,
  234. };
  235. enum QED_PORT_MODE {
  236. QED_PORT_MODE_DE_2X40G,
  237. QED_PORT_MODE_DE_2X50G,
  238. QED_PORT_MODE_DE_1X100G,
  239. QED_PORT_MODE_DE_4X10G_F,
  240. QED_PORT_MODE_DE_4X10G_E,
  241. QED_PORT_MODE_DE_4X20G,
  242. QED_PORT_MODE_DE_1X40G,
  243. QED_PORT_MODE_DE_2X25G,
  244. QED_PORT_MODE_DE_1X25G,
  245. QED_PORT_MODE_DE_4X25G,
  246. QED_PORT_MODE_DE_2X10G,
  247. };
  248. enum qed_dev_cap {
  249. QED_DEV_CAP_ETH,
  250. QED_DEV_CAP_FCOE,
  251. QED_DEV_CAP_ISCSI,
  252. QED_DEV_CAP_ROCE,
  253. QED_DEV_CAP_IWARP,
  254. };
  255. enum qed_wol_support {
  256. QED_WOL_SUPPORT_NONE,
  257. QED_WOL_SUPPORT_PME,
  258. };
  259. struct qed_hw_info {
  260. /* PCI personality */
  261. enum qed_pci_personality personality;
  262. #define QED_IS_RDMA_PERSONALITY(dev) \
  263. ((dev)->hw_info.personality == QED_PCI_ETH_ROCE || \
  264. (dev)->hw_info.personality == QED_PCI_ETH_IWARP || \
  265. (dev)->hw_info.personality == QED_PCI_ETH_RDMA)
  266. #define QED_IS_ROCE_PERSONALITY(dev) \
  267. ((dev)->hw_info.personality == QED_PCI_ETH_ROCE || \
  268. (dev)->hw_info.personality == QED_PCI_ETH_RDMA)
  269. #define QED_IS_IWARP_PERSONALITY(dev) \
  270. ((dev)->hw_info.personality == QED_PCI_ETH_IWARP || \
  271. (dev)->hw_info.personality == QED_PCI_ETH_RDMA)
  272. #define QED_IS_L2_PERSONALITY(dev) \
  273. ((dev)->hw_info.personality == QED_PCI_ETH || \
  274. QED_IS_RDMA_PERSONALITY(dev))
  275. #define QED_IS_FCOE_PERSONALITY(dev) \
  276. ((dev)->hw_info.personality == QED_PCI_FCOE)
  277. #define QED_IS_ISCSI_PERSONALITY(dev) \
  278. ((dev)->hw_info.personality == QED_PCI_ISCSI)
  279. /* Resource Allocation scheme results */
  280. u32 resc_start[QED_MAX_RESC];
  281. u32 resc_num[QED_MAX_RESC];
  282. u32 feat_num[QED_MAX_FEATURES];
  283. #define RESC_START(_p_hwfn, resc) ((_p_hwfn)->hw_info.resc_start[resc])
  284. #define RESC_NUM(_p_hwfn, resc) ((_p_hwfn)->hw_info.resc_num[resc])
  285. #define RESC_END(_p_hwfn, resc) (RESC_START(_p_hwfn, resc) + \
  286. RESC_NUM(_p_hwfn, resc))
  287. #define FEAT_NUM(_p_hwfn, resc) ((_p_hwfn)->hw_info.feat_num[resc])
  288. /* Amount of traffic classes HW supports */
  289. u8 num_hw_tc;
  290. /* Amount of TCs which should be active according to DCBx or upper
  291. * layer driver configuration.
  292. */
  293. u8 num_active_tc;
  294. u8 offload_tc;
  295. u32 concrete_fid;
  296. u16 opaque_fid;
  297. u16 ovlan;
  298. u32 part_num[4];
  299. unsigned char hw_mac_addr[ETH_ALEN];
  300. u64 node_wwn;
  301. u64 port_wwn;
  302. u16 num_fcoe_conns;
  303. struct qed_igu_info *p_igu_info;
  304. u32 port_mode;
  305. u32 hw_mode;
  306. unsigned long device_capabilities;
  307. u16 mtu;
  308. enum qed_wol_support b_wol_support;
  309. };
  310. /* maximun size of read/write commands (HW limit) */
  311. #define DMAE_MAX_RW_SIZE 0x2000
  312. struct qed_dmae_info {
  313. /* Mutex for synchronizing access to functions */
  314. struct mutex mutex;
  315. u8 channel;
  316. dma_addr_t completion_word_phys_addr;
  317. /* The memory location where the DMAE writes the completion
  318. * value when an operation is finished on this context.
  319. */
  320. u32 *p_completion_word;
  321. dma_addr_t intermediate_buffer_phys_addr;
  322. /* An intermediate buffer for DMAE operations that use virtual
  323. * addresses - data is DMA'd to/from this buffer and then
  324. * memcpy'd to/from the virtual address
  325. */
  326. u32 *p_intermediate_buffer;
  327. dma_addr_t dmae_cmd_phys_addr;
  328. struct dmae_cmd *p_dmae_cmd;
  329. };
  330. struct qed_wfq_data {
  331. /* when feature is configured for at least 1 vport */
  332. u32 min_speed;
  333. bool configured;
  334. };
  335. struct qed_qm_info {
  336. struct init_qm_pq_params *qm_pq_params;
  337. struct init_qm_vport_params *qm_vport_params;
  338. struct init_qm_port_params *qm_port_params;
  339. u16 start_pq;
  340. u8 start_vport;
  341. u16 pure_lb_pq;
  342. u16 offload_pq;
  343. u16 low_latency_pq;
  344. u16 pure_ack_pq;
  345. u16 ooo_pq;
  346. u16 first_vf_pq;
  347. u16 first_mcos_pq;
  348. u16 first_rl_pq;
  349. u16 num_pqs;
  350. u16 num_vf_pqs;
  351. u8 num_vports;
  352. u8 max_phys_tcs_per_port;
  353. u8 ooo_tc;
  354. bool pf_rl_en;
  355. bool pf_wfq_en;
  356. bool vport_rl_en;
  357. bool vport_wfq_en;
  358. u8 pf_wfq;
  359. u32 pf_rl;
  360. struct qed_wfq_data *wfq_data;
  361. u8 num_pf_rls;
  362. };
  363. struct storm_stats {
  364. u32 address;
  365. u32 len;
  366. };
  367. struct qed_storm_stats {
  368. struct storm_stats mstats;
  369. struct storm_stats pstats;
  370. struct storm_stats tstats;
  371. struct storm_stats ustats;
  372. };
  373. struct qed_fw_data {
  374. struct fw_ver_info *fw_ver_info;
  375. const u8 *modes_tree_buf;
  376. union init_op *init_ops;
  377. const u32 *arr_data;
  378. u32 init_ops_size;
  379. };
  380. enum qed_mf_mode_bit {
  381. /* Supports PF-classification based on tag */
  382. QED_MF_OVLAN_CLSS,
  383. /* Supports PF-classification based on MAC */
  384. QED_MF_LLH_MAC_CLSS,
  385. /* Supports PF-classification based on protocol type */
  386. QED_MF_LLH_PROTO_CLSS,
  387. /* Requires a default PF to be set */
  388. QED_MF_NEED_DEF_PF,
  389. /* Allow LL2 to multicast/broadcast */
  390. QED_MF_LL2_NON_UNICAST,
  391. /* Allow Cross-PF [& child VFs] Tx-switching */
  392. QED_MF_INTER_PF_SWITCH,
  393. /* Unified Fabtic Port support enabled */
  394. QED_MF_UFP_SPECIFIC,
  395. /* Disable Accelerated Receive Flow Steering (aRFS) */
  396. QED_MF_DISABLE_ARFS,
  397. /* Use vlan for steering */
  398. QED_MF_8021Q_TAGGING,
  399. /* Use stag for steering */
  400. QED_MF_8021AD_TAGGING,
  401. /* Allow DSCP to TC mapping */
  402. QED_MF_DSCP_TO_TC_MAP,
  403. };
  404. enum qed_ufp_mode {
  405. QED_UFP_MODE_ETS,
  406. QED_UFP_MODE_VNIC_BW,
  407. QED_UFP_MODE_UNKNOWN
  408. };
  409. enum qed_ufp_pri_type {
  410. QED_UFP_PRI_OS,
  411. QED_UFP_PRI_VNIC,
  412. QED_UFP_PRI_UNKNOWN
  413. };
  414. struct qed_ufp_info {
  415. enum qed_ufp_pri_type pri_type;
  416. enum qed_ufp_mode mode;
  417. u8 tc;
  418. };
  419. enum BAR_ID {
  420. BAR_ID_0, /* used for GRC */
  421. BAR_ID_1 /* Used for doorbells */
  422. };
  423. struct qed_nvm_image_info {
  424. u32 num_images;
  425. struct bist_nvm_image_att *image_att;
  426. };
  427. #define DRV_MODULE_VERSION \
  428. __stringify(QED_MAJOR_VERSION) "." \
  429. __stringify(QED_MINOR_VERSION) "." \
  430. __stringify(QED_REVISION_VERSION) "." \
  431. __stringify(QED_ENGINEERING_VERSION)
  432. struct qed_simd_fp_handler {
  433. void *token;
  434. void (*func)(void *);
  435. };
  436. enum qed_slowpath_wq_flag {
  437. QED_SLOWPATH_MFW_TLV_REQ,
  438. };
  439. struct qed_hwfn {
  440. struct qed_dev *cdev;
  441. u8 my_id; /* ID inside the PF */
  442. #define IS_LEAD_HWFN(edev) (!((edev)->my_id))
  443. u8 rel_pf_id; /* Relative to engine*/
  444. u8 abs_pf_id;
  445. #define QED_PATH_ID(_p_hwfn) \
  446. (QED_IS_K2((_p_hwfn)->cdev) ? 0 : ((_p_hwfn)->abs_pf_id & 1))
  447. u8 port_id;
  448. bool b_active;
  449. u32 dp_module;
  450. u8 dp_level;
  451. char name[NAME_SIZE];
  452. bool first_on_engine;
  453. bool hw_init_done;
  454. u8 num_funcs_on_engine;
  455. u8 enabled_func_idx;
  456. /* BAR access */
  457. void __iomem *regview;
  458. void __iomem *doorbells;
  459. u64 db_phys_addr;
  460. unsigned long db_size;
  461. /* PTT pool */
  462. struct qed_ptt_pool *p_ptt_pool;
  463. /* HW info */
  464. struct qed_hw_info hw_info;
  465. /* rt_array (for init-tool) */
  466. struct qed_rt_data rt_data;
  467. /* SPQ */
  468. struct qed_spq *p_spq;
  469. /* EQ */
  470. struct qed_eq *p_eq;
  471. /* Consolidate Q*/
  472. struct qed_consq *p_consq;
  473. /* Slow-Path definitions */
  474. struct tasklet_struct *sp_dpc;
  475. bool b_sp_dpc_enabled;
  476. struct qed_ptt *p_main_ptt;
  477. struct qed_ptt *p_dpc_ptt;
  478. /* PTP will be used only by the leading function.
  479. * Usage of all PTP-apis should be synchronized as result.
  480. */
  481. struct qed_ptt *p_ptp_ptt;
  482. struct qed_sb_sp_info *p_sp_sb;
  483. struct qed_sb_attn_info *p_sb_attn;
  484. /* Protocol related */
  485. bool using_ll2;
  486. struct qed_ll2_info *p_ll2_info;
  487. struct qed_ooo_info *p_ooo_info;
  488. struct qed_rdma_info *p_rdma_info;
  489. struct qed_iscsi_info *p_iscsi_info;
  490. struct qed_fcoe_info *p_fcoe_info;
  491. struct qed_pf_params pf_params;
  492. bool b_rdma_enabled_in_prs;
  493. u32 rdma_prs_search_reg;
  494. struct qed_cxt_mngr *p_cxt_mngr;
  495. /* Flag indicating whether interrupts are enabled or not*/
  496. bool b_int_enabled;
  497. bool b_int_requested;
  498. /* True if the driver requests for the link */
  499. bool b_drv_link_init;
  500. struct qed_vf_iov *vf_iov_info;
  501. struct qed_pf_iov *pf_iov_info;
  502. struct qed_mcp_info *mcp_info;
  503. struct qed_dcbx_info *p_dcbx_info;
  504. struct qed_ufp_info ufp_info;
  505. struct qed_dmae_info dmae_info;
  506. /* QM init */
  507. struct qed_qm_info qm_info;
  508. struct qed_storm_stats storm_stats;
  509. /* Buffer for unzipping firmware data */
  510. void *unzip_buf;
  511. struct dbg_tools_data dbg_info;
  512. /* PWM region specific data */
  513. u16 wid_count;
  514. u32 dpi_size;
  515. u32 dpi_count;
  516. /* This is used to calculate the doorbell address */
  517. u32 dpi_start_offset;
  518. /* If one of the following is set then EDPM shouldn't be used */
  519. u8 dcbx_no_edpm;
  520. u8 db_bar_no_edpm;
  521. /* L2-related */
  522. struct qed_l2_info *p_l2_info;
  523. /* Nvm images number and attributes */
  524. struct qed_nvm_image_info nvm_info;
  525. struct qed_ptt *p_arfs_ptt;
  526. struct qed_simd_fp_handler simd_proto_handler[64];
  527. #ifdef CONFIG_QED_SRIOV
  528. struct workqueue_struct *iov_wq;
  529. struct delayed_work iov_task;
  530. unsigned long iov_task_flags;
  531. #endif
  532. struct z_stream_s *stream;
  533. struct workqueue_struct *slowpath_wq;
  534. struct delayed_work slowpath_task;
  535. unsigned long slowpath_task_flags;
  536. };
  537. struct pci_params {
  538. int pm_cap;
  539. unsigned long mem_start;
  540. unsigned long mem_end;
  541. unsigned int irq;
  542. u8 pf_num;
  543. };
  544. struct qed_int_param {
  545. u32 int_mode;
  546. u8 num_vectors;
  547. u8 min_msix_cnt; /* for minimal functionality */
  548. };
  549. struct qed_int_params {
  550. struct qed_int_param in;
  551. struct qed_int_param out;
  552. struct msix_entry *msix_table;
  553. bool fp_initialized;
  554. u8 fp_msix_base;
  555. u8 fp_msix_cnt;
  556. u8 rdma_msix_base;
  557. u8 rdma_msix_cnt;
  558. };
  559. struct qed_dbg_feature {
  560. struct dentry *dentry;
  561. u8 *dump_buf;
  562. u32 buf_size;
  563. u32 dumped_dwords;
  564. };
  565. struct qed_dbg_params {
  566. struct qed_dbg_feature features[DBG_FEATURE_NUM];
  567. u8 engine_for_debug;
  568. bool print_data;
  569. };
  570. struct qed_dev {
  571. u32 dp_module;
  572. u8 dp_level;
  573. char name[NAME_SIZE];
  574. enum qed_dev_type type;
  575. /* Translate type/revision combo into the proper conditions */
  576. #define QED_IS_BB(dev) ((dev)->type == QED_DEV_TYPE_BB)
  577. #define QED_IS_BB_B0(dev) (QED_IS_BB(dev) && \
  578. CHIP_REV_IS_B0(dev))
  579. #define QED_IS_AH(dev) ((dev)->type == QED_DEV_TYPE_AH)
  580. #define QED_IS_K2(dev) QED_IS_AH(dev)
  581. u16 vendor_id;
  582. u16 device_id;
  583. #define QED_DEV_ID_MASK 0xff00
  584. #define QED_DEV_ID_MASK_BB 0x1600
  585. #define QED_DEV_ID_MASK_AH 0x8000
  586. u16 chip_num;
  587. #define CHIP_NUM_MASK 0xffff
  588. #define CHIP_NUM_SHIFT 16
  589. u16 chip_rev;
  590. #define CHIP_REV_MASK 0xf
  591. #define CHIP_REV_SHIFT 12
  592. #define CHIP_REV_IS_B0(_cdev) ((_cdev)->chip_rev == 1)
  593. u16 chip_metal;
  594. #define CHIP_METAL_MASK 0xff
  595. #define CHIP_METAL_SHIFT 4
  596. u16 chip_bond_id;
  597. #define CHIP_BOND_ID_MASK 0xf
  598. #define CHIP_BOND_ID_SHIFT 0
  599. u8 num_engines;
  600. u8 num_ports_in_engine;
  601. u8 num_funcs_in_port;
  602. u8 path_id;
  603. unsigned long mf_bits;
  604. int pcie_width;
  605. int pcie_speed;
  606. /* Add MF related configuration */
  607. u8 mcp_rev;
  608. u8 boot_mode;
  609. /* WoL related configurations */
  610. u8 wol_config;
  611. u8 wol_mac[ETH_ALEN];
  612. u32 int_mode;
  613. enum qed_coalescing_mode int_coalescing_mode;
  614. u16 rx_coalesce_usecs;
  615. u16 tx_coalesce_usecs;
  616. /* Start Bar offset of first hwfn */
  617. void __iomem *regview;
  618. void __iomem *doorbells;
  619. u64 db_phys_addr;
  620. unsigned long db_size;
  621. /* PCI */
  622. u8 cache_shift;
  623. /* Init */
  624. const struct iro *iro_arr;
  625. #define IRO (p_hwfn->cdev->iro_arr)
  626. /* HW functions */
  627. u8 num_hwfns;
  628. struct qed_hwfn hwfns[MAX_HWFNS_PER_DEVICE];
  629. /* SRIOV */
  630. struct qed_hw_sriov_info *p_iov_info;
  631. #define IS_QED_SRIOV(cdev) (!!(cdev)->p_iov_info)
  632. struct qed_tunnel_info tunnel;
  633. bool b_is_vf;
  634. u32 drv_type;
  635. struct qed_eth_stats *reset_stats;
  636. struct qed_fw_data *fw_data;
  637. u32 mcp_nvm_resp;
  638. /* Linux specific here */
  639. struct qede_dev *edev;
  640. struct pci_dev *pdev;
  641. u32 flags;
  642. #define QED_FLAG_STORAGE_STARTED (BIT(0))
  643. int msg_enable;
  644. struct pci_params pci_params;
  645. struct qed_int_params int_params;
  646. u8 protocol;
  647. #define IS_QED_ETH_IF(cdev) ((cdev)->protocol == QED_PROTOCOL_ETH)
  648. #define IS_QED_FCOE_IF(cdev) ((cdev)->protocol == QED_PROTOCOL_FCOE)
  649. /* Callbacks to protocol driver */
  650. union {
  651. struct qed_common_cb_ops *common;
  652. struct qed_eth_cb_ops *eth;
  653. struct qed_fcoe_cb_ops *fcoe;
  654. struct qed_iscsi_cb_ops *iscsi;
  655. } protocol_ops;
  656. void *ops_cookie;
  657. struct qed_dbg_params dbg_params;
  658. #ifdef CONFIG_QED_LL2
  659. struct qed_cb_ll2_info *ll2;
  660. u8 ll2_mac_address[ETH_ALEN];
  661. #endif
  662. DECLARE_HASHTABLE(connections, 10);
  663. const struct firmware *firmware;
  664. u32 rdma_max_sge;
  665. u32 rdma_max_inline;
  666. u32 rdma_max_srq_sge;
  667. u16 tunn_feature_mask;
  668. };
  669. #define NUM_OF_VFS(dev) (QED_IS_BB(dev) ? MAX_NUM_VFS_BB \
  670. : MAX_NUM_VFS_K2)
  671. #define NUM_OF_L2_QUEUES(dev) (QED_IS_BB(dev) ? MAX_NUM_L2_QUEUES_BB \
  672. : MAX_NUM_L2_QUEUES_K2)
  673. #define NUM_OF_PORTS(dev) (QED_IS_BB(dev) ? MAX_NUM_PORTS_BB \
  674. : MAX_NUM_PORTS_K2)
  675. #define NUM_OF_SBS(dev) (QED_IS_BB(dev) ? MAX_SB_PER_PATH_BB \
  676. : MAX_SB_PER_PATH_K2)
  677. #define NUM_OF_ENG_PFS(dev) (QED_IS_BB(dev) ? MAX_NUM_PFS_BB \
  678. : MAX_NUM_PFS_K2)
  679. /**
  680. * @brief qed_concrete_to_sw_fid - get the sw function id from
  681. * the concrete value.
  682. *
  683. * @param concrete_fid
  684. *
  685. * @return inline u8
  686. */
  687. static inline u8 qed_concrete_to_sw_fid(struct qed_dev *cdev,
  688. u32 concrete_fid)
  689. {
  690. u8 vfid = GET_FIELD(concrete_fid, PXP_CONCRETE_FID_VFID);
  691. u8 pfid = GET_FIELD(concrete_fid, PXP_CONCRETE_FID_PFID);
  692. u8 vf_valid = GET_FIELD(concrete_fid,
  693. PXP_CONCRETE_FID_VFVALID);
  694. u8 sw_fid;
  695. if (vf_valid)
  696. sw_fid = vfid + MAX_NUM_PFS;
  697. else
  698. sw_fid = pfid;
  699. return sw_fid;
  700. }
  701. #define PKT_LB_TC 9
  702. #define MAX_NUM_VOQS_E4 20
  703. int qed_configure_vport_wfq(struct qed_dev *cdev, u16 vp_id, u32 rate);
  704. void qed_configure_vp_wfq_on_link_change(struct qed_dev *cdev,
  705. struct qed_ptt *p_ptt,
  706. u32 min_pf_rate);
  707. void qed_clean_wfq_db(struct qed_hwfn *p_hwfn, struct qed_ptt *p_ptt);
  708. int qed_device_num_engines(struct qed_dev *cdev);
  709. int qed_device_get_port_id(struct qed_dev *cdev);
  710. void qed_set_fw_mac_addr(__le16 *fw_msb,
  711. __le16 *fw_mid, __le16 *fw_lsb, u8 *mac);
  712. #define QED_LEADING_HWFN(dev) (&dev->hwfns[0])
  713. /* Flags for indication of required queues */
  714. #define PQ_FLAGS_RLS (BIT(0))
  715. #define PQ_FLAGS_MCOS (BIT(1))
  716. #define PQ_FLAGS_LB (BIT(2))
  717. #define PQ_FLAGS_OOO (BIT(3))
  718. #define PQ_FLAGS_ACK (BIT(4))
  719. #define PQ_FLAGS_OFLD (BIT(5))
  720. #define PQ_FLAGS_VFS (BIT(6))
  721. #define PQ_FLAGS_LLT (BIT(7))
  722. /* physical queue index for cm context intialization */
  723. u16 qed_get_cm_pq_idx(struct qed_hwfn *p_hwfn, u32 pq_flags);
  724. u16 qed_get_cm_pq_idx_mcos(struct qed_hwfn *p_hwfn, u8 tc);
  725. u16 qed_get_cm_pq_idx_vf(struct qed_hwfn *p_hwfn, u16 vf);
  726. #define QED_LEADING_HWFN(dev) (&dev->hwfns[0])
  727. /* Other Linux specific common definitions */
  728. #define DP_NAME(cdev) ((cdev)->name)
  729. #define REG_ADDR(cdev, offset) (void __iomem *)((u8 __iomem *)\
  730. (cdev->regview) + \
  731. (offset))
  732. #define REG_RD(cdev, offset) readl(REG_ADDR(cdev, offset))
  733. #define REG_WR(cdev, offset, val) writel((u32)val, REG_ADDR(cdev, offset))
  734. #define REG_WR16(cdev, offset, val) writew((u16)val, REG_ADDR(cdev, offset))
  735. #define DOORBELL(cdev, db_addr, val) \
  736. writel((u32)val, (void __iomem *)((u8 __iomem *)\
  737. (cdev->doorbells) + (db_addr)))
  738. /* Prototypes */
  739. int qed_fill_dev_info(struct qed_dev *cdev,
  740. struct qed_dev_info *dev_info);
  741. void qed_link_update(struct qed_hwfn *hwfn);
  742. u32 qed_unzip_data(struct qed_hwfn *p_hwfn,
  743. u32 input_len, u8 *input_buf,
  744. u32 max_size, u8 *unzip_buf);
  745. void qed_get_protocol_stats(struct qed_dev *cdev,
  746. enum qed_mcp_protocol_type type,
  747. union qed_mcp_protocol_stats *stats);
  748. int qed_slowpath_irq_req(struct qed_hwfn *hwfn);
  749. void qed_slowpath_irq_sync(struct qed_hwfn *p_hwfn);
  750. int qed_mfw_tlv_req(struct qed_hwfn *hwfn);
  751. int qed_mfw_fill_tlv_data(struct qed_hwfn *hwfn,
  752. enum qed_mfw_tlv_type type,
  753. union qed_mfw_tlv_data *tlv_data);
  754. #endif /* _QED_H */