driver.h

/*
 * Copyright (c) 2013-2015, Mellanox Technologies. All rights reserved.
 *
 * This software is available to you under a choice of one of two
 * licenses. You may choose to be licensed under the terms of the GNU
 * General Public License (GPL) Version 2, available from the file
 * COPYING in the main directory of this source tree, or the
 * OpenIB.org BSD license below:
 *
 *     Redistribution and use in source and binary forms, with or
 *     without modification, are permitted provided that the following
 *     conditions are met:
 *
 *      - Redistributions of source code must retain the above
 *        copyright notice, this list of conditions and the following
 *        disclaimer.
 *
 *      - Redistributions in binary form must reproduce the above
 *        copyright notice, this list of conditions and the following
 *        disclaimer in the documentation and/or other materials
 *        provided with the distribution.
 *
 * THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND,
 * EXPRESS OR IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF
 * MERCHANTABILITY, FITNESS FOR A PARTICULAR PURPOSE AND
 * NONINFRINGEMENT. IN NO EVENT SHALL THE AUTHORS OR COPYRIGHT HOLDERS
 * BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER LIABILITY, WHETHER IN AN
 * ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING FROM, OUT OF OR IN
 * CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER DEALINGS IN THE
 * SOFTWARE.
 */

#ifndef MLX5_DRIVER_H
#define MLX5_DRIVER_H

#include <linux/kernel.h>
#include <linux/completion.h>
#include <linux/pci.h>
#include <linux/irq.h>
#include <linux/spinlock_types.h>
#include <linux/semaphore.h>
#include <linux/slab.h>
#include <linux/vmalloc.h>
#include <linux/radix-tree.h>
#include <linux/workqueue.h>
#include <linux/mempool.h>
#include <linux/interrupt.h>
#include <linux/idr.h>

#include <linux/mlx5/device.h>
#include <linux/mlx5/doorbell.h>
#include <linux/mlx5/srq.h>
#include <linux/timecounter.h>
#include <linux/ptp_clock_kernel.h>
enum {
	MLX5_BOARD_ID_LEN = 64,
	MLX5_MAX_NAME_LEN = 16,
};

enum {
	/* one minute for the sake of bringup. Generally, commands must always
	 * complete and we may need to increase this timeout value
	 */
	MLX5_CMD_TIMEOUT_MSEC = 60 * 1000,
	MLX5_CMD_WQ_MAX_NAME = 32,
};

enum {
	CMD_OWNER_SW = 0x0,
	CMD_OWNER_HW = 0x1,
	CMD_STATUS_SUCCESS = 0,
};

enum mlx5_sqp_t {
	MLX5_SQP_SMI = 0,
	MLX5_SQP_GSI = 1,
	MLX5_SQP_IEEE_1588 = 2,
	MLX5_SQP_SNIFFER = 3,
	MLX5_SQP_SYNC_UMR = 4,
};

enum {
	MLX5_MAX_PORTS = 2,
};

enum {
	MLX5_EQ_VEC_PAGES = 0,
	MLX5_EQ_VEC_CMD = 1,
	MLX5_EQ_VEC_ASYNC = 2,
	MLX5_EQ_VEC_PFAULT = 3,
	MLX5_EQ_VEC_COMP_BASE,
};

enum {
	MLX5_MAX_IRQ_NAME = 32
};

enum {
	MLX5_ATOMIC_MODE_IB_COMP = 1 << 16,
	MLX5_ATOMIC_MODE_CX = 2 << 16,
	MLX5_ATOMIC_MODE_8B = 3 << 16,
	MLX5_ATOMIC_MODE_16B = 4 << 16,
	MLX5_ATOMIC_MODE_32B = 5 << 16,
	MLX5_ATOMIC_MODE_64B = 6 << 16,
	MLX5_ATOMIC_MODE_128B = 7 << 16,
	MLX5_ATOMIC_MODE_256B = 8 << 16,
};

enum {
	MLX5_REG_QPTS = 0x4002,
	MLX5_REG_QETCR = 0x4005,
	MLX5_REG_QTCT = 0x400a,
	MLX5_REG_QPDPM = 0x4013,
	MLX5_REG_QCAM = 0x4019,
	MLX5_REG_DCBX_PARAM = 0x4020,
	MLX5_REG_DCBX_APP = 0x4021,
	MLX5_REG_FPGA_CAP = 0x4022,
	MLX5_REG_FPGA_CTRL = 0x4023,
	MLX5_REG_FPGA_ACCESS_REG = 0x4024,
	MLX5_REG_PCAP = 0x5001,
	MLX5_REG_PMTU = 0x5003,
	MLX5_REG_PTYS = 0x5004,
	MLX5_REG_PAOS = 0x5006,
	MLX5_REG_PFCC = 0x5007,
	MLX5_REG_PPCNT = 0x5008,
	MLX5_REG_PPTB = 0x500b,
	MLX5_REG_PBMC = 0x500c,
	MLX5_REG_PMAOS = 0x5012,
	MLX5_REG_PUDE = 0x5009,
	MLX5_REG_PMPE = 0x5010,
	MLX5_REG_PELC = 0x500e,
	MLX5_REG_PVLC = 0x500f,
	MLX5_REG_PCMR = 0x5041,
	MLX5_REG_PMLP = 0x5002,
	MLX5_REG_PCAM = 0x507f,
	MLX5_REG_NODE_DESC = 0x6001,
	MLX5_REG_HOST_ENDIANNESS = 0x7004,
	MLX5_REG_MCIA = 0x9014,
	MLX5_REG_MLCR = 0x902b,
	MLX5_REG_MTRC_CAP = 0x9040,
	MLX5_REG_MTRC_CONF = 0x9041,
	MLX5_REG_MTRC_STDB = 0x9042,
	MLX5_REG_MTRC_CTRL = 0x9043,
	MLX5_REG_MPCNT = 0x9051,
	MLX5_REG_MTPPS = 0x9053,
	MLX5_REG_MTPPSE = 0x9054,
	MLX5_REG_MPEGC = 0x9056,
	MLX5_REG_MCQI = 0x9061,
	MLX5_REG_MCC = 0x9062,
	MLX5_REG_MCDA = 0x9063,
	MLX5_REG_MCAM = 0x907f,
};

enum mlx5_qpts_trust_state {
	MLX5_QPTS_TRUST_PCP = 1,
	MLX5_QPTS_TRUST_DSCP = 2,
};

enum mlx5_dcbx_oper_mode {
	MLX5E_DCBX_PARAM_VER_OPER_HOST = 0x0,
	MLX5E_DCBX_PARAM_VER_OPER_AUTO = 0x3,
};

enum mlx5_dct_atomic_mode {
	MLX5_ATOMIC_MODE_DCT_OFF = 20,
	MLX5_ATOMIC_MODE_DCT_NONE = 0 << MLX5_ATOMIC_MODE_DCT_OFF,
	MLX5_ATOMIC_MODE_DCT_IB_COMP = 1 << MLX5_ATOMIC_MODE_DCT_OFF,
	MLX5_ATOMIC_MODE_DCT_CX = 2 << MLX5_ATOMIC_MODE_DCT_OFF,
};

enum {
	MLX5_ATOMIC_OPS_CMP_SWAP = 1 << 0,
	MLX5_ATOMIC_OPS_FETCH_ADD = 1 << 1,
};

enum mlx5_page_fault_resume_flags {
	MLX5_PAGE_FAULT_RESUME_REQUESTOR = 1 << 0,
	MLX5_PAGE_FAULT_RESUME_WRITE = 1 << 1,
	MLX5_PAGE_FAULT_RESUME_RDMA = 1 << 2,
	MLX5_PAGE_FAULT_RESUME_ERROR = 1 << 7,
};

enum dbg_rsc_type {
	MLX5_DBG_RSC_QP,
	MLX5_DBG_RSC_EQ,
	MLX5_DBG_RSC_CQ,
};

enum port_state_policy {
	MLX5_POLICY_DOWN = 0,
	MLX5_POLICY_UP = 1,
	MLX5_POLICY_FOLLOW = 2,
	MLX5_POLICY_INVALID = 0xffffffff
};
struct mlx5_field_desc {
	struct dentry *dent;
	int i;
};

struct mlx5_rsc_debug {
	struct mlx5_core_dev *dev;
	void *object;
	enum dbg_rsc_type type;
	struct dentry *root;
	struct mlx5_field_desc fields[0];
};

enum mlx5_dev_event {
	MLX5_DEV_EVENT_SYS_ERROR,
	MLX5_DEV_EVENT_PORT_UP,
	MLX5_DEV_EVENT_PORT_DOWN,
	MLX5_DEV_EVENT_PORT_INITIALIZED,
	MLX5_DEV_EVENT_LID_CHANGE,
	MLX5_DEV_EVENT_PKEY_CHANGE,
	MLX5_DEV_EVENT_GUID_CHANGE,
	MLX5_DEV_EVENT_CLIENT_REREG,
	MLX5_DEV_EVENT_PPS,
	MLX5_DEV_EVENT_DELAY_DROP_TIMEOUT,
};

enum mlx5_port_status {
	MLX5_PORT_UP = 1,
	MLX5_PORT_DOWN = 2,
};

enum mlx5_eq_type {
	MLX5_EQ_TYPE_COMP,
	MLX5_EQ_TYPE_ASYNC,
#ifdef CONFIG_INFINIBAND_ON_DEMAND_PAGING
	MLX5_EQ_TYPE_PF,
#endif
};

struct mlx5_bfreg_info {
	u32 *sys_pages;
	int num_low_latency_bfregs;
	unsigned int *count;

	/*
	 * protect bfreg allocation data structs
	 */
	struct mutex lock;
	u32 ver;
	bool lib_uar_4k;
	u32 num_sys_pages;
	u32 num_static_sys_pages;
	u32 total_num_bfregs;
	u32 num_dyn_bfregs;
};

struct mlx5_cmd_first {
	__be32 data[4];
};

struct mlx5_cmd_msg {
	struct list_head list;
	struct cmd_msg_cache *parent;
	u32 len;
	struct mlx5_cmd_first first;
	struct mlx5_cmd_mailbox *next;
};

struct mlx5_cmd_debug {
	struct dentry *dbg_root;
	struct dentry *dbg_in;
	struct dentry *dbg_out;
	struct dentry *dbg_outlen;
	struct dentry *dbg_status;
	struct dentry *dbg_run;
	void *in_msg;
	void *out_msg;
	u8 status;
	u16 inlen;
	u16 outlen;
};

struct cmd_msg_cache {
	/* protect block chain allocations
	 */
	spinlock_t lock;
	struct list_head head;
	unsigned int max_inbox_size;
	unsigned int num_ent;
};

enum {
	MLX5_NUM_COMMAND_CACHES = 5,
};

struct mlx5_cmd_stats {
	u64 sum;
	u64 n;
	struct dentry *root;
	struct dentry *avg;
	struct dentry *count;
	/* protect command average calculations */
	spinlock_t lock;
};
struct mlx5_cmd {
	void *cmd_alloc_buf;
	dma_addr_t alloc_dma;
	int alloc_size;
	void *cmd_buf;
	dma_addr_t dma;
	u16 cmdif_rev;
	u8 log_sz;
	u8 log_stride;
	int max_reg_cmds;
	int events;
	u32 __iomem *vector;

	/* protect command queue allocations
	 */
	spinlock_t alloc_lock;

	/* protect token allocations
	 */
	spinlock_t token_lock;
	u8 token;
	unsigned long bitmask;
	char wq_name[MLX5_CMD_WQ_MAX_NAME];
	struct workqueue_struct *wq;
	struct semaphore sem;
	struct semaphore pages_sem;
	int mode;
	struct mlx5_cmd_work_ent *ent_arr[MLX5_MAX_COMMANDS];
	struct dma_pool *pool;
	struct mlx5_cmd_debug dbg;
	struct cmd_msg_cache cache[MLX5_NUM_COMMAND_CACHES];
	int checksum_disabled;
	struct mlx5_cmd_stats stats[MLX5_CMD_OP_MAX];
};

struct mlx5_port_caps {
	int gid_table_len;
	int pkey_table_len;
	u8 ext_port_cap;
	bool has_smi;
};

struct mlx5_cmd_mailbox {
	void *buf;
	dma_addr_t dma;
	struct mlx5_cmd_mailbox *next;
};

struct mlx5_buf_list {
	void *buf;
	dma_addr_t map;
};

struct mlx5_frag_buf {
	struct mlx5_buf_list *frags;
	int npages;
	int size;
	u8 page_shift;
};

struct mlx5_frag_buf_ctrl {
	struct mlx5_frag_buf frag_buf;
	u32 sz_m1;
	u16 frag_sz_m1;
	u16 strides_offset;
	u8 log_sz;
	u8 log_stride;
	u8 log_frag_strides;
};

struct mlx5_eq_tasklet {
	struct list_head list;
	struct list_head process_list;
	struct tasklet_struct task;
	/* lock on completion tasklet list */
	spinlock_t lock;
};

struct mlx5_eq_pagefault {
	struct work_struct work;
	/* Pagefaults lock */
	spinlock_t lock;
	struct workqueue_struct *wq;
	mempool_t *pool;
};

struct mlx5_cq_table {
	/* protect radix tree */
	spinlock_t lock;
	struct radix_tree_root tree;
};

struct mlx5_eq {
	struct mlx5_core_dev *dev;
	struct mlx5_cq_table cq_table;
	__be32 __iomem *doorbell;
	u32 cons_index;
	struct mlx5_frag_buf buf;
	int size;
	unsigned int irqn;
	u8 eqn;
	int nent;
	u64 mask;
	struct list_head list;
	int index;
	struct mlx5_rsc_debug *dbg;
	enum mlx5_eq_type type;
	union {
		struct mlx5_eq_tasklet tasklet_ctx;
#ifdef CONFIG_INFINIBAND_ON_DEMAND_PAGING
		struct mlx5_eq_pagefault pf_ctx;
#endif
	};
};
struct mlx5_core_psv {
	u32 psv_idx;
	struct psv_layout {
		u32 pd;
		u16 syndrome;
		u16 reserved;
		u16 bg;
		u16 app_tag;
		u32 ref_tag;
	} psv;
};

struct mlx5_core_sig_ctx {
	struct mlx5_core_psv psv_memory;
	struct mlx5_core_psv psv_wire;
	struct ib_sig_err err_item;
	bool sig_status_checked;
	bool sig_err_exists;
	u32 sigerr_count;
};

enum {
	MLX5_MKEY_MR = 1,
	MLX5_MKEY_MW,
};

struct mlx5_core_mkey {
	u64 iova;
	u64 size;
	u32 key;
	u32 pd;
	u32 type;
};

#define MLX5_24BIT_MASK ((1 << 24) - 1)

enum mlx5_res_type {
	MLX5_RES_QP = MLX5_EVENT_QUEUE_TYPE_QP,
	MLX5_RES_RQ = MLX5_EVENT_QUEUE_TYPE_RQ,
	MLX5_RES_SQ = MLX5_EVENT_QUEUE_TYPE_SQ,
	MLX5_RES_SRQ = 3,
	MLX5_RES_XSRQ = 4,
	MLX5_RES_XRQ = 5,
	MLX5_RES_DCT = MLX5_EVENT_QUEUE_TYPE_DCT,
};

struct mlx5_core_rsc_common {
	enum mlx5_res_type res;
	atomic_t refcount;
	struct completion free;
};

struct mlx5_core_srq {
	struct mlx5_core_rsc_common common; /* must be first */
	u32 srqn;
	int max;
	size_t max_gs;
	size_t max_avail_gather;
	int wqe_shift;
	void (*event) (struct mlx5_core_srq *, enum mlx5_event);

	atomic_t refcount;
	struct completion free;
};

struct mlx5_eq_table {
	void __iomem *update_ci;
	void __iomem *update_arm_ci;
	struct list_head comp_eqs_list;
	struct mlx5_eq pages_eq;
	struct mlx5_eq async_eq;
	struct mlx5_eq cmd_eq;
#ifdef CONFIG_INFINIBAND_ON_DEMAND_PAGING
	struct mlx5_eq pfault_eq;
#endif
	int num_comp_vectors;
	/* protect EQs list
	 */
	spinlock_t lock;
};

struct mlx5_uars_page {
	void __iomem *map;
	bool wc;
	u32 index;
	struct list_head list;
	unsigned int bfregs;
	unsigned long *reg_bitmap; /* for non fast path bf regs */
	unsigned long *fp_bitmap;
	unsigned int reg_avail;
	unsigned int fp_avail;
	struct kref ref_count;
	struct mlx5_core_dev *mdev;
};

struct mlx5_bfreg_head {
	/* protect blue flame registers allocations */
	struct mutex lock;
	struct list_head list;
};

struct mlx5_bfreg_data {
	struct mlx5_bfreg_head reg_head;
	struct mlx5_bfreg_head wc_head;
};

struct mlx5_sq_bfreg {
	void __iomem *map;
	struct mlx5_uars_page *up;
	bool wc;
	u32 index;
	unsigned int offset;
};
struct mlx5_core_health {
	struct health_buffer __iomem *health;
	__be32 __iomem *health_counter;
	struct timer_list timer;
	u32 prev;
	int miss_counter;
	bool sick;
	/* wq spinlock to synchronize draining */
	spinlock_t wq_lock;
	struct workqueue_struct *wq;
	unsigned long flags;
	struct work_struct work;
	struct delayed_work recover_work;
};

struct mlx5_qp_table {
	/* protect radix tree
	 */
	spinlock_t lock;
	struct radix_tree_root tree;
};

struct mlx5_srq_table {
	/* protect radix tree
	 */
	spinlock_t lock;
	struct radix_tree_root tree;
};

struct mlx5_mkey_table {
	/* protect radix tree
	 */
	rwlock_t lock;
	struct radix_tree_root tree;
};

struct mlx5_vf_context {
	int enabled;
	u64 port_guid;
	u64 node_guid;
	enum port_state_policy policy;
};

struct mlx5_core_sriov {
	struct mlx5_vf_context *vfs_ctx;
	int num_vfs;
	int enabled_vfs;
};

struct mlx5_irq_info {
	cpumask_var_t mask;
	char name[MLX5_MAX_IRQ_NAME];
};

struct mlx5_fc_stats {
	struct rb_root counters;
	struct list_head addlist;
	/* protect addlist add/splice operations */
	spinlock_t addlist_lock;

	struct workqueue_struct *wq;
	struct delayed_work work;
	unsigned long next_query;
	unsigned long sampling_interval; /* jiffies */
};

struct mlx5_mpfs;
struct mlx5_eswitch;
struct mlx5_lag;
struct mlx5_pagefault;

struct mlx5_rate_limit {
	u32 rate;
	u32 max_burst_sz;
	u16 typical_pkt_sz;
};

struct mlx5_rl_entry {
	struct mlx5_rate_limit rl;
	u16 index;
	u16 refcount;
};

struct mlx5_rl_table {
	/* protect rate limit table */
	struct mutex rl_lock;
	u16 max_size;
	u32 max_rate;
	u32 min_rate;
	struct mlx5_rl_entry *rl_entry;
};

enum port_module_event_status_type {
	MLX5_MODULE_STATUS_PLUGGED = 0x1,
	MLX5_MODULE_STATUS_UNPLUGGED = 0x2,
	MLX5_MODULE_STATUS_ERROR = 0x3,
	MLX5_MODULE_STATUS_NUM = 0x3,
};

enum port_module_event_error_type {
	MLX5_MODULE_EVENT_ERROR_POWER_BUDGET_EXCEEDED,
	MLX5_MODULE_EVENT_ERROR_LONG_RANGE_FOR_NON_MLNX_CABLE_MODULE,
	MLX5_MODULE_EVENT_ERROR_BUS_STUCK,
	MLX5_MODULE_EVENT_ERROR_NO_EEPROM_RETRY_TIMEOUT,
	MLX5_MODULE_EVENT_ERROR_ENFORCE_PART_NUMBER_LIST,
	MLX5_MODULE_EVENT_ERROR_UNKNOWN_IDENTIFIER,
	MLX5_MODULE_EVENT_ERROR_HIGH_TEMPERATURE,
	MLX5_MODULE_EVENT_ERROR_BAD_CABLE,
	MLX5_MODULE_EVENT_ERROR_UNKNOWN,
	MLX5_MODULE_EVENT_ERROR_NUM,
};

struct mlx5_port_module_event_stats {
	u64 status_counters[MLX5_MODULE_STATUS_NUM];
	u64 error_counters[MLX5_MODULE_EVENT_ERROR_NUM];
};
struct mlx5_priv {
	char name[MLX5_MAX_NAME_LEN];
	struct mlx5_eq_table eq_table;
	struct mlx5_irq_info *irq_info;

	/* pages stuff */
	struct workqueue_struct *pg_wq;
	struct rb_root page_root;
	int fw_pages;
	atomic_t reg_pages;
	struct list_head free_list;
	int vfs_pages;

	struct mlx5_core_health health;

	struct mlx5_srq_table srq_table;

	/* start: qp stuff */
	struct mlx5_qp_table qp_table;
	struct dentry *qp_debugfs;
	struct dentry *eq_debugfs;
	struct dentry *cq_debugfs;
	struct dentry *cmdif_debugfs;
	/* end: qp stuff */

	/* start: mkey stuff */
	struct mlx5_mkey_table mkey_table;
	/* end: mkey stuff */

	/* start: alloc stuff */
	/* protect buffer allocation according to numa node */
	struct mutex alloc_mutex;
	int numa_node;

	struct mutex pgdir_mutex;
	struct list_head pgdir_list;
	/* end: alloc stuff */
	struct dentry *dbg_root;

	/* protect mkey key part */
	spinlock_t mkey_lock;
	u8 mkey_key;

	struct list_head dev_list;
	struct list_head ctx_list;
	spinlock_t ctx_lock;

	struct list_head waiting_events_list;
	bool is_accum_events;

	struct mlx5_flow_steering *steering;
	struct mlx5_mpfs *mpfs;
	struct mlx5_eswitch *eswitch;
	struct mlx5_core_sriov sriov;
	struct mlx5_lag *lag;
	unsigned long pci_dev_data;
	struct mlx5_fc_stats fc_stats;
	struct mlx5_rl_table rl_table;

	struct mlx5_port_module_event_stats pme_stats;

#ifdef CONFIG_INFINIBAND_ON_DEMAND_PAGING
	void (*pfault)(struct mlx5_core_dev *dev,
		       void *context,
		       struct mlx5_pagefault *pfault);
	void *pfault_ctx;
	struct srcu_struct pfault_srcu;
#endif
	struct mlx5_bfreg_data bfregs;
	struct mlx5_uars_page *uar;
};
enum mlx5_device_state {
	MLX5_DEVICE_STATE_UP,
	MLX5_DEVICE_STATE_INTERNAL_ERROR,
};

enum mlx5_interface_state {
	MLX5_INTERFACE_STATE_UP = BIT(0),
};

enum mlx5_pci_status {
	MLX5_PCI_STATUS_DISABLED,
	MLX5_PCI_STATUS_ENABLED,
};

enum mlx5_pagefault_type_flags {
	MLX5_PFAULT_REQUESTOR = 1 << 0,
	MLX5_PFAULT_WRITE = 1 << 1,
	MLX5_PFAULT_RDMA = 1 << 2,
};

/* Contains the details of a pagefault. */
struct mlx5_pagefault {
	u32 bytes_committed;
	u32 token;
	u8 event_subtype;
	u8 type;
	union {
		/* Initiator or send message responder pagefault details. */
		struct {
			/* Received packet size, only valid for responders. */
			u32 packet_size;
			/*
			 * Number of resource holding WQE, depends on type.
			 */
			u32 wq_num;
			/*
			 * WQE index. Refers to either the send queue or
			 * receive queue, according to event_subtype.
			 */
			u16 wqe_index;
		} wqe;
		/* RDMA responder pagefault details */
		struct {
			u32 r_key;
			/*
			 * Received packet size, minimal size page fault
			 * resolution required for forward progress.
			 */
			u32 packet_size;
			u32 rdma_op_len;
			u64 rdma_va;
		} rdma;
	};

	struct mlx5_eq *eq;
	struct work_struct work;
};

struct mlx5_td {
	struct list_head tirs_list;
	u32 tdn;
};

struct mlx5e_resources {
	u32 pdn;
	struct mlx5_td td;
	struct mlx5_core_mkey mkey;
	struct mlx5_sq_bfreg bfreg;
};

#define MLX5_MAX_RESERVED_GIDS 8

struct mlx5_rsvd_gids {
	unsigned int start;
	unsigned int count;
	struct ida ida;
};

#define MAX_PIN_NUM 8
struct mlx5_pps {
	u8 pin_caps[MAX_PIN_NUM];
	struct work_struct out_work;
	u64 start[MAX_PIN_NUM];
	u8 enabled;
};

struct mlx5_clock {
	rwlock_t lock;
	struct cyclecounter cycles;
	struct timecounter tc;
	struct hwtstamp_config hwtstamp_config;
	u32 nominal_c_mult;
	unsigned long overflow_period;
	struct delayed_work overflow_work;
	struct mlx5_core_dev *mdev;
	struct ptp_clock *ptp;
	struct ptp_clock_info ptp_info;
	struct mlx5_pps pps_info;
};
struct mlx5_fw_tracer;
struct mlx5_vxlan;

struct mlx5_core_dev {
	struct pci_dev *pdev;
	/* sync pci state */
	struct mutex pci_status_mutex;
	enum mlx5_pci_status pci_status;
	u8 rev_id;
	char board_id[MLX5_BOARD_ID_LEN];
	struct mlx5_cmd cmd;
	struct mlx5_port_caps port_caps[MLX5_MAX_PORTS];
	struct {
		u32 hca_cur[MLX5_CAP_NUM][MLX5_UN_SZ_DW(hca_cap_union)];
		u32 hca_max[MLX5_CAP_NUM][MLX5_UN_SZ_DW(hca_cap_union)];
		u32 pcam[MLX5_ST_SZ_DW(pcam_reg)];
		u32 mcam[MLX5_ST_SZ_DW(mcam_reg)];
		u32 fpga[MLX5_ST_SZ_DW(fpga_cap)];
		u32 qcam[MLX5_ST_SZ_DW(qcam_reg)];
	} caps;
	phys_addr_t iseg_base;
	struct mlx5_init_seg __iomem *iseg;
	enum mlx5_device_state state;
	/* sync interface state */
	struct mutex intf_state_mutex;
	unsigned long intf_state;
	void (*event) (struct mlx5_core_dev *dev,
		       enum mlx5_dev_event event,
		       unsigned long param);
	struct mlx5_priv priv;
	struct mlx5_profile *profile;
	atomic_t num_qps;
	u32 issi;
	struct mlx5e_resources mlx5e_res;
	struct mlx5_vxlan *vxlan;
	struct {
		struct mlx5_rsvd_gids reserved_gids;
		u32 roce_en;
	} roce;
#ifdef CONFIG_MLX5_FPGA
	struct mlx5_fpga_device *fpga;
#endif
#ifdef CONFIG_RFS_ACCEL
	struct cpu_rmap *rmap;
#endif
	struct mlx5_clock clock;
	struct mlx5_ib_clock_info *clock_info;
	struct page *clock_info_page;
	struct mlx5_fw_tracer *tracer;
};

struct mlx5_db {
	__be32 *db;
	union {
		struct mlx5_db_pgdir *pgdir;
		struct mlx5_ib_user_db_page *user_page;
	} u;
	dma_addr_t dma;
	int index;
};

enum {
	MLX5_COMP_EQ_SIZE = 1024,
};

enum {
	MLX5_PTYS_IB = 1 << 0,
	MLX5_PTYS_EN = 1 << 2,
};

typedef void (*mlx5_cmd_cbk_t)(int status, void *context);

enum {
	MLX5_CMD_ENT_STATE_PENDING_COMP,
};

struct mlx5_cmd_work_ent {
	unsigned long state;
	struct mlx5_cmd_msg *in;
	struct mlx5_cmd_msg *out;
	void *uout;
	int uout_size;
	mlx5_cmd_cbk_t callback;
	struct delayed_work cb_timeout_work;
	void *context;
	int idx;
	struct completion done;
	struct mlx5_cmd *cmd;
	struct work_struct work;
	struct mlx5_cmd_layout *lay;
	int ret;
	int page_queue;
	u8 status;
	u8 token;
	u64 ts1;
	u64 ts2;
	u16 op;
	bool polling;
};
struct mlx5_pas {
	u64 pa;
	u8 log_sz;
};

enum phy_port_state {
	MLX5_AAA_111
};

struct mlx5_hca_vport_context {
	u32 field_select;
	bool sm_virt_aware;
	bool has_smi;
	bool has_raw;
	enum port_state_policy policy;
	enum phy_port_state phys_state;
	enum ib_port_state vport_state;
	u8 port_physical_state;
	u64 sys_image_guid;
	u64 port_guid;
	u64 node_guid;
	u32 cap_mask1;
	u32 cap_mask1_perm;
	u32 cap_mask2;
	u32 cap_mask2_perm;
	u16 lid;
	u8 init_type_reply; /* bitmask: see ib spec 14.2.5.6 InitTypeReply */
	u8 lmc;
	u8 subnet_timeout;
	u16 sm_lid;
	u8 sm_sl;
	u16 qkey_violation_counter;
	u16 pkey_violation_counter;
	bool grh_required;
};

static inline void *mlx5_buf_offset(struct mlx5_frag_buf *buf, int offset)
{
	return buf->frags->buf + offset;
}

#define STRUCT_FIELD(header, field) \
	.struct_offset_bytes = offsetof(struct ib_unpacked_ ## header, field), \
	.struct_size_bytes = sizeof((struct ib_unpacked_ ## header *)0)->field

static inline struct mlx5_core_dev *pci2mlx5_core_dev(struct pci_dev *pdev)
{
	return pci_get_drvdata(pdev);
}

extern struct dentry *mlx5_debugfs_root;

static inline u16 fw_rev_maj(struct mlx5_core_dev *dev)
{
	return ioread32be(&dev->iseg->fw_rev) & 0xffff;
}

static inline u16 fw_rev_min(struct mlx5_core_dev *dev)
{
	return ioread32be(&dev->iseg->fw_rev) >> 16;
}

static inline u16 fw_rev_sub(struct mlx5_core_dev *dev)
{
	return ioread32be(&dev->iseg->cmdif_rev_fw_sub) & 0xffff;
}

static inline u16 cmdif_rev(struct mlx5_core_dev *dev)
{
	return ioread32be(&dev->iseg->cmdif_rev_fw_sub) >> 16;
}

static inline u32 mlx5_base_mkey(const u32 key)
{
	return key & 0xffffff00u;
}

static inline void mlx5_fill_fbc_offset(u8 log_stride, u8 log_sz,
					u16 strides_offset,
					struct mlx5_frag_buf_ctrl *fbc)
{
	fbc->log_stride = log_stride;
	fbc->log_sz = log_sz;
	fbc->sz_m1 = (1 << fbc->log_sz) - 1;
	fbc->log_frag_strides = PAGE_SHIFT - fbc->log_stride;
	fbc->frag_sz_m1 = (1 << fbc->log_frag_strides) - 1;
	fbc->strides_offset = strides_offset;
}

static inline void mlx5_fill_fbc(u8 log_stride, u8 log_sz,
				 struct mlx5_frag_buf_ctrl *fbc)
{
	mlx5_fill_fbc_offset(log_stride, log_sz, 0, fbc);
}

static inline void mlx5_core_init_cq_frag_buf(struct mlx5_frag_buf_ctrl *fbc,
					      void *cqc)
{
	mlx5_fill_fbc(6 + MLX5_GET(cqc, cqc, cqe_sz),
		      MLX5_GET(cqc, cqc, log_cq_size),
		      fbc);
}

static inline void *mlx5_frag_buf_get_wqe(struct mlx5_frag_buf_ctrl *fbc,
					  u32 ix)
{
	unsigned int frag;

	ix += fbc->strides_offset;
	frag = ix >> fbc->log_frag_strides;

	return fbc->frag_buf.frags[frag].buf +
		((fbc->frag_sz_m1 & ix) << fbc->log_stride);
}

static inline u32
mlx5_frag_buf_get_idx_last_contig_stride(struct mlx5_frag_buf_ctrl *fbc, u32 ix)
{
	u32 last_frag_stride_idx = (ix + fbc->strides_offset) | fbc->frag_sz_m1;

	return min_t(u32, last_frag_stride_idx - fbc->strides_offset, fbc->sz_m1);
}
int mlx5_cmd_init(struct mlx5_core_dev *dev);
void mlx5_cmd_cleanup(struct mlx5_core_dev *dev);
void mlx5_cmd_use_events(struct mlx5_core_dev *dev);
void mlx5_cmd_use_polling(struct mlx5_core_dev *dev);

int mlx5_cmd_exec(struct mlx5_core_dev *dev, void *in, int in_size, void *out,
		  int out_size);
int mlx5_cmd_exec_cb(struct mlx5_core_dev *dev, void *in, int in_size,
		     void *out, int out_size, mlx5_cmd_cbk_t callback,
		     void *context);
int mlx5_cmd_exec_polling(struct mlx5_core_dev *dev, void *in, int in_size,
			  void *out, int out_size);
void mlx5_cmd_mbox_status(void *out, u8 *status, u32 *syndrome);

int mlx5_core_get_caps(struct mlx5_core_dev *dev, enum mlx5_cap_type cap_type);
int mlx5_cmd_alloc_uar(struct mlx5_core_dev *dev, u32 *uarn);
int mlx5_cmd_free_uar(struct mlx5_core_dev *dev, u32 uarn);
void mlx5_health_cleanup(struct mlx5_core_dev *dev);
int mlx5_health_init(struct mlx5_core_dev *dev);
void mlx5_start_health_poll(struct mlx5_core_dev *dev);
void mlx5_stop_health_poll(struct mlx5_core_dev *dev, bool disable_health);
void mlx5_drain_health_wq(struct mlx5_core_dev *dev);
void mlx5_trigger_health_work(struct mlx5_core_dev *dev);
void mlx5_drain_health_recovery(struct mlx5_core_dev *dev);
int mlx5_buf_alloc_node(struct mlx5_core_dev *dev, int size,
			struct mlx5_frag_buf *buf, int node);
int mlx5_buf_alloc(struct mlx5_core_dev *dev,
		   int size, struct mlx5_frag_buf *buf);
void mlx5_buf_free(struct mlx5_core_dev *dev, struct mlx5_frag_buf *buf);
int mlx5_frag_buf_alloc_node(struct mlx5_core_dev *dev, int size,
			     struct mlx5_frag_buf *buf, int node);
void mlx5_frag_buf_free(struct mlx5_core_dev *dev, struct mlx5_frag_buf *buf);
struct mlx5_cmd_mailbox *mlx5_alloc_cmd_mailbox_chain(struct mlx5_core_dev *dev,
						      gfp_t flags, int npages);
void mlx5_free_cmd_mailbox_chain(struct mlx5_core_dev *dev,
				 struct mlx5_cmd_mailbox *head);
int mlx5_core_create_srq(struct mlx5_core_dev *dev, struct mlx5_core_srq *srq,
			 struct mlx5_srq_attr *in);
int mlx5_core_destroy_srq(struct mlx5_core_dev *dev, struct mlx5_core_srq *srq);
int mlx5_core_query_srq(struct mlx5_core_dev *dev, struct mlx5_core_srq *srq,
			struct mlx5_srq_attr *out);
int mlx5_core_arm_srq(struct mlx5_core_dev *dev, struct mlx5_core_srq *srq,
		      u16 lwm, int is_srq);
void mlx5_init_mkey_table(struct mlx5_core_dev *dev);
void mlx5_cleanup_mkey_table(struct mlx5_core_dev *dev);
int mlx5_core_create_mkey_cb(struct mlx5_core_dev *dev,
			     struct mlx5_core_mkey *mkey,
			     u32 *in, int inlen,
			     u32 *out, int outlen,
			     mlx5_cmd_cbk_t callback, void *context);
int mlx5_core_create_mkey(struct mlx5_core_dev *dev,
			  struct mlx5_core_mkey *mkey,
			  u32 *in, int inlen);
int mlx5_core_destroy_mkey(struct mlx5_core_dev *dev,
			   struct mlx5_core_mkey *mkey);
int mlx5_core_query_mkey(struct mlx5_core_dev *dev, struct mlx5_core_mkey *mkey,
			 u32 *out, int outlen);
int mlx5_core_alloc_pd(struct mlx5_core_dev *dev, u32 *pdn);
int mlx5_core_dealloc_pd(struct mlx5_core_dev *dev, u32 pdn);
int mlx5_core_mad_ifc(struct mlx5_core_dev *dev, const void *inb, void *outb,
		      u16 opmod, u8 port);
void mlx5_pagealloc_init(struct mlx5_core_dev *dev);
void mlx5_pagealloc_cleanup(struct mlx5_core_dev *dev);
int mlx5_pagealloc_start(struct mlx5_core_dev *dev);
void mlx5_pagealloc_stop(struct mlx5_core_dev *dev);
void mlx5_core_req_pages_handler(struct mlx5_core_dev *dev, u16 func_id,
				 s32 npages);
int mlx5_satisfy_startup_pages(struct mlx5_core_dev *dev, int boot);
int mlx5_reclaim_startup_pages(struct mlx5_core_dev *dev);
void mlx5_register_debugfs(void);
void mlx5_unregister_debugfs(void);
void mlx5_fill_page_array(struct mlx5_frag_buf *buf, __be64 *pas);
void mlx5_fill_page_frag_array(struct mlx5_frag_buf *frag_buf, __be64 *pas);
void mlx5_rsc_event(struct mlx5_core_dev *dev, u32 rsn, int event_type);
void mlx5_srq_event(struct mlx5_core_dev *dev, u32 srqn, int event_type);
struct mlx5_core_srq *mlx5_core_get_srq(struct mlx5_core_dev *dev, u32 srqn);
int mlx5_vector2eqn(struct mlx5_core_dev *dev, int vector, int *eqn,
		    unsigned int *irqn);
int mlx5_core_attach_mcg(struct mlx5_core_dev *dev, union ib_gid *mgid, u32 qpn);
int mlx5_core_detach_mcg(struct mlx5_core_dev *dev, union ib_gid *mgid, u32 qpn);
int mlx5_qp_debugfs_init(struct mlx5_core_dev *dev);
void mlx5_qp_debugfs_cleanup(struct mlx5_core_dev *dev);
int mlx5_core_access_reg(struct mlx5_core_dev *dev, void *data_in,
			 int size_in, void *data_out, int size_out,
			 u16 reg_num, int arg, int write);
int mlx5_db_alloc(struct mlx5_core_dev *dev, struct mlx5_db *db);
int mlx5_db_alloc_node(struct mlx5_core_dev *dev, struct mlx5_db *db,
		       int node);
void mlx5_db_free(struct mlx5_core_dev *dev, struct mlx5_db *db);
const char *mlx5_command_str(int command);
int mlx5_cmdif_debugfs_init(struct mlx5_core_dev *dev);
void mlx5_cmdif_debugfs_cleanup(struct mlx5_core_dev *dev);
int mlx5_core_create_psv(struct mlx5_core_dev *dev, u32 pdn,
			 int npsvs, u32 *sig_index);
int mlx5_core_destroy_psv(struct mlx5_core_dev *dev, int psv_num);
void mlx5_core_put_rsc(struct mlx5_core_rsc_common *common);
int mlx5_query_odp_caps(struct mlx5_core_dev *dev,
			struct mlx5_odp_caps *odp_caps);
int mlx5_core_query_ib_ppcnt(struct mlx5_core_dev *dev,
			     u8 port_num, void *out, size_t sz);
#ifdef CONFIG_INFINIBAND_ON_DEMAND_PAGING
int mlx5_core_page_fault_resume(struct mlx5_core_dev *dev, u32 token,
				u32 wq_num, u8 type, int error);
#endif

int mlx5_init_rl_table(struct mlx5_core_dev *dev);
void mlx5_cleanup_rl_table(struct mlx5_core_dev *dev);
int mlx5_rl_add_rate(struct mlx5_core_dev *dev, u16 *index,
		     struct mlx5_rate_limit *rl);
void mlx5_rl_remove_rate(struct mlx5_core_dev *dev, struct mlx5_rate_limit *rl);
bool mlx5_rl_is_in_range(struct mlx5_core_dev *dev, u32 rate);
bool mlx5_rl_are_equal(struct mlx5_rate_limit *rl_0,
		       struct mlx5_rate_limit *rl_1);
int mlx5_alloc_bfreg(struct mlx5_core_dev *mdev, struct mlx5_sq_bfreg *bfreg,
		     bool map_wc, bool fast_path);
void mlx5_free_bfreg(struct mlx5_core_dev *mdev, struct mlx5_sq_bfreg *bfreg);

unsigned int mlx5_core_reserved_gids_count(struct mlx5_core_dev *dev);
int mlx5_core_roce_gid_set(struct mlx5_core_dev *dev, unsigned int index,
			   u8 roce_version, u8 roce_l3_type, const u8 *gid,
			   const u8 *mac, bool vlan, u16 vlan_id, u8 port_num);
static inline int fw_initializing(struct mlx5_core_dev *dev)
{
	return ioread32be(&dev->iseg->initializing) >> 31;
}

static inline u32 mlx5_mkey_to_idx(u32 mkey)
{
	return mkey >> 8;
}

static inline u32 mlx5_idx_to_mkey(u32 mkey_idx)
{
	return mkey_idx << 8;
}

static inline u8 mlx5_mkey_variant(u32 mkey)
{
	return mkey & 0xff;
}

enum {
	MLX5_PROF_MASK_QP_SIZE = (u64)1 << 0,
	MLX5_PROF_MASK_MR_CACHE = (u64)1 << 1,
};

enum {
	MR_CACHE_LAST_STD_ENTRY = 20,
	MLX5_IMR_MTT_CACHE_ENTRY,
	MLX5_IMR_KSM_CACHE_ENTRY,
	MAX_MR_CACHE_ENTRIES
};

enum {
	MLX5_INTERFACE_PROTOCOL_IB = 0,
	MLX5_INTERFACE_PROTOCOL_ETH = 1,
};

struct mlx5_interface {
	void * (*add)(struct mlx5_core_dev *dev);
	void (*remove)(struct mlx5_core_dev *dev, void *context);
	int (*attach)(struct mlx5_core_dev *dev, void *context);
	void (*detach)(struct mlx5_core_dev *dev, void *context);
	void (*event)(struct mlx5_core_dev *dev, void *context,
		      enum mlx5_dev_event event, unsigned long param);
	void (*pfault)(struct mlx5_core_dev *dev,
		       void *context,
		       struct mlx5_pagefault *pfault);
	void * (*get_dev)(void *context);
	int protocol;
	struct list_head list;
};

void *mlx5_get_protocol_dev(struct mlx5_core_dev *mdev, int protocol);
int mlx5_register_interface(struct mlx5_interface *intf);
void mlx5_unregister_interface(struct mlx5_interface *intf);
int mlx5_core_query_vendor_id(struct mlx5_core_dev *mdev, u32 *vendor_id);

int mlx5_cmd_create_vport_lag(struct mlx5_core_dev *dev);
int mlx5_cmd_destroy_vport_lag(struct mlx5_core_dev *dev);
bool mlx5_lag_is_active(struct mlx5_core_dev *dev);
struct net_device *mlx5_lag_get_roce_netdev(struct mlx5_core_dev *dev);
int mlx5_lag_query_cong_counters(struct mlx5_core_dev *dev,
				 u64 *values,
				 int num_counters,
				 size_t *offsets);
struct mlx5_uars_page *mlx5_get_uars_page(struct mlx5_core_dev *mdev);
void mlx5_put_uars_page(struct mlx5_core_dev *mdev, struct mlx5_uars_page *up);

#ifndef CONFIG_MLX5_CORE_IPOIB
static inline
struct net_device *mlx5_rdma_netdev_alloc(struct mlx5_core_dev *mdev,
					  struct ib_device *ibdev,
					  const char *name,
					  void (*setup)(struct net_device *))
{
	return ERR_PTR(-EOPNOTSUPP);
}
#else
struct net_device *mlx5_rdma_netdev_alloc(struct mlx5_core_dev *mdev,
					  struct ib_device *ibdev,
					  const char *name,
					  void (*setup)(struct net_device *));
#endif /* CONFIG_MLX5_CORE_IPOIB */

struct mlx5_profile {
	u64 mask;
	u8 log_max_qp;
	struct {
		int size;
		int limit;
	} mr_cache[MAX_MR_CACHE_ENTRIES];
};

enum {
	MLX5_PCI_DEV_IS_VF = 1 << 0,
};

static inline int mlx5_core_is_pf(struct mlx5_core_dev *dev)
{
	return !(dev->priv.pci_dev_data & MLX5_PCI_DEV_IS_VF);
}

#define MLX5_TOTAL_VPORTS(mdev) (1 + pci_sriov_get_totalvfs((mdev)->pdev))
#define MLX5_VPORT_MANAGER(mdev) \
	(MLX5_CAP_GEN(mdev, vport_group_manager) && \
	 (MLX5_CAP_GEN(mdev, port_type) == MLX5_CAP_PORT_TYPE_ETH) && \
	 mlx5_core_is_pf(mdev))

static inline int mlx5_get_gid_table_len(u16 param)
{
	if (param > 4) {
		pr_warn("gid table length is zero\n");
		return 0;
	}

	return 8 * (1 << param);
}

static inline bool mlx5_rl_is_supported(struct mlx5_core_dev *dev)
{
	return !!(dev->priv.rl_table.max_size);
}

static inline int mlx5_core_is_mp_slave(struct mlx5_core_dev *dev)
{
	return MLX5_CAP_GEN(dev, affiliate_nic_vport_criteria) &&
	       MLX5_CAP_GEN(dev, num_vhca_ports) <= 1;
}

static inline int mlx5_core_is_mp_master(struct mlx5_core_dev *dev)
{
	return MLX5_CAP_GEN(dev, num_vhca_ports) > 1;
}

static inline int mlx5_core_mp_enabled(struct mlx5_core_dev *dev)
{
	return mlx5_core_is_mp_slave(dev) ||
	       mlx5_core_is_mp_master(dev);
}

static inline int mlx5_core_native_port_num(struct mlx5_core_dev *dev)
{
	if (!mlx5_core_mp_enabled(dev))
		return 1;

	return MLX5_CAP_GEN(dev, native_port_num);
}

enum {
	MLX5_TRIGGERED_CMD_COMP = (u64)1 << 32,
};

static inline const struct cpumask *
mlx5_get_vector_affinity_hint(struct mlx5_core_dev *dev, int vector)
{
	return dev->priv.irq_info[vector + MLX5_EQ_VEC_COMP_BASE].mask;
}

#endif /* MLX5_DRIVER_H */