  1. /* QLogic qed NIC Driver
  2. * Copyright (c) 2015-2017 QLogic Corporation
  3. *
  4. * This software is available to you under a choice of one of two
  5. * licenses. You may choose to be licensed under the terms of the GNU
  6. * General Public License (GPL) Version 2, available from the file
  7. * COPYING in the main directory of this source tree, or the
  8. * OpenIB.org BSD license below:
  9. *
  10. * Redistribution and use in source and binary forms, with or
  11. * without modification, are permitted provided that the following
  12. * conditions are met:
  13. *
  14. * - Redistributions of source code must retain the above
  15. * copyright notice, this list of conditions and the following
  16. * disclaimer.
  17. *
  18. * - Redistributions in binary form must reproduce the above
  19. * copyright notice, this list of conditions and the following
  20. * disclaimer in the documentation and /or other materials
  21. * provided with the distribution.
  22. *
  23. * THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND,
  24. * EXPRESS OR IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF
  25. * MERCHANTABILITY, FITNESS FOR A PARTICULAR PURPOSE AND
  26. * NONINFRINGEMENT. IN NO EVENT SHALL THE AUTHORS OR COPYRIGHT HOLDERS
  27. * BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER LIABILITY, WHETHER IN AN
  28. * ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING FROM, OUT OF OR IN
  29. * CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER DEALINGS IN THE
  30. * SOFTWARE.
  31. */
  32. #ifndef _QED_CHAIN_H
  33. #define _QED_CHAIN_H
  34. #include <linux/types.h>
  35. #include <asm/byteorder.h>
  36. #include <linux/kernel.h>
  37. #include <linux/list.h>
  38. #include <linux/slab.h>
  39. #include <linux/qed/common_hsi.h>
/* How the chain's pages are linked together in memory */
enum qed_chain_mode {
	/* Each Page contains a next pointer at its end */
	QED_CHAIN_MODE_NEXT_PTR,

	/* Chain is a single page; a next ptr is not required */
	QED_CHAIN_MODE_SINGLE,

	/* Page pointers are located in a side list */
	QED_CHAIN_MODE_PBL,
};
/* Who drives the chain - the producer side, the consumer side, or both */
enum qed_chain_use_mode {
	QED_CHAIN_USE_TO_PRODUCE,		/* Chain starts empty */
	QED_CHAIN_USE_TO_CONSUME,		/* Chain starts full */
	QED_CHAIN_USE_TO_CONSUME_PRODUCE,	/* Chain starts empty */
};
/* Width of the chain's cyclic counters; selects u.chain16 vs u.chain32 */
enum qed_chain_cnt_type {
	/* The chain's size/prod/cons are kept in 16-bit variables */
	QED_CHAIN_CNT_TYPE_U16,

	/* The chain's size/prod/cons are kept in 32-bit variables */
	QED_CHAIN_CNT_TYPE_U32,
};
/* The "next pointer" element stored at the end of each page of a
 * NEXT_PTR-mode chain; links to the following page.
 */
struct qed_chain_next {
	struct regpair next_phys;	/* DMA address of the next page */
	void *next_virt;		/* virtual address of the next page */
};
/* Cyclic PBL page indices for chains using 16-bit counters */
struct qed_chain_pbl_u16 {
	u16 prod_page_idx;
	u16 cons_page_idx;
};
/* Cyclic PBL page indices for chains using 32-bit counters */
struct qed_chain_pbl_u32 {
	u32 prod_page_idx;
	u32 cons_page_idx;
};
/* An externally pre-allocated PBL table handed to the chain
 * [see qed_chain::b_external_pbl].
 */
struct qed_chain_ext_pbl {
	dma_addr_t p_pbl_phys;
	void *p_pbl_virt;
};
struct qed_chain_u16 {
	/* Cyclic index of next element to produce/consume */
	u16 prod_idx;
	u16 cons_idx;
};
struct qed_chain_u32 {
	/* Cyclic index of next element to produce/consume */
	u32 prod_idx;
	u32 cons_idx;
};
struct qed_chain {
	/* fastpath portion of the chain - required for commands such
	 * as produce / consume.
	 */

	/* Point to next element to produce/consume */
	void *p_prod_elem;
	void *p_cons_elem;

	/* Fastpath portions of the PBL [if exists] */
	struct {
		/* Table for keeping the virtual addresses of the chain pages,
		 * respectively to the physical addresses in the pbl table.
		 */
		void **pp_virt_addr_tbl;

		union {
			struct qed_chain_pbl_u16 u16;
			struct qed_chain_pbl_u32 u32;
		} c;
	} pbl;

	/* Cyclic prod/cons indices; the valid member is selected by
	 * cnt_type [see is_chain_u16()/is_chain_u32()].
	 */
	union {
		struct qed_chain_u16 chain16;
		struct qed_chain_u32 chain32;
	} u;

	/* Capacity counts only usable elements */
	u32 capacity;
	u32 page_cnt;

	enum qed_chain_mode mode;

	/* Elements information for fast calculations */
	u16 elem_per_page;
	u16 elem_per_page_mask;	/* elem_per_page - 1 */
	u16 elem_size;		/* size of a single element in bytes */
	u16 next_page_mask;	/* index-within-page at which a page ends */
	u16 usable_per_page;
	u8 elem_unusable;	/* slots per page taken by the next-ptr elem */
	u8 cnt_type;		/* enum qed_chain_cnt_type */

	/* Slowpath of the chain - required for initialization and destruction,
	 * but isn't involved in regular functionality.
	 */

	/* Base address of a pre-allocated buffer for pbl */
	struct {
		dma_addr_t p_phys_table;
		void *p_virt_table;
	} pbl_sp;

	/* Address of first page of the chain - the address is required
	 * for fastpath operation [consume/produce] but only for the SINGLE
	 * flavour which isn't considered fastpath [== SPQ].
	 */
	void *p_virt_addr;
	dma_addr_t p_phys_addr;

	/* Total number of elements [for entire chain] */
	u32 size;

	u8 intended_use;	/* enum qed_chain_use_mode */
	bool b_external_pbl;	/* PBL table was supplied by the caller */
};
/* Size in bytes of a single PBL entry [one 64-bit DMA address] */
#define QED_CHAIN_PBL_ENTRY_SIZE	(8)

/* Chain pages are always 4KB */
#define QED_CHAIN_PAGE_SIZE		(0x1000)

/* Total number of element slots that fit in one page */
#define ELEMS_PER_PAGE(elem_size)	(QED_CHAIN_PAGE_SIZE / (elem_size))

/* Slots per page consumed by the next-ptr element [NEXT_PTR mode only];
 * rounds sizeof(struct qed_chain_next) up to a whole number of elements.
 */
#define UNUSABLE_ELEMS_PER_PAGE(elem_size, mode)	 \
	(((mode) == QED_CHAIN_MODE_NEXT_PTR) ?		 \
	 (u8)(1 + ((sizeof(struct qed_chain_next) - 1) / \
		   (elem_size))) : 0)

/* Slots per page actually available to the user */
#define USABLE_ELEMS_PER_PAGE(elem_size, mode) \
	((u32)(ELEMS_PER_PAGE(elem_size) -     \
	       UNUSABLE_ELEMS_PER_PAGE(elem_size, mode)))

/* Number of pages required to hold elem_cnt usable elements */
#define QED_CHAIN_PAGE_CNT(elem_cnt, elem_size, mode) \
	DIV_ROUND_UP(elem_cnt, USABLE_ELEMS_PER_PAGE(elem_size, mode))

/* Counter-width predicates [see enum qed_chain_cnt_type] */
#define is_chain_u16(p) ((p)->cnt_type == QED_CHAIN_CNT_TYPE_U16)
#define is_chain_u32(p) ((p)->cnt_type == QED_CHAIN_CNT_TYPE_U32)
  152. /* Accessors */
  153. static inline u16 qed_chain_get_prod_idx(struct qed_chain *p_chain)
  154. {
  155. return p_chain->u.chain16.prod_idx;
  156. }
  157. static inline u16 qed_chain_get_cons_idx(struct qed_chain *p_chain)
  158. {
  159. return p_chain->u.chain16.cons_idx;
  160. }
  161. static inline u32 qed_chain_get_cons_idx_u32(struct qed_chain *p_chain)
  162. {
  163. return p_chain->u.chain32.cons_idx;
  164. }
/* Number of free usable elements in a 16-bit-counter chain */
static inline u16 qed_chain_get_elem_left(struct qed_chain *p_chain)
{
	u16 used;

	/* Adding 0x10000 keeps the subtraction non-negative when prod has
	 * wrapped around the 16-bit space below cons; the cast back to u16
	 * yields the cyclic distance prod - cons.
	 */
	used = (u16) (((u32)0x10000 +
		       (u32)p_chain->u.chain16.prod_idx) -
		      (u32)p_chain->u.chain16.cons_idx);
	/* In NEXT_PTR mode the indices also count next-ptr slots; remove
	 * one per page boundary crossed between cons and prod.
	 * NOTE(review): this deducts one slot per page, which matches
	 * elem_unusable only when elem_unusable == 1 - TODO confirm for
	 * element sizes smaller than sizeof(struct qed_chain_next).
	 */
	if (p_chain->mode == QED_CHAIN_MODE_NEXT_PTR)
		used -= p_chain->u.chain16.prod_idx / p_chain->elem_per_page -
			p_chain->u.chain16.cons_idx / p_chain->elem_per_page;

	return (u16)(p_chain->capacity - used);
}
/* Number of free usable elements in a 32-bit-counter chain */
static inline u32 qed_chain_get_elem_left_u32(struct qed_chain *p_chain)
{
	u32 used;

	/* 2^32 bias keeps the subtraction non-negative across a 32-bit
	 * wraparound; the cast back to u32 yields prod - cons cyclically.
	 */
	used = (u32) (((u64)0x100000000ULL +
		       (u64)p_chain->u.chain32.prod_idx) -
		      (u64)p_chain->u.chain32.cons_idx);
	/* Deduct the next-ptr slots counted between cons and prod
	 * [see the note in qed_chain_get_elem_left()].
	 */
	if (p_chain->mode == QED_CHAIN_MODE_NEXT_PTR)
		used -= p_chain->u.chain32.prod_idx / p_chain->elem_per_page -
			p_chain->u.chain32.cons_idx / p_chain->elem_per_page;

	return p_chain->capacity - used;
}
  187. static inline u16 qed_chain_get_usable_per_page(struct qed_chain *p_chain)
  188. {
  189. return p_chain->usable_per_page;
  190. }
  191. static inline u8 qed_chain_get_unusable_per_page(struct qed_chain *p_chain)
  192. {
  193. return p_chain->elem_unusable;
  194. }
  195. static inline u32 qed_chain_get_page_cnt(struct qed_chain *p_chain)
  196. {
  197. return p_chain->page_cnt;
  198. }
  199. static inline dma_addr_t qed_chain_get_pbl_phys(struct qed_chain *p_chain)
  200. {
  201. return p_chain->pbl_sp.p_phys_table;
  202. }
/**
 * @brief qed_chain_advance_page -
 *
 * Advance the next element across pages for a linked chain
 *
 * @param p_chain
 * @param p_next_elem	in/out - current element pointer; set to the first
 *			element of the following page
 * @param idx_to_inc	points to the prod/cons index - u16 or u32 according
 *			to the chain's cnt_type; in NEXT_PTR mode it is
 *			bumped past the unusable slots
 * @param page_to_inc	points to the prod/cons PBL page index - u16 or u32
 *			according to cnt_type; only used in PBL mode
 */
static inline void
qed_chain_advance_page(struct qed_chain *p_chain,
		       void **p_next_elem, void *idx_to_inc, void *page_to_inc)
{
	struct qed_chain_next *p_next = NULL;
	u32 page_index = 0;

	switch (p_chain->mode) {
	case QED_CHAIN_MODE_NEXT_PTR:
		/* The current element is the page's next-ptr element;
		 * follow it, and skip the index past the slots it occupies.
		 */
		p_next = *p_next_elem;
		*p_next_elem = p_next->next_virt;
		if (is_chain_u16(p_chain))
			*(u16 *)idx_to_inc += p_chain->elem_unusable;
		else
			*(u32 *)idx_to_inc += p_chain->elem_unusable;
		break;
	case QED_CHAIN_MODE_SINGLE:
		/* Single page - simply wrap back to the page start */
		*p_next_elem = p_chain->p_virt_addr;
		break;
	case QED_CHAIN_MODE_PBL:
		/* Cyclically advance the page index, then look up the new
		 * page's virtual address in the side table.
		 */
		if (is_chain_u16(p_chain)) {
			if (++(*(u16 *)page_to_inc) == p_chain->page_cnt)
				*(u16 *)page_to_inc = 0;
			page_index = *(u16 *)page_to_inc;
		} else {
			if (++(*(u32 *)page_to_inc) == p_chain->page_cnt)
				*(u32 *)page_to_inc = 0;
			page_index = *(u32 *)page_to_inc;
		}
		*p_next_elem = p_chain->pbl.pp_virt_addr_tbl[page_index];
	}
}
/* True if the [16-bit] index sits on the first slot of a page's unusable
 * region, i.e. it must be advanced past the next-ptr element.
 */
#define is_unusable_idx(p, idx)	\
	(((p)->u.chain16.idx & (p)->elem_per_page_mask) == (p)->usable_per_page)

/* 32-bit counterpart of is_unusable_idx() */
#define is_unusable_idx_u32(p, idx) \
	(((p)->u.chain32.idx & (p)->elem_per_page_mask) == (p)->usable_per_page)

/* Like is_unusable_idx(), but tests whether idx + 1 would be unusable */
#define is_unusable_next_idx(p, idx) \
	((((p)->u.chain16.idx + 1) & (p)->elem_per_page_mask) == \
	 (p)->usable_per_page)

/* 32-bit counterpart of is_unusable_next_idx() */
#define is_unusable_next_idx_u32(p, idx) \
	((((p)->u.chain32.idx + 1) & (p)->elem_per_page_mask) == \
	 (p)->usable_per_page)

/* If the named prod/cons index landed on a page's unusable region,
 * advance it past the next-ptr slots [elem_unusable is zero for chains
 * without a next-ptr element, making this a no-op adjustment].
 */
#define test_and_skip(p, idx)						\
do {									\
	if (is_chain_u16(p)) {						\
		if (is_unusable_idx(p, idx))				\
			(p)->u.chain16.idx += (p)->elem_unusable;	\
	} else {							\
		if (is_unusable_idx_u32(p, idx))			\
			(p)->u.chain32.idx += (p)->elem_unusable;	\
	}								\
} while (0)
/**
 * @brief qed_chain_return_produced -
 *
 * A chain in which the driver "Produces" elements should use this API
 * to indicate previous produced elements are now consumed.
 *
 * @param p_chain
 */
static inline void qed_chain_return_produced(struct qed_chain *p_chain)
{
	/* Advance the consumer index in the width matching cnt_type */
	if (is_chain_u16(p_chain))
		p_chain->u.chain16.cons_idx++;
	else
		p_chain->u.chain32.cons_idx++;

	/* Skip over the next-ptr slots if the index hit a page boundary */
	test_and_skip(p_chain, cons_idx);
}
/**
 * @brief qed_chain_produce -
 *
 * A chain in which the driver "Produces" elements should use this to get
 * a pointer to the next element which can be "Produced". It's driver
 * responsibility to validate that the chain has room for new element.
 *
 * @param p_chain
 *
 * @return void*, a pointer to next element
 */
static inline void *qed_chain_produce(struct qed_chain *p_chain)
{
	void *p_ret = NULL, *p_prod_idx, *p_prod_page_idx;

	if (is_chain_u16(p_chain)) {
		/* About to cross a page boundary [next_page_mask marks the
		 * last producible slot of a page] - advance to the next page
		 * before bumping the index.
		 */
		if ((p_chain->u.chain16.prod_idx &
		     p_chain->elem_per_page_mask) == p_chain->next_page_mask) {
			p_prod_idx = &p_chain->u.chain16.prod_idx;
			p_prod_page_idx = &p_chain->pbl.c.u16.prod_page_idx;
			qed_chain_advance_page(p_chain, &p_chain->p_prod_elem,
					       p_prod_idx, p_prod_page_idx);
		}
		p_chain->u.chain16.prod_idx++;
	} else {
		/* Same flow for 32-bit counters */
		if ((p_chain->u.chain32.prod_idx &
		     p_chain->elem_per_page_mask) == p_chain->next_page_mask) {
			p_prod_idx = &p_chain->u.chain32.prod_idx;
			p_prod_page_idx = &p_chain->pbl.c.u32.prod_page_idx;
			qed_chain_advance_page(p_chain, &p_chain->p_prod_elem,
					       p_prod_idx, p_prod_page_idx);
		}
		p_chain->u.chain32.prod_idx++;
	}

	/* Hand out the current slot and step the cached element pointer */
	p_ret = p_chain->p_prod_elem;
	p_chain->p_prod_elem = (void *)(((u8 *)p_chain->p_prod_elem) +
					p_chain->elem_size);

	return p_ret;
}
  318. /**
  319. * @brief qed_chain_get_capacity -
  320. *
  321. * Get the maximum number of BDs in chain
  322. *
  323. * @param p_chain
  324. * @param num
  325. *
  326. * @return number of unusable BDs
  327. */
  328. static inline u32 qed_chain_get_capacity(struct qed_chain *p_chain)
  329. {
  330. return p_chain->capacity;
  331. }
/**
 * @brief qed_chain_recycle_consumed -
 *
 * Returns an element which was previously consumed;
 * Increments producers so they could be written to FW.
 *
 * @param p_chain
 */
static inline void qed_chain_recycle_consumed(struct qed_chain *p_chain)
{
	/* Skip first [mirror image of qed_chain_return_produced(), which
	 * increments first]: if prod currently sits on a page's unusable
	 * region, move it past the next-ptr slots before producing.
	 */
	test_and_skip(p_chain, prod_idx);
	if (is_chain_u16(p_chain))
		p_chain->u.chain16.prod_idx++;
	else
		p_chain->u.chain32.prod_idx++;
}
/**
 * @brief qed_chain_consume -
 *
 * A Chain in which the driver utilizes data written by a different source
 * (i.e., FW) should use this to access passed buffers.
 *
 * @param p_chain
 *
 * @return void*, a pointer to the next buffer written
 */
static inline void *qed_chain_consume(struct qed_chain *p_chain)
{
	void *p_ret = NULL, *p_cons_idx, *p_cons_page_idx;

	if (is_chain_u16(p_chain)) {
		/* Crossing a page boundary - advance the element pointer
		 * (and PBL page index, for PBL chains) before consuming.
		 */
		if ((p_chain->u.chain16.cons_idx &
		     p_chain->elem_per_page_mask) == p_chain->next_page_mask) {
			p_cons_idx = &p_chain->u.chain16.cons_idx;
			p_cons_page_idx = &p_chain->pbl.c.u16.cons_page_idx;
			qed_chain_advance_page(p_chain, &p_chain->p_cons_elem,
					       p_cons_idx, p_cons_page_idx);
		}
		p_chain->u.chain16.cons_idx++;
	} else {
		/* Same flow for 32-bit counters */
		if ((p_chain->u.chain32.cons_idx &
		     p_chain->elem_per_page_mask) == p_chain->next_page_mask) {
			p_cons_idx = &p_chain->u.chain32.cons_idx;
			p_cons_page_idx = &p_chain->pbl.c.u32.cons_page_idx;
			qed_chain_advance_page(p_chain, &p_chain->p_cons_elem,
					       p_cons_idx, p_cons_page_idx);
		}
		p_chain->u.chain32.cons_idx++;
	}

	/* Hand out the current slot and step the cached element pointer */
	p_ret = p_chain->p_cons_elem;
	p_chain->p_cons_elem = (void *)(((u8 *)p_chain->p_cons_elem) +
					p_chain->elem_size);

	return p_ret;
}
/**
 * @brief qed_chain_reset - Resets the chain to its start state
 *
 * @param p_chain pointer to a previously allocated chain
 */
static inline void qed_chain_reset(struct qed_chain *p_chain)
{
	u32 i;

	/* Zero the cyclic indices in the active counter width */
	if (is_chain_u16(p_chain)) {
		p_chain->u.chain16.prod_idx = 0;
		p_chain->u.chain16.cons_idx = 0;
	} else {
		p_chain->u.chain32.prod_idx = 0;
		p_chain->u.chain32.cons_idx = 0;
	}
	p_chain->p_cons_elem = p_chain->p_virt_addr;
	p_chain->p_prod_elem = p_chain->p_virt_addr;

	if (p_chain->mode == QED_CHAIN_MODE_PBL) {
		/* Use (page_cnt - 1) as a reset value for the prod/cons page's
		 * indices, to avoid unnecessary page advancing on the first
		 * call to qed_chain_produce/consume. Instead, the indices
		 * will be advanced to page_cnt and then will be wrapped to 0.
		 */
		u32 reset_val = p_chain->page_cnt - 1;

		if (is_chain_u16(p_chain)) {
			p_chain->pbl.c.u16.prod_page_idx = (u16)reset_val;
			p_chain->pbl.c.u16.cons_page_idx = (u16)reset_val;
		} else {
			p_chain->pbl.c.u32.prod_page_idx = reset_val;
			p_chain->pbl.c.u32.cons_page_idx = reset_val;
		}
	}

	switch (p_chain->intended_use) {
	case QED_CHAIN_USE_TO_CONSUME:
		/* produce empty elements - a consumer-only chain starts
		 * full, so pre-advance prod over the whole capacity.
		 */
		for (i = 0; i < p_chain->capacity; i++)
			qed_chain_recycle_consumed(p_chain);
		break;
	case QED_CHAIN_USE_TO_CONSUME_PRODUCE:
	case QED_CHAIN_USE_TO_PRODUCE:
	default:
		/* Do nothing */
		break;
	}
}
/**
 * @brief qed_chain_init_params - Initializes a basic chain struct
 *
 * Fills in the chain's fixed parameters and derived element counts;
 * buffer addresses are set separately via qed_chain_init_mem() /
 * qed_chain_init_pbl_mem().
 *
 * @param p_chain
 * @param page_cnt	number of pages in the allocated buffer
 * @param elem_size	size of each element in the chain
 * @param intended_use
 * @param mode
 * @param cnt_type	width of the prod/cons counters [u16 or u32]
 */
static inline void qed_chain_init_params(struct qed_chain *p_chain,
					 u32 page_cnt,
					 u8 elem_size,
					 enum qed_chain_use_mode intended_use,
					 enum qed_chain_mode mode,
					 enum qed_chain_cnt_type cnt_type)
{
	/* chain fixed parameters */
	p_chain->p_virt_addr = NULL;
	p_chain->p_phys_addr = 0;
	p_chain->elem_size = elem_size;
	p_chain->intended_use = (u8)intended_use;
	p_chain->mode = mode;
	p_chain->cnt_type = (u8)cnt_type;

	/* Derived per-page geometry; elem_per_page_mask and next_page_mask
	 * rely on elem_per_page being a power of two.
	 */
	p_chain->elem_per_page = ELEMS_PER_PAGE(elem_size);
	p_chain->usable_per_page = USABLE_ELEMS_PER_PAGE(elem_size, mode);
	p_chain->elem_per_page_mask = p_chain->elem_per_page - 1;
	p_chain->elem_unusable = UNUSABLE_ELEMS_PER_PAGE(elem_size, mode);
	p_chain->next_page_mask = (p_chain->usable_per_page &
				   p_chain->elem_per_page_mask);

	p_chain->page_cnt = page_cnt;
	p_chain->capacity = p_chain->usable_per_page * page_cnt;
	p_chain->size = p_chain->elem_per_page * page_cnt;

	p_chain->pbl_sp.p_phys_table = 0;
	p_chain->pbl_sp.p_virt_table = NULL;
	p_chain->pbl.pp_virt_addr_tbl = NULL;
}
  468. /**
  469. * @brief qed_chain_init_mem -
  470. *
  471. * Initalizes a basic chain struct with its chain buffers
  472. *
  473. * @param p_chain
  474. * @param p_virt_addr virtual address of allocated buffer's beginning
  475. * @param p_phys_addr physical address of allocated buffer's beginning
  476. *
  477. */
  478. static inline void qed_chain_init_mem(struct qed_chain *p_chain,
  479. void *p_virt_addr, dma_addr_t p_phys_addr)
  480. {
  481. p_chain->p_virt_addr = p_virt_addr;
  482. p_chain->p_phys_addr = p_phys_addr;
  483. }
  484. /**
  485. * @brief qed_chain_init_pbl_mem -
  486. *
  487. * Initalizes a basic chain struct with its pbl buffers
  488. *
  489. * @param p_chain
  490. * @param p_virt_pbl pointer to a pre allocated side table which will hold
  491. * virtual page addresses.
  492. * @param p_phys_pbl pointer to a pre-allocated side table which will hold
  493. * physical page addresses.
  494. * @param pp_virt_addr_tbl
  495. * pointer to a pre-allocated side table which will hold
  496. * the virtual addresses of the chain pages.
  497. *
  498. */
  499. static inline void qed_chain_init_pbl_mem(struct qed_chain *p_chain,
  500. void *p_virt_pbl,
  501. dma_addr_t p_phys_pbl,
  502. void **pp_virt_addr_tbl)
  503. {
  504. p_chain->pbl_sp.p_phys_table = p_phys_pbl;
  505. p_chain->pbl_sp.p_virt_table = p_virt_pbl;
  506. p_chain->pbl.pp_virt_addr_tbl = pp_virt_addr_tbl;
  507. }
  508. /**
  509. * @brief qed_chain_init_next_ptr_elem -
  510. *
  511. * Initalizes a next pointer element
  512. *
  513. * @param p_chain
  514. * @param p_virt_curr virtual address of a chain page of which the next
  515. * pointer element is initialized
  516. * @param p_virt_next virtual address of the next chain page
  517. * @param p_phys_next physical address of the next chain page
  518. *
  519. */
  520. static inline void
  521. qed_chain_init_next_ptr_elem(struct qed_chain *p_chain,
  522. void *p_virt_curr,
  523. void *p_virt_next, dma_addr_t p_phys_next)
  524. {
  525. struct qed_chain_next *p_next;
  526. u32 size;
  527. size = p_chain->elem_size * p_chain->usable_per_page;
  528. p_next = (struct qed_chain_next *)((u8 *)p_virt_curr + size);
  529. DMA_REGPAIR_LE(p_next->next_phys, p_phys_next);
  530. p_next->next_virt = p_virt_next;
  531. }
/**
 * @brief qed_chain_get_last_elem -
 *
 * Returns a pointer to the last element of the chain
 *
 * @param p_chain
 *
 * @return void*, address of the last usable element, or NULL if the
 *	chain memory was never assigned
 */
static inline void *qed_chain_get_last_elem(struct qed_chain *p_chain)
{
	struct qed_chain_next *p_next = NULL;
	void *p_virt_addr = NULL;
	u32 size, last_page_idx;

	if (!p_chain->p_virt_addr)
		goto out;

	switch (p_chain->mode) {
	case QED_CHAIN_MODE_NEXT_PTR:
		/* Walk the next-ptr links until they wrap back to the first
		 * page; O(page_cnt) - this is a slowpath helper.
		 */
		size = p_chain->elem_size * p_chain->usable_per_page;
		p_virt_addr = p_chain->p_virt_addr;
		p_next = (struct qed_chain_next *)((u8 *)p_virt_addr + size);
		while (p_next->next_virt != p_chain->p_virt_addr) {
			p_virt_addr = p_next->next_virt;
			p_next = (struct qed_chain_next *)((u8 *)p_virt_addr +
							   size);
		}
		break;
	case QED_CHAIN_MODE_SINGLE:
		p_virt_addr = p_chain->p_virt_addr;
		break;
	case QED_CHAIN_MODE_PBL:
		/* Last page is directly addressable via the side table */
		last_page_idx = p_chain->page_cnt - 1;
		p_virt_addr = p_chain->pbl.pp_virt_addr_tbl[last_page_idx];
		break;
	}
	/* p_virt_addr points at this stage to the last page of the chain */
	size = p_chain->elem_size * (p_chain->usable_per_page - 1);
	p_virt_addr = (u8 *)p_virt_addr + size;
out:
	return p_virt_addr;
}
  573. /**
  574. * @brief qed_chain_set_prod - sets the prod to the given value
  575. *
  576. * @param prod_idx
  577. * @param p_prod_elem
  578. */
  579. static inline void qed_chain_set_prod(struct qed_chain *p_chain,
  580. u32 prod_idx, void *p_prod_elem)
  581. {
  582. if (is_chain_u16(p_chain))
  583. p_chain->u.chain16.prod_idx = (u16) prod_idx;
  584. else
  585. p_chain->u.chain32.prod_idx = prod_idx;
  586. p_chain->p_prod_elem = p_prod_elem;
  587. }
  588. /**
  589. * @brief qed_chain_pbl_zero_mem - set chain memory to 0
  590. *
  591. * @param p_chain
  592. */
  593. static inline void qed_chain_pbl_zero_mem(struct qed_chain *p_chain)
  594. {
  595. u32 i, page_cnt;
  596. if (p_chain->mode != QED_CHAIN_MODE_PBL)
  597. return;
  598. page_cnt = qed_chain_get_page_cnt(p_chain);
  599. for (i = 0; i < page_cnt; i++)
  600. memset(p_chain->pbl.pp_virt_addr_tbl[i], 0,
  601. QED_CHAIN_PAGE_SIZE);
  602. }
  603. #endif