qed_spq.c

/* QLogic qed NIC Driver
 * Copyright (c) 2015-2017 QLogic Corporation
 *
 * This software is available to you under a choice of one of two
 * licenses.  You may choose to be licensed under the terms of the GNU
 * General Public License (GPL) Version 2, available from the file
 * COPYING in the main directory of this source tree, or the
 * OpenIB.org BSD license below:
 *
 *     Redistribution and use in source and binary forms, with or
 *     without modification, are permitted provided that the following
 *     conditions are met:
 *
 *      - Redistributions of source code must retain the above
 *        copyright notice, this list of conditions and the following
 *        disclaimer.
 *
 *      - Redistributions in binary form must reproduce the above
 *        copyright notice, this list of conditions and the following
 *        disclaimer in the documentation and/or other materials
 *        provided with the distribution.
 *
 * THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND,
 * EXPRESS OR IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF
 * MERCHANTABILITY, FITNESS FOR A PARTICULAR PURPOSE AND
 * NONINFRINGEMENT. IN NO EVENT SHALL THE AUTHORS OR COPYRIGHT HOLDERS
 * BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER LIABILITY, WHETHER IN AN
 * ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING FROM, OUT OF OR IN
 * CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER DEALINGS IN THE
 * SOFTWARE.
 */
#include <linux/types.h>
#include <asm/byteorder.h>
#include <linux/io.h>
#include <linux/delay.h>
#include <linux/dma-mapping.h>
#include <linux/errno.h>
#include <linux/kernel.h>
#include <linux/list.h>
#include <linux/pci.h>
#include <linux/slab.h>
#include <linux/spinlock.h>
#include <linux/string.h>
#include "qed.h"
#include "qed_cxt.h"
#include "qed_dev_api.h"
#include "qed_hsi.h"
#include "qed_hw.h"
#include "qed_int.h"
#include "qed_iscsi.h"
#include "qed_mcp.h"
#include "qed_ooo.h"
#include "qed_reg_addr.h"
#include "qed_sp.h"
#include "qed_sriov.h"
#include "qed_rdma.h"

/***************************************************************************
 * Structures & Definitions
 ***************************************************************************/

#define SPQ_HIGH_PRI_RESERVE_DEFAULT    (1)

#define SPQ_BLOCK_DELAY_MAX_ITER        (10)
#define SPQ_BLOCK_DELAY_US              (10)
#define SPQ_BLOCK_SLEEP_MAX_ITER        (1000)
#define SPQ_BLOCK_SLEEP_MS              (5)

/***************************************************************************
 * Blocking Imp. (BLOCK/EBLOCK mode)
 ***************************************************************************/
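/* In BLOCK/EBLOCK mode the caller of qed_spq_post() waits synchronously for
 * the ramrod: qed_spq_fill_entry() sets the entry's completion callback to
 * qed_spq_blocking_cb(), which records the FW return code and sets
 * comp_done->done, while the posting thread polls that flag in
 * __qed_spq_block() below.
 */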
static void qed_spq_blocking_cb(struct qed_hwfn *p_hwfn,
                                void *cookie,
                                union event_ring_data *data, u8 fw_return_code)
{
        struct qed_spq_comp_done *comp_done;

        comp_done = (struct qed_spq_comp_done *)cookie;

        comp_done->fw_return_code = fw_return_code;

        /* Make sure completion done is visible on waiting thread */
        smp_store_release(&comp_done->done, 0x1);
}

static int __qed_spq_block(struct qed_hwfn *p_hwfn,
                           struct qed_spq_entry *p_ent,
                           u8 *p_fw_ret, bool sleep_between_iter)
{
        struct qed_spq_comp_done *comp_done;
        u32 iter_cnt;

        comp_done = (struct qed_spq_comp_done *)p_ent->comp_cb.cookie;
        iter_cnt = sleep_between_iter ? SPQ_BLOCK_SLEEP_MAX_ITER
                                      : SPQ_BLOCK_DELAY_MAX_ITER;

        while (iter_cnt--) {
                /* Validate we receive completion update; pairs with the
                 * smp_store_release() in qed_spq_blocking_cb().
                 */
                if (smp_load_acquire(&comp_done->done) == 1) {
                        if (p_fw_ret)
                                *p_fw_ret = comp_done->fw_return_code;
                        return 0;
                }

                if (sleep_between_iter)
                        msleep(SPQ_BLOCK_SLEEP_MS);
                else
                        udelay(SPQ_BLOCK_DELAY_US);
        }

        return -EBUSY;
}
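
/* Wait for a posted ramrod to complete: first a short busy-wait poll (unless
 * skip_quick_poll is set), then a sleeping poll, and if the ramrod is still
 * outstanding, request an MCP drain and poll one last time.
 */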
static int qed_spq_block(struct qed_hwfn *p_hwfn,
                         struct qed_spq_entry *p_ent,
                         u8 *p_fw_ret, bool skip_quick_poll)
{
        struct qed_spq_comp_done *comp_done;
        struct qed_ptt *p_ptt;
        int rc;

        /* A relatively short polling period w/o sleeping, to allow the FW to
         * complete the ramrod and thus possibly to avoid the following sleeps.
         */
        if (!skip_quick_poll) {
                rc = __qed_spq_block(p_hwfn, p_ent, p_fw_ret, false);
                if (!rc)
                        return 0;
        }

        /* Move to polling with a sleeping period between iterations */
        rc = __qed_spq_block(p_hwfn, p_ent, p_fw_ret, true);
        if (!rc)
                return 0;

        p_ptt = qed_ptt_acquire(p_hwfn);
        if (!p_ptt) {
                DP_NOTICE(p_hwfn, "ptt, failed to acquire\n");
                return -EAGAIN;
        }

        DP_INFO(p_hwfn, "Ramrod is stuck, requesting MCP drain\n");
        rc = qed_mcp_drain(p_hwfn, p_ptt);
        if (rc) {
                DP_NOTICE(p_hwfn, "MCP drain failed\n");
                goto err;
        }

        /* Retry after drain */
        rc = __qed_spq_block(p_hwfn, p_ent, p_fw_ret, true);
        if (!rc)
                goto out;

        comp_done = (struct qed_spq_comp_done *)p_ent->comp_cb.cookie;
        if (comp_done->done == 1)
                if (p_fw_ret)
                        *p_fw_ret = comp_done->fw_return_code;
out:
        qed_ptt_release(p_hwfn, p_ptt);
        return 0;

err:
        qed_ptt_release(p_hwfn, p_ptt);
        DP_NOTICE(p_hwfn,
                  "Ramrod is stuck [CID %08x cmd %02x protocol %02x echo %04x]\n",
                  le32_to_cpu(p_ent->elem.hdr.cid),
                  p_ent->elem.hdr.cmd_id,
                  p_ent->elem.hdr.protocol_id,
                  le16_to_cpu(p_ent->elem.hdr.echo));

        return -EBUSY;
}

/***************************************************************************
 * SPQ entries inner API
 ***************************************************************************/
static int qed_spq_fill_entry(struct qed_hwfn *p_hwfn,
                              struct qed_spq_entry *p_ent)
{
        p_ent->flags = 0;

        switch (p_ent->comp_mode) {
        case QED_SPQ_MODE_EBLOCK:
        case QED_SPQ_MODE_BLOCK:
                p_ent->comp_cb.function = qed_spq_blocking_cb;
                break;
        case QED_SPQ_MODE_CB:
                break;
        default:
                DP_NOTICE(p_hwfn, "Unknown SPQE completion mode %d\n",
                          p_ent->comp_mode);
                return -EINVAL;
        }

        DP_VERBOSE(p_hwfn, QED_MSG_SPQ,
                   "Ramrod header: [CID 0x%08x CMD 0x%02x protocol 0x%02x] Data pointer: [%08x:%08x] Completion Mode: %s\n",
                   p_ent->elem.hdr.cid,
                   p_ent->elem.hdr.cmd_id,
                   p_ent->elem.hdr.protocol_id,
                   p_ent->elem.data_ptr.hi,
                   p_ent->elem.data_ptr.lo,
                   D_TRINE(p_ent->comp_mode, QED_SPQ_MODE_EBLOCK,
                           QED_SPQ_MODE_BLOCK, "MODE_EBLOCK", "MODE_BLOCK",
                           "MODE_CB"));

        return 0;
}

/***************************************************************************
 * HSI access
 ***************************************************************************/
static void qed_spq_hw_initialize(struct qed_hwfn *p_hwfn,
                                  struct qed_spq *p_spq)
{
        struct e4_core_conn_context *p_cxt;
        struct qed_cxt_info cxt_info;
        u16 physical_q;
        int rc;

        cxt_info.iid = p_spq->cid;

        rc = qed_cxt_get_cid_info(p_hwfn, &cxt_info);
        if (rc < 0) {
                DP_NOTICE(p_hwfn, "Cannot find context info for cid=%d\n",
                          p_spq->cid);
                return;
        }

        p_cxt = cxt_info.p_cxt;

        SET_FIELD(p_cxt->xstorm_ag_context.flags10,
                  E4_XSTORM_CORE_CONN_AG_CTX_DQ_CF_EN, 1);
        SET_FIELD(p_cxt->xstorm_ag_context.flags1,
                  E4_XSTORM_CORE_CONN_AG_CTX_DQ_CF_ACTIVE, 1);
        SET_FIELD(p_cxt->xstorm_ag_context.flags9,
                  E4_XSTORM_CORE_CONN_AG_CTX_CONSOLID_PROD_CF_EN, 1);

        /* QM physical queue */
        physical_q = qed_get_cm_pq_idx(p_hwfn, PQ_FLAGS_LB);
        p_cxt->xstorm_ag_context.physical_q0 = cpu_to_le16(physical_q);

        p_cxt->xstorm_st_context.spq_base_lo =
                DMA_LO_LE(p_spq->chain.p_phys_addr);
        p_cxt->xstorm_st_context.spq_base_hi =
                DMA_HI_LE(p_spq->chain.p_phys_addr);

        DMA_REGPAIR_LE(p_cxt->xstorm_st_context.consolid_base_addr,
                       p_hwfn->p_consq->chain.p_phys_addr);
}
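
/* Copy the SPQ element into the next slot of the SPQ chain and ring the XCM
 * doorbell so the FW starts processing the ramrod. The chain producer index
 * also serves as the 'echo' value used to match the completion later on.
 */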
static int qed_spq_hw_post(struct qed_hwfn *p_hwfn,
                           struct qed_spq *p_spq, struct qed_spq_entry *p_ent)
{
        struct qed_chain *p_chain = &p_hwfn->p_spq->chain;
        u16 echo = qed_chain_get_prod_idx(p_chain);
        struct slow_path_element *elem;
        struct core_db_data db;

        p_ent->elem.hdr.echo = cpu_to_le16(echo);
        elem = qed_chain_produce(p_chain);
        if (!elem) {
                DP_NOTICE(p_hwfn, "Failed to produce from SPQ chain\n");
                return -EINVAL;
        }

        *elem = p_ent->elem; /* struct assignment */

        /* send a doorbell on the slow hwfn session */
        memset(&db, 0, sizeof(db));
        SET_FIELD(db.params, CORE_DB_DATA_DEST, DB_DEST_XCM);
        SET_FIELD(db.params, CORE_DB_DATA_AGG_CMD, DB_AGG_CMD_SET);
        SET_FIELD(db.params, CORE_DB_DATA_AGG_VAL_SEL,
                  DQ_XCM_CORE_SPQ_PROD_CMD);
        db.agg_flags = DQ_XCM_CORE_DQ_CF_CMD;
        db.spq_prod = cpu_to_le16(qed_chain_get_prod_idx(p_chain));

        /* make sure the SPQE is updated before the doorbell */
        wmb();

        DOORBELL(p_hwfn, qed_db_addr(p_spq->cid, DQ_DEMS_LEGACY), *(u32 *)&db);

        /* make sure the doorbell was rung */
        wmb();

        DP_VERBOSE(p_hwfn, QED_MSG_SPQ,
                   "Doorbelled [0x%08x, CID 0x%08x] with Flags: %02x agg_params: %02x, prod: %04x\n",
                   qed_db_addr(p_spq->cid, DQ_DEMS_LEGACY),
                   p_spq->cid, db.params, db.agg_flags,
                   qed_chain_get_prod_idx(p_chain));

        return 0;
}

/***************************************************************************
 * Asynchronous events
 ***************************************************************************/
static int
qed_async_event_completion(struct qed_hwfn *p_hwfn,
                           struct event_ring_entry *p_eqe)
{
        qed_spq_async_comp_cb cb;

        if (!p_hwfn->p_spq || (p_eqe->protocol_id >= MAX_PROTOCOL_TYPE))
                return -EINVAL;

        cb = p_hwfn->p_spq->async_comp_cb[p_eqe->protocol_id];
        if (cb) {
                return cb(p_hwfn, p_eqe->opcode, p_eqe->echo,
                          &p_eqe->data, p_eqe->fw_return_code);
        } else {
                DP_NOTICE(p_hwfn,
                          "Unknown Async completion for protocol: %d\n",
                          p_eqe->protocol_id);
                return -EINVAL;
        }
}
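
/* Protocol modules register a per-protocol handler which
 * qed_async_event_completion() dispatches to by protocol_id, e.g.
 * (illustrative only, with a placeholder handler):
 *
 *      qed_spq_register_async_cb(p_hwfn, PROTOCOLID_ISCSI,
 *                                <protocol async handler>);
 *
 * Unregistering simply clears the slot.
 */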
int
qed_spq_register_async_cb(struct qed_hwfn *p_hwfn,
                          enum protocol_type protocol_id,
                          qed_spq_async_comp_cb cb)
{
        if (!p_hwfn->p_spq || (protocol_id >= MAX_PROTOCOL_TYPE))
                return -EINVAL;

        p_hwfn->p_spq->async_comp_cb[protocol_id] = cb;
        return 0;
}

void
qed_spq_unregister_async_cb(struct qed_hwfn *p_hwfn,
                            enum protocol_type protocol_id)
{
        if (!p_hwfn->p_spq || (protocol_id >= MAX_PROTOCOL_TYPE))
                return;

        p_hwfn->p_spq->async_comp_cb[protocol_id] = NULL;
}

/***************************************************************************
 * EQ API
 ***************************************************************************/
void qed_eq_prod_update(struct qed_hwfn *p_hwfn, u16 prod)
{
        u32 addr = GTT_BAR0_MAP_REG_USDM_RAM +
                   USTORM_EQE_CONS_OFFSET(p_hwfn->rel_pf_id);

        REG_WR16(p_hwfn, addr, prod);

        /* keep prod updates ordered */
        mmiowb();
}
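
/* EQ completion handler, registered on the slow-path status block in
 * qed_eq_alloc(): walk the event ring up to a snapshot of the FW consumer
 * and dispatch each EQE either to the async handlers or to
 * qed_spq_completion().
 */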
int qed_eq_completion(struct qed_hwfn *p_hwfn, void *cookie)
{
        struct qed_eq *p_eq = cookie;
        struct qed_chain *p_chain = &p_eq->chain;
        int rc = 0;

        /* take a snapshot of the FW consumer */
        u16 fw_cons_idx = le16_to_cpu(*p_eq->p_fw_cons);

        DP_VERBOSE(p_hwfn, QED_MSG_SPQ, "fw_cons_idx %x\n", fw_cons_idx);

        /* Need to guarantee the fw_cons index we use points to a usable
         * element (skipping the per-page unusable elements), so the chain
         * macros behave correctly.
         */
        if ((fw_cons_idx & qed_chain_get_usable_per_page(p_chain)) ==
            qed_chain_get_usable_per_page(p_chain))
                fw_cons_idx += qed_chain_get_unusable_per_page(p_chain);

        /* Complete current segment of eq entries */
        while (fw_cons_idx != qed_chain_get_cons_idx(p_chain)) {
                struct event_ring_entry *p_eqe = qed_chain_consume(p_chain);

                if (!p_eqe) {
                        rc = -EINVAL;
                        break;
                }

                DP_VERBOSE(p_hwfn, QED_MSG_SPQ,
                           "op %x prot %x res0 %x echo %x fwret %x flags %x\n",
                           p_eqe->opcode,
                           p_eqe->protocol_id,
                           p_eqe->reserved0,
                           le16_to_cpu(p_eqe->echo),
                           p_eqe->fw_return_code,
                           p_eqe->flags);

                if (GET_FIELD(p_eqe->flags, EVENT_RING_ENTRY_ASYNC)) {
                        if (qed_async_event_completion(p_hwfn, p_eqe))
                                rc = -EINVAL;
                } else if (qed_spq_completion(p_hwfn,
                                              p_eqe->echo,
                                              p_eqe->fw_return_code,
                                              &p_eqe->data)) {
                        rc = -EINVAL;
                }

                qed_chain_recycle_consumed(p_chain);
        }

        qed_eq_prod_update(p_hwfn, qed_chain_get_prod_idx(p_chain));

        return rc;
}

int qed_eq_alloc(struct qed_hwfn *p_hwfn, u16 num_elem)
{
        struct qed_eq *p_eq;

        /* Allocate EQ struct */
        p_eq = kzalloc(sizeof(*p_eq), GFP_KERNEL);
        if (!p_eq)
                return -ENOMEM;

        /* Allocate and initialize EQ chain */
        if (qed_chain_alloc(p_hwfn->cdev,
                            QED_CHAIN_USE_TO_PRODUCE,
                            QED_CHAIN_MODE_PBL,
                            QED_CHAIN_CNT_TYPE_U16,
                            num_elem,
                            sizeof(union event_ring_element),
                            &p_eq->chain, NULL))
                goto eq_allocate_fail;

        /* register EQ completion on the SP SB */
        qed_int_register_cb(p_hwfn, qed_eq_completion,
                            p_eq, &p_eq->eq_sb_index, &p_eq->p_fw_cons);

        p_hwfn->p_eq = p_eq;
        return 0;

eq_allocate_fail:
        kfree(p_eq);
        return -ENOMEM;
}

void qed_eq_setup(struct qed_hwfn *p_hwfn)
{
        qed_chain_reset(&p_hwfn->p_eq->chain);
}

void qed_eq_free(struct qed_hwfn *p_hwfn)
{
        if (!p_hwfn->p_eq)
                return;

        qed_chain_free(p_hwfn->cdev, &p_hwfn->p_eq->chain);

        kfree(p_hwfn->p_eq);
        p_hwfn->p_eq = NULL;
}

/***************************************************************************
 * CQE API - manipulate EQ functionality
 ***************************************************************************/
static int qed_cqe_completion(struct qed_hwfn *p_hwfn,
                              struct eth_slow_path_rx_cqe *cqe,
                              enum protocol_type protocol)
{
        if (IS_VF(p_hwfn->cdev))
                return 0;

        /* @@@tmp - it's possible we'll eventually want to handle some
         * actual commands that can arrive here, but for now this is only
         * used to complete the ramrod using the echo value on the cqe
         */
        return qed_spq_completion(p_hwfn, cqe->echo, 0, NULL);
}

int qed_eth_cqe_completion(struct qed_hwfn *p_hwfn,
                           struct eth_slow_path_rx_cqe *cqe)
{
        int rc;

        rc = qed_cqe_completion(p_hwfn, cqe, PROTOCOLID_ETH);
        if (rc)
                DP_NOTICE(p_hwfn,
                          "Failed to handle RXQ CQE [cmd 0x%02x]\n",
                          cqe->ramrod_cmd_id);

        return rc;
}

/***************************************************************************
 * Slow hwfn Queue (spq)
 ***************************************************************************/
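/* (Re)initialize an already allocated SPQ: reset its lists, point each
 * free-pool entry's data_ptr at the DMA address of its own 'ramrod' area,
 * acquire the CORE-protocol CID and program the XSTORM context, then reset
 * the chain itself.
 */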
void qed_spq_setup(struct qed_hwfn *p_hwfn)
{
        struct qed_spq *p_spq = p_hwfn->p_spq;
        struct qed_spq_entry *p_virt = NULL;
        dma_addr_t p_phys = 0;
        u32 i, capacity;

        INIT_LIST_HEAD(&p_spq->pending);
        INIT_LIST_HEAD(&p_spq->completion_pending);
        INIT_LIST_HEAD(&p_spq->free_pool);
        INIT_LIST_HEAD(&p_spq->unlimited_pending);
        spin_lock_init(&p_spq->lock);

        /* SPQ empty pool */
        p_phys = p_spq->p_phys + offsetof(struct qed_spq_entry, ramrod);
        p_virt = p_spq->p_virt;
        capacity = qed_chain_get_capacity(&p_spq->chain);
        for (i = 0; i < capacity; i++) {
                DMA_REGPAIR_LE(p_virt->elem.data_ptr, p_phys);

                list_add_tail(&p_virt->list, &p_spq->free_pool);

                p_virt++;
                p_phys += sizeof(struct qed_spq_entry);
        }

        /* Statistics */
        p_spq->normal_count = 0;
        p_spq->comp_count = 0;
        p_spq->comp_sent_count = 0;
        p_spq->unlimited_pending_count = 0;

        bitmap_zero(p_spq->p_comp_bitmap, SPQ_RING_SIZE);
        p_spq->comp_bitmap_idx = 0;

        /* SPQ cid, cannot fail */
        qed_cxt_acquire_cid(p_hwfn, PROTOCOLID_CORE, &p_spq->cid);
        qed_spq_hw_initialize(p_hwfn, p_spq);

        /* reset the chain itself */
        qed_chain_reset(&p_spq->chain);
}

int qed_spq_alloc(struct qed_hwfn *p_hwfn)
{
        struct qed_spq_entry *p_virt = NULL;
        struct qed_spq *p_spq = NULL;
        dma_addr_t p_phys = 0;
        u32 capacity;

        /* SPQ struct */
        p_spq = kzalloc(sizeof(struct qed_spq), GFP_KERNEL);
        if (!p_spq)
                return -ENOMEM;

        /* SPQ ring */
        if (qed_chain_alloc(p_hwfn->cdev,
                            QED_CHAIN_USE_TO_PRODUCE,
                            QED_CHAIN_MODE_SINGLE,
                            QED_CHAIN_CNT_TYPE_U16,
                            0,   /* N/A when the mode is SINGLE */
                            sizeof(struct slow_path_element),
                            &p_spq->chain, NULL))
                goto spq_allocate_fail;

        /* allocate and fill the SPQ elements (incl. ramrod data list) */
        capacity = qed_chain_get_capacity(&p_spq->chain);
        p_virt = dma_alloc_coherent(&p_hwfn->cdev->pdev->dev,
                                    capacity * sizeof(struct qed_spq_entry),
                                    &p_phys, GFP_KERNEL);
        if (!p_virt)
                goto spq_allocate_fail;

        p_spq->p_virt = p_virt;
        p_spq->p_phys = p_phys;
        p_hwfn->p_spq = p_spq;

        return 0;

spq_allocate_fail:
        qed_chain_free(p_hwfn->cdev, &p_spq->chain);
        kfree(p_spq);
        return -ENOMEM;
}

void qed_spq_free(struct qed_hwfn *p_hwfn)
{
        struct qed_spq *p_spq = p_hwfn->p_spq;
        u32 capacity;

        if (!p_spq)
                return;

        if (p_spq->p_virt) {
                capacity = qed_chain_get_capacity(&p_spq->chain);
                dma_free_coherent(&p_hwfn->cdev->pdev->dev,
                                  capacity *
                                  sizeof(struct qed_spq_entry),
                                  p_spq->p_virt, p_spq->p_phys);
        }

        qed_chain_free(p_hwfn->cdev, &p_spq->chain);
        kfree(p_spq);
        p_hwfn->p_spq = NULL;
}

int qed_spq_get_entry(struct qed_hwfn *p_hwfn, struct qed_spq_entry **pp_ent)
{
        struct qed_spq *p_spq = p_hwfn->p_spq;
        struct qed_spq_entry *p_ent = NULL;
        int rc = 0;

        spin_lock_bh(&p_spq->lock);

        if (list_empty(&p_spq->free_pool)) {
                p_ent = kzalloc(sizeof(*p_ent), GFP_ATOMIC);
                if (!p_ent) {
                        DP_NOTICE(p_hwfn,
                                  "Failed to allocate an SPQ entry for a pending ramrod\n");
                        rc = -ENOMEM;
                        goto out_unlock;
                }
                p_ent->queue = &p_spq->unlimited_pending;
        } else {
                p_ent = list_first_entry(&p_spq->free_pool,
                                         struct qed_spq_entry, list);
                list_del(&p_ent->list);
                p_ent->queue = &p_spq->pending;
        }

        *pp_ent = p_ent;

out_unlock:
        spin_unlock_bh(&p_spq->lock);
        return rc;
}

/* Locked variant; Should be called while the SPQ lock is taken */
static void __qed_spq_return_entry(struct qed_hwfn *p_hwfn,
                                   struct qed_spq_entry *p_ent)
{
        list_add_tail(&p_ent->list, &p_hwfn->p_spq->free_pool);
}

void qed_spq_return_entry(struct qed_hwfn *p_hwfn, struct qed_spq_entry *p_ent)
{
        spin_lock_bh(&p_hwfn->p_spq->lock);
        __qed_spq_return_entry(p_hwfn, p_ent);
        spin_unlock_bh(&p_hwfn->p_spq->lock);
}

/**
 * @brief qed_spq_add_entry - adds a new entry to the pending
 *        list. Should be used while lock is being held.
 *
 * Adds an entry to the pending list if there is room (an empty
 * element is available in the free_pool), or else places the
 * entry in the unlimited_pending pool.
 *
 * @param p_hwfn
 * @param p_ent
 * @param priority
 *
 * @return int
 */
static int qed_spq_add_entry(struct qed_hwfn *p_hwfn,
                             struct qed_spq_entry *p_ent,
                             enum spq_priority priority)
{
        struct qed_spq *p_spq = p_hwfn->p_spq;

        if (p_ent->queue == &p_spq->unlimited_pending) {
                if (list_empty(&p_spq->free_pool)) {
                        list_add_tail(&p_ent->list,
                                      &p_spq->unlimited_pending);
                        p_spq->unlimited_pending_count++;

                        return 0;
                } else {
                        struct qed_spq_entry *p_en2;

                        p_en2 = list_first_entry(&p_spq->free_pool,
                                                 struct qed_spq_entry, list);
                        list_del(&p_en2->list);

                        /* Copy the ring element physical pointer to the new
                         * entry, since we are about to override the entire ring
                         * entry and don't want to lose the pointer.
                         */
                        p_ent->elem.data_ptr = p_en2->elem.data_ptr;

                        *p_en2 = *p_ent;

                        /* EBLOCK responsible to free the allocated p_ent */
                        if (p_ent->comp_mode != QED_SPQ_MODE_EBLOCK)
                                kfree(p_ent);

                        p_ent = p_en2;
                }
        }

        /* entry is to be placed in 'pending' queue */
        switch (priority) {
        case QED_SPQ_PRIORITY_NORMAL:
                list_add_tail(&p_ent->list, &p_spq->pending);
                p_spq->normal_count++;
                break;
        case QED_SPQ_PRIORITY_HIGH:
                list_add(&p_ent->list, &p_spq->pending);
                p_spq->high_count++;
                break;
        default:
                return -EINVAL;
        }

        return 0;
}

/***************************************************************************
 * Accessor
 ***************************************************************************/
u32 qed_spq_get_cid(struct qed_hwfn *p_hwfn)
{
        if (!p_hwfn->p_spq)
                return 0xffffffff;      /* illegal */
        return p_hwfn->p_spq->cid;
}

/***************************************************************************
 * Posting new Ramrods
 ***************************************************************************/
static int qed_spq_post_list(struct qed_hwfn *p_hwfn,
                             struct list_head *head, u32 keep_reserve)
{
        struct qed_spq *p_spq = p_hwfn->p_spq;
        int rc;

        while (qed_chain_get_elem_left(&p_spq->chain) > keep_reserve &&
               !list_empty(head)) {
                struct qed_spq_entry *p_ent =
                        list_first_entry(head, struct qed_spq_entry, list);

                list_del(&p_ent->list);
                list_add_tail(&p_ent->list, &p_spq->completion_pending);
                p_spq->comp_sent_count++;

                rc = qed_spq_hw_post(p_hwfn, p_spq, p_ent);
                if (rc) {
                        list_del(&p_ent->list);
                        __qed_spq_return_entry(p_hwfn, p_ent);
                        return rc;
                }
        }

        return 0;
}

static int qed_spq_pend_post(struct qed_hwfn *p_hwfn)
{
        struct qed_spq *p_spq = p_hwfn->p_spq;
        struct qed_spq_entry *p_ent = NULL;

        while (!list_empty(&p_spq->free_pool)) {
                if (list_empty(&p_spq->unlimited_pending))
                        break;

                p_ent = list_first_entry(&p_spq->unlimited_pending,
                                         struct qed_spq_entry, list);
                if (!p_ent)
                        return -EINVAL;

                list_del(&p_ent->list);

                qed_spq_add_entry(p_hwfn, p_ent, p_ent->priority);
        }

        return qed_spq_post_list(p_hwfn, &p_spq->pending,
                                 SPQ_HIGH_PRI_RESERVE_DEFAULT);
}
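
/* Post a single SPQ entry: fill in its completion callback, add it to the
 * pending queue under the SPQ lock, push as much of the pending list to HW
 * as the ring allows, and for EBLOCK mode block here until the ramrod
 * completes (or the drain flow gives up).
 */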
int qed_spq_post(struct qed_hwfn *p_hwfn,
                 struct qed_spq_entry *p_ent, u8 *fw_return_code)
{
        int rc = 0;
        struct qed_spq *p_spq = p_hwfn ? p_hwfn->p_spq : NULL;
        bool b_ret_ent = true;
        bool eblock;

        if (!p_hwfn)
                return -EINVAL;

        if (!p_ent) {
                DP_NOTICE(p_hwfn, "Got a NULL pointer\n");
                return -EINVAL;
        }

        /* Complete the entry */
        rc = qed_spq_fill_entry(p_hwfn, p_ent);

        spin_lock_bh(&p_spq->lock);

        /* Check return value after LOCK is taken for cleaner error flow */
        if (rc)
                goto spq_post_fail;

        /* Check if entry is in block mode before qed_spq_add_entry,
         * which might kfree p_ent.
         */
        eblock = (p_ent->comp_mode == QED_SPQ_MODE_EBLOCK);

        /* Add the request to the pending queue */
        rc = qed_spq_add_entry(p_hwfn, p_ent, p_ent->priority);
        if (rc)
                goto spq_post_fail;

        rc = qed_spq_pend_post(p_hwfn);
        if (rc) {
                /* Since it's possible that pending failed for a different
                 * entry [although unlikely], the failed entry was already
                 * dealt with; No need to return it here.
                 */
                b_ret_ent = false;
                goto spq_post_fail;
        }

        spin_unlock_bh(&p_spq->lock);

        if (eblock) {
                /* For entries in QED BLOCK mode, the completion code cannot
                 * perform the necessary cleanup - if it did, we couldn't
                 * access p_ent here to see whether it's successful or not.
                 * Thus, after gaining the answer perform the cleanup here.
                 */
                rc = qed_spq_block(p_hwfn, p_ent, fw_return_code,
                                   p_ent->queue == &p_spq->unlimited_pending);

                if (p_ent->queue == &p_spq->unlimited_pending) {
                        /* This is an allocated p_ent which does not need to
                         * return to pool.
                         */
                        kfree(p_ent);
                        return rc;
                }

                if (rc)
                        goto spq_post_fail2;

                /* return to pool */
                qed_spq_return_entry(p_hwfn, p_ent);
        }
        return rc;

spq_post_fail2:
        spin_lock_bh(&p_spq->lock);
        list_del(&p_ent->list);
        qed_chain_return_produced(&p_spq->chain);

spq_post_fail:
        /* return to the free pool */
        if (b_ret_ent)
                __qed_spq_return_entry(p_hwfn, p_ent);
        spin_unlock_bh(&p_spq->lock);

        return rc;
}
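
/* Called from the EQ handler for a slow-path completion: find the entry on
 * completion_pending whose echo matches the EQE, handle out-of-order
 * completions via the completion bitmap, invoke the entry's callback and
 * then try to post any ramrods that were waiting for ring space.
 */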
int qed_spq_completion(struct qed_hwfn *p_hwfn,
                       __le16 echo,
                       u8 fw_return_code,
                       union event_ring_data *p_data)
{
        struct qed_spq *p_spq;
        struct qed_spq_entry *p_ent = NULL;
        struct qed_spq_entry *tmp;
        struct qed_spq_entry *found = NULL;
        int rc;

        if (!p_hwfn)
                return -EINVAL;

        p_spq = p_hwfn->p_spq;
        if (!p_spq)
                return -EINVAL;

        spin_lock_bh(&p_spq->lock);
        list_for_each_entry_safe(p_ent, tmp, &p_spq->completion_pending, list) {
                if (p_ent->elem.hdr.echo == echo) {
                        u16 pos = le16_to_cpu(echo) % SPQ_RING_SIZE;

                        list_del(&p_ent->list);

                        /* Avoid overriding of SPQ entries when getting
                         * out-of-order completions, by marking the completions
                         * in a bitmap and increasing the chain consumer only
                         * for the first successive completed entries.
                         */
                        __set_bit(pos, p_spq->p_comp_bitmap);

                        while (test_bit(p_spq->comp_bitmap_idx,
                                        p_spq->p_comp_bitmap)) {
                                __clear_bit(p_spq->comp_bitmap_idx,
                                            p_spq->p_comp_bitmap);
                                p_spq->comp_bitmap_idx++;
                                qed_chain_return_produced(&p_spq->chain);
                        }

                        p_spq->comp_count++;
                        found = p_ent;
                        break;
                }

                /* This is relatively uncommon - depends on scenarios
                 * which have multiple per-PF sent ramrods.
                 */
                DP_VERBOSE(p_hwfn, QED_MSG_SPQ,
                           "Got completion for echo %04x - doesn't match echo %04x in completion pending list\n",
                           le16_to_cpu(echo),
                           le16_to_cpu(p_ent->elem.hdr.echo));
        }

        /* Release lock before callback, as callback may post
         * an additional ramrod.
         */
        spin_unlock_bh(&p_spq->lock);

        if (!found) {
                DP_NOTICE(p_hwfn,
                          "Failed to find an entry this EQE [echo %04x] completes\n",
                          le16_to_cpu(echo));
                return -EEXIST;
        }

        DP_VERBOSE(p_hwfn, QED_MSG_SPQ,
                   "Complete EQE [echo %04x]: func %p cookie %p\n",
                   le16_to_cpu(echo),
                   p_ent->comp_cb.function, p_ent->comp_cb.cookie);
        if (found->comp_cb.function)
                found->comp_cb.function(p_hwfn, found->comp_cb.cookie, p_data,
                                        fw_return_code);
        else
                DP_VERBOSE(p_hwfn,
                           QED_MSG_SPQ,
                           "Got a completion without a callback function\n");

        if ((found->comp_mode != QED_SPQ_MODE_EBLOCK) ||
            (found->queue == &p_spq->unlimited_pending))
                /* EBLOCK is responsible for returning its own entry into the
                 * free list, unless it originally added the entry into the
                 * unlimited pending list.
                 */
                qed_spq_return_entry(p_hwfn, found);

        /* Attempt to post pending requests */
        spin_lock_bh(&p_spq->lock);
        rc = qed_spq_pend_post(p_hwfn);
        spin_unlock_bh(&p_spq->lock);

        return rc;
}

int qed_consq_alloc(struct qed_hwfn *p_hwfn)
{
        struct qed_consq *p_consq;

        /* Allocate ConsQ struct */
        p_consq = kzalloc(sizeof(*p_consq), GFP_KERNEL);
        if (!p_consq)
                return -ENOMEM;

        /* Allocate and initialize ConsQ chain */
        if (qed_chain_alloc(p_hwfn->cdev,
                            QED_CHAIN_USE_TO_PRODUCE,
                            QED_CHAIN_MODE_PBL,
                            QED_CHAIN_CNT_TYPE_U16,
                            QED_CHAIN_PAGE_SIZE / 0x80,
                            0x80, &p_consq->chain, NULL))
                goto consq_allocate_fail;

        p_hwfn->p_consq = p_consq;
        return 0;

consq_allocate_fail:
        kfree(p_consq);
        return -ENOMEM;
}

void qed_consq_setup(struct qed_hwfn *p_hwfn)
{
        qed_chain_reset(&p_hwfn->p_consq->chain);
}

void qed_consq_free(struct qed_hwfn *p_hwfn)
{
        if (!p_hwfn->p_consq)
                return;

        qed_chain_free(p_hwfn->cdev, &p_hwfn->p_consq->chain);

        kfree(p_hwfn->p_consq);
        p_hwfn->p_consq = NULL;
}