qed_spq.c

/* QLogic qed NIC Driver
 * Copyright (c) 2015 QLogic Corporation
 *
 * This software is available under the terms of the GNU General Public License
 * (GPL) Version 2, available from the file COPYING in the main directory of
 * this source tree.
 */
#include <linux/types.h>
#include <asm/byteorder.h>
#include <linux/io.h>
#include <linux/delay.h>
#include <linux/dma-mapping.h>
#include <linux/errno.h>
#include <linux/kernel.h>
#include <linux/list.h>
#include <linux/pci.h>
#include <linux/slab.h>
#include <linux/spinlock.h>
#include <linux/string.h>
#include "qed.h"
#include "qed_cxt.h"
#include "qed_dev_api.h"
#include "qed_hsi.h"
#include "qed_hw.h"
#include "qed_int.h"
#include "qed_mcp.h"
#include "qed_reg_addr.h"
#include "qed_sp.h"
#include "qed_sriov.h"

/***************************************************************************
 * Structures & Definitions
 ***************************************************************************/
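/* SPQ_HIGH_PRI_RESERVE_DEFAULT is the number of SPQ ring elements kept in
 * reserve for high-priority ramrods when posting the pending list, and
 * SPQ_BLOCK_SLEEP_LENGTH is the number of 5-10ms polling iterations a
 * blocking ramrod waits for its completion (roughly 5-10 seconds in total)
 * before an MCP drain is requested.
 */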
#define SPQ_HIGH_PRI_RESERVE_DEFAULT	(1)
#define SPQ_BLOCK_SLEEP_LENGTH		(1000)

/***************************************************************************
 * Blocking Imp. (BLOCK/EBLOCK mode)
 ***************************************************************************/
static void qed_spq_blocking_cb(struct qed_hwfn *p_hwfn,
				void *cookie,
				union event_ring_data *data,
				u8 fw_return_code)
{
	struct qed_spq_comp_done *comp_done;

	comp_done = (struct qed_spq_comp_done *)cookie;
	comp_done->done = 0x1;
	comp_done->fw_return_code = fw_return_code;

	/* make update visible to waiting thread */
	smp_wmb();
}
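
/* Wait for a blocking (EBLOCK/BLOCK) ramrod to complete: poll the done flag
 * set by qed_spq_blocking_cb(), sleeping 5-10ms between polls. If the ramrod
 * is still outstanding after SPQ_BLOCK_SLEEP_LENGTH iterations, request an
 * MCP drain and poll one more round before giving up with -EBUSY.
 */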
static int qed_spq_block(struct qed_hwfn *p_hwfn,
			 struct qed_spq_entry *p_ent,
			 u8 *p_fw_ret)
{
	int sleep_count = SPQ_BLOCK_SLEEP_LENGTH;
	struct qed_spq_comp_done *comp_done;
	int rc;

	comp_done = (struct qed_spq_comp_done *)p_ent->comp_cb.cookie;
	while (sleep_count) {
		/* validate we receive completion update */
		smp_rmb();
		if (comp_done->done == 1) {
			if (p_fw_ret)
				*p_fw_ret = comp_done->fw_return_code;
			return 0;
		}
		usleep_range(5000, 10000);
		sleep_count--;
	}

	DP_INFO(p_hwfn, "Ramrod is stuck, requesting MCP drain\n");
	rc = qed_mcp_drain(p_hwfn, p_hwfn->p_main_ptt);
	if (rc != 0)
		DP_NOTICE(p_hwfn, "MCP drain failed\n");

	/* Retry after drain */
	sleep_count = SPQ_BLOCK_SLEEP_LENGTH;
	while (sleep_count) {
		/* validate we receive completion update */
		smp_rmb();
		if (comp_done->done == 1) {
			if (p_fw_ret)
				*p_fw_ret = comp_done->fw_return_code;
			return 0;
		}
		usleep_range(5000, 10000);
		sleep_count--;
	}

	if (comp_done->done == 1) {
		if (p_fw_ret)
			*p_fw_ret = comp_done->fw_return_code;
		return 0;
	}

	DP_NOTICE(p_hwfn, "Ramrod is stuck, MCP drain failed\n");
	return -EBUSY;
}

/***************************************************************************
 * SPQ entries inner API
 ***************************************************************************/
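/* Prepare an SPQ entry for posting: clear its flags and pick the completion
 * callback according to its completion mode. EBLOCK/BLOCK entries get the
 * internal blocking callback above; CB entries keep the caller-supplied
 * callback already stored in comp_cb.
 */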
static int
qed_spq_fill_entry(struct qed_hwfn *p_hwfn,
		   struct qed_spq_entry *p_ent)
{
	p_ent->flags = 0;

	switch (p_ent->comp_mode) {
	case QED_SPQ_MODE_EBLOCK:
	case QED_SPQ_MODE_BLOCK:
		p_ent->comp_cb.function = qed_spq_blocking_cb;
		break;
	case QED_SPQ_MODE_CB:
		break;
	default:
		DP_NOTICE(p_hwfn, "Unknown SPQE completion mode %d\n",
			  p_ent->comp_mode);
		return -EINVAL;
	}

	DP_VERBOSE(p_hwfn, QED_MSG_SPQ,
		   "Ramrod header: [CID 0x%08x CMD 0x%02x protocol 0x%02x] Data pointer: [%08x:%08x] Completion Mode: %s\n",
		   p_ent->elem.hdr.cid,
		   p_ent->elem.hdr.cmd_id,
		   p_ent->elem.hdr.protocol_id,
		   p_ent->elem.data_ptr.hi,
		   p_ent->elem.data_ptr.lo,
		   D_TRINE(p_ent->comp_mode, QED_SPQ_MODE_EBLOCK,
			   QED_SPQ_MODE_BLOCK, "MODE_EBLOCK", "MODE_BLOCK",
			   "MODE_CB"));

	return 0;
}

/***************************************************************************
 * HSI access
 ***************************************************************************/
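/* Program the core connection context backing the SPQ CID: enable the DQ
 * completion flags, select the physical queue for the LB TC and write the
 * SPQ chain and ConsQ chain base addresses into the xstorm context.
 */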
static void qed_spq_hw_initialize(struct qed_hwfn *p_hwfn,
				  struct qed_spq *p_spq)
{
	u16 pq;
	struct qed_cxt_info cxt_info;
	struct core_conn_context *p_cxt;
	union qed_qm_pq_params pq_params;
	int rc;

	cxt_info.iid = p_spq->cid;

	rc = qed_cxt_get_cid_info(p_hwfn, &cxt_info);
	if (rc < 0) {
		DP_NOTICE(p_hwfn, "Cannot find context info for cid=%d\n",
			  p_spq->cid);
		return;
	}

	p_cxt = cxt_info.p_cxt;

	SET_FIELD(p_cxt->xstorm_ag_context.flags10,
		  XSTORM_CORE_CONN_AG_CTX_DQ_CF_EN, 1);
	SET_FIELD(p_cxt->xstorm_ag_context.flags1,
		  XSTORM_CORE_CONN_AG_CTX_DQ_CF_ACTIVE, 1);
	SET_FIELD(p_cxt->xstorm_ag_context.flags9,
		  XSTORM_CORE_CONN_AG_CTX_CONSOLID_PROD_CF_EN, 1);

	/* QM physical queue */
	memset(&pq_params, 0, sizeof(pq_params));
	pq_params.core.tc = LB_TC;
	pq = qed_get_qm_pq(p_hwfn, PROTOCOLID_CORE, &pq_params);
	p_cxt->xstorm_ag_context.physical_q0 = cpu_to_le16(pq);

	p_cxt->xstorm_st_context.spq_base_lo =
		DMA_LO_LE(p_spq->chain.p_phys_addr);
	p_cxt->xstorm_st_context.spq_base_hi =
		DMA_HI_LE(p_spq->chain.p_phys_addr);

	DMA_REGPAIR_LE(p_cxt->xstorm_st_context.consolid_base_addr,
		       p_hwfn->p_consq->chain.p_phys_addr);
}
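
/* Post a single SPQ entry to hardware: stamp the ramrod header's echo field
 * with the current producer index, copy the entry into the next ring element
 * and ring the XCM doorbell with the new producer value. The wmb() calls
 * order the ring-element update before the doorbell write.
 */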
static int qed_spq_hw_post(struct qed_hwfn *p_hwfn,
			   struct qed_spq *p_spq,
			   struct qed_spq_entry *p_ent)
{
	struct qed_chain *p_chain = &p_hwfn->p_spq->chain;
	u16 echo = qed_chain_get_prod_idx(p_chain);
	struct slow_path_element *elem;
	struct core_db_data db;

	p_ent->elem.hdr.echo = cpu_to_le16(echo);
	elem = qed_chain_produce(p_chain);
	if (!elem) {
		DP_NOTICE(p_hwfn, "Failed to produce from SPQ chain\n");
		return -EINVAL;
	}

	*elem = p_ent->elem; /* struct assignment */

	/* send a doorbell on the slow hwfn session */
	memset(&db, 0, sizeof(db));
	SET_FIELD(db.params, CORE_DB_DATA_DEST, DB_DEST_XCM);
	SET_FIELD(db.params, CORE_DB_DATA_AGG_CMD, DB_AGG_CMD_SET);
	SET_FIELD(db.params, CORE_DB_DATA_AGG_VAL_SEL,
		  DQ_XCM_CORE_SPQ_PROD_CMD);
	db.agg_flags = DQ_XCM_CORE_DQ_CF_CMD;
	db.spq_prod = cpu_to_le16(qed_chain_get_prod_idx(p_chain));

	/* make sure the SPQE is updated before the doorbell */
	wmb();

	DOORBELL(p_hwfn, qed_db_addr(p_spq->cid, DQ_DEMS_LEGACY), *(u32 *)&db);

	/* make sure the doorbell was rung */
	wmb();

	DP_VERBOSE(p_hwfn, QED_MSG_SPQ,
		   "Doorbelled [0x%08x, CID 0x%08x] with Flags: %02x agg_params: %02x, prod: %04x\n",
		   qed_db_addr(p_spq->cid, DQ_DEMS_LEGACY),
		   p_spq->cid, db.params, db.agg_flags,
		   qed_chain_get_prod_idx(p_chain));

	return 0;
}

/***************************************************************************
 * Asynchronous events
 ***************************************************************************/
static int
qed_async_event_completion(struct qed_hwfn *p_hwfn,
			   struct event_ring_entry *p_eqe)
{
	switch (p_eqe->protocol_id) {
	case PROTOCOLID_COMMON:
		return qed_sriov_eqe_event(p_hwfn,
					   p_eqe->opcode,
					   p_eqe->echo, &p_eqe->data);
	default:
		DP_NOTICE(p_hwfn,
			  "Unknown Async completion for protocol: %d\n",
			  p_eqe->protocol_id);
		return -EINVAL;
	}
}

/***************************************************************************
 * EQ API
 ***************************************************************************/
void qed_eq_prod_update(struct qed_hwfn *p_hwfn,
			u16 prod)
{
	u32 addr = GTT_BAR0_MAP_REG_USDM_RAM +
		   USTORM_EQE_CONS_OFFSET(p_hwfn->rel_pf_id);

	REG_WR16(p_hwfn, addr, prod);
	/* keep prod updates ordered */
	mmiowb();
}
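
/* Event-queue handler: walk the EQ chain from the local consumer up to the
 * snapshot of the firmware consumer, dispatching each EQE either to the
 * async handler (ASYNC flag set) or to qed_spq_completion() via its echo
 * value, and finally publish the new producer back to firmware.
 */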
int qed_eq_completion(struct qed_hwfn *p_hwfn,
		      void *cookie)
{
	struct qed_eq *p_eq = cookie;
	struct qed_chain *p_chain = &p_eq->chain;
	int rc = 0;

	/* take a snapshot of the FW consumer */
	u16 fw_cons_idx = le16_to_cpu(*p_eq->p_fw_cons);

	DP_VERBOSE(p_hwfn, QED_MSG_SPQ, "fw_cons_idx %x\n", fw_cons_idx);

	/* Need to guarantee the fw_cons index we use points to a usable
	 * element (to comply with our chain), so our macros would comply
	 */
	if ((fw_cons_idx & qed_chain_get_usable_per_page(p_chain)) ==
	    qed_chain_get_usable_per_page(p_chain))
		fw_cons_idx += qed_chain_get_unusable_per_page(p_chain);

	/* Complete current segment of eq entries */
	while (fw_cons_idx != qed_chain_get_cons_idx(p_chain)) {
		struct event_ring_entry *p_eqe = qed_chain_consume(p_chain);

		if (!p_eqe) {
			rc = -EINVAL;
			break;
		}

		DP_VERBOSE(p_hwfn, QED_MSG_SPQ,
			   "op %x prot %x res0 %x echo %x fwret %x flags %x\n",
			   p_eqe->opcode,
			   p_eqe->protocol_id,
			   p_eqe->reserved0,
			   le16_to_cpu(p_eqe->echo),
			   p_eqe->fw_return_code,
			   p_eqe->flags);

		if (GET_FIELD(p_eqe->flags, EVENT_RING_ENTRY_ASYNC)) {
			if (qed_async_event_completion(p_hwfn, p_eqe))
				rc = -EINVAL;
		} else if (qed_spq_completion(p_hwfn,
					      p_eqe->echo,
					      p_eqe->fw_return_code,
					      &p_eqe->data)) {
			rc = -EINVAL;
		}

		qed_chain_recycle_consumed(p_chain);
	}

	qed_eq_prod_update(p_hwfn, qed_chain_get_prod_idx(p_chain));

	return rc;
}
struct qed_eq *qed_eq_alloc(struct qed_hwfn *p_hwfn,
			    u16 num_elem)
{
	struct qed_eq *p_eq;

	/* Allocate EQ struct */
	p_eq = kzalloc(sizeof(*p_eq), GFP_KERNEL);
	if (!p_eq) {
		DP_NOTICE(p_hwfn, "Failed to allocate `struct qed_eq'\n");
		return NULL;
	}

	/* Allocate and initialize EQ chain */
	if (qed_chain_alloc(p_hwfn->cdev,
			    QED_CHAIN_USE_TO_PRODUCE,
			    QED_CHAIN_MODE_PBL,
			    num_elem,
			    sizeof(union event_ring_element),
			    &p_eq->chain)) {
		DP_NOTICE(p_hwfn, "Failed to allocate eq chain\n");
		goto eq_allocate_fail;
	}

	/* register EQ completion on the SP SB */
	qed_int_register_cb(p_hwfn,
			    qed_eq_completion,
			    p_eq,
			    &p_eq->eq_sb_index,
			    &p_eq->p_fw_cons);

	return p_eq;

eq_allocate_fail:
	qed_eq_free(p_hwfn, p_eq);
	return NULL;
}
void qed_eq_setup(struct qed_hwfn *p_hwfn,
		  struct qed_eq *p_eq)
{
	qed_chain_reset(&p_eq->chain);
}

void qed_eq_free(struct qed_hwfn *p_hwfn,
		 struct qed_eq *p_eq)
{
	if (!p_eq)
		return;
	qed_chain_free(p_hwfn->cdev, &p_eq->chain);
	kfree(p_eq);
}

/***************************************************************************
 * CQE API - manipulate EQ functionality
 ***************************************************************************/
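/* Slow-path Rx CQEs carry the echo value of the ramrod that triggered them;
 * on a PF they are used here only to complete that ramrod. On a VF the CQE
 * is simply acknowledged and ignored.
 */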
static int qed_cqe_completion(struct qed_hwfn *p_hwfn,
			      struct eth_slow_path_rx_cqe *cqe,
			      enum protocol_type protocol)
{
	if (IS_VF(p_hwfn->cdev))
		return 0;

	/* @@@tmp - it's possible we'll eventually want to handle some
	 * actual commands that can arrive here, but for now this is only
	 * used to complete the ramrod using the echo value on the cqe
	 */
	return qed_spq_completion(p_hwfn, cqe->echo, 0, NULL);
}

int qed_eth_cqe_completion(struct qed_hwfn *p_hwfn,
			   struct eth_slow_path_rx_cqe *cqe)
{
	int rc;

	rc = qed_cqe_completion(p_hwfn, cqe, PROTOCOLID_ETH);
	if (rc)
		DP_NOTICE(p_hwfn,
			  "Failed to handle RXQ CQE [cmd 0x%02x]\n",
			  cqe->ramrod_cmd_id);

	return rc;
}

/***************************************************************************
 * Slow hwfn Queue (spq)
 ***************************************************************************/
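/* (Re)initialize the SPQ: reset the four entry lists, carve the preallocated
 * coherent buffer into qed_spq_entry elements whose data pointers reference
 * their own embedded ramrod data, clear the statistics and the completion
 * bitmap, acquire the CORE CID and program the connection context.
 */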
void qed_spq_setup(struct qed_hwfn *p_hwfn)
{
	struct qed_spq *p_spq = p_hwfn->p_spq;
	struct qed_spq_entry *p_virt = NULL;
	dma_addr_t p_phys = 0;
	unsigned int i = 0;

	INIT_LIST_HEAD(&p_spq->pending);
	INIT_LIST_HEAD(&p_spq->completion_pending);
	INIT_LIST_HEAD(&p_spq->free_pool);
	INIT_LIST_HEAD(&p_spq->unlimited_pending);
	spin_lock_init(&p_spq->lock);

	/* SPQ empty pool */
	p_phys = p_spq->p_phys + offsetof(struct qed_spq_entry, ramrod);
	p_virt = p_spq->p_virt;

	for (i = 0; i < p_spq->chain.capacity; i++) {
		DMA_REGPAIR_LE(p_virt->elem.data_ptr, p_phys);

		list_add_tail(&p_virt->list, &p_spq->free_pool);

		p_virt++;
		p_phys += sizeof(struct qed_spq_entry);
	}

	/* Statistics */
	p_spq->normal_count = 0;
	p_spq->comp_count = 0;
	p_spq->comp_sent_count = 0;
	p_spq->unlimited_pending_count = 0;

	bitmap_zero(p_spq->p_comp_bitmap, SPQ_RING_SIZE);
	p_spq->comp_bitmap_idx = 0;

	/* SPQ cid, cannot fail */
	qed_cxt_acquire_cid(p_hwfn, PROTOCOLID_CORE, &p_spq->cid);
	qed_spq_hw_initialize(p_hwfn, p_spq);

	/* reset the chain itself */
	qed_chain_reset(&p_spq->chain);
}
int qed_spq_alloc(struct qed_hwfn *p_hwfn)
{
	struct qed_spq *p_spq = NULL;
	dma_addr_t p_phys = 0;
	struct qed_spq_entry *p_virt = NULL;

	/* SPQ struct */
	p_spq = kzalloc(sizeof(struct qed_spq), GFP_KERNEL);
	if (!p_spq) {
		DP_NOTICE(p_hwfn, "Failed to allocate `struct qed_spq'\n");
		return -ENOMEM;
	}

	/* SPQ ring */
	if (qed_chain_alloc(p_hwfn->cdev,
			    QED_CHAIN_USE_TO_PRODUCE,
			    QED_CHAIN_MODE_SINGLE,
			    0,	/* N/A when the mode is SINGLE */
			    sizeof(struct slow_path_element),
			    &p_spq->chain)) {
		DP_NOTICE(p_hwfn, "Failed to allocate spq chain\n");
		goto spq_allocate_fail;
	}

	/* allocate and fill the SPQ elements (incl. ramrod data list) */
	p_virt = dma_alloc_coherent(&p_hwfn->cdev->pdev->dev,
				    p_spq->chain.capacity *
				    sizeof(struct qed_spq_entry),
				    &p_phys,
				    GFP_KERNEL);
	if (!p_virt)
		goto spq_allocate_fail;

	p_spq->p_virt = p_virt;
	p_spq->p_phys = p_phys;
	p_hwfn->p_spq = p_spq;

	return 0;

spq_allocate_fail:
	qed_chain_free(p_hwfn->cdev, &p_spq->chain);
	kfree(p_spq);
	return -ENOMEM;
}
void qed_spq_free(struct qed_hwfn *p_hwfn)
{
	struct qed_spq *p_spq = p_hwfn->p_spq;

	if (!p_spq)
		return;

	if (p_spq->p_virt)
		dma_free_coherent(&p_hwfn->cdev->pdev->dev,
				  p_spq->chain.capacity *
				  sizeof(struct qed_spq_entry),
				  p_spq->p_virt,
				  p_spq->p_phys);

	qed_chain_free(p_hwfn->cdev, &p_spq->chain);
	kfree(p_spq);
}
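
/* Hand out an SPQ entry to a caller that wants to post a ramrod. Entries come
 * from the preallocated free_pool when possible; if the pool is exhausted a
 * new entry is kzalloc'ed (GFP_ATOMIC) and tagged for the unlimited_pending
 * queue, so later code knows it must be kfree'd rather than returned to the
 * pool.
 */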
int
qed_spq_get_entry(struct qed_hwfn *p_hwfn,
		  struct qed_spq_entry **pp_ent)
{
	struct qed_spq *p_spq = p_hwfn->p_spq;
	struct qed_spq_entry *p_ent = NULL;
	int rc = 0;

	spin_lock_bh(&p_spq->lock);

	if (list_empty(&p_spq->free_pool)) {
		p_ent = kzalloc(sizeof(*p_ent), GFP_ATOMIC);
		if (!p_ent) {
			rc = -ENOMEM;
			goto out_unlock;
		}
		p_ent->queue = &p_spq->unlimited_pending;
	} else {
		p_ent = list_first_entry(&p_spq->free_pool,
					 struct qed_spq_entry,
					 list);
		list_del(&p_ent->list);
		p_ent->queue = &p_spq->pending;
	}

	*pp_ent = p_ent;

out_unlock:
	spin_unlock_bh(&p_spq->lock);
	return rc;
}
/* Locked variant; Should be called while the SPQ lock is taken */
static void __qed_spq_return_entry(struct qed_hwfn *p_hwfn,
				   struct qed_spq_entry *p_ent)
{
	list_add_tail(&p_ent->list, &p_hwfn->p_spq->free_pool);
}

void qed_spq_return_entry(struct qed_hwfn *p_hwfn,
			  struct qed_spq_entry *p_ent)
{
	spin_lock_bh(&p_hwfn->p_spq->lock);
	__qed_spq_return_entry(p_hwfn, p_ent);
	spin_unlock_bh(&p_hwfn->p_spq->lock);
}
/**
 * @brief qed_spq_add_entry - adds a new entry to the pending
 *        list. Should be used while lock is being held.
 *
 * Adds an entry to the pending list if there is room (an empty
 * element is available in the free_pool), or else places the
 * entry in the unlimited_pending pool.
 *
 * @param p_hwfn
 * @param p_ent
 * @param priority
 *
 * @return int
 */
static int
qed_spq_add_entry(struct qed_hwfn *p_hwfn,
		  struct qed_spq_entry *p_ent,
		  enum spq_priority priority)
{
	struct qed_spq *p_spq = p_hwfn->p_spq;

	if (p_ent->queue == &p_spq->unlimited_pending) {
		if (list_empty(&p_spq->free_pool)) {
			list_add_tail(&p_ent->list, &p_spq->unlimited_pending);
			p_spq->unlimited_pending_count++;

			return 0;
		} else {
			struct qed_spq_entry *p_en2;

			p_en2 = list_first_entry(&p_spq->free_pool,
						 struct qed_spq_entry,
						 list);
			list_del(&p_en2->list);

			/* Copy the ring element physical pointer to the new
			 * entry, since we are about to override the entire ring
			 * entry and don't want to lose the pointer.
			 */
			p_ent->elem.data_ptr = p_en2->elem.data_ptr;

			*p_en2 = *p_ent;

			/* EBLOCK responsible to free the allocated p_ent */
			if (p_ent->comp_mode != QED_SPQ_MODE_EBLOCK)
				kfree(p_ent);

			p_ent = p_en2;
		}
	}

	/* entry is to be placed in 'pending' queue */
	switch (priority) {
	case QED_SPQ_PRIORITY_NORMAL:
		list_add_tail(&p_ent->list, &p_spq->pending);
		p_spq->normal_count++;
		break;
	case QED_SPQ_PRIORITY_HIGH:
		list_add(&p_ent->list, &p_spq->pending);
		p_spq->high_count++;
		break;
	default:
		return -EINVAL;
	}

	return 0;
}

/***************************************************************************
 * Accessor
 ***************************************************************************/
u32 qed_spq_get_cid(struct qed_hwfn *p_hwfn)
{
	if (!p_hwfn->p_spq)
		return 0xffffffff;	/* illegal */
	return p_hwfn->p_spq->cid;
}

/***************************************************************************
 * Posting new Ramrods
 ***************************************************************************/
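/* Move entries from @head onto completion_pending and post them to hardware,
 * but only while the SPQ chain still has more than @keep_reserve free
 * elements; the reserve keeps room available for high-priority ramrods.
 */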
static int qed_spq_post_list(struct qed_hwfn *p_hwfn,
			     struct list_head *head,
			     u32 keep_reserve)
{
	struct qed_spq *p_spq = p_hwfn->p_spq;
	int rc;

	while (qed_chain_get_elem_left(&p_spq->chain) > keep_reserve &&
	       !list_empty(head)) {
		struct qed_spq_entry *p_ent =
			list_first_entry(head, struct qed_spq_entry, list);

		list_del(&p_ent->list);
		list_add_tail(&p_ent->list, &p_spq->completion_pending);
		p_spq->comp_sent_count++;

		rc = qed_spq_hw_post(p_hwfn, p_spq, p_ent);
		if (rc) {
			list_del(&p_ent->list);
			__qed_spq_return_entry(p_hwfn, p_ent);
			return rc;
		}
	}

	return 0;
}
static int qed_spq_pend_post(struct qed_hwfn *p_hwfn)
{
	struct qed_spq *p_spq = p_hwfn->p_spq;
	struct qed_spq_entry *p_ent = NULL;

	while (!list_empty(&p_spq->free_pool)) {
		if (list_empty(&p_spq->unlimited_pending))
			break;

		p_ent = list_first_entry(&p_spq->unlimited_pending,
					 struct qed_spq_entry,
					 list);
		if (!p_ent)
			return -EINVAL;

		list_del(&p_ent->list);

		qed_spq_add_entry(p_hwfn, p_ent, p_ent->priority);
	}

	return qed_spq_post_list(p_hwfn, &p_spq->pending,
				 SPQ_HIGH_PRI_RESERVE_DEFAULT);
}
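
/* Post a ramrod: prepare the entry, queue it on the pending list under the
 * SPQ lock and kick qed_spq_pend_post(). For QED_SPQ_MODE_EBLOCK the call
 * then blocks in qed_spq_block() until the completion arrives, after which
 * the entry is either returned to the free pool or, if it came from the
 * unlimited_pending path, kfree'd here.
 */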
int qed_spq_post(struct qed_hwfn *p_hwfn,
		 struct qed_spq_entry *p_ent,
		 u8 *fw_return_code)
{
	int rc = 0;
	struct qed_spq *p_spq = p_hwfn ? p_hwfn->p_spq : NULL;
	bool b_ret_ent = true;

	if (!p_hwfn)
		return -EINVAL;

	if (!p_ent) {
		DP_NOTICE(p_hwfn, "Got a NULL pointer\n");
		return -EINVAL;
	}

	/* Complete the entry */
	rc = qed_spq_fill_entry(p_hwfn, p_ent);

	spin_lock_bh(&p_spq->lock);

	/* Check return value after LOCK is taken for cleaner error flow */
	if (rc)
		goto spq_post_fail;

	/* Add the request to the pending queue */
	rc = qed_spq_add_entry(p_hwfn, p_ent, p_ent->priority);
	if (rc)
		goto spq_post_fail;

	rc = qed_spq_pend_post(p_hwfn);
	if (rc) {
		/* Since it's possible that pending failed for a different
		 * entry [although unlikely], the failed entry was already
		 * dealt with; No need to return it here.
		 */
		b_ret_ent = false;
		goto spq_post_fail;
	}

	spin_unlock_bh(&p_spq->lock);

	if (p_ent->comp_mode == QED_SPQ_MODE_EBLOCK) {
		/* For entries in QED BLOCK mode, the completion code cannot
		 * perform the necessary cleanup - if it did, we couldn't
		 * access p_ent here to see whether it's successful or not.
		 * Thus, after gaining the answer perform the cleanup here.
		 */
		rc = qed_spq_block(p_hwfn, p_ent, fw_return_code);

		if (p_ent->queue == &p_spq->unlimited_pending) {
			/* This is an allocated p_ent which does not need to
			 * return to pool.
			 */
			kfree(p_ent);
			return rc;
		}

		if (rc)
			goto spq_post_fail2;

		/* return to pool */
		qed_spq_return_entry(p_hwfn, p_ent);
	}
	return rc;

spq_post_fail2:
	spin_lock_bh(&p_spq->lock);
	list_del(&p_ent->list);
	qed_chain_return_produced(&p_spq->chain);

spq_post_fail:
	/* return to the free pool */
	if (b_ret_ent)
		__qed_spq_return_entry(p_hwfn, p_ent);
	spin_unlock_bh(&p_spq->lock);

	return rc;
}
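
/* Complete the pending ramrod whose header echo matches @echo. Because
 * completions can arrive out of order, completed positions are recorded in a
 * bitmap and ring elements are only returned (qed_chain_return_produced())
 * for the first contiguous run of completed entries. The completion callback
 * runs without the SPQ lock held, so it may itself post another ramrod.
 */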
int qed_spq_completion(struct qed_hwfn *p_hwfn,
		       __le16 echo,
		       u8 fw_return_code,
		       union event_ring_data *p_data)
{
	struct qed_spq *p_spq;
	struct qed_spq_entry *p_ent = NULL;
	struct qed_spq_entry *tmp;
	struct qed_spq_entry *found = NULL;
	int rc;

	if (!p_hwfn)
		return -EINVAL;

	p_spq = p_hwfn->p_spq;
	if (!p_spq)
		return -EINVAL;

	spin_lock_bh(&p_spq->lock);
	list_for_each_entry_safe(p_ent, tmp, &p_spq->completion_pending,
				 list) {
		if (p_ent->elem.hdr.echo == echo) {
			u16 pos = le16_to_cpu(echo) % SPQ_RING_SIZE;

			list_del(&p_ent->list);

			/* Avoid overriding of SPQ entries when getting
			 * out-of-order completions, by marking the completions
			 * in a bitmap and increasing the chain consumer only
			 * for the first successive completed entries.
			 */
			bitmap_set(p_spq->p_comp_bitmap, pos, SPQ_RING_SIZE);

			while (test_bit(p_spq->comp_bitmap_idx,
					p_spq->p_comp_bitmap)) {
				bitmap_clear(p_spq->p_comp_bitmap,
					     p_spq->comp_bitmap_idx,
					     SPQ_RING_SIZE);
				p_spq->comp_bitmap_idx++;
				qed_chain_return_produced(&p_spq->chain);
			}

			p_spq->comp_count++;
			found = p_ent;
			break;
		}

		/* This is relatively uncommon - depends on scenarios
		 * which have multiple per-PF sent ramrods.
		 */
		DP_VERBOSE(p_hwfn, QED_MSG_SPQ,
			   "Got completion for echo %04x - doesn't match echo %04x in completion pending list\n",
			   le16_to_cpu(echo),
			   le16_to_cpu(p_ent->elem.hdr.echo));
	}

	/* Release lock before callback, as callback may post
	 * an additional ramrod.
	 */
	spin_unlock_bh(&p_spq->lock);

	if (!found) {
		DP_NOTICE(p_hwfn,
			  "Failed to find an entry this EQE completes\n");
		return -EEXIST;
	}

	DP_VERBOSE(p_hwfn, QED_MSG_SPQ, "Complete: func %p cookie %p\n",
		   p_ent->comp_cb.function, p_ent->comp_cb.cookie);
	if (found->comp_cb.function)
		found->comp_cb.function(p_hwfn, found->comp_cb.cookie, p_data,
					fw_return_code);

	if ((found->comp_mode != QED_SPQ_MODE_EBLOCK) ||
	    (found->queue == &p_spq->unlimited_pending))
		/* EBLOCK is responsible for returning its own entry into the
		 * free list, unless it originally added the entry into the
		 * unlimited pending list.
		 */
		qed_spq_return_entry(p_hwfn, found);

	/* Attempt to post pending requests */
	spin_lock_bh(&p_spq->lock);
	rc = qed_spq_pend_post(p_hwfn);
	spin_unlock_bh(&p_spq->lock);

	return rc;
}
struct qed_consq *qed_consq_alloc(struct qed_hwfn *p_hwfn)
{
	struct qed_consq *p_consq;

	/* Allocate ConsQ struct */
	p_consq = kzalloc(sizeof(*p_consq), GFP_KERNEL);
	if (!p_consq) {
		DP_NOTICE(p_hwfn, "Failed to allocate `struct qed_consq'\n");
		return NULL;
	}

	/* Allocate and initialize ConsQ chain */
	if (qed_chain_alloc(p_hwfn->cdev,
			    QED_CHAIN_USE_TO_PRODUCE,
			    QED_CHAIN_MODE_PBL,
			    QED_CHAIN_PAGE_SIZE / 0x80,
			    0x80,
			    &p_consq->chain)) {
		DP_NOTICE(p_hwfn, "Failed to allocate consq chain");
		goto consq_allocate_fail;
	}

	return p_consq;

consq_allocate_fail:
	qed_consq_free(p_hwfn, p_consq);
	return NULL;
}
void qed_consq_setup(struct qed_hwfn *p_hwfn,
		     struct qed_consq *p_consq)
{
	qed_chain_reset(&p_consq->chain);
}

void qed_consq_free(struct qed_hwfn *p_hwfn,
		    struct qed_consq *p_consq)
{
	if (!p_consq)
		return;
	qed_chain_free(p_hwfn->cdev, &p_consq->chain);
	kfree(p_consq);
}