nitrox_reqmgr.c

// SPDX-License-Identifier: GPL-2.0
#include <linux/gfp.h>
#include <linux/workqueue.h>
#include <crypto/internal/skcipher.h>

#include "nitrox_dev.h"
#include "nitrox_req.h"
#include "nitrox_csr.h"

/* SLC_STORE_INFO */
#define MIN_UDD_LEN 16
/* PKT_IN_HDR + SLC_STORE_INFO */
#define FDATA_SIZE 32
/* Base destination port for the solicited requests */
#define SOLICIT_BASE_DPORT 256
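
/*
 * Sentinel value written to the ORH and completion words before a request
 * is posted; the SE core overwrites both when the request completes.
 */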
#define PENDING_SIG 0xFFFFFFFFFFFFFFFFUL

#define REQ_NOT_POSTED 1
#define REQ_BACKLOG 2
#define REQ_POSTED 3
/**
 * Response codes from SE microcode
 * 0x00 - Success
 *   Completion with no error
 * 0x43 - ERR_GC_DATA_LEN_INVALID
 *   Invalid Data length if Encryption Data length is
 *   less than 16 bytes for AES-XTS and AES-CTS.
 * 0x45 - ERR_GC_CTX_LEN_INVALID
 *   Invalid context length: CTXL != 23 words.
 * 0x4F - ERR_GC_DOCSIS_CIPHER_INVALID
 *   DOCSIS support is enabled with other than
 *   AES/DES-CBC mode encryption.
 * 0x50 - ERR_GC_DOCSIS_OFFSET_INVALID
 *   Authentication offset is other than 0 with
 *   Encryption IV source = 0.
 *   Authentication offset is other than 8 (DES)/16 (AES)
 *   with Encryption IV source = 1
 * 0x51 - ERR_GC_CRC32_INVALID_SELECTION
 *   CRC32 is enabled for other than DOCSIS encryption.
 * 0x52 - ERR_GC_AES_CCM_FLAG_INVALID
 *   Invalid flag options in AES-CCM IV.
 */
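
/*
 * incr_index - advance a command queue ring index by @count entries,
 * wrapping around once the end of the @max-entry ring is reached.
 */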
static inline int incr_index(int index, int count, int max)
{
        if ((index + count) >= max)
                index = index + count - max;
        else
                index += count;

        return index;
}
/**
 * softreq_unmap_sgbufs - unmap and free the sg lists.
 * @sr: soft request to unmap
 */
static void softreq_unmap_sgbufs(struct nitrox_softreq *sr)
{
        struct nitrox_device *ndev = sr->ndev;
        struct device *dev = DEV(ndev);
        struct nitrox_sglist *sglist;

        /* unmap in sgbuf */
        sglist = sr->in.sglist;
        if (!sglist)
                goto out_unmap;

        /* unmap iv */
        dma_unmap_single(dev, sglist->dma, sglist->len, DMA_BIDIRECTIONAL);
        /* unmap src sglist */
        dma_unmap_sg(dev, sr->in.buf, (sr->in.map_bufs_cnt - 1), sr->in.dir);
        /* unmap gather component */
        dma_unmap_single(dev, sr->in.dma, sr->in.len, DMA_TO_DEVICE);
        kfree(sr->in.sglist);
        kfree(sr->in.sgcomp);
        sr->in.sglist = NULL;
        sr->in.buf = NULL;
        sr->in.map_bufs_cnt = 0;

out_unmap:
        /* unmap out sgbuf */
        sglist = sr->out.sglist;
        if (!sglist)
                return;

        /* unmap orh */
        dma_unmap_single(dev, sr->resp.orh_dma, ORH_HLEN, sr->out.dir);

        /* unmap dst sglist */
        if (!sr->inplace) {
                dma_unmap_sg(dev, sr->out.buf, (sr->out.map_bufs_cnt - 3),
                             sr->out.dir);
        }

        /* unmap completion */
        dma_unmap_single(dev, sr->resp.completion_dma, COMP_HLEN, sr->out.dir);
        /* unmap scatter component */
        dma_unmap_single(dev, sr->out.dma, sr->out.len, DMA_TO_DEVICE);
        kfree(sr->out.sglist);
        kfree(sr->out.sgcomp);
        sr->out.sglist = NULL;
        sr->out.buf = NULL;
        sr->out.map_bufs_cnt = 0;
}
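
/* softreq_destroy - release the DMA mappings and free the soft request */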
static void softreq_destroy(struct nitrox_softreq *sr)
{
        softreq_unmap_sgbufs(sr);
        kfree(sr);
}
/**
 * create_sg_component - create SG components for N5 device.
 * @sr: Request structure
 * @sgtbl: SG table
 * @map_nents: number of mapped sglist entries
 *
 * Component structure
 *
 *  63     48 47     32 31     16 15      0
 *  --------------------------------------
 *  |  LEN0   |  LEN1  |  LEN2  |  LEN3  |
 *  |-------------------------------------
 *  |               PTR0                 |
 *  --------------------------------------
 *  |               PTR1                 |
 *  --------------------------------------
 *  |               PTR2                 |
 *  --------------------------------------
 *  |               PTR3                 |
 *  --------------------------------------
 *
 * Returns 0 if success or a negative errno code on error.
 */
static int create_sg_component(struct nitrox_softreq *sr,
                               struct nitrox_sgtable *sgtbl, int map_nents)
{
        struct nitrox_device *ndev = sr->ndev;
        struct nitrox_sgcomp *sgcomp;
        struct nitrox_sglist *sglist;
        dma_addr_t dma;
        size_t sz_comp;
        int i, j, nr_sgcomp;

        nr_sgcomp = roundup(map_nents, 4) / 4;

        /* each component holds 4 dma pointers */
        sz_comp = nr_sgcomp * sizeof(*sgcomp);
        sgcomp = kzalloc(sz_comp, sr->gfp);
        if (!sgcomp)
                return -ENOMEM;

        sgtbl->sgcomp = sgcomp;
        sgtbl->nr_sgcomp = nr_sgcomp;

        sglist = sgtbl->sglist;
        /* populate device sg component */
        for (i = 0; i < nr_sgcomp; i++) {
                for (j = 0; j < 4; j++) {
                        sgcomp->len[j] = cpu_to_be16(sglist->len);
                        sgcomp->dma[j] = cpu_to_be64(sglist->dma);
                        sglist++;
                }
                sgcomp++;
        }

        /* map the device sg component */
        dma = dma_map_single(DEV(ndev), sgtbl->sgcomp, sz_comp, DMA_TO_DEVICE);
        if (dma_mapping_error(DEV(ndev), dma)) {
                kfree(sgtbl->sgcomp);
                sgtbl->sgcomp = NULL;
                return -ENOMEM;
        }

        sgtbl->dma = dma;
        sgtbl->len = sz_comp;

        return 0;
}
/**
 * dma_map_inbufs - DMA map input sglist and create sglist component
 *                  for N5 device.
 * @sr: Request structure
 * @req: Crypto request structure
 *
 * Returns 0 if successful or a negative errno code on error.
 */
static int dma_map_inbufs(struct nitrox_softreq *sr,
                          struct se_crypto_request *req)
{
        struct device *dev = DEV(sr->ndev);
        struct scatterlist *sg = req->src;
        struct nitrox_sglist *glist;
        int i, nents, ret = 0;
        dma_addr_t dma;
        size_t sz;

        nents = sg_nents(req->src);

        /* create gather list for the IV and src entries */
        sz = roundup((1 + nents), 4) * sizeof(*glist);
        glist = kzalloc(sz, sr->gfp);
        if (!glist)
                return -ENOMEM;

        sr->in.sglist = glist;
        /* map IV */
        dma = dma_map_single(dev, &req->iv, req->ivsize, DMA_BIDIRECTIONAL);
        if (dma_mapping_error(dev, dma)) {
                ret = -EINVAL;
                goto iv_map_err;
        }

        sr->in.dir = (req->src == req->dst) ? DMA_BIDIRECTIONAL : DMA_TO_DEVICE;
        /* map src entries */
        nents = dma_map_sg(dev, req->src, nents, sr->in.dir);
        if (!nents) {
                ret = -EINVAL;
                goto src_map_err;
        }
        sr->in.buf = req->src;

        /* store the mappings */
        glist->len = req->ivsize;
        glist->dma = dma;
        glist++;
        sr->in.total_bytes += req->ivsize;

        for_each_sg(req->src, sg, nents, i) {
                glist->len = sg_dma_len(sg);
                glist->dma = sg_dma_address(sg);
                sr->in.total_bytes += glist->len;
                glist++;
        }
        /* roundup map count to align with entries in sg component */
        sr->in.map_bufs_cnt = (1 + nents);

        /* create NITROX gather component */
        ret = create_sg_component(sr, &sr->in, sr->in.map_bufs_cnt);
        if (ret)
                goto incomp_err;

        return 0;

incomp_err:
        dma_unmap_sg(dev, req->src, nents, sr->in.dir);
        sr->in.map_bufs_cnt = 0;
src_map_err:
        dma_unmap_single(dev, dma, req->ivsize, DMA_BIDIRECTIONAL);
iv_map_err:
        kfree(sr->in.sglist);
        sr->in.sglist = NULL;
        return ret;
}
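
/*
 * dma_map_outbufs - DMA map the ORH, destination sglist and completion
 * header, and create the scatter component for the N5 device.
 */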
static int dma_map_outbufs(struct nitrox_softreq *sr,
                           struct se_crypto_request *req)
{
        struct device *dev = DEV(sr->ndev);
        struct nitrox_sglist *glist = sr->in.sglist;
        struct nitrox_sglist *slist;
        struct scatterlist *sg;
        int i, nents, map_bufs_cnt, ret = 0;
        size_t sz;

        nents = sg_nents(req->dst);

        /* create scatter list for ORH, IV, dst entries and Completion header */
        sz = roundup((3 + nents), 4) * sizeof(*slist);
        slist = kzalloc(sz, sr->gfp);
        if (!slist)
                return -ENOMEM;

        sr->out.sglist = slist;
        sr->out.dir = DMA_BIDIRECTIONAL;
        /* map ORH */
        sr->resp.orh_dma = dma_map_single(dev, &sr->resp.orh, ORH_HLEN,
                                          sr->out.dir);
        if (dma_mapping_error(dev, sr->resp.orh_dma)) {
                ret = -EINVAL;
                goto orh_map_err;
        }

        /* map completion */
        sr->resp.completion_dma = dma_map_single(dev, &sr->resp.completion,
                                                 COMP_HLEN, sr->out.dir);
        if (dma_mapping_error(dev, sr->resp.completion_dma)) {
                ret = -EINVAL;
                goto compl_map_err;
        }

        sr->inplace = (req->src == req->dst) ? true : false;
        /* out of place: map the dst entries separately */
        if (!sr->inplace) {
                nents = dma_map_sg(dev, req->dst, nents, sr->out.dir);
                if (!nents) {
                        ret = -EINVAL;
                        goto dst_map_err;
                }
        }
        sr->out.buf = req->dst;

        /* store the mappings */
        /* orh */
        slist->len = ORH_HLEN;
        slist->dma = sr->resp.orh_dma;
        slist++;

        /* copy the glist mappings */
        if (sr->inplace) {
                nents = sr->in.map_bufs_cnt - 1;
                map_bufs_cnt = sr->in.map_bufs_cnt;
                while (map_bufs_cnt--) {
                        slist->len = glist->len;
                        slist->dma = glist->dma;
                        slist++;
                        glist++;
                }
        } else {
                /* copy iv mapping */
                slist->len = glist->len;
                slist->dma = glist->dma;
                slist++;
                /* copy remaining maps */
                for_each_sg(req->dst, sg, nents, i) {
                        slist->len = sg_dma_len(sg);
                        slist->dma = sg_dma_address(sg);
                        slist++;
                }
        }

        /* completion */
        slist->len = COMP_HLEN;
        slist->dma = sr->resp.completion_dma;

        sr->out.map_bufs_cnt = (3 + nents);

        ret = create_sg_component(sr, &sr->out, sr->out.map_bufs_cnt);
        if (ret)
                goto outcomp_map_err;

        return 0;

outcomp_map_err:
        if (!sr->inplace)
                dma_unmap_sg(dev, req->dst, nents, sr->out.dir);
        sr->out.map_bufs_cnt = 0;
        sr->out.buf = NULL;
dst_map_err:
        dma_unmap_single(dev, sr->resp.completion_dma, COMP_HLEN, sr->out.dir);
        sr->resp.completion_dma = 0;
compl_map_err:
        dma_unmap_single(dev, sr->resp.orh_dma, ORH_HLEN, sr->out.dir);
        sr->resp.orh_dma = 0;
orh_map_err:
        kfree(sr->out.sglist);
        sr->out.sglist = NULL;
        return ret;
}
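
/*
 * softreq_map_iobuf - map both the input and output buffers of a request;
 * if the output mapping fails, the input mappings are torn down as well.
 */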
static inline int softreq_map_iobuf(struct nitrox_softreq *sr,
                                    struct se_crypto_request *creq)
{
        int ret;

        ret = dma_map_inbufs(sr, creq);
        if (ret)
                return ret;

        ret = dma_map_outbufs(sr, creq);
        if (ret)
                softreq_unmap_sgbufs(sr);

        return ret;
}
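
/*
 * backlog_list_add - queue a request that could not be posted because the
 * command queue was full; it is retried later by post_backlog_cmds().
 */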
static inline void backlog_list_add(struct nitrox_softreq *sr,
                                    struct nitrox_cmdq *cmdq)
{
        INIT_LIST_HEAD(&sr->backlog);

        spin_lock_bh(&cmdq->backlog_qlock);
        list_add_tail(&sr->backlog, &cmdq->backlog_head);
        atomic_inc(&cmdq->backlog_count);
        atomic_set(&sr->status, REQ_BACKLOG);
        spin_unlock_bh(&cmdq->backlog_qlock);
}

static inline void response_list_add(struct nitrox_softreq *sr,
                                     struct nitrox_cmdq *cmdq)
{
        INIT_LIST_HEAD(&sr->response);

        spin_lock_bh(&cmdq->resp_qlock);
        list_add_tail(&sr->response, &cmdq->response_head);
        spin_unlock_bh(&cmdq->resp_qlock);
}

static inline void response_list_del(struct nitrox_softreq *sr,
                                     struct nitrox_cmdq *cmdq)
{
        spin_lock_bh(&cmdq->resp_qlock);
        list_del(&sr->response);
        spin_unlock_bh(&cmdq->resp_qlock);
}

static struct nitrox_softreq *
get_first_response_entry(struct nitrox_cmdq *cmdq)
{
        return list_first_entry_or_null(&cmdq->response_head,
                                        struct nitrox_softreq, response);
}
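
/*
 * cmdq_full - reserve a slot in the command queue by bumping pending_count;
 * if that would exceed the queue length, release the slot and report full.
 */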
static inline bool cmdq_full(struct nitrox_cmdq *cmdq, int qlen)
{
        if (atomic_inc_return(&cmdq->pending_count) > qlen) {
                atomic_dec(&cmdq->pending_count);
                /* sync with other cpus */
                smp_mb__after_atomic();
                return true;
        }
        return false;
}
/**
 * post_se_instr - Post SE instruction to Packet Input ring
 * @sr: Request structure
 * @cmdq: Command queue to post the instruction to
 */
static void post_se_instr(struct nitrox_softreq *sr,
                          struct nitrox_cmdq *cmdq)
{
        struct nitrox_device *ndev = sr->ndev;
        int idx;
        u8 *ent;

        spin_lock_bh(&cmdq->cmd_qlock);

        idx = cmdq->write_idx;
        /* copy the instruction */
        ent = cmdq->base + (idx * cmdq->instr_size);
        memcpy(ent, &sr->instr, cmdq->instr_size);

        atomic_set(&sr->status, REQ_POSTED);
        response_list_add(sr, cmdq);
        sr->tstamp = jiffies;
        /* flush the command queue updates */
        dma_wmb();

        /* Ring doorbell with count 1 */
        writeq(1, cmdq->dbell_csr_addr);
        /* orders the doorbell rings */
        mmiowb();

        cmdq->write_idx = incr_index(idx, 1, ndev->qlen);

        spin_unlock_bh(&cmdq->cmd_qlock);

        /* increment the posted command count */
        atomic64_inc(&ndev->stats.posted);
}
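
/*
 * post_backlog_cmds - drain the backlog list into the command queue until
 * the queue fills up again; each posted request is completed back to the
 * crypto layer with -EINPROGRESS.
 */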
static int post_backlog_cmds(struct nitrox_cmdq *cmdq)
{
        struct nitrox_device *ndev = cmdq->ndev;
        struct nitrox_softreq *sr, *tmp;
        int ret = 0;

        if (!atomic_read(&cmdq->backlog_count))
                return 0;

        spin_lock_bh(&cmdq->backlog_qlock);

        list_for_each_entry_safe(sr, tmp, &cmdq->backlog_head, backlog) {
                struct skcipher_request *skreq;

                /* submit until space available */
                if (unlikely(cmdq_full(cmdq, ndev->qlen))) {
                        ret = -ENOSPC;
                        break;
                }
                /* delete from backlog list */
                list_del(&sr->backlog);
                atomic_dec(&cmdq->backlog_count);
                /* sync with other cpus */
                smp_mb__after_atomic();

                skreq = sr->skreq;
                /* post the command */
                post_se_instr(sr, cmdq);

                /* backlog requests are posted, wakeup with -EINPROGRESS */
                skcipher_request_complete(skreq, -EINPROGRESS);
        }
        spin_unlock_bh(&cmdq->backlog_qlock);

        return ret;
}
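
/*
 * nitrox_enqueue_request - try to post the request; if the queue is full,
 * either backlog it (when CRYPTO_TFM_REQ_MAY_BACKLOG is set) or drop it.
 */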
static int nitrox_enqueue_request(struct nitrox_softreq *sr)
{
        struct nitrox_cmdq *cmdq = sr->cmdq;
        struct nitrox_device *ndev = sr->ndev;

        /* try to post backlog requests */
        post_backlog_cmds(cmdq);

        if (unlikely(cmdq_full(cmdq, ndev->qlen))) {
                if (!(sr->flags & CRYPTO_TFM_REQ_MAY_BACKLOG)) {
                        /* increment drop count */
                        atomic64_inc(&ndev->stats.dropped);
                        return -ENOSPC;
                }
                /* add to backlog list */
                backlog_list_add(sr, cmdq);
                return -EBUSY;
        }
        post_se_instr(sr, cmdq);

        return -EINPROGRESS;
}
/**
 * nitrox_process_se_request - Send request to SE core
 * @ndev: NITROX device
 * @req: Crypto request
 * @callback: Completion callback
 * @skreq: Crypto request passed back to @callback
 *
 * Returns -EINPROGRESS if the request was posted, -EBUSY if it was
 * backlogged, or a negative errno code on failure.
 */
int nitrox_process_se_request(struct nitrox_device *ndev,
                              struct se_crypto_request *req,
                              completion_t callback,
                              struct skcipher_request *skreq)
{
        struct nitrox_softreq *sr;
        dma_addr_t ctx_handle = 0;
        int qno, ret = 0;

        if (!nitrox_ready(ndev))
                return -ENODEV;

        sr = kzalloc(sizeof(*sr), req->gfp);
        if (!sr)
                return -ENOMEM;

        sr->ndev = ndev;
        sr->flags = req->flags;
        sr->gfp = req->gfp;
        sr->callback = callback;
        sr->skreq = skreq;

        atomic_set(&sr->status, REQ_NOT_POSTED);

        WRITE_ONCE(sr->resp.orh, PENDING_SIG);
        WRITE_ONCE(sr->resp.completion, PENDING_SIG);

        ret = softreq_map_iobuf(sr, req);
        if (ret) {
                kfree(sr);
                return ret;
        }

        /* get the context handle */
        if (req->ctx_handle) {
                struct ctx_hdr *hdr;
                u8 *ctx_ptr;

                ctx_ptr = (u8 *)(uintptr_t)req->ctx_handle;
                hdr = (struct ctx_hdr *)(ctx_ptr - sizeof(struct ctx_hdr));
                ctx_handle = hdr->ctx_dma;
        }

        /* select the queue */
        qno = smp_processor_id() % ndev->nr_queues;
        sr->cmdq = &ndev->pkt_inq[qno];
        /*
         * 64-Byte Instruction Format
         *
         *  ----------------------
         *  |      DPTR0         | 8 bytes
         *  ----------------------
         *  |  PKT_IN_INSTR_HDR  | 8 bytes
         *  ----------------------
         *  |    PKT_IN_HDR      | 16 bytes
         *  ----------------------
         *  |     SLC_INFO       | 16 bytes
         *  ----------------------
         *  |   Front data       | 16 bytes
         *  ----------------------
         */

        /* fill the packet instruction */
        /* word 0 */
        sr->instr.dptr0 = cpu_to_be64(sr->in.dma);

        /* word 1 */
        sr->instr.ih.value = 0;
        sr->instr.ih.s.g = 1;
        sr->instr.ih.s.gsz = sr->in.map_bufs_cnt;
        sr->instr.ih.s.ssz = sr->out.map_bufs_cnt;
        sr->instr.ih.s.fsz = FDATA_SIZE + sizeof(struct gphdr);
        sr->instr.ih.s.tlen = sr->instr.ih.s.fsz + sr->in.total_bytes;
        sr->instr.ih.value = cpu_to_be64(sr->instr.ih.value);

        /* word 2 */
        sr->instr.irh.value[0] = 0;
        sr->instr.irh.s.uddl = MIN_UDD_LEN;
        /* context length in 64-bit words */
        sr->instr.irh.s.ctxl = (req->ctrl.s.ctxl / 8);
        /* offset from solicit base port 256 */
        sr->instr.irh.s.destport = SOLICIT_BASE_DPORT + qno;
        sr->instr.irh.s.ctxc = req->ctrl.s.ctxc;
        sr->instr.irh.s.arg = req->ctrl.s.arg;
        sr->instr.irh.s.opcode = req->opcode;
        sr->instr.irh.value[0] = cpu_to_be64(sr->instr.irh.value[0]);

        /* word 3 */
        sr->instr.irh.s.ctxp = cpu_to_be64(ctx_handle);

        /* word 4 */
        sr->instr.slc.value[0] = 0;
        sr->instr.slc.s.ssz = sr->out.map_bufs_cnt;
        sr->instr.slc.value[0] = cpu_to_be64(sr->instr.slc.value[0]);

        /* word 5 */
        sr->instr.slc.s.rptr = cpu_to_be64(sr->out.dma);
        /*
         * No conversion is needed for the front data; it goes into the
         * payload as-is. Put the GP header in the front data.
         */
        sr->instr.fdata[0] = *((u64 *)&req->gph);
        sr->instr.fdata[1] = 0;

        ret = nitrox_enqueue_request(sr);
        if (ret == -ENOSPC)
                goto send_fail;

        return ret;

send_fail:
        softreq_destroy(sr);
        return ret;
}
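
/* cmd_timeout - true if @timeout jiffies have elapsed since @tstamp */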
static inline int cmd_timeout(unsigned long tstamp, unsigned long timeout)
{
        return time_after_eq(jiffies, (tstamp + timeout));
}
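
/* backlog_qflush_work - work handler that retries backlogged requests */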
void backlog_qflush_work(struct work_struct *work)
{
        struct nitrox_cmdq *cmdq;

        cmdq = container_of(work, struct nitrox_cmdq, backlog_qflush);
        post_backlog_cmds(cmdq);
}
/**
 * process_response_list - process completed requests
 * @cmdq: Command queue structure
 *
 * Walks the response list and completes every request whose ORH and
 * completion words have been updated by the SE core, or which has
 * timed out.
 */
static void process_response_list(struct nitrox_cmdq *cmdq)
{
        struct nitrox_device *ndev = cmdq->ndev;
        struct nitrox_softreq *sr;
        struct skcipher_request *skreq;
        completion_t callback;
        int req_completed = 0, err = 0, budget;

        /* check all pending requests */
        budget = atomic_read(&cmdq->pending_count);

        while (req_completed < budget) {
                sr = get_first_response_entry(cmdq);
                if (!sr)
                        break;

                if (atomic_read(&sr->status) != REQ_POSTED)
                        break;

                /* check orh and completion bytes updates */
                if (READ_ONCE(sr->resp.orh) == READ_ONCE(sr->resp.completion)) {
                        /* request not completed, check for timeout */
                        if (!cmd_timeout(sr->tstamp, ndev->timeout))
                                break;
                        dev_err_ratelimited(DEV(ndev),
                                            "Request timeout, orh 0x%016llx\n",
                                            READ_ONCE(sr->resp.orh));
                }
                atomic_dec(&cmdq->pending_count);
                atomic64_inc(&ndev->stats.completed);
                /* sync with other cpus */
                smp_mb__after_atomic();
                /* remove from response list */
                response_list_del(sr, cmdq);

                callback = sr->callback;
                skreq = sr->skreq;

                /* ORH error code */
                err = READ_ONCE(sr->resp.orh) & 0xff;
                softreq_destroy(sr);

                if (callback)
                        callback(skreq, err);

                req_completed++;
        }
}
/**
 * pkt_slc_resp_tasklet - post processing of SE responses
 * @data: tasklet argument, a pointer to the queue vector
 */
void pkt_slc_resp_tasklet(unsigned long data)
{
        struct nitrox_q_vector *qvec = (void *)(uintptr_t)(data);
        struct nitrox_cmdq *cmdq = qvec->cmdq;
        union nps_pkt_slc_cnts slc_cnts;

        /* read completion count */
        slc_cnts.value = readq(cmdq->compl_cnt_csr_addr);
        /* resend the interrupt if more work to do */
        slc_cnts.s.resend = 1;

        process_response_list(cmdq);

        /*
         * clear the interrupt with resend bit enabled,
         * MSI-X interrupt generates if Completion count > Threshold
         */
        writeq(slc_cnts.value, cmdq->compl_cnt_csr_addr);
        /* order the writes */
        mmiowb();

        if (atomic_read(&cmdq->backlog_count))
                schedule_work(&cmdq->backlog_qflush);
}