nitrox_lib.c

// SPDX-License-Identifier: GPL-2.0
#include <linux/cpumask.h>
#include <linux/dma-mapping.h>
#include <linux/dmapool.h>
#include <linux/delay.h>
#include <linux/gfp.h>
#include <linux/kernel.h>
#include <linux/module.h>
#include <linux/pci_regs.h>
#include <linux/vmalloc.h>
#include <linux/pci.h>

#include "nitrox_dev.h"
#include "nitrox_common.h"
#include "nitrox_req.h"
#include "nitrox_csr.h"

#define CRYPTO_CTX_SIZE	256

/* command queue alignments */
#define PKT_IN_ALIGN	16
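
/*
 * cmdq_common_init - one-time setup of a NITROX command queue.
 * @cmdq: command queue to initialize
 *
 * Allocates a coherent DMA ring large enough for ndev->qlen instructions,
 * over-allocated by PKT_IN_ALIGN bytes so that both the CPU pointer and
 * the DMA address can be rounded up to a 16-byte boundary below. Also
 * initializes the response and backlog lists, their locks, and the
 * backlog flush work.
 */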
static int cmdq_common_init(struct nitrox_cmdq *cmdq)
{
	struct nitrox_device *ndev = cmdq->ndev;
	u32 qsize;

	qsize = (ndev->qlen) * cmdq->instr_size;
	cmdq->head_unaligned = dma_zalloc_coherent(DEV(ndev),
						   (qsize + PKT_IN_ALIGN),
						   &cmdq->dma_unaligned,
						   GFP_KERNEL);
	if (!cmdq->head_unaligned)
		return -ENOMEM;

	cmdq->head = PTR_ALIGN(cmdq->head_unaligned, PKT_IN_ALIGN);
	cmdq->dma = PTR_ALIGN(cmdq->dma_unaligned, PKT_IN_ALIGN);
	cmdq->qsize = (qsize + PKT_IN_ALIGN);
	cmdq->write_idx = 0;

	spin_lock_init(&cmdq->response_lock);
	spin_lock_init(&cmdq->cmdq_lock);
	spin_lock_init(&cmdq->backlog_lock);

	INIT_LIST_HEAD(&cmdq->response_head);
	INIT_LIST_HEAD(&cmdq->backlog_head);
	INIT_WORK(&cmdq->backlog_qflush, backlog_qflush_work);

	atomic_set(&cmdq->pending_count, 0);
	atomic_set(&cmdq->backlog_count, 0);
	return 0;
}
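
/*
 * cmdq_common_cleanup - reverse of cmdq_common_init.
 * @cmdq: command queue to tear down
 *
 * Cancels any pending backlog flush work before freeing the (unaligned)
 * ring allocation, so the work handler cannot run against a freed queue.
 */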
static void cmdq_common_cleanup(struct nitrox_cmdq *cmdq)
{
	struct nitrox_device *ndev = cmdq->ndev;

	cancel_work_sync(&cmdq->backlog_qflush);

	dma_free_coherent(DEV(ndev), cmdq->qsize,
			  cmdq->head_unaligned, cmdq->dma_unaligned);

	atomic_set(&cmdq->pending_count, 0);
	atomic_set(&cmdq->backlog_count, 0);

	cmdq->dbell_csr_addr = NULL;
	cmdq->head = NULL;
	cmdq->dma = 0;
	cmdq->qsize = 0;
	cmdq->instr_size = 0;
}

static void nitrox_cleanup_pkt_cmdqs(struct nitrox_device *ndev)
{
	int i;

	for (i = 0; i < ndev->nr_queues; i++) {
		struct nitrox_cmdq *cmdq = &ndev->pkt_cmdqs[i];

		cmdq_common_cleanup(cmdq);
	}
	kfree(ndev->pkt_cmdqs);
	ndev->pkt_cmdqs = NULL;
}
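
/*
 * nitrox_init_pkt_cmdqs - set up one command queue per packet input ring.
 * @ndev: NITROX device
 *
 * Binds each queue to its SE ring doorbell CSR and sizes its entries as
 * struct nps_pkt_instr. On failure, only the queues that were fully
 * initialized are unwound.
 */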
static int nitrox_init_pkt_cmdqs(struct nitrox_device *ndev)
{
	int i, err;

	ndev->pkt_cmdqs = kcalloc(ndev->nr_queues,
				  sizeof(struct nitrox_cmdq), GFP_KERNEL);
	if (!ndev->pkt_cmdqs)
		return -ENOMEM;

	for (i = 0; i < ndev->nr_queues; i++) {
		struct nitrox_cmdq *cmdq;
		u64 offset;

		cmdq = &ndev->pkt_cmdqs[i];
		cmdq->ndev = ndev;
		cmdq->qno = i;
		cmdq->instr_size = sizeof(struct nps_pkt_instr);

		offset = NPS_PKT_IN_INSTR_BAOFF_DBELLX(i);
		/* SE ring doorbell address for this queue */
		cmdq->dbell_csr_addr = NITROX_CSR_ADDR(ndev, offset);

		err = cmdq_common_init(cmdq);
		if (err)
			goto pkt_cmdq_fail;
	}
	return 0;

pkt_cmdq_fail:
	/* unwind only the queues initialized so far */
	while (i--)
		cmdq_common_cleanup(&ndev->pkt_cmdqs[i]);
	kfree(ndev->pkt_cmdqs);
	ndev->pkt_cmdqs = NULL;
	return err;
}
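
/*
 * Crypto context pool: each object is a struct ctx_hdr immediately
 * followed by CRYPTO_CTX_SIZE bytes of device-visible context.
 * crypto_alloc_context() returns a pointer just past the header and
 * crypto_free_context() walks back over it to recover the metadata.
 */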
static int create_crypto_dma_pool(struct nitrox_device *ndev)
{
	size_t size;

	/* crypto context pool, 16-byte aligned */
	size = CRYPTO_CTX_SIZE + sizeof(struct ctx_hdr);
	ndev->ctx_pool = dma_pool_create("crypto-context",
					 DEV(ndev), size, 16, 0);
	if (!ndev->ctx_pool)
		return -ENOMEM;

	return 0;
}

static void destroy_crypto_dma_pool(struct nitrox_device *ndev)
{
	if (!ndev->ctx_pool)
		return;

	dma_pool_destroy(ndev->ctx_pool);
	ndev->ctx_pool = NULL;
}

/**
 * crypto_alloc_context - Allocate crypto context from pool
 * @ndev: NITROX Device
 */
void *crypto_alloc_context(struct nitrox_device *ndev)
{
	struct ctx_hdr *ctx;
	void *vaddr;
	dma_addr_t dma;

	vaddr = dma_pool_zalloc(ndev->ctx_pool, GFP_KERNEL, &dma);
	if (!vaddr)
		return NULL;

	/* fill meta data */
	ctx = vaddr;
	ctx->pool = ndev->ctx_pool;
	ctx->dma = dma;
	ctx->ctx_dma = dma + sizeof(struct ctx_hdr);

	return ((u8 *)vaddr + sizeof(struct ctx_hdr));
}

/**
 * crypto_free_context - Free crypto context to pool
 * @ctx: context to free
 */
void crypto_free_context(void *ctx)
{
	struct ctx_hdr *ctxp;

	if (!ctx)
		return;

	ctxp = (struct ctx_hdr *)((u8 *)ctx - sizeof(struct ctx_hdr));
	dma_pool_free(ctxp->pool, ctxp, ctxp->dma);
}
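
/*
 * Usage sketch (hypothetical caller, for illustration only; not part of
 * this file):
 *
 *	void *ctx = crypto_alloc_context(ndev);
 *
 *	if (!ctx)
 *		return -ENOMEM;
 *	// ... program the context; the device addresses it through
 *	// ((struct ctx_hdr *)ctx - 1)->ctx_dma ...
 *	crypto_free_context(ctx);
 */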

/**
 * nitrox_common_sw_init - allocate software resources.
 * @ndev: NITROX device
 *
 * Allocates the crypto context pool and the per-queue command queues.
 *
 * Return: 0 on success, or a negative error code on failure.
 */
int nitrox_common_sw_init(struct nitrox_device *ndev)
{
	int err;

	/* per device crypto context pool */
	err = create_crypto_dma_pool(ndev);
	if (err)
		return err;

	err = nitrox_init_pkt_cmdqs(ndev);
	if (err)
		destroy_crypto_dma_pool(ndev);

	return err;
}

/**
 * nitrox_common_sw_cleanup - free software resources.
 * @ndev: NITROX device
 */
void nitrox_common_sw_cleanup(struct nitrox_device *ndev)
{
	nitrox_cleanup_pkt_cmdqs(ndev);
	destroy_crypto_dma_pool(ndev);
}