safexcel_ring.c

// SPDX-License-Identifier: GPL-2.0
/*
 * Copyright (C) 2017 Marvell
 *
 * Antoine Tenart <antoine.tenart@free-electrons.com>
 */

#include <linux/dma-mapping.h>
#include <linux/spinlock.h>

#include "safexcel.h"
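
/*
 * Each descriptor ring is a circular buffer of fixed-size slots in
 * DMA-coherent memory. 'read' and 'write' walk the buffer in steps of
 * 'offset' bytes, and 'base_end' points at the last slot, so the
 * pointers wrap back to 'base' once they reach it.
 */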
int safexcel_init_ring_descriptors(struct safexcel_crypto_priv *priv,
				   struct safexcel_desc_ring *cdr,
				   struct safexcel_desc_ring *rdr)
{
	cdr->offset = sizeof(u32) * priv->config.cd_offset;
	cdr->base = dmam_alloc_coherent(priv->dev,
					cdr->offset * EIP197_DEFAULT_RING_SIZE,
					&cdr->base_dma, GFP_KERNEL);
	if (!cdr->base)
		return -ENOMEM;
	cdr->write = cdr->base;
	cdr->base_end = cdr->base + cdr->offset * (EIP197_DEFAULT_RING_SIZE - 1);
	cdr->read = cdr->base;

	rdr->offset = sizeof(u32) * priv->config.rd_offset;
	rdr->base = dmam_alloc_coherent(priv->dev,
					rdr->offset * EIP197_DEFAULT_RING_SIZE,
					&rdr->base_dma, GFP_KERNEL);
	if (!rdr->base)
		return -ENOMEM;
	rdr->write = rdr->base;
	rdr->base_end = rdr->base + rdr->offset * (EIP197_DEFAULT_RING_SIZE - 1);
	rdr->read = rdr->base;

	return 0;
}
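
/* Spread requests over the available rings in round-robin fashion. */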
inline int safexcel_select_ring(struct safexcel_crypto_priv *priv)
{
	return (atomic_inc_return(&priv->ring_used) % priv->config.rings);
}
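
/*
 * Reserve the next free slot and advance the write pointer. The ring is
 * considered full when the write pointer sits one slot behind the read
 * pointer (modulo wrap-around), so one slot is always left unused to
 * distinguish a full ring from an empty one.
 */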
static void *safexcel_ring_next_wptr(struct safexcel_crypto_priv *priv,
				     struct safexcel_desc_ring *ring)
{
	void *ptr = ring->write;

	if ((ring->write == ring->read - ring->offset) ||
	    (ring->read == ring->base && ring->write == ring->base_end))
		return ERR_PTR(-ENOMEM);

	if (ring->write == ring->base_end)
		ring->write = ring->base;
	else
		ring->write += ring->offset;

	return ptr;
}
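
/*
 * Consume the oldest descriptor and advance the read pointer. An empty
 * ring (read == write) has nothing to return.
 */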
void *safexcel_ring_next_rptr(struct safexcel_crypto_priv *priv,
			      struct safexcel_desc_ring *ring)
{
	void *ptr = ring->read;

	if (ring->write == ring->read)
		return ERR_PTR(-ENOENT);

	if (ring->read == ring->base_end)
		ring->read = ring->base;
	else
		ring->read += ring->offset;

	return ptr;
}
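
/*
 * Helpers to peek at the current result descriptor and to translate
 * descriptor pointers into ring indices: slot number = byte offset from
 * the ring base divided by the per-descriptor stride.
 */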
inline void *safexcel_ring_curr_rptr(struct safexcel_crypto_priv *priv,
				     int ring)
{
	struct safexcel_desc_ring *rdr = &priv->ring[ring].rdr;

	return rdr->read;
}

inline int safexcel_ring_first_rdr_index(struct safexcel_crypto_priv *priv,
					 int ring)
{
	struct safexcel_desc_ring *rdr = &priv->ring[ring].rdr;

	return (rdr->read - rdr->base) / rdr->offset;
}

inline int safexcel_ring_rdr_rdesc_index(struct safexcel_crypto_priv *priv,
					 int ring,
					 struct safexcel_result_desc *rdesc)
{
	struct safexcel_desc_ring *rdr = &priv->ring[ring].rdr;

	return ((void *)rdesc - rdr->base) / rdr->offset;
}
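
/*
 * Undo the most recent safexcel_ring_next_wptr(), e.g. when building a
 * chain of descriptors fails midway.
 */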
void safexcel_ring_rollback_wptr(struct safexcel_crypto_priv *priv,
				 struct safexcel_desc_ring *ring)
{
	if (ring->write == ring->read)
		return;

	if (ring->write == ring->base)
		ring->write = ring->base_end;
	else
		ring->write -= ring->offset;
}
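
/*
 * Queue a command descriptor. The first segment of a request also
 * carries the control data: the total packet length, the context
 * (record) address and a token area that is initially filled with
 * no-op tokens.
 */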
struct safexcel_command_desc *safexcel_add_cdesc(struct safexcel_crypto_priv *priv,
						 int ring_id,
						 bool first, bool last,
						 dma_addr_t data, u32 data_len,
						 u32 full_data_len,
						 dma_addr_t context)
{
	struct safexcel_command_desc *cdesc;
	int i;

	cdesc = safexcel_ring_next_wptr(priv, &priv->ring[ring_id].cdr);
	if (IS_ERR(cdesc))
		return cdesc;

	memset(cdesc, 0, sizeof(struct safexcel_command_desc));

	cdesc->first_seg = first;
	cdesc->last_seg = last;
	cdesc->particle_size = data_len;
	cdesc->data_lo = lower_32_bits(data);
	cdesc->data_hi = upper_32_bits(data);

	if (first && context) {
		struct safexcel_token *token =
			(struct safexcel_token *)cdesc->control_data.token;

		cdesc->control_data.packet_length = full_data_len;
		cdesc->control_data.options = EIP197_OPTION_MAGIC_VALUE |
					      EIP197_OPTION_64BIT_CTX |
					      EIP197_OPTION_CTX_CTRL_IN_CMD;
		cdesc->control_data.context_lo =
			(lower_32_bits(context) & GENMASK(31, 2)) >> 2;
		cdesc->control_data.context_hi = upper_32_bits(context);

		/* TODO: large xform HMAC with SHA-384/512 uses refresh = 3 */
		cdesc->control_data.refresh = 2;

		for (i = 0; i < EIP197_MAX_TOKENS; i++)
			eip197_noop_token(&token[i]);
	}

	return cdesc;
}
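
/* Queue a result descriptor pointing at the output data segment. */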
struct safexcel_result_desc *safexcel_add_rdesc(struct safexcel_crypto_priv *priv,
						int ring_id,
						bool first, bool last,
						dma_addr_t data, u32 len)
{
	struct safexcel_result_desc *rdesc;

	rdesc = safexcel_ring_next_wptr(priv, &priv->ring[ring_id].rdr);
	if (IS_ERR(rdesc))
		return rdesc;

	memset(rdesc, 0, sizeof(struct safexcel_result_desc));

	rdesc->first_seg = first;
	rdesc->last_seg = last;
	rdesc->particle_size = len;
	rdesc->data_lo = lower_32_bits(data);
	rdesc->data_hi = upper_32_bits(data);

	return rdesc;
}