n2_core.c

/* n2_core.c: Niagara2 Stream Processing Unit (SPU) crypto support.
 *
 * Copyright (C) 2010, 2011 David S. Miller <davem@davemloft.net>
 */

#define pr_fmt(fmt)	KBUILD_MODNAME ": " fmt

#include <linux/kernel.h>
#include <linux/module.h>
#include <linux/of.h>
#include <linux/of_device.h>
#include <linux/cpumask.h>
#include <linux/slab.h>
#include <linux/interrupt.h>
#include <linux/crypto.h>
#include <crypto/md5.h>
#include <crypto/sha.h>
#include <crypto/aes.h>
#include <crypto/des.h>
#include <linux/mutex.h>
#include <linux/delay.h>
#include <linux/sched.h>

#include <crypto/internal/hash.h>
#include <crypto/scatterwalk.h>
#include <crypto/algapi.h>

#include <asm/hypervisor.h>
#include <asm/mdesc.h>

#include "n2_core.h"

#define DRV_MODULE_NAME		"n2_crypto"
#define DRV_MODULE_VERSION	"0.2"
#define DRV_MODULE_RELDATE	"July 28, 2011"

static char version[] =
	DRV_MODULE_NAME ".c:v" DRV_MODULE_VERSION " (" DRV_MODULE_RELDATE ")\n";

MODULE_AUTHOR("David S. Miller (davem@davemloft.net)");
MODULE_DESCRIPTION("Niagara2 Crypto driver");
MODULE_LICENSE("GPL");
MODULE_VERSION(DRV_MODULE_VERSION);

#define N2_CRA_PRIORITY		200

static DEFINE_MUTEX(spu_lock);

struct spu_queue {
	cpumask_t		sharing;
	unsigned long		qhandle;

	spinlock_t		lock;
	u8			q_type;
	void			*q;
	unsigned long		head;
	unsigned long		tail;
	struct list_head	jobs;

	unsigned long		devino;

	char			irq_name[32];
	unsigned int		irq;

	struct list_head	list;
};

static struct spu_queue **cpu_to_cwq;
static struct spu_queue **cpu_to_mau;

static unsigned long spu_next_offset(struct spu_queue *q, unsigned long off)
{
	if (q->q_type == HV_NCS_QTYPE_MAU) {
		off += MAU_ENTRY_SIZE;
		if (off == (MAU_ENTRY_SIZE * MAU_NUM_ENTRIES))
			off = 0;
	} else {
		off += CWQ_ENTRY_SIZE;
		if (off == (CWQ_ENTRY_SIZE * CWQ_NUM_ENTRIES))
			off = 0;
	}
	return off;
}

struct n2_request_common {
	struct list_head	entry;
	unsigned int		offset;
};
#define OFFSET_NOT_RUNNING	(~(unsigned int)0)

/* An async job request records the final tail value it used in
 * n2_request_common->offset; test to see if that offset is in
 * the range old_head, new_head, inclusive.
 */
static inline bool job_finished(struct spu_queue *q, unsigned int offset,
				unsigned long old_head, unsigned long new_head)
{
	if (old_head <= new_head) {
		if (offset > old_head && offset <= new_head)
			return true;
	} else {
		if (offset > old_head || offset <= new_head)
			return true;
	}
	return false;
}
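
/* For example (illustrative offsets): if the hardware head advanced
 * from old_head == 0x380 to new_head == 0x40, it wrapped past the end
 * of the ring, so jobs whose recorded tail offset lies in
 * (0x380, ring end) or in [0, 0x40] are complete.  The second branch
 * above handles exactly this wrapped case.
 */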

/* When the HEAD marker is unequal to the actual HEAD, we get
 * a virtual device INO interrupt.  We should process the
 * completed CWQ entries and adjust the HEAD marker to clear
 * the IRQ.
 */
static irqreturn_t cwq_intr(int irq, void *dev_id)
{
	unsigned long off, new_head, hv_ret;
	struct spu_queue *q = dev_id;

	pr_err("CPU[%d]: Got CWQ interrupt for qhdl[%lx]\n",
	       smp_processor_id(), q->qhandle);

	spin_lock(&q->lock);

	hv_ret = sun4v_ncs_gethead(q->qhandle, &new_head);

	pr_err("CPU[%d]: CWQ gethead[%lx] hv_ret[%lu]\n",
	       smp_processor_id(), new_head, hv_ret);

	for (off = q->head; off != new_head; off = spu_next_offset(q, off)) {
		/* XXX ... XXX */
	}

	hv_ret = sun4v_ncs_sethead_marker(q->qhandle, new_head);
	if (hv_ret == HV_EOK)
		q->head = new_head;

	spin_unlock(&q->lock);

	return IRQ_HANDLED;
}

static irqreturn_t mau_intr(int irq, void *dev_id)
{
	struct spu_queue *q = dev_id;
	unsigned long head, hv_ret;

	spin_lock(&q->lock);

	pr_err("CPU[%d]: Got MAU interrupt for qhdl[%lx]\n",
	       smp_processor_id(), q->qhandle);

	hv_ret = sun4v_ncs_gethead(q->qhandle, &head);

	pr_err("CPU[%d]: MAU gethead[%lx] hv_ret[%lu]\n",
	       smp_processor_id(), head, hv_ret);

	sun4v_ncs_sethead_marker(q->qhandle, head);

	spin_unlock(&q->lock);

	return IRQ_HANDLED;
}

static void *spu_queue_next(struct spu_queue *q, void *cur)
{
	return q->q + spu_next_offset(q, cur - q->q);
}

static int spu_queue_num_free(struct spu_queue *q)
{
	unsigned long head = q->head;
	unsigned long tail = q->tail;
	unsigned long end = (CWQ_ENTRY_SIZE * CWQ_NUM_ENTRIES);
	unsigned long diff;

	if (head > tail)
		diff = head - tail;
	else
		diff = (end - tail) + head;

	return (diff / CWQ_ENTRY_SIZE) - 1;
}
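
/* Note the "- 1" above: one entry's worth of space is always held
 * back, so a full ring (tail one entry behind head) can never be
 * confused with an empty one (head == tail).
 */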

static void *spu_queue_alloc(struct spu_queue *q, int num_entries)
{
	int avail = spu_queue_num_free(q);

	if (avail >= num_entries)
		return q->q + q->tail;

	return NULL;
}

static unsigned long spu_queue_submit(struct spu_queue *q, void *last)
{
	unsigned long hv_ret, new_tail;

	new_tail = spu_next_offset(q, last - q->q);

	hv_ret = sun4v_ncs_settail(q->qhandle, new_tail);
	if (hv_ret == HV_EOK)
		q->tail = new_tail;
	return hv_ret;
}

static u64 control_word_base(unsigned int len, unsigned int hmac_key_len,
			     int enc_type, int auth_type,
			     unsigned int hash_len,
			     bool sfas, bool sob, bool eob, bool encrypt,
			     int opcode)
{
	u64 word = (len - 1) & CONTROL_LEN;

	word |= ((u64) opcode << CONTROL_OPCODE_SHIFT);
	word |= ((u64) enc_type << CONTROL_ENC_TYPE_SHIFT);
	word |= ((u64) auth_type << CONTROL_AUTH_TYPE_SHIFT);
	if (sfas)
		word |= CONTROL_STORE_FINAL_AUTH_STATE;
	if (sob)
		word |= CONTROL_START_OF_BLOCK;
	if (eob)
		word |= CONTROL_END_OF_BLOCK;
	if (encrypt)
		word |= CONTROL_ENCRYPT;
	if (hmac_key_len)
		word |= ((u64) (hmac_key_len - 1)) << CONTROL_HMAC_KEY_LEN_SHIFT;
	if (hash_len)
		word |= ((u64) (hash_len - 1)) << CONTROL_HASH_LEN_SHIFT;

	return word;
}
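
/* For example, n2_do_async_digest() below builds the first
 * descriptor of a plain hash operation as:
 *
 *	control_word_base(nbytes, 0, 0, auth_type, digest_size,
 *			  false, true, false, false,
 *			  OPCODE_INPLACE_BIT | OPCODE_AUTH_MAC);
 *
 * i.e. start-of-block set and end-of-block clear; CONTROL_END_OF_BLOCK
 * is OR'ed into whichever descriptor turns out to be the last one.
 * Note that len, hmac_key_len and hash_len are all encoded biased by
 * one, so a field value of 0 means a length of 1.
 */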

#if 0
static inline bool n2_should_run_async(struct spu_queue *qp, int this_len)
{
	if (this_len >= 64 ||
	    qp->head != qp->tail)
		return true;
	return false;
}
#endif

struct n2_ahash_alg {
	struct list_head	entry;
	const char		*hash_zero;
	const u32		*hash_init;
	u8			hw_op_hashsz;
	u8			digest_size;
	u8			auth_type;
	u8			hmac_type;
	struct ahash_alg	alg;
};

static inline struct n2_ahash_alg *n2_ahash_alg(struct crypto_tfm *tfm)
{
	struct crypto_alg *alg = tfm->__crt_alg;
	struct ahash_alg *ahash_alg;

	ahash_alg = container_of(alg, struct ahash_alg, halg.base);

	return container_of(ahash_alg, struct n2_ahash_alg, alg);
}

struct n2_hmac_alg {
	const char		*child_alg;
	struct n2_ahash_alg	derived;
};

static inline struct n2_hmac_alg *n2_hmac_alg(struct crypto_tfm *tfm)
{
	struct crypto_alg *alg = tfm->__crt_alg;
	struct ahash_alg *ahash_alg;

	ahash_alg = container_of(alg, struct ahash_alg, halg.base);

	return container_of(ahash_alg, struct n2_hmac_alg, derived.alg);
}

struct n2_hash_ctx {
	struct crypto_ahash	*fallback_tfm;
};

#define N2_HASH_KEY_MAX		32 /* HW limit for all HMAC requests */

struct n2_hmac_ctx {
	struct n2_hash_ctx	base;

	struct crypto_shash	*child_shash;

	int			hash_key_len;
	unsigned char		hash_key[N2_HASH_KEY_MAX];
};

struct n2_hash_req_ctx {
	union {
		struct md5_state	md5;
		struct sha1_state	sha1;
		struct sha256_state	sha256;
	} u;

	struct ahash_request	fallback_req;
};

static int n2_hash_async_init(struct ahash_request *req)
{
	struct n2_hash_req_ctx *rctx = ahash_request_ctx(req);
	struct crypto_ahash *tfm = crypto_ahash_reqtfm(req);
	struct n2_hash_ctx *ctx = crypto_ahash_ctx(tfm);

	ahash_request_set_tfm(&rctx->fallback_req, ctx->fallback_tfm);
	rctx->fallback_req.base.flags = req->base.flags & CRYPTO_TFM_REQ_MAY_SLEEP;

	return crypto_ahash_init(&rctx->fallback_req);
}

static int n2_hash_async_update(struct ahash_request *req)
{
	struct n2_hash_req_ctx *rctx = ahash_request_ctx(req);
	struct crypto_ahash *tfm = crypto_ahash_reqtfm(req);
	struct n2_hash_ctx *ctx = crypto_ahash_ctx(tfm);

	ahash_request_set_tfm(&rctx->fallback_req, ctx->fallback_tfm);
	rctx->fallback_req.base.flags = req->base.flags & CRYPTO_TFM_REQ_MAY_SLEEP;
	rctx->fallback_req.nbytes = req->nbytes;
	rctx->fallback_req.src = req->src;

	return crypto_ahash_update(&rctx->fallback_req);
}

static int n2_hash_async_final(struct ahash_request *req)
{
	struct n2_hash_req_ctx *rctx = ahash_request_ctx(req);
	struct crypto_ahash *tfm = crypto_ahash_reqtfm(req);
	struct n2_hash_ctx *ctx = crypto_ahash_ctx(tfm);

	ahash_request_set_tfm(&rctx->fallback_req, ctx->fallback_tfm);
	rctx->fallback_req.base.flags = req->base.flags & CRYPTO_TFM_REQ_MAY_SLEEP;
	rctx->fallback_req.result = req->result;

	return crypto_ahash_final(&rctx->fallback_req);
}

static int n2_hash_async_finup(struct ahash_request *req)
{
	struct n2_hash_req_ctx *rctx = ahash_request_ctx(req);
	struct crypto_ahash *tfm = crypto_ahash_reqtfm(req);
	struct n2_hash_ctx *ctx = crypto_ahash_ctx(tfm);

	ahash_request_set_tfm(&rctx->fallback_req, ctx->fallback_tfm);
	rctx->fallback_req.base.flags = req->base.flags & CRYPTO_TFM_REQ_MAY_SLEEP;
	rctx->fallback_req.nbytes = req->nbytes;
	rctx->fallback_req.src = req->src;
	rctx->fallback_req.result = req->result;

	return crypto_ahash_finup(&rctx->fallback_req);
}

static int n2_hash_cra_init(struct crypto_tfm *tfm)
{
	const char *fallback_driver_name = crypto_tfm_alg_name(tfm);
	struct crypto_ahash *ahash = __crypto_ahash_cast(tfm);
	struct n2_hash_ctx *ctx = crypto_ahash_ctx(ahash);
	struct crypto_ahash *fallback_tfm;
	int err;

	fallback_tfm = crypto_alloc_ahash(fallback_driver_name, 0,
					  CRYPTO_ALG_NEED_FALLBACK);
	if (IS_ERR(fallback_tfm)) {
		pr_warning("Fallback driver '%s' could not be loaded!\n",
			   fallback_driver_name);
		err = PTR_ERR(fallback_tfm);
		goto out;
	}

	crypto_ahash_set_reqsize(ahash, (sizeof(struct n2_hash_req_ctx) +
					 crypto_ahash_reqsize(fallback_tfm)));

	ctx->fallback_tfm = fallback_tfm;
	return 0;

out:
	return err;
}

static void n2_hash_cra_exit(struct crypto_tfm *tfm)
{
	struct crypto_ahash *ahash = __crypto_ahash_cast(tfm);
	struct n2_hash_ctx *ctx = crypto_ahash_ctx(ahash);

	crypto_free_ahash(ctx->fallback_tfm);
}

static int n2_hmac_cra_init(struct crypto_tfm *tfm)
{
	const char *fallback_driver_name = crypto_tfm_alg_name(tfm);
	struct crypto_ahash *ahash = __crypto_ahash_cast(tfm);
	struct n2_hmac_ctx *ctx = crypto_ahash_ctx(ahash);
	struct n2_hmac_alg *n2alg = n2_hmac_alg(tfm);
	struct crypto_ahash *fallback_tfm;
	struct crypto_shash *child_shash;
	int err;

	fallback_tfm = crypto_alloc_ahash(fallback_driver_name, 0,
					  CRYPTO_ALG_NEED_FALLBACK);
	if (IS_ERR(fallback_tfm)) {
		pr_warning("Fallback driver '%s' could not be loaded!\n",
			   fallback_driver_name);
		err = PTR_ERR(fallback_tfm);
		goto out;
	}

	child_shash = crypto_alloc_shash(n2alg->child_alg, 0, 0);
	if (IS_ERR(child_shash)) {
		pr_warning("Child shash '%s' could not be loaded!\n",
			   n2alg->child_alg);
		err = PTR_ERR(child_shash);
		goto out_free_fallback;
	}

	crypto_ahash_set_reqsize(ahash, (sizeof(struct n2_hash_req_ctx) +
					 crypto_ahash_reqsize(fallback_tfm)));

	ctx->child_shash = child_shash;
	ctx->base.fallback_tfm = fallback_tfm;
	return 0;

out_free_fallback:
	crypto_free_ahash(fallback_tfm);

out:
	return err;
}

static void n2_hmac_cra_exit(struct crypto_tfm *tfm)
{
	struct crypto_ahash *ahash = __crypto_ahash_cast(tfm);
	struct n2_hmac_ctx *ctx = crypto_ahash_ctx(ahash);

	crypto_free_ahash(ctx->base.fallback_tfm);
	crypto_free_shash(ctx->child_shash);
}

static int n2_hmac_async_setkey(struct crypto_ahash *tfm, const u8 *key,
				unsigned int keylen)
{
	struct n2_hmac_ctx *ctx = crypto_ahash_ctx(tfm);
	struct crypto_shash *child_shash = ctx->child_shash;
	struct crypto_ahash *fallback_tfm;
	SHASH_DESC_ON_STACK(shash, child_shash);
	int err, bs, ds;

	fallback_tfm = ctx->base.fallback_tfm;
	err = crypto_ahash_setkey(fallback_tfm, key, keylen);
	if (err)
		return err;

	shash->tfm = child_shash;
	shash->flags = crypto_ahash_get_flags(tfm) &
		CRYPTO_TFM_REQ_MAY_SLEEP;

	bs = crypto_shash_blocksize(child_shash);
	ds = crypto_shash_digestsize(child_shash);
	BUG_ON(ds > N2_HASH_KEY_MAX);
	if (keylen > bs) {
		err = crypto_shash_digest(shash, key, keylen,
					  ctx->hash_key);
		if (err)
			return err;
		keylen = ds;
	} else if (keylen <= N2_HASH_KEY_MAX)
		memcpy(ctx->hash_key, key, keylen);

	ctx->hash_key_len = keylen;

	return err;
}
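
/* Per the usual HMAC construction (RFC 2104), a key longer than the
 * underlying hash's block size is first digested down to the digest
 * size, which BUG_ON() above guarantees fits in hash_key[].  A key
 * that is under the block size but still over N2_HASH_KEY_MAX is not
 * copied at all; it merely leaves hash_key_len > N2_HASH_KEY_MAX,
 * which n2_hmac_async_digest() treats as a signal to punt the whole
 * request to the software fallback.
 */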

static unsigned long wait_for_tail(struct spu_queue *qp)
{
	unsigned long head, hv_ret;

	do {
		hv_ret = sun4v_ncs_gethead(qp->qhandle, &head);
		if (hv_ret != HV_EOK) {
			pr_err("Hypervisor error on gethead\n");
			break;
		}
		if (head == qp->tail) {
			qp->head = head;
			break;
		}
	} while (1);
	return hv_ret;
}
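
/* Note that this busy-polls the hypervisor, with the queue lock held
 * by the caller, until the hardware's head pointer catches up with
 * our tail, i.e. until every descriptor submitted so far has been
 * consumed.
 */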

static unsigned long submit_and_wait_for_tail(struct spu_queue *qp,
					      struct cwq_initial_entry *ent)
{
	unsigned long hv_ret = spu_queue_submit(qp, ent);

	if (hv_ret == HV_EOK)
		hv_ret = wait_for_tail(qp);

	return hv_ret;
}

static int n2_do_async_digest(struct ahash_request *req,
			      unsigned int auth_type, unsigned int digest_size,
			      unsigned int result_size, void *hash_loc,
			      unsigned long auth_key, unsigned int auth_key_len)
{
	struct crypto_ahash *tfm = crypto_ahash_reqtfm(req);
	struct cwq_initial_entry *ent;
	struct crypto_hash_walk walk;
	struct spu_queue *qp;
	unsigned long flags;
	int err = -ENODEV;
	int nbytes, cpu;

	/* The total effective length of the operation may not
	 * exceed 2^16.
	 */
	if (unlikely(req->nbytes > (1 << 16))) {
		struct n2_hash_req_ctx *rctx = ahash_request_ctx(req);
		struct n2_hash_ctx *ctx = crypto_ahash_ctx(tfm);

		ahash_request_set_tfm(&rctx->fallback_req, ctx->fallback_tfm);
		rctx->fallback_req.base.flags =
			req->base.flags & CRYPTO_TFM_REQ_MAY_SLEEP;
		rctx->fallback_req.nbytes = req->nbytes;
		rctx->fallback_req.src = req->src;
		rctx->fallback_req.result = req->result;

		return crypto_ahash_digest(&rctx->fallback_req);
	}

	nbytes = crypto_hash_walk_first(req, &walk);

	cpu = get_cpu();
	qp = cpu_to_cwq[cpu];
	if (!qp)
		goto out;

	spin_lock_irqsave(&qp->lock, flags);

	/* XXX can do better, improve this later by doing a by-hand scatterlist
	 * XXX walk, etc.
	 */
	ent = qp->q + qp->tail;

	ent->control = control_word_base(nbytes, auth_key_len, 0,
					 auth_type, digest_size,
					 false, true, false, false,
					 OPCODE_INPLACE_BIT |
					 OPCODE_AUTH_MAC);
	ent->src_addr = __pa(walk.data);
	ent->auth_key_addr = auth_key;
	ent->auth_iv_addr = __pa(hash_loc);
	ent->final_auth_state_addr = 0UL;
	ent->enc_key_addr = 0UL;
	ent->enc_iv_addr = 0UL;
	ent->dest_addr = __pa(hash_loc);

	nbytes = crypto_hash_walk_done(&walk, 0);
	while (nbytes > 0) {
		ent = spu_queue_next(qp, ent);

		ent->control = (nbytes - 1);
		ent->src_addr = __pa(walk.data);
		ent->auth_key_addr = 0UL;
		ent->auth_iv_addr = 0UL;
		ent->final_auth_state_addr = 0UL;
		ent->enc_key_addr = 0UL;
		ent->enc_iv_addr = 0UL;
		ent->dest_addr = 0UL;

		nbytes = crypto_hash_walk_done(&walk, 0);
	}
	ent->control |= CONTROL_END_OF_BLOCK;

	if (submit_and_wait_for_tail(qp, ent) != HV_EOK)
		err = -EINVAL;
	else
		err = 0;

	spin_unlock_irqrestore(&qp->lock, flags);

	if (!err)
		memcpy(req->result, hash_loc, result_size);
out:
	put_cpu();

	return err;
}

static int n2_hash_async_digest(struct ahash_request *req)
{
	struct n2_ahash_alg *n2alg = n2_ahash_alg(req->base.tfm);
	struct n2_hash_req_ctx *rctx = ahash_request_ctx(req);
	int ds;

	ds = n2alg->digest_size;
	if (unlikely(req->nbytes == 0)) {
		memcpy(req->result, n2alg->hash_zero, ds);
		return 0;
	}
	memcpy(&rctx->u, n2alg->hash_init, n2alg->hw_op_hashsz);

	return n2_do_async_digest(req, n2alg->auth_type,
				  n2alg->hw_op_hashsz, ds,
				  &rctx->u, 0UL, 0);
}

static int n2_hmac_async_digest(struct ahash_request *req)
{
	struct n2_hmac_alg *n2alg = n2_hmac_alg(req->base.tfm);
	struct n2_hash_req_ctx *rctx = ahash_request_ctx(req);
	struct crypto_ahash *tfm = crypto_ahash_reqtfm(req);
	struct n2_hmac_ctx *ctx = crypto_ahash_ctx(tfm);
	int ds;

	ds = n2alg->derived.digest_size;
	if (unlikely(req->nbytes == 0) ||
	    unlikely(ctx->hash_key_len > N2_HASH_KEY_MAX)) {
		struct n2_hash_req_ctx *rctx = ahash_request_ctx(req);
		struct n2_hash_ctx *ctx = crypto_ahash_ctx(tfm);

		ahash_request_set_tfm(&rctx->fallback_req, ctx->fallback_tfm);
		rctx->fallback_req.base.flags =
			req->base.flags & CRYPTO_TFM_REQ_MAY_SLEEP;
		rctx->fallback_req.nbytes = req->nbytes;
		rctx->fallback_req.src = req->src;
		rctx->fallback_req.result = req->result;

		return crypto_ahash_digest(&rctx->fallback_req);
	}
	memcpy(&rctx->u, n2alg->derived.hash_init,
	       n2alg->derived.hw_op_hashsz);

	return n2_do_async_digest(req, n2alg->derived.hmac_type,
				  n2alg->derived.hw_op_hashsz, ds,
				  &rctx->u,
				  __pa(&ctx->hash_key),
				  ctx->hash_key_len);
}

struct n2_cipher_context {
	int			key_len;
	int			enc_type;
	union {
		u8		aes[AES_MAX_KEY_SIZE];
		u8		des[DES_KEY_SIZE];
		u8		des3[3 * DES_KEY_SIZE];
		u8		arc4[258]; /* S-box, X, Y */
	} key;
};

#define N2_CHUNK_ARR_LEN	16

struct n2_crypto_chunk {
	struct list_head	entry;
	unsigned long		iv_paddr : 44;
	unsigned long		arr_len : 20;
	unsigned long		dest_paddr;
	unsigned long		dest_final;
	struct {
		unsigned long	src_paddr : 44;
		unsigned long	src_len : 20;
	} arr[N2_CHUNK_ARR_LEN];
};

struct n2_request_context {
	struct ablkcipher_walk	walk;
	struct list_head	chunk_list;
	struct n2_crypto_chunk	chunk;
	u8			temp_iv[16];
};

/* The SPU allows some level of flexibility for partial cipher blocks
 * being specified in a descriptor.
 *
 * It merely requires that every descriptor's length field is at least
 * as large as the cipher block size.  This means that a cipher block
 * can span at most 2 descriptors.  However, this does not allow a
 * partial block to span into the final descriptor as that would
 * violate the rule (since every descriptor's length must be at least
 * the block size).  So, for example, assuming an 8 byte block size:
 *
 *	0xe --> 0xa --> 0x8
 *
 * is a valid length sequence, whereas:
 *
 *	0xe --> 0xb --> 0x7
 *
 * is not a valid sequence.
 */
struct n2_cipher_alg {
	struct list_head	entry;
	u8			enc_type;
	struct crypto_alg	alg;
};

static inline struct n2_cipher_alg *n2_cipher_alg(struct crypto_tfm *tfm)
{
	struct crypto_alg *alg = tfm->__crt_alg;

	return container_of(alg, struct n2_cipher_alg, alg);
}

struct n2_cipher_request_context {
	struct ablkcipher_walk	walk;
};

static int n2_aes_setkey(struct crypto_ablkcipher *cipher, const u8 *key,
			 unsigned int keylen)
{
	struct crypto_tfm *tfm = crypto_ablkcipher_tfm(cipher);
	struct n2_cipher_context *ctx = crypto_tfm_ctx(tfm);
	struct n2_cipher_alg *n2alg = n2_cipher_alg(tfm);

	ctx->enc_type = (n2alg->enc_type & ENC_TYPE_CHAINING_MASK);

	switch (keylen) {
	case AES_KEYSIZE_128:
		ctx->enc_type |= ENC_TYPE_ALG_AES128;
		break;
	case AES_KEYSIZE_192:
		ctx->enc_type |= ENC_TYPE_ALG_AES192;
		break;
	case AES_KEYSIZE_256:
		ctx->enc_type |= ENC_TYPE_ALG_AES256;
		break;
	default:
		crypto_ablkcipher_set_flags(cipher, CRYPTO_TFM_RES_BAD_KEY_LEN);
		return -EINVAL;
	}

	ctx->key_len = keylen;
	memcpy(ctx->key.aes, key, keylen);
	return 0;
}

static int n2_des_setkey(struct crypto_ablkcipher *cipher, const u8 *key,
			 unsigned int keylen)
{
	struct crypto_tfm *tfm = crypto_ablkcipher_tfm(cipher);
	struct n2_cipher_context *ctx = crypto_tfm_ctx(tfm);
	struct n2_cipher_alg *n2alg = n2_cipher_alg(tfm);
	u32 tmp[DES_EXPKEY_WORDS];
	int err;

	ctx->enc_type = n2alg->enc_type;

	if (keylen != DES_KEY_SIZE) {
		crypto_ablkcipher_set_flags(cipher, CRYPTO_TFM_RES_BAD_KEY_LEN);
		return -EINVAL;
	}

	err = des_ekey(tmp, key);
	if (err == 0 && (tfm->crt_flags & CRYPTO_TFM_REQ_WEAK_KEY)) {
		tfm->crt_flags |= CRYPTO_TFM_RES_WEAK_KEY;
		return -EINVAL;
	}

	ctx->key_len = keylen;
	memcpy(ctx->key.des, key, keylen);
	return 0;
}

static int n2_3des_setkey(struct crypto_ablkcipher *cipher, const u8 *key,
			  unsigned int keylen)
{
	struct crypto_tfm *tfm = crypto_ablkcipher_tfm(cipher);
	struct n2_cipher_context *ctx = crypto_tfm_ctx(tfm);
	struct n2_cipher_alg *n2alg = n2_cipher_alg(tfm);

	ctx->enc_type = n2alg->enc_type;

	if (keylen != (3 * DES_KEY_SIZE)) {
		crypto_ablkcipher_set_flags(cipher, CRYPTO_TFM_RES_BAD_KEY_LEN);
		return -EINVAL;
	}
	ctx->key_len = keylen;
	memcpy(ctx->key.des3, key, keylen);
	return 0;
}

static int n2_arc4_setkey(struct crypto_ablkcipher *cipher, const u8 *key,
			  unsigned int keylen)
{
	struct crypto_tfm *tfm = crypto_ablkcipher_tfm(cipher);
	struct n2_cipher_context *ctx = crypto_tfm_ctx(tfm);
	struct n2_cipher_alg *n2alg = n2_cipher_alg(tfm);
	u8 *s = ctx->key.arc4;
	u8 *x = s + 256;
	u8 *y = x + 1;
	int i, j, k;

	ctx->enc_type = n2alg->enc_type;

	j = k = 0;
	*x = 0;
	*y = 0;
	for (i = 0; i < 256; i++)
		s[i] = i;
	for (i = 0; i < 256; i++) {
		u8 a = s[i];
		j = (j + key[k] + a) & 0xff;
		s[i] = s[j];
		s[j] = a;
		if (++k >= keylen)
			k = 0;
	}

	return 0;
}

static inline int cipher_descriptor_len(int nbytes, unsigned int block_size)
{
	int this_len = nbytes;

	this_len -= (nbytes & (block_size - 1));
	return this_len > (1 << 16) ? (1 << 16) : this_len;
}
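
/* Illustration: a descriptor's length is nbytes rounded down to a
 * whole number of cipher blocks, capped at the hardware's 2^16 byte
 * per-descriptor limit.  E.g. with a 16-byte block size,
 * cipher_descriptor_len(0x1001f, 16) == 0x10000 and
 * cipher_descriptor_len(0x1f, 16) == 0x10.
 */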

static int __n2_crypt_chunk(struct crypto_tfm *tfm, struct n2_crypto_chunk *cp,
			    struct spu_queue *qp, bool encrypt)
{
	struct n2_cipher_context *ctx = crypto_tfm_ctx(tfm);
	struct cwq_initial_entry *ent;
	bool in_place;
	int i;

	ent = spu_queue_alloc(qp, cp->arr_len);
	if (!ent) {
		pr_info("queue_alloc() of %d fails\n",
			cp->arr_len);
		return -EBUSY;
	}

	in_place = (cp->dest_paddr == cp->arr[0].src_paddr);

	ent->control = control_word_base(cp->arr[0].src_len,
					 0, ctx->enc_type, 0, 0,
					 false, true, false, encrypt,
					 OPCODE_ENCRYPT |
					 (in_place ? OPCODE_INPLACE_BIT : 0));
	ent->src_addr = cp->arr[0].src_paddr;
	ent->auth_key_addr = 0UL;
	ent->auth_iv_addr = 0UL;
	ent->final_auth_state_addr = 0UL;
	ent->enc_key_addr = __pa(&ctx->key);
	ent->enc_iv_addr = cp->iv_paddr;
	ent->dest_addr = (in_place ? 0UL : cp->dest_paddr);

	for (i = 1; i < cp->arr_len; i++) {
		ent = spu_queue_next(qp, ent);

		ent->control = cp->arr[i].src_len - 1;
		ent->src_addr = cp->arr[i].src_paddr;
		ent->auth_key_addr = 0UL;
		ent->auth_iv_addr = 0UL;
		ent->final_auth_state_addr = 0UL;
		ent->enc_key_addr = 0UL;
		ent->enc_iv_addr = 0UL;
		ent->dest_addr = 0UL;
	}
	ent->control |= CONTROL_END_OF_BLOCK;

	return (spu_queue_submit(qp, ent) != HV_EOK) ? -EINVAL : 0;
}

static int n2_compute_chunks(struct ablkcipher_request *req)
{
	struct n2_request_context *rctx = ablkcipher_request_ctx(req);
	struct ablkcipher_walk *walk = &rctx->walk;
	struct n2_crypto_chunk *chunk;
	unsigned long dest_prev;
	unsigned int tot_len;
	bool prev_in_place;
	int err, nbytes;

	ablkcipher_walk_init(walk, req->dst, req->src, req->nbytes);
	err = ablkcipher_walk_phys(req, walk);
	if (err)
		return err;

	INIT_LIST_HEAD(&rctx->chunk_list);

	chunk = &rctx->chunk;
	INIT_LIST_HEAD(&chunk->entry);
	chunk->iv_paddr = 0UL;
	chunk->arr_len = 0;
	chunk->dest_paddr = 0UL;

	prev_in_place = false;
	dest_prev = ~0UL;
	tot_len = 0;

	while ((nbytes = walk->nbytes) != 0) {
		unsigned long dest_paddr, src_paddr;
		bool in_place;
		int this_len;

		src_paddr = (page_to_phys(walk->src.page) +
			     walk->src.offset);
		dest_paddr = (page_to_phys(walk->dst.page) +
			      walk->dst.offset);
		in_place = (src_paddr == dest_paddr);
		this_len = cipher_descriptor_len(nbytes, walk->blocksize);

		if (chunk->arr_len != 0) {
			if (in_place != prev_in_place ||
			    (!prev_in_place &&
			     dest_paddr != dest_prev) ||
			    chunk->arr_len == N2_CHUNK_ARR_LEN ||
			    tot_len + this_len > (1 << 16)) {
				chunk->dest_final = dest_prev;
				list_add_tail(&chunk->entry,
					      &rctx->chunk_list);
				chunk = kzalloc(sizeof(*chunk), GFP_ATOMIC);
				if (!chunk) {
					err = -ENOMEM;
					break;
				}
				INIT_LIST_HEAD(&chunk->entry);
			}
		}
		if (chunk->arr_len == 0) {
			chunk->dest_paddr = dest_paddr;
			tot_len = 0;
		}
		chunk->arr[chunk->arr_len].src_paddr = src_paddr;
		chunk->arr[chunk->arr_len].src_len = this_len;
		chunk->arr_len++;

		dest_prev = dest_paddr + this_len;
		prev_in_place = in_place;
		tot_len += this_len;

		err = ablkcipher_walk_done(req, walk, nbytes - this_len);
		if (err)
			break;
	}
	if (!err && chunk->arr_len != 0) {
		chunk->dest_final = dest_prev;
		list_add_tail(&chunk->entry, &rctx->chunk_list);
	}

	return err;
}
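
/* Summarizing the cut points above: a chunk is closed and a new one
 * started when (a) the in-place property of the segments flips,
 * (b) an out-of-place destination stops being contiguous with the
 * previous segment's, (c) the descriptor array fills up
 * (N2_CHUNK_ARR_LEN == 16 segments), or (d) the running total would
 * exceed the 2^16-byte hardware limit.  For instance, an in-place
 * request larger than 64 KB is always split so that no single chunk
 * exceeds 64 KB.
 */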

static void n2_chunk_complete(struct ablkcipher_request *req, void *final_iv)
{
	struct n2_request_context *rctx = ablkcipher_request_ctx(req);
	struct n2_crypto_chunk *c, *tmp;

	if (final_iv)
		memcpy(rctx->walk.iv, final_iv, rctx->walk.blocksize);

	ablkcipher_walk_complete(&rctx->walk);
	list_for_each_entry_safe(c, tmp, &rctx->chunk_list, entry) {
		list_del(&c->entry);
		if (unlikely(c != &rctx->chunk))
			kfree(c);
	}
}

static int n2_do_ecb(struct ablkcipher_request *req, bool encrypt)
{
	struct n2_request_context *rctx = ablkcipher_request_ctx(req);
	struct crypto_tfm *tfm = req->base.tfm;
	int err = n2_compute_chunks(req);
	struct n2_crypto_chunk *c, *tmp;
	unsigned long flags, hv_ret;
	struct spu_queue *qp;

	if (err)
		return err;

	qp = cpu_to_cwq[get_cpu()];
	err = -ENODEV;
	if (!qp)
		goto out;

	spin_lock_irqsave(&qp->lock, flags);

	list_for_each_entry_safe(c, tmp, &rctx->chunk_list, entry) {
		err = __n2_crypt_chunk(tfm, c, qp, encrypt);
		if (err)
			break;
		list_del(&c->entry);
		if (unlikely(c != &rctx->chunk))
			kfree(c);
	}
	if (!err) {
		hv_ret = wait_for_tail(qp);
		if (hv_ret != HV_EOK)
			err = -EINVAL;
	}

	spin_unlock_irqrestore(&qp->lock, flags);

out:
	put_cpu();

	n2_chunk_complete(req, NULL);
	return err;
}

static int n2_encrypt_ecb(struct ablkcipher_request *req)
{
	return n2_do_ecb(req, true);
}

static int n2_decrypt_ecb(struct ablkcipher_request *req)
{
	return n2_do_ecb(req, false);
}

static int n2_do_chaining(struct ablkcipher_request *req, bool encrypt)
{
	struct n2_request_context *rctx = ablkcipher_request_ctx(req);
	struct crypto_tfm *tfm = req->base.tfm;
	unsigned long flags, hv_ret, iv_paddr;
	int err = n2_compute_chunks(req);
	struct n2_crypto_chunk *c, *tmp;
	struct spu_queue *qp;
	void *final_iv_addr;

	final_iv_addr = NULL;

	if (err)
		return err;

	qp = cpu_to_cwq[get_cpu()];
	err = -ENODEV;
	if (!qp)
		goto out;

	spin_lock_irqsave(&qp->lock, flags);

	if (encrypt) {
		iv_paddr = __pa(rctx->walk.iv);
		list_for_each_entry_safe(c, tmp, &rctx->chunk_list,
					 entry) {
			c->iv_paddr = iv_paddr;
			err = __n2_crypt_chunk(tfm, c, qp, true);
			if (err)
				break;
			iv_paddr = c->dest_final - rctx->walk.blocksize;
			list_del(&c->entry);
			if (unlikely(c != &rctx->chunk))
				kfree(c);
		}
		final_iv_addr = __va(iv_paddr);
	} else {
		list_for_each_entry_safe_reverse(c, tmp, &rctx->chunk_list,
						 entry) {
			if (c == &rctx->chunk) {
				iv_paddr = __pa(rctx->walk.iv);
			} else {
				iv_paddr = (tmp->arr[tmp->arr_len-1].src_paddr +
					    tmp->arr[tmp->arr_len-1].src_len -
					    rctx->walk.blocksize);
			}
			if (!final_iv_addr) {
				unsigned long pa;

				pa = (c->arr[c->arr_len-1].src_paddr +
				      c->arr[c->arr_len-1].src_len -
				      rctx->walk.blocksize);
				final_iv_addr = rctx->temp_iv;
				memcpy(rctx->temp_iv, __va(pa),
				       rctx->walk.blocksize);
			}
			c->iv_paddr = iv_paddr;
			err = __n2_crypt_chunk(tfm, c, qp, false);
			if (err)
				break;
			list_del(&c->entry);
			if (unlikely(c != &rctx->chunk))
				kfree(c);
		}
	}
	if (!err) {
		hv_ret = wait_for_tail(qp);
		if (hv_ret != HV_EOK)
			err = -EINVAL;
	}

	spin_unlock_irqrestore(&qp->lock, flags);

out:
	put_cpu();

	n2_chunk_complete(req, err ? NULL : final_iv_addr);
	return err;
}
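
/* A note on the decrypt path above: each chunk's IV is the last
 * ciphertext block of the chunk that precedes it, and for an
 * in-place operation that block is overwritten as soon as its own
 * chunk is decrypted.  Walking the list in reverse lets every chunk
 * read its predecessor's ciphertext while it still exists, and the
 * overall final IV (the request's last ciphertext block) is stashed
 * in temp_iv up front for the same reason.
 */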

static int n2_encrypt_chaining(struct ablkcipher_request *req)
{
	return n2_do_chaining(req, true);
}

static int n2_decrypt_chaining(struct ablkcipher_request *req)
{
	return n2_do_chaining(req, false);
}

struct n2_cipher_tmpl {
	const char		*name;
	const char		*drv_name;
	u8			block_size;
	u8			enc_type;
	struct ablkcipher_alg	ablkcipher;
};

static const struct n2_cipher_tmpl cipher_tmpls[] = {
	/* ARC4: only ECB is supported (chaining bits ignored) */
	{	.name		= "ecb(arc4)",
		.drv_name	= "ecb-arc4",
		.block_size	= 1,
		.enc_type	= (ENC_TYPE_ALG_RC4_STREAM |
				   ENC_TYPE_CHAINING_ECB),
		.ablkcipher	= {
			.min_keysize	= 1,
			.max_keysize	= 256,
			.setkey		= n2_arc4_setkey,
			.encrypt	= n2_encrypt_ecb,
			.decrypt	= n2_decrypt_ecb,
		},
	},

	/* DES: ECB CBC and CFB are supported */
	{	.name		= "ecb(des)",
		.drv_name	= "ecb-des",
		.block_size	= DES_BLOCK_SIZE,
		.enc_type	= (ENC_TYPE_ALG_DES |
				   ENC_TYPE_CHAINING_ECB),
		.ablkcipher	= {
			.min_keysize	= DES_KEY_SIZE,
			.max_keysize	= DES_KEY_SIZE,
			.setkey		= n2_des_setkey,
			.encrypt	= n2_encrypt_ecb,
			.decrypt	= n2_decrypt_ecb,
		},
	},
	{	.name		= "cbc(des)",
		.drv_name	= "cbc-des",
		.block_size	= DES_BLOCK_SIZE,
		.enc_type	= (ENC_TYPE_ALG_DES |
				   ENC_TYPE_CHAINING_CBC),
		.ablkcipher	= {
			.ivsize		= DES_BLOCK_SIZE,
			.min_keysize	= DES_KEY_SIZE,
			.max_keysize	= DES_KEY_SIZE,
			.setkey		= n2_des_setkey,
			.encrypt	= n2_encrypt_chaining,
			.decrypt	= n2_decrypt_chaining,
		},
	},
	{	.name		= "cfb(des)",
		.drv_name	= "cfb-des",
		.block_size	= DES_BLOCK_SIZE,
		.enc_type	= (ENC_TYPE_ALG_DES |
				   ENC_TYPE_CHAINING_CFB),
		.ablkcipher	= {
			.min_keysize	= DES_KEY_SIZE,
			.max_keysize	= DES_KEY_SIZE,
			.setkey		= n2_des_setkey,
			.encrypt	= n2_encrypt_chaining,
			.decrypt	= n2_decrypt_chaining,
		},
	},

	/* 3DES: ECB CBC and CFB are supported */
	{	.name		= "ecb(des3_ede)",
		.drv_name	= "ecb-3des",
		.block_size	= DES_BLOCK_SIZE,
		.enc_type	= (ENC_TYPE_ALG_3DES |
				   ENC_TYPE_CHAINING_ECB),
		.ablkcipher	= {
			.min_keysize	= 3 * DES_KEY_SIZE,
			.max_keysize	= 3 * DES_KEY_SIZE,
			.setkey		= n2_3des_setkey,
			.encrypt	= n2_encrypt_ecb,
			.decrypt	= n2_decrypt_ecb,
		},
	},
	{	.name		= "cbc(des3_ede)",
		.drv_name	= "cbc-3des",
		.block_size	= DES_BLOCK_SIZE,
		.enc_type	= (ENC_TYPE_ALG_3DES |
				   ENC_TYPE_CHAINING_CBC),
		.ablkcipher	= {
			.ivsize		= DES_BLOCK_SIZE,
			.min_keysize	= 3 * DES_KEY_SIZE,
			.max_keysize	= 3 * DES_KEY_SIZE,
			.setkey		= n2_3des_setkey,
			.encrypt	= n2_encrypt_chaining,
			.decrypt	= n2_decrypt_chaining,
		},
	},
	{	.name		= "cfb(des3_ede)",
		.drv_name	= "cfb-3des",
		.block_size	= DES_BLOCK_SIZE,
		.enc_type	= (ENC_TYPE_ALG_3DES |
				   ENC_TYPE_CHAINING_CFB),
		.ablkcipher	= {
			.min_keysize	= 3 * DES_KEY_SIZE,
			.max_keysize	= 3 * DES_KEY_SIZE,
			.setkey		= n2_3des_setkey,
			.encrypt	= n2_encrypt_chaining,
			.decrypt	= n2_decrypt_chaining,
		},
	},

	/* AES: ECB CBC and CTR are supported */
	{	.name		= "ecb(aes)",
		.drv_name	= "ecb-aes",
		.block_size	= AES_BLOCK_SIZE,
		.enc_type	= (ENC_TYPE_ALG_AES128 |
				   ENC_TYPE_CHAINING_ECB),
		.ablkcipher	= {
			.min_keysize	= AES_MIN_KEY_SIZE,
			.max_keysize	= AES_MAX_KEY_SIZE,
			.setkey		= n2_aes_setkey,
			.encrypt	= n2_encrypt_ecb,
			.decrypt	= n2_decrypt_ecb,
		},
	},
	{	.name		= "cbc(aes)",
		.drv_name	= "cbc-aes",
		.block_size	= AES_BLOCK_SIZE,
		.enc_type	= (ENC_TYPE_ALG_AES128 |
				   ENC_TYPE_CHAINING_CBC),
		.ablkcipher	= {
			.ivsize		= AES_BLOCK_SIZE,
			.min_keysize	= AES_MIN_KEY_SIZE,
			.max_keysize	= AES_MAX_KEY_SIZE,
			.setkey		= n2_aes_setkey,
			.encrypt	= n2_encrypt_chaining,
			.decrypt	= n2_decrypt_chaining,
		},
	},
	{	.name		= "ctr(aes)",
		.drv_name	= "ctr-aes",
		.block_size	= AES_BLOCK_SIZE,
		.enc_type	= (ENC_TYPE_ALG_AES128 |
				   ENC_TYPE_CHAINING_COUNTER),
		.ablkcipher	= {
			.ivsize		= AES_BLOCK_SIZE,
			.min_keysize	= AES_MIN_KEY_SIZE,
			.max_keysize	= AES_MAX_KEY_SIZE,
			.setkey		= n2_aes_setkey,
			.encrypt	= n2_encrypt_chaining,
			.decrypt	= n2_encrypt_chaining,
		},
	},
};
#define NUM_CIPHER_TMPLS ARRAY_SIZE(cipher_tmpls)

static LIST_HEAD(cipher_algs);

struct n2_hash_tmpl {
	const char	*name;
	const char	*hash_zero;
	const u32	*hash_init;
	u8		hw_op_hashsz;
	u8		digest_size;
	u8		block_size;
	u8		auth_type;
	u8		hmac_type;
};

static const char md5_zero[MD5_DIGEST_SIZE] = {
	0xd4, 0x1d, 0x8c, 0xd9, 0x8f, 0x00, 0xb2, 0x04,
	0xe9, 0x80, 0x09, 0x98, 0xec, 0xf8, 0x42, 0x7e,
};
static const u32 md5_init[MD5_HASH_WORDS] = {
	cpu_to_le32(0x67452301),
	cpu_to_le32(0xefcdab89),
	cpu_to_le32(0x98badcfe),
	cpu_to_le32(0x10325476),
};
static const char sha1_zero[SHA1_DIGEST_SIZE] = {
	0xda, 0x39, 0xa3, 0xee, 0x5e, 0x6b, 0x4b, 0x0d, 0x32,
	0x55, 0xbf, 0xef, 0x95, 0x60, 0x18, 0x90, 0xaf, 0xd8,
	0x07, 0x09
};
static const u32 sha1_init[SHA1_DIGEST_SIZE / 4] = {
	SHA1_H0, SHA1_H1, SHA1_H2, SHA1_H3, SHA1_H4,
};
static const char sha256_zero[SHA256_DIGEST_SIZE] = {
	0xe3, 0xb0, 0xc4, 0x42, 0x98, 0xfc, 0x1c, 0x14, 0x9a,
	0xfb, 0xf4, 0xc8, 0x99, 0x6f, 0xb9, 0x24, 0x27, 0xae,
	0x41, 0xe4, 0x64, 0x9b, 0x93, 0x4c, 0xa4, 0x95, 0x99,
	0x1b, 0x78, 0x52, 0xb8, 0x55
};
static const u32 sha256_init[SHA256_DIGEST_SIZE / 4] = {
	SHA256_H0, SHA256_H1, SHA256_H2, SHA256_H3,
	SHA256_H4, SHA256_H5, SHA256_H6, SHA256_H7,
};
static const char sha224_zero[SHA224_DIGEST_SIZE] = {
	0xd1, 0x4a, 0x02, 0x8c, 0x2a, 0x3a, 0x2b, 0xc9, 0x47,
	0x61, 0x02, 0xbb, 0x28, 0x82, 0x34, 0xc4, 0x15, 0xa2,
	0xb0, 0x1f, 0x82, 0x8e, 0xa6, 0x2a, 0xc5, 0xb3, 0xe4,
	0x2f
};
static const u32 sha224_init[SHA256_DIGEST_SIZE / 4] = {
	SHA224_H0, SHA224_H1, SHA224_H2, SHA224_H3,
	SHA224_H4, SHA224_H5, SHA224_H6, SHA224_H7,
};

static const struct n2_hash_tmpl hash_tmpls[] = {
	{ .name		= "md5",
	  .hash_zero	= md5_zero,
	  .hash_init	= md5_init,
	  .auth_type	= AUTH_TYPE_MD5,
	  .hmac_type	= AUTH_TYPE_HMAC_MD5,
	  .hw_op_hashsz	= MD5_DIGEST_SIZE,
	  .digest_size	= MD5_DIGEST_SIZE,
	  .block_size	= MD5_HMAC_BLOCK_SIZE },
	{ .name		= "sha1",
	  .hash_zero	= sha1_zero,
	  .hash_init	= sha1_init,
	  .auth_type	= AUTH_TYPE_SHA1,
	  .hmac_type	= AUTH_TYPE_HMAC_SHA1,
	  .hw_op_hashsz	= SHA1_DIGEST_SIZE,
	  .digest_size	= SHA1_DIGEST_SIZE,
	  .block_size	= SHA1_BLOCK_SIZE },
	{ .name		= "sha256",
	  .hash_zero	= sha256_zero,
	  .hash_init	= sha256_init,
	  .auth_type	= AUTH_TYPE_SHA256,
	  .hmac_type	= AUTH_TYPE_HMAC_SHA256,
	  .hw_op_hashsz	= SHA256_DIGEST_SIZE,
	  .digest_size	= SHA256_DIGEST_SIZE,
	  .block_size	= SHA256_BLOCK_SIZE },
	{ .name		= "sha224",
	  .hash_zero	= sha224_zero,
	  .hash_init	= sha224_init,
	  .auth_type	= AUTH_TYPE_SHA256,
	  .hmac_type	= AUTH_TYPE_RESERVED,
	  .hw_op_hashsz	= SHA256_DIGEST_SIZE,
	  .digest_size	= SHA224_DIGEST_SIZE,
	  .block_size	= SHA224_BLOCK_SIZE },
};
#define NUM_HASH_TMPLS ARRAY_SIZE(hash_tmpls)

static LIST_HEAD(ahash_algs);
static LIST_HEAD(hmac_algs);

static int algs_registered;

static void __n2_unregister_algs(void)
{
	struct n2_cipher_alg *cipher, *cipher_tmp;
	struct n2_ahash_alg *alg, *alg_tmp;
	struct n2_hmac_alg *hmac, *hmac_tmp;

	list_for_each_entry_safe(cipher, cipher_tmp, &cipher_algs, entry) {
		crypto_unregister_alg(&cipher->alg);
		list_del(&cipher->entry);
		kfree(cipher);
	}
	list_for_each_entry_safe(hmac, hmac_tmp, &hmac_algs, derived.entry) {
		crypto_unregister_ahash(&hmac->derived.alg);
		list_del(&hmac->derived.entry);
		kfree(hmac);
	}
	list_for_each_entry_safe(alg, alg_tmp, &ahash_algs, entry) {
		crypto_unregister_ahash(&alg->alg);
		list_del(&alg->entry);
		kfree(alg);
	}
}

static int n2_cipher_cra_init(struct crypto_tfm *tfm)
{
	tfm->crt_ablkcipher.reqsize = sizeof(struct n2_request_context);
	return 0;
}

static int __n2_register_one_cipher(const struct n2_cipher_tmpl *tmpl)
{
	struct n2_cipher_alg *p = kzalloc(sizeof(*p), GFP_KERNEL);
	struct crypto_alg *alg;
	int err;

	if (!p)
		return -ENOMEM;

	alg = &p->alg;

	snprintf(alg->cra_name, CRYPTO_MAX_ALG_NAME, "%s", tmpl->name);
	snprintf(alg->cra_driver_name, CRYPTO_MAX_ALG_NAME, "%s-n2", tmpl->drv_name);
	alg->cra_priority = N2_CRA_PRIORITY;
	alg->cra_flags = CRYPTO_ALG_TYPE_ABLKCIPHER |
			 CRYPTO_ALG_KERN_DRIVER_ONLY | CRYPTO_ALG_ASYNC;
	alg->cra_blocksize = tmpl->block_size;
	p->enc_type = tmpl->enc_type;
	alg->cra_ctxsize = sizeof(struct n2_cipher_context);
	alg->cra_type = &crypto_ablkcipher_type;
	alg->cra_u.ablkcipher = tmpl->ablkcipher;
	alg->cra_init = n2_cipher_cra_init;
	alg->cra_module = THIS_MODULE;

	list_add(&p->entry, &cipher_algs);
	err = crypto_register_alg(alg);
	if (err) {
		pr_err("%s alg registration failed\n", alg->cra_name);
		list_del(&p->entry);
		kfree(p);
	} else {
		pr_info("%s alg registered\n", alg->cra_name);
	}
	return err;
}

static int __n2_register_one_hmac(struct n2_ahash_alg *n2ahash)
{
	struct n2_hmac_alg *p = kzalloc(sizeof(*p), GFP_KERNEL);
	struct ahash_alg *ahash;
	struct crypto_alg *base;
	int err;

	if (!p)
		return -ENOMEM;

	p->child_alg = n2ahash->alg.halg.base.cra_name;
	memcpy(&p->derived, n2ahash, sizeof(struct n2_ahash_alg));
	INIT_LIST_HEAD(&p->derived.entry);

	ahash = &p->derived.alg;
	ahash->digest = n2_hmac_async_digest;
	ahash->setkey = n2_hmac_async_setkey;

	base = &ahash->halg.base;
	snprintf(base->cra_name, CRYPTO_MAX_ALG_NAME, "hmac(%s)", p->child_alg);
	snprintf(base->cra_driver_name, CRYPTO_MAX_ALG_NAME, "hmac-%s-n2", p->child_alg);

	base->cra_ctxsize = sizeof(struct n2_hmac_ctx);
	base->cra_init = n2_hmac_cra_init;
	base->cra_exit = n2_hmac_cra_exit;

	list_add(&p->derived.entry, &hmac_algs);
	err = crypto_register_ahash(ahash);
	if (err) {
		pr_err("%s alg registration failed\n", base->cra_name);
		list_del(&p->derived.entry);
		kfree(p);
	} else {
		pr_info("%s alg registered\n", base->cra_name);
	}
	return err;
}

static int __n2_register_one_ahash(const struct n2_hash_tmpl *tmpl)
{
	struct n2_ahash_alg *p = kzalloc(sizeof(*p), GFP_KERNEL);
	struct hash_alg_common *halg;
	struct crypto_alg *base;
	struct ahash_alg *ahash;
	int err;

	if (!p)
		return -ENOMEM;

	p->hash_zero = tmpl->hash_zero;
	p->hash_init = tmpl->hash_init;
	p->auth_type = tmpl->auth_type;
	p->hmac_type = tmpl->hmac_type;
	p->hw_op_hashsz = tmpl->hw_op_hashsz;
	p->digest_size = tmpl->digest_size;

	ahash = &p->alg;
	ahash->init = n2_hash_async_init;
	ahash->update = n2_hash_async_update;
	ahash->final = n2_hash_async_final;
	ahash->finup = n2_hash_async_finup;
	ahash->digest = n2_hash_async_digest;

	halg = &ahash->halg;
	halg->digestsize = tmpl->digest_size;

	base = &halg->base;
	snprintf(base->cra_name, CRYPTO_MAX_ALG_NAME, "%s", tmpl->name);
	snprintf(base->cra_driver_name, CRYPTO_MAX_ALG_NAME, "%s-n2", tmpl->name);
	base->cra_priority = N2_CRA_PRIORITY;
	base->cra_flags = CRYPTO_ALG_TYPE_AHASH |
			  CRYPTO_ALG_KERN_DRIVER_ONLY |
			  CRYPTO_ALG_NEED_FALLBACK;
	base->cra_blocksize = tmpl->block_size;
	base->cra_ctxsize = sizeof(struct n2_hash_ctx);
	base->cra_module = THIS_MODULE;
	base->cra_init = n2_hash_cra_init;
	base->cra_exit = n2_hash_cra_exit;

	list_add(&p->entry, &ahash_algs);
	err = crypto_register_ahash(ahash);
	if (err) {
		pr_err("%s alg registration failed\n", base->cra_name);
		list_del(&p->entry);
		kfree(p);
	} else {
		pr_info("%s alg registered\n", base->cra_name);
	}
	if (!err && p->hmac_type != AUTH_TYPE_RESERVED)
		err = __n2_register_one_hmac(p);
	return err;
}

static int n2_register_algs(void)
{
	int i, err = 0;

	mutex_lock(&spu_lock);
	if (algs_registered++)
		goto out;

	for (i = 0; i < NUM_HASH_TMPLS; i++) {
		err = __n2_register_one_ahash(&hash_tmpls[i]);
		if (err) {
			__n2_unregister_algs();
			goto out;
		}
	}
	for (i = 0; i < NUM_CIPHER_TMPLS; i++) {
		err = __n2_register_one_cipher(&cipher_tmpls[i]);
		if (err) {
			__n2_unregister_algs();
			goto out;
		}
	}

out:
	mutex_unlock(&spu_lock);
	return err;
}

static void n2_unregister_algs(void)
{
	mutex_lock(&spu_lock);
	if (!--algs_registered)
		__n2_unregister_algs();
	mutex_unlock(&spu_lock);
}

/* To map CWQ queues to interrupt sources, the hypervisor API provides
 * a devino.  This isn't very useful to us because all of the
 * interrupts listed in the device_node have been translated to
 * Linux virtual IRQ cookie numbers.
 *
 * So we have to back-translate, going through the 'intr' and 'ino'
 * property tables of the n2cp MDESC node, matching it with the OF
 * 'interrupts' property entries, in order to figure out which
 * devino goes to which already-translated IRQ.
 */
  1324. static int find_devino_index(struct platform_device *dev, struct spu_mdesc_info *ip,
  1325. unsigned long dev_ino)
  1326. {
  1327. const unsigned int *dev_intrs;
  1328. unsigned int intr;
  1329. int i;
  1330. for (i = 0; i < ip->num_intrs; i++) {
  1331. if (ip->ino_table[i].ino == dev_ino)
  1332. break;
  1333. }
  1334. if (i == ip->num_intrs)
  1335. return -ENODEV;
  1336. intr = ip->ino_table[i].intr;
  1337. dev_intrs = of_get_property(dev->dev.of_node, "interrupts", NULL);
  1338. if (!dev_intrs)
  1339. return -ENODEV;
  1340. for (i = 0; i < dev->archdata.num_irqs; i++) {
  1341. if (dev_intrs[i] == intr)
  1342. return i;
  1343. }
  1344. return -ENODEV;
  1345. }
  1346. static int spu_map_ino(struct platform_device *dev, struct spu_mdesc_info *ip,
  1347. const char *irq_name, struct spu_queue *p,
  1348. irq_handler_t handler)
  1349. {
  1350. unsigned long herr;
  1351. int index;
  1352. herr = sun4v_ncs_qhandle_to_devino(p->qhandle, &p->devino);
  1353. if (herr)
  1354. return -EINVAL;
  1355. index = find_devino_index(dev, ip, p->devino);
  1356. if (index < 0)
  1357. return index;
  1358. p->irq = dev->archdata.irqs[index];
  1359. sprintf(p->irq_name, "%s-%d", irq_name, index);
  1360. return request_irq(p->irq, handler, 0, p->irq_name, p);
  1361. }
  1362. static struct kmem_cache *queue_cache[2];
  1363. static void *new_queue(unsigned long q_type)
  1364. {
  1365. return kmem_cache_zalloc(queue_cache[q_type - 1], GFP_KERNEL);
  1366. }
  1367. static void free_queue(void *p, unsigned long q_type)
  1368. {
  1369. return kmem_cache_free(queue_cache[q_type - 1], p);
  1370. }
static int queue_cache_init(void)
{
	if (!queue_cache[HV_NCS_QTYPE_MAU - 1])
		queue_cache[HV_NCS_QTYPE_MAU - 1] =
			kmem_cache_create("mau_queue",
					  (MAU_NUM_ENTRIES *
					   MAU_ENTRY_SIZE),
					  MAU_ENTRY_SIZE, 0, NULL);
	if (!queue_cache[HV_NCS_QTYPE_MAU - 1])
		return -ENOMEM;

	if (!queue_cache[HV_NCS_QTYPE_CWQ - 1])
		queue_cache[HV_NCS_QTYPE_CWQ - 1] =
			kmem_cache_create("cwq_queue",
					  (CWQ_NUM_ENTRIES *
					   CWQ_ENTRY_SIZE),
					  CWQ_ENTRY_SIZE, 0, NULL);
	if (!queue_cache[HV_NCS_QTYPE_CWQ - 1]) {
		kmem_cache_destroy(queue_cache[HV_NCS_QTYPE_MAU - 1]);
		/* Clear the stale pointer so that a later retry of
		 * queue_cache_init() does not reuse, or re-destroy,
		 * the cache we just destroyed.
		 */
		queue_cache[HV_NCS_QTYPE_MAU - 1] = NULL;
		return -ENOMEM;
	}

	return 0;
}
static void queue_cache_destroy(void)
{
	kmem_cache_destroy(queue_cache[HV_NCS_QTYPE_MAU - 1]);
	kmem_cache_destroy(queue_cache[HV_NCS_QTYPE_CWQ - 1]);
}
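/* Configuring a queue with sun4v_ncs_qconf() apparently has to be
 * issued from a CPU that will service that queue, so the current
 * thread is temporarily rebound to the queue's sharing mask around
 * the hypervisor call and restored afterwards.
 */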
static int spu_queue_register(struct spu_queue *p, unsigned long q_type)
{
	cpumask_var_t old_allowed;
	unsigned long hv_ret;

	if (cpumask_empty(&p->sharing))
		return -EINVAL;

	if (!alloc_cpumask_var(&old_allowed, GFP_KERNEL))
		return -ENOMEM;

	cpumask_copy(old_allowed, &current->cpus_allowed);

	set_cpus_allowed_ptr(current, &p->sharing);

	hv_ret = sun4v_ncs_qconf(q_type, __pa(p->q),
				 CWQ_NUM_ENTRIES, &p->qhandle);
	if (!hv_ret)
		sun4v_ncs_sethead_marker(p->qhandle, 0);

	set_cpus_allowed_ptr(current, old_allowed);

	free_cpumask_var(old_allowed);

	return (hv_ret ? -EINVAL : 0);
}
static int spu_queue_setup(struct spu_queue *p)
{
	int err;

	p->q = new_queue(p->q_type);
	if (!p->q)
		return -ENOMEM;

	err = spu_queue_register(p, p->q_type);
	if (err) {
		free_queue(p->q, p->q_type);
		p->q = NULL;
	}

	return err;
}
static void spu_queue_destroy(struct spu_queue *p)
{
	unsigned long hv_ret;

	if (!p->q)
		return;

	hv_ret = sun4v_ncs_qconf(p->q_type, p->qhandle, 0, &p->qhandle);

	if (!hv_ret)
		free_queue(p->q, p->q_type);
}
static void spu_list_destroy(struct list_head *list)
{
	struct spu_queue *p, *n;

	list_for_each_entry_safe(p, n, list, list) {
		int i;

		for (i = 0; i < NR_CPUS; i++) {
			if (cpu_to_cwq[i] == p)
				cpu_to_cwq[i] = NULL;
		}

		if (p->irq) {
			free_irq(p->irq, p);
			p->irq = 0;
		}

		spu_queue_destroy(p);
		list_del(&p->list);
		kfree(p);
	}
}
/* Walk the backward arcs of a CWQ 'exec-unit' node,
 * gathering cpu membership information.
 */
static int spu_mdesc_walk_arcs(struct mdesc_handle *mdesc,
			       struct platform_device *dev,
			       u64 node, struct spu_queue *p,
			       struct spu_queue **table)
{
	u64 arc;

	mdesc_for_each_arc(arc, mdesc, node, MDESC_ARC_TYPE_BACK) {
		u64 tgt = mdesc_arc_target(mdesc, arc);
		const char *name = mdesc_node_name(mdesc, tgt);
		const u64 *id;

		if (strcmp(name, "cpu"))
			continue;
		id = mdesc_get_property(mdesc, tgt, "id", NULL);
		if (!id) {
			/* Defensive guard, not in the original code:
			 * a malformed MDESC cpu node without an 'id'
			 * property would otherwise be dereferenced.
			 */
			return -EINVAL;
		}
		if (table[*id] != NULL) {
			dev_err(&dev->dev, "%s: SPU cpu slot already set.\n",
				dev->dev.of_node->full_name);
			return -EINVAL;
		}
		cpu_set(*id, p->sharing);
		table[*id] = p;
	}
	return 0;
}
/* Process an 'exec-unit' MDESC node of type 'cwq'. */
static int handle_exec_unit(struct spu_mdesc_info *ip, struct list_head *list,
			    struct platform_device *dev,
			    struct mdesc_handle *mdesc,
			    u64 node, const char *iname, unsigned long q_type,
			    irq_handler_t handler, struct spu_queue **table)
{
	struct spu_queue *p;
	int err;

	p = kzalloc(sizeof(struct spu_queue), GFP_KERNEL);
	if (!p) {
		dev_err(&dev->dev, "%s: Could not allocate SPU queue.\n",
			dev->dev.of_node->full_name);
		return -ENOMEM;
	}

	cpus_clear(p->sharing);
	spin_lock_init(&p->lock);
	p->q_type = q_type;
	INIT_LIST_HEAD(&p->jobs);
	list_add(&p->list, list);

	err = spu_mdesc_walk_arcs(mdesc, dev, node, p, table);
	if (err)
		return err;

	err = spu_queue_setup(p);
	if (err)
		return err;

	return spu_map_ino(dev, ip, iname, p, handler);
}
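/* Scan every 'exec-unit' MDESC node whose 'type' property matches
 * exec_name ("cwq" or "mau") and bring up a queue for each one; on
 * the first failure the whole list built so far is torn down.
 */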
static int spu_mdesc_scan(struct mdesc_handle *mdesc, struct platform_device *dev,
			  struct spu_mdesc_info *ip, struct list_head *list,
			  const char *exec_name, unsigned long q_type,
			  irq_handler_t handler, struct spu_queue **table)
{
	int err = 0;
	u64 node;

	mdesc_for_each_node_by_name(mdesc, node, "exec-unit") {
		const char *type;

		type = mdesc_get_property(mdesc, node, "type", NULL);
		if (!type || strcmp(type, exec_name))
			continue;

		err = handle_exec_unit(ip, list, dev, mdesc, node,
				       exec_name, q_type, handler, table);
		if (err) {
			spu_list_destroy(list);
			break;
		}
	}

	return err;
}
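/* Build the ino_table from the MDESC 'ino' property.  The interrupt
 * numbers are recorded 1-based (b->intr = i + 1), which appears to
 * match how the OF 'interrupts' entries are numbered; they are later
 * matched up in find_devino_index().
 */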
static int get_irq_props(struct mdesc_handle *mdesc, u64 node,
			 struct spu_mdesc_info *ip)
{
	const u64 *ino;
	int ino_len;
	int i;

	ino = mdesc_get_property(mdesc, node, "ino", &ino_len);
	if (!ino) {
		pr_err("NO 'ino'\n");
		return -ENODEV;
	}

	ip->num_intrs = ino_len / sizeof(u64);
	ip->ino_table = kzalloc((sizeof(struct ino_blob) *
				 ip->num_intrs),
				GFP_KERNEL);
	if (!ip->ino_table)
		return -ENOMEM;

	for (i = 0; i < ip->num_intrs; i++) {
		struct ino_blob *b = &ip->ino_table[i];
		b->intr = i + 1;
		b->ino = ino[i];
	}

	return 0;
}
static int grab_mdesc_irq_props(struct mdesc_handle *mdesc,
				struct platform_device *dev,
				struct spu_mdesc_info *ip,
				const char *node_name)
{
	const unsigned int *reg;
	u64 node;

	reg = of_get_property(dev->dev.of_node, "reg", NULL);
	if (!reg)
		return -ENODEV;

	mdesc_for_each_node_by_name(mdesc, node, "virtual-device") {
		const char *name;
		const u64 *chdl;

		name = mdesc_get_property(mdesc, node, "name", NULL);
		if (!name || strcmp(name, node_name))
			continue;
		chdl = mdesc_get_property(mdesc, node, "cfg-handle", NULL);
		if (!chdl || (*chdl != *reg))
			continue;
		ip->cfg_handle = *chdl;
		return get_irq_props(mdesc, node, ip);
	}

	return -ENODEV;
}
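/* NCS hypervisor API negotiation.  We request major version 2 and the
 * hypervisor reports back, through the minor argument, the minor
 * version it actually granted.
 */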
static unsigned long n2_spu_hvapi_major;
static unsigned long n2_spu_hvapi_minor;

static int n2_spu_hvapi_register(void)
{
	int err;

	n2_spu_hvapi_major = 2;
	n2_spu_hvapi_minor = 0;

	err = sun4v_hvapi_register(HV_GRP_NCS,
				   n2_spu_hvapi_major,
				   &n2_spu_hvapi_minor);

	if (!err)
		pr_info("Registered NCS HVAPI version %lu.%lu\n",
			n2_spu_hvapi_major,
			n2_spu_hvapi_minor);

	return err;
}

static void n2_spu_hvapi_unregister(void)
{
	sun4v_hvapi_unregister(HV_GRP_NCS);
}
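/* Global state shared by every n2cp/ncp instance: the HVAPI
 * registration, the queue slab caches, and the per-cpu queue tables.
 * grab/release are reference counted under spu_lock, so only the
 * first grab allocates and only the last release frees.
 */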
static int global_ref;

static int grab_global_resources(void)
{
	int err = 0;

	mutex_lock(&spu_lock);

	if (global_ref++)
		goto out;

	err = n2_spu_hvapi_register();
	if (err)
		goto out;

	err = queue_cache_init();
	if (err)
		goto out_hvapi_release;

	err = -ENOMEM;
	cpu_to_cwq = kzalloc(sizeof(struct spu_queue *) * NR_CPUS,
			     GFP_KERNEL);
	if (!cpu_to_cwq)
		goto out_queue_cache_destroy;

	cpu_to_mau = kzalloc(sizeof(struct spu_queue *) * NR_CPUS,
			     GFP_KERNEL);
	if (!cpu_to_mau)
		goto out_free_cwq_table;

	err = 0;

out:
	if (err)
		global_ref--;
	mutex_unlock(&spu_lock);
	return err;

out_free_cwq_table:
	kfree(cpu_to_cwq);
	cpu_to_cwq = NULL;

out_queue_cache_destroy:
	queue_cache_destroy();

out_hvapi_release:
	n2_spu_hvapi_unregister();
	goto out;
}
static void release_global_resources(void)
{
	mutex_lock(&spu_lock);
	if (!--global_ref) {
		kfree(cpu_to_cwq);
		cpu_to_cwq = NULL;

		kfree(cpu_to_mau);
		cpu_to_mau = NULL;

		queue_cache_destroy();
		n2_spu_hvapi_unregister();
	}
	mutex_unlock(&spu_lock);
}
static struct n2_crypto *alloc_n2cp(void)
{
	struct n2_crypto *np = kzalloc(sizeof(struct n2_crypto), GFP_KERNEL);

	if (np)
		INIT_LIST_HEAD(&np->cwq_list);

	return np;
}

static void free_n2cp(struct n2_crypto *np)
{
	if (np->cwq_info.ino_table) {
		kfree(np->cwq_info.ino_table);
		np->cwq_info.ino_table = NULL;
	}

	kfree(np);
}

static void n2_spu_driver_version(void)
{
	static int n2_spu_version_printed;

	if (n2_spu_version_printed++ == 0)
		pr_info("%s", version);
}
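/* Probe order for the CWQ device: allocate the n2cp, take the global
 * refcount, pull the IRQ properties out of the MDESC, scan for CWQ
 * exec-units, then register the crypto algorithms.  The error labels
 * unwind those steps in exactly the reverse order.
 */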
static int n2_crypto_probe(struct platform_device *dev)
{
	struct mdesc_handle *mdesc;
	const char *full_name;
	struct n2_crypto *np;
	int err;

	n2_spu_driver_version();

	full_name = dev->dev.of_node->full_name;
	pr_info("Found N2CP at %s\n", full_name);

	np = alloc_n2cp();
	if (!np) {
		dev_err(&dev->dev, "%s: Unable to allocate n2cp.\n",
			full_name);
		return -ENOMEM;
	}

	err = grab_global_resources();
	if (err) {
		dev_err(&dev->dev, "%s: Unable to grab global resources.\n",
			full_name);
		goto out_free_n2cp;
	}

	mdesc = mdesc_grab();
	if (!mdesc) {
		dev_err(&dev->dev, "%s: Unable to grab MDESC.\n",
			full_name);
		err = -ENODEV;
		goto out_free_global;
	}

	err = grab_mdesc_irq_props(mdesc, dev, &np->cwq_info, "n2cp");
	if (err) {
		dev_err(&dev->dev, "%s: Unable to grab IRQ props.\n",
			full_name);
		mdesc_release(mdesc);
		goto out_free_global;
	}

	err = spu_mdesc_scan(mdesc, dev, &np->cwq_info, &np->cwq_list,
			     "cwq", HV_NCS_QTYPE_CWQ, cwq_intr,
			     cpu_to_cwq);
	mdesc_release(mdesc);

	if (err) {
		dev_err(&dev->dev, "%s: CWQ MDESC scan failed.\n",
			full_name);
		goto out_free_global;
	}

	err = n2_register_algs();
	if (err) {
		dev_err(&dev->dev, "%s: Unable to register algorithms.\n",
			full_name);
		goto out_free_spu_list;
	}

	dev_set_drvdata(&dev->dev, np);

	return 0;

out_free_spu_list:
	spu_list_destroy(&np->cwq_list);

out_free_global:
	release_global_resources();

out_free_n2cp:
	free_n2cp(np);

	return err;
}
static int n2_crypto_remove(struct platform_device *dev)
{
	struct n2_crypto *np = dev_get_drvdata(&dev->dev);

	n2_unregister_algs();

	spu_list_destroy(&np->cwq_list);

	release_global_resources();

	free_n2cp(np);

	return 0;
}
static struct n2_mau *alloc_ncp(void)
{
	struct n2_mau *mp = kzalloc(sizeof(struct n2_mau), GFP_KERNEL);

	if (mp)
		INIT_LIST_HEAD(&mp->mau_list);

	return mp;
}

static void free_ncp(struct n2_mau *mp)
{
	if (mp->mau_info.ino_table) {
		kfree(mp->mau_info.ino_table);
		mp->mau_info.ino_table = NULL;
	}

	kfree(mp);
}
static int n2_mau_probe(struct platform_device *dev)
{
	struct mdesc_handle *mdesc;
	const char *full_name;
	struct n2_mau *mp;
	int err;

	n2_spu_driver_version();

	full_name = dev->dev.of_node->full_name;
	pr_info("Found NCP at %s\n", full_name);

	mp = alloc_ncp();
	if (!mp) {
		dev_err(&dev->dev, "%s: Unable to allocate ncp.\n",
			full_name);
		return -ENOMEM;
	}

	err = grab_global_resources();
	if (err) {
		dev_err(&dev->dev, "%s: Unable to grab global resources.\n",
			full_name);
		goto out_free_ncp;
	}

	mdesc = mdesc_grab();
	if (!mdesc) {
		dev_err(&dev->dev, "%s: Unable to grab MDESC.\n",
			full_name);
		err = -ENODEV;
		goto out_free_global;
	}

	err = grab_mdesc_irq_props(mdesc, dev, &mp->mau_info, "ncp");
	if (err) {
		dev_err(&dev->dev, "%s: Unable to grab IRQ props.\n",
			full_name);
		mdesc_release(mdesc);
		goto out_free_global;
	}

	err = spu_mdesc_scan(mdesc, dev, &mp->mau_info, &mp->mau_list,
			     "mau", HV_NCS_QTYPE_MAU, mau_intr,
			     cpu_to_mau);
	mdesc_release(mdesc);

	if (err) {
		dev_err(&dev->dev, "%s: MAU MDESC scan failed.\n",
			full_name);
		goto out_free_global;
	}

	dev_set_drvdata(&dev->dev, mp);

	return 0;

out_free_global:
	release_global_resources();

out_free_ncp:
	free_ncp(mp);

	return err;
}
static int n2_mau_remove(struct platform_device *dev)
{
	struct n2_mau *mp = dev_get_drvdata(&dev->dev);

	spu_list_destroy(&mp->mau_list);

	release_global_resources();

	free_ncp(mp);

	return 0;
}
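/* OF match tables: the CWQ device appears as "n2cp" and the MAU as
 * "ncp", with compatible strings covering the n2, vf, and kt
 * variants of the hardware.
 */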
static const struct of_device_id n2_crypto_match[] = {
	{
		.name = "n2cp",
		.compatible = "SUNW,n2-cwq",
	},
	{
		.name = "n2cp",
		.compatible = "SUNW,vf-cwq",
	},
	{
		.name = "n2cp",
		.compatible = "SUNW,kt-cwq",
	},
	{},
};

MODULE_DEVICE_TABLE(of, n2_crypto_match);

static struct platform_driver n2_crypto_driver = {
	.driver = {
		.name = "n2cp",
		.owner = THIS_MODULE,
		.of_match_table = n2_crypto_match,
	},
	.probe = n2_crypto_probe,
	.remove = n2_crypto_remove,
};
static const struct of_device_id n2_mau_match[] = {
	{
		.name = "ncp",
		.compatible = "SUNW,n2-mau",
	},
	{
		.name = "ncp",
		.compatible = "SUNW,vf-mau",
	},
	{
		.name = "ncp",
		.compatible = "SUNW,kt-mau",
	},
	{},
};

MODULE_DEVICE_TABLE(of, n2_mau_match);

static struct platform_driver n2_mau_driver = {
	.driver = {
		.name = "ncp",
		.owner = THIS_MODULE,
		.of_match_table = n2_mau_match,
	},
	.probe = n2_mau_probe,
	.remove = n2_mau_remove,
};
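/* Module init registers both platform drivers and unwinds the first
 * if the second fails, so initialization is all-or-nothing.
 */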
static int __init n2_init(void)
{
	int err = platform_driver_register(&n2_crypto_driver);

	if (!err) {
		err = platform_driver_register(&n2_mau_driver);
		if (err)
			platform_driver_unregister(&n2_crypto_driver);
	}
	return err;
}

static void __exit n2_exit(void)
{
	platform_driver_unregister(&n2_mau_driver);
	platform_driver_unregister(&n2_crypto_driver);
}

module_init(n2_init);
module_exit(n2_exit);