bcm-sba-raid.c

  1. /*
  2. * Copyright (C) 2017 Broadcom
  3. *
  4. * This program is free software; you can redistribute it and/or
  5. * modify it under the terms of the GNU General Public License as
  6. * published by the Free Software Foundation version 2.
  7. *
  8. * This program is distributed "as is" WITHOUT ANY WARRANTY of any
  9. * kind, whether express or implied; without even the implied warranty
  10. * of MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
  11. * GNU General Public License for more details.
  12. */
  13. /*
  14. * Broadcom SBA RAID Driver
  15. *
  16. * The Broadcom stream buffer accelerator (SBA) provides offloading
  17. * capabilities for RAID operations. The SBA offload engine is accessible
  18. * via a Broadcom SoC specific ring manager. Two or more offload engines
  19. * can share the same ring manager, which is why the Broadcom SoC specific
  20. * ring manager driver is implemented as a mailbox controller driver and
  21. * the offload engine drivers are implemented as mailbox clients.
  22. *
  23. * Typically, a Broadcom SoC specific ring manager implements a large
  24. * number of hardware rings over one or more SBA hardware devices. By
  25. * design, the internal buffer size of an SBA hardware device is limited,
  26. * but all offload operations supported by SBA can be broken down into
  27. * multiple small-sized requests and executed in parallel on multiple SBA
  28. * hardware devices to achieve high throughput.
  29. *
  30. * The Broadcom SBA RAID driver does not require any register programming
  31. * except for submitting requests to the SBA hardware device via mailbox
  32. * channels. This driver implements a DMA device with one DMA channel
  33. * using a single mailbox channel provided by the Broadcom SoC specific
  34. * ring manager driver. To have more SBA DMA channels, we can create more
  35. * SBA device nodes in the Broadcom SoC specific DTS based on the number
  36. * of hardware rings supported by the Broadcom SoC ring manager.
  37. */
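/*
 * For illustration, a minimal SBA device node along the lines described in
 * the comment above might look like the sketch below. The node name, unit
 * address and mbox specifier cells are assumptions; only the compatible
 * string and the "mboxes" property are taken from this driver:
 *
 *	raid0: raid@0 {
 *		compatible = "brcm,iproc-sba-v2";
 *		mboxes = <&ringmgr_mbox 0>;
 *	};
 */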
  38. #include <linux/bitops.h>
  39. #include <linux/debugfs.h>
  40. #include <linux/dma-mapping.h>
  41. #include <linux/dmaengine.h>
  42. #include <linux/list.h>
  43. #include <linux/mailbox_client.h>
  44. #include <linux/mailbox/brcm-message.h>
  45. #include <linux/module.h>
  46. #include <linux/of_device.h>
  47. #include <linux/slab.h>
  48. #include <linux/raid/pq.h>
  49. #include "dmaengine.h"
  50. /* ====== Driver macros and defines ===== */
  51. #define SBA_TYPE_SHIFT 48
  52. #define SBA_TYPE_MASK GENMASK(1, 0)
  53. #define SBA_TYPE_A 0x0
  54. #define SBA_TYPE_B 0x2
  55. #define SBA_TYPE_C 0x3
  56. #define SBA_USER_DEF_SHIFT 32
  57. #define SBA_USER_DEF_MASK GENMASK(15, 0)
  58. #define SBA_R_MDATA_SHIFT 24
  59. #define SBA_R_MDATA_MASK GENMASK(7, 0)
  60. #define SBA_C_MDATA_MS_SHIFT 18
  61. #define SBA_C_MDATA_MS_MASK GENMASK(1, 0)
  62. #define SBA_INT_SHIFT 17
  63. #define SBA_INT_MASK BIT(0)
  64. #define SBA_RESP_SHIFT 16
  65. #define SBA_RESP_MASK BIT(0)
  66. #define SBA_C_MDATA_SHIFT 8
  67. #define SBA_C_MDATA_MASK GENMASK(7, 0)
  68. #define SBA_C_MDATA_BNUMx_SHIFT(__bnum) (2 * (__bnum))
  69. #define SBA_C_MDATA_BNUMx_MASK GENMASK(1, 0)
  70. #define SBA_C_MDATA_DNUM_SHIFT 5
  71. #define SBA_C_MDATA_DNUM_MASK GENMASK(4, 0)
  72. #define SBA_C_MDATA_LS(__v) ((__v) & 0xff)
  73. #define SBA_C_MDATA_MS(__v) (((__v) >> 8) & 0x3)
  74. #define SBA_CMD_SHIFT 0
  75. #define SBA_CMD_MASK GENMASK(3, 0)
  76. #define SBA_CMD_ZERO_BUFFER 0x4
  77. #define SBA_CMD_ZERO_ALL_BUFFERS 0x8
  78. #define SBA_CMD_LOAD_BUFFER 0x9
  79. #define SBA_CMD_XOR 0xa
  80. #define SBA_CMD_GALOIS_XOR 0xb
  81. #define SBA_CMD_WRITE_BUFFER 0xc
  82. #define SBA_CMD_GALOIS 0xe
  83. #define SBA_MAX_REQ_PER_MBOX_CHANNEL 8192
  84. #define SBA_MAX_MSG_SEND_PER_MBOX_CHANNEL 8
  85. /* Driver helper macros */
  86. #define to_sba_request(tx) \
  87. container_of(tx, struct sba_request, tx)
  88. #define to_sba_device(dchan) \
  89. container_of(dchan, struct sba_device, dma_chan)
  90. /* ===== Driver data structures ===== */
  91. enum sba_request_flags {
  92. SBA_REQUEST_STATE_FREE = 0x001,
  93. SBA_REQUEST_STATE_ALLOCED = 0x002,
  94. SBA_REQUEST_STATE_PENDING = 0x004,
  95. SBA_REQUEST_STATE_ACTIVE = 0x008,
  96. SBA_REQUEST_STATE_ABORTED = 0x010,
  97. SBA_REQUEST_STATE_MASK = 0x0ff,
  98. SBA_REQUEST_FENCE = 0x100,
  99. };
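/*
 * Request life cycle (as implemented by the helpers below): a request starts
 * on reqs_free_list (STATE_FREE), moves to reqs_alloc_list when picked by
 * sba_alloc_request() (STATE_ALLOCED), to reqs_pending_list on tx_submit()
 * (STATE_PENDING), to reqs_active_list once sent over the mailbox channel
 * (STATE_ACTIVE), and back to reqs_free_list when its completion is received
 * (or to reqs_aborted_list if the channel is cleaned up while it is active).
 * SBA_REQUEST_FENCE makes an active request block further pending requests
 * from becoming active until the active list drains.
 */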
  100. struct sba_request {
  101. /* Global state */
  102. struct list_head node;
  103. struct sba_device *sba;
  104. u32 flags;
  105. /* Chained requests management */
  106. struct sba_request *first;
  107. struct list_head next;
  108. atomic_t next_pending_count;
  109. /* BRCM message data */
  110. struct brcm_message msg;
  111. struct dma_async_tx_descriptor tx;
  112. /* SBA commands */
  113. struct brcm_sba_command cmds[0];
  114. };
  115. enum sba_version {
  116. SBA_VER_1 = 0,
  117. SBA_VER_2
  118. };
  119. struct sba_device {
  120. /* Underlying device */
  121. struct device *dev;
  122. /* DT configuration parameters */
  123. enum sba_version ver;
  124. /* Derived configuration parameters */
  125. u32 max_req;
  126. u32 hw_buf_size;
  127. u32 hw_resp_size;
  128. u32 max_pq_coefs;
  129. u32 max_pq_srcs;
  130. u32 max_cmd_per_req;
  131. u32 max_xor_srcs;
  132. u32 max_resp_pool_size;
  133. u32 max_cmds_pool_size;
  134. /* Mailbox client and mailbox channels */
  135. struct mbox_client client;
  136. struct mbox_chan *mchan;
  137. struct device *mbox_dev;
  138. /* DMA device and DMA channel */
  139. struct dma_device dma_dev;
  140. struct dma_chan dma_chan;
  141. /* DMA channel resources */
  142. void *resp_base;
  143. dma_addr_t resp_dma_base;
  144. void *cmds_base;
  145. dma_addr_t cmds_dma_base;
  146. spinlock_t reqs_lock;
  147. bool reqs_fence;
  148. struct list_head reqs_alloc_list;
  149. struct list_head reqs_pending_list;
  150. struct list_head reqs_active_list;
  151. struct list_head reqs_aborted_list;
  152. struct list_head reqs_free_list;
  153. /* DebugFS directory entries */
  154. struct dentry *root;
  155. struct dentry *stats;
  156. };
  157. /* ====== Command helper routines ===== */
  158. static inline u64 __pure sba_cmd_enc(u64 cmd, u32 val, u32 shift, u32 mask)
  159. {
  160. cmd &= ~((u64)mask << shift);
  161. cmd |= ((u64)(val & mask) << shift);
  162. return cmd;
  163. }
  164. static inline u32 __pure sba_cmd_load_c_mdata(u32 b0)
  165. {
  166. return b0 & SBA_C_MDATA_BNUMx_MASK;
  167. }
  168. static inline u32 __pure sba_cmd_write_c_mdata(u32 b0)
  169. {
  170. return b0 & SBA_C_MDATA_BNUMx_MASK;
  171. }
  172. static inline u32 __pure sba_cmd_xor_c_mdata(u32 b1, u32 b0)
  173. {
  174. return (b0 & SBA_C_MDATA_BNUMx_MASK) |
  175. ((b1 & SBA_C_MDATA_BNUMx_MASK) << SBA_C_MDATA_BNUMx_SHIFT(1));
  176. }
  177. static inline u32 __pure sba_cmd_pq_c_mdata(u32 d, u32 b1, u32 b0)
  178. {
  179. return (b0 & SBA_C_MDATA_BNUMx_MASK) |
  180. ((b1 & SBA_C_MDATA_BNUMx_MASK) << SBA_C_MDATA_BNUMx_SHIFT(1)) |
  181. ((d & SBA_C_MDATA_DNUM_MASK) << SBA_C_MDATA_DNUM_SHIFT);
  182. }
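/*
 * A worked example of the encoding helpers above: sba_cmd_pq_c_mdata(20, 1, 0)
 * packs buf0 index 0 into bits [1:0], buf1 index 1 into bits [3:2] and the
 * Galois exponent 20 into bits [9:5], giving c_mdata = 0x284. The caller then
 * splits this with SBA_C_MDATA_LS()/SBA_C_MDATA_MS() and uses sba_cmd_enc()
 * to place 0x84 at SBA_C_MDATA_SHIFT (bits [15:8]) and 0x2 at
 * SBA_C_MDATA_MS_SHIFT (bits [19:18]) of the 64-bit command.
 */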
  183. /* ====== General helper routines ===== */
  184. static struct sba_request *sba_alloc_request(struct sba_device *sba)
  185. {
  186. bool found = false;
  187. unsigned long flags;
  188. struct sba_request *req = NULL;
  189. spin_lock_irqsave(&sba->reqs_lock, flags);
  190. list_for_each_entry(req, &sba->reqs_free_list, node) {
  191. if (async_tx_test_ack(&req->tx)) {
  192. list_move_tail(&req->node, &sba->reqs_alloc_list);
  193. found = true;
  194. break;
  195. }
  196. }
  197. spin_unlock_irqrestore(&sba->reqs_lock, flags);
  198. if (!found) {
  199. /*
  200. * We have no more free requests, so we peek at the
  201. * mailbox channel hoping a few active requests have
  202. * completed, which would create more room for new
  203. * requests.
  204. */
  205. mbox_client_peek_data(sba->mchan);
  206. return NULL;
  207. }
  208. req->flags = SBA_REQUEST_STATE_ALLOCED;
  209. req->first = req;
  210. INIT_LIST_HEAD(&req->next);
  211. atomic_set(&req->next_pending_count, 1);
  212. dma_async_tx_descriptor_init(&req->tx, &sba->dma_chan);
  213. async_tx_ack(&req->tx);
  214. return req;
  215. }
  216. /* Note: Must be called with sba->reqs_lock held */
  217. static void _sba_pending_request(struct sba_device *sba,
  218. struct sba_request *req)
  219. {
  220. lockdep_assert_held(&sba->reqs_lock);
  221. req->flags &= ~SBA_REQUEST_STATE_MASK;
  222. req->flags |= SBA_REQUEST_STATE_PENDING;
  223. list_move_tail(&req->node, &sba->reqs_pending_list);
  224. if (list_empty(&sba->reqs_active_list))
  225. sba->reqs_fence = false;
  226. }
  227. /* Note: Must be called with sba->reqs_lock held */
  228. static bool _sba_active_request(struct sba_device *sba,
  229. struct sba_request *req)
  230. {
  231. lockdep_assert_held(&sba->reqs_lock);
  232. if (list_empty(&sba->reqs_active_list))
  233. sba->reqs_fence = false;
  234. if (sba->reqs_fence)
  235. return false;
  236. req->flags &= ~SBA_REQUEST_STATE_MASK;
  237. req->flags |= SBA_REQUEST_STATE_ACTIVE;
  238. list_move_tail(&req->node, &sba->reqs_active_list);
  239. if (req->flags & SBA_REQUEST_FENCE)
  240. sba->reqs_fence = true;
  241. return true;
  242. }
  243. /* Note: Must be called with sba->reqs_lock held */
  244. static void _sba_abort_request(struct sba_device *sba,
  245. struct sba_request *req)
  246. {
  247. lockdep_assert_held(&sba->reqs_lock);
  248. req->flags &= ~SBA_REQUEST_STATE_MASK;
  249. req->flags |= SBA_REQUEST_STATE_ABORTED;
  250. list_move_tail(&req->node, &sba->reqs_aborted_list);
  251. if (list_empty(&sba->reqs_active_list))
  252. sba->reqs_fence = false;
  253. }
  254. /* Note: Must be called with sba->reqs_lock held */
  255. static void _sba_free_request(struct sba_device *sba,
  256. struct sba_request *req)
  257. {
  258. lockdep_assert_held(&sba->reqs_lock);
  259. req->flags &= ~SBA_REQUEST_STATE_MASK;
  260. req->flags |= SBA_REQUEST_STATE_FREE;
  261. list_move_tail(&req->node, &sba->reqs_free_list);
  262. if (list_empty(&sba->reqs_active_list))
  263. sba->reqs_fence = false;
  264. }
  265. static void sba_free_chained_requests(struct sba_request *req)
  266. {
  267. unsigned long flags;
  268. struct sba_request *nreq;
  269. struct sba_device *sba = req->sba;
  270. spin_lock_irqsave(&sba->reqs_lock, flags);
  271. _sba_free_request(sba, req);
  272. list_for_each_entry(nreq, &req->next, next)
  273. _sba_free_request(sba, nreq);
  274. spin_unlock_irqrestore(&sba->reqs_lock, flags);
  275. }
  276. static void sba_chain_request(struct sba_request *first,
  277. struct sba_request *req)
  278. {
  279. unsigned long flags;
  280. struct sba_device *sba = req->sba;
  281. spin_lock_irqsave(&sba->reqs_lock, flags);
  282. list_add_tail(&req->next, &first->next);
  283. req->first = first;
  284. atomic_inc(&first->next_pending_count);
  285. spin_unlock_irqrestore(&sba->reqs_lock, flags);
  286. }
  287. static void sba_cleanup_nonpending_requests(struct sba_device *sba)
  288. {
  289. unsigned long flags;
  290. struct sba_request *req, *req1;
  291. spin_lock_irqsave(&sba->reqs_lock, flags);
  292. /* Free up all allocated requests */
  293. list_for_each_entry_safe(req, req1, &sba->reqs_alloc_list, node)
  294. _sba_free_request(sba, req);
  295. /* Set all active requests as aborted */
  296. list_for_each_entry_safe(req, req1, &sba->reqs_active_list, node)
  297. _sba_abort_request(sba, req);
  298. /*
  299. * Note: We expect that aborted requests will eventually be
  300. * freed by sba_receive_message()
  301. */
  302. spin_unlock_irqrestore(&sba->reqs_lock, flags);
  303. }
  304. static void sba_cleanup_pending_requests(struct sba_device *sba)
  305. {
  306. unsigned long flags;
  307. struct sba_request *req, *req1;
  308. spin_lock_irqsave(&sba->reqs_lock, flags);
  309. /* Free up all pending requests */
  310. list_for_each_entry_safe(req, req1, &sba->reqs_pending_list, node)
  311. _sba_free_request(sba, req);
  312. spin_unlock_irqrestore(&sba->reqs_lock, flags);
  313. }
  314. static int sba_send_mbox_request(struct sba_device *sba,
  315. struct sba_request *req)
  316. {
  317. int ret = 0;
  318. /* Send message for the request */
  319. req->msg.error = 0;
  320. ret = mbox_send_message(sba->mchan, &req->msg);
  321. if (ret < 0) {
  322. dev_err(sba->dev, "send message failed with error %d", ret);
  323. return ret;
  324. }
  325. /* Check error returned by mailbox controller */
  326. ret = req->msg.error;
  327. if (ret < 0) {
  328. dev_err(sba->dev, "message error %d", ret);
  329. }
  330. /* Signal txdone for mailbox channel */
  331. mbox_client_txdone(sba->mchan, ret);
  332. return ret;
  333. }
  334. /* Note: Must be called with sba->reqs_lock held */
  335. static void _sba_process_pending_requests(struct sba_device *sba)
  336. {
  337. int ret;
  338. u32 count;
  339. struct sba_request *req;
  340. /* Process a few pending requests */
  341. count = SBA_MAX_MSG_SEND_PER_MBOX_CHANNEL;
  342. while (!list_empty(&sba->reqs_pending_list) && count) {
  343. /* Get the first pending request */
  344. req = list_first_entry(&sba->reqs_pending_list,
  345. struct sba_request, node);
  346. /* Try to make request active */
  347. if (!_sba_active_request(sba, req))
  348. break;
  349. /* Send request to mailbox channel */
  350. ret = sba_send_mbox_request(sba, req);
  351. if (ret < 0) {
  352. _sba_pending_request(sba, req);
  353. break;
  354. }
  355. count--;
  356. }
  357. }
  358. static void sba_process_received_request(struct sba_device *sba,
  359. struct sba_request *req)
  360. {
  361. unsigned long flags;
  362. struct dma_async_tx_descriptor *tx;
  363. struct sba_request *nreq, *first = req->first;
  364. /* Process only after all chained requests are received */
  365. if (!atomic_dec_return(&first->next_pending_count)) {
  366. tx = &first->tx;
  367. WARN_ON(tx->cookie < 0);
  368. if (tx->cookie > 0) {
  369. spin_lock_irqsave(&sba->reqs_lock, flags);
  370. dma_cookie_complete(tx);
  371. spin_unlock_irqrestore(&sba->reqs_lock, flags);
  372. dmaengine_desc_get_callback_invoke(tx, NULL);
  373. dma_descriptor_unmap(tx);
  374. tx->callback = NULL;
  375. tx->callback_result = NULL;
  376. }
  377. dma_run_dependencies(tx);
  378. spin_lock_irqsave(&sba->reqs_lock, flags);
  379. /* Free all requests chained to first request */
  380. list_for_each_entry(nreq, &first->next, next)
  381. _sba_free_request(sba, nreq);
  382. INIT_LIST_HEAD(&first->next);
  383. /* Free the first request */
  384. _sba_free_request(sba, first);
  385. /* Process pending requests */
  386. _sba_process_pending_requests(sba);
  387. spin_unlock_irqrestore(&sba->reqs_lock, flags);
  388. }
  389. }
  390. static void sba_write_stats_in_seqfile(struct sba_device *sba,
  391. struct seq_file *file)
  392. {
  393. unsigned long flags;
  394. struct sba_request *req;
  395. u32 free_count = 0, alloced_count = 0;
  396. u32 pending_count = 0, active_count = 0, aborted_count = 0;
  397. spin_lock_irqsave(&sba->reqs_lock, flags);
  398. list_for_each_entry(req, &sba->reqs_free_list, node)
  399. if (async_tx_test_ack(&req->tx))
  400. free_count++;
  401. list_for_each_entry(req, &sba->reqs_alloc_list, node)
  402. alloced_count++;
  403. list_for_each_entry(req, &sba->reqs_pending_list, node)
  404. pending_count++;
  405. list_for_each_entry(req, &sba->reqs_active_list, node)
  406. active_count++;
  407. list_for_each_entry(req, &sba->reqs_aborted_list, node)
  408. aborted_count++;
  409. spin_unlock_irqrestore(&sba->reqs_lock, flags);
  410. seq_printf(file, "maximum requests = %d\n", sba->max_req);
  411. seq_printf(file, "free requests = %d\n", free_count);
  412. seq_printf(file, "alloced requests = %d\n", alloced_count);
  413. seq_printf(file, "pending requests = %d\n", pending_count);
  414. seq_printf(file, "active requests = %d\n", active_count);
  415. seq_printf(file, "aborted requests = %d\n", aborted_count);
  416. }
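/*
 * For reference, the debugfs stats file written above reads roughly as
 * follows (the counts shown here are purely illustrative):
 *
 *	maximum requests = 1024
 *	free requests = 1000
 *	alloced requests = 0
 *	pending requests = 4
 *	active requests = 20
 *	aborted requests = 0
 */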
  417. /* ====== DMAENGINE callbacks ===== */
  418. static void sba_free_chan_resources(struct dma_chan *dchan)
  419. {
  420. /*
  421. * Channel resources are pre-allocated, so we just free up
  422. * whatever we can so that the pre-allocated channel
  423. * resources can be re-used next time.
  424. */
  425. sba_cleanup_nonpending_requests(to_sba_device(dchan));
  426. }
  427. static int sba_device_terminate_all(struct dma_chan *dchan)
  428. {
  429. /* Cleanup all pending requests */
  430. sba_cleanup_pending_requests(to_sba_device(dchan));
  431. return 0;
  432. }
  433. static void sba_issue_pending(struct dma_chan *dchan)
  434. {
  435. unsigned long flags;
  436. struct sba_device *sba = to_sba_device(dchan);
  437. /* Process pending requests */
  438. spin_lock_irqsave(&sba->reqs_lock, flags);
  439. _sba_process_pending_requests(sba);
  440. spin_unlock_irqrestore(&sba->reqs_lock, flags);
  441. }
  442. static dma_cookie_t sba_tx_submit(struct dma_async_tx_descriptor *tx)
  443. {
  444. unsigned long flags;
  445. dma_cookie_t cookie;
  446. struct sba_device *sba;
  447. struct sba_request *req, *nreq;
  448. if (unlikely(!tx))
  449. return -EINVAL;
  450. sba = to_sba_device(tx->chan);
  451. req = to_sba_request(tx);
  452. /* Assign cookie and mark all chained requests pending */
  453. spin_lock_irqsave(&sba->reqs_lock, flags);
  454. cookie = dma_cookie_assign(tx);
  455. _sba_pending_request(sba, req);
  456. list_for_each_entry(nreq, &req->next, next)
  457. _sba_pending_request(sba, nreq);
  458. spin_unlock_irqrestore(&sba->reqs_lock, flags);
  459. return cookie;
  460. }
  461. static enum dma_status sba_tx_status(struct dma_chan *dchan,
  462. dma_cookie_t cookie,
  463. struct dma_tx_state *txstate)
  464. {
  465. enum dma_status ret;
  466. struct sba_device *sba = to_sba_device(dchan);
  467. ret = dma_cookie_status(dchan, cookie, txstate);
  468. if (ret == DMA_COMPLETE)
  469. return ret;
  470. mbox_client_peek_data(sba->mchan);
  471. return dma_cookie_status(dchan, cookie, txstate);
  472. }
  473. static void sba_fillup_interrupt_msg(struct sba_request *req,
  474. struct brcm_sba_command *cmds,
  475. struct brcm_message *msg)
  476. {
  477. u64 cmd;
  478. u32 c_mdata;
  479. dma_addr_t resp_dma = req->tx.phys;
  480. struct brcm_sba_command *cmdsp = cmds;
  481. /* Type-B command to load dummy data into buf0 */
  482. cmd = sba_cmd_enc(0x0, SBA_TYPE_B,
  483. SBA_TYPE_SHIFT, SBA_TYPE_MASK);
  484. cmd = sba_cmd_enc(cmd, req->sba->hw_resp_size,
  485. SBA_USER_DEF_SHIFT, SBA_USER_DEF_MASK);
  486. c_mdata = sba_cmd_load_c_mdata(0);
  487. cmd = sba_cmd_enc(cmd, SBA_C_MDATA_LS(c_mdata),
  488. SBA_C_MDATA_SHIFT, SBA_C_MDATA_MASK);
  489. cmd = sba_cmd_enc(cmd, SBA_CMD_LOAD_BUFFER,
  490. SBA_CMD_SHIFT, SBA_CMD_MASK);
  491. cmdsp->cmd = cmd;
  492. *cmdsp->cmd_dma = cpu_to_le64(cmd);
  493. cmdsp->flags = BRCM_SBA_CMD_TYPE_B;
  494. cmdsp->data = resp_dma;
  495. cmdsp->data_len = req->sba->hw_resp_size;
  496. cmdsp++;
  497. /* Type-A command to write buf0 to dummy location */
  498. cmd = sba_cmd_enc(0x0, SBA_TYPE_A,
  499. SBA_TYPE_SHIFT, SBA_TYPE_MASK);
  500. cmd = sba_cmd_enc(cmd, req->sba->hw_resp_size,
  501. SBA_USER_DEF_SHIFT, SBA_USER_DEF_MASK);
  502. cmd = sba_cmd_enc(cmd, 0x1,
  503. SBA_RESP_SHIFT, SBA_RESP_MASK);
  504. c_mdata = sba_cmd_write_c_mdata(0);
  505. cmd = sba_cmd_enc(cmd, SBA_C_MDATA_LS(c_mdata),
  506. SBA_C_MDATA_SHIFT, SBA_C_MDATA_MASK);
  507. cmd = sba_cmd_enc(cmd, SBA_CMD_WRITE_BUFFER,
  508. SBA_CMD_SHIFT, SBA_CMD_MASK);
  509. cmdsp->cmd = cmd;
  510. *cmdsp->cmd_dma = cpu_to_le64(cmd);
  511. cmdsp->flags = BRCM_SBA_CMD_TYPE_A;
  512. if (req->sba->hw_resp_size) {
  513. cmdsp->flags |= BRCM_SBA_CMD_HAS_RESP;
  514. cmdsp->resp = resp_dma;
  515. cmdsp->resp_len = req->sba->hw_resp_size;
  516. }
  517. cmdsp->flags |= BRCM_SBA_CMD_HAS_OUTPUT;
  518. cmdsp->data = resp_dma;
  519. cmdsp->data_len = req->sba->hw_resp_size;
  520. cmdsp++;
  521. /* Fillup brcm_message */
  522. msg->type = BRCM_MESSAGE_SBA;
  523. msg->sba.cmds = cmds;
  524. msg->sba.cmds_count = cmdsp - cmds;
  525. msg->ctx = req;
  526. msg->error = 0;
  527. }
  528. static struct dma_async_tx_descriptor *
  529. sba_prep_dma_interrupt(struct dma_chan *dchan, unsigned long flags)
  530. {
  531. struct sba_request *req = NULL;
  532. struct sba_device *sba = to_sba_device(dchan);
  533. /* Alloc new request */
  534. req = sba_alloc_request(sba);
  535. if (!req)
  536. return NULL;
  537. /*
  538. * Force fence so that no requests are submitted
  539. * until DMA callback for this request is invoked.
  540. */
  541. req->flags |= SBA_REQUEST_FENCE;
  542. /* Fillup request message */
  543. sba_fillup_interrupt_msg(req, req->cmds, &req->msg);
  544. /* Init async_tx descriptor */
  545. req->tx.flags = flags;
  546. req->tx.cookie = -EBUSY;
  547. return &req->tx;
  548. }
  549. static void sba_fillup_memcpy_msg(struct sba_request *req,
  550. struct brcm_sba_command *cmds,
  551. struct brcm_message *msg,
  552. dma_addr_t msg_offset, size_t msg_len,
  553. dma_addr_t dst, dma_addr_t src)
  554. {
  555. u64 cmd;
  556. u32 c_mdata;
  557. dma_addr_t resp_dma = req->tx.phys;
  558. struct brcm_sba_command *cmdsp = cmds;
  559. /* Type-B command to load data into buf0 */
  560. cmd = sba_cmd_enc(0x0, SBA_TYPE_B,
  561. SBA_TYPE_SHIFT, SBA_TYPE_MASK);
  562. cmd = sba_cmd_enc(cmd, msg_len,
  563. SBA_USER_DEF_SHIFT, SBA_USER_DEF_MASK);
  564. c_mdata = sba_cmd_load_c_mdata(0);
  565. cmd = sba_cmd_enc(cmd, SBA_C_MDATA_LS(c_mdata),
  566. SBA_C_MDATA_SHIFT, SBA_C_MDATA_MASK);
  567. cmd = sba_cmd_enc(cmd, SBA_CMD_LOAD_BUFFER,
  568. SBA_CMD_SHIFT, SBA_CMD_MASK);
  569. cmdsp->cmd = cmd;
  570. *cmdsp->cmd_dma = cpu_to_le64(cmd);
  571. cmdsp->flags = BRCM_SBA_CMD_TYPE_B;
  572. cmdsp->data = src + msg_offset;
  573. cmdsp->data_len = msg_len;
  574. cmdsp++;
  575. /* Type-A command to write buf0 */
  576. cmd = sba_cmd_enc(0x0, SBA_TYPE_A,
  577. SBA_TYPE_SHIFT, SBA_TYPE_MASK);
  578. cmd = sba_cmd_enc(cmd, msg_len,
  579. SBA_USER_DEF_SHIFT, SBA_USER_DEF_MASK);
  580. cmd = sba_cmd_enc(cmd, 0x1,
  581. SBA_RESP_SHIFT, SBA_RESP_MASK);
  582. c_mdata = sba_cmd_write_c_mdata(0);
  583. cmd = sba_cmd_enc(cmd, SBA_C_MDATA_LS(c_mdata),
  584. SBA_C_MDATA_SHIFT, SBA_C_MDATA_MASK);
  585. cmd = sba_cmd_enc(cmd, SBA_CMD_WRITE_BUFFER,
  586. SBA_CMD_SHIFT, SBA_CMD_MASK);
  587. cmdsp->cmd = cmd;
  588. *cmdsp->cmd_dma = cpu_to_le64(cmd);
  589. cmdsp->flags = BRCM_SBA_CMD_TYPE_A;
  590. if (req->sba->hw_resp_size) {
  591. cmdsp->flags |= BRCM_SBA_CMD_HAS_RESP;
  592. cmdsp->resp = resp_dma;
  593. cmdsp->resp_len = req->sba->hw_resp_size;
  594. }
  595. cmdsp->flags |= BRCM_SBA_CMD_HAS_OUTPUT;
  596. cmdsp->data = dst + msg_offset;
  597. cmdsp->data_len = msg_len;
  598. cmdsp++;
  599. /* Fillup brcm_message */
  600. msg->type = BRCM_MESSAGE_SBA;
  601. msg->sba.cmds = cmds;
  602. msg->sba.cmds_count = cmdsp - cmds;
  603. msg->ctx = req;
  604. msg->error = 0;
  605. }
  606. static struct sba_request *
  607. sba_prep_dma_memcpy_req(struct sba_device *sba,
  608. dma_addr_t off, dma_addr_t dst, dma_addr_t src,
  609. size_t len, unsigned long flags)
  610. {
  611. struct sba_request *req = NULL;
  612. /* Alloc new request */
  613. req = sba_alloc_request(sba);
  614. if (!req)
  615. return NULL;
  616. if (flags & DMA_PREP_FENCE)
  617. req->flags |= SBA_REQUEST_FENCE;
  618. /* Fillup request message */
  619. sba_fillup_memcpy_msg(req, req->cmds, &req->msg,
  620. off, len, dst, src);
  621. /* Init async_tx descriptor */
  622. req->tx.flags = flags;
  623. req->tx.cookie = -EBUSY;
  624. return req;
  625. }
  626. static struct dma_async_tx_descriptor *
  627. sba_prep_dma_memcpy(struct dma_chan *dchan, dma_addr_t dst, dma_addr_t src,
  628. size_t len, unsigned long flags)
  629. {
  630. size_t req_len;
  631. dma_addr_t off = 0;
  632. struct sba_device *sba = to_sba_device(dchan);
  633. struct sba_request *first = NULL, *req;
  634. /* Create chained requests where each request is up to hw_buf_size */
  635. while (len) {
  636. req_len = (len < sba->hw_buf_size) ? len : sba->hw_buf_size;
  637. req = sba_prep_dma_memcpy_req(sba, off, dst, src,
  638. req_len, flags);
  639. if (!req) {
  640. if (first)
  641. sba_free_chained_requests(first);
  642. return NULL;
  643. }
  644. if (first)
  645. sba_chain_request(first, req);
  646. else
  647. first = req;
  648. off += req_len;
  649. len -= req_len;
  650. }
  651. return (first) ? &first->tx : NULL;
  652. }
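/*
 * Illustrative only (not part of the original driver): a minimal sketch of
 * how a generic dmaengine client could use this channel for a memcpy
 * offload. The function name and parameters here are made up for the
 * example; the client calls used are standard dmaengine APIs. Internally
 * the driver splits the copy into chained sub-requests of at most
 * hw_buf_size bytes, as done by sba_prep_dma_memcpy() above.
 */
static inline int sba_example_memcpy(struct dma_chan *chan, dma_addr_t dst,
				     dma_addr_t src, size_t len)
{
	struct dma_async_tx_descriptor *tx;
	dma_cookie_t cookie;

	/* Prepare a (possibly chained) memcpy descriptor on this channel */
	tx = chan->device->device_prep_dma_memcpy(chan, dst, src, len,
						  DMA_PREP_INTERRUPT);
	if (!tx)
		return -ENOMEM;

	/* Queue the descriptor and kick pending work */
	cookie = dmaengine_submit(tx);
	dma_async_issue_pending(chan);

	/* Busy-wait for completion; a real client would normally use a callback */
	while (dma_async_is_tx_complete(chan, cookie, NULL, NULL) == DMA_IN_PROGRESS)
		cpu_relax();

	return 0;
}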
  653. static void sba_fillup_xor_msg(struct sba_request *req,
  654. struct brcm_sba_command *cmds,
  655. struct brcm_message *msg,
  656. dma_addr_t msg_offset, size_t msg_len,
  657. dma_addr_t dst, dma_addr_t *src, u32 src_cnt)
  658. {
  659. u64 cmd;
  660. u32 c_mdata;
  661. unsigned int i;
  662. dma_addr_t resp_dma = req->tx.phys;
  663. struct brcm_sba_command *cmdsp = cmds;
  664. /* Type-B command to load data into buf0 */
  665. cmd = sba_cmd_enc(0x0, SBA_TYPE_B,
  666. SBA_TYPE_SHIFT, SBA_TYPE_MASK);
  667. cmd = sba_cmd_enc(cmd, msg_len,
  668. SBA_USER_DEF_SHIFT, SBA_USER_DEF_MASK);
  669. c_mdata = sba_cmd_load_c_mdata(0);
  670. cmd = sba_cmd_enc(cmd, SBA_C_MDATA_LS(c_mdata),
  671. SBA_C_MDATA_SHIFT, SBA_C_MDATA_MASK);
  672. cmd = sba_cmd_enc(cmd, SBA_CMD_LOAD_BUFFER,
  673. SBA_CMD_SHIFT, SBA_CMD_MASK);
  674. cmdsp->cmd = cmd;
  675. *cmdsp->cmd_dma = cpu_to_le64(cmd);
  676. cmdsp->flags = BRCM_SBA_CMD_TYPE_B;
  677. cmdsp->data = src[0] + msg_offset;
  678. cmdsp->data_len = msg_len;
  679. cmdsp++;
  680. /* Type-B commands to xor data with buf0 and put it back in buf0 */
  681. for (i = 1; i < src_cnt; i++) {
  682. cmd = sba_cmd_enc(0x0, SBA_TYPE_B,
  683. SBA_TYPE_SHIFT, SBA_TYPE_MASK);
  684. cmd = sba_cmd_enc(cmd, msg_len,
  685. SBA_USER_DEF_SHIFT, SBA_USER_DEF_MASK);
  686. c_mdata = sba_cmd_xor_c_mdata(0, 0);
  687. cmd = sba_cmd_enc(cmd, SBA_C_MDATA_LS(c_mdata),
  688. SBA_C_MDATA_SHIFT, SBA_C_MDATA_MASK);
  689. cmd = sba_cmd_enc(cmd, SBA_CMD_XOR,
  690. SBA_CMD_SHIFT, SBA_CMD_MASK);
  691. cmdsp->cmd = cmd;
  692. *cmdsp->cmd_dma = cpu_to_le64(cmd);
  693. cmdsp->flags = BRCM_SBA_CMD_TYPE_B;
  694. cmdsp->data = src[i] + msg_offset;
  695. cmdsp->data_len = msg_len;
  696. cmdsp++;
  697. }
  698. /* Type-A command to write buf0 */
  699. cmd = sba_cmd_enc(0x0, SBA_TYPE_A,
  700. SBA_TYPE_SHIFT, SBA_TYPE_MASK);
  701. cmd = sba_cmd_enc(cmd, msg_len,
  702. SBA_USER_DEF_SHIFT, SBA_USER_DEF_MASK);
  703. cmd = sba_cmd_enc(cmd, 0x1,
  704. SBA_RESP_SHIFT, SBA_RESP_MASK);
  705. c_mdata = sba_cmd_write_c_mdata(0);
  706. cmd = sba_cmd_enc(cmd, SBA_C_MDATA_LS(c_mdata),
  707. SBA_C_MDATA_SHIFT, SBA_C_MDATA_MASK);
  708. cmd = sba_cmd_enc(cmd, SBA_CMD_WRITE_BUFFER,
  709. SBA_CMD_SHIFT, SBA_CMD_MASK);
  710. cmdsp->cmd = cmd;
  711. *cmdsp->cmd_dma = cpu_to_le64(cmd);
  712. cmdsp->flags = BRCM_SBA_CMD_TYPE_A;
  713. if (req->sba->hw_resp_size) {
  714. cmdsp->flags |= BRCM_SBA_CMD_HAS_RESP;
  715. cmdsp->resp = resp_dma;
  716. cmdsp->resp_len = req->sba->hw_resp_size;
  717. }
  718. cmdsp->flags |= BRCM_SBA_CMD_HAS_OUTPUT;
  719. cmdsp->data = dst + msg_offset;
  720. cmdsp->data_len = msg_len;
  721. cmdsp++;
  722. /* Fillup brcm_message */
  723. msg->type = BRCM_MESSAGE_SBA;
  724. msg->sba.cmds = cmds;
  725. msg->sba.cmds_count = cmdsp - cmds;
  726. msg->ctx = req;
  727. msg->error = 0;
  728. }
  729. static struct sba_request *
  730. sba_prep_dma_xor_req(struct sba_device *sba,
  731. dma_addr_t off, dma_addr_t dst, dma_addr_t *src,
  732. u32 src_cnt, size_t len, unsigned long flags)
  733. {
  734. struct sba_request *req = NULL;
  735. /* Alloc new request */
  736. req = sba_alloc_request(sba);
  737. if (!req)
  738. return NULL;
  739. if (flags & DMA_PREP_FENCE)
  740. req->flags |= SBA_REQUEST_FENCE;
  741. /* Fillup request message */
  742. sba_fillup_xor_msg(req, req->cmds, &req->msg,
  743. off, len, dst, src, src_cnt);
  744. /* Init async_tx descriptor */
  745. req->tx.flags = flags;
  746. req->tx.cookie = -EBUSY;
  747. return req;
  748. }
  749. static struct dma_async_tx_descriptor *
  750. sba_prep_dma_xor(struct dma_chan *dchan, dma_addr_t dst, dma_addr_t *src,
  751. u32 src_cnt, size_t len, unsigned long flags)
  752. {
  753. size_t req_len;
  754. dma_addr_t off = 0;
  755. struct sba_device *sba = to_sba_device(dchan);
  756. struct sba_request *first = NULL, *req;
  757. /* Sanity checks */
  758. if (unlikely(src_cnt > sba->max_xor_srcs))
  759. return NULL;
  760. /* Create chained requests where each request is up to hw_buf_size */
  761. while (len) {
  762. req_len = (len < sba->hw_buf_size) ? len : sba->hw_buf_size;
  763. req = sba_prep_dma_xor_req(sba, off, dst, src, src_cnt,
  764. req_len, flags);
  765. if (!req) {
  766. if (first)
  767. sba_free_chained_requests(first);
  768. return NULL;
  769. }
  770. if (first)
  771. sba_chain_request(first, req);
  772. else
  773. first = req;
  774. off += req_len;
  775. len -= req_len;
  776. }
  777. return (first) ? &first->tx : NULL;
  778. }
  779. static void sba_fillup_pq_msg(struct sba_request *req,
  780. bool pq_continue,
  781. struct brcm_sba_command *cmds,
  782. struct brcm_message *msg,
  783. dma_addr_t msg_offset, size_t msg_len,
  784. dma_addr_t *dst_p, dma_addr_t *dst_q,
  785. const u8 *scf, dma_addr_t *src, u32 src_cnt)
  786. {
  787. u64 cmd;
  788. u32 c_mdata;
  789. unsigned int i;
  790. dma_addr_t resp_dma = req->tx.phys;
  791. struct brcm_sba_command *cmdsp = cmds;
  792. if (pq_continue) {
  793. /* Type-B command to load old P into buf0 */
  794. if (dst_p) {
  795. cmd = sba_cmd_enc(0x0, SBA_TYPE_B,
  796. SBA_TYPE_SHIFT, SBA_TYPE_MASK);
  797. cmd = sba_cmd_enc(cmd, msg_len,
  798. SBA_USER_DEF_SHIFT, SBA_USER_DEF_MASK);
  799. c_mdata = sba_cmd_load_c_mdata(0);
  800. cmd = sba_cmd_enc(cmd, SBA_C_MDATA_LS(c_mdata),
  801. SBA_C_MDATA_SHIFT, SBA_C_MDATA_MASK);
  802. cmd = sba_cmd_enc(cmd, SBA_CMD_LOAD_BUFFER,
  803. SBA_CMD_SHIFT, SBA_CMD_MASK);
  804. cmdsp->cmd = cmd;
  805. *cmdsp->cmd_dma = cpu_to_le64(cmd);
  806. cmdsp->flags = BRCM_SBA_CMD_TYPE_B;
  807. cmdsp->data = *dst_p + msg_offset;
  808. cmdsp->data_len = msg_len;
  809. cmdsp++;
  810. }
  811. /* Type-B command to load old Q into buf1 */
  812. if (dst_q) {
  813. cmd = sba_cmd_enc(0x0, SBA_TYPE_B,
  814. SBA_TYPE_SHIFT, SBA_TYPE_MASK);
  815. cmd = sba_cmd_enc(cmd, msg_len,
  816. SBA_USER_DEF_SHIFT, SBA_USER_DEF_MASK);
  817. c_mdata = sba_cmd_load_c_mdata(1);
  818. cmd = sba_cmd_enc(cmd, SBA_C_MDATA_LS(c_mdata),
  819. SBA_C_MDATA_SHIFT, SBA_C_MDATA_MASK);
  820. cmd = sba_cmd_enc(cmd, SBA_CMD_LOAD_BUFFER,
  821. SBA_CMD_SHIFT, SBA_CMD_MASK);
  822. cmdsp->cmd = cmd;
  823. *cmdsp->cmd_dma = cpu_to_le64(cmd);
  824. cmdsp->flags = BRCM_SBA_CMD_TYPE_B;
  825. cmdsp->data = *dst_q + msg_offset;
  826. cmdsp->data_len = msg_len;
  827. cmdsp++;
  828. }
  829. } else {
  830. /* Type-A command to zero all buffers */
  831. cmd = sba_cmd_enc(0x0, SBA_TYPE_A,
  832. SBA_TYPE_SHIFT, SBA_TYPE_MASK);
  833. cmd = sba_cmd_enc(cmd, msg_len,
  834. SBA_USER_DEF_SHIFT, SBA_USER_DEF_MASK);
  835. cmd = sba_cmd_enc(cmd, SBA_CMD_ZERO_ALL_BUFFERS,
  836. SBA_CMD_SHIFT, SBA_CMD_MASK);
  837. cmdsp->cmd = cmd;
  838. *cmdsp->cmd_dma = cpu_to_le64(cmd);
  839. cmdsp->flags = BRCM_SBA_CMD_TYPE_A;
  840. cmdsp++;
  841. }
  842. /* Type-B commands to generate P into buf0 and Q into buf1 */
  843. for (i = 0; i < src_cnt; i++) {
  844. cmd = sba_cmd_enc(0x0, SBA_TYPE_B,
  845. SBA_TYPE_SHIFT, SBA_TYPE_MASK);
  846. cmd = sba_cmd_enc(cmd, msg_len,
  847. SBA_USER_DEF_SHIFT, SBA_USER_DEF_MASK);
  848. c_mdata = sba_cmd_pq_c_mdata(raid6_gflog[scf[i]], 1, 0);
  849. cmd = sba_cmd_enc(cmd, SBA_C_MDATA_LS(c_mdata),
  850. SBA_C_MDATA_SHIFT, SBA_C_MDATA_MASK);
  851. cmd = sba_cmd_enc(cmd, SBA_C_MDATA_MS(c_mdata),
  852. SBA_C_MDATA_MS_SHIFT, SBA_C_MDATA_MS_MASK);
  853. cmd = sba_cmd_enc(cmd, SBA_CMD_GALOIS_XOR,
  854. SBA_CMD_SHIFT, SBA_CMD_MASK);
  855. cmdsp->cmd = cmd;
  856. *cmdsp->cmd_dma = cpu_to_le64(cmd);
  857. cmdsp->flags = BRCM_SBA_CMD_TYPE_B;
  858. cmdsp->data = src[i] + msg_offset;
  859. cmdsp->data_len = msg_len;
  860. cmdsp++;
  861. }
  862. /* Type-A command to write buf0 */
  863. if (dst_p) {
  864. cmd = sba_cmd_enc(0x0, SBA_TYPE_A,
  865. SBA_TYPE_SHIFT, SBA_TYPE_MASK);
  866. cmd = sba_cmd_enc(cmd, msg_len,
  867. SBA_USER_DEF_SHIFT, SBA_USER_DEF_MASK);
  868. cmd = sba_cmd_enc(cmd, 0x1,
  869. SBA_RESP_SHIFT, SBA_RESP_MASK);
  870. c_mdata = sba_cmd_write_c_mdata(0);
  871. cmd = sba_cmd_enc(cmd, SBA_C_MDATA_LS(c_mdata),
  872. SBA_C_MDATA_SHIFT, SBA_C_MDATA_MASK);
  873. cmd = sba_cmd_enc(cmd, SBA_CMD_WRITE_BUFFER,
  874. SBA_CMD_SHIFT, SBA_CMD_MASK);
  875. cmdsp->cmd = cmd;
  876. *cmdsp->cmd_dma = cpu_to_le64(cmd);
  877. cmdsp->flags = BRCM_SBA_CMD_TYPE_A;
  878. if (req->sba->hw_resp_size) {
  879. cmdsp->flags |= BRCM_SBA_CMD_HAS_RESP;
  880. cmdsp->resp = resp_dma;
  881. cmdsp->resp_len = req->sba->hw_resp_size;
  882. }
  883. cmdsp->flags |= BRCM_SBA_CMD_HAS_OUTPUT;
  884. cmdsp->data = *dst_p + msg_offset;
  885. cmdsp->data_len = msg_len;
  886. cmdsp++;
  887. }
  888. /* Type-A command to write buf1 */
  889. if (dst_q) {
  890. cmd = sba_cmd_enc(0x0, SBA_TYPE_A,
  891. SBA_TYPE_SHIFT, SBA_TYPE_MASK);
  892. cmd = sba_cmd_enc(cmd, msg_len,
  893. SBA_USER_DEF_SHIFT, SBA_USER_DEF_MASK);
  894. cmd = sba_cmd_enc(cmd, 0x1,
  895. SBA_RESP_SHIFT, SBA_RESP_MASK);
  896. c_mdata = sba_cmd_write_c_mdata(1);
  897. cmd = sba_cmd_enc(cmd, SBA_C_MDATA_LS(c_mdata),
  898. SBA_C_MDATA_SHIFT, SBA_C_MDATA_MASK);
  899. cmd = sba_cmd_enc(cmd, SBA_CMD_WRITE_BUFFER,
  900. SBA_CMD_SHIFT, SBA_CMD_MASK);
  901. cmdsp->cmd = cmd;
  902. *cmdsp->cmd_dma = cpu_to_le64(cmd);
  903. cmdsp->flags = BRCM_SBA_CMD_TYPE_A;
  904. if (req->sba->hw_resp_size) {
  905. cmdsp->flags |= BRCM_SBA_CMD_HAS_RESP;
  906. cmdsp->resp = resp_dma;
  907. cmdsp->resp_len = req->sba->hw_resp_size;
  908. }
  909. cmdsp->flags |= BRCM_SBA_CMD_HAS_OUTPUT;
  910. cmdsp->data = *dst_q + msg_offset;
  911. cmdsp->data_len = msg_len;
  912. cmdsp++;
  913. }
  914. /* Fillup brcm_message */
  915. msg->type = BRCM_MESSAGE_SBA;
  916. msg->sba.cmds = cmds;
  917. msg->sba.cmds_count = cmdsp - cmds;
  918. msg->ctx = req;
  919. msg->error = 0;
  920. }
  921. static struct sba_request *
  922. sba_prep_dma_pq_req(struct sba_device *sba, dma_addr_t off,
  923. dma_addr_t *dst_p, dma_addr_t *dst_q, dma_addr_t *src,
  924. u32 src_cnt, const u8 *scf, size_t len, unsigned long flags)
  925. {
  926. struct sba_request *req = NULL;
  927. /* Alloc new request */
  928. req = sba_alloc_request(sba);
  929. if (!req)
  930. return NULL;
  931. if (flags & DMA_PREP_FENCE)
  932. req->flags |= SBA_REQUEST_FENCE;
  933. /* Fillup request messages */
  934. sba_fillup_pq_msg(req, dmaf_continue(flags),
  935. req->cmds, &req->msg,
  936. off, len, dst_p, dst_q, scf, src, src_cnt);
  937. /* Init async_tx descriptor */
  938. req->tx.flags = flags;
  939. req->tx.cookie = -EBUSY;
  940. return req;
  941. }
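/*
 * The single-source PQ helper below multiplies the source data by the RAID6
 * generator raised to dpos = raid6_gflog[scf]. Since one GALOIS command only
 * applies a limited exponent, dpos is consumed in chunks of at most
 * (max_pq_coefs - 1). For example, with max_pq_coefs = 6 (SBA_VER_1) and
 * dpos = 20, the code emits one Type-B GALOIS command with pos = 5 followed
 * by three Type-A GALOIS commands with pos = 5, i.e. 5 + 5 + 5 + 5 = 20.
 */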
  942. static void sba_fillup_pq_single_msg(struct sba_request *req,
  943. bool pq_continue,
  944. struct brcm_sba_command *cmds,
  945. struct brcm_message *msg,
  946. dma_addr_t msg_offset, size_t msg_len,
  947. dma_addr_t *dst_p, dma_addr_t *dst_q,
  948. dma_addr_t src, u8 scf)
  949. {
  950. u64 cmd;
  951. u32 c_mdata;
  952. u8 pos, dpos = raid6_gflog[scf];
  953. dma_addr_t resp_dma = req->tx.phys;
  954. struct brcm_sba_command *cmdsp = cmds;
  955. if (!dst_p)
  956. goto skip_p;
  957. if (pq_continue) {
  958. /* Type-B command to load old P into buf0 */
  959. cmd = sba_cmd_enc(0x0, SBA_TYPE_B,
  960. SBA_TYPE_SHIFT, SBA_TYPE_MASK);
  961. cmd = sba_cmd_enc(cmd, msg_len,
  962. SBA_USER_DEF_SHIFT, SBA_USER_DEF_MASK);
  963. c_mdata = sba_cmd_load_c_mdata(0);
  964. cmd = sba_cmd_enc(cmd, SBA_C_MDATA_LS(c_mdata),
  965. SBA_C_MDATA_SHIFT, SBA_C_MDATA_MASK);
  966. cmd = sba_cmd_enc(cmd, SBA_CMD_LOAD_BUFFER,
  967. SBA_CMD_SHIFT, SBA_CMD_MASK);
  968. cmdsp->cmd = cmd;
  969. *cmdsp->cmd_dma = cpu_to_le64(cmd);
  970. cmdsp->flags = BRCM_SBA_CMD_TYPE_B;
  971. cmdsp->data = *dst_p + msg_offset;
  972. cmdsp->data_len = msg_len;
  973. cmdsp++;
  974. /*
  975. * Type-B commands to xor data with buf0 and put it
  976. * back in buf0
  977. */
  978. cmd = sba_cmd_enc(0x0, SBA_TYPE_B,
  979. SBA_TYPE_SHIFT, SBA_TYPE_MASK);
  980. cmd = sba_cmd_enc(cmd, msg_len,
  981. SBA_USER_DEF_SHIFT, SBA_USER_DEF_MASK);
  982. c_mdata = sba_cmd_xor_c_mdata(0, 0);
  983. cmd = sba_cmd_enc(cmd, SBA_C_MDATA_LS(c_mdata),
  984. SBA_C_MDATA_SHIFT, SBA_C_MDATA_MASK);
  985. cmd = sba_cmd_enc(cmd, SBA_CMD_XOR,
  986. SBA_CMD_SHIFT, SBA_CMD_MASK);
  987. cmdsp->cmd = cmd;
  988. *cmdsp->cmd_dma = cpu_to_le64(cmd);
  989. cmdsp->flags = BRCM_SBA_CMD_TYPE_B;
  990. cmdsp->data = src + msg_offset;
  991. cmdsp->data_len = msg_len;
  992. cmdsp++;
  993. } else {
  994. /* Type-B command to load old P into buf0 */
  995. cmd = sba_cmd_enc(0x0, SBA_TYPE_B,
  996. SBA_TYPE_SHIFT, SBA_TYPE_MASK);
  997. cmd = sba_cmd_enc(cmd, msg_len,
  998. SBA_USER_DEF_SHIFT, SBA_USER_DEF_MASK);
  999. c_mdata = sba_cmd_load_c_mdata(0);
  1000. cmd = sba_cmd_enc(cmd, SBA_C_MDATA_LS(c_mdata),
  1001. SBA_C_MDATA_SHIFT, SBA_C_MDATA_MASK);
  1002. cmd = sba_cmd_enc(cmd, SBA_CMD_LOAD_BUFFER,
  1003. SBA_CMD_SHIFT, SBA_CMD_MASK);
  1004. cmdsp->cmd = cmd;
  1005. *cmdsp->cmd_dma = cpu_to_le64(cmd);
  1006. cmdsp->flags = BRCM_SBA_CMD_TYPE_B;
  1007. cmdsp->data = src + msg_offset;
  1008. cmdsp->data_len = msg_len;
  1009. cmdsp++;
  1010. }
  1011. /* Type-A command to write buf0 */
  1012. cmd = sba_cmd_enc(0x0, SBA_TYPE_A,
  1013. SBA_TYPE_SHIFT, SBA_TYPE_MASK);
  1014. cmd = sba_cmd_enc(cmd, msg_len,
  1015. SBA_USER_DEF_SHIFT, SBA_USER_DEF_MASK);
  1016. cmd = sba_cmd_enc(cmd, 0x1,
  1017. SBA_RESP_SHIFT, SBA_RESP_MASK);
  1018. c_mdata = sba_cmd_write_c_mdata(0);
  1019. cmd = sba_cmd_enc(cmd, SBA_C_MDATA_LS(c_mdata),
  1020. SBA_C_MDATA_SHIFT, SBA_C_MDATA_MASK);
  1021. cmd = sba_cmd_enc(cmd, SBA_CMD_WRITE_BUFFER,
  1022. SBA_CMD_SHIFT, SBA_CMD_MASK);
  1023. cmdsp->cmd = cmd;
  1024. *cmdsp->cmd_dma = cpu_to_le64(cmd);
  1025. cmdsp->flags = BRCM_SBA_CMD_TYPE_A;
  1026. if (req->sba->hw_resp_size) {
  1027. cmdsp->flags |= BRCM_SBA_CMD_HAS_RESP;
  1028. cmdsp->resp = resp_dma;
  1029. cmdsp->resp_len = req->sba->hw_resp_size;
  1030. }
  1031. cmdsp->flags |= BRCM_SBA_CMD_HAS_OUTPUT;
  1032. cmdsp->data = *dst_p + msg_offset;
  1033. cmdsp->data_len = msg_len;
  1034. cmdsp++;
  1035. skip_p:
  1036. if (!dst_q)
  1037. goto skip_q;
  1038. /* Type-A command to zero all buffers */
  1039. cmd = sba_cmd_enc(0x0, SBA_TYPE_A,
  1040. SBA_TYPE_SHIFT, SBA_TYPE_MASK);
  1041. cmd = sba_cmd_enc(cmd, msg_len,
  1042. SBA_USER_DEF_SHIFT, SBA_USER_DEF_MASK);
  1043. cmd = sba_cmd_enc(cmd, SBA_CMD_ZERO_ALL_BUFFERS,
  1044. SBA_CMD_SHIFT, SBA_CMD_MASK);
  1045. cmdsp->cmd = cmd;
  1046. *cmdsp->cmd_dma = cpu_to_le64(cmd);
  1047. cmdsp->flags = BRCM_SBA_CMD_TYPE_A;
  1048. cmdsp++;
  1049. if (dpos == 255)
  1050. goto skip_q_computation;
  1051. pos = (dpos < req->sba->max_pq_coefs) ?
  1052. dpos : (req->sba->max_pq_coefs - 1);
  1053. /*
  1054. * Type-B command to generate initial Q from data
  1055. * and store output into buf0
  1056. */
  1057. cmd = sba_cmd_enc(0x0, SBA_TYPE_B,
  1058. SBA_TYPE_SHIFT, SBA_TYPE_MASK);
  1059. cmd = sba_cmd_enc(cmd, msg_len,
  1060. SBA_USER_DEF_SHIFT, SBA_USER_DEF_MASK);
  1061. c_mdata = sba_cmd_pq_c_mdata(pos, 0, 0);
  1062. cmd = sba_cmd_enc(cmd, SBA_C_MDATA_LS(c_mdata),
  1063. SBA_C_MDATA_SHIFT, SBA_C_MDATA_MASK);
  1064. cmd = sba_cmd_enc(cmd, SBA_C_MDATA_MS(c_mdata),
  1065. SBA_C_MDATA_MS_SHIFT, SBA_C_MDATA_MS_MASK);
  1066. cmd = sba_cmd_enc(cmd, SBA_CMD_GALOIS,
  1067. SBA_CMD_SHIFT, SBA_CMD_MASK);
  1068. cmdsp->cmd = cmd;
  1069. *cmdsp->cmd_dma = cpu_to_le64(cmd);
  1070. cmdsp->flags = BRCM_SBA_CMD_TYPE_B;
  1071. cmdsp->data = src + msg_offset;
  1072. cmdsp->data_len = msg_len;
  1073. cmdsp++;
  1074. dpos -= pos;
  1075. /* Multiple Type-A commands to generate final Q */
  1076. while (dpos) {
  1077. pos = (dpos < req->sba->max_pq_coefs) ?
  1078. dpos : (req->sba->max_pq_coefs - 1);
  1079. /*
  1080. * Type-A command to generate Q from buf0 and
  1081. * buf1 and store the result in buf0
  1082. */
  1083. cmd = sba_cmd_enc(0x0, SBA_TYPE_A,
  1084. SBA_TYPE_SHIFT, SBA_TYPE_MASK);
  1085. cmd = sba_cmd_enc(cmd, msg_len,
  1086. SBA_USER_DEF_SHIFT, SBA_USER_DEF_MASK);
  1087. c_mdata = sba_cmd_pq_c_mdata(pos, 0, 1);
  1088. cmd = sba_cmd_enc(cmd, SBA_C_MDATA_LS(c_mdata),
  1089. SBA_C_MDATA_SHIFT, SBA_C_MDATA_MASK);
  1090. cmd = sba_cmd_enc(cmd, SBA_C_MDATA_MS(c_mdata),
  1091. SBA_C_MDATA_MS_SHIFT, SBA_C_MDATA_MS_MASK);
  1092. cmd = sba_cmd_enc(cmd, SBA_CMD_GALOIS,
  1093. SBA_CMD_SHIFT, SBA_CMD_MASK);
  1094. cmdsp->cmd = cmd;
  1095. *cmdsp->cmd_dma = cpu_to_le64(cmd);
  1096. cmdsp->flags = BRCM_SBA_CMD_TYPE_A;
  1097. cmdsp++;
  1098. dpos -= pos;
  1099. }
  1100. skip_q_computation:
  1101. if (pq_continue) {
  1102. /*
  1103. * Type-B command to XOR previous output with
  1104. * buf0 and write it into buf0
  1105. */
  1106. cmd = sba_cmd_enc(0x0, SBA_TYPE_B,
  1107. SBA_TYPE_SHIFT, SBA_TYPE_MASK);
  1108. cmd = sba_cmd_enc(cmd, msg_len,
  1109. SBA_USER_DEF_SHIFT, SBA_USER_DEF_MASK);
  1110. c_mdata = sba_cmd_xor_c_mdata(0, 0);
  1111. cmd = sba_cmd_enc(cmd, SBA_C_MDATA_LS(c_mdata),
  1112. SBA_C_MDATA_SHIFT, SBA_C_MDATA_MASK);
  1113. cmd = sba_cmd_enc(cmd, SBA_CMD_XOR,
  1114. SBA_CMD_SHIFT, SBA_CMD_MASK);
  1115. cmdsp->cmd = cmd;
  1116. *cmdsp->cmd_dma = cpu_to_le64(cmd);
  1117. cmdsp->flags = BRCM_SBA_CMD_TYPE_B;
  1118. cmdsp->data = *dst_q + msg_offset;
  1119. cmdsp->data_len = msg_len;
  1120. cmdsp++;
  1121. }
  1122. /* Type-A command to write buf0 */
  1123. cmd = sba_cmd_enc(0x0, SBA_TYPE_A,
  1124. SBA_TYPE_SHIFT, SBA_TYPE_MASK);
  1125. cmd = sba_cmd_enc(cmd, msg_len,
  1126. SBA_USER_DEF_SHIFT, SBA_USER_DEF_MASK);
  1127. cmd = sba_cmd_enc(cmd, 0x1,
  1128. SBA_RESP_SHIFT, SBA_RESP_MASK);
  1129. c_mdata = sba_cmd_write_c_mdata(0);
  1130. cmd = sba_cmd_enc(cmd, SBA_C_MDATA_LS(c_mdata),
  1131. SBA_C_MDATA_SHIFT, SBA_C_MDATA_MASK);
  1132. cmd = sba_cmd_enc(cmd, SBA_CMD_WRITE_BUFFER,
  1133. SBA_CMD_SHIFT, SBA_CMD_MASK);
  1134. cmdsp->cmd = cmd;
  1135. *cmdsp->cmd_dma = cpu_to_le64(cmd);
  1136. cmdsp->flags = BRCM_SBA_CMD_TYPE_A;
  1137. if (req->sba->hw_resp_size) {
  1138. cmdsp->flags |= BRCM_SBA_CMD_HAS_RESP;
  1139. cmdsp->resp = resp_dma;
  1140. cmdsp->resp_len = req->sba->hw_resp_size;
  1141. }
  1142. cmdsp->flags |= BRCM_SBA_CMD_HAS_OUTPUT;
  1143. cmdsp->data = *dst_q + msg_offset;
  1144. cmdsp->data_len = msg_len;
  1145. cmdsp++;
  1146. skip_q:
  1147. /* Fillup brcm_message */
  1148. msg->type = BRCM_MESSAGE_SBA;
  1149. msg->sba.cmds = cmds;
  1150. msg->sba.cmds_count = cmdsp - cmds;
  1151. msg->ctx = req;
  1152. msg->error = 0;
  1153. }
  1154. static struct sba_request *
  1155. sba_prep_dma_pq_single_req(struct sba_device *sba, dma_addr_t off,
  1156. dma_addr_t *dst_p, dma_addr_t *dst_q,
  1157. dma_addr_t src, u8 scf, size_t len,
  1158. unsigned long flags)
  1159. {
  1160. struct sba_request *req = NULL;
  1161. /* Alloc new request */
  1162. req = sba_alloc_request(sba);
  1163. if (!req)
  1164. return NULL;
  1165. if (flags & DMA_PREP_FENCE)
  1166. req->flags |= SBA_REQUEST_FENCE;
  1167. /* Fillup request messages */
  1168. sba_fillup_pq_single_msg(req, dmaf_continue(flags),
  1169. req->cmds, &req->msg, off, len,
  1170. dst_p, dst_q, src, scf);
  1171. /* Init async_tx descriptor */
  1172. req->tx.flags = flags;
  1173. req->tx.cookie = -EBUSY;
  1174. return req;
  1175. }
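/*
 * The PQ prep routine below picks between two strategies per hw_buf_size
 * chunk: the normal path issues one multi-source PQ request, while the
 * "slow" path is taken when any coefficient's log (raid6_gflog[scf[i]])
 * does not fit in max_pq_coefs. In that case it falls back to one fenced
 * single-source request per source, handling first the source (if any) that
 * aliases the Q destination so the old Q is consumed before being overwritten.
 */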
  1176. static struct dma_async_tx_descriptor *
  1177. sba_prep_dma_pq(struct dma_chan *dchan, dma_addr_t *dst, dma_addr_t *src,
  1178. u32 src_cnt, const u8 *scf, size_t len, unsigned long flags)
  1179. {
  1180. u32 i, dst_q_index;
  1181. size_t req_len;
  1182. bool slow = false;
  1183. dma_addr_t off = 0;
  1184. dma_addr_t *dst_p = NULL, *dst_q = NULL;
  1185. struct sba_device *sba = to_sba_device(dchan);
  1186. struct sba_request *first = NULL, *req;
  1187. /* Sanity checks */
  1188. if (unlikely(src_cnt > sba->max_pq_srcs))
  1189. return NULL;
  1190. for (i = 0; i < src_cnt; i++)
  1191. if (sba->max_pq_coefs <= raid6_gflog[scf[i]])
  1192. slow = true;
  1193. /* Figure-out P and Q destination addresses */
  1194. if (!(flags & DMA_PREP_PQ_DISABLE_P))
  1195. dst_p = &dst[0];
  1196. if (!(flags & DMA_PREP_PQ_DISABLE_Q))
  1197. dst_q = &dst[1];
  1198. /* Create chained requests where each request is up to hw_buf_size */
  1199. while (len) {
  1200. req_len = (len < sba->hw_buf_size) ? len : sba->hw_buf_size;
  1201. if (slow) {
  1202. dst_q_index = src_cnt;
  1203. if (dst_q) {
  1204. for (i = 0; i < src_cnt; i++) {
  1205. if (*dst_q == src[i]) {
  1206. dst_q_index = i;
  1207. break;
  1208. }
  1209. }
  1210. }
  1211. if (dst_q_index < src_cnt) {
  1212. i = dst_q_index;
  1213. req = sba_prep_dma_pq_single_req(sba,
  1214. off, dst_p, dst_q, src[i], scf[i],
  1215. req_len, flags | DMA_PREP_FENCE);
  1216. if (!req)
  1217. goto fail;
  1218. if (first)
  1219. sba_chain_request(first, req);
  1220. else
  1221. first = req;
  1222. flags |= DMA_PREP_CONTINUE;
  1223. }
  1224. for (i = 0; i < src_cnt; i++) {
  1225. if (dst_q_index == i)
  1226. continue;
  1227. req = sba_prep_dma_pq_single_req(sba,
  1228. off, dst_p, dst_q, src[i], scf[i],
  1229. req_len, flags | DMA_PREP_FENCE);
  1230. if (!req)
  1231. goto fail;
  1232. if (first)
  1233. sba_chain_request(first, req);
  1234. else
  1235. first = req;
  1236. flags |= DMA_PREP_CONTINUE;
  1237. }
  1238. } else {
  1239. req = sba_prep_dma_pq_req(sba, off,
  1240. dst_p, dst_q, src, src_cnt,
  1241. scf, req_len, flags);
  1242. if (!req)
  1243. goto fail;
  1244. if (first)
  1245. sba_chain_request(first, req);
  1246. else
  1247. first = req;
  1248. }
  1249. off += req_len;
  1250. len -= req_len;
  1251. }
  1252. return (first) ? &first->tx : NULL;
  1253. fail:
  1254. if (first)
  1255. sba_free_chained_requests(first);
  1256. return NULL;
  1257. }
  1258. /* ====== Mailbox callbacks ===== */
  1259. static void sba_receive_message(struct mbox_client *cl, void *msg)
  1260. {
  1261. struct brcm_message *m = msg;
  1262. struct sba_request *req = m->ctx;
  1263. struct sba_device *sba = req->sba;
  1264. /* Report error if the received message carries one */
  1265. if (m->error < 0)
  1266. dev_err(sba->dev, "%s got message with error %d",
  1267. dma_chan_name(&sba->dma_chan), m->error);
  1268. /* Process received request */
  1269. sba_process_received_request(sba, req);
  1270. }
  1271. /* ====== Debugfs callbacks ====== */
  1272. static int sba_debugfs_stats_show(struct seq_file *file, void *offset)
  1273. {
  1274. struct platform_device *pdev = to_platform_device(file->private);
  1275. struct sba_device *sba = platform_get_drvdata(pdev);
  1276. /* Write stats in file */
  1277. sba_write_stats_in_seqfile(sba, file);
  1278. return 0;
  1279. }
  1280. /* ====== Platform driver routines ===== */
  1281. static int sba_prealloc_channel_resources(struct sba_device *sba)
  1282. {
  1283. int i, j, ret = 0;
  1284. struct sba_request *req = NULL;
  1285. sba->resp_base = dma_alloc_coherent(sba->mbox_dev,
  1286. sba->max_resp_pool_size,
  1287. &sba->resp_dma_base, GFP_KERNEL);
  1288. if (!sba->resp_base)
  1289. return -ENOMEM;
  1290. sba->cmds_base = dma_alloc_coherent(sba->mbox_dev,
  1291. sba->max_cmds_pool_size,
  1292. &sba->cmds_dma_base, GFP_KERNEL);
  1293. if (!sba->cmds_base) {
  1294. ret = -ENOMEM;
  1295. goto fail_free_resp_pool;
  1296. }
  1297. spin_lock_init(&sba->reqs_lock);
  1298. sba->reqs_fence = false;
  1299. INIT_LIST_HEAD(&sba->reqs_alloc_list);
  1300. INIT_LIST_HEAD(&sba->reqs_pending_list);
  1301. INIT_LIST_HEAD(&sba->reqs_active_list);
  1302. INIT_LIST_HEAD(&sba->reqs_aborted_list);
  1303. INIT_LIST_HEAD(&sba->reqs_free_list);
  1304. for (i = 0; i < sba->max_req; i++) {
  1305. req = devm_kzalloc(sba->dev,
  1306. sizeof(*req) +
  1307. sba->max_cmd_per_req * sizeof(req->cmds[0]),
  1308. GFP_KERNEL);
  1309. if (!req) {
  1310. ret = -ENOMEM;
  1311. goto fail_free_cmds_pool;
  1312. }
  1313. INIT_LIST_HEAD(&req->node);
  1314. req->sba = sba;
  1315. req->flags = SBA_REQUEST_STATE_FREE;
  1316. INIT_LIST_HEAD(&req->next);
  1317. atomic_set(&req->next_pending_count, 0);
  1318. for (j = 0; j < sba->max_cmd_per_req; j++) {
  1319. req->cmds[j].cmd = 0;
  1320. req->cmds[j].cmd_dma = sba->cmds_base +
  1321. (i * sba->max_cmd_per_req + j) * sizeof(u64);
  1322. req->cmds[j].cmd_dma_addr = sba->cmds_dma_base +
  1323. (i * sba->max_cmd_per_req + j) * sizeof(u64);
  1324. req->cmds[j].flags = 0;
  1325. }
  1326. memset(&req->msg, 0, sizeof(req->msg));
  1327. dma_async_tx_descriptor_init(&req->tx, &sba->dma_chan);
  1328. async_tx_ack(&req->tx);
  1329. req->tx.tx_submit = sba_tx_submit;
  1330. req->tx.phys = sba->resp_dma_base + i * sba->hw_resp_size;
  1331. list_add_tail(&req->node, &sba->reqs_free_list);
  1332. }
  1333. return 0;
  1334. fail_free_cmds_pool:
  1335. dma_free_coherent(sba->mbox_dev,
  1336. sba->max_cmds_pool_size,
  1337. sba->cmds_base, sba->cmds_dma_base);
  1338. fail_free_resp_pool:
  1339. dma_free_coherent(sba->mbox_dev,
  1340. sba->max_resp_pool_size,
  1341. sba->resp_base, sba->resp_dma_base);
  1342. return ret;
  1343. }
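/*
 * Layout of the two coherent pools set up above: request i owns hw_resp_size
 * bytes of response space at resp_dma_base + i * hw_resp_size (exposed to
 * async_tx as req->tx.phys) and max_cmd_per_req 64-bit command slots at
 * cmds_base + (i * max_cmd_per_req + j) * sizeof(u64); the pool sizes are
 * presumably max_req times these per-request footprints.
 */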
  1344. static void sba_freeup_channel_resources(struct sba_device *sba)
  1345. {
  1346. dmaengine_terminate_all(&sba->dma_chan);
  1347. dma_free_coherent(sba->mbox_dev, sba->max_cmds_pool_size,
  1348. sba->cmds_base, sba->cmds_dma_base);
  1349. dma_free_coherent(sba->mbox_dev, sba->max_resp_pool_size,
  1350. sba->resp_base, sba->resp_dma_base);
  1351. sba->resp_base = NULL;
  1352. sba->resp_dma_base = 0;
  1353. }
static int sba_async_register(struct sba_device *sba)
{
	int ret;
	struct dma_device *dma_dev = &sba->dma_dev;

	/* Initialize DMA channel cookie */
	sba->dma_chan.device = dma_dev;
	dma_cookie_init(&sba->dma_chan);

	/* Initialize DMA device capability mask */
	dma_cap_zero(dma_dev->cap_mask);
	dma_cap_set(DMA_INTERRUPT, dma_dev->cap_mask);
	dma_cap_set(DMA_MEMCPY, dma_dev->cap_mask);
	dma_cap_set(DMA_XOR, dma_dev->cap_mask);
	dma_cap_set(DMA_PQ, dma_dev->cap_mask);

	/*
	 * Set mailbox channel device as the base device of
	 * our dma_device because the actual memory accesses
	 * will be done by mailbox controller
	 */
	dma_dev->dev = sba->mbox_dev;

	/* Set base prep routines */
	dma_dev->device_free_chan_resources = sba_free_chan_resources;
	dma_dev->device_terminate_all = sba_device_terminate_all;
	dma_dev->device_issue_pending = sba_issue_pending;
	dma_dev->device_tx_status = sba_tx_status;

	/* Set interrupt routine */
	if (dma_has_cap(DMA_INTERRUPT, dma_dev->cap_mask))
		dma_dev->device_prep_dma_interrupt = sba_prep_dma_interrupt;

	/* Set memcpy routine */
	if (dma_has_cap(DMA_MEMCPY, dma_dev->cap_mask))
		dma_dev->device_prep_dma_memcpy = sba_prep_dma_memcpy;

	/* Set xor routine and capability */
	if (dma_has_cap(DMA_XOR, dma_dev->cap_mask)) {
		dma_dev->device_prep_dma_xor = sba_prep_dma_xor;
		dma_dev->max_xor = sba->max_xor_srcs;
	}

	/* Set pq routine and capability */
	if (dma_has_cap(DMA_PQ, dma_dev->cap_mask)) {
		dma_dev->device_prep_dma_pq = sba_prep_dma_pq;
		dma_set_maxpq(dma_dev, sba->max_pq_srcs, 0);
	}

	/* Initialize DMA device channel list */
	INIT_LIST_HEAD(&dma_dev->channels);
	list_add_tail(&sba->dma_chan.device_node, &dma_dev->channels);

	/* Register with Linux async DMA framework */
	ret = dma_async_device_register(dma_dev);
	if (ret) {
		dev_err(sba->dev, "async device register error %d", ret);
		return ret;
	}

	dev_info(sba->dev, "%s capabilities: %s%s%s%s\n",
		 dma_chan_name(&sba->dma_chan),
		 dma_has_cap(DMA_INTERRUPT, dma_dev->cap_mask) ? "interrupt " : "",
		 dma_has_cap(DMA_MEMCPY, dma_dev->cap_mask) ? "memcpy " : "",
		 dma_has_cap(DMA_XOR, dma_dev->cap_mask) ? "xor " : "",
		 dma_has_cap(DMA_PQ, dma_dev->cap_mask) ? "pq " : "");

	return 0;
}
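
/*
 * Probe: pick the SBA version from the DT compatible string and derive
 * the request/command limits, request the mailbox channel and look up
 * the underlying mailbox device, preallocate channel resources, create
 * the optional debugfs stats entry, then register with the Linux async
 * DMA framework.
 */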
static int sba_probe(struct platform_device *pdev)
{
	int ret = 0;
	struct sba_device *sba;
	struct platform_device *mbox_pdev;
	struct of_phandle_args args;

	/* Allocate main SBA struct */
	sba = devm_kzalloc(&pdev->dev, sizeof(*sba), GFP_KERNEL);
	if (!sba)
		return -ENOMEM;

	sba->dev = &pdev->dev;
	platform_set_drvdata(pdev, sba);

	/* Number of mailbox channels should be at least 1 */
	ret = of_count_phandle_with_args(pdev->dev.of_node,
					 "mboxes", "#mbox-cells");
	if (ret <= 0)
		return -ENODEV;

	/* Determine SBA version from DT compatible string */
	if (of_device_is_compatible(sba->dev->of_node, "brcm,iproc-sba"))
		sba->ver = SBA_VER_1;
	else if (of_device_is_compatible(sba->dev->of_node,
					 "brcm,iproc-sba-v2"))
		sba->ver = SBA_VER_2;
	else
		return -ENODEV;

	/* Derived Configuration parameters */
	switch (sba->ver) {
	case SBA_VER_1:
		sba->hw_buf_size = 4096;
		sba->hw_resp_size = 8;
		sba->max_pq_coefs = 6;
		sba->max_pq_srcs = 6;
		break;
	case SBA_VER_2:
		sba->hw_buf_size = 4096;
		sba->hw_resp_size = 8;
		sba->max_pq_coefs = 30;
		/*
		 * We could support max_pq_srcs == max_pq_coefs, but we
		 * are limited by the number of SBA commands that we can
		 * fit in one message for the underlying ring manager HW.
		 */
		sba->max_pq_srcs = 12;
		break;
	default:
		return -EINVAL;
	}
	sba->max_req = SBA_MAX_REQ_PER_MBOX_CHANNEL;
	sba->max_cmd_per_req = sba->max_pq_srcs + 3;
	sba->max_xor_srcs = sba->max_cmd_per_req - 1;
	sba->max_resp_pool_size = sba->max_req * sba->hw_resp_size;
	sba->max_cmds_pool_size = sba->max_req *
				  sba->max_cmd_per_req * sizeof(u64);

	/* Setup mailbox client */
	sba->client.dev = &pdev->dev;
	sba->client.rx_callback = sba_receive_message;
	sba->client.tx_block = false;
	sba->client.knows_txdone = true;
	sba->client.tx_tout = 0;

	/* Request mailbox channel */
	sba->mchan = mbox_request_channel(&sba->client, 0);
	if (IS_ERR(sba->mchan)) {
		/* Nothing to unwind yet; do not pass an ERR_PTR to mbox_free_channel() */
		return PTR_ERR(sba->mchan);
	}

	/* Find out the underlying mailbox device */
	ret = of_parse_phandle_with_args(pdev->dev.of_node,
					 "mboxes", "#mbox-cells", 0, &args);
	if (ret)
		goto fail_free_mchan;
	mbox_pdev = of_find_device_by_node(args.np);
	of_node_put(args.np);
	if (!mbox_pdev) {
		ret = -ENODEV;
		goto fail_free_mchan;
	}
	sba->mbox_dev = &mbox_pdev->dev;

	/* Preallocate channel resources */
	ret = sba_prealloc_channel_resources(sba);
	if (ret)
		goto fail_free_mchan;

	/* Check availability of debugfs */
	if (!debugfs_initialized())
		goto skip_debugfs;

	/* Create debugfs root entry */
	sba->root = debugfs_create_dir(dev_name(sba->dev), NULL);
	if (IS_ERR_OR_NULL(sba->root)) {
		dev_err(sba->dev, "failed to create debugfs root entry\n");
		sba->root = NULL;
		goto skip_debugfs;
	}

	/* Create debugfs stats entry */
	sba->stats = debugfs_create_devm_seqfile(sba->dev, "stats", sba->root,
						 sba_debugfs_stats_show);
	if (IS_ERR_OR_NULL(sba->stats))
		dev_err(sba->dev, "failed to create debugfs stats file\n");
skip_debugfs:

	/* Register DMA device with Linux async framework */
	ret = sba_async_register(sba);
	if (ret)
		goto fail_free_resources;

	/* Print device info */
	dev_info(sba->dev, "%s using SBAv%d mailbox channel from %s",
		 dma_chan_name(&sba->dma_chan), sba->ver + 1,
		 dev_name(sba->mbox_dev));

	return 0;

fail_free_resources:
	debugfs_remove_recursive(sba->root);
	sba_freeup_channel_resources(sba);
fail_free_mchan:
	mbox_free_channel(sba->mchan);
	return ret;
}

static int sba_remove(struct platform_device *pdev)
{
	struct sba_device *sba = platform_get_drvdata(pdev);

	dma_async_device_unregister(&sba->dma_dev);
	debugfs_remove_recursive(sba->root);
	sba_freeup_channel_resources(sba);
	mbox_free_channel(sba->mchan);

	return 0;
}
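
/* Device tree compatibles handled by this driver (SBA v1 and v2) */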
static const struct of_device_id sba_of_match[] = {
	{ .compatible = "brcm,iproc-sba", },
	{ .compatible = "brcm,iproc-sba-v2", },
	{},
};
MODULE_DEVICE_TABLE(of, sba_of_match);

static struct platform_driver sba_driver = {
	.probe = sba_probe,
	.remove = sba_remove,
	.driver = {
		.name = "bcm-sba-raid",
		.of_match_table = sba_of_match,
	},
};
module_platform_driver(sba_driver);

MODULE_DESCRIPTION("Broadcom SBA RAID driver");
MODULE_AUTHOR("Anup Patel <anup.patel@broadcom.com>");
MODULE_LICENSE("GPL v2");