dma_v3.c

  1. /*
  2. * This file is provided under a dual BSD/GPLv2 license. When using or
  3. * redistributing this file, you may do so under either license.
  4. *
  5. * GPL LICENSE SUMMARY
  6. *
  7. * Copyright(c) 2004 - 2009 Intel Corporation. All rights reserved.
  8. *
  9. * This program is free software; you can redistribute it and/or modify it
  10. * under the terms and conditions of the GNU General Public License,
  11. * version 2, as published by the Free Software Foundation.
  12. *
  13. * This program is distributed in the hope that it will be useful, but WITHOUT
  14. * ANY WARRANTY; without even the implied warranty of MERCHANTABILITY or
  15. * FITNESS FOR A PARTICULAR PURPOSE. See the GNU General Public License for
  16. * more details.
  17. *
  18. * You should have received a copy of the GNU General Public License along with
  19. * this program; if not, write to the Free Software Foundation, Inc.,
  20. * 51 Franklin St - Fifth Floor, Boston, MA 02110-1301 USA.
  21. *
  22. * The full GNU General Public License is included in this distribution in
  23. * the file called "COPYING".
  24. *
  25. * BSD LICENSE
  26. *
  27. * Copyright(c) 2004-2009 Intel Corporation. All rights reserved.
  28. *
  29. * Redistribution and use in source and binary forms, with or without
  30. * modification, are permitted provided that the following conditions are met:
  31. *
  32. * * Redistributions of source code must retain the above copyright
  33. * notice, this list of conditions and the following disclaimer.
  34. * * Redistributions in binary form must reproduce the above copyright
  35. * notice, this list of conditions and the following disclaimer in
  36. * the documentation and/or other materials provided with the
  37. * distribution.
  38. * * Neither the name of Intel Corporation nor the names of its
  39. * contributors may be used to endorse or promote products derived
  40. * from this software without specific prior written permission.
  41. *
  42. * THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS "AS IS"
  43. * AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE
  44. * IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR PURPOSE
  45. * ARE DISCLAIMED. IN NO EVENT SHALL THE COPYRIGHT OWNER OR CONTRIBUTORS BE
  46. * LIABLE FOR ANY DIRECT, INDIRECT, INCIDENTAL, SPECIAL, EXEMPLARY, OR
  47. * CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT LIMITED TO, PROCUREMENT OF
  48. * SUBSTITUTE GOODS OR SERVICES; LOSS OF USE, DATA, OR PROFITS; OR BUSINESS
  49. * INTERRUPTION) HOWEVER CAUSED AND ON ANY THEORY OF LIABILITY, WHETHER IN
  50. * CONTRACT, STRICT LIABILITY, OR TORT (INCLUDING NEGLIGENCE OR OTHERWISE)
  51. * ARISING IN ANY WAY OUT OF THE USE OF THIS SOFTWARE, EVEN IF ADVISED OF THE
  52. * POSSIBILITY OF SUCH DAMAGE.
  53. */
  54. /*
  55. * Support routines for v3+ hardware
  56. */
  57. #include <linux/module.h>
  58. #include <linux/pci.h>
  59. #include <linux/gfp.h>
  60. #include <linux/dmaengine.h>
  61. #include <linux/dma-mapping.h>
  62. #include <linux/prefetch.h>
  63. #include "../dmaengine.h"
  64. #include "registers.h"
  65. #include "hw.h"
  66. #include "dma.h"
  67. #include "dma_v2.h"
  68. extern struct kmem_cache *ioat3_sed_cache;
  69. /* ioat hardware assumes at least two sources for raid operations */
  70. #define src_cnt_to_sw(x) ((x) + 2)
  71. #define src_cnt_to_hw(x) ((x) - 2)
  72. #define ndest_to_sw(x) ((x) + 1)
  73. #define ndest_to_hw(x) ((x) - 1)
  74. #define src16_cnt_to_sw(x) ((x) + 9)
  75. #define src16_cnt_to_hw(x) ((x) - 9)
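/* Example of the encoding above: a 5-source xor is programmed with
 * src_cnt_to_hw(5) == 3 in the descriptor and decoded again with
 * src_cnt_to_sw(3) == 5; the 16-source pq variants use an offset of 9
 * in the same way.
 */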
  76. /* provide a lookup table for setting the source address in the base or
  77. * extended descriptor of an xor or pq descriptor
  78. */
  79. static const u8 xor_idx_to_desc = 0xe0;
  80. static const u8 xor_idx_to_field[] = { 1, 4, 5, 6, 7, 0, 1, 2 };
  81. static const u8 pq_idx_to_desc = 0xf8;
  82. static const u8 pq16_idx_to_desc[] = { 0, 0, 1, 1, 1, 1, 1, 1, 1,
  83. 2, 2, 2, 2, 2, 2, 2 };
  84. static const u8 pq_idx_to_field[] = { 1, 4, 5, 0, 1, 2, 4, 5 };
  85. static const u8 pq16_idx_to_field[] = { 1, 4, 1, 2, 3, 4, 5, 6, 7,
  86. 0, 1, 2, 3, 4, 5, 6 };
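/* How the tables above are used: for source index idx, bit idx of
 * xor_idx_to_desc (or pq_idx_to_desc) selects the base (0) or extended (1)
 * descriptor, and xor_idx_to_field[]/pq_idx_to_field[] gives the u64 slot in
 * that descriptor's raw field[] array.  pq16_idx_to_desc[] instead indexes a
 * three-entry descriptor array: the base pq descriptor plus the two 64-byte
 * halves of the super extended descriptor (sed).
 */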
  87. static void ioat3_eh(struct ioat2_dma_chan *ioat);
  88. static void xor_set_src(struct ioat_raw_descriptor *descs[2],
  89. dma_addr_t addr, u32 offset, int idx)
  90. {
  91. struct ioat_raw_descriptor *raw = descs[xor_idx_to_desc >> idx & 1];
  92. raw->field[xor_idx_to_field[idx]] = addr + offset;
  93. }
  94. static dma_addr_t pq_get_src(struct ioat_raw_descriptor *descs[2], int idx)
  95. {
  96. struct ioat_raw_descriptor *raw = descs[pq_idx_to_desc >> idx & 1];
  97. return raw->field[pq_idx_to_field[idx]];
  98. }
  99. static dma_addr_t pq16_get_src(struct ioat_raw_descriptor *desc[3], int idx)
  100. {
  101. struct ioat_raw_descriptor *raw = desc[pq16_idx_to_desc[idx]];
  102. return raw->field[pq16_idx_to_field[idx]];
  103. }
  104. static void pq_set_src(struct ioat_raw_descriptor *descs[2],
  105. dma_addr_t addr, u32 offset, u8 coef, int idx)
  106. {
  107. struct ioat_pq_descriptor *pq = (struct ioat_pq_descriptor *) descs[0];
  108. struct ioat_raw_descriptor *raw = descs[pq_idx_to_desc >> idx & 1];
  109. raw->field[pq_idx_to_field[idx]] = addr + offset;
  110. pq->coef[idx] = coef;
  111. }
  112. static bool is_jf_ioat(struct pci_dev *pdev)
  113. {
  114. switch (pdev->device) {
  115. case PCI_DEVICE_ID_INTEL_IOAT_JSF0:
  116. case PCI_DEVICE_ID_INTEL_IOAT_JSF1:
  117. case PCI_DEVICE_ID_INTEL_IOAT_JSF2:
  118. case PCI_DEVICE_ID_INTEL_IOAT_JSF3:
  119. case PCI_DEVICE_ID_INTEL_IOAT_JSF4:
  120. case PCI_DEVICE_ID_INTEL_IOAT_JSF5:
  121. case PCI_DEVICE_ID_INTEL_IOAT_JSF6:
  122. case PCI_DEVICE_ID_INTEL_IOAT_JSF7:
  123. case PCI_DEVICE_ID_INTEL_IOAT_JSF8:
  124. case PCI_DEVICE_ID_INTEL_IOAT_JSF9:
  125. return true;
  126. default:
  127. return false;
  128. }
  129. }
  130. static bool is_snb_ioat(struct pci_dev *pdev)
  131. {
  132. switch (pdev->device) {
  133. case PCI_DEVICE_ID_INTEL_IOAT_SNB0:
  134. case PCI_DEVICE_ID_INTEL_IOAT_SNB1:
  135. case PCI_DEVICE_ID_INTEL_IOAT_SNB2:
  136. case PCI_DEVICE_ID_INTEL_IOAT_SNB3:
  137. case PCI_DEVICE_ID_INTEL_IOAT_SNB4:
  138. case PCI_DEVICE_ID_INTEL_IOAT_SNB5:
  139. case PCI_DEVICE_ID_INTEL_IOAT_SNB6:
  140. case PCI_DEVICE_ID_INTEL_IOAT_SNB7:
  141. case PCI_DEVICE_ID_INTEL_IOAT_SNB8:
  142. case PCI_DEVICE_ID_INTEL_IOAT_SNB9:
  143. return true;
  144. default:
  145. return false;
  146. }
  147. }
  148. static bool is_ivb_ioat(struct pci_dev *pdev)
  149. {
  150. switch (pdev->device) {
  151. case PCI_DEVICE_ID_INTEL_IOAT_IVB0:
  152. case PCI_DEVICE_ID_INTEL_IOAT_IVB1:
  153. case PCI_DEVICE_ID_INTEL_IOAT_IVB2:
  154. case PCI_DEVICE_ID_INTEL_IOAT_IVB3:
  155. case PCI_DEVICE_ID_INTEL_IOAT_IVB4:
  156. case PCI_DEVICE_ID_INTEL_IOAT_IVB5:
  157. case PCI_DEVICE_ID_INTEL_IOAT_IVB6:
  158. case PCI_DEVICE_ID_INTEL_IOAT_IVB7:
  159. case PCI_DEVICE_ID_INTEL_IOAT_IVB8:
  160. case PCI_DEVICE_ID_INTEL_IOAT_IVB9:
  161. return true;
  162. default:
  163. return false;
  164. }
  165. }
  166. static bool is_hsw_ioat(struct pci_dev *pdev)
  167. {
  168. switch (pdev->device) {
  169. case PCI_DEVICE_ID_INTEL_IOAT_HSW0:
  170. case PCI_DEVICE_ID_INTEL_IOAT_HSW1:
  171. case PCI_DEVICE_ID_INTEL_IOAT_HSW2:
  172. case PCI_DEVICE_ID_INTEL_IOAT_HSW3:
  173. case PCI_DEVICE_ID_INTEL_IOAT_HSW4:
  174. case PCI_DEVICE_ID_INTEL_IOAT_HSW5:
  175. case PCI_DEVICE_ID_INTEL_IOAT_HSW6:
  176. case PCI_DEVICE_ID_INTEL_IOAT_HSW7:
  177. case PCI_DEVICE_ID_INTEL_IOAT_HSW8:
  178. case PCI_DEVICE_ID_INTEL_IOAT_HSW9:
  179. return true;
  180. default:
  181. return false;
  182. }
  183. }
  184. static bool is_xeon_cb32(struct pci_dev *pdev)
  185. {
  186. return is_jf_ioat(pdev) || is_snb_ioat(pdev) || is_ivb_ioat(pdev) ||
  187. is_hsw_ioat(pdev);
  188. }
  189. static bool is_bwd_ioat(struct pci_dev *pdev)
  190. {
  191. switch (pdev->device) {
  192. case PCI_DEVICE_ID_INTEL_IOAT_BWD0:
  193. case PCI_DEVICE_ID_INTEL_IOAT_BWD1:
  194. case PCI_DEVICE_ID_INTEL_IOAT_BWD2:
  195. case PCI_DEVICE_ID_INTEL_IOAT_BWD3:
  196. return true;
  197. default:
  198. return false;
  199. }
  200. }
  201. static bool is_bwd_noraid(struct pci_dev *pdev)
  202. {
  203. switch (pdev->device) {
  204. case PCI_DEVICE_ID_INTEL_IOAT_BWD2:
  205. case PCI_DEVICE_ID_INTEL_IOAT_BWD3:
  206. return true;
  207. default:
  208. return false;
  209. }
  210. }
  211. static void pq16_set_src(struct ioat_raw_descriptor *desc[3],
  212. dma_addr_t addr, u32 offset, u8 coef, unsigned idx)
  213. {
  214. struct ioat_pq_descriptor *pq = (struct ioat_pq_descriptor *)desc[0];
  215. struct ioat_pq16a_descriptor *pq16 =
  216. (struct ioat_pq16a_descriptor *)desc[1];
  217. struct ioat_raw_descriptor *raw = desc[pq16_idx_to_desc[idx]];
  218. raw->field[pq16_idx_to_field[idx]] = addr + offset;
  219. if (idx < 8)
  220. pq->coef[idx] = coef;
  221. else
  222. pq16->coef[idx - 8] = coef;
  223. }
  224. static struct ioat_sed_ent *
  225. ioat3_alloc_sed(struct ioatdma_device *device, unsigned int hw_pool)
  226. {
  227. struct ioat_sed_ent *sed;
  228. gfp_t flags = __GFP_ZERO | GFP_ATOMIC;
  229. sed = kmem_cache_alloc(ioat3_sed_cache, flags);
  230. if (!sed)
  231. return NULL;
  232. sed->hw_pool = hw_pool;
  233. sed->hw = dma_pool_alloc(device->sed_hw_pool[hw_pool],
  234. flags, &sed->dma);
  235. if (!sed->hw) {
  236. kmem_cache_free(ioat3_sed_cache, sed);
  237. return NULL;
  238. }
  239. return sed;
  240. }
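/* hw_pool selects one of the MAX_SED_POOLS dma pools created in
 * ioat3_dma_probe(), each sized SED_SIZE * (hw_pool + 1) bytes.
 */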
  241. static void ioat3_free_sed(struct ioatdma_device *device, struct ioat_sed_ent *sed)
  242. {
  243. if (!sed)
  244. return;
  245. dma_pool_free(device->sed_hw_pool[sed->hw_pool], sed->hw, sed->dma);
  246. kmem_cache_free(ioat3_sed_cache, sed);
  247. }
  248. static bool desc_has_ext(struct ioat_ring_ent *desc)
  249. {
  250. struct ioat_dma_descriptor *hw = desc->hw;
  251. if (hw->ctl_f.op == IOAT_OP_XOR ||
  252. hw->ctl_f.op == IOAT_OP_XOR_VAL) {
  253. struct ioat_xor_descriptor *xor = desc->xor;
  254. if (src_cnt_to_sw(xor->ctl_f.src_cnt) > 5)
  255. return true;
  256. } else if (hw->ctl_f.op == IOAT_OP_PQ ||
  257. hw->ctl_f.op == IOAT_OP_PQ_VAL) {
  258. struct ioat_pq_descriptor *pq = desc->pq;
  259. if (src_cnt_to_sw(pq->ctl_f.src_cnt) > 3)
  260. return true;
  261. }
  262. return false;
  263. }
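/* The thresholds above reflect how many source addresses fit in a single
 * hardware descriptor: an xor descriptor holds up to 5 sources and a pq
 * descriptor up to 3, so larger operations consume a second (extended)
 * ring entry.
 */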
  264. static u64 ioat3_get_current_completion(struct ioat_chan_common *chan)
  265. {
  266. u64 phys_complete;
  267. u64 completion;
  268. completion = *chan->completion;
  269. phys_complete = ioat_chansts_to_addr(completion);
  270. dev_dbg(to_dev(chan), "%s: phys_complete: %#llx\n", __func__,
  271. (unsigned long long) phys_complete);
  272. return phys_complete;
  273. }
  274. static bool ioat3_cleanup_preamble(struct ioat_chan_common *chan,
  275. u64 *phys_complete)
  276. {
  277. *phys_complete = ioat3_get_current_completion(chan);
  278. if (*phys_complete == chan->last_completion)
  279. return false;
  280. clear_bit(IOAT_COMPLETION_ACK, &chan->state);
  281. mod_timer(&chan->timer, jiffies + COMPLETION_TIMEOUT);
  282. return true;
  283. }
  284. static void
  285. desc_get_errstat(struct ioat2_dma_chan *ioat, struct ioat_ring_ent *desc)
  286. {
  287. struct ioat_dma_descriptor *hw = desc->hw;
  288. switch (hw->ctl_f.op) {
  289. case IOAT_OP_PQ_VAL:
  290. case IOAT_OP_PQ_VAL_16S:
  291. {
  292. struct ioat_pq_descriptor *pq = desc->pq;
  293. /* check if an error was written back */
  294. if (!pq->dwbes_f.wbes)
  295. return;
  296. /* need to set a chanerr var for checking to clear later */
  297. if (pq->dwbes_f.p_val_err)
  298. *desc->result |= SUM_CHECK_P_RESULT;
  299. if (pq->dwbes_f.q_val_err)
  300. *desc->result |= SUM_CHECK_Q_RESULT;
  301. return;
  302. }
  303. default:
  304. return;
  305. }
  306. }
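/* With descriptor write back error status (IOAT_CAP_DWBES) the P/Q validate
 * result is reported in the completed descriptor itself; __cleanup() calls
 * the helper above to fold it into desc->result, and ioat3_intr_quirk()
 * masks the corresponding error interrupts.
 */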
  307. /**
  308. * __cleanup - reclaim used descriptors
  309. * @ioat: channel (ring) to clean
  310. *
  311. * The difference from the dma_v2.c __cleanup() is that this routine
  312. * handles extended descriptors and dma-unmapping raid operations.
  313. */
  314. static void __cleanup(struct ioat2_dma_chan *ioat, dma_addr_t phys_complete)
  315. {
  316. struct ioat_chan_common *chan = &ioat->base;
  317. struct ioatdma_device *device = chan->device;
  318. struct ioat_ring_ent *desc;
  319. bool seen_current = false;
  320. int idx = ioat->tail, i;
  321. u16 active;
  322. dev_dbg(to_dev(chan), "%s: head: %#x tail: %#x issued: %#x\n",
  323. __func__, ioat->head, ioat->tail, ioat->issued);
  324. /*
  325. * At restart of the channel, the completion address and the
  326. * channel status will be 0 due to starting a new chain. Since
  327. * it's a new chain and the first descriptor "fails", there is
  328. * nothing to clean up. We do not want to reap the entire submitted
  329. * chain due to this 0 address value and then BUG.
  330. */
  331. if (!phys_complete)
  332. return;
  333. active = ioat2_ring_active(ioat);
  334. for (i = 0; i < active && !seen_current; i++) {
  335. struct dma_async_tx_descriptor *tx;
  336. smp_read_barrier_depends();
  337. prefetch(ioat2_get_ring_ent(ioat, idx + i + 1));
  338. desc = ioat2_get_ring_ent(ioat, idx + i);
  339. dump_desc_dbg(ioat, desc);
  340. /* set err stat if we are using dwbes */
  341. if (device->cap & IOAT_CAP_DWBES)
  342. desc_get_errstat(ioat, desc);
  343. tx = &desc->txd;
  344. if (tx->cookie) {
  345. dma_cookie_complete(tx);
  346. dma_descriptor_unmap(tx);
  347. if (tx->callback) {
  348. tx->callback(tx->callback_param);
  349. tx->callback = NULL;
  350. }
  351. }
  352. if (tx->phys == phys_complete)
  353. seen_current = true;
  354. /* skip extended descriptors */
  355. if (desc_has_ext(desc)) {
  356. BUG_ON(i + 1 >= active);
  357. i++;
  358. }
  359. /* cleanup super extended descriptors */
  360. if (desc->sed) {
  361. ioat3_free_sed(device, desc->sed);
  362. desc->sed = NULL;
  363. }
  364. }
  365. smp_mb(); /* finish all descriptor reads before incrementing tail */
  366. ioat->tail = idx + i;
  367. BUG_ON(active && !seen_current); /* no active descs have written a completion? */
  368. chan->last_completion = phys_complete;
  369. if (active - i == 0) {
  370. dev_dbg(to_dev(chan), "%s: cancel completion timeout\n",
  371. __func__);
  372. clear_bit(IOAT_COMPLETION_PENDING, &chan->state);
  373. mod_timer(&chan->timer, jiffies + IDLE_TIMEOUT);
  374. }
  375. /* 5 microsecond delay per pending descriptor */
  376. writew(min((5 * (active - i)), IOAT_INTRDELAY_MASK),
  377. chan->device->reg_base + IOAT_INTRDELAY_OFFSET);
  378. }
  379. static void ioat3_cleanup(struct ioat2_dma_chan *ioat)
  380. {
  381. struct ioat_chan_common *chan = &ioat->base;
  382. u64 phys_complete;
  383. spin_lock_bh(&chan->cleanup_lock);
  384. if (ioat3_cleanup_preamble(chan, &phys_complete))
  385. __cleanup(ioat, phys_complete);
  386. if (is_ioat_halted(*chan->completion)) {
  387. u32 chanerr = readl(chan->reg_base + IOAT_CHANERR_OFFSET);
  388. if (chanerr & IOAT_CHANERR_HANDLE_MASK) {
  389. mod_timer(&chan->timer, jiffies + IDLE_TIMEOUT);
  390. ioat3_eh(ioat);
  391. }
  392. }
  393. spin_unlock_bh(&chan->cleanup_lock);
  394. }
  395. static void ioat3_cleanup_event(unsigned long data)
  396. {
  397. struct ioat2_dma_chan *ioat = to_ioat2_chan((void *) data);
  398. struct ioat_chan_common *chan = &ioat->base;
  399. ioat3_cleanup(ioat);
  400. if (!test_bit(IOAT_RUN, &chan->state))
  401. return;
  402. writew(IOAT_CHANCTRL_RUN, ioat->base.reg_base + IOAT_CHANCTRL_OFFSET);
  403. }
  404. static void ioat3_restart_channel(struct ioat2_dma_chan *ioat)
  405. {
  406. struct ioat_chan_common *chan = &ioat->base;
  407. u64 phys_complete;
  408. ioat2_quiesce(chan, 0);
  409. if (ioat3_cleanup_preamble(chan, &phys_complete))
  410. __cleanup(ioat, phys_complete);
  411. __ioat2_restart_chan(ioat);
  412. }
  413. static void ioat3_eh(struct ioat2_dma_chan *ioat)
  414. {
  415. struct ioat_chan_common *chan = &ioat->base;
  416. struct pci_dev *pdev = to_pdev(chan);
  417. struct ioat_dma_descriptor *hw;
  418. u64 phys_complete;
  419. struct ioat_ring_ent *desc;
  420. u32 err_handled = 0;
  421. u32 chanerr_int;
  422. u32 chanerr;
  423. /* cleanup so tail points to descriptor that caused the error */
  424. if (ioat3_cleanup_preamble(chan, &phys_complete))
  425. __cleanup(ioat, phys_complete);
  426. chanerr = readl(chan->reg_base + IOAT_CHANERR_OFFSET);
  427. pci_read_config_dword(pdev, IOAT_PCI_CHANERR_INT_OFFSET, &chanerr_int);
  428. dev_dbg(to_dev(chan), "%s: error = %x:%x\n",
  429. __func__, chanerr, chanerr_int);
  430. desc = ioat2_get_ring_ent(ioat, ioat->tail);
  431. hw = desc->hw;
  432. dump_desc_dbg(ioat, desc);
  433. switch (hw->ctl_f.op) {
  434. case IOAT_OP_XOR_VAL:
  435. if (chanerr & IOAT_CHANERR_XOR_P_OR_CRC_ERR) {
  436. *desc->result |= SUM_CHECK_P_RESULT;
  437. err_handled |= IOAT_CHANERR_XOR_P_OR_CRC_ERR;
  438. }
  439. break;
  440. case IOAT_OP_PQ_VAL:
  441. case IOAT_OP_PQ_VAL_16S:
  442. if (chanerr & IOAT_CHANERR_XOR_P_OR_CRC_ERR) {
  443. *desc->result |= SUM_CHECK_P_RESULT;
  444. err_handled |= IOAT_CHANERR_XOR_P_OR_CRC_ERR;
  445. }
  446. if (chanerr & IOAT_CHANERR_XOR_Q_ERR) {
  447. *desc->result |= SUM_CHECK_Q_RESULT;
  448. err_handled |= IOAT_CHANERR_XOR_Q_ERR;
  449. }
  450. break;
  451. }
  452. /* fault on unhandled error or spurious halt */
  453. if (chanerr ^ err_handled || chanerr == 0) {
  454. dev_err(to_dev(chan), "%s: fatal error (%x:%x)\n",
  455. __func__, chanerr, err_handled);
  456. BUG();
  457. }
  458. writel(chanerr, chan->reg_base + IOAT_CHANERR_OFFSET);
  459. pci_write_config_dword(pdev, IOAT_PCI_CHANERR_INT_OFFSET, chanerr_int);
  460. /* mark faulting descriptor as complete */
  461. *chan->completion = desc->txd.phys;
  462. spin_lock_bh(&ioat->prep_lock);
  463. ioat3_restart_channel(ioat);
  464. spin_unlock_bh(&ioat->prep_lock);
  465. }
  466. static void check_active(struct ioat2_dma_chan *ioat)
  467. {
  468. struct ioat_chan_common *chan = &ioat->base;
  469. if (ioat2_ring_active(ioat)) {
  470. mod_timer(&chan->timer, jiffies + COMPLETION_TIMEOUT);
  471. return;
  472. }
  473. if (test_and_clear_bit(IOAT_CHAN_ACTIVE, &chan->state))
  474. mod_timer(&chan->timer, jiffies + IDLE_TIMEOUT);
  475. else if (ioat->alloc_order > ioat_get_alloc_order()) {
  476. /* if the ring is idle, empty, and oversized, try to step
  477. * down the size
  478. */
  479. reshape_ring(ioat, ioat->alloc_order - 1);
  480. /* keep shrinking until we get back to our minimum
  481. * default size
  482. */
  483. if (ioat->alloc_order > ioat_get_alloc_order())
  484. mod_timer(&chan->timer, jiffies + IDLE_TIMEOUT);
  485. }
  486. }
  487. static void ioat3_timer_event(unsigned long data)
  488. {
  489. struct ioat2_dma_chan *ioat = to_ioat2_chan((void *) data);
  490. struct ioat_chan_common *chan = &ioat->base;
  491. dma_addr_t phys_complete;
  492. u64 status;
  493. status = ioat_chansts(chan);
  494. /* when halted due to errors check for channel
  495. * programming errors before advancing the completion state
  496. */
  497. if (is_ioat_halted(status)) {
  498. u32 chanerr;
  499. chanerr = readl(chan->reg_base + IOAT_CHANERR_OFFSET);
  500. dev_err(to_dev(chan), "%s: Channel halted (%x)\n",
  501. __func__, chanerr);
  502. if (test_bit(IOAT_RUN, &chan->state))
  503. BUG_ON(is_ioat_bug(chanerr));
  504. else /* we never got off the ground */
  505. return;
  506. }
  507. /* if we haven't made progress and we have already
  508. * acknowledged a pending completion once, then be more
  509. * forceful with a restart
  510. */
  511. spin_lock_bh(&chan->cleanup_lock);
  512. if (ioat_cleanup_preamble(chan, &phys_complete))
  513. __cleanup(ioat, phys_complete);
  514. else if (test_bit(IOAT_COMPLETION_ACK, &chan->state)) {
  515. spin_lock_bh(&ioat->prep_lock);
  516. ioat3_restart_channel(ioat);
  517. spin_unlock_bh(&ioat->prep_lock);
  518. spin_unlock_bh(&chan->cleanup_lock);
  519. return;
  520. } else {
  521. set_bit(IOAT_COMPLETION_ACK, &chan->state);
  522. mod_timer(&chan->timer, jiffies + COMPLETION_TIMEOUT);
  523. }
  524. if (ioat2_ring_active(ioat))
  525. mod_timer(&chan->timer, jiffies + COMPLETION_TIMEOUT);
  526. else {
  527. spin_lock_bh(&ioat->prep_lock);
  528. check_active(ioat);
  529. spin_unlock_bh(&ioat->prep_lock);
  530. }
  531. spin_unlock_bh(&chan->cleanup_lock);
  532. }
  533. static enum dma_status
  534. ioat3_tx_status(struct dma_chan *c, dma_cookie_t cookie,
  535. struct dma_tx_state *txstate)
  536. {
  537. struct ioat2_dma_chan *ioat = to_ioat2_chan(c);
  538. enum dma_status ret;
  539. ret = dma_cookie_status(c, cookie, txstate);
  540. if (ret == DMA_COMPLETE)
  541. return ret;
  542. ioat3_cleanup(ioat);
  543. return dma_cookie_status(c, cookie, txstate);
  544. }
  545. static struct dma_async_tx_descriptor *
  546. __ioat3_prep_xor_lock(struct dma_chan *c, enum sum_check_flags *result,
  547. dma_addr_t dest, dma_addr_t *src, unsigned int src_cnt,
  548. size_t len, unsigned long flags)
  549. {
  550. struct ioat2_dma_chan *ioat = to_ioat2_chan(c);
  551. struct ioat_ring_ent *compl_desc;
  552. struct ioat_ring_ent *desc;
  553. struct ioat_ring_ent *ext;
  554. size_t total_len = len;
  555. struct ioat_xor_descriptor *xor;
  556. struct ioat_xor_ext_descriptor *xor_ex = NULL;
  557. struct ioat_dma_descriptor *hw;
  558. int num_descs, with_ext, idx, i;
  559. u32 offset = 0;
  560. u8 op = result ? IOAT_OP_XOR_VAL : IOAT_OP_XOR;
  561. BUG_ON(src_cnt < 2);
  562. num_descs = ioat2_xferlen_to_descs(ioat, len);
  563. /* we need 2x the number of descriptors to cover greater than 5
  564. * sources
  565. */
  566. if (src_cnt > 5) {
  567. with_ext = 1;
  568. num_descs *= 2;
  569. } else
  570. with_ext = 0;
  571. /* completion writes from the raid engine may pass completion
  572. * writes from the legacy engine, so we need one extra null
  573. * (legacy) descriptor to ensure all completion writes arrive in
  574. * order.
  575. */
  576. if (likely(num_descs) && ioat2_check_space_lock(ioat, num_descs+1) == 0)
  577. idx = ioat->head;
  578. else
  579. return NULL;
  580. i = 0;
  581. do {
  582. struct ioat_raw_descriptor *descs[2];
  583. size_t xfer_size = min_t(size_t, len, 1 << ioat->xfercap_log);
  584. int s;
  585. desc = ioat2_get_ring_ent(ioat, idx + i);
  586. xor = desc->xor;
  587. /* save a branch by unconditionally retrieving the
  588. * extended descriptor; xor_set_src() knows not to write
  589. * to it in the single descriptor case
  590. */
  591. ext = ioat2_get_ring_ent(ioat, idx + i + 1);
  592. xor_ex = ext->xor_ex;
  593. descs[0] = (struct ioat_raw_descriptor *) xor;
  594. descs[1] = (struct ioat_raw_descriptor *) xor_ex;
  595. for (s = 0; s < src_cnt; s++)
  596. xor_set_src(descs, src[s], offset, s);
  597. xor->size = xfer_size;
  598. xor->dst_addr = dest + offset;
  599. xor->ctl = 0;
  600. xor->ctl_f.op = op;
  601. xor->ctl_f.src_cnt = src_cnt_to_hw(src_cnt);
  602. len -= xfer_size;
  603. offset += xfer_size;
  604. dump_desc_dbg(ioat, desc);
  605. } while ((i += 1 + with_ext) < num_descs);
  606. /* last xor descriptor carries the unmap parameters and fence bit */
  607. desc->txd.flags = flags;
  608. desc->len = total_len;
  609. if (result)
  610. desc->result = result;
  611. xor->ctl_f.fence = !!(flags & DMA_PREP_FENCE);
  612. /* completion descriptor carries interrupt bit */
  613. compl_desc = ioat2_get_ring_ent(ioat, idx + i);
  614. compl_desc->txd.flags = flags & DMA_PREP_INTERRUPT;
  615. hw = compl_desc->hw;
  616. hw->ctl = 0;
  617. hw->ctl_f.null = 1;
  618. hw->ctl_f.int_en = !!(flags & DMA_PREP_INTERRUPT);
  619. hw->ctl_f.compl_write = 1;
  620. hw->size = NULL_DESC_BUFFER_SIZE;
  621. dump_desc_dbg(ioat, compl_desc);
  622. /* we leave the channel locked to ensure in order submission */
  623. return &compl_desc->txd;
  624. }
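/* The ring segment built above is, roughly:
 *   [xor][xor ext?] ... [xor][xor ext?] [null]
 * one descriptor (or two, with more than 5 sources) per xfercap-sized chunk
 * of the transfer, terminated by a null descriptor that carries the
 * interrupt and completion write.
 */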
  625. static struct dma_async_tx_descriptor *
  626. ioat3_prep_xor(struct dma_chan *chan, dma_addr_t dest, dma_addr_t *src,
  627. unsigned int src_cnt, size_t len, unsigned long flags)
  628. {
  629. return __ioat3_prep_xor_lock(chan, NULL, dest, src, src_cnt, len, flags);
  630. }
  631. static struct dma_async_tx_descriptor *
  632. ioat3_prep_xor_val(struct dma_chan *chan, dma_addr_t *src,
  633. unsigned int src_cnt, size_t len,
  634. enum sum_check_flags *result, unsigned long flags)
  635. {
  636. /* the cleanup routine only sets bits on validate failure, it
  637. * does not clear bits on validate success... so clear it here
  638. */
  639. *result = 0;
  640. return __ioat3_prep_xor_lock(chan, result, src[0], &src[1],
  641. src_cnt - 1, len, flags);
  642. }
  643. static void
  644. dump_pq_desc_dbg(struct ioat2_dma_chan *ioat, struct ioat_ring_ent *desc, struct ioat_ring_ent *ext)
  645. {
  646. struct device *dev = to_dev(&ioat->base);
  647. struct ioat_pq_descriptor *pq = desc->pq;
  648. struct ioat_pq_ext_descriptor *pq_ex = ext ? ext->pq_ex : NULL;
  649. struct ioat_raw_descriptor *descs[] = { (void *) pq, (void *) pq_ex };
  650. int src_cnt = src_cnt_to_sw(pq->ctl_f.src_cnt);
  651. int i;
  652. dev_dbg(dev, "desc[%d]: (%#llx->%#llx) flags: %#x"
  653. " sz: %#10.8x ctl: %#x (op: %#x int: %d compl: %d pq: '%s%s'"
  654. " src_cnt: %d)\n",
  655. desc_id(desc), (unsigned long long) desc->txd.phys,
  656. (unsigned long long) (pq_ex ? pq_ex->next : pq->next),
  657. desc->txd.flags, pq->size, pq->ctl, pq->ctl_f.op, pq->ctl_f.int_en,
  658. pq->ctl_f.compl_write,
  659. pq->ctl_f.p_disable ? "" : "p", pq->ctl_f.q_disable ? "" : "q",
  660. pq->ctl_f.src_cnt);
  661. for (i = 0; i < src_cnt; i++)
  662. dev_dbg(dev, "\tsrc[%d]: %#llx coef: %#x\n", i,
  663. (unsigned long long) pq_get_src(descs, i), pq->coef[i]);
  664. dev_dbg(dev, "\tP: %#llx\n", pq->p_addr);
  665. dev_dbg(dev, "\tQ: %#llx\n", pq->q_addr);
  666. dev_dbg(dev, "\tNEXT: %#llx\n", pq->next);
  667. }
  668. static void dump_pq16_desc_dbg(struct ioat2_dma_chan *ioat,
  669. struct ioat_ring_ent *desc)
  670. {
  671. struct device *dev = to_dev(&ioat->base);
  672. struct ioat_pq_descriptor *pq = desc->pq;
  673. struct ioat_raw_descriptor *descs[] = { (void *)pq,
  674. (void *)pq,
  675. (void *)pq };
  676. int src_cnt = src16_cnt_to_sw(pq->ctl_f.src_cnt);
  677. int i;
  678. if (desc->sed) {
  679. descs[1] = (void *)desc->sed->hw;
  680. descs[2] = (void *)desc->sed->hw + 64;
  681. }
  682. dev_dbg(dev, "desc[%d]: (%#llx->%#llx) flags: %#x"
  683. " sz: %#x ctl: %#x (op: %#x int: %d compl: %d pq: '%s%s'"
  684. " src_cnt: %d)\n",
  685. desc_id(desc), (unsigned long long) desc->txd.phys,
  686. (unsigned long long) pq->next,
  687. desc->txd.flags, pq->size, pq->ctl,
  688. pq->ctl_f.op, pq->ctl_f.int_en,
  689. pq->ctl_f.compl_write,
  690. pq->ctl_f.p_disable ? "" : "p", pq->ctl_f.q_disable ? "" : "q",
  691. pq->ctl_f.src_cnt);
  692. for (i = 0; i < src_cnt; i++) {
  693. dev_dbg(dev, "\tsrc[%d]: %#llx coef: %#x\n", i,
  694. (unsigned long long) pq16_get_src(descs, i),
  695. pq->coef[i]);
  696. }
  697. dev_dbg(dev, "\tP: %#llx\n", pq->p_addr);
  698. dev_dbg(dev, "\tQ: %#llx\n", pq->q_addr);
  699. }
  700. static struct dma_async_tx_descriptor *
  701. __ioat3_prep_pq_lock(struct dma_chan *c, enum sum_check_flags *result,
  702. const dma_addr_t *dst, const dma_addr_t *src,
  703. unsigned int src_cnt, const unsigned char *scf,
  704. size_t len, unsigned long flags)
  705. {
  706. struct ioat2_dma_chan *ioat = to_ioat2_chan(c);
  707. struct ioat_chan_common *chan = &ioat->base;
  708. struct ioatdma_device *device = chan->device;
  709. struct ioat_ring_ent *compl_desc;
  710. struct ioat_ring_ent *desc;
  711. struct ioat_ring_ent *ext;
  712. size_t total_len = len;
  713. struct ioat_pq_descriptor *pq;
  714. struct ioat_pq_ext_descriptor *pq_ex = NULL;
  715. struct ioat_dma_descriptor *hw;
  716. u32 offset = 0;
  717. u8 op = result ? IOAT_OP_PQ_VAL : IOAT_OP_PQ;
  718. int i, s, idx, with_ext, num_descs;
  719. int cb32 = (device->version < IOAT_VER_3_3) ? 1 : 0;
  720. dev_dbg(to_dev(chan), "%s\n", __func__);
  721. /* the engine requires at least two sources (we provide
  722. * at least 1 implied source in the DMA_PREP_CONTINUE case)
  723. */
  724. BUG_ON(src_cnt + dmaf_continue(flags) < 2);
  725. num_descs = ioat2_xferlen_to_descs(ioat, len);
  726. /* we need 2x the number of descriptors to cover greater than 3
  727. * sources (we need 1 extra source in the q-only continuation
  728. * case and 3 extra sources in the p+q continuation case).
  729. */
  730. if (src_cnt + dmaf_p_disabled_continue(flags) > 3 ||
  731. (dmaf_continue(flags) && !dmaf_p_disabled_continue(flags))) {
  732. with_ext = 1;
  733. num_descs *= 2;
  734. } else
  735. with_ext = 0;
  736. /* completion writes from the raid engine may pass completion
  737. * writes from the legacy engine, so we need one extra null
  738. * (legacy) descriptor to ensure all completion writes arrive in
  739. * order.
  740. */
  741. if (likely(num_descs) &&
  742. ioat2_check_space_lock(ioat, num_descs + cb32) == 0)
  743. idx = ioat->head;
  744. else
  745. return NULL;
  746. i = 0;
  747. do {
  748. struct ioat_raw_descriptor *descs[2];
  749. size_t xfer_size = min_t(size_t, len, 1 << ioat->xfercap_log);
  750. desc = ioat2_get_ring_ent(ioat, idx + i);
  751. pq = desc->pq;
  752. /* save a branch by unconditionally retrieving the
  753. * extended descriptor; pq_set_src() knows not to write
  754. * to it in the single descriptor case
  755. */
  756. ext = ioat2_get_ring_ent(ioat, idx + i + with_ext);
  757. pq_ex = ext->pq_ex;
  758. descs[0] = (struct ioat_raw_descriptor *) pq;
  759. descs[1] = (struct ioat_raw_descriptor *) pq_ex;
  760. for (s = 0; s < src_cnt; s++)
  761. pq_set_src(descs, src[s], offset, scf[s], s);
  762. /* see the comment for dma_maxpq in include/linux/dmaengine.h */
  763. if (dmaf_p_disabled_continue(flags))
  764. pq_set_src(descs, dst[1], offset, 1, s++);
  765. else if (dmaf_continue(flags)) {
  766. pq_set_src(descs, dst[0], offset, 0, s++);
  767. pq_set_src(descs, dst[1], offset, 1, s++);
  768. pq_set_src(descs, dst[1], offset, 0, s++);
  769. }
  770. pq->size = xfer_size;
  771. pq->p_addr = dst[0] + offset;
  772. pq->q_addr = dst[1] + offset;
  773. pq->ctl = 0;
  774. pq->ctl_f.op = op;
  775. /* we turn on descriptor write back error status */
  776. if (device->cap & IOAT_CAP_DWBES)
  777. pq->ctl_f.wb_en = result ? 1 : 0;
  778. pq->ctl_f.src_cnt = src_cnt_to_hw(s);
  779. pq->ctl_f.p_disable = !!(flags & DMA_PREP_PQ_DISABLE_P);
  780. pq->ctl_f.q_disable = !!(flags & DMA_PREP_PQ_DISABLE_Q);
  781. len -= xfer_size;
  782. offset += xfer_size;
  783. } while ((i += 1 + with_ext) < num_descs);
  784. /* last pq descriptor carries the unmap parameters and fence bit */
  785. desc->txd.flags = flags;
  786. desc->len = total_len;
  787. if (result)
  788. desc->result = result;
  789. pq->ctl_f.fence = !!(flags & DMA_PREP_FENCE);
  790. dump_pq_desc_dbg(ioat, desc, ext);
  791. if (!cb32) {
  792. pq->ctl_f.int_en = !!(flags & DMA_PREP_INTERRUPT);
  793. pq->ctl_f.compl_write = 1;
  794. compl_desc = desc;
  795. } else {
  796. /* completion descriptor carries interrupt bit */
  797. compl_desc = ioat2_get_ring_ent(ioat, idx + i);
  798. compl_desc->txd.flags = flags & DMA_PREP_INTERRUPT;
  799. hw = compl_desc->hw;
  800. hw->ctl = 0;
  801. hw->ctl_f.null = 1;
  802. hw->ctl_f.int_en = !!(flags & DMA_PREP_INTERRUPT);
  803. hw->ctl_f.compl_write = 1;
  804. hw->size = NULL_DESC_BUFFER_SIZE;
  805. dump_desc_dbg(ioat, compl_desc);
  806. }
  807. /* we leave the channel locked to ensure in order submission */
  808. return &compl_desc->txd;
  809. }
  810. static struct dma_async_tx_descriptor *
  811. __ioat3_prep_pq16_lock(struct dma_chan *c, enum sum_check_flags *result,
  812. const dma_addr_t *dst, const dma_addr_t *src,
  813. unsigned int src_cnt, const unsigned char *scf,
  814. size_t len, unsigned long flags)
  815. {
  816. struct ioat2_dma_chan *ioat = to_ioat2_chan(c);
  817. struct ioat_chan_common *chan = &ioat->base;
  818. struct ioatdma_device *device = chan->device;
  819. struct ioat_ring_ent *desc;
  820. size_t total_len = len;
  821. struct ioat_pq_descriptor *pq;
  822. u32 offset = 0;
  823. u8 op;
  824. int i, s, idx, num_descs;
  825. /* this function is only called with 9-16 sources */
  826. op = result ? IOAT_OP_PQ_VAL_16S : IOAT_OP_PQ_16S;
  827. dev_dbg(to_dev(chan), "%s\n", __func__);
  828. num_descs = ioat2_xferlen_to_descs(ioat, len);
  829. /*
  830. * 16 source pq is only available on cb3.3 and does not have the
  831. * completion write hw bug, so no extra null descriptor is needed.
  832. */
  833. if (num_descs && ioat2_check_space_lock(ioat, num_descs) == 0)
  834. idx = ioat->head;
  835. else
  836. return NULL;
  837. i = 0;
  838. do {
  839. struct ioat_raw_descriptor *descs[4];
  840. size_t xfer_size = min_t(size_t, len, 1 << ioat->xfercap_log);
  841. desc = ioat2_get_ring_ent(ioat, idx + i);
  842. pq = desc->pq;
  843. descs[0] = (struct ioat_raw_descriptor *) pq;
  844. desc->sed = ioat3_alloc_sed(device, (src_cnt-2) >> 3);
  845. if (!desc->sed) {
  846. dev_err(to_dev(chan),
  847. "%s: no free sed entries\n", __func__);
  848. return NULL;
  849. }
  850. pq->sed_addr = desc->sed->dma;
  851. desc->sed->parent = desc;
  852. descs[1] = (struct ioat_raw_descriptor *)desc->sed->hw;
  853. descs[2] = (void *)descs[1] + 64;
  854. for (s = 0; s < src_cnt; s++)
  855. pq16_set_src(descs, src[s], offset, scf[s], s);
  856. /* see the comment for dma_maxpq in include/linux/dmaengine.h */
  857. if (dmaf_p_disabled_continue(flags))
  858. pq16_set_src(descs, dst[1], offset, 1, s++);
  859. else if (dmaf_continue(flags)) {
  860. pq16_set_src(descs, dst[0], offset, 0, s++);
  861. pq16_set_src(descs, dst[1], offset, 1, s++);
  862. pq16_set_src(descs, dst[1], offset, 0, s++);
  863. }
  864. pq->size = xfer_size;
  865. pq->p_addr = dst[0] + offset;
  866. pq->q_addr = dst[1] + offset;
  867. pq->ctl = 0;
  868. pq->ctl_f.op = op;
  869. pq->ctl_f.src_cnt = src16_cnt_to_hw(s);
  870. /* we turn on descriptor write back error status */
  871. if (device->cap & IOAT_CAP_DWBES)
  872. pq->ctl_f.wb_en = result ? 1 : 0;
  873. pq->ctl_f.p_disable = !!(flags & DMA_PREP_PQ_DISABLE_P);
  874. pq->ctl_f.q_disable = !!(flags & DMA_PREP_PQ_DISABLE_Q);
  875. len -= xfer_size;
  876. offset += xfer_size;
  877. } while (++i < num_descs);
  878. /* last pq descriptor carries the unmap parameters and fence bit */
  879. desc->txd.flags = flags;
  880. desc->len = total_len;
  881. if (result)
  882. desc->result = result;
  883. pq->ctl_f.fence = !!(flags & DMA_PREP_FENCE);
  884. /* with cb3.3 we should be able to do completion w/o a null desc */
  885. pq->ctl_f.int_en = !!(flags & DMA_PREP_INTERRUPT);
  886. pq->ctl_f.compl_write = 1;
  887. dump_pq16_desc_dbg(ioat, desc);
  888. /* we leave the channel locked to ensure in order submission */
  889. return &desc->txd;
  890. }
  891. static int src_cnt_flags(unsigned int src_cnt, unsigned long flags)
  892. {
  893. if (dmaf_p_disabled_continue(flags))
  894. return src_cnt + 1;
  895. else if (dmaf_continue(flags))
  896. return src_cnt + 3;
  897. else
  898. return src_cnt;
  899. }
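/* Mirrors the implied sources that __ioat3_prep_pq_lock() appends for the
 * DMA_PREP_CONTINUE cases: one extra source for a q-only continuation and
 * three for a p+q continuation (see the comment for dma_maxpq in
 * include/linux/dmaengine.h).
 */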
  900. static struct dma_async_tx_descriptor *
  901. ioat3_prep_pq(struct dma_chan *chan, dma_addr_t *dst, dma_addr_t *src,
  902. unsigned int src_cnt, const unsigned char *scf, size_t len,
  903. unsigned long flags)
  904. {
  905. /* specify valid address for disabled result */
  906. if (flags & DMA_PREP_PQ_DISABLE_P)
  907. dst[0] = dst[1];
  908. if (flags & DMA_PREP_PQ_DISABLE_Q)
  909. dst[1] = dst[0];
  910. /* handle the single source multiply case from the raid6
  911. * recovery path
  912. */
  913. if ((flags & DMA_PREP_PQ_DISABLE_P) && src_cnt == 1) {
  914. dma_addr_t single_source[2];
  915. unsigned char single_source_coef[2];
  916. BUG_ON(flags & DMA_PREP_PQ_DISABLE_Q);
  917. single_source[0] = src[0];
  918. single_source[1] = src[0];
  919. single_source_coef[0] = scf[0];
  920. single_source_coef[1] = 0;
  921. return src_cnt_flags(src_cnt, flags) > 8 ?
  922. __ioat3_prep_pq16_lock(chan, NULL, dst, single_source,
  923. 2, single_source_coef, len,
  924. flags) :
  925. __ioat3_prep_pq_lock(chan, NULL, dst, single_source, 2,
  926. single_source_coef, len, flags);
  927. } else {
  928. return src_cnt_flags(src_cnt, flags) > 8 ?
  929. __ioat3_prep_pq16_lock(chan, NULL, dst, src, src_cnt,
  930. scf, len, flags) :
  931. __ioat3_prep_pq_lock(chan, NULL, dst, src, src_cnt,
  932. scf, len, flags);
  933. }
  934. }
  935. static struct dma_async_tx_descriptor *
  936. ioat3_prep_pq_val(struct dma_chan *chan, dma_addr_t *pq, dma_addr_t *src,
  937. unsigned int src_cnt, const unsigned char *scf, size_t len,
  938. enum sum_check_flags *pqres, unsigned long flags)
  939. {
  940. /* specify valid address for disabled result */
  941. if (flags & DMA_PREP_PQ_DISABLE_P)
  942. pq[0] = pq[1];
  943. if (flags & DMA_PREP_PQ_DISABLE_Q)
  944. pq[1] = pq[0];
  945. /* the cleanup routine only sets bits on validate failure, it
  946. * does not clear bits on validate success... so clear it here
  947. */
  948. *pqres = 0;
  949. return src_cnt_flags(src_cnt, flags) > 8 ?
  950. __ioat3_prep_pq16_lock(chan, pqres, pq, src, src_cnt, scf, len,
  951. flags) :
  952. __ioat3_prep_pq_lock(chan, pqres, pq, src, src_cnt, scf, len,
  953. flags);
  954. }
  955. static struct dma_async_tx_descriptor *
  956. ioat3_prep_pqxor(struct dma_chan *chan, dma_addr_t dst, dma_addr_t *src,
  957. unsigned int src_cnt, size_t len, unsigned long flags)
  958. {
  959. unsigned char scf[src_cnt];
  960. dma_addr_t pq[2];
  961. memset(scf, 0, src_cnt);
  962. pq[0] = dst;
  963. flags |= DMA_PREP_PQ_DISABLE_Q;
  964. pq[1] = dst; /* specify valid address for disabled result */
  965. return src_cnt_flags(src_cnt, flags) > 8 ?
  966. __ioat3_prep_pq16_lock(chan, NULL, pq, src, src_cnt, scf, len,
  967. flags) :
  968. __ioat3_prep_pq_lock(chan, NULL, pq, src, src_cnt, scf, len,
  969. flags);
  970. }
  971. static struct dma_async_tx_descriptor *
  972. ioat3_prep_pqxor_val(struct dma_chan *chan, dma_addr_t *src,
  973. unsigned int src_cnt, size_t len,
  974. enum sum_check_flags *result, unsigned long flags)
  975. {
  976. unsigned char scf[src_cnt];
  977. dma_addr_t pq[2];
  978. /* the cleanup routine only sets bits on validate failure, it
  979. * does not clear bits on validate success... so clear it here
  980. */
  981. *result = 0;
  982. memset(scf, 0, src_cnt);
  983. pq[0] = src[0];
  984. flags |= DMA_PREP_PQ_DISABLE_Q;
  985. pq[1] = pq[0]; /* specify valid address for disabled result */
  986. return src_cnt_flags(src_cnt, flags) > 8 ?
  987. __ioat3_prep_pq16_lock(chan, result, pq, &src[1], src_cnt - 1,
  988. scf, len, flags) :
  989. __ioat3_prep_pq_lock(chan, result, pq, &src[1], src_cnt - 1,
  990. scf, len, flags);
  991. }
  992. static struct dma_async_tx_descriptor *
  993. ioat3_prep_interrupt_lock(struct dma_chan *c, unsigned long flags)
  994. {
  995. struct ioat2_dma_chan *ioat = to_ioat2_chan(c);
  996. struct ioat_ring_ent *desc;
  997. struct ioat_dma_descriptor *hw;
  998. if (ioat2_check_space_lock(ioat, 1) == 0)
  999. desc = ioat2_get_ring_ent(ioat, ioat->head);
  1000. else
  1001. return NULL;
  1002. hw = desc->hw;
  1003. hw->ctl = 0;
  1004. hw->ctl_f.null = 1;
  1005. hw->ctl_f.int_en = 1;
  1006. hw->ctl_f.fence = !!(flags & DMA_PREP_FENCE);
  1007. hw->ctl_f.compl_write = 1;
  1008. hw->size = NULL_DESC_BUFFER_SIZE;
  1009. hw->src_addr = 0;
  1010. hw->dst_addr = 0;
  1011. desc->txd.flags = flags;
  1012. desc->len = 1;
  1013. dump_desc_dbg(ioat, desc);
  1014. /* we leave the channel locked to ensure in order submission */
  1015. return &desc->txd;
  1016. }
  1017. static void ioat3_dma_test_callback(void *dma_async_param)
  1018. {
  1019. struct completion *cmp = dma_async_param;
  1020. complete(cmp);
  1021. }
  1022. #define IOAT_NUM_SRC_TEST 6 /* must be <= 8 */
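/* Rough flow of the self test below: xor IOAT_NUM_SRC_TEST pages, each
 * filled with a distinct bit pattern, into a destination page and compare
 * against the expected pattern; then run an xor-validate over the sources
 * plus the destination (expected to report zero); finally re-zero the
 * destination and validate again (expected to report a P mismatch).
 */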
  1023. static int ioat_xor_val_self_test(struct ioatdma_device *device)
  1024. {
  1025. int i, src_idx;
  1026. struct page *dest;
  1027. struct page *xor_srcs[IOAT_NUM_SRC_TEST];
  1028. struct page *xor_val_srcs[IOAT_NUM_SRC_TEST + 1];
  1029. dma_addr_t dma_srcs[IOAT_NUM_SRC_TEST + 1];
  1030. dma_addr_t dest_dma;
  1031. struct dma_async_tx_descriptor *tx;
  1032. struct dma_chan *dma_chan;
  1033. dma_cookie_t cookie;
  1034. u8 cmp_byte = 0;
  1035. u32 cmp_word;
  1036. u32 xor_val_result;
  1037. int err = 0;
  1038. struct completion cmp;
  1039. unsigned long tmo;
  1040. struct device *dev = &device->pdev->dev;
  1041. struct dma_device *dma = &device->common;
  1042. u8 op = 0;
  1043. dev_dbg(dev, "%s\n", __func__);
  1044. if (!dma_has_cap(DMA_XOR, dma->cap_mask))
  1045. return 0;
  1046. for (src_idx = 0; src_idx < IOAT_NUM_SRC_TEST; src_idx++) {
  1047. xor_srcs[src_idx] = alloc_page(GFP_KERNEL);
  1048. if (!xor_srcs[src_idx]) {
  1049. while (src_idx--)
  1050. __free_page(xor_srcs[src_idx]);
  1051. return -ENOMEM;
  1052. }
  1053. }
  1054. dest = alloc_page(GFP_KERNEL);
  1055. if (!dest) {
  1056. while (src_idx--)
  1057. __free_page(xor_srcs[src_idx]);
  1058. return -ENOMEM;
  1059. }
  1060. /* Fill in src buffers */
  1061. for (src_idx = 0; src_idx < IOAT_NUM_SRC_TEST; src_idx++) {
  1062. u8 *ptr = page_address(xor_srcs[src_idx]);
  1063. for (i = 0; i < PAGE_SIZE; i++)
  1064. ptr[i] = (1 << src_idx);
  1065. }
  1066. for (src_idx = 0; src_idx < IOAT_NUM_SRC_TEST; src_idx++)
  1067. cmp_byte ^= (u8) (1 << src_idx);
  1068. cmp_word = (cmp_byte << 24) | (cmp_byte << 16) |
  1069. (cmp_byte << 8) | cmp_byte;
  1070. memset(page_address(dest), 0, PAGE_SIZE);
  1071. dma_chan = container_of(dma->channels.next, struct dma_chan,
  1072. device_node);
  1073. if (dma->device_alloc_chan_resources(dma_chan) < 1) {
  1074. err = -ENODEV;
  1075. goto out;
  1076. }
  1077. /* test xor */
  1078. op = IOAT_OP_XOR;
  1079. dest_dma = dma_map_page(dev, dest, 0, PAGE_SIZE, DMA_FROM_DEVICE);
  1080. if (dma_mapping_error(dev, dest_dma))
  1081. goto dma_unmap;
  1082. for (i = 0; i < IOAT_NUM_SRC_TEST; i++)
  1083. dma_srcs[i] = DMA_ERROR_CODE;
  1084. for (i = 0; i < IOAT_NUM_SRC_TEST; i++) {
  1085. dma_srcs[i] = dma_map_page(dev, xor_srcs[i], 0, PAGE_SIZE,
  1086. DMA_TO_DEVICE);
  1087. if (dma_mapping_error(dev, dma_srcs[i]))
  1088. goto dma_unmap;
  1089. }
  1090. tx = dma->device_prep_dma_xor(dma_chan, dest_dma, dma_srcs,
  1091. IOAT_NUM_SRC_TEST, PAGE_SIZE,
  1092. DMA_PREP_INTERRUPT);
  1093. if (!tx) {
  1094. dev_err(dev, "Self-test xor prep failed\n");
  1095. err = -ENODEV;
  1096. goto dma_unmap;
  1097. }
  1098. async_tx_ack(tx);
  1099. init_completion(&cmp);
  1100. tx->callback = ioat3_dma_test_callback;
  1101. tx->callback_param = &cmp;
  1102. cookie = tx->tx_submit(tx);
  1103. if (cookie < 0) {
  1104. dev_err(dev, "Self-test xor setup failed\n");
  1105. err = -ENODEV;
  1106. goto dma_unmap;
  1107. }
  1108. dma->device_issue_pending(dma_chan);
  1109. tmo = wait_for_completion_timeout(&cmp, msecs_to_jiffies(3000));
  1110. if (dma->device_tx_status(dma_chan, cookie, NULL) != DMA_COMPLETE) {
  1111. dev_err(dev, "Self-test xor timed out\n");
  1112. err = -ENODEV;
  1113. goto dma_unmap;
  1114. }
  1115. for (i = 0; i < IOAT_NUM_SRC_TEST; i++)
  1116. dma_unmap_page(dev, dma_srcs[i], PAGE_SIZE, DMA_TO_DEVICE);
  1117. dma_sync_single_for_cpu(dev, dest_dma, PAGE_SIZE, DMA_FROM_DEVICE);
  1118. for (i = 0; i < (PAGE_SIZE / sizeof(u32)); i++) {
  1119. u32 *ptr = page_address(dest);
  1120. if (ptr[i] != cmp_word) {
  1121. dev_err(dev, "Self-test xor failed compare\n");
  1122. err = -ENODEV;
  1123. goto free_resources;
  1124. }
  1125. }
  1126. dma_sync_single_for_device(dev, dest_dma, PAGE_SIZE, DMA_FROM_DEVICE);
  1127. dma_unmap_page(dev, dest_dma, PAGE_SIZE, DMA_FROM_DEVICE);
  1128. /* skip validate if the capability is not present */
  1129. if (!dma_has_cap(DMA_XOR_VAL, dma_chan->device->cap_mask))
  1130. goto free_resources;
  1131. op = IOAT_OP_XOR_VAL;
  1132. /* validate the sources with the destination page */
  1133. for (i = 0; i < IOAT_NUM_SRC_TEST; i++)
  1134. xor_val_srcs[i] = xor_srcs[i];
  1135. xor_val_srcs[i] = dest;
  1136. xor_val_result = 1;
  1137. for (i = 0; i < IOAT_NUM_SRC_TEST + 1; i++)
  1138. dma_srcs[i] = DMA_ERROR_CODE;
  1139. for (i = 0; i < IOAT_NUM_SRC_TEST + 1; i++) {
  1140. dma_srcs[i] = dma_map_page(dev, xor_val_srcs[i], 0, PAGE_SIZE,
  1141. DMA_TO_DEVICE);
  1142. if (dma_mapping_error(dev, dma_srcs[i]))
  1143. goto dma_unmap;
  1144. }
  1145. tx = dma->device_prep_dma_xor_val(dma_chan, dma_srcs,
  1146. IOAT_NUM_SRC_TEST + 1, PAGE_SIZE,
  1147. &xor_val_result, DMA_PREP_INTERRUPT);
  1148. if (!tx) {
  1149. dev_err(dev, "Self-test zero prep failed\n");
  1150. err = -ENODEV;
  1151. goto dma_unmap;
  1152. }
  1153. async_tx_ack(tx);
  1154. init_completion(&cmp);
  1155. tx->callback = ioat3_dma_test_callback;
  1156. tx->callback_param = &cmp;
  1157. cookie = tx->tx_submit(tx);
  1158. if (cookie < 0) {
  1159. dev_err(dev, "Self-test zero setup failed\n");
  1160. err = -ENODEV;
  1161. goto dma_unmap;
  1162. }
  1163. dma->device_issue_pending(dma_chan);
  1164. tmo = wait_for_completion_timeout(&cmp, msecs_to_jiffies(3000));
  1165. if (dma->device_tx_status(dma_chan, cookie, NULL) != DMA_COMPLETE) {
  1166. dev_err(dev, "Self-test validate timed out\n");
  1167. err = -ENODEV;
  1168. goto dma_unmap;
  1169. }
  1170. for (i = 0; i < IOAT_NUM_SRC_TEST + 1; i++)
  1171. dma_unmap_page(dev, dma_srcs[i], PAGE_SIZE, DMA_TO_DEVICE);
  1172. if (xor_val_result != 0) {
  1173. dev_err(dev, "Self-test validate failed compare\n");
  1174. err = -ENODEV;
  1175. goto free_resources;
  1176. }
  1177. memset(page_address(dest), 0, PAGE_SIZE);
  1178. /* test for non-zero parity sum */
  1179. op = IOAT_OP_XOR_VAL;
  1180. xor_val_result = 0;
  1181. for (i = 0; i < IOAT_NUM_SRC_TEST + 1; i++)
  1182. dma_srcs[i] = DMA_ERROR_CODE;
  1183. for (i = 0; i < IOAT_NUM_SRC_TEST + 1; i++) {
  1184. dma_srcs[i] = dma_map_page(dev, xor_val_srcs[i], 0, PAGE_SIZE,
  1185. DMA_TO_DEVICE);
  1186. if (dma_mapping_error(dev, dma_srcs[i]))
  1187. goto dma_unmap;
  1188. }
  1189. tx = dma->device_prep_dma_xor_val(dma_chan, dma_srcs,
  1190. IOAT_NUM_SRC_TEST + 1, PAGE_SIZE,
  1191. &xor_val_result, DMA_PREP_INTERRUPT);
  1192. if (!tx) {
  1193. dev_err(dev, "Self-test 2nd zero prep failed\n");
  1194. err = -ENODEV;
  1195. goto dma_unmap;
  1196. }
  1197. async_tx_ack(tx);
  1198. init_completion(&cmp);
  1199. tx->callback = ioat3_dma_test_callback;
  1200. tx->callback_param = &cmp;
  1201. cookie = tx->tx_submit(tx);
  1202. if (cookie < 0) {
  1203. dev_err(dev, "Self-test 2nd zero setup failed\n");
  1204. err = -ENODEV;
  1205. goto dma_unmap;
  1206. }
  1207. dma->device_issue_pending(dma_chan);
  1208. tmo = wait_for_completion_timeout(&cmp, msecs_to_jiffies(3000));
  1209. if (dma->device_tx_status(dma_chan, cookie, NULL) != DMA_COMPLETE) {
  1210. dev_err(dev, "Self-test 2nd validate timed out\n");
  1211. err = -ENODEV;
  1212. goto dma_unmap;
  1213. }
  1214. if (xor_val_result != SUM_CHECK_P_RESULT) {
  1215. dev_err(dev, "Self-test validate failed compare\n");
  1216. err = -ENODEV;
  1217. goto dma_unmap;
  1218. }
  1219. for (i = 0; i < IOAT_NUM_SRC_TEST + 1; i++)
  1220. dma_unmap_page(dev, dma_srcs[i], PAGE_SIZE, DMA_TO_DEVICE);
  1221. goto free_resources;
  1222. dma_unmap:
  1223. if (op == IOAT_OP_XOR) {
  1224. if (dest_dma != DMA_ERROR_CODE)
  1225. dma_unmap_page(dev, dest_dma, PAGE_SIZE,
  1226. DMA_FROM_DEVICE);
  1227. for (i = 0; i < IOAT_NUM_SRC_TEST; i++)
  1228. if (dma_srcs[i] != DMA_ERROR_CODE)
  1229. dma_unmap_page(dev, dma_srcs[i], PAGE_SIZE,
  1230. DMA_TO_DEVICE);
  1231. } else if (op == IOAT_OP_XOR_VAL) {
  1232. for (i = 0; i < IOAT_NUM_SRC_TEST + 1; i++)
  1233. if (dma_srcs[i] != DMA_ERROR_CODE)
  1234. dma_unmap_page(dev, dma_srcs[i], PAGE_SIZE,
  1235. DMA_TO_DEVICE);
  1236. }
  1237. free_resources:
  1238. dma->device_free_chan_resources(dma_chan);
  1239. out:
  1240. src_idx = IOAT_NUM_SRC_TEST;
  1241. while (src_idx--)
  1242. __free_page(xor_srcs[src_idx]);
  1243. __free_page(dest);
  1244. return err;
  1245. }
  1246. static int ioat3_dma_self_test(struct ioatdma_device *device)
  1247. {
  1248. int rc = ioat_dma_self_test(device);
  1249. if (rc)
  1250. return rc;
  1251. rc = ioat_xor_val_self_test(device);
  1252. if (rc)
  1253. return rc;
  1254. return 0;
  1255. }
  1256. static int ioat3_irq_reinit(struct ioatdma_device *device)
  1257. {
  1258. struct pci_dev *pdev = device->pdev;
  1259. int irq = pdev->irq, i;
  1260. if (!is_bwd_ioat(pdev))
  1261. return 0;
  1262. switch (device->irq_mode) {
  1263. case IOAT_MSIX:
  1264. for (i = 0; i < device->common.chancnt; i++) {
  1265. struct msix_entry *msix = &device->msix_entries[i];
  1266. struct ioat_chan_common *chan;
  1267. chan = ioat_chan_by_index(device, i);
  1268. devm_free_irq(&pdev->dev, msix->vector, chan);
  1269. }
  1270. pci_disable_msix(pdev);
  1271. break;
  1272. case IOAT_MSI:
  1273. pci_disable_msi(pdev);
  1274. /* fall through */
  1275. case IOAT_INTX:
  1276. devm_free_irq(&pdev->dev, irq, device);
  1277. break;
  1278. default:
  1279. return 0;
  1280. }
  1281. device->irq_mode = IOAT_NOIRQ;
  1282. return ioat_dma_setup_interrupts(device);
  1283. }
  1284. static int ioat3_reset_hw(struct ioat_chan_common *chan)
  1285. {
  1286. /* throw away whatever the channel was doing and get it
  1287. * initialized, with ioat3 specific workarounds
  1288. */
  1289. struct ioatdma_device *device = chan->device;
  1290. struct pci_dev *pdev = device->pdev;
  1291. u32 chanerr;
  1292. u16 dev_id;
  1293. int err;
  1294. ioat2_quiesce(chan, msecs_to_jiffies(100));
  1295. chanerr = readl(chan->reg_base + IOAT_CHANERR_OFFSET);
  1296. writel(chanerr, chan->reg_base + IOAT_CHANERR_OFFSET);
  1297. if (device->version < IOAT_VER_3_3) {
  1298. /* clear any pending errors */
  1299. err = pci_read_config_dword(pdev,
  1300. IOAT_PCI_CHANERR_INT_OFFSET, &chanerr);
  1301. if (err) {
  1302. dev_err(&pdev->dev,
  1303. "channel error register unreachable\n");
  1304. return err;
  1305. }
  1306. pci_write_config_dword(pdev,
  1307. IOAT_PCI_CHANERR_INT_OFFSET, chanerr);
  1308. /* Clear DMAUNCERRSTS Cfg-Reg Parity Error status bit
  1309. * (workaround for spurious config parity error after restart)
  1310. */
  1311. pci_read_config_word(pdev, IOAT_PCI_DEVICE_ID_OFFSET, &dev_id);
  1312. if (dev_id == PCI_DEVICE_ID_INTEL_IOAT_TBG0) {
  1313. pci_write_config_dword(pdev,
  1314. IOAT_PCI_DMAUNCERRSTS_OFFSET,
  1315. 0x10);
  1316. }
  1317. }
  1318. err = ioat2_reset_sync(chan, msecs_to_jiffies(200));
  1319. if (!err)
  1320. err = ioat3_irq_reinit(device);
  1321. if (err)
  1322. dev_err(&pdev->dev, "Failed to reset: %d\n", err);
  1323. return err;
  1324. }
  1325. static void ioat3_intr_quirk(struct ioatdma_device *device)
  1326. {
  1327. struct dma_device *dma;
  1328. struct dma_chan *c;
  1329. struct ioat_chan_common *chan;
  1330. u32 errmask;
  1331. dma = &device->common;
  1332. /*
  1333. * if we have descriptor write back error status, we mask the
  1334. * error interrupts
  1335. */
  1336. if (device->cap & IOAT_CAP_DWBES) {
  1337. list_for_each_entry(c, &dma->channels, device_node) {
  1338. chan = to_chan_common(c);
  1339. errmask = readl(chan->reg_base +
  1340. IOAT_CHANERR_MASK_OFFSET);
  1341. errmask |= IOAT_CHANERR_XOR_P_OR_CRC_ERR |
  1342. IOAT_CHANERR_XOR_Q_ERR;
  1343. writel(errmask, chan->reg_base +
  1344. IOAT_CHANERR_MASK_OFFSET);
  1345. }
  1346. }
  1347. }
  1348. int ioat3_dma_probe(struct ioatdma_device *device, int dca)
  1349. {
  1350. struct pci_dev *pdev = device->pdev;
  1351. int dca_en = system_has_dca_enabled(pdev);
  1352. struct dma_device *dma;
  1353. struct dma_chan *c;
  1354. struct ioat_chan_common *chan;
  1355. bool is_raid_device = false;
  1356. int err;
  1357. device->enumerate_channels = ioat2_enumerate_channels;
  1358. device->reset_hw = ioat3_reset_hw;
  1359. device->self_test = ioat3_dma_self_test;
  1360. device->intr_quirk = ioat3_intr_quirk;
  1361. dma = &device->common;
  1362. dma->device_prep_dma_memcpy = ioat2_dma_prep_memcpy_lock;
  1363. dma->device_issue_pending = ioat2_issue_pending;
  1364. dma->device_alloc_chan_resources = ioat2_alloc_chan_resources;
  1365. dma->device_free_chan_resources = ioat2_free_chan_resources;
  1366. dma_cap_set(DMA_INTERRUPT, dma->cap_mask);
  1367. dma->device_prep_dma_interrupt = ioat3_prep_interrupt_lock;
  1368. device->cap = readl(device->reg_base + IOAT_DMA_CAP_OFFSET);
  1369. if (is_xeon_cb32(pdev) || is_bwd_noraid(pdev))
  1370. device->cap &= ~(IOAT_CAP_XOR | IOAT_CAP_PQ | IOAT_CAP_RAID16SS);
  1371. /* dca is incompatible with raid operations */
  1372. if (dca_en && (device->cap & (IOAT_CAP_XOR|IOAT_CAP_PQ)))
  1373. device->cap &= ~(IOAT_CAP_XOR|IOAT_CAP_PQ);
  1374. if (device->cap & IOAT_CAP_XOR) {
  1375. is_raid_device = true;
  1376. dma->max_xor = 8;
  1377. dma_cap_set(DMA_XOR, dma->cap_mask);
  1378. dma->device_prep_dma_xor = ioat3_prep_xor;
  1379. dma_cap_set(DMA_XOR_VAL, dma->cap_mask);
  1380. dma->device_prep_dma_xor_val = ioat3_prep_xor_val;
  1381. }
  1382. if (device->cap & IOAT_CAP_PQ) {
  1383. is_raid_device = true;
  1384. dma->device_prep_dma_pq = ioat3_prep_pq;
  1385. dma->device_prep_dma_pq_val = ioat3_prep_pq_val;
  1386. dma_cap_set(DMA_PQ, dma->cap_mask);
  1387. dma_cap_set(DMA_PQ_VAL, dma->cap_mask);
  1388. if (device->cap & IOAT_CAP_RAID16SS) {
  1389. dma_set_maxpq(dma, 16, 0);
  1390. } else {
  1391. dma_set_maxpq(dma, 8, 0);
  1392. }
  1393. if (!(device->cap & IOAT_CAP_XOR)) {
  1394. dma->device_prep_dma_xor = ioat3_prep_pqxor;
  1395. dma->device_prep_dma_xor_val = ioat3_prep_pqxor_val;
  1396. dma_cap_set(DMA_XOR, dma->cap_mask);
  1397. dma_cap_set(DMA_XOR_VAL, dma->cap_mask);
  1398. if (device->cap & IOAT_CAP_RAID16SS) {
  1399. dma->max_xor = 16;
  1400. } else {
  1401. dma->max_xor = 8;
  1402. }
  1403. }
  1404. }
  1405. dma->device_tx_status = ioat3_tx_status;
  1406. device->cleanup_fn = ioat3_cleanup_event;
  1407. device->timer_fn = ioat3_timer_event;
  1408. /* starting with CB3.3 super extended descriptors are supported */
  1409. if (device->cap & IOAT_CAP_RAID16SS) {
  1410. char pool_name[14];
  1411. int i;
  1412. for (i = 0; i < MAX_SED_POOLS; i++) {
  1413. snprintf(pool_name, 14, "ioat_hw%d_sed", i);
  1414. /* allocate SED DMA pool */
  1415. device->sed_hw_pool[i] = dmam_pool_create(pool_name,
  1416. &pdev->dev,
  1417. SED_SIZE * (i + 1), 64, 0);
  1418. if (!device->sed_hw_pool[i])
  1419. return -ENOMEM;
  1420. }
  1421. }
  1422. err = ioat_probe(device);
  1423. if (err)
  1424. return err;
  1425. list_for_each_entry(c, &dma->channels, device_node) {
  1426. chan = to_chan_common(c);
  1427. writel(IOAT_DMA_DCA_ANY_CPU,
  1428. chan->reg_base + IOAT_DCACTRL_OFFSET);
  1429. }
  1430. err = ioat_register(device);
  1431. if (err)
  1432. return err;
  1433. ioat_kobject_add(device, &ioat2_ktype);
  1434. if (dca)
  1435. device->dca = ioat3_dca_init(pdev, device->reg_base);
  1436. return 0;
  1437. }