/*
 * Driver for IBM Power 842 compression accelerator
 *
 * This program is free software; you can redistribute it and/or modify
 * it under the terms of the GNU General Public License as published by
 * the Free Software Foundation; either version 2 of the License, or
 * (at your option) any later version.
 *
 * This program is distributed in the hope that it will be useful,
 * but WITHOUT ANY WARRANTY; without even the implied warranty of
 * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
 * GNU General Public License for more details.
 *
 * You should have received a copy of the GNU General Public License
 * along with this program; if not, write to the Free Software
 * Foundation, 51 Franklin Street, Fifth Floor, Boston, MA 02110-1301, USA.
 *
 * Copyright (C) IBM Corporation, 2012
 *
 * Authors: Robert Jennings <rcj@linux.vnet.ibm.com>
 *          Seth Jennings <sjenning@linux.vnet.ibm.com>
 */

#include <linux/kernel.h>
#include <linux/module.h>
#include <linux/nx842.h>
#include <linux/of.h>
#include <linux/slab.h>

#include <asm/page.h>
#include <asm/vio.h>

#include "nx_csbcpb.h" /* struct nx_csbcpb */

#define MODULE_NAME "nx-compress"
MODULE_LICENSE("GPL");
MODULE_AUTHOR("Robert Jennings <rcj@linux.vnet.ibm.com>");
MODULE_DESCRIPTION("842 H/W Compression driver for IBM Power processors");

#define SHIFT_4K 12
#define SHIFT_64K 16
#define SIZE_4K (1UL << SHIFT_4K)
#define SIZE_64K (1UL << SHIFT_64K)

/* IO buffer must be 128 byte aligned */
#define IO_BUFFER_ALIGN 128

struct nx842_header {
	int blocks_nr; /* number of compressed blocks */
	int offset; /* offset of the first block (from beginning of header) */
	int sizes[0]; /* size of compressed blocks */
};
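
/*
 * Note on sizes[]: each entry holds the byte length of one compressed
 * block.  nx842_compress() below stores a negative length when a block
 * is kept uncompressed (incompressible data), and nx842_decompress()
 * uses the sign to decide between a plain memcpy and a hardware or
 * software decompress of that block.
 */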
static inline int nx842_header_size(const struct nx842_header *hdr)
{
	return sizeof(struct nx842_header) +
			hdr->blocks_nr * sizeof(hdr->sizes[0]);
}

/* Macros for fields within nx_csbcpb */
/* Check the valid bit within the csbcpb valid field */
#define NX842_CSBCBP_VALID_CHK(x) (x & BIT_MASK(7))

/* CE macros operate on the completion_extension field bits in the csbcpb.
 * CE0 0=full completion, 1=partial completion
 * CE1 0=CE0 indicates completion, 1=termination (output may be modified)
 * CE2 0=processed_bytes is source bytes, 1=processed_bytes is target bytes */
#define NX842_CSBCPB_CE0(x) (x & BIT_MASK(7))
#define NX842_CSBCPB_CE1(x) (x & BIT_MASK(6))
#define NX842_CSBCPB_CE2(x) (x & BIT_MASK(5))

/* The NX unit accepts data only on 4K page boundaries */
#define NX842_HW_PAGE_SHIFT SHIFT_4K
#define NX842_HW_PAGE_SIZE (ASM_CONST(1) << NX842_HW_PAGE_SHIFT)
#define NX842_HW_PAGE_MASK (~(NX842_HW_PAGE_SIZE-1))

enum nx842_status {
	UNAVAILABLE,
	AVAILABLE
};

struct ibm_nx842_counters {
	atomic64_t comp_complete;
	atomic64_t comp_failed;
	atomic64_t decomp_complete;
	atomic64_t decomp_failed;
	atomic64_t swdecomp;
	atomic64_t comp_times[32];
	atomic64_t decomp_times[32];
};

static struct nx842_devdata {
	struct vio_dev *vdev;
	struct device *dev;
	struct ibm_nx842_counters *counters;
	unsigned int max_sg_len;
	unsigned int max_sync_size;
	unsigned int max_sync_sg;
	enum nx842_status status;
} __rcu *devdata;
static DEFINE_SPINLOCK(devdata_mutex);

#define NX842_COUNTER_INC(_x) \
static inline void nx842_inc_##_x( \
	const struct nx842_devdata *dev) { \
	if (dev) \
		atomic64_inc(&dev->counters->_x); \
}
NX842_COUNTER_INC(comp_complete);
NX842_COUNTER_INC(comp_failed);
NX842_COUNTER_INC(decomp_complete);
NX842_COUNTER_INC(decomp_failed);
NX842_COUNTER_INC(swdecomp);

#define NX842_HIST_SLOTS 16

static void ibm_nx842_incr_hist(atomic64_t *times, unsigned int time)
{
	int bucket = fls(time);

	if (bucket)
		bucket = min((NX842_HIST_SLOTS - 1), bucket - 1);

	atomic64_inc(&times[bucket]);
}
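
/*
 * Bucket layout (implied by the fls() above and by the formatting in
 * nx842_timehist_show() below): bucket 0 counts times of 0-1us, bucket
 * i counts times in [2^i, 2^(i+1)) microseconds, and the last bucket
 * collects everything larger.
 */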
/* NX unit operation flags */
#define NX842_OP_COMPRESS 0x0
#define NX842_OP_CRC 0x1
#define NX842_OP_DECOMPRESS 0x2
#define NX842_OP_COMPRESS_CRC (NX842_OP_COMPRESS | NX842_OP_CRC)
#define NX842_OP_DECOMPRESS_CRC (NX842_OP_DECOMPRESS | NX842_OP_CRC)
#define NX842_OP_ASYNC (1<<23)
#define NX842_OP_NOTIFY (1<<22)
#define NX842_OP_NOTIFY_INT(x) ((x & 0xff)<<8)

static unsigned long nx842_get_desired_dma(struct vio_dev *viodev)
{
	/* No use of DMA mappings within the driver. */
	return 0;
}

struct nx842_slentry {
	unsigned long ptr; /* Real address (use __pa()) */
	unsigned long len;
};

/* pHyp scatterlist entry */
struct nx842_scatterlist {
	int entry_nr; /* number of slentries */
	struct nx842_slentry *entries; /* ptr to array of slentries */
};

/* Does not include sizeof(entry_nr) in the size */
static inline unsigned long nx842_get_scatterlist_size(
				struct nx842_scatterlist *sl)
{
	return sl->entry_nr * sizeof(struct nx842_slentry);
}
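
/*
 * When an indirect DDE (scatterlist) is used, the compress and
 * decompress paths below pass the *negated* scatterlist byte length in
 * op.inlen or op.outlen; the negative length appears to be how the
 * vio_h_cop_sync() request distinguishes an indirect list from a
 * direct buffer.
 */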
static inline unsigned long nx842_get_pa(void *addr)
{
	if (is_vmalloc_addr(addr))
		return page_to_phys(vmalloc_to_page(addr))
		       + offset_in_page(addr);
	else
		return __pa(addr);
}

static int nx842_build_scatterlist(unsigned long buf, int len,
			struct nx842_scatterlist *sl)
{
	unsigned long nextpage;
	struct nx842_slentry *entry;

	sl->entry_nr = 0;

	entry = sl->entries;
	while (len) {
		entry->ptr = nx842_get_pa((void *)buf);
		nextpage = ALIGN(buf + 1, NX842_HW_PAGE_SIZE);
		if (nextpage < buf + len) {
			/* we aren't at the end yet */
			if (IS_ALIGNED(buf, NX842_HW_PAGE_SIZE))
				/* we are in the middle (or beginning) */
				entry->len = NX842_HW_PAGE_SIZE;
			else
				/* we are at the beginning */
				entry->len = nextpage - buf;
		} else {
			/* at the end */
			entry->len = len;
		}

		len -= entry->len;
		buf += entry->len;
		sl->entry_nr++;
		entry++;
	}

	return 0;
}

/*
 * Working memory for software decompression
 */
struct sw842_fifo {
	union {
		char f8[256][8];
		char f4[512][4];
	};
	char f2[256][2];
	unsigned char f84_full;
	unsigned char f2_full;
	unsigned char f8_count;
	unsigned char f2_count;
	unsigned int f4_count;
};

/*
 * Working memory for crypto API
 */
struct nx842_workmem {
	char bounce[PAGE_SIZE]; /* bounce buffer for decompression input */
	union {
		/* hardware working memory */
		struct {
			/* scatterlist */
			char slin[SIZE_4K];
			char slout[SIZE_4K];
			/* coprocessor status/parameter block */
			struct nx_csbcpb csbcpb;
		};
		/* software working memory */
		struct sw842_fifo swfifo; /* software decompression fifo */
	};
};
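
/*
 * The two size helpers below differ by one hardware page: callers that
 * cannot guarantee 4K alignment of their buffer use
 * nx842_get_workmem_size(), which adds NX842_HW_PAGE_SIZE of slack so
 * that the compress/decompress paths can round the pointer up with
 * ALIGN(wmem, NX842_HW_PAGE_SIZE) and still have a full struct
 * nx842_workmem available.
 */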
int nx842_get_workmem_size(void)
{
	return sizeof(struct nx842_workmem) + NX842_HW_PAGE_SIZE;
}
EXPORT_SYMBOL_GPL(nx842_get_workmem_size);

int nx842_get_workmem_size_aligned(void)
{
	return sizeof(struct nx842_workmem);
}
EXPORT_SYMBOL_GPL(nx842_get_workmem_size_aligned);

static int nx842_validate_result(struct device *dev,
			struct cop_status_block *csb)
{
	/* The csb must be valid after returning from vio_h_cop_sync */
	if (!NX842_CSBCBP_VALID_CHK(csb->valid)) {
		dev_err(dev, "%s: csbcpb not valid upon completion.\n",
				__func__);
		dev_dbg(dev, "valid:0x%02x cs:0x%02x cc:0x%02x ce:0x%02x\n",
				csb->valid,
				csb->crb_seq_number,
				csb->completion_code,
				csb->completion_extension);
		dev_dbg(dev, "processed_bytes:%d address:0x%016lx\n",
				csb->processed_byte_count,
				(unsigned long)csb->address);
		return -EIO;
	}

	/* Check return values from the hardware in the CSB */
	switch (csb->completion_code) {
	case 0:	/* Completed without error */
		break;
	case 64: /* Target bytes > Source bytes during compression */
	case 13: /* Output buffer too small */
		dev_dbg(dev, "%s: Compression output larger than input\n",
					__func__);
		return -ENOSPC;
	case 66: /* Input data contains an illegal template field */
	case 67: /* Template indicates data past the end of the input stream */
		dev_dbg(dev, "%s: Bad data for decompression (code:%d)\n",
					__func__, csb->completion_code);
		return -EINVAL;
	default:
		dev_dbg(dev, "%s: Unspecified error (code:%d)\n",
					__func__, csb->completion_code);
		return -EIO;
	}

	/* Hardware sanity check */
	if (!NX842_CSBCPB_CE2(csb->completion_extension)) {
		dev_err(dev, "%s: No error returned by hardware, but "
				"data returned is unusable, contact support.\n"
				"(Additional info: csbcpb->processed bytes "
				"does not specify processed bytes for the "
				"target buffer.)\n", __func__);
		return -EIO;
	}

	return 0;
}
/**
 * nx842_compress - Compress data using the 842 algorithm
 *
 * Compression provided by the NX842 coprocessor on IBM Power systems.
 * The input buffer is compressed and the result is stored in the
 * provided output buffer.
 *
 * Upon return from this function @outlen contains the length of the
 * compressed data.  If there is an error then @outlen will be 0 and an
 * error will be specified by the return code from this function.
 *
 * @in: Pointer to input buffer, must be page aligned
 * @inlen: Length of input buffer, must be PAGE_SIZE
 * @out: Pointer to output buffer
 * @outlen: Length of output buffer
 * @wmem: ptr to buffer for working memory, size determined by
 *        nx842_get_workmem_size()
 *
 * Returns:
 *   0		Success, output of length @outlen stored in the buffer at @out
 *   -ENOMEM	Unable to allocate internal buffers
 *   -ENOSPC	Output buffer is too small
 *   -EMSGSIZE	XXX Difficult to describe this limitation
 *   -EIO	Internal error
 *   -ENODEV	Hardware unavailable
 */
int nx842_compress(const unsigned char *in, unsigned int inlen,
		unsigned char *out, unsigned int *outlen, void *wmem)
{
	struct nx842_header *hdr;
	struct nx842_devdata *local_devdata;
	struct device *dev = NULL;
	struct nx842_workmem *workmem;
	struct nx842_scatterlist slin, slout;
	struct nx_csbcpb *csbcpb;
	int ret = 0, max_sync_size, i, bytesleft, size, hdrsize;
	unsigned long inbuf, outbuf, padding;
	struct vio_pfo_op op = {
		.done = NULL,
		.handle = 0,
		.timeout = 0,
	};
	unsigned long start_time = get_tb();

	/*
	 * Make sure input buffer is 64k page aligned.  This is assumed since
	 * this driver is designed for page compression only (for now).  This
	 * is very nice since we can now use direct DDE(s) for the input and
	 * the alignment is guaranteed.
	 */
	inbuf = (unsigned long)in;
	if (!IS_ALIGNED(inbuf, PAGE_SIZE) || inlen != PAGE_SIZE)
		return -EINVAL;

	rcu_read_lock();
	local_devdata = rcu_dereference(devdata);
	if (!local_devdata || !local_devdata->dev) {
		rcu_read_unlock();
		return -ENODEV;
	}
	max_sync_size = local_devdata->max_sync_size;
	dev = local_devdata->dev;

	/* Create the header */
	hdr = (struct nx842_header *)out;
	hdr->blocks_nr = PAGE_SIZE / max_sync_size;
	hdrsize = nx842_header_size(hdr);
	outbuf = (unsigned long)out + hdrsize;
	bytesleft = *outlen - hdrsize;

	/* Init scatterlist */
	workmem = (struct nx842_workmem *)ALIGN((unsigned long)wmem,
		NX842_HW_PAGE_SIZE);
	slin.entries = (struct nx842_slentry *)workmem->slin;
	slout.entries = (struct nx842_slentry *)workmem->slout;

	/* Init operation */
	op.flags = NX842_OP_COMPRESS;
	csbcpb = &workmem->csbcpb;
	memset(csbcpb, 0, sizeof(*csbcpb));
	op.csbcpb = nx842_get_pa(csbcpb);
	op.out = nx842_get_pa(slout.entries);

	for (i = 0; i < hdr->blocks_nr; i++) {
		/*
		 * Aligning the output blocks to 128 bytes does waste space,
		 * but it prevents the need for bounce buffers and memory
		 * copies.  It also simplifies the code a lot.  In the worst
		 * case (64k page, 4k max_sync_size), you lose up to
		 * (128*16)/64k = ~3% the compression factor.  For 64k
		 * max_sync_size, the loss would be at most 128/64k = ~0.2%.
		 */
		padding = ALIGN(outbuf, IO_BUFFER_ALIGN) - outbuf;
		outbuf += padding;
		bytesleft -= padding;
		if (i == 0)
			/* save offset into first block in header */
			hdr->offset = padding + hdrsize;

		if (bytesleft <= 0) {
			ret = -ENOSPC;
			goto unlock;
		}

		/*
		 * NOTE: If the default max_sync_size is changed from 4k
		 * to 64k, remove the "likely" case below, since a
		 * scatterlist will always be needed.
		 */
		if (likely(max_sync_size == NX842_HW_PAGE_SIZE)) {
			/* Create direct DDE */
			op.in = nx842_get_pa((void *)inbuf);
			op.inlen = max_sync_size;
		} else {
			/* Create indirect DDE (scatterlist) */
			nx842_build_scatterlist(inbuf, max_sync_size, &slin);
			op.in = nx842_get_pa(slin.entries);
			op.inlen = -nx842_get_scatterlist_size(&slin);
		}

		/*
		 * If max_sync_size != NX842_HW_PAGE_SIZE, an indirect
		 * DDE is required for the outbuf.
		 * If max_sync_size == NX842_HW_PAGE_SIZE, outbuf must
		 * also be page aligned (1 in 128/4k=32 chance) in order
		 * to use a direct DDE.
		 * This is unlikely, just use an indirect DDE always.
		 */
		nx842_build_scatterlist(outbuf,
			min(bytesleft, max_sync_size), &slout);
		/* op.out set before loop */
		op.outlen = -nx842_get_scatterlist_size(&slout);

		/* Send request to pHyp */
		ret = vio_h_cop_sync(local_devdata->vdev, &op);

		/* Check for pHyp error */
		if (ret) {
			dev_dbg(dev, "%s: vio_h_cop_sync error (ret=%d, hret=%ld)\n",
				__func__, ret, op.hcall_err);
			ret = -EIO;
			goto unlock;
		}

		/* Check for hardware error */
		ret = nx842_validate_result(dev, &csbcpb->csb);
		if (ret && ret != -ENOSPC)
			goto unlock;

		/* Handle incompressible data */
		if (unlikely(ret == -ENOSPC)) {
			if (bytesleft < max_sync_size) {
				/*
				 * Not enough space left in the output buffer
				 * to store uncompressed block
				 */
				goto unlock;
			} else {
				/* Store incompressible block */
				memcpy((void *)outbuf, (void *)inbuf,
					max_sync_size);
				hdr->sizes[i] = -max_sync_size;
				outbuf += max_sync_size;
				bytesleft -= max_sync_size;
				/* Reset ret, incompressible data handled */
				ret = 0;
			}
		} else {
			/* Normal case, compression was successful */
			size = csbcpb->csb.processed_byte_count;
			dev_dbg(dev, "%s: processed_bytes=%d\n",
				__func__, size);
			hdr->sizes[i] = size;
			outbuf += size;
			bytesleft -= size;
		}

		inbuf += max_sync_size;
	}

	*outlen = (unsigned int)(outbuf - (unsigned long)out);

unlock:
	if (ret)
		nx842_inc_comp_failed(local_devdata);
	else {
		nx842_inc_comp_complete(local_devdata);
		ibm_nx842_incr_hist(local_devdata->counters->comp_times,
			(get_tb() - start_time) / tb_ticks_per_usec);
	}
	rcu_read_unlock();
	return ret;
}
EXPORT_SYMBOL_GPL(nx842_compress);
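
/*
 * Illustrative call sequence for the API above (not part of this file;
 * "src", "dst", the output buffer size and the allocation flags are
 * hypothetical caller choices):
 *
 *	void *wmem = kmalloc(nx842_get_workmem_size(), GFP_KERNEL);
 *	unsigned int dlen = 2 * PAGE_SIZE;	// caller-sized output buffer
 *	int rc;
 *
 *	rc = nx842_compress(src, PAGE_SIZE, dst, &dlen, wmem);
 *	// on success, dlen now holds the compressed length stored at dst;
 *	// that buffer can later be passed as the input of
 *	// nx842_decompress(), reusing the same wmem.
 */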
static int sw842_decompress(const unsigned char *, int, unsigned char *, int *,
			const void *);

/**
 * nx842_decompress - Decompress data using the 842 algorithm
 *
 * Decompression provided by the NX842 coprocessor on IBM Power systems.
 * The input buffer is decompressed and the result is stored in the
 * provided output buffer.  The size allocated to the output buffer is
 * provided by the caller of this function in @outlen.  Upon return from
 * this function @outlen contains the length of the decompressed data.
 * If there is an error then @outlen will be 0 and an error will be
 * specified by the return code from this function.
 *
 * @in: Pointer to input buffer, will use bounce buffer if not 128 byte
 *      aligned
 * @inlen: Length of input buffer
 * @out: Pointer to output buffer, must be page aligned
 * @outlen: Length of output buffer, must be PAGE_SIZE
 * @wmem: ptr to buffer for working memory, size determined by
 *        nx842_get_workmem_size()
 *
 * Returns:
 *   0		Success, output of length @outlen stored in the buffer at @out
 *   -ENODEV	Hardware decompression device is unavailable
 *   -ENOMEM	Unable to allocate internal buffers
 *   -ENOSPC	Output buffer is too small
 *   -EINVAL	Bad input data encountered when attempting decompress
 *   -EIO	Internal error
 */
int nx842_decompress(const unsigned char *in, unsigned int inlen,
			unsigned char *out, unsigned int *outlen, void *wmem)
{
	struct nx842_header *hdr;
	struct nx842_devdata *local_devdata;
	struct device *dev = NULL;
	struct nx842_workmem *workmem;
	struct nx842_scatterlist slin, slout;
	struct nx_csbcpb *csbcpb;
	int ret = 0, i, size, max_sync_size;
	unsigned long inbuf, outbuf;
	struct vio_pfo_op op = {
		.done = NULL,
		.handle = 0,
		.timeout = 0,
	};
	unsigned long start_time = get_tb();

	/* Ensure page alignment and size */
	outbuf = (unsigned long)out;
	if (!IS_ALIGNED(outbuf, PAGE_SIZE) || *outlen != PAGE_SIZE)
		return -EINVAL;

	rcu_read_lock();
	local_devdata = rcu_dereference(devdata);
	if (local_devdata)
		dev = local_devdata->dev;

	/* Get header */
	hdr = (struct nx842_header *)in;

	workmem = (struct nx842_workmem *)ALIGN((unsigned long)wmem,
		NX842_HW_PAGE_SIZE);

	inbuf = (unsigned long)in + hdr->offset;
	if (likely(!IS_ALIGNED(inbuf, IO_BUFFER_ALIGN))) {
		/* Copy block(s) into bounce buffer for alignment */
		memcpy(workmem->bounce, in + hdr->offset, inlen - hdr->offset);
		inbuf = (unsigned long)workmem->bounce;
	}

	/* Init scatterlist */
	slin.entries = (struct nx842_slentry *)workmem->slin;
	slout.entries = (struct nx842_slentry *)workmem->slout;

	/* Init operation */
	op.flags = NX842_OP_DECOMPRESS;
	csbcpb = &workmem->csbcpb;
	memset(csbcpb, 0, sizeof(*csbcpb));
	op.csbcpb = nx842_get_pa(csbcpb);

	/*
	 * max_sync_size may have changed since compression,
	 * so we can't read it from the device info. We need
	 * to derive it from hdr->blocks_nr.
	 */
	max_sync_size = PAGE_SIZE / hdr->blocks_nr;

	for (i = 0; i < hdr->blocks_nr; i++) {
		/* Skip padding */
		inbuf = ALIGN(inbuf, IO_BUFFER_ALIGN);

		if (hdr->sizes[i] < 0) {
			/* Negative sizes indicate uncompressed data blocks */
			size = abs(hdr->sizes[i]);
			memcpy((void *)outbuf, (void *)inbuf, size);
			outbuf += size;
			inbuf += size;
			continue;
		}

		if (!dev)
			goto sw;

		/*
		 * The better the compression, the more likely the "likely"
		 * case becomes.
		 */
		if (likely((inbuf & NX842_HW_PAGE_MASK) ==
			((inbuf + hdr->sizes[i] - 1) & NX842_HW_PAGE_MASK))) {
			/* Create direct DDE */
			op.in = nx842_get_pa((void *)inbuf);
			op.inlen = hdr->sizes[i];
		} else {
			/* Create indirect DDE (scatterlist) */
			nx842_build_scatterlist(inbuf, hdr->sizes[i], &slin);
			op.in = nx842_get_pa(slin.entries);
			op.inlen = -nx842_get_scatterlist_size(&slin);
		}

		/*
		 * NOTE: If the default max_sync_size is changed from 4k
		 * to 64k, remove the "likely" case below, since a
		 * scatterlist will always be needed.
		 */
		if (likely(max_sync_size == NX842_HW_PAGE_SIZE)) {
			/* Create direct DDE */
			op.out = nx842_get_pa((void *)outbuf);
			op.outlen = max_sync_size;
		} else {
			/* Create indirect DDE (scatterlist) */
			nx842_build_scatterlist(outbuf, max_sync_size, &slout);
			op.out = nx842_get_pa(slout.entries);
			op.outlen = -nx842_get_scatterlist_size(&slout);
		}

		/* Send request to pHyp */
		ret = vio_h_cop_sync(local_devdata->vdev, &op);

		/* Check for pHyp error */
		if (ret) {
			dev_dbg(dev, "%s: vio_h_cop_sync error (ret=%d, hret=%ld)\n",
				__func__, ret, op.hcall_err);
			dev = NULL;
			goto sw;
		}

		/* Check for hardware error */
		ret = nx842_validate_result(dev, &csbcpb->csb);
		if (ret) {
			dev = NULL;
			goto sw;
		}

		/* HW decompression success */
		inbuf += hdr->sizes[i];
		outbuf += csbcpb->csb.processed_byte_count;
		continue;

sw:
		/* software decompression */
		size = max_sync_size;
		ret = sw842_decompress(
			(unsigned char *)inbuf, hdr->sizes[i],
			(unsigned char *)outbuf, &size, wmem);
		if (ret)
			pr_debug("%s: sw842_decompress failed with %d\n",
				__func__, ret);

		if (ret) {
			if (ret != -ENOSPC && ret != -EINVAL &&
					ret != -EMSGSIZE)
				ret = -EIO;
			goto unlock;
		}

		/* SW decompression success */
		inbuf += hdr->sizes[i];
		outbuf += size;
	}

	*outlen = (unsigned int)(outbuf - (unsigned long)out);

unlock:
	if (ret)
		/* decompress fail */
		nx842_inc_decomp_failed(local_devdata);
	else {
		if (!dev)
			/* software decompress */
			nx842_inc_swdecomp(local_devdata);
		nx842_inc_decomp_complete(local_devdata);
		ibm_nx842_incr_hist(local_devdata->counters->decomp_times,
			(get_tb() - start_time) / tb_ticks_per_usec);
	}

	rcu_read_unlock();
	return ret;
}
EXPORT_SYMBOL_GPL(nx842_decompress);
/**
 * nx842_OF_set_defaults -- Set default (disabled) values for devdata
 *
 * @devdata - struct nx842_devdata to update
 *
 * Returns:
 *  0 on success
 *  -ENOENT if @devdata ptr is NULL
 */
static int nx842_OF_set_defaults(struct nx842_devdata *devdata)
{
	if (devdata) {
		devdata->max_sync_size = 0;
		devdata->max_sync_sg = 0;
		devdata->max_sg_len = 0;
		devdata->status = UNAVAILABLE;
		return 0;
	} else
		return -ENOENT;
}
/**
 * nx842_OF_upd_status -- Update the device info from OF status prop
 *
 * The status property indicates if the accelerator is enabled.  If the
 * device is in the OF tree it indicates that the hardware is present.
 * The device is considered enabled only when the status property is
 * 'okay'; otherwise the device driver will be disabled.
 *
 * @devdata - struct nx842_devdata to update
 * @prop - struct property pointer containing the status for the update
 *
 * Returns:
 *  0 - Device is available
 *  -EINVAL - Device is not available
 */
static int nx842_OF_upd_status(struct nx842_devdata *devdata,
					struct property *prop) {
	int ret = 0;
	const char *status = (const char *)prop->value;

	if (!strncmp(status, "okay", (size_t)prop->length)) {
		devdata->status = AVAILABLE;
	} else {
		dev_info(devdata->dev, "%s: status '%s' is not 'okay'\n",
				__func__, status);
		devdata->status = UNAVAILABLE;
	}

	return ret;
}
/**
 * nx842_OF_upd_maxsglen -- Update the device info from OF maxsglen prop
 *
 * Definition of the 'ibm,max-sg-len' OF property:
 * This field indicates the maximum byte length of a scatter list
 * for the platform facility. It is a single cell encoded as with encode-int.
 *
 * Example:
 * # od -x ibm,max-sg-len
 * 0000000 0000 0ff0
 *
 * In this example, the maximum byte length of a scatter list is
 * 0x0ff0 (4,080).
 *
 * @devdata - struct nx842_devdata to update
 * @prop - struct property pointer containing the max-sg-len for the update
 *
 * Returns:
 *  0 on success
 *  -EINVAL on failure
 */
static int nx842_OF_upd_maxsglen(struct nx842_devdata *devdata,
					struct property *prop) {
	int ret = 0;
	const int *maxsglen = prop->value;

	if (prop->length != sizeof(*maxsglen)) {
		dev_err(devdata->dev, "%s: unexpected format for ibm,max-sg-len property\n", __func__);
		dev_dbg(devdata->dev, "%s: ibm,max-sg-len is %d bytes long, expected %lu bytes\n", __func__,
				prop->length, sizeof(*maxsglen));
		ret = -EINVAL;
	} else {
		devdata->max_sg_len = (unsigned int)min(*maxsglen,
				(int)NX842_HW_PAGE_SIZE);
	}

	return ret;
}
/**
 * nx842_OF_upd_maxsyncop -- Update the device info from OF maxsyncop prop
 *
 * Definition of the 'ibm,max-sync-cop' OF property:
 * Two series of cells.  The first series of cells represents the maximums
 * that can be synchronously compressed.  The second series of cells
 * represents the maximums that can be synchronously decompressed.
 * 1. The first cell in each series contains the count of the number of
 *    data length, scatter list elements pairs that follow, each being
 *    of the form
 *    a. One cell data byte length
 *    b. One cell total number of scatter list elements
 *
 * Example:
 * # od -x ibm,max-sync-cop
 * 0000000 0000 0001 0000 1000 0000 01fe 0000 0001
 * 0000020 0000 1000 0000 01fe
 *
 * In this example, compression supports 0x1000 (4,096) data byte length
 * and 0x1fe (510) total scatter list elements.  Decompression supports
 * 0x1000 (4,096) data byte length and 0x1fe (510) total scatter list
 * elements.
 *
 * @devdata - struct nx842_devdata to update
 * @prop - struct property pointer containing the maxsyncop for the update
 *
 * Returns:
 *  0 on success
 *  -EINVAL on failure
 */
static int nx842_OF_upd_maxsyncop(struct nx842_devdata *devdata,
					struct property *prop) {
	int ret = 0;
	const struct maxsynccop_t {
		int comp_elements;
		int comp_data_limit;
		int comp_sg_limit;
		int decomp_elements;
		int decomp_data_limit;
		int decomp_sg_limit;
	} *maxsynccop;

	if (prop->length != sizeof(*maxsynccop)) {
		dev_err(devdata->dev, "%s: unexpected format for ibm,max-sync-cop property\n", __func__);
		dev_dbg(devdata->dev, "%s: ibm,max-sync-cop is %d bytes long, expected %lu bytes\n", __func__, prop->length,
				sizeof(*maxsynccop));
		ret = -EINVAL;
		goto out;
	}

	maxsynccop = (const struct maxsynccop_t *)prop->value;

	/* Use one limit rather than separate limits for compression and
	 * decompression. Set a maximum for this so as not to exceed the
	 * size that the header can support and round the value down to
	 * the hardware page size (4K) */
	devdata->max_sync_size =
			(unsigned int)min(maxsynccop->comp_data_limit,
					maxsynccop->decomp_data_limit);

	devdata->max_sync_size = min_t(unsigned int, devdata->max_sync_size,
					SIZE_64K);

	if (devdata->max_sync_size < SIZE_4K) {
		dev_err(devdata->dev, "%s: hardware max data size (%u) is "
				"less than the driver minimum, unable to use "
				"the hardware device\n",
				__func__, devdata->max_sync_size);
		ret = -EINVAL;
		goto out;
	}

	devdata->max_sync_sg = (unsigned int)min(maxsynccop->comp_sg_limit,
						maxsynccop->decomp_sg_limit);
	if (devdata->max_sync_sg < 1) {
		dev_err(devdata->dev, "%s: hardware max sg size (%u) is "
				"less than the driver minimum, unable to use "
				"the hardware device\n",
				__func__, devdata->max_sync_sg);
		ret = -EINVAL;
		goto out;
	}

out:
	return ret;
}
/**
 * nx842_OF_upd -- Handle OF properties updates for the device.
 *
 * Set all properties from the OF tree.  Optionally, a new property
 * can be provided by the @new_prop pointer to overwrite an existing value.
 * The device will remain disabled until all values are valid; this function
 * will return an error for updates unless all values are valid.
 *
 * @new_prop: If not NULL, this property is being updated.  If NULL, update
 *  all properties from the current values in the OF tree.
 *
 * Returns:
 *  0 - Success
 *  -ENOMEM - Could not allocate memory for new devdata structure
 *  -EINVAL - property value not found, new_prop is not a recognized
 *	property for the device or property value is not valid.
 *  -ENODEV - Device is not available
 */
static int nx842_OF_upd(struct property *new_prop)
{
	struct nx842_devdata *old_devdata = NULL;
	struct nx842_devdata *new_devdata = NULL;
	struct device_node *of_node = NULL;
	struct property *status = NULL;
	struct property *maxsglen = NULL;
	struct property *maxsyncop = NULL;
	int ret = 0;
	unsigned long flags;

	spin_lock_irqsave(&devdata_mutex, flags);
	old_devdata = rcu_dereference_check(devdata,
			lockdep_is_held(&devdata_mutex));
	if (old_devdata)
		of_node = old_devdata->dev->of_node;

	if (!old_devdata || !of_node) {
		pr_err("%s: device is not available\n", __func__);
		spin_unlock_irqrestore(&devdata_mutex, flags);
		return -ENODEV;
	}

	new_devdata = kzalloc(sizeof(*new_devdata), GFP_NOFS);
	if (!new_devdata) {
		dev_err(old_devdata->dev, "%s: Could not allocate memory for device data\n", __func__);
		ret = -ENOMEM;
		goto error_out;
	}

	memcpy(new_devdata, old_devdata, sizeof(*old_devdata));
	new_devdata->counters = old_devdata->counters;

	/* Set ptrs for existing properties */
	status = of_find_property(of_node, "status", NULL);
	maxsglen = of_find_property(of_node, "ibm,max-sg-len", NULL);
	maxsyncop = of_find_property(of_node, "ibm,max-sync-cop", NULL);
	if (!status || !maxsglen || !maxsyncop) {
		dev_err(old_devdata->dev, "%s: Could not locate device properties\n", __func__);
		ret = -EINVAL;
		goto error_out;
	}

	/* Set ptr to new property if provided */
	if (new_prop) {
		/* Single property */
		if (!strncmp(new_prop->name, "status", new_prop->length)) {
			status = new_prop;

		} else if (!strncmp(new_prop->name, "ibm,max-sg-len",
					new_prop->length)) {
			maxsglen = new_prop;

		} else if (!strncmp(new_prop->name, "ibm,max-sync-cop",
					new_prop->length)) {
			maxsyncop = new_prop;

		} else {
			/*
			 * Skip the update, the property being updated
			 * has no impact.
			 */
			goto out;
		}
	}

	/* Perform property updates */
	ret = nx842_OF_upd_status(new_devdata, status);
	if (ret)
		goto error_out;

	ret = nx842_OF_upd_maxsglen(new_devdata, maxsglen);
	if (ret)
		goto error_out;

	ret = nx842_OF_upd_maxsyncop(new_devdata, maxsyncop);
	if (ret)
		goto error_out;

out:
	dev_info(old_devdata->dev, "%s: max_sync_size new:%u old:%u\n",
			__func__, new_devdata->max_sync_size,
			old_devdata->max_sync_size);
	dev_info(old_devdata->dev, "%s: max_sync_sg new:%u old:%u\n",
			__func__, new_devdata->max_sync_sg,
			old_devdata->max_sync_sg);
	dev_info(old_devdata->dev, "%s: max_sg_len new:%u old:%u\n",
			__func__, new_devdata->max_sg_len,
			old_devdata->max_sg_len);

	rcu_assign_pointer(devdata, new_devdata);
	spin_unlock_irqrestore(&devdata_mutex, flags);
	synchronize_rcu();
	dev_set_drvdata(new_devdata->dev, new_devdata);
	kfree(old_devdata);
	return 0;

error_out:
	if (new_devdata) {
		dev_info(old_devdata->dev, "%s: device disabled\n", __func__);
		nx842_OF_set_defaults(new_devdata);
		rcu_assign_pointer(devdata, new_devdata);
		spin_unlock_irqrestore(&devdata_mutex, flags);
		synchronize_rcu();
		dev_set_drvdata(new_devdata->dev, new_devdata);
		kfree(old_devdata);
	} else {
		dev_err(old_devdata->dev, "%s: could not update driver from hardware\n", __func__);
		spin_unlock_irqrestore(&devdata_mutex, flags);
	}

	if (!ret)
		ret = -EINVAL;
	return ret;
}
/**
 * nx842_OF_notifier - Process updates to OF properties for the device
 *
 * @np: notifier block
 * @action: notifier action
 * @update: struct pSeries_reconfig_prop_update pointer if action is
 *	PSERIES_UPDATE_PROPERTY
 *
 * Returns:
 *	NOTIFY_OK on success
 *	NOTIFY_BAD encoded with error number on failure, use
 *		notifier_to_errno() to decode this value
 */
static int nx842_OF_notifier(struct notifier_block *np, unsigned long action,
			     void *update)
{
	struct of_prop_reconfig *upd = update;
	struct nx842_devdata *local_devdata;
	struct device_node *node = NULL;

	rcu_read_lock();
	local_devdata = rcu_dereference(devdata);
	if (local_devdata)
		node = local_devdata->dev->of_node;

	if (local_devdata &&
			action == OF_RECONFIG_UPDATE_PROPERTY &&
			!strcmp(upd->dn->name, node->name)) {
		rcu_read_unlock();
		nx842_OF_upd(upd->prop);
	} else
		rcu_read_unlock();

	return NOTIFY_OK;
}

static struct notifier_block nx842_of_nb = {
	.notifier_call = nx842_OF_notifier,
};
#define nx842_counter_read(_name) \
static ssize_t nx842_##_name##_show(struct device *dev, \
		struct device_attribute *attr, \
		char *buf) { \
	struct nx842_devdata *local_devdata; \
	int p = 0; \
	rcu_read_lock(); \
	local_devdata = rcu_dereference(devdata); \
	if (local_devdata) \
		p = snprintf(buf, PAGE_SIZE, "%ld\n", \
			atomic64_read(&local_devdata->counters->_name)); \
	rcu_read_unlock(); \
	return p; \
}

#define NX842DEV_COUNTER_ATTR_RO(_name) \
	nx842_counter_read(_name); \
	static struct device_attribute dev_attr_##_name = __ATTR(_name, \
						0444, \
						nx842_##_name##_show, \
						NULL);

NX842DEV_COUNTER_ATTR_RO(comp_complete);
NX842DEV_COUNTER_ATTR_RO(comp_failed);
NX842DEV_COUNTER_ATTR_RO(decomp_complete);
NX842DEV_COUNTER_ATTR_RO(decomp_failed);
NX842DEV_COUNTER_ATTR_RO(swdecomp);

static ssize_t nx842_timehist_show(struct device *,
		struct device_attribute *, char *);

static struct device_attribute dev_attr_comp_times = __ATTR(comp_times, 0444,
		nx842_timehist_show, NULL);
static struct device_attribute dev_attr_decomp_times = __ATTR(decomp_times,
		0444, nx842_timehist_show, NULL);

static ssize_t nx842_timehist_show(struct device *dev,
		struct device_attribute *attr, char *buf) {
	char *p = buf;
	struct nx842_devdata *local_devdata;
	atomic64_t *times;
	int bytes_remain = PAGE_SIZE;
	int bytes;
	int i;

	rcu_read_lock();
	local_devdata = rcu_dereference(devdata);
	if (!local_devdata) {
		rcu_read_unlock();
		return 0;
	}

	if (attr == &dev_attr_comp_times)
		times = local_devdata->counters->comp_times;
	else if (attr == &dev_attr_decomp_times)
		times = local_devdata->counters->decomp_times;
	else {
		rcu_read_unlock();
		return 0;
	}

	for (i = 0; i < (NX842_HIST_SLOTS - 2); i++) {
		bytes = snprintf(p, bytes_remain, "%u-%uus:\t%ld\n",
			       i ? (2<<(i-1)) : 0, (2<<i)-1,
			       atomic64_read(&times[i]));
		bytes_remain -= bytes;
		p += bytes;
	}
	/* The last bucket holds everything over
	 * 2<<(NX842_HIST_SLOTS - 2) us */
	bytes = snprintf(p, bytes_remain, "%uus - :\t%ld\n",
			2<<(NX842_HIST_SLOTS - 2),
			atomic64_read(&times[(NX842_HIST_SLOTS - 1)]));
	p += bytes;

	rcu_read_unlock();
	return p - buf;
}

static struct attribute *nx842_sysfs_entries[] = {
	&dev_attr_comp_complete.attr,
	&dev_attr_comp_failed.attr,
	&dev_attr_decomp_complete.attr,
	&dev_attr_decomp_failed.attr,
	&dev_attr_swdecomp.attr,
	&dev_attr_comp_times.attr,
	&dev_attr_decomp_times.attr,
	NULL,
};

static struct attribute_group nx842_attribute_group = {
	.name = NULL,	/* put in device directory */
	.attrs = nx842_sysfs_entries,
};
static int __init nx842_probe(struct vio_dev *viodev,
				  const struct vio_device_id *id)
{
	struct nx842_devdata *old_devdata, *new_devdata = NULL;
	unsigned long flags;
	int ret = 0;

	spin_lock_irqsave(&devdata_mutex, flags);
	old_devdata = rcu_dereference_check(devdata,
			lockdep_is_held(&devdata_mutex));

	if (old_devdata && old_devdata->vdev != NULL) {
		dev_err(&viodev->dev, "%s: Attempt to register more than one instance of the hardware\n", __func__);
		ret = -1;
		goto error_unlock;
	}

	dev_set_drvdata(&viodev->dev, NULL);

	new_devdata = kzalloc(sizeof(*new_devdata), GFP_NOFS);
	if (!new_devdata) {
		dev_err(&viodev->dev, "%s: Could not allocate memory for device data\n", __func__);
		ret = -ENOMEM;
		goto error_unlock;
	}
	new_devdata->counters = kzalloc(sizeof(*new_devdata->counters),
			GFP_NOFS);
	if (!new_devdata->counters) {
		dev_err(&viodev->dev, "%s: Could not allocate memory for performance counters\n", __func__);
		ret = -ENOMEM;
		goto error_unlock;
	}

	new_devdata->vdev = viodev;
	new_devdata->dev = &viodev->dev;
	nx842_OF_set_defaults(new_devdata);

	rcu_assign_pointer(devdata, new_devdata);
	spin_unlock_irqrestore(&devdata_mutex, flags);
	synchronize_rcu();
	kfree(old_devdata);

	of_reconfig_notifier_register(&nx842_of_nb);

	ret = nx842_OF_upd(NULL);
	if (ret && ret != -ENODEV) {
		dev_err(&viodev->dev, "could not parse device tree. %d\n", ret);
		ret = -1;
		goto error;
	}

	rcu_read_lock();
	dev_set_drvdata(&viodev->dev, rcu_dereference(devdata));
	rcu_read_unlock();

	if (sysfs_create_group(&viodev->dev.kobj, &nx842_attribute_group)) {
		dev_err(&viodev->dev, "could not create sysfs device attributes\n");
		ret = -1;
		goto error;
	}

	return 0;

error_unlock:
	spin_unlock_irqrestore(&devdata_mutex, flags);
	if (new_devdata)
		kfree(new_devdata->counters);
	kfree(new_devdata);
error:
	return ret;
}

static int __exit nx842_remove(struct vio_dev *viodev)
{
	struct nx842_devdata *old_devdata;
	unsigned long flags;

	pr_info("Removing IBM Power 842 compression device\n");
	sysfs_remove_group(&viodev->dev.kobj, &nx842_attribute_group);

	spin_lock_irqsave(&devdata_mutex, flags);
	old_devdata = rcu_dereference_check(devdata,
			lockdep_is_held(&devdata_mutex));
	of_reconfig_notifier_unregister(&nx842_of_nb);
	RCU_INIT_POINTER(devdata, NULL);
	spin_unlock_irqrestore(&devdata_mutex, flags);
	synchronize_rcu();
	dev_set_drvdata(&viodev->dev, NULL);
	if (old_devdata)
		kfree(old_devdata->counters);
	kfree(old_devdata);
	return 0;
}

static struct vio_device_id nx842_driver_ids[] = {
	{"ibm,compression-v1", "ibm,compression"},
	{"", ""},
};

static struct vio_driver nx842_driver = {
	.name = MODULE_NAME,
	.probe = nx842_probe,
	.remove = nx842_remove,
	.get_desired_dma = nx842_get_desired_dma,
	.id_table = nx842_driver_ids,
};
static int __init nx842_init(void)
{
	struct nx842_devdata *new_devdata;
	pr_info("Registering IBM Power 842 compression driver\n");

	RCU_INIT_POINTER(devdata, NULL);
	new_devdata = kzalloc(sizeof(*new_devdata), GFP_KERNEL);
	if (!new_devdata) {
		pr_err("Could not allocate memory for device data\n");
		return -ENOMEM;
	}
	new_devdata->status = UNAVAILABLE;
	RCU_INIT_POINTER(devdata, new_devdata);

	return vio_register_driver(&nx842_driver);
}

module_init(nx842_init);

static void __exit nx842_exit(void)
{
	struct nx842_devdata *old_devdata;
	unsigned long flags;

	pr_info("Exiting IBM Power 842 compression driver\n");
	spin_lock_irqsave(&devdata_mutex, flags);
	old_devdata = rcu_dereference_check(devdata,
			lockdep_is_held(&devdata_mutex));
	RCU_INIT_POINTER(devdata, NULL);
	spin_unlock_irqrestore(&devdata_mutex, flags);
	synchronize_rcu();
	if (old_devdata)
		dev_set_drvdata(old_devdata->dev, NULL);
	kfree(old_devdata);
	vio_unregister_driver(&nx842_driver);
}

module_exit(nx842_exit);
/*********************************
 * 842 software decompressor
 *********************************/
typedef int (*sw842_template_op)(const char **, int *, unsigned char **,
						struct sw842_fifo *);

static int sw842_data8(const char **, int *, unsigned char **,
						struct sw842_fifo *);
static int sw842_data4(const char **, int *, unsigned char **,
						struct sw842_fifo *);
static int sw842_data2(const char **, int *, unsigned char **,
						struct sw842_fifo *);
static int sw842_ptr8(const char **, int *, unsigned char **,
						struct sw842_fifo *);
static int sw842_ptr4(const char **, int *, unsigned char **,
						struct sw842_fifo *);
static int sw842_ptr2(const char **, int *, unsigned char **,
						struct sw842_fifo *);

/* special templates */
#define SW842_TMPL_REPEAT 0x1B
#define SW842_TMPL_ZEROS 0x1C
#define SW842_TMPL_EOF 0x1E

static sw842_template_op sw842_tmpl_ops[26][4] = {
	{ sw842_data8, NULL}, /* 0 (00000) */
	{ sw842_data4, sw842_data2, sw842_ptr2, NULL},
	{ sw842_data4, sw842_ptr2, sw842_data2, NULL},
	{ sw842_data4, sw842_ptr2, sw842_ptr2, NULL},
	{ sw842_data4, sw842_ptr4, NULL},
	{ sw842_data2, sw842_ptr2, sw842_data4, NULL},
	{ sw842_data2, sw842_ptr2, sw842_data2, sw842_ptr2},
	{ sw842_data2, sw842_ptr2, sw842_ptr2, sw842_data2},
	{ sw842_data2, sw842_ptr2, sw842_ptr2, sw842_ptr2,},
	{ sw842_data2, sw842_ptr2, sw842_ptr4, NULL},
	{ sw842_ptr2, sw842_data2, sw842_data4, NULL}, /* 10 (01010) */
	{ sw842_ptr2, sw842_data4, sw842_ptr2, NULL},
	{ sw842_ptr2, sw842_data2, sw842_ptr2, sw842_data2},
	{ sw842_ptr2, sw842_data2, sw842_ptr2, sw842_ptr2},
	{ sw842_ptr2, sw842_data2, sw842_ptr4, NULL},
	{ sw842_ptr2, sw842_ptr2, sw842_data4, NULL},
	{ sw842_ptr2, sw842_ptr2, sw842_data2, sw842_ptr2},
	{ sw842_ptr2, sw842_ptr2, sw842_ptr2, sw842_data2},
	{ sw842_ptr2, sw842_ptr2, sw842_ptr2, sw842_ptr2},
	{ sw842_ptr2, sw842_ptr2, sw842_ptr4, NULL},
	{ sw842_ptr4, sw842_data4, NULL}, /* 20 (10100) */
	{ sw842_ptr4, sw842_data2, sw842_ptr2, NULL},
	{ sw842_ptr4, sw842_ptr2, sw842_data2, NULL},
	{ sw842_ptr4, sw842_ptr2, sw842_ptr2, NULL},
	{ sw842_ptr4, sw842_ptr4, NULL},
	{ sw842_ptr8, NULL}
};
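
/*
 * How the table above is used (a summary of the decode loop in
 * sw842_decompress() below): each 5-bit template selects up to four ops
 * that together reproduce one 8-byte group of output, either from
 * literal data (data2/data4/data8) or from back-references into the
 * 2-, 4- and 8-byte history fifos (ptr2/ptr4/ptr8).  Templates 0x1B,
 * 0x1C and 0x1E are handled specially as repeat, zeros and
 * end-of-stream.
 */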
/* Software decompress helpers */
static uint8_t sw842_get_byte(const char *buf, int bit)
{
	uint8_t tmpl;
	uint16_t tmp;
	tmp = htons(*(uint16_t *)(buf));
	tmp = (uint16_t)(tmp << bit);
	tmp = ntohs(tmp);
	memcpy(&tmpl, &tmp, 1);
	return tmpl;
}

static uint8_t sw842_get_template(const char **buf, int *bit)
{
	uint8_t byte;
	byte = sw842_get_byte(*buf, *bit);
	byte = byte >> 3;
	byte &= 0x1F;
	*buf += (*bit + 5) / 8;
	*bit = (*bit + 5) % 8;
	return byte;
}

/* repeat_count is a 6-bit field, read the same way as the template */
static uint8_t sw842_get_repeat_count(const char **buf, int *bit)
{
	uint8_t byte;
	byte = sw842_get_byte(*buf, *bit);
	byte = byte >> 2;
	byte &= 0x3F;
	*buf += (*bit + 6) / 8;
	*bit = (*bit + 6) % 8;
	return byte;
}

static uint8_t sw842_get_ptr2(const char **buf, int *bit)
{
	uint8_t ptr;
	ptr = sw842_get_byte(*buf, *bit);
	(*buf)++;
	return ptr;
}

static uint16_t sw842_get_ptr4(const char **buf, int *bit,
		struct sw842_fifo *fifo)
{
	uint16_t ptr;
	ptr = htons(*(uint16_t *)(*buf));
	ptr = (uint16_t)(ptr << *bit);
	ptr = ptr >> 7;
	ptr &= 0x01FF;
	*buf += (*bit + 9) / 8;
	*bit = (*bit + 9) % 8;
	return ptr;
}

static uint8_t sw842_get_ptr8(const char **buf, int *bit,
		struct sw842_fifo *fifo)
{
	return sw842_get_ptr2(buf, bit);
}
/* Software decompress template ops */

static int sw842_data8(const char **inbuf, int *inbit,
		unsigned char **outbuf, struct sw842_fifo *fifo)
{
	int ret;

	ret = sw842_data4(inbuf, inbit, outbuf, fifo);
	if (ret)
		return ret;
	ret = sw842_data4(inbuf, inbit, outbuf, fifo);
	return ret;
}

static int sw842_data4(const char **inbuf, int *inbit,
		unsigned char **outbuf, struct sw842_fifo *fifo)
{
	int ret;

	ret = sw842_data2(inbuf, inbit, outbuf, fifo);
	if (ret)
		return ret;
	ret = sw842_data2(inbuf, inbit, outbuf, fifo);
	return ret;
}

static int sw842_data2(const char **inbuf, int *inbit,
		unsigned char **outbuf, struct sw842_fifo *fifo)
{
	**outbuf = sw842_get_byte(*inbuf, *inbit);
	(*inbuf)++;
	(*outbuf)++;
	**outbuf = sw842_get_byte(*inbuf, *inbit);
	(*inbuf)++;
	(*outbuf)++;
	return 0;
}

static int sw842_ptr8(const char **inbuf, int *inbit,
		unsigned char **outbuf, struct sw842_fifo *fifo)
{
	uint8_t ptr;

	ptr = sw842_get_ptr8(inbuf, inbit, fifo);

	if (!fifo->f84_full && (ptr >= fifo->f8_count))
		return 1;

	memcpy(*outbuf, fifo->f8[ptr], 8);
	*outbuf += 8;

	return 0;
}

static int sw842_ptr4(const char **inbuf, int *inbit,
		unsigned char **outbuf, struct sw842_fifo *fifo)
{
	uint16_t ptr;

	ptr = sw842_get_ptr4(inbuf, inbit, fifo);

	if (!fifo->f84_full && (ptr >= fifo->f4_count))
		return 1;

	memcpy(*outbuf, fifo->f4[ptr], 4);
	*outbuf += 4;

	return 0;
}

static int sw842_ptr2(const char **inbuf, int *inbit,
		unsigned char **outbuf, struct sw842_fifo *fifo)
{
	uint8_t ptr;

	ptr = sw842_get_ptr2(inbuf, inbit);

	if (!fifo->f2_full && (ptr >= fifo->f2_count))
		return 1;

	memcpy(*outbuf, fifo->f2[ptr], 2);
	*outbuf += 2;

	return 0;
}

static void sw842_copy_to_fifo(const char *buf, struct sw842_fifo *fifo)
{
	unsigned char initial_f2count = fifo->f2_count;

	memcpy(fifo->f8[fifo->f8_count], buf, 8);
	fifo->f4_count += 2;
	fifo->f8_count += 1;

	if (!fifo->f84_full && fifo->f4_count >= 512) {
		fifo->f84_full = 1;
		fifo->f4_count /= 512;
	}

	memcpy(fifo->f2[fifo->f2_count++], buf, 2);
	memcpy(fifo->f2[fifo->f2_count++], buf + 2, 2);
	memcpy(fifo->f2[fifo->f2_count++], buf + 4, 2);
	memcpy(fifo->f2[fifo->f2_count++], buf + 6, 2);
	if (fifo->f2_count < initial_f2count)
		fifo->f2_full = 1;
}
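
/*
 * Each fully decoded 8-byte group is pushed into all three history
 * fifos above: one 8-byte entry, two 4-byte entries (sharing storage
 * with f8 via the union) and four 2-byte entries.  The *_full flags
 * record when a fifo has wrapped, which is what lets the ptr ops
 * accept any index once the history is completely populated.
 */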
static int sw842_decompress(const unsigned char *src, int srclen,
			unsigned char *dst, int *destlen,
			const void *wrkmem)
{
	uint8_t tmpl;
	const char *inbuf;
	int inbit = 0;
	unsigned char *outbuf, *outbuf_end, *origbuf, *prevbuf;
	const char *inbuf_end;
	sw842_template_op op;
	int opindex;
	int i, repeat_count;
	struct sw842_fifo *fifo;
	int ret = 0;

	fifo = &((struct nx842_workmem *)(wrkmem))->swfifo;
	memset(fifo, 0, sizeof(*fifo));

	origbuf = NULL;
	inbuf = src;
	inbuf_end = src + srclen;
	outbuf = dst;
	outbuf_end = dst + *destlen;

	while ((tmpl = sw842_get_template(&inbuf, &inbit)) != SW842_TMPL_EOF) {
		if (inbuf >= inbuf_end) {
			ret = -EINVAL;
			goto out;
		}

		opindex = 0;
		prevbuf = origbuf;
		origbuf = outbuf;

		switch (tmpl) {
		case SW842_TMPL_REPEAT:
			if (prevbuf == NULL) {
				ret = -EINVAL;
				goto out;
			}

			repeat_count = sw842_get_repeat_count(&inbuf,
								&inbit) + 1;

			/* Did the repeat count advance past the end of input */
			if (inbuf > inbuf_end) {
				ret = -EINVAL;
				goto out;
			}

			for (i = 0; i < repeat_count; i++) {
				/* Would this overflow the output buffer */
				if ((outbuf + 8) > outbuf_end) {
					ret = -ENOSPC;
					goto out;
				}
				memcpy(outbuf, prevbuf, 8);
				sw842_copy_to_fifo(outbuf, fifo);
				outbuf += 8;
			}
			break;

		case SW842_TMPL_ZEROS:
			/* Would this overflow the output buffer */
			if ((outbuf + 8) > outbuf_end) {
				ret = -ENOSPC;
				goto out;
			}

			memset(outbuf, 0, 8);
			sw842_copy_to_fifo(outbuf, fifo);
			outbuf += 8;
			break;

		default:
			if (tmpl > 25) {
				ret = -EINVAL;
				goto out;
			}

			/* Does this go past the end of the input buffer */
			if ((inbuf + 2) > inbuf_end) {
				ret = -EINVAL;
				goto out;
			}

			/* Would this overflow the output buffer */
			if ((outbuf + 8) > outbuf_end) {
				ret = -ENOSPC;
				goto out;
			}

			while (opindex < 4 &&
				(op = sw842_tmpl_ops[tmpl][opindex++])
					!= NULL) {
				ret = (*op)(&inbuf, &inbit, &outbuf, fifo);
				if (ret) {
					ret = -EINVAL;
					goto out;
				}
				sw842_copy_to_fifo(origbuf, fifo);
			}
		}
	}

out:
	if (!ret)
		*destlen = (unsigned int)(outbuf - dst);
	else
		*destlen = 0;

	return ret;
}