nx-842.c

/*
 * Driver for IBM Power 842 compression accelerator
 *
 * This program is free software; you can redistribute it and/or modify
 * it under the terms of the GNU General Public License as published by
 * the Free Software Foundation; either version 2 of the License, or
 * (at your option) any later version.
 *
 * This program is distributed in the hope that it will be useful,
 * but WITHOUT ANY WARRANTY; without even the implied warranty of
 * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE.  See the
 * GNU General Public License for more details.
 *
 * You should have received a copy of the GNU General Public License
 * along with this program; if not, write to the Free Software
 * Foundation, 51 Franklin Street, Fifth Floor, Boston, MA 02110-1301, USA.
 *
 * Copyright (C) IBM Corporation, 2012
 *
 * Authors: Robert Jennings <rcj@linux.vnet.ibm.com>
 *          Seth Jennings <sjenning@linux.vnet.ibm.com>
 */

#include <linux/kernel.h>
#include <linux/module.h>
#include <linux/nx842.h>
#include <linux/of.h>
#include <linux/slab.h>

#include <asm/page.h>
#include <asm/vio.h>

#include "nx_csbcpb.h" /* struct nx_csbcpb */

#define MODULE_NAME "nx-compress"
MODULE_LICENSE("GPL");
MODULE_AUTHOR("Robert Jennings <rcj@linux.vnet.ibm.com>");
MODULE_DESCRIPTION("842 H/W Compression driver for IBM Power processors");

#define SHIFT_4K 12
#define SHIFT_64K 16
#define SIZE_4K (1UL << SHIFT_4K)
#define SIZE_64K (1UL << SHIFT_64K)

/* IO buffer must be 128 byte aligned */
#define IO_BUFFER_ALIGN 128

struct nx842_header {
        int blocks_nr; /* number of compressed blocks */
        int offset; /* offset of the first block (from beginning of header) */
        int sizes[0]; /* size of compressed blocks */
};

static inline int nx842_header_size(const struct nx842_header *hdr)
{
        return sizeof(struct nx842_header) +
                        hdr->blocks_nr * sizeof(hdr->sizes[0]);
}
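
/*
 * Worked example of the header layout (derived from nx842_compress below,
 * assuming a 64K PAGE_SIZE and a 4K max_sync_size): blocks_nr = 64K/4K = 16,
 * so the header itself occupies 8 + 16 * 4 = 72 bytes; offset points at the
 * first 128-byte-aligned byte following the header (128 if the output buffer
 * itself happens to be 128-byte aligned); sizes[i] holds the compressed
 * length of block i, with a negative value marking a block that was stored
 * uncompressed.
 */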
/* Macros for fields within nx_csbcpb */
/* Check the valid bit within the csbcpb valid field */
#define NX842_CSBCBP_VALID_CHK(x) (x & BIT_MASK(7))

/* CE macros operate on the completion_extension field bits in the csbcpb.
 * CE0 0=full completion, 1=partial completion
 * CE1 0=CE0 indicates completion, 1=termination (output may be modified)
 * CE2 0=processed_bytes is source bytes, 1=processed_bytes is target bytes */
#define NX842_CSBCPB_CE0(x)     (x & BIT_MASK(7))
#define NX842_CSBCPB_CE1(x)     (x & BIT_MASK(6))
#define NX842_CSBCPB_CE2(x)     (x & BIT_MASK(5))

/* The NX unit accepts data only on 4K page boundaries */
#define NX842_HW_PAGE_SHIFT     SHIFT_4K
#define NX842_HW_PAGE_SIZE      (ASM_CONST(1) << NX842_HW_PAGE_SHIFT)
#define NX842_HW_PAGE_MASK      (~(NX842_HW_PAGE_SIZE-1))

enum nx842_status {
        UNAVAILABLE,
        AVAILABLE
};

struct ibm_nx842_counters {
        atomic64_t comp_complete;
        atomic64_t comp_failed;
        atomic64_t decomp_complete;
        atomic64_t decomp_failed;
        atomic64_t swdecomp;
        atomic64_t comp_times[32];
        atomic64_t decomp_times[32];
};

static struct nx842_devdata {
        struct vio_dev *vdev;
        struct device *dev;
        struct ibm_nx842_counters *counters;
        unsigned int max_sg_len;
        unsigned int max_sync_size;
        unsigned int max_sync_sg;
        enum nx842_status status;
} __rcu *devdata;
static DEFINE_SPINLOCK(devdata_mutex);

#define NX842_COUNTER_INC(_x) \
static inline void nx842_inc_##_x( \
        const struct nx842_devdata *dev) { \
        if (dev) \
                atomic64_inc(&dev->counters->_x); \
}
NX842_COUNTER_INC(comp_complete);
NX842_COUNTER_INC(comp_failed);
NX842_COUNTER_INC(decomp_complete);
NX842_COUNTER_INC(decomp_failed);
NX842_COUNTER_INC(swdecomp);

#define NX842_HIST_SLOTS 16

static void ibm_nx842_incr_hist(atomic64_t *times, unsigned int time)
{
        int bucket = fls(time);

        if (bucket)
                bucket = min((NX842_HIST_SLOTS - 1), bucket - 1);

        atomic64_inc(&times[bucket]);
}
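
/*
 * Bucket layout, derived from the fls()-based indexing above: bucket 0
 * collects times of 0-1us, bucket 1 collects 2-3us, bucket 2 collects
 * 4-7us, and so on in powers of two; everything at or above
 * 2^(NX842_HIST_SLOTS - 1) us (32768us) is capped into the last bucket.
 * For example:
 *
 *      ibm_nx842_incr_hist(times, 5);  // fls(5) = 3 -> bucket 2 (4-7us)
 */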
/* NX unit operation flags */
#define NX842_OP_COMPRESS       0x0
#define NX842_OP_CRC            0x1
#define NX842_OP_DECOMPRESS     0x2
#define NX842_OP_COMPRESS_CRC   (NX842_OP_COMPRESS | NX842_OP_CRC)
#define NX842_OP_DECOMPRESS_CRC (NX842_OP_DECOMPRESS | NX842_OP_CRC)
#define NX842_OP_ASYNC          (1<<23)
#define NX842_OP_NOTIFY         (1<<22)
#define NX842_OP_NOTIFY_INT(x)  ((x & 0xff)<<8)

static unsigned long nx842_get_desired_dma(struct vio_dev *viodev)
{
        /* No use of DMA mappings within the driver. */
        return 0;
}

struct nx842_slentry {
        unsigned long ptr; /* Real address (use __pa()) */
        unsigned long len;
};

/* pHyp scatterlist entry */
struct nx842_scatterlist {
        int entry_nr; /* number of slentries */
        struct nx842_slentry *entries; /* ptr to array of slentries */
};

/* Does not include sizeof(entry_nr) in the size */
static inline unsigned long nx842_get_scatterlist_size(
                                struct nx842_scatterlist *sl)
{
        return sl->entry_nr * sizeof(struct nx842_slentry);
}

static inline unsigned long nx842_get_pa(void *addr)
{
        if (is_vmalloc_addr(addr))
                return page_to_phys(vmalloc_to_page(addr))
                       + offset_in_page(addr);
        else
                return __pa(addr);
}

static int nx842_build_scatterlist(unsigned long buf, int len,
                        struct nx842_scatterlist *sl)
{
        unsigned long nextpage;
        struct nx842_slentry *entry;

        sl->entry_nr = 0;

        entry = sl->entries;
        while (len) {
                entry->ptr = nx842_get_pa((void *)buf);
                nextpage = ALIGN(buf + 1, NX842_HW_PAGE_SIZE);
                if (nextpage < buf + len) {
                        /* we aren't at the end yet */
                        if (IS_ALIGNED(buf, NX842_HW_PAGE_SIZE))
                                /* we are in the middle (or beginning) */
                                entry->len = NX842_HW_PAGE_SIZE;
                        else
                                /* we are at the beginning */
                                entry->len = nextpage - buf;
                } else {
                        /* at the end */
                        entry->len = len;
                }

                len -= entry->len;
                buf += entry->len;
                sl->entry_nr++;
                entry++;
        }

        return 0;
}
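
/*
 * Example, derived from the loop above: for a buffer that starts 0x100
 * bytes into a hardware page with len = 8192, three slentries are built,
 * covering 4096 - 0x100 = 3840 bytes (up to the first page boundary),
 * then 4096 bytes (one full hardware page), then the remaining 256 bytes;
 * each entry's ptr is the real address returned by nx842_get_pa().
 */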
/*
 * Working memory for software decompression
 */
struct sw842_fifo {
        union {
                char f8[256][8];
                char f4[512][4];
        };
        char f2[256][2];
        unsigned char f84_full;
        unsigned char f2_full;
        unsigned char f8_count;
        unsigned char f2_count;
        unsigned int f4_count;
};

/*
 * Working memory for crypto API
 */
struct nx842_workmem {
        char bounce[PAGE_SIZE]; /* bounce buffer for decompression input */
        union {
                /* hardware working memory */
                struct {
                        /* scatterlist */
                        char slin[SIZE_4K];
                        char slout[SIZE_4K];
                        /* coprocessor status/parameter block */
                        struct nx_csbcpb csbcpb;
                };
                /* software working memory */
                struct sw842_fifo swfifo; /* software decompression fifo */
        };
};

int nx842_get_workmem_size(void)
{
        return sizeof(struct nx842_workmem) + NX842_HW_PAGE_SIZE;
}
EXPORT_SYMBOL_GPL(nx842_get_workmem_size);

int nx842_get_workmem_size_aligned(void)
{
        return sizeof(struct nx842_workmem);
}
EXPORT_SYMBOL_GPL(nx842_get_workmem_size_aligned);
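
/*
 * Callers supply the working memory themselves.  nx842_get_workmem_size()
 * includes one extra hardware page because nx842_compress() and
 * nx842_decompress() align the buffer up to NX842_HW_PAGE_SIZE internally,
 * so an unaligned allocation is fine; nx842_get_workmem_size_aligned() is
 * the smaller size for callers that already guarantee that alignment.
 * A minimal sketch:
 *
 *      void *wmem = kmalloc(nx842_get_workmem_size(), GFP_KERNEL);
 */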
static int nx842_validate_result(struct device *dev,
                        struct cop_status_block *csb)
{
        /* The csb must be valid after returning from vio_h_cop_sync */
        if (!NX842_CSBCBP_VALID_CHK(csb->valid)) {
                dev_err(dev, "%s: csbcpb not valid upon completion.\n",
                                __func__);
                dev_dbg(dev, "valid:0x%02x cs:0x%02x cc:0x%02x ce:0x%02x\n",
                                csb->valid,
                                csb->crb_seq_number,
                                csb->completion_code,
                                csb->completion_extension);
                dev_dbg(dev, "processed_bytes:%d address:0x%016lx\n",
                                csb->processed_byte_count,
                                (unsigned long)csb->address);
                return -EIO;
        }

        /* Check return values from the hardware in the CSB */
        switch (csb->completion_code) {
        case 0: /* Completed without error */
                break;
        case 64: /* Target bytes > Source bytes during compression */
        case 13: /* Output buffer too small */
                dev_dbg(dev, "%s: Compression output larger than input\n",
                                        __func__);
                return -ENOSPC;
        case 66: /* Input data contains an illegal template field */
        case 67: /* Template indicates data past the end of the input stream */
                dev_dbg(dev, "%s: Bad data for decompression (code:%d)\n",
                                        __func__, csb->completion_code);
                return -EINVAL;
        default:
                dev_dbg(dev, "%s: Unspecified error (code:%d)\n",
                                        __func__, csb->completion_code);
                return -EIO;
        }

        /* Hardware sanity check */
        if (!NX842_CSBCPB_CE2(csb->completion_extension)) {
                dev_err(dev, "%s: No error returned by hardware, but "
                                "data returned is unusable, contact support.\n"
                                "(Additional info: csbcpb->processed bytes "
                                "does not specify processed bytes for the "
                                "target buffer.)\n", __func__);
                return -EIO;
        }

        return 0;
}
/**
 * nx842_compress - Compress data using the 842 algorithm
 *
 * Compression provided by the NX842 coprocessor on IBM Power systems.
 * The input buffer is compressed and the result is stored in the
 * provided output buffer.
 *
 * Upon return from this function @outlen contains the length of the
 * compressed data.  If there is an error then @outlen will be 0 and an
 * error will be specified by the return code from this function.
 *
 * @in: Pointer to input buffer, must be page aligned
 * @inlen: Length of input buffer, must be PAGE_SIZE
 * @out: Pointer to output buffer
 * @outlen: Length of output buffer
 * @wmem: ptr to buffer for working memory, size determined by
 *        nx842_get_workmem_size()
 *
 * Returns:
 *   0          Success, output of length @outlen stored in the buffer at @out
 *   -ENOMEM    Unable to allocate internal buffers
 *   -ENOSPC    Output buffer is too small
 *   -EMSGSIZE  XXX Difficult to describe this limitation
 *   -EIO       Internal error
 *   -ENODEV    Hardware unavailable
 */
int nx842_compress(const unsigned char *in, unsigned int inlen,
                        unsigned char *out, unsigned int *outlen, void *wmem)
{
        struct nx842_header *hdr;
        struct nx842_devdata *local_devdata;
        struct device *dev = NULL;
        struct nx842_workmem *workmem;
        struct nx842_scatterlist slin, slout;
        struct nx_csbcpb *csbcpb;
        int ret = 0, max_sync_size, i, bytesleft, size, hdrsize;
        unsigned long inbuf, outbuf, padding;
        struct vio_pfo_op op = {
                .done = NULL,
                .handle = 0,
                .timeout = 0,
        };
        unsigned long start_time = get_tb();

        /*
         * Make sure input buffer is 64k page aligned.  This is assumed since
         * this driver is designed for page compression only (for now).  This
         * is very nice since we can now use direct DDE(s) for the input and
         * the alignment is guaranteed.
         */
        inbuf = (unsigned long)in;
        if (!IS_ALIGNED(inbuf, PAGE_SIZE) || inlen != PAGE_SIZE)
                return -EINVAL;

        rcu_read_lock();
        local_devdata = rcu_dereference(devdata);
        if (!local_devdata || !local_devdata->dev) {
                rcu_read_unlock();
                return -ENODEV;
        }
        max_sync_size = local_devdata->max_sync_size;
        dev = local_devdata->dev;

        /* Create the header */
        hdr = (struct nx842_header *)out;
        hdr->blocks_nr = PAGE_SIZE / max_sync_size;
        hdrsize = nx842_header_size(hdr);
        outbuf = (unsigned long)out + hdrsize;
        bytesleft = *outlen - hdrsize;

        /* Init scatterlist */
        workmem = (struct nx842_workmem *)ALIGN((unsigned long)wmem,
                NX842_HW_PAGE_SIZE);
        slin.entries = (struct nx842_slentry *)workmem->slin;
        slout.entries = (struct nx842_slentry *)workmem->slout;

        /* Init operation */
        op.flags = NX842_OP_COMPRESS;
        csbcpb = &workmem->csbcpb;
        memset(csbcpb, 0, sizeof(*csbcpb));
        op.csbcpb = nx842_get_pa(csbcpb);
        op.out = nx842_get_pa(slout.entries);

        for (i = 0; i < hdr->blocks_nr; i++) {
                /*
                 * Aligning the output blocks to 128 bytes does waste space,
                 * but it prevents the need for bounce buffers and memory
                 * copies.  It also simplifies the code a lot.  In the worst
                 * case (64k page, 4k max_sync_size), you lose up to
                 * (128*16)/64k = ~3% the compression factor.  For 64k
                 * max_sync_size, the loss would be at most 128/64k = ~0.2%.
                 */
                padding = ALIGN(outbuf, IO_BUFFER_ALIGN) - outbuf;
                outbuf += padding;
                bytesleft -= padding;
                if (i == 0)
                        /* save offset into first block in header */
                        hdr->offset = padding + hdrsize;

                if (bytesleft <= 0) {
                        ret = -ENOSPC;
                        goto unlock;
                }

                /*
                 * NOTE: If the default max_sync_size is changed from 4k
                 * to 64k, remove the "likely" case below, since a
                 * scatterlist will always be needed.
                 */
                if (likely(max_sync_size == NX842_HW_PAGE_SIZE)) {
                        /* Create direct DDE */
                        op.in = nx842_get_pa((void *)inbuf);
                        op.inlen = max_sync_size;
                } else {
                        /* Create indirect DDE (scatterlist) */
                        nx842_build_scatterlist(inbuf, max_sync_size, &slin);
                        op.in = nx842_get_pa(slin.entries);
                        op.inlen = -nx842_get_scatterlist_size(&slin);
                }

                /*
                 * If max_sync_size != NX842_HW_PAGE_SIZE, an indirect
                 * DDE is required for the outbuf.
                 * If max_sync_size == NX842_HW_PAGE_SIZE, outbuf must
                 * also be page aligned (1 in 128/4k=32 chance) in order
                 * to use a direct DDE.
                 * This is unlikely, just use an indirect DDE always.
                 */
                nx842_build_scatterlist(outbuf,
                        min(bytesleft, max_sync_size), &slout);
                /* op.out set before loop */
                op.outlen = -nx842_get_scatterlist_size(&slout);

                /* Send request to pHyp */
                ret = vio_h_cop_sync(local_devdata->vdev, &op);

                /* Check for pHyp error */
                if (ret) {
                        dev_dbg(dev, "%s: vio_h_cop_sync error (ret=%d, hret=%ld)\n",
                                __func__, ret, op.hcall_err);
                        ret = -EIO;
                        goto unlock;
                }

                /* Check for hardware error */
                ret = nx842_validate_result(dev, &csbcpb->csb);
                if (ret && ret != -ENOSPC)
                        goto unlock;

                /* Handle incompressible data */
                if (unlikely(ret == -ENOSPC)) {
                        if (bytesleft < max_sync_size) {
                                /*
                                 * Not enough space left in the output buffer
                                 * to store uncompressed block
                                 */
                                goto unlock;
                        } else {
                                /* Store incompressible block */
                                memcpy((void *)outbuf, (void *)inbuf,
                                        max_sync_size);
                                hdr->sizes[i] = -max_sync_size;
                                outbuf += max_sync_size;
                                bytesleft -= max_sync_size;
                                /* Reset ret, incompressible data handled */
                                ret = 0;
                        }
                } else {
                        /* Normal case, compression was successful */
                        size = csbcpb->csb.processed_byte_count;
                        dev_dbg(dev, "%s: processed_bytes=%d\n",
                                __func__, size);
                        hdr->sizes[i] = size;
                        outbuf += size;
                        bytesleft -= size;
                }

                inbuf += max_sync_size;
        }

        *outlen = (unsigned int)(outbuf - (unsigned long)out);

unlock:
        if (ret)
                nx842_inc_comp_failed(local_devdata);
        else {
                nx842_inc_comp_complete(local_devdata);
                ibm_nx842_incr_hist(local_devdata->counters->comp_times,
                        (get_tb() - start_time) / tb_ticks_per_usec);
        }
        rcu_read_unlock();
        return ret;
}
EXPORT_SYMBOL_GPL(nx842_compress);
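
/*
 * Illustrative usage sketch (not taken from an in-tree caller; the names
 * page, cbuf, clen and wmem are hypothetical).  The input must be exactly
 * one page-aligned page; *outlen passes in the output buffer size and
 * returns the compressed length, header included:
 *
 *      unsigned int clen = 2 * PAGE_SIZE;
 *      unsigned char *cbuf = kmalloc(clen, GFP_KERNEL);
 *      void *wmem = kmalloc(nx842_get_workmem_size(), GFP_KERNEL);
 *      int err = -ENOMEM;
 *
 *      if (cbuf && wmem)
 *              err = nx842_compress(page_address(page), PAGE_SIZE,
 *                                   cbuf, &clen, wmem);
 *      if (!err)
 *              pr_debug("compressed %lu -> %u bytes\n", PAGE_SIZE, clen);
 */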
static int sw842_decompress(const unsigned char *, int, unsigned char *, int *,
                        const void *);

/**
 * nx842_decompress - Decompress data using the 842 algorithm
 *
 * Decompression provided by the NX842 coprocessor on IBM Power systems.
 * The input buffer is decompressed and the result is stored in the
 * provided output buffer.  The size allocated to the output buffer is
 * provided by the caller of this function in @outlen.  Upon return from
 * this function @outlen contains the length of the decompressed data.
 * If there is an error then @outlen will be 0 and an error will be
 * specified by the return code from this function.
 *
 * @in: Pointer to input buffer, will use bounce buffer if not 128 byte
 *      aligned
 * @inlen: Length of input buffer
 * @out: Pointer to output buffer, must be page aligned
 * @outlen: Length of output buffer, must be PAGE_SIZE
 * @wmem: ptr to buffer for working memory, size determined by
 *        nx842_get_workmem_size()
 *
 * Returns:
 *   0          Success, output of length @outlen stored in the buffer at @out
 *   -ENODEV    Hardware decompression device is unavailable
 *   -ENOMEM    Unable to allocate internal buffers
 *   -ENOSPC    Output buffer is too small
 *   -EINVAL    Bad input data encountered when attempting decompress
 *   -EIO       Internal error
 */
int nx842_decompress(const unsigned char *in, unsigned int inlen,
                        unsigned char *out, unsigned int *outlen, void *wmem)
{
        struct nx842_header *hdr;
        struct nx842_devdata *local_devdata;
        struct device *dev = NULL;
        struct nx842_workmem *workmem;
        struct nx842_scatterlist slin, slout;
        struct nx_csbcpb *csbcpb;
        int ret = 0, i, size, max_sync_size;
        unsigned long inbuf, outbuf;
        struct vio_pfo_op op = {
                .done = NULL,
                .handle = 0,
                .timeout = 0,
        };
        unsigned long start_time = get_tb();

        /* Ensure page alignment and size */
        outbuf = (unsigned long)out;
        if (!IS_ALIGNED(outbuf, PAGE_SIZE) || *outlen != PAGE_SIZE)
                return -EINVAL;

        rcu_read_lock();
        local_devdata = rcu_dereference(devdata);
        if (local_devdata)
                dev = local_devdata->dev;

        /* Get header */
        hdr = (struct nx842_header *)in;

        workmem = (struct nx842_workmem *)ALIGN((unsigned long)wmem,
                NX842_HW_PAGE_SIZE);

        inbuf = (unsigned long)in + hdr->offset;
        if (likely(!IS_ALIGNED(inbuf, IO_BUFFER_ALIGN))) {
                /* Copy block(s) into bounce buffer for alignment */
                memcpy(workmem->bounce, in + hdr->offset, inlen - hdr->offset);
                inbuf = (unsigned long)workmem->bounce;
        }

        /* Init scatterlist */
        slin.entries = (struct nx842_slentry *)workmem->slin;
        slout.entries = (struct nx842_slentry *)workmem->slout;

        /* Init operation */
        op.flags = NX842_OP_DECOMPRESS;
        csbcpb = &workmem->csbcpb;
        memset(csbcpb, 0, sizeof(*csbcpb));
        op.csbcpb = nx842_get_pa(csbcpb);

        /*
         * max_sync_size may have changed since compression,
         * so we can't read it from the device info. We need
         * to derive it from hdr->blocks_nr.
         */
        max_sync_size = PAGE_SIZE / hdr->blocks_nr;

        for (i = 0; i < hdr->blocks_nr; i++) {
                /* Skip padding */
                inbuf = ALIGN(inbuf, IO_BUFFER_ALIGN);

                if (hdr->sizes[i] < 0) {
                        /* Negative sizes indicate uncompressed data blocks */
                        size = abs(hdr->sizes[i]);
                        memcpy((void *)outbuf, (void *)inbuf, size);
                        outbuf += size;
                        inbuf += size;
                        continue;
                }

                if (!dev)
                        goto sw;

                /*
                 * The better the compression, the more likely the "likely"
                 * case becomes.
                 */
                if (likely((inbuf & NX842_HW_PAGE_MASK) ==
                        ((inbuf + hdr->sizes[i] - 1) & NX842_HW_PAGE_MASK))) {
                        /* Create direct DDE */
                        op.in = nx842_get_pa((void *)inbuf);
                        op.inlen = hdr->sizes[i];
                } else {
                        /* Create indirect DDE (scatterlist) */
                        nx842_build_scatterlist(inbuf, hdr->sizes[i], &slin);
                        op.in = nx842_get_pa(slin.entries);
                        op.inlen = -nx842_get_scatterlist_size(&slin);
                }

                /*
                 * NOTE: If the default max_sync_size is changed from 4k
                 * to 64k, remove the "likely" case below, since a
                 * scatterlist will always be needed.
                 */
                if (likely(max_sync_size == NX842_HW_PAGE_SIZE)) {
                        /* Create direct DDE */
                        op.out = nx842_get_pa((void *)outbuf);
                        op.outlen = max_sync_size;
                } else {
                        /* Create indirect DDE (scatterlist) */
                        nx842_build_scatterlist(outbuf, max_sync_size, &slout);
                        op.out = nx842_get_pa(slout.entries);
                        op.outlen = -nx842_get_scatterlist_size(&slout);
                }

                /* Send request to pHyp */
                ret = vio_h_cop_sync(local_devdata->vdev, &op);

                /* Check for pHyp error */
                if (ret) {
                        dev_dbg(dev, "%s: vio_h_cop_sync error (ret=%d, hret=%ld)\n",
                                __func__, ret, op.hcall_err);
                        dev = NULL;
                        goto sw;
                }

                /* Check for hardware error */
                ret = nx842_validate_result(dev, &csbcpb->csb);
                if (ret) {
                        dev = NULL;
                        goto sw;
                }

                /* HW decompression success */
                inbuf += hdr->sizes[i];
                outbuf += csbcpb->csb.processed_byte_count;
                continue;

sw:
                /* software decompression */
                size = max_sync_size;
                ret = sw842_decompress(
                        (unsigned char *)inbuf, hdr->sizes[i],
                        (unsigned char *)outbuf, &size, wmem);
                if (ret)
                        pr_debug("%s: sw842_decompress failed with %d\n",
                                __func__, ret);

                if (ret) {
                        if (ret != -ENOSPC && ret != -EINVAL &&
                                        ret != -EMSGSIZE)
                                ret = -EIO;
                        goto unlock;
                }

                /* SW decompression success */
                inbuf += hdr->sizes[i];
                outbuf += size;
        }

        *outlen = (unsigned int)(outbuf - (unsigned long)out);

unlock:
        if (ret)
                /* decompress fail */
                nx842_inc_decomp_failed(local_devdata);
        else {
                if (!dev)
                        /* software decompress */
                        nx842_inc_swdecomp(local_devdata);
                nx842_inc_decomp_complete(local_devdata);
                ibm_nx842_incr_hist(local_devdata->counters->decomp_times,
                        (get_tb() - start_time) / tb_ticks_per_usec);
        }

        rcu_read_unlock();
        return ret;
}
EXPORT_SYMBOL_GPL(nx842_decompress);
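
/*
 * Illustrative usage sketch, mirroring the compress example above (cbuf,
 * clen, page and wmem are the same hypothetical names).  The output must
 * be a full page-aligned page and *outlen must pass in PAGE_SIZE:
 *
 *      unsigned int dlen = PAGE_SIZE;
 *      int err = nx842_decompress(cbuf, clen, page_address(page),
 *                                 &dlen, wmem);
 *
 * When the coprocessor is unavailable or reports an error, the call falls
 * back per block to the software decompressor defined later in this file.
 */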
/**
 * nx842_OF_set_defaults -- Set default (disabled) values for devdata
 *
 * @devdata - struct nx842_devdata to update
 *
 * Returns:
 *  0 on success
 *  -ENOENT if @devdata ptr is NULL
 */
static int nx842_OF_set_defaults(struct nx842_devdata *devdata)
{
        if (devdata) {
                devdata->max_sync_size = 0;
                devdata->max_sync_sg = 0;
                devdata->max_sg_len = 0;
                devdata->status = UNAVAILABLE;
                return 0;
        } else
                return -ENOENT;
}
/**
 * nx842_OF_upd_status -- Update the device info from OF status prop
 *
 * The status property indicates if the accelerator is enabled.  If the
 * device is in the OF tree it indicates that the hardware is present.
 * The device is only enabled when the status property is 'okay';
 * otherwise the device driver will be disabled.
 *
 * @devdata - struct nx842_devdata to update
 * @prop - struct property pointer containing the status for the update
 *
 * Returns:
 *  0 - Device is available
 *  -EINVAL - Device is not available
 */
static int nx842_OF_upd_status(struct nx842_devdata *devdata,
                                        struct property *prop) {
        int ret = 0;
        const char *status = (const char *)prop->value;

        if (!strncmp(status, "okay", (size_t)prop->length)) {
                devdata->status = AVAILABLE;
        } else {
                dev_info(devdata->dev, "%s: status '%s' is not 'okay'\n",
                                __func__, status);
                devdata->status = UNAVAILABLE;
        }

        return ret;
}
/**
 * nx842_OF_upd_maxsglen -- Update the device info from OF maxsglen prop
 *
 * Definition of the 'ibm,max-sg-len' OF property:
 *  This field indicates the maximum byte length of a scatter list
 *  for the platform facility. It is a single cell encoded as with encode-int.
 *
 * Example:
 *  # od -x ibm,max-sg-len
 *  0000000 0000 0ff0
 *
 *  In this example, the maximum byte length of a scatter list is
 *  0x0ff0 (4,080).
 *
 * @devdata - struct nx842_devdata to update
 * @prop - struct property pointer containing the max-sg-len value for the update
 *
 * Returns:
 *  0 on success
 *  -EINVAL on failure
 */
static int nx842_OF_upd_maxsglen(struct nx842_devdata *devdata,
                                        struct property *prop) {
        int ret = 0;
        const int *maxsglen = prop->value;

        if (prop->length != sizeof(*maxsglen)) {
                dev_err(devdata->dev, "%s: unexpected format for ibm,max-sg-len property\n", __func__);
                dev_dbg(devdata->dev, "%s: ibm,max-sg-len is %d bytes long, expected %lu bytes\n", __func__,
                                prop->length, sizeof(*maxsglen));
                ret = -EINVAL;
        } else {
                devdata->max_sg_len = (unsigned int)min(*maxsglen,
                                (int)NX842_HW_PAGE_SIZE);
        }

        return ret;
}
/**
 * nx842_OF_upd_maxsyncop -- Update the device info from OF maxsyncop prop
 *
 * Definition of the 'ibm,max-sync-cop' OF property:
 *  Two series of cells.  The first series of cells represents the maximums
 *  that can be synchronously compressed.  The second series of cells
 *  represents the maximums that can be synchronously decompressed.
 *  1. The first cell in each series contains the count of the number of
 *     data length, scatter list elements pairs that follow - each being
 *     of the form
 *    a. One cell data byte length
 *    b. One cell total number of scatter list elements
 *
 * Example:
 *  # od -x ibm,max-sync-cop
 *  0000000 0000 0001 0000 1000 0000 01fe 0000 0001
 *  0000020 0000 1000 0000 01fe
 *
 *  In this example, compression supports 0x1000 (4,096) data byte length
 *  and 0x1fe (510) total scatter list elements.  Decompression supports
 *  0x1000 (4,096) data byte length and 0x1fe (510) total scatter list
 *  elements.
 *
 * @devdata - struct nx842_devdata to update
 * @prop - struct property pointer containing the maxsyncop for the update
 *
 * Returns:
 *  0 on success
 *  -EINVAL on failure
 */
static int nx842_OF_upd_maxsyncop(struct nx842_devdata *devdata,
                                        struct property *prop) {
        int ret = 0;
        const struct maxsynccop_t {
                int comp_elements;
                int comp_data_limit;
                int comp_sg_limit;
                int decomp_elements;
                int decomp_data_limit;
                int decomp_sg_limit;
        } *maxsynccop;

        if (prop->length != sizeof(*maxsynccop)) {
                dev_err(devdata->dev, "%s: unexpected format for ibm,max-sync-cop property\n", __func__);
                dev_dbg(devdata->dev, "%s: ibm,max-sync-cop is %d bytes long, expected %lu bytes\n", __func__, prop->length,
                                sizeof(*maxsynccop));
                ret = -EINVAL;
                goto out;
        }

        maxsynccop = (const struct maxsynccop_t *)prop->value;

        /* Use one limit rather than separate limits for compression and
         * decompression.  Set a maximum for this so as not to exceed the
         * size that the header can support and round the value down to
         * the hardware page size (4K) */
        devdata->max_sync_size =
                        (unsigned int)min(maxsynccop->comp_data_limit,
                                        maxsynccop->decomp_data_limit);

        devdata->max_sync_size = min_t(unsigned int, devdata->max_sync_size,
                                        SIZE_64K);

        if (devdata->max_sync_size < SIZE_4K) {
                dev_err(devdata->dev, "%s: hardware max data size (%u) is "
                                "less than the driver minimum, unable to use "
                                "the hardware device\n",
                                __func__, devdata->max_sync_size);
                ret = -EINVAL;
                goto out;
        }

        devdata->max_sync_sg = (unsigned int)min(maxsynccop->comp_sg_limit,
                                                maxsynccop->decomp_sg_limit);
        if (devdata->max_sync_sg < 1) {
                dev_err(devdata->dev, "%s: hardware max sg size (%u) is "
                                "less than the driver minimum, unable to use "
                                "the hardware device\n",
                                __func__, devdata->max_sync_sg);
                ret = -EINVAL;
                goto out;
        }

out:
        return ret;
}
/**
 *
 * nx842_OF_upd -- Handle OF property updates for the device.
 *
 * Set all properties from the OF tree.  Optionally, a new property
 * can be provided by the @new_prop pointer to overwrite an existing value.
 * The device will remain disabled until all values are valid; this function
 * will return an error for updates unless all values are valid.
 *
 * @new_prop: If not NULL, this property is being updated.  If NULL, update
 *  all properties from the current values in the OF tree.
 *
 * Returns:
 *  0 - Success
 *  -ENOMEM - Could not allocate memory for new devdata structure
 *  -EINVAL - property value not found, new_prop is not a recognized
 *      property for the device or property value is not valid.
 *  -ENODEV - Device is not available
 */
static int nx842_OF_upd(struct property *new_prop)
{
        struct nx842_devdata *old_devdata = NULL;
        struct nx842_devdata *new_devdata = NULL;
        struct device_node *of_node = NULL;
        struct property *status = NULL;
        struct property *maxsglen = NULL;
        struct property *maxsyncop = NULL;
        int ret = 0;
        unsigned long flags;

        spin_lock_irqsave(&devdata_mutex, flags);
        old_devdata = rcu_dereference_check(devdata,
                        lockdep_is_held(&devdata_mutex));
        if (old_devdata)
                of_node = old_devdata->dev->of_node;

        if (!old_devdata || !of_node) {
                pr_err("%s: device is not available\n", __func__);
                spin_unlock_irqrestore(&devdata_mutex, flags);
                return -ENODEV;
        }

        new_devdata = kzalloc(sizeof(*new_devdata), GFP_NOFS);
        if (!new_devdata) {
                dev_err(old_devdata->dev, "%s: Could not allocate memory for device data\n", __func__);
                ret = -ENOMEM;
                goto error_out;
        }

        memcpy(new_devdata, old_devdata, sizeof(*old_devdata));
        new_devdata->counters = old_devdata->counters;

        /* Set ptrs for existing properties */
        status = of_find_property(of_node, "status", NULL);
        maxsglen = of_find_property(of_node, "ibm,max-sg-len", NULL);
        maxsyncop = of_find_property(of_node, "ibm,max-sync-cop", NULL);
        if (!status || !maxsglen || !maxsyncop) {
                dev_err(old_devdata->dev, "%s: Could not locate device properties\n", __func__);
                ret = -EINVAL;
                goto error_out;
        }

        /* Set ptr to new property if provided */
        if (new_prop) {
                /* Single property */
                if (!strncmp(new_prop->name, "status", new_prop->length)) {
                        status = new_prop;

                } else if (!strncmp(new_prop->name, "ibm,max-sg-len",
                                        new_prop->length)) {
                        maxsglen = new_prop;

                } else if (!strncmp(new_prop->name, "ibm,max-sync-cop",
                                        new_prop->length)) {
                        maxsyncop = new_prop;

                } else {
                        /*
                         * Skip the update, the property being updated
                         * has no impact.
                         */
                        goto out;
                }
        }

        /* Perform property updates */
        ret = nx842_OF_upd_status(new_devdata, status);
        if (ret)
                goto error_out;

        ret = nx842_OF_upd_maxsglen(new_devdata, maxsglen);
        if (ret)
                goto error_out;

        ret = nx842_OF_upd_maxsyncop(new_devdata, maxsyncop);
        if (ret)
                goto error_out;

out:
        dev_info(old_devdata->dev, "%s: max_sync_size new:%u old:%u\n",
                        __func__, new_devdata->max_sync_size,
                        old_devdata->max_sync_size);
        dev_info(old_devdata->dev, "%s: max_sync_sg new:%u old:%u\n",
                        __func__, new_devdata->max_sync_sg,
                        old_devdata->max_sync_sg);
        dev_info(old_devdata->dev, "%s: max_sg_len new:%u old:%u\n",
                        __func__, new_devdata->max_sg_len,
                        old_devdata->max_sg_len);

        rcu_assign_pointer(devdata, new_devdata);
        spin_unlock_irqrestore(&devdata_mutex, flags);
        synchronize_rcu();
        dev_set_drvdata(new_devdata->dev, new_devdata);
        kfree(old_devdata);
        return 0;

error_out:
        if (new_devdata) {
                dev_info(old_devdata->dev, "%s: device disabled\n", __func__);
                nx842_OF_set_defaults(new_devdata);
                rcu_assign_pointer(devdata, new_devdata);
                spin_unlock_irqrestore(&devdata_mutex, flags);
                synchronize_rcu();
                dev_set_drvdata(new_devdata->dev, new_devdata);
                kfree(old_devdata);
        } else {
                dev_err(old_devdata->dev, "%s: could not update driver from hardware\n", __func__);
                spin_unlock_irqrestore(&devdata_mutex, flags);
        }

        if (!ret)
                ret = -EINVAL;
        return ret;
}
/**
 * nx842_OF_notifier - Process updates to OF properties for the device
 *
 * @np: notifier block
 * @action: notifier action
 * @update: struct pSeries_reconfig_prop_update pointer if action is
 *      PSERIES_UPDATE_PROPERTY
 *
 * Returns:
 *      NOTIFY_OK on success
 *      NOTIFY_BAD encoded with error number on failure, use
 *              notifier_to_errno() to decode this value
 */
static int nx842_OF_notifier(struct notifier_block *np, unsigned long action,
                             void *update)
{
        struct of_prop_reconfig *upd = update;
        struct nx842_devdata *local_devdata;
        struct device_node *node = NULL;

        rcu_read_lock();
        local_devdata = rcu_dereference(devdata);
        if (local_devdata)
                node = local_devdata->dev->of_node;

        if (local_devdata &&
                        action == OF_RECONFIG_UPDATE_PROPERTY &&
                        !strcmp(upd->dn->name, node->name)) {
                rcu_read_unlock();
                nx842_OF_upd(upd->prop);
        } else
                rcu_read_unlock();

        return NOTIFY_OK;
}

static struct notifier_block nx842_of_nb = {
        .notifier_call = nx842_OF_notifier,
};
#define nx842_counter_read(_name) \
static ssize_t nx842_##_name##_show(struct device *dev, \
                struct device_attribute *attr, \
                char *buf) { \
        struct nx842_devdata *local_devdata; \
        int p = 0; \
        rcu_read_lock(); \
        local_devdata = rcu_dereference(devdata); \
        if (local_devdata) \
                p = snprintf(buf, PAGE_SIZE, "%ld\n", \
                       atomic64_read(&local_devdata->counters->_name)); \
        rcu_read_unlock(); \
        return p; \
}

#define NX842DEV_COUNTER_ATTR_RO(_name) \
        nx842_counter_read(_name); \
        static struct device_attribute dev_attr_##_name = __ATTR(_name, \
                                                0444, \
                                                nx842_##_name##_show, \
                                                NULL);

NX842DEV_COUNTER_ATTR_RO(comp_complete);
NX842DEV_COUNTER_ATTR_RO(comp_failed);
NX842DEV_COUNTER_ATTR_RO(decomp_complete);
NX842DEV_COUNTER_ATTR_RO(decomp_failed);
NX842DEV_COUNTER_ATTR_RO(swdecomp);

static ssize_t nx842_timehist_show(struct device *,
                struct device_attribute *, char *);

static struct device_attribute dev_attr_comp_times = __ATTR(comp_times, 0444,
                nx842_timehist_show, NULL);
static struct device_attribute dev_attr_decomp_times = __ATTR(decomp_times,
                0444, nx842_timehist_show, NULL);

static ssize_t nx842_timehist_show(struct device *dev,
                struct device_attribute *attr, char *buf) {
        char *p = buf;
        struct nx842_devdata *local_devdata;
        atomic64_t *times;
        int bytes_remain = PAGE_SIZE;
        int bytes;
        int i;

        rcu_read_lock();
        local_devdata = rcu_dereference(devdata);
        if (!local_devdata) {
                rcu_read_unlock();
                return 0;
        }

        if (attr == &dev_attr_comp_times)
                times = local_devdata->counters->comp_times;
        else if (attr == &dev_attr_decomp_times)
                times = local_devdata->counters->decomp_times;
        else {
                rcu_read_unlock();
                return 0;
        }

        for (i = 0; i < (NX842_HIST_SLOTS - 2); i++) {
                bytes = snprintf(p, bytes_remain, "%u-%uus:\t%ld\n",
                               i ? (2<<(i-1)) : 0, (2<<i)-1,
                               atomic64_read(&times[i]));
                bytes_remain -= bytes;
                p += bytes;
        }
        /* The last bucket holds everything over
         * 2<<(NX842_HIST_SLOTS - 2) us */
        bytes = snprintf(p, bytes_remain, "%uus - :\t%ld\n",
                        2<<(NX842_HIST_SLOTS - 2),
                        atomic64_read(&times[(NX842_HIST_SLOTS - 1)]));
        p += bytes;

        rcu_read_unlock();
        return p - buf;
}

static struct attribute *nx842_sysfs_entries[] = {
        &dev_attr_comp_complete.attr,
        &dev_attr_comp_failed.attr,
        &dev_attr_decomp_complete.attr,
        &dev_attr_decomp_failed.attr,
        &dev_attr_swdecomp.attr,
        &dev_attr_comp_times.attr,
        &dev_attr_decomp_times.attr,
        NULL,
};

static struct attribute_group nx842_attribute_group = {
        .name = NULL,           /* put in device directory */
        .attrs = nx842_sysfs_entries,
};
static int __init nx842_probe(struct vio_dev *viodev,
                                  const struct vio_device_id *id)
{
        struct nx842_devdata *old_devdata, *new_devdata = NULL;
        unsigned long flags;
        int ret = 0;

        spin_lock_irqsave(&devdata_mutex, flags);
        old_devdata = rcu_dereference_check(devdata,
                        lockdep_is_held(&devdata_mutex));

        if (old_devdata && old_devdata->vdev != NULL) {
                dev_err(&viodev->dev, "%s: Attempt to register more than one instance of the hardware\n", __func__);
                ret = -1;
                goto error_unlock;
        }

        dev_set_drvdata(&viodev->dev, NULL);

        new_devdata = kzalloc(sizeof(*new_devdata), GFP_NOFS);
        if (!new_devdata) {
                dev_err(&viodev->dev, "%s: Could not allocate memory for device data\n", __func__);
                ret = -ENOMEM;
                goto error_unlock;
        }

        new_devdata->counters = kzalloc(sizeof(*new_devdata->counters),
                        GFP_NOFS);
        if (!new_devdata->counters) {
                dev_err(&viodev->dev, "%s: Could not allocate memory for performance counters\n", __func__);
                ret = -ENOMEM;
                goto error_unlock;
        }

        new_devdata->vdev = viodev;
        new_devdata->dev = &viodev->dev;
        nx842_OF_set_defaults(new_devdata);

        rcu_assign_pointer(devdata, new_devdata);
        spin_unlock_irqrestore(&devdata_mutex, flags);
        synchronize_rcu();
        kfree(old_devdata);

        of_reconfig_notifier_register(&nx842_of_nb);

        ret = nx842_OF_upd(NULL);
        if (ret && ret != -ENODEV) {
                dev_err(&viodev->dev, "could not parse device tree. %d\n", ret);
                ret = -1;
                goto error;
        }

        rcu_read_lock();
        if (dev_set_drvdata(&viodev->dev, rcu_dereference(devdata))) {
                rcu_read_unlock();
                dev_err(&viodev->dev, "failed to set driver data for device\n");
                ret = -1;
                goto error;
        }
        rcu_read_unlock();

        if (sysfs_create_group(&viodev->dev.kobj, &nx842_attribute_group)) {
                dev_err(&viodev->dev, "could not create sysfs device attributes\n");
                ret = -1;
                goto error;
        }

        return 0;

error_unlock:
        spin_unlock_irqrestore(&devdata_mutex, flags);
        if (new_devdata)
                kfree(new_devdata->counters);
        kfree(new_devdata);
error:
        return ret;
}
static int __exit nx842_remove(struct vio_dev *viodev)
{
        struct nx842_devdata *old_devdata;
        unsigned long flags;

        pr_info("Removing IBM Power 842 compression device\n");
        sysfs_remove_group(&viodev->dev.kobj, &nx842_attribute_group);

        spin_lock_irqsave(&devdata_mutex, flags);
        old_devdata = rcu_dereference_check(devdata,
                        lockdep_is_held(&devdata_mutex));
        of_reconfig_notifier_unregister(&nx842_of_nb);
        rcu_assign_pointer(devdata, NULL);
        spin_unlock_irqrestore(&devdata_mutex, flags);
        synchronize_rcu();
        dev_set_drvdata(&viodev->dev, NULL);
        if (old_devdata)
                kfree(old_devdata->counters);
        kfree(old_devdata);
        return 0;
}

static struct vio_device_id nx842_driver_ids[] = {
        {"ibm,compression-v1", "ibm,compression"},
        {"", ""},
};

static struct vio_driver nx842_driver = {
        .name = MODULE_NAME,
        .probe = nx842_probe,
        .remove = nx842_remove,
        .get_desired_dma = nx842_get_desired_dma,
        .id_table = nx842_driver_ids,
};

static int __init nx842_init(void)
{
        struct nx842_devdata *new_devdata;
        pr_info("Registering IBM Power 842 compression driver\n");

        RCU_INIT_POINTER(devdata, NULL);
        new_devdata = kzalloc(sizeof(*new_devdata), GFP_KERNEL);
        if (!new_devdata) {
                pr_err("Could not allocate memory for device data\n");
                return -ENOMEM;
        }
        new_devdata->status = UNAVAILABLE;
        RCU_INIT_POINTER(devdata, new_devdata);

        return vio_register_driver(&nx842_driver);
}

module_init(nx842_init);

static void __exit nx842_exit(void)
{
        struct nx842_devdata *old_devdata;
        unsigned long flags;

        pr_info("Exiting IBM Power 842 compression driver\n");
        spin_lock_irqsave(&devdata_mutex, flags);
        old_devdata = rcu_dereference_check(devdata,
                        lockdep_is_held(&devdata_mutex));
        rcu_assign_pointer(devdata, NULL);
        spin_unlock_irqrestore(&devdata_mutex, flags);
        synchronize_rcu();
        if (old_devdata)
                dev_set_drvdata(old_devdata->dev, NULL);
        kfree(old_devdata);
        vio_unregister_driver(&nx842_driver);
}

module_exit(nx842_exit);
/*********************************
 * 842 software decompressor
 *********************************/
typedef int (*sw842_template_op)(const char **, int *, unsigned char **,
                                                struct sw842_fifo *);

static int sw842_data8(const char **, int *, unsigned char **,
                                                struct sw842_fifo *);
static int sw842_data4(const char **, int *, unsigned char **,
                                                struct sw842_fifo *);
static int sw842_data2(const char **, int *, unsigned char **,
                                                struct sw842_fifo *);
static int sw842_ptr8(const char **, int *, unsigned char **,
                                                struct sw842_fifo *);
static int sw842_ptr4(const char **, int *, unsigned char **,
                                                struct sw842_fifo *);
static int sw842_ptr2(const char **, int *, unsigned char **,
                                                struct sw842_fifo *);

/* special templates */
#define SW842_TMPL_REPEAT 0x1B
#define SW842_TMPL_ZEROS 0x1C
#define SW842_TMPL_EOF 0x1E

static sw842_template_op sw842_tmpl_ops[26][4] = {
        { sw842_data8, NULL}, /* 0 (00000) */
        { sw842_data4, sw842_data2, sw842_ptr2, NULL},
        { sw842_data4, sw842_ptr2, sw842_data2, NULL},
        { sw842_data4, sw842_ptr2, sw842_ptr2, NULL},
        { sw842_data4, sw842_ptr4, NULL},
        { sw842_data2, sw842_ptr2, sw842_data4, NULL},
        { sw842_data2, sw842_ptr2, sw842_data2, sw842_ptr2},
        { sw842_data2, sw842_ptr2, sw842_ptr2, sw842_data2},
        { sw842_data2, sw842_ptr2, sw842_ptr2, sw842_ptr2},
        { sw842_data2, sw842_ptr2, sw842_ptr4, NULL},
        { sw842_ptr2, sw842_data2, sw842_data4, NULL}, /* 10 (01010) */
        { sw842_ptr2, sw842_data4, sw842_ptr2, NULL},
        { sw842_ptr2, sw842_data2, sw842_ptr2, sw842_data2},
        { sw842_ptr2, sw842_data2, sw842_ptr2, sw842_ptr2},
        { sw842_ptr2, sw842_data2, sw842_ptr4, NULL},
        { sw842_ptr2, sw842_ptr2, sw842_data4, NULL},
        { sw842_ptr2, sw842_ptr2, sw842_data2, sw842_ptr2},
        { sw842_ptr2, sw842_ptr2, sw842_ptr2, sw842_data2},
        { sw842_ptr2, sw842_ptr2, sw842_ptr2, sw842_ptr2},
        { sw842_ptr2, sw842_ptr2, sw842_ptr4, NULL},
        { sw842_ptr4, sw842_data4, NULL}, /* 20 (10100) */
        { sw842_ptr4, sw842_data2, sw842_ptr2, NULL},
        { sw842_ptr4, sw842_ptr2, sw842_data2, NULL},
        { sw842_ptr4, sw842_ptr2, sw842_ptr2, NULL},
        { sw842_ptr4, sw842_ptr4, NULL},
        { sw842_ptr8, NULL}
};
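
/*
 * Each row above decodes one 8-byte output group; the 5-bit template code
 * selects which mix of literal data and fifo back-references makes up the
 * group.  For example, row 1 (00001) is { data4, data2, ptr2 }: four
 * literal bytes, two literal bytes, then a 2-byte reference into the f2
 * fifo; row 25 (11001) is { ptr8 }: the entire group is an 8-byte
 * reference into the f8 fifo.
 */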
/* Software decompress helpers */
static uint8_t sw842_get_byte(const char *buf, int bit)
{
        uint8_t tmpl;
        uint16_t tmp;
        tmp = htons(*(uint16_t *)(buf));
        tmp = (uint16_t)(tmp << bit);
        tmp = ntohs(tmp);
        memcpy(&tmpl, &tmp, 1);
        return tmpl;
}

static uint8_t sw842_get_template(const char **buf, int *bit)
{
        uint8_t byte;
        byte = sw842_get_byte(*buf, *bit);
        byte = byte >> 3;
        byte &= 0x1F;
        *buf += (*bit + 5) / 8;
        *bit = (*bit + 5) % 8;
        return byte;
}
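
/*
 * Example: with *bit == 6, the 5 template bits straddle two bytes (the
 * last two bits of (*buf)[0] and the first three bits of (*buf)[1]).
 * sw842_get_byte() absorbs the spill by loading 16 bits and shifting left
 * by the bit offset before taking the leading byte, so the template code
 * ends up in the top 5 bits; afterwards *buf advances by (6 + 5) / 8 = 1
 * byte and *bit becomes (6 + 5) % 8 = 3.
 */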
/* The repeat count is a 6-bit field, read the same way as the 5-bit template */
static uint8_t sw842_get_repeat_count(const char **buf, int *bit)
{
        uint8_t byte;
        byte = sw842_get_byte(*buf, *bit);
        byte = byte >> 2;
        byte &= 0x3F;
        *buf += (*bit + 6) / 8;
        *bit = (*bit + 6) % 8;
        return byte;
}

static uint8_t sw842_get_ptr2(const char **buf, int *bit)
{
        uint8_t ptr;
        ptr = sw842_get_byte(*buf, *bit);
        (*buf)++;
        return ptr;
}

static uint16_t sw842_get_ptr4(const char **buf, int *bit,
                struct sw842_fifo *fifo)
{
        uint16_t ptr;
        ptr = htons(*(uint16_t *)(*buf));
        ptr = (uint16_t)(ptr << *bit);
        ptr = ptr >> 7;
        ptr &= 0x01FF;
        *buf += (*bit + 9) / 8;
        *bit = (*bit + 9) % 8;
        return ptr;
}

static uint8_t sw842_get_ptr8(const char **buf, int *bit,
                struct sw842_fifo *fifo)
{
        return sw842_get_ptr2(buf, bit);
}

/* Software decompress template ops */

static int sw842_data8(const char **inbuf, int *inbit,
                unsigned char **outbuf, struct sw842_fifo *fifo)
{
        int ret;

        ret = sw842_data4(inbuf, inbit, outbuf, fifo);
        if (ret)
                return ret;

        ret = sw842_data4(inbuf, inbit, outbuf, fifo);

        return ret;
}

static int sw842_data4(const char **inbuf, int *inbit,
                unsigned char **outbuf, struct sw842_fifo *fifo)
{
        int ret;

        ret = sw842_data2(inbuf, inbit, outbuf, fifo);
        if (ret)
                return ret;

        ret = sw842_data2(inbuf, inbit, outbuf, fifo);

        return ret;
}

static int sw842_data2(const char **inbuf, int *inbit,
                unsigned char **outbuf, struct sw842_fifo *fifo)
{
        **outbuf = sw842_get_byte(*inbuf, *inbit);
        (*inbuf)++;
        (*outbuf)++;

        **outbuf = sw842_get_byte(*inbuf, *inbit);
        (*inbuf)++;
        (*outbuf)++;

        return 0;
}

static int sw842_ptr8(const char **inbuf, int *inbit,
                unsigned char **outbuf, struct sw842_fifo *fifo)
{
        uint8_t ptr;

        ptr = sw842_get_ptr8(inbuf, inbit, fifo);

        if (!fifo->f84_full && (ptr >= fifo->f8_count))
                return 1;

        memcpy(*outbuf, fifo->f8[ptr], 8);
        *outbuf += 8;

        return 0;
}

static int sw842_ptr4(const char **inbuf, int *inbit,
                unsigned char **outbuf, struct sw842_fifo *fifo)
{
        uint16_t ptr;

        ptr = sw842_get_ptr4(inbuf, inbit, fifo);

        if (!fifo->f84_full && (ptr >= fifo->f4_count))
                return 1;

        memcpy(*outbuf, fifo->f4[ptr], 4);
        *outbuf += 4;

        return 0;
}

static int sw842_ptr2(const char **inbuf, int *inbit,
                unsigned char **outbuf, struct sw842_fifo *fifo)
{
        uint8_t ptr;

        ptr = sw842_get_ptr2(inbuf, inbit);

        if (!fifo->f2_full && (ptr >= fifo->f2_count))
                return 1;

        memcpy(*outbuf, fifo->f2[ptr], 2);
        *outbuf += 2;

        return 0;
}

static void sw842_copy_to_fifo(const char *buf, struct sw842_fifo *fifo)
{
        unsigned char initial_f2count = fifo->f2_count;

        memcpy(fifo->f8[fifo->f8_count], buf, 8);
        fifo->f4_count += 2;
        fifo->f8_count += 1;

        if (!fifo->f84_full && fifo->f4_count >= 512) {
                fifo->f84_full = 1;
                fifo->f4_count /= 512;
        }

        memcpy(fifo->f2[fifo->f2_count++], buf, 2);
        memcpy(fifo->f2[fifo->f2_count++], buf + 2, 2);
        memcpy(fifo->f2[fifo->f2_count++], buf + 4, 2);
        memcpy(fifo->f2[fifo->f2_count++], buf + 6, 2);

        if (fifo->f2_count < initial_f2count)
                fifo->f2_full = 1;
}
static int sw842_decompress(const unsigned char *src, int srclen,
                        unsigned char *dst, int *destlen,
                        const void *wrkmem)
{
        uint8_t tmpl;
        const char *inbuf;
        int inbit = 0;
        unsigned char *outbuf, *outbuf_end, *origbuf, *prevbuf;
        const char *inbuf_end;
        sw842_template_op op;
        int opindex;
        int i, repeat_count;
        struct sw842_fifo *fifo;
        int ret = 0;

        fifo = &((struct nx842_workmem *)(wrkmem))->swfifo;
        memset(fifo, 0, sizeof(*fifo));

        origbuf = NULL;
        inbuf = src;
        inbuf_end = src + srclen;
        outbuf = dst;
        outbuf_end = dst + *destlen;

        while ((tmpl = sw842_get_template(&inbuf, &inbit)) != SW842_TMPL_EOF) {
                if (inbuf >= inbuf_end) {
                        ret = -EINVAL;
                        goto out;
                }

                opindex = 0;
                prevbuf = origbuf;
                origbuf = outbuf;
                switch (tmpl) {
                case SW842_TMPL_REPEAT:
                        if (prevbuf == NULL) {
                                ret = -EINVAL;
                                goto out;
                        }

                        repeat_count = sw842_get_repeat_count(&inbuf,
                                                                &inbit) + 1;

                        /* Did the repeat count advance past the end of input */
                        if (inbuf > inbuf_end) {
                                ret = -EINVAL;
                                goto out;
                        }

                        for (i = 0; i < repeat_count; i++) {
                                /* Would this overflow the output buffer */
                                if ((outbuf + 8) > outbuf_end) {
                                        ret = -ENOSPC;
                                        goto out;
                                }

                                memcpy(outbuf, prevbuf, 8);
                                sw842_copy_to_fifo(outbuf, fifo);
                                outbuf += 8;
                        }
                        break;

                case SW842_TMPL_ZEROS:
                        /* Would this overflow the output buffer */
                        if ((outbuf + 8) > outbuf_end) {
                                ret = -ENOSPC;
                                goto out;
                        }

                        memset(outbuf, 0, 8);
                        sw842_copy_to_fifo(outbuf, fifo);
                        outbuf += 8;
                        break;

                default:
                        if (tmpl > 25) {
                                ret = -EINVAL;
                                goto out;
                        }

                        /* Does this go past the end of the input buffer */
                        if ((inbuf + 2) > inbuf_end) {
                                ret = -EINVAL;
                                goto out;
                        }

                        /* Would this overflow the output buffer */
                        if ((outbuf + 8) > outbuf_end) {
                                ret = -ENOSPC;
                                goto out;
                        }

                        while (opindex < 4 &&
                                (op = sw842_tmpl_ops[tmpl][opindex++])
                                        != NULL) {
                                ret = (*op)(&inbuf, &inbit, &outbuf, fifo);
                                if (ret) {
                                        ret = -EINVAL;
                                        goto out;
                                }
                                sw842_copy_to_fifo(origbuf, fifo);
                        }
                }
        }

out:
        if (!ret)
                *destlen = (unsigned int)(outbuf - dst);
        else
                *destlen = 0;

        return ret;
}