/*
 * linux/net/sunrpc/xdr.c
 *
 * Generic XDR support.
 *
 * Copyright (C) 1995, 1996 Olaf Kirch <okir@monad.swb.de>
 */

#include <linux/module.h>
#include <linux/slab.h>
#include <linux/types.h>
#include <linux/string.h>
#include <linux/kernel.h>
#include <linux/pagemap.h>
#include <linux/errno.h>
#include <linux/sunrpc/xdr.h>
#include <linux/sunrpc/msg_prot.h>

/*
 * XDR functions for basic NFS types
 */
__be32 *
xdr_encode_netobj(__be32 *p, const struct xdr_netobj *obj)
{
	unsigned int quadlen = XDR_QUADLEN(obj->len);

	p[quadlen] = 0;		/* zero trailing bytes */
	*p++ = cpu_to_be32(obj->len);
	memcpy(p, obj->data, obj->len);
	return p + XDR_QUADLEN(obj->len);
}
EXPORT_SYMBOL_GPL(xdr_encode_netobj);

__be32 *
xdr_decode_netobj(__be32 *p, struct xdr_netobj *obj)
{
	unsigned int len;

	if ((len = be32_to_cpu(*p++)) > XDR_MAX_NETOBJ)
		return NULL;
	obj->len = len;
	obj->data = (u8 *) p;
	return p + XDR_QUADLEN(len);
}
EXPORT_SYMBOL_GPL(xdr_decode_netobj);

/**
 * xdr_encode_opaque_fixed - Encode fixed length opaque data
 * @p: pointer to current position in XDR buffer.
 * @ptr: pointer to data to encode (or NULL)
 * @nbytes: size of data.
 *
 * Copy the array of data of length nbytes at ptr to the XDR buffer
 * at position p, then align to the next 32-bit boundary by padding
 * with zero bytes (see RFC1832).
 * Note: if ptr is NULL, only the padding is performed.
 *
 * Returns the updated current XDR buffer position
 */
__be32 *xdr_encode_opaque_fixed(__be32 *p, const void *ptr, unsigned int nbytes)
{
	if (likely(nbytes != 0)) {
		unsigned int quadlen = XDR_QUADLEN(nbytes);
		unsigned int padding = (quadlen << 2) - nbytes;

		if (ptr != NULL)
			memcpy(p, ptr, nbytes);
		if (padding != 0)
			memset((char *)p + nbytes, 0, padding);
		p += quadlen;
	}
	return p;
}
EXPORT_SYMBOL_GPL(xdr_encode_opaque_fixed);

/**
 * xdr_encode_opaque - Encode variable length opaque data
 * @p: pointer to current position in XDR buffer.
 * @ptr: pointer to data to encode (or NULL)
 * @nbytes: size of data.
 *
 * Returns the updated current XDR buffer position
 */
__be32 *xdr_encode_opaque(__be32 *p, const void *ptr, unsigned int nbytes)
{
	*p++ = cpu_to_be32(nbytes);
	return xdr_encode_opaque_fixed(p, ptr, nbytes);
}
EXPORT_SYMBOL_GPL(xdr_encode_opaque);
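
/*
 * Illustrative sketch, not part of the original file: encoding a
 * caller-supplied file handle as a counted opaque.  The example_*
 * name and the "file handle" interpretation are assumptions; only
 * the xdr_encode_opaque() call reflects this file's API.
 */
static __be32 *example_encode_fh(__be32 *p, const u8 *fh_data,
				 unsigned int fh_len)
{
	/* Emits the 32-bit length, then the data, then zero padding
	 * up to the next 32-bit boundary; returns the new position. */
	return xdr_encode_opaque(p, fh_data, fh_len);
}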

__be32 *
xdr_encode_string(__be32 *p, const char *string)
{
	return xdr_encode_array(p, string, strlen(string));
}
EXPORT_SYMBOL_GPL(xdr_encode_string);

__be32 *
xdr_decode_string_inplace(__be32 *p, char **sp,
			  unsigned int *lenp, unsigned int maxlen)
{
	u32 len;

	len = be32_to_cpu(*p++);
	if (len > maxlen)
		return NULL;
	*lenp = len;
	*sp = (char *) p;
	return p + XDR_QUADLEN(len);
}
EXPORT_SYMBOL_GPL(xdr_decode_string_inplace);

/**
 * xdr_terminate_string - '\0'-terminate a string residing in an xdr_buf
 * @buf: XDR buffer where string resides
 * @len: length of string, in bytes
 *
 */
void
xdr_terminate_string(struct xdr_buf *buf, const u32 len)
{
	char *kaddr;

	kaddr = kmap_atomic(buf->pages[0]);
	kaddr[buf->page_base + len] = '\0';
	kunmap_atomic(kaddr);
}
EXPORT_SYMBOL_GPL(xdr_terminate_string);

void
xdr_inline_pages(struct xdr_buf *xdr, unsigned int offset,
		 struct page **pages, unsigned int base, unsigned int len)
{
	struct kvec *head = xdr->head;
	struct kvec *tail = xdr->tail;
	char *buf = (char *)head->iov_base;
	unsigned int buflen = head->iov_len;

	head->iov_len = offset;

	xdr->pages = pages;
	xdr->page_base = base;
	xdr->page_len = len;

	tail->iov_base = buf + offset;
	tail->iov_len = buflen - offset;

	xdr->buflen += len;
}
EXPORT_SYMBOL_GPL(xdr_inline_pages);

/*
 * Helper routines for doing 'memmove' like operations on a struct xdr_buf
 */

/**
 * _shift_data_right_pages
 * @pages: vector of pages containing both the source and dest memory area.
 * @pgto_base: page vector address of destination
 * @pgfrom_base: page vector address of source
 * @len: number of bytes to copy
 *
 * Note: the addresses pgto_base and pgfrom_base are both calculated in
 *       the same way:
 *            if a memory area starts at byte 'base' in page 'pages[i]',
 *            then its address is given as (i << PAGE_CACHE_SHIFT) + base
 * Also note: pgfrom_base must be < pgto_base, but the memory areas
 *       they point to may overlap.
 */
static void
_shift_data_right_pages(struct page **pages, size_t pgto_base,
		size_t pgfrom_base, size_t len)
{
	struct page **pgfrom, **pgto;
	char *vfrom, *vto;
	size_t copy;

	BUG_ON(pgto_base <= pgfrom_base);

	pgto_base += len;
	pgfrom_base += len;

	pgto = pages + (pgto_base >> PAGE_CACHE_SHIFT);
	pgfrom = pages + (pgfrom_base >> PAGE_CACHE_SHIFT);

	pgto_base &= ~PAGE_CACHE_MASK;
	pgfrom_base &= ~PAGE_CACHE_MASK;

	do {
		/* Are any pointers crossing a page boundary? */
		if (pgto_base == 0) {
			pgto_base = PAGE_CACHE_SIZE;
			pgto--;
		}
		if (pgfrom_base == 0) {
			pgfrom_base = PAGE_CACHE_SIZE;
			pgfrom--;
		}

		copy = len;
		if (copy > pgto_base)
			copy = pgto_base;
		if (copy > pgfrom_base)
			copy = pgfrom_base;
		pgto_base -= copy;
		pgfrom_base -= copy;

		vto = kmap_atomic(*pgto);
		if (*pgto != *pgfrom) {
			vfrom = kmap_atomic(*pgfrom);
			memcpy(vto + pgto_base, vfrom + pgfrom_base, copy);
			kunmap_atomic(vfrom);
		} else
			memmove(vto + pgto_base, vto + pgfrom_base, copy);
		flush_dcache_page(*pgto);
		kunmap_atomic(vto);

	} while ((len -= copy) != 0);
}

/**
 * _copy_to_pages
 * @pages: array of pages
 * @pgbase: page vector address of destination
 * @p: pointer to source data
 * @len: length
 *
 * Copies data from an arbitrary memory location into an array of pages.
 * The copy is assumed to be non-overlapping.
 */
static void
_copy_to_pages(struct page **pages, size_t pgbase, const char *p, size_t len)
{
	struct page **pgto;
	char *vto;
	size_t copy;

	pgto = pages + (pgbase >> PAGE_CACHE_SHIFT);
	pgbase &= ~PAGE_CACHE_MASK;

	for (;;) {
		copy = PAGE_CACHE_SIZE - pgbase;
		if (copy > len)
			copy = len;

		vto = kmap_atomic(*pgto);
		memcpy(vto + pgbase, p, copy);
		kunmap_atomic(vto);

		len -= copy;
		if (len == 0)
			break;

		pgbase += copy;
		if (pgbase == PAGE_CACHE_SIZE) {
			flush_dcache_page(*pgto);
			pgbase = 0;
			pgto++;
		}
		p += copy;
	}
	flush_dcache_page(*pgto);
}

/**
 * _copy_from_pages
 * @p: pointer to destination
 * @pages: array of pages
 * @pgbase: offset of source data
 * @len: length
 *
 * Copies data into an arbitrary memory location from an array of pages.
 * The copy is assumed to be non-overlapping.
 */
void
_copy_from_pages(char *p, struct page **pages, size_t pgbase, size_t len)
{
	struct page **pgfrom;
	char *vfrom;
	size_t copy;

	pgfrom = pages + (pgbase >> PAGE_CACHE_SHIFT);
	pgbase &= ~PAGE_CACHE_MASK;

	do {
		copy = PAGE_CACHE_SIZE - pgbase;
		if (copy > len)
			copy = len;

		vfrom = kmap_atomic(*pgfrom);
		memcpy(p, vfrom + pgbase, copy);
		kunmap_atomic(vfrom);

		pgbase += copy;
		if (pgbase == PAGE_CACHE_SIZE) {
			pgbase = 0;
			pgfrom++;
		}
		p += copy;

	} while ((len -= copy) != 0);
}
EXPORT_SYMBOL_GPL(_copy_from_pages);

/**
 * xdr_shrink_bufhead
 * @buf: xdr_buf
 * @len: bytes to remove from buf->head[0]
 *
 * Shrinks XDR buffer's header kvec buf->head[0] by
 * 'len' bytes. The extra data is not lost, but is instead
 * moved into the inlined pages and/or the tail.
 */
static void
xdr_shrink_bufhead(struct xdr_buf *buf, size_t len)
{
	struct kvec *head, *tail;
	size_t copy, offs;
	unsigned int pglen = buf->page_len;

	tail = buf->tail;
	head = buf->head;

	WARN_ON_ONCE(len > head->iov_len);
	if (len > head->iov_len)
		len = head->iov_len;

	/* Shift the tail first */
	if (tail->iov_len != 0) {
		if (tail->iov_len > len) {
			copy = tail->iov_len - len;
			memmove((char *)tail->iov_base + len,
					tail->iov_base, copy);
		}
		/* Copy from the inlined pages into the tail */
		copy = len;
		if (copy > pglen)
			copy = pglen;
		offs = len - copy;
		if (offs >= tail->iov_len)
			copy = 0;
		else if (copy > tail->iov_len - offs)
			copy = tail->iov_len - offs;
		if (copy != 0)
			_copy_from_pages((char *)tail->iov_base + offs,
					buf->pages,
					buf->page_base + pglen + offs - len,
					copy);
		/* Do we also need to copy data from the head into the tail ? */
		if (len > pglen) {
			offs = copy = len - pglen;
			if (copy > tail->iov_len)
				copy = tail->iov_len;
			memcpy(tail->iov_base,
			       (char *)head->iov_base +
			       head->iov_len - offs,
			       copy);
		}
	}
	/* Now handle pages */
	if (pglen != 0) {
		if (pglen > len)
			_shift_data_right_pages(buf->pages,
						buf->page_base + len,
						buf->page_base,
						pglen - len);
		copy = len;
		if (len > pglen)
			copy = pglen;
		_copy_to_pages(buf->pages, buf->page_base,
			       (char *)head->iov_base + head->iov_len - len,
			       copy);
	}
	head->iov_len -= len;
	buf->buflen -= len;
	/* Have we truncated the message? */
	if (buf->len > buf->buflen)
		buf->len = buf->buflen;
}

/**
 * xdr_shrink_pagelen
 * @buf: xdr_buf
 * @len: bytes to remove from buf->pages
 *
 * Shrinks XDR buffer's page array buf->pages by
 * 'len' bytes. The extra data is not lost, but is instead
 * moved into the tail.
 */
static void
xdr_shrink_pagelen(struct xdr_buf *buf, size_t len)
{
	struct kvec *tail;
	size_t copy;
	unsigned int pglen = buf->page_len;
	unsigned int tailbuf_len;

	tail = buf->tail;
	BUG_ON(len > pglen);

	tailbuf_len = buf->buflen - buf->head->iov_len - buf->page_len;

	/* Shift the tail first */
	if (tailbuf_len != 0) {
		unsigned int free_space = tailbuf_len - tail->iov_len;

		if (len < free_space)
			free_space = len;
		tail->iov_len += free_space;

		copy = len;
		if (tail->iov_len > len) {
			char *p = (char *)tail->iov_base + len;
			memmove(p, tail->iov_base, tail->iov_len - len);
		} else
			copy = tail->iov_len;
		/* Copy from the inlined pages into the tail */
		_copy_from_pages((char *)tail->iov_base,
				 buf->pages, buf->page_base + pglen - len,
				 copy);
	}
	buf->page_len -= len;
	buf->buflen -= len;
	/* Have we truncated the message? */
	if (buf->len > buf->buflen)
		buf->len = buf->buflen;
}

void
xdr_shift_buf(struct xdr_buf *buf, size_t len)
{
	xdr_shrink_bufhead(buf, len);
}
EXPORT_SYMBOL_GPL(xdr_shift_buf);

/**
 * xdr_stream_pos - Return the current offset from the start of the xdr_stream
 * @xdr: pointer to struct xdr_stream
 */
unsigned int xdr_stream_pos(const struct xdr_stream *xdr)
{
	return (unsigned int)(XDR_QUADLEN(xdr->buf->len) - xdr->nwords) << 2;
}
EXPORT_SYMBOL_GPL(xdr_stream_pos);

/**
 * xdr_init_encode - Initialize a struct xdr_stream for sending data.
 * @xdr: pointer to xdr_stream struct
 * @buf: pointer to XDR buffer in which to encode data
 * @p: current pointer inside XDR buffer
 *
 * Note: at the moment the RPC client only passes the length of our
 *       scratch buffer in the xdr_buf's header kvec. Previously this
 *       meant we needed to call xdr_adjust_iovec() after encoding the
 *       data. With the new scheme, the xdr_stream manages the details
 *       of the buffer length, and takes care of adjusting the kvec
 *       length for us.
 */
void xdr_init_encode(struct xdr_stream *xdr, struct xdr_buf *buf, __be32 *p)
{
	struct kvec *iov = buf->head;
	int scratch_len = buf->buflen - buf->page_len - buf->tail[0].iov_len;

	BUG_ON(scratch_len < 0);
	xdr->buf = buf;
	xdr->iov = iov;
	xdr->p = (__be32 *)((char *)iov->iov_base + iov->iov_len);
	xdr->end = (__be32 *)((char *)iov->iov_base + scratch_len);
	BUG_ON(iov->iov_len > scratch_len);

	if (p != xdr->p && p != NULL) {
		size_t len;

		BUG_ON(p < xdr->p || p > xdr->end);
		len = (char *)p - (char *)xdr->p;
		xdr->p = p;
		buf->len += len;
		iov->iov_len += len;
	}
}
EXPORT_SYMBOL_GPL(xdr_init_encode);

/**
 * xdr_reserve_space - Reserve buffer space for sending
 * @xdr: pointer to xdr_stream
 * @nbytes: number of bytes to reserve
 *
 * Checks that we have enough buffer space to encode 'nbytes' more
 * bytes of data. If so, update the total xdr_buf length, and
 * adjust the length of the current kvec.
 */
__be32 * xdr_reserve_space(struct xdr_stream *xdr, size_t nbytes)
{
	__be32 *p = xdr->p;
	__be32 *q;

	/* align nbytes on the next 32-bit boundary */
	nbytes += 3;
	nbytes &= ~3;
	q = p + (nbytes >> 2);
	if (unlikely(q > xdr->end || q < p))
		return NULL;
	xdr->p = q;
	xdr->iov->iov_len += nbytes;
	xdr->buf->len += nbytes;
	return p;
}
EXPORT_SYMBOL_GPL(xdr_reserve_space);
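
/*
 * Illustrative sketch, not part of the original file: the usual encode
 * sequence of xdr_init_encode() followed by xdr_reserve_space().  The
 * RPC layer normally supplies 'buf' and the starting 'p'; the
 * example_* name and the -EMSGSIZE choice are assumptions.
 */
static int example_encode_two_words(struct xdr_stream *xdr,
				    struct xdr_buf *buf, __be32 *p,
				    u32 a, u32 b)
{
	xdr_init_encode(xdr, buf, p);
	p = xdr_reserve_space(xdr, 2 * sizeof(u32));
	if (p == NULL)
		return -EMSGSIZE;	/* no room left in the buffer */
	*p++ = cpu_to_be32(a);
	*p = cpu_to_be32(b);
	return 0;
}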

/**
 * xdr_write_pages - Insert a list of pages into an XDR buffer for sending
 * @xdr: pointer to xdr_stream
 * @pages: list of pages
 * @base: offset of first byte
 * @len: length of data in bytes
 *
 */
void xdr_write_pages(struct xdr_stream *xdr, struct page **pages, unsigned int base,
		 unsigned int len)
{
	struct xdr_buf *buf = xdr->buf;
	struct kvec *iov = buf->tail;

	buf->pages = pages;
	buf->page_base = base;
	buf->page_len = len;

	iov->iov_base = (char *)xdr->p;
	iov->iov_len = 0;
	xdr->iov = iov;

	if (len & 3) {
		unsigned int pad = 4 - (len & 3);

		BUG_ON(xdr->p >= xdr->end);
		iov->iov_base = (char *)xdr->p + (len & 3);
		iov->iov_len += pad;
		len += pad;
		*xdr->p++ = 0;
	}
	buf->buflen += len;
	buf->len += len;
}
EXPORT_SYMBOL_GPL(xdr_write_pages);
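
/*
 * Illustrative sketch, not part of the original file: encoding a
 * WRITE-style payload as an XDR opaque<> whose length word lives in
 * head[] while the bytes stay in the caller's pages.  All example_*
 * names are assumptions.
 */
static int example_encode_write_payload(struct xdr_stream *xdr,
					struct page **pages,
					unsigned int base, unsigned int len)
{
	__be32 *p = xdr_reserve_space(xdr, sizeof(__be32));

	if (p == NULL)
		return -EMSGSIZE;
	*p = cpu_to_be32(len);		/* opaque<> length */
	/* Reference (do not copy) the payload; padding goes in tail[]. */
	xdr_write_pages(xdr, pages, base, len);
	return 0;
}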

static void xdr_set_iov(struct xdr_stream *xdr, struct kvec *iov,
		unsigned int len)
{
	if (len > iov->iov_len)
		len = iov->iov_len;
	xdr->p = (__be32*)iov->iov_base;
	xdr->end = (__be32*)(iov->iov_base + len);
	xdr->iov = iov;
	xdr->page_ptr = NULL;
}

static int xdr_set_page_base(struct xdr_stream *xdr,
		unsigned int base, unsigned int len)
{
	unsigned int pgnr;
	unsigned int maxlen;
	unsigned int pgoff;
	unsigned int pgend;
	void *kaddr;

	maxlen = xdr->buf->page_len;
	if (base >= maxlen)
		return -EINVAL;
	maxlen -= base;
	if (len > maxlen)
		len = maxlen;

	base += xdr->buf->page_base;

	pgnr = base >> PAGE_SHIFT;
	xdr->page_ptr = &xdr->buf->pages[pgnr];
	kaddr = page_address(*xdr->page_ptr);

	pgoff = base & ~PAGE_MASK;
	xdr->p = (__be32*)(kaddr + pgoff);

	pgend = pgoff + len;
	if (pgend > PAGE_SIZE)
		pgend = PAGE_SIZE;
	xdr->end = (__be32*)(kaddr + pgend);
	xdr->iov = NULL;
	return 0;
}

static void xdr_set_next_page(struct xdr_stream *xdr)
{
	unsigned int newbase;

	newbase = (1 + xdr->page_ptr - xdr->buf->pages) << PAGE_SHIFT;
	newbase -= xdr->buf->page_base;

	if (xdr_set_page_base(xdr, newbase, PAGE_SIZE) < 0)
		xdr_set_iov(xdr, xdr->buf->tail, xdr->buf->len);
}

static bool xdr_set_next_buffer(struct xdr_stream *xdr)
{
	if (xdr->page_ptr != NULL)
		xdr_set_next_page(xdr);
	else if (xdr->iov == xdr->buf->head) {
		if (xdr_set_page_base(xdr, 0, PAGE_SIZE) < 0)
			xdr_set_iov(xdr, xdr->buf->tail, xdr->buf->len);
	}
	return xdr->p != xdr->end;
}

/**
 * xdr_init_decode - Initialize an xdr_stream for decoding data.
 * @xdr: pointer to xdr_stream struct
 * @buf: pointer to XDR buffer from which to decode data
 * @p: current pointer inside XDR buffer
 */
void xdr_init_decode(struct xdr_stream *xdr, struct xdr_buf *buf, __be32 *p)
{
	xdr->buf = buf;
	xdr->scratch.iov_base = NULL;
	xdr->scratch.iov_len = 0;
	xdr->nwords = XDR_QUADLEN(buf->len);
	if (buf->head[0].iov_len != 0)
		xdr_set_iov(xdr, buf->head, buf->len);
	else if (buf->page_len != 0)
		xdr_set_page_base(xdr, 0, buf->len);
	if (p != NULL && p > xdr->p && xdr->end >= p) {
		xdr->nwords -= p - xdr->p;
		xdr->p = p;
	}
}
EXPORT_SYMBOL_GPL(xdr_init_decode);

/**
 * xdr_init_decode_pages - Initialize an xdr_stream for decoding into pages
 * @xdr: pointer to xdr_stream struct
 * @buf: pointer to XDR buffer from which to decode data
 * @pages: list of pages to decode into
 * @len: length in bytes of buffer in pages
 */
void xdr_init_decode_pages(struct xdr_stream *xdr, struct xdr_buf *buf,
			   struct page **pages, unsigned int len)
{
	memset(buf, 0, sizeof(*buf));
	buf->pages = pages;
	buf->page_len = len;
	buf->buflen = len;
	buf->len = len;
	xdr_init_decode(xdr, buf, NULL);
}
EXPORT_SYMBOL_GPL(xdr_init_decode_pages);

static __be32 * __xdr_inline_decode(struct xdr_stream *xdr, size_t nbytes)
{
	unsigned int nwords = XDR_QUADLEN(nbytes);
	__be32 *p = xdr->p;
	__be32 *q = p + nwords;

	if (unlikely(nwords > xdr->nwords || q > xdr->end || q < p))
		return NULL;
	xdr->p = q;
	xdr->nwords -= nwords;
	return p;
}

/**
 * xdr_set_scratch_buffer - Attach a scratch buffer for decoding data.
 * @xdr: pointer to xdr_stream struct
 * @buf: pointer to an empty buffer
 * @buflen: size of 'buf'
 *
 * The scratch buffer is used when decoding from an array of pages.
 * If an xdr_inline_decode() call spans across page boundaries, then
 * we copy the data into the scratch buffer in order to allow linear
 * access.
 */
void xdr_set_scratch_buffer(struct xdr_stream *xdr, void *buf, size_t buflen)
{
	xdr->scratch.iov_base = buf;
	xdr->scratch.iov_len = buflen;
}
EXPORT_SYMBOL_GPL(xdr_set_scratch_buffer);

static __be32 *xdr_copy_to_scratch(struct xdr_stream *xdr, size_t nbytes)
{
	__be32 *p;
	void *cpdest = xdr->scratch.iov_base;
	size_t cplen = (char *)xdr->end - (char *)xdr->p;

	if (nbytes > xdr->scratch.iov_len)
		return NULL;
	memcpy(cpdest, xdr->p, cplen);
	cpdest += cplen;
	nbytes -= cplen;
	if (!xdr_set_next_buffer(xdr))
		return NULL;
	p = __xdr_inline_decode(xdr, nbytes);
	if (p == NULL)
		return NULL;
	memcpy(cpdest, p, nbytes);
	return xdr->scratch.iov_base;
}

/**
 * xdr_inline_decode - Retrieve XDR data to decode
 * @xdr: pointer to xdr_stream struct
 * @nbytes: number of bytes of data to decode
 *
 * Check if the input buffer is long enough to enable us to decode
 * 'nbytes' more bytes of data starting at the current position.
 * If so return the current pointer, then update the current
 * pointer position.
 */
__be32 * xdr_inline_decode(struct xdr_stream *xdr, size_t nbytes)
{
	__be32 *p;

	if (nbytes == 0)
		return xdr->p;
	if (xdr->p == xdr->end && !xdr_set_next_buffer(xdr))
		return NULL;
	p = __xdr_inline_decode(xdr, nbytes);
	if (p != NULL)
		return p;
	return xdr_copy_to_scratch(xdr, nbytes);
}
EXPORT_SYMBOL_GPL(xdr_inline_decode);
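
/*
 * Illustrative sketch, not part of the original file: decoding a
 * 16-byte object that may straddle a page boundary.  With a scratch
 * buffer attached, xdr_inline_decode() can still hand back a linear
 * pointer in that case; the example_* names and the -EIO choice are
 * assumptions.
 */
static int example_decode_verifier(struct xdr_stream *xdr, u8 *verf)
{
	char scratch[16];
	__be32 *p;

	xdr_set_scratch_buffer(xdr, scratch, sizeof(scratch));
	p = xdr_inline_decode(xdr, 16);
	if (unlikely(p == NULL))
		return -EIO;		/* reply too short */
	memcpy(verf, p, 16);
	return 0;
}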

static unsigned int xdr_align_pages(struct xdr_stream *xdr, unsigned int len)
{
	struct xdr_buf *buf = xdr->buf;
	struct kvec *iov;
	unsigned int nwords = XDR_QUADLEN(len);
	unsigned int cur = xdr_stream_pos(xdr);

	if (xdr->nwords == 0)
		return 0;
	/* Realign pages to current pointer position */
	iov = buf->head;
	if (iov->iov_len > cur) {
		xdr_shrink_bufhead(buf, iov->iov_len - cur);
		xdr->nwords = XDR_QUADLEN(buf->len - cur);
	}

	if (nwords > xdr->nwords) {
		nwords = xdr->nwords;
		len = nwords << 2;
	}
	if (buf->page_len <= len)
		len = buf->page_len;
	else if (nwords < xdr->nwords) {
		/* Truncate page data and move it into the tail */
		xdr_shrink_pagelen(buf, buf->page_len - len);
		xdr->nwords = XDR_QUADLEN(buf->len - cur);
	}
	return len;
}

/**
 * xdr_read_pages - Ensure page-based XDR data to decode is aligned at current pointer position
 * @xdr: pointer to xdr_stream struct
 * @len: number of bytes of page data
 *
 * Moves data beyond the current pointer position from the XDR head[] buffer
 * into the page list. Any data that lies beyond current position + "len"
 * bytes is moved into the XDR tail[].
 *
 * Returns the number of XDR encoded bytes now contained in the pages
 */
unsigned int xdr_read_pages(struct xdr_stream *xdr, unsigned int len)
{
	struct xdr_buf *buf = xdr->buf;
	struct kvec *iov;
	unsigned int nwords;
	unsigned int end;
	unsigned int padding;

	len = xdr_align_pages(xdr, len);
	if (len == 0)
		return 0;
	nwords = XDR_QUADLEN(len);
	padding = (nwords << 2) - len;
	xdr->iov = iov = buf->tail;
	/* Compute remaining message length. */
	end = ((xdr->nwords - nwords) << 2) + padding;
	if (end > iov->iov_len)
		end = iov->iov_len;
	/*
	 * Position current pointer at beginning of tail, and
	 * set remaining message length.
	 */
	xdr->p = (__be32 *)((char *)iov->iov_base + padding);
	xdr->end = (__be32 *)((char *)iov->iov_base + end);
	xdr->page_ptr = NULL;
	xdr->nwords = XDR_QUADLEN(end - padding);
	return len;
}
EXPORT_SYMBOL_GPL(xdr_read_pages);
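
/*
 * Illustrative sketch, not part of the original file: decoding a
 * READ-style reply in which a 32-bit count precedes page data.
 * Clamping the count to what the buffer actually holds mirrors what
 * callers commonly do; the example_* names are assumptions.
 */
static int example_decode_read_reply(struct xdr_stream *xdr, u32 *countp)
{
	__be32 *p = xdr_inline_decode(xdr, sizeof(__be32));

	if (unlikely(p == NULL))
		return -EIO;
	*countp = be32_to_cpup(p);
	/* Align page data at the current position; the stream is left
	 * pointing into the tail, past any XDR padding. */
	*countp = min_t(u32, *countp, xdr_read_pages(xdr, *countp));
	return 0;
}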

/**
 * xdr_enter_page - decode data from the XDR page
 * @xdr: pointer to xdr_stream struct
 * @len: number of bytes of page data
 *
 * Moves data beyond the current pointer position from the XDR head[] buffer
 * into the page list. Any data that lies beyond current position + "len"
 * bytes is moved into the XDR tail[]. The current pointer is then
 * repositioned at the beginning of the first XDR page.
 */
void xdr_enter_page(struct xdr_stream *xdr, unsigned int len)
{
	len = xdr_align_pages(xdr, len);
	/*
	 * Position current pointer at beginning of tail, and
	 * set remaining message length.
	 */
	if (len != 0)
		xdr_set_page_base(xdr, 0, len);
}
EXPORT_SYMBOL_GPL(xdr_enter_page);

static struct kvec empty_iov = {.iov_base = NULL, .iov_len = 0};

void
xdr_buf_from_iov(struct kvec *iov, struct xdr_buf *buf)
{
	buf->head[0] = *iov;
	buf->tail[0] = empty_iov;
	buf->page_len = 0;
	buf->buflen = buf->len = iov->iov_len;
}
EXPORT_SYMBOL_GPL(xdr_buf_from_iov);

/**
 * xdr_buf_subsegment - set subbuf to a portion of buf
 * @buf: an xdr buffer
 * @subbuf: the result buffer
 * @base: beginning of range in bytes
 * @len: length of range in bytes
 *
 * sets @subbuf to an xdr buffer representing the portion of @buf of
 * length @len starting at offset @base.
 *
 * @buf and @subbuf may be pointers to the same struct xdr_buf.
 *
 * Returns -1 if base or length are out of bounds.
 */
int
xdr_buf_subsegment(struct xdr_buf *buf, struct xdr_buf *subbuf,
			unsigned int base, unsigned int len)
{
	subbuf->buflen = subbuf->len = len;
	if (base < buf->head[0].iov_len) {
		subbuf->head[0].iov_base = buf->head[0].iov_base + base;
		subbuf->head[0].iov_len = min_t(unsigned int, len,
						buf->head[0].iov_len - base);
		len -= subbuf->head[0].iov_len;
		base = 0;
	} else {
		base -= buf->head[0].iov_len;
		subbuf->head[0].iov_len = 0;
	}

	if (base < buf->page_len) {
		subbuf->page_len = min(buf->page_len - base, len);
		base += buf->page_base;
		subbuf->page_base = base & ~PAGE_CACHE_MASK;
		subbuf->pages = &buf->pages[base >> PAGE_CACHE_SHIFT];
		len -= subbuf->page_len;
		base = 0;
	} else {
		base -= buf->page_len;
		subbuf->page_len = 0;
	}

	if (base < buf->tail[0].iov_len) {
		subbuf->tail[0].iov_base = buf->tail[0].iov_base + base;
		subbuf->tail[0].iov_len = min_t(unsigned int, len,
						buf->tail[0].iov_len - base);
		len -= subbuf->tail[0].iov_len;
		base = 0;
	} else {
		base -= buf->tail[0].iov_len;
		subbuf->tail[0].iov_len = 0;
	}

	if (base || len)
		return -1;
	return 0;
}
EXPORT_SYMBOL_GPL(xdr_buf_subsegment);
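
/*
 * Illustrative sketch, not part of the original file: viewing a byte
 * range of a larger buffer without copying, e.g. to hand just the
 * payload to an integrity routine.  The 8/64 offsets and the -EINVAL
 * mapping are arbitrary assumptions.
 */
static int example_extract_range(struct xdr_buf *buf, struct xdr_buf *subbuf)
{
	/* Make 'subbuf' describe bytes 8..71 of 'buf'. */
	if (xdr_buf_subsegment(buf, subbuf, 8, 64) < 0)
		return -EINVAL;		/* range falls outside 'buf' */
	/* 'subbuf' shares head/pages/tail memory with 'buf'. */
	return 0;
}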

/**
 * xdr_buf_trim - lop at most "len" bytes off the end of "buf"
 * @buf: buf to be trimmed
 * @len: number of bytes to reduce "buf" by
 *
 * Trim an xdr_buf by the given number of bytes by fixing up the lengths. Note
 * that it's possible that we'll trim less than that amount if the xdr_buf is
 * too small, or if (for instance) it's all in the head and the parser has
 * already read too far into it.
 */
void xdr_buf_trim(struct xdr_buf *buf, unsigned int len)
{
	size_t cur;
	unsigned int trim = len;

	if (buf->tail[0].iov_len) {
		cur = min_t(size_t, buf->tail[0].iov_len, trim);
		buf->tail[0].iov_len -= cur;
		trim -= cur;
		if (!trim)
			goto fix_len;
	}

	if (buf->page_len) {
		cur = min_t(unsigned int, buf->page_len, trim);
		buf->page_len -= cur;
		trim -= cur;
		if (!trim)
			goto fix_len;
	}

	if (buf->head[0].iov_len) {
		cur = min_t(size_t, buf->head[0].iov_len, trim);
		buf->head[0].iov_len -= cur;
		trim -= cur;
	}
fix_len:
	buf->len -= (len - trim);
}
EXPORT_SYMBOL_GPL(xdr_buf_trim);

static void __read_bytes_from_xdr_buf(struct xdr_buf *subbuf, void *obj, unsigned int len)
{
	unsigned int this_len;

	this_len = min_t(unsigned int, len, subbuf->head[0].iov_len);
	memcpy(obj, subbuf->head[0].iov_base, this_len);
	len -= this_len;
	obj += this_len;
	this_len = min_t(unsigned int, len, subbuf->page_len);
	if (this_len)
		_copy_from_pages(obj, subbuf->pages, subbuf->page_base, this_len);
	len -= this_len;
	obj += this_len;
	this_len = min_t(unsigned int, len, subbuf->tail[0].iov_len);
	memcpy(obj, subbuf->tail[0].iov_base, this_len);
}

/* obj is assumed to point to allocated memory of size at least len: */
int read_bytes_from_xdr_buf(struct xdr_buf *buf, unsigned int base, void *obj, unsigned int len)
{
	struct xdr_buf subbuf;
	int status;

	status = xdr_buf_subsegment(buf, &subbuf, base, len);
	if (status != 0)
		return status;
	__read_bytes_from_xdr_buf(&subbuf, obj, len);
	return 0;
}
EXPORT_SYMBOL_GPL(read_bytes_from_xdr_buf);
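
/*
 * Illustrative sketch, not part of the original file: pulling a fixed
 * 8-byte cookie out of an xdr_buf wherever it happens to land (head,
 * pages, tail, or a mix).  The offset and the "cookie" meaning are
 * assumptions.
 */
static int example_read_cookie(struct xdr_buf *buf, unsigned int offset,
			       u64 *cookie)
{
	__be64 raw;
	int err;

	err = read_bytes_from_xdr_buf(buf, offset, &raw, sizeof(raw));
	if (err)
		return err;
	*cookie = be64_to_cpu(raw);
	return 0;
}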

static void __write_bytes_to_xdr_buf(struct xdr_buf *subbuf, void *obj, unsigned int len)
{
	unsigned int this_len;

	this_len = min_t(unsigned int, len, subbuf->head[0].iov_len);
	memcpy(subbuf->head[0].iov_base, obj, this_len);
	len -= this_len;
	obj += this_len;
	this_len = min_t(unsigned int, len, subbuf->page_len);
	if (this_len)
		_copy_to_pages(subbuf->pages, subbuf->page_base, obj, this_len);
	len -= this_len;
	obj += this_len;
	this_len = min_t(unsigned int, len, subbuf->tail[0].iov_len);
	memcpy(subbuf->tail[0].iov_base, obj, this_len);
}

/* obj is assumed to point to allocated memory of size at least len: */
int write_bytes_to_xdr_buf(struct xdr_buf *buf, unsigned int base, void *obj, unsigned int len)
{
	struct xdr_buf subbuf;
	int status;

	status = xdr_buf_subsegment(buf, &subbuf, base, len);
	if (status != 0)
		return status;
	__write_bytes_to_xdr_buf(&subbuf, obj, len);
	return 0;
}
EXPORT_SYMBOL_GPL(write_bytes_to_xdr_buf);

int
xdr_decode_word(struct xdr_buf *buf, unsigned int base, u32 *obj)
{
	__be32 raw;
	int status;

	status = read_bytes_from_xdr_buf(buf, base, &raw, sizeof(*obj));
	if (status)
		return status;
	*obj = be32_to_cpu(raw);
	return 0;
}
EXPORT_SYMBOL_GPL(xdr_decode_word);

int
xdr_encode_word(struct xdr_buf *buf, unsigned int base, u32 obj)
{
	__be32 raw = cpu_to_be32(obj);

	return write_bytes_to_xdr_buf(buf, base, &raw, sizeof(obj));
}
EXPORT_SYMBOL_GPL(xdr_encode_word);
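
/*
 * Illustrative sketch, not part of the original file: a read-modify-
 * write of a 32-bit field at a known offset, combining
 * xdr_decode_word() and xdr_encode_word().  The "sequence number"
 * interpretation is an assumption.
 */
static int example_bump_seqno(struct xdr_buf *buf, unsigned int offset)
{
	u32 seq;
	int err;

	err = xdr_decode_word(buf, offset, &seq);	/* wire -> host order */
	if (err)
		return err;
	return xdr_encode_word(buf, offset, seq + 1);	/* host -> wire order */
}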

/* If the netobj starting offset bytes from the start of xdr_buf is contained
 * entirely in the head or the tail, set object to point to it; otherwise
 * try to find space for it at the end of the tail, copy it there, and
 * set obj to point to it. */
int xdr_buf_read_netobj(struct xdr_buf *buf, struct xdr_netobj *obj, unsigned int offset)
{
	struct xdr_buf subbuf;

	if (xdr_decode_word(buf, offset, &obj->len))
		return -EFAULT;
	if (xdr_buf_subsegment(buf, &subbuf, offset + 4, obj->len))
		return -EFAULT;

	/* Is the obj contained entirely in the head? */
	obj->data = subbuf.head[0].iov_base;
	if (subbuf.head[0].iov_len == obj->len)
		return 0;
	/* ..or is the obj contained entirely in the tail? */
	obj->data = subbuf.tail[0].iov_base;
	if (subbuf.tail[0].iov_len == obj->len)
		return 0;

	/* use end of tail as storage for obj:
	 * (We don't copy to the beginning because then we'd have
	 * to worry about doing a potentially overlapping copy.
	 * This assumes the object is at most half the length of the
	 * tail.) */
	if (obj->len > buf->buflen - buf->len)
		return -ENOMEM;
	if (buf->tail[0].iov_len != 0)
		obj->data = buf->tail[0].iov_base + buf->tail[0].iov_len;
	else
		obj->data = buf->head[0].iov_base + buf->head[0].iov_len;
	__read_bytes_from_xdr_buf(&subbuf, obj->data, obj->len);
	return 0;
}
EXPORT_SYMBOL_GPL(xdr_buf_read_netobj);

/* Returns 0 on success, or else a negative error code. */
static int
xdr_xcode_array2(struct xdr_buf *buf, unsigned int base,
		 struct xdr_array2_desc *desc, int encode)
{
	char *elem = NULL, *c;
	unsigned int copied = 0, todo, avail_here;
	struct page **ppages = NULL;
	int err;

	if (encode) {
		if (xdr_encode_word(buf, base, desc->array_len) != 0)
			return -EINVAL;
	} else {
		if (xdr_decode_word(buf, base, &desc->array_len) != 0 ||
		    desc->array_len > desc->array_maxlen ||
		    (unsigned long) base + 4 + desc->array_len *
				    desc->elem_size > buf->len)
			return -EINVAL;
	}
	base += 4;

	if (!desc->xcode)
		return 0;

	todo = desc->array_len * desc->elem_size;

	/* process head */
	if (todo && base < buf->head->iov_len) {
		c = buf->head->iov_base + base;
		avail_here = min_t(unsigned int, todo,
				   buf->head->iov_len - base);
		todo -= avail_here;

		while (avail_here >= desc->elem_size) {
			err = desc->xcode(desc, c);
			if (err)
				goto out;
			c += desc->elem_size;
			avail_here -= desc->elem_size;
		}
		if (avail_here) {
			if (!elem) {
				elem = kmalloc(desc->elem_size, GFP_KERNEL);
				err = -ENOMEM;
				if (!elem)
					goto out;
			}
			if (encode) {
				err = desc->xcode(desc, elem);
				if (err)
					goto out;
				memcpy(c, elem, avail_here);
			} else
				memcpy(elem, c, avail_here);
			copied = avail_here;
		}
		base = buf->head->iov_len;	/* align to start of pages */
	}

	/* process pages array */
	base -= buf->head->iov_len;
	if (todo && base < buf->page_len) {
		unsigned int avail_page;

		avail_here = min(todo, buf->page_len - base);
		todo -= avail_here;

		base += buf->page_base;
		ppages = buf->pages + (base >> PAGE_CACHE_SHIFT);
		base &= ~PAGE_CACHE_MASK;
		avail_page = min_t(unsigned int, PAGE_CACHE_SIZE - base,
					avail_here);
		c = kmap(*ppages) + base;

		while (avail_here) {
			avail_here -= avail_page;
			if (copied || avail_page < desc->elem_size) {
				unsigned int l = min(avail_page,
					desc->elem_size - copied);
				if (!elem) {
					elem = kmalloc(desc->elem_size,
						       GFP_KERNEL);
					err = -ENOMEM;
					if (!elem)
						goto out;
				}
				if (encode) {
					if (!copied) {
						err = desc->xcode(desc, elem);
						if (err)
							goto out;
					}
					memcpy(c, elem + copied, l);
					copied += l;
					if (copied == desc->elem_size)
						copied = 0;
				} else {
					memcpy(elem + copied, c, l);
					copied += l;
					if (copied == desc->elem_size) {
						err = desc->xcode(desc, elem);
						if (err)
							goto out;
						copied = 0;
					}
				}
				avail_page -= l;
				c += l;
			}
			while (avail_page >= desc->elem_size) {
				err = desc->xcode(desc, c);
				if (err)
					goto out;
				c += desc->elem_size;
				avail_page -= desc->elem_size;
			}
			if (avail_page) {
				unsigned int l = min(avail_page,
					desc->elem_size - copied);
				if (!elem) {
					elem = kmalloc(desc->elem_size,
						       GFP_KERNEL);
					err = -ENOMEM;
					if (!elem)
						goto out;
				}
				if (encode) {
					if (!copied) {
						err = desc->xcode(desc, elem);
						if (err)
							goto out;
					}
					memcpy(c, elem + copied, l);
					copied += l;
					if (copied == desc->elem_size)
						copied = 0;
				} else {
					memcpy(elem + copied, c, l);
					copied += l;
					if (copied == desc->elem_size) {
						err = desc->xcode(desc, elem);
						if (err)
							goto out;
						copied = 0;
					}
				}
			}
			if (avail_here) {
				kunmap(*ppages);
				ppages++;
				c = kmap(*ppages);
			}

			avail_page = min(avail_here,
					 (unsigned int) PAGE_CACHE_SIZE);
		}
		base = buf->page_len;	/* align to start of tail */
	}

	/* process tail */
	base -= buf->page_len;
	if (todo) {
		c = buf->tail->iov_base + base;
		if (copied) {
			unsigned int l = desc->elem_size - copied;

			if (encode)
				memcpy(c, elem + copied, l);
			else {
				memcpy(elem + copied, c, l);
				err = desc->xcode(desc, elem);
				if (err)
					goto out;
			}
			todo -= l;
			c += l;
		}
		while (todo) {
			err = desc->xcode(desc, c);
			if (err)
				goto out;
			c += desc->elem_size;
			todo -= desc->elem_size;
		}
	}
	err = 0;

out:
	kfree(elem);
	if (ppages)
		kunmap(*ppages);
	return err;
}

int
xdr_decode_array2(struct xdr_buf *buf, unsigned int base,
		  struct xdr_array2_desc *desc)
{
	if (base >= buf->len)
		return -EINVAL;

	return xdr_xcode_array2(buf, base, desc, 0);
}
EXPORT_SYMBOL_GPL(xdr_decode_array2);

int
xdr_encode_array2(struct xdr_buf *buf, unsigned int base,
		  struct xdr_array2_desc *desc)
{
	if ((unsigned long) base + 4 + desc->array_len * desc->elem_size >
	    buf->head->iov_len + buf->page_len + buf->tail->iov_len)
		return -EINVAL;

	return xdr_xcode_array2(buf, base, desc, 1);
}
EXPORT_SYMBOL_GPL(xdr_encode_array2);

int
xdr_process_buf(struct xdr_buf *buf, unsigned int offset, unsigned int len,
		int (*actor)(struct scatterlist *, void *), void *data)
{
	int i, ret = 0;
	unsigned int page_len, thislen, page_offset;
	struct scatterlist sg[1];

	sg_init_table(sg, 1);

	if (offset >= buf->head[0].iov_len) {
		offset -= buf->head[0].iov_len;
	} else {
		thislen = buf->head[0].iov_len - offset;
		if (thislen > len)
			thislen = len;
		sg_set_buf(sg, buf->head[0].iov_base + offset, thislen);
		ret = actor(sg, data);
		if (ret)
			goto out;
		offset = 0;
		len -= thislen;
	}
	if (len == 0)
		goto out;

	if (offset >= buf->page_len) {
		offset -= buf->page_len;
	} else {
		page_len = buf->page_len - offset;
		if (page_len > len)
			page_len = len;
		len -= page_len;
		page_offset = (offset + buf->page_base) & (PAGE_CACHE_SIZE - 1);
		i = (offset + buf->page_base) >> PAGE_CACHE_SHIFT;
		thislen = PAGE_CACHE_SIZE - page_offset;
		do {
			if (thislen > page_len)
				thislen = page_len;
			sg_set_page(sg, buf->pages[i], thislen, page_offset);
			ret = actor(sg, data);
			if (ret)
				goto out;
			page_len -= thislen;
			i++;
			page_offset = 0;
			thislen = PAGE_CACHE_SIZE;
		} while (page_len != 0);
		offset = 0;
	}
	if (len == 0)
		goto out;
	if (offset < buf->tail[0].iov_len) {
		thislen = buf->tail[0].iov_len - offset;
		if (thislen > len)
			thislen = len;
		sg_set_buf(sg, buf->tail[0].iov_base + offset, thislen);
		ret = actor(sg, data);
		len -= thislen;
	}
	if (len != 0)
		ret = -EINVAL;
out:
	return ret;
}
EXPORT_SYMBOL_GPL(xdr_process_buf);
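
/*
 * Illustrative sketch, not part of the original file: the actor
 * calling convention xdr_process_buf() expects.  This one only sums
 * segment lengths; a real user (e.g. a checksum routine) would
 * process each scatterlist entry.  All example_* names are
 * assumptions.
 */
static int example_count_actor(struct scatterlist *sg, void *data)
{
	size_t *total = data;

	*total += sg->length;
	return 0;			/* non-zero aborts the walk */
}

static size_t example_count_bytes(struct xdr_buf *buf, unsigned int offset,
				  unsigned int len)
{
	size_t total = 0;

	xdr_process_buf(buf, offset, len, example_count_actor, &total);
	return total;
}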