// SPDX-License-Identifier: GPL-2.0-only
/*
 * linux/net/sunrpc/xdr.c
 *
 * Generic XDR support.
 *
 * Copyright (C) 1995, 1996 Olaf Kirch <[email protected]>
 */

#include <linux/module.h>
#include <linux/slab.h>
#include <linux/types.h>
#include <linux/string.h>
#include <linux/kernel.h>
#include <linux/pagemap.h>
#include <linux/errno.h>
#include <linux/sunrpc/xdr.h>
#include <linux/sunrpc/msg_prot.h>
#include <linux/bvec.h>
#include <trace/events/sunrpc.h>

static void _copy_to_pages(struct page **, size_t, const char *, size_t);

/*
 * XDR functions for basic NFS types
 */
__be32 *
xdr_encode_netobj(__be32 *p, const struct xdr_netobj *obj)
{
        unsigned int quadlen = XDR_QUADLEN(obj->len);

        p[quadlen] = 0;         /* zero trailing bytes */
        *p++ = cpu_to_be32(obj->len);
        memcpy(p, obj->data, obj->len);
        return p + XDR_QUADLEN(obj->len);
}
EXPORT_SYMBOL_GPL(xdr_encode_netobj);

__be32 *
xdr_decode_netobj(__be32 *p, struct xdr_netobj *obj)
{
        unsigned int len;

        if ((len = be32_to_cpu(*p++)) > XDR_MAX_NETOBJ)
                return NULL;
        obj->len = len;
        obj->data = (u8 *) p;
        return p + XDR_QUADLEN(len);
}
EXPORT_SYMBOL_GPL(xdr_decode_netobj);
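/*
 * A netobj is a counted array of bytes: one 32-bit length word followed
 * by the data, zero-padded up to the next 32-bit boundary. A 6-byte
 * netobj therefore occupies 4 + 8 = 12 bytes on the wire.
 */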
/**
 * xdr_encode_opaque_fixed - Encode fixed length opaque data
 * @p: pointer to current position in XDR buffer.
 * @ptr: pointer to data to encode (or NULL)
 * @nbytes: size of data.
 *
 * Copy the array of data of length nbytes at ptr to the XDR buffer
 * at position p, then align to the next 32-bit boundary by padding
 * with zero bytes (see RFC 1832).
 * Note: if ptr is NULL, only the padding is performed.
 *
 * Returns the updated current XDR buffer position
 */
__be32 *xdr_encode_opaque_fixed(__be32 *p, const void *ptr, unsigned int nbytes)
{
        if (likely(nbytes != 0)) {
                unsigned int quadlen = XDR_QUADLEN(nbytes);
                unsigned int padding = (quadlen << 2) - nbytes;

                if (ptr != NULL)
                        memcpy(p, ptr, nbytes);
                if (padding != 0)
                        memset((char *)p + nbytes, 0, padding);
                p += quadlen;
        }
        return p;
}
EXPORT_SYMBOL_GPL(xdr_encode_opaque_fixed);

/**
 * xdr_encode_opaque - Encode variable length opaque data
 * @p: pointer to current position in XDR buffer.
 * @ptr: pointer to data to encode (or NULL)
 * @nbytes: size of data.
 *
 * Returns the updated current XDR buffer position
 */
__be32 *xdr_encode_opaque(__be32 *p, const void *ptr, unsigned int nbytes)
{
        *p++ = cpu_to_be32(nbytes);
        return xdr_encode_opaque_fixed(p, ptr, nbytes);
}
EXPORT_SYMBOL_GPL(xdr_encode_opaque);
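/*
 * Example: encoding the 5-byte opaque "hello" emits the length word
 * 0x00000005 followed by two data words, 'h' 'e' 'l' 'l' and
 * 'o' 0 0 0, i.e. 12 bytes on the wire.
 */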
__be32 *
xdr_encode_string(__be32 *p, const char *string)
{
        return xdr_encode_array(p, string, strlen(string));
}
EXPORT_SYMBOL_GPL(xdr_encode_string);

__be32 *
xdr_decode_string_inplace(__be32 *p, char **sp,
                          unsigned int *lenp, unsigned int maxlen)
{
        u32 len;

        len = be32_to_cpu(*p++);
        if (len > maxlen)
                return NULL;
        *lenp = len;
        *sp = (char *) p;
        return p + XDR_QUADLEN(len);
}
EXPORT_SYMBOL_GPL(xdr_decode_string_inplace);

/**
 * xdr_terminate_string - '\0'-terminate a string residing in an xdr_buf
 * @buf: XDR buffer where string resides
 * @len: length of string, in bytes
 */
void xdr_terminate_string(const struct xdr_buf *buf, const u32 len)
{
        char *kaddr;

        kaddr = kmap_atomic(buf->pages[0]);
        kaddr[buf->page_base + len] = '\0';
        kunmap_atomic(kaddr);
}
EXPORT_SYMBOL_GPL(xdr_terminate_string);
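/*
 * Note that xdr_terminate_string() maps only buf->pages[0], so the
 * terminating byte at page_base + len must fall within that first page.
 */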
size_t xdr_buf_pagecount(const struct xdr_buf *buf)
{
        if (!buf->page_len)
                return 0;
        return (buf->page_base + buf->page_len + PAGE_SIZE - 1) >> PAGE_SHIFT;
}
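/*
 * xdr_alloc_bvec() lazily allocates a bio_vec array covering buf->pages;
 * each entry maps one whole page. The array persists until
 * xdr_free_bvec() releases it. Returns 0 on success or -ENOMEM on
 * allocation failure.
 */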
int
xdr_alloc_bvec(struct xdr_buf *buf, gfp_t gfp)
{
        size_t i, n = xdr_buf_pagecount(buf);

        if (n != 0 && buf->bvec == NULL) {
                buf->bvec = kmalloc_array(n, sizeof(buf->bvec[0]), gfp);
                if (!buf->bvec)
                        return -ENOMEM;
                for (i = 0; i < n; i++) {
                        buf->bvec[i].bv_page = buf->pages[i];
                        buf->bvec[i].bv_len = PAGE_SIZE;
                        buf->bvec[i].bv_offset = 0;
                }
        }
        return 0;
}

void
xdr_free_bvec(struct xdr_buf *buf)
{
        kfree(buf->bvec);
        buf->bvec = NULL;
}

/**
 * xdr_inline_pages - Prepare receive buffer for a large reply
 * @xdr: xdr_buf into which reply will be placed
 * @offset: expected offset where data payload will start, in bytes
 * @pages: vector of struct page pointers
 * @base: offset in first page where receive should start, in bytes
 * @len: expected size of the upper layer data payload, in bytes
 */
void
xdr_inline_pages(struct xdr_buf *xdr, unsigned int offset,
                 struct page **pages, unsigned int base, unsigned int len)
{
        struct kvec *head = xdr->head;
        struct kvec *tail = xdr->tail;
        char *buf = (char *)head->iov_base;
        unsigned int buflen = head->iov_len;

        head->iov_len = offset;

        xdr->pages = pages;
        xdr->page_base = base;
        xdr->page_len = len;

        tail->iov_base = buf + offset;
        tail->iov_len = buflen - offset;
        xdr->buflen += len;
}
EXPORT_SYMBOL_GPL(xdr_inline_pages);
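/*
 * A typical use: an NFS client expecting a large READ reply reserves the
 * first @offset bytes of the head for the RPC and reply headers, passes
 * the pages that should receive the payload, and lets any trailing XDR
 * items land in the tail kvec set up above.
 */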
/*
 * Helper routines for doing 'memmove' like operations on a struct xdr_buf
 */
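/*
 * In these helpers, the "page vector address" of byte 'base' in pages[i]
 * is (i << PAGE_SHIFT) + base; e.g. with a 4096-byte PAGE_SIZE, address
 * 5000 names byte 904 of pages[1].
 */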
/**
 * _shift_data_left_pages
 * @pages: vector of pages containing both the source and dest memory area.
 * @pgto_base: page vector address of destination
 * @pgfrom_base: page vector address of source
 * @len: number of bytes to copy
 *
 * Note: the addresses pgto_base and pgfrom_base are both calculated in
 *       the same way:
 *      if a memory area starts at byte 'base' in page 'pages[i]',
 *      then its address is given as (i << PAGE_SHIFT) + base
 * Also note: pgto_base must be < pgfrom_base, but the memory areas
 *      they point to may overlap.
 */
static void
_shift_data_left_pages(struct page **pages, size_t pgto_base,
                       size_t pgfrom_base, size_t len)
{
        struct page **pgfrom, **pgto;
        char *vfrom, *vto;
        size_t copy;

        BUG_ON(pgfrom_base <= pgto_base);

        if (!len)
                return;

        pgto = pages + (pgto_base >> PAGE_SHIFT);
        pgfrom = pages + (pgfrom_base >> PAGE_SHIFT);

        pgto_base &= ~PAGE_MASK;
        pgfrom_base &= ~PAGE_MASK;

        do {
                if (pgto_base >= PAGE_SIZE) {
                        pgto_base = 0;
                        pgto++;
                }
                if (pgfrom_base >= PAGE_SIZE) {
                        pgfrom_base = 0;
                        pgfrom++;
                }

                copy = len;
                if (copy > (PAGE_SIZE - pgto_base))
                        copy = PAGE_SIZE - pgto_base;
                if (copy > (PAGE_SIZE - pgfrom_base))
                        copy = PAGE_SIZE - pgfrom_base;

                vto = kmap_atomic(*pgto);
                if (*pgto != *pgfrom) {
                        vfrom = kmap_atomic(*pgfrom);
                        memcpy(vto + pgto_base, vfrom + pgfrom_base, copy);
                        kunmap_atomic(vfrom);
                } else
                        memmove(vto + pgto_base, vto + pgfrom_base, copy);
                flush_dcache_page(*pgto);
                kunmap_atomic(vto);

                pgto_base += copy;
                pgfrom_base += copy;

        } while ((len -= copy) != 0);
}
/**
 * _shift_data_right_pages
 * @pages: vector of pages containing both the source and dest memory area.
 * @pgto_base: page vector address of destination
 * @pgfrom_base: page vector address of source
 * @len: number of bytes to copy
 *
 * Note: the addresses pgto_base and pgfrom_base are both calculated in
 *       the same way:
 *      if a memory area starts at byte 'base' in page 'pages[i]',
 *      then its address is given as (i << PAGE_SHIFT) + base
 * Also note: pgfrom_base must be < pgto_base, but the memory areas
 *      they point to may overlap.
 */
static void
_shift_data_right_pages(struct page **pages, size_t pgto_base,
                        size_t pgfrom_base, size_t len)
{
        struct page **pgfrom, **pgto;
        char *vfrom, *vto;
        size_t copy;

        BUG_ON(pgto_base <= pgfrom_base);

        if (!len)
                return;

        pgto_base += len;
        pgfrom_base += len;

        pgto = pages + (pgto_base >> PAGE_SHIFT);
        pgfrom = pages + (pgfrom_base >> PAGE_SHIFT);

        pgto_base &= ~PAGE_MASK;
        pgfrom_base &= ~PAGE_MASK;

        do {
                /* Are any pointers crossing a page boundary? */
                if (pgto_base == 0) {
                        pgto_base = PAGE_SIZE;
                        pgto--;
                }
                if (pgfrom_base == 0) {
                        pgfrom_base = PAGE_SIZE;
                        pgfrom--;
                }

                copy = len;
                if (copy > pgto_base)
                        copy = pgto_base;
                if (copy > pgfrom_base)
                        copy = pgfrom_base;
                pgto_base -= copy;
                pgfrom_base -= copy;

                vto = kmap_atomic(*pgto);
                if (*pgto != *pgfrom) {
                        vfrom = kmap_atomic(*pgfrom);
                        memcpy(vto + pgto_base, vfrom + pgfrom_base, copy);
                        kunmap_atomic(vfrom);
                } else
                        memmove(vto + pgto_base, vto + pgfrom_base, copy);
                flush_dcache_page(*pgto);
                kunmap_atomic(vto);

        } while ((len -= copy) != 0);
}

/**
 * _copy_to_pages
 * @pages: array of pages
 * @pgbase: page vector address of destination
 * @p: pointer to source data
 * @len: length
 *
 * Copies data from an arbitrary memory location into an array of pages
 * The copy is assumed to be non-overlapping.
 */
static void
_copy_to_pages(struct page **pages, size_t pgbase, const char *p, size_t len)
{
        struct page **pgto;
        char *vto;
        size_t copy;

        if (!len)
                return;

        pgto = pages + (pgbase >> PAGE_SHIFT);
        pgbase &= ~PAGE_MASK;

        for (;;) {
                copy = PAGE_SIZE - pgbase;
                if (copy > len)
                        copy = len;

                vto = kmap_atomic(*pgto);
                memcpy(vto + pgbase, p, copy);
                kunmap_atomic(vto);

                len -= copy;
                if (len == 0)
                        break;

                pgbase += copy;
                if (pgbase == PAGE_SIZE) {
                        flush_dcache_page(*pgto);
                        pgbase = 0;
                        pgto++;
                }
                p += copy;
        }
        flush_dcache_page(*pgto);
}
/**
 * _copy_from_pages
 * @p: pointer to destination
 * @pages: array of pages
 * @pgbase: offset of source data
 * @len: length
 *
 * Copies data into an arbitrary memory location from an array of pages
 * The copy is assumed to be non-overlapping.
 */
void
_copy_from_pages(char *p, struct page **pages, size_t pgbase, size_t len)
{
        struct page **pgfrom;
        char *vfrom;
        size_t copy;

        if (!len)
                return;

        pgfrom = pages + (pgbase >> PAGE_SHIFT);
        pgbase &= ~PAGE_MASK;

        do {
                copy = PAGE_SIZE - pgbase;
                if (copy > len)
                        copy = len;

                vfrom = kmap_atomic(*pgfrom);
                memcpy(p, vfrom + pgbase, copy);
                kunmap_atomic(vfrom);

                pgbase += copy;
                if (pgbase == PAGE_SIZE) {
                        pgbase = 0;
                        pgfrom++;
                }
                p += copy;

        } while ((len -= copy) != 0);
}
EXPORT_SYMBOL_GPL(_copy_from_pages);

static void xdr_buf_iov_zero(const struct kvec *iov, unsigned int base,
                             unsigned int len)
{
        if (base >= iov->iov_len)
                return;
        if (len > iov->iov_len - base)
                len = iov->iov_len - base;
        memset(iov->iov_base + base, 0, len);
}

/**
 * xdr_buf_pages_zero
 * @buf: xdr_buf
 * @pgbase: beginning offset
 * @len: length
 */
static void xdr_buf_pages_zero(const struct xdr_buf *buf, unsigned int pgbase,
                               unsigned int len)
{
        struct page **pages = buf->pages;
        struct page **page;
        char *vpage;
        unsigned int zero;

        if (!len)
                return;
        if (pgbase >= buf->page_len) {
                xdr_buf_iov_zero(buf->tail, pgbase - buf->page_len, len);
                return;
        }
        if (pgbase + len > buf->page_len) {
                xdr_buf_iov_zero(buf->tail, 0, pgbase + len - buf->page_len);
                len = buf->page_len - pgbase;
        }

        pgbase += buf->page_base;

        page = pages + (pgbase >> PAGE_SHIFT);
        pgbase &= ~PAGE_MASK;

        do {
                zero = PAGE_SIZE - pgbase;
                if (zero > len)
                        zero = len;

                vpage = kmap_atomic(*page);
                memset(vpage + pgbase, 0, zero);
                kunmap_atomic(vpage);

                flush_dcache_page(*page);
                pgbase = 0;
                page++;

        } while ((len -= zero) != 0);
}
static unsigned int xdr_buf_pages_fill_sparse(const struct xdr_buf *buf,
                                              unsigned int buflen, gfp_t gfp)
{
        unsigned int i, npages, pagelen;

        if (!(buf->flags & XDRBUF_SPARSE_PAGES))
                return buflen;
        if (buflen <= buf->head->iov_len)
                return buflen;
        pagelen = buflen - buf->head->iov_len;
        if (pagelen > buf->page_len)
                pagelen = buf->page_len;
        npages = (pagelen + buf->page_base + PAGE_SIZE - 1) >> PAGE_SHIFT;
        for (i = 0; i < npages; i++) {
                /* Only allocate the slots that are still empty */
                if (buf->pages[i])
                        continue;
                buf->pages[i] = alloc_page(gfp);
                if (likely(buf->pages[i]))
                        continue;
                buflen -= pagelen;
                pagelen = i << PAGE_SHIFT;
                if (pagelen > buf->page_base)
                        buflen += pagelen - buf->page_base;
                break;
        }
        return buflen;
}
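/*
 * xdr_buf_try_expand() tries to grow buf->len by up to @len bytes: it
 * first consumes any space already counted in the head, pages and tail
 * (allocating sparse pages as needed), then grows the tail into whatever
 * buffer space remains.
 */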
static void xdr_buf_try_expand(struct xdr_buf *buf, unsigned int len)
{
        struct kvec *head = buf->head;
        struct kvec *tail = buf->tail;
        unsigned int sum = head->iov_len + buf->page_len + tail->iov_len;
        unsigned int free_space, newlen;

        if (sum > buf->len) {
                free_space = min_t(unsigned int, sum - buf->len, len);
                newlen = xdr_buf_pages_fill_sparse(buf, buf->len + free_space,
                                                   GFP_KERNEL);
                free_space = newlen - buf->len;
                buf->len = newlen;
                len -= free_space;
                if (!len)
                        return;
        }

        if (buf->buflen > sum) {
                /* Expand the tail buffer */
                free_space = min_t(unsigned int, buf->buflen - sum, len);
                tail->iov_len += free_space;
                buf->len += free_space;
        }
}
static void xdr_buf_tail_copy_right(const struct xdr_buf *buf,
                                    unsigned int base, unsigned int len,
                                    unsigned int shift)
{
        const struct kvec *tail = buf->tail;
        unsigned int to = base + shift;

        if (to >= tail->iov_len)
                return;
        if (len + to > tail->iov_len)
                len = tail->iov_len - to;
        memmove(tail->iov_base + to, tail->iov_base + base, len);
}

static void xdr_buf_pages_copy_right(const struct xdr_buf *buf,
                                     unsigned int base, unsigned int len,
                                     unsigned int shift)
{
        const struct kvec *tail = buf->tail;
        unsigned int to = base + shift;
        unsigned int pglen = 0;
        unsigned int talen = 0, tato = 0;

        if (base >= buf->page_len)
                return;
        if (len > buf->page_len - base)
                len = buf->page_len - base;
        if (to >= buf->page_len) {
                tato = to - buf->page_len;
                if (tail->iov_len >= len + tato)
                        talen = len;
                else if (tail->iov_len > tato)
                        talen = tail->iov_len - tato;
        } else if (len + to >= buf->page_len) {
                pglen = buf->page_len - to;
                talen = len - pglen;
                if (talen > tail->iov_len)
                        talen = tail->iov_len;
        } else
                pglen = len;

        _copy_from_pages(tail->iov_base + tato, buf->pages,
                         buf->page_base + base + pglen, talen);
        _shift_data_right_pages(buf->pages, buf->page_base + to,
                                buf->page_base + base, pglen);
}
static void xdr_buf_head_copy_right(const struct xdr_buf *buf,
                                    unsigned int base, unsigned int len,
                                    unsigned int shift)
{
        const struct kvec *head = buf->head;
        const struct kvec *tail = buf->tail;
        unsigned int to = base + shift;
        unsigned int pglen = 0, pgto = 0;
        unsigned int talen = 0, tato = 0;

        if (base >= head->iov_len)
                return;
        if (len > head->iov_len - base)
                len = head->iov_len - base;
        if (to >= buf->page_len + head->iov_len) {
                tato = to - buf->page_len - head->iov_len;
                talen = len;
        } else if (to >= head->iov_len) {
                pgto = to - head->iov_len;
                pglen = len;
                if (pgto + pglen > buf->page_len) {
                        talen = pgto + pglen - buf->page_len;
                        pglen -= talen;
                }
        } else if (len + to > head->iov_len) {
                /* Only the bytes that spill past the head go to the pages */
                pglen = len + to - head->iov_len;
                if (pglen > buf->page_len) {
                        talen = pglen - buf->page_len;
                        pglen = buf->page_len;
                }
        }

        len -= talen;
        base += len;
        if (talen + tato > tail->iov_len)
                talen = tail->iov_len > tato ? tail->iov_len - tato : 0;
        memcpy(tail->iov_base + tato, head->iov_base + base, talen);

        len -= pglen;
        base -= pglen;
        _copy_to_pages(buf->pages, buf->page_base + pgto, head->iov_base + base,
                       pglen);

        base -= len;
        memmove(head->iov_base + to, head->iov_base + base, len);
}
static void xdr_buf_tail_shift_right(const struct xdr_buf *buf,
                                     unsigned int base, unsigned int len,
                                     unsigned int shift)
{
        const struct kvec *tail = buf->tail;

        if (base >= tail->iov_len || !shift || !len)
                return;
        xdr_buf_tail_copy_right(buf, base, len, shift);
}

static void xdr_buf_pages_shift_right(const struct xdr_buf *buf,
                                      unsigned int base, unsigned int len,
                                      unsigned int shift)
{
        if (!shift || !len)
                return;
        if (base >= buf->page_len) {
                xdr_buf_tail_shift_right(buf, base - buf->page_len, len, shift);
                return;
        }
        if (base + len > buf->page_len)
                xdr_buf_tail_shift_right(buf, 0, base + len - buf->page_len,
                                         shift);
        xdr_buf_pages_copy_right(buf, base, len, shift);
}
static void xdr_buf_head_shift_right(const struct xdr_buf *buf,
                                     unsigned int base, unsigned int len,
                                     unsigned int shift)
{
        const struct kvec *head = buf->head;

        if (!shift)
                return;
        if (base >= head->iov_len) {
                xdr_buf_pages_shift_right(buf, base - head->iov_len, len,
                                          shift);
                return;
        }
        if (base + len > head->iov_len)
                xdr_buf_pages_shift_right(buf, 0, base + len - head->iov_len,
                                          shift);
        xdr_buf_head_copy_right(buf, base, len, shift);
}
static void xdr_buf_tail_copy_left(const struct xdr_buf *buf, unsigned int base,
                                   unsigned int len, unsigned int shift)
{
        const struct kvec *tail = buf->tail;

        if (base >= tail->iov_len)
                return;
        if (len > tail->iov_len - base)
                len = tail->iov_len - base;
        /* Shift data into head */
        if (shift > buf->page_len + base) {
                const struct kvec *head = buf->head;
                unsigned int hdto =
                        head->iov_len + buf->page_len + base - shift;
                unsigned int hdlen = len;

                if (WARN_ONCE(shift > head->iov_len + buf->page_len + base,
                              "SUNRPC: Misaligned data.\n"))
                        return;
                if (hdto + hdlen > head->iov_len)
                        hdlen = head->iov_len - hdto;
                memcpy(head->iov_base + hdto, tail->iov_base + base, hdlen);
                base += hdlen;
                len -= hdlen;
                if (!len)
                        return;
        }
        /* Shift data into pages */
        if (shift > base) {
                unsigned int pgto = buf->page_len + base - shift;
                unsigned int pglen = len;

                if (pgto + pglen > buf->page_len)
                        pglen = buf->page_len - pgto;
                _copy_to_pages(buf->pages, buf->page_base + pgto,
                               tail->iov_base + base, pglen);
                base += pglen;
                len -= pglen;
                if (!len)
                        return;
        }
        memmove(tail->iov_base + base - shift, tail->iov_base + base, len);
}

static void xdr_buf_pages_copy_left(const struct xdr_buf *buf,
                                    unsigned int base, unsigned int len,
                                    unsigned int shift)
{
        unsigned int pgto;

        if (base >= buf->page_len)
                return;
        if (len > buf->page_len - base)
                len = buf->page_len - base;
        /* Shift data into head */
        if (shift > base) {
                const struct kvec *head = buf->head;
                unsigned int hdto = head->iov_len + base - shift;
                unsigned int hdlen = len;

                if (WARN_ONCE(shift > head->iov_len + base,
                              "SUNRPC: Misaligned data.\n"))
                        return;
                if (hdto + hdlen > head->iov_len)
                        hdlen = head->iov_len - hdto;
                _copy_from_pages(head->iov_base + hdto, buf->pages,
                                 buf->page_base + base, hdlen);
                base += hdlen;
                len -= hdlen;
                if (!len)
                        return;
        }
        pgto = base - shift;
        _shift_data_left_pages(buf->pages, buf->page_base + pgto,
                               buf->page_base + base, len);
}

static void xdr_buf_tail_shift_left(const struct xdr_buf *buf,
                                    unsigned int base, unsigned int len,
                                    unsigned int shift)
{
        if (!shift || !len)
                return;
        xdr_buf_tail_copy_left(buf, base, len, shift);
}

static void xdr_buf_pages_shift_left(const struct xdr_buf *buf,
                                     unsigned int base, unsigned int len,
                                     unsigned int shift)
{
        if (!shift || !len)
                return;
        if (base >= buf->page_len) {
                xdr_buf_tail_shift_left(buf, base - buf->page_len, len, shift);
                return;
        }
        xdr_buf_pages_copy_left(buf, base, len, shift);
        len += base;
        if (len <= buf->page_len)
                return;
        xdr_buf_tail_copy_left(buf, 0, len - buf->page_len, shift);
}

static void xdr_buf_head_shift_left(const struct xdr_buf *buf,
                                    unsigned int base, unsigned int len,
                                    unsigned int shift)
{
        const struct kvec *head = buf->head;
        unsigned int bytes;

        if (!shift || !len)
                return;

        if (shift > base) {
                bytes = (shift - base);
                if (bytes >= len)
                        return;
                base += bytes;
                len -= bytes;
        }

        if (base < head->iov_len) {
                bytes = min_t(unsigned int, len, head->iov_len - base);
                memmove(head->iov_base + (base - shift),
                        head->iov_base + base, bytes);
                base += bytes;
                len -= bytes;
        }
        xdr_buf_pages_shift_left(buf, base - head->iov_len, len, shift);
}
/**
 * xdr_shrink_bufhead
 * @buf: xdr_buf
 * @len: new length of buf->head[0]
 *
 * Shrinks XDR buffer's header kvec buf->head[0], setting it to
 * 'len' bytes. The extra data is not lost, but is instead
 * moved into the inlined pages and/or the tail.
 */
static unsigned int xdr_shrink_bufhead(struct xdr_buf *buf, unsigned int len)
{
        struct kvec *head = buf->head;
        unsigned int shift, buflen = max(buf->len, len);

        WARN_ON_ONCE(len > head->iov_len);
        if (head->iov_len > buflen) {
                buf->buflen -= head->iov_len - buflen;
                head->iov_len = buflen;
        }
        if (len >= head->iov_len)
                return 0;
        shift = head->iov_len - len;
        xdr_buf_try_expand(buf, shift);
        xdr_buf_head_shift_right(buf, len, buflen - len, shift);
        head->iov_len = len;
        buf->buflen -= shift;
        buf->len -= shift;
        return shift;
}

/**
 * xdr_shrink_pagelen - shrinks buf->pages to @len bytes
 * @buf: xdr_buf
 * @len: new page buffer length
 *
 * The extra data is not lost, but is instead moved into buf->tail.
 * Returns the actual number of bytes moved.
 */
static unsigned int xdr_shrink_pagelen(struct xdr_buf *buf, unsigned int len)
{
        unsigned int shift, buflen = buf->len - buf->head->iov_len;

        WARN_ON_ONCE(len > buf->page_len);
        if (buf->head->iov_len >= buf->len || len > buflen)
                buflen = len;
        if (buf->page_len > buflen) {
                buf->buflen -= buf->page_len - buflen;
                buf->page_len = buflen;
        }
        if (len >= buf->page_len)
                return 0;
        shift = buf->page_len - len;
        xdr_buf_try_expand(buf, shift);
        xdr_buf_pages_shift_right(buf, len, buflen - len, shift);
        buf->page_len = len;
        buf->len -= shift;
        buf->buflen -= shift;
        return shift;
}
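/*
 * xdr_shift_buf() shrinks buf->head[0] by @len bytes, pushing the
 * displaced data right into the page list and/or the tail.
 */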
void
xdr_shift_buf(struct xdr_buf *buf, size_t len)
{
        xdr_shrink_bufhead(buf, buf->head->iov_len - len);
}
EXPORT_SYMBOL_GPL(xdr_shift_buf);
/**
 * xdr_stream_pos - Return the current offset from the start of the xdr_stream
 * @xdr: pointer to struct xdr_stream
 */
unsigned int xdr_stream_pos(const struct xdr_stream *xdr)
{
        return (unsigned int)(XDR_QUADLEN(xdr->buf->len) - xdr->nwords) << 2;
}
EXPORT_SYMBOL_GPL(xdr_stream_pos);

static void xdr_stream_set_pos(struct xdr_stream *xdr, unsigned int pos)
{
        unsigned int blen = xdr->buf->len;

        xdr->nwords = blen > pos ? XDR_QUADLEN(blen) - XDR_QUADLEN(pos) : 0;
}

static void xdr_stream_page_set_pos(struct xdr_stream *xdr, unsigned int pos)
{
        xdr_stream_set_pos(xdr, pos + xdr->buf->head[0].iov_len);
}

/**
 * xdr_page_pos - Return the current offset from the start of the xdr pages
 * @xdr: pointer to struct xdr_stream
 */
unsigned int xdr_page_pos(const struct xdr_stream *xdr)
{
        unsigned int pos = xdr_stream_pos(xdr);

        WARN_ON(pos < xdr->buf->head[0].iov_len);
        return pos - xdr->buf->head[0].iov_len;
}
EXPORT_SYMBOL_GPL(xdr_page_pos);

/**
 * xdr_init_encode - Initialize a struct xdr_stream for sending data.
 * @xdr: pointer to xdr_stream struct
 * @buf: pointer to XDR buffer in which to encode data
 * @p: current pointer inside XDR buffer
 * @rqst: pointer to controlling rpc_rqst, for debugging
 *
 * Note: at the moment the RPC client only passes the length of our
 * scratch buffer in the xdr_buf's header kvec. Previously this
 * meant we needed to call xdr_adjust_iovec() after encoding the
 * data. With the new scheme, the xdr_stream manages the details
 * of the buffer length, and takes care of adjusting the kvec
 * length for us.
 */
void xdr_init_encode(struct xdr_stream *xdr, struct xdr_buf *buf, __be32 *p,
                     struct rpc_rqst *rqst)
{
        struct kvec *iov = buf->head;
        int scratch_len = buf->buflen - buf->page_len - buf->tail[0].iov_len;

        xdr_reset_scratch_buffer(xdr);
        BUG_ON(scratch_len < 0);
        xdr->buf = buf;
        xdr->iov = iov;
        xdr->p = (__be32 *)((char *)iov->iov_base + iov->iov_len);
        xdr->end = (__be32 *)((char *)iov->iov_base + scratch_len);
        BUG_ON(iov->iov_len > scratch_len);

        if (p != xdr->p && p != NULL) {
                size_t len;

                BUG_ON(p < xdr->p || p > xdr->end);
                len = (char *)p - (char *)xdr->p;
                xdr->p = p;
                buf->len += len;
                iov->iov_len += len;
        }
        xdr->rqst = rqst;
}
EXPORT_SYMBOL_GPL(xdr_init_encode);

/**
 * xdr_init_encode_pages - Initialize an xdr_stream for encoding into pages
 * @xdr: pointer to xdr_stream struct
 * @buf: pointer to XDR buffer into which to encode data
 * @pages: list of pages to encode into
 * @rqst: pointer to controlling rpc_rqst, for debugging
 */
void xdr_init_encode_pages(struct xdr_stream *xdr, struct xdr_buf *buf,
                           struct page **pages, struct rpc_rqst *rqst)
{
        xdr_reset_scratch_buffer(xdr);

        xdr->buf = buf;
        xdr->page_ptr = pages;
        xdr->iov = NULL;
        xdr->p = page_address(*pages);
        xdr->end = (void *)xdr->p + min_t(u32, buf->buflen, PAGE_SIZE);
        xdr->rqst = rqst;
}
EXPORT_SYMBOL_GPL(xdr_init_encode_pages);
/**
 * __xdr_commit_encode - Ensure all data is written to buffer
 * @xdr: pointer to xdr_stream
 *
 * We handle encoding across page boundaries by giving the caller a
 * temporary location to write to, then later copying the data into
 * place; xdr_commit_encode does that copying.
 *
 * Normally the caller doesn't need to call this directly, as the
 * following xdr_reserve_space will do it. But an explicit call may be
 * required at the end of encoding, or any other time when the xdr_buf
 * data might be read.
 */
void __xdr_commit_encode(struct xdr_stream *xdr)
{
        size_t shift = xdr->scratch.iov_len;
        void *page;

        page = page_address(*xdr->page_ptr);
        memcpy(xdr->scratch.iov_base, page, shift);
        memmove(page, page + shift, (void *)xdr->p - page);
        xdr_reset_scratch_buffer(xdr);
}
EXPORT_SYMBOL_GPL(__xdr_commit_encode);

/*
 * The buffer space to be reserved crosses the boundary between
 * xdr->buf->head and xdr->buf->pages, or between two pages
 * in xdr->buf->pages.
 */
static noinline __be32 *xdr_get_next_encode_buffer(struct xdr_stream *xdr,
                                                   size_t nbytes)
{
        int space_left;
        int frag1bytes, frag2bytes;
        void *p;

        if (nbytes > PAGE_SIZE)
                goto out_overflow; /* Bigger buffers require special handling */
        if (xdr->buf->len + nbytes > xdr->buf->buflen)
                goto out_overflow; /* Sorry, we're totally out of space */
        frag1bytes = (xdr->end - xdr->p) << 2;
        frag2bytes = nbytes - frag1bytes;
        if (xdr->iov)
                xdr->iov->iov_len += frag1bytes;
        else
                xdr->buf->page_len += frag1bytes;
        xdr->page_ptr++;
        xdr->iov = NULL;

        /*
         * If the last encode didn't end exactly on a page boundary, the
         * next one will straddle boundaries. Encode into the next
         * page, then copy it back later in xdr_commit_encode. We use
         * the "scratch" iov to track any temporarily unused fragment of
         * space at the end of the previous buffer:
         */
        xdr_set_scratch_buffer(xdr, xdr->p, frag1bytes);

        /*
         * xdr->p is where the next encode will start after
         * xdr_commit_encode() has shifted this one back:
         */
        p = page_address(*xdr->page_ptr);
        xdr->p = p + frag2bytes;
        space_left = xdr->buf->buflen - xdr->buf->len;
        if (space_left - frag1bytes >= PAGE_SIZE)
                xdr->end = p + PAGE_SIZE;
        else
                xdr->end = p + space_left - frag1bytes;

        xdr->buf->page_len += frag2bytes;
        xdr->buf->len += nbytes;
        return p;
out_overflow:
        trace_rpc_xdr_overflow(xdr, nbytes);
        return NULL;
}
/**
 * xdr_reserve_space - Reserve buffer space for sending
 * @xdr: pointer to xdr_stream
 * @nbytes: number of bytes to reserve
 *
 * Checks that we have enough buffer space to encode 'nbytes' more
 * bytes of data. If so, update the total xdr_buf length, and
 * adjust the length of the current kvec.
 */
__be32 *xdr_reserve_space(struct xdr_stream *xdr, size_t nbytes)
{
        __be32 *p = xdr->p;
        __be32 *q;

        xdr_commit_encode(xdr);
        /* align nbytes on the next 32-bit boundary */
        nbytes += 3;
        nbytes &= ~3;
        q = p + (nbytes >> 2);
        if (unlikely(q > xdr->end || q < p))
                return xdr_get_next_encode_buffer(xdr, nbytes);
        xdr->p = q;
        if (xdr->iov)
                xdr->iov->iov_len += nbytes;
        else
                xdr->buf->page_len += nbytes;
        xdr->buf->len += nbytes;
        return p;
}
EXPORT_SYMBOL_GPL(xdr_reserve_space);
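/*
 * A typical encode sequence reserves space, then fills it in (the
 * variable names below are illustrative):
 *
 *      p = xdr_reserve_space(xdr, 2 * sizeof(__be32));
 *      if (!p)
 *              return -EMSGSIZE;
 *      *p++ = cpu_to_be32(flags);
 *      *p = cpu_to_be32(count);
 */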
/**
 * xdr_reserve_space_vec - Reserves a large amount of buffer space for sending
 * @xdr: pointer to xdr_stream
 * @vec: pointer to a kvec array
 * @nbytes: number of bytes to reserve
 *
 * Reserves enough buffer space to encode 'nbytes' of data and stores the
 * pointers in 'vec'. The size argument passed to xdr_reserve_space() is
 * determined based on the number of bytes remaining in the current page to
 * avoid invalidating iov_base pointers when xdr_commit_encode() is called.
 */
int xdr_reserve_space_vec(struct xdr_stream *xdr, struct kvec *vec, size_t nbytes)
{
        int thislen;
        int v = 0;
        __be32 *p;

        /*
         * svcrdma requires every READ payload to start somewhere
         * in xdr->pages.
         */
        if (xdr->iov == xdr->buf->head) {
                xdr->iov = NULL;
                xdr->end = xdr->p;
        }

        while (nbytes) {
                thislen = xdr->buf->page_len % PAGE_SIZE;
                thislen = min_t(size_t, nbytes, PAGE_SIZE - thislen);

                p = xdr_reserve_space(xdr, thislen);
                if (!p)
                        return -EIO;

                vec[v].iov_base = p;
                vec[v].iov_len = thislen;
                v++;
                nbytes -= thislen;
        }

        return v;
}
EXPORT_SYMBOL_GPL(xdr_reserve_space_vec);
/**
 * xdr_truncate_encode - truncate an encode buffer
 * @xdr: pointer to xdr_stream
 * @len: new length of buffer
 *
 * Truncates the xdr stream, so that xdr->buf->len == len,
 * and xdr->p points at offset len from the start of the buffer, and
 * head, tail, and page lengths are adjusted to correspond.
 *
 * If this means moving xdr->p to a different buffer, we assume that
 * the end pointer should be set to the end of the current page,
 * except in the case of the head buffer when we assume the head
 * buffer's current length represents the end of the available buffer.
 *
 * This is *not* safe to use on a buffer that already has inlined page
 * cache pages (as in a zero-copy server read reply), except for the
 * simple case of truncating from one position in the tail to another.
 */
void xdr_truncate_encode(struct xdr_stream *xdr, size_t len)
{
        struct xdr_buf *buf = xdr->buf;
        struct kvec *head = buf->head;
        struct kvec *tail = buf->tail;
        int fraglen;
        int new;

        if (len > buf->len) {
                WARN_ON_ONCE(1);
                return;
        }
        xdr_commit_encode(xdr);

        fraglen = min_t(int, buf->len - len, tail->iov_len);
        tail->iov_len -= fraglen;
        buf->len -= fraglen;
        if (tail->iov_len) {
                xdr->p = tail->iov_base + tail->iov_len;
                WARN_ON_ONCE(!xdr->end);
                WARN_ON_ONCE(!xdr->iov);
                return;
        }
        WARN_ON_ONCE(fraglen);
        fraglen = min_t(int, buf->len - len, buf->page_len);
        buf->page_len -= fraglen;
        buf->len -= fraglen;

        new = buf->page_base + buf->page_len;

        xdr->page_ptr = buf->pages + (new >> PAGE_SHIFT);

        if (buf->page_len) {
                xdr->p = page_address(*xdr->page_ptr);
                xdr->end = (void *)xdr->p + PAGE_SIZE;
                xdr->p = (void *)xdr->p + (new % PAGE_SIZE);
                WARN_ON_ONCE(xdr->iov);
                return;
        }
        if (fraglen)
                xdr->end = head->iov_base + head->iov_len;
        /* (otherwise assume xdr->end is already set) */
        xdr->page_ptr--;
        head->iov_len = len;
        buf->len = len;
        xdr->p = head->iov_base + head->iov_len;
        xdr->iov = buf->head;
}
EXPORT_SYMBOL(xdr_truncate_encode);
/**
 * xdr_restrict_buflen - decrease available buffer space
 * @xdr: pointer to xdr_stream
 * @newbuflen: new maximum number of bytes available
 *
 * Adjust our idea of how much space is available in the buffer.
 * If we've already used too much space in the buffer, returns -1.
 * If the available space is already smaller than newbuflen, returns 0
 * and does nothing. Otherwise, adjusts xdr->buf->buflen to newbuflen
 * and ensures xdr->end is set at most offset newbuflen from the start
 * of the buffer.
 */
int xdr_restrict_buflen(struct xdr_stream *xdr, int newbuflen)
{
        struct xdr_buf *buf = xdr->buf;
        int left_in_this_buf = (void *)xdr->end - (void *)xdr->p;
        int end_offset = buf->len + left_in_this_buf;

        if (newbuflen < 0 || newbuflen < buf->len)
                return -1;
        if (newbuflen > buf->buflen)
                return 0;
        if (newbuflen < end_offset)
                xdr->end = (void *)xdr->end + newbuflen - end_offset;
        buf->buflen = newbuflen;
        return 0;
}
EXPORT_SYMBOL(xdr_restrict_buflen);
/**
 * xdr_write_pages - Insert a list of pages into an XDR buffer for sending
 * @xdr: pointer to xdr_stream
 * @pages: list of pages
 * @base: offset of first byte
 * @len: length of data in bytes
 */
void xdr_write_pages(struct xdr_stream *xdr, struct page **pages, unsigned int base,
                     unsigned int len)
{
        struct xdr_buf *buf = xdr->buf;
        struct kvec *iov = buf->tail;

        buf->pages = pages;
        buf->page_base = base;
        buf->page_len = len;

        iov->iov_base = (char *)xdr->p;
        iov->iov_len = 0;
        xdr->iov = iov;

        if (len & 3) {
                unsigned int pad = 4 - (len & 3);

                BUG_ON(xdr->p >= xdr->end);
                iov->iov_base = (char *)xdr->p + (len & 3);
                iov->iov_len += pad;
                len += pad;
                *xdr->p++ = 0;
        }
        buf->buflen += len;
        buf->len += len;
}
EXPORT_SYMBOL_GPL(xdr_write_pages);
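/*
 * Note the padding arithmetic above: a 5-byte page payload, for example,
 * yields pad = 3, so one zero word is written and the tail begins at
 * byte 1 of that word, keeping the stream 32-bit aligned.
 */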
static unsigned int xdr_set_iov(struct xdr_stream *xdr, struct kvec *iov,
                                unsigned int base, unsigned int len)
{
        if (len > iov->iov_len)
                len = iov->iov_len;
        if (unlikely(base > len))
                base = len;
        xdr->p = (__be32 *)(iov->iov_base + base);
        xdr->end = (__be32 *)(iov->iov_base + len);
        xdr->iov = iov;
        xdr->page_ptr = NULL;
        return len - base;
}

static unsigned int xdr_set_tail_base(struct xdr_stream *xdr,
                                      unsigned int base, unsigned int len)
{
        struct xdr_buf *buf = xdr->buf;

        xdr_stream_set_pos(xdr, base + buf->page_len + buf->head->iov_len);
        return xdr_set_iov(xdr, buf->tail, base, len);
}

static unsigned int xdr_set_page_base(struct xdr_stream *xdr,
                                      unsigned int base, unsigned int len)
{
        unsigned int pgnr;
        unsigned int maxlen;
        unsigned int pgoff;
        unsigned int pgend;
        void *kaddr;

        maxlen = xdr->buf->page_len;
        if (base >= maxlen)
                return 0;
        else
                maxlen -= base;
        if (len > maxlen)
                len = maxlen;

        xdr_stream_page_set_pos(xdr, base);
        base += xdr->buf->page_base;

        pgnr = base >> PAGE_SHIFT;
        xdr->page_ptr = &xdr->buf->pages[pgnr];
        kaddr = page_address(*xdr->page_ptr);

        pgoff = base & ~PAGE_MASK;
        xdr->p = (__be32 *)(kaddr + pgoff);

        pgend = pgoff + len;
        if (pgend > PAGE_SIZE)
                pgend = PAGE_SIZE;
        xdr->end = (__be32 *)(kaddr + pgend);
        xdr->iov = NULL;
        return len;
}

static void xdr_set_page(struct xdr_stream *xdr, unsigned int base,
                         unsigned int len)
{
        if (xdr_set_page_base(xdr, base, len) == 0) {
                base -= xdr->buf->page_len;
                xdr_set_tail_base(xdr, base, len);
        }
}

static void xdr_set_next_page(struct xdr_stream *xdr)
{
        unsigned int newbase;

        newbase = (1 + xdr->page_ptr - xdr->buf->pages) << PAGE_SHIFT;
        newbase -= xdr->buf->page_base;
        if (newbase < xdr->buf->page_len)
                xdr_set_page_base(xdr, newbase, xdr_stream_remaining(xdr));
        else
                xdr_set_tail_base(xdr, 0, xdr_stream_remaining(xdr));
}

static bool xdr_set_next_buffer(struct xdr_stream *xdr)
{
        if (xdr->page_ptr != NULL)
                xdr_set_next_page(xdr);
        else if (xdr->iov == xdr->buf->head)
                xdr_set_page(xdr, 0, xdr_stream_remaining(xdr));
        return xdr->p != xdr->end;
}

/**
 * xdr_init_decode - Initialize an xdr_stream for decoding data.
 * @xdr: pointer to xdr_stream struct
 * @buf: pointer to XDR buffer from which to decode data
 * @p: current pointer inside XDR buffer
 * @rqst: pointer to controlling rpc_rqst, for debugging
 */
void xdr_init_decode(struct xdr_stream *xdr, struct xdr_buf *buf, __be32 *p,
                     struct rpc_rqst *rqst)
{
        xdr->buf = buf;
        xdr_reset_scratch_buffer(xdr);
        xdr->nwords = XDR_QUADLEN(buf->len);
        if (xdr_set_iov(xdr, buf->head, 0, buf->len) == 0 &&
            xdr_set_page_base(xdr, 0, buf->len) == 0)
                xdr_set_iov(xdr, buf->tail, 0, buf->len);
        if (p != NULL && p > xdr->p && xdr->end >= p) {
                xdr->nwords -= p - xdr->p;
                xdr->p = p;
        }
        xdr->rqst = rqst;
}
EXPORT_SYMBOL_GPL(xdr_init_decode);
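/*
 * A minimal decode sequence (identifiers are illustrative):
 *
 *      xdr_init_decode(&xdr, &rcvbuf, p, req);
 *      p = xdr_inline_decode(&xdr, sizeof(__be32));
 *      if (!p)
 *              return -EIO;
 *      count = be32_to_cpup(p);
 */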
/**
 * xdr_init_decode_pages - Initialize an xdr_stream for decoding into pages
 * @xdr: pointer to xdr_stream struct
 * @buf: pointer to XDR buffer from which to decode data
 * @pages: list of pages to decode into
 * @len: length in bytes of buffer in pages
 */
void xdr_init_decode_pages(struct xdr_stream *xdr, struct xdr_buf *buf,
                           struct page **pages, unsigned int len)
{
        memset(buf, 0, sizeof(*buf));
        buf->pages = pages;
        buf->page_len = len;
        buf->buflen = len;
        buf->len = len;
        xdr_init_decode(xdr, buf, NULL, NULL);
}
EXPORT_SYMBOL_GPL(xdr_init_decode_pages);

static __be32 *__xdr_inline_decode(struct xdr_stream *xdr, size_t nbytes)
{
        unsigned int nwords = XDR_QUADLEN(nbytes);
        __be32 *p = xdr->p;
        __be32 *q = p + nwords;

        if (unlikely(nwords > xdr->nwords || q > xdr->end || q < p))
                return NULL;
        xdr->p = q;
        xdr->nwords -= nwords;
        return p;
}

static __be32 *xdr_copy_to_scratch(struct xdr_stream *xdr, size_t nbytes)
{
        __be32 *p;
        char *cpdest = xdr->scratch.iov_base;
        size_t cplen = (char *)xdr->end - (char *)xdr->p;

        if (nbytes > xdr->scratch.iov_len)
                goto out_overflow;
        p = __xdr_inline_decode(xdr, cplen);
        if (p == NULL)
                return NULL;
        memcpy(cpdest, p, cplen);
        if (!xdr_set_next_buffer(xdr))
                goto out_overflow;
        cpdest += cplen;
        nbytes -= cplen;
        p = __xdr_inline_decode(xdr, nbytes);
        if (p == NULL)
                return NULL;
        memcpy(cpdest, p, nbytes);
        return xdr->scratch.iov_base;
out_overflow:
        trace_rpc_xdr_overflow(xdr, nbytes);
        return NULL;
}

/**
 * xdr_inline_decode - Retrieve XDR data to decode
 * @xdr: pointer to xdr_stream struct
 * @nbytes: number of bytes of data to decode
 *
 * Check if the input buffer is long enough to enable us to decode
 * 'nbytes' more bytes of data starting at the current position.
 * If so return the current pointer, then update the current
 * pointer position.
 */
__be32 *xdr_inline_decode(struct xdr_stream *xdr, size_t nbytes)
{
        __be32 *p;

        if (unlikely(nbytes == 0))
                return xdr->p;
        if (xdr->p == xdr->end && !xdr_set_next_buffer(xdr))
                goto out_overflow;
        p = __xdr_inline_decode(xdr, nbytes);
        if (p != NULL)
                return p;
        return xdr_copy_to_scratch(xdr, nbytes);
out_overflow:
        trace_rpc_xdr_overflow(xdr, nbytes);
        return NULL;
}
EXPORT_SYMBOL_GPL(xdr_inline_decode);
static void xdr_realign_pages(struct xdr_stream *xdr)
{
        struct xdr_buf *buf = xdr->buf;
        struct kvec *iov = buf->head;
        unsigned int cur = xdr_stream_pos(xdr);
        unsigned int copied;

        /* Realign pages to current pointer position */
        if (iov->iov_len > cur) {
                copied = xdr_shrink_bufhead(buf, cur);
                trace_rpc_xdr_alignment(xdr, cur, copied);
                xdr_set_page(xdr, 0, buf->page_len);
        }
}

static unsigned int xdr_align_pages(struct xdr_stream *xdr, unsigned int len)
{
        struct xdr_buf *buf = xdr->buf;
        unsigned int nwords = XDR_QUADLEN(len);
        unsigned int copied;

        if (xdr->nwords == 0)
                return 0;

        xdr_realign_pages(xdr);
        if (nwords > xdr->nwords) {
                nwords = xdr->nwords;
                len = nwords << 2;
        }
        if (buf->page_len <= len)
                len = buf->page_len;
        else if (nwords < xdr->nwords) {
                /* Truncate page data and move it into the tail */
                copied = xdr_shrink_pagelen(buf, len);
                trace_rpc_xdr_alignment(xdr, len, copied);
        }
        return len;
}

/**
 * xdr_read_pages - align page-based XDR data to current pointer position
 * @xdr: pointer to xdr_stream struct
 * @len: number of bytes of page data
 *
 * Moves data beyond the current pointer position from the XDR head[] buffer
 * into the page list. Any data that lies beyond current position + @len
 * bytes is moved into the XDR tail[]. The xdr_stream current position is
 * then advanced past that data to align to the next XDR object in the tail.
 *
 * Returns the number of XDR encoded bytes now contained in the pages
 */
unsigned int xdr_read_pages(struct xdr_stream *xdr, unsigned int len)
{
        unsigned int nwords = XDR_QUADLEN(len);
        unsigned int base, end, pglen;

        pglen = xdr_align_pages(xdr, nwords << 2);
        if (pglen == 0)
                return 0;

        base = (nwords << 2) - pglen;
        end = xdr_stream_remaining(xdr) - pglen;

        xdr_set_tail_base(xdr, base, end);
        return len <= pglen ? len : pglen;
}
EXPORT_SYMBOL_GPL(xdr_read_pages);
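/*
 * For example, an NFS READ reply decoder calls xdr_read_pages() with the
 * byte count from the reply header so that the payload lines up with the
 * receive pages and decoding continues at the next item in the tail.
 */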
/**
 * xdr_set_pagelen - Sets the length of the XDR pages
 * @xdr: pointer to xdr_stream struct
 * @len: new length of the XDR page data
 *
 * Either grows or shrinks the length of the xdr pages by setting pagelen to
 * @len bytes. When shrinking, any extra data is moved into buf->tail, whereas
 * when growing any data beyond the current pointer is moved into the tail.
 */
void xdr_set_pagelen(struct xdr_stream *xdr, unsigned int len)
{
        struct xdr_buf *buf = xdr->buf;
        size_t remaining = xdr_stream_remaining(xdr);
        size_t base = 0;

        if (len < buf->page_len) {
                base = buf->page_len - len;
                xdr_shrink_pagelen(buf, len);
        } else {
                xdr_buf_head_shift_right(buf, xdr_stream_pos(xdr),
                                         buf->page_len, remaining);
                if (len > buf->page_len)
                        xdr_buf_try_expand(buf, len - buf->page_len);
        }
        xdr_set_tail_base(xdr, base, remaining);
}
EXPORT_SYMBOL_GPL(xdr_set_pagelen);
/**
 * xdr_enter_page - decode data from the XDR page
 * @xdr: pointer to xdr_stream struct
 * @len: number of bytes of page data
 *
 * Moves data beyond the current pointer position from the XDR head[] buffer
 * into the page list. Any data that lies beyond current position + "len"
 * bytes is moved into the XDR tail[]. The current pointer is then
 * repositioned at the beginning of the first XDR page.
 */
void xdr_enter_page(struct xdr_stream *xdr, unsigned int len)
{
        len = xdr_align_pages(xdr, len);
        /*
         * Position current pointer at beginning of tail, and
         * set remaining message length.
         */
        if (len != 0)
                xdr_set_page_base(xdr, 0, len);
}
EXPORT_SYMBOL_GPL(xdr_enter_page);

static const struct kvec empty_iov = {.iov_base = NULL, .iov_len = 0};

void xdr_buf_from_iov(const struct kvec *iov, struct xdr_buf *buf)
{
        buf->head[0] = *iov;
        buf->tail[0] = empty_iov;
        buf->page_len = 0;
        buf->buflen = buf->len = iov->iov_len;
}
EXPORT_SYMBOL_GPL(xdr_buf_from_iov);

/**
 * xdr_buf_subsegment - set subbuf to a portion of buf
 * @buf: an xdr buffer
 * @subbuf: the result buffer
 * @base: beginning of range in bytes
 * @len: length of range in bytes
 *
 * sets @subbuf to an xdr buffer representing the portion of @buf of
 * length @len starting at offset @base.
 *
 * @buf and @subbuf may be pointers to the same struct xdr_buf.
 *
 * Returns -1 if base or length are out of bounds.
 */
int xdr_buf_subsegment(const struct xdr_buf *buf, struct xdr_buf *subbuf,
		       unsigned int base, unsigned int len)
{
	subbuf->buflen = subbuf->len = len;
	if (base < buf->head[0].iov_len) {
		subbuf->head[0].iov_base = buf->head[0].iov_base + base;
		subbuf->head[0].iov_len = min_t(unsigned int, len,
						buf->head[0].iov_len - base);
		len -= subbuf->head[0].iov_len;
		base = 0;
	} else {
		base -= buf->head[0].iov_len;
		subbuf->head[0].iov_base = buf->head[0].iov_base;
		subbuf->head[0].iov_len = 0;
	}

	if (base < buf->page_len) {
		subbuf->page_len = min(buf->page_len - base, len);
		base += buf->page_base;
		subbuf->page_base = base & ~PAGE_MASK;
		subbuf->pages = &buf->pages[base >> PAGE_SHIFT];
		len -= subbuf->page_len;
		base = 0;
	} else {
		base -= buf->page_len;
		subbuf->pages = buf->pages;
		subbuf->page_base = 0;
		subbuf->page_len = 0;
	}

	if (base < buf->tail[0].iov_len) {
		subbuf->tail[0].iov_base = buf->tail[0].iov_base + base;
		subbuf->tail[0].iov_len = min_t(unsigned int, len,
						buf->tail[0].iov_len - base);
		len -= subbuf->tail[0].iov_len;
		base = 0;
	} else {
		base -= buf->tail[0].iov_len;
		subbuf->tail[0].iov_base = buf->tail[0].iov_base;
		subbuf->tail[0].iov_len = 0;
	}

	if (base || len)
		return -1;
	return 0;
}
EXPORT_SYMBOL_GPL(xdr_buf_subsegment);
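
/*
 * Sketch (illustrative only): carving out the region covered by an
 * integrity checksum. A GSS-style check over bytes [base, base + len) of
 * a request might begin with:
 *
 *	struct xdr_buf integ_buf;
 *
 *	if (xdr_buf_subsegment(&rqstp->rq_arg, &integ_buf, base, len))
 *		return -EIO;
 *
 * "rqstp" is a hypothetical svc_rqst here; any xdr_buf works the same way,
 * and no data is copied: integ_buf merely aliases the selected range.
 */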

/**
 * xdr_stream_subsegment - set @subbuf to a portion of @xdr
 * @xdr: an xdr_stream set up for decoding
 * @subbuf: the result buffer
 * @nbytes: length of @xdr to extract, in bytes
 *
 * Sets up @subbuf to represent a portion of @xdr. The portion
 * starts at the current offset in @xdr, and extends for a length
 * of @nbytes. If this is successful, @xdr is advanced to the next
 * XDR data item following that portion.
 *
 * Return values:
 *   %true: @subbuf has been initialized, and @xdr has been advanced.
 *   %false: a bounds error has occurred
 */
bool xdr_stream_subsegment(struct xdr_stream *xdr, struct xdr_buf *subbuf,
			   unsigned int nbytes)
{
	unsigned int start = xdr_stream_pos(xdr);
	unsigned int remaining, len;

	/* Extract @subbuf and bounds-check the fn arguments */
	if (xdr_buf_subsegment(xdr->buf, subbuf, start, nbytes))
		return false;

	/* Advance @xdr by @nbytes */
	for (remaining = nbytes; remaining;) {
		if (xdr->p == xdr->end && !xdr_set_next_buffer(xdr))
			return false;

		len = (char *)xdr->end - (char *)xdr->p;
		if (remaining <= len) {
			xdr->p = (__be32 *)((char *)xdr->p +
					(remaining + xdr_pad_size(nbytes)));
			break;
		}

		xdr->p = (__be32 *)((char *)xdr->p + len);
		xdr->end = xdr->p;
		remaining -= len;
	}

	xdr_stream_set_pos(xdr, start + nbytes);
	return true;
}
EXPORT_SYMBOL_GPL(xdr_stream_subsegment);
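
/*
 * Sketch (illustrative only): a WRITE-style server decoder can capture a
 * variable-length payload without copying it:
 *
 *	u32 count;
 *	struct xdr_buf payload;
 *
 *	if (xdr_stream_decode_u32(xdr, &count) < 0)
 *		return false;
 *	if (!xdr_stream_subsegment(xdr, &payload, count))
 *		return false;
 *
 * On success the stream is left positioned at the XDR item following the
 * payload, including any XDR padding.
 */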

/**
 * xdr_stream_move_subsegment - Move part of a stream to another position
 * @xdr: the source xdr_stream
 * @offset: the source offset of the segment
 * @target: the target offset of the segment
 * @length: the number of bytes to move
 *
 * Moves @length bytes from @offset to @target in the xdr_stream, overwriting
 * anything in its space. Returns the number of bytes in the segment.
 */
unsigned int xdr_stream_move_subsegment(struct xdr_stream *xdr, unsigned int offset,
					unsigned int target, unsigned int length)
{
	struct xdr_buf buf;
	unsigned int shift;

	if (offset < target) {
		shift = target - offset;
		if (xdr_buf_subsegment(xdr->buf, &buf, offset, shift + length) < 0)
			return 0;
		xdr_buf_head_shift_right(&buf, 0, length, shift);
	} else if (offset > target) {
		shift = offset - target;
		if (xdr_buf_subsegment(xdr->buf, &buf, target, shift + length) < 0)
			return 0;
		xdr_buf_head_shift_left(&buf, shift, length, shift);
	}
	return length;
}
EXPORT_SYMBOL_GPL(xdr_stream_move_subsegment);
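
/*
 * Sketch (illustrative only): making room for an 8-byte field at "offset"
 * by sliding the following "length" bytes further into the stream:
 *
 *	if (xdr_stream_move_subsegment(xdr, offset, offset + 8,
 *				       length) != length)
 *		return -EMSGSIZE;
 *
 * The destination range must already fit within the underlying xdr_buf;
 * this helper shifts data, it does not grow the buffer.
 */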

/**
 * xdr_stream_zero - zero out a portion of an xdr_stream
 * @xdr: an xdr_stream to zero out
 * @offset: the starting point in the stream
 * @length: the number of bytes to zero
 *
 * Returns the number of bytes zeroed, or 0 if the requested range lies
 * outside the stream.
 */
unsigned int xdr_stream_zero(struct xdr_stream *xdr, unsigned int offset,
			     unsigned int length)
{
	struct xdr_buf buf;

	if (xdr_buf_subsegment(xdr->buf, &buf, offset, length) < 0)
		return 0;
	if (buf.head[0].iov_len)
		xdr_buf_iov_zero(buf.head, 0, buf.head[0].iov_len);
	if (buf.page_len > 0)
		xdr_buf_pages_zero(&buf, 0, buf.page_len);
	if (buf.tail[0].iov_len)
		xdr_buf_iov_zero(buf.tail, 0, buf.tail[0].iov_len);
	return length;
}
EXPORT_SYMBOL_GPL(xdr_stream_zero);
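
/*
 * Sketch (illustrative only): a server that must not leak stale page
 * contents can clear the unused part of a fixed-size reply region:
 *
 *	if (xdr_stream_zero(xdr, offset + written,
 *			    region_len - written) == 0)
 *		return -EIO;
 *
 * "offset", "written" and "region_len" are hypothetical bookkeeping
 * values for the sketch.
 */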

/**
 * xdr_buf_trim - lop at most "len" bytes off the end of "buf"
 * @buf: buf to be trimmed
 * @len: number of bytes to reduce "buf" by
 *
 * Trim an xdr_buf by the given number of bytes by fixing up the lengths. Note
 * that it's possible that we'll trim less than that amount if the xdr_buf is
 * too small, or if (for instance) it's all in the head and the parser has
 * already read too far into it.
 */
void xdr_buf_trim(struct xdr_buf *buf, unsigned int len)
{
	size_t cur;
	unsigned int trim = len;

	if (buf->tail[0].iov_len) {
		cur = min_t(size_t, buf->tail[0].iov_len, trim);
		buf->tail[0].iov_len -= cur;
		trim -= cur;
		if (!trim)
			goto fix_len;
	}

	if (buf->page_len) {
		cur = min_t(unsigned int, buf->page_len, trim);
		buf->page_len -= cur;
		trim -= cur;
		if (!trim)
			goto fix_len;
	}

	if (buf->head[0].iov_len) {
		cur = min_t(size_t, buf->head[0].iov_len, trim);
		buf->head[0].iov_len -= cur;
		trim -= cur;
	}
fix_len:
	buf->len -= (len - trim);
}
EXPORT_SYMBOL_GPL(xdr_buf_trim);
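
/*
 * Sketch (illustrative only): after verifying a trailing MIC, a GSS unwrap
 * step might drop the checksum from the end of the message:
 *
 *	xdr_buf_trim(buf, round_up_to_quad(mic.len) + 4);
 *
 * round_up_to_quad() and the MIC layout are assumptions borrowed from the
 * auth_gss code; the point is only that trimming adjusts lengths in place,
 * it never frees or moves memory.
 */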

static void __read_bytes_from_xdr_buf(const struct xdr_buf *subbuf,
				      void *obj, unsigned int len)
{
	unsigned int this_len;

	this_len = min_t(unsigned int, len, subbuf->head[0].iov_len);
	memcpy(obj, subbuf->head[0].iov_base, this_len);
	len -= this_len;
	obj += this_len;
	this_len = min_t(unsigned int, len, subbuf->page_len);
	_copy_from_pages(obj, subbuf->pages, subbuf->page_base, this_len);
	len -= this_len;
	obj += this_len;
	this_len = min_t(unsigned int, len, subbuf->tail[0].iov_len);
	memcpy(obj, subbuf->tail[0].iov_base, this_len);
}

/* obj is assumed to point to allocated memory of size at least len: */
int read_bytes_from_xdr_buf(const struct xdr_buf *buf, unsigned int base,
			    void *obj, unsigned int len)
{
	struct xdr_buf subbuf;
	int status;

	status = xdr_buf_subsegment(buf, &subbuf, base, len);
	if (status != 0)
		return status;
	__read_bytes_from_xdr_buf(&subbuf, obj, len);
	return 0;
}
EXPORT_SYMBOL_GPL(read_bytes_from_xdr_buf);

static void __write_bytes_to_xdr_buf(const struct xdr_buf *subbuf,
				     void *obj, unsigned int len)
{
	unsigned int this_len;

	this_len = min_t(unsigned int, len, subbuf->head[0].iov_len);
	memcpy(subbuf->head[0].iov_base, obj, this_len);
	len -= this_len;
	obj += this_len;
	this_len = min_t(unsigned int, len, subbuf->page_len);
	_copy_to_pages(subbuf->pages, subbuf->page_base, obj, this_len);
	len -= this_len;
	obj += this_len;
	this_len = min_t(unsigned int, len, subbuf->tail[0].iov_len);
	memcpy(subbuf->tail[0].iov_base, obj, this_len);
}

/* obj is assumed to point to allocated memory of size at least len: */
int write_bytes_to_xdr_buf(const struct xdr_buf *buf, unsigned int base,
			   void *obj, unsigned int len)
{
	struct xdr_buf subbuf;
	int status;

	status = xdr_buf_subsegment(buf, &subbuf, base, len);
	if (status != 0)
		return status;
	__write_bytes_to_xdr_buf(&subbuf, obj, len);
	return 0;
}
EXPORT_SYMBOL_GPL(write_bytes_to_xdr_buf);
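
/*
 * Sketch (illustrative only): read_bytes_from_xdr_buf() and
 * write_bytes_to_xdr_buf() are the copy-in/copy-out pair for fixed-size
 * objects at a known offset, for example rewriting a verifier in place:
 *
 *	__be32 verf[2];
 *
 *	if (read_bytes_from_xdr_buf(buf, base, verf, sizeof(verf)))
 *		return -EIO;
 *	(recompute verf here)
 *	if (write_bytes_to_xdr_buf(buf, base, verf, sizeof(verf)))
 *		return -EIO;
 *
 * Both handle objects that straddle the head/pages/tail boundaries.
 */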

int xdr_decode_word(const struct xdr_buf *buf, unsigned int base, u32 *obj)
{
	__be32 raw;
	int status;

	status = read_bytes_from_xdr_buf(buf, base, &raw, sizeof(*obj));
	if (status)
		return status;
	*obj = be32_to_cpu(raw);
	return 0;
}
EXPORT_SYMBOL_GPL(xdr_decode_word);

int xdr_encode_word(const struct xdr_buf *buf, unsigned int base, u32 obj)
{
	__be32 raw = cpu_to_be32(obj);

	return write_bytes_to_xdr_buf(buf, base, &raw, sizeof(obj));
}
EXPORT_SYMBOL_GPL(xdr_encode_word);
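
/*
 * Sketch (illustrative only): backfilling a length word whose value is
 * only known after the variable-length body has been encoded:
 *
 *	if (xdr_encode_word(buf, len_offset, body_len))
 *		return -EIO;
 *
 * "len_offset" and "body_len" are hypothetical bookkeeping values.
 * xdr_decode_word() is the matching accessor for reading a single
 * big-endian XDR word out of an xdr_buf at an arbitrary offset.
 */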

/* Returns 0 on success, or else a negative error code. */
static int xdr_xcode_array2(const struct xdr_buf *buf, unsigned int base,
			    struct xdr_array2_desc *desc, int encode)
{
	char *elem = NULL, *c;
	unsigned int copied = 0, todo, avail_here;
	struct page **ppages = NULL;
	int err;

	if (encode) {
		if (xdr_encode_word(buf, base, desc->array_len) != 0)
			return -EINVAL;
	} else {
		if (xdr_decode_word(buf, base, &desc->array_len) != 0 ||
		    desc->array_len > desc->array_maxlen ||
		    (unsigned long) base + 4 + desc->array_len *
				    desc->elem_size > buf->len)
			return -EINVAL;
	}
	base += 4;

	if (!desc->xcode)
		return 0;

	todo = desc->array_len * desc->elem_size;

	/* process head */
	if (todo && base < buf->head->iov_len) {
		c = buf->head->iov_base + base;
		avail_here = min_t(unsigned int, todo,
				   buf->head->iov_len - base);
		todo -= avail_here;

		while (avail_here >= desc->elem_size) {
			err = desc->xcode(desc, c);
			if (err)
				goto out;
			c += desc->elem_size;
			avail_here -= desc->elem_size;
		}
		if (avail_here) {
			if (!elem) {
				elem = kmalloc(desc->elem_size, GFP_KERNEL);
				err = -ENOMEM;
				if (!elem)
					goto out;
			}
			if (encode) {
				err = desc->xcode(desc, elem);
				if (err)
					goto out;
				memcpy(c, elem, avail_here);
			} else
				memcpy(elem, c, avail_here);
			copied = avail_here;
		}
		base = buf->head->iov_len;  /* align to start of pages */
	}

	/* process pages array */
	base -= buf->head->iov_len;
	if (todo && base < buf->page_len) {
		unsigned int avail_page;

		avail_here = min(todo, buf->page_len - base);
		todo -= avail_here;

		base += buf->page_base;
		ppages = buf->pages + (base >> PAGE_SHIFT);
		base &= ~PAGE_MASK;
		avail_page = min_t(unsigned int, PAGE_SIZE - base,
				   avail_here);
		c = kmap(*ppages) + base;

		while (avail_here) {
			avail_here -= avail_page;
			if (copied || avail_page < desc->elem_size) {
				unsigned int l = min(avail_page,
					desc->elem_size - copied);
				if (!elem) {
					elem = kmalloc(desc->elem_size,
						       GFP_KERNEL);
					err = -ENOMEM;
					if (!elem)
						goto out;
				}
				if (encode) {
					if (!copied) {
						err = desc->xcode(desc, elem);
						if (err)
							goto out;
					}
					memcpy(c, elem + copied, l);
					copied += l;
					if (copied == desc->elem_size)
						copied = 0;
				} else {
					memcpy(elem + copied, c, l);
					copied += l;
					if (copied == desc->elem_size) {
						err = desc->xcode(desc, elem);
						if (err)
							goto out;
						copied = 0;
					}
				}
				avail_page -= l;
				c += l;
			}
			while (avail_page >= desc->elem_size) {
				err = desc->xcode(desc, c);
				if (err)
					goto out;
				c += desc->elem_size;
				avail_page -= desc->elem_size;
			}
			if (avail_page) {
				unsigned int l = min(avail_page,
					desc->elem_size - copied);
				if (!elem) {
					elem = kmalloc(desc->elem_size,
						       GFP_KERNEL);
					err = -ENOMEM;
					if (!elem)
						goto out;
				}
				if (encode) {
					if (!copied) {
						err = desc->xcode(desc, elem);
						if (err)
							goto out;
					}
					memcpy(c, elem + copied, l);
					copied += l;
					if (copied == desc->elem_size)
						copied = 0;
				} else {
					memcpy(elem + copied, c, l);
					copied += l;
					if (copied == desc->elem_size) {
						err = desc->xcode(desc, elem);
						if (err)
							goto out;
						copied = 0;
					}
				}
			}
			if (avail_here) {
				kunmap(*ppages);
				ppages++;
				c = kmap(*ppages);
			}

			avail_page = min(avail_here,
					 (unsigned int) PAGE_SIZE);
		}
		base = buf->page_len;  /* align to start of tail */
	}

	/* process tail */
	base -= buf->page_len;
	if (todo) {
		c = buf->tail->iov_base + base;
		if (copied) {
			unsigned int l = desc->elem_size - copied;

			if (encode)
				memcpy(c, elem + copied, l);
			else {
				memcpy(elem + copied, c, l);
				err = desc->xcode(desc, elem);
				if (err)
					goto out;
			}
			todo -= l;
			c += l;
		}
		while (todo) {
			err = desc->xcode(desc, c);
			if (err)
				goto out;
			c += desc->elem_size;
			todo -= desc->elem_size;
		}
	}
	err = 0;

out:
	kfree(elem);
	if (ppages)
		kunmap(*ppages);
	return err;
}

int xdr_decode_array2(const struct xdr_buf *buf, unsigned int base,
		      struct xdr_array2_desc *desc)
{
	if (base >= buf->len)
		return -EINVAL;

	return xdr_xcode_array2(buf, base, desc, 0);
}
EXPORT_SYMBOL_GPL(xdr_decode_array2);

int xdr_encode_array2(const struct xdr_buf *buf, unsigned int base,
		      struct xdr_array2_desc *desc)
{
	if ((unsigned long) base + 4 + desc->array_len * desc->elem_size >
	    buf->head->iov_len + buf->page_len + buf->tail->iov_len)
		return -EINVAL;

	return xdr_xcode_array2(buf, base, desc, 1);
}
EXPORT_SYMBOL_GPL(xdr_encode_array2);
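
/*
 * Sketch (illustrative only): callers drive xdr_decode_array2() with a
 * per-element callback. A hypothetical decoder for an array of 4-byte
 * flag words might look like:
 *
 *	static int flag_xcode(struct xdr_array2_desc *desc, void *elem)
 *	{
 *		u32 flag = be32_to_cpup((__be32 *)elem);
 *
 *		return flag <= FLAG_MAX ? 0 : -EINVAL;
 *	}
 *
 *	struct xdr_array2_desc desc = {
 *		.elem_size	= 4,
 *		.array_maxlen	= 64,
 *		.xcode		= flag_xcode,
 *	};
 *	err = xdr_decode_array2(buf, base, &desc);
 *
 * FLAG_MAX and the element layout are assumptions for the sketch; the
 * callback is invoked once per element regardless of how elements straddle
 * the head/pages/tail boundaries.
 */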

/*
 * Process @len bytes of @buf, starting at @offset, passing each contiguous
 * chunk to @actor as a single-entry scatterlist.
 */
int xdr_process_buf(const struct xdr_buf *buf, unsigned int offset,
		    unsigned int len,
		    int (*actor)(struct scatterlist *, void *), void *data)
{
	int i, ret = 0;
	unsigned int page_len, thislen, page_offset;
	struct scatterlist sg[1];

	sg_init_table(sg, 1);

	if (offset >= buf->head[0].iov_len) {
		offset -= buf->head[0].iov_len;
	} else {
		thislen = buf->head[0].iov_len - offset;
		if (thislen > len)
			thislen = len;
		sg_set_buf(sg, buf->head[0].iov_base + offset, thislen);
		ret = actor(sg, data);
		if (ret)
			goto out;
		offset = 0;
		len -= thislen;
	}
	if (len == 0)
		goto out;

	if (offset >= buf->page_len) {
		offset -= buf->page_len;
	} else {
		page_len = buf->page_len - offset;
		if (page_len > len)
			page_len = len;
		len -= page_len;
		page_offset = (offset + buf->page_base) & (PAGE_SIZE - 1);
		i = (offset + buf->page_base) >> PAGE_SHIFT;
		thislen = PAGE_SIZE - page_offset;
		do {
			if (thislen > page_len)
				thislen = page_len;
			sg_set_page(sg, buf->pages[i], thislen, page_offset);
			ret = actor(sg, data);
			if (ret)
				goto out;
			page_len -= thislen;
			i++;
			page_offset = 0;
			thislen = PAGE_SIZE;
		} while (page_len != 0);
		offset = 0;
	}
	if (len == 0)
		goto out;
	if (offset < buf->tail[0].iov_len) {
		thislen = buf->tail[0].iov_len - offset;
		if (thislen > len)
			thislen = len;
		sg_set_buf(sg, buf->tail[0].iov_base + offset, thislen);
		ret = actor(sg, data);
		len -= thislen;
	}
	if (len != 0)
		ret = -EINVAL;
out:
	return ret;
}
EXPORT_SYMBOL_GPL(xdr_process_buf);
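
/*
 * Sketch (illustrative only): the actor is typically a hash or cipher
 * step, as in the krb5 code. A checksum over a range might be driven
 * like this:
 *
 *	static int checksum_actor(struct scatterlist *sg, void *data)
 *	{
 *		(update a hash state in "data" with this sg entry)
 *		return 0;
 *	}
 *
 *	err = xdr_process_buf(buf, offset, len, checksum_actor, state);
 *
 * checksum_actor() and "state" are hypothetical; a nonzero return from the
 * actor aborts the walk and is propagated to the caller.
 */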

/**
 * xdr_stream_decode_opaque - Decode variable length opaque
 * @xdr: pointer to xdr_stream
 * @ptr: location to store opaque data
 * @size: size of storage buffer @ptr
 *
 * Return values:
 *   On success, returns size of object stored in *@ptr
 *   %-EBADMSG on XDR buffer overflow
 *   %-EMSGSIZE on overflow of storage buffer @ptr
 */
ssize_t xdr_stream_decode_opaque(struct xdr_stream *xdr, void *ptr, size_t size)
{
	ssize_t ret;
	void *p;

	ret = xdr_stream_decode_opaque_inline(xdr, &p, size);
	if (ret <= 0)
		return ret;
	memcpy(ptr, p, ret);
	return ret;
}
EXPORT_SYMBOL_GPL(xdr_stream_decode_opaque);
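
/*
 * Sketch (illustrative only): decoding a fixed-capacity opaque, such as a
 * session identifier, into a caller-supplied buffer:
 *
 *	u8 sessionid[16];
 *	ssize_t len = xdr_stream_decode_opaque(xdr, sessionid,
 *					       sizeof(sessionid));
 *
 *	if (len < 0)
 *		return len;
 *
 * On success "len" is the actual number of opaque bytes received, which
 * may be smaller than the buffer.
 */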

/**
 * xdr_stream_decode_opaque_dup - Decode and duplicate variable length opaque
 * @xdr: pointer to xdr_stream
 * @ptr: location to store pointer to opaque data
 * @maxlen: maximum acceptable object size
 * @gfp_flags: GFP mask to use
 *
 * Return values:
 *   On success, returns size of object stored in *@ptr
 *   %-EBADMSG on XDR buffer overflow
 *   %-EMSGSIZE if the size of the object would exceed @maxlen
 *   %-ENOMEM on memory allocation failure
 */
ssize_t xdr_stream_decode_opaque_dup(struct xdr_stream *xdr, void **ptr,
				     size_t maxlen, gfp_t gfp_flags)
{
	ssize_t ret;
	void *p;

	ret = xdr_stream_decode_opaque_inline(xdr, &p, maxlen);
	if (ret > 0) {
		*ptr = kmemdup(p, ret, gfp_flags);
		if (*ptr != NULL)
			return ret;
		ret = -ENOMEM;
	}
	*ptr = NULL;
	return ret;
}
EXPORT_SYMBOL_GPL(xdr_stream_decode_opaque_dup);

/**
 * xdr_stream_decode_string - Decode variable length string
 * @xdr: pointer to xdr_stream
 * @str: location to store string
 * @size: size of storage buffer @str
 *
 * Return values:
 *   On success, returns length of NUL-terminated string stored in *@str
 *   %-EBADMSG on XDR buffer overflow
 *   %-EMSGSIZE on overflow of storage buffer @str
 */
ssize_t xdr_stream_decode_string(struct xdr_stream *xdr, char *str, size_t size)
{
	ssize_t ret;
	void *p;

	ret = xdr_stream_decode_opaque_inline(xdr, &p, size);
	if (ret > 0) {
		memcpy(str, p, ret);
		str[ret] = '\0';
		return strlen(str);
	}
	*str = '\0';
	return ret;
}
EXPORT_SYMBOL_GPL(xdr_stream_decode_string);

/**
 * xdr_stream_decode_string_dup - Decode and duplicate variable length string
 * @xdr: pointer to xdr_stream
 * @str: location to store pointer to string
 * @maxlen: maximum acceptable string length
 * @gfp_flags: GFP mask to use
 *
 * Return values:
 *   On success, returns length of NUL-terminated string stored in *@str
 *   %-EBADMSG on XDR buffer overflow
 *   %-EMSGSIZE if the size of the string would exceed @maxlen
 *   %-ENOMEM on memory allocation failure
 */
ssize_t xdr_stream_decode_string_dup(struct xdr_stream *xdr, char **str,
				     size_t maxlen, gfp_t gfp_flags)
{
	void *p;
	ssize_t ret;

	ret = xdr_stream_decode_opaque_inline(xdr, &p, maxlen);
	if (ret > 0) {
		char *s = kmemdup_nul(p, ret, gfp_flags);

		if (s != NULL) {
			*str = s;
			return strlen(s);
		}
		ret = -ENOMEM;
	}
	*str = NULL;
	return ret;
}
EXPORT_SYMBOL_GPL(xdr_stream_decode_string_dup);
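
/*
 * Sketch (illustrative only): duplicating a client-supplied name with a
 * sane upper bound; on success the caller owns the allocation:
 *
 *	char *name;
 *	ssize_t len = xdr_stream_decode_string_dup(xdr, &name, NAME_MAX,
 *						   GFP_KERNEL);
 *
 *	if (len < 0)
 *		return len;
 *	(use name, then release it)
 *	kfree(name);
 */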