flexfilelayout.c

  1. // SPDX-License-Identifier: GPL-2.0-only
  2. /*
  3. * Module for pnfs flexfile layout driver.
  4. *
  5. * Copyright (c) 2014, Primary Data, Inc. All rights reserved.
  6. *
  7. * Tao Peng <[email protected]>
  8. */
  9. #include <linux/nfs_fs.h>
  10. #include <linux/nfs_mount.h>
  11. #include <linux/nfs_page.h>
  12. #include <linux/module.h>
  13. #include <linux/sched/mm.h>
  14. #include <linux/sunrpc/metrics.h>
  15. #include "flexfilelayout.h"
  16. #include "../nfs4session.h"
  17. #include "../nfs4idmap.h"
  18. #include "../internal.h"
  19. #include "../delegation.h"
  20. #include "../nfs4trace.h"
  21. #include "../iostat.h"
  22. #include "../nfs.h"
  23. #include "../nfs42.h"
  24. #define NFSDBG_FACILITY NFSDBG_PNFS_LD
  25. #define FF_LAYOUT_POLL_RETRY_MAX (15*HZ)
  26. #define FF_LAYOUTRETURN_MAXERR 20
  27. enum nfs4_ff_op_type {
  28. NFS4_FF_OP_LAYOUTSTATS,
  29. NFS4_FF_OP_LAYOUTRETURN,
  30. };
  31. static unsigned short io_maxretrans;
  32. static const struct pnfs_commit_ops ff_layout_commit_ops;
  33. static void ff_layout_read_record_layoutstats_done(struct rpc_task *task,
  34. struct nfs_pgio_header *hdr);
  35. static int
  36. ff_layout_mirror_prepare_stats(struct pnfs_layout_hdr *lo,
  37. struct nfs42_layoutstat_devinfo *devinfo,
  38. int dev_limit, enum nfs4_ff_op_type type);
  39. static void ff_layout_encode_ff_layoutupdate(struct xdr_stream *xdr,
  40. const struct nfs42_layoutstat_devinfo *devinfo,
  41. struct nfs4_ff_layout_mirror *mirror);
  42. static struct pnfs_layout_hdr *
  43. ff_layout_alloc_layout_hdr(struct inode *inode, gfp_t gfp_flags)
  44. {
  45. struct nfs4_flexfile_layout *ffl;
  46. ffl = kzalloc(sizeof(*ffl), gfp_flags);
  47. if (ffl) {
  48. pnfs_init_ds_commit_info(&ffl->commit_info);
  49. INIT_LIST_HEAD(&ffl->error_list);
  50. INIT_LIST_HEAD(&ffl->mirrors);
  51. ffl->last_report_time = ktime_get();
  52. ffl->commit_info.ops = &ff_layout_commit_ops;
  53. return &ffl->generic_hdr;
  54. } else
  55. return NULL;
  56. }
  57. static void
  58. ff_layout_free_layout_hdr(struct pnfs_layout_hdr *lo)
  59. {
  60. struct nfs4_flexfile_layout *ffl = FF_LAYOUT_FROM_HDR(lo);
  61. struct nfs4_ff_layout_ds_err *err, *n;
  62. list_for_each_entry_safe(err, n, &ffl->error_list, list) {
  63. list_del(&err->list);
  64. kfree(err);
  65. }
  66. kfree_rcu(ffl, generic_hdr.plh_rcu);
  67. }
  68. static int decode_pnfs_stateid(struct xdr_stream *xdr, nfs4_stateid *stateid)
  69. {
  70. __be32 *p;
  71. p = xdr_inline_decode(xdr, NFS4_STATEID_SIZE);
  72. if (unlikely(p == NULL))
  73. return -ENOBUFS;
  74. stateid->type = NFS4_PNFS_DS_STATEID_TYPE;
  75. memcpy(stateid->data, p, NFS4_STATEID_SIZE);
  76. dprintk("%s: stateid id= [%x%x%x%x]\n", __func__,
  77. p[0], p[1], p[2], p[3]);
  78. return 0;
  79. }
  80. static int decode_deviceid(struct xdr_stream *xdr, struct nfs4_deviceid *devid)
  81. {
  82. __be32 *p;
  83. p = xdr_inline_decode(xdr, NFS4_DEVICEID4_SIZE);
  84. if (unlikely(!p))
  85. return -ENOBUFS;
  86. memcpy(devid, p, NFS4_DEVICEID4_SIZE);
  87. nfs4_print_deviceid(devid);
  88. return 0;
  89. }
  90. static int decode_nfs_fh(struct xdr_stream *xdr, struct nfs_fh *fh)
  91. {
  92. __be32 *p;
  93. p = xdr_inline_decode(xdr, 4);
  94. if (unlikely(!p))
  95. return -ENOBUFS;
  96. fh->size = be32_to_cpup(p++);
  97. if (fh->size > NFS_MAXFHSIZE) {
  98. printk(KERN_ERR "NFS flexfiles: Too big fh received %d\n",
  99. fh->size);
  100. return -EOVERFLOW;
  101. }
  102. /* fh.data */
  103. p = xdr_inline_decode(xdr, fh->size);
  104. if (unlikely(!p))
  105. return -ENOBUFS;
  106. memcpy(&fh->data, p, fh->size);
  107. dprintk("%s: fh len %d\n", __func__, fh->size);
  108. return 0;
  109. }
  110. /*
  111. * Currently only stringified uids and gids are accepted.
  112. * I.e., Kerberos is not supported to the DSes, so no principals.
  113. *
  114. * That means that one common function will suffice, but when
  115. * principals are added, this should be split to accommodate
  116. * calls to both nfs_map_name_to_uid() and nfs_map_group_to_gid().
  117. */
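/*
 * Hypothetical example of the wire encoding decoded below: a 4-byte
 * opaque length followed by the opaque body, e.g. length 4 and the
 * ASCII bytes "1000", which nfs_map_string_to_numeric() maps to id 1000.
 */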
  118. static int
  119. decode_name(struct xdr_stream *xdr, u32 *id)
  120. {
  121. __be32 *p;
  122. int len;
  123. /* opaque_length(4)*/
  124. p = xdr_inline_decode(xdr, 4);
  125. if (unlikely(!p))
  126. return -ENOBUFS;
  127. len = be32_to_cpup(p++);
  128. if (len < 0)
  129. return -EINVAL;
  130. dprintk("%s: len %u\n", __func__, len);
  131. /* opaque body */
  132. p = xdr_inline_decode(xdr, len);
  133. if (unlikely(!p))
  134. return -ENOBUFS;
  135. if (!nfs_map_string_to_numeric((char *)p, len, id))
  136. return -EINVAL;
  137. return 0;
  138. }
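/*
 * Two mirrors match only if they carry the same number of filehandle
 * versions and every filehandle in m1 has an equal filehandle somewhere
 * in m2 (order-independent comparison).
 */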
  139. static bool ff_mirror_match_fh(const struct nfs4_ff_layout_mirror *m1,
  140. const struct nfs4_ff_layout_mirror *m2)
  141. {
  142. int i, j;
  143. if (m1->fh_versions_cnt != m2->fh_versions_cnt)
  144. return false;
  145. for (i = 0; i < m1->fh_versions_cnt; i++) {
  146. bool found_fh = false;
  147. for (j = 0; j < m2->fh_versions_cnt; j++) {
  148. if (nfs_compare_fh(&m1->fh_versions[i],
  149. &m2->fh_versions[j]) == 0) {
  150. found_fh = true;
  151. break;
  152. }
  153. }
  154. if (!found_fh)
  155. return false;
  156. }
  157. return true;
  158. }
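/*
 * Add the mirror to the layout's mirror list, or return an existing
 * entry with the same deviceid and filehandles if its refcount can
 * still be taken; the caller receives the mirror to use.
 */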
  159. static struct nfs4_ff_layout_mirror *
  160. ff_layout_add_mirror(struct pnfs_layout_hdr *lo,
  161. struct nfs4_ff_layout_mirror *mirror)
  162. {
  163. struct nfs4_flexfile_layout *ff_layout = FF_LAYOUT_FROM_HDR(lo);
  164. struct nfs4_ff_layout_mirror *pos;
  165. struct inode *inode = lo->plh_inode;
  166. spin_lock(&inode->i_lock);
  167. list_for_each_entry(pos, &ff_layout->mirrors, mirrors) {
  168. if (memcmp(&mirror->devid, &pos->devid, sizeof(pos->devid)) != 0)
  169. continue;
  170. if (!ff_mirror_match_fh(mirror, pos))
  171. continue;
  172. if (refcount_inc_not_zero(&pos->ref)) {
  173. spin_unlock(&inode->i_lock);
  174. return pos;
  175. }
  176. }
  177. list_add(&mirror->mirrors, &ff_layout->mirrors);
  178. mirror->layout = lo;
  179. spin_unlock(&inode->i_lock);
  180. return mirror;
  181. }
  182. static void
  183. ff_layout_remove_mirror(struct nfs4_ff_layout_mirror *mirror)
  184. {
  185. struct inode *inode;
  186. if (mirror->layout == NULL)
  187. return;
  188. inode = mirror->layout->plh_inode;
  189. spin_lock(&inode->i_lock);
  190. list_del(&mirror->mirrors);
  191. spin_unlock(&inode->i_lock);
  192. mirror->layout = NULL;
  193. }
  194. static struct nfs4_ff_layout_mirror *ff_layout_alloc_mirror(gfp_t gfp_flags)
  195. {
  196. struct nfs4_ff_layout_mirror *mirror;
  197. mirror = kzalloc(sizeof(*mirror), gfp_flags);
  198. if (mirror != NULL) {
  199. spin_lock_init(&mirror->lock);
  200. refcount_set(&mirror->ref, 1);
  201. INIT_LIST_HEAD(&mirror->mirrors);
  202. }
  203. return mirror;
  204. }
  205. static void ff_layout_free_mirror(struct nfs4_ff_layout_mirror *mirror)
  206. {
  207. const struct cred *cred;
  208. ff_layout_remove_mirror(mirror);
  209. kfree(mirror->fh_versions);
  210. cred = rcu_access_pointer(mirror->ro_cred);
  211. put_cred(cred);
  212. cred = rcu_access_pointer(mirror->rw_cred);
  213. put_cred(cred);
  214. nfs4_ff_layout_put_deviceid(mirror->mirror_ds);
  215. kfree(mirror);
  216. }
  217. static void ff_layout_put_mirror(struct nfs4_ff_layout_mirror *mirror)
  218. {
  219. if (mirror != NULL && refcount_dec_and_test(&mirror->ref))
  220. ff_layout_free_mirror(mirror);
  221. }
  222. static void ff_layout_free_mirror_array(struct nfs4_ff_layout_segment *fls)
  223. {
  224. u32 i;
  225. for (i = 0; i < fls->mirror_array_cnt; i++)
  226. ff_layout_put_mirror(fls->mirror_array[i]);
  227. }
  228. static void _ff_layout_free_lseg(struct nfs4_ff_layout_segment *fls)
  229. {
  230. if (fls) {
  231. ff_layout_free_mirror_array(fls);
  232. kfree(fls);
  233. }
  234. }
  235. static bool
  236. ff_lseg_match_mirrors(struct pnfs_layout_segment *l1,
  237. struct pnfs_layout_segment *l2)
  238. {
  239. const struct nfs4_ff_layout_segment *fl1 = FF_LAYOUT_LSEG(l1);
  240. const struct nfs4_ff_layout_segment *fl2 = FF_LAYOUT_LSEG(l2);
  241. u32 i;
  242. if (fl1->mirror_array_cnt != fl2->mirror_array_cnt)
  243. return false;
  244. for (i = 0; i < fl1->mirror_array_cnt; i++) {
  245. if (fl1->mirror_array[i] != fl2->mirror_array[i])
  246. return false;
  247. }
  248. return true;
  249. }
  250. static bool
  251. ff_lseg_range_is_after(const struct pnfs_layout_range *l1,
  252. const struct pnfs_layout_range *l2)
  253. {
  254. u64 end1, end2;
  255. if (l1->iomode != l2->iomode)
  256. return l1->iomode != IOMODE_READ;
  257. end1 = pnfs_calc_offset_end(l1->offset, l1->length);
  258. end2 = pnfs_calc_offset_end(l2->offset, l2->length);
  259. if (end1 < l2->offset)
  260. return false;
  261. if (end2 < l1->offset)
  262. return true;
  263. return l2->offset <= l1->offset;
  264. }
  265. static bool
  266. ff_lseg_merge(struct pnfs_layout_segment *new,
  267. struct pnfs_layout_segment *old)
  268. {
  269. u64 new_end, old_end;
  270. if (test_bit(NFS_LSEG_LAYOUTRETURN, &old->pls_flags))
  271. return false;
  272. if (new->pls_range.iomode != old->pls_range.iomode)
  273. return false;
  274. old_end = pnfs_calc_offset_end(old->pls_range.offset,
  275. old->pls_range.length);
  276. if (old_end < new->pls_range.offset)
  277. return false;
  278. new_end = pnfs_calc_offset_end(new->pls_range.offset,
  279. new->pls_range.length);
  280. if (new_end < old->pls_range.offset)
  281. return false;
  282. if (!ff_lseg_match_mirrors(new, old))
  283. return false;
  284. /* Mergeable: copy info from 'old' to 'new' */
  285. if (new_end < old_end)
  286. new_end = old_end;
  287. if (new->pls_range.offset < old->pls_range.offset)
  288. new->pls_range.offset = old->pls_range.offset;
  289. new->pls_range.length = pnfs_calc_offset_length(new->pls_range.offset,
  290. new_end);
  291. if (test_bit(NFS_LSEG_ROC, &old->pls_flags))
  292. set_bit(NFS_LSEG_ROC, &new->pls_flags);
  293. return true;
  294. }
  295. static void
  296. ff_layout_add_lseg(struct pnfs_layout_hdr *lo,
  297. struct pnfs_layout_segment *lseg,
  298. struct list_head *free_me)
  299. {
  300. pnfs_generic_layout_insert_lseg(lo, lseg,
  301. ff_lseg_range_is_after,
  302. ff_lseg_merge,
  303. free_me);
  304. }
  305. static void ff_layout_sort_mirrors(struct nfs4_ff_layout_segment *fls)
  306. {
  307. int i, j;
  308. for (i = 0; i < fls->mirror_array_cnt - 1; i++) {
  309. for (j = i + 1; j < fls->mirror_array_cnt; j++)
  310. if (fls->mirror_array[i]->efficiency <
  311. fls->mirror_array[j]->efficiency)
  312. swap(fls->mirror_array[i],
  313. fls->mirror_array[j]);
  314. }
  315. }
  316. static struct pnfs_layout_segment *
  317. ff_layout_alloc_lseg(struct pnfs_layout_hdr *lh,
  318. struct nfs4_layoutget_res *lgr,
  319. gfp_t gfp_flags)
  320. {
  321. struct pnfs_layout_segment *ret;
  322. struct nfs4_ff_layout_segment *fls = NULL;
  323. struct xdr_stream stream;
  324. struct xdr_buf buf;
  325. struct page *scratch;
  326. u64 stripe_unit;
  327. u32 mirror_array_cnt;
  328. __be32 *p;
  329. int i, rc;
  330. dprintk("--> %s\n", __func__);
  331. scratch = alloc_page(gfp_flags);
  332. if (!scratch)
  333. return ERR_PTR(-ENOMEM);
  334. xdr_init_decode_pages(&stream, &buf, lgr->layoutp->pages,
  335. lgr->layoutp->len);
  336. xdr_set_scratch_page(&stream, scratch);
  337. /* stripe unit and mirror_array_cnt */
  338. rc = -EIO;
  339. p = xdr_inline_decode(&stream, 8 + 4);
  340. if (!p)
  341. goto out_err_free;
  342. p = xdr_decode_hyper(p, &stripe_unit);
  343. mirror_array_cnt = be32_to_cpup(p++);
  344. dprintk("%s: stripe_unit=%llu mirror_array_cnt=%u\n", __func__,
  345. stripe_unit, mirror_array_cnt);
  346. if (mirror_array_cnt > NFS4_FLEXFILE_LAYOUT_MAX_MIRROR_CNT ||
  347. mirror_array_cnt == 0)
  348. goto out_err_free;
  349. rc = -ENOMEM;
  350. fls = kzalloc(struct_size(fls, mirror_array, mirror_array_cnt),
  351. gfp_flags);
  352. if (!fls)
  353. goto out_err_free;
  354. fls->mirror_array_cnt = mirror_array_cnt;
  355. fls->stripe_unit = stripe_unit;
  356. for (i = 0; i < fls->mirror_array_cnt; i++) {
  357. struct nfs4_ff_layout_mirror *mirror;
  358. struct cred *kcred;
  359. const struct cred __rcu *cred;
  360. kuid_t uid;
  361. kgid_t gid;
  362. u32 ds_count, fh_count, id;
  363. int j;
  364. rc = -EIO;
  365. p = xdr_inline_decode(&stream, 4);
  366. if (!p)
  367. goto out_err_free;
  368. ds_count = be32_to_cpup(p);
  369. /* FIXME: allow for striping? */
  370. if (ds_count != 1)
  371. goto out_err_free;
  372. fls->mirror_array[i] = ff_layout_alloc_mirror(gfp_flags);
  373. if (fls->mirror_array[i] == NULL) {
  374. rc = -ENOMEM;
  375. goto out_err_free;
  376. }
  377. fls->mirror_array[i]->ds_count = ds_count;
  378. /* deviceid */
  379. rc = decode_deviceid(&stream, &fls->mirror_array[i]->devid);
  380. if (rc)
  381. goto out_err_free;
  382. /* efficiency */
  383. rc = -EIO;
  384. p = xdr_inline_decode(&stream, 4);
  385. if (!p)
  386. goto out_err_free;
  387. fls->mirror_array[i]->efficiency = be32_to_cpup(p);
  388. /* stateid */
  389. rc = decode_pnfs_stateid(&stream, &fls->mirror_array[i]->stateid);
  390. if (rc)
  391. goto out_err_free;
  392. /* fh */
  393. rc = -EIO;
  394. p = xdr_inline_decode(&stream, 4);
  395. if (!p)
  396. goto out_err_free;
  397. fh_count = be32_to_cpup(p);
  398. fls->mirror_array[i]->fh_versions =
  399. kcalloc(fh_count, sizeof(struct nfs_fh),
  400. gfp_flags);
  401. if (fls->mirror_array[i]->fh_versions == NULL) {
  402. rc = -ENOMEM;
  403. goto out_err_free;
  404. }
  405. for (j = 0; j < fh_count; j++) {
  406. rc = decode_nfs_fh(&stream,
  407. &fls->mirror_array[i]->fh_versions[j]);
  408. if (rc)
  409. goto out_err_free;
  410. }
  411. fls->mirror_array[i]->fh_versions_cnt = fh_count;
  412. /* user */
  413. rc = decode_name(&stream, &id);
  414. if (rc)
  415. goto out_err_free;
  416. uid = make_kuid(&init_user_ns, id);
  417. /* group */
  418. rc = decode_name(&stream, &id);
  419. if (rc)
  420. goto out_err_free;
  421. gid = make_kgid(&init_user_ns, id);
  422. if (gfp_flags & __GFP_FS)
  423. kcred = prepare_kernel_cred(NULL);
  424. else {
  425. unsigned int nofs_flags = memalloc_nofs_save();
  426. kcred = prepare_kernel_cred(NULL);
  427. memalloc_nofs_restore(nofs_flags);
  428. }
  429. rc = -ENOMEM;
  430. if (!kcred)
  431. goto out_err_free;
  432. kcred->fsuid = uid;
  433. kcred->fsgid = gid;
  434. cred = RCU_INITIALIZER(kcred);
  435. if (lgr->range.iomode == IOMODE_READ)
  436. rcu_assign_pointer(fls->mirror_array[i]->ro_cred, cred);
  437. else
  438. rcu_assign_pointer(fls->mirror_array[i]->rw_cred, cred);
  439. mirror = ff_layout_add_mirror(lh, fls->mirror_array[i]);
  440. if (mirror != fls->mirror_array[i]) {
  441. /* swap cred ptrs so free_mirror will clean up old */
  442. if (lgr->range.iomode == IOMODE_READ) {
  443. cred = xchg(&mirror->ro_cred, cred);
  444. rcu_assign_pointer(fls->mirror_array[i]->ro_cred, cred);
  445. } else {
  446. cred = xchg(&mirror->rw_cred, cred);
  447. rcu_assign_pointer(fls->mirror_array[i]->rw_cred, cred);
  448. }
  449. ff_layout_free_mirror(fls->mirror_array[i]);
  450. fls->mirror_array[i] = mirror;
  451. }
  452. dprintk("%s: iomode %s uid %u gid %u\n", __func__,
  453. lgr->range.iomode == IOMODE_READ ? "READ" : "RW",
  454. from_kuid(&init_user_ns, uid),
  455. from_kgid(&init_user_ns, gid));
  456. }
  457. p = xdr_inline_decode(&stream, 4);
  458. if (!p)
  459. goto out_sort_mirrors;
  460. fls->flags = be32_to_cpup(p);
  461. p = xdr_inline_decode(&stream, 4);
  462. if (!p)
  463. goto out_sort_mirrors;
  464. for (i = 0; i < fls->mirror_array_cnt; i++)
  465. fls->mirror_array[i]->report_interval = be32_to_cpup(p);
  466. out_sort_mirrors:
  467. ff_layout_sort_mirrors(fls);
  468. ret = &fls->generic_hdr;
  469. dprintk("<-- %s (success)\n", __func__);
  470. out_free_page:
  471. __free_page(scratch);
  472. return ret;
  473. out_err_free:
  474. _ff_layout_free_lseg(fls);
  475. ret = ERR_PTR(rc);
  476. dprintk("<-- %s (%d)\n", __func__, rc);
  477. goto out_free_page;
  478. }
  479. static void
  480. ff_layout_free_lseg(struct pnfs_layout_segment *lseg)
  481. {
  482. struct nfs4_ff_layout_segment *fls = FF_LAYOUT_LSEG(lseg);
  483. dprintk("--> %s\n", __func__);
  484. if (lseg->pls_range.iomode == IOMODE_RW) {
  485. struct nfs4_flexfile_layout *ffl;
  486. struct inode *inode;
  487. ffl = FF_LAYOUT_FROM_HDR(lseg->pls_layout);
  488. inode = ffl->generic_hdr.plh_inode;
  489. spin_lock(&inode->i_lock);
  490. pnfs_generic_ds_cinfo_release_lseg(&ffl->commit_info, lseg);
  491. spin_unlock(&inode->i_lock);
  492. }
  493. _ff_layout_free_lseg(fls);
  494. }
  495. static void
  496. nfs4_ff_start_busy_timer(struct nfs4_ff_busy_timer *timer, ktime_t now)
  497. {
  498. /* first IO request? */
  499. if (atomic_inc_return(&timer->n_ops) == 1) {
  500. timer->start_time = now;
  501. }
  502. }
  503. static ktime_t
  504. nfs4_ff_end_busy_timer(struct nfs4_ff_busy_timer *timer, ktime_t now)
  505. {
  506. ktime_t start;
  507. if (atomic_dec_return(&timer->n_ops) < 0)
  508. WARN_ON_ONCE(1);
  509. start = timer->start_time;
  510. timer->start_time = now;
  511. return ktime_sub(now, start);
  512. }
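/*
 * Start the busy timer and decide whether a LAYOUTSTATS report is due:
 * returns true once the time since the last report reaches the
 * per-mirror report_interval from the layout, the global
 * layoutstats_timer setting, or the built-in default (in that order
 * of precedence).
 */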
  513. static bool
  514. nfs4_ff_layoutstat_start_io(struct nfs4_ff_layout_mirror *mirror,
  515. struct nfs4_ff_layoutstat *layoutstat,
  516. ktime_t now)
  517. {
  518. s64 report_interval = FF_LAYOUTSTATS_REPORT_INTERVAL;
  519. struct nfs4_flexfile_layout *ffl = FF_LAYOUT_FROM_HDR(mirror->layout);
  520. nfs4_ff_start_busy_timer(&layoutstat->busy_timer, now);
  521. if (!mirror->start_time)
  522. mirror->start_time = now;
  523. if (mirror->report_interval != 0)
  524. report_interval = (s64)mirror->report_interval * 1000LL;
  525. else if (layoutstats_timer != 0)
  526. report_interval = (s64)layoutstats_timer * 1000LL;
  527. if (ktime_to_ms(ktime_sub(now, ffl->last_report_time)) >=
  528. report_interval) {
  529. ffl->last_report_time = now;
  530. return true;
  531. }
  532. return false;
  533. }
  534. static void
  535. nfs4_ff_layout_stat_io_update_requested(struct nfs4_ff_layoutstat *layoutstat,
  536. __u64 requested)
  537. {
  538. struct nfs4_ff_io_stat *iostat = &layoutstat->io_stat;
  539. iostat->ops_requested++;
  540. iostat->bytes_requested += requested;
  541. }
  542. static void
  543. nfs4_ff_layout_stat_io_update_completed(struct nfs4_ff_layoutstat *layoutstat,
  544. __u64 requested,
  545. __u64 completed,
  546. ktime_t time_completed,
  547. ktime_t time_started)
  548. {
  549. struct nfs4_ff_io_stat *iostat = &layoutstat->io_stat;
  550. ktime_t completion_time = ktime_sub(time_completed, time_started);
  551. ktime_t timer;
  552. iostat->ops_completed++;
  553. iostat->bytes_completed += completed;
  554. iostat->bytes_not_delivered += requested - completed;
  555. timer = nfs4_ff_end_busy_timer(&layoutstat->busy_timer, time_completed);
  556. iostat->total_busy_time =
  557. ktime_add(iostat->total_busy_time, timer);
  558. iostat->aggregate_completion_time =
  559. ktime_add(iostat->aggregate_completion_time,
  560. completion_time);
  561. }
  562. static void
  563. nfs4_ff_layout_stat_io_start_read(struct inode *inode,
  564. struct nfs4_ff_layout_mirror *mirror,
  565. __u64 requested, ktime_t now)
  566. {
  567. bool report;
  568. spin_lock(&mirror->lock);
  569. report = nfs4_ff_layoutstat_start_io(mirror, &mirror->read_stat, now);
  570. nfs4_ff_layout_stat_io_update_requested(&mirror->read_stat, requested);
  571. set_bit(NFS4_FF_MIRROR_STAT_AVAIL, &mirror->flags);
  572. spin_unlock(&mirror->lock);
  573. if (report)
  574. pnfs_report_layoutstat(inode, nfs_io_gfp_mask());
  575. }
  576. static void
  577. nfs4_ff_layout_stat_io_end_read(struct rpc_task *task,
  578. struct nfs4_ff_layout_mirror *mirror,
  579. __u64 requested,
  580. __u64 completed)
  581. {
  582. spin_lock(&mirror->lock);
  583. nfs4_ff_layout_stat_io_update_completed(&mirror->read_stat,
  584. requested, completed,
  585. ktime_get(), task->tk_start);
  586. set_bit(NFS4_FF_MIRROR_STAT_AVAIL, &mirror->flags);
  587. spin_unlock(&mirror->lock);
  588. }
  589. static void
  590. nfs4_ff_layout_stat_io_start_write(struct inode *inode,
  591. struct nfs4_ff_layout_mirror *mirror,
  592. __u64 requested, ktime_t now)
  593. {
  594. bool report;
  595. spin_lock(&mirror->lock);
  596. report = nfs4_ff_layoutstat_start_io(mirror, &mirror->write_stat, now);
  597. nfs4_ff_layout_stat_io_update_requested(&mirror->write_stat, requested);
  598. set_bit(NFS4_FF_MIRROR_STAT_AVAIL, &mirror->flags);
  599. spin_unlock(&mirror->lock);
  600. if (report)
  601. pnfs_report_layoutstat(inode, nfs_io_gfp_mask());
  602. }
  603. static void
  604. nfs4_ff_layout_stat_io_end_write(struct rpc_task *task,
  605. struct nfs4_ff_layout_mirror *mirror,
  606. __u64 requested,
  607. __u64 completed,
  608. enum nfs3_stable_how committed)
  609. {
  610. if (committed == NFS_UNSTABLE)
  611. requested = completed = 0;
  612. spin_lock(&mirror->lock);
  613. nfs4_ff_layout_stat_io_update_completed(&mirror->write_stat,
  614. requested, completed, ktime_get(), task->tk_start);
  615. set_bit(NFS4_FF_MIRROR_STAT_AVAIL, &mirror->flags);
  616. spin_unlock(&mirror->lock);
  617. }
  618. static void
  619. ff_layout_mark_ds_unreachable(struct pnfs_layout_segment *lseg, u32 idx)
  620. {
  621. struct nfs4_deviceid_node *devid = FF_LAYOUT_DEVID_NODE(lseg, idx);
  622. if (devid)
  623. nfs4_mark_deviceid_unavailable(devid);
  624. }
  625. static void
  626. ff_layout_mark_ds_reachable(struct pnfs_layout_segment *lseg, u32 idx)
  627. {
  628. struct nfs4_deviceid_node *devid = FF_LAYOUT_DEVID_NODE(lseg, idx);
  629. if (devid)
  630. nfs4_mark_deviceid_available(devid);
  631. }
  632. static struct nfs4_pnfs_ds *
  633. ff_layout_choose_ds_for_read(struct pnfs_layout_segment *lseg,
  634. u32 start_idx, u32 *best_idx,
  635. bool check_device)
  636. {
  637. struct nfs4_ff_layout_segment *fls = FF_LAYOUT_LSEG(lseg);
  638. struct nfs4_ff_layout_mirror *mirror;
  639. struct nfs4_pnfs_ds *ds;
  640. u32 idx;
  641. /* mirrors are initially sorted by efficiency */
  642. for (idx = start_idx; idx < fls->mirror_array_cnt; idx++) {
  643. mirror = FF_LAYOUT_COMP(lseg, idx);
  644. ds = nfs4_ff_layout_prepare_ds(lseg, mirror, false);
  645. if (!ds)
  646. continue;
  647. if (check_device &&
  648. nfs4_test_deviceid_unavailable(&mirror->mirror_ds->id_node))
  649. continue;
  650. *best_idx = idx;
  651. return ds;
  652. }
  653. return NULL;
  654. }
  655. static struct nfs4_pnfs_ds *
  656. ff_layout_choose_any_ds_for_read(struct pnfs_layout_segment *lseg,
  657. u32 start_idx, u32 *best_idx)
  658. {
  659. return ff_layout_choose_ds_for_read(lseg, start_idx, best_idx, false);
  660. }
  661. static struct nfs4_pnfs_ds *
  662. ff_layout_choose_valid_ds_for_read(struct pnfs_layout_segment *lseg,
  663. u32 start_idx, u32 *best_idx)
  664. {
  665. return ff_layout_choose_ds_for_read(lseg, start_idx, best_idx, true);
  666. }
  667. static struct nfs4_pnfs_ds *
  668. ff_layout_choose_best_ds_for_read(struct pnfs_layout_segment *lseg,
  669. u32 start_idx, u32 *best_idx)
  670. {
  671. struct nfs4_pnfs_ds *ds;
  672. ds = ff_layout_choose_valid_ds_for_read(lseg, start_idx, best_idx);
  673. if (ds)
  674. return ds;
  675. return ff_layout_choose_any_ds_for_read(lseg, start_idx, best_idx);
  676. }
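/*
 * Pick a data server for a read: start the search at the pgio's current
 * mirror index and, if nothing usable is found there, wrap around and
 * retry from mirror 0.
 */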
  677. static struct nfs4_pnfs_ds *
  678. ff_layout_get_ds_for_read(struct nfs_pageio_descriptor *pgio,
  679. u32 *best_idx)
  680. {
  681. struct pnfs_layout_segment *lseg = pgio->pg_lseg;
  682. struct nfs4_pnfs_ds *ds;
  683. ds = ff_layout_choose_best_ds_for_read(lseg, pgio->pg_mirror_idx,
  684. best_idx);
  685. if (ds || !pgio->pg_mirror_idx)
  686. return ds;
  687. return ff_layout_choose_best_ds_for_read(lseg, 0, best_idx);
  688. }
  689. static void
  690. ff_layout_pg_get_read(struct nfs_pageio_descriptor *pgio,
  691. struct nfs_page *req,
  692. bool strict_iomode)
  693. {
  694. pnfs_put_lseg(pgio->pg_lseg);
  695. pgio->pg_lseg =
  696. pnfs_update_layout(pgio->pg_inode, nfs_req_openctx(req),
  697. req_offset(req), req->wb_bytes, IOMODE_READ,
  698. strict_iomode, nfs_io_gfp_mask());
  699. if (IS_ERR(pgio->pg_lseg)) {
  700. pgio->pg_error = PTR_ERR(pgio->pg_lseg);
  701. pgio->pg_lseg = NULL;
  702. }
  703. }
  704. static void
  705. ff_layout_pg_check_layout(struct nfs_pageio_descriptor *pgio,
  706. struct nfs_page *req)
  707. {
  708. pnfs_generic_pg_check_layout(pgio);
  709. pnfs_generic_pg_check_range(pgio, req);
  710. }
  711. static void
  712. ff_layout_pg_init_read(struct nfs_pageio_descriptor *pgio,
  713. struct nfs_page *req)
  714. {
  715. struct nfs_pgio_mirror *pgm;
  716. struct nfs4_ff_layout_mirror *mirror;
  717. struct nfs4_pnfs_ds *ds;
  718. u32 ds_idx;
  719. retry:
  720. ff_layout_pg_check_layout(pgio, req);
  721. /* Use full layout for now */
  722. if (!pgio->pg_lseg) {
  723. ff_layout_pg_get_read(pgio, req, false);
  724. if (!pgio->pg_lseg)
  725. goto out_nolseg;
  726. }
  727. if (ff_layout_avoid_read_on_rw(pgio->pg_lseg)) {
  728. ff_layout_pg_get_read(pgio, req, true);
  729. if (!pgio->pg_lseg)
  730. goto out_nolseg;
  731. }
  732. ds = ff_layout_get_ds_for_read(pgio, &ds_idx);
  733. if (!ds) {
  734. if (!ff_layout_no_fallback_to_mds(pgio->pg_lseg))
  735. goto out_mds;
  736. pnfs_generic_pg_cleanup(pgio);
  737. /* Sleep for 1 second before retrying */
  738. ssleep(1);
  739. goto retry;
  740. }
  741. mirror = FF_LAYOUT_COMP(pgio->pg_lseg, ds_idx);
  742. pgm = &pgio->pg_mirrors[0];
  743. pgm->pg_bsize = mirror->mirror_ds->ds_versions[0].rsize;
  744. pgio->pg_mirror_idx = ds_idx;
  745. if (NFS_SERVER(pgio->pg_inode)->flags &
  746. (NFS_MOUNT_SOFT|NFS_MOUNT_SOFTERR))
  747. pgio->pg_maxretrans = io_maxretrans;
  748. return;
  749. out_nolseg:
  750. if (pgio->pg_error < 0)
  751. return;
  752. out_mds:
  753. trace_pnfs_mds_fallback_pg_init_read(pgio->pg_inode,
  754. 0, NFS4_MAX_UINT64, IOMODE_READ,
  755. NFS_I(pgio->pg_inode)->layout,
  756. pgio->pg_lseg);
  757. pgio->pg_maxretrans = 0;
  758. nfs_pageio_reset_read_mds(pgio);
  759. }
  760. static void
  761. ff_layout_pg_init_write(struct nfs_pageio_descriptor *pgio,
  762. struct nfs_page *req)
  763. {
  764. struct nfs4_ff_layout_mirror *mirror;
  765. struct nfs_pgio_mirror *pgm;
  766. struct nfs4_pnfs_ds *ds;
  767. u32 i;
  768. retry:
  769. ff_layout_pg_check_layout(pgio, req);
  770. if (!pgio->pg_lseg) {
  771. pgio->pg_lseg =
  772. pnfs_update_layout(pgio->pg_inode, nfs_req_openctx(req),
  773. req_offset(req), req->wb_bytes,
  774. IOMODE_RW, false, nfs_io_gfp_mask());
  775. if (IS_ERR(pgio->pg_lseg)) {
  776. pgio->pg_error = PTR_ERR(pgio->pg_lseg);
  777. pgio->pg_lseg = NULL;
  778. return;
  779. }
  780. }
  781. /* If no lseg, fall back to write through mds */
  782. if (pgio->pg_lseg == NULL)
  783. goto out_mds;
  784. /* Use a direct mapping of ds_idx to pgio mirror_idx */
  785. if (pgio->pg_mirror_count != FF_LAYOUT_MIRROR_COUNT(pgio->pg_lseg))
  786. goto out_eagain;
  787. for (i = 0; i < pgio->pg_mirror_count; i++) {
  788. mirror = FF_LAYOUT_COMP(pgio->pg_lseg, i);
  789. ds = nfs4_ff_layout_prepare_ds(pgio->pg_lseg, mirror, true);
  790. if (!ds) {
  791. if (!ff_layout_no_fallback_to_mds(pgio->pg_lseg))
  792. goto out_mds;
  793. pnfs_generic_pg_cleanup(pgio);
  794. /* Sleep for 1 second before retrying */
  795. ssleep(1);
  796. goto retry;
  797. }
  798. pgm = &pgio->pg_mirrors[i];
  799. pgm->pg_bsize = mirror->mirror_ds->ds_versions[0].wsize;
  800. }
  801. if (NFS_SERVER(pgio->pg_inode)->flags &
  802. (NFS_MOUNT_SOFT|NFS_MOUNT_SOFTERR))
  803. pgio->pg_maxretrans = io_maxretrans;
  804. return;
  805. out_eagain:
  806. pnfs_generic_pg_cleanup(pgio);
  807. pgio->pg_error = -EAGAIN;
  808. return;
  809. out_mds:
  810. trace_pnfs_mds_fallback_pg_init_write(pgio->pg_inode,
  811. 0, NFS4_MAX_UINT64, IOMODE_RW,
  812. NFS_I(pgio->pg_inode)->layout,
  813. pgio->pg_lseg);
  814. pgio->pg_maxretrans = 0;
  815. nfs_pageio_reset_write_mds(pgio);
  816. pgio->pg_error = -EAGAIN;
  817. }
  818. static unsigned int
  819. ff_layout_pg_get_mirror_count_write(struct nfs_pageio_descriptor *pgio,
  820. struct nfs_page *req)
  821. {
  822. if (!pgio->pg_lseg) {
  823. pgio->pg_lseg =
  824. pnfs_update_layout(pgio->pg_inode, nfs_req_openctx(req),
  825. req_offset(req), req->wb_bytes,
  826. IOMODE_RW, false, nfs_io_gfp_mask());
  827. if (IS_ERR(pgio->pg_lseg)) {
  828. pgio->pg_error = PTR_ERR(pgio->pg_lseg);
  829. pgio->pg_lseg = NULL;
  830. goto out;
  831. }
  832. }
  833. if (pgio->pg_lseg)
  834. return FF_LAYOUT_MIRROR_COUNT(pgio->pg_lseg);
  835. trace_pnfs_mds_fallback_pg_get_mirror_count(pgio->pg_inode,
  836. 0, NFS4_MAX_UINT64, IOMODE_RW,
  837. NFS_I(pgio->pg_inode)->layout,
  838. pgio->pg_lseg);
  839. /* no lseg means that pnfs is not in use, so no mirroring here */
  840. nfs_pageio_reset_write_mds(pgio);
  841. out:
  842. return 1;
  843. }
  844. static u32
  845. ff_layout_pg_set_mirror_write(struct nfs_pageio_descriptor *desc, u32 idx)
  846. {
  847. u32 old = desc->pg_mirror_idx;
  848. desc->pg_mirror_idx = idx;
  849. return old;
  850. }
  851. static struct nfs_pgio_mirror *
  852. ff_layout_pg_get_mirror_write(struct nfs_pageio_descriptor *desc, u32 idx)
  853. {
  854. return &desc->pg_mirrors[idx];
  855. }
  856. static const struct nfs_pageio_ops ff_layout_pg_read_ops = {
  857. .pg_init = ff_layout_pg_init_read,
  858. .pg_test = pnfs_generic_pg_test,
  859. .pg_doio = pnfs_generic_pg_readpages,
  860. .pg_cleanup = pnfs_generic_pg_cleanup,
  861. };
  862. static const struct nfs_pageio_ops ff_layout_pg_write_ops = {
  863. .pg_init = ff_layout_pg_init_write,
  864. .pg_test = pnfs_generic_pg_test,
  865. .pg_doio = pnfs_generic_pg_writepages,
  866. .pg_get_mirror_count = ff_layout_pg_get_mirror_count_write,
  867. .pg_cleanup = pnfs_generic_pg_cleanup,
  868. .pg_get_mirror = ff_layout_pg_get_mirror_write,
  869. .pg_set_mirror = ff_layout_pg_set_mirror_write,
  870. };
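/*
 * Redirect a failed write: kick off a layoutcommit for what has been
 * written, then either reschedule the I/O through pNFS (retry_pnfs)
 * or mark the header for a resend through the MDS.
 */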
  871. static void ff_layout_reset_write(struct nfs_pgio_header *hdr, bool retry_pnfs)
  872. {
  873. struct rpc_task *task = &hdr->task;
  874. pnfs_layoutcommit_inode(hdr->inode, false);
  875. if (retry_pnfs) {
  876. dprintk("%s Reset task %5u for i/o through pNFS "
  877. "(req %s/%llu, %u bytes @ offset %llu)\n", __func__,
  878. hdr->task.tk_pid,
  879. hdr->inode->i_sb->s_id,
  880. (unsigned long long)NFS_FILEID(hdr->inode),
  881. hdr->args.count,
  882. (unsigned long long)hdr->args.offset);
  883. hdr->completion_ops->reschedule_io(hdr);
  884. return;
  885. }
  886. if (!test_and_set_bit(NFS_IOHDR_REDO, &hdr->flags)) {
  887. dprintk("%s Reset task %5u for i/o through MDS "
  888. "(req %s/%llu, %u bytes @ offset %llu)\n", __func__,
  889. hdr->task.tk_pid,
  890. hdr->inode->i_sb->s_id,
  891. (unsigned long long)NFS_FILEID(hdr->inode),
  892. hdr->args.count,
  893. (unsigned long long)hdr->args.offset);
  894. trace_pnfs_mds_fallback_write_done(hdr->inode,
  895. hdr->args.offset, hdr->args.count,
  896. IOMODE_RW, NFS_I(hdr->inode)->layout,
  897. hdr->lseg);
  898. task->tk_status = pnfs_write_done_resend_to_mds(hdr);
  899. }
  900. }
  901. static void ff_layout_resend_pnfs_read(struct nfs_pgio_header *hdr)
  902. {
  903. u32 idx = hdr->pgio_mirror_idx + 1;
  904. u32 new_idx = 0;
  905. if (ff_layout_choose_any_ds_for_read(hdr->lseg, idx, &new_idx))
  906. ff_layout_send_layouterror(hdr->lseg);
  907. else
  908. pnfs_error_mark_layout_for_return(hdr->inode, hdr->lseg);
  909. pnfs_read_resend_pnfs(hdr, new_idx);
  910. }
  911. static void ff_layout_reset_read(struct nfs_pgio_header *hdr)
  912. {
  913. struct rpc_task *task = &hdr->task;
  914. pnfs_layoutcommit_inode(hdr->inode, false);
  915. pnfs_error_mark_layout_for_return(hdr->inode, hdr->lseg);
  916. if (!test_and_set_bit(NFS_IOHDR_REDO, &hdr->flags)) {
  917. dprintk("%s Reset task %5u for i/o through MDS "
  918. "(req %s/%llu, %u bytes @ offset %llu)\n", __func__,
  919. hdr->task.tk_pid,
  920. hdr->inode->i_sb->s_id,
  921. (unsigned long long)NFS_FILEID(hdr->inode),
  922. hdr->args.count,
  923. (unsigned long long)hdr->args.offset);
  924. trace_pnfs_mds_fallback_read_done(hdr->inode,
  925. hdr->args.offset, hdr->args.count,
  926. IOMODE_READ, NFS_I(hdr->inode)->layout,
  927. hdr->lseg);
  928. task->tk_status = pnfs_read_done_resend_to_mds(hdr);
  929. }
  930. }
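/*
 * The error handlers below return -NFS4ERR_RESET_TO_PNFS to resend the
 * I/O through another mirror, -NFS4ERR_RESET_TO_MDS to fall back to the
 * metadata server, or -EAGAIN to restart the RPC against the same DS.
 */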
  931. static int ff_layout_async_handle_error_v4(struct rpc_task *task,
  932. struct nfs4_state *state,
  933. struct nfs_client *clp,
  934. struct pnfs_layout_segment *lseg,
  935. u32 idx)
  936. {
  937. struct pnfs_layout_hdr *lo = lseg->pls_layout;
  938. struct inode *inode = lo->plh_inode;
  939. struct nfs4_deviceid_node *devid = FF_LAYOUT_DEVID_NODE(lseg, idx);
  940. struct nfs4_slot_table *tbl = &clp->cl_session->fc_slot_table;
  941. switch (task->tk_status) {
  942. case -NFS4ERR_BADSESSION:
  943. case -NFS4ERR_BADSLOT:
  944. case -NFS4ERR_BAD_HIGH_SLOT:
  945. case -NFS4ERR_DEADSESSION:
  946. case -NFS4ERR_CONN_NOT_BOUND_TO_SESSION:
  947. case -NFS4ERR_SEQ_FALSE_RETRY:
  948. case -NFS4ERR_SEQ_MISORDERED:
  949. dprintk("%s ERROR %d, Reset session. Exchangeid "
  950. "flags 0x%x\n", __func__, task->tk_status,
  951. clp->cl_exchange_flags);
  952. nfs4_schedule_session_recovery(clp->cl_session, task->tk_status);
  953. break;
  954. case -NFS4ERR_DELAY:
  955. case -NFS4ERR_GRACE:
  956. rpc_delay(task, FF_LAYOUT_POLL_RETRY_MAX);
  957. break;
  958. case -NFS4ERR_RETRY_UNCACHED_REP:
  959. break;
  960. /* Invalidate Layout errors */
  961. case -NFS4ERR_PNFS_NO_LAYOUT:
  962. case -ESTALE: /* mapped NFS4ERR_STALE */
  963. case -EBADHANDLE: /* mapped NFS4ERR_BADHANDLE */
  964. case -EISDIR: /* mapped NFS4ERR_ISDIR */
  965. case -NFS4ERR_FHEXPIRED:
  966. case -NFS4ERR_WRONG_TYPE:
  967. dprintk("%s Invalid layout error %d\n", __func__,
  968. task->tk_status);
  969. /*
  970. * Destroy layout so new i/o will get a new layout.
  971. * Layout will not be destroyed until all current lseg
  972. * references are put. Mark layout as invalid to resend failed
  973. * i/o and all i/o waiting on the slot table to the MDS until
  974. * layout is destroyed and a new valid layout is obtained.
  975. */
  976. pnfs_destroy_layout(NFS_I(inode));
  977. rpc_wake_up(&tbl->slot_tbl_waitq);
  978. goto reset;
  979. /* RPC connection errors */
  980. case -ECONNREFUSED:
  981. case -EHOSTDOWN:
  982. case -EHOSTUNREACH:
  983. case -ENETUNREACH:
  984. case -EIO:
  985. case -ETIMEDOUT:
  986. case -EPIPE:
  987. case -EPROTO:
  988. case -ENODEV:
  989. dprintk("%s DS connection error %d\n", __func__,
  990. task->tk_status);
  991. nfs4_delete_deviceid(devid->ld, devid->nfs_client,
  992. &devid->deviceid);
  993. rpc_wake_up(&tbl->slot_tbl_waitq);
  994. fallthrough;
  995. default:
  996. if (ff_layout_avoid_mds_available_ds(lseg))
  997. return -NFS4ERR_RESET_TO_PNFS;
  998. reset:
  999. dprintk("%s Retry through MDS. Error %d\n", __func__,
  1000. task->tk_status);
  1001. return -NFS4ERR_RESET_TO_MDS;
  1002. }
  1003. task->tk_status = 0;
  1004. return -EAGAIN;
  1005. }
  1006. /* Retry all errors through either pNFS or MDS except for -EJUKEBOX */
  1007. static int ff_layout_async_handle_error_v3(struct rpc_task *task,
  1008. struct pnfs_layout_segment *lseg,
  1009. u32 idx)
  1010. {
  1011. struct nfs4_deviceid_node *devid = FF_LAYOUT_DEVID_NODE(lseg, idx);
  1012. switch (task->tk_status) {
  1013. /* File access problems. Don't mark the device as unavailable */
  1014. case -EACCES:
  1015. case -ESTALE:
  1016. case -EISDIR:
  1017. case -EBADHANDLE:
  1018. case -ELOOP:
  1019. case -ENOSPC:
  1020. break;
  1021. case -EJUKEBOX:
  1022. nfs_inc_stats(lseg->pls_layout->plh_inode, NFSIOS_DELAY);
  1023. goto out_retry;
  1024. default:
  1025. dprintk("%s DS connection error %d\n", __func__,
  1026. task->tk_status);
  1027. nfs4_delete_deviceid(devid->ld, devid->nfs_client,
  1028. &devid->deviceid);
  1029. }
  1030. /* FIXME: Need to prevent infinite looping here. */
  1031. return -NFS4ERR_RESET_TO_PNFS;
  1032. out_retry:
  1033. task->tk_status = 0;
  1034. rpc_restart_call_prepare(task);
  1035. rpc_delay(task, NFS_JUKEBOX_RETRY_TIME);
  1036. return -EAGAIN;
  1037. }
  1038. static int ff_layout_async_handle_error(struct rpc_task *task,
  1039. struct nfs4_state *state,
  1040. struct nfs_client *clp,
  1041. struct pnfs_layout_segment *lseg,
  1042. u32 idx)
  1043. {
  1044. int vers = clp->cl_nfs_mod->rpc_vers->number;
  1045. if (task->tk_status >= 0) {
  1046. ff_layout_mark_ds_reachable(lseg, idx);
  1047. return 0;
  1048. }
  1049. /* Handle the case of an invalid layout segment */
  1050. if (!pnfs_is_valid_lseg(lseg))
  1051. return -NFS4ERR_RESET_TO_PNFS;
  1052. switch (vers) {
  1053. case 3:
  1054. return ff_layout_async_handle_error_v3(task, lseg, idx);
  1055. case 4:
  1056. return ff_layout_async_handle_error_v4(task, state, clp,
  1057. lseg, idx);
  1058. default:
  1059. /* should never happen */
  1060. WARN_ON_ONCE(1);
  1061. return 0;
  1062. }
  1063. }
  1064. static void ff_layout_io_track_ds_error(struct pnfs_layout_segment *lseg,
  1065. u32 idx, u64 offset, u64 length,
  1066. u32 *op_status, int opnum, int error)
  1067. {
  1068. struct nfs4_ff_layout_mirror *mirror;
  1069. u32 status = *op_status;
  1070. int err;
  1071. if (status == 0) {
  1072. switch (error) {
  1073. case -ETIMEDOUT:
  1074. case -EPFNOSUPPORT:
  1075. case -EPROTONOSUPPORT:
  1076. case -EOPNOTSUPP:
  1077. case -EINVAL:
  1078. case -ECONNREFUSED:
  1079. case -ECONNRESET:
  1080. case -EHOSTDOWN:
  1081. case -EHOSTUNREACH:
  1082. case -ENETUNREACH:
  1083. case -EADDRINUSE:
  1084. case -ENOBUFS:
  1085. case -EPIPE:
  1086. case -EPERM:
  1087. case -EPROTO:
  1088. case -ENODEV:
  1089. *op_status = status = NFS4ERR_NXIO;
  1090. break;
  1091. case -EACCES:
  1092. *op_status = status = NFS4ERR_ACCESS;
  1093. break;
  1094. default:
  1095. return;
  1096. }
  1097. }
  1098. mirror = FF_LAYOUT_COMP(lseg, idx);
  1099. err = ff_layout_track_ds_error(FF_LAYOUT_FROM_HDR(lseg->pls_layout),
  1100. mirror, offset, length, status, opnum,
  1101. nfs_io_gfp_mask());
  1102. switch (status) {
  1103. case NFS4ERR_DELAY:
  1104. case NFS4ERR_GRACE:
  1105. break;
  1106. case NFS4ERR_NXIO:
  1107. ff_layout_mark_ds_unreachable(lseg, idx);
  1108. /*
  1109. * Don't return the layout if this is a read and we still
  1110. * have layouts to try
  1111. */
  1112. if (opnum == OP_READ)
  1113. break;
  1114. fallthrough;
  1115. default:
  1116. pnfs_error_mark_layout_for_return(lseg->pls_layout->plh_inode,
  1117. lseg);
  1118. }
  1119. dprintk("%s: err %d op %d status %u\n", __func__, err, opnum, status);
  1120. }
  1121. /* NFS_PROTO call done callback routines */
  1122. static int ff_layout_read_done_cb(struct rpc_task *task,
  1123. struct nfs_pgio_header *hdr)
  1124. {
  1125. int err;
  1126. if (task->tk_status < 0) {
  1127. ff_layout_io_track_ds_error(hdr->lseg, hdr->pgio_mirror_idx,
  1128. hdr->args.offset, hdr->args.count,
  1129. &hdr->res.op_status, OP_READ,
  1130. task->tk_status);
  1131. trace_ff_layout_read_error(hdr);
  1132. }
  1133. err = ff_layout_async_handle_error(task, hdr->args.context->state,
  1134. hdr->ds_clp, hdr->lseg,
  1135. hdr->pgio_mirror_idx);
  1136. trace_nfs4_pnfs_read(hdr, err);
  1137. clear_bit(NFS_IOHDR_RESEND_PNFS, &hdr->flags);
  1138. clear_bit(NFS_IOHDR_RESEND_MDS, &hdr->flags);
  1139. switch (err) {
  1140. case -NFS4ERR_RESET_TO_PNFS:
  1141. set_bit(NFS_IOHDR_RESEND_PNFS, &hdr->flags);
  1142. return task->tk_status;
  1143. case -NFS4ERR_RESET_TO_MDS:
  1144. set_bit(NFS_IOHDR_RESEND_MDS, &hdr->flags);
  1145. return task->tk_status;
  1146. case -EAGAIN:
  1147. goto out_eagain;
  1148. }
  1149. return 0;
  1150. out_eagain:
  1151. rpc_restart_call_prepare(task);
  1152. return -EAGAIN;
  1153. }
  1154. static bool
  1155. ff_layout_need_layoutcommit(struct pnfs_layout_segment *lseg)
  1156. {
  1157. return !(FF_LAYOUT_LSEG(lseg)->flags & FF_FLAGS_NO_LAYOUTCOMMIT);
  1158. }
  1159. /*
  1160. * We reference the rpc_cred of the first WRITE that triggers the need for
  1161. * a LAYOUTCOMMIT, and use it to send the layoutcommit compound.
  1162. * RFC 5661 is not clear about which credential should be used.
  1163. *
  1164. * The flexfiles client should treat a FILE_SYNC reply from the DS as
  1165. * DATA_SYNC; following http://www.rfc-editor.org/errata_search.php?rfc=5661&eid=2751,
  1166. * we always send a layoutcommit after DS writes.
  1167. */
  1168. static void
  1169. ff_layout_set_layoutcommit(struct inode *inode,
  1170. struct pnfs_layout_segment *lseg,
  1171. loff_t end_offset)
  1172. {
  1173. if (!ff_layout_need_layoutcommit(lseg))
  1174. return;
  1175. pnfs_set_layoutcommit(inode, lseg, end_offset);
  1176. dprintk("%s inode %lu pls_end_pos %llu\n", __func__, inode->i_ino,
  1177. (unsigned long long) NFS_I(inode)->layout->plh_lwb);
  1178. }
  1179. static void ff_layout_read_record_layoutstats_start(struct rpc_task *task,
  1180. struct nfs_pgio_header *hdr)
  1181. {
  1182. if (test_and_set_bit(NFS_IOHDR_STAT, &hdr->flags))
  1183. return;
  1184. nfs4_ff_layout_stat_io_start_read(hdr->inode,
  1185. FF_LAYOUT_COMP(hdr->lseg, hdr->pgio_mirror_idx),
  1186. hdr->args.count,
  1187. task->tk_start);
  1188. }
  1189. static void ff_layout_read_record_layoutstats_done(struct rpc_task *task,
  1190. struct nfs_pgio_header *hdr)
  1191. {
  1192. if (!test_and_clear_bit(NFS_IOHDR_STAT, &hdr->flags))
  1193. return;
  1194. nfs4_ff_layout_stat_io_end_read(task,
  1195. FF_LAYOUT_COMP(hdr->lseg, hdr->pgio_mirror_idx),
  1196. hdr->args.count,
  1197. hdr->res.count);
  1198. set_bit(NFS_LSEG_LAYOUTRETURN, &hdr->lseg->pls_flags);
  1199. }
  1200. static int ff_layout_read_prepare_common(struct rpc_task *task,
  1201. struct nfs_pgio_header *hdr)
  1202. {
  1203. if (unlikely(test_bit(NFS_CONTEXT_BAD, &hdr->args.context->flags))) {
  1204. rpc_exit(task, -EIO);
  1205. return -EIO;
  1206. }
  1207. if (!pnfs_is_valid_lseg(hdr->lseg)) {
  1208. rpc_exit(task, -EAGAIN);
  1209. return -EAGAIN;
  1210. }
  1211. ff_layout_read_record_layoutstats_start(task, hdr);
  1212. return 0;
  1213. }
  1214. /*
  1215. * Call ops for the async read/write cases
  1216. * In the case of dense layouts, the offset needs to be reset to its
  1217. * original value.
  1218. */
  1219. static void ff_layout_read_prepare_v3(struct rpc_task *task, void *data)
  1220. {
  1221. struct nfs_pgio_header *hdr = data;
  1222. if (ff_layout_read_prepare_common(task, hdr))
  1223. return;
  1224. rpc_call_start(task);
  1225. }
  1226. static void ff_layout_read_prepare_v4(struct rpc_task *task, void *data)
  1227. {
  1228. struct nfs_pgio_header *hdr = data;
  1229. if (nfs4_setup_sequence(hdr->ds_clp,
  1230. &hdr->args.seq_args,
  1231. &hdr->res.seq_res,
  1232. task))
  1233. return;
  1234. ff_layout_read_prepare_common(task, hdr);
  1235. }
  1236. static void ff_layout_read_call_done(struct rpc_task *task, void *data)
  1237. {
  1238. struct nfs_pgio_header *hdr = data;
  1239. if (test_bit(NFS_IOHDR_REDO, &hdr->flags) &&
  1240. task->tk_status == 0) {
  1241. nfs4_sequence_done(task, &hdr->res.seq_res);
  1242. return;
  1243. }
  1244. /* Note this may cause RPC to be resent */
  1245. hdr->mds_ops->rpc_call_done(task, hdr);
  1246. }
  1247. static void ff_layout_read_count_stats(struct rpc_task *task, void *data)
  1248. {
  1249. struct nfs_pgio_header *hdr = data;
  1250. ff_layout_read_record_layoutstats_done(task, hdr);
  1251. rpc_count_iostats_metrics(task,
  1252. &NFS_CLIENT(hdr->inode)->cl_metrics[NFSPROC4_CLNT_READ]);
  1253. }
  1254. static void ff_layout_read_release(void *data)
  1255. {
  1256. struct nfs_pgio_header *hdr = data;
  1257. ff_layout_read_record_layoutstats_done(&hdr->task, hdr);
  1258. if (test_bit(NFS_IOHDR_RESEND_PNFS, &hdr->flags))
  1259. ff_layout_resend_pnfs_read(hdr);
  1260. else if (test_bit(NFS_IOHDR_RESEND_MDS, &hdr->flags))
  1261. ff_layout_reset_read(hdr);
  1262. pnfs_generic_rw_release(data);
  1263. }
  1264. static int ff_layout_write_done_cb(struct rpc_task *task,
  1265. struct nfs_pgio_header *hdr)
  1266. {
  1267. loff_t end_offs = 0;
  1268. int err;
  1269. if (task->tk_status < 0) {
  1270. ff_layout_io_track_ds_error(hdr->lseg, hdr->pgio_mirror_idx,
  1271. hdr->args.offset, hdr->args.count,
  1272. &hdr->res.op_status, OP_WRITE,
  1273. task->tk_status);
  1274. trace_ff_layout_write_error(hdr);
  1275. }
  1276. err = ff_layout_async_handle_error(task, hdr->args.context->state,
  1277. hdr->ds_clp, hdr->lseg,
  1278. hdr->pgio_mirror_idx);
  1279. trace_nfs4_pnfs_write(hdr, err);
  1280. clear_bit(NFS_IOHDR_RESEND_PNFS, &hdr->flags);
  1281. clear_bit(NFS_IOHDR_RESEND_MDS, &hdr->flags);
  1282. switch (err) {
  1283. case -NFS4ERR_RESET_TO_PNFS:
  1284. set_bit(NFS_IOHDR_RESEND_PNFS, &hdr->flags);
  1285. return task->tk_status;
  1286. case -NFS4ERR_RESET_TO_MDS:
  1287. set_bit(NFS_IOHDR_RESEND_MDS, &hdr->flags);
  1288. return task->tk_status;
  1289. case -EAGAIN:
  1290. return -EAGAIN;
  1291. }
  1292. if (hdr->res.verf->committed == NFS_FILE_SYNC ||
  1293. hdr->res.verf->committed == NFS_DATA_SYNC)
  1294. end_offs = hdr->mds_offset + (loff_t)hdr->res.count;
  1295. /* Note: if the write is unstable, don't set end_offs until commit */
  1296. ff_layout_set_layoutcommit(hdr->inode, hdr->lseg, end_offs);
  1297. /* Zero out the fattr; we don't care about DS attributes at all */
  1298. hdr->fattr.valid = 0;
  1299. if (task->tk_status >= 0)
  1300. nfs_writeback_update_inode(hdr);
  1301. return 0;
  1302. }
  1303. static int ff_layout_commit_done_cb(struct rpc_task *task,
  1304. struct nfs_commit_data *data)
  1305. {
  1306. int err;
  1307. if (task->tk_status < 0) {
  1308. ff_layout_io_track_ds_error(data->lseg, data->ds_commit_index,
  1309. data->args.offset, data->args.count,
  1310. &data->res.op_status, OP_COMMIT,
  1311. task->tk_status);
  1312. trace_ff_layout_commit_error(data);
  1313. }
  1314. err = ff_layout_async_handle_error(task, NULL, data->ds_clp,
  1315. data->lseg, data->ds_commit_index);
  1316. trace_nfs4_pnfs_commit_ds(data, err);
  1317. switch (err) {
  1318. case -NFS4ERR_RESET_TO_PNFS:
  1319. pnfs_generic_prepare_to_resend_writes(data);
  1320. return -EAGAIN;
  1321. case -NFS4ERR_RESET_TO_MDS:
  1322. pnfs_generic_prepare_to_resend_writes(data);
  1323. return -EAGAIN;
  1324. case -EAGAIN:
  1325. rpc_restart_call_prepare(task);
  1326. return -EAGAIN;
  1327. }
  1328. ff_layout_set_layoutcommit(data->inode, data->lseg, data->lwb);
  1329. return 0;
  1330. }

static void ff_layout_write_record_layoutstats_start(struct rpc_task *task,
                struct nfs_pgio_header *hdr)
{
        if (test_and_set_bit(NFS_IOHDR_STAT, &hdr->flags))
                return;
        nfs4_ff_layout_stat_io_start_write(hdr->inode,
                        FF_LAYOUT_COMP(hdr->lseg, hdr->pgio_mirror_idx),
                        hdr->args.count,
                        task->tk_start);
}

static void ff_layout_write_record_layoutstats_done(struct rpc_task *task,
                struct nfs_pgio_header *hdr)
{
        if (!test_and_clear_bit(NFS_IOHDR_STAT, &hdr->flags))
                return;
        nfs4_ff_layout_stat_io_end_write(task,
                        FF_LAYOUT_COMP(hdr->lseg, hdr->pgio_mirror_idx),
                        hdr->args.count, hdr->res.count,
                        hdr->res.verf->committed);
        set_bit(NFS_LSEG_LAYOUTRETURN, &hdr->lseg->pls_flags);
}

static int ff_layout_write_prepare_common(struct rpc_task *task,
                                           struct nfs_pgio_header *hdr)
{
        if (unlikely(test_bit(NFS_CONTEXT_BAD, &hdr->args.context->flags))) {
                rpc_exit(task, -EIO);
                return -EIO;
        }
        if (!pnfs_is_valid_lseg(hdr->lseg)) {
                rpc_exit(task, -EAGAIN);
                return -EAGAIN;
        }

        ff_layout_write_record_layoutstats_start(task, hdr);
        return 0;
}

static void ff_layout_write_prepare_v3(struct rpc_task *task, void *data)
{
        struct nfs_pgio_header *hdr = data;

        if (ff_layout_write_prepare_common(task, hdr))
                return;

        rpc_call_start(task);
}

static void ff_layout_write_prepare_v4(struct rpc_task *task, void *data)
{
        struct nfs_pgio_header *hdr = data;

        if (nfs4_setup_sequence(hdr->ds_clp,
                                &hdr->args.seq_args,
                                &hdr->res.seq_res,
                                task))
                return;

        ff_layout_write_prepare_common(task, hdr);
}

static void ff_layout_write_call_done(struct rpc_task *task, void *data)
{
        struct nfs_pgio_header *hdr = data;

        if (test_bit(NFS_IOHDR_REDO, &hdr->flags) &&
            task->tk_status == 0) {
                nfs4_sequence_done(task, &hdr->res.seq_res);
                return;
        }

        /* Note this may cause RPC to be resent */
        hdr->mds_ops->rpc_call_done(task, hdr);
}

static void ff_layout_write_count_stats(struct rpc_task *task, void *data)
{
        struct nfs_pgio_header *hdr = data;

        ff_layout_write_record_layoutstats_done(task, hdr);
        rpc_count_iostats_metrics(task,
            &NFS_CLIENT(hdr->inode)->cl_metrics[NFSPROC4_CLNT_WRITE]);
}

static void ff_layout_write_release(void *data)
{
        struct nfs_pgio_header *hdr = data;

        ff_layout_write_record_layoutstats_done(&hdr->task, hdr);
        if (test_bit(NFS_IOHDR_RESEND_PNFS, &hdr->flags)) {
                ff_layout_send_layouterror(hdr->lseg);
                ff_layout_reset_write(hdr, true);
        } else if (test_bit(NFS_IOHDR_RESEND_MDS, &hdr->flags))
                ff_layout_reset_write(hdr, false);
        pnfs_generic_rw_release(data);
}

static void ff_layout_commit_record_layoutstats_start(struct rpc_task *task,
                struct nfs_commit_data *cdata)
{
        if (test_and_set_bit(NFS_IOHDR_STAT, &cdata->flags))
                return;
        nfs4_ff_layout_stat_io_start_write(cdata->inode,
                        FF_LAYOUT_COMP(cdata->lseg, cdata->ds_commit_index),
                        0, task->tk_start);
}

static void ff_layout_commit_record_layoutstats_done(struct rpc_task *task,
                struct nfs_commit_data *cdata)
{
        struct nfs_page *req;
        __u64 count = 0;

        if (!test_and_clear_bit(NFS_IOHDR_STAT, &cdata->flags))
                return;

        if (task->tk_status == 0) {
                list_for_each_entry(req, &cdata->pages, wb_list)
                        count += req->wb_bytes;
        }
        nfs4_ff_layout_stat_io_end_write(task,
                        FF_LAYOUT_COMP(cdata->lseg, cdata->ds_commit_index),
                        count, count, NFS_FILE_SYNC);
        set_bit(NFS_LSEG_LAYOUTRETURN, &cdata->lseg->pls_flags);
}

static int ff_layout_commit_prepare_common(struct rpc_task *task,
                                            struct nfs_commit_data *cdata)
{
        if (!pnfs_is_valid_lseg(cdata->lseg)) {
                rpc_exit(task, -EAGAIN);
                return -EAGAIN;
        }

        ff_layout_commit_record_layoutstats_start(task, cdata);
        return 0;
}

static void ff_layout_commit_prepare_v3(struct rpc_task *task, void *data)
{
        if (ff_layout_commit_prepare_common(task, data))
                return;

        rpc_call_start(task);
}

static void ff_layout_commit_prepare_v4(struct rpc_task *task, void *data)
{
        struct nfs_commit_data *wdata = data;

        if (nfs4_setup_sequence(wdata->ds_clp,
                                &wdata->args.seq_args,
                                &wdata->res.seq_res,
                                task))
                return;
        ff_layout_commit_prepare_common(task, data);
}

static void ff_layout_commit_done(struct rpc_task *task, void *data)
{
        pnfs_generic_write_commit_done(task, data);
}

static void ff_layout_commit_count_stats(struct rpc_task *task, void *data)
{
        struct nfs_commit_data *cdata = data;

        ff_layout_commit_record_layoutstats_done(task, cdata);
        rpc_count_iostats_metrics(task,
            &NFS_CLIENT(cdata->inode)->cl_metrics[NFSPROC4_CLNT_COMMIT]);
}

static void ff_layout_commit_release(void *data)
{
        struct nfs_commit_data *cdata = data;

        ff_layout_commit_record_layoutstats_done(&cdata->task, cdata);
        pnfs_generic_commit_release(data);
}

static const struct rpc_call_ops ff_layout_read_call_ops_v3 = {
        .rpc_call_prepare = ff_layout_read_prepare_v3,
        .rpc_call_done = ff_layout_read_call_done,
        .rpc_count_stats = ff_layout_read_count_stats,
        .rpc_release = ff_layout_read_release,
};

static const struct rpc_call_ops ff_layout_read_call_ops_v4 = {
        .rpc_call_prepare = ff_layout_read_prepare_v4,
        .rpc_call_done = ff_layout_read_call_done,
        .rpc_count_stats = ff_layout_read_count_stats,
        .rpc_release = ff_layout_read_release,
};

static const struct rpc_call_ops ff_layout_write_call_ops_v3 = {
        .rpc_call_prepare = ff_layout_write_prepare_v3,
        .rpc_call_done = ff_layout_write_call_done,
        .rpc_count_stats = ff_layout_write_count_stats,
        .rpc_release = ff_layout_write_release,
};

static const struct rpc_call_ops ff_layout_write_call_ops_v4 = {
        .rpc_call_prepare = ff_layout_write_prepare_v4,
        .rpc_call_done = ff_layout_write_call_done,
        .rpc_count_stats = ff_layout_write_count_stats,
        .rpc_release = ff_layout_write_release,
};

static const struct rpc_call_ops ff_layout_commit_call_ops_v3 = {
        .rpc_call_prepare = ff_layout_commit_prepare_v3,
        .rpc_call_done = ff_layout_commit_done,
        .rpc_count_stats = ff_layout_commit_count_stats,
        .rpc_release = ff_layout_commit_release,
};

static const struct rpc_call_ops ff_layout_commit_call_ops_v4 = {
        .rpc_call_prepare = ff_layout_commit_prepare_v4,
        .rpc_call_done = ff_layout_commit_done,
        .rpc_count_stats = ff_layout_commit_count_stats,
        .rpc_release = ff_layout_commit_release,
};
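
/*
 * Issue an asynchronous READ to the data server selected for the current
 * mirror.  If no usable DS, DS client or DS credential can be set up,
 * return PNFS_TRY_AGAIN or PNFS_NOT_ATTEMPTED so the caller can retry
 * through pNFS or fall back to the MDS.
 */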
static enum pnfs_try_status
ff_layout_read_pagelist(struct nfs_pgio_header *hdr)
{
        struct pnfs_layout_segment *lseg = hdr->lseg;
        struct nfs4_pnfs_ds *ds;
        struct rpc_clnt *ds_clnt;
        struct nfs4_ff_layout_mirror *mirror;
        const struct cred *ds_cred;
        loff_t offset = hdr->args.offset;
        u32 idx = hdr->pgio_mirror_idx;
        int vers;
        struct nfs_fh *fh;

        dprintk("--> %s ino %lu pgbase %u req %zu@%llu\n",
                __func__, hdr->inode->i_ino,
                hdr->args.pgbase, (size_t)hdr->args.count, offset);

        mirror = FF_LAYOUT_COMP(lseg, idx);
        ds = nfs4_ff_layout_prepare_ds(lseg, mirror, false);
        if (!ds)
                goto out_failed;

        ds_clnt = nfs4_ff_find_or_create_ds_client(mirror, ds->ds_clp,
                                                   hdr->inode);
        if (IS_ERR(ds_clnt))
                goto out_failed;

        ds_cred = ff_layout_get_ds_cred(mirror, &lseg->pls_range, hdr->cred);
        if (!ds_cred)
                goto out_failed;

        vers = nfs4_ff_layout_ds_version(mirror);

        dprintk("%s USE DS: %s cl_count %d vers %d\n", __func__,
                ds->ds_remotestr, refcount_read(&ds->ds_clp->cl_count), vers);

        hdr->pgio_done_cb = ff_layout_read_done_cb;
        refcount_inc(&ds->ds_clp->cl_count);
        hdr->ds_clp = ds->ds_clp;
        fh = nfs4_ff_layout_select_ds_fh(mirror);
        if (fh)
                hdr->args.fh = fh;

        nfs4_ff_layout_select_ds_stateid(mirror, &hdr->args.stateid);

        /*
         * Note that if we ever decide to split across DSes,
         * then we may need to handle dense-like offsets.
         */
        hdr->args.offset = offset;
        hdr->mds_offset = offset;

        /* Perform an asynchronous read to ds */
        nfs_initiate_pgio(ds_clnt, hdr, ds_cred, ds->ds_clp->rpc_ops,
                          vers == 3 ? &ff_layout_read_call_ops_v3 :
                                      &ff_layout_read_call_ops_v4,
                          0, RPC_TASK_SOFTCONN);
        put_cred(ds_cred);
        return PNFS_ATTEMPTED;

out_failed:
        if (ff_layout_avoid_mds_available_ds(lseg))
                return PNFS_TRY_AGAIN;
        trace_pnfs_mds_fallback_read_pagelist(hdr->inode,
                        hdr->args.offset, hdr->args.count,
                        IOMODE_READ, NFS_I(hdr->inode)->layout, lseg);
        return PNFS_NOT_ATTEMPTED;
}

/* Perform async writes. */
static enum pnfs_try_status
ff_layout_write_pagelist(struct nfs_pgio_header *hdr, int sync)
{
        struct pnfs_layout_segment *lseg = hdr->lseg;
        struct nfs4_pnfs_ds *ds;
        struct rpc_clnt *ds_clnt;
        struct nfs4_ff_layout_mirror *mirror;
        const struct cred *ds_cred;
        loff_t offset = hdr->args.offset;
        int vers;
        struct nfs_fh *fh;
        u32 idx = hdr->pgio_mirror_idx;

        mirror = FF_LAYOUT_COMP(lseg, idx);
        ds = nfs4_ff_layout_prepare_ds(lseg, mirror, true);
        if (!ds)
                goto out_failed;

        ds_clnt = nfs4_ff_find_or_create_ds_client(mirror, ds->ds_clp,
                                                   hdr->inode);
        if (IS_ERR(ds_clnt))
                goto out_failed;

        ds_cred = ff_layout_get_ds_cred(mirror, &lseg->pls_range, hdr->cred);
        if (!ds_cred)
                goto out_failed;

        vers = nfs4_ff_layout_ds_version(mirror);

        dprintk("%s ino %lu sync %d req %zu@%llu DS: %s cl_count %d vers %d\n",
                __func__, hdr->inode->i_ino, sync, (size_t) hdr->args.count,
                offset, ds->ds_remotestr, refcount_read(&ds->ds_clp->cl_count),
                vers);

        hdr->pgio_done_cb = ff_layout_write_done_cb;
        refcount_inc(&ds->ds_clp->cl_count);
        hdr->ds_clp = ds->ds_clp;
        hdr->ds_commit_idx = idx;
        fh = nfs4_ff_layout_select_ds_fh(mirror);
        if (fh)
                hdr->args.fh = fh;

        nfs4_ff_layout_select_ds_stateid(mirror, &hdr->args.stateid);

        /*
         * Note that if we ever decide to split across DSes,
         * then we may need to handle dense-like offsets.
         */
        hdr->args.offset = offset;

        /* Perform an asynchronous write */
        nfs_initiate_pgio(ds_clnt, hdr, ds_cred, ds->ds_clp->rpc_ops,
                          vers == 3 ? &ff_layout_write_call_ops_v3 :
                                      &ff_layout_write_call_ops_v4,
                          sync, RPC_TASK_SOFTCONN);
        put_cred(ds_cred);
        return PNFS_ATTEMPTED;

out_failed:
        if (ff_layout_avoid_mds_available_ds(lseg))
                return PNFS_TRY_AGAIN;
        trace_pnfs_mds_fallback_write_pagelist(hdr->inode,
                        hdr->args.offset, hdr->args.count,
                        IOMODE_RW, NFS_I(hdr->inode)->layout, lseg);
        return PNFS_NOT_ATTEMPTED;
}

static u32 calc_ds_index_from_commit(struct pnfs_layout_segment *lseg, u32 i)
{
        return i;
}

static struct nfs_fh *
select_ds_fh_from_commit(struct pnfs_layout_segment *lseg, u32 i)
{
        struct nfs4_ff_layout_segment *flseg = FF_LAYOUT_LSEG(lseg);

        /* FIXME: Assume that there is only one NFS version available
         * for the DS.
         */
        return &flseg->mirror_array[i]->fh_versions[0];
}
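
/*
 * Send a COMMIT to the data server that holds the unstable writes for this
 * commit bucket.  Any setup failure requeues the writes for resending
 * through the MDS.
 */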
static int ff_layout_initiate_commit(struct nfs_commit_data *data, int how)
{
        struct pnfs_layout_segment *lseg = data->lseg;
        struct nfs4_pnfs_ds *ds;
        struct rpc_clnt *ds_clnt;
        struct nfs4_ff_layout_mirror *mirror;
        const struct cred *ds_cred;
        u32 idx;
        int vers, ret;
        struct nfs_fh *fh;

        if (!lseg || !(pnfs_is_valid_lseg(lseg) ||
            test_bit(NFS_LSEG_LAYOUTRETURN, &lseg->pls_flags)))
                goto out_err;

        idx = calc_ds_index_from_commit(lseg, data->ds_commit_index);
        mirror = FF_LAYOUT_COMP(lseg, idx);
        ds = nfs4_ff_layout_prepare_ds(lseg, mirror, true);
        if (!ds)
                goto out_err;

        ds_clnt = nfs4_ff_find_or_create_ds_client(mirror, ds->ds_clp,
                                                   data->inode);
        if (IS_ERR(ds_clnt))
                goto out_err;

        ds_cred = ff_layout_get_ds_cred(mirror, &lseg->pls_range, data->cred);
        if (!ds_cred)
                goto out_err;

        vers = nfs4_ff_layout_ds_version(mirror);

        dprintk("%s ino %lu, how %d cl_count %d vers %d\n", __func__,
                data->inode->i_ino, how, refcount_read(&ds->ds_clp->cl_count),
                vers);
        data->commit_done_cb = ff_layout_commit_done_cb;
        data->cred = ds_cred;
        refcount_inc(&ds->ds_clp->cl_count);
        data->ds_clp = ds->ds_clp;
        fh = select_ds_fh_from_commit(lseg, data->ds_commit_index);
        if (fh)
                data->args.fh = fh;

        ret = nfs_initiate_commit(ds_clnt, data, ds->ds_clp->rpc_ops,
                                  vers == 3 ? &ff_layout_commit_call_ops_v3 :
                                              &ff_layout_commit_call_ops_v4,
                                  how, RPC_TASK_SOFTCONN);
        put_cred(ds_cred);
        return ret;
out_err:
        pnfs_generic_prepare_to_resend_writes(data);
        pnfs_generic_commit_release(data);
        return -EAGAIN;
}

static int
ff_layout_commit_pagelist(struct inode *inode, struct list_head *mds_pages,
                          int how, struct nfs_commit_info *cinfo)
{
        return pnfs_generic_commit_pagelist(inode, mds_pages, how, cinfo,
                                            ff_layout_initiate_commit);
}
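
/*
 * Helpers used by ff_layout_cancel_io() to decide whether an RPC task
 * queued on a DS client belongs to the layout segment being cancelled.
 */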
static bool ff_layout_match_rw(const struct rpc_task *task,
                               const struct nfs_pgio_header *hdr,
                               const struct pnfs_layout_segment *lseg)
{
        return hdr->lseg == lseg;
}

static bool ff_layout_match_commit(const struct rpc_task *task,
                                   const struct nfs_commit_data *cdata,
                                   const struct pnfs_layout_segment *lseg)
{
        return cdata->lseg == lseg;
}

static bool ff_layout_match_io(const struct rpc_task *task, const void *data)
{
        const struct rpc_call_ops *ops = task->tk_ops;

        if (ops == &ff_layout_read_call_ops_v3 ||
            ops == &ff_layout_read_call_ops_v4 ||
            ops == &ff_layout_write_call_ops_v3 ||
            ops == &ff_layout_write_call_ops_v4)
                return ff_layout_match_rw(task, task->tk_calldata, data);
        if (ops == &ff_layout_commit_call_ops_v3 ||
            ops == &ff_layout_commit_call_ops_v4)
                return ff_layout_match_commit(task, task->tk_calldata, data);
        return false;
}
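
/*
 * Cancel all in-flight I/O to every data server referenced by this layout
 * segment, and force the affected RPC clients to disconnect.
 */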
static void ff_layout_cancel_io(struct pnfs_layout_segment *lseg)
{
        struct nfs4_ff_layout_segment *flseg = FF_LAYOUT_LSEG(lseg);
        struct nfs4_ff_layout_mirror *mirror;
        struct nfs4_ff_layout_ds *mirror_ds;
        struct nfs4_pnfs_ds *ds;
        struct nfs_client *ds_clp;
        struct rpc_clnt *clnt;
        u32 idx;

        for (idx = 0; idx < flseg->mirror_array_cnt; idx++) {
                mirror = flseg->mirror_array[idx];
                mirror_ds = mirror->mirror_ds;
                if (!mirror_ds)
                        continue;
                ds = mirror->mirror_ds->ds;
                if (!ds)
                        continue;
                ds_clp = ds->ds_clp;
                if (!ds_clp)
                        continue;
                clnt = ds_clp->cl_rpcclient;
                if (!clnt)
                        continue;
                if (!rpc_cancel_tasks(clnt, -EAGAIN, ff_layout_match_io, lseg))
                        continue;
                rpc_clnt_disconnect(clnt);
        }
}

static struct pnfs_ds_commit_info *
ff_layout_get_ds_info(struct inode *inode)
{
        struct pnfs_layout_hdr *layout = NFS_I(inode)->layout;

        if (layout == NULL)
                return NULL;

        return &FF_LAYOUT_FROM_HDR(layout)->commit_info;
}

static void
ff_layout_setup_ds_info(struct pnfs_ds_commit_info *fl_cinfo,
                        struct pnfs_layout_segment *lseg)
{
        struct nfs4_ff_layout_segment *flseg = FF_LAYOUT_LSEG(lseg);
        struct inode *inode = lseg->pls_layout->plh_inode;
        struct pnfs_commit_array *array, *new;

        new = pnfs_alloc_commit_array(flseg->mirror_array_cnt,
                                      nfs_io_gfp_mask());
        if (new) {
                spin_lock(&inode->i_lock);
                array = pnfs_add_commit_array(fl_cinfo, new, lseg);
                spin_unlock(&inode->i_lock);
                if (array != new)
                        pnfs_free_commit_array(new);
        }
}

static void
ff_layout_release_ds_info(struct pnfs_ds_commit_info *fl_cinfo,
                          struct inode *inode)
{
        spin_lock(&inode->i_lock);
        pnfs_generic_ds_cinfo_destroy(fl_cinfo);
        spin_unlock(&inode->i_lock);
}

static void
ff_layout_free_deviceid_node(struct nfs4_deviceid_node *d)
{
        nfs4_ff_layout_free_deviceid(container_of(d, struct nfs4_ff_layout_ds,
                                                  id_node));
}
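
/*
 * XDR helpers for building the flexfiles LAYOUTRETURN body: the array of
 * per-DS I/O errors followed by the per-mirror iostats array.
 */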
static int ff_layout_encode_ioerr(struct xdr_stream *xdr,
                                  const struct nfs4_layoutreturn_args *args,
                                  const struct nfs4_flexfile_layoutreturn_args *ff_args)
{
        __be32 *start;

        start = xdr_reserve_space(xdr, 4);
        if (unlikely(!start))
                return -E2BIG;

        *start = cpu_to_be32(ff_args->num_errors);
        /* This assume we always return _ALL_ layouts */
        return ff_layout_encode_ds_ioerr(xdr, &ff_args->errors);
}

static void
encode_opaque_fixed(struct xdr_stream *xdr, const void *buf, size_t len)
{
        WARN_ON_ONCE(xdr_stream_encode_opaque_fixed(xdr, buf, len) < 0);
}

static void
ff_layout_encode_ff_iostat_head(struct xdr_stream *xdr,
                                const nfs4_stateid *stateid,
                                const struct nfs42_layoutstat_devinfo *devinfo)
{
        __be32 *p;

        p = xdr_reserve_space(xdr, 8 + 8);
        p = xdr_encode_hyper(p, devinfo->offset);
        p = xdr_encode_hyper(p, devinfo->length);
        encode_opaque_fixed(xdr, stateid->data, NFS4_STATEID_SIZE);
        p = xdr_reserve_space(xdr, 4*8);
        p = xdr_encode_hyper(p, devinfo->read_count);
        p = xdr_encode_hyper(p, devinfo->read_bytes);
        p = xdr_encode_hyper(p, devinfo->write_count);
        p = xdr_encode_hyper(p, devinfo->write_bytes);
        encode_opaque_fixed(xdr, devinfo->dev_id.data, NFS4_DEVICEID4_SIZE);
}

static void
ff_layout_encode_ff_iostat(struct xdr_stream *xdr,
                           const nfs4_stateid *stateid,
                           const struct nfs42_layoutstat_devinfo *devinfo)
{
        ff_layout_encode_ff_iostat_head(xdr, stateid, devinfo);
        ff_layout_encode_ff_layoutupdate(xdr, devinfo,
                        devinfo->ld_private.data);
}

/* report nothing for now */
static void ff_layout_encode_iostats_array(struct xdr_stream *xdr,
                const struct nfs4_layoutreturn_args *args,
                struct nfs4_flexfile_layoutreturn_args *ff_args)
{
        __be32 *p;
        int i;

        p = xdr_reserve_space(xdr, 4);
        *p = cpu_to_be32(ff_args->num_dev);
        for (i = 0; i < ff_args->num_dev; i++)
                ff_layout_encode_ff_iostat(xdr,
                                &args->layout->plh_stateid,
                                &ff_args->devinfo[i]);
}

static void
ff_layout_free_iostats_array(struct nfs42_layoutstat_devinfo *devinfo,
                unsigned int num_entries)
{
        unsigned int i;

        for (i = 0; i < num_entries; i++) {
                if (!devinfo[i].ld_private.ops)
                        continue;
                if (!devinfo[i].ld_private.ops->free)
                        continue;
                devinfo[i].ld_private.ops->free(&devinfo[i].ld_private);
        }
}

static struct nfs4_deviceid_node *
ff_layout_alloc_deviceid_node(struct nfs_server *server,
                              struct pnfs_device *pdev, gfp_t gfp_flags)
{
        struct nfs4_ff_layout_ds *dsaddr;

        dsaddr = nfs4_ff_alloc_deviceid_node(server, pdev, gfp_flags);
        if (!dsaddr)
                return NULL;

        return &dsaddr->id_node;
}
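
/*
 * Encode the opaque lrf_body of a LAYOUTRETURN.  The error and iostats
 * arrays are first encoded into a scratch page, then written into the
 * outgoing XDR stream as a length-prefixed opaque blob.
 */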
static void
ff_layout_encode_layoutreturn(struct xdr_stream *xdr,
                const void *voidargs,
                const struct nfs4_xdr_opaque_data *ff_opaque)
{
        const struct nfs4_layoutreturn_args *args = voidargs;
        struct nfs4_flexfile_layoutreturn_args *ff_args = ff_opaque->data;
        struct xdr_buf tmp_buf = {
                .head = {
                        [0] = {
                                .iov_base = page_address(ff_args->pages[0]),
                        },
                },
                .buflen = PAGE_SIZE,
        };
        struct xdr_stream tmp_xdr;
        __be32 *start;

        dprintk("%s: Begin\n", __func__);

        xdr_init_encode(&tmp_xdr, &tmp_buf, NULL, NULL);

        ff_layout_encode_ioerr(&tmp_xdr, args, ff_args);
        ff_layout_encode_iostats_array(&tmp_xdr, args, ff_args);

        start = xdr_reserve_space(xdr, 4);
        *start = cpu_to_be32(tmp_buf.len);
        xdr_write_pages(xdr, ff_args->pages, 0, tmp_buf.len);

        dprintk("%s: Return\n", __func__);
}

static void
ff_layout_free_layoutreturn(struct nfs4_xdr_opaque_data *args)
{
        struct nfs4_flexfile_layoutreturn_args *ff_args;

        if (!args->data)
                return;
        ff_args = args->data;
        args->data = NULL;

        ff_layout_free_ds_ioerr(&ff_args->errors);
        ff_layout_free_iostats_array(ff_args->devinfo, ff_args->num_dev);

        put_page(ff_args->pages[0]);
        kfree(ff_args);
}

static const struct nfs4_xdr_opaque_ops layoutreturn_ops = {
        .encode = ff_layout_encode_layoutreturn,
        .free = ff_layout_free_layoutreturn,
};
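
/*
 * Gather the DS error list and the per-mirror statistics that will be
 * returned in the body of a LAYOUTRETURN.
 */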
static int
ff_layout_prepare_layoutreturn(struct nfs4_layoutreturn_args *args)
{
        struct nfs4_flexfile_layoutreturn_args *ff_args;
        struct nfs4_flexfile_layout *ff_layout = FF_LAYOUT_FROM_HDR(args->layout);

        ff_args = kmalloc(sizeof(*ff_args), nfs_io_gfp_mask());
        if (!ff_args)
                goto out_nomem;
        ff_args->pages[0] = alloc_page(nfs_io_gfp_mask());
        if (!ff_args->pages[0])
                goto out_nomem_free;

        INIT_LIST_HEAD(&ff_args->errors);
        ff_args->num_errors = ff_layout_fetch_ds_ioerr(args->layout,
                        &args->range, &ff_args->errors,
                        FF_LAYOUTRETURN_MAXERR);

        spin_lock(&args->inode->i_lock);
        ff_args->num_dev = ff_layout_mirror_prepare_stats(
                &ff_layout->generic_hdr, &ff_args->devinfo[0],
                ARRAY_SIZE(ff_args->devinfo), NFS4_FF_OP_LAYOUTRETURN);
        spin_unlock(&args->inode->i_lock);

        args->ld_private->ops = &layoutreturn_ops;
        args->ld_private->data = ff_args;
        return 0;
out_nomem_free:
        kfree(ff_args);
out_nomem:
        return -ENOMEM;
}
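
/*
 * Report accumulated DS errors to the server via LAYOUTERROR (NFSv4.2
 * only), batching at most NFS42_LAYOUTERROR_MAX entries per call.
 */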
#ifdef CONFIG_NFS_V4_2
void
ff_layout_send_layouterror(struct pnfs_layout_segment *lseg)
{
        struct pnfs_layout_hdr *lo = lseg->pls_layout;
        struct nfs42_layout_error *errors;
        LIST_HEAD(head);

        if (!nfs_server_capable(lo->plh_inode, NFS_CAP_LAYOUTERROR))
                return;
        ff_layout_fetch_ds_ioerr(lo, &lseg->pls_range, &head, -1);
        if (list_empty(&head))
                return;

        errors = kmalloc_array(NFS42_LAYOUTERROR_MAX, sizeof(*errors),
                               nfs_io_gfp_mask());
        if (errors != NULL) {
                const struct nfs4_ff_layout_ds_err *pos;
                size_t n = 0;

                list_for_each_entry(pos, &head, list) {
                        errors[n].offset = pos->offset;
                        errors[n].length = pos->length;
                        nfs4_stateid_copy(&errors[n].stateid, &pos->stateid);
                        errors[n].errors[0].dev_id = pos->deviceid;
                        errors[n].errors[0].status = pos->status;
                        errors[n].errors[0].opnum = pos->opnum;
                        n++;
                        if (!list_is_last(&pos->list, &head) &&
                            n < NFS42_LAYOUTERROR_MAX)
                                continue;
                        if (nfs42_proc_layouterror(lseg, errors, n) < 0)
                                break;
                        n = 0;
                }
                kfree(errors);
        }
        ff_layout_free_ds_ioerr(&head);
}
#else
void
ff_layout_send_layouterror(struct pnfs_layout_segment *lseg)
{
}
#endif

static int
ff_layout_ntop4(const struct sockaddr *sap, char *buf, const size_t buflen)
{
        const struct sockaddr_in *sin = (struct sockaddr_in *)sap;

        return snprintf(buf, buflen, "%pI4", &sin->sin_addr);
}

static size_t
ff_layout_ntop6_noscopeid(const struct sockaddr *sap, char *buf,
                          const int buflen)
{
        const struct sockaddr_in6 *sin6 = (struct sockaddr_in6 *)sap;
        const struct in6_addr *addr = &sin6->sin6_addr;

        /*
         * RFC 4291, Section 2.2.2
         *
         * Shorthanded ANY address
         */
        if (ipv6_addr_any(addr))
                return snprintf(buf, buflen, "::");

        /*
         * RFC 4291, Section 2.2.2
         *
         * Shorthanded loopback address
         */
        if (ipv6_addr_loopback(addr))
                return snprintf(buf, buflen, "::1");

        /*
         * RFC 4291, Section 2.2.3
         *
         * Special presentation address format for mapped v4
         * addresses.
         */
        if (ipv6_addr_v4mapped(addr))
                return snprintf(buf, buflen, "::ffff:%pI4",
                                &addr->s6_addr32[3]);

        /*
         * RFC 4291, Section 2.2.1
         */
        return snprintf(buf, buflen, "%pI6c", addr);
}

/* Derived from rpc_sockaddr2uaddr */
static void
ff_layout_encode_netaddr(struct xdr_stream *xdr, struct nfs4_pnfs_ds_addr *da)
{
        struct sockaddr *sap = (struct sockaddr *)&da->da_addr;
        char portbuf[RPCBIND_MAXUADDRPLEN];
        char addrbuf[RPCBIND_MAXUADDRLEN];
        unsigned short port;
        int len, netid_len;
        __be32 *p;

        switch (sap->sa_family) {
        case AF_INET:
                if (ff_layout_ntop4(sap, addrbuf, sizeof(addrbuf)) == 0)
                        return;
                port = ntohs(((struct sockaddr_in *)sap)->sin_port);
                break;
        case AF_INET6:
                if (ff_layout_ntop6_noscopeid(sap, addrbuf, sizeof(addrbuf)) == 0)
                        return;
                port = ntohs(((struct sockaddr_in6 *)sap)->sin6_port);
                break;
        default:
                WARN_ON_ONCE(1);
                return;
        }

        snprintf(portbuf, sizeof(portbuf), ".%u.%u", port >> 8, port & 0xff);
        len = strlcat(addrbuf, portbuf, sizeof(addrbuf));

        netid_len = strlen(da->da_netid);
        p = xdr_reserve_space(xdr, 4 + netid_len);
        xdr_encode_opaque(p, da->da_netid, netid_len);

        p = xdr_reserve_space(xdr, 4 + len);
        xdr_encode_opaque(p, addrbuf, len);
}

static void
ff_layout_encode_nfstime(struct xdr_stream *xdr,
                         ktime_t t)
{
        struct timespec64 ts;
        __be32 *p;

        p = xdr_reserve_space(xdr, 12);
        ts = ktime_to_timespec64(t);
        p = xdr_encode_hyper(p, ts.tv_sec);
        *p++ = cpu_to_be32(ts.tv_nsec);
}

static void
ff_layout_encode_io_latency(struct xdr_stream *xdr,
                            struct nfs4_ff_io_stat *stat)
{
        __be32 *p;

        p = xdr_reserve_space(xdr, 5 * 8);
        p = xdr_encode_hyper(p, stat->ops_requested);
        p = xdr_encode_hyper(p, stat->bytes_requested);
        p = xdr_encode_hyper(p, stat->ops_completed);
        p = xdr_encode_hyper(p, stat->bytes_completed);
        p = xdr_encode_hyper(p, stat->bytes_not_delivered);
        ff_layout_encode_nfstime(xdr, stat->total_busy_time);
        ff_layout_encode_nfstime(xdr, stat->aggregate_completion_time);
}

static void
ff_layout_encode_ff_layoutupdate(struct xdr_stream *xdr,
                              const struct nfs42_layoutstat_devinfo *devinfo,
                              struct nfs4_ff_layout_mirror *mirror)
{
        struct nfs4_pnfs_ds_addr *da;
        struct nfs4_pnfs_ds *ds = mirror->mirror_ds->ds;
        struct nfs_fh *fh = &mirror->fh_versions[0];
        __be32 *p;

        da = list_first_entry(&ds->ds_addrs, struct nfs4_pnfs_ds_addr, da_node);
        dprintk("%s: DS %s: encoding address %s\n",
                __func__, ds->ds_remotestr, da->da_remotestr);
        /* netaddr4 */
        ff_layout_encode_netaddr(xdr, da);
        /* nfs_fh4 */
        p = xdr_reserve_space(xdr, 4 + fh->size);
        xdr_encode_opaque(p, fh->data, fh->size);
        /* ff_io_latency4 read */
        spin_lock(&mirror->lock);
        ff_layout_encode_io_latency(xdr, &mirror->read_stat.io_stat);
        /* ff_io_latency4 write */
        ff_layout_encode_io_latency(xdr, &mirror->write_stat.io_stat);
        spin_unlock(&mirror->lock);
        /* nfstime4 */
        ff_layout_encode_nfstime(xdr, ktime_sub(ktime_get(), mirror->start_time));
        /* bool */
        p = xdr_reserve_space(xdr, 4);
        *p = cpu_to_be32(false);
}
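
/*
 * Encode the layoutupdate4 body for a single mirror, prefixed with its
 * length in bytes.
 */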
static void
ff_layout_encode_layoutstats(struct xdr_stream *xdr, const void *args,
                             const struct nfs4_xdr_opaque_data *opaque)
{
        struct nfs42_layoutstat_devinfo *devinfo = container_of(opaque,
                        struct nfs42_layoutstat_devinfo, ld_private);
        __be32 *start;

        /* layoutupdate length */
        start = xdr_reserve_space(xdr, 4);
        ff_layout_encode_ff_layoutupdate(xdr, devinfo, opaque->data);

        *start = cpu_to_be32((xdr->p - start - 1) * 4);
}

static void
ff_layout_free_layoutstats(struct nfs4_xdr_opaque_data *opaque)
{
        struct nfs4_ff_layout_mirror *mirror = opaque->data;

        ff_layout_put_mirror(mirror);
}

static const struct nfs4_xdr_opaque_ops layoutstat_ops = {
        .encode = ff_layout_encode_layoutstats,
        .free = ff_layout_free_layoutstats,
};
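
/*
 * Walk the layout's mirror list and fill in up to @dev_limit layoutstat
 * devinfo entries.  Each selected mirror has its refcount bumped; the
 * reference is dropped when the entry's ld_private data is freed.
 */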
static int
ff_layout_mirror_prepare_stats(struct pnfs_layout_hdr *lo,
                               struct nfs42_layoutstat_devinfo *devinfo,
                               int dev_limit, enum nfs4_ff_op_type type)
{
        struct nfs4_flexfile_layout *ff_layout = FF_LAYOUT_FROM_HDR(lo);
        struct nfs4_ff_layout_mirror *mirror;
        struct nfs4_deviceid_node *dev;
        int i = 0;

        list_for_each_entry(mirror, &ff_layout->mirrors, mirrors) {
                if (i >= dev_limit)
                        break;
                if (IS_ERR_OR_NULL(mirror->mirror_ds))
                        continue;
                if (!test_and_clear_bit(NFS4_FF_MIRROR_STAT_AVAIL,
                                        &mirror->flags) &&
                    type != NFS4_FF_OP_LAYOUTRETURN)
                        continue;
                /* mirror refcount put in cleanup_layoutstats */
                if (!refcount_inc_not_zero(&mirror->ref))
                        continue;
                dev = &mirror->mirror_ds->id_node;
                memcpy(&devinfo->dev_id, &dev->deviceid, NFS4_DEVICEID4_SIZE);
                devinfo->offset = 0;
                devinfo->length = NFS4_MAX_UINT64;
                spin_lock(&mirror->lock);
                devinfo->read_count = mirror->read_stat.io_stat.ops_completed;
                devinfo->read_bytes = mirror->read_stat.io_stat.bytes_completed;
                devinfo->write_count = mirror->write_stat.io_stat.ops_completed;
                devinfo->write_bytes = mirror->write_stat.io_stat.bytes_completed;
                spin_unlock(&mirror->lock);
                devinfo->layout_type = LAYOUT_FLEX_FILES;
                devinfo->ld_private.ops = &layoutstat_ops;
                devinfo->ld_private.data = mirror;

                devinfo++;
                i++;
        }
        return i;
}
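
/*
 * Allocate and fill the devinfo array for a LAYOUTSTATS call.  Returns
 * -ENOENT when there is nothing to report so the caller can skip the RPC.
 */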
static int ff_layout_prepare_layoutstats(struct nfs42_layoutstat_args *args)
{
        struct pnfs_layout_hdr *lo;
        struct nfs4_flexfile_layout *ff_layout;
        const int dev_count = PNFS_LAYOUTSTATS_MAXDEV;

        /* For now, send at most PNFS_LAYOUTSTATS_MAXDEV statistics */
        args->devinfo = kmalloc_array(dev_count, sizeof(*args->devinfo),
                                      nfs_io_gfp_mask());
        if (!args->devinfo)
                return -ENOMEM;

        spin_lock(&args->inode->i_lock);
        lo = NFS_I(args->inode)->layout;
        if (lo && pnfs_layout_is_valid(lo)) {
                ff_layout = FF_LAYOUT_FROM_HDR(lo);
                args->num_dev = ff_layout_mirror_prepare_stats(
                        &ff_layout->generic_hdr, &args->devinfo[0], dev_count,
                        NFS4_FF_OP_LAYOUTSTATS);
        } else
                args->num_dev = 0;
        spin_unlock(&args->inode->i_lock);
        if (!args->num_dev) {
                kfree(args->devinfo);
                args->devinfo = NULL;
                return -ENOENT;
        }

        return 0;
}

static int
ff_layout_set_layoutdriver(struct nfs_server *server,
                const struct nfs_fh *dummy)
{
#if IS_ENABLED(CONFIG_NFS_V4_2)
        server->caps |= NFS_CAP_LAYOUTSTATS;
#endif
        return 0;
}

static const struct pnfs_commit_ops ff_layout_commit_ops = {
        .setup_ds_info = ff_layout_setup_ds_info,
        .release_ds_info = ff_layout_release_ds_info,
        .mark_request_commit = pnfs_layout_mark_request_commit,
        .clear_request_commit = pnfs_generic_clear_request_commit,
        .scan_commit_lists = pnfs_generic_scan_commit_lists,
        .recover_commit_reqs = pnfs_generic_recover_commit_reqs,
        .commit_pagelist = ff_layout_commit_pagelist,
};

static struct pnfs_layoutdriver_type flexfilelayout_type = {
        .id = LAYOUT_FLEX_FILES,
        .name = "LAYOUT_FLEX_FILES",
        .owner = THIS_MODULE,
        .flags = PNFS_LAYOUTGET_ON_OPEN,
        .max_layoutget_response = 4096, /* 1 page or so... */
        .set_layoutdriver = ff_layout_set_layoutdriver,
        .alloc_layout_hdr = ff_layout_alloc_layout_hdr,
        .free_layout_hdr = ff_layout_free_layout_hdr,
        .alloc_lseg = ff_layout_alloc_lseg,
        .free_lseg = ff_layout_free_lseg,
        .add_lseg = ff_layout_add_lseg,
        .pg_read_ops = &ff_layout_pg_read_ops,
        .pg_write_ops = &ff_layout_pg_write_ops,
        .get_ds_info = ff_layout_get_ds_info,
        .free_deviceid_node = ff_layout_free_deviceid_node,
        .read_pagelist = ff_layout_read_pagelist,
        .write_pagelist = ff_layout_write_pagelist,
        .alloc_deviceid_node = ff_layout_alloc_deviceid_node,
        .prepare_layoutreturn = ff_layout_prepare_layoutreturn,
        .sync = pnfs_nfs_generic_sync,
        .prepare_layoutstats = ff_layout_prepare_layoutstats,
        .cancel_io = ff_layout_cancel_io,
};

static int __init nfs4flexfilelayout_init(void)
{
        printk(KERN_INFO "%s: NFSv4 Flexfile Layout Driver Registering...\n",
               __func__);
        return pnfs_register_layoutdriver(&flexfilelayout_type);
}

static void __exit nfs4flexfilelayout_exit(void)
{
        printk(KERN_INFO "%s: NFSv4 Flexfile Layout Driver Unregistering...\n",
               __func__);
        pnfs_unregister_layoutdriver(&flexfilelayout_type);
}

MODULE_ALIAS("nfs-layouttype4-4");

MODULE_LICENSE("GPL");
MODULE_DESCRIPTION("The NFSv4 flexfile layout driver");

module_init(nfs4flexfilelayout_init);
module_exit(nfs4flexfilelayout_exit);

module_param(io_maxretrans, ushort, 0644);
MODULE_PARM_DESC(io_maxretrans, "The number of times the NFSv4.1 client "
                        "retries an I/O request before returning an error. ");