nfs42proc.c 37 KB

12345678910111213141516171819202122232425262728293031323334353637383940414243444546474849505152535455565758596061626364656667686970717273747576777879808182838485868788899091929394959697989910010110210310410510610710810911011111211311411511611711811912012112212312412512612712812913013113213313413513613713813914014114214314414514614714814915015115215315415515615715815916016116216316416516616716816917017117217317417517617717817918018118218318418518618718818919019119219319419519619719819920020120220320420520620720820921021121221321421521621721821922022122222322422522622722822923023123223323423523623723823924024124224324424524624724824925025125225325425525625725825926026126226326426526626726826927027127227327427527627727827928028128228328428528628728828929029129229329429529629729829930030130230330430530630730830931031131231331431531631731831932032132232332432532632732832933033133233333433533633733833934034134234334434534634734834935035135235335435535635735835936036136236336436536636736836937037137237337437537637737837938038138238338438538638738838939039139239339439539639739839940040140240340440540640740840941041141241341441541641741841942042142242342442542642742842943043143243343443543643743843944044144244344444544644744844945045145245345445545645745845946046146246346446546646746846947047147247347447547647747847948048148248348448548648748848949049149249349449549649749849950050150250350450550650750850951051151251351451551651751851952052152252352452552652752852953053153253353453553653753853954054154254354454554654754854955055155255355455555655755855956056156256356456556656756856957057157257357457557657757857958058158258358458558658758858959059159259359459559659759859960060160260360460560660760860961061161261361461561661761861962062162262362462562662762862963063163263363463563663763863964064164264364464564664764864965065165265365465565665765865966066166266366466566666766866967067167267367467567667767867968068168268368468568668768868969069169269369469569669769869970070170270
37047057067077087097107117127137147157167177187197207217227237247257267277287297307317327337347357367377387397407417427437447457467477487497507517527537547557567577587597607617627637647657667677687697707717727737747757767777787797807817827837847857867877887897907917927937947957967977987998008018028038048058068078088098108118128138148158168178188198208218228238248258268278288298308318328338348358368378388398408418428438448458468478488498508518528538548558568578588598608618628638648658668678688698708718728738748758768778788798808818828838848858868878888898908918928938948958968978988999009019029039049059069079089099109119129139149159169179189199209219229239249259269279289299309319329339349359369379389399409419429439449459469479489499509519529539549559569579589599609619629639649659669679689699709719729739749759769779789799809819829839849859869879889899909919929939949959969979989991000100110021003100410051006100710081009101010111012101310141015101610171018101910201021102210231024102510261027102810291030103110321033103410351036103710381039104010411042104310441045104610471048104910501051105210531054105510561057105810591060106110621063106410651066106710681069107010711072107310741075107610771078107910801081108210831084108510861087108810891090109110921093109410951096109710981099110011011102110311041105110611071108110911101111111211131114111511161117111811191120112111221123112411251126112711281129113011311132113311341135113611371138113911401141114211431144114511461147114811491150115111521153115411551156115711581159116011611162116311641165116611671168116911701171117211731174117511761177117811791180118111821183118411851186118711881189119011911192119311941195119611971198119912001201120212031204120512061207120812091210121112121213121412151216121712181219122012211222122312241225122612271228122912301231123212331234123512361237123812391240124112421243124412451246124712481249125012511252125312541255125612571258125912601261126212631264126512661267126812691270127112721273127412751276127
71278127912801281128212831284128512861287128812891290129112921293129412951296129712981299130013011302130313041305130613071308130913101311131213131314131513161317131813191320132113221323132413251326132713281329133013311332133313341335133613371338133913401341134213431344134513461347134813491350135113521353135413551356135713581359136013611362136313641365136613671368136913701371137213731374137513761377137813791380138113821383138413851386138713881389139013911392139313941395139613971398139914001401140214031404140514061407140814091410141114121413141414151416141714181419142014211422142314241425142614271428142914301431143214331434143514361437143814391440144114421443
  1. // SPDX-License-Identifier: GPL-2.0
  2. /*
  3. * Copyright (c) 2014 Anna Schumaker <[email protected]>
  4. */
  5. #include <linux/fs.h>
  6. #include <linux/sunrpc/addr.h>
  7. #include <linux/sunrpc/sched.h>
  8. #include <linux/nfs.h>
  9. #include <linux/nfs3.h>
  10. #include <linux/nfs4.h>
  11. #include <linux/nfs_xdr.h>
  12. #include <linux/nfs_fs.h>
  13. #include "nfs4_fs.h"
  14. #include "nfs42.h"
  15. #include "iostat.h"
  16. #include "pnfs.h"
  17. #include "nfs4session.h"
  18. #include "internal.h"
  19. #include "delegation.h"
  20. #include "nfs4trace.h"
  21. #define NFSDBG_FACILITY NFSDBG_PROC
  22. static int nfs42_do_offload_cancel_async(struct file *dst, nfs4_stateid *std);
  23. static void nfs42_set_netaddr(struct file *filep, struct nfs42_netaddr *naddr)
  24. {
  25. struct nfs_client *clp = (NFS_SERVER(file_inode(filep)))->nfs_client;
  26. unsigned short port = 2049;
  27. rcu_read_lock();
  28. naddr->netid_len = scnprintf(naddr->netid,
  29. sizeof(naddr->netid), "%s",
  30. rpc_peeraddr2str(clp->cl_rpcclient,
  31. RPC_DISPLAY_NETID));
  32. naddr->addr_len = scnprintf(naddr->addr,
  33. sizeof(naddr->addr),
  34. "%s.%u.%u",
  35. rpc_peeraddr2str(clp->cl_rpcclient,
  36. RPC_DISPLAY_ADDR),
  37. port >> 8, port & 255);
  38. rcu_read_unlock();
  39. }
/*
 * Issue one ALLOCATE or DEALLOCATE compound (selected by the caller
 * via @msg->rpc_proc) against @filep for the range [@offset, @offset+@len).
 *
 * Returns 0 on success, otherwise a negative errno or -NFS4ERR_* value;
 * the caller (nfs42_proc_fallocate) is responsible for retries.
 */
static int _nfs42_proc_fallocate(struct rpc_message *msg, struct file *filep,
                struct nfs_lock_context *lock, loff_t offset, loff_t len)
{
        struct inode *inode = file_inode(filep);
        struct nfs_server *server = NFS_SERVER(inode);
        u32 bitmask[NFS_BITMASK_SZ];
        struct nfs42_falloc_args args = {
                .falloc_fh = NFS_FH(inode),
                .falloc_offset = offset,
                .falloc_length = len,
                .falloc_bitmask = bitmask,
        };
        struct nfs42_falloc_res res = {
                .falloc_server = server,
        };
        int status;

        msg->rpc_argp = &args;
        msg->rpc_resp = &res;

        status = nfs4_set_rw_stateid(&args.falloc_stateid, lock->open_context,
                        lock, FMODE_WRITE);
        if (status) {
                /* -EAGAIN here means the stateid raced with state
                 * recovery; map it so the exception handler retries. */
                if (status == -EAGAIN)
                        status = -NFS4ERR_BAD_STATEID;
                return status;
        }

        /* Ask for post-op attributes so block accounting is refreshed. */
        nfs4_bitmask_set(bitmask, server->cache_consistency_bitmask, inode,
                        NFS_INO_INVALID_BLOCKS);

        res.falloc_fattr = nfs_alloc_fattr();
        if (!res.falloc_fattr)
                return -ENOMEM;

        status = nfs4_call_sync(server->client, server, msg,
                        &args.seq_args, &res.seq_res, 0);
        if (status == 0) {
                /* A successful write-like op should drop setuid/setgid;
                 * force a mode revalidation if that applies here. */
                if (nfs_should_remove_suid(inode)) {
                        spin_lock(&inode->i_lock);
                        nfs_set_cache_invalid(inode,
                                NFS_INO_REVAL_FORCED | NFS_INO_INVALID_MODE);
                        spin_unlock(&inode->i_lock);
                }
                status = nfs_post_op_update_inode_force_wcc(inode,
                                res.falloc_fattr);
        }

        /* Trace under the name of the actual operation that ran. */
        if (msg->rpc_proc == &nfs4_procedures[NFSPROC4_CLNT_ALLOCATE])
                trace_nfs4_fallocate(inode, &args, status);
        else
                trace_nfs4_deallocate(inode, &args, status);
        kfree(res.falloc_fattr);
        return status;
}
  89. static int nfs42_proc_fallocate(struct rpc_message *msg, struct file *filep,
  90. loff_t offset, loff_t len)
  91. {
  92. struct inode *inode = file_inode(filep);
  93. struct nfs_server *server = NFS_SERVER(inode);
  94. struct nfs4_exception exception = { };
  95. struct nfs_lock_context *lock;
  96. int err;
  97. lock = nfs_get_lock_context(nfs_file_open_context(filep));
  98. if (IS_ERR(lock))
  99. return PTR_ERR(lock);
  100. exception.inode = inode;
  101. exception.state = lock->open_context->state;
  102. err = nfs_sync_inode(inode);
  103. if (err)
  104. goto out;
  105. do {
  106. err = _nfs42_proc_fallocate(msg, filep, lock, offset, len);
  107. if (err == -ENOTSUPP) {
  108. err = -EOPNOTSUPP;
  109. break;
  110. }
  111. err = nfs4_handle_exception(server, err, &exception);
  112. } while (exception.retry);
  113. out:
  114. nfs_put_lock_context(lock);
  115. return err;
  116. }
  117. int nfs42_proc_allocate(struct file *filep, loff_t offset, loff_t len)
  118. {
  119. struct rpc_message msg = {
  120. .rpc_proc = &nfs4_procedures[NFSPROC4_CLNT_ALLOCATE],
  121. };
  122. struct inode *inode = file_inode(filep);
  123. int err;
  124. if (!nfs_server_capable(inode, NFS_CAP_ALLOCATE))
  125. return -EOPNOTSUPP;
  126. inode_lock(inode);
  127. err = nfs42_proc_fallocate(&msg, filep, offset, len);
  128. if (err == -EOPNOTSUPP)
  129. NFS_SERVER(inode)->caps &= ~NFS_CAP_ALLOCATE;
  130. inode_unlock(inode);
  131. return err;
  132. }
  133. int nfs42_proc_deallocate(struct file *filep, loff_t offset, loff_t len)
  134. {
  135. struct rpc_message msg = {
  136. .rpc_proc = &nfs4_procedures[NFSPROC4_CLNT_DEALLOCATE],
  137. };
  138. struct inode *inode = file_inode(filep);
  139. int err;
  140. if (!nfs_server_capable(inode, NFS_CAP_DEALLOCATE))
  141. return -EOPNOTSUPP;
  142. inode_lock(inode);
  143. err = nfs42_proc_fallocate(&msg, filep, offset, len);
  144. if (err == 0)
  145. truncate_pagecache_range(inode, offset, (offset + len) -1);
  146. if (err == -EOPNOTSUPP)
  147. NFS_SERVER(inode)->caps &= ~NFS_CAP_DEALLOCATE;
  148. inode_unlock(inode);
  149. return err;
  150. }
/*
 * Wait for an asynchronous COPY to complete.
 *
 * The server reports completion via a CB_OFFLOAD callback.  Either the
 * callback already arrived — in which case the callback handler parked a
 * finished nfs4_copy_state on the client's pending_cb_stateids list and
 * we pick it up here — or it has not, in which case we publish our own
 * copy state on the server's ss_copies list(s) and sleep on its
 * completion until the callback fires.
 *
 * Returns 0 on success, -EAGAIN with *restart set when the copy must be
 * redriven, or a negative errno.
 */
static int handle_async_copy(struct nfs42_copy_res *res,
                             struct nfs_server *dst_server,
                             struct nfs_server *src_server,
                             struct file *src,
                             struct file *dst,
                             nfs4_stateid *src_stateid,
                             bool *restart)
{
        struct nfs4_copy_state *copy, *tmp_copy = NULL, *iter;
        int status = NFS4_OK;
        struct nfs_open_context *dst_ctx = nfs_file_open_context(dst);
        struct nfs_open_context *src_ctx = nfs_file_open_context(src);

        copy = kzalloc(sizeof(struct nfs4_copy_state), GFP_KERNEL);
        if (!copy)
                return -ENOMEM;

        /* Did the CB_OFFLOAD callback beat us here?  Match by stateid. */
        spin_lock(&dst_server->nfs_client->cl_lock);
        list_for_each_entry(iter,
                            &dst_server->nfs_client->pending_cb_stateids,
                            copies) {
                if (memcmp(&res->write_res.stateid, &iter->stateid,
                           NFS4_STATEID_SIZE))
                        continue;
                tmp_copy = iter;
                list_del(&iter->copies);
                break;
        }
        if (tmp_copy) {
                /* Callback already completed: use its state, drop ours. */
                spin_unlock(&dst_server->nfs_client->cl_lock);
                kfree(copy);
                copy = tmp_copy;
                goto out;
        }

        memcpy(&copy->stateid, &res->write_res.stateid, NFS4_STATEID_SIZE);
        init_completion(&copy->completion);
        copy->parent_dst_state = dst_ctx->state;
        copy->parent_src_state = src_ctx->state;

        /* Publish the pending copy so the callback handler can find it. */
        list_add_tail(&copy->copies, &dst_server->ss_copies);
        spin_unlock(&dst_server->nfs_client->cl_lock);

        /* Inter-server copy: the source server may also reference it. */
        if (dst_server != src_server) {
                spin_lock(&src_server->nfs_client->cl_lock);
                list_add_tail(&copy->src_copies, &src_server->ss_copies);
                spin_unlock(&src_server->nfs_client->cl_lock);
        }

        status = wait_for_completion_interruptible(&copy->completion);

        /* Unpublish before inspecting the result. */
        spin_lock(&dst_server->nfs_client->cl_lock);
        list_del_init(&copy->copies);
        spin_unlock(&dst_server->nfs_client->cl_lock);
        if (dst_server != src_server) {
                spin_lock(&src_server->nfs_client->cl_lock);
                list_del_init(&copy->src_copies);
                spin_unlock(&src_server->nfs_client->cl_lock);
        }
        if (status == -ERESTARTSYS) {
                /* Interrupted by a signal: cancel the offload and bail. */
                goto out_cancel;
        } else if (copy->flags || copy->error == NFS4ERR_PARTNER_NO_AUTH) {
                /* Copy was flagged (e.g. by state recovery) or the
                 * destination could not authenticate to the source:
                 * cancel and ask the caller to restart. */
                status = -EAGAIN;
                *restart = true;
                goto out_cancel;
        }
out:
        res->write_res.count = copy->count;
        memcpy(&res->write_res.verifier, &copy->verf, sizeof(copy->verf));
        status = -copy->error;
out_free:
        kfree(copy);
        return status;
out_cancel:
        nfs42_do_offload_cancel_async(dst, &copy->stateid);
        if (!nfs42_files_from_same_server(src, dst))
                nfs42_do_offload_cancel_async(src, src_stateid);
        goto out_free;
}
  223. static int process_copy_commit(struct file *dst, loff_t pos_dst,
  224. struct nfs42_copy_res *res)
  225. {
  226. struct nfs_commitres cres;
  227. int status = -ENOMEM;
  228. cres.verf = kzalloc(sizeof(struct nfs_writeverf), GFP_KERNEL);
  229. if (!cres.verf)
  230. goto out;
  231. status = nfs4_proc_commit(dst, pos_dst, res->write_res.count, &cres);
  232. if (status)
  233. goto out_free;
  234. if (nfs_write_verifier_cmp(&res->write_res.verifier.verifier,
  235. &cres.verf->verifier)) {
  236. dprintk("commit verf differs from copy verf\n");
  237. status = -EAGAIN;
  238. }
  239. out_free:
  240. kfree(cres.verf);
  241. out:
  242. return status;
  243. }
/**
 * nfs42_copy_dest_done - perform inode cache updates after clone/copy offload
 * @inode: pointer to destination inode
 * @pos: destination offset
 * @len: copy length
 *
 * Punch a hole in the inode page cache, so that the NFS client will
 * know to retrieve new data.
 * Update the file size if necessary, and then mark the inode as having
 * invalid cached values for change attribute, ctime, mtime and space used.
 */
static void nfs42_copy_dest_done(struct inode *inode, loff_t pos, loff_t len)
{
        loff_t newsize = pos + len;
        loff_t end = newsize - 1;

        /* Drop cached pages covering the written range; a failure here
         * is unexpected but not fatal, hence only a warning. */
        WARN_ON_ONCE(invalidate_inode_pages2_range(inode->i_mapping,
                                pos >> PAGE_SHIFT, end >> PAGE_SHIFT));

        spin_lock(&inode->i_lock);
        if (newsize > i_size_read(inode))
                i_size_write(inode, newsize);
        nfs_set_cache_invalid(inode, NFS_INO_INVALID_CHANGE |
                                     NFS_INO_INVALID_CTIME |
                                     NFS_INO_INVALID_MTIME |
                                     NFS_INO_INVALID_BLOCKS);
        spin_unlock(&inode->i_lock);
}
/*
 * Issue a single COPY compound from @src to @dst.
 *
 * @nss/@cnr_stateid are non-NULL for an inter-server copy that was set
 * up with COPY_NOTIFY; otherwise a local read stateid is selected.
 * Handles both synchronous and asynchronous server replies, including
 * waiting for CB_OFFLOAD and committing unstable data.
 *
 * Returns the number of bytes copied, or a negative errno/-NFS4ERR_*
 * value; the caller (nfs42_proc_copy) drives retries.
 */
static ssize_t _nfs42_proc_copy(struct file *src,
                                struct nfs_lock_context *src_lock,
                                struct file *dst,
                                struct nfs_lock_context *dst_lock,
                                struct nfs42_copy_args *args,
                                struct nfs42_copy_res *res,
                                struct nl4_server *nss,
                                nfs4_stateid *cnr_stateid,
                                bool *restart)
{
        struct rpc_message msg = {
                .rpc_proc = &nfs4_procedures[NFSPROC4_CLNT_COPY],
                .rpc_argp = args,
                .rpc_resp = res,
        };
        struct inode *dst_inode = file_inode(dst);
        struct inode *src_inode = file_inode(src);
        struct nfs_server *dst_server = NFS_SERVER(dst_inode);
        struct nfs_server *src_server = NFS_SERVER(src_inode);
        loff_t pos_src = args->src_pos;
        loff_t pos_dst = args->dst_pos;
        size_t count = args->count;
        ssize_t status;

        if (nss) {
                /* Inter-server copy: use the COPY_NOTIFY source server
                 * address and stateid supplied by the caller. */
                args->cp_src = nss;
                nfs4_stateid_copy(&args->src_stateid, cnr_stateid);
        } else {
                status = nfs4_set_rw_stateid(&args->src_stateid,
                                src_lock->open_context, src_lock, FMODE_READ);
                if (status) {
                        if (status == -EAGAIN)
                                status = -NFS4ERR_BAD_STATEID;
                        return status;
                }
        }

        /* Push any dirty source pages so the server copies current data. */
        status = nfs_filemap_write_and_wait_range(src->f_mapping,
                        pos_src, pos_src + (loff_t)count - 1);
        if (status)
                return status;

        status = nfs4_set_rw_stateid(&args->dst_stateid, dst_lock->open_context,
                                     dst_lock, FMODE_WRITE);
        if (status) {
                if (status == -EAGAIN)
                        status = -NFS4ERR_BAD_STATEID;
                return status;
        }

        status = nfs_sync_inode(dst_inode);
        if (status)
                return status;

        res->commit_res.verf = NULL;
        if (args->sync) {
                /* Synchronous copy: the reply carries a commit verifier. */
                res->commit_res.verf =
                        kzalloc(sizeof(struct nfs_writeverf), GFP_KERNEL);
                if (!res->commit_res.verf)
                        return -ENOMEM;
        }

        /* Mark both open states as participating in a server-side copy
         * so state recovery knows about the in-flight offload. */
        set_bit(NFS_CLNT_SRC_SSC_COPY_STATE,
                &src_lock->open_context->state->flags);
        set_bit(NFS_CLNT_DST_SSC_COPY_STATE,
                &dst_lock->open_context->state->flags);

        status = nfs4_call_sync(dst_server->client, dst_server, &msg,
                                &args->seq_args, &res->seq_res, 0);
        trace_nfs4_copy(src_inode, dst_inode, args, res, nss, status);
        if (status == -ENOTSUPP)
                dst_server->caps &= ~NFS_CAP_COPY;
        if (status)
                goto out;

        /* Sync copy: write and commit verifiers must agree, else the
         * server rebooted mid-copy and we must retry. */
        if (args->sync &&
                nfs_write_verifier_cmp(&res->write_res.verifier.verifier,
                                       &res->commit_res.verf->verifier)) {
                status = -EAGAIN;
                goto out;
        }

        if (!res->synchronous) {
                /* Server chose async: wait for CB_OFFLOAD completion. */
                status = handle_async_copy(res, dst_server, src_server, src,
                                dst, &args->src_stateid, restart);
                if (status)
                        goto out;
        }

        /* Commit any data the server left unstable. */
        if ((!res->synchronous || !args->sync) &&
                        res->write_res.verifier.committed != NFS_FILE_SYNC) {
                status = process_copy_commit(dst, pos_dst, res);
                if (status)
                        goto out;
        }

        nfs42_copy_dest_done(dst_inode, pos_dst, res->write_res.count);
        nfs_invalidate_atime(src_inode);
        status = res->write_res.count;
out:
        if (args->sync)
                kfree(res->commit_res.verf);
        return status;
}
/*
 * nfs42_proc_copy - top-level NFSv4.2 COPY driver.
 *
 * Sets up lock contexts and per-file exception state, then retries
 * _nfs42_proc_copy() handling the various copy-specific errors.
 * Returns bytes copied or a negative errno.
 */
ssize_t nfs42_proc_copy(struct file *src, loff_t pos_src,
                        struct file *dst, loff_t pos_dst, size_t count,
                        struct nl4_server *nss,
                        nfs4_stateid *cnr_stateid, bool sync)
{
        struct nfs_server *server = NFS_SERVER(file_inode(dst));
        struct nfs_lock_context *src_lock;
        struct nfs_lock_context *dst_lock;
        struct nfs42_copy_args args = {
                .src_fh = NFS_FH(file_inode(src)),
                .src_pos = pos_src,
                .dst_fh = NFS_FH(file_inode(dst)),
                .dst_pos = pos_dst,
                .count = count,
                .sync = sync,
        };
        struct nfs42_copy_res res;
        struct nfs4_exception src_exception = {
                .inode = file_inode(src),
                .stateid = &args.src_stateid,
        };
        struct nfs4_exception dst_exception = {
                .inode = file_inode(dst),
                .stateid = &args.dst_stateid,
        };
        ssize_t err, err2;
        bool restart = false;

        src_lock = nfs_get_lock_context(nfs_file_open_context(src));
        if (IS_ERR(src_lock))
                return PTR_ERR(src_lock);

        src_exception.state = src_lock->open_context->state;

        dst_lock = nfs_get_lock_context(nfs_file_open_context(dst));
        if (IS_ERR(dst_lock)) {
                err = PTR_ERR(dst_lock);
                goto out_put_src_lock;
        }

        dst_exception.state = dst_lock->open_context->state;

        do {
                inode_lock(file_inode(dst));
                err = _nfs42_proc_copy(src, src_lock,
                                dst, dst_lock,
                                &args, &res,
                                nss, cnr_stateid, &restart);
                inode_unlock(file_inode(dst));

                if (err >= 0)
                        break;
                /* Same-server copy on a server without COPY support:
                 * give up and let the VFS fall back to a generic copy. */
                if (err == -ENOTSUPP &&
                                nfs42_files_from_same_server(src, dst)) {
                        err = -EOPNOTSUPP;
                        break;
                } else if (err == -EAGAIN) {
                        /* Retry in place unless handle_async_copy asked
                         * for a full restart by the caller. */
                        if (!restart) {
                                dst_exception.retry = 1;
                                continue;
                        }
                        break;
                } else if (err == -NFS4ERR_OFFLOAD_NO_REQS &&
                                args.sync != res.synchronous) {
                        /* Server insists on the other sync mode: adopt
                         * what it gave us and retry. */
                        args.sync = res.synchronous;
                        dst_exception.retry = 1;
                        continue;
                } else if ((err == -ESTALE ||
                                err == -NFS4ERR_OFFLOAD_DENIED ||
                                err == -ENOTSUPP) &&
                                !nfs42_files_from_same_server(src, dst)) {
                        /* Inter-server copy rejected: cancel the source
                         * offload and fall back to a generic copy. */
                        nfs42_do_offload_cancel_async(src, &args.src_stateid);
                        err = -EOPNOTSUPP;
                        break;
                }

                /* Run generic NFSv4 error handling for both stateids. */
                err2 = nfs4_handle_exception(server, err, &src_exception);
                err = nfs4_handle_exception(server, err, &dst_exception);
                if (!err)
                        err = err2;
        } while (src_exception.retry || dst_exception.retry);

        nfs_put_lock_context(dst_lock);
out_put_src_lock:
        nfs_put_lock_context(src_lock);
        return err;
}
/*
 * Per-call state for an asynchronous OFFLOAD_CANCEL request.
 * Freed by the rpc_release callback (nfs42_free_offloadcancel_data).
 */
struct nfs42_offloadcancel_data {
        struct nfs_server *seq_server;          /* server the cancel targets */
        struct nfs42_offload_status_args args;
        struct nfs42_offload_status_res res;
};
  447. static void nfs42_offload_cancel_prepare(struct rpc_task *task, void *calldata)
  448. {
  449. struct nfs42_offloadcancel_data *data = calldata;
  450. nfs4_setup_sequence(data->seq_server->nfs_client,
  451. &data->args.osa_seq_args,
  452. &data->res.osr_seq_res, task);
  453. }
  454. static void nfs42_offload_cancel_done(struct rpc_task *task, void *calldata)
  455. {
  456. struct nfs42_offloadcancel_data *data = calldata;
  457. trace_nfs4_offload_cancel(&data->args, task->tk_status);
  458. nfs41_sequence_done(task, &data->res.osr_seq_res);
  459. if (task->tk_status &&
  460. nfs4_async_handle_error(task, data->seq_server, NULL,
  461. NULL) == -EAGAIN)
  462. rpc_restart_call_prepare(task);
  463. }
/* rpc_release: free the per-call OFFLOAD_CANCEL state. */
static void nfs42_free_offloadcancel_data(void *data)
{
        kfree(data);
}
/* RPC callbacks for the async OFFLOAD_CANCEL task. */
static const struct rpc_call_ops nfs42_offload_cancel_ops = {
        .rpc_call_prepare = nfs42_offload_cancel_prepare,
        .rpc_call_done = nfs42_offload_cancel_done,
        .rpc_release = nfs42_free_offloadcancel_data,
};
/*
 * Send an OFFLOAD_CANCEL for @stateid to the server holding @dst, as an
 * async RPC task, then wait for it to complete.  Clears the
 * NFS_CAP_OFFLOAD_CANCEL capability if the server rejects the op.
 *
 * Returns 0 on success or a negative errno.
 */
static int nfs42_do_offload_cancel_async(struct file *dst,
                                         nfs4_stateid *stateid)
{
        struct nfs_server *dst_server = NFS_SERVER(file_inode(dst));
        struct nfs42_offloadcancel_data *data = NULL;
        struct nfs_open_context *ctx = nfs_file_open_context(dst);
        struct rpc_task *task;
        struct rpc_message msg = {
                .rpc_proc = &nfs4_procedures[NFSPROC4_CLNT_OFFLOAD_CANCEL],
                .rpc_cred = ctx->cred,
        };
        struct rpc_task_setup task_setup_data = {
                .rpc_client = dst_server->client,
                .rpc_message = &msg,
                .callback_ops = &nfs42_offload_cancel_ops,
                .workqueue = nfsiod_workqueue,
                .flags = RPC_TASK_ASYNC,
        };
        int status;

        if (!(dst_server->caps & NFS_CAP_OFFLOAD_CANCEL))
                return -EOPNOTSUPP;

        /* Ownership of @data passes to the RPC task; it is freed by the
         * rpc_release callback (nfs42_free_offloadcancel_data). */
        data = kzalloc(sizeof(struct nfs42_offloadcancel_data), GFP_KERNEL);
        if (data == NULL)
                return -ENOMEM;

        data->seq_server = dst_server;
        data->args.osa_src_fh = NFS_FH(file_inode(dst));
        memcpy(&data->args.osa_stateid, stateid,
                sizeof(data->args.osa_stateid));
        msg.rpc_argp = &data->args;
        msg.rpc_resp = &data->res;
        task_setup_data.callback_data = data;
        nfs4_init_sequence(&data->args.osa_seq_args, &data->res.osr_seq_res,
                           1, 0);
        task = rpc_run_task(&task_setup_data);
        if (IS_ERR(task))
                return PTR_ERR(task);
        status = rpc_wait_for_completion_task(task);
        if (status == -ENOTSUPP)
                dst_server->caps &= ~NFS_CAP_OFFLOAD_CANCEL;
        rpc_put_task(task);
        return status;
}
/*
 * Issue one COPY_NOTIFY to the source server, announcing that @dst's
 * server will come read from @src.  Fills @res with the stateid the
 * destination must use.  Clears NFS_CAP_COPY_NOTIFY on -ENOTSUPP.
 */
static int _nfs42_proc_copy_notify(struct file *src, struct file *dst,
                                   struct nfs42_copy_notify_args *args,
                                   struct nfs42_copy_notify_res *res)
{
        struct nfs_server *src_server = NFS_SERVER(file_inode(src));
        struct rpc_message msg = {
                .rpc_proc = &nfs4_procedures[NFSPROC4_CLNT_COPY_NOTIFY],
                .rpc_argp = args,
                .rpc_resp = res,
        };
        int status;
        struct nfs_open_context *ctx;
        struct nfs_lock_context *l_ctx;

        /* Take a reference on the open context for the duration of the
         * call; dropped at 'out'. */
        ctx = get_nfs_open_context(nfs_file_open_context(src));
        l_ctx = nfs_get_lock_context(ctx);
        if (IS_ERR(l_ctx)) {
                status = PTR_ERR(l_ctx);
                goto out;
        }
        status = nfs4_set_rw_stateid(&args->cna_src_stateid, ctx, l_ctx,
                                     FMODE_READ);
        nfs_put_lock_context(l_ctx);
        if (status) {
                if (status == -EAGAIN)
                        status = -NFS4ERR_BAD_STATEID;
                goto out;
        }

        status = nfs4_call_sync(src_server->client, src_server, &msg,
                                &args->cna_seq_args, &res->cnr_seq_res, 0);
        trace_nfs4_copy_notify(file_inode(src), args, res, status);
        if (status == -ENOTSUPP)
                src_server->caps &= ~NFS_CAP_COPY_NOTIFY;

out:
        put_nfs_open_context(nfs_file_open_context(src));
        return status;
}
  551. int nfs42_proc_copy_notify(struct file *src, struct file *dst,
  552. struct nfs42_copy_notify_res *res)
  553. {
  554. struct nfs_server *src_server = NFS_SERVER(file_inode(src));
  555. struct nfs42_copy_notify_args *args;
  556. struct nfs4_exception exception = {
  557. .inode = file_inode(src),
  558. };
  559. int status;
  560. if (!(src_server->caps & NFS_CAP_COPY_NOTIFY))
  561. return -EOPNOTSUPP;
  562. args = kzalloc(sizeof(struct nfs42_copy_notify_args), GFP_KERNEL);
  563. if (args == NULL)
  564. return -ENOMEM;
  565. args->cna_src_fh = NFS_FH(file_inode(src)),
  566. args->cna_dst.nl4_type = NL4_NETADDR;
  567. nfs42_set_netaddr(dst, &args->cna_dst.u.nl4_addr);
  568. exception.stateid = &args->cna_src_stateid;
  569. do {
  570. status = _nfs42_proc_copy_notify(src, dst, args, res);
  571. if (status == -ENOTSUPP) {
  572. status = -EOPNOTSUPP;
  573. goto out;
  574. }
  575. status = nfs4_handle_exception(src_server, status, &exception);
  576. } while (exception.retry);
  577. out:
  578. kfree(args);
  579. return status;
  580. }
/*
 * Issue one SEEK compound for SEEK_HOLE/SEEK_DATA on @filep.
 *
 * Returns the new file position on success, or a negative
 * errno/-NFS4ERR_* value; the caller (nfs42_proc_llseek) retries.
 */
static loff_t _nfs42_proc_llseek(struct file *filep,
                struct nfs_lock_context *lock, loff_t offset, int whence)
{
        struct inode *inode = file_inode(filep);
        struct nfs42_seek_args args = {
                .sa_fh = NFS_FH(inode),
                .sa_offset = offset,
                .sa_what = (whence == SEEK_HOLE) ?
                        NFS4_CONTENT_HOLE : NFS4_CONTENT_DATA,
        };
        struct nfs42_seek_res res;
        struct rpc_message msg = {
                .rpc_proc = &nfs4_procedures[NFSPROC4_CLNT_SEEK],
                .rpc_argp = &args,
                .rpc_resp = &res,
        };
        struct nfs_server *server = NFS_SERVER(inode);
        int status;

        if (!nfs_server_capable(inode, NFS_CAP_SEEK))
                return -ENOTSUPP;

        status = nfs4_set_rw_stateid(&args.sa_stateid, lock->open_context,
                        lock, FMODE_READ);
        if (status) {
                if (status == -EAGAIN)
                        status = -NFS4ERR_BAD_STATEID;
                return status;
        }

        /* Flush dirty pages from @offset on so the server's view of
         * holes/data matches ours. */
        status = nfs_filemap_write_and_wait_range(inode->i_mapping,
                        offset, LLONG_MAX);
        if (status)
                return status;

        status = nfs4_call_sync(server->client, server, &msg,
                                &args.seq_args, &res.seq_res, 0);
        trace_nfs4_llseek(inode, &args, &res, status);
        if (status == -ENOTSUPP)
                server->caps &= ~NFS_CAP_SEEK;
        if (status)
                return status;

        /* SEEK_DATA past EOF: no more data to find. */
        if (whence == SEEK_DATA && res.sr_eof)
                return -NFS4ERR_NXIO;
        else
                return vfs_setpos(filep, res.sr_offset, inode->i_sb->s_maxbytes);
}
  624. loff_t nfs42_proc_llseek(struct file *filep, loff_t offset, int whence)
  625. {
  626. struct nfs_server *server = NFS_SERVER(file_inode(filep));
  627. struct nfs4_exception exception = { };
  628. struct nfs_lock_context *lock;
  629. loff_t err;
  630. lock = nfs_get_lock_context(nfs_file_open_context(filep));
  631. if (IS_ERR(lock))
  632. return PTR_ERR(lock);
  633. exception.inode = file_inode(filep);
  634. exception.state = lock->open_context->state;
  635. do {
  636. err = _nfs42_proc_llseek(filep, lock, offset, whence);
  637. if (err >= 0)
  638. break;
  639. if (err == -ENOTSUPP) {
  640. err = -EOPNOTSUPP;
  641. break;
  642. }
  643. err = nfs4_handle_exception(server, err, &exception);
  644. } while (exception.retry);
  645. nfs_put_lock_context(lock);
  646. return err;
  647. }
  648. static void
  649. nfs42_layoutstat_prepare(struct rpc_task *task, void *calldata)
  650. {
  651. struct nfs42_layoutstat_data *data = calldata;
  652. struct inode *inode = data->inode;
  653. struct nfs_server *server = NFS_SERVER(inode);
  654. struct pnfs_layout_hdr *lo;
  655. spin_lock(&inode->i_lock);
  656. lo = NFS_I(inode)->layout;
  657. if (!pnfs_layout_is_valid(lo)) {
  658. spin_unlock(&inode->i_lock);
  659. rpc_exit(task, 0);
  660. return;
  661. }
  662. nfs4_stateid_copy(&data->args.stateid, &lo->plh_stateid);
  663. spin_unlock(&inode->i_lock);
  664. nfs4_setup_sequence(server->nfs_client, &data->args.seq_args,
  665. &data->res.seq_res, task);
  666. }
/*
 * rpc_call_done for LAYOUTSTATS: map server errors onto layout state
 * actions (destroy, invalidate-and-retry, resend, or disable the op).
 */
static void
nfs42_layoutstat_done(struct rpc_task *task, void *calldata)
{
        struct nfs42_layoutstat_data *data = calldata;
        struct inode *inode = data->inode;
        struct pnfs_layout_hdr *lo;

        if (!nfs4_sequence_done(task, &data->res.seq_res))
                return;

        switch (task->tk_status) {
        case 0:
                return;
        case -NFS4ERR_BADHANDLE:
        case -ESTALE:
                /* File is gone on the server: drop the whole layout. */
                pnfs_destroy_layout(NFS_I(inode));
                break;
        case -NFS4ERR_EXPIRED:
        case -NFS4ERR_ADMIN_REVOKED:
        case -NFS4ERR_DELEG_REVOKED:
        case -NFS4ERR_STALE_STATEID:
        case -NFS4ERR_BAD_STATEID:
                spin_lock(&inode->i_lock);
                lo = NFS_I(inode)->layout;
                /* Only act if the layout still matches the stateid we
                 * sent; otherwise someone already replaced it. */
                if (pnfs_layout_is_valid(lo) &&
                    nfs4_stateid_match(&data->args.stateid,
                                             &lo->plh_stateid)) {
                        LIST_HEAD(head);

                        /*
                         * Mark the bad layout state as invalid, then retry
                         * with the current stateid.
                         */
                        pnfs_mark_layout_stateid_invalid(lo, &head);
                        spin_unlock(&inode->i_lock);
                        pnfs_free_lseg_list(&head);
                        nfs_commit_inode(inode, 0);
                } else
                        spin_unlock(&inode->i_lock);
                break;
        case -NFS4ERR_OLD_STATEID:
                spin_lock(&inode->i_lock);
                lo = NFS_I(inode)->layout;
                if (pnfs_layout_is_valid(lo) &&
                    nfs4_stateid_match_other(&data->args.stateid,
                                             &lo->plh_stateid)) {
                        /* Do we need to delay before resending? */
                        if (!nfs4_stateid_is_newer(&lo->plh_stateid,
                                                   &data->args.stateid))
                                rpc_delay(task, HZ);
                        rpc_restart_call_prepare(task);
                }
                spin_unlock(&inode->i_lock);
                break;
        case -ENOTSUPP:
        case -EOPNOTSUPP:
                /* Server doesn't do LAYOUTSTATS: stop sending them. */
                NFS_SERVER(inode)->caps &= ~NFS_CAP_LAYOUTSTATS;
        }

        trace_nfs4_layoutstats(inode, &data->args.stateid, task->tk_status);
}
/*
 * rpc_release for LAYOUTSTATS: free per-device private data, drop the
 * layout and inode references taken for the call, and clear the
 * "layoutstats in flight" flag.
 */
static void
nfs42_layoutstat_release(void *calldata)
{
        struct nfs42_layoutstat_data *data = calldata;
        struct nfs42_layoutstat_devinfo *devinfo = data->args.devinfo;
        int i;

        /* Let each layout driver free its private per-device state. */
        for (i = 0; i < data->args.num_dev; i++) {
                if (devinfo[i].ld_private.ops && devinfo[i].ld_private.ops->free)
                        devinfo[i].ld_private.ops->free(&devinfo[i].ld_private);
        }

        pnfs_put_layout_hdr(NFS_I(data->args.inode)->layout);
        /* Barriers order the teardown above against clearing the flag;
         * NFS_INO_LAYOUTSTATS presumably gates new submissions — see
         * callers of this flag. */
        smp_mb__before_atomic();
        clear_bit(NFS_INO_LAYOUTSTATS, &NFS_I(data->args.inode)->flags);
        smp_mb__after_atomic();
        nfs_iput_and_deactive(data->inode);
        kfree(data->args.devinfo);
        kfree(data);
}
/* RPC callbacks for the async LAYOUTSTATS task. */
static const struct rpc_call_ops nfs42_layoutstat_ops = {
        .rpc_call_prepare = nfs42_layoutstat_prepare,
        .rpc_call_done = nfs42_layoutstat_done,
        .rpc_release = nfs42_layoutstat_release,
};
/*
 * nfs42_proc_layoutstats_generic - fire off an async LAYOUTSTATS RPC.
 *
 * Takes an active reference on the inode for the duration of the call;
 * @data ownership passes to the RPC task (freed via rpc_release).
 * Returns 0 if the task was launched, or a negative errno.
 */
int nfs42_proc_layoutstats_generic(struct nfs_server *server,
                                   struct nfs42_layoutstat_data *data)
{
        struct rpc_message msg = {
                .rpc_proc = &nfs4_procedures[NFSPROC4_CLNT_LAYOUTSTATS],
                .rpc_argp = &data->args,
                .rpc_resp = &data->res,
        };
        struct rpc_task_setup task_setup = {
                .rpc_client = server->client,
                .rpc_message = &msg,
                .callback_ops = &nfs42_layoutstat_ops,
                .callback_data = data,
                .flags = RPC_TASK_ASYNC,
        };
        struct rpc_task *task;

        data->inode = nfs_igrab_and_active(data->args.inode);
        if (!data->inode) {
                /* Inode is being torn down: release @data ourselves. */
                nfs42_layoutstat_release(data);
                return -EAGAIN;
        }
        nfs4_init_sequence(&data->args.seq_args, &data->res.seq_res, 0, 0);
        task = rpc_run_task(&task_setup);
        if (IS_ERR(task))
                return PTR_ERR(task);
        rpc_put_task(task);
        return 0;
}
  775. static struct nfs42_layouterror_data *
  776. nfs42_alloc_layouterror_data(struct pnfs_layout_segment *lseg, gfp_t gfp_flags)
  777. {
  778. struct nfs42_layouterror_data *data;
  779. struct inode *inode = lseg->pls_layout->plh_inode;
  780. data = kzalloc(sizeof(*data), gfp_flags);
  781. if (data) {
  782. data->args.inode = data->inode = nfs_igrab_and_active(inode);
  783. if (data->inode) {
  784. data->lseg = pnfs_get_lseg(lseg);
  785. if (data->lseg)
  786. return data;
  787. nfs_iput_and_deactive(data->inode);
  788. }
  789. kfree(data);
  790. }
  791. return NULL;
  792. }
/*
 * Undo nfs42_alloc_layouterror_data(): drop the lseg and inode
 * references, then free the container itself.
 */
static void
nfs42_free_layouterror_data(struct nfs42_layouterror_data *data)
{
	pnfs_put_lseg(data->lseg);
	nfs_iput_and_deactive(data->inode);
	kfree(data);
}
/*
 * rpc_call_prepare for LAYOUTERROR: under i_lock, copy the current
 * layout stateid into every queued error record so the server sees an
 * up-to-date stateid, then start the NFSv4 session sequence.  If the
 * layout has been invalidated in the meantime, the task exits cleanly.
 */
static void
nfs42_layouterror_prepare(struct rpc_task *task, void *calldata)
{
	struct nfs42_layouterror_data *data = calldata;
	struct inode *inode = data->inode;
	struct nfs_server *server = NFS_SERVER(inode);
	struct pnfs_layout_hdr *lo = data->lseg->pls_layout;
	unsigned i;

	spin_lock(&inode->i_lock);
	if (!pnfs_layout_is_valid(lo)) {
		/* Layout went away; nothing to report. */
		spin_unlock(&inode->i_lock);
		rpc_exit(task, 0);
		return;
	}
	for (i = 0; i < data->args.num_errors; i++)
		nfs4_stateid_copy(&data->args.errors[i].stateid,
				  &lo->plh_stateid);
	spin_unlock(&inode->i_lock);
	nfs4_setup_sequence(server->nfs_client, &data->args.seq_args,
			    &data->res.seq_res, task);
}
/*
 * rpc_call_done for LAYOUTERROR: interpret the server's status.
 * Revoked/bad stateids invalidate the layout so a fresh LAYOUTGET is
 * forced; NFS4ERR_OLD_STATEID retries the call (prepare will refresh
 * the stateid); ENOTSUPP disables the capability for this server.
 */
static void
nfs42_layouterror_done(struct rpc_task *task, void *calldata)
{
	struct nfs42_layouterror_data *data = calldata;
	struct inode *inode = data->inode;
	struct pnfs_layout_hdr *lo = data->lseg->pls_layout;

	if (!nfs4_sequence_done(task, &data->res.seq_res))
		return;

	switch (task->tk_status) {
	case 0:
		return;
	case -NFS4ERR_BADHANDLE:
	case -ESTALE:
		/* The file handle itself is bad; drop the whole layout. */
		pnfs_destroy_layout(NFS_I(inode));
		break;
	case -NFS4ERR_EXPIRED:
	case -NFS4ERR_ADMIN_REVOKED:
	case -NFS4ERR_DELEG_REVOKED:
	case -NFS4ERR_STALE_STATEID:
	case -NFS4ERR_BAD_STATEID:
		spin_lock(&inode->i_lock);
		if (pnfs_layout_is_valid(lo) &&
		    nfs4_stateid_match(&data->args.errors[0].stateid,
				       &lo->plh_stateid)) {
			LIST_HEAD(head);

			/*
			 * Mark the bad layout state as invalid, then retry
			 * with the current stateid.
			 */
			pnfs_mark_layout_stateid_invalid(lo, &head);
			spin_unlock(&inode->i_lock);
			pnfs_free_lseg_list(&head);
			nfs_commit_inode(inode, 0);
		} else
			spin_unlock(&inode->i_lock);
		break;
	case -NFS4ERR_OLD_STATEID:
		spin_lock(&inode->i_lock);
		if (pnfs_layout_is_valid(lo) &&
		    nfs4_stateid_match_other(&data->args.errors[0].stateid,
					     &lo->plh_stateid)) {
			/* Do we need to delay before resending? */
			if (!nfs4_stateid_is_newer(&lo->plh_stateid,
						   &data->args.errors[0].stateid))
				rpc_delay(task, HZ);
			rpc_restart_call_prepare(task);
		}
		spin_unlock(&inode->i_lock);
		break;
	case -ENOTSUPP:
	case -EOPNOTSUPP:
		/* Server does not implement LAYOUTERROR; stop sending it. */
		NFS_SERVER(inode)->caps &= ~NFS_CAP_LAYOUTERROR;
	}
	trace_nfs4_layouterror(inode, &data->args.errors[0].stateid,
			       task->tk_status);
}
  877. static void
  878. nfs42_layouterror_release(void *calldata)
  879. {
  880. struct nfs42_layouterror_data *data = calldata;
  881. nfs42_free_layouterror_data(data);
  882. }
/* RPC lifecycle callbacks for the asynchronous LAYOUTERROR call. */
static const struct rpc_call_ops nfs42_layouterror_ops = {
	.rpc_call_prepare = nfs42_layouterror_prepare,
	.rpc_call_done = nfs42_layouterror_done,
	.rpc_release = nfs42_layouterror_release,
};
/**
 * nfs42_proc_layouterror - report layout I/O errors to the server
 * @lseg: layout segment the errors occurred on
 * @errors: array of error records to send
 * @n: number of entries in @errors (at most NFS42_LAYOUTERROR_MAX)
 *
 * Fires an asynchronous LAYOUTERROR RPC.  Ownership of the allocated
 * data container passes to the RPC machinery, which releases it via
 * nfs42_layouterror_release().
 *
 * Returns 0 on dispatch, -EOPNOTSUPP if the server lacks the
 * capability, -EINVAL if @n is too large, -ENOMEM on allocation
 * failure, or the rpc_run_task() error.
 */
int nfs42_proc_layouterror(struct pnfs_layout_segment *lseg,
			   const struct nfs42_layout_error *errors, size_t n)
{
	struct inode *inode = lseg->pls_layout->plh_inode;
	struct nfs42_layouterror_data *data;
	struct rpc_task *task;
	struct rpc_message msg = {
		.rpc_proc = &nfs4_procedures[NFSPROC4_CLNT_LAYOUTERROR],
	};
	struct rpc_task_setup task_setup = {
		.rpc_message = &msg,
		.callback_ops = &nfs42_layouterror_ops,
		.flags = RPC_TASK_ASYNC,
	};
	unsigned int i;

	if (!nfs_server_capable(inode, NFS_CAP_LAYOUTERROR))
		return -EOPNOTSUPP;
	if (n > NFS42_LAYOUTERROR_MAX)
		return -EINVAL;
	data = nfs42_alloc_layouterror_data(lseg, nfs_io_gfp_mask());
	if (!data)
		return -ENOMEM;
	/* Copy the caller's error records into the RPC arguments. */
	for (i = 0; i < n; i++) {
		data->args.errors[i] = errors[i];
		data->args.num_errors++;
		data->res.num_errors++;
	}
	msg.rpc_argp = &data->args;
	msg.rpc_resp = &data->res;
	task_setup.callback_data = data;
	task_setup.rpc_client = NFS_SERVER(inode)->client;
	nfs4_init_sequence(&data->args.seq_args, &data->res.seq_res, 0, 0);
	task = rpc_run_task(&task_setup);
	if (IS_ERR(task))
		return PTR_ERR(task);
	rpc_put_task(task);
	return 0;
}
EXPORT_SYMBOL_GPL(nfs42_proc_layouterror);
/*
 * One attempt at a CLONE RPC (no retry/exception handling — the caller
 * nfs42_proc_clone() supplies that).  Obtains read/write stateids for
 * the source and destination, runs the synchronous call, and on success
 * updates the destination inode's cached attributes.
 *
 * A zero @count means "clone from src_offset to EOF of the source".
 * Returns 0 on success or a negative NFS/errno status.
 */
static int _nfs42_proc_clone(struct rpc_message *msg, struct file *src_f,
			     struct file *dst_f, struct nfs_lock_context *src_lock,
			     struct nfs_lock_context *dst_lock, loff_t src_offset,
			     loff_t dst_offset, loff_t count)
{
	struct inode *src_inode = file_inode(src_f);
	struct inode *dst_inode = file_inode(dst_f);
	struct nfs_server *server = NFS_SERVER(dst_inode);
	__u32 dst_bitmask[NFS_BITMASK_SZ];
	struct nfs42_clone_args args = {
		.src_fh = NFS_FH(src_inode),
		.dst_fh = NFS_FH(dst_inode),
		.src_offset = src_offset,
		.dst_offset = dst_offset,
		.count = count,
		.dst_bitmask = dst_bitmask,
	};
	struct nfs42_clone_res res = {
		.server = server,
	};
	int status;

	msg->rpc_argp = &args;
	msg->rpc_resp = &res;

	/* Map -EAGAIN to a stateid error so the caller's exception
	 * handler recovers state instead of looping forever. */
	status = nfs4_set_rw_stateid(&args.src_stateid, src_lock->open_context,
				     src_lock, FMODE_READ);
	if (status) {
		if (status == -EAGAIN)
			status = -NFS4ERR_BAD_STATEID;
		return status;
	}
	status = nfs4_set_rw_stateid(&args.dst_stateid, dst_lock->open_context,
				     dst_lock, FMODE_WRITE);
	if (status) {
		if (status == -EAGAIN)
			status = -NFS4ERR_BAD_STATEID;
		return status;
	}

	res.dst_fattr = nfs_alloc_fattr();
	if (!res.dst_fattr)
		return -ENOMEM;

	/* Ask the server to return post-op attrs we need to revalidate. */
	nfs4_bitmask_set(dst_bitmask, server->cache_consistency_bitmask,
			 dst_inode, NFS_INO_INVALID_BLOCKS);

	status = nfs4_call_sync(server->client, server, msg,
				&args.seq_args, &res.seq_res, 0);
	trace_nfs4_clone(src_inode, dst_inode, &args, status);
	if (status == 0) {
		/* a zero-length count means clone to EOF in src */
		if (count == 0 && res.dst_fattr->valid & NFS_ATTR_FATTR_SIZE)
			count = nfs_size_to_loff_t(res.dst_fattr->size) - dst_offset;
		nfs42_copy_dest_done(dst_inode, dst_offset, count);
		status = nfs_post_op_update_inode(dst_inode, res.dst_fattr);
	}

	kfree(res.dst_fattr);
	return status;
}
/**
 * nfs42_proc_clone - clone a byte range between two NFS files
 * @src_f: source file
 * @dst_f: destination file
 * @src_offset: offset into the source
 * @dst_offset: offset into the destination
 * @count: number of bytes to clone (0 means to EOF of the source)
 *
 * Wraps _nfs42_proc_clone() in the standard NFSv4 exception/retry
 * loop, tracking recovery state for the source and destination opens
 * independently.  On ENOTSUPP the CLONE capability is cleared so
 * subsequent calls fail fast.
 *
 * Returns 0 on success or a negative errno.
 */
int nfs42_proc_clone(struct file *src_f, struct file *dst_f,
		     loff_t src_offset, loff_t dst_offset, loff_t count)
{
	struct rpc_message msg = {
		.rpc_proc = &nfs4_procedures[NFSPROC4_CLNT_CLONE],
	};
	struct inode *inode = file_inode(src_f);
	struct nfs_server *server = NFS_SERVER(file_inode(src_f));
	struct nfs_lock_context *src_lock;
	struct nfs_lock_context *dst_lock;
	struct nfs4_exception src_exception = { };
	struct nfs4_exception dst_exception = { };
	int err, err2;

	if (!nfs_server_capable(inode, NFS_CAP_CLONE))
		return -EOPNOTSUPP;

	src_lock = nfs_get_lock_context(nfs_file_open_context(src_f));
	if (IS_ERR(src_lock))
		return PTR_ERR(src_lock);

	src_exception.inode = file_inode(src_f);
	src_exception.state = src_lock->open_context->state;

	dst_lock = nfs_get_lock_context(nfs_file_open_context(dst_f));
	if (IS_ERR(dst_lock)) {
		err = PTR_ERR(dst_lock);
		goto out_put_src_lock;
	}

	dst_exception.inode = file_inode(dst_f);
	dst_exception.state = dst_lock->open_context->state;

	do {
		err = _nfs42_proc_clone(&msg, src_f, dst_f, src_lock, dst_lock,
					src_offset, dst_offset, count);
		if (err == -ENOTSUPP || err == -EOPNOTSUPP) {
			NFS_SERVER(inode)->caps &= ~NFS_CAP_CLONE;
			err = -EOPNOTSUPP;
			break;
		}

		/* Run both exception handlers so each side's state gets
		 * recovered; prefer the source's verdict if dst says OK. */
		err2 = nfs4_handle_exception(server, err, &src_exception);
		err = nfs4_handle_exception(server, err, &dst_exception);
		if (!err)
			err = err2;
	} while (src_exception.retry || dst_exception.retry);

	nfs_put_lock_context(dst_lock);
out_put_src_lock:
	nfs_put_lock_context(src_lock);
	return err;
}
/* Max number of pages needed to hold an xattr of XATTR_SIZE_MAX bytes. */
#define NFS4XATTR_MAXPAGES DIV_ROUND_UP(XATTR_SIZE_MAX, PAGE_SIZE)
/*
 * One attempt at a REMOVEXATTR RPC (retries handled by the caller).
 * On success, folds the returned change info into the inode's cached
 * change attribute, timestamped from just before the call.
 */
static int _nfs42_proc_removexattr(struct inode *inode, const char *name)
{
	struct nfs_server *server = NFS_SERVER(inode);
	struct nfs42_removexattrargs args = {
		.fh = NFS_FH(inode),
		.xattr_name = name,
	};
	struct nfs42_removexattrres res;
	struct rpc_message msg = {
		.rpc_proc = &nfs4_procedures[NFSPROC4_CLNT_REMOVEXATTR],
		.rpc_argp = &args,
		.rpc_resp = &res,
	};
	int ret;
	unsigned long timestamp = jiffies;

	ret = nfs4_call_sync(server->client, server, &msg, &args.seq_args,
			     &res.seq_res, 1);
	trace_nfs4_removexattr(inode, name, ret);
	if (!ret)
		nfs4_update_changeattr(inode, &res.cinfo, timestamp, 0);
	return ret;
}
/*
 * One attempt at a SETXATTR RPC (retries handled by the caller).
 * The value buffer is mapped into @pages for the duration of the call;
 * the page references taken by nfs4_buf_to_pages_noslab() are dropped
 * afterwards.  A zero-length value is sent with no pages.
 *
 * Returns 0 on success, -ERANGE if the value exceeds the server's
 * advertised setxattr limit, or a negative NFS/errno status.
 */
static int _nfs42_proc_setxattr(struct inode *inode, const char *name,
				const void *buf, size_t buflen, int flags)
{
	struct nfs_server *server = NFS_SERVER(inode);
	struct page *pages[NFS4XATTR_MAXPAGES];
	struct nfs42_setxattrargs arg = {
		.fh = NFS_FH(inode),
		.xattr_pages = pages,
		.xattr_len = buflen,
		.xattr_name = name,
		.xattr_flags = flags,
	};
	struct nfs42_setxattrres res;
	struct rpc_message msg = {
		.rpc_proc = &nfs4_procedures[NFSPROC4_CLNT_SETXATTR],
		.rpc_argp = &arg,
		.rpc_resp = &res,
	};
	int ret, np;
	unsigned long timestamp = jiffies;

	if (buflen > server->sxasize)
		return -ERANGE;

	if (buflen > 0) {
		np = nfs4_buf_to_pages_noslab(buf, buflen, arg.xattr_pages);
		if (np < 0)
			return np;
	} else
		np = 0;

	ret = nfs4_call_sync(server->client, server, &msg, &arg.seq_args,
			     &res.seq_res, 1);
	trace_nfs4_setxattr(inode, name, ret);

	/* Release the page references pinned for the send buffer. */
	for (; np > 0; np--)
		put_page(pages[np - 1]);

	if (!ret)
		nfs4_update_changeattr(inode, &res.cinfo, timestamp, 0);
	return ret;
}
/*
 * One attempt at a GETXATTR RPC (retries handled by the caller).
 * The reply data lands in @pages (capacity @plen bytes).  On success
 * the value is always added to the xattr cache, then copied out to
 * @buf if the caller supplied a non-zero @buflen.
 *
 * Returns the xattr length, -ERANGE if it exceeds @buflen, or a
 * negative NFS/errno status.
 */
static ssize_t _nfs42_proc_getxattr(struct inode *inode, const char *name,
				    void *buf, size_t buflen, struct page **pages,
				    size_t plen)
{
	struct nfs_server *server = NFS_SERVER(inode);
	struct nfs42_getxattrargs arg = {
		.fh = NFS_FH(inode),
		.xattr_name = name,
	};
	struct nfs42_getxattrres res;
	struct rpc_message msg = {
		.rpc_proc = &nfs4_procedures[NFSPROC4_CLNT_GETXATTR],
		.rpc_argp = &arg,
		.rpc_resp = &res,
	};
	ssize_t ret;

	arg.xattr_len = plen;
	arg.xattr_pages = pages;

	ret = nfs4_call_sync(server->client, server, &msg, &arg.seq_args,
			     &res.seq_res, 0);
	trace_nfs4_getxattr(inode, name, ret);
	if (ret < 0)
		return ret;

	/*
	 * Normally, the caching is done one layer up, but for successful
	 * RPCS, always cache the result here, even if the caller was
	 * just querying the length, or if the reply was too big for
	 * the caller. This avoids a second RPC in the case of the
	 * common query-alloc-retrieve cycle for xattrs.
	 *
	 * Note that xattr_len is always capped to XATTR_SIZE_MAX.
	 */
	nfs4_xattr_cache_add(inode, name, NULL, pages, res.xattr_len);

	if (buflen) {
		if (res.xattr_len > buflen)
			return -ERANGE;
		_copy_from_pages(buf, pages, 0, res.xattr_len);
	}

	return res.xattr_len;
}
/*
 * One attempt at a LISTXATTRS RPC (retries handled by the caller).
 * Allocates a scratch page plus enough receive pages for the XDR-sized
 * reply (capped at the server's advertised limit), runs the call, and
 * on success reports the bytes copied into @buf, the continuation
 * cookie, and the EOF flag.
 *
 * Cleanup uses the standard goto-unwind; kcalloc() zeroes the page
 * array so the partial-allocation path can safely free only the pages
 * that were obtained.
 */
static ssize_t _nfs42_proc_listxattrs(struct inode *inode, void *buf,
				      size_t buflen, u64 *cookiep, bool *eofp)
{
	struct nfs_server *server = NFS_SERVER(inode);
	struct page **pages;
	struct nfs42_listxattrsargs arg = {
		.fh = NFS_FH(inode),
		.cookie = *cookiep,
	};
	struct nfs42_listxattrsres res = {
		.eof = false,
		.xattr_buf = buf,
		.xattr_len = buflen,
	};
	struct rpc_message msg = {
		.rpc_proc = &nfs4_procedures[NFSPROC4_CLNT_LISTXATTRS],
		.rpc_argp = &arg,
		.rpc_resp = &res,
	};
	u32 xdrlen;
	int ret, np, i;

	ret = -ENOMEM;
	res.scratch = alloc_page(GFP_KERNEL);
	if (!res.scratch)
		goto out;

	xdrlen = nfs42_listxattr_xdrsize(buflen);
	if (xdrlen > server->lxasize)
		xdrlen = server->lxasize;
	np = xdrlen / PAGE_SIZE + 1;

	pages = kcalloc(np, sizeof(struct page *), GFP_KERNEL);
	if (!pages)
		goto out_free_scratch;
	for (i = 0; i < np; i++) {
		pages[i] = alloc_page(GFP_KERNEL);
		if (!pages[i])
			goto out_free_pages;
	}

	arg.xattr_pages = pages;
	arg.count = xdrlen;

	ret = nfs4_call_sync(server->client, server, &msg, &arg.seq_args,
			     &res.seq_res, 0);
	trace_nfs4_listxattr(inode, ret);

	if (ret >= 0) {
		ret = res.copied;
		*cookiep = res.cookie;
		*eofp = res.eof;
	}

out_free_pages:
	while (--np >= 0) {
		if (pages[np])
			__free_page(pages[np]);
	}
	kfree(pages);
out_free_scratch:
	__free_page(res.scratch);
out:
	return ret;
}
/**
 * nfs42_proc_getxattr - retrieve an extended attribute value
 * @inode: inode the xattr belongs to
 * @name: xattr name
 * @buf: destination buffer, or NULL/unused when @buflen is 0
 * @buflen: size of @buf; 0 means "query the length only"
 *
 * Allocates a page array sized for @buflen (or XATTR_SIZE_MAX for a
 * length query) and runs _nfs42_proc_getxattr() inside the standard
 * NFSv4 exception/retry loop.  The pages outlive the loop so a retry
 * reuses them.
 *
 * Returns the xattr length, or a negative errno.
 */
ssize_t nfs42_proc_getxattr(struct inode *inode, const char *name,
			    void *buf, size_t buflen)
{
	struct nfs4_exception exception = { };
	ssize_t err, np, i;
	struct page **pages;

	np = nfs_page_array_len(0, buflen ?: XATTR_SIZE_MAX);
	pages = kmalloc_array(np, sizeof(*pages), GFP_KERNEL);
	if (!pages)
		return -ENOMEM;

	for (i = 0; i < np; i++) {
		pages[i] = alloc_page(GFP_KERNEL);
		if (!pages[i]) {
			err = -ENOMEM;
			goto out;
		}
	}

	/*
	 * The GETXATTR op has no length field in the call, and the
	 * xattr data is at the end of the reply.
	 *
	 * There is no downside in using the page-aligned length. It will
	 * allow receiving and caching xattrs that are too large for the
	 * caller but still fit in the page-rounded value.
	 */
	do {
		err = _nfs42_proc_getxattr(inode, name, buf, buflen,
					   pages, np * PAGE_SIZE);
		if (err >= 0)
			break;
		err = nfs4_handle_exception(NFS_SERVER(inode), err,
					    &exception);
	} while (exception.retry);

out:
	/* i is the count of pages successfully allocated so far. */
	while (--i >= 0)
		__free_page(pages[i]);
	kfree(pages);

	return err;
}
  1224. int nfs42_proc_setxattr(struct inode *inode, const char *name,
  1225. const void *buf, size_t buflen, int flags)
  1226. {
  1227. struct nfs4_exception exception = { };
  1228. int err;
  1229. do {
  1230. err = _nfs42_proc_setxattr(inode, name, buf, buflen, flags);
  1231. if (!err)
  1232. break;
  1233. err = nfs4_handle_exception(NFS_SERVER(inode), err,
  1234. &exception);
  1235. } while (exception.retry);
  1236. return err;
  1237. }
  1238. ssize_t nfs42_proc_listxattrs(struct inode *inode, void *buf,
  1239. size_t buflen, u64 *cookiep, bool *eofp)
  1240. {
  1241. struct nfs4_exception exception = { };
  1242. ssize_t err;
  1243. do {
  1244. err = _nfs42_proc_listxattrs(inode, buf, buflen,
  1245. cookiep, eofp);
  1246. if (err >= 0)
  1247. break;
  1248. err = nfs4_handle_exception(NFS_SERVER(inode), err,
  1249. &exception);
  1250. } while (exception.retry);
  1251. return err;
  1252. }
  1253. int nfs42_proc_removexattr(struct inode *inode, const char *name)
  1254. {
  1255. struct nfs4_exception exception = { };
  1256. int err;
  1257. do {
  1258. err = _nfs42_proc_removexattr(inode, name);
  1259. if (!err)
  1260. break;
  1261. err = nfs4_handle_exception(NFS_SERVER(inode), err,
  1262. &exception);
  1263. } while (exception.retry);
  1264. return err;
  1265. }