direct.c

// SPDX-License-Identifier: GPL-2.0-only
/*
 * linux/fs/nfs/direct.c
 *
 * Copyright (C) 2003 by Chuck Lever <[email protected]>
 *
 * High-performance uncached I/O for the Linux NFS client
 *
 * There are important applications whose performance or correctness
 * depends on uncached access to file data. Database clusters
 * (multiple copies of the same instance running on separate hosts)
 * implement their own cache coherency protocol that subsumes file
 * system cache protocols. Applications that process datasets
 * considerably larger than the client's memory do not always benefit
 * from a local cache. A streaming video server, for instance, has no
 * need to cache the contents of a file.
 *
 * When an application requests uncached I/O, all read and write requests
 * are made directly to the server; data stored or fetched via these
 * requests is not cached in the Linux page cache. The client does not
 * correct unaligned requests from applications. All requested bytes are
 * held on permanent storage before a direct write system call returns to
 * an application.
 *
 * Solaris implements an uncached I/O facility called directio() that
 * is used for backups and sequential I/O to very large files. Solaris
 * also supports uncaching whole NFS partitions with "-o forcedirectio,"
 * an undocumented mount option.
 *
 * Designed by Jeff Kimmel, Chuck Lever, and Trond Myklebust, with
 * help from Andrew Morton.
 *
 * 18 Dec 2001	Initial implementation for 2.4 --cel
 * 08 Jul 2002	Version for 2.4.19, with bug fixes --trondmy
 * 08 Jun 2003	Port to 2.5 APIs --cel
 * 31 Mar 2004	Handle direct I/O without VFS support --cel
 * 15 Sep 2004	Parallel async reads --cel
 * 04 May 2005	support O_DIRECT with aio --cel
 *
 */
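
/*
 * Illustrative user-space sketch (not part of this file): an application
 * typically opts into uncached NFS I/O by opening with O_DIRECT and issuing
 * aligned requests, since this client does not correct unaligned I/O.  The
 * path, block size, and lack of error checking below are assumptions made
 * for this example only.
 *
 *	#define _GNU_SOURCE		// needed for O_DIRECT on Linux
 *	#include <fcntl.h>
 *	#include <stdlib.h>
 *	#include <unistd.h>
 *
 *	int main(void)
 *	{
 *		void *buf;
 *		int fd = open("/mnt/nfs/data", O_RDWR | O_DIRECT);
 *
 *		posix_memalign(&buf, 4096, 4096);	// block-aligned buffer
 *		pread(fd, buf, 4096, 0);	// fetched directly from the server
 *		pwrite(fd, buf, 4096, 0);	// stable on the server before return
 *		close(fd);
 *		free(buf);
 *		return 0;
 *	}
 */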

#include <linux/errno.h>
#include <linux/sched.h>
#include <linux/kernel.h>
#include <linux/file.h>
#include <linux/pagemap.h>
#include <linux/kref.h>
#include <linux/slab.h>
#include <linux/task_io_accounting_ops.h>
#include <linux/module.h>

#include <linux/nfs_fs.h>
#include <linux/nfs_page.h>
#include <linux/sunrpc/clnt.h>

#include <linux/uaccess.h>
#include <linux/atomic.h>

#include "internal.h"
#include "iostat.h"
#include "pnfs.h"
#include "fscache.h"
#include "nfstrace.h"

#define NFSDBG_FACILITY		NFSDBG_VFS

static struct kmem_cache *nfs_direct_cachep;

static const struct nfs_pgio_completion_ops nfs_direct_write_completion_ops;
static const struct nfs_commit_completion_ops nfs_direct_commit_completion_ops;
static void nfs_direct_write_complete(struct nfs_direct_req *dreq);
static void nfs_direct_write_schedule_work(struct work_struct *work);

static inline void get_dreq(struct nfs_direct_req *dreq)
{
	atomic_inc(&dreq->io_count);
}

static inline int put_dreq(struct nfs_direct_req *dreq)
{
	return atomic_dec_and_test(&dreq->io_count);
}

static void
nfs_direct_handle_truncated(struct nfs_direct_req *dreq,
			    const struct nfs_pgio_header *hdr,
			    ssize_t dreq_len)
{
	if (!(test_bit(NFS_IOHDR_ERROR, &hdr->flags) ||
	      test_bit(NFS_IOHDR_EOF, &hdr->flags)))
		return;
	if (dreq->max_count >= dreq_len) {
		dreq->max_count = dreq_len;
		if (dreq->count > dreq_len)
			dreq->count = dreq_len;

		if (test_bit(NFS_IOHDR_ERROR, &hdr->flags))
			dreq->error = hdr->error;
		else /* Clear outstanding error if this is EOF */
			dreq->error = 0;
	}
}

static void
nfs_direct_count_bytes(struct nfs_direct_req *dreq,
		       const struct nfs_pgio_header *hdr)
{
	loff_t hdr_end = hdr->io_start + hdr->good_bytes;
	ssize_t dreq_len = 0;

	if (hdr_end > dreq->io_start)
		dreq_len = hdr_end - dreq->io_start;

	nfs_direct_handle_truncated(dreq, hdr, dreq_len);

	if (dreq_len > dreq->max_count)
		dreq_len = dreq->max_count;

	if (dreq->count < dreq_len)
		dreq->count = dreq_len;
}
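
/*
 * Worked example for the accounting above (illustrative numbers only):
 * for a direct request with io_start = 0 and max_count = 16384, suppose
 * the RPC covering bytes 0-8191 succeeds while the RPC covering bytes
 * 8192-16383 fails.  The failing header has io_start = 8192 and
 * good_bytes = 0, so dreq_len becomes 8192, max_count is trimmed to 8192,
 * and dreq->error records the failure; the direct request then reports a
 * short result of 8192 bytes rather than 16384.
 */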

/**
 * nfs_swap_rw - NFS address space operation for swap I/O
 * @iocb: target I/O control block
 * @iter: I/O buffer
 *
 * Perform IO to the swap-file. This is much like direct IO.
 */
int nfs_swap_rw(struct kiocb *iocb, struct iov_iter *iter)
{
	ssize_t ret;

	VM_BUG_ON(iov_iter_count(iter) != PAGE_SIZE);

	if (iov_iter_rw(iter) == READ)
		ret = nfs_file_direct_read(iocb, iter, true);
	else
		ret = nfs_file_direct_write(iocb, iter, true);
	if (ret < 0)
		return ret;
	return 0;
}

static void nfs_direct_release_pages(struct page **pages, unsigned int npages)
{
	unsigned int i;
	for (i = 0; i < npages; i++)
		put_page(pages[i]);
}

void nfs_init_cinfo_from_dreq(struct nfs_commit_info *cinfo,
			      struct nfs_direct_req *dreq)
{
	cinfo->inode = dreq->inode;
	cinfo->mds = &dreq->mds_cinfo;
	cinfo->ds = &dreq->ds_cinfo;
	cinfo->dreq = dreq;
	cinfo->completion_ops = &nfs_direct_commit_completion_ops;
}

static inline struct nfs_direct_req *nfs_direct_req_alloc(void)
{
	struct nfs_direct_req *dreq;

	dreq = kmem_cache_zalloc(nfs_direct_cachep, GFP_KERNEL);
	if (!dreq)
		return NULL;

	kref_init(&dreq->kref);
	kref_get(&dreq->kref);
	init_completion(&dreq->completion);
	INIT_LIST_HEAD(&dreq->mds_cinfo.list);
	pnfs_init_ds_commit_info(&dreq->ds_cinfo);
	INIT_WORK(&dreq->work, nfs_direct_write_schedule_work);
	spin_lock_init(&dreq->lock);

	return dreq;
}

static void nfs_direct_req_free(struct kref *kref)
{
	struct nfs_direct_req *dreq = container_of(kref, struct nfs_direct_req, kref);

	pnfs_release_ds_info(&dreq->ds_cinfo, dreq->inode);
	if (dreq->l_ctx != NULL)
		nfs_put_lock_context(dreq->l_ctx);
	if (dreq->ctx != NULL)
		put_nfs_open_context(dreq->ctx);
	kmem_cache_free(nfs_direct_cachep, dreq);
}

static void nfs_direct_req_release(struct nfs_direct_req *dreq)
{
	kref_put(&dreq->kref, nfs_direct_req_free);
}

ssize_t nfs_dreq_bytes_left(struct nfs_direct_req *dreq)
{
	return dreq->bytes_left;
}
EXPORT_SYMBOL_GPL(nfs_dreq_bytes_left);

/*
 * Collects and returns the final error value/byte-count.
 */
static ssize_t nfs_direct_wait(struct nfs_direct_req *dreq)
{
	ssize_t result = -EIOCBQUEUED;

	/* Async requests don't wait here */
	if (dreq->iocb)
		goto out;

	result = wait_for_completion_killable(&dreq->completion);

	if (!result) {
		result = dreq->count;
		WARN_ON_ONCE(dreq->count < 0);
	}
	if (!result)
		result = dreq->error;

out:
	return (ssize_t) result;
}

/*
 * Synchronous I/O uses a stack-allocated iocb. Thus we can't trust
 * the iocb is still valid here if this is a synchronous request.
 */
static void nfs_direct_complete(struct nfs_direct_req *dreq)
{
	struct inode *inode = dreq->inode;

	inode_dio_end(inode);

	if (dreq->iocb) {
		long res = (long) dreq->error;
		if (dreq->count != 0) {
			res = (long) dreq->count;
			WARN_ON_ONCE(dreq->count < 0);
		}
		dreq->iocb->ki_complete(dreq->iocb, res);
	}

	complete(&dreq->completion);

	nfs_direct_req_release(dreq);
}

static void nfs_direct_read_completion(struct nfs_pgio_header *hdr)
{
	unsigned long bytes = 0;
	struct nfs_direct_req *dreq = hdr->dreq;

	spin_lock(&dreq->lock);
	if (test_bit(NFS_IOHDR_REDO, &hdr->flags)) {
		spin_unlock(&dreq->lock);
		goto out_put;
	}

	nfs_direct_count_bytes(dreq, hdr);
	spin_unlock(&dreq->lock);

	while (!list_empty(&hdr->pages)) {
		struct nfs_page *req = nfs_list_entry(hdr->pages.next);
		struct page *page = req->wb_page;

		if (!PageCompound(page) && bytes < hdr->good_bytes &&
		    (dreq->flags == NFS_ODIRECT_SHOULD_DIRTY))
			set_page_dirty(page);
		bytes += req->wb_bytes;
		nfs_list_remove_request(req);
		nfs_release_request(req);
	}
out_put:
	if (put_dreq(dreq))
		nfs_direct_complete(dreq);
	hdr->release(hdr);
}

static void nfs_read_sync_pgio_error(struct list_head *head, int error)
{
	struct nfs_page *req;

	while (!list_empty(head)) {
		req = nfs_list_entry(head->next);
		nfs_list_remove_request(req);
		nfs_release_request(req);
	}
}

static void nfs_direct_pgio_init(struct nfs_pgio_header *hdr)
{
	get_dreq(hdr->dreq);
}

static const struct nfs_pgio_completion_ops nfs_direct_read_completion_ops = {
	.error_cleanup = nfs_read_sync_pgio_error,
	.init_hdr = nfs_direct_pgio_init,
	.completion = nfs_direct_read_completion,
};

/*
 * For each rsize'd chunk of the user's buffer, dispatch an NFS READ
 * operation. If nfs_readdata_alloc() or get_user_pages() fails,
 * bail and stop sending more reads. Read length accounting is
 * handled automatically by nfs_direct_read_result(). Otherwise, if
 * no requests have been sent, just return an error.
 */
static ssize_t nfs_direct_read_schedule_iovec(struct nfs_direct_req *dreq,
					      struct iov_iter *iter,
					      loff_t pos)
{
	struct nfs_pageio_descriptor desc;
	struct inode *inode = dreq->inode;
	ssize_t result = -EINVAL;
	size_t requested_bytes = 0;
	size_t rsize = max_t(size_t, NFS_SERVER(inode)->rsize, PAGE_SIZE);

	nfs_pageio_init_read(&desc, dreq->inode, false,
			     &nfs_direct_read_completion_ops);
	get_dreq(dreq);
	desc.pg_dreq = dreq;
	inode_dio_begin(inode);

	while (iov_iter_count(iter)) {
		struct page **pagevec;
		size_t bytes;
		size_t pgbase;
		unsigned npages, i;

		result = iov_iter_get_pages_alloc2(iter, &pagevec,
						   rsize, &pgbase);
		if (result < 0)
			break;

		bytes = result;
		npages = (result + pgbase + PAGE_SIZE - 1) / PAGE_SIZE;
		for (i = 0; i < npages; i++) {
			struct nfs_page *req;
			unsigned int req_len = min_t(size_t, bytes, PAGE_SIZE - pgbase);
			/* XXX do we need to do the eof zeroing found in async_filler? */
			req = nfs_create_request(dreq->ctx, pagevec[i],
						 pgbase, req_len);
			if (IS_ERR(req)) {
				result = PTR_ERR(req);
				break;
			}
			req->wb_index = pos >> PAGE_SHIFT;
			req->wb_offset = pos & ~PAGE_MASK;
			if (!nfs_pageio_add_request(&desc, req)) {
				result = desc.pg_error;
				nfs_release_request(req);
				break;
			}
			pgbase = 0;
			bytes -= req_len;
			requested_bytes += req_len;
			pos += req_len;
			dreq->bytes_left -= req_len;
		}
		nfs_direct_release_pages(pagevec, npages);
		kvfree(pagevec);
		if (result < 0)
			break;
	}

	nfs_pageio_complete(&desc);

	/*
	 * If no bytes were started, return the error, and let the
	 * generic layer handle the completion.
	 */
	if (requested_bytes == 0) {
		inode_dio_end(inode);
		nfs_direct_req_release(dreq);
		return result < 0 ? result : -EIO;
	}

	if (put_dreq(dreq))
		nfs_direct_complete(dreq);
	return requested_bytes;
}
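
/*
 * Illustration of the pagevec arithmetic used above (assumed numbers,
 * PAGE_SIZE = 4096): if iov_iter_get_pages_alloc2() returns 8192 bytes
 * starting at pgbase = 512 within the first page, then
 * npages = (8192 + 512 + 4095) / 4096 = 3.  The per-page requests cover
 * 3584, 4096, and 512 bytes respectively, which again totals 8192 bytes.
 * The write scheduling path below uses the same chunking.
 */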

/**
 * nfs_file_direct_read - file direct read operation for NFS files
 * @iocb: target I/O control block
 * @iter: vector of user buffers into which to read data
 * @swap: flag indicating this is swap IO, not O_DIRECT IO
 *
 * We use this function for direct reads instead of calling
 * generic_file_aio_read() in order to avoid gfar's check to see if
 * the request starts before the end of the file. For that check
 * to work, we must generate a GETATTR before each direct read, and
 * even then there is a window between the GETATTR and the subsequent
 * READ where the file size could change. Our preference is simply
 * to do all reads the application wants, and the server will take
 * care of managing the end of file boundary.
 *
 * This function also eliminates unnecessarily updating the file's
 * atime locally, as the NFS server sets the file's atime, and this
 * client must read the updated atime from the server back into its
 * cache.
 */
ssize_t nfs_file_direct_read(struct kiocb *iocb, struct iov_iter *iter,
			     bool swap)
{
	struct file *file = iocb->ki_filp;
	struct address_space *mapping = file->f_mapping;
	struct inode *inode = mapping->host;
	struct nfs_direct_req *dreq;
	struct nfs_lock_context *l_ctx;
	ssize_t result, requested;
	size_t count = iov_iter_count(iter);
	nfs_add_stats(mapping->host, NFSIOS_DIRECTREADBYTES, count);

	dfprintk(FILE, "NFS: direct read(%pD2, %zd@%Ld)\n",
		 file, count, (long long) iocb->ki_pos);

	result = 0;
	if (!count)
		goto out;

	task_io_account_read(count);

	result = -ENOMEM;
	dreq = nfs_direct_req_alloc();
	if (dreq == NULL)
		goto out;

	dreq->inode = inode;
	dreq->bytes_left = dreq->max_count = count;
	dreq->io_start = iocb->ki_pos;
	dreq->ctx = get_nfs_open_context(nfs_file_open_context(iocb->ki_filp));
	l_ctx = nfs_get_lock_context(dreq->ctx);
	if (IS_ERR(l_ctx)) {
		result = PTR_ERR(l_ctx);
		nfs_direct_req_release(dreq);
		goto out_release;
	}
	dreq->l_ctx = l_ctx;
	if (!is_sync_kiocb(iocb))
		dreq->iocb = iocb;

	if (user_backed_iter(iter))
		dreq->flags = NFS_ODIRECT_SHOULD_DIRTY;

	if (!swap)
		nfs_start_io_direct(inode);

	NFS_I(inode)->read_io += count;
	requested = nfs_direct_read_schedule_iovec(dreq, iter, iocb->ki_pos);

	if (!swap)
		nfs_end_io_direct(inode);

	if (requested > 0) {
		result = nfs_direct_wait(dreq);
		if (result > 0) {
			requested -= result;
			iocb->ki_pos += result;
		}
		iov_iter_revert(iter, requested);
	} else {
		result = requested;
	}

out_release:
	nfs_direct_req_release(dreq);
out:
	return result;
}

static void nfs_direct_add_page_head(struct list_head *list,
				     struct nfs_page *req)
{
	struct nfs_page *head = req->wb_head;

	if (!list_empty(&head->wb_list) || !nfs_lock_request(head))
		return;
	if (!list_empty(&head->wb_list)) {
		nfs_unlock_request(head);
		return;
	}
	list_add(&head->wb_list, list);
	kref_get(&head->wb_kref);
	kref_get(&head->wb_kref);
}

static void nfs_direct_join_group(struct list_head *list, struct inode *inode)
{
	struct nfs_page *req, *subreq;

	list_for_each_entry(req, list, wb_list) {
		if (req->wb_head != req) {
			nfs_direct_add_page_head(&req->wb_list, req);
			continue;
		}
		subreq = req->wb_this_page;
		if (subreq == req)
			continue;
		do {
			/*
			 * Remove subrequests from this list before freeing
			 * them in the call to nfs_join_page_group().
			 */
			if (!list_empty(&subreq->wb_list)) {
				nfs_list_remove_request(subreq);
				nfs_release_request(subreq);
			}
		} while ((subreq = subreq->wb_this_page) != req);
		nfs_join_page_group(req, inode);
	}
}

static void
nfs_direct_write_scan_commit_list(struct inode *inode,
				  struct list_head *list,
				  struct nfs_commit_info *cinfo)
{
	mutex_lock(&NFS_I(cinfo->inode)->commit_mutex);
	pnfs_recover_commit_reqs(list, cinfo);
	nfs_scan_commit_list(&cinfo->mds->list, list, cinfo, 0);
	mutex_unlock(&NFS_I(cinfo->inode)->commit_mutex);
}

static void nfs_direct_write_reschedule(struct nfs_direct_req *dreq)
{
	struct nfs_pageio_descriptor desc;
	struct nfs_page *req, *tmp;
	LIST_HEAD(reqs);
	struct nfs_commit_info cinfo;
	LIST_HEAD(failed);

	nfs_init_cinfo_from_dreq(&cinfo, dreq);
	nfs_direct_write_scan_commit_list(dreq->inode, &reqs, &cinfo);

	nfs_direct_join_group(&reqs, dreq->inode);

	dreq->count = 0;
	dreq->max_count = 0;
	list_for_each_entry(req, &reqs, wb_list)
		dreq->max_count += req->wb_bytes;
	nfs_clear_pnfs_ds_commit_verifiers(&dreq->ds_cinfo);
	get_dreq(dreq);

	nfs_pageio_init_write(&desc, dreq->inode, FLUSH_STABLE, false,
			      &nfs_direct_write_completion_ops);
	desc.pg_dreq = dreq;

	list_for_each_entry_safe(req, tmp, &reqs, wb_list) {
		/* Bump the transmission count */
		req->wb_nio++;
		if (!nfs_pageio_add_request(&desc, req)) {
			nfs_list_move_request(req, &failed);
			spin_lock(&cinfo.inode->i_lock);
			dreq->flags = 0;
			if (desc.pg_error < 0)
				dreq->error = desc.pg_error;
			else
				dreq->error = -EIO;
			spin_unlock(&cinfo.inode->i_lock);
		}
		nfs_release_request(req);
	}
	nfs_pageio_complete(&desc);

	while (!list_empty(&failed)) {
		req = nfs_list_entry(failed.next);
		nfs_list_remove_request(req);
		nfs_unlock_and_release_request(req);
	}

	if (put_dreq(dreq))
		nfs_direct_write_complete(dreq);
}

static void nfs_direct_commit_complete(struct nfs_commit_data *data)
{
	const struct nfs_writeverf *verf = data->res.verf;
	struct nfs_direct_req *dreq = data->dreq;
	struct nfs_commit_info cinfo;
	struct nfs_page *req;
	int status = data->task.tk_status;

	trace_nfs_direct_commit_complete(dreq);

	if (status < 0) {
		/* Errors in commit are fatal */
		dreq->error = status;
		dreq->max_count = 0;
		dreq->count = 0;
		dreq->flags = NFS_ODIRECT_DONE;
	} else {
		status = dreq->error;
	}

	nfs_init_cinfo_from_dreq(&cinfo, dreq);

	while (!list_empty(&data->pages)) {
		req = nfs_list_entry(data->pages.next);
		nfs_list_remove_request(req);
		if (status >= 0 && !nfs_write_match_verf(verf, req)) {
			dreq->flags = NFS_ODIRECT_RESCHED_WRITES;
			/*
			 * Despite the reboot, the write was successful,
			 * so reset wb_nio.
			 */
			req->wb_nio = 0;
			nfs_mark_request_commit(req, NULL, &cinfo, 0);
		} else /* Error or match */
			nfs_release_request(req);
		nfs_unlock_and_release_request(req);
	}

	if (nfs_commit_end(cinfo.mds))
		nfs_direct_write_complete(dreq);
}
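
/*
 * Illustrative scenario for the verifier check above (details assumed for
 * the example): the client sends UNSTABLE WRITEs and saves the server's
 * write verifier in each request.  If the server reboots before the COMMIT,
 * the COMMIT reply carries a different verifier, so nfs_write_match_verf()
 * fails; the affected requests are re-marked for commit and the direct
 * request is flagged NFS_ODIRECT_RESCHED_WRITES, so the data is transmitted
 * again rather than silently lost.
 */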

static void nfs_direct_resched_write(struct nfs_commit_info *cinfo,
				     struct nfs_page *req)
{
	struct nfs_direct_req *dreq = cinfo->dreq;

	trace_nfs_direct_resched_write(dreq);

	spin_lock(&dreq->lock);
	if (dreq->flags != NFS_ODIRECT_DONE)
		dreq->flags = NFS_ODIRECT_RESCHED_WRITES;
	spin_unlock(&dreq->lock);
	nfs_mark_request_commit(req, NULL, cinfo, 0);
}

static const struct nfs_commit_completion_ops nfs_direct_commit_completion_ops = {
	.completion = nfs_direct_commit_complete,
	.resched_write = nfs_direct_resched_write,
};

static void nfs_direct_commit_schedule(struct nfs_direct_req *dreq)
{
	int res;
	struct nfs_commit_info cinfo;
	LIST_HEAD(mds_list);

	nfs_init_cinfo_from_dreq(&cinfo, dreq);
	nfs_scan_commit(dreq->inode, &mds_list, &cinfo);
	res = nfs_generic_commit_list(dreq->inode, &mds_list, 0, &cinfo);
	if (res < 0) /* res == -ENOMEM */
		nfs_direct_write_reschedule(dreq);
}

static void nfs_direct_write_clear_reqs(struct nfs_direct_req *dreq)
{
	struct nfs_commit_info cinfo;
	struct nfs_page *req;
	LIST_HEAD(reqs);

	nfs_init_cinfo_from_dreq(&cinfo, dreq);
	nfs_direct_write_scan_commit_list(dreq->inode, &reqs, &cinfo);

	while (!list_empty(&reqs)) {
		req = nfs_list_entry(reqs.next);
		nfs_list_remove_request(req);
		nfs_release_request(req);
		nfs_unlock_and_release_request(req);
	}
}

static void nfs_direct_write_schedule_work(struct work_struct *work)
{
	struct nfs_direct_req *dreq = container_of(work, struct nfs_direct_req, work);
	int flags = dreq->flags;

	dreq->flags = 0;
	switch (flags) {
	case NFS_ODIRECT_DO_COMMIT:
		nfs_direct_commit_schedule(dreq);
		break;
	case NFS_ODIRECT_RESCHED_WRITES:
		nfs_direct_write_reschedule(dreq);
		break;
	default:
		nfs_direct_write_clear_reqs(dreq);
		nfs_zap_mapping(dreq->inode, dreq->inode->i_mapping);
		nfs_direct_complete(dreq);
	}
}
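
/*
 * Summary of the dreq->flags state machine driven above (descriptive
 * comment added for clarity): a direct write whose WRITE replies were
 * UNSTABLE is marked NFS_ODIRECT_DO_COMMIT and a COMMIT is scheduled;
 * a request that must be retransmitted (via nfs_direct_write_reschedule_io(),
 * nfs_direct_resched_write(), or a commit verifier mismatch) marks the
 * request NFS_ODIRECT_RESCHED_WRITES and the data is resent with
 * FLUSH_STABLE; anything else falls through to the default case, which
 * releases outstanding requests, invalidates the inode's page cache, and
 * completes the direct request.
 */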

static void nfs_direct_write_complete(struct nfs_direct_req *dreq)
{
	trace_nfs_direct_write_complete(dreq);
	queue_work(nfsiod_workqueue, &dreq->work); /* Calls nfs_direct_write_schedule_work */
}

static void nfs_direct_write_completion(struct nfs_pgio_header *hdr)
{
	struct nfs_direct_req *dreq = hdr->dreq;
	struct nfs_commit_info cinfo;
	struct nfs_page *req = nfs_list_entry(hdr->pages.next);
	int flags = NFS_ODIRECT_DONE;

	trace_nfs_direct_write_completion(dreq);

	nfs_init_cinfo_from_dreq(&cinfo, dreq);

	spin_lock(&dreq->lock);
	if (test_bit(NFS_IOHDR_REDO, &hdr->flags)) {
		spin_unlock(&dreq->lock);
		goto out_put;
	}

	nfs_direct_count_bytes(dreq, hdr);
	if (test_bit(NFS_IOHDR_UNSTABLE_WRITES, &hdr->flags)) {
		if (!dreq->flags)
			dreq->flags = NFS_ODIRECT_DO_COMMIT;
		flags = dreq->flags;
	}
	spin_unlock(&dreq->lock);

	while (!list_empty(&hdr->pages)) {
		req = nfs_list_entry(hdr->pages.next);
		nfs_list_remove_request(req);
		if (flags == NFS_ODIRECT_DO_COMMIT) {
			kref_get(&req->wb_kref);
			memcpy(&req->wb_verf, &hdr->verf.verifier,
			       sizeof(req->wb_verf));
			nfs_mark_request_commit(req, hdr->lseg, &cinfo,
						hdr->ds_commit_idx);
		} else if (flags == NFS_ODIRECT_RESCHED_WRITES) {
			kref_get(&req->wb_kref);
			nfs_mark_request_commit(req, NULL, &cinfo, 0);
		}
		nfs_unlock_and_release_request(req);
	}

out_put:
	if (put_dreq(dreq))
		nfs_direct_write_complete(dreq);
	hdr->release(hdr);
}

static void nfs_write_sync_pgio_error(struct list_head *head, int error)
{
	struct nfs_page *req;

	while (!list_empty(head)) {
		req = nfs_list_entry(head->next);
		nfs_list_remove_request(req);
		nfs_unlock_and_release_request(req);
	}
}

static void nfs_direct_write_reschedule_io(struct nfs_pgio_header *hdr)
{
	struct nfs_direct_req *dreq = hdr->dreq;

	trace_nfs_direct_write_reschedule_io(dreq);

	spin_lock(&dreq->lock);
	if (dreq->error == 0) {
		dreq->flags = NFS_ODIRECT_RESCHED_WRITES;
		/* fake unstable write to let common nfs resend pages */
		hdr->verf.committed = NFS_UNSTABLE;
		hdr->good_bytes = hdr->args.offset + hdr->args.count -
			hdr->io_start;
	}
	spin_unlock(&dreq->lock);
}

static const struct nfs_pgio_completion_ops nfs_direct_write_completion_ops = {
	.error_cleanup = nfs_write_sync_pgio_error,
	.init_hdr = nfs_direct_pgio_init,
	.completion = nfs_direct_write_completion,
	.reschedule_io = nfs_direct_write_reschedule_io,
};

/*
 * NB: Return the value of the first error return code. Subsequent
 *     errors after the first one are ignored.
 */
/*
 * For each wsize'd chunk of the user's buffer, dispatch an NFS WRITE
 * operation. If nfs_writedata_alloc() or get_user_pages() fails,
 * bail and stop sending more writes. Write length accounting is
 * handled automatically by nfs_direct_write_result(). Otherwise, if
 * no requests have been sent, just return an error.
 */
static ssize_t nfs_direct_write_schedule_iovec(struct nfs_direct_req *dreq,
					       struct iov_iter *iter,
					       loff_t pos, int ioflags)
{
	struct nfs_pageio_descriptor desc;
	struct inode *inode = dreq->inode;
	ssize_t result = 0;
	size_t requested_bytes = 0;
	size_t wsize = max_t(size_t, NFS_SERVER(inode)->wsize, PAGE_SIZE);

	trace_nfs_direct_write_schedule_iovec(dreq);

	nfs_pageio_init_write(&desc, inode, ioflags, false,
			      &nfs_direct_write_completion_ops);
	desc.pg_dreq = dreq;
	get_dreq(dreq);
	inode_dio_begin(inode);

	NFS_I(inode)->write_io += iov_iter_count(iter);
	while (iov_iter_count(iter)) {
		struct page **pagevec;
		size_t bytes;
		size_t pgbase;
		unsigned npages, i;

		result = iov_iter_get_pages_alloc2(iter, &pagevec,
						   wsize, &pgbase);
		if (result < 0)
			break;

		bytes = result;
		npages = (result + pgbase + PAGE_SIZE - 1) / PAGE_SIZE;
		for (i = 0; i < npages; i++) {
			struct nfs_page *req;
			unsigned int req_len = min_t(size_t, bytes, PAGE_SIZE - pgbase);

			req = nfs_create_request(dreq->ctx, pagevec[i],
						 pgbase, req_len);
			if (IS_ERR(req)) {
				result = PTR_ERR(req);
				break;
			}

			if (desc.pg_error < 0) {
				nfs_free_request(req);
				result = desc.pg_error;
				break;
			}

			nfs_lock_request(req);
			req->wb_index = pos >> PAGE_SHIFT;
			req->wb_offset = pos & ~PAGE_MASK;
			if (!nfs_pageio_add_request(&desc, req)) {
				result = desc.pg_error;
				nfs_unlock_and_release_request(req);
				break;
			}
			pgbase = 0;
			bytes -= req_len;
			requested_bytes += req_len;
			pos += req_len;
			dreq->bytes_left -= req_len;
		}
		nfs_direct_release_pages(pagevec, npages);
		kvfree(pagevec);
		if (result < 0)
			break;
	}
	nfs_pageio_complete(&desc);

	/*
	 * If no bytes were started, return the error, and let the
	 * generic layer handle the completion.
	 */
	if (requested_bytes == 0) {
		inode_dio_end(inode);
		nfs_direct_req_release(dreq);
		return result < 0 ? result : -EIO;
	}

	if (put_dreq(dreq))
		nfs_direct_write_complete(dreq);
	return requested_bytes;
}

/**
 * nfs_file_direct_write - file direct write operation for NFS files
 * @iocb: target I/O control block
 * @iter: vector of user buffers from which to write data
 * @swap: flag indicating this is swap IO, not O_DIRECT IO
 *
 * We use this function for direct writes instead of calling
 * generic_file_aio_write() in order to avoid taking the inode
 * semaphore and updating the i_size. The NFS server will set
 * the new i_size and this client must read the updated size
 * back into its cache. We let the server do generic write
 * parameter checking and report problems.
 *
 * We eliminate local atime updates, see direct read above.
 *
 * We avoid unnecessary page cache invalidations for normal cached
 * readers of this file.
 *
 * Note that O_APPEND is not supported for NFS direct writes, as there
 * is no atomic O_APPEND write facility in the NFS protocol.
 */
ssize_t nfs_file_direct_write(struct kiocb *iocb, struct iov_iter *iter,
			      bool swap)
{
	ssize_t result, requested;
	size_t count;
	struct file *file = iocb->ki_filp;
	struct address_space *mapping = file->f_mapping;
	struct inode *inode = mapping->host;
	struct nfs_direct_req *dreq;
	struct nfs_lock_context *l_ctx;
	loff_t pos, end;

	dfprintk(FILE, "NFS: direct write(%pD2, %zd@%Ld)\n",
		 file, iov_iter_count(iter), (long long) iocb->ki_pos);

	if (swap)
		/* bypass generic checks */
		result = iov_iter_count(iter);
	else
		result = generic_write_checks(iocb, iter);
	if (result <= 0)
		return result;
	count = result;
	nfs_add_stats(mapping->host, NFSIOS_DIRECTWRITTENBYTES, count);

	pos = iocb->ki_pos;
	end = (pos + iov_iter_count(iter) - 1) >> PAGE_SHIFT;

	task_io_account_write(count);

	result = -ENOMEM;
	dreq = nfs_direct_req_alloc();
	if (!dreq)
		goto out;

	dreq->inode = inode;
	dreq->bytes_left = dreq->max_count = count;
	dreq->io_start = pos;
	dreq->ctx = get_nfs_open_context(nfs_file_open_context(iocb->ki_filp));
	l_ctx = nfs_get_lock_context(dreq->ctx);
	if (IS_ERR(l_ctx)) {
		result = PTR_ERR(l_ctx);
		nfs_direct_req_release(dreq);
		goto out_release;
	}
	dreq->l_ctx = l_ctx;
	if (!is_sync_kiocb(iocb))
		dreq->iocb = iocb;
	pnfs_init_ds_commit_info_ops(&dreq->ds_cinfo, inode);

	if (swap) {
		requested = nfs_direct_write_schedule_iovec(dreq, iter, pos,
							    FLUSH_STABLE);
	} else {
		nfs_start_io_direct(inode);

		requested = nfs_direct_write_schedule_iovec(dreq, iter, pos,
							    FLUSH_COND_STABLE);

		if (mapping->nrpages) {
			invalidate_inode_pages2_range(mapping,
						      pos >> PAGE_SHIFT, end);
		}

		nfs_end_io_direct(inode);
	}

	if (requested > 0) {
		result = nfs_direct_wait(dreq);
		if (result > 0) {
			requested -= result;
			iocb->ki_pos = pos + result;
			/* XXX: should check the generic_write_sync retval */
			generic_write_sync(iocb, result);
		}
		iov_iter_revert(iter, requested);
	} else {
		result = requested;
	}
	nfs_fscache_invalidate(inode, FSCACHE_INVAL_DIO_WRITE);
out_release:
	nfs_direct_req_release(dreq);
out:
	return result;
}

/**
 * nfs_init_directcache - create a slab cache for nfs_direct_req structures
 *
 */
int __init nfs_init_directcache(void)
{
	nfs_direct_cachep = kmem_cache_create("nfs_direct_cache",
					      sizeof(struct nfs_direct_req),
					      0, (SLAB_RECLAIM_ACCOUNT|
						  SLAB_MEM_SPREAD),
					      NULL);
	if (nfs_direct_cachep == NULL)
		return -ENOMEM;

	return 0;
}

/**
 * nfs_destroy_directcache - destroy the slab cache for nfs_direct_req structures
 *
 */
void nfs_destroy_directcache(void)
{
	kmem_cache_destroy(nfs_direct_cachep);
}