io.c
// SPDX-License-Identifier: GPL-2.0-or-later
/* kiocb-using read/write
 *
 * Copyright (C) 2021 Red Hat, Inc. All Rights Reserved.
 * Written by David Howells ([email protected])
 */
#include <linux/mount.h>
#include <linux/slab.h>
#include <linux/file.h>
#include <linux/uio.h>
#include <linux/falloc.h>
#include <linux/sched/mm.h>
#include <trace/events/fscache.h>
#include "internal.h"

struct cachefiles_kiocb {
	struct kiocb iocb;
	refcount_t ki_refcnt;
	loff_t start;
	union {
		size_t skipped;
		size_t len;
	};
	struct cachefiles_object *object;
	netfs_io_terminated_t term_func;
	void *term_func_priv;
	bool was_async;
	unsigned int inval_counter;	/* Copy of cookie->inval_counter */
	u64 b_writing;
};

static inline void cachefiles_put_kiocb(struct cachefiles_kiocb *ki)
{
	if (refcount_dec_and_test(&ki->ki_refcnt)) {
		cachefiles_put_object(ki->object, cachefiles_obj_put_ioreq);
		fput(ki->iocb.ki_filp);
		kfree(ki);
	}
}

/*
 * Handle completion of a read from the cache.
 */
static void cachefiles_read_complete(struct kiocb *iocb, long ret)
{
	struct cachefiles_kiocb *ki = container_of(iocb, struct cachefiles_kiocb, iocb);
	struct inode *inode = file_inode(ki->iocb.ki_filp);

	_enter("%ld", ret);

	if (ret < 0)
		trace_cachefiles_io_error(ki->object, inode, ret,
					  cachefiles_trace_read_error);

	if (ki->term_func) {
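		/* If the cookie was invalidated while this read was in
		 * flight, the data just read is no longer trustworthy:
		 * report it as stale rather than crediting the bytes.
		 */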
		if (ret >= 0) {
			if (ki->object->cookie->inval_counter == ki->inval_counter)
				ki->skipped += ret;
			else
				ret = -ESTALE;
		}

		ki->term_func(ki->term_func_priv, ret, ki->was_async);
	}

	cachefiles_put_kiocb(ki);
}

/*
 * Initiate a read from the cache.
 */
static int cachefiles_read(struct netfs_cache_resources *cres,
			   loff_t start_pos,
			   struct iov_iter *iter,
			   enum netfs_read_from_hole read_hole,
			   netfs_io_terminated_t term_func,
			   void *term_func_priv)
{
	struct cachefiles_object *object;
	struct cachefiles_kiocb *ki;
	struct file *file;
	unsigned int old_nofs;
	ssize_t ret = -ENOBUFS;
	size_t len = iov_iter_count(iter), skipped = 0;

	if (!fscache_wait_for_operation(cres, FSCACHE_WANT_READ))
		goto presubmission_error;

	fscache_count_read();
	object = cachefiles_cres_object(cres);
	file = cachefiles_cres_file(cres);

	_enter("%pD,%li,%llx,%zx/%llx",
	       file, file_inode(file)->i_ino, start_pos, len,
	       i_size_read(file_inode(file)));

	/* If the caller asked us to seek for data before doing the read, then
	 * we should do that now.  If we find a gap, we fill it with zeros.
	 */
	if (read_hole != NETFS_READ_HOLE_IGNORE) {
		loff_t off = start_pos, off2;

		off2 = cachefiles_inject_read_error();
		if (off2 == 0)
			off2 = vfs_llseek(file, off, SEEK_DATA);
		if (off2 < 0 && off2 >= (loff_t)-MAX_ERRNO && off2 != -ENXIO) {
			skipped = 0;
			ret = off2;
			goto presubmission_error;
		}

		if (off2 == -ENXIO || off2 >= start_pos + len) {
			/* The region is beyond the EOF or there's no more data
			 * in the region, so clear the rest of the buffer and
			 * return success.
			 */
			ret = -ENODATA;
			if (read_hole == NETFS_READ_HOLE_FAIL)
				goto presubmission_error;

			iov_iter_zero(len, iter);
			skipped = len;
			ret = 0;
			goto presubmission_error;
		}

		skipped = off2 - off;
		iov_iter_zero(skipped, iter);
	}

	ret = -ENOMEM;
	ki = kzalloc(sizeof(struct cachefiles_kiocb), GFP_KERNEL);
	if (!ki)
		goto presubmission_error;
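
	/* Two references are taken on the kiocb: one is dropped by this
	 * submission path before returning, the other by the completion
	 * handler (whether the I/O completes asynchronously or is completed
	 * inline below).
	 */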
	refcount_set(&ki->ki_refcnt, 2);

	ki->iocb.ki_filp = file;
	ki->iocb.ki_pos = start_pos + skipped;
	ki->iocb.ki_flags = IOCB_DIRECT;
	ki->iocb.ki_ioprio = get_current_ioprio();
	ki->skipped = skipped;
	ki->object = object;
	ki->inval_counter = cres->inval_counter;
	ki->term_func = term_func;
	ki->term_func_priv = term_func_priv;
	ki->was_async = true;

	if (ki->term_func)
		ki->iocb.ki_complete = cachefiles_read_complete;

	get_file(ki->iocb.ki_filp);
	cachefiles_grab_object(object, cachefiles_obj_get_ioreq);

	trace_cachefiles_read(object, file_inode(file), ki->iocb.ki_pos, len - skipped);
	old_nofs = memalloc_nofs_save();
	ret = cachefiles_inject_read_error();
	if (ret == 0)
		ret = vfs_iocb_iter_read(file, &ki->iocb, iter);
	memalloc_nofs_restore(old_nofs);
	switch (ret) {
	case -EIOCBQUEUED:
		goto in_progress;

	case -ERESTARTSYS:
	case -ERESTARTNOINTR:
	case -ERESTARTNOHAND:
	case -ERESTART_RESTARTBLOCK:
		/* There's no easy way to restart the syscall since other AIO's
		 * may be already running. Just fail this IO with EINTR.
		 */
		ret = -EINTR;
		fallthrough;
	default:
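		/* The read completed or failed synchronously: run the
		 * completion handler here.  Any byte count is reported
		 * through ->term_func (if supplied); the return value only
		 * carries the submission/error status.
		 */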
		ki->was_async = false;
		cachefiles_read_complete(&ki->iocb, ret);
		if (ret > 0)
			ret = 0;
		break;
	}

in_progress:
	cachefiles_put_kiocb(ki);
	_leave(" = %zd", ret);
	return ret;

presubmission_error:
	if (term_func)
		term_func(term_func_priv, ret < 0 ? ret : skipped, false);
	return ret;
}

/*
 * Query the occupancy of the cache in a region, returning where the next chunk
 * of data starts and how long it is.
 */
static int cachefiles_query_occupancy(struct netfs_cache_resources *cres,
				      loff_t start, size_t len, size_t granularity,
				      loff_t *_data_start, size_t *_data_len)
{
	struct cachefiles_object *object;
	struct file *file;
	loff_t off, off2;

	*_data_start = -1;
	*_data_len = 0;

	if (!fscache_wait_for_operation(cres, FSCACHE_WANT_READ))
		return -ENOBUFS;

	object = cachefiles_cres_object(cres);
	file = cachefiles_cres_file(cres);
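
	/* Never report occupancy at a finer granularity than the cache's
	 * block size, even if the caller asked for one.
	 */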
	granularity = max_t(size_t, object->volume->cache->bsize, granularity);

	_enter("%pD,%li,%llx,%zx/%llx",
	       file, file_inode(file)->i_ino, start, len,
	       i_size_read(file_inode(file)));

	off = cachefiles_inject_read_error();
	if (off == 0)
		off = vfs_llseek(file, start, SEEK_DATA);
	if (off == -ENXIO)
		return -ENODATA; /* Beyond EOF */
	if (off < 0 && off >= (loff_t)-MAX_ERRNO)
		return -ENOBUFS; /* Error. */
	if (round_up(off, granularity) >= start + len)
		return -ENODATA; /* No data in range */

	off2 = cachefiles_inject_read_error();
	if (off2 == 0)
		off2 = vfs_llseek(file, off, SEEK_HOLE);
	if (off2 == -ENXIO)
		return -ENODATA; /* Beyond EOF */
	if (off2 < 0 && off2 >= (loff_t)-MAX_ERRNO)
		return -ENOBUFS; /* Error. */

	/* Round away partial blocks */
	off = round_up(off, granularity);
	off2 = round_down(off2, granularity);
	if (off2 <= off)
		return -ENODATA;

	*_data_start = off;
	if (off2 > start + len)
		*_data_len = len;
	else
		*_data_len = off2 - off;
	return 0;
}

/*
 * Handle completion of a write to the cache.
 */
static void cachefiles_write_complete(struct kiocb *iocb, long ret)
{
	struct cachefiles_kiocb *ki = container_of(iocb, struct cachefiles_kiocb, iocb);
	struct cachefiles_object *object = ki->object;
	struct inode *inode = file_inode(ki->iocb.ki_filp);

	_enter("%ld", ret);

	/* Tell lockdep we inherited freeze protection from submission thread */
	__sb_writers_acquired(inode->i_sb, SB_FREEZE_WRITE);
	__sb_end_write(inode->i_sb, SB_FREEZE_WRITE);

	if (ret < 0)
		trace_cachefiles_io_error(object, inode, ret,
					  cachefiles_trace_write_error);
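
	/* The write is no longer in flight: drop its contribution to the
	 * cache's count of blocks being written and note that the cookie
	 * now has data in the cache.
	 */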
	atomic_long_sub(ki->b_writing, &object->volume->cache->b_writing);
	set_bit(FSCACHE_COOKIE_HAVE_DATA, &object->cookie->flags);
	if (ki->term_func)
		ki->term_func(ki->term_func_priv, ret, ki->was_async);
	cachefiles_put_kiocb(ki);
}

/*
 * Initiate a write to the cache.
 */
int __cachefiles_write(struct cachefiles_object *object,
		       struct file *file,
		       loff_t start_pos,
		       struct iov_iter *iter,
		       netfs_io_terminated_t term_func,
		       void *term_func_priv)
{
	struct cachefiles_cache *cache;
	struct cachefiles_kiocb *ki;
	struct inode *inode;
	unsigned int old_nofs;
	ssize_t ret;
	size_t len = iov_iter_count(iter);

	fscache_count_write();
	cache = object->volume->cache;

	_enter("%pD,%li,%llx,%zx/%llx",
	       file, file_inode(file)->i_ino, start_pos, len,
	       i_size_read(file_inode(file)));

	ki = kzalloc(sizeof(struct cachefiles_kiocb), GFP_KERNEL);
	if (!ki) {
		if (term_func)
			term_func(term_func_priv, -ENOMEM, false);
		return -ENOMEM;
	}

	refcount_set(&ki->ki_refcnt, 2);

	ki->iocb.ki_filp = file;
	ki->iocb.ki_pos = start_pos;
	ki->iocb.ki_flags = IOCB_DIRECT | IOCB_WRITE;
	ki->iocb.ki_ioprio = get_current_ioprio();
	ki->object = object;
	ki->start = start_pos;
	ki->len = len;
	ki->term_func = term_func;
	ki->term_func_priv = term_func_priv;
	ki->was_async = true;
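	/* Account, in cache blocks, for the amount of data being written:
	 * the length is rounded up to the cache's block size (1 << bshift).
	 */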
	ki->b_writing = (len + (1 << cache->bshift) - 1) >> cache->bshift;

	if (ki->term_func)
		ki->iocb.ki_complete = cachefiles_write_complete;
	atomic_long_add(ki->b_writing, &cache->b_writing);

	/* Open-code file_start_write here to grab freeze protection, which
	 * will be released by another thread in aio_complete_rw(). Fool
	 * lockdep by telling it the lock got released so that it doesn't
	 * complain about the held lock when we return to userspace.
	 */
	inode = file_inode(file);
	__sb_start_write(inode->i_sb, SB_FREEZE_WRITE);
	__sb_writers_release(inode->i_sb, SB_FREEZE_WRITE);

	get_file(ki->iocb.ki_filp);
	cachefiles_grab_object(object, cachefiles_obj_get_ioreq);

	trace_cachefiles_write(object, inode, ki->iocb.ki_pos, len);
	old_nofs = memalloc_nofs_save();
	ret = cachefiles_inject_write_error();
	if (ret == 0)
		ret = vfs_iocb_iter_write(file, &ki->iocb, iter);
	memalloc_nofs_restore(old_nofs);
	switch (ret) {
	case -EIOCBQUEUED:
		goto in_progress;

	case -ERESTARTSYS:
	case -ERESTARTNOINTR:
	case -ERESTARTNOHAND:
	case -ERESTART_RESTARTBLOCK:
		/* There's no easy way to restart the syscall since other AIO's
		 * may be already running. Just fail this IO with EINTR.
		 */
		ret = -EINTR;
		fallthrough;
	default:
		ki->was_async = false;
		cachefiles_write_complete(&ki->iocb, ret);
		if (ret > 0)
			ret = 0;
		break;
	}

in_progress:
	cachefiles_put_kiocb(ki);
	_leave(" = %zd", ret);
	return ret;
}

static int cachefiles_write(struct netfs_cache_resources *cres,
			    loff_t start_pos,
			    struct iov_iter *iter,
			    netfs_io_terminated_t term_func,
			    void *term_func_priv)
{
	if (!fscache_wait_for_operation(cres, FSCACHE_WANT_WRITE)) {
		if (term_func)
			term_func(term_func_priv, -ENOBUFS, false);
		return -ENOBUFS;
	}

	return __cachefiles_write(cachefiles_cres_object(cres),
				  cachefiles_cres_file(cres),
				  start_pos, iter,
				  term_func, term_func_priv);
}

/*
 * Prepare a read operation, shortening it to a cached/uncached
 * boundary as appropriate.
 */
static enum netfs_io_source cachefiles_prepare_read(struct netfs_io_subrequest *subreq,
						    loff_t i_size)
{
	enum cachefiles_prepare_read_trace why;
	struct netfs_io_request *rreq = subreq->rreq;
	struct netfs_cache_resources *cres = &rreq->cache_resources;
	struct cachefiles_object *object;
	struct cachefiles_cache *cache;
	struct fscache_cookie *cookie = fscache_cres_cookie(cres);
	const struct cred *saved_cred;
	struct file *file = cachefiles_cres_file(cres);
	enum netfs_io_source ret = NETFS_DOWNLOAD_FROM_SERVER;
	loff_t off, to;
	ino_t ino = file ? file_inode(file)->i_ino : 0;
	int rc;

	_enter("%zx @%llx/%llx", subreq->len, subreq->start, i_size);

	if (subreq->start >= i_size) {
		ret = NETFS_FILL_WITH_ZEROES;
		why = cachefiles_trace_read_after_eof;
		goto out_no_object;
	}

	if (test_bit(FSCACHE_COOKIE_NO_DATA_TO_READ, &cookie->flags)) {
		__set_bit(NETFS_SREQ_COPY_TO_CACHE, &subreq->flags);
		why = cachefiles_trace_read_no_data;
		if (!test_bit(NETFS_SREQ_ONDEMAND, &subreq->flags))
			goto out_no_object;
	}

	/* The object and the file may be being created in the background. */
	if (!file) {
		why = cachefiles_trace_read_no_file;
		if (!fscache_wait_for_operation(cres, FSCACHE_WANT_READ))
			goto out_no_object;
		file = cachefiles_cres_file(cres);
		if (!file)
			goto out_no_object;
		ino = file_inode(file)->i_ino;
	}

	object = cachefiles_cres_object(cres);
	cache = object->volume->cache;
	cachefiles_begin_secure(cache, &saved_cred);
retry:
	off = cachefiles_inject_read_error();
	if (off == 0)
		off = vfs_llseek(file, subreq->start, SEEK_DATA);
	if (off < 0 && off >= (loff_t)-MAX_ERRNO) {
		if (off == (loff_t)-ENXIO) {
			why = cachefiles_trace_read_seek_nxio;
			goto download_and_store;
		}
		trace_cachefiles_io_error(object, file_inode(file), off,
					  cachefiles_trace_seek_error);
		why = cachefiles_trace_read_seek_error;
		goto out;
	}

	if (off >= subreq->start + subreq->len) {
		why = cachefiles_trace_read_found_hole;
		goto download_and_store;
	}

	if (off > subreq->start) {
		off = round_up(off, cache->bsize);
		subreq->len = off - subreq->start;
		why = cachefiles_trace_read_found_part;
		goto download_and_store;
	}

	to = cachefiles_inject_read_error();
	if (to == 0)
		to = vfs_llseek(file, subreq->start, SEEK_HOLE);
	if (to < 0 && to >= (loff_t)-MAX_ERRNO) {
		trace_cachefiles_io_error(object, file_inode(file), to,
					  cachefiles_trace_seek_error);
		why = cachefiles_trace_read_seek_error;
		goto out;
	}

	if (to < subreq->start + subreq->len) {
		if (subreq->start + subreq->len >= i_size)
			to = round_up(to, cache->bsize);
		else
			to = round_down(to, cache->bsize);
		subreq->len = to - subreq->start;
	}

	why = cachefiles_trace_read_have_data;
	ret = NETFS_READ_FROM_CACHE;
	goto out;
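
	/* The data isn't (fully) present in the cache, so have the netfs
	 * fetch it and mark the subrequest for copying back into the cache.
	 * In on-demand mode, ask the daemon to fetch the data and then retry
	 * the seek.
	 */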
download_and_store:
	__set_bit(NETFS_SREQ_COPY_TO_CACHE, &subreq->flags);
	if (test_bit(NETFS_SREQ_ONDEMAND, &subreq->flags)) {
		rc = cachefiles_ondemand_read(object, subreq->start,
					      subreq->len);
		if (!rc) {
			__clear_bit(NETFS_SREQ_ONDEMAND, &subreq->flags);
			goto retry;
		}
		ret = NETFS_INVALID_READ;
	}
out:
	cachefiles_end_secure(cache, saved_cred);
out_no_object:
	trace_cachefiles_prep_read(subreq, ret, why, ino);
	return ret;
}

/*
 * Prepare for a write to occur.
 */
int __cachefiles_prepare_write(struct cachefiles_object *object,
			       struct file *file,
			       loff_t *_start, size_t *_len,
			       bool no_space_allocated_yet)
{
	struct cachefiles_cache *cache = object->volume->cache;
	loff_t start = *_start, pos;
	size_t len = *_len, down;
	int ret;

	/* Round to DIO size */
	down = start - round_down(start, PAGE_SIZE);
	*_start = start - down;
	*_len = round_up(down + len, PAGE_SIZE);

	/* We need to work out whether there's sufficient disk space to perform
	 * the write - but we can skip that check if we have space already
	 * allocated.
	 */
	if (no_space_allocated_yet)
		goto check_space;

	pos = cachefiles_inject_read_error();
	if (pos == 0)
		pos = vfs_llseek(file, *_start, SEEK_DATA);
	if (pos < 0 && pos >= (loff_t)-MAX_ERRNO) {
		if (pos == -ENXIO)
			goto check_space; /* Unallocated tail */
		trace_cachefiles_io_error(object, file_inode(file), pos,
					  cachefiles_trace_seek_error);
		return pos;
	}
	if ((u64)pos >= (u64)*_start + *_len)
		goto check_space; /* Unallocated region */

	/* We have a block that's at least partially filled - if we're low on
	 * space, we need to see if it's fully allocated.  If it's not, we may
	 * want to cull it.
	 */
	if (cachefiles_has_space(cache, 0, *_len / PAGE_SIZE,
				 cachefiles_has_space_check) == 0)
		return 0; /* Enough space to simply overwrite the whole block */

	pos = cachefiles_inject_read_error();
	if (pos == 0)
		pos = vfs_llseek(file, *_start, SEEK_HOLE);
	if (pos < 0 && pos >= (loff_t)-MAX_ERRNO) {
		trace_cachefiles_io_error(object, file_inode(file), pos,
					  cachefiles_trace_seek_error);
		return pos;
	}
	if ((u64)pos >= (u64)*_start + *_len)
		return 0; /* Fully allocated */

	/* Partially allocated, but insufficient space: cull. */
	fscache_count_no_write_space();
	ret = cachefiles_inject_remove_error();
	if (ret == 0)
		ret = vfs_fallocate(file, FALLOC_FL_PUNCH_HOLE | FALLOC_FL_KEEP_SIZE,
				    *_start, *_len);
	if (ret < 0) {
		trace_cachefiles_io_error(object, file_inode(file), ret,
					  cachefiles_trace_fallocate_error);
		cachefiles_io_error_obj(object,
					"CacheFiles: fallocate failed (%d)\n", ret);
		ret = -EIO;
	}

	return ret;

check_space:
	return cachefiles_has_space(cache, 0, *_len / PAGE_SIZE,
				    cachefiles_has_space_for_write);
}

static int cachefiles_prepare_write(struct netfs_cache_resources *cres,
				    loff_t *_start, size_t *_len, loff_t i_size,
				    bool no_space_allocated_yet)
{
	struct cachefiles_object *object = cachefiles_cres_object(cres);
	struct cachefiles_cache *cache = object->volume->cache;
	const struct cred *saved_cred;
	int ret;

	if (!cachefiles_cres_file(cres)) {
		if (!fscache_wait_for_operation(cres, FSCACHE_WANT_WRITE))
			return -ENOBUFS;
		if (!cachefiles_cres_file(cres))
			return -ENOBUFS;
	}

	cachefiles_begin_secure(cache, &saved_cred);
	ret = __cachefiles_prepare_write(object, cachefiles_cres_file(cres),
					 _start, _len,
					 no_space_allocated_yet);
	cachefiles_end_secure(cache, saved_cred);
	return ret;
}

/*
 * Clean up an operation.
 */
static void cachefiles_end_operation(struct netfs_cache_resources *cres)
{
	struct file *file = cachefiles_cres_file(cres);

	if (file)
		fput(file);
	fscache_end_cookie_access(fscache_cres_cookie(cres), fscache_access_io_end);
}

static const struct netfs_cache_ops cachefiles_netfs_cache_ops = {
	.end_operation = cachefiles_end_operation,
	.read = cachefiles_read,
	.write = cachefiles_write,
	.prepare_read = cachefiles_prepare_read,
	.prepare_write = cachefiles_prepare_write,
	.query_occupancy = cachefiles_query_occupancy,
};

/*
 * Open the cache file when beginning a cache operation.
 */
bool cachefiles_begin_operation(struct netfs_cache_resources *cres,
				enum fscache_want_state want_state)
{
	struct cachefiles_object *object = cachefiles_cres_object(cres);

	if (!cachefiles_cres_file(cres)) {
		cres->ops = &cachefiles_netfs_cache_ops;
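		/* Pin the backing file in the cache resources so that later
		 * operations can use it without taking object->lock again;
		 * the reference is dropped in cachefiles_end_operation().
		 */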
		if (object->file) {
			spin_lock(&object->lock);
			if (!cres->cache_priv2 && object->file)
				cres->cache_priv2 = get_file(object->file);
			spin_unlock(&object->lock);
		}
	}

	if (!cachefiles_cres_file(cres) && want_state != FSCACHE_WANT_PARAMS) {
		pr_err("failed to get cres->file\n");
		return false;
	}

	return true;
}