  1. // SPDX-License-Identifier: GPL-2.0-or-later
  2. /* Cache data I/O routines
  3. *
  4. * Copyright (C) 2021 Red Hat, Inc. All Rights Reserved.
  5. * Written by David Howells ([email protected])
  6. */
  7. #define FSCACHE_DEBUG_LEVEL OPERATION
  8. #include <linux/fscache-cache.h>
  9. #include <linux/uio.h>
  10. #include <linux/bvec.h>
  11. #include <linux/slab.h>
  12. #include <linux/uio.h>
  13. #include "internal.h"
  14. /**
  15. * fscache_wait_for_operation - Wait for an object become accessible
  16. * @cres: The cache resources for the operation being performed
  17. * @want_state: The minimum state the object must be at
  18. *
  19. * See if the target cache object is at the specified minimum state of
  20. * accessibility yet, and if not, wait for it.
  21. */
  22. bool fscache_wait_for_operation(struct netfs_cache_resources *cres,
  23. enum fscache_want_state want_state)
  24. {
  25. struct fscache_cookie *cookie = fscache_cres_cookie(cres);
  26. enum fscache_cookie_state state;
  27. again:
  28. if (!fscache_cache_is_live(cookie->volume->cache)) {
  29. _leave(" [broken]");
  30. return false;
  31. }
  32. state = fscache_cookie_state(cookie);
  33. _enter("c=%08x{%u},%x", cookie->debug_id, state, want_state);
  34. switch (state) {
  35. case FSCACHE_COOKIE_STATE_CREATING:
  36. case FSCACHE_COOKIE_STATE_INVALIDATING:
  37. if (want_state == FSCACHE_WANT_PARAMS)
  38. goto ready; /* There can be no content */
  39. fallthrough;
  40. case FSCACHE_COOKIE_STATE_LOOKING_UP:
  41. case FSCACHE_COOKIE_STATE_LRU_DISCARDING:
  42. wait_var_event(&cookie->state,
  43. fscache_cookie_state(cookie) != state);
  44. goto again;
  45. case FSCACHE_COOKIE_STATE_ACTIVE:
  46. goto ready;
  47. case FSCACHE_COOKIE_STATE_DROPPED:
  48. case FSCACHE_COOKIE_STATE_RELINQUISHING:
  49. default:
  50. _leave(" [not live]");
  51. return false;
  52. }
  53. ready:
  54. if (!cres->cache_priv2)
  55. return cookie->volume->cache->ops->begin_operation(cres, want_state);
  56. return true;
  57. }
  58. EXPORT_SYMBOL(fscache_wait_for_operation);
  59. /*
  60. * Begin an I/O operation on the cache, waiting till we reach the right state.
  61. *
  62. * Attaches the resources required to the operation resources record.
  63. */
  64. static int fscache_begin_operation(struct netfs_cache_resources *cres,
  65. struct fscache_cookie *cookie,
  66. enum fscache_want_state want_state,
  67. enum fscache_access_trace why)
  68. {
  69. enum fscache_cookie_state state;
  70. long timeo;
  71. bool once_only = false;
  72. cres->ops = NULL;
  73. cres->cache_priv = cookie;
  74. cres->cache_priv2 = NULL;
  75. cres->debug_id = cookie->debug_id;
  76. cres->inval_counter = cookie->inval_counter;
  77. if (!fscache_begin_cookie_access(cookie, why))
  78. return -ENOBUFS;
  79. again:
  80. spin_lock(&cookie->lock);
  81. state = fscache_cookie_state(cookie);
  82. _enter("c=%08x{%u},%x", cookie->debug_id, state, want_state);
  83. switch (state) {
  84. case FSCACHE_COOKIE_STATE_LOOKING_UP:
  85. case FSCACHE_COOKIE_STATE_LRU_DISCARDING:
  86. case FSCACHE_COOKIE_STATE_INVALIDATING:
  87. goto wait_for_file_wrangling;
  88. case FSCACHE_COOKIE_STATE_CREATING:
  89. if (want_state == FSCACHE_WANT_PARAMS)
  90. goto ready; /* There can be no content */
  91. goto wait_for_file_wrangling;
  92. case FSCACHE_COOKIE_STATE_ACTIVE:
  93. goto ready;
  94. case FSCACHE_COOKIE_STATE_DROPPED:
  95. case FSCACHE_COOKIE_STATE_RELINQUISHING:
  96. WARN(1, "Can't use cookie in state %u\n", cookie->state);
  97. goto not_live;
  98. default:
  99. goto not_live;
  100. }
  101. ready:
  102. spin_unlock(&cookie->lock);
  103. if (!cookie->volume->cache->ops->begin_operation(cres, want_state))
  104. goto failed;
  105. return 0;
  106. wait_for_file_wrangling:
  107. spin_unlock(&cookie->lock);
  108. trace_fscache_access(cookie->debug_id, refcount_read(&cookie->ref),
  109. atomic_read(&cookie->n_accesses),
  110. fscache_access_io_wait);
  111. timeo = wait_var_event_timeout(&cookie->state,
  112. fscache_cookie_state(cookie) != state, 20 * HZ);
  113. if (timeo <= 1 && !once_only) {
  114. pr_warn("%s: cookie state change wait timed out: cookie->state=%u state=%u",
  115. __func__, fscache_cookie_state(cookie), state);
  116. fscache_print_cookie(cookie, 'O');
  117. once_only = true;
  118. }
  119. goto again;
  120. not_live:
  121. spin_unlock(&cookie->lock);
  122. failed:
  123. cres->cache_priv = NULL;
  124. cres->ops = NULL;
  125. fscache_end_cookie_access(cookie, fscache_access_io_not_live);
  126. _leave(" = -ENOBUFS");
  127. return -ENOBUFS;
  128. }
/*
 * Start a read operation on the cache object: begins an operation on @cres
 * for @cookie, requiring only the object's parameters (FSCACHE_WANT_PARAMS)
 * to be available.  Returns 0 or -ENOBUFS from fscache_begin_operation().
 */
int __fscache_begin_read_operation(struct netfs_cache_resources *cres,
				   struct fscache_cookie *cookie)
{
	return fscache_begin_operation(cres, cookie, FSCACHE_WANT_PARAMS,
				       fscache_access_io_read);
}
EXPORT_SYMBOL(__fscache_begin_read_operation);
/*
 * Start a write operation on the cache object: as
 * __fscache_begin_read_operation(), but traced as a write access.
 * Returns 0 or -ENOBUFS from fscache_begin_operation().
 */
int __fscache_begin_write_operation(struct netfs_cache_resources *cres,
				    struct fscache_cookie *cookie)
{
	return fscache_begin_operation(cres, cookie, FSCACHE_WANT_PARAMS,
				       fscache_access_io_write);
}
EXPORT_SYMBOL(__fscache_begin_write_operation);
  143. /**
  144. * fscache_dirty_folio - Mark folio dirty and pin a cache object for writeback
  145. * @mapping: The mapping the folio belongs to.
  146. * @folio: The folio being dirtied.
  147. * @cookie: The cookie referring to the cache object
  148. *
  149. * Set the dirty flag on a folio and pin an in-use cache object in memory
  150. * so that writeback can later write to it. This is intended
  151. * to be called from the filesystem's ->dirty_folio() method.
  152. *
  153. * Return: true if the dirty flag was set on the folio, false otherwise.
  154. */
  155. bool fscache_dirty_folio(struct address_space *mapping, struct folio *folio,
  156. struct fscache_cookie *cookie)
  157. {
  158. struct inode *inode = mapping->host;
  159. bool need_use = false;
  160. _enter("");
  161. if (!filemap_dirty_folio(mapping, folio))
  162. return false;
  163. if (!fscache_cookie_valid(cookie))
  164. return true;
  165. if (!(inode->i_state & I_PINNING_FSCACHE_WB)) {
  166. spin_lock(&inode->i_lock);
  167. if (!(inode->i_state & I_PINNING_FSCACHE_WB)) {
  168. inode->i_state |= I_PINNING_FSCACHE_WB;
  169. need_use = true;
  170. }
  171. spin_unlock(&inode->i_lock);
  172. if (need_use)
  173. fscache_use_cookie(cookie, true);
  174. }
  175. return true;
  176. }
  177. EXPORT_SYMBOL(fscache_dirty_folio);
/*
 * Descriptor for an in-flight write to the cache, carrying the cache
 * resources plus the completion information consumed by fscache_wreq_done().
 */
struct fscache_write_request {
	struct netfs_cache_resources cache_resources; /* Cache operation state */
	struct address_space	*mapping;	/* Mapping the data comes from */
	loff_t			start;		/* Byte offset of the write */
	size_t			len;		/* Length of the write */
	bool			set_bits;	/* Passed to fscache_clear_page_bits() on completion */
	netfs_io_terminated_t	term_func;	/* Completion callback (may be NULL) */
	void			*term_func_priv; /* Private data for term_func */
};
  187. void __fscache_clear_page_bits(struct address_space *mapping,
  188. loff_t start, size_t len)
  189. {
  190. pgoff_t first = start / PAGE_SIZE;
  191. pgoff_t last = (start + len - 1) / PAGE_SIZE;
  192. struct page *page;
  193. if (len) {
  194. XA_STATE(xas, &mapping->i_pages, first);
  195. rcu_read_lock();
  196. xas_for_each(&xas, page, last) {
  197. end_page_fscache(page);
  198. }
  199. rcu_read_unlock();
  200. }
  201. }
  202. EXPORT_SYMBOL(__fscache_clear_page_bits);
  203. /*
  204. * Deal with the completion of writing the data to the cache.
  205. */
  206. static void fscache_wreq_done(void *priv, ssize_t transferred_or_error,
  207. bool was_async)
  208. {
  209. struct fscache_write_request *wreq = priv;
  210. fscache_clear_page_bits(wreq->mapping, wreq->start, wreq->len,
  211. wreq->set_bits);
  212. if (wreq->term_func)
  213. wreq->term_func(wreq->term_func_priv, transferred_or_error,
  214. was_async);
  215. fscache_end_operation(&wreq->cache_resources);
  216. kfree(wreq);
  217. }
/*
 * Write a span of data from @mapping's pagecache to the cache object behind
 * @cookie, completing asynchronously via fscache_wreq_done().  On any failure
 * to get going, the page state is cleared and @term_func (if given) is called
 * with the error.
 *
 * Ownership: wreq is freed by fscache_wreq_done() on the success path and on
 * abandon_end; the abandon_free/abandon paths unwind manually because the
 * operation was never (fully) begun.
 */
void __fscache_write_to_cache(struct fscache_cookie *cookie,
			      struct address_space *mapping,
			      loff_t start, size_t len, loff_t i_size,
			      netfs_io_terminated_t term_func,
			      void *term_func_priv,
			      bool cond)
{
	struct fscache_write_request *wreq;
	struct netfs_cache_resources *cres;
	struct iov_iter iter;
	int ret = -ENOBUFS;

	/* Nothing to write: report failure without touching the cache */
	if (len == 0)
		goto abandon;

	_enter("%llx,%zx", start, len);

	wreq = kzalloc(sizeof(struct fscache_write_request), GFP_NOFS);
	if (!wreq)
		goto abandon;
	wreq->mapping		= mapping;
	wreq->start		= start;
	wreq->len		= len;
	wreq->set_bits		= cond;
	wreq->term_func		= term_func;
	wreq->term_func_priv	= term_func_priv;

	cres = &wreq->cache_resources;
	if (fscache_begin_operation(cres, cookie, FSCACHE_WANT_WRITE,
				    fscache_access_io_write) < 0)
		goto abandon_free;

	/* May adjust start/len to what the backend can actually store */
	ret = cres->ops->prepare_write(cres, &start, &len, i_size, false);
	if (ret < 0)
		goto abandon_end;

	/* TODO: Consider clearing page bits now for space the write isn't
	 * covering.  This is more complicated than it appears when THPs are
	 * taken into account.
	 */

	iov_iter_xarray(&iter, ITER_SOURCE, &mapping->i_pages, start, len);
	fscache_write(cres, start, &iter, fscache_wreq_done, wreq);
	return;

abandon_end:
	/* fscache_wreq_done() clears the page bits, calls term_func, ends
	 * the operation and frees wreq for us.
	 */
	return fscache_wreq_done(wreq, ret, false);
abandon_free:
	kfree(wreq);
abandon:
	fscache_clear_page_bits(mapping, start, len, cond);
	if (term_func)
		term_func(term_func_priv, ret, false);
}
EXPORT_SYMBOL(__fscache_write_to_cache);
  265. /*
  266. * Change the size of a backing object.
  267. */
  268. void __fscache_resize_cookie(struct fscache_cookie *cookie, loff_t new_size)
  269. {
  270. struct netfs_cache_resources cres;
  271. trace_fscache_resize(cookie, new_size);
  272. if (fscache_begin_operation(&cres, cookie, FSCACHE_WANT_WRITE,
  273. fscache_access_io_resize) == 0) {
  274. fscache_stat(&fscache_n_resizes);
  275. set_bit(FSCACHE_COOKIE_NEEDS_UPDATE, &cookie->flags);
  276. /* We cannot defer a resize as we need to do it inside the
  277. * netfs's inode lock so that we're serialised with respect to
  278. * writes.
  279. */
  280. cookie->volume->cache->ops->resize_cookie(&cres, new_size);
  281. fscache_end_operation(&cres);
  282. } else {
  283. fscache_stat(&fscache_n_resizes_null);
  284. }
  285. }
  286. EXPORT_SYMBOL(__fscache_resize_cookie);