ondemand.c

// SPDX-License-Identifier: GPL-2.0-or-later
#include <linux/fdtable.h>
#include <linux/anon_inodes.h>
#include <linux/uio.h>
#include "internal.h"
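
/*
 * Release the anonymous fd that was installed for the daemon. Mark the
 * object as closed and flush any in-flight READ requests that can no
 * longer be served, so their waiters are not left hanging.
 */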
static int cachefiles_ondemand_fd_release(struct inode *inode,
					  struct file *file)
{
	struct cachefiles_object *object = file->private_data;
	struct cachefiles_cache *cache = object->volume->cache;
	int object_id = object->ondemand_id;
	struct cachefiles_req *req;
	XA_STATE(xas, &cache->reqs, 0);

	xa_lock(&cache->reqs);
	object->ondemand_id = CACHEFILES_ONDEMAND_ID_CLOSED;

	/*
	 * Flush all pending READ requests since their completion depends on
	 * anon_fd.
	 */
	xas_for_each(&xas, req, ULONG_MAX) {
		if (req->msg.object_id == object_id &&
		    req->msg.opcode == CACHEFILES_OP_READ) {
			req->error = -EIO;
			complete(&req->done);
			xas_store(&xas, NULL);
		}
	}
	xa_unlock(&cache->reqs);

	xa_erase(&cache->ondemand_ids, object_id);
	trace_cachefiles_ondemand_fd_release(object, object_id);
	cachefiles_put_object(object, cachefiles_obj_put_ondemand_fd);
	cachefiles_put_unbind_pincount(cache);
	return 0;
}
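
/*
 * Write data from the daemon into the backing cache file at the given
 * position. The write range is first checked and prepared by
 * __cachefiles_prepare_write() under the cache's credentials.
 */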
static ssize_t cachefiles_ondemand_fd_write_iter(struct kiocb *kiocb,
						 struct iov_iter *iter)
{
	struct cachefiles_object *object = kiocb->ki_filp->private_data;
	struct cachefiles_cache *cache = object->volume->cache;
	struct file *file = object->file;
	size_t len = iter->count;
	loff_t pos = kiocb->ki_pos;
	const struct cred *saved_cred;
	int ret;

	if (!file)
		return -ENOBUFS;

	cachefiles_begin_secure(cache, &saved_cred);
	ret = __cachefiles_prepare_write(object, file, &pos, &len, true);
	cachefiles_end_secure(cache, saved_cred);
	if (ret < 0)
		return ret;

	trace_cachefiles_ondemand_fd_write(object, file_inode(file), pos, len);
	ret = __cachefiles_write(object, file, pos, iter, NULL, NULL);
	if (!ret)
		ret = len;

	return ret;
}
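
/* Seeks on the anonymous fd are forwarded directly to the backing file. */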
static loff_t cachefiles_ondemand_fd_llseek(struct file *filp, loff_t pos,
					    int whence)
{
	struct cachefiles_object *object = filp->private_data;
	struct file *file = object->file;

	if (!file)
		return -ENOBUFS;

	return vfs_llseek(file, pos, whence);
}
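
/*
 * CACHEFILES_IOC_READ_COMPLETE: the daemon signals that the READ request
 * identified by @arg has been fulfilled, waking up the waiting reader.
 */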
static long cachefiles_ondemand_fd_ioctl(struct file *filp, unsigned int ioctl,
					 unsigned long arg)
{
	struct cachefiles_object *object = filp->private_data;
	struct cachefiles_cache *cache = object->volume->cache;
	struct cachefiles_req *req;
	unsigned long id;

	if (ioctl != CACHEFILES_IOC_READ_COMPLETE)
		return -EINVAL;

	if (!test_bit(CACHEFILES_ONDEMAND_MODE, &cache->flags))
		return -EOPNOTSUPP;

	id = arg;
	req = xa_erase(&cache->reqs, id);
	if (!req)
		return -EINVAL;

	trace_cachefiles_ondemand_cread(object, id);
	complete(&req->done);
	return 0;
}
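
/* Operations for the anonymous fd handed to the user daemon. */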
static const struct file_operations cachefiles_ondemand_fd_fops = {
	.owner		= THIS_MODULE,
	.release	= cachefiles_ondemand_fd_release,
	.write_iter	= cachefiles_ondemand_fd_write_iter,
	.llseek		= cachefiles_ondemand_fd_llseek,
	.unlocked_ioctl	= cachefiles_ondemand_fd_ioctl,
};

/*
 * OPEN request completion (copen)
 * - command: "copen <id>,<cache_size>"
 *   <cache_size> indicates the object size if >=0, an error code if negative
 */
int cachefiles_ondemand_copen(struct cachefiles_cache *cache, char *args)
{
	struct cachefiles_req *req;
	struct fscache_cookie *cookie;
	char *pid, *psize;
	unsigned long id;
	long size;
	int ret;

	if (!test_bit(CACHEFILES_ONDEMAND_MODE, &cache->flags))
		return -EOPNOTSUPP;

	if (!*args) {
		pr_err("Empty id specified\n");
		return -EINVAL;
	}

	pid = args;
	psize = strchr(args, ',');
	if (!psize) {
		pr_err("Cache size is not specified\n");
		return -EINVAL;
	}

	*psize = 0;
	psize++;

	ret = kstrtoul(pid, 0, &id);
	if (ret)
		return ret;

	req = xa_erase(&cache->reqs, id);
	if (!req)
		return -EINVAL;

	/* fail OPEN request if copen format is invalid */
	ret = kstrtol(psize, 0, &size);
	if (ret) {
		req->error = ret;
		goto out;
	}

	/* fail OPEN request if daemon reports an error */
	if (size < 0) {
		if (!IS_ERR_VALUE(size)) {
			req->error = -EINVAL;
			ret = -EINVAL;
		} else {
			req->error = size;
			ret = 0;
		}
		goto out;
	}

	cookie = req->object->cookie;
	cookie->object_size = size;
	if (size)
		clear_bit(FSCACHE_COOKIE_NO_DATA_TO_READ, &cookie->flags);
	else
		set_bit(FSCACHE_COOKIE_NO_DATA_TO_READ, &cookie->flags);
	trace_cachefiles_ondemand_copen(req->object, id, size);

out:
	complete(&req->done);
	return ret;
}
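
/*
 * Allocate an object ID and an anonymous fd for an OPEN request, and
 * install the fd into the daemon's file table so that it is carried back
 * in the reply message. Pins the object and the cache until the fd is
 * released.
 */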
static int cachefiles_ondemand_get_fd(struct cachefiles_req *req)
{
	struct cachefiles_object *object;
	struct cachefiles_cache *cache;
	struct cachefiles_open *load;
	struct file *file;
	u32 object_id;
	int ret, fd;

	object = cachefiles_grab_object(req->object,
					cachefiles_obj_get_ondemand_fd);
	cache = object->volume->cache;

	ret = xa_alloc_cyclic(&cache->ondemand_ids, &object_id, NULL,
			      XA_LIMIT(1, INT_MAX),
			      &cache->ondemand_id_next, GFP_KERNEL);
	if (ret < 0)
		goto err;

	fd = get_unused_fd_flags(O_WRONLY);
	if (fd < 0) {
		ret = fd;
		goto err_free_id;
	}

	file = anon_inode_getfile("[cachefiles]", &cachefiles_ondemand_fd_fops,
				  object, O_WRONLY);
	if (IS_ERR(file)) {
		ret = PTR_ERR(file);
		goto err_put_fd;
	}

	file->f_mode |= FMODE_PWRITE | FMODE_LSEEK;
	fd_install(fd, file);

	load = (void *)req->msg.data;
	load->fd = fd;
	req->msg.object_id = object_id;
	object->ondemand_id = object_id;

	cachefiles_get_unbind_pincount(cache);
	trace_cachefiles_ondemand_open(object, &req->msg, load);
	return 0;

err_put_fd:
	put_unused_fd(fd);
err_free_id:
	xa_erase(&cache->ondemand_ids, object_id);
err:
	cachefiles_put_object(object, cachefiles_obj_put_ondemand_fd);
	return ret;
}
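
/*
 * Hand the next unprocessed request to the daemon reading the device
 * file. For OPEN requests an anonymous fd is set up first, so that the
 * daemon receives the fd together with the message.
 */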
ssize_t cachefiles_ondemand_daemon_read(struct cachefiles_cache *cache,
					char __user *_buffer, size_t buflen)
{
	struct cachefiles_req *req;
	struct cachefiles_msg *msg;
	unsigned long id = 0;
	size_t n;
	int ret = 0;
	XA_STATE(xas, &cache->reqs, cache->req_id_next);

	/*
	 * Cyclically search for a request that has not ever been processed,
	 * to prevent requests from being processed repeatedly, and make
	 * request distribution fair.
	 */
	xa_lock(&cache->reqs);
	req = xas_find_marked(&xas, UINT_MAX, CACHEFILES_REQ_NEW);
	if (!req && cache->req_id_next > 0) {
		xas_set(&xas, 0);
		req = xas_find_marked(&xas, cache->req_id_next - 1,
				      CACHEFILES_REQ_NEW);
	}
	if (!req) {
		xa_unlock(&cache->reqs);
		return 0;
	}

	msg = &req->msg;
	n = msg->len;

	if (n > buflen) {
		xa_unlock(&cache->reqs);
		return -EMSGSIZE;
	}

	xas_clear_mark(&xas, CACHEFILES_REQ_NEW);
	cache->req_id_next = xas.xa_index + 1;
	xa_unlock(&cache->reqs);

	id = xas.xa_index;
	msg->msg_id = id;

	if (msg->opcode == CACHEFILES_OP_OPEN) {
		ret = cachefiles_ondemand_get_fd(req);
		if (ret)
			goto error;
	}

	if (copy_to_user(_buffer, msg, n) != 0) {
		ret = -EFAULT;
		goto err_put_fd;
	}

	/* CLOSE request has no reply */
	if (msg->opcode == CACHEFILES_OP_CLOSE) {
		xa_erase(&cache->reqs, id);
		complete(&req->done);
	}

	return n;

err_put_fd:
	if (msg->opcode == CACHEFILES_OP_OPEN)
		close_fd(((struct cachefiles_open *)msg->data)->fd);
error:
	xa_erase(&cache->reqs, id);
	req->error = ret;
	complete(&req->done);
	return ret;
}

typedef int (*init_req_fn)(struct cachefiles_req *req, void *private);
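
/*
 * Build a request of the given opcode, queue it to the xarray for the
 * daemon to pick up, then wait for the reply. @init_req fills in the
 * opcode-specific part of the message.
 */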
static int cachefiles_ondemand_send_req(struct cachefiles_object *object,
					enum cachefiles_opcode opcode,
					size_t data_len,
					init_req_fn init_req,
					void *private)
{
	struct cachefiles_cache *cache = object->volume->cache;
	struct cachefiles_req *req;
	XA_STATE(xas, &cache->reqs, 0);
	int ret;

	if (!test_bit(CACHEFILES_ONDEMAND_MODE, &cache->flags))
		return 0;

	if (test_bit(CACHEFILES_DEAD, &cache->flags))
		return -EIO;

	req = kzalloc(sizeof(*req) + data_len, GFP_KERNEL);
	if (!req)
		return -ENOMEM;

	req->object = object;
	init_completion(&req->done);
	req->msg.opcode = opcode;
	req->msg.len = sizeof(struct cachefiles_msg) + data_len;

	ret = init_req(req, private);
	if (ret)
		goto out;

	do {
		/*
		 * Stop enqueuing the request when the daemon is dying. The
		 * following two operations need to be atomic as a whole:
		 *   1) check the cache state, and
		 *   2) enqueue the request if the cache is alive.
		 * Otherwise the request may be enqueued after the xarray has
		 * been flushed, leaving an orphan request that is never
		 * completed.
		 *
		 * CPU 1				CPU 2
		 * =====				=====
		 * test CACHEFILES_DEAD bit
		 *					set CACHEFILES_DEAD bit
		 *					flush requests in the xarray
		 * enqueue the request
		 */
		xas_lock(&xas);

		if (test_bit(CACHEFILES_DEAD, &cache->flags)) {
			xas_unlock(&xas);
			ret = -EIO;
			goto out;
		}

		/* coupled with the barrier in cachefiles_flush_reqs() */
		smp_mb();

		if (opcode != CACHEFILES_OP_OPEN && object->ondemand_id <= 0) {
			WARN_ON_ONCE(object->ondemand_id == 0);
			xas_unlock(&xas);
			ret = -EIO;
			goto out;
		}

		xas.xa_index = 0;
		xas_find_marked(&xas, UINT_MAX, XA_FREE_MARK);
		if (xas.xa_node == XAS_RESTART)
			xas_set_err(&xas, -EBUSY);
		xas_store(&xas, req);
		xas_clear_mark(&xas, XA_FREE_MARK);
		xas_set_mark(&xas, CACHEFILES_REQ_NEW);
		xas_unlock(&xas);
	} while (xas_nomem(&xas, GFP_KERNEL));

	ret = xas_error(&xas);
	if (ret)
		goto out;

	wake_up_all(&cache->daemon_pollwq);
	wait_for_completion(&req->done);
	ret = req->error;
out:
	kfree(req);
	return ret;
}
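
/*
 * Fill in the payload of an OPEN request: the volume key and the cookie
 * key, which together identify the cache file to the daemon.
 */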
static int cachefiles_ondemand_init_open_req(struct cachefiles_req *req,
					     void *private)
{
	struct cachefiles_object *object = req->object;
	struct fscache_cookie *cookie = object->cookie;
	struct fscache_volume *volume = object->volume->vcookie;
	struct cachefiles_open *load = (void *)req->msg.data;
	size_t volume_key_size, cookie_key_size;
	void *volume_key, *cookie_key;

	/*
	 * Volume key is a NUL-terminated string. key[0] stores strlen() of the
	 * string, followed by the content of the string (excluding '\0').
	 */
	volume_key_size = volume->key[0] + 1;
	volume_key = volume->key + 1;

	/* Cookie key is binary data, which is netfs specific. */
	cookie_key_size = cookie->key_len;
	cookie_key = fscache_get_key(cookie);

	if (!(object->cookie->advice & FSCACHE_ADV_WANT_CACHE_SIZE)) {
		pr_err("WANT_CACHE_SIZE is needed for on-demand mode\n");
		return -EINVAL;
	}

	load->volume_key_size = volume_key_size;
	load->cookie_key_size = cookie_key_size;
	memcpy(load->data, volume_key, volume_key_size);
	memcpy(load->data + volume_key_size, cookie_key, cookie_key_size);

	return 0;
}

static int cachefiles_ondemand_init_close_req(struct cachefiles_req *req,
					      void *private)
{
	struct cachefiles_object *object = req->object;
	int object_id = object->ondemand_id;

	/*
	 * It's possible that the object id is still 0 if the cookie lookup
	 * phase failed before an OPEN request was ever sent. Also avoid
	 * sending a CLOSE request for CACHEFILES_ONDEMAND_ID_CLOSED, which
	 * means the anon_fd has already been closed.
	 */
	if (object_id <= 0)
		return -ENOENT;

	req->msg.object_id = object_id;
	trace_cachefiles_ondemand_close(object, &req->msg);
	return 0;
}
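
/* Byte range requested by a netfs read; copied into the READ request. */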
struct cachefiles_read_ctx {
	loff_t off;
	size_t len;
};

static int cachefiles_ondemand_init_read_req(struct cachefiles_req *req,
					     void *private)
{
	struct cachefiles_object *object = req->object;
	struct cachefiles_read *load = (void *)req->msg.data;
	struct cachefiles_read_ctx *read_ctx = private;
	int object_id = object->ondemand_id;

	/* Stop enqueuing requests when daemon has closed anon_fd. */
	if (object_id <= 0) {
		WARN_ON_ONCE(object_id == 0);
		pr_info_once("READ: anonymous fd closed prematurely.\n");
		return -EIO;
	}

	req->msg.object_id = object_id;
	load->off = read_ctx->off;
	load->len = read_ctx->len;
	trace_cachefiles_ondemand_read(object, &req->msg, load);
	return 0;
}
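
/*
 * Send an OPEN request so that the daemon can open or create the backing
 * cache file. A no-op if the object already holds a valid object ID.
 */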
int cachefiles_ondemand_init_object(struct cachefiles_object *object)
{
	struct fscache_cookie *cookie = object->cookie;
	struct fscache_volume *volume = object->volume->vcookie;
	size_t volume_key_size, cookie_key_size, data_len;

	/*
	 * CacheFiles will first check the cache file under the root cache
	 * directory. If the coherency check fails, it will fall back to
	 * creating a new tmpfile as the cache file. Reuse the previously
	 * allocated object ID if any.
	 */
	if (object->ondemand_id > 0)
		return 0;

	volume_key_size = volume->key[0] + 1;
	cookie_key_size = cookie->key_len;
	data_len = sizeof(struct cachefiles_open) +
		   volume_key_size + cookie_key_size;

	return cachefiles_ondemand_send_req(object, CACHEFILES_OP_OPEN,
			data_len, cachefiles_ondemand_init_open_req, NULL);
}
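
/*
 * Notify the daemon that the object is being withdrawn, so that it can
 * close the corresponding cache file.
 */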
void cachefiles_ondemand_clean_object(struct cachefiles_object *object)
{
	cachefiles_ondemand_send_req(object, CACHEFILES_OP_CLOSE, 0,
			cachefiles_ondemand_init_close_req, NULL);
}
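
/*
 * Ask the daemon to fetch [pos, pos + len) into the cache file. Blocks
 * until the daemon completes the request, normally by issuing
 * CACHEFILES_IOC_READ_COMPLETE on the anonymous fd.
 */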
int cachefiles_ondemand_read(struct cachefiles_object *object,
			     loff_t pos, size_t len)
{
	struct cachefiles_read_ctx read_ctx = {pos, len};

	return cachefiles_ondemand_send_req(object, CACHEFILES_OP_READ,
			sizeof(struct cachefiles_read),
			cachefiles_ondemand_init_read_req, &read_ctx);
}