// SPDX-License-Identifier: GPL-2.0-or-later
/* CacheFiles path walking and related routines
 *
 * Copyright (C) 2021 Red Hat, Inc. All Rights Reserved.
 * Written by David Howells ([email protected])
 */

#include <linux/fs.h>
#include <linux/namei.h>
#include "internal.h"

/*
 * Mark the backing file as being a cache file if it's not already in use.  The
 * mark tells the culling request command that it's not allowed to cull the
 * file or directory.  The caller must hold the inode lock.
 */
static bool __cachefiles_mark_inode_in_use(struct cachefiles_object *object,
					   struct inode *inode)
{
	bool can_use = false;

	if (!(inode->i_flags & S_KERNEL_FILE)) {
		inode->i_flags |= S_KERNEL_FILE;
		trace_cachefiles_mark_active(object, inode);
		can_use = true;
	} else {
		trace_cachefiles_mark_failed(object, inode);
	}

	return can_use;
}

static bool cachefiles_mark_inode_in_use(struct cachefiles_object *object,
					 struct inode *inode)
{
	bool can_use;

	inode_lock(inode);
	can_use = __cachefiles_mark_inode_in_use(object, inode);
	inode_unlock(inode);
	return can_use;
}

/*
 * Unmark a backing inode.  The caller must hold the inode lock.
 */
static void __cachefiles_unmark_inode_in_use(struct cachefiles_object *object,
					     struct inode *inode)
{
	inode->i_flags &= ~S_KERNEL_FILE;
	trace_cachefiles_mark_inactive(object, inode);
}

static void cachefiles_do_unmark_inode_in_use(struct cachefiles_object *object,
					      struct inode *inode)
{
	inode_lock(inode);
	__cachefiles_unmark_inode_in_use(object, inode);
	inode_unlock(inode);
}

/*
 * Unmark a backing inode and tell cachefilesd that there's something that can
 * be culled.
 */
void cachefiles_unmark_inode_in_use(struct cachefiles_object *object,
				    struct file *file)
{
	struct cachefiles_cache *cache = object->volume->cache;
	struct inode *inode = file_inode(file);

	cachefiles_do_unmark_inode_in_use(object, inode);
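	/* Unless the file was an unlinked tmpfile that was never committed to
	 * the cache tree, account the released blocks and file so that
	 * cachefilesd can see that culling has freed up space.
	 */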
	if (!test_bit(CACHEFILES_OBJECT_USING_TMPFILE, &object->flags)) {
		atomic_long_add(inode->i_blocks, &cache->b_released);
		if (atomic_inc_return(&cache->f_released))
			cachefiles_state_changed(cache);
	}
}

/*
 * get a subdirectory
 */
struct dentry *cachefiles_get_directory(struct cachefiles_cache *cache,
					struct dentry *dir,
					const char *dirname,
					bool *_is_new)
{
	struct dentry *subdir;
	struct path path;
	int ret;

	_enter(",,%s", dirname);

	/* search the current directory for the element name */
	inode_lock_nested(d_inode(dir), I_MUTEX_PARENT);

retry:
	ret = cachefiles_inject_read_error();
	if (ret == 0)
		subdir = lookup_one_len(dirname, dir, strlen(dirname));
	else
		subdir = ERR_PTR(ret);
	trace_cachefiles_lookup(NULL, dir, subdir);
	if (IS_ERR(subdir)) {
		trace_cachefiles_vfs_error(NULL, d_backing_inode(dir),
					   PTR_ERR(subdir),
					   cachefiles_trace_lookup_error);
		if (PTR_ERR(subdir) == -ENOMEM)
			goto nomem_d_alloc;
		goto lookup_error;
	}

	_debug("subdir -> %pd %s",
	       subdir, d_backing_inode(subdir) ? "positive" : "negative");

	/* we need to create the subdir if it doesn't exist yet */
	if (d_is_negative(subdir)) {
		ret = cachefiles_has_space(cache, 1, 0,
					   cachefiles_has_space_for_create);
		if (ret < 0)
			goto mkdir_error;

		_debug("attempt mkdir");

		path.mnt = cache->mnt;
		path.dentry = dir;
		ret = security_path_mkdir(&path, subdir, 0700);
		if (ret < 0)
			goto mkdir_error;
		ret = cachefiles_inject_write_error();
		if (ret == 0)
			ret = vfs_mkdir(&init_user_ns, d_inode(dir), subdir, 0700);
		if (ret < 0) {
			trace_cachefiles_vfs_error(NULL, d_inode(dir), ret,
						   cachefiles_trace_mkdir_error);
			goto mkdir_error;
		}
		trace_cachefiles_mkdir(dir, subdir);
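		/* The filesystem may have dropped the dentry during mkdir
		 * (e.g. in order to instantiate a different one); if so,
		 * repeat the lookup to pick up the hashed dentry.
		 */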
		if (unlikely(d_unhashed(subdir))) {
			cachefiles_put_directory(subdir);
			goto retry;
		}
		ASSERT(d_backing_inode(subdir));

		_debug("mkdir -> %pd{ino=%lu}",
		       subdir, d_backing_inode(subdir)->i_ino);
		if (_is_new)
			*_is_new = true;
	}
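	/* Lock the subdir before dropping the parent lock so that there's no
	 * window in which the new directory could be culled or removed.
	 */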
	/* Tell rmdir() it's not allowed to delete the subdir */
	inode_lock(d_inode(subdir));
	inode_unlock(d_inode(dir));

	if (!__cachefiles_mark_inode_in_use(NULL, d_inode(subdir))) {
		pr_notice("cachefiles: Inode already in use: %pd (B=%lx)\n",
			  subdir, d_inode(subdir)->i_ino);
		goto mark_error;
	}

	inode_unlock(d_inode(subdir));

	/* we need to make sure the subdir is a directory */
	ASSERT(d_backing_inode(subdir));

	if (!d_can_lookup(subdir)) {
		pr_err("%s is not a directory\n", dirname);
		ret = -EIO;
		goto check_error;
	}
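	/* The backing filesystem must support xattrs and the full complement
	 * of directory operations for the cache to be able to use it.
	 */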
	ret = -EPERM;
	if (!(d_backing_inode(subdir)->i_opflags & IOP_XATTR) ||
	    !d_backing_inode(subdir)->i_op->lookup ||
	    !d_backing_inode(subdir)->i_op->mkdir ||
	    !d_backing_inode(subdir)->i_op->rename ||
	    !d_backing_inode(subdir)->i_op->rmdir ||
	    !d_backing_inode(subdir)->i_op->unlink)
		goto check_error;

	_leave(" = [%lu]", d_backing_inode(subdir)->i_ino);
	return subdir;

check_error:
	cachefiles_put_directory(subdir);
	_leave(" = %d [check]", ret);
	return ERR_PTR(ret);

mark_error:
	inode_unlock(d_inode(subdir));
	dput(subdir);
	return ERR_PTR(-EBUSY);

mkdir_error:
	inode_unlock(d_inode(dir));
	dput(subdir);
	pr_err("mkdir %s failed with error %d\n", dirname, ret);
	return ERR_PTR(ret);

lookup_error:
	inode_unlock(d_inode(dir));
	ret = PTR_ERR(subdir);
	pr_err("Lookup %s failed with error %d\n", dirname, ret);
	return ERR_PTR(ret);

nomem_d_alloc:
	inode_unlock(d_inode(dir));
	_leave(" = -ENOMEM");
	return ERR_PTR(-ENOMEM);
}

/*
 * Put a subdirectory.
 */
void cachefiles_put_directory(struct dentry *dir)
{
	if (dir) {
		cachefiles_do_unmark_inode_in_use(NULL, d_inode(dir));
		dput(dir);
	}
}

/*
 * Remove a regular file from the cache.
 */
static int cachefiles_unlink(struct cachefiles_cache *cache,
			     struct cachefiles_object *object,
			     struct dentry *dir, struct dentry *dentry,
			     enum fscache_why_object_killed why)
{
	struct path path = {
		.mnt	= cache->mnt,
		.dentry	= dir,
	};
	int ret;

	trace_cachefiles_unlink(object, d_inode(dentry)->i_ino, why);
	ret = security_path_unlink(&path, dentry);
	if (ret < 0) {
		cachefiles_io_error(cache, "Unlink security error");
		return ret;
	}

	ret = cachefiles_inject_remove_error();
	if (ret == 0) {
		ret = vfs_unlink(&init_user_ns, d_backing_inode(dir), dentry, NULL);
		if (ret == -EIO)
			cachefiles_io_error(cache, "Unlink failed");
	}
	if (ret != 0)
		trace_cachefiles_vfs_error(object, d_backing_inode(dir), ret,
					   cachefiles_trace_unlink_error);
	return ret;
}

/*
 * Delete an object representation from the cache
 * - File backed objects are unlinked
 * - Directory backed objects are stuffed into the graveyard for userspace to
 *   delete
 */
int cachefiles_bury_object(struct cachefiles_cache *cache,
			   struct cachefiles_object *object,
			   struct dentry *dir,
			   struct dentry *rep,
			   enum fscache_why_object_killed why)
{
	struct dentry *grave, *trap;
	struct path path, path_to_graveyard;
	char nbuffer[8 + 8 + 1];
	int ret;

	_enter(",'%pd','%pd'", dir, rep);

	if (rep->d_parent != dir) {
		inode_unlock(d_inode(dir));
		_leave(" = -ESTALE");
		return -ESTALE;
	}

	/* non-directories can just be unlinked */
	if (!d_is_dir(rep)) {
		dget(rep); /* Stop the dentry being negated if it's only pinned
			    * by a file struct.
			    */
		ret = cachefiles_unlink(cache, object, dir, rep, why);
		dput(rep);

		inode_unlock(d_inode(dir));
		_leave(" = %d", ret);
		return ret;
	}

	/* directories have to be moved to the graveyard */
	_debug("move stale object to graveyard");
	inode_unlock(d_inode(dir));

try_again:
	/* first step is to make up a grave dentry in the graveyard */
	sprintf(nbuffer, "%08x%08x",
		(uint32_t) ktime_get_real_seconds(),
		(uint32_t) atomic_inc_return(&cache->gravecounter));

	/* do the multiway lock magic */
	trap = lock_rename(cache->graveyard, dir);
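	/* lock_rename() locks both parents and, if one directory contains the
	 * other, returns the ancestor; neither the victim nor the grave may
	 * be that dentry, or the rename would create a directory loop.
	 */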
	/* do some checks before getting the grave dentry */
	if (rep->d_parent != dir || IS_DEADDIR(d_inode(rep))) {
		/* the entry was probably culled when we dropped the parent dir
		 * lock */
		unlock_rename(cache->graveyard, dir);
		_leave(" = 0 [culled?]");
		return 0;
	}

	if (!d_can_lookup(cache->graveyard)) {
		unlock_rename(cache->graveyard, dir);
		cachefiles_io_error(cache, "Graveyard no longer a directory");
		return -EIO;
	}

	if (trap == rep) {
		unlock_rename(cache->graveyard, dir);
		cachefiles_io_error(cache, "May not make directory loop");
		return -EIO;
	}

	if (d_mountpoint(rep)) {
		unlock_rename(cache->graveyard, dir);
		cachefiles_io_error(cache, "Mountpoint in cache");
		return -EIO;
	}

	grave = lookup_one_len(nbuffer, cache->graveyard, strlen(nbuffer));
	if (IS_ERR(grave)) {
		unlock_rename(cache->graveyard, dir);
		trace_cachefiles_vfs_error(object, d_inode(cache->graveyard),
					   PTR_ERR(grave),
					   cachefiles_trace_lookup_error);

		if (PTR_ERR(grave) == -ENOMEM) {
			_leave(" = -ENOMEM");
			return -ENOMEM;
		}

		cachefiles_io_error(cache, "Lookup error %ld", PTR_ERR(grave));
		return -EIO;
	}
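	/* A positive dentry means the grave name is already in use: back off
	 * and try again with a new timestamp/counter-based name.
	 */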
	if (d_is_positive(grave)) {
		unlock_rename(cache->graveyard, dir);
		dput(grave);
		grave = NULL;
		cond_resched();
		goto try_again;
	}

	if (d_mountpoint(grave)) {
		unlock_rename(cache->graveyard, dir);
		dput(grave);
		cachefiles_io_error(cache, "Mountpoint in graveyard");
		return -EIO;
	}

	/* target should not be an ancestor of source */
	if (trap == grave) {
		unlock_rename(cache->graveyard, dir);
		dput(grave);
		cachefiles_io_error(cache, "May not make directory loop");
		return -EIO;
	}

	/* attempt the rename */
	path.mnt = cache->mnt;
	path.dentry = dir;
	path_to_graveyard.mnt = cache->mnt;
	path_to_graveyard.dentry = cache->graveyard;
	ret = security_path_rename(&path, rep, &path_to_graveyard, grave, 0);
	if (ret < 0) {
		cachefiles_io_error(cache, "Rename security error %d", ret);
	} else {
		struct renamedata rd = {
			.old_mnt_userns	= &init_user_ns,
			.old_dir	= d_inode(dir),
			.old_dentry	= rep,
			.new_mnt_userns	= &init_user_ns,
			.new_dir	= d_inode(cache->graveyard),
			.new_dentry	= grave,
		};
		trace_cachefiles_rename(object, d_inode(rep)->i_ino, why);
		ret = cachefiles_inject_read_error();
		if (ret == 0)
			ret = vfs_rename(&rd);
		if (ret != 0)
			trace_cachefiles_vfs_error(object, d_inode(dir), ret,
						   cachefiles_trace_rename_error);
		if (ret != 0 && ret != -ENOMEM)
			cachefiles_io_error(cache,
					    "Rename failed with error %d", ret);
	}

	__cachefiles_unmark_inode_in_use(object, d_inode(rep));
	unlock_rename(cache->graveyard, dir);
	dput(grave);
	_leave(" = 0");
	return 0;
}

/*
 * Delete a cache file.
 */
int cachefiles_delete_object(struct cachefiles_object *object,
			     enum fscache_why_object_killed why)
{
	struct cachefiles_volume *volume = object->volume;
	struct dentry *dentry = object->file->f_path.dentry;
	struct dentry *fan = volume->fanout[(u8)object->cookie->key_hash];
	int ret;

	_enter(",OBJ%x{%pD}", object->debug_id, object->file);

	/* Stop the dentry being negated if it's only pinned by a file struct. */
	dget(dentry);

	inode_lock_nested(d_backing_inode(fan), I_MUTEX_PARENT);
	ret = cachefiles_unlink(volume->cache, object, fan, dentry, why);
	inode_unlock(d_backing_inode(fan));
	dput(dentry);
	return ret;
}

/*
 * Create a temporary file and leave it unattached and un-xattr'd until the
 * time comes to discard the object from memory.
 */
struct file *cachefiles_create_tmpfile(struct cachefiles_object *object)
{
	struct cachefiles_volume *volume = object->volume;
	struct cachefiles_cache *cache = volume->cache;
	const struct cred *saved_cred;
	struct dentry *fan = volume->fanout[(u8)object->cookie->key_hash];
	struct file *file;
	const struct path parentpath = { .mnt = cache->mnt, .dentry = fan };
	uint64_t ni_size;
	long ret;

	cachefiles_begin_secure(cache, &saved_cred);

	ret = cachefiles_inject_write_error();
	if (ret == 0) {
		file = vfs_tmpfile_open(&init_user_ns, &parentpath, S_IFREG,
					O_RDWR | O_LARGEFILE | O_DIRECT,
					cache->cache_cred);
		ret = PTR_ERR_OR_ZERO(file);
	}
	if (ret) {
		trace_cachefiles_vfs_error(object, d_inode(fan), ret,
					   cachefiles_trace_tmpfile_error);
		if (ret == -EIO)
			cachefiles_io_error_obj(object, "Failed to create tmpfile");
		goto err;
	}

	trace_cachefiles_tmpfile(object, file_inode(file));

	/* This is a newly created file with no other possible user */
	if (!cachefiles_mark_inode_in_use(object, file_inode(file)))
		WARN_ON(1);

	ret = cachefiles_ondemand_init_object(object);
	if (ret < 0)
		goto err_unuse;

	ni_size = object->cookie->object_size;
	ni_size = round_up(ni_size, CACHEFILES_DIO_BLOCK_SIZE);
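	/* Expand the file out to a multiple of the DIO block size so that
	 * block-aligned direct I/O on the tail of the data stays within the
	 * file.
	 */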
	if (ni_size > 0) {
		trace_cachefiles_trunc(object, file_inode(file), 0, ni_size,
				       cachefiles_trunc_expand_tmpfile);
		ret = cachefiles_inject_write_error();
		if (ret == 0)
			ret = vfs_truncate(&file->f_path, ni_size);
		if (ret < 0) {
			trace_cachefiles_vfs_error(
				object, file_inode(file), ret,
				cachefiles_trace_trunc_error);
			goto err_unuse;
		}
	}

	ret = -EINVAL;
	if (unlikely(!file->f_op->read_iter) ||
	    unlikely(!file->f_op->write_iter)) {
		pr_notice("Cache does not support read_iter and write_iter\n");
		goto err_unuse; /* err_unuse drops our ref on the file; doing
				 * fput() here as well would double-drop it and
				 * leave err_unuse dereferencing a dead file.
				 */
	}
out:
	cachefiles_end_secure(cache, saved_cred);
	return file;

err_unuse:
	cachefiles_do_unmark_inode_in_use(object, file_inode(file));
	fput(file);
err:
	file = ERR_PTR(ret);
	goto out;
}

/*
 * Create a new file.
 */
static bool cachefiles_create_file(struct cachefiles_object *object)
{
	struct file *file;
	int ret;

	ret = cachefiles_has_space(object->volume->cache, 1, 0,
				   cachefiles_has_space_for_create);
	if (ret < 0)
		return false;

	file = cachefiles_create_tmpfile(object);
	if (IS_ERR(file))
		return false;

	set_bit(FSCACHE_COOKIE_NEEDS_UPDATE, &object->cookie->flags);
	set_bit(CACHEFILES_OBJECT_USING_TMPFILE, &object->flags);
	_debug("create -> %pD{ino=%lu}", file, file_inode(file)->i_ino);
	object->file = file;
	return true;
}

/*
 * Open an existing file, checking its attributes and replacing it if it is
 * stale.
 */
static bool cachefiles_open_file(struct cachefiles_object *object,
				 struct dentry *dentry)
{
	struct cachefiles_cache *cache = object->volume->cache;
	struct file *file;
	struct path path;
	int ret;

	_enter("%pd", dentry);

	if (!cachefiles_mark_inode_in_use(object, d_inode(dentry))) {
		pr_notice("cachefiles: Inode already in use: %pd (B=%lx)\n",
			  dentry, d_inode(dentry)->i_ino);
		return false;
	}

	/* We need to open a file interface onto a data file now as we can't do
	 * it on demand because writeback called from do_exit() sees
	 * current->fs == NULL - which breaks d_path() called from ext4 open.
	 */
	path.mnt = cache->mnt;
	path.dentry = dentry;
	file = open_with_fake_path(&path, O_RDWR | O_LARGEFILE | O_DIRECT,
				   d_backing_inode(dentry), cache->cache_cred);
	if (IS_ERR(file)) {
		trace_cachefiles_vfs_error(object, d_backing_inode(dentry),
					   PTR_ERR(file),
					   cachefiles_trace_open_error);
		goto error;
	}

	if (unlikely(!file->f_op->read_iter) ||
	    unlikely(!file->f_op->write_iter)) {
		pr_notice("Cache does not support read_iter and write_iter\n");
		goto error_fput;
	}
	_debug("file -> %pd positive", dentry);

	ret = cachefiles_ondemand_init_object(object);
	if (ret < 0)
		goto error_fput;

	ret = cachefiles_check_auxdata(object, file);
	if (ret < 0)
		goto check_failed;

	object->file = file;

	/* Always update the atime on an object we've just looked up (this is
	 * used to keep track of culling, and atimes are only updated by read,
	 * write and readdir but not lookup or open).
	 */
	touch_atime(&file->f_path);
	dput(dentry);
	return true;

check_failed:
	fscache_cookie_lookup_negative(object->cookie);
	cachefiles_unmark_inode_in_use(object, file);
	fput(file);
	dput(dentry);
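	/* If the auxdata marked the object stale, scrap the stale file and
	 * start over with a fresh tmpfile.
	 */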
	if (ret == -ESTALE)
		return cachefiles_create_file(object);
	return false;

error_fput:
	fput(file);
error:
	cachefiles_do_unmark_inode_in_use(object, d_inode(dentry));
	dput(dentry);
	return false;
}

/*
 * walk from the parent object to the child object through the backing
 * filesystem, creating directories as we go
 */
bool cachefiles_look_up_object(struct cachefiles_object *object)
{
	struct cachefiles_volume *volume = object->volume;
	struct dentry *dentry, *fan = volume->fanout[(u8)object->cookie->key_hash];
	int ret;

	_enter("OBJ%x,%s,", object->debug_id, object->d_name);

	/* Look up path "cache/vol/fanout/file". */
	ret = cachefiles_inject_read_error();
	if (ret == 0)
		dentry = lookup_positive_unlocked(object->d_name, fan,
						  object->d_name_len);
	else
		dentry = ERR_PTR(ret);
	trace_cachefiles_lookup(object, fan, dentry);
	if (IS_ERR(dentry)) {
		if (dentry == ERR_PTR(-ENOENT))
			goto new_file;
		if (dentry == ERR_PTR(-EIO))
			cachefiles_io_error_obj(object, "Lookup failed");
		return false;
	}
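	/* Anything other than a regular file is unexpected; bury it in the
	 * graveyard and create a fresh file in its place.
	 */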
	if (!d_is_reg(dentry)) {
		pr_err("%pd is not a file\n", dentry);
		inode_lock_nested(d_inode(fan), I_MUTEX_PARENT);
		ret = cachefiles_bury_object(volume->cache, object, fan, dentry,
					     FSCACHE_OBJECT_IS_WEIRD);
		dput(dentry);
		if (ret < 0)
			return false;
		goto new_file;
	}

	if (!cachefiles_open_file(object, dentry))
		return false;

	_leave(" = t [%lu]", file_inode(object->file)->i_ino);
	return true;

new_file:
	fscache_cookie_lookup_negative(object->cookie);
	return cachefiles_create_file(object);
}

/*
 * Attempt to link a temporary file into its rightful place in the cache.
 */
bool cachefiles_commit_tmpfile(struct cachefiles_cache *cache,
			       struct cachefiles_object *object)
{
	struct cachefiles_volume *volume = object->volume;
	struct dentry *dentry, *fan = volume->fanout[(u8)object->cookie->key_hash];
	bool success = false;
	int ret;

	_enter(",%pD", object->file);

	inode_lock_nested(d_inode(fan), I_MUTEX_PARENT);
	ret = cachefiles_inject_read_error();
	if (ret == 0)
		dentry = lookup_one_len(object->d_name, fan, object->d_name_len);
	else
		dentry = ERR_PTR(ret);
	if (IS_ERR(dentry)) {
		trace_cachefiles_vfs_error(object, d_inode(fan), PTR_ERR(dentry),
					   cachefiles_trace_lookup_error);
		_debug("lookup fail %ld", PTR_ERR(dentry));
		goto out_unlock;
	}
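	/* A positive dentry either already refers to our backing inode (in
	 * which case the link is already made) or to a stale object that must
	 * be unlinked and looked up afresh before we can link in its place.
	 */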
	if (!d_is_negative(dentry)) {
		if (d_backing_inode(dentry) == file_inode(object->file)) {
			success = true;
			goto out_dput;
		}

		ret = cachefiles_unlink(volume->cache, object, fan, dentry,
					FSCACHE_OBJECT_IS_STALE);
		if (ret < 0)
			goto out_dput;

		dput(dentry);
		ret = cachefiles_inject_read_error();
		if (ret == 0)
			dentry = lookup_one_len(object->d_name, fan, object->d_name_len);
		else
			dentry = ERR_PTR(ret);
		if (IS_ERR(dentry)) {
			trace_cachefiles_vfs_error(object, d_inode(fan), PTR_ERR(dentry),
						   cachefiles_trace_lookup_error);
			_debug("lookup fail %ld", PTR_ERR(dentry));
			goto out_unlock;
		}
	}

	ret = cachefiles_inject_read_error();
	if (ret == 0)
		ret = vfs_link(object->file->f_path.dentry, &init_user_ns,
			       d_inode(fan), dentry, NULL);
	if (ret < 0) {
		trace_cachefiles_vfs_error(object, d_inode(fan), ret,
					   cachefiles_trace_link_error);
		_debug("link fail %d", ret);
	} else {
		trace_cachefiles_link(object, file_inode(object->file));
		spin_lock(&object->lock);
		/* TODO: Do we want to switch the file pointer to the new dentry? */
		clear_bit(CACHEFILES_OBJECT_USING_TMPFILE, &object->flags);
		spin_unlock(&object->lock);
		success = true;
	}

out_dput:
	dput(dentry);
out_unlock:
	inode_unlock(d_inode(fan));
	_leave(" = %u", success);
	return success;
}

/*
 * Look up an inode to be checked or culled.  Return -EBUSY if the inode is
 * marked in use.
 */
static struct dentry *cachefiles_lookup_for_cull(struct cachefiles_cache *cache,
						 struct dentry *dir,
						 char *filename)
{
	struct dentry *victim;
	int ret = -ENOENT;

	inode_lock_nested(d_inode(dir), I_MUTEX_PARENT);

	victim = lookup_one_len(filename, dir, strlen(filename));
	if (IS_ERR(victim))
		goto lookup_error;
	if (d_is_negative(victim))
		goto lookup_put;
	if (d_inode(victim)->i_flags & S_KERNEL_FILE)
		goto lookup_busy;
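	/* On success the directory is left locked; the caller is responsible
	 * for unlocking it once done with the victim.
	 */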
	return victim;

lookup_busy:
	ret = -EBUSY;
lookup_put:
	inode_unlock(d_inode(dir));
	dput(victim);
	return ERR_PTR(ret);

lookup_error:
	inode_unlock(d_inode(dir));
	ret = PTR_ERR(victim);
	if (ret == -ENOENT)
		return ERR_PTR(-ESTALE); /* Probably got retired by the netfs */

	if (ret == -EIO) {
		cachefiles_io_error(cache, "Lookup failed");
	} else if (ret != -ENOMEM) {
		pr_err("Internal error: %d\n", ret);
		ret = -EIO;
	}

	return ERR_PTR(ret);
}

/*
 * Cull an object if it's not in use
 * - called only by cache manager daemon
 */
int cachefiles_cull(struct cachefiles_cache *cache, struct dentry *dir,
		    char *filename)
{
	struct dentry *victim;
	struct inode *inode;
	int ret;

	_enter(",%pd/,%s", dir, filename);

	victim = cachefiles_lookup_for_cull(cache, dir, filename);
	if (IS_ERR(victim))
		return PTR_ERR(victim);

	/* check to see if someone is using this object */
	inode = d_inode(victim);
	inode_lock(inode);
	if (inode->i_flags & S_KERNEL_FILE) {
		ret = -EBUSY;
	} else {
		/* Stop the cache from picking it back up */
		inode->i_flags |= S_KERNEL_FILE;
		ret = 0;
	}
	inode_unlock(inode);
	if (ret < 0)
		goto error_unlock;

	ret = cachefiles_bury_object(cache, NULL, dir, victim,
				     FSCACHE_OBJECT_WAS_CULLED);
	if (ret < 0)
		goto error;

	fscache_count_culled();
	dput(victim);
	_leave(" = 0");
	return 0;

error_unlock:
	inode_unlock(d_inode(dir));
error:
	dput(victim);
	if (ret == -ENOENT)
		return -ESTALE; /* Probably got retired by the netfs */

	if (ret != -ENOMEM) {
		pr_err("Internal error: %d\n", ret);
		ret = -EIO;
	}

	_leave(" = %d", ret);
	return ret;
}

/*
 * Find out if an object is in use or not
 * - called only by cache manager daemon
 * - returns -EBUSY or 0 to indicate whether an object is in use or not
 */
int cachefiles_check_in_use(struct cachefiles_cache *cache, struct dentry *dir,
			    char *filename)
{
	struct dentry *victim;
	int ret = 0;

	victim = cachefiles_lookup_for_cull(cache, dir, filename);
	if (IS_ERR(victim))
		return PTR_ERR(victim);

	inode_unlock(d_inode(dir));
	dput(victim);
	return ret;
}