readdir.c

// SPDX-License-Identifier: GPL-2.0-only
/*
 *
 * Copyright (C) 2011 Novell Inc.
 */

#include <linux/fs.h>
#include <linux/slab.h>
#include <linux/namei.h>
#include <linux/file.h>
#include <linux/xattr.h>
#include <linux/rbtree.h>
#include <linux/security.h>
#include <linux/cred.h>
#include <linux/ratelimit.h>
#include "overlayfs.h"
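
/*
 * One cache entry per name seen in a merged directory.  Each entry is
 * linked on both a list (stable iteration order, so f_pos stays
 * meaningful) and an rb tree (lookup by name while merging the layers).
 */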
struct ovl_cache_entry {
	unsigned int len;
	unsigned int type;
	u64 real_ino;
	u64 ino;
	struct list_head l_node;
	struct rb_node node;
	struct ovl_cache_entry *next_maybe_whiteout;
	bool is_upper;
	bool is_whiteout;
	char name[];
};

struct ovl_dir_cache {
	long refcount;
	u64 version;
	struct list_head entries;
	struct rb_root root;
};

struct ovl_readdir_data {
	struct dir_context ctx;
	struct dentry *dentry;
	bool is_lowest;
	struct rb_root *root;
	struct list_head *list;
	struct list_head middle;
	struct ovl_cache_entry *first_maybe_whiteout;
	int count;
	int err;
	bool is_upper;
	bool d_type_supported;
};

struct ovl_dir_file {
	bool is_real;
	bool is_upper;
	struct ovl_dir_cache *cache;
	struct list_head *cursor;
	struct file *realfile;
	struct file *upperfile;
};

static struct ovl_cache_entry *ovl_cache_entry_from_node(struct rb_node *n)
{
	return rb_entry(n, struct ovl_cache_entry, node);
}

static bool ovl_cache_entry_find_link(const char *name, int len,
				      struct rb_node ***link,
				      struct rb_node **parent)
{
	bool found = false;
	struct rb_node **newp = *link;

	while (!found && *newp) {
		int cmp;
		struct ovl_cache_entry *tmp;

		*parent = *newp;
		tmp = ovl_cache_entry_from_node(*newp);
		cmp = strncmp(name, tmp->name, len);
		if (cmp > 0)
			newp = &tmp->node.rb_right;
		else if (cmp < 0 || len < tmp->len)
			newp = &tmp->node.rb_left;
		else
			found = true;
	}
	*link = newp;

	return found;
}

static struct ovl_cache_entry *ovl_cache_entry_find(struct rb_root *root,
						    const char *name, int len)
{
	struct rb_node *node = root->rb_node;
	int cmp;

	while (node) {
		struct ovl_cache_entry *p = ovl_cache_entry_from_node(node);

		cmp = strncmp(name, p->name, len);
		if (cmp > 0)
			node = p->node.rb_right;
		else if (cmp < 0 || len < p->len)
			node = p->node.rb_left;
		else
			return p;
	}

	return NULL;
}

static bool ovl_calc_d_ino(struct ovl_readdir_data *rdd,
			   struct ovl_cache_entry *p)
{
	/* Don't care if not doing ovl_iter() */
	if (!rdd->dentry)
		return false;

	/* Always recalc d_ino when remapping lower inode numbers */
	if (ovl_xino_bits(rdd->dentry->d_sb))
		return true;

	/* Always recalc d_ino for parent */
	if (strcmp(p->name, "..") == 0)
		return true;

	/* If this is lower, then native d_ino will do */
	if (!rdd->is_upper)
		return false;

	/*
	 * Recalc d_ino for '.' and for all entries if dir is impure (contains
	 * copied up entries)
	 */
	if ((p->name[0] == '.' && p->len == 1) ||
	    ovl_test_flag(OVL_IMPURE, d_inode(rdd->dentry)))
		return true;

	return false;
}
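
/*
 * Allocate a cache entry with the name stored inline after the struct.
 * Character devices are chained onto first_maybe_whiteout: each may be
 * a 0/0 whiteout device, which is verified later under the directory
 * lock by ovl_check_whiteouts().
 */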
static struct ovl_cache_entry *ovl_cache_entry_new(struct ovl_readdir_data *rdd,
						   const char *name, int len,
						   u64 ino, unsigned int d_type)
{
	struct ovl_cache_entry *p;
	size_t size = offsetof(struct ovl_cache_entry, name[len + 1]);

	p = kmalloc(size, GFP_KERNEL);
	if (!p)
		return NULL;

	memcpy(p->name, name, len);
	p->name[len] = '\0';
	p->len = len;
	p->type = d_type;
	p->real_ino = ino;
	p->ino = ino;
	/* Defer setting d_ino for upper entry to ovl_iterate() */
	if (ovl_calc_d_ino(rdd, p))
		p->ino = 0;
	p->is_upper = rdd->is_upper;
	p->is_whiteout = false;

	if (d_type == DT_CHR) {
		p->next_maybe_whiteout = rdd->first_maybe_whiteout;
		rdd->first_maybe_whiteout = p;
	}
	return p;
}

static bool ovl_cache_entry_add_rb(struct ovl_readdir_data *rdd,
				   const char *name, int len, u64 ino,
				   unsigned int d_type)
{
	struct rb_node **newp = &rdd->root->rb_node;
	struct rb_node *parent = NULL;
	struct ovl_cache_entry *p;

	if (ovl_cache_entry_find_link(name, len, &newp, &parent))
		return true;

	p = ovl_cache_entry_new(rdd, name, len, ino, d_type);
	if (p == NULL) {
		rdd->err = -ENOMEM;
		return false;
	}

	list_add_tail(&p->l_node, rdd->list);
	rb_link_node(&p->node, parent, newp);
	rb_insert_color(&p->node, rdd->root);

	return true;
}

static bool ovl_fill_lowest(struct ovl_readdir_data *rdd,
			    const char *name, int namelen,
			    loff_t offset, u64 ino, unsigned int d_type)
{
	struct ovl_cache_entry *p;

	p = ovl_cache_entry_find(rdd->root, name, namelen);
	if (p) {
		list_move_tail(&p->l_node, &rdd->middle);
	} else {
		p = ovl_cache_entry_new(rdd, name, namelen, ino, d_type);
		if (p == NULL)
			rdd->err = -ENOMEM;
		else
			list_add_tail(&p->l_node, &rdd->middle);
	}

	return rdd->err == 0;
}

void ovl_cache_free(struct list_head *list)
{
	struct ovl_cache_entry *p;
	struct ovl_cache_entry *n;

	list_for_each_entry_safe(p, n, list, l_node)
		kfree(p);

	INIT_LIST_HEAD(list);
}

void ovl_dir_cache_free(struct inode *inode)
{
	struct ovl_dir_cache *cache = ovl_dir_cache(inode);

	if (cache) {
		ovl_cache_free(&cache->entries);
		kfree(cache);
	}
}

static void ovl_cache_put(struct ovl_dir_file *od, struct dentry *dentry)
{
	struct ovl_dir_cache *cache = od->cache;

	WARN_ON(cache->refcount <= 0);
	cache->refcount--;
	if (!cache->refcount) {
		if (ovl_dir_cache(d_inode(dentry)) == cache)
			ovl_set_dir_cache(d_inode(dentry), NULL);

		ovl_cache_free(&cache->entries);
		kfree(cache);
	}
}

static bool ovl_fill_merge(struct dir_context *ctx, const char *name,
			   int namelen, loff_t offset, u64 ino,
			   unsigned int d_type)
{
	struct ovl_readdir_data *rdd =
		container_of(ctx, struct ovl_readdir_data, ctx);

	rdd->count++;
	if (!rdd->is_lowest)
		return ovl_cache_entry_add_rb(rdd, name, namelen, ino, d_type);
	else
		return ovl_fill_lowest(rdd, name, namelen, offset, ino, d_type);
}
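
/*
 * Walk the chain of DT_CHR candidates collected while filling the
 * cache and look each one up under the real directory's lock, with the
 * overlay mounter's credentials, to decide whether it really is a
 * whiteout (a 0/0 character device).
 */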
static int ovl_check_whiteouts(const struct path *path, struct ovl_readdir_data *rdd)
{
	int err;
	struct ovl_cache_entry *p;
	struct dentry *dentry, *dir = path->dentry;
	const struct cred *old_cred;

	old_cred = ovl_override_creds(rdd->dentry->d_sb);

	err = down_write_killable(&dir->d_inode->i_rwsem);
	if (!err) {
		while (rdd->first_maybe_whiteout) {
			p = rdd->first_maybe_whiteout;
			rdd->first_maybe_whiteout = p->next_maybe_whiteout;
			dentry = lookup_one(mnt_user_ns(path->mnt), p->name, dir, p->len);
			if (!IS_ERR(dentry)) {
				p->is_whiteout = ovl_is_whiteout(dentry);
				dput(dentry);
			}
		}
		inode_unlock(dir->d_inode);
	}
	ovl_revert_creds(rdd->dentry->d_sb, old_cred);

	return err;
}
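
/*
 * Read a single real directory into rdd.  iterate_dir() is called in a
 * loop until a pass turns up no new entries (rdd->count stays 0); any
 * whiteout candidates collected along the way are then verified, but
 * only when reading on behalf of a merge dir (rdd->dentry is set).
 */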
static inline int ovl_dir_read(const struct path *realpath,
			       struct ovl_readdir_data *rdd)
{
	struct file *realfile;
	int err;

	realfile = ovl_path_open(realpath, O_RDONLY | O_LARGEFILE);
	if (IS_ERR(realfile))
		return PTR_ERR(realfile);

	rdd->first_maybe_whiteout = NULL;
	rdd->ctx.pos = 0;
	do {
		rdd->count = 0;
		rdd->err = 0;
		err = iterate_dir(realfile, &rdd->ctx);
		if (err >= 0)
			err = rdd->err;
	} while (!err && rdd->count);

	if (!err && rdd->first_maybe_whiteout && rdd->dentry)
		err = ovl_check_whiteouts(realpath, rdd);

	fput(realfile);

	return err;
}

static void ovl_dir_reset(struct file *file)
{
	struct ovl_dir_file *od = file->private_data;
	struct ovl_dir_cache *cache = od->cache;
	struct dentry *dentry = file->f_path.dentry;
	bool is_real;

	if (cache && ovl_dentry_version_get(dentry) != cache->version) {
		ovl_cache_put(od, dentry);
		od->cache = NULL;
		od->cursor = NULL;
	}
	is_real = ovl_dir_is_real(dentry);
	if (od->is_real != is_real) {
		/* is_real can only become false when dir is copied up */
		if (WARN_ON(is_real))
			return;
		od->is_real = false;
	}
}
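
/*
 * Build the merged directory listing: the upper layer and each lower
 * layer are read in turn and deduplicated by name via the rb tree.
 * The lowest layer is read last with is_lowest set, splicing its
 * entries in through the "middle" list.
 */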
static int ovl_dir_read_merged(struct dentry *dentry, struct list_head *list,
			       struct rb_root *root)
{
	int err;
	struct path realpath;
	struct ovl_readdir_data rdd = {
		.ctx.actor = ovl_fill_merge,
		.dentry = dentry,
		.list = list,
		.root = root,
		.is_lowest = false,
	};
	int idx, next;

	for (idx = 0; idx != -1; idx = next) {
		next = ovl_path_next(idx, dentry, &realpath);
		rdd.is_upper = ovl_dentry_upper(dentry) == realpath.dentry;

		if (next != -1) {
			err = ovl_dir_read(&realpath, &rdd);
			if (err)
				break;
		} else {
			/*
			 * Insert lowest layer entries before upper ones; this
			 * allows offsets to be reasonably constant
			 */
			list_add(&rdd.middle, rdd.list);
			rdd.is_lowest = true;
			err = ovl_dir_read(&realpath, &rdd);
			list_del(&rdd.middle);
		}
	}
	return err;
}

static void ovl_seek_cursor(struct ovl_dir_file *od, loff_t pos)
{
	struct list_head *p;
	loff_t off = 0;

	list_for_each(p, &od->cache->entries) {
		if (off >= pos)
			break;
		off++;
	}
	/* Cursor is safe since the cache is stable */
	od->cursor = p;
}

static struct ovl_dir_cache *ovl_cache_get(struct dentry *dentry)
{
	int res;
	struct ovl_dir_cache *cache;

	cache = ovl_dir_cache(d_inode(dentry));
	if (cache && ovl_dentry_version_get(dentry) == cache->version) {
		WARN_ON(!cache->refcount);
		cache->refcount++;
		return cache;
	}
	ovl_set_dir_cache(d_inode(dentry), NULL);

	cache = kzalloc(sizeof(struct ovl_dir_cache), GFP_KERNEL);
	if (!cache)
		return ERR_PTR(-ENOMEM);

	cache->refcount = 1;
	INIT_LIST_HEAD(&cache->entries);
	cache->root = RB_ROOT;

	res = ovl_dir_read_merged(dentry, &cache->entries, &cache->root);
	if (res) {
		ovl_cache_free(&cache->entries);
		kfree(cache);
		return ERR_PTR(res);
	}

	cache->version = ovl_dentry_version_get(dentry);
	ovl_set_dir_cache(d_inode(dentry), cache);

	return cache;
}
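
/*
 * Illustrative example of the xino layout implemented below: with
 * xinobits == 8, the fsid lands in the high byte minus the reserved
 * low xinobit, so for fsid 2 and real lower inode number 42 the
 * reported d_ino is (2ULL << 57) | 42.  Real inode numbers that
 * overflow the remaining 56 bits are passed through unchanged, with an
 * optional ratelimited warning.
 */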
/* Map inode number to lower fs unique range */
static u64 ovl_remap_lower_ino(u64 ino, int xinobits, int fsid,
			       const char *name, int namelen, bool warn)
{
	unsigned int xinoshift = 64 - xinobits;

	if (unlikely(ino >> xinoshift)) {
		if (warn) {
			pr_warn_ratelimited("d_ino too big (%.*s, ino=%llu, xinobits=%d)\n",
					    namelen, name, ino, xinobits);
		}
		return ino;
	}

	/*
	 * The lowest xinobit is reserved for mapping the non-persistent inode
	 * numbers range, but this range is only exposed via st_ino, not here.
	 */
	return ino | ((u64)fsid) << (xinoshift + 1);
}

/*
 * Set d_ino for upper entries. Non-upper entries should always report
 * the uppermost real inode ino and should not call this function.
 *
 * When not all layers are on the same fs, report real ino also for upper.
 *
 * When all layers are on the same fs, and upper has a reference to
 * copy up origin, call vfs_getattr() on the overlay entry to make
 * sure that d_ino will be consistent with st_ino from stat(2).
 */
static int ovl_cache_update_ino(const struct path *path, struct ovl_cache_entry *p)
{
	struct dentry *dir = path->dentry;
	struct dentry *this = NULL;
	enum ovl_path_type type;
	u64 ino = p->real_ino;
	int xinobits = ovl_xino_bits(dir->d_sb);
	int err = 0;

	if (!ovl_same_dev(dir->d_sb))
		goto out;

	if (p->name[0] == '.') {
		if (p->len == 1) {
			this = dget(dir);
			goto get;
		}
		if (p->len == 2 && p->name[1] == '.') {
			/* we shall not be moved */
			this = dget(dir->d_parent);
			goto get;
		}
	}
	this = lookup_one(mnt_user_ns(path->mnt), p->name, dir, p->len);
	if (IS_ERR_OR_NULL(this) || !this->d_inode) {
		/* Mark a stale entry */
		p->is_whiteout = true;
		if (IS_ERR(this)) {
			err = PTR_ERR(this);
			this = NULL;
			goto fail;
		}
		goto out;
	}

get:
	type = ovl_path_type(this);
	if (OVL_TYPE_ORIGIN(type)) {
		struct kstat stat;
		struct path statpath = *path;

		statpath.dentry = this;
		err = vfs_getattr(&statpath, &stat, STATX_INO, 0);
		if (err)
			goto fail;

		/*
		 * Directory inode is always on overlay st_dev.
		 * Non-dir with ovl_same_dev() could be on pseudo st_dev in case
		 * of xino bits overflow.
		 */
		WARN_ON_ONCE(S_ISDIR(stat.mode) &&
			     dir->d_sb->s_dev != stat.dev);
		ino = stat.ino;
	} else if (xinobits && !OVL_TYPE_UPPER(type)) {
		ino = ovl_remap_lower_ino(ino, xinobits,
					  ovl_layer_lower(this)->fsid,
					  p->name, p->len,
					  ovl_xino_warn(dir->d_sb));
	}

out:
	p->ino = ino;
	dput(this);
	return err;

fail:
	pr_warn_ratelimited("failed to look up (%s) for ino (%i)\n",
			    p->name, err);
	goto out;
}

static bool ovl_fill_plain(struct dir_context *ctx, const char *name,
			   int namelen, loff_t offset, u64 ino,
			   unsigned int d_type)
{
	struct ovl_cache_entry *p;
	struct ovl_readdir_data *rdd =
		container_of(ctx, struct ovl_readdir_data, ctx);

	rdd->count++;
	p = ovl_cache_entry_new(rdd, name, namelen, ino, d_type);
	if (p == NULL) {
		rdd->err = -ENOMEM;
		return false;
	}
	list_add_tail(&p->l_node, rdd->list);

	return true;
}

static int ovl_dir_read_impure(const struct path *path, struct list_head *list,
			       struct rb_root *root)
{
	int err;
	struct path realpath;
	struct ovl_cache_entry *p, *n;
	struct ovl_readdir_data rdd = {
		.ctx.actor = ovl_fill_plain,
		.list = list,
		.root = root,
	};

	INIT_LIST_HEAD(list);
	*root = RB_ROOT;
	ovl_path_upper(path->dentry, &realpath);

	err = ovl_dir_read(&realpath, &rdd);
	if (err)
		return err;

	list_for_each_entry_safe(p, n, list, l_node) {
		if (strcmp(p->name, ".") != 0 &&
		    strcmp(p->name, "..") != 0) {
			err = ovl_cache_update_ino(path, p);
			if (err)
				return err;
		}
		if (p->ino == p->real_ino) {
			list_del(&p->l_node);
			kfree(p);
		} else {
			struct rb_node **newp = &root->rb_node;
			struct rb_node *parent = NULL;

			if (WARN_ON(ovl_cache_entry_find_link(p->name, p->len,
							      &newp, &parent)))
				return -EIO;

			rb_link_node(&p->node, parent, newp);
			rb_insert_color(&p->node, root);
		}
	}
	return 0;
}

static struct ovl_dir_cache *ovl_cache_get_impure(const struct path *path)
{
	int res;
	struct dentry *dentry = path->dentry;
	struct ovl_fs *ofs = OVL_FS(dentry->d_sb);
	struct ovl_dir_cache *cache;

	cache = ovl_dir_cache(d_inode(dentry));
	if (cache && ovl_dentry_version_get(dentry) == cache->version)
		return cache;

	/* Impure cache is not refcounted, free it here */
	ovl_dir_cache_free(d_inode(dentry));
	ovl_set_dir_cache(d_inode(dentry), NULL);

	cache = kzalloc(sizeof(struct ovl_dir_cache), GFP_KERNEL);
	if (!cache)
		return ERR_PTR(-ENOMEM);

	res = ovl_dir_read_impure(path, &cache->entries, &cache->root);
	if (res) {
		ovl_cache_free(&cache->entries);
		kfree(cache);
		return ERR_PTR(res);
	}
	if (list_empty(&cache->entries)) {
		/*
		 * A good opportunity to get rid of an unneeded "impure" flag.
		 * Removing the "impure" xattr is best effort.
		 */
		if (!ovl_want_write(dentry)) {
			ovl_removexattr(ofs, ovl_dentry_upper(dentry),
					OVL_XATTR_IMPURE);
			ovl_drop_write(dentry);
		}
		ovl_clear_flag(OVL_IMPURE, d_inode(dentry));
		kfree(cache);
		return NULL;
	}

	cache->version = ovl_dentry_version_get(dentry);
	ovl_set_dir_cache(d_inode(dentry), cache);

	return cache;
}
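
/*
 * Context for iterating a real directory while rewriting inode numbers
 * on the fly: '..' gets the merged parent's ino, copied-up entries get
 * the ino recorded in the impure cache, and plain lower entries are
 * remapped into the per-fs xino range.
 */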
struct ovl_readdir_translate {
	struct dir_context *orig_ctx;
	struct ovl_dir_cache *cache;
	struct dir_context ctx;
	u64 parent_ino;
	int fsid;
	int xinobits;
	bool xinowarn;
};

static bool ovl_fill_real(struct dir_context *ctx, const char *name,
			  int namelen, loff_t offset, u64 ino,
			  unsigned int d_type)
{
	struct ovl_readdir_translate *rdt =
		container_of(ctx, struct ovl_readdir_translate, ctx);
	struct dir_context *orig_ctx = rdt->orig_ctx;

	if (rdt->parent_ino && strcmp(name, "..") == 0) {
		ino = rdt->parent_ino;
	} else if (rdt->cache) {
		struct ovl_cache_entry *p;

		p = ovl_cache_entry_find(&rdt->cache->root, name, namelen);
		if (p)
			ino = p->ino;
	} else if (rdt->xinobits) {
		ino = ovl_remap_lower_ino(ino, rdt->xinobits, rdt->fsid,
					  name, namelen, rdt->xinowarn);
	}

	return orig_ctx->actor(orig_ctx, name, namelen, offset, ino, d_type);
}

static bool ovl_is_impure_dir(struct file *file)
{
	struct ovl_dir_file *od = file->private_data;
	struct inode *dir = d_inode(file->f_path.dentry);

	/*
	 * Only upper dir can be impure, but if we are in the middle of
	 * iterating a lower real dir, dir could be copied up and marked
	 * impure. We only want the impure cache if we started iterating
	 * a real upper dir to begin with.
	 */
	return od->is_upper && ovl_test_flag(OVL_IMPURE, dir);
}

static int ovl_iterate_real(struct file *file, struct dir_context *ctx)
{
	int err;
	struct ovl_dir_file *od = file->private_data;
	struct dentry *dir = file->f_path.dentry;
	const struct ovl_layer *lower_layer = ovl_layer_lower(dir);
	struct ovl_readdir_translate rdt = {
		.ctx.actor = ovl_fill_real,
		.orig_ctx = ctx,
		.xinobits = ovl_xino_bits(dir->d_sb),
		.xinowarn = ovl_xino_warn(dir->d_sb),
	};

	if (rdt.xinobits && lower_layer)
		rdt.fsid = lower_layer->fsid;

	if (OVL_TYPE_MERGE(ovl_path_type(dir->d_parent))) {
		struct kstat stat;
		struct path statpath = file->f_path;

		statpath.dentry = dir->d_parent;
		err = vfs_getattr(&statpath, &stat, STATX_INO, 0);
		if (err)
			return err;

		WARN_ON_ONCE(dir->d_sb->s_dev != stat.dev);
		rdt.parent_ino = stat.ino;
	}

	if (ovl_is_impure_dir(file)) {
		rdt.cache = ovl_cache_get_impure(&file->f_path);
		if (IS_ERR(rdt.cache))
			return PTR_ERR(rdt.cache);
	}

	err = iterate_dir(od->realfile, &rdt.ctx);
	ctx->pos = rdt.ctx.pos;

	return err;
}
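
/*
 * Three iteration strategies, cheapest first: pass the real directory
 * straight through when no d_ino fixups are needed, iterate the real
 * directory with inode number translation when only d_ino needs
 * adjusting, or fall back to the full merged cache for merge dirs.
 */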
static int ovl_iterate(struct file *file, struct dir_context *ctx)
{
	struct ovl_dir_file *od = file->private_data;
	struct dentry *dentry = file->f_path.dentry;
	struct ovl_cache_entry *p;
	const struct cred *old_cred;
	int err;

	old_cred = ovl_override_creds(dentry->d_sb);
	if (!ctx->pos)
		ovl_dir_reset(file);

	if (od->is_real) {
		/*
		 * If parent is merge, then need to adjust d_ino for '..'; if
		 * dir is impure then need to adjust d_ino for copied up
		 * entries.
		 */
		if (ovl_xino_bits(dentry->d_sb) ||
		    (ovl_same_fs(dentry->d_sb) &&
		     (ovl_is_impure_dir(file) ||
		      OVL_TYPE_MERGE(ovl_path_type(dentry->d_parent))))) {
			err = ovl_iterate_real(file, ctx);
		} else {
			err = iterate_dir(od->realfile, ctx);
		}
		goto out;
	}

	if (!od->cache) {
		struct ovl_dir_cache *cache;

		cache = ovl_cache_get(dentry);
		err = PTR_ERR(cache);
		if (IS_ERR(cache))
			goto out;

		od->cache = cache;
		ovl_seek_cursor(od, ctx->pos);
	}

	while (od->cursor != &od->cache->entries) {
		p = list_entry(od->cursor, struct ovl_cache_entry, l_node);
		if (!p->is_whiteout) {
			if (!p->ino) {
				err = ovl_cache_update_ino(&file->f_path, p);
				if (err)
					goto out;
			}
		}
		/* ovl_cache_update_ino() sets is_whiteout on stale entry */
		if (!p->is_whiteout) {
			if (!dir_emit(ctx, p->name, p->len, p->ino, p->type))
				break;
		}
		od->cursor = p->l_node.next;
		ctx->pos++;
	}
	err = 0;
out:
	ovl_revert_creds(dentry->d_sb, old_cred);
	return err;
}

static loff_t ovl_dir_llseek(struct file *file, loff_t offset, int origin)
{
	loff_t res;
	struct ovl_dir_file *od = file->private_data;

	inode_lock(file_inode(file));
	if (!file->f_pos)
		ovl_dir_reset(file);

	if (od->is_real) {
		res = vfs_llseek(od->realfile, offset, origin);
		file->f_pos = od->realfile->f_pos;
	} else {
		res = -EINVAL;

		switch (origin) {
		case SEEK_CUR:
			offset += file->f_pos;
			break;
		case SEEK_SET:
			break;
		default:
			goto out_unlock;
		}
		if (offset < 0)
			goto out_unlock;

		if (offset != file->f_pos) {
			file->f_pos = offset;
			if (od->cache)
				ovl_seek_cursor(od, offset);
		}
		res = offset;
	}
out_unlock:
	inode_unlock(file_inode(file));

	return res;
}

static struct file *ovl_dir_open_realfile(const struct file *file,
					  const struct path *realpath)
{
	struct file *res;
	const struct cred *old_cred;

	old_cred = ovl_override_creds(file_inode(file)->i_sb);
	res = ovl_path_open(realpath, O_RDONLY | (file->f_flags & O_LARGEFILE));
	ovl_revert_creds(file_inode(file)->i_sb, old_cred);

	return res;
}

/*
 * Like ovl_real_fdget(), returns upperfile if dir was copied up since open.
 * Unlike ovl_real_fdget(), this caches upperfile in file->private_data.
 *
 * TODO: use same abstract type for file->private_data of dir and file so
 * upperfile could also be cached for files.
 */
struct file *ovl_dir_real_file(const struct file *file, bool want_upper)
{
	struct ovl_dir_file *od = file->private_data;
	struct dentry *dentry = file->f_path.dentry;
	struct file *old, *realfile = od->realfile;

	if (!OVL_TYPE_UPPER(ovl_path_type(dentry)))
		return want_upper ? NULL : realfile;

	/*
	 * Need to check if we started out being a lower dir, but got copied up
	 */
	if (!od->is_upper) {
		realfile = READ_ONCE(od->upperfile);
		if (!realfile) {
			struct path upperpath;

			ovl_path_upper(dentry, &upperpath);
			realfile = ovl_dir_open_realfile(file, &upperpath);
			if (IS_ERR(realfile))
				return realfile;
			old = cmpxchg_release(&od->upperfile, NULL, realfile);
			if (old) {
				fput(realfile);
				realfile = old;
			}
		}
	}

	return realfile;
}

static int ovl_dir_fsync(struct file *file, loff_t start, loff_t end,
			 int datasync)
{
	struct file *realfile;
	int err;

	err = ovl_sync_status(OVL_FS(file->f_path.dentry->d_sb));
	if (err <= 0)
		return err;

	realfile = ovl_dir_real_file(file, true);
	err = PTR_ERR_OR_ZERO(realfile);

	/* Nothing to sync for lower */
	if (!realfile || err)
		return err;

	return vfs_fsync_range(realfile, start, end, datasync);
}

static int ovl_dir_release(struct inode *inode, struct file *file)
{
	struct ovl_dir_file *od = file->private_data;

	if (od->cache) {
		inode_lock(inode);
		ovl_cache_put(od, file->f_path.dentry);
		inode_unlock(inode);
	}
	fput(od->realfile);
	if (od->upperfile)
		fput(od->upperfile);
	kfree(od);

	return 0;
}

static int ovl_dir_open(struct inode *inode, struct file *file)
{
	struct path realpath;
	struct file *realfile;
	struct ovl_dir_file *od;
	enum ovl_path_type type;

	od = kzalloc(sizeof(struct ovl_dir_file), GFP_KERNEL);
	if (!od)
		return -ENOMEM;

#ifdef CONFIG_KSU_SUSFS_SUS_OVERLAYFS
	ovl_path_lowerdata(file->f_path.dentry, &realpath);
	if (likely(realpath.mnt && realpath.dentry)) {
		// We still use '__OVL_PATH_UPPER' here which should be fine.
		type = __OVL_PATH_UPPER;
		goto bypass_orig_flow;
	}
#endif
	type = ovl_path_real(file->f_path.dentry, &realpath);
#ifdef CONFIG_KSU_SUSFS_SUS_OVERLAYFS
bypass_orig_flow:
#endif
	realfile = ovl_dir_open_realfile(file, &realpath);
	if (IS_ERR(realfile)) {
		kfree(od);
		return PTR_ERR(realfile);
	}
	od->realfile = realfile;
	od->is_real = ovl_dir_is_real(file->f_path.dentry);
	od->is_upper = OVL_TYPE_UPPER(type);
	file->private_data = od;

	return 0;
}

const struct file_operations ovl_dir_operations = {
	.read		= generic_read_dir,
	.open		= ovl_dir_open,
	.iterate	= ovl_iterate,
	.llseek		= ovl_dir_llseek,
	.fsync		= ovl_dir_fsync,
	.release	= ovl_dir_release,
};
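
/*
 * Read the merged directory and decide whether it is empty for rmdir
 * purposes: only '.', '..' and whiteouts may remain.  On success the
 * list is left holding the upper-layer whiteouts, which the caller
 * clears via ovl_cleanup_whiteouts() when deleting the directory.
 */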
int ovl_check_empty_dir(struct dentry *dentry, struct list_head *list)
{
	int err;
	struct ovl_cache_entry *p, *n;
	struct rb_root root = RB_ROOT;
	const struct cred *old_cred;

	old_cred = ovl_override_creds(dentry->d_sb);
	err = ovl_dir_read_merged(dentry, list, &root);
	ovl_revert_creds(dentry->d_sb, old_cred);
	if (err)
		return err;

	err = 0;

	list_for_each_entry_safe(p, n, list, l_node) {
		/*
		 * Select whiteouts in upperdir, they should
		 * be cleared when deleting this directory.
		 */
		if (p->is_whiteout) {
			if (p->is_upper)
				continue;
			goto del_entry;
		}

		if (p->name[0] == '.') {
			if (p->len == 1)
				goto del_entry;
			if (p->len == 2 && p->name[1] == '.')
				goto del_entry;
		}
		err = -ENOTEMPTY;
		break;

del_entry:
		list_del(&p->l_node);
		kfree(p);
	}

	return err;
}

void ovl_cleanup_whiteouts(struct ovl_fs *ofs, struct dentry *upper,
			   struct list_head *list)
{
	struct ovl_cache_entry *p;

	inode_lock_nested(upper->d_inode, I_MUTEX_CHILD);
	list_for_each_entry(p, list, l_node) {
		struct dentry *dentry;

		if (WARN_ON(!p->is_whiteout || !p->is_upper))
			continue;

		dentry = ovl_lookup_upper(ofs, p->name, upper, p->len);
		if (IS_ERR(dentry)) {
			pr_err("lookup '%s/%.*s' failed (%i)\n",
			       upper->d_name.name, p->len, p->name,
			       (int) PTR_ERR(dentry));
			continue;
		}
		if (dentry->d_inode)
			ovl_cleanup(ofs, upper->d_inode, dentry);
		dput(dentry);
	}
	inode_unlock(upper->d_inode);
}

static bool ovl_check_d_type(struct dir_context *ctx, const char *name,
			     int namelen, loff_t offset, u64 ino,
			     unsigned int d_type)
{
	struct ovl_readdir_data *rdd =
		container_of(ctx, struct ovl_readdir_data, ctx);

	/* Even if d_type is not supported, DT_DIR is returned for . and .. */
	if (!strncmp(name, ".", namelen) || !strncmp(name, "..", namelen))
		return true;

	if (d_type != DT_UNKNOWN)
		rdd->d_type_supported = true;

	return true;
}

/*
 * Returns 1 if d_type is supported, 0 if not supported/unknown, and a
 * negative value if an error is encountered.
 */
int ovl_check_d_type_supported(const struct path *realpath)
{
	int err;
	struct ovl_readdir_data rdd = {
		.ctx.actor = ovl_check_d_type,
		.d_type_supported = false,
	};

	err = ovl_dir_read(realpath, &rdd);
	if (err)
		return err;

	return rdd.d_type_supported;
}

#define OVL_INCOMPATDIR_NAME "incompat"

static int ovl_workdir_cleanup_recurse(struct ovl_fs *ofs, const struct path *path,
				       int level)
{
	int err;
	struct inode *dir = path->dentry->d_inode;
	LIST_HEAD(list);
	struct rb_root root = RB_ROOT;
	struct ovl_cache_entry *p;
	struct ovl_readdir_data rdd = {
		.ctx.actor = ovl_fill_merge,
		.dentry = NULL,
		.list = &list,
		.root = &root,
		.is_lowest = false,
	};
	bool incompat = false;

	/*
	 * The "work/incompat" directory is treated specially - if it is not
	 * empty, instead of printing a generic error and mounting read-only,
	 * we will error about incompat features and fail the mount.
	 *
	 * When called from ovl_indexdir_cleanup(), path->dentry->d_name.name
	 * starts with '#'.
	 */
	if (level == 2 &&
	    !strcmp(path->dentry->d_name.name, OVL_INCOMPATDIR_NAME))
		incompat = true;

	err = ovl_dir_read(path, &rdd);
	if (err)
		goto out;

	inode_lock_nested(dir, I_MUTEX_PARENT);
	list_for_each_entry(p, &list, l_node) {
		struct dentry *dentry;

		if (p->name[0] == '.') {
			if (p->len == 1)
				continue;
			if (p->len == 2 && p->name[1] == '.')
				continue;
		} else if (incompat) {
			pr_err("overlay with incompat feature '%s' cannot be mounted\n",
			       p->name);
			err = -EINVAL;
			break;
		}
		dentry = ovl_lookup_upper(ofs, p->name, path->dentry, p->len);
		if (IS_ERR(dentry))
			continue;
		if (dentry->d_inode)
			err = ovl_workdir_cleanup(ofs, dir, path->mnt, dentry, level);
		dput(dentry);
		if (err)
			break;
	}
	inode_unlock(dir);
out:
	ovl_cache_free(&list);
	return err;
}

int ovl_workdir_cleanup(struct ovl_fs *ofs, struct inode *dir,
			struct vfsmount *mnt, struct dentry *dentry, int level)
{
	int err;

	if (!d_is_dir(dentry) || level > 1) {
		return ovl_cleanup(ofs, dir, dentry);
	}

	err = ovl_do_rmdir(ofs, dir, dentry);
	if (err) {
		struct path path = { .mnt = mnt, .dentry = dentry };

		inode_unlock(dir);
		err = ovl_workdir_cleanup_recurse(ofs, &path, level + 1);
		inode_lock_nested(dir, I_MUTEX_PARENT);
		if (!err)
			err = ovl_cleanup(ofs, dir, dentry);
	}

	return err;
}
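
/*
 * Walk the index dir at mount time and verify each entry against its
 * origin.  Leftover temp entries ('#...') and stale entries are
 * removed; orphan entries are whiteouted when nfs_export is enabled
 * (to block future open by handle) and removed otherwise; any other
 * verification failure aborts the mount to avoid corrupting the index.
 */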
int ovl_indexdir_cleanup(struct ovl_fs *ofs)
{
	int err;
	struct dentry *indexdir = ofs->indexdir;
	struct dentry *index = NULL;
	struct inode *dir = indexdir->d_inode;
	struct path path = { .mnt = ovl_upper_mnt(ofs), .dentry = indexdir };
	LIST_HEAD(list);
	struct rb_root root = RB_ROOT;
	struct ovl_cache_entry *p;
	struct ovl_readdir_data rdd = {
		.ctx.actor = ovl_fill_merge,
		.dentry = NULL,
		.list = &list,
		.root = &root,
		.is_lowest = false,
	};

	err = ovl_dir_read(&path, &rdd);
	if (err)
		goto out;

	inode_lock_nested(dir, I_MUTEX_PARENT);
	list_for_each_entry(p, &list, l_node) {
		if (p->name[0] == '.') {
			if (p->len == 1)
				continue;
			if (p->len == 2 && p->name[1] == '.')
				continue;
		}
		index = ovl_lookup_upper(ofs, p->name, indexdir, p->len);
		if (IS_ERR(index)) {
			err = PTR_ERR(index);
			index = NULL;
			break;
		}
		/* Cleanup leftover from index create/cleanup attempt */
		if (index->d_name.name[0] == '#') {
			err = ovl_workdir_cleanup(ofs, dir, path.mnt, index, 1);
			if (err)
				break;
			goto next;
		}
		err = ovl_verify_index(ofs, index);
		if (!err) {
			goto next;
		} else if (err == -ESTALE) {
			/* Cleanup stale index entries */
			err = ovl_cleanup(ofs, dir, index);
		} else if (err != -ENOENT) {
			/*
			 * Abort mount to avoid corrupting the index if
			 * an incompatible index entry was found or on out
			 * of memory.
			 */
			break;
		} else if (ofs->config.nfs_export) {
			/*
			 * Whiteout orphan index to block future open by
			 * handle after overlay nlink dropped to zero.
			 */
			err = ovl_cleanup_and_whiteout(ofs, dir, index);
		} else {
			/* Cleanup orphan index entries */
			err = ovl_cleanup(ofs, dir, index);
		}

		if (err)
			break;

next:
		dput(index);
		index = NULL;
	}
	dput(index);
	inode_unlock(dir);
out:
	ovl_cache_free(&list);
	if (err)
		pr_err("failed index dir cleanup (%i)\n", err);
	return err;
}