inode.c 34 KB

12345678910111213141516171819202122232425262728293031323334353637383940414243444546474849505152535455565758596061626364656667686970717273747576777879808182838485868788899091929394959697989910010110210310410510610710810911011111211311411511611711811912012112212312412512612712812913013113213313413513613713813914014114214314414514614714814915015115215315415515615715815916016116216316416516616716816917017117217317417517617717817918018118218318418518618718818919019119219319419519619719819920020120220320420520620720820921021121221321421521621721821922022122222322422522622722822923023123223323423523623723823924024124224324424524624724824925025125225325425525625725825926026126226326426526626726826927027127227327427527627727827928028128228328428528628728828929029129229329429529629729829930030130230330430530630730830931031131231331431531631731831932032132232332432532632732832933033133233333433533633733833934034134234334434534634734834935035135235335435535635735835936036136236336436536636736836937037137237337437537637737837938038138238338438538638738838939039139239339439539639739839940040140240340440540640740840941041141241341441541641741841942042142242342442542642742842943043143243343443543643743843944044144244344444544644744844945045145245345445545645745845946046146246346446546646746846947047147247347447547647747847948048148248348448548648748848949049149249349449549649749849950050150250350450550650750850951051151251351451551651751851952052152252352452552652752852953053153253353453553653753853954054154254354454554654754854955055155255355455555655755855956056156256356456556656756856957057157257357457557657757857958058158258358458558658758858959059159259359459559659759859960060160260360460560660760860961061161261361461561661761861962062162262362462562662762862963063163263363463563663763863964064164264364464564664764864965065165265365465565665765865966066166266366466566666766866967067167267367467567667767867968068168268368468568668768868969069169269369469569669769869970070170270
37047057067077087097107117127137147157167177187197207217227237247257267277287297307317327337347357367377387397407417427437447457467477487497507517527537547557567577587597607617627637647657667677687697707717727737747757767777787797807817827837847857867877887897907917927937947957967977987998008018028038048058068078088098108118128138148158168178188198208218228238248258268278288298308318328338348358368378388398408418428438448458468478488498508518528538548558568578588598608618628638648658668678688698708718728738748758768778788798808818828838848858868878888898908918928938948958968978988999009019029039049059069079089099109119129139149159169179189199209219229239249259269279289299309319329339349359369379389399409419429439449459469479489499509519529539549559569579589599609619629639649659669679689699709719729739749759769779789799809819829839849859869879889899909919929939949959969979989991000100110021003100410051006100710081009101010111012101310141015101610171018101910201021102210231024102510261027102810291030103110321033103410351036103710381039104010411042104310441045104610471048104910501051105210531054105510561057105810591060106110621063106410651066106710681069107010711072107310741075107610771078107910801081108210831084108510861087108810891090109110921093109410951096109710981099110011011102110311041105110611071108110911101111111211131114111511161117111811191120112111221123112411251126112711281129113011311132113311341135113611371138113911401141114211431144114511461147114811491150115111521153115411551156115711581159116011611162116311641165116611671168116911701171117211731174117511761177117811791180118111821183118411851186118711881189119011911192119311941195119611971198119912001201120212031204120512061207120812091210121112121213121412151216121712181219122012211222122312241225122612271228122912301231123212331234123512361237123812391240124112421243124412451246124712481249125012511252125312541255125612571258125912601261126212631264126512661267126812691270127112721273127412751276127
712781279128012811282128312841285
  1. // SPDX-License-Identifier: GPL-2.0-only
  2. /*
  3. *
  4. * Copyright (C) 2011 Novell Inc.
  5. */
  6. #include <linux/fs.h>
  7. #include <linux/slab.h>
  8. #include <linux/cred.h>
  9. #include <linux/xattr.h>
  10. #include <linux/posix_acl.h>
  11. #include <linux/ratelimit.h>
  12. #include <linux/fiemap.h>
  13. #include <linux/fileattr.h>
  14. #include <linux/security.h>
  15. #include <linux/namei.h>
  16. #include "overlayfs.h"
  17. int ovl_setattr(struct user_namespace *mnt_userns, struct dentry *dentry,
  18. struct iattr *attr)
  19. {
  20. int err;
  21. struct ovl_fs *ofs = OVL_FS(dentry->d_sb);
  22. bool full_copy_up = false;
  23. struct dentry *upperdentry;
  24. const struct cred *old_cred;
  25. err = setattr_prepare(&init_user_ns, dentry, attr);
  26. if (err)
  27. return err;
  28. err = ovl_want_write(dentry);
  29. if (err)
  30. goto out;
  31. if (attr->ia_valid & ATTR_SIZE) {
  32. /* Truncate should trigger data copy up as well */
  33. full_copy_up = true;
  34. }
  35. if (!full_copy_up)
  36. err = ovl_copy_up(dentry);
  37. else
  38. err = ovl_copy_up_with_data(dentry);
  39. if (!err) {
  40. struct inode *winode = NULL;
  41. upperdentry = ovl_dentry_upper(dentry);
  42. if (attr->ia_valid & ATTR_SIZE) {
  43. winode = d_inode(upperdentry);
  44. err = get_write_access(winode);
  45. if (err)
  46. goto out_drop_write;
  47. }
  48. if (attr->ia_valid & (ATTR_KILL_SUID|ATTR_KILL_SGID))
  49. attr->ia_valid &= ~ATTR_MODE;
  50. /*
  51. * We might have to translate ovl file into real file object
  52. * once use cases emerge. For now, simply don't let underlying
  53. * filesystem rely on attr->ia_file
  54. */
  55. attr->ia_valid &= ~ATTR_FILE;
  56. /*
  57. * If open(O_TRUNC) is done, VFS calls ->setattr with ATTR_OPEN
  58. * set. Overlayfs does not pass O_TRUNC flag to underlying
  59. * filesystem during open -> do not pass ATTR_OPEN. This
  60. * disables optimization in fuse which assumes open(O_TRUNC)
  61. * already set file size to 0. But we never passed O_TRUNC to
  62. * fuse. So by clearing ATTR_OPEN, fuse will be forced to send
  63. * setattr request to server.
  64. */
  65. attr->ia_valid &= ~ATTR_OPEN;
  66. inode_lock(upperdentry->d_inode);
  67. old_cred = ovl_override_creds(dentry->d_sb);
  68. err = ovl_do_notify_change(ofs, upperdentry, attr);
  69. ovl_revert_creds(dentry->d_sb, old_cred);
  70. if (!err)
  71. ovl_copyattr(dentry->d_inode);
  72. inode_unlock(upperdentry->d_inode);
  73. if (winode)
  74. put_write_access(winode);
  75. }
  76. out_drop_write:
  77. ovl_drop_write(dentry);
  78. out:
  79. return err;
  80. }
  81. static void ovl_map_dev_ino(struct dentry *dentry, struct kstat *stat, int fsid)
  82. {
  83. bool samefs = ovl_same_fs(dentry->d_sb);
  84. unsigned int xinobits = ovl_xino_bits(dentry->d_sb);
  85. unsigned int xinoshift = 64 - xinobits;
  86. if (samefs) {
  87. /*
  88. * When all layers are on the same fs, all real inode
  89. * number are unique, so we use the overlay st_dev,
  90. * which is friendly to du -x.
  91. */
  92. stat->dev = dentry->d_sb->s_dev;
  93. return;
  94. } else if (xinobits) {
  95. /*
  96. * All inode numbers of underlying fs should not be using the
  97. * high xinobits, so we use high xinobits to partition the
  98. * overlay st_ino address space. The high bits holds the fsid
  99. * (upper fsid is 0). The lowest xinobit is reserved for mapping
  100. * the non-persistent inode numbers range in case of overflow.
  101. * This way all overlay inode numbers are unique and use the
  102. * overlay st_dev.
  103. */
  104. if (likely(!(stat->ino >> xinoshift))) {
  105. stat->ino |= ((u64)fsid) << (xinoshift + 1);
  106. stat->dev = dentry->d_sb->s_dev;
  107. return;
  108. } else if (ovl_xino_warn(dentry->d_sb)) {
  109. pr_warn_ratelimited("inode number too big (%pd2, ino=%llu, xinobits=%d)\n",
  110. dentry, stat->ino, xinobits);
  111. }
  112. }
  113. /* The inode could not be mapped to a unified st_ino address space */
  114. if (S_ISDIR(dentry->d_inode->i_mode)) {
  115. /*
  116. * Always use the overlay st_dev for directories, so 'find
  117. * -xdev' will scan the entire overlay mount and won't cross the
  118. * overlay mount boundaries.
  119. *
  120. * If not all layers are on the same fs the pair {real st_ino;
  121. * overlay st_dev} is not unique, so use the non persistent
  122. * overlay st_ino for directories.
  123. */
  124. stat->dev = dentry->d_sb->s_dev;
  125. stat->ino = dentry->d_inode->i_ino;
  126. } else {
  127. /*
  128. * For non-samefs setup, if we cannot map all layers st_ino
  129. * to a unified address space, we need to make sure that st_dev
  130. * is unique per underlying fs, so we use the unique anonymous
  131. * bdev assigned to the underlying fs.
  132. */
  133. stat->dev = OVL_FS(dentry->d_sb)->fs[fsid].pseudo_dev;
  134. }
  135. }
  136. int ovl_getattr(struct user_namespace *mnt_userns, const struct path *path,
  137. struct kstat *stat, u32 request_mask, unsigned int flags)
  138. {
  139. struct dentry *dentry = path->dentry;
  140. enum ovl_path_type type;
  141. struct path realpath;
  142. const struct cred *old_cred;
  143. struct inode *inode = d_inode(dentry);
  144. bool is_dir = S_ISDIR(inode->i_mode);
  145. int fsid = 0;
  146. int err;
  147. bool metacopy_blocks = false;
  148. metacopy_blocks = ovl_is_metacopy_dentry(dentry);
  149. type = ovl_path_real(dentry, &realpath);
  150. old_cred = ovl_override_creds(dentry->d_sb);
  151. err = vfs_getattr(&realpath, stat, request_mask, flags);
  152. if (err)
  153. goto out;
  154. /* Report the effective immutable/append-only STATX flags */
  155. generic_fill_statx_attr(inode, stat);
  156. /*
  157. * For non-dir or same fs, we use st_ino of the copy up origin.
  158. * This guaranties constant st_dev/st_ino across copy up.
  159. * With xino feature and non-samefs, we use st_ino of the copy up
  160. * origin masked with high bits that represent the layer id.
  161. *
  162. * If lower filesystem supports NFS file handles, this also guaranties
  163. * persistent st_ino across mount cycle.
  164. */
  165. if (!is_dir || ovl_same_dev(dentry->d_sb)) {
  166. if (!OVL_TYPE_UPPER(type)) {
  167. fsid = ovl_layer_lower(dentry)->fsid;
  168. } else if (OVL_TYPE_ORIGIN(type)) {
  169. struct kstat lowerstat;
  170. u32 lowermask = STATX_INO | STATX_BLOCKS |
  171. (!is_dir ? STATX_NLINK : 0);
  172. ovl_path_lower(dentry, &realpath);
  173. err = vfs_getattr(&realpath, &lowerstat,
  174. lowermask, flags);
  175. if (err)
  176. goto out;
  177. /*
  178. * Lower hardlinks may be broken on copy up to different
  179. * upper files, so we cannot use the lower origin st_ino
  180. * for those different files, even for the same fs case.
  181. *
  182. * Similarly, several redirected dirs can point to the
  183. * same dir on a lower layer. With the "verify_lower"
  184. * feature, we do not use the lower origin st_ino, if
  185. * we haven't verified that this redirect is unique.
  186. *
  187. * With inodes index enabled, it is safe to use st_ino
  188. * of an indexed origin. The index validates that the
  189. * upper hardlink is not broken and that a redirected
  190. * dir is the only redirect to that origin.
  191. */
  192. if (ovl_test_flag(OVL_INDEX, d_inode(dentry)) ||
  193. (!ovl_verify_lower(dentry->d_sb) &&
  194. (is_dir || lowerstat.nlink == 1))) {
  195. fsid = ovl_layer_lower(dentry)->fsid;
  196. stat->ino = lowerstat.ino;
  197. }
  198. /*
  199. * If we are querying a metacopy dentry and lower
  200. * dentry is data dentry, then use the blocks we
  201. * queried just now. We don't have to do additional
  202. * vfs_getattr(). If lower itself is metacopy, then
  203. * additional vfs_getattr() is unavoidable.
  204. */
  205. if (metacopy_blocks &&
  206. realpath.dentry == ovl_dentry_lowerdata(dentry)) {
  207. stat->blocks = lowerstat.blocks;
  208. metacopy_blocks = false;
  209. }
  210. }
  211. if (metacopy_blocks) {
  212. /*
  213. * If lower is not same as lowerdata or if there was
  214. * no origin on upper, we can end up here.
  215. */
  216. struct kstat lowerdatastat;
  217. u32 lowermask = STATX_BLOCKS;
  218. ovl_path_lowerdata(dentry, &realpath);
  219. err = vfs_getattr(&realpath, &lowerdatastat,
  220. lowermask, flags);
  221. if (err)
  222. goto out;
  223. stat->blocks = lowerdatastat.blocks;
  224. }
  225. }
  226. ovl_map_dev_ino(dentry, stat, fsid);
  227. /*
  228. * It's probably not worth it to count subdirs to get the
  229. * correct link count. nlink=1 seems to pacify 'find' and
  230. * other utilities.
  231. */
  232. if (is_dir && OVL_TYPE_MERGE(type))
  233. stat->nlink = 1;
  234. /*
  235. * Return the overlay inode nlinks for indexed upper inodes.
  236. * Overlay inode nlink counts the union of the upper hardlinks
  237. * and non-covered lower hardlinks. It does not include the upper
  238. * index hardlink.
  239. */
  240. if (!is_dir && ovl_test_flag(OVL_INDEX, d_inode(dentry)))
  241. stat->nlink = dentry->d_inode->i_nlink;
  242. out:
  243. ovl_revert_creds(dentry->d_sb, old_cred);
  244. return err;
  245. }
  246. int ovl_permission(struct user_namespace *mnt_userns,
  247. struct inode *inode, int mask)
  248. {
  249. struct inode *upperinode = ovl_inode_upper(inode);
  250. struct inode *realinode;
  251. struct path realpath;
  252. const struct cred *old_cred;
  253. int err;
  254. /* Careful in RCU walk mode */
  255. realinode = ovl_i_path_real(inode, &realpath);
  256. if (!realinode) {
  257. WARN_ON(!(mask & MAY_NOT_BLOCK));
  258. return -ECHILD;
  259. }
  260. /*
  261. * Check overlay inode with the creds of task and underlying inode
  262. * with creds of mounter
  263. */
  264. err = generic_permission(&init_user_ns, inode, mask);
  265. if (err)
  266. return err;
  267. old_cred = ovl_override_creds(inode->i_sb);
  268. if (!upperinode &&
  269. !special_file(realinode->i_mode) && mask & MAY_WRITE) {
  270. mask &= ~(MAY_WRITE | MAY_APPEND);
  271. /* Make sure mounter can read file for copy up later */
  272. mask |= MAY_READ;
  273. }
  274. err = inode_permission(mnt_user_ns(realpath.mnt), realinode, mask);
  275. ovl_revert_creds(inode->i_sb, old_cred);
  276. return err;
  277. }
  278. static const char *ovl_get_link(struct dentry *dentry,
  279. struct inode *inode,
  280. struct delayed_call *done)
  281. {
  282. const struct cred *old_cred;
  283. const char *p;
  284. if (!dentry)
  285. return ERR_PTR(-ECHILD);
  286. old_cred = ovl_override_creds(dentry->d_sb);
  287. p = vfs_get_link(ovl_dentry_real(dentry), done);
  288. ovl_revert_creds(dentry->d_sb, old_cred);
  289. return p;
  290. }
  291. bool ovl_is_private_xattr(struct super_block *sb, const char *name)
  292. {
  293. struct ovl_fs *ofs = sb->s_fs_info;
  294. if (ofs->config.userxattr)
  295. return strncmp(name, OVL_XATTR_USER_PREFIX,
  296. sizeof(OVL_XATTR_USER_PREFIX) - 1) == 0;
  297. else
  298. return strncmp(name, OVL_XATTR_TRUSTED_PREFIX,
  299. sizeof(OVL_XATTR_TRUSTED_PREFIX) - 1) == 0;
  300. }
  301. int ovl_xattr_set(struct dentry *dentry, struct inode *inode, const char *name,
  302. const void *value, size_t size, int flags)
  303. {
  304. int err;
  305. struct ovl_fs *ofs = OVL_FS(dentry->d_sb);
  306. struct dentry *upperdentry = ovl_i_dentry_upper(inode);
  307. struct dentry *realdentry = upperdentry ?: ovl_dentry_lower(dentry);
  308. struct path realpath;
  309. const struct cred *old_cred;
  310. err = ovl_want_write(dentry);
  311. if (err)
  312. goto out;
  313. if (!value && !upperdentry) {
  314. ovl_path_lower(dentry, &realpath);
  315. old_cred = ovl_override_creds(dentry->d_sb);
  316. err = vfs_getxattr(mnt_user_ns(realpath.mnt), realdentry, name, NULL, 0);
  317. ovl_revert_creds(dentry->d_sb, old_cred);
  318. if (err < 0)
  319. goto out_drop_write;
  320. }
  321. if (!upperdentry) {
  322. err = ovl_copy_up(dentry);
  323. if (err)
  324. goto out_drop_write;
  325. realdentry = ovl_dentry_upper(dentry);
  326. }
  327. old_cred = ovl_override_creds(dentry->d_sb);
  328. if (value) {
  329. err = ovl_do_setxattr(ofs, realdentry, name, value, size,
  330. flags);
  331. } else {
  332. WARN_ON(flags != XATTR_REPLACE);
  333. err = ovl_do_removexattr(ofs, realdentry, name);
  334. }
  335. ovl_revert_creds(dentry->d_sb, old_cred);
  336. /* copy c/mtime */
  337. ovl_copyattr(inode);
  338. out_drop_write:
  339. ovl_drop_write(dentry);
  340. out:
  341. return err;
  342. }
  343. int ovl_xattr_get(struct dentry *dentry, struct inode *inode, const char *name,
  344. void *value, size_t size)
  345. {
  346. ssize_t res;
  347. const struct cred *old_cred;
  348. struct path realpath;
  349. ovl_i_path_real(inode, &realpath);
  350. old_cred = ovl_override_creds(dentry->d_sb);
  351. res = vfs_getxattr(mnt_user_ns(realpath.mnt), realpath.dentry, name, value, size);
  352. ovl_revert_creds(dentry->d_sb, old_cred);
  353. return res;
  354. }
  355. static bool ovl_can_list(struct super_block *sb, const char *s)
  356. {
  357. /* Never list private (.overlay) */
  358. if (ovl_is_private_xattr(sb, s))
  359. return false;
  360. /* List all non-trusted xattrs */
  361. if (strncmp(s, XATTR_TRUSTED_PREFIX, XATTR_TRUSTED_PREFIX_LEN) != 0)
  362. return true;
  363. /* list other trusted for superuser only */
  364. return ns_capable_noaudit(&init_user_ns, CAP_SYS_ADMIN);
  365. }
  366. ssize_t ovl_listxattr(struct dentry *dentry, char *list, size_t size)
  367. {
  368. struct dentry *realdentry = ovl_dentry_real(dentry);
  369. ssize_t res;
  370. size_t len;
  371. char *s;
  372. const struct cred *old_cred;
  373. old_cred = ovl_override_creds(dentry->d_sb);
  374. res = vfs_listxattr(realdentry, list, size);
  375. ovl_revert_creds(dentry->d_sb, old_cred);
  376. if (res <= 0 || size == 0)
  377. return res;
  378. /* filter out private xattrs */
  379. for (s = list, len = res; len;) {
  380. size_t slen = strnlen(s, len) + 1;
  381. /* underlying fs providing us with an broken xattr list? */
  382. if (WARN_ON(slen > len))
  383. return -EIO;
  384. len -= slen;
  385. if (!ovl_can_list(dentry->d_sb, s)) {
  386. res -= slen;
  387. memmove(s, s + slen, len);
  388. } else {
  389. s += slen;
  390. }
  391. }
  392. return res;
  393. }
  394. #ifdef CONFIG_FS_POSIX_ACL
  395. /*
  396. * Apply the idmapping of the layer to POSIX ACLs. The caller must pass a clone
  397. * of the POSIX ACLs retrieved from the lower layer to this function to not
  398. * alter the POSIX ACLs for the underlying filesystem.
  399. */
  400. static void ovl_idmap_posix_acl(struct inode *realinode,
  401. struct user_namespace *mnt_userns,
  402. struct posix_acl *acl)
  403. {
  404. struct user_namespace *fs_userns = i_user_ns(realinode);
  405. for (unsigned int i = 0; i < acl->a_count; i++) {
  406. vfsuid_t vfsuid;
  407. vfsgid_t vfsgid;
  408. struct posix_acl_entry *e = &acl->a_entries[i];
  409. switch (e->e_tag) {
  410. case ACL_USER:
  411. vfsuid = make_vfsuid(mnt_userns, fs_userns, e->e_uid);
  412. e->e_uid = vfsuid_into_kuid(vfsuid);
  413. break;
  414. case ACL_GROUP:
  415. vfsgid = make_vfsgid(mnt_userns, fs_userns, e->e_gid);
  416. e->e_gid = vfsgid_into_kgid(vfsgid);
  417. break;
  418. }
  419. }
  420. }
  421. /*
  422. * When the relevant layer is an idmapped mount we need to take the idmapping
  423. * of the layer into account and translate any ACL_{GROUP,USER} values
  424. * according to the idmapped mount.
  425. *
  426. * We cannot alter the ACLs returned from the relevant layer as that would
  427. * alter the cached values filesystem wide for the lower filesystem. Instead we
  428. * can clone the ACLs and then apply the relevant idmapping of the layer.
  429. *
  430. * This is obviously only relevant when idmapped layers are used.
  431. */
  432. struct posix_acl *ovl_get_acl(struct inode *inode, int type, bool rcu)
  433. {
  434. struct inode *realinode;
  435. struct posix_acl *acl, *clone;
  436. struct path realpath;
  437. /* Careful in RCU walk mode */
  438. realinode = ovl_i_path_real(inode, &realpath);
  439. if (!realinode) {
  440. WARN_ON(!rcu);
  441. return ERR_PTR(-ECHILD);
  442. }
  443. if (!IS_POSIXACL(realinode))
  444. return NULL;
  445. if (rcu) {
  446. acl = get_cached_acl_rcu(realinode, type);
  447. } else {
  448. const struct cred *old_cred;
  449. old_cred = ovl_override_creds(inode->i_sb);
  450. acl = get_acl(realinode, type);
  451. ovl_revert_creds(inode->i_sb, old_cred);
  452. }
  453. /*
  454. * If there are no POSIX ACLs, or we encountered an error,
  455. * or the layer isn't idmapped we don't need to do anything.
  456. */
  457. if (!is_idmapped_mnt(realpath.mnt) || IS_ERR_OR_NULL(acl))
  458. return acl;
  459. /*
  460. * We only get here if the layer is idmapped. So drop out of RCU path
  461. * walk so we can clone the ACLs. There's no need to release the ACLs
  462. * since get_cached_acl_rcu() doesn't take a reference on the ACLs.
  463. */
  464. if (rcu)
  465. return ERR_PTR(-ECHILD);
  466. clone = posix_acl_clone(acl, GFP_KERNEL);
  467. if (!clone)
  468. clone = ERR_PTR(-ENOMEM);
  469. else
  470. ovl_idmap_posix_acl(realinode, mnt_user_ns(realpath.mnt), clone);
  471. /*
  472. * Since we're not in RCU path walk we always need to release the
  473. * original ACLs.
  474. */
  475. posix_acl_release(acl);
  476. return clone;
  477. }
  478. #endif
  479. int ovl_update_time(struct inode *inode, struct timespec64 *ts, int flags)
  480. {
  481. if (flags & S_ATIME) {
  482. struct ovl_fs *ofs = inode->i_sb->s_fs_info;
  483. struct path upperpath = {
  484. .mnt = ovl_upper_mnt(ofs),
  485. .dentry = ovl_upperdentry_dereference(OVL_I(inode)),
  486. };
  487. if (upperpath.dentry) {
  488. touch_atime(&upperpath);
  489. inode->i_atime = d_inode(upperpath.dentry)->i_atime;
  490. }
  491. }
  492. return 0;
  493. }
  494. static int ovl_fiemap(struct inode *inode, struct fiemap_extent_info *fieinfo,
  495. u64 start, u64 len)
  496. {
  497. int err;
  498. struct inode *realinode = ovl_inode_realdata(inode);
  499. const struct cred *old_cred;
  500. if (!realinode->i_op->fiemap)
  501. return -EOPNOTSUPP;
  502. old_cred = ovl_override_creds(inode->i_sb);
  503. err = realinode->i_op->fiemap(realinode, fieinfo, start, len);
  504. ovl_revert_creds(inode->i_sb, old_cred);
  505. return err;
  506. }
  507. /*
  508. * Work around the fact that security_file_ioctl() takes a file argument.
  509. * Introducing security_inode_fileattr_get/set() hooks would solve this issue
  510. * properly.
  511. */
  512. static int ovl_security_fileattr(const struct path *realpath, struct fileattr *fa,
  513. bool set)
  514. {
  515. struct file *file;
  516. unsigned int cmd;
  517. int err;
  518. file = dentry_open(realpath, O_RDONLY, current_cred());
  519. if (IS_ERR(file))
  520. return PTR_ERR(file);
  521. if (set)
  522. cmd = fa->fsx_valid ? FS_IOC_FSSETXATTR : FS_IOC_SETFLAGS;
  523. else
  524. cmd = fa->fsx_valid ? FS_IOC_FSGETXATTR : FS_IOC_GETFLAGS;
  525. err = security_file_ioctl(file, cmd, 0);
  526. fput(file);
  527. return err;
  528. }
  529. int ovl_real_fileattr_set(const struct path *realpath, struct fileattr *fa)
  530. {
  531. int err;
  532. err = ovl_security_fileattr(realpath, fa, true);
  533. if (err)
  534. return err;
  535. return vfs_fileattr_set(mnt_user_ns(realpath->mnt), realpath->dentry, fa);
  536. }
  537. int ovl_fileattr_set(struct user_namespace *mnt_userns,
  538. struct dentry *dentry, struct fileattr *fa)
  539. {
  540. struct inode *inode = d_inode(dentry);
  541. struct path upperpath;
  542. const struct cred *old_cred;
  543. unsigned int flags;
  544. int err;
  545. err = ovl_want_write(dentry);
  546. if (err)
  547. goto out;
  548. err = ovl_copy_up(dentry);
  549. if (!err) {
  550. ovl_path_real(dentry, &upperpath);
  551. old_cred = ovl_override_creds(inode->i_sb);
  552. /*
  553. * Store immutable/append-only flags in xattr and clear them
  554. * in upper fileattr (in case they were set by older kernel)
  555. * so children of "ovl-immutable" directories lower aliases of
  556. * "ovl-immutable" hardlinks could be copied up.
  557. * Clear xattr when flags are cleared.
  558. */
  559. err = ovl_set_protattr(inode, upperpath.dentry, fa);
  560. if (!err)
  561. err = ovl_real_fileattr_set(&upperpath, fa);
  562. ovl_revert_creds(inode->i_sb, old_cred);
  563. /*
  564. * Merge real inode flags with inode flags read from
  565. * overlay.protattr xattr
  566. */
  567. flags = ovl_inode_real(inode)->i_flags & OVL_COPY_I_FLAGS_MASK;
  568. BUILD_BUG_ON(OVL_PROT_I_FLAGS_MASK & ~OVL_COPY_I_FLAGS_MASK);
  569. flags |= inode->i_flags & OVL_PROT_I_FLAGS_MASK;
  570. inode_set_flags(inode, flags, OVL_COPY_I_FLAGS_MASK);
  571. /* Update ctime */
  572. ovl_copyattr(inode);
  573. }
  574. ovl_drop_write(dentry);
  575. out:
  576. return err;
  577. }
  578. /* Convert inode protection flags to fileattr flags */
  579. static void ovl_fileattr_prot_flags(struct inode *inode, struct fileattr *fa)
  580. {
  581. BUILD_BUG_ON(OVL_PROT_FS_FLAGS_MASK & ~FS_COMMON_FL);
  582. BUILD_BUG_ON(OVL_PROT_FSX_FLAGS_MASK & ~FS_XFLAG_COMMON);
  583. if (inode->i_flags & S_APPEND) {
  584. fa->flags |= FS_APPEND_FL;
  585. fa->fsx_xflags |= FS_XFLAG_APPEND;
  586. }
  587. if (inode->i_flags & S_IMMUTABLE) {
  588. fa->flags |= FS_IMMUTABLE_FL;
  589. fa->fsx_xflags |= FS_XFLAG_IMMUTABLE;
  590. }
  591. }
  592. int ovl_real_fileattr_get(const struct path *realpath, struct fileattr *fa)
  593. {
  594. int err;
  595. err = ovl_security_fileattr(realpath, fa, false);
  596. if (err)
  597. return err;
  598. err = vfs_fileattr_get(realpath->dentry, fa);
  599. if (err == -ENOIOCTLCMD)
  600. err = -ENOTTY;
  601. return err;
  602. }
  603. int ovl_fileattr_get(struct dentry *dentry, struct fileattr *fa)
  604. {
  605. struct inode *inode = d_inode(dentry);
  606. struct path realpath;
  607. const struct cred *old_cred;
  608. int err;
  609. ovl_path_real(dentry, &realpath);
  610. old_cred = ovl_override_creds(inode->i_sb);
  611. err = ovl_real_fileattr_get(&realpath, fa);
  612. ovl_fileattr_prot_flags(inode, fa);
  613. ovl_revert_creds(inode->i_sb, old_cred);
  614. return err;
  615. }
  616. static const struct inode_operations ovl_file_inode_operations = {
  617. .setattr = ovl_setattr,
  618. .permission = ovl_permission,
  619. .getattr = ovl_getattr,
  620. .listxattr = ovl_listxattr,
  621. .get_acl = ovl_get_acl,
  622. .update_time = ovl_update_time,
  623. .fiemap = ovl_fiemap,
  624. .fileattr_get = ovl_fileattr_get,
  625. .fileattr_set = ovl_fileattr_set,
  626. };
  627. static const struct inode_operations ovl_symlink_inode_operations = {
  628. .setattr = ovl_setattr,
  629. .get_link = ovl_get_link,
  630. .getattr = ovl_getattr,
  631. .listxattr = ovl_listxattr,
  632. .update_time = ovl_update_time,
  633. };
  634. static const struct inode_operations ovl_special_inode_operations = {
  635. .setattr = ovl_setattr,
  636. .permission = ovl_permission,
  637. .getattr = ovl_getattr,
  638. .listxattr = ovl_listxattr,
  639. .get_acl = ovl_get_acl,
  640. .update_time = ovl_update_time,
  641. };
  642. static const struct address_space_operations ovl_aops = {
  643. /* For O_DIRECT dentry_open() checks f_mapping->a_ops->direct_IO */
  644. .direct_IO = noop_direct_IO,
  645. };
  646. /*
  647. * It is possible to stack overlayfs instance on top of another
  648. * overlayfs instance as lower layer. We need to annotate the
  649. * stackable i_mutex locks according to stack level of the super
  650. * block instance. An overlayfs instance can never be in stack
  651. * depth 0 (there is always a real fs below it). An overlayfs
  652. * inode lock will use the lockdep annotation ovl_i_mutex_key[depth].
  653. *
  654. * For example, here is a snip from /proc/lockdep_chains after
  655. * dir_iterate of nested overlayfs:
  656. *
  657. * [...] &ovl_i_mutex_dir_key[depth] (stack_depth=2)
  658. * [...] &ovl_i_mutex_dir_key[depth]#2 (stack_depth=1)
  659. * [...] &type->i_mutex_dir_key (stack_depth=0)
  660. *
  661. * Locking order w.r.t ovl_want_write() is important for nested overlayfs.
  662. *
  663. * This chain is valid:
  664. * - inode->i_rwsem (inode_lock[2])
  665. * - upper_mnt->mnt_sb->s_writers (ovl_want_write[0])
  666. * - OVL_I(inode)->lock (ovl_inode_lock[2])
  667. * - OVL_I(lowerinode)->lock (ovl_inode_lock[1])
  668. *
  669. * And this chain is valid:
  670. * - inode->i_rwsem (inode_lock[2])
  671. * - OVL_I(inode)->lock (ovl_inode_lock[2])
  672. * - lowerinode->i_rwsem (inode_lock[1])
  673. * - OVL_I(lowerinode)->lock (ovl_inode_lock[1])
  674. *
  675. * But lowerinode->i_rwsem SHOULD NOT be acquired while ovl_want_write() is
  676. * held, because it is in reverse order of the non-nested case using the same
  677. * upper fs:
  678. * - inode->i_rwsem (inode_lock[1])
  679. * - upper_mnt->mnt_sb->s_writers (ovl_want_write[0])
  680. * - OVL_I(inode)->lock (ovl_inode_lock[1])
  681. */
  682. #define OVL_MAX_NESTING FILESYSTEM_MAX_STACK_DEPTH
  683. static inline void ovl_lockdep_annotate_inode_mutex_key(struct inode *inode)
  684. {
  685. #ifdef CONFIG_LOCKDEP
  686. static struct lock_class_key ovl_i_mutex_key[OVL_MAX_NESTING];
  687. static struct lock_class_key ovl_i_mutex_dir_key[OVL_MAX_NESTING];
  688. static struct lock_class_key ovl_i_lock_key[OVL_MAX_NESTING];
  689. int depth = inode->i_sb->s_stack_depth - 1;
  690. if (WARN_ON_ONCE(depth < 0 || depth >= OVL_MAX_NESTING))
  691. depth = 0;
  692. if (S_ISDIR(inode->i_mode))
  693. lockdep_set_class(&inode->i_rwsem, &ovl_i_mutex_dir_key[depth]);
  694. else
  695. lockdep_set_class(&inode->i_rwsem, &ovl_i_mutex_key[depth]);
  696. lockdep_set_class(&OVL_I(inode)->lock, &ovl_i_lock_key[depth]);
  697. #endif
  698. }
  699. static void ovl_next_ino(struct inode *inode)
  700. {
  701. struct ovl_fs *ofs = inode->i_sb->s_fs_info;
  702. inode->i_ino = atomic_long_inc_return(&ofs->last_ino);
  703. if (unlikely(!inode->i_ino))
  704. inode->i_ino = atomic_long_inc_return(&ofs->last_ino);
  705. }
  706. static void ovl_map_ino(struct inode *inode, unsigned long ino, int fsid)
  707. {
  708. int xinobits = ovl_xino_bits(inode->i_sb);
  709. unsigned int xinoshift = 64 - xinobits;
  710. /*
  711. * When d_ino is consistent with st_ino (samefs or i_ino has enough
  712. * bits to encode layer), set the same value used for st_ino to i_ino,
  713. * so inode number exposed via /proc/locks and a like will be
  714. * consistent with d_ino and st_ino values. An i_ino value inconsistent
  715. * with d_ino also causes nfsd readdirplus to fail.
  716. */
  717. inode->i_ino = ino;
  718. if (ovl_same_fs(inode->i_sb)) {
  719. return;
  720. } else if (xinobits && likely(!(ino >> xinoshift))) {
  721. inode->i_ino |= (unsigned long)fsid << (xinoshift + 1);
  722. return;
  723. }
  724. /*
  725. * For directory inodes on non-samefs with xino disabled or xino
  726. * overflow, we allocate a non-persistent inode number, to be used for
  727. * resolving st_ino collisions in ovl_map_dev_ino().
  728. *
  729. * To avoid ino collision with legitimate xino values from upper
  730. * layer (fsid 0), use the lowest xinobit to map the non
  731. * persistent inode numbers to the unified st_ino address space.
  732. */
  733. if (S_ISDIR(inode->i_mode)) {
  734. ovl_next_ino(inode);
  735. if (xinobits) {
  736. inode->i_ino &= ~0UL >> xinobits;
  737. inode->i_ino |= 1UL << xinoshift;
  738. }
  739. }
  740. }
/*
 * Initialize a new overlay inode from its lookup parameters: record the
 * real upper/lower dentries, copy attributes and flags from the real
 * inode, and map the inode number via ovl_map_ino().
 *
 * NOTE(review): the upperdentry reference appears to be transferred from
 * @oip (no dget here), while the lowerpath dentry takes its own dget —
 * confirm ownership against callers.
 */
void ovl_inode_init(struct inode *inode, struct ovl_inode_params *oip,
		    unsigned long ino, int fsid)
{
	struct inode *realinode;
	struct ovl_inode *oi = OVL_I(inode);

	if (oip->upperdentry)
		oi->__upperdentry = oip->upperdentry;
	if (oip->lowerpath && oip->lowerpath->dentry) {
		oi->lowerpath.dentry = dget(oip->lowerpath->dentry);
		oi->lowerpath.layer = oip->lowerpath->layer;
	}
	if (oip->lowerdata)
		/* Hold the lower data inode (metacopy case). */
		oi->lowerdata = igrab(d_inode(oip->lowerdata));

	realinode = ovl_inode_real(inode);
	ovl_copyattr(inode);
	ovl_copyflags(realinode, inode);
	ovl_map_ino(inode, ino, fsid);
}
/*
 * Common setup for a new overlay inode: set mode/flags and wire up the
 * inode, file and address-space operations matching the file type.
 */
static void ovl_fill_inode(struct inode *inode, umode_t mode, dev_t rdev)
{
	inode->i_mode = mode;
	/* c/mtime updates are driven by the real inode, not the overlay one. */
	inode->i_flags |= S_NOCMTIME;
#ifdef CONFIG_FS_POSIX_ACL
	/* ACLs are looked up on the real inode; disable VFS-level caching. */
	inode->i_acl = inode->i_default_acl = ACL_DONT_CACHE;
#endif

	ovl_lockdep_annotate_inode_mutex_key(inode);

	switch (mode & S_IFMT) {
	case S_IFREG:
		inode->i_op = &ovl_file_inode_operations;
		inode->i_fop = &ovl_file_operations;
		inode->i_mapping->a_ops = &ovl_aops;
		break;

	case S_IFDIR:
		inode->i_op = &ovl_dir_inode_operations;
		inode->i_fop = &ovl_dir_operations;
		break;

	case S_IFLNK:
		inode->i_op = &ovl_symlink_inode_operations;
		break;

	default:
		/* Device nodes, fifos, sockets. */
		inode->i_op = &ovl_special_inode_operations;
		init_special_inode(inode, mode, rdev);
		break;
	}
}
  786. /*
  787. * With inodes index enabled, an overlay inode nlink counts the union of upper
  788. * hardlinks and non-covered lower hardlinks. During the lifetime of a non-pure
  789. * upper inode, the following nlink modifying operations can happen:
  790. *
  791. * 1. Lower hardlink copy up
  792. * 2. Upper hardlink created, unlinked or renamed over
  793. * 3. Lower hardlink whiteout or renamed over
  794. *
  795. * For the first, copy up case, the union nlink does not change, whether the
  796. * operation succeeds or fails, but the upper inode nlink may change.
  797. * Therefore, before copy up, we store the union nlink value relative to the
  798. * lower inode nlink in the index inode xattr .overlay.nlink.
  799. *
  800. * For the second, upper hardlink case, the union nlink should be incremented
  801. * or decremented IFF the operation succeeds, aligned with nlink change of the
  802. * upper inode. Therefore, before link/unlink/rename, we store the union nlink
  803. * value relative to the upper inode nlink in the index inode.
  804. *
  805. * For the last, lower cover up case, we simplify things by preceding the
  806. * whiteout or cover up with copy up. This makes sure that there is an index
  807. * upper inode where the nlink xattr can be stored before the copied up upper
  808. * entry is unlink.
  809. */
  810. #define OVL_NLINK_ADD_UPPER (1 << 0)
  811. /*
  812. * On-disk format for indexed nlink:
  813. *
  814. * nlink relative to the upper inode - "U[+-]NUM"
  815. * nlink relative to the lower inode - "L[+-]NUM"
  816. */
  817. static int ovl_set_nlink_common(struct dentry *dentry,
  818. struct dentry *realdentry, const char *format)
  819. {
  820. struct inode *inode = d_inode(dentry);
  821. struct inode *realinode = d_inode(realdentry);
  822. char buf[13];
  823. int len;
  824. len = snprintf(buf, sizeof(buf), format,
  825. (int) (inode->i_nlink - realinode->i_nlink));
  826. if (WARN_ON(len >= sizeof(buf)))
  827. return -EIO;
  828. return ovl_setxattr(OVL_FS(inode->i_sb), ovl_dentry_upper(dentry),
  829. OVL_XATTR_NLINK, buf, len);
  830. }
  831. int ovl_set_nlink_upper(struct dentry *dentry)
  832. {
  833. return ovl_set_nlink_common(dentry, ovl_dentry_upper(dentry), "U%+i");
  834. }
  835. int ovl_set_nlink_lower(struct dentry *dentry)
  836. {
  837. return ovl_set_nlink_common(dentry, ovl_dentry_lower(dentry), "L%+i");
  838. }
/*
 * Reconstruct the union nlink of an indexed hardlink from the on-disk
 * "overlay.nlink" xattr ("U[+-]NUM" or "L[+-]NUM": a delta relative to
 * the upper or lower real inode nlink respectively).
 *
 * Returns @fallback when the entry is not an indexed hardlink or when
 * the xattr is missing or malformed.
 */
unsigned int ovl_get_nlink(struct ovl_fs *ofs, struct dentry *lowerdentry,
			   struct dentry *upperdentry,
			   unsigned int fallback)
{
	int nlink_diff;
	int nlink;
	char buf[13];
	int err;

	/* Not an indexed lower hardlink: nothing to reconstruct. */
	if (!lowerdentry || !upperdentry || d_inode(lowerdentry)->i_nlink == 1)
		return fallback;

	err = ovl_getxattr_upper(ofs, upperdentry, OVL_XATTR_NLINK,
				 &buf, sizeof(buf) - 1);
	if (err < 0)
		goto fail;

	buf[err] = '\0';
	/* Validate the "[UL][+-]" prefix before parsing the number. */
	if ((buf[0] != 'L' && buf[0] != 'U') ||
	    (buf[1] != '+' && buf[1] != '-'))
		goto fail;

	err = kstrtoint(buf + 1, 10, &nlink_diff);
	if (err < 0)
		goto fail;

	/* The stored delta is relative to the layer named by buf[0]. */
	nlink = d_inode(buf[0] == 'L' ? lowerdentry : upperdentry)->i_nlink;
	nlink += nlink_diff;

	/* A corrupted xattr must not yield a non-positive nlink. */
	if (nlink <= 0)
		goto fail;

	return nlink;

fail:
	pr_warn_ratelimited("failed to get index nlink (%pd2, err=%i)\n",
			    upperdentry, err);
	return fallback;
}
  870. struct inode *ovl_new_inode(struct super_block *sb, umode_t mode, dev_t rdev)
  871. {
  872. struct inode *inode;
  873. inode = new_inode(sb);
  874. if (inode)
  875. ovl_fill_inode(inode, mode, rdev);
  876. return inode;
  877. }
  878. static int ovl_inode_test(struct inode *inode, void *data)
  879. {
  880. return inode->i_private == data;
  881. }
  882. static int ovl_inode_set(struct inode *inode, void *data)
  883. {
  884. inode->i_private = data;
  885. return 0;
  886. }
/*
 * Verify that an overlay inode found in the inode cache is still
 * consistent with the lower/upper real dentries of the dentry being
 * looked up.  Returns false if the cached inode is stale (e.g. the
 * layers changed underneath us).
 */
static bool ovl_verify_inode(struct inode *inode, struct dentry *lowerdentry,
			     struct dentry *upperdentry, bool strict)
{
	/*
	 * For directories, @strict verify from lookup path performs consistency
	 * checks, so NULL lower/upper in dentry must match NULL lower/upper in
	 * inode. Non @strict verify from NFS handle decode path passes NULL for
	 * 'unknown' lower/upper.
	 */
	if (S_ISDIR(inode->i_mode) && strict) {
		/* Real lower dir moved to upper layer under us? */
		if (!lowerdentry && ovl_inode_lower(inode))
			return false;

		/* Lookup of an uncovered redirect origin? */
		if (!upperdentry && ovl_inode_upper(inode))
			return false;
	}

	/*
	 * Allow non-NULL lower inode in ovl_inode even if lowerdentry is NULL.
	 * This happens when finding a copied up overlay inode for a renamed
	 * or hardlinked overlay dentry and lower dentry cannot be followed
	 * by origin because lower fs does not support file handles.
	 */
	if (lowerdentry && ovl_inode_lower(inode) != d_inode(lowerdentry))
		return false;

	/*
	 * Allow non-NULL __upperdentry in inode even if upperdentry is NULL.
	 * This happens when finding a lower alias for a copied up hard link.
	 */
	if (upperdentry && ovl_inode_upper(inode) != d_inode(upperdentry))
		return false;

	return true;
}
  920. struct inode *ovl_lookup_inode(struct super_block *sb, struct dentry *real,
  921. bool is_upper)
  922. {
  923. struct inode *inode, *key = d_inode(real);
  924. inode = ilookup5(sb, (unsigned long) key, ovl_inode_test, key);
  925. if (!inode)
  926. return NULL;
  927. if (!ovl_verify_inode(inode, is_upper ? NULL : real,
  928. is_upper ? real : NULL, false)) {
  929. iput(inode);
  930. return ERR_PTR(-ESTALE);
  931. }
  932. return inode;
  933. }
  934. bool ovl_lookup_trap_inode(struct super_block *sb, struct dentry *dir)
  935. {
  936. struct inode *key = d_inode(dir);
  937. struct inode *trap;
  938. bool res;
  939. trap = ilookup5(sb, (unsigned long) key, ovl_inode_test, key);
  940. if (!trap)
  941. return false;
  942. res = IS_DEADDIR(trap) && !ovl_inode_upper(trap) &&
  943. !ovl_inode_lower(trap);
  944. iput(trap);
  945. return res;
  946. }
  947. /*
  948. * Create an inode cache entry for layer root dir, that will intentionally
  949. * fail ovl_verify_inode(), so any lookup that will find some layer root
  950. * will fail.
  951. */
/*
 * Insert a trap inode for layer root @dir into the inode cache.
 *
 * Returns -ENOTDIR if @dir is not a directory, -ENOMEM on allocation
 * failure, and -ELOOP if an inode for this key already exists (two
 * layers sharing a root, or a layer root already in use).
 */
struct inode *ovl_get_trap_inode(struct super_block *sb, struct dentry *dir)
{
	struct inode *key = d_inode(dir);
	struct inode *trap;

	if (!d_is_dir(dir))
		return ERR_PTR(-ENOTDIR);

	trap = iget5_locked(sb, (unsigned long) key, ovl_inode_test,
			    ovl_inode_set, key);
	if (!trap)
		return ERR_PTR(-ENOMEM);

	if (!(trap->i_state & I_NEW)) {
		/* Conflicting layer roots? */
		iput(trap);
		return ERR_PTR(-ELOOP);
	}

	/* S_IFDIR + S_DEAD guarantees ovl_verify_inode() will reject it. */
	trap->i_mode = S_IFDIR;
	trap->i_flags = S_DEAD;
	unlock_new_inode(trap);

	return trap;
}
  972. /*
  973. * Does overlay inode need to be hashed by lower inode?
  974. */
/*
 * Decide whether the overlay inode should be hashed by the lower real
 * inode (instead of the upper one).  The order of the checks below is
 * significant — each one short-circuits the rest.
 */
static bool ovl_hash_bylower(struct super_block *sb, struct dentry *upper,
			     struct dentry *lower, bool index)
{
	struct ovl_fs *ofs = sb->s_fs_info;

	/* No, if pure upper */
	if (!lower)
		return false;

	/* Yes, if already indexed */
	if (index)
		return true;

	/* Yes, if won't be copied up (no upper layer mounted) */
	if (!ovl_upper_mnt(ofs))
		return true;

	/* No, if lower hardlink is or will be broken on copy up */
	if ((upper || !ovl_indexdir(sb)) &&
	    !d_is_dir(lower) && d_inode(lower)->i_nlink > 1)
		return false;

	/* No, if non-indexed upper with NFS export */
	if (sb->s_export_op && upper)
		return false;

	/* Otherwise, hash by lower inode for fsnotify */
	return true;
}
  998. static struct inode *ovl_iget5(struct super_block *sb, struct inode *newinode,
  999. struct inode *key)
  1000. {
  1001. return newinode ? inode_insert5(newinode, (unsigned long) key,
  1002. ovl_inode_test, ovl_inode_set, key) :
  1003. iget5_locked(sb, (unsigned long) key,
  1004. ovl_inode_test, ovl_inode_set, key);
  1005. }
/*
 * Get (or create) the overlay inode for the real upper/lower dentries
 * described by @oip.  The inode is hashed by upper or lower real inode
 * according to ovl_hash_bylower(); a lower hardlink that will be broken
 * on copy up gets an unhashed inode.
 *
 * Returns the inode with an elevated refcount, or an ERR_PTR.
 */
struct inode *ovl_get_inode(struct super_block *sb,
			    struct ovl_inode_params *oip)
{
	struct ovl_fs *ofs = OVL_FS(sb);
	struct dentry *upperdentry = oip->upperdentry;
	struct ovl_path *lowerpath = oip->lowerpath;
	struct inode *realinode = upperdentry ? d_inode(upperdentry) : NULL;
	struct inode *inode;
	struct dentry *lowerdentry = lowerpath ? lowerpath->dentry : NULL;
	struct path realpath = {
		.dentry = upperdentry ?: lowerdentry,
		.mnt = upperdentry ? ovl_upper_mnt(ofs) : lowerpath->layer->mnt,
	};
	bool bylower = ovl_hash_bylower(sb, upperdentry, lowerdentry,
					oip->index);
	int fsid = bylower ? lowerpath->layer->fsid : 0;
	bool is_dir;
	unsigned long ino = 0;
	int err = oip->newinode ? -EEXIST : -ENOMEM;

	if (!realinode)
		realinode = d_inode(lowerdentry);

	/*
	 * Copy up origin (lower) may exist for non-indexed upper, but we must
	 * not use lower as hash key if this is a broken hardlink.
	 */
	is_dir = S_ISDIR(realinode->i_mode);
	if (upperdentry || bylower) {
		struct inode *key = d_inode(bylower ? lowerdentry :
					    upperdentry);
		unsigned int nlink = is_dir ? 1 : realinode->i_nlink;

		inode = ovl_iget5(sb, oip->newinode, key);
		if (!inode)
			goto out_err;
		if (!(inode->i_state & I_NEW)) {
			/*
			 * Verify that the underlying files stored in the inode
			 * match those in the dentry.
			 */
			if (!ovl_verify_inode(inode, lowerdentry, upperdentry,
					      true)) {
				iput(inode);
				err = -ESTALE;
				goto out_err;
			}

			/*
			 * Found a live cached inode: this path consumes the
			 * caller's upperdentry reference and oip->redirect.
			 */
			dput(upperdentry);
			kfree(oip->redirect);
			goto out;
		}

		/* Recalculate nlink for non-dir due to indexing */
		if (!is_dir)
			nlink = ovl_get_nlink(ofs, lowerdentry, upperdentry,
					      nlink);
		set_nlink(inode, nlink);
		ino = key->i_ino;
	} else {
		/* Lower hardlink that will be broken on copy up */
		inode = new_inode(sb);
		if (!inode) {
			err = -ENOMEM;
			goto out_err;
		}
		ino = realinode->i_ino;
		fsid = lowerpath->layer->fsid;
	}
	ovl_fill_inode(inode, realinode->i_mode, realinode->i_rdev);
	ovl_inode_init(inode, oip, ino, fsid);

	if (upperdentry && ovl_is_impuredir(sb, upperdentry))
		ovl_set_flag(OVL_IMPURE, inode);

	if (oip->index)
		ovl_set_flag(OVL_INDEX, inode);

	/* Ownership of oip->redirect moves to the inode here. */
	OVL_I(inode)->redirect = oip->redirect;

	if (bylower)
		ovl_set_flag(OVL_CONST_INO, inode);

	/* Check for non-merge dir that may have whiteouts */
	if (is_dir) {
		if (((upperdentry && lowerdentry) || oip->numlower > 1) ||
		    ovl_path_check_origin_xattr(ofs, &realpath)) {
			ovl_set_flag(OVL_WHITEOUTS, inode);
		}
	}

	/* Check for immutable/append-only inode flags in xattr */
	if (upperdentry)
		ovl_check_protattr(inode, upperdentry);

	if (inode->i_state & I_NEW)
		unlock_new_inode(inode);
out:
	return inode;

out_err:
	pr_warn_ratelimited("failed to get inode (%i)\n", err);
	inode = ERR_PTR(err);
	goto out;
}