  1. // SPDX-License-Identifier: GPL-2.0-only
  2. /*
  3. *
  4. * Copyright (C) 2011 Novell Inc.
  5. */
  6. #include <linux/fs.h>
  7. #include <linux/slab.h>
  8. #include <linux/cred.h>
  9. #include <linux/xattr.h>
  10. #include <linux/posix_acl.h>
  11. #include <linux/ratelimit.h>
  12. #include <linux/fiemap.h>
  13. #include <linux/fileattr.h>
  14. #include <linux/security.h>
  15. #include <linux/namei.h>
  16. #include "overlayfs.h"
/*
 * ovl_setattr - ->setattr for overlay inodes
 *
 * Attribute changes are always applied to the upper (writable) inode, so
 * the dentry is copied up first; a size change (truncate) forces the file
 * data to be copied up as well.  On success, the new attributes of the
 * upper inode are mirrored back into the overlay inode.
 */
int ovl_setattr(struct user_namespace *mnt_userns, struct dentry *dentry,
		struct iattr *attr)
{
	int err;
	struct ovl_fs *ofs = OVL_FS(dentry->d_sb);
	bool full_copy_up = false;
	struct dentry *upperdentry;
	const struct cred *old_cred;

	/* Permission checks are done against the overlay inode itself */
	err = setattr_prepare(&init_user_ns, dentry, attr);
	if (err)
		return err;

	err = ovl_want_write(dentry);
	if (err)
		goto out;

	if (attr->ia_valid & ATTR_SIZE) {
		/* Truncate should trigger data copy up as well */
		full_copy_up = true;
	}

	if (!full_copy_up)
		err = ovl_copy_up(dentry);
	else
		err = ovl_copy_up_with_data(dentry);
	if (!err) {
		struct inode *winode = NULL;

		upperdentry = ovl_dentry_upper(dentry);

		if (attr->ia_valid & ATTR_SIZE) {
			/*
			 * NOTE(review): write access is taken on the upper
			 * inode for the duration of the truncate - presumably
			 * to exclude conflicting writers on the real file;
			 * confirm against do_truncate() semantics.
			 */
			winode = d_inode(upperdentry);
			err = get_write_access(winode);
			if (err)
				goto out_drop_write;
		}

		/* Kill-suid/sgid implies the mode change is handled below us */
		if (attr->ia_valid & (ATTR_KILL_SUID|ATTR_KILL_SGID))
			attr->ia_valid &= ~ATTR_MODE;

		/*
		 * We might have to translate ovl file into real file object
		 * once use cases emerge.  For now, simply don't let underlying
		 * filesystem rely on attr->ia_file
		 */
		attr->ia_valid &= ~ATTR_FILE;

		/*
		 * If open(O_TRUNC) is done, VFS calls ->setattr with ATTR_OPEN
		 * set.  Overlayfs does not pass O_TRUNC flag to underlying
		 * filesystem during open -> do not pass ATTR_OPEN.  This
		 * disables optimization in fuse which assumes open(O_TRUNC)
		 * already set file size to 0.  But we never passed O_TRUNC to
		 * fuse.  So by clearing ATTR_OPEN, fuse will be forced to send
		 * setattr request to server.
		 */
		attr->ia_valid &= ~ATTR_OPEN;

		inode_lock(upperdentry->d_inode);
		/* Act on the real fs with the mounter's credentials */
		old_cred = ovl_override_creds(dentry->d_sb);
		err = ovl_do_notify_change(ofs, upperdentry, attr);
		ovl_revert_creds(dentry->d_sb, old_cred);
		if (!err)
			/* Mirror the new upper attributes into the overlay inode */
			ovl_copyattr(dentry->d_inode);
		inode_unlock(upperdentry->d_inode);

		if (winode)
			put_write_access(winode);
	}
out_drop_write:
	ovl_drop_write(dentry);
out:
	return err;
}
  81. static void ovl_map_dev_ino(struct dentry *dentry, struct kstat *stat, int fsid)
  82. {
  83. bool samefs = ovl_same_fs(dentry->d_sb);
  84. unsigned int xinobits = ovl_xino_bits(dentry->d_sb);
  85. unsigned int xinoshift = 64 - xinobits;
  86. if (samefs) {
  87. /*
  88. * When all layers are on the same fs, all real inode
  89. * number are unique, so we use the overlay st_dev,
  90. * which is friendly to du -x.
  91. */
  92. stat->dev = dentry->d_sb->s_dev;
  93. return;
  94. } else if (xinobits) {
  95. /*
  96. * All inode numbers of underlying fs should not be using the
  97. * high xinobits, so we use high xinobits to partition the
  98. * overlay st_ino address space. The high bits holds the fsid
  99. * (upper fsid is 0). The lowest xinobit is reserved for mapping
  100. * the non-persistent inode numbers range in case of overflow.
  101. * This way all overlay inode numbers are unique and use the
  102. * overlay st_dev.
  103. */
  104. if (likely(!(stat->ino >> xinoshift))) {
  105. stat->ino |= ((u64)fsid) << (xinoshift + 1);
  106. stat->dev = dentry->d_sb->s_dev;
  107. return;
  108. } else if (ovl_xino_warn(dentry->d_sb)) {
  109. pr_warn_ratelimited("inode number too big (%pd2, ino=%llu, xinobits=%d)\n",
  110. dentry, stat->ino, xinobits);
  111. }
  112. }
  113. /* The inode could not be mapped to a unified st_ino address space */
  114. if (S_ISDIR(dentry->d_inode->i_mode)) {
  115. /*
  116. * Always use the overlay st_dev for directories, so 'find
  117. * -xdev' will scan the entire overlay mount and won't cross the
  118. * overlay mount boundaries.
  119. *
  120. * If not all layers are on the same fs the pair {real st_ino;
  121. * overlay st_dev} is not unique, so use the non persistent
  122. * overlay st_ino for directories.
  123. */
  124. stat->dev = dentry->d_sb->s_dev;
  125. stat->ino = dentry->d_inode->i_ino;
  126. } else {
  127. /*
  128. * For non-samefs setup, if we cannot map all layers st_ino
  129. * to a unified address space, we need to make sure that st_dev
  130. * is unique per underlying fs, so we use the unique anonymous
  131. * bdev assigned to the underlying fs.
  132. */
  133. stat->dev = OVL_FS(dentry->d_sb)->fs[fsid].pseudo_dev;
  134. }
  135. }
/*
 * ovl_getattr - ->getattr for overlay inodes
 *
 * Queries the real (upper or lower) inode with the mounter's credentials
 * and then rewrites st_dev/st_ino (and blocks/nlink where needed) so the
 * values stay constant across copy up.
 */
int ovl_getattr(struct user_namespace *mnt_userns, const struct path *path,
		struct kstat *stat, u32 request_mask, unsigned int flags)
{
	struct dentry *dentry = path->dentry;
	enum ovl_path_type type;
	struct path realpath;
	const struct cred *old_cred;
	struct inode *inode = d_inode(dentry);
	bool is_dir = S_ISDIR(inode->i_mode);
	int fsid = 0;
	int err;
	bool metacopy_blocks = false;

	metacopy_blocks = ovl_is_metacopy_dentry(dentry);

#ifdef CONFIG_KSU_SUSFS_SUS_OVERLAYFS
	/*
	 * Out-of-tree KernelSU/SUSFS hack: when a lowerdata path exists,
	 * report its stat directly and skip all of the overlay st_dev/st_ino
	 * mapping below (intentionally hides the overlay from userspace).
	 */
	ovl_path_lowerdata(dentry, &realpath);
	if (likely(realpath.mnt && realpath.dentry)) {
		old_cred = ovl_override_creds(dentry->d_sb);
		err = vfs_getattr(&realpath, stat, request_mask, flags);
		if (err)
			goto out;
		if (realpath.dentry->d_inode) {
			generic_fill_statx_attr(realpath.dentry->d_inode, stat);
		}
		goto out;
	}
#endif

	type = ovl_path_real(dentry, &realpath);
	old_cred = ovl_override_creds(dentry->d_sb);
	err = vfs_getattr(&realpath, stat, request_mask, flags);
	if (err)
		goto out;

	/* Report the effective immutable/append-only STATX flags */
	generic_fill_statx_attr(inode, stat);

	/*
	 * For non-dir or same fs, we use st_ino of the copy up origin.
	 * This guaranties constant st_dev/st_ino across copy up.
	 * With xino feature and non-samefs, we use st_ino of the copy up
	 * origin masked with high bits that represent the layer id.
	 *
	 * If lower filesystem supports NFS file handles, this also guaranties
	 * persistent st_ino across mount cycle.
	 */
	if (!is_dir || ovl_same_dev(dentry->d_sb)) {
		if (!OVL_TYPE_UPPER(type)) {
			/* Pure lower: attribute the inode to its lower layer */
			fsid = ovl_layer_lower(dentry)->fsid;
		} else if (OVL_TYPE_ORIGIN(type)) {
			/* Copied up with a known lower origin */
			struct kstat lowerstat;
			u32 lowermask = STATX_INO | STATX_BLOCKS |
					(!is_dir ? STATX_NLINK : 0);

			ovl_path_lower(dentry, &realpath);
			err = vfs_getattr(&realpath, &lowerstat,
					  lowermask, flags);
			if (err)
				goto out;

			/*
			 * Lower hardlinks may be broken on copy up to different
			 * upper files, so we cannot use the lower origin st_ino
			 * for those different files, even for the same fs case.
			 *
			 * Similarly, several redirected dirs can point to the
			 * same dir on a lower layer. With the "verify_lower"
			 * feature, we do not use the lower origin st_ino, if
			 * we haven't verified that this redirect is unique.
			 *
			 * With inodes index enabled, it is safe to use st_ino
			 * of an indexed origin. The index validates that the
			 * upper hardlink is not broken and that a redirected
			 * dir is the only redirect to that origin.
			 */
			if (ovl_test_flag(OVL_INDEX, d_inode(dentry)) ||
			    (!ovl_verify_lower(dentry->d_sb) &&
			     (is_dir || lowerstat.nlink == 1))) {
				fsid = ovl_layer_lower(dentry)->fsid;
				stat->ino = lowerstat.ino;
			}

			/*
			 * If we are querying a metacopy dentry and lower
			 * dentry is data dentry, then use the blocks we
			 * queried just now. We don't have to do additional
			 * vfs_getattr(). If lower itself is metacopy, then
			 * additional vfs_getattr() is unavoidable.
			 */
			if (metacopy_blocks &&
			    realpath.dentry == ovl_dentry_lowerdata(dentry)) {
				stat->blocks = lowerstat.blocks;
				metacopy_blocks = false;
			}
		}

		if (metacopy_blocks) {
			/*
			 * If lower is not same as lowerdata or if there was
			 * no origin on upper, we can end up here.
			 */
			struct kstat lowerdatastat;
			u32 lowermask = STATX_BLOCKS;

			ovl_path_lowerdata(dentry, &realpath);
			err = vfs_getattr(&realpath, &lowerdatastat,
					  lowermask, flags);
			if (err)
				goto out;
			stat->blocks = lowerdatastat.blocks;
		}
	}

	ovl_map_dev_ino(dentry, stat, fsid);

	/*
	 * It's probably not worth it to count subdirs to get the
	 * correct link count.  nlink=1 seems to pacify 'find' and
	 * other utilities.
	 */
	if (is_dir && OVL_TYPE_MERGE(type))
		stat->nlink = 1;

	/*
	 * Return the overlay inode nlinks for indexed upper inodes.
	 * Overlay inode nlink counts the union of the upper hardlinks
	 * and non-covered lower hardlinks. It does not include the upper
	 * index hardlink.
	 */
	if (!is_dir && ovl_test_flag(OVL_INDEX, d_inode(dentry)))
		stat->nlink = dentry->d_inode->i_nlink;

out:
	ovl_revert_creds(dentry->d_sb, old_cred);

	return err;
}
  259. int ovl_permission(struct user_namespace *mnt_userns,
  260. struct inode *inode, int mask)
  261. {
  262. struct inode *upperinode = ovl_inode_upper(inode);
  263. struct inode *realinode;
  264. struct path realpath;
  265. const struct cred *old_cred;
  266. int err;
  267. /* Careful in RCU walk mode */
  268. realinode = ovl_i_path_real(inode, &realpath);
  269. if (!realinode) {
  270. WARN_ON(!(mask & MAY_NOT_BLOCK));
  271. return -ECHILD;
  272. }
  273. /*
  274. * Check overlay inode with the creds of task and underlying inode
  275. * with creds of mounter
  276. */
  277. err = generic_permission(&init_user_ns, inode, mask);
  278. if (err)
  279. return err;
  280. old_cred = ovl_override_creds(inode->i_sb);
  281. if (!upperinode &&
  282. !special_file(realinode->i_mode) && mask & MAY_WRITE) {
  283. mask &= ~(MAY_WRITE | MAY_APPEND);
  284. /* Make sure mounter can read file for copy up later */
  285. mask |= MAY_READ;
  286. }
  287. err = inode_permission(mnt_user_ns(realpath.mnt), realinode, mask);
  288. ovl_revert_creds(inode->i_sb, old_cred);
  289. return err;
  290. }
  291. static const char *ovl_get_link(struct dentry *dentry,
  292. struct inode *inode,
  293. struct delayed_call *done)
  294. {
  295. const struct cred *old_cred;
  296. const char *p;
  297. if (!dentry)
  298. return ERR_PTR(-ECHILD);
  299. old_cred = ovl_override_creds(dentry->d_sb);
  300. p = vfs_get_link(ovl_dentry_real(dentry), done);
  301. ovl_revert_creds(dentry->d_sb, old_cred);
  302. return p;
  303. }
  304. bool ovl_is_private_xattr(struct super_block *sb, const char *name)
  305. {
  306. struct ovl_fs *ofs = sb->s_fs_info;
  307. if (ofs->config.userxattr)
  308. return strncmp(name, OVL_XATTR_USER_PREFIX,
  309. sizeof(OVL_XATTR_USER_PREFIX) - 1) == 0;
  310. else
  311. return strncmp(name, OVL_XATTR_TRUSTED_PREFIX,
  312. sizeof(OVL_XATTR_TRUSTED_PREFIX) - 1) == 0;
  313. }
/*
 * ovl_xattr_set - set or remove (@value == NULL) an xattr on an overlay file
 *
 * The change is applied to the upper dentry, copying the file up first if
 * needed.  For a removal on a lower-only file, the xattr's existence is
 * probed on the lower layer first so we don't copy up just to fail with
 * -ENODATA afterwards.
 */
int ovl_xattr_set(struct dentry *dentry, struct inode *inode, const char *name,
		  const void *value, size_t size, int flags)
{
	int err;
	struct ovl_fs *ofs = OVL_FS(dentry->d_sb);
	struct dentry *upperdentry = ovl_i_dentry_upper(inode);
	struct dentry *realdentry = upperdentry ?: ovl_dentry_lower(dentry);
	struct path realpath;
	const struct cred *old_cred;

	err = ovl_want_write(dentry);
	if (err)
		goto out;

	if (!value && !upperdentry) {
		/* Removal on a lower-only file: check the xattr exists first */
		ovl_path_lower(dentry, &realpath);
		old_cred = ovl_override_creds(dentry->d_sb);
		err = vfs_getxattr(mnt_user_ns(realpath.mnt), realdentry, name, NULL, 0);
		ovl_revert_creds(dentry->d_sb, old_cred);
		if (err < 0)
			goto out_drop_write;
	}

	if (!upperdentry) {
		err = ovl_copy_up(dentry);
		if (err)
			goto out_drop_write;

		realdentry = ovl_dentry_upper(dentry);
	}

	/* Modify the upper inode with the mounter's credentials */
	old_cred = ovl_override_creds(dentry->d_sb);
	if (value) {
		err = ovl_do_setxattr(ofs, realdentry, name, value, size,
				      flags);
	} else {
		WARN_ON(flags != XATTR_REPLACE);
		err = ovl_do_removexattr(ofs, realdentry, name);
	}
	ovl_revert_creds(dentry->d_sb, old_cred);

	/* copy c/mtime */
	ovl_copyattr(inode);

out_drop_write:
	ovl_drop_write(dentry);
out:
	return err;
}
  356. int ovl_xattr_get(struct dentry *dentry, struct inode *inode, const char *name,
  357. void *value, size_t size)
  358. {
  359. ssize_t res;
  360. const struct cred *old_cred;
  361. struct path realpath;
  362. ovl_i_path_real(inode, &realpath);
  363. old_cred = ovl_override_creds(dentry->d_sb);
  364. res = vfs_getxattr(mnt_user_ns(realpath.mnt), realpath.dentry, name, value, size);
  365. ovl_revert_creds(dentry->d_sb, old_cred);
  366. return res;
  367. }
  368. static bool ovl_can_list(struct super_block *sb, const char *s)
  369. {
  370. /* Never list private (.overlay) */
  371. if (ovl_is_private_xattr(sb, s))
  372. return false;
  373. /* List all non-trusted xattrs */
  374. if (strncmp(s, XATTR_TRUSTED_PREFIX, XATTR_TRUSTED_PREFIX_LEN) != 0)
  375. return true;
  376. /* list other trusted for superuser only */
  377. return ns_capable_noaudit(&init_user_ns, CAP_SYS_ADMIN);
  378. }
/*
 * ovl_listxattr - list xattrs of the real dentry, hiding names the caller
 * may not see (private overlay xattrs, trusted.* for non-capable callers).
 * Hidden names are squeezed out of the returned buffer in place.
 */
ssize_t ovl_listxattr(struct dentry *dentry, char *list, size_t size)
{
	struct dentry *realdentry = ovl_dentry_real(dentry);
	ssize_t res;
	size_t len;
	char *s;
	const struct cred *old_cred;

	old_cred = ovl_override_creds(dentry->d_sb);
	res = vfs_listxattr(realdentry, list, size);
	ovl_revert_creds(dentry->d_sb, old_cred);
	if (res <= 0 || size == 0)
		return res;

	/* filter out private xattrs */
	for (s = list, len = res; len;) {
		/* Names are NUL separated; slen includes the terminator */
		size_t slen = strnlen(s, len) + 1;

		/* underlying fs providing us with an broken xattr list? */
		if (WARN_ON(slen > len))
			return -EIO;

		len -= slen;
		if (!ovl_can_list(dentry->d_sb, s)) {
			/* Compact the remaining names over this one */
			res -= slen;
			memmove(s, s + slen, len);
		} else {
			s += slen;
		}
	}

	return res;
}
  407. #ifdef CONFIG_FS_POSIX_ACL
  408. /*
  409. * Apply the idmapping of the layer to POSIX ACLs. The caller must pass a clone
  410. * of the POSIX ACLs retrieved from the lower layer to this function to not
  411. * alter the POSIX ACLs for the underlying filesystem.
  412. */
  413. static void ovl_idmap_posix_acl(struct inode *realinode,
  414. struct user_namespace *mnt_userns,
  415. struct posix_acl *acl)
  416. {
  417. struct user_namespace *fs_userns = i_user_ns(realinode);
  418. for (unsigned int i = 0; i < acl->a_count; i++) {
  419. vfsuid_t vfsuid;
  420. vfsgid_t vfsgid;
  421. struct posix_acl_entry *e = &acl->a_entries[i];
  422. switch (e->e_tag) {
  423. case ACL_USER:
  424. vfsuid = make_vfsuid(mnt_userns, fs_userns, e->e_uid);
  425. e->e_uid = vfsuid_into_kuid(vfsuid);
  426. break;
  427. case ACL_GROUP:
  428. vfsgid = make_vfsgid(mnt_userns, fs_userns, e->e_gid);
  429. e->e_gid = vfsgid_into_kgid(vfsgid);
  430. break;
  431. }
  432. }
  433. }
  434. /*
  435. * When the relevant layer is an idmapped mount we need to take the idmapping
  436. * of the layer into account and translate any ACL_{GROUP,USER} values
  437. * according to the idmapped mount.
  438. *
  439. * We cannot alter the ACLs returned from the relevant layer as that would
  440. * alter the cached values filesystem wide for the lower filesystem. Instead we
  441. * can clone the ACLs and then apply the relevant idmapping of the layer.
  442. *
  443. * This is obviously only relevant when idmapped layers are used.
  444. */
struct posix_acl *ovl_get_acl(struct inode *inode, int type, bool rcu)
{
	struct inode *realinode;
	struct posix_acl *acl, *clone;
	struct path realpath;

	/* Careful in RCU walk mode */
	realinode = ovl_i_path_real(inode, &realpath);
	if (!realinode) {
		WARN_ON(!rcu);
		return ERR_PTR(-ECHILD);
	}

	if (!IS_POSIXACL(realinode))
		return NULL;

	if (rcu) {
		/* Lockless: may only consult the cached ACLs */
		acl = get_cached_acl_rcu(realinode, type);
	} else {
		const struct cred *old_cred;

		/* Read the real ACLs with the mounter's credentials */
		old_cred = ovl_override_creds(inode->i_sb);
		acl = get_acl(realinode, type);
		ovl_revert_creds(inode->i_sb, old_cred);
	}
	/*
	 * If there are no POSIX ACLs, or we encountered an error,
	 * or the layer isn't idmapped we don't need to do anything.
	 */
	if (!is_idmapped_mnt(realpath.mnt) || IS_ERR_OR_NULL(acl))
		return acl;

	/*
	 * We only get here if the layer is idmapped. So drop out of RCU path
	 * walk so we can clone the ACLs. There's no need to release the ACLs
	 * since get_cached_acl_rcu() doesn't take a reference on the ACLs.
	 */
	if (rcu)
		return ERR_PTR(-ECHILD);

	/* Clone so the idmapping doesn't alter the fs-wide cached ACLs */
	clone = posix_acl_clone(acl, GFP_KERNEL);
	if (!clone)
		clone = ERR_PTR(-ENOMEM);
	else
		ovl_idmap_posix_acl(realinode, mnt_user_ns(realpath.mnt), clone);
	/*
	 * Since we're not in RCU path walk we always need to release the
	 * original ACLs.
	 */
	posix_acl_release(acl);
	return clone;
}
  491. #endif
  492. int ovl_update_time(struct inode *inode, struct timespec64 *ts, int flags)
  493. {
  494. if (flags & S_ATIME) {
  495. struct ovl_fs *ofs = inode->i_sb->s_fs_info;
  496. struct path upperpath = {
  497. .mnt = ovl_upper_mnt(ofs),
  498. .dentry = ovl_upperdentry_dereference(OVL_I(inode)),
  499. };
  500. if (upperpath.dentry) {
  501. touch_atime(&upperpath);
  502. inode->i_atime = d_inode(upperpath.dentry)->i_atime;
  503. }
  504. }
  505. return 0;
  506. }
  507. static int ovl_fiemap(struct inode *inode, struct fiemap_extent_info *fieinfo,
  508. u64 start, u64 len)
  509. {
  510. int err;
  511. struct inode *realinode = ovl_inode_realdata(inode);
  512. const struct cred *old_cred;
  513. if (!realinode->i_op->fiemap)
  514. return -EOPNOTSUPP;
  515. old_cred = ovl_override_creds(inode->i_sb);
  516. err = realinode->i_op->fiemap(realinode, fieinfo, start, len);
  517. ovl_revert_creds(inode->i_sb, old_cred);
  518. return err;
  519. }
  520. /*
  521. * Work around the fact that security_file_ioctl() takes a file argument.
  522. * Introducing security_inode_fileattr_get/set() hooks would solve this issue
  523. * properly.
  524. */
  525. static int ovl_security_fileattr(const struct path *realpath, struct fileattr *fa,
  526. bool set)
  527. {
  528. struct file *file;
  529. unsigned int cmd;
  530. int err;
  531. file = dentry_open(realpath, O_RDONLY, current_cred());
  532. if (IS_ERR(file))
  533. return PTR_ERR(file);
  534. if (set)
  535. cmd = fa->fsx_valid ? FS_IOC_FSSETXATTR : FS_IOC_SETFLAGS;
  536. else
  537. cmd = fa->fsx_valid ? FS_IOC_FSGETXATTR : FS_IOC_GETFLAGS;
  538. err = security_file_ioctl(file, cmd, 0);
  539. fput(file);
  540. return err;
  541. }
  542. int ovl_real_fileattr_set(const struct path *realpath, struct fileattr *fa)
  543. {
  544. int err;
  545. err = ovl_security_fileattr(realpath, fa, true);
  546. if (err)
  547. return err;
  548. return vfs_fileattr_set(mnt_user_ns(realpath->mnt), realpath->dentry, fa);
  549. }
/*
 * ovl_fileattr_set - ->fileattr_set for overlay inodes
 *
 * Copies the file up, stores protection flags (immutable/append-only) in
 * the overlay xattr, applies the remaining flags to the upper file, and
 * finally recomputes the overlay inode flags from both sources.
 */
int ovl_fileattr_set(struct user_namespace *mnt_userns,
		     struct dentry *dentry, struct fileattr *fa)
{
	struct inode *inode = d_inode(dentry);
	struct path upperpath;
	const struct cred *old_cred;
	unsigned int flags;
	int err;

	err = ovl_want_write(dentry);
	if (err)
		goto out;

	err = ovl_copy_up(dentry);
	if (!err) {
		ovl_path_real(dentry, &upperpath);

		old_cred = ovl_override_creds(inode->i_sb);
		/*
		 * Store immutable/append-only flags in xattr and clear them
		 * in upper fileattr (in case they were set by older kernel)
		 * so children of "ovl-immutable" directories lower aliases of
		 * "ovl-immutable" hardlinks could be copied up.
		 * Clear xattr when flags are cleared.
		 */
		err = ovl_set_protattr(inode, upperpath.dentry, fa);
		if (!err)
			err = ovl_real_fileattr_set(&upperpath, fa);
		ovl_revert_creds(inode->i_sb, old_cred);

		/*
		 * Merge real inode flags with inode flags read from
		 * overlay.protattr xattr
		 */
		flags = ovl_inode_real(inode)->i_flags & OVL_COPY_I_FLAGS_MASK;

		/* Protection flags must be a subset of the copied flags */
		BUILD_BUG_ON(OVL_PROT_I_FLAGS_MASK & ~OVL_COPY_I_FLAGS_MASK);
		flags |= inode->i_flags & OVL_PROT_I_FLAGS_MASK;
		inode_set_flags(inode, flags, OVL_COPY_I_FLAGS_MASK);

		/* Update ctime */
		ovl_copyattr(inode);
	}
	ovl_drop_write(dentry);
out:
	return err;
}
  591. /* Convert inode protection flags to fileattr flags */
  592. static void ovl_fileattr_prot_flags(struct inode *inode, struct fileattr *fa)
  593. {
  594. BUILD_BUG_ON(OVL_PROT_FS_FLAGS_MASK & ~FS_COMMON_FL);
  595. BUILD_BUG_ON(OVL_PROT_FSX_FLAGS_MASK & ~FS_XFLAG_COMMON);
  596. if (inode->i_flags & S_APPEND) {
  597. fa->flags |= FS_APPEND_FL;
  598. fa->fsx_xflags |= FS_XFLAG_APPEND;
  599. }
  600. if (inode->i_flags & S_IMMUTABLE) {
  601. fa->flags |= FS_IMMUTABLE_FL;
  602. fa->fsx_xflags |= FS_XFLAG_IMMUTABLE;
  603. }
  604. }
  605. int ovl_real_fileattr_get(const struct path *realpath, struct fileattr *fa)
  606. {
  607. int err;
  608. err = ovl_security_fileattr(realpath, fa, false);
  609. if (err)
  610. return err;
  611. err = vfs_fileattr_get(realpath->dentry, fa);
  612. if (err == -ENOIOCTLCMD)
  613. err = -ENOTTY;
  614. return err;
  615. }
  616. int ovl_fileattr_get(struct dentry *dentry, struct fileattr *fa)
  617. {
  618. struct inode *inode = d_inode(dentry);
  619. struct path realpath;
  620. const struct cred *old_cred;
  621. int err;
  622. ovl_path_real(dentry, &realpath);
  623. old_cred = ovl_override_creds(inode->i_sb);
  624. err = ovl_real_fileattr_get(&realpath, fa);
  625. ovl_fileattr_prot_flags(inode, fa);
  626. ovl_revert_creds(inode->i_sb, old_cred);
  627. return err;
  628. }
/* Inode operations for regular files */
static const struct inode_operations ovl_file_inode_operations = {
	.setattr	= ovl_setattr,
	.permission	= ovl_permission,
	.getattr	= ovl_getattr,
	.listxattr	= ovl_listxattr,
	.get_acl	= ovl_get_acl,
	.update_time	= ovl_update_time,
	.fiemap		= ovl_fiemap,
	.fileattr_get	= ovl_fileattr_get,
	.fileattr_set	= ovl_fileattr_set,
};

/* Inode operations for symlinks (no permission/acl/fileattr hooks) */
static const struct inode_operations ovl_symlink_inode_operations = {
	.setattr	= ovl_setattr,
	.get_link	= ovl_get_link,
	.getattr	= ovl_getattr,
	.listxattr	= ovl_listxattr,
	.update_time	= ovl_update_time,
};

/* Inode operations for device nodes, fifos and sockets */
static const struct inode_operations ovl_special_inode_operations = {
	.setattr	= ovl_setattr,
	.permission	= ovl_permission,
	.getattr	= ovl_getattr,
	.listxattr	= ovl_listxattr,
	.get_acl	= ovl_get_acl,
	.update_time	= ovl_update_time,
};

static const struct address_space_operations ovl_aops = {
	/* For O_DIRECT dentry_open() checks f_mapping->a_ops->direct_IO */
	.direct_IO		= noop_direct_IO,
};
  659. /*
  660. * It is possible to stack overlayfs instance on top of another
  661. * overlayfs instance as lower layer. We need to annotate the
  662. * stackable i_mutex locks according to stack level of the super
  663. * block instance. An overlayfs instance can never be in stack
  664. * depth 0 (there is always a real fs below it). An overlayfs
  665. * inode lock will use the lockdep annotation ovl_i_mutex_key[depth].
  666. *
  667. * For example, here is a snip from /proc/lockdep_chains after
  668. * dir_iterate of nested overlayfs:
  669. *
  670. * [...] &ovl_i_mutex_dir_key[depth] (stack_depth=2)
  671. * [...] &ovl_i_mutex_dir_key[depth]#2 (stack_depth=1)
  672. * [...] &type->i_mutex_dir_key (stack_depth=0)
  673. *
  674. * Locking order w.r.t ovl_want_write() is important for nested overlayfs.
  675. *
  676. * This chain is valid:
  677. * - inode->i_rwsem (inode_lock[2])
  678. * - upper_mnt->mnt_sb->s_writers (ovl_want_write[0])
  679. * - OVL_I(inode)->lock (ovl_inode_lock[2])
  680. * - OVL_I(lowerinode)->lock (ovl_inode_lock[1])
  681. *
  682. * And this chain is valid:
  683. * - inode->i_rwsem (inode_lock[2])
  684. * - OVL_I(inode)->lock (ovl_inode_lock[2])
  685. * - lowerinode->i_rwsem (inode_lock[1])
  686. * - OVL_I(lowerinode)->lock (ovl_inode_lock[1])
  687. *
  688. * But lowerinode->i_rwsem SHOULD NOT be acquired while ovl_want_write() is
  689. * held, because it is in reverse order of the non-nested case using the same
  690. * upper fs:
  691. * - inode->i_rwsem (inode_lock[1])
  692. * - upper_mnt->mnt_sb->s_writers (ovl_want_write[0])
  693. * - OVL_I(inode)->lock (ovl_inode_lock[1])
  694. */
#define OVL_MAX_NESTING FILESYSTEM_MAX_STACK_DEPTH

/*
 * Give each overlay nesting depth its own lockdep class for i_rwsem and
 * the ovl inode lock, so nested overlay instances don't trip false
 * lock-inversion reports (see the block comment above for the rationale).
 */
static inline void ovl_lockdep_annotate_inode_mutex_key(struct inode *inode)
{
#ifdef CONFIG_LOCKDEP
	static struct lock_class_key ovl_i_mutex_key[OVL_MAX_NESTING];
	static struct lock_class_key ovl_i_mutex_dir_key[OVL_MAX_NESTING];
	static struct lock_class_key ovl_i_lock_key[OVL_MAX_NESTING];

	/* Overlayfs is never at depth 0; there's always a real fs below */
	int depth = inode->i_sb->s_stack_depth - 1;

	if (WARN_ON_ONCE(depth < 0 || depth >= OVL_MAX_NESTING))
		depth = 0;

	if (S_ISDIR(inode->i_mode))
		lockdep_set_class(&inode->i_rwsem, &ovl_i_mutex_dir_key[depth]);
	else
		lockdep_set_class(&inode->i_rwsem, &ovl_i_mutex_key[depth]);

	lockdep_set_class(&OVL_I(inode)->lock, &ovl_i_lock_key[depth]);
#endif
}
  712. static void ovl_next_ino(struct inode *inode)
  713. {
  714. struct ovl_fs *ofs = inode->i_sb->s_fs_info;
  715. inode->i_ino = atomic_long_inc_return(&ofs->last_ino);
  716. if (unlikely(!inode->i_ino))
  717. inode->i_ino = atomic_long_inc_return(&ofs->last_ino);
  718. }
/* Set inode->i_ino so it is consistent with the st_ino reported by stat(2) */
static void ovl_map_ino(struct inode *inode, unsigned long ino, int fsid)
{
	int xinobits = ovl_xino_bits(inode->i_sb);
	unsigned int xinoshift = 64 - xinobits;

	/*
	 * When d_ino is consistent with st_ino (samefs or i_ino has enough
	 * bits to encode layer), set the same value used for st_ino to i_ino,
	 * so inode number exposed via /proc/locks and a like will be
	 * consistent with d_ino and st_ino values. An i_ino value inconsistent
	 * with d_ino also causes nfsd readdirplus to fail.
	 */
	inode->i_ino = ino;
	if (ovl_same_fs(inode->i_sb)) {
		return;
	} else if (xinobits && likely(!(ino >> xinoshift))) {
		/* Encode the layer id in the high xino bits (see ovl_map_dev_ino) */
		inode->i_ino |= (unsigned long)fsid << (xinoshift + 1);
		return;
	}

	/*
	 * For directory inodes on non-samefs with xino disabled or xino
	 * overflow, we allocate a non-persistent inode number, to be used for
	 * resolving st_ino collisions in ovl_map_dev_ino().
	 *
	 * To avoid ino collision with legitimate xino values from upper
	 * layer (fsid 0), use the lowest xinobit to map the non
	 * persistent inode numbers to the unified st_ino address space.
	 */
	if (S_ISDIR(inode->i_mode)) {
		ovl_next_ino(inode);
		if (xinobits) {
			inode->i_ino &= ~0UL >> xinobits;
			inode->i_ino |= 1UL << xinoshift;
		}
	}
}
  754. void ovl_inode_init(struct inode *inode, struct ovl_inode_params *oip,
  755. unsigned long ino, int fsid)
  756. {
  757. struct inode *realinode;
  758. struct ovl_inode *oi = OVL_I(inode);
  759. if (oip->upperdentry)
  760. oi->__upperdentry = oip->upperdentry;
  761. if (oip->lowerpath && oip->lowerpath->dentry) {
  762. oi->lowerpath.dentry = dget(oip->lowerpath->dentry);
  763. oi->lowerpath.layer = oip->lowerpath->layer;
  764. }
  765. if (oip->lowerdata)
  766. oi->lowerdata = igrab(d_inode(oip->lowerdata));
  767. realinode = ovl_inode_real(inode);
  768. ovl_copyattr(inode);
  769. ovl_copyflags(realinode, inode);
  770. ovl_map_ino(inode, ino, fsid);
  771. }
  772. static void ovl_fill_inode(struct inode *inode, umode_t mode, dev_t rdev)
  773. {
  774. inode->i_mode = mode;
  775. inode->i_flags |= S_NOCMTIME;
  776. #ifdef CONFIG_FS_POSIX_ACL
  777. inode->i_acl = inode->i_default_acl = ACL_DONT_CACHE;
  778. #endif
  779. ovl_lockdep_annotate_inode_mutex_key(inode);
  780. switch (mode & S_IFMT) {
  781. case S_IFREG:
  782. inode->i_op = &ovl_file_inode_operations;
  783. inode->i_fop = &ovl_file_operations;
  784. inode->i_mapping->a_ops = &ovl_aops;
  785. break;
  786. case S_IFDIR:
  787. inode->i_op = &ovl_dir_inode_operations;
  788. inode->i_fop = &ovl_dir_operations;
  789. break;
  790. case S_IFLNK:
  791. inode->i_op = &ovl_symlink_inode_operations;
  792. break;
  793. default:
  794. inode->i_op = &ovl_special_inode_operations;
  795. init_special_inode(inode, mode, rdev);
  796. break;
  797. }
  798. }
  799. /*
  800. * With inodes index enabled, an overlay inode nlink counts the union of upper
  801. * hardlinks and non-covered lower hardlinks. During the lifetime of a non-pure
  802. * upper inode, the following nlink modifying operations can happen:
  803. *
  804. * 1. Lower hardlink copy up
  805. * 2. Upper hardlink created, unlinked or renamed over
  806. * 3. Lower hardlink whiteout or renamed over
  807. *
  808. * For the first, copy up case, the union nlink does not change, whether the
  809. * operation succeeds or fails, but the upper inode nlink may change.
  810. * Therefore, before copy up, we store the union nlink value relative to the
  811. * lower inode nlink in the index inode xattr .overlay.nlink.
  812. *
  813. * For the second, upper hardlink case, the union nlink should be incremented
  814. * or decremented IFF the operation succeeds, aligned with nlink change of the
  815. * upper inode. Therefore, before link/unlink/rename, we store the union nlink
  816. * value relative to the upper inode nlink in the index inode.
  817. *
  818. * For the last, lower cover up case, we simplify things by preceding the
  819. * whiteout or cover up with copy up. This makes sure that there is an index
  820. * upper inode where the nlink xattr can be stored before the copied up upper
entry is unlinked.
  822. */
  823. #define OVL_NLINK_ADD_UPPER (1 << 0)
  824. /*
  825. * On-disk format for indexed nlink:
  826. *
  827. * nlink relative to the upper inode - "U[+-]NUM"
  828. * nlink relative to the lower inode - "L[+-]NUM"
  829. */
  830. static int ovl_set_nlink_common(struct dentry *dentry,
  831. struct dentry *realdentry, const char *format)
  832. {
  833. struct inode *inode = d_inode(dentry);
  834. struct inode *realinode = d_inode(realdentry);
  835. char buf[13];
  836. int len;
  837. len = snprintf(buf, sizeof(buf), format,
  838. (int) (inode->i_nlink - realinode->i_nlink));
  839. if (WARN_ON(len >= sizeof(buf)))
  840. return -EIO;
  841. return ovl_setxattr(OVL_FS(inode->i_sb), ovl_dentry_upper(dentry),
  842. OVL_XATTR_NLINK, buf, len);
  843. }
  844. int ovl_set_nlink_upper(struct dentry *dentry)
  845. {
  846. return ovl_set_nlink_common(dentry, ovl_dentry_upper(dentry), "U%+i");
  847. }
  848. int ovl_set_nlink_lower(struct dentry *dentry)
  849. {
  850. return ovl_set_nlink_common(dentry, ovl_dentry_lower(dentry), "L%+i");
  851. }
  852. unsigned int ovl_get_nlink(struct ovl_fs *ofs, struct dentry *lowerdentry,
  853. struct dentry *upperdentry,
  854. unsigned int fallback)
  855. {
  856. int nlink_diff;
  857. int nlink;
  858. char buf[13];
  859. int err;
  860. if (!lowerdentry || !upperdentry || d_inode(lowerdentry)->i_nlink == 1)
  861. return fallback;
  862. err = ovl_getxattr_upper(ofs, upperdentry, OVL_XATTR_NLINK,
  863. &buf, sizeof(buf) - 1);
  864. if (err < 0)
  865. goto fail;
  866. buf[err] = '\0';
  867. if ((buf[0] != 'L' && buf[0] != 'U') ||
  868. (buf[1] != '+' && buf[1] != '-'))
  869. goto fail;
  870. err = kstrtoint(buf + 1, 10, &nlink_diff);
  871. if (err < 0)
  872. goto fail;
  873. nlink = d_inode(buf[0] == 'L' ? lowerdentry : upperdentry)->i_nlink;
  874. nlink += nlink_diff;
  875. if (nlink <= 0)
  876. goto fail;
  877. return nlink;
  878. fail:
  879. pr_warn_ratelimited("failed to get index nlink (%pd2, err=%i)\n",
  880. upperdentry, err);
  881. return fallback;
  882. }
  883. struct inode *ovl_new_inode(struct super_block *sb, umode_t mode, dev_t rdev)
  884. {
  885. struct inode *inode;
  886. inode = new_inode(sb);
  887. if (inode)
  888. ovl_fill_inode(inode, mode, rdev);
  889. return inode;
  890. }
  891. static int ovl_inode_test(struct inode *inode, void *data)
  892. {
  893. return inode->i_private == data;
  894. }
  895. static int ovl_inode_set(struct inode *inode, void *data)
  896. {
  897. inode->i_private = data;
  898. return 0;
  899. }
  900. static bool ovl_verify_inode(struct inode *inode, struct dentry *lowerdentry,
  901. struct dentry *upperdentry, bool strict)
  902. {
  903. /*
  904. * For directories, @strict verify from lookup path performs consistency
  905. * checks, so NULL lower/upper in dentry must match NULL lower/upper in
  906. * inode. Non @strict verify from NFS handle decode path passes NULL for
  907. * 'unknown' lower/upper.
  908. */
  909. if (S_ISDIR(inode->i_mode) && strict) {
  910. /* Real lower dir moved to upper layer under us? */
  911. if (!lowerdentry && ovl_inode_lower(inode))
  912. return false;
  913. /* Lookup of an uncovered redirect origin? */
  914. if (!upperdentry && ovl_inode_upper(inode))
  915. return false;
  916. }
  917. /*
  918. * Allow non-NULL lower inode in ovl_inode even if lowerdentry is NULL.
  919. * This happens when finding a copied up overlay inode for a renamed
  920. * or hardlinked overlay dentry and lower dentry cannot be followed
  921. * by origin because lower fs does not support file handles.
  922. */
  923. if (lowerdentry && ovl_inode_lower(inode) != d_inode(lowerdentry))
  924. return false;
  925. /*
  926. * Allow non-NULL __upperdentry in inode even if upperdentry is NULL.
  927. * This happens when finding a lower alias for a copied up hard link.
  928. */
  929. if (upperdentry && ovl_inode_upper(inode) != d_inode(upperdentry))
  930. return false;
  931. return true;
  932. }
  933. struct inode *ovl_lookup_inode(struct super_block *sb, struct dentry *real,
  934. bool is_upper)
  935. {
  936. struct inode *inode, *key = d_inode(real);
  937. inode = ilookup5(sb, (unsigned long) key, ovl_inode_test, key);
  938. if (!inode)
  939. return NULL;
  940. if (!ovl_verify_inode(inode, is_upper ? NULL : real,
  941. is_upper ? real : NULL, false)) {
  942. iput(inode);
  943. return ERR_PTR(-ESTALE);
  944. }
  945. return inode;
  946. }
  947. bool ovl_lookup_trap_inode(struct super_block *sb, struct dentry *dir)
  948. {
  949. struct inode *key = d_inode(dir);
  950. struct inode *trap;
  951. bool res;
  952. trap = ilookup5(sb, (unsigned long) key, ovl_inode_test, key);
  953. if (!trap)
  954. return false;
  955. res = IS_DEADDIR(trap) && !ovl_inode_upper(trap) &&
  956. !ovl_inode_lower(trap);
  957. iput(trap);
  958. return res;
  959. }
  960. /*
  961. * Create an inode cache entry for layer root dir, that will intentionally
  962. * fail ovl_verify_inode(), so any lookup that will find some layer root
  963. * will fail.
  964. */
  965. struct inode *ovl_get_trap_inode(struct super_block *sb, struct dentry *dir)
  966. {
  967. struct inode *key = d_inode(dir);
  968. struct inode *trap;
  969. if (!d_is_dir(dir))
  970. return ERR_PTR(-ENOTDIR);
  971. trap = iget5_locked(sb, (unsigned long) key, ovl_inode_test,
  972. ovl_inode_set, key);
  973. if (!trap)
  974. return ERR_PTR(-ENOMEM);
  975. if (!(trap->i_state & I_NEW)) {
  976. /* Conflicting layer roots? */
  977. iput(trap);
  978. return ERR_PTR(-ELOOP);
  979. }
  980. trap->i_mode = S_IFDIR;
  981. trap->i_flags = S_DEAD;
  982. unlock_new_inode(trap);
  983. return trap;
  984. }
  985. /*
  986. * Does overlay inode need to be hashed by lower inode?
  987. */
  988. static bool ovl_hash_bylower(struct super_block *sb, struct dentry *upper,
  989. struct dentry *lower, bool index)
  990. {
  991. struct ovl_fs *ofs = sb->s_fs_info;
  992. /* No, if pure upper */
  993. if (!lower)
  994. return false;
  995. /* Yes, if already indexed */
  996. if (index)
  997. return true;
  998. /* Yes, if won't be copied up */
  999. if (!ovl_upper_mnt(ofs))
  1000. return true;
  1001. /* No, if lower hardlink is or will be broken on copy up */
  1002. if ((upper || !ovl_indexdir(sb)) &&
  1003. !d_is_dir(lower) && d_inode(lower)->i_nlink > 1)
  1004. return false;
  1005. /* No, if non-indexed upper with NFS export */
  1006. if (sb->s_export_op && upper)
  1007. return false;
  1008. /* Otherwise, hash by lower inode for fsnotify */
  1009. return true;
  1010. }
  1011. static struct inode *ovl_iget5(struct super_block *sb, struct inode *newinode,
  1012. struct inode *key)
  1013. {
  1014. return newinode ? inode_insert5(newinode, (unsigned long) key,
  1015. ovl_inode_test, ovl_inode_set, key) :
  1016. iget5_locked(sb, (unsigned long) key,
  1017. ovl_inode_test, ovl_inode_set, key);
  1018. }
/*
 * Get or create the overlay inode for a looked-up upper/lower dentry pair.
 *
 * Hashed inodes (upper exists, or hash-by-lower applies) are looked up or
 * inserted via ovl_iget5(); an unhashed inode is allocated for a lower
 * hardlink that will be broken on copy up. On success, ownership of
 * oip->upperdentry's reference and of oip->redirect is consumed (either by
 * the new inode or dropped when an existing inode is reused). Returns an
 * ERR_PTR on failure.
 */
struct inode *ovl_get_inode(struct super_block *sb,
			    struct ovl_inode_params *oip)
{
	struct ovl_fs *ofs = OVL_FS(sb);
	struct dentry *upperdentry = oip->upperdentry;
	struct ovl_path *lowerpath = oip->lowerpath;
	struct inode *realinode = upperdentry ? d_inode(upperdentry) : NULL;
	struct inode *inode;
	struct dentry *lowerdentry = lowerpath ? lowerpath->dentry : NULL;
	/* Effective real path: upper if present, else the lower layer */
	struct path realpath = {
		.dentry = upperdentry ?: lowerdentry,
		.mnt = upperdentry ? ovl_upper_mnt(ofs) : lowerpath->layer->mnt,
	};
	bool bylower = ovl_hash_bylower(sb, upperdentry, lowerdentry,
					oip->index);
	int fsid = bylower ? lowerpath->layer->fsid : 0;
	bool is_dir;
	unsigned long ino = 0;
	int err = oip->newinode ? -EEXIST : -ENOMEM;

	if (!realinode)
		realinode = d_inode(lowerdentry);

	/*
	 * Copy up origin (lower) may exist for non-indexed upper, but we must
	 * not use lower as hash key if this is a broken hardlink.
	 */
	is_dir = S_ISDIR(realinode->i_mode);
	if (upperdentry || bylower) {
		/* Hash by the lower inode when bylower, else by the upper */
		struct inode *key = d_inode(bylower ? lowerdentry :
						      upperdentry);
		unsigned int nlink = is_dir ? 1 : realinode->i_nlink;

		inode = ovl_iget5(sb, oip->newinode, key);
		if (!inode)
			goto out_err;
		if (!(inode->i_state & I_NEW)) {
			/*
			 * Verify that the underlying files stored in the inode
			 * match those in the dentry.
			 */
			if (!ovl_verify_inode(inode, lowerdentry, upperdentry,
					      true)) {
				iput(inode);
				err = -ESTALE;
				goto out_err;
			}
			/*
			 * Reusing an existing inode: drop the references that
			 * ovl_inode_init() would otherwise have consumed.
			 */
			dput(upperdentry);
			kfree(oip->redirect);
			goto out;
		}

		/* Recalculate nlink for non-dir due to indexing */
		if (!is_dir)
			nlink = ovl_get_nlink(ofs, lowerdentry, upperdentry,
					      nlink);
		set_nlink(inode, nlink);
		ino = key->i_ino;
	} else {
		/* Lower hardlink that will be broken on copy up */
		inode = new_inode(sb);
		if (!inode) {
			err = -ENOMEM;
			goto out_err;
		}
		ino = realinode->i_ino;
		fsid = lowerpath->layer->fsid;
	}
	ovl_fill_inode(inode, realinode->i_mode, realinode->i_rdev);
	ovl_inode_init(inode, oip, ino, fsid);

	if (upperdentry && ovl_is_impuredir(sb, upperdentry))
		ovl_set_flag(OVL_IMPURE, inode);

	if (oip->index)
		ovl_set_flag(OVL_INDEX, inode);

	/* Ownership of oip->redirect transfers to the inode */
	OVL_I(inode)->redirect = oip->redirect;

	if (bylower)
		ovl_set_flag(OVL_CONST_INO, inode);

	/* Check for non-merge dir that may have whiteouts */
	if (is_dir) {
		if (((upperdentry && lowerdentry) || oip->numlower > 1) ||
		    ovl_path_check_origin_xattr(ofs, &realpath)) {
			ovl_set_flag(OVL_WHITEOUTS, inode);
		}
	}

	/* Check for immutable/append-only inode flags in xattr */
	if (upperdentry)
		ovl_check_protattr(inode, upperdentry);

	if (inode->i_state & I_NEW)
		unlock_new_inode(inode);
out:
	return inode;

out_err:
	pr_warn_ratelimited("failed to get inode (%i)\n", err);
	inode = ERR_PTR(err);
	goto out;
}