// SPDX-License-Identifier: GPL-2.0
/*
 * (C) 2001 Clemson University and The University of Chicago
 * Copyright 2018 Omnibond Systems, L.L.C.
 *
 * See COPYING in top-level directory.
 */

/*
 * Linux VFS inode operations.
 */

#include <linux/blkdev.h>
#include <linux/fileattr.h>
#include "protocol.h"
#include "orangefs-kernel.h"
#include "orangefs-bufmap.h"

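/*
 * Write a single locked page back to the server. The write range attached
 * to the page as private data supplies the file offset, length, uid and
 * gid for the I/O; the length is clamped so the write never extends past
 * the current i_size.
 */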
static int orangefs_writepage_locked(struct page *page,
    struct writeback_control *wbc)
{
	struct inode *inode = page->mapping->host;
	struct orangefs_write_range *wr = NULL;
	struct iov_iter iter;
	struct bio_vec bv;
	size_t len, wlen;
	ssize_t ret;
	loff_t off;

	set_page_writeback(page);

	len = i_size_read(inode);
	if (PagePrivate(page)) {
		wr = (struct orangefs_write_range *)page_private(page);
		WARN_ON(wr->pos >= len);
		off = wr->pos;
		if (off + wr->len > len)
			wlen = len - off;
		else
			wlen = wr->len;
	} else {
		WARN_ON(1);
		off = page_offset(page);
		if (off + PAGE_SIZE > len)
			wlen = len - off;
		else
			wlen = PAGE_SIZE;
	}
	/* Should've been handled in orangefs_invalidate_folio. */
	WARN_ON(off == len || off + wlen > len);

	bv.bv_page = page;
	bv.bv_len = wlen;
	bv.bv_offset = off % PAGE_SIZE;
	WARN_ON(wlen == 0);
	iov_iter_bvec(&iter, ITER_SOURCE, &bv, 1, wlen);

	ret = wait_for_direct_io(ORANGEFS_IO_WRITE, inode, &off, &iter, wlen,
	    len, wr, NULL, NULL);
	if (ret < 0) {
		SetPageError(page);
		mapping_set_error(page->mapping, ret);
	} else {
		ret = 0;
	}
	kfree(detach_page_private(page));
	return ret;
}

static int orangefs_writepage(struct page *page, struct writeback_control *wbc)
{
	int ret;

	ret = orangefs_writepage_locked(page, wbc);
	unlock_page(page);
	end_page_writeback(page);
	return ret;
}

struct orangefs_writepages {
	loff_t off;
	size_t len;
	kuid_t uid;
	kgid_t gid;
	int maxpages;
	int npages;
	struct page **pages;
	struct bio_vec *bv;
};

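/*
 * Flush the pages accumulated in *ow as one contiguous write. Every page
 * in the batch shares one uid/gid pair, so the server sees a single write
 * issued on behalf of that user; the length is clamped to i_size first.
 */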
static int orangefs_writepages_work(struct orangefs_writepages *ow,
    struct writeback_control *wbc)
{
	struct inode *inode = ow->pages[0]->mapping->host;
	struct orangefs_write_range *wrp, wr;
	struct iov_iter iter;
	ssize_t ret;
	size_t len;
	loff_t off;
	int i;

	len = i_size_read(inode);

	for (i = 0; i < ow->npages; i++) {
		set_page_writeback(ow->pages[i]);
		ow->bv[i].bv_page = ow->pages[i];
		ow->bv[i].bv_len = min(page_offset(ow->pages[i]) + PAGE_SIZE,
		    ow->off + ow->len) -
		    max(ow->off, page_offset(ow->pages[i]));
		if (i == 0)
			ow->bv[i].bv_offset = ow->off -
			    page_offset(ow->pages[i]);
		else
			ow->bv[i].bv_offset = 0;
	}
	iov_iter_bvec(&iter, ITER_SOURCE, ow->bv, ow->npages, ow->len);

	WARN_ON(ow->off >= len);
	if (ow->off + ow->len > len)
		ow->len = len - ow->off;

	off = ow->off;
	wr.uid = ow->uid;
	wr.gid = ow->gid;
	ret = wait_for_direct_io(ORANGEFS_IO_WRITE, inode, &off, &iter, ow->len,
	    0, &wr, NULL, NULL);
	if (ret < 0) {
		for (i = 0; i < ow->npages; i++) {
			SetPageError(ow->pages[i]);
			mapping_set_error(ow->pages[i]->mapping, ret);
			if (PagePrivate(ow->pages[i])) {
				wrp = (struct orangefs_write_range *)
				    page_private(ow->pages[i]);
				ClearPagePrivate(ow->pages[i]);
				put_page(ow->pages[i]);
				kfree(wrp);
			}
			end_page_writeback(ow->pages[i]);
			unlock_page(ow->pages[i]);
		}
	} else {
		ret = 0;
		for (i = 0; i < ow->npages; i++) {
			if (PagePrivate(ow->pages[i])) {
				wrp = (struct orangefs_write_range *)
				    page_private(ow->pages[i]);
				ClearPagePrivate(ow->pages[i]);
				put_page(ow->pages[i]);
				kfree(wrp);
			}
			end_page_writeback(ow->pages[i]);
			unlock_page(ow->pages[i]);
		}
	}
	return ret;
}

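/*
 * Called by write_cache_pages for each dirty page. Contiguous pages with
 * matching credentials are collected into *ow; a page that cannot join
 * the current batch forces the batch to be written out, after which the
 * page is written on its own.
 */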
static int orangefs_writepages_callback(struct page *page,
    struct writeback_control *wbc, void *data)
{
	struct orangefs_writepages *ow = data;
	struct orangefs_write_range *wr;
	int ret;

	if (!PagePrivate(page)) {
		unlock_page(page);
		/* It's not private so there's nothing to write, right? */
		printk("writepages_callback not private!\n");
		BUG();
		return 0;
	}
	wr = (struct orangefs_write_range *)page_private(page);

	ret = -1;
	if (ow->npages == 0) {
		ow->off = wr->pos;
		ow->len = wr->len;
		ow->uid = wr->uid;
		ow->gid = wr->gid;
		ow->pages[ow->npages++] = page;
		ret = 0;
		goto done;
	}
	if (!uid_eq(ow->uid, wr->uid) || !gid_eq(ow->gid, wr->gid)) {
		orangefs_writepages_work(ow, wbc);
		ow->npages = 0;
		ret = -1;
		goto done;
	}
	if (ow->off + ow->len == wr->pos) {
		ow->len += wr->len;
		ow->pages[ow->npages++] = page;
		ret = 0;
		goto done;
	}
done:
	if (ret == -1) {
		if (ow->npages) {
			orangefs_writepages_work(ow, wbc);
			ow->npages = 0;
		}
		ret = orangefs_writepage_locked(page, wbc);
		mapping_set_error(page->mapping, ret);
		unlock_page(page);
		end_page_writeback(page);
	} else {
		if (ow->npages == ow->maxpages) {
			orangefs_writepages_work(ow, wbc);
			ow->npages = 0;
		}
	}
	return ret;
}

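/*
 * Write back a range of the mapping. Batches are capped at the buffer
 * size reported by orangefs_bufmap_size_query(), so one batch never
 * exceeds a single transfer to the client.
 */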
static int orangefs_writepages(struct address_space *mapping,
    struct writeback_control *wbc)
{
	struct orangefs_writepages *ow;
	struct blk_plug plug;
	int ret;

	ow = kzalloc(sizeof(struct orangefs_writepages), GFP_KERNEL);
	if (!ow)
		return -ENOMEM;
	ow->maxpages = orangefs_bufmap_size_query() / PAGE_SIZE;
	ow->pages = kcalloc(ow->maxpages, sizeof(struct page *), GFP_KERNEL);
	if (!ow->pages) {
		kfree(ow);
		return -ENOMEM;
	}
	ow->bv = kcalloc(ow->maxpages, sizeof(struct bio_vec), GFP_KERNEL);
	if (!ow->bv) {
		kfree(ow->pages);
		kfree(ow);
		return -ENOMEM;
	}
	blk_start_plug(&plug);
	ret = write_cache_pages(mapping, wbc, orangefs_writepages_callback, ow);
	if (ow->npages)
		ret = orangefs_writepages_work(ow, wbc);
	blk_finish_plug(&plug);
	kfree(ow->pages);
	kfree(ow->bv);
	kfree(ow);
	return ret;
}

static int orangefs_launder_folio(struct folio *);

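/*
 * Read ahead into the page cache. The requested window is expanded
 * (to 4 MB when at least 1024 pages remain, or to EOF when the remainder
 * exceeds the requested count) and then filled with one read from the
 * server.
 */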
static void orangefs_readahead(struct readahead_control *rac)
{
	loff_t offset;
	struct iov_iter iter;
	struct inode *inode = rac->mapping->host;
	struct xarray *i_pages;
	struct page *page;
	loff_t new_start = readahead_pos(rac);
	int ret;
	size_t new_len = 0;

	loff_t bytes_remaining = inode->i_size - readahead_pos(rac);
	loff_t pages_remaining = bytes_remaining / PAGE_SIZE;

	if (pages_remaining >= 1024)
		new_len = 4194304;
	else if (pages_remaining > readahead_count(rac))
		new_len = bytes_remaining;

	if (new_len)
		readahead_expand(rac, new_start, new_len);

	offset = readahead_pos(rac);
	i_pages = &rac->mapping->i_pages;

	iov_iter_xarray(&iter, ITER_DEST, i_pages, offset, readahead_length(rac));

	/* read in the pages. */
	if ((ret = wait_for_direct_io(ORANGEFS_IO_READ, inode,
	    &offset, &iter, readahead_length(rac),
	    inode->i_size, NULL, NULL, rac->file)) < 0)
		gossip_debug(GOSSIP_FILE_DEBUG,
		    "%s: wait_for_direct_io failed.\n", __func__);
	else
		ret = 0;

	/* clean up. */
	while ((page = readahead_page(rac))) {
		page_endio(page, false, ret);
		put_page(page);
	}
}

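/*
 * Fill a single folio from the server. A dirty folio is laundered first
 * so the read cannot clobber a pending write; any tail the server did
 * not supply is zeroed before the folio is marked uptodate.
 */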
static int orangefs_read_folio(struct file *file, struct folio *folio)
{
	struct inode *inode = folio->mapping->host;
	struct iov_iter iter;
	struct bio_vec bv;
	ssize_t ret;
	loff_t off; /* offset of this folio in the file */

	if (folio_test_dirty(folio))
		orangefs_launder_folio(folio);

	off = folio_pos(folio);
	bv.bv_page = &folio->page;
	bv.bv_len = folio_size(folio);
	bv.bv_offset = 0;
	iov_iter_bvec(&iter, ITER_DEST, &bv, 1, folio_size(folio));

	ret = wait_for_direct_io(ORANGEFS_IO_READ, inode, &off, &iter,
	    folio_size(folio), inode->i_size, NULL, NULL, file);
	/* this will only zero remaining unread portions of the folio data */
	iov_iter_zero(~0U, &iter);
	/* takes care of potential aliasing */
	flush_dcache_folio(folio);
	if (ret < 0) {
		folio_set_error(folio);
	} else {
		folio_mark_uptodate(folio);
		ret = 0;
	}
	/* unlock the folio after the ->read_folio() routine completes */
	folio_unlock(folio);
	return ret;
}

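/*
 * Prepare a folio for a buffered write and track the write in an
 * orangefs_write_range. An existing range is extended in place when this
 * write continues it under the same uid/gid; otherwise the folio is
 * laundered and a fresh range attached.
 */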
static int orangefs_write_begin(struct file *file,
    struct address_space *mapping, loff_t pos, unsigned len,
    struct page **pagep, void **fsdata)
{
	struct orangefs_write_range *wr;
	struct folio *folio;
	struct page *page;
	pgoff_t index;
	int ret;

	index = pos >> PAGE_SHIFT;

	page = grab_cache_page_write_begin(mapping, index);
	if (!page)
		return -ENOMEM;

	*pagep = page;
	folio = page_folio(page);

	if (folio_test_dirty(folio) && !folio_test_private(folio)) {
		/*
		 * Should be impossible. If it happens, launder the page
		 * since we don't know what's dirty. This will WARN in
		 * orangefs_writepage_locked.
		 */
		ret = orangefs_launder_folio(folio);
		if (ret)
			return ret;
	}
	if (folio_test_private(folio)) {
		struct orangefs_write_range *wr;

		wr = folio_get_private(folio);
		if (wr->pos + wr->len == pos &&
		    uid_eq(wr->uid, current_fsuid()) &&
		    gid_eq(wr->gid, current_fsgid())) {
			wr->len += len;
			goto okay;
		} else {
			ret = orangefs_launder_folio(folio);
			if (ret)
				return ret;
		}
	}

	wr = kmalloc(sizeof *wr, GFP_KERNEL);
	if (!wr)
		return -ENOMEM;

	wr->pos = pos;
	wr->len = len;
	wr->uid = current_fsuid();
	wr->gid = current_fsgid();
	folio_attach_private(folio, wr);
okay:
	return 0;
}

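/*
 * Complete a buffered write: grow i_size if the write extended the file,
 * zero any stale part of a not-uptodate page after a short copy, and
 * mark the page dirty for later writeback.
 */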
static int orangefs_write_end(struct file *file, struct address_space *mapping,
    loff_t pos, unsigned len, unsigned copied, struct page *page, void *fsdata)
{
	struct inode *inode = page->mapping->host;
	loff_t last_pos = pos + copied;

	/*
	 * No need to use i_size_read() here, the i_size
	 * cannot change under us because we hold the i_mutex.
	 */
	if (last_pos > inode->i_size)
		i_size_write(inode, last_pos);

	/* zero the stale part of the page if we did a short copy */
	if (!PageUptodate(page)) {
		unsigned from = pos & (PAGE_SIZE - 1);
		if (copied < len) {
			zero_user(page, from + copied, len - copied);
		}
		/* Set fully written pages uptodate. */
		if (pos == page_offset(page) &&
		    (len == PAGE_SIZE || pos + len == inode->i_size)) {
			zero_user_segment(page, from + copied, PAGE_SIZE);
			SetPageUptodate(page);
		}
	}

	set_page_dirty(page);
	unlock_page(page);
	put_page(page);

	mark_inode_dirty_sync(file_inode(file));
	return copied;
}

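/*
 * Adjust the tracked write range when part of the folio is invalidated:
 * free it when it is fully covered, trim it when the invalidated span
 * overlaps one end, and warn on a punch-hole in the middle. A trimmed
 * range is laundered at the bottom of the function.
 */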
static void orangefs_invalidate_folio(struct folio *folio,
    size_t offset, size_t length)
{
	struct orangefs_write_range *wr = folio_get_private(folio);

	if (offset == 0 && length == PAGE_SIZE) {
		kfree(folio_detach_private(folio));
		return;
	/* write range entirely within invalidate range (or equal) */
	} else if (folio_pos(folio) + offset <= wr->pos &&
	    wr->pos + wr->len <= folio_pos(folio) + offset + length) {
		kfree(folio_detach_private(folio));
		/* XXX is this right? only caller in fs */
		folio_cancel_dirty(folio);
		return;
	/* invalidate range chops off end of write range */
	} else if (wr->pos < folio_pos(folio) + offset &&
	    wr->pos + wr->len <= folio_pos(folio) + offset + length &&
	    folio_pos(folio) + offset < wr->pos + wr->len) {
		size_t x;
		x = wr->pos + wr->len - (folio_pos(folio) + offset);
		WARN_ON(x > wr->len);
		wr->len -= x;
		wr->uid = current_fsuid();
		wr->gid = current_fsgid();
	/* invalidate range chops off beginning of write range */
	} else if (folio_pos(folio) + offset <= wr->pos &&
	    folio_pos(folio) + offset + length < wr->pos + wr->len &&
	    wr->pos < folio_pos(folio) + offset + length) {
		size_t x;
		x = folio_pos(folio) + offset + length - wr->pos;
		WARN_ON(x > wr->len);
		wr->pos += x;
		wr->len -= x;
		wr->uid = current_fsuid();
		wr->gid = current_fsgid();
	/* invalidate range entirely within write range (punch hole) */
	} else if (wr->pos < folio_pos(folio) + offset &&
	    folio_pos(folio) + offset + length < wr->pos + wr->len) {
		/* XXX what do we do here... should not WARN_ON */
		WARN_ON(1);
		/* punch hole */
		/*
		 * should we just ignore this and write it out anyway?
		 * it hardly makes sense
		 */
		return;
	/* non-overlapping ranges */
	} else {
		/* WARN if they do overlap */
		if (!((folio_pos(folio) + offset + length <= wr->pos) ^
		    (wr->pos + wr->len <= folio_pos(folio) + offset))) {
			WARN_ON(1);
			printk("invalidate range offset %llu length %zu\n",
			    folio_pos(folio) + offset, length);
			printk("write range offset %llu length %zu\n",
			    wr->pos, wr->len);
		}
		return;
	}

	/*
	 * Above there are returns where wr is freed or where we WARN.
	 * Thus the following runs if wr was modified above.
	 */
	orangefs_launder_folio(folio);
}

static bool orangefs_release_folio(struct folio *folio, gfp_t foo)
{
	return !folio_test_private(folio);
}

static void orangefs_free_folio(struct folio *folio)
{
	kfree(folio_detach_private(folio));
}

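/*
 * Synchronously write a dirty folio back to the server while it stays
 * locked, so the caller sees it clean on return.
 */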
static int orangefs_launder_folio(struct folio *folio)
{
	int r = 0;
	struct writeback_control wbc = {
		.sync_mode = WB_SYNC_ALL,
		.nr_to_write = 0,
	};
	folio_wait_writeback(folio);
	if (folio_clear_dirty_for_io(folio)) {
		r = orangefs_writepage_locked(&folio->page, &wbc);
		folio_end_writeback(folio);
	}
	return r;
}

static ssize_t orangefs_direct_IO(struct kiocb *iocb,
    struct iov_iter *iter)
{
	/*
	 * Comment from original do_readv_writev:
	 * Common entry point for read/write/readv/writev
	 * This function will dispatch it to either the direct I/O
	 * or buffered I/O path depending on the mount options and/or
	 * augmented/extended metadata attached to the file.
	 * Note: File extended attributes override any mount options.
	 */
	struct file *file = iocb->ki_filp;
	loff_t pos = iocb->ki_pos;
	enum ORANGEFS_io_type type = iov_iter_rw(iter) == WRITE ?
	    ORANGEFS_IO_WRITE : ORANGEFS_IO_READ;
	loff_t *offset = &pos;
	struct inode *inode = file->f_mapping->host;
	struct orangefs_inode_s *orangefs_inode = ORANGEFS_I(inode);
	struct orangefs_khandle *handle = &orangefs_inode->refn.khandle;
	size_t count = iov_iter_count(iter);
	ssize_t total_count = 0;
	ssize_t ret = -EINVAL;
	int i = 0;

	gossip_debug(GOSSIP_FILE_DEBUG,
		     "%s-BEGIN(%pU): count(%d) after estimate_max_iovecs.\n",
		     __func__,
		     handle,
		     (int)count);

	if (type == ORANGEFS_IO_WRITE) {
		gossip_debug(GOSSIP_FILE_DEBUG,
			     "%s(%pU): proceeding with offset : %llu, "
			     "size %d\n",
			     __func__,
			     handle,
			     llu(*offset),
			     (int)count);
	}

	if (count == 0) {
		ret = 0;
		goto out;
	}

	while (iov_iter_count(iter)) {
		size_t each_count = iov_iter_count(iter);
		size_t amt_complete;

		i++;

		/* how much to transfer in this loop iteration */
		if (each_count > orangefs_bufmap_size_query())
			each_count = orangefs_bufmap_size_query();

		gossip_debug(GOSSIP_FILE_DEBUG,
			     "%s(%pU): size of each_count(%d)\n",
			     __func__,
			     handle,
			     (int)each_count);
		gossip_debug(GOSSIP_FILE_DEBUG,
			     "%s(%pU): BEFORE wait_for_io: offset is %d\n",
			     __func__,
			     handle,
			     (int)*offset);

		ret = wait_for_direct_io(type, inode, offset, iter,
		    each_count, 0, NULL, NULL, file);
		gossip_debug(GOSSIP_FILE_DEBUG,
			     "%s(%pU): return from wait_for_io:%d\n",
			     __func__,
			     handle,
			     (int)ret);

		if (ret < 0)
			goto out;

		*offset += ret;
		total_count += ret;
		amt_complete = ret;

		gossip_debug(GOSSIP_FILE_DEBUG,
			     "%s(%pU): AFTER wait_for_io: offset is %d\n",
			     __func__,
			     handle,
			     (int)*offset);

		/*
		 * if we got a short I/O operation,
		 * fall out and return what we got so far
		 */
		if (amt_complete < each_count)
			break;
	} /* end while */

out:
	if (total_count > 0)
		ret = total_count;
	if (ret > 0) {
		if (type == ORANGEFS_IO_READ) {
			file_accessed(file);
		} else {
			file_update_time(file);
			if (*offset > i_size_read(inode))
				i_size_write(inode, *offset);
		}
	}

	gossip_debug(GOSSIP_FILE_DEBUG,
		     "%s(%pU): Value(%d) returned.\n",
		     __func__,
		     handle,
		     (int)ret);

	return ret;
}

/** ORANGEFS2 implementation of address space operations */
static const struct address_space_operations orangefs_address_operations = {
	.writepage = orangefs_writepage,
	.readahead = orangefs_readahead,
	.read_folio = orangefs_read_folio,
	.writepages = orangefs_writepages,
	.dirty_folio = filemap_dirty_folio,
	.write_begin = orangefs_write_begin,
	.write_end = orangefs_write_end,
	.invalidate_folio = orangefs_invalidate_folio,
	.release_folio = orangefs_release_folio,
	.free_folio = orangefs_free_folio,
	.launder_folio = orangefs_launder_folio,
	.direct_IO = orangefs_direct_IO,
};

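/*
 * A shared mapping is about to be written. Make sure the folio carries a
 * write range owned by the current user, laundering any range owned by
 * someone else, and mark the folio dirty before the write proceeds.
 */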
vm_fault_t orangefs_page_mkwrite(struct vm_fault *vmf)
{
	struct folio *folio = page_folio(vmf->page);
	struct inode *inode = file_inode(vmf->vma->vm_file);
	struct orangefs_inode_s *orangefs_inode = ORANGEFS_I(inode);
	unsigned long *bitlock = &orangefs_inode->bitlock;
	vm_fault_t ret;
	struct orangefs_write_range *wr;

	sb_start_pagefault(inode->i_sb);

	if (wait_on_bit(bitlock, 1, TASK_KILLABLE)) {
		ret = VM_FAULT_RETRY;
		goto out;
	}

	folio_lock(folio);
	if (folio_test_dirty(folio) && !folio_test_private(folio)) {
		/*
		 * Should be impossible. If it happens, launder the folio
		 * since we don't know what's dirty. This will WARN in
		 * orangefs_writepage_locked.
		 */
		if (orangefs_launder_folio(folio)) {
			ret = VM_FAULT_LOCKED|VM_FAULT_RETRY;
			goto out;
		}
	}
	if (folio_test_private(folio)) {
		wr = folio_get_private(folio);
		if (uid_eq(wr->uid, current_fsuid()) &&
		    gid_eq(wr->gid, current_fsgid())) {
			wr->pos = page_offset(vmf->page);
			wr->len = PAGE_SIZE;
			goto okay;
		} else {
			if (orangefs_launder_folio(folio)) {
				ret = VM_FAULT_LOCKED|VM_FAULT_RETRY;
				goto out;
			}
		}
	}
	wr = kmalloc(sizeof *wr, GFP_KERNEL);
	if (!wr) {
		ret = VM_FAULT_LOCKED|VM_FAULT_RETRY;
		goto out;
	}
	wr->pos = page_offset(vmf->page);
	wr->len = PAGE_SIZE;
	wr->uid = current_fsuid();
	wr->gid = current_fsgid();
	folio_attach_private(folio, wr);
okay:

	file_update_time(vmf->vma->vm_file);
	if (folio->mapping != inode->i_mapping) {
		folio_unlock(folio);
		ret = VM_FAULT_LOCKED|VM_FAULT_NOPAGE;
		goto out;
	}

	/*
	 * We mark the folio dirty already here so that when freeze is in
	 * progress, we are guaranteed that writeback during freezing will
	 * see the dirty folio and writeprotect it again.
	 */
	folio_mark_dirty(folio);
	folio_wait_stable(folio);
	ret = VM_FAULT_LOCKED;
out:
	sb_end_pagefault(inode->i_sb);
	return ret;
}

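/*
 * Handle ATTR_SIZE: refresh the size from the server, apply the
 * truncate_setsize-equivalent updates to the page cache and i_size
 * locally, then send ORANGEFS_VFS_OP_TRUNCATE to the server.
 */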
static int orangefs_setattr_size(struct inode *inode, struct iattr *iattr)
{
	struct orangefs_inode_s *orangefs_inode = ORANGEFS_I(inode);
	struct orangefs_kernel_op_s *new_op;
	loff_t orig_size;
	int ret = -EINVAL;

	gossip_debug(GOSSIP_INODE_DEBUG,
		     "%s: %pU: Handle is %pU | fs_id %d | size is %llu\n",
		     __func__,
		     get_khandle_from_ino(inode),
		     &orangefs_inode->refn.khandle,
		     orangefs_inode->refn.fs_id,
		     iattr->ia_size);

	/* Ensure that we have an up-to-date size, so we know if it changed. */
	ret = orangefs_inode_getattr(inode, ORANGEFS_GETATTR_SIZE);
	if (ret == -ESTALE)
		ret = -EIO;
	if (ret) {
		gossip_err("%s: orangefs_inode_getattr failed, ret:%d:.\n",
		    __func__, ret);
		return ret;
	}
	orig_size = i_size_read(inode);

	/* This is truncate_setsize in a different order. */
	truncate_pagecache(inode, iattr->ia_size);
	i_size_write(inode, iattr->ia_size);
	if (iattr->ia_size > orig_size)
		pagecache_isize_extended(inode, orig_size, iattr->ia_size);

	new_op = op_alloc(ORANGEFS_VFS_OP_TRUNCATE);
	if (!new_op)
		return -ENOMEM;

	new_op->upcall.req.truncate.refn = orangefs_inode->refn;
	new_op->upcall.req.truncate.size = (__s64) iattr->ia_size;

	ret = service_operation(new_op,
	    __func__,
	    get_interruptible_flag(inode));

	/*
	 * the truncate has no downcall members to retrieve, but
	 * the status value tells us if it went through ok or not
	 */
	gossip_debug(GOSSIP_INODE_DEBUG, "%s: ret:%d:\n", __func__, ret);

	op_release(new_op);

	if (ret != 0)
		return ret;

	if (orig_size != i_size_read(inode))
		iattr->ia_valid |= ATTR_CTIME | ATTR_MTIME;

	return ret;
}

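/*
 * Apply attribute changes to the in-core inode and remember, in
 * attr_valid, which attributes still need to be written back to the
 * server. Pending attributes carry the uid/gid of the changing process;
 * if a different user already has changes pending, those are flushed
 * with write_inode_now() before the new ones are recorded.
 */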
int __orangefs_setattr(struct inode *inode, struct iattr *iattr)
{
	int ret;

	if (iattr->ia_valid & ATTR_MODE) {
		if (iattr->ia_mode & (S_ISVTX)) {
			if (is_root_handle(inode)) {
				/*
				 * allow sticky bit to be set on root (since
				 * it shows up that way by default anyhow),
				 * but don't show it to the server
				 */
				iattr->ia_mode -= S_ISVTX;
			} else {
				gossip_debug(GOSSIP_UTILS_DEBUG,
					     "User attempted to set sticky bit on non-root directory; returning EINVAL.\n");
				ret = -EINVAL;
				goto out;
			}
		}
		if (iattr->ia_mode & (S_ISUID)) {
			gossip_debug(GOSSIP_UTILS_DEBUG,
				     "Attempting to set setuid bit (not supported); returning EINVAL.\n");
			ret = -EINVAL;
			goto out;
		}
	}

	if (iattr->ia_valid & ATTR_SIZE) {
		ret = orangefs_setattr_size(inode, iattr);
		if (ret)
			goto out;
	}

again:
	spin_lock(&inode->i_lock);
	if (ORANGEFS_I(inode)->attr_valid) {
		if (uid_eq(ORANGEFS_I(inode)->attr_uid, current_fsuid()) &&
		    gid_eq(ORANGEFS_I(inode)->attr_gid, current_fsgid())) {
			ORANGEFS_I(inode)->attr_valid = iattr->ia_valid;
		} else {
			spin_unlock(&inode->i_lock);
			write_inode_now(inode, 1);
			goto again;
		}
	} else {
		ORANGEFS_I(inode)->attr_valid = iattr->ia_valid;
		ORANGEFS_I(inode)->attr_uid = current_fsuid();
		ORANGEFS_I(inode)->attr_gid = current_fsgid();
	}
	setattr_copy(&init_user_ns, inode, iattr);
	spin_unlock(&inode->i_lock);
	mark_inode_dirty(inode);

	if (iattr->ia_valid & ATTR_MODE)
		/* change mode on a file that has ACLs */
		ret = posix_acl_chmod(&init_user_ns, inode, inode->i_mode);

	ret = 0;
out:
	return ret;
}

/*
 * Change attributes of an object referenced by dentry.
 */
int orangefs_setattr(struct user_namespace *mnt_userns, struct dentry *dentry,
    struct iattr *iattr)
{
	int ret;

	gossip_debug(GOSSIP_INODE_DEBUG, "orangefs_setattr: called on %pd\n",
	    dentry);
	ret = setattr_prepare(&init_user_ns, dentry, iattr);
	if (ret)
		goto out;
	ret = __orangefs_setattr(d_inode(dentry), iattr);
	sync_inode_metadata(d_inode(dentry), 1);
out:
	gossip_debug(GOSSIP_INODE_DEBUG, "orangefs_setattr: returning %d\n",
	    ret);
	return ret;
}

/*
 * Obtain attributes of an object given a dentry
 */
int orangefs_getattr(struct user_namespace *mnt_userns, const struct path *path,
    struct kstat *stat, u32 request_mask, unsigned int flags)
{
	int ret;
	struct inode *inode = path->dentry->d_inode;

	gossip_debug(GOSSIP_INODE_DEBUG,
		     "orangefs_getattr: called on %pd mask %u\n",
		     path->dentry, request_mask);

	ret = orangefs_inode_getattr(inode,
	    request_mask & STATX_SIZE ? ORANGEFS_GETATTR_SIZE : 0);
	if (ret == 0) {
		generic_fillattr(&init_user_ns, inode, stat);

		/* override block size reported to stat */
		if (!(request_mask & STATX_SIZE))
			stat->result_mask &= ~STATX_SIZE;

		generic_fill_statx_attr(inode, stat);
	}
	return ret;
}

int orangefs_permission(struct user_namespace *mnt_userns,
			struct inode *inode, int mask)
{
	int ret;

	if (mask & MAY_NOT_BLOCK)
		return -ECHILD;

	gossip_debug(GOSSIP_INODE_DEBUG, "%s: refreshing\n", __func__);

	/* Make sure the permission (and other common attrs) are up to date. */
	ret = orangefs_inode_getattr(inode, 0);
	if (ret < 0)
		return ret;

	return generic_permission(&init_user_ns, inode, mask);
}

int orangefs_update_time(struct inode *inode, struct timespec64 *time, int flags)
{
	struct iattr iattr;

	gossip_debug(GOSSIP_INODE_DEBUG, "orangefs_update_time: %pU\n",
	    get_khandle_from_ino(inode));
	generic_update_time(inode, time, flags);
	memset(&iattr, 0, sizeof iattr);
	if (flags & S_ATIME)
		iattr.ia_valid |= ATTR_ATIME;
	if (flags & S_CTIME)
		iattr.ia_valid |= ATTR_CTIME;
	if (flags & S_MTIME)
		iattr.ia_valid |= ATTR_MTIME;
	return __orangefs_setattr(inode, &iattr);
}

static int orangefs_fileattr_get(struct dentry *dentry, struct fileattr *fa)
{
	u64 val = 0;
	int ret;

	gossip_debug(GOSSIP_FILE_DEBUG, "%s: called on %pd\n", __func__,
		     dentry);
	ret = orangefs_inode_getxattr(d_inode(dentry),
				      "user.pvfs2.meta_hint",
				      &val, sizeof(val));
	if (ret < 0 && ret != -ENODATA)
		return ret;

	gossip_debug(GOSSIP_FILE_DEBUG, "%s: flags=%u\n", __func__, (u32) val);

	fileattr_fill_flags(fa, val);
	return 0;
}

static int orangefs_fileattr_set(struct user_namespace *mnt_userns,
				 struct dentry *dentry, struct fileattr *fa)
{
	u64 val = 0;

	gossip_debug(GOSSIP_FILE_DEBUG, "%s: called on %pd\n", __func__,
		     dentry);
	/*
	 * ORANGEFS_MIRROR_FL is set internally when the mirroring mode is
	 * turned on for a file. The user is not allowed to turn on this bit,
	 * but the bit is present if the user first gets the flags and then
	 * updates the flags with some new settings. So, we ignore it in the
	 * following edit. bligon.
	 */
	if (fileattr_has_fsx(fa) ||
	    (fa->flags & ~(FS_IMMUTABLE_FL | FS_APPEND_FL | FS_NOATIME_FL | ORANGEFS_MIRROR_FL))) {
		gossip_err("%s: only supports setting one of FS_IMMUTABLE_FL|FS_APPEND_FL|FS_NOATIME_FL\n",
			   __func__);
		return -EOPNOTSUPP;
	}
	val = fa->flags;
	gossip_debug(GOSSIP_FILE_DEBUG, "%s: flags=%u\n", __func__, (u32) val);
	return orangefs_inode_setxattr(d_inode(dentry),
				       "user.pvfs2.meta_hint",
				       &val, sizeof(val), 0);
}

/* ORANGEFS2 implementation of VFS inode operations for files */
static const struct inode_operations orangefs_file_inode_operations = {
	.get_acl = orangefs_get_acl,
	.set_acl = orangefs_set_acl,
	.setattr = orangefs_setattr,
	.getattr = orangefs_getattr,
	.listxattr = orangefs_listxattr,
	.permission = orangefs_permission,
	.update_time = orangefs_update_time,
	.fileattr_get = orangefs_fileattr_get,
	.fileattr_set = orangefs_fileattr_set,
};

static int orangefs_init_iops(struct inode *inode)
{
	inode->i_mapping->a_ops = &orangefs_address_operations;

	switch (inode->i_mode & S_IFMT) {
	case S_IFREG:
		inode->i_op = &orangefs_file_inode_operations;
		inode->i_fop = &orangefs_file_operations;
		break;
	case S_IFLNK:
		inode->i_op = &orangefs_symlink_inode_operations;
		break;
	case S_IFDIR:
		inode->i_op = &orangefs_dir_inode_operations;
		inode->i_fop = &orangefs_dir_operations;
		break;
	default:
		gossip_debug(GOSSIP_INODE_DEBUG,
			     "%s: unsupported mode\n",
			     __func__);
		return -EINVAL;
	}

	return 0;
}

/*
 * Given an ORANGEFS object identifier (fsid, handle), convert it into
 * a ino_t type that will be used as a hash-index from where the handle will
 * be searched for in the VFS hash table of inodes.
 */
static inline ino_t orangefs_handle_hash(struct orangefs_object_kref *ref)
{
	if (!ref)
		return 0;
	return orangefs_khandle_to_ino(&(ref->khandle));
}

/*
 * Called to set up an inode from iget5_locked.
 */
static int orangefs_set_inode(struct inode *inode, void *data)
{
	struct orangefs_object_kref *ref = (struct orangefs_object_kref *) data;
	ORANGEFS_I(inode)->refn.fs_id = ref->fs_id;
	ORANGEFS_I(inode)->refn.khandle = ref->khandle;
	ORANGEFS_I(inode)->attr_valid = 0;
	hash_init(ORANGEFS_I(inode)->xattr_cache);
	ORANGEFS_I(inode)->mapping_time = jiffies - 1;
	ORANGEFS_I(inode)->bitlock = 0;
	return 0;
}

/*
 * Called to determine if handles match.
 */
static int orangefs_test_inode(struct inode *inode, void *data)
{
	struct orangefs_object_kref *ref = (struct orangefs_object_kref *) data;
	struct orangefs_inode_s *orangefs_inode = NULL;

	orangefs_inode = ORANGEFS_I(inode);
	/* test handles and fs_ids... */
	return (!ORANGEFS_khandle_cmp(&(orangefs_inode->refn.khandle),
	    &(ref->khandle)) &&
	    orangefs_inode->refn.fs_id == ref->fs_id);
}

/*
 * Front-end to lookup the inode-cache maintained by the VFS using the ORANGEFS
 * file handle.
 *
 * @sb: the file system super block instance.
 * @ref: The ORANGEFS object for which we are trying to locate an inode.
 */
struct inode *orangefs_iget(struct super_block *sb,
    struct orangefs_object_kref *ref)
{
	struct inode *inode = NULL;
	unsigned long hash;
	int error;

	hash = orangefs_handle_hash(ref);
	inode = iget5_locked(sb,
	    hash,
	    orangefs_test_inode,
	    orangefs_set_inode,
	    ref);

	if (!inode)
		return ERR_PTR(-ENOMEM);

	if (!(inode->i_state & I_NEW))
		return inode;

	error = orangefs_inode_getattr(inode, ORANGEFS_GETATTR_NEW);
	if (error) {
		iget_failed(inode);
		return ERR_PTR(error);
	}

	inode->i_ino = hash;	/* needed for stat etc */
	orangefs_init_iops(inode);
	unlock_new_inode(inode);

	gossip_debug(GOSSIP_INODE_DEBUG,
		     "iget handle %pU, fsid %d hash %ld i_ino %lu\n",
		     &ref->khandle,
		     ref->fs_id,
		     hash,
		     inode->i_ino);

	return inode;
}

/*
 * Allocate an inode for a newly created file and insert it into the inode hash.
 */
struct inode *orangefs_new_inode(struct super_block *sb, struct inode *dir,
    int mode, dev_t dev, struct orangefs_object_kref *ref)
{
	unsigned long hash = orangefs_handle_hash(ref);
	struct inode *inode;
	int error;

	gossip_debug(GOSSIP_INODE_DEBUG,
		     "%s:(sb is %p | MAJOR(dev)=%u | MINOR(dev)=%u mode=%o)\n",
		     __func__,
		     sb,
		     MAJOR(dev),
		     MINOR(dev),
		     mode);

	inode = new_inode(sb);
	if (!inode)
		return ERR_PTR(-ENOMEM);

	orangefs_set_inode(inode, ref);
	inode->i_ino = hash;	/* needed for stat etc */

	error = orangefs_inode_getattr(inode, ORANGEFS_GETATTR_NEW);
	if (error)
		goto out_iput;

	orangefs_init_iops(inode);
	inode->i_rdev = dev;

	error = insert_inode_locked4(inode, hash, orangefs_test_inode, ref);
	if (error < 0)
		goto out_iput;

	gossip_debug(GOSSIP_INODE_DEBUG,
		     "Initializing ACL's for inode %pU\n",
		     get_khandle_from_ino(inode));
	orangefs_init_acl(inode, dir);
	return inode;

out_iput:
	iput(inode);
	return ERR_PTR(error);
}