/* fs/xfs/xfs_iomap.c */

// SPDX-License-Identifier: GPL-2.0
/*
 * Copyright (c) 2000-2006 Silicon Graphics, Inc.
 * Copyright (c) 2016-2018 Christoph Hellwig.
 * All Rights Reserved.
 */
#include "xfs.h"
#include "xfs_fs.h"
#include "xfs_shared.h"
#include "xfs_format.h"
#include "xfs_log_format.h"
#include "xfs_trans_resv.h"
#include "xfs_mount.h"
#include "xfs_inode.h"
#include "xfs_btree.h"
#include "xfs_bmap_btree.h"
#include "xfs_bmap.h"
#include "xfs_bmap_util.h"
#include "xfs_errortag.h"
#include "xfs_error.h"
#include "xfs_trans.h"
#include "xfs_trans_space.h"
#include "xfs_inode_item.h"
#include "xfs_iomap.h"
#include "xfs_trace.h"
#include "xfs_quota.h"
#include "xfs_dquot_item.h"
#include "xfs_dquot.h"
#include "xfs_reflink.h"

#define XFS_ALLOC_ALIGN(mp, off) \
	(((off) >> mp->m_allocsize_log) << mp->m_allocsize_log)
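
/*
 * Complain about an extent mapping that claims to start at file system
 * block zero, which is never a valid data block, and report it as
 * corruption.
 */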
static int
xfs_alert_fsblock_zero(
	xfs_inode_t	*ip,
	xfs_bmbt_irec_t	*imap)
{
	xfs_alert_tag(ip->i_mount, XFS_PTAG_FSBLOCK_ZERO,
			"Access to block zero in inode %llu "
			"start_block: %llx start_off: %llx "
			"blkcnt: %llx extent-state: %x",
		(unsigned long long)ip->i_ino,
		(unsigned long long)imap->br_startblock,
		(unsigned long long)imap->br_startoff,
		(unsigned long long)imap->br_blockcount,
		imap->br_state);
	return -EFSCORRUPTED;
}
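
/*
 * Fill in an iomap from an on-disk extent mapping: translate holes,
 * delalloc and unwritten extents to the corresponding iomap types, and
 * convert file system block numbers to disk addresses.
 */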
int
xfs_bmbt_to_iomap(
	struct xfs_inode	*ip,
	struct iomap		*iomap,
	struct xfs_bmbt_irec	*imap,
	unsigned int		mapping_flags,
	u16			iomap_flags)
{
	struct xfs_mount	*mp = ip->i_mount;
	struct xfs_buftarg	*target = xfs_inode_buftarg(ip);

	if (unlikely(!xfs_valid_startblock(ip, imap->br_startblock)))
		return xfs_alert_fsblock_zero(ip, imap);

	if (imap->br_startblock == HOLESTARTBLOCK) {
		iomap->addr = IOMAP_NULL_ADDR;
		iomap->type = IOMAP_HOLE;
	} else if (imap->br_startblock == DELAYSTARTBLOCK ||
		   isnullstartblock(imap->br_startblock)) {
		iomap->addr = IOMAP_NULL_ADDR;
		iomap->type = IOMAP_DELALLOC;
	} else {
		iomap->addr = BBTOB(xfs_fsb_to_db(ip, imap->br_startblock));
		if (mapping_flags & IOMAP_DAX)
			iomap->addr += target->bt_dax_part_off;

		if (imap->br_state == XFS_EXT_UNWRITTEN)
			iomap->type = IOMAP_UNWRITTEN;
		else
			iomap->type = IOMAP_MAPPED;
	}
	iomap->offset = XFS_FSB_TO_B(mp, imap->br_startoff);
	iomap->length = XFS_FSB_TO_B(mp, imap->br_blockcount);
	if (mapping_flags & IOMAP_DAX)
		iomap->dax_dev = target->bt_daxdev;
	else
		iomap->bdev = target->bt_bdev;
	iomap->flags = iomap_flags;

	if (xfs_ipincount(ip) &&
	    (ip->i_itemp->ili_fsync_fields & ~XFS_ILOG_TIMESTAMP))
		iomap->flags |= IOMAP_F_DIRTY;
	return 0;
}
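
/*
 * Fill in an iomap describing a hole over the given file system block range.
 */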
static void
xfs_hole_to_iomap(
	struct xfs_inode	*ip,
	struct iomap		*iomap,
	xfs_fileoff_t		offset_fsb,
	xfs_fileoff_t		end_fsb)
{
	struct xfs_buftarg	*target = xfs_inode_buftarg(ip);

	iomap->addr = IOMAP_NULL_ADDR;
	iomap->type = IOMAP_HOLE;
	iomap->offset = XFS_FSB_TO_B(ip->i_mount, offset_fsb);
	iomap->length = XFS_FSB_TO_B(ip->i_mount, end_fsb - offset_fsb);
	iomap->bdev = target->bt_bdev;
	iomap->dax_dev = target->bt_daxdev;
}
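
/*
 * Return the last file system block of the byte range, capped at the
 * maximum supported file size.
 */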
static inline xfs_fileoff_t
xfs_iomap_end_fsb(
	struct xfs_mount	*mp,
	loff_t			offset,
	loff_t			count)
{
	ASSERT(offset <= mp->m_super->s_maxbytes);
	return min(XFS_B_TO_FSB(mp, offset + count),
		   XFS_B_TO_FSB(mp, mp->m_super->s_maxbytes));
}
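
/*
 * Work out the stripe alignment (in file system blocks) to apply to
 * allocations at EOF, or zero if no alignment should be used.
 */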
static xfs_extlen_t
xfs_eof_alignment(
	struct xfs_inode	*ip)
{
	struct xfs_mount	*mp = ip->i_mount;
	xfs_extlen_t		align = 0;

	if (!XFS_IS_REALTIME_INODE(ip)) {
		/*
		 * Round up the allocation request to a stripe unit
		 * (m_dalign) boundary if the file size is >= stripe unit
		 * size, and we are allocating past the allocation eof.
		 *
		 * If mounted with the "-o swalloc" option the alignment is
		 * increased from the stripe unit size to the stripe width.
		 */
		if (mp->m_swidth && xfs_has_swalloc(mp))
			align = mp->m_swidth;
		else if (mp->m_dalign)
			align = mp->m_dalign;

		if (align && XFS_ISIZE(ip) < XFS_FSB_TO_B(mp, align))
			align = 0;
	}

	return align;
}

/*
 * Check if last_fsb is outside the last extent, and if so grow it to the next
 * stripe unit boundary.
 */
xfs_fileoff_t
xfs_iomap_eof_align_last_fsb(
	struct xfs_inode	*ip,
	xfs_fileoff_t		end_fsb)
{
	struct xfs_ifork	*ifp = xfs_ifork_ptr(ip, XFS_DATA_FORK);
	xfs_extlen_t		extsz = xfs_get_extsz_hint(ip);
	xfs_extlen_t		align = xfs_eof_alignment(ip);
	struct xfs_bmbt_irec	irec;
	struct xfs_iext_cursor	icur;

	ASSERT(!xfs_need_iread_extents(ifp));

	/*
	 * Always round up the allocation request to the extent hint boundary.
	 */
	if (extsz) {
		if (align)
			align = roundup_64(align, extsz);
		else
			align = extsz;
	}

	if (align) {
		xfs_fileoff_t	aligned_end_fsb = roundup_64(end_fsb, align);

		xfs_iext_last(ifp, &icur);
		if (!xfs_iext_get_extent(ifp, &icur, &irec) ||
		    aligned_end_fsb >= irec.br_startoff + irec.br_blockcount)
			return aligned_end_fsb;
	}

	return end_fsb;
}
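
/*
 * Allocate real blocks in the data fork for a direct or DAX write.
 * Non-DAX allocations are made as unwritten extents; for DAX the blocks
 * are zeroed and mapped written inside the transaction instead, as the
 * comment below explains.
 */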
int
xfs_iomap_write_direct(
	struct xfs_inode	*ip,
	xfs_fileoff_t		offset_fsb,
	xfs_fileoff_t		count_fsb,
	unsigned int		flags,
	struct xfs_bmbt_irec	*imap)
{
	struct xfs_mount	*mp = ip->i_mount;
	struct xfs_trans	*tp;
	xfs_filblks_t		resaligned;
	int			nimaps;
	unsigned int		dblocks, rblocks;
	bool			force = false;
	int			error;
	int			bmapi_flags = XFS_BMAPI_PREALLOC;
	int			nr_exts = XFS_IEXT_ADD_NOSPLIT_CNT;

	ASSERT(count_fsb > 0);

	resaligned = xfs_aligned_fsb_count(offset_fsb, count_fsb,
					   xfs_get_extsz_hint(ip));
	if (unlikely(XFS_IS_REALTIME_INODE(ip))) {
		dblocks = XFS_DIOSTRAT_SPACE_RES(mp, 0);
		rblocks = resaligned;
	} else {
		dblocks = XFS_DIOSTRAT_SPACE_RES(mp, resaligned);
		rblocks = 0;
	}

	error = xfs_qm_dqattach(ip);
	if (error)
		return error;

	/*
	 * For DAX, we do not allocate unwritten extents, but instead we zero
	 * the block before we commit the transaction.  Ideally we'd like to do
	 * this outside the transaction context, but if we commit and then crash
	 * we may not have zeroed the blocks and this will be exposed on
	 * recovery of the allocation. Hence we must zero before commit.
	 *
	 * Further, if we are mapping unwritten extents here, we need to zero
	 * and convert them to written so that we don't need an unwritten extent
	 * callback for DAX. This also means that we need to be able to dip into
	 * the reserve block pool for bmbt block allocation if there is no space
	 * left but we need to do unwritten extent conversion.
	 */
	if (flags & IOMAP_DAX) {
		bmapi_flags = XFS_BMAPI_CONVERT | XFS_BMAPI_ZERO;
		if (imap->br_state == XFS_EXT_UNWRITTEN) {
			force = true;
			nr_exts = XFS_IEXT_WRITE_UNWRITTEN_CNT;
			dblocks = XFS_DIOSTRAT_SPACE_RES(mp, 0) << 1;
		}
	}

	error = xfs_trans_alloc_inode(ip, &M_RES(mp)->tr_write, dblocks,
			rblocks, force, &tp);
	if (error)
		return error;

	error = xfs_iext_count_may_overflow(ip, XFS_DATA_FORK, nr_exts);
	if (error == -EFBIG)
		error = xfs_iext_count_upgrade(tp, ip, nr_exts);
	if (error)
		goto out_trans_cancel;

	/*
	 * From this point onwards we overwrite the imap pointer that the
	 * caller gave to us.
	 */
	nimaps = 1;
	error = xfs_bmapi_write(tp, ip, offset_fsb, count_fsb, bmapi_flags, 0,
				imap, &nimaps);
	if (error)
		goto out_trans_cancel;

	/*
	 * Complete the transaction.
	 */
	error = xfs_trans_commit(tp);
	if (error)
		goto out_unlock;

	/*
	 * Copy any maps to caller's array and return any error.
	 */
	if (nimaps == 0) {
		error = -ENOSPC;
		goto out_unlock;
	}

	if (unlikely(!xfs_valid_startblock(ip, imap->br_startblock)))
		error = xfs_alert_fsblock_zero(ip, imap);

out_unlock:
	xfs_iunlock(ip, XFS_ILOCK_EXCL);
	return error;

out_trans_cancel:
	xfs_trans_cancel(tp);
	goto out_unlock;
}
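
/*
 * Decide whether speculative preallocation needs to be throttled against
 * this quota: only if the quota is active and the reservation is already
 * past the low watermark for preallocation.
 */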
STATIC bool
xfs_quota_need_throttle(
	struct xfs_inode	*ip,
	xfs_dqtype_t		type,
	xfs_fsblock_t		alloc_blocks)
{
	struct xfs_dquot	*dq = xfs_inode_dquot(ip, type);

	if (!dq || !xfs_this_quota_on(ip->i_mount, type))
		return false;

	/* no hi watermark, no throttle */
	if (!dq->q_prealloc_hi_wmark)
		return false;

	/* under the lo watermark, no throttle */
	if (dq->q_blk.reserved + alloc_blocks < dq->q_prealloc_lo_wmark)
		return false;

	return true;
}
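
/*
 * Work out how hard to throttle preallocation against this quota: clamp
 * the preallocation to the quota's remaining headroom and pick a throttle
 * shift that grows as the reservation approaches the high watermark.  The
 * caller later applies the shift as alloc_blocks >>= shift, so e.g. a
 * shift of 6 cuts the preallocation by a factor of 64.
 */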
STATIC void
xfs_quota_calc_throttle(
	struct xfs_inode	*ip,
	xfs_dqtype_t		type,
	xfs_fsblock_t		*qblocks,
	int			*qshift,
	int64_t			*qfreesp)
{
	struct xfs_dquot	*dq = xfs_inode_dquot(ip, type);
	int64_t			freesp;
	int			shift = 0;

	/* no dq, or over hi wmark, squash the prealloc completely */
	if (!dq || dq->q_blk.reserved >= dq->q_prealloc_hi_wmark) {
		*qblocks = 0;
		*qfreesp = 0;
		return;
	}

	freesp = dq->q_prealloc_hi_wmark - dq->q_blk.reserved;
	if (freesp < dq->q_low_space[XFS_QLOWSP_5_PCNT]) {
		shift = 2;
		if (freesp < dq->q_low_space[XFS_QLOWSP_3_PCNT])
			shift += 2;
		if (freesp < dq->q_low_space[XFS_QLOWSP_1_PCNT])
			shift += 2;
	}

	if (freesp < *qfreesp)
		*qfreesp = freesp;

	/* only overwrite the throttle values if we are more aggressive */
	if ((freesp >> shift) < (*qblocks >> *qshift)) {
		*qblocks = freesp;
		*qshift = shift;
	}
}

/*
 * If we don't have a user specified preallocation size, dynamically increase
 * the preallocation size as the size of the file grows.  Cap the maximum size
 * at a single extent or less if the filesystem is near full.  The closer the
 * filesystem is to being full, the smaller the maximum preallocation.
 */
STATIC xfs_fsblock_t
xfs_iomap_prealloc_size(
	struct xfs_inode	*ip,
	int			whichfork,
	loff_t			offset,
	loff_t			count,
	struct xfs_iext_cursor	*icur)
{
	struct xfs_iext_cursor	ncur = *icur;
	struct xfs_bmbt_irec	prev, got;
	struct xfs_mount	*mp = ip->i_mount;
	struct xfs_ifork	*ifp = xfs_ifork_ptr(ip, whichfork);
	xfs_fileoff_t		offset_fsb = XFS_B_TO_FSBT(mp, offset);
	int64_t			freesp;
	xfs_fsblock_t		qblocks;
	xfs_fsblock_t		alloc_blocks = 0;
	xfs_extlen_t		plen;
	int			shift = 0;
	int			qshift = 0;

	/*
	 * As an exception we don't do any preallocation at all if the file is
	 * smaller than the minimum preallocation and we are using the default
	 * dynamic preallocation scheme, as it is likely this is the only write
	 * to the file that is going to be done.
	 */
	if (XFS_ISIZE(ip) < XFS_FSB_TO_B(mp, mp->m_allocsize_blocks))
		return 0;

	/*
	 * Use the minimum preallocation size for small files or if we are
	 * writing right after a hole.
	 */
	if (XFS_ISIZE(ip) < XFS_FSB_TO_B(mp, mp->m_dalign) ||
	    !xfs_iext_prev_extent(ifp, &ncur, &prev) ||
	    prev.br_startoff + prev.br_blockcount < offset_fsb)
		return mp->m_allocsize_blocks;

	/*
	 * Take the size of the preceding data extents as the basis for the
	 * preallocation size. Note that we don't care if the previous extents
	 * are written or not.
	 */
	plen = prev.br_blockcount;
	while (xfs_iext_prev_extent(ifp, &ncur, &got)) {
		if (plen > XFS_MAX_BMBT_EXTLEN / 2 ||
		    isnullstartblock(got.br_startblock) ||
		    got.br_startoff + got.br_blockcount != prev.br_startoff ||
		    got.br_startblock + got.br_blockcount != prev.br_startblock)
			break;
		plen += got.br_blockcount;
		prev = got;
	}

	/*
	 * If the size of the extents is greater than half the maximum extent
	 * length, then use the current offset as the basis.  This ensures that
	 * for large files the preallocation size always extends to
	 * XFS_MAX_BMBT_EXTLEN rather than falling short due to things like
	 * stripe unit/width alignment of real extents.
	 */
	alloc_blocks = plen * 2;
	if (alloc_blocks > XFS_MAX_BMBT_EXTLEN)
		alloc_blocks = XFS_B_TO_FSB(mp, offset);
	qblocks = alloc_blocks;

	/*
	 * XFS_MAX_BMBT_EXTLEN is not a power of two value but we round the
	 * prealloc down to the nearest power of two value after throttling.
	 * To prevent the round down from unconditionally reducing the maximum
	 * supported prealloc size, we round up first, apply appropriate
	 * throttling, round down and cap the value to XFS_MAX_BMBT_EXTLEN.
	 */
	alloc_blocks = XFS_FILEOFF_MIN(roundup_pow_of_two(XFS_MAX_BMBT_EXTLEN),
				       alloc_blocks);

	freesp = percpu_counter_read_positive(&mp->m_fdblocks);
	if (freesp < mp->m_low_space[XFS_LOWSP_5_PCNT]) {
		shift = 2;
		if (freesp < mp->m_low_space[XFS_LOWSP_4_PCNT])
			shift++;
		if (freesp < mp->m_low_space[XFS_LOWSP_3_PCNT])
			shift++;
		if (freesp < mp->m_low_space[XFS_LOWSP_2_PCNT])
			shift++;
		if (freesp < mp->m_low_space[XFS_LOWSP_1_PCNT])
			shift++;
	}

	/*
	 * Check each quota to cap the prealloc size, provide a shift value to
	 * throttle with and adjust amount of available space.
	 */
	if (xfs_quota_need_throttle(ip, XFS_DQTYPE_USER, alloc_blocks))
		xfs_quota_calc_throttle(ip, XFS_DQTYPE_USER, &qblocks, &qshift,
					&freesp);
	if (xfs_quota_need_throttle(ip, XFS_DQTYPE_GROUP, alloc_blocks))
		xfs_quota_calc_throttle(ip, XFS_DQTYPE_GROUP, &qblocks, &qshift,
					&freesp);
	if (xfs_quota_need_throttle(ip, XFS_DQTYPE_PROJ, alloc_blocks))
		xfs_quota_calc_throttle(ip, XFS_DQTYPE_PROJ, &qblocks, &qshift,
					&freesp);

	/*
	 * The final prealloc size is set to the minimum of free space available
	 * in each of the quotas and the overall filesystem.
	 *
	 * The shift throttle value is set to the maximum value as determined by
	 * the global low free space values and per-quota low free space values.
	 */
	alloc_blocks = min(alloc_blocks, qblocks);
	shift = max(shift, qshift);

	if (shift)
		alloc_blocks >>= shift;
	/*
	 * rounddown_pow_of_two() returns an undefined result if we pass in
	 * alloc_blocks = 0.
	 */
	if (alloc_blocks)
		alloc_blocks = rounddown_pow_of_two(alloc_blocks);
	if (alloc_blocks > XFS_MAX_BMBT_EXTLEN)
		alloc_blocks = XFS_MAX_BMBT_EXTLEN;

	/*
	 * If we are still trying to allocate more space than is
	 * available, squash the prealloc hard. This can happen if we
	 * have a large file on a small filesystem and the above
	 * lowspace thresholds are smaller than XFS_MAX_BMBT_EXTLEN.
	 */
	while (alloc_blocks && alloc_blocks >= freesp)
		alloc_blocks >>= 4;
	if (alloc_blocks < mp->m_allocsize_blocks)
		alloc_blocks = mp->m_allocsize_blocks;
	trace_xfs_iomap_prealloc_size(ip, alloc_blocks, shift,
				      mp->m_allocsize_blocks);
	return alloc_blocks;
}
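
/*
 * Convert the unwritten extents backing the given byte range to written,
 * updating the on-disk file size if the conversion extends it.  Called
 * once data has actually been written into unwritten extents.
 */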
int
xfs_iomap_write_unwritten(
	xfs_inode_t	*ip,
	xfs_off_t	offset,
	xfs_off_t	count,
	bool		update_isize)
{
	xfs_mount_t	*mp = ip->i_mount;
	xfs_fileoff_t	offset_fsb;
	xfs_filblks_t	count_fsb;
	xfs_filblks_t	numblks_fsb;
	int		nimaps;
	xfs_trans_t	*tp;
	xfs_bmbt_irec_t	imap;
	struct inode	*inode = VFS_I(ip);
	xfs_fsize_t	i_size;
	uint		resblks;
	int		error;

	trace_xfs_unwritten_convert(ip, offset, count);

	offset_fsb = XFS_B_TO_FSBT(mp, offset);
	count_fsb = XFS_B_TO_FSB(mp, (xfs_ufsize_t)offset + count);
	count_fsb = (xfs_filblks_t)(count_fsb - offset_fsb);

	/*
	 * Reserve enough blocks in this transaction for two complete extent
	 * btree splits.  We may be converting the middle part of an unwritten
	 * extent and in this case we will insert two new extents in the btree
	 * each of which could cause a full split.
	 *
	 * This reservation amount will be used in the first call to
	 * xfs_bmbt_split() to select an AG with enough space to satisfy the
	 * rest of the operation.
	 */
	resblks = XFS_DIOSTRAT_SPACE_RES(mp, 0) << 1;

	/* Attach dquots so that bmbt splits are accounted correctly. */
	error = xfs_qm_dqattach(ip);
	if (error)
		return error;

	do {
		/*
		 * Set up a transaction to convert the range of extents
		 * from unwritten to real. Do allocations in a loop until
		 * we have covered the range passed in.
		 *
		 * Note that we can't risk recursing back into the filesystem
		 * here as we might be asked to write out the same inode that we
		 * complete here and might deadlock on the iolock.
		 */
		error = xfs_trans_alloc_inode(ip, &M_RES(mp)->tr_write, resblks,
				0, true, &tp);
		if (error)
			return error;

		error = xfs_iext_count_may_overflow(ip, XFS_DATA_FORK,
				XFS_IEXT_WRITE_UNWRITTEN_CNT);
		if (error == -EFBIG)
			error = xfs_iext_count_upgrade(tp, ip,
					XFS_IEXT_WRITE_UNWRITTEN_CNT);
		if (error)
			goto error_on_bmapi_transaction;

		/*
		 * Modify the unwritten extent state of the buffer.
		 */
		nimaps = 1;
		error = xfs_bmapi_write(tp, ip, offset_fsb, count_fsb,
					XFS_BMAPI_CONVERT, resblks, &imap,
					&nimaps);
		if (error)
			goto error_on_bmapi_transaction;

		/*
		 * Log the updated inode size as we go.  We have to be careful
		 * to only log it up to the actual write offset if it is
		 * halfway into a block.
		 */
		i_size = XFS_FSB_TO_B(mp, offset_fsb + count_fsb);
		if (i_size > offset + count)
			i_size = offset + count;
		if (update_isize && i_size > i_size_read(inode))
			i_size_write(inode, i_size);
		i_size = xfs_new_eof(ip, i_size);
		if (i_size) {
			ip->i_disk_size = i_size;
			xfs_trans_log_inode(tp, ip, XFS_ILOG_CORE);
		}

		error = xfs_trans_commit(tp);
		xfs_iunlock(ip, XFS_ILOCK_EXCL);
		if (error)
			return error;

		if (unlikely(!xfs_valid_startblock(ip, imap.br_startblock)))
			return xfs_alert_fsblock_zero(ip, &imap);

		if ((numblks_fsb = imap.br_blockcount) == 0) {
			/*
			 * The numblks_fsb value should always get
			 * smaller, otherwise the loop is stuck.
			 */
			ASSERT(imap.br_blockcount);
			break;
		}
		offset_fsb += numblks_fsb;
		count_fsb -= numblks_fsb;
	} while (count_fsb > 0);

	return 0;

error_on_bmapi_transaction:
	xfs_trans_cancel(tp);
	xfs_iunlock(ip, XFS_ILOCK_EXCL);
	return error;
}
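
/*
 * Decide whether a write needs a real block allocation: any hole or
 * delalloc extent does, and so does an unwritten extent for DAX, which
 * converts extents to written before copying in the data.
 */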
static inline bool
imap_needs_alloc(
	struct inode		*inode,
	unsigned		flags,
	struct xfs_bmbt_irec	*imap,
	int			nimaps)
{
	/* don't allocate blocks when just zeroing */
	if (flags & IOMAP_ZERO)
		return false;
	if (!nimaps ||
	    imap->br_startblock == HOLESTARTBLOCK ||
	    imap->br_startblock == DELAYSTARTBLOCK)
		return true;
	/* we convert unwritten extents before copying the data for DAX */
	if ((flags & IOMAP_DAX) && imap->br_state == XFS_EXT_UNWRITTEN)
		return true;
	return false;
}
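
/*
 * Decide whether a write to a COW-capable inode has to go through the COW
 * fork rather than overwrite the existing mapping in place.
 */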
static inline bool
imap_needs_cow(
	struct xfs_inode	*ip,
	unsigned int		flags,
	struct xfs_bmbt_irec	*imap,
	int			nimaps)
{
	if (!xfs_is_cow_inode(ip))
		return false;

	/* when zeroing we don't have to COW holes or unwritten extents */
	if (flags & IOMAP_ZERO) {
		if (!nimaps ||
		    imap->br_startblock == HOLESTARTBLOCK ||
		    imap->br_state == XFS_EXT_UNWRITTEN)
			return false;
	}

	return true;
}
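
/*
 * Take the ILOCK in the mode needed for the mapping operation, upgrading
 * to exclusive where COW processing or reading in the extent list
 * requires it, and honouring IOMAP_NOWAIT by never blocking on the lock.
 */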
static int
xfs_ilock_for_iomap(
	struct xfs_inode	*ip,
	unsigned		flags,
	unsigned		*lockmode)
{
	unsigned int		mode = *lockmode;
	bool			is_write = flags & (IOMAP_WRITE | IOMAP_ZERO);

	/*
	 * COW writes may allocate delalloc space or convert unwritten COW
	 * extents, so we need to make sure to take the lock exclusively here.
	 */
	if (xfs_is_cow_inode(ip) && is_write)
		mode = XFS_ILOCK_EXCL;

	/*
	 * Extents not yet cached require exclusive access, don't block.  This
	 * is an opencoded xfs_ilock_data_map_shared() call but with
	 * non-blocking behaviour.
	 */
	if (xfs_need_iread_extents(&ip->i_df)) {
		if (flags & IOMAP_NOWAIT)
			return -EAGAIN;
		mode = XFS_ILOCK_EXCL;
	}

relock:
	if (flags & IOMAP_NOWAIT) {
		if (!xfs_ilock_nowait(ip, mode))
			return -EAGAIN;
	} else {
		xfs_ilock(ip, mode);
	}

	/*
	 * The reflink iflag could have changed since the earlier unlocked
	 * check, so if we got ILOCK_SHARED for a write but we're now a
	 * reflink inode we have to switch to ILOCK_EXCL and relock.
	 */
	if (mode == XFS_ILOCK_SHARED && is_write && xfs_is_cow_inode(ip)) {
		xfs_iunlock(ip, mode);
		mode = XFS_ILOCK_EXCL;
		goto relock;
	}

	*lockmode = mode;
	return 0;
}

/*
 * Check that the imap we are going to return to the caller spans the entire
 * range that the caller requested for the IO.
 */
static bool
imap_spans_range(
	struct xfs_bmbt_irec	*imap,
	xfs_fileoff_t		offset_fsb,
	xfs_fileoff_t		end_fsb)
{
	if (imap->br_startoff > offset_fsb)
		return false;
	if (imap->br_startoff + imap->br_blockcount < end_fsb)
		return false;
	return true;
}
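
/*
 * iomap_begin method for direct I/O (and DAX) writes: map the requested
 * range, performing COW or real block allocation as needed, and return
 * -EAGAIN where IOMAP_NOWAIT or IOMAP_OVERWRITE_ONLY semantics cannot be
 * satisfied without blocking or allocating.
 */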
static int
xfs_direct_write_iomap_begin(
	struct inode		*inode,
	loff_t			offset,
	loff_t			length,
	unsigned		flags,
	struct iomap		*iomap,
	struct iomap		*srcmap)
{
	struct xfs_inode	*ip = XFS_I(inode);
	struct xfs_mount	*mp = ip->i_mount;
	struct xfs_bmbt_irec	imap, cmap;
	xfs_fileoff_t		offset_fsb = XFS_B_TO_FSBT(mp, offset);
	xfs_fileoff_t		end_fsb = xfs_iomap_end_fsb(mp, offset, length);
	int			nimaps = 1, error = 0;
	bool			shared = false;
	u16			iomap_flags = 0;
	unsigned int		lockmode = XFS_ILOCK_SHARED;

	ASSERT(flags & (IOMAP_WRITE | IOMAP_ZERO));

	if (xfs_is_shutdown(mp))
		return -EIO;

	/*
	 * Writes that span EOF might trigger an IO size update on completion,
	 * so consider them to be dirty for the purposes of O_DSYNC even if
	 * no other metadata changes are pending or have been made here.
	 */
	if (offset + length > i_size_read(inode))
		iomap_flags |= IOMAP_F_DIRTY;

	error = xfs_ilock_for_iomap(ip, flags, &lockmode);
	if (error)
		return error;

	error = xfs_bmapi_read(ip, offset_fsb, end_fsb - offset_fsb, &imap,
			       &nimaps, 0);
	if (error)
		goto out_unlock;

	if (imap_needs_cow(ip, flags, &imap, nimaps)) {
		error = -EAGAIN;
		if (flags & IOMAP_NOWAIT)
			goto out_unlock;

		/* may drop and re-acquire the ilock */
		error = xfs_reflink_allocate_cow(ip, &imap, &cmap, &shared,
				&lockmode,
				(flags & IOMAP_DIRECT) || IS_DAX(inode));
		if (error)
			goto out_unlock;
		if (shared)
			goto out_found_cow;
		end_fsb = imap.br_startoff + imap.br_blockcount;
		length = XFS_FSB_TO_B(mp, end_fsb) - offset;
	}

	if (imap_needs_alloc(inode, flags, &imap, nimaps))
		goto allocate_blocks;

	/*
	 * NOWAIT and OVERWRITE I/O need to span the entire requested I/O with
	 * a single map so that we avoid partial IO failures due to the rest of
	 * the I/O range not covered by this map triggering an EAGAIN condition
	 * when it is subsequently mapped and aborting the I/O.
	 */
	if (flags & (IOMAP_NOWAIT | IOMAP_OVERWRITE_ONLY)) {
		error = -EAGAIN;
		if (!imap_spans_range(&imap, offset_fsb, end_fsb))
			goto out_unlock;
	}

	/*
	 * For overwrite-only I/O, we cannot convert unwritten extents without
	 * requiring sub-block zeroing.  This can only be done under an
	 * exclusive IOLOCK, hence return -EAGAIN if this is not a written
	 * extent to tell the caller to try again.
	 */
	if (flags & IOMAP_OVERWRITE_ONLY) {
		error = -EAGAIN;
		if (imap.br_state != XFS_EXT_NORM &&
		    ((offset | length) & mp->m_blockmask))
			goto out_unlock;
	}

	xfs_iunlock(ip, lockmode);
	trace_xfs_iomap_found(ip, offset, length, XFS_DATA_FORK, &imap);
	return xfs_bmbt_to_iomap(ip, iomap, &imap, flags, iomap_flags);

allocate_blocks:
	error = -EAGAIN;
	if (flags & (IOMAP_NOWAIT | IOMAP_OVERWRITE_ONLY))
		goto out_unlock;

	/*
	 * We cap the maximum length we map to a sane size to keep the chunks
	 * of work done here somewhat symmetric with the work writeback does.
	 * This is a completely arbitrary number pulled out of thin air as a
	 * best guess for initial testing.
	 *
	 * Note that the value needs to be less than 32-bits wide until the
	 * lower level functions are updated.
	 */
	length = min_t(loff_t, length, 1024 * PAGE_SIZE);
	end_fsb = xfs_iomap_end_fsb(mp, offset, length);

	if (offset + length > XFS_ISIZE(ip))
		end_fsb = xfs_iomap_eof_align_last_fsb(ip, end_fsb);
	else if (nimaps && imap.br_startblock == HOLESTARTBLOCK)
		end_fsb = min(end_fsb, imap.br_startoff + imap.br_blockcount);
	xfs_iunlock(ip, lockmode);

	error = xfs_iomap_write_direct(ip, offset_fsb, end_fsb - offset_fsb,
			flags, &imap);
	if (error)
		return error;

	trace_xfs_iomap_alloc(ip, offset, length, XFS_DATA_FORK, &imap);
	return xfs_bmbt_to_iomap(ip, iomap, &imap, flags,
				 iomap_flags | IOMAP_F_NEW);

out_found_cow:
	xfs_iunlock(ip, lockmode);
	length = XFS_FSB_TO_B(mp, cmap.br_startoff + cmap.br_blockcount);
	trace_xfs_iomap_found(ip, offset, length - offset, XFS_COW_FORK, &cmap);
	if (imap.br_startblock != HOLESTARTBLOCK) {
		error = xfs_bmbt_to_iomap(ip, srcmap, &imap, flags, 0);
		if (error)
			return error;
	}
	return xfs_bmbt_to_iomap(ip, iomap, &cmap, flags, IOMAP_F_SHARED);

out_unlock:
	if (lockmode)
		xfs_iunlock(ip, lockmode);
	return error;
}

const struct iomap_ops xfs_direct_write_iomap_ops = {
	.iomap_begin		= xfs_direct_write_iomap_begin,
};
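
/*
 * iomap_end method for DAX writes: finish or clean up any copy-on-write
 * mapping set up by ->iomap_begin, cancelling the COW reservation if
 * nothing was written and committing it otherwise.
 */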
static int
xfs_dax_write_iomap_end(
	struct inode		*inode,
	loff_t			pos,
	loff_t			length,
	ssize_t			written,
	unsigned		flags,
	struct iomap		*iomap)
{
	struct xfs_inode	*ip = XFS_I(inode);

	if (!xfs_is_cow_inode(ip))
		return 0;

	if (!written) {
		xfs_reflink_cancel_cow_range(ip, pos, length, true);
		return 0;
	}

	return xfs_reflink_end_cow(ip, pos, written);
}

const struct iomap_ops xfs_dax_write_iomap_ops = {
	.iomap_begin	= xfs_direct_write_iomap_begin,
	.iomap_end	= xfs_dax_write_iomap_end,
};
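
/*
 * iomap_begin method for buffered writes: look up or create a delalloc
 * mapping for the requested range, with speculative preallocation beyond
 * EOF and COW fork reservations for shared extents.
 */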
static int
xfs_buffered_write_iomap_begin(
	struct inode		*inode,
	loff_t			offset,
	loff_t			count,
	unsigned		flags,
	struct iomap		*iomap,
	struct iomap		*srcmap)
{
	struct xfs_inode	*ip = XFS_I(inode);
	struct xfs_mount	*mp = ip->i_mount;
	xfs_fileoff_t		offset_fsb = XFS_B_TO_FSBT(mp, offset);
	xfs_fileoff_t		end_fsb = xfs_iomap_end_fsb(mp, offset, count);
	struct xfs_bmbt_irec	imap, cmap;
	struct xfs_iext_cursor	icur, ccur;
	xfs_fsblock_t		prealloc_blocks = 0;
	bool			eof = false, cow_eof = false, shared = false;
	int			allocfork = XFS_DATA_FORK;
	int			error = 0;
	unsigned int		lockmode = XFS_ILOCK_EXCL;

	if (xfs_is_shutdown(mp))
		return -EIO;

	/* we can't use delayed allocations when using extent size hints */
	if (xfs_get_extsz_hint(ip))
		return xfs_direct_write_iomap_begin(inode, offset, count,
				flags, iomap, srcmap);

	ASSERT(!XFS_IS_REALTIME_INODE(ip));

	error = xfs_ilock_for_iomap(ip, flags, &lockmode);
	if (error)
		return error;

	if (XFS_IS_CORRUPT(mp, !xfs_ifork_has_extents(&ip->i_df)) ||
	    XFS_TEST_ERROR(false, mp, XFS_ERRTAG_BMAPIFORMAT)) {
		error = -EFSCORRUPTED;
		goto out_unlock;
	}

	XFS_STATS_INC(mp, xs_blk_mapw);

	error = xfs_iread_extents(NULL, ip, XFS_DATA_FORK);
	if (error)
		goto out_unlock;

	/*
	 * Search the data fork first to look up our source mapping.  We
	 * always need the data fork map, as we have to return it to the
	 * iomap code so that the higher level write code can read data in to
	 * perform read-modify-write cycles for unaligned writes.
	 */
	eof = !xfs_iext_lookup_extent(ip, &ip->i_df, offset_fsb, &icur, &imap);
	if (eof)
		imap.br_startoff = end_fsb; /* fake hole until the end */

	/* We never need to allocate blocks for zeroing a hole. */
	if ((flags & IOMAP_ZERO) && imap.br_startoff > offset_fsb) {
		xfs_hole_to_iomap(ip, iomap, offset_fsb, imap.br_startoff);
		goto out_unlock;
	}

	/*
	 * Search the COW fork extent list even if we did not find a data fork
	 * extent.  This serves two purposes: first this implements the
	 * speculative preallocation using cowextsize, so that we also unshare
	 * blocks adjacent to shared blocks instead of just the shared blocks
	 * themselves.  Second the lookup in the extent list is generally
	 * faster than going out to the shared extent tree.
	 */
	if (xfs_is_cow_inode(ip)) {
		if (!ip->i_cowfp) {
			ASSERT(!xfs_is_reflink_inode(ip));
			xfs_ifork_init_cow(ip);
		}
		cow_eof = !xfs_iext_lookup_extent(ip, ip->i_cowfp, offset_fsb,
				&ccur, &cmap);
		if (!cow_eof && cmap.br_startoff <= offset_fsb) {
			trace_xfs_reflink_cow_found(ip, &cmap);
			goto found_cow;
		}
	}

	if (imap.br_startoff <= offset_fsb) {
		/*
		 * For reflink files we may need a delalloc reservation when
		 * overwriting shared extents.  This includes zeroing of
		 * existing extents that contain data.
		 */
		if (!xfs_is_cow_inode(ip) ||
		    ((flags & IOMAP_ZERO) && imap.br_state != XFS_EXT_NORM)) {
			trace_xfs_iomap_found(ip, offset, count, XFS_DATA_FORK,
					&imap);
			goto found_imap;
		}

		xfs_trim_extent(&imap, offset_fsb, end_fsb - offset_fsb);

		/* Trim the mapping to the nearest shared extent boundary. */
		error = xfs_bmap_trim_cow(ip, &imap, &shared);
		if (error)
			goto out_unlock;

		/* Not shared?  Just report the (potentially capped) extent. */
		if (!shared) {
			trace_xfs_iomap_found(ip, offset, count, XFS_DATA_FORK,
					&imap);
			goto found_imap;
		}

		/*
		 * Fork all the shared blocks from our write offset until the
		 * end of the extent.
		 */
		allocfork = XFS_COW_FORK;
		end_fsb = imap.br_startoff + imap.br_blockcount;
	} else {
		/*
		 * We cap the maximum length we map here to MAX_WRITEBACK_PAGES
		 * pages to keep the chunks of work done here somewhat
		 * symmetric with the work writeback does.  This is a
		 * completely arbitrary number pulled out of thin air.
		 *
		 * Note that the value needs to be less than 32-bits wide until
		 * the lower level functions are updated.
		 */
		count = min_t(loff_t, count, 1024 * PAGE_SIZE);
		end_fsb = xfs_iomap_end_fsb(mp, offset, count);

		if (xfs_is_always_cow_inode(ip))
			allocfork = XFS_COW_FORK;
	}

	error = xfs_qm_dqattach_locked(ip, false);
	if (error)
		goto out_unlock;

	if (eof && offset + count > XFS_ISIZE(ip)) {
		/*
		 * Determine the initial size of the preallocation.
		 * We clean up any extra preallocation when the file is closed.
		 */
		if (xfs_has_allocsize(mp))
			prealloc_blocks = mp->m_allocsize_blocks;
		else
			prealloc_blocks = xfs_iomap_prealloc_size(ip, allocfork,
						offset, count, &icur);
		if (prealloc_blocks) {
			xfs_extlen_t	align;
			xfs_off_t	end_offset;
			xfs_fileoff_t	p_end_fsb;

			end_offset = XFS_ALLOC_ALIGN(mp, offset + count - 1);
			p_end_fsb = XFS_B_TO_FSBT(mp, end_offset) +
					prealloc_blocks;

			align = xfs_eof_alignment(ip);
			if (align)
				p_end_fsb = roundup_64(p_end_fsb, align);

			p_end_fsb = min(p_end_fsb,
				XFS_B_TO_FSB(mp, mp->m_super->s_maxbytes));
			ASSERT(p_end_fsb > offset_fsb);
			prealloc_blocks = p_end_fsb - end_fsb;
		}
	}

retry:
	error = xfs_bmapi_reserve_delalloc(ip, allocfork, offset_fsb,
			end_fsb - offset_fsb, prealloc_blocks,
			allocfork == XFS_DATA_FORK ? &imap : &cmap,
			allocfork == XFS_DATA_FORK ? &icur : &ccur,
			allocfork == XFS_DATA_FORK ? eof : cow_eof);
	switch (error) {
	case 0:
		break;
	case -ENOSPC:
	case -EDQUOT:
		/* retry without any preallocation */
		trace_xfs_delalloc_enospc(ip, offset, count);
		if (prealloc_blocks) {
			prealloc_blocks = 0;
			goto retry;
		}
		fallthrough;
	default:
		goto out_unlock;
	}

	if (allocfork == XFS_COW_FORK) {
		trace_xfs_iomap_alloc(ip, offset, count, allocfork, &cmap);
		goto found_cow;
	}

	/*
	 * Flag newly allocated delalloc blocks with IOMAP_F_NEW so we punch
	 * them out if the write happens to fail.
	 */
	xfs_iunlock(ip, XFS_ILOCK_EXCL);
	trace_xfs_iomap_alloc(ip, offset, count, allocfork, &imap);
	return xfs_bmbt_to_iomap(ip, iomap, &imap, flags, IOMAP_F_NEW);

found_imap:
	xfs_iunlock(ip, XFS_ILOCK_EXCL);
	return xfs_bmbt_to_iomap(ip, iomap, &imap, flags, 0);

found_cow:
	xfs_iunlock(ip, XFS_ILOCK_EXCL);
	if (imap.br_startoff <= offset_fsb) {
		error = xfs_bmbt_to_iomap(ip, srcmap, &imap, flags, 0);
		if (error)
			return error;
		return xfs_bmbt_to_iomap(ip, iomap, &cmap, flags,
					 IOMAP_F_SHARED);
	}

	xfs_trim_extent(&cmap, offset_fsb, imap.br_startoff - offset_fsb);
	return xfs_bmbt_to_iomap(ip, iomap, &cmap, flags, 0);

out_unlock:
	xfs_iunlock(ip, XFS_ILOCK_EXCL);
	return error;
}
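
/*
 * iomap_end method for buffered writes: punch out delalloc blocks that
 * this write allocated (IOMAP_F_NEW) but then failed to cover with data,
 * so a short or failed write does not leave stray delalloc extents behind.
 */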
static int
xfs_buffered_write_iomap_end(
	struct inode		*inode,
	loff_t			offset,
	loff_t			length,
	ssize_t			written,
	unsigned		flags,
	struct iomap		*iomap)
{
	struct xfs_inode	*ip = XFS_I(inode);
	struct xfs_mount	*mp = ip->i_mount;
	xfs_fileoff_t		start_fsb;
	xfs_fileoff_t		end_fsb;
	int			error = 0;

	if (iomap->type != IOMAP_DELALLOC)
		return 0;

	/*
	 * Behave as if the write failed if drop writes is enabled. Set the NEW
	 * flag to force delalloc cleanup.
	 */
	if (XFS_TEST_ERROR(false, mp, XFS_ERRTAG_DROP_WRITES)) {
		iomap->flags |= IOMAP_F_NEW;
		written = 0;
	}

	/*
	 * start_fsb refers to the first unused block after a short write. If
	 * nothing was written, round offset down to point at the first block
	 * in the range.
	 */
	if (unlikely(!written))
		start_fsb = XFS_B_TO_FSBT(mp, offset);
	else
		start_fsb = XFS_B_TO_FSB(mp, offset + written);
	end_fsb = XFS_B_TO_FSB(mp, offset + length);

	/*
	 * Trim delalloc blocks if they were allocated by this write and we
	 * didn't manage to write the whole range.
	 *
	 * We don't need to care about racing delalloc as we hold i_mutex
	 * across the reserve/allocate/unreserve calls. If there are delalloc
	 * blocks in the range, they are ours.
	 */
	if ((iomap->flags & IOMAP_F_NEW) && start_fsb < end_fsb) {
		truncate_pagecache_range(VFS_I(ip), XFS_FSB_TO_B(mp, start_fsb),
					 XFS_FSB_TO_B(mp, end_fsb) - 1);

		error = xfs_bmap_punch_delalloc_range(ip, start_fsb,
				end_fsb - start_fsb);
		if (error && !xfs_is_shutdown(mp)) {
			xfs_alert(mp, "%s: unable to clean up ino %lld",
				__func__, ip->i_ino);
			return error;
		}
	}

	return 0;
}

const struct iomap_ops xfs_buffered_write_iomap_ops = {
	.iomap_begin		= xfs_buffered_write_iomap_begin,
	.iomap_end		= xfs_buffered_write_iomap_end,
};
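
/*
 * iomap_begin method for reads: a read-only mapping lookup that never
 * allocates, optionally trimming the mapping around shared extents so
 * that IOMAP_REPORT callers (e.g. FIEMAP) can flag them as shared.
 */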
static int
xfs_read_iomap_begin(
	struct inode		*inode,
	loff_t			offset,
	loff_t			length,
	unsigned		flags,
	struct iomap		*iomap,
	struct iomap		*srcmap)
{
	struct xfs_inode	*ip = XFS_I(inode);
	struct xfs_mount	*mp = ip->i_mount;
	struct xfs_bmbt_irec	imap;
	xfs_fileoff_t		offset_fsb = XFS_B_TO_FSBT(mp, offset);
	xfs_fileoff_t		end_fsb = xfs_iomap_end_fsb(mp, offset, length);
	int			nimaps = 1, error = 0;
	bool			shared = false;
	unsigned int		lockmode = XFS_ILOCK_SHARED;

	ASSERT(!(flags & (IOMAP_WRITE | IOMAP_ZERO)));

	if (xfs_is_shutdown(mp))
		return -EIO;

	error = xfs_ilock_for_iomap(ip, flags, &lockmode);
	if (error)
		return error;
	error = xfs_bmapi_read(ip, offset_fsb, end_fsb - offset_fsb, &imap,
			       &nimaps, 0);
	if (!error && (flags & IOMAP_REPORT))
		error = xfs_reflink_trim_around_shared(ip, &imap, &shared);
	xfs_iunlock(ip, lockmode);

	if (error)
		return error;
	trace_xfs_iomap_found(ip, offset, length, XFS_DATA_FORK, &imap);
	return xfs_bmbt_to_iomap(ip, iomap, &imap, flags,
				 shared ? IOMAP_F_SHARED : 0);
}

const struct iomap_ops xfs_read_iomap_ops = {
	.iomap_begin		= xfs_read_iomap_begin,
};
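
/*
 * iomap_begin method for SEEK_HOLE/SEEK_DATA: report data extents, holes,
 * and COW fork extents (as unwritten, so the page cache gets probed for
 * dirty data), each capped at the next extent boundary.
 */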
static int
xfs_seek_iomap_begin(
	struct inode		*inode,
	loff_t			offset,
	loff_t			length,
	unsigned		flags,
	struct iomap		*iomap,
	struct iomap		*srcmap)
{
	struct xfs_inode	*ip = XFS_I(inode);
	struct xfs_mount	*mp = ip->i_mount;
	xfs_fileoff_t		offset_fsb = XFS_B_TO_FSBT(mp, offset);
	xfs_fileoff_t		end_fsb = XFS_B_TO_FSB(mp, offset + length);
	xfs_fileoff_t		cow_fsb = NULLFILEOFF, data_fsb = NULLFILEOFF;
	struct xfs_iext_cursor	icur;
	struct xfs_bmbt_irec	imap, cmap;
	int			error = 0;
	unsigned		lockmode;

	if (xfs_is_shutdown(mp))
		return -EIO;

	lockmode = xfs_ilock_data_map_shared(ip);
	error = xfs_iread_extents(NULL, ip, XFS_DATA_FORK);
	if (error)
		goto out_unlock;

	if (xfs_iext_lookup_extent(ip, &ip->i_df, offset_fsb, &icur, &imap)) {
		/*
		 * If we found a data extent we are done.
		 */
		if (imap.br_startoff <= offset_fsb)
			goto done;
		data_fsb = imap.br_startoff;
	} else {
		/*
		 * Fake a hole until the end of the file.
		 */
		data_fsb = xfs_iomap_end_fsb(mp, offset, length);
	}

	/*
	 * If a COW fork extent covers the hole, report it - capped to the next
	 * data fork extent:
	 */
	if (xfs_inode_has_cow_data(ip) &&
	    xfs_iext_lookup_extent(ip, ip->i_cowfp, offset_fsb, &icur, &cmap))
		cow_fsb = cmap.br_startoff;
	if (cow_fsb != NULLFILEOFF && cow_fsb <= offset_fsb) {
		if (data_fsb < cow_fsb + cmap.br_blockcount)
			end_fsb = min(end_fsb, data_fsb);
		xfs_trim_extent(&cmap, offset_fsb, end_fsb);
		error = xfs_bmbt_to_iomap(ip, iomap, &cmap, flags,
				IOMAP_F_SHARED);
		/*
		 * This is a COW extent, so we must probe the page cache
		 * because there could be dirty page cache being backed
		 * by this extent.
		 */
		iomap->type = IOMAP_UNWRITTEN;
		goto out_unlock;
	}

	/*
	 * Else report a hole, capped to the next found data or COW extent.
	 */
	if (cow_fsb != NULLFILEOFF && cow_fsb < data_fsb)
		imap.br_blockcount = cow_fsb - offset_fsb;
	else
		imap.br_blockcount = data_fsb - offset_fsb;
	imap.br_startoff = offset_fsb;
	imap.br_startblock = HOLESTARTBLOCK;
	imap.br_state = XFS_EXT_NORM;
done:
	xfs_trim_extent(&imap, offset_fsb, end_fsb);
	error = xfs_bmbt_to_iomap(ip, iomap, &imap, flags, 0);
out_unlock:
	xfs_iunlock(ip, lockmode);
	return error;
}

const struct iomap_ops xfs_seek_iomap_ops = {
	.iomap_begin		= xfs_seek_iomap_begin,
};
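
/*
 * iomap_begin method for reporting extents of the attribute fork, e.g.
 * for FIEMAP with the FIEMAP_FLAG_XATTR flag set.
 */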
static int
xfs_xattr_iomap_begin(
	struct inode		*inode,
	loff_t			offset,
	loff_t			length,
	unsigned		flags,
	struct iomap		*iomap,
	struct iomap		*srcmap)
{
	struct xfs_inode	*ip = XFS_I(inode);
	struct xfs_mount	*mp = ip->i_mount;
	xfs_fileoff_t		offset_fsb = XFS_B_TO_FSBT(mp, offset);
	xfs_fileoff_t		end_fsb = XFS_B_TO_FSB(mp, offset + length);
	struct xfs_bmbt_irec	imap;
	int			nimaps = 1, error = 0;
	unsigned		lockmode;

	if (xfs_is_shutdown(mp))
		return -EIO;

	lockmode = xfs_ilock_attr_map_shared(ip);

	/* if there is no attribute fork or no extents, return ENOENT */
	if (!xfs_inode_has_attr_fork(ip) || !ip->i_af.if_nextents) {
		error = -ENOENT;
		goto out_unlock;
	}

	ASSERT(ip->i_af.if_format != XFS_DINODE_FMT_LOCAL);
	error = xfs_bmapi_read(ip, offset_fsb, end_fsb - offset_fsb, &imap,
			       &nimaps, XFS_BMAPI_ATTRFORK);
out_unlock:
	xfs_iunlock(ip, lockmode);

	if (error)
		return error;
	ASSERT(nimaps);
	return xfs_bmbt_to_iomap(ip, iomap, &imap, flags, 0);
}

const struct iomap_ops xfs_xattr_iomap_ops = {
	.iomap_begin		= xfs_xattr_iomap_begin,
};
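
/*
 * Zero a byte range of a file, going through the DAX or buffered write
 * iomap path as appropriate for the inode.
 */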
int
xfs_zero_range(
	struct xfs_inode	*ip,
	loff_t			pos,
	loff_t			len,
	bool			*did_zero)
{
	struct inode		*inode = VFS_I(ip);

	if (IS_DAX(inode))
		return dax_zero_range(inode, pos, len, did_zero,
				      &xfs_direct_write_iomap_ops);
	return iomap_zero_range(inode, pos, len, did_zero,
				&xfs_buffered_write_iomap_ops);
}

int
xfs_truncate_page(
	struct xfs_inode	*ip,
	loff_t			pos,
	bool			*did_zero)
{
	struct inode		*inode = VFS_I(ip);

	if (IS_DAX(inode))
		return dax_truncate_page(inode, pos, did_zero,
					 &xfs_direct_write_iomap_ops);
	return iomap_truncate_page(inode, pos, did_zero,
				   &xfs_buffered_write_iomap_ops);
}