// SPDX-License-Identifier: GPL-2.0

/*
 * fs/ext4/fast_commit.c
 *
 * Written by Harshad Shirwadkar <[email protected]>
 *
 * Ext4 fast commits routines.
 */
#include "ext4.h"
#include "ext4_jbd2.h"
#include "ext4_extents.h"
#include "mballoc.h"

/*
 * Ext4 Fast Commits
 * -----------------
 *
 * Ext4 fast commits implement fine grained journalling for Ext4.
 *
 * Fast commits are organized as a log of tag-length-value (TLV) structs. (See
 * struct ext4_fc_tl). Each TLV contains some delta that is replayed TLV by
 * TLV during the recovery phase. For the scenarios for which we currently
 * don't have replay code, fast commit falls back to full commits.
 * Fast commits record deltas in one of the following three categories.
 *
 * (A) Directory entry updates:
 *
 * - EXT4_FC_TAG_UNLINK		- records directory entry unlink
 * - EXT4_FC_TAG_LINK		- records directory entry link
 * - EXT4_FC_TAG_CREAT		- records inode and directory entry creation
 *
 * (B) File specific data range updates:
 *
 * - EXT4_FC_TAG_ADD_RANGE	- records addition of new blocks to an inode
 * - EXT4_FC_TAG_DEL_RANGE	- records deletion of blocks from an inode
 *
 * (C) Inode metadata (mtime / ctime etc):
 *
 * - EXT4_FC_TAG_INODE		- record the inode that should be replayed
 *				  during recovery. Note that iblocks field is
 *				  not replayed and instead derived during
 *				  replay.
 * Commit Operation
 * ----------------
 * With fast commits, we maintain all the directory entry operations in the
 * order in which they are issued in an in-memory queue. This queue is flushed
 * to disk during the commit operation. We also maintain a list of inodes
 * that need to be committed during a fast commit in another in memory queue of
 * inodes. During the commit operation, we commit in the following order:
 *
 * [1] Lock inodes for any further data updates by setting COMMITTING state
 * [2] Submit data buffers of all the inodes
 * [3] Wait for [2] to complete
 * [4] Commit all the directory entry updates in the fast commit space
 * [5] Commit all the changed inode structures
 * [6] Write tail tag (this tag ensures the atomicity, please read the following
 *     section for more details).
 * [7] Wait for [4], [5] and [6] to complete.
 *
 * All the inode updates must call ext4_fc_start_update() before starting an
 * update. If such an ongoing update is present, fast commit waits for it to
 * complete. The completion of such an update is marked by
 * ext4_fc_stop_update().
 *
 * Fast Commit Ineligibility
 * -------------------------
 *
 * Not all operations are supported by fast commits today (e.g. extended
 * attributes). Fast commit ineligibility is marked by calling
 * ext4_fc_mark_ineligible(): this makes the next fast commit operation fall
 * back to a full commit.
 *
 * Atomicity of commits
 * --------------------
 * In order to guarantee atomicity during the commit operation, fast commit
 * uses the "EXT4_FC_TAG_TAIL" tag that marks a fast commit as complete. The
 * tail tag contains the CRC of the contents and the TID of the transaction
 * after which this fast commit should be applied. Recovery code replays fast
 * commit logs only if there's at least 1 valid tail present. For every fast
 * commit operation, there is 1 tail. This means, we may end up with multiple
 * tails in the fast commit space. Here's an example:
 *
 * - Create a new file A and remove existing file B
 * - fsync()
 * - Append contents to file A
 * - Truncate file A
 * - fsync()
 *
 * The fast commit space at the end of above operations would look like this:
 *      [HEAD] [CREAT A] [UNLINK B] [TAIL] [ADD_RANGE A] [DEL_RANGE A] [TAIL]
 *             |<---- Fast Commit 1 ---->|<------ Fast Commit 2 ------>|
 *
 * Replay code should thus check for all the valid tails in the FC area.
 *
 * Fast Commit Replay Idempotence
 * ------------------------------
 *
 * Fast commit tags are idempotent in nature provided the recovery code follows
 * certain rules. The guiding principle that the commit path follows while
 * committing is that it stores the result of a particular operation instead of
 * storing the procedure.
 *
 * Let's consider this rename operation: 'mv /a /b'. Let's assume dirent '/a'
 * was associated with inode 10. During fast commit, instead of storing this
 * operation as a procedure "rename a to b", we store the resulting file system
 * state as a "series" of outcomes:
 *
 * - Link dirent b to inode 10
 * - Unlink dirent a
 * - Inode <10> with valid refcount
 *
 * Now when recovery code runs, it needs to "enforce" this state on the file
 * system. This is what guarantees idempotence of fast commit replay.
 *
 * Let's take an example of a procedure that is not idempotent and see how fast
 * commits make it idempotent. Consider the following sequence of operations:
 *
 *     rm A;    mv B A;    read A
 *      (x)      (y)        (z)
 *
 * (x), (y) and (z) are the points at which we can crash. If we store this
 * sequence of operations as is then the replay is not idempotent. Let's say
 * while in replay, we crash at (z). During the second replay, file A (which was
 * actually created as a result of the "mv B A" operation) would get deleted.
 * Thus, a file named A would be absent when we try to read A. So, this sequence
 * of operations is not idempotent. However, as mentioned above, instead of
 * storing the procedure fast commits store the outcome of each procedure. Thus
 * the fast commit log for above procedure would be as follows:
 *
 * (Let's assume dirent A was linked to inode 10 and dirent B was linked to
 * inode 11 before the replay)
 *
 *    [Unlink A]   [Link A to inode 11]   [Unlink B]   [Inode 11]
 *       (w)               (x)               (y)          (z)
 *
 * If we crash at (z), we will have file A linked to inode 11. During the second
 * replay, we will remove file A (inode 11). But we will create it back and make
 * it point to inode 11. We won't find B, so we'll just skip that step. At this
 * point, the refcount for inode 11 is not reliable, but that gets fixed by the
 * replay of the last inode 11 tag. Crashes at points (w), (x) and (y) get
 * handled similarly. Thus, by converting a non-idempotent procedure into a
 * series of idempotent outcomes, fast commits ensure idempotence during the
 * replay.
 *
 * TODOs
 * -----
 *
 * 0) Fast commit replay path hardening: Fast commit replay code should use
 *    journal handles to make sure all the updates it does during the replay
 *    path are atomic. With that, if we crash during fast commit replay, after
 *    trying to do recovery again, we will find a file system where the fast
 *    commit area is invalid (because a new full commit would be found). In
 *    order to deal with that, the fast commit replay code should ensure that
 *    the "FC_REPLAY" superblock state is persisted before starting the replay,
 *    so that after the crash, fast commit recovery code can look at that flag
 *    and perform fast commit recovery even if that area is invalidated by
 *    later full commits.
 *
 * 1) Fast commit's commit path locks the entire file system during a fast
 *    commit. This has a significant performance penalty. Instead of that, we
 *    should use ext4_fc_start/stop_update functions to start inode level
 *    updates from ext4_journal_start/stop. Once we do that we can drop file
 *    system locking during the commit path.
 *
 * 2) Handle more ineligible cases.
 */
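/*
 * Illustrative sketch (an assumption, not code from this file): walking the
 * TLV log described above amounts to reading one ext4_fc_tl header at a time
 * and skipping fc_len value bytes, roughly:
 *
 *	u8 *cur = start, *end = start + bsize;
 *
 *	while (cur + EXT4_FC_TAG_BASE_LEN <= end) {
 *		struct ext4_fc_tl tl;
 *
 *		memcpy(&tl, cur, EXT4_FC_TAG_BASE_LEN);
 *		// dispatch on le16_to_cpu(tl.fc_tag): HEAD, CREAT, UNLINK,
 *		// ADD_RANGE, ..., ending the current commit at a TAIL tag
 *		cur += EXT4_FC_TAG_BASE_LEN + le16_to_cpu(tl.fc_len);
 *	}
 *
 * The real iteration lives in the replay scan later in this file.
 */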
#include <trace/events/ext4.h>

static struct kmem_cache *ext4_fc_dentry_cachep;

static void ext4_end_buffer_io_sync(struct buffer_head *bh, int uptodate)
{
	BUFFER_TRACE(bh, "");
	if (uptodate) {
		ext4_debug("%s: Block %lld up-to-date",
			   __func__, bh->b_blocknr);
		set_buffer_uptodate(bh);
	} else {
		ext4_debug("%s: Block %lld not up-to-date",
			   __func__, bh->b_blocknr);
		clear_buffer_uptodate(bh);
	}

	unlock_buffer(bh);
}

static inline void ext4_fc_reset_inode(struct inode *inode)
{
	struct ext4_inode_info *ei = EXT4_I(inode);

	ei->i_fc_lblk_start = 0;
	ei->i_fc_lblk_len = 0;
}

void ext4_fc_init_inode(struct inode *inode)
{
	struct ext4_inode_info *ei = EXT4_I(inode);

	ext4_fc_reset_inode(inode);
	ext4_clear_inode_state(inode, EXT4_STATE_FC_COMMITTING);
	INIT_LIST_HEAD(&ei->i_fc_list);
	INIT_LIST_HEAD(&ei->i_fc_dilist);
	init_waitqueue_head(&ei->i_fc_wait);
	atomic_set(&ei->i_fc_updates, 0);
}

/* This function must be called with sbi->s_fc_lock held. */
static void ext4_fc_wait_committing_inode(struct inode *inode)
__releases(&EXT4_SB(inode->i_sb)->s_fc_lock)
{
	wait_queue_head_t *wq;
	struct ext4_inode_info *ei = EXT4_I(inode);

#if (BITS_PER_LONG < 64)
	DEFINE_WAIT_BIT(wait, &ei->i_state_flags,
			EXT4_STATE_FC_COMMITTING);
	wq = bit_waitqueue(&ei->i_state_flags,
			   EXT4_STATE_FC_COMMITTING);
#else
	DEFINE_WAIT_BIT(wait, &ei->i_flags,
			EXT4_STATE_FC_COMMITTING);
	wq = bit_waitqueue(&ei->i_flags,
			   EXT4_STATE_FC_COMMITTING);
#endif
	lockdep_assert_held(&EXT4_SB(inode->i_sb)->s_fc_lock);
	prepare_to_wait(wq, &wait.wq_entry, TASK_UNINTERRUPTIBLE);
	spin_unlock(&EXT4_SB(inode->i_sb)->s_fc_lock);
	schedule();
	finish_wait(wq, &wait.wq_entry);
}

static bool ext4_fc_disabled(struct super_block *sb)
{
	return (!test_opt2(sb, JOURNAL_FAST_COMMIT) ||
		(EXT4_SB(sb)->s_mount_state & EXT4_FC_REPLAY));
}

/*
 * Inform Ext4's fast commit subsystem about the start of an inode update.
 *
 * This function is called by the high level VFS callbacks before
 * performing any inode update. This function blocks if there's an ongoing
 * fast commit on the inode in question.
 */
void ext4_fc_start_update(struct inode *inode)
{
	struct ext4_inode_info *ei = EXT4_I(inode);

	if (ext4_fc_disabled(inode->i_sb))
		return;

restart:
	spin_lock(&EXT4_SB(inode->i_sb)->s_fc_lock);
	if (list_empty(&ei->i_fc_list))
		goto out;

	if (ext4_test_inode_state(inode, EXT4_STATE_FC_COMMITTING)) {
		ext4_fc_wait_committing_inode(inode);
		goto restart;
	}
out:
	atomic_inc(&ei->i_fc_updates);
	spin_unlock(&EXT4_SB(inode->i_sb)->s_fc_lock);
}

/*
 * Stop an inode update and wake up waiting fast commits if any.
 */
void ext4_fc_stop_update(struct inode *inode)
{
	struct ext4_inode_info *ei = EXT4_I(inode);

	if (ext4_fc_disabled(inode->i_sb))
		return;

	if (atomic_dec_and_test(&ei->i_fc_updates))
		wake_up_all(&ei->i_fc_wait);
}
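/*
 * Illustrative sketch (an assumption, not code from this file): a
 * hypothetical inode update path would bracket its modification with the
 * pair above, e.g.:
 *
 *	ext4_fc_start_update(inode);	// blocks while a fast commit is
 *					// committing this inode
 *	// ... modify the inode / its in-core state ...
 *	ext4_fc_stop_update(inode);	// wakes a fast commit waiting on
 *					// i_fc_updates to drop to zero
 *
 * In the kernel proper this pairing is driven from the VFS entry points;
 * see also the ext4_journal_start/stop integration mentioned in the TODOs.
 */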
/*
 * Remove inode from fast commit list. If the inode is being committed
 * we wait until inode commit is done.
 */
void ext4_fc_del(struct inode *inode)
{
	struct ext4_inode_info *ei = EXT4_I(inode);
	struct ext4_sb_info *sbi = EXT4_SB(inode->i_sb);
	struct ext4_fc_dentry_update *fc_dentry;

	if (ext4_fc_disabled(inode->i_sb))
		return;

restart:
	spin_lock(&EXT4_SB(inode->i_sb)->s_fc_lock);
	if (list_empty(&ei->i_fc_list) && list_empty(&ei->i_fc_dilist)) {
		spin_unlock(&EXT4_SB(inode->i_sb)->s_fc_lock);
		return;
	}

	if (ext4_test_inode_state(inode, EXT4_STATE_FC_COMMITTING)) {
		ext4_fc_wait_committing_inode(inode);
		goto restart;
	}

	if (!list_empty(&ei->i_fc_list))
		list_del_init(&ei->i_fc_list);

	/*
	 * Since this inode is getting removed, let's also remove all FC
	 * dentry create references, since there is no need to log them anyway.
	 */
	if (list_empty(&ei->i_fc_dilist)) {
		spin_unlock(&sbi->s_fc_lock);
		return;
	}

	fc_dentry = list_first_entry(&ei->i_fc_dilist,
				     struct ext4_fc_dentry_update, fcd_dilist);
	WARN_ON(fc_dentry->fcd_op != EXT4_FC_TAG_CREAT);
	list_del_init(&fc_dentry->fcd_list);
	list_del_init(&fc_dentry->fcd_dilist);

	WARN_ON(!list_empty(&ei->i_fc_dilist));
	spin_unlock(&sbi->s_fc_lock);

	if (fc_dentry->fcd_name.name &&
	    fc_dentry->fcd_name.len > DNAME_INLINE_LEN)
		kfree(fc_dentry->fcd_name.name);
	kmem_cache_free(ext4_fc_dentry_cachep, fc_dentry);
}

/*
 * Mark file system as fast commit ineligible, and record latest
 * ineligible transaction tid. This means until the recorded
 * transaction, commit operation would result in a full jbd2 commit.
 */
void ext4_fc_mark_ineligible(struct super_block *sb, int reason, handle_t *handle)
{
	struct ext4_sb_info *sbi = EXT4_SB(sb);
	tid_t tid;

	if (ext4_fc_disabled(sb))
		return;

	ext4_set_mount_flag(sb, EXT4_MF_FC_INELIGIBLE);
	if (handle && !IS_ERR(handle))
		tid = handle->h_transaction->t_tid;
	else {
		read_lock(&sbi->s_journal->j_state_lock);
		tid = sbi->s_journal->j_running_transaction ?
				sbi->s_journal->j_running_transaction->t_tid : 0;
		read_unlock(&sbi->s_journal->j_state_lock);
	}
	spin_lock(&sbi->s_fc_lock);
	if (sbi->s_fc_ineligible_tid < tid)
		sbi->s_fc_ineligible_tid = tid;
	spin_unlock(&sbi->s_fc_lock);
	WARN_ON(reason >= EXT4_FC_REASON_MAX);
	sbi->s_fc_stats.fc_ineligible_reason_count[reason]++;
}
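/*
 * Illustrative sketch (an assumption, not a call site from this file): an
 * operation that fast commits cannot replay yet, such as an extended
 * attribute update, would punt to a full commit roughly like this:
 *
 *	ext4_fc_mark_ineligible(sb, EXT4_FC_REASON_XATTR, handle);
 *
 * The actual call sites live in the xattr and other ext4 paths; the reason
 * code just feeds the per-reason counters updated above.
 */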
/*
 * Generic fast commit tracking function. If this is the first time we are
 * called after a full commit, we initialize fast commit fields and then call
 * __fc_track_fn() with update = 0. If we have already been called after a full
 * commit, we pass update = 1. Based on that, the track function can determine
 * if it needs to track a field for the first time or if it needs to just
 * update the previously tracked value.
 *
 * If enqueue is set, this function enqueues the inode in fast commit list.
 */
static int ext4_fc_track_template(
	handle_t *handle, struct inode *inode,
	int (*__fc_track_fn)(struct inode *, void *, bool),
	void *args, int enqueue)
{
	bool update = false;
	struct ext4_inode_info *ei = EXT4_I(inode);
	struct ext4_sb_info *sbi = EXT4_SB(inode->i_sb);
	tid_t tid = 0;
	int ret;

	tid = handle->h_transaction->t_tid;
	mutex_lock(&ei->i_fc_lock);
	if (tid == ei->i_sync_tid) {
		update = true;
	} else {
		ext4_fc_reset_inode(inode);
		ei->i_sync_tid = tid;
	}
	ret = __fc_track_fn(inode, args, update);
	mutex_unlock(&ei->i_fc_lock);

	if (!enqueue)
		return ret;

	spin_lock(&sbi->s_fc_lock);
	if (list_empty(&EXT4_I(inode)->i_fc_list))
		list_add_tail(&EXT4_I(inode)->i_fc_list,
				(sbi->s_journal->j_flags & JBD2_FULL_COMMIT_ONGOING ||
				 sbi->s_journal->j_flags & JBD2_FAST_COMMIT_ONGOING) ?
				&sbi->s_fc_q[FC_Q_STAGING] :
				&sbi->s_fc_q[FC_Q_MAIN]);
	spin_unlock(&sbi->s_fc_lock);

	return ret;
}

struct __track_dentry_update_args {
	struct dentry *dentry;
	int op;
};

/* __track_fn for directory entry updates. Called with ei->i_fc_lock. */
static int __track_dentry_update(struct inode *inode, void *arg, bool update)
{
	struct ext4_fc_dentry_update *node;
	struct ext4_inode_info *ei = EXT4_I(inode);
	struct __track_dentry_update_args *dentry_update =
		(struct __track_dentry_update_args *)arg;
	struct dentry *dentry = dentry_update->dentry;
	struct inode *dir = dentry->d_parent->d_inode;
	struct super_block *sb = inode->i_sb;
	struct ext4_sb_info *sbi = EXT4_SB(sb);

	mutex_unlock(&ei->i_fc_lock);

	if (IS_ENCRYPTED(dir)) {
		ext4_fc_mark_ineligible(sb, EXT4_FC_REASON_ENCRYPTED_FILENAME,
					NULL);
		mutex_lock(&ei->i_fc_lock);
		return -EOPNOTSUPP;
	}

	node = kmem_cache_alloc(ext4_fc_dentry_cachep, GFP_NOFS);
	if (!node) {
		ext4_fc_mark_ineligible(sb, EXT4_FC_REASON_NOMEM, NULL);
		mutex_lock(&ei->i_fc_lock);
		return -ENOMEM;
	}

	node->fcd_op = dentry_update->op;
	node->fcd_parent = dir->i_ino;
	node->fcd_ino = inode->i_ino;
	if (dentry->d_name.len > DNAME_INLINE_LEN) {
		node->fcd_name.name = kmalloc(dentry->d_name.len, GFP_NOFS);
		if (!node->fcd_name.name) {
			kmem_cache_free(ext4_fc_dentry_cachep, node);
			ext4_fc_mark_ineligible(sb, EXT4_FC_REASON_NOMEM, NULL);
			mutex_lock(&ei->i_fc_lock);
			return -ENOMEM;
		}
		memcpy((u8 *)node->fcd_name.name, dentry->d_name.name,
			dentry->d_name.len);
	} else {
		memcpy(node->fcd_iname, dentry->d_name.name,
			dentry->d_name.len);
		node->fcd_name.name = node->fcd_iname;
	}
	node->fcd_name.len = dentry->d_name.len;
	INIT_LIST_HEAD(&node->fcd_dilist);
	spin_lock(&sbi->s_fc_lock);
	if (sbi->s_journal->j_flags & JBD2_FULL_COMMIT_ONGOING ||
	    sbi->s_journal->j_flags & JBD2_FAST_COMMIT_ONGOING)
		list_add_tail(&node->fcd_list,
				&sbi->s_fc_dentry_q[FC_Q_STAGING]);
	else
		list_add_tail(&node->fcd_list, &sbi->s_fc_dentry_q[FC_Q_MAIN]);

	/*
	 * This helps us keep track of all fc_dentry updates that are part of
	 * this ext4 inode. So in case the inode is getting unlinked, before
	 * we even get a chance to fsync, we could remove all fc_dentry
	 * references while evicting the inode in ext4_fc_del().
	 * Also with this, we don't need to loop over all the inodes in
	 * sbi->s_fc_q to get the corresponding inode in
	 * ext4_fc_commit_dentry_updates().
	 */
	if (dentry_update->op == EXT4_FC_TAG_CREAT) {
		WARN_ON(!list_empty(&ei->i_fc_dilist));
		list_add_tail(&node->fcd_dilist, &ei->i_fc_dilist);
	}
	spin_unlock(&sbi->s_fc_lock);
	mutex_lock(&ei->i_fc_lock);
	return 0;
}
void __ext4_fc_track_unlink(handle_t *handle,
		struct inode *inode, struct dentry *dentry)
{
	struct __track_dentry_update_args args;
	int ret;

	args.dentry = dentry;
	args.op = EXT4_FC_TAG_UNLINK;

	ret = ext4_fc_track_template(handle, inode, __track_dentry_update,
					(void *)&args, 0);
	trace_ext4_fc_track_unlink(handle, inode, dentry, ret);
}

void ext4_fc_track_unlink(handle_t *handle, struct dentry *dentry)
{
	struct inode *inode = d_inode(dentry);

	if (ext4_fc_disabled(inode->i_sb))
		return;

	if (ext4_test_mount_flag(inode->i_sb, EXT4_MF_FC_INELIGIBLE))
		return;

	__ext4_fc_track_unlink(handle, inode, dentry);
}

void __ext4_fc_track_link(handle_t *handle,
		struct inode *inode, struct dentry *dentry)
{
	struct __track_dentry_update_args args;
	int ret;

	args.dentry = dentry;
	args.op = EXT4_FC_TAG_LINK;

	ret = ext4_fc_track_template(handle, inode, __track_dentry_update,
					(void *)&args, 0);
	trace_ext4_fc_track_link(handle, inode, dentry, ret);
}

void ext4_fc_track_link(handle_t *handle, struct dentry *dentry)
{
	struct inode *inode = d_inode(dentry);

	if (ext4_fc_disabled(inode->i_sb))
		return;

	if (ext4_test_mount_flag(inode->i_sb, EXT4_MF_FC_INELIGIBLE))
		return;

	__ext4_fc_track_link(handle, inode, dentry);
}

void __ext4_fc_track_create(handle_t *handle, struct inode *inode,
			    struct dentry *dentry)
{
	struct __track_dentry_update_args args;
	int ret;

	args.dentry = dentry;
	args.op = EXT4_FC_TAG_CREAT;

	ret = ext4_fc_track_template(handle, inode, __track_dentry_update,
					(void *)&args, 0);
	trace_ext4_fc_track_create(handle, inode, dentry, ret);
}

void ext4_fc_track_create(handle_t *handle, struct dentry *dentry)
{
	struct inode *inode = d_inode(dentry);

	if (ext4_fc_disabled(inode->i_sb))
		return;

	if (ext4_test_mount_flag(inode->i_sb, EXT4_MF_FC_INELIGIBLE))
		return;

	__ext4_fc_track_create(handle, inode, dentry);
}

/* __track_fn for inode tracking */
static int __track_inode(struct inode *inode, void *arg, bool update)
{
	if (update)
		return -EEXIST;

	EXT4_I(inode)->i_fc_lblk_len = 0;

	return 0;
}

void ext4_fc_track_inode(handle_t *handle, struct inode *inode)
{
	int ret;

	if (S_ISDIR(inode->i_mode))
		return;

	if (ext4_fc_disabled(inode->i_sb))
		return;

	if (ext4_should_journal_data(inode)) {
		ext4_fc_mark_ineligible(inode->i_sb,
					EXT4_FC_REASON_INODE_JOURNAL_DATA, handle);
		return;
	}

	if (ext4_test_mount_flag(inode->i_sb, EXT4_MF_FC_INELIGIBLE))
		return;

	ret = ext4_fc_track_template(handle, inode, __track_inode, NULL, 1);
	trace_ext4_fc_track_inode(handle, inode, ret);
}

struct __track_range_args {
	ext4_lblk_t start, end;
};

/* __track_fn for tracking data updates */
static int __track_range(struct inode *inode, void *arg, bool update)
{
	struct ext4_inode_info *ei = EXT4_I(inode);
	ext4_lblk_t oldstart;
	struct __track_range_args *__arg =
		(struct __track_range_args *)arg;

	if (inode->i_ino < EXT4_FIRST_INO(inode->i_sb)) {
		ext4_debug("Special inode %ld being modified\n", inode->i_ino);
		return -ECANCELED;
	}

	oldstart = ei->i_fc_lblk_start;

	if (update && ei->i_fc_lblk_len > 0) {
		ei->i_fc_lblk_start = min(ei->i_fc_lblk_start, __arg->start);
		ei->i_fc_lblk_len =
			max(oldstart + ei->i_fc_lblk_len - 1, __arg->end) -
				ei->i_fc_lblk_start + 1;
	} else {
		ei->i_fc_lblk_start = __arg->start;
		ei->i_fc_lblk_len = __arg->end - __arg->start + 1;
	}

	return 0;
}
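/*
 * Worked example for the merge above (illustrative numbers): suppose blocks
 * 10-19 are already tracked (i_fc_lblk_start = 10, i_fc_lblk_len = 10) and a
 * new update touches blocks 5-12. With update == true:
 *
 *	start = min(10, 5)           = 5
 *	end   = max(10 + 10 - 1, 12) = 19,  so len = 19 - 5 + 1 = 15
 *
 * i.e. the tracked range becomes blocks 5-19, the union of the two ranges.
 */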
void ext4_fc_track_range(handle_t *handle, struct inode *inode, ext4_lblk_t start,
			 ext4_lblk_t end)
{
	struct __track_range_args args;
	int ret;

	if (S_ISDIR(inode->i_mode))
		return;

	if (ext4_fc_disabled(inode->i_sb))
		return;

	if (ext4_test_mount_flag(inode->i_sb, EXT4_MF_FC_INELIGIBLE))
		return;

	args.start = start;
	args.end = end;

	ret = ext4_fc_track_template(handle, inode, __track_range, &args, 1);

	trace_ext4_fc_track_range(handle, inode, start, end, ret);
}

static void ext4_fc_submit_bh(struct super_block *sb, bool is_tail)
{
	blk_opf_t write_flags = REQ_SYNC;
	struct buffer_head *bh = EXT4_SB(sb)->s_fc_bh;

	/* Add REQ_FUA | REQ_PREFLUSH only if it's the tail */
	if (test_opt(sb, BARRIER) && is_tail)
		write_flags |= REQ_FUA | REQ_PREFLUSH;
	lock_buffer(bh);
	set_buffer_dirty(bh);
	set_buffer_uptodate(bh);
	bh->b_end_io = ext4_end_buffer_io_sync;
	submit_bh(REQ_OP_WRITE | write_flags, bh);
	EXT4_SB(sb)->s_fc_bh = NULL;
}

/* Ext4 commit path routines */

/* memcpy to fc reserved space and update CRC */
static void *ext4_fc_memcpy(struct super_block *sb, void *dst, const void *src,
				int len, u32 *crc)
{
	if (crc)
		*crc = ext4_chksum(EXT4_SB(sb), *crc, src, len);
	return memcpy(dst, src, len);
}

/* memzero and update CRC */
static void *ext4_fc_memzero(struct super_block *sb, void *dst, int len,
				u32 *crc)
{
	void *ret;

	ret = memset(dst, 0, len);
	if (crc)
		*crc = ext4_chksum(EXT4_SB(sb), *crc, dst, len);
	return ret;
}

/*
 * Allocate len bytes on a fast commit buffer.
 *
 * During the commit time this function is used to manage fast commit
 * block space. We don't split a fast commit log onto different
 * blocks. So this function makes sure that if there's not enough space
 * on the current block, the remaining space in the current block is
 * marked as unused by adding an EXT4_FC_TAG_PAD tag. In that case, a
 * new block is obtained from jbd2 and the CRC is updated to reflect
 * the padding we added.
 */
static u8 *ext4_fc_reserve_space(struct super_block *sb, int len, u32 *crc)
{
	struct ext4_fc_tl tl;
	struct ext4_sb_info *sbi = EXT4_SB(sb);
	struct buffer_head *bh;
	int bsize = sbi->s_journal->j_blocksize;
	int ret, off = sbi->s_fc_bytes % bsize;
	int remaining;
	u8 *dst;

	/*
	 * If 'len' is too long to fit in any block alongside a PAD tlv, then we
	 * cannot fulfill the request.
	 */
	if (len > bsize - EXT4_FC_TAG_BASE_LEN)
		return NULL;

	if (!sbi->s_fc_bh) {
		ret = jbd2_fc_get_buf(EXT4_SB(sb)->s_journal, &bh);
		if (ret)
			return NULL;
		sbi->s_fc_bh = bh;
	}
	dst = sbi->s_fc_bh->b_data + off;

	/*
	 * Allocate the bytes in the current block if we can do so while still
	 * leaving enough space for a PAD tlv.
	 */
	remaining = bsize - EXT4_FC_TAG_BASE_LEN - off;
	if (len <= remaining) {
		sbi->s_fc_bytes += len;
		return dst;
	}

	/*
	 * Else, terminate the current block with a PAD tlv, then allocate a new
	 * block and allocate the bytes at the start of that new block.
	 */

	tl.fc_tag = cpu_to_le16(EXT4_FC_TAG_PAD);
	tl.fc_len = cpu_to_le16(remaining);
	ext4_fc_memcpy(sb, dst, &tl, EXT4_FC_TAG_BASE_LEN, crc);
	ext4_fc_memzero(sb, dst + EXT4_FC_TAG_BASE_LEN, remaining, crc);

	ext4_fc_submit_bh(sb, false);

	ret = jbd2_fc_get_buf(EXT4_SB(sb)->s_journal, &bh);
	if (ret)
		return NULL;
	sbi->s_fc_bh = bh;
	sbi->s_fc_bytes += bsize - off + len;
	return sbi->s_fc_bh->b_data;
}
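/*
 * Worked example for the PAD logic above (illustrative numbers, assuming a
 * 4096-byte journal block and EXT4_FC_TAG_BASE_LEN == 4, i.e. two __le16
 * fields): a request for len = 100 at off = 4000 sees
 * remaining = 4096 - 4 - 4000 = 92, so it does not fit. The block is closed
 * with a PAD tlv whose value length is 92 (4 + 92 bytes exactly fill the
 * block), the buffer is submitted, and the 100 bytes are handed out at
 * offset 0 of a fresh jbd2 block; s_fc_bytes advances by
 * (4096 - 4000) + 100 = 196, so the next offset lands at 100.
 */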
/*
 * Complete a fast commit by writing tail tag.
 *
 * Writing the tail tag marks the end of a fast commit. In order to guarantee
 * atomicity, after writing the tail tag, even if there's space remaining
 * in the block, the next commit shouldn't use it. That's why the tail tag's
 * length is set to cover the remaining space on the block.
 */
static int ext4_fc_write_tail(struct super_block *sb, u32 crc)
{
	struct ext4_sb_info *sbi = EXT4_SB(sb);
	struct ext4_fc_tl tl;
	struct ext4_fc_tail tail;
	int off, bsize = sbi->s_journal->j_blocksize;
	u8 *dst;

	/*
	 * ext4_fc_reserve_space takes care of allocating an extra block if
	 * there's not enough space on this block to accommodate this tail.
	 */
	dst = ext4_fc_reserve_space(sb, EXT4_FC_TAG_BASE_LEN + sizeof(tail), &crc);
	if (!dst)
		return -ENOSPC;

	off = sbi->s_fc_bytes % bsize;

	tl.fc_tag = cpu_to_le16(EXT4_FC_TAG_TAIL);
	tl.fc_len = cpu_to_le16(bsize - off + sizeof(struct ext4_fc_tail));
	sbi->s_fc_bytes = round_up(sbi->s_fc_bytes, bsize);

	ext4_fc_memcpy(sb, dst, &tl, EXT4_FC_TAG_BASE_LEN, &crc);
	dst += EXT4_FC_TAG_BASE_LEN;
	tail.fc_tid = cpu_to_le32(sbi->s_journal->j_running_transaction->t_tid);
	ext4_fc_memcpy(sb, dst, &tail.fc_tid, sizeof(tail.fc_tid), &crc);
	dst += sizeof(tail.fc_tid);
	tail.fc_crc = cpu_to_le32(crc);
	ext4_fc_memcpy(sb, dst, &tail.fc_crc, sizeof(tail.fc_crc), NULL);
	dst += sizeof(tail.fc_crc);
	memset(dst, 0, bsize - off); /* Don't leak uninitialized memory. */

	ext4_fc_submit_bh(sb, true);

	return 0;
}
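/*
 * On-disk result of the function above, sketched from its writes (with
 * `off` being the offset just past the reserved tail region): the tail tlv's
 * value holds the 8-byte ext4_fc_tail (fc_tid, then fc_crc) followed by
 * (bsize - off) zeroed bytes, and fc_len = bsize - off + sizeof(struct
 * ext4_fc_tail) stretches the tlv to the end of the block. Note the running
 * CRC covers everything up to and including fc_tid but not fc_crc itself,
 * and since s_fc_bytes was rounded up, the next fast commit starts on a
 * fresh block.
 */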
/*
 * Adds tag, length, value and updates CRC. Returns true if tlv was added.
 * Returns false if there's not enough space.
 */
static bool ext4_fc_add_tlv(struct super_block *sb, u16 tag, u16 len, u8 *val,
			    u32 *crc)
{
	struct ext4_fc_tl tl;
	u8 *dst;

	dst = ext4_fc_reserve_space(sb, EXT4_FC_TAG_BASE_LEN + len, crc);
	if (!dst)
		return false;

	tl.fc_tag = cpu_to_le16(tag);
	tl.fc_len = cpu_to_le16(len);

	ext4_fc_memcpy(sb, dst, &tl, EXT4_FC_TAG_BASE_LEN, crc);
	ext4_fc_memcpy(sb, dst + EXT4_FC_TAG_BASE_LEN, val, len, crc);

	return true;
}

/* Same as above, but adds a dentry tlv. */
static bool ext4_fc_add_dentry_tlv(struct super_block *sb, u32 *crc,
				   struct ext4_fc_dentry_update *fc_dentry)
{
	struct ext4_fc_dentry_info fcd;
	struct ext4_fc_tl tl;
	int dlen = fc_dentry->fcd_name.len;
	u8 *dst = ext4_fc_reserve_space(sb,
			EXT4_FC_TAG_BASE_LEN + sizeof(fcd) + dlen, crc);

	if (!dst)
		return false;

	fcd.fc_parent_ino = cpu_to_le32(fc_dentry->fcd_parent);
	fcd.fc_ino = cpu_to_le32(fc_dentry->fcd_ino);
	tl.fc_tag = cpu_to_le16(fc_dentry->fcd_op);
	tl.fc_len = cpu_to_le16(sizeof(fcd) + dlen);
	ext4_fc_memcpy(sb, dst, &tl, EXT4_FC_TAG_BASE_LEN, crc);
	dst += EXT4_FC_TAG_BASE_LEN;
	ext4_fc_memcpy(sb, dst, &fcd, sizeof(fcd), crc);
	dst += sizeof(fcd);
	ext4_fc_memcpy(sb, dst, fc_dentry->fcd_name.name, dlen, crc);

	return true;
}

/*
 * Writes the inode in the fast commit space under the EXT4_FC_TAG_INODE tlv.
 * Returns 0 on success, error on failure.
 */
static int ext4_fc_write_inode(struct inode *inode, u32 *crc)
{
	struct ext4_inode_info *ei = EXT4_I(inode);
	int inode_len = EXT4_GOOD_OLD_INODE_SIZE;
	int ret;
	struct ext4_iloc iloc;
	struct ext4_fc_inode fc_inode;
	struct ext4_fc_tl tl;
	u8 *dst;

	ret = ext4_get_inode_loc(inode, &iloc);
	if (ret)
		return ret;

	if (ext4_test_inode_flag(inode, EXT4_INODE_INLINE_DATA))
		inode_len = EXT4_INODE_SIZE(inode->i_sb);
	else if (EXT4_INODE_SIZE(inode->i_sb) > EXT4_GOOD_OLD_INODE_SIZE)
		inode_len += ei->i_extra_isize;

	fc_inode.fc_ino = cpu_to_le32(inode->i_ino);
	tl.fc_tag = cpu_to_le16(EXT4_FC_TAG_INODE);
	tl.fc_len = cpu_to_le16(inode_len + sizeof(fc_inode.fc_ino));

	ret = -ECANCELED;
	dst = ext4_fc_reserve_space(inode->i_sb,
			EXT4_FC_TAG_BASE_LEN + inode_len + sizeof(fc_inode.fc_ino), crc);
	if (!dst)
		goto err;

	if (!ext4_fc_memcpy(inode->i_sb, dst, &tl, EXT4_FC_TAG_BASE_LEN, crc))
		goto err;
	dst += EXT4_FC_TAG_BASE_LEN;
	if (!ext4_fc_memcpy(inode->i_sb, dst, &fc_inode, sizeof(fc_inode), crc))
		goto err;
	dst += sizeof(fc_inode);
	if (!ext4_fc_memcpy(inode->i_sb, dst, (u8 *)ext4_raw_inode(&iloc),
					inode_len, crc))
		goto err;
	ret = 0;
err:
	brelse(iloc.bh);
	return ret;
}

/*
 * Writes updated data ranges for the inode in question. Updates CRC.
 * Returns 0 on success, error otherwise.
 */
static int ext4_fc_write_inode_data(struct inode *inode, u32 *crc)
{
	ext4_lblk_t old_blk_size, cur_lblk_off, new_blk_size;
	struct ext4_inode_info *ei = EXT4_I(inode);
	struct ext4_map_blocks map;
	struct ext4_fc_add_range fc_ext;
	struct ext4_fc_del_range lrange;
	struct ext4_extent *ex;
	int ret;

	mutex_lock(&ei->i_fc_lock);
	if (ei->i_fc_lblk_len == 0) {
		mutex_unlock(&ei->i_fc_lock);
		return 0;
	}
	old_blk_size = ei->i_fc_lblk_start;
	new_blk_size = ei->i_fc_lblk_start + ei->i_fc_lblk_len - 1;
	ei->i_fc_lblk_len = 0;
	mutex_unlock(&ei->i_fc_lock);

	cur_lblk_off = old_blk_size;
	ext4_debug("will try writing %d to %d for inode %ld\n",
		   cur_lblk_off, new_blk_size, inode->i_ino);

	while (cur_lblk_off <= new_blk_size) {
		map.m_lblk = cur_lblk_off;
		map.m_len = new_blk_size - cur_lblk_off + 1;
		ret = ext4_map_blocks(NULL, inode, &map, 0);
		if (ret < 0)
			return -ECANCELED;

		if (map.m_len == 0) {
			cur_lblk_off++;
			continue;
		}

		if (ret == 0) {
			lrange.fc_ino = cpu_to_le32(inode->i_ino);
			lrange.fc_lblk = cpu_to_le32(map.m_lblk);
			lrange.fc_len = cpu_to_le32(map.m_len);
			if (!ext4_fc_add_tlv(inode->i_sb, EXT4_FC_TAG_DEL_RANGE,
					    sizeof(lrange), (u8 *)&lrange, crc))
				return -ENOSPC;
		} else {
			unsigned int max = (map.m_flags & EXT4_MAP_UNWRITTEN) ?
				EXT_UNWRITTEN_MAX_LEN : EXT_INIT_MAX_LEN;

			/* Limit the number of blocks in one extent */
			map.m_len = min(max, map.m_len);

			fc_ext.fc_ino = cpu_to_le32(inode->i_ino);
			ex = (struct ext4_extent *)&fc_ext.fc_ex;
			ex->ee_block = cpu_to_le32(map.m_lblk);
			ex->ee_len = cpu_to_le16(map.m_len);
			ext4_ext_store_pblock(ex, map.m_pblk);
			if (map.m_flags & EXT4_MAP_UNWRITTEN)
				ext4_ext_mark_unwritten(ex);
			else
				ext4_ext_mark_initialized(ex);
			if (!ext4_fc_add_tlv(inode->i_sb, EXT4_FC_TAG_ADD_RANGE,
					    sizeof(fc_ext), (u8 *)&fc_ext, crc))
				return -ENOSPC;
		}

		cur_lblk_off += map.m_len;
	}

	return 0;
}
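/*
 * Illustrative walk of the loop above (assumed numbers): for a tracked range
 * of logical blocks 0-9 where 0-3 are mapped, 4-5 are a hole and 6-9 are
 * mapped, ext4_map_blocks() returns > 0 for 0-3 (one ADD_RANGE tlv carrying
 * an extent), 0 with m_len = 2 for 4-5 (one DEL_RANGE tlv), and > 0 for 6-9
 * (another ADD_RANGE tlv), with cur_lblk_off advancing by map.m_len on each
 * iteration.
 */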
/* Submit data for all the fast commit inodes */
static int ext4_fc_submit_inode_data_all(journal_t *journal)
{
	struct super_block *sb = journal->j_private;
	struct ext4_sb_info *sbi = EXT4_SB(sb);
	struct ext4_inode_info *ei;
	int ret = 0;

	spin_lock(&sbi->s_fc_lock);
	list_for_each_entry(ei, &sbi->s_fc_q[FC_Q_MAIN], i_fc_list) {
		ext4_set_inode_state(&ei->vfs_inode, EXT4_STATE_FC_COMMITTING);
		while (atomic_read(&ei->i_fc_updates)) {
			DEFINE_WAIT(wait);

			prepare_to_wait(&ei->i_fc_wait, &wait,
					TASK_UNINTERRUPTIBLE);
			if (atomic_read(&ei->i_fc_updates)) {
				spin_unlock(&sbi->s_fc_lock);
				schedule();
				spin_lock(&sbi->s_fc_lock);
			}
			finish_wait(&ei->i_fc_wait, &wait);
		}
		spin_unlock(&sbi->s_fc_lock);
		ret = jbd2_submit_inode_data(ei->jinode);
		if (ret)
			return ret;
		spin_lock(&sbi->s_fc_lock);
	}
	spin_unlock(&sbi->s_fc_lock);

	return ret;
}

/* Wait for completion of data for all the fast commit inodes */
static int ext4_fc_wait_inode_data_all(journal_t *journal)
{
	struct super_block *sb = journal->j_private;
	struct ext4_sb_info *sbi = EXT4_SB(sb);
	struct ext4_inode_info *pos, *n;
	int ret = 0;

	spin_lock(&sbi->s_fc_lock);
	list_for_each_entry_safe(pos, n, &sbi->s_fc_q[FC_Q_MAIN], i_fc_list) {
		if (!ext4_test_inode_state(&pos->vfs_inode,
					   EXT4_STATE_FC_COMMITTING))
			continue;
		spin_unlock(&sbi->s_fc_lock);

		ret = jbd2_wait_inode_data(journal, pos->jinode);
		if (ret)
			return ret;
		spin_lock(&sbi->s_fc_lock);
	}
	spin_unlock(&sbi->s_fc_lock);

	return 0;
}

/* Commit all the directory entry updates */
static int ext4_fc_commit_dentry_updates(journal_t *journal, u32 *crc)
__acquires(&sbi->s_fc_lock)
__releases(&sbi->s_fc_lock)
{
	struct super_block *sb = journal->j_private;
	struct ext4_sb_info *sbi = EXT4_SB(sb);
	struct ext4_fc_dentry_update *fc_dentry, *fc_dentry_n;
	struct inode *inode;
	struct ext4_inode_info *ei;
	int ret;

	if (list_empty(&sbi->s_fc_dentry_q[FC_Q_MAIN]))
		return 0;
	list_for_each_entry_safe(fc_dentry, fc_dentry_n,
				 &sbi->s_fc_dentry_q[FC_Q_MAIN], fcd_list) {
		if (fc_dentry->fcd_op != EXT4_FC_TAG_CREAT) {
			spin_unlock(&sbi->s_fc_lock);
			if (!ext4_fc_add_dentry_tlv(sb, crc, fc_dentry)) {
				ret = -ENOSPC;
				goto lock_and_exit;
			}
			spin_lock(&sbi->s_fc_lock);
			continue;
		}
		/*
		 * With fcd_dilist we need not loop in sbi->s_fc_q to get the
		 * corresponding inode pointer
		 */
		WARN_ON(list_empty(&fc_dentry->fcd_dilist));
		ei = list_first_entry(&fc_dentry->fcd_dilist,
				struct ext4_inode_info, i_fc_dilist);
		inode = &ei->vfs_inode;
		WARN_ON(inode->i_ino != fc_dentry->fcd_ino);

		spin_unlock(&sbi->s_fc_lock);

		/*
		 * We first write the inode and then the create dirent. This
		 * allows the recovery code to create an unnamed inode first
		 * and then link it to a directory entry. This allows us
		 * to use namei.c routines almost as is and simplifies
		 * the recovery code.
		 */
		ret = ext4_fc_write_inode(inode, crc);
		if (ret)
			goto lock_and_exit;

		ret = ext4_fc_write_inode_data(inode, crc);
		if (ret)
			goto lock_and_exit;

		if (!ext4_fc_add_dentry_tlv(sb, crc, fc_dentry)) {
			ret = -ENOSPC;
			goto lock_and_exit;
		}

		spin_lock(&sbi->s_fc_lock);
	}
	return 0;
lock_and_exit:
	spin_lock(&sbi->s_fc_lock);
	return ret;
}

static int ext4_fc_perform_commit(journal_t *journal)
{
	struct super_block *sb = journal->j_private;
	struct ext4_sb_info *sbi = EXT4_SB(sb);
	struct ext4_inode_info *iter;
	struct ext4_fc_head head;
	struct inode *inode;
	struct blk_plug plug;
	int ret = 0;
	u32 crc = 0;

	ret = ext4_fc_submit_inode_data_all(journal);
	if (ret)
		return ret;

	ret = ext4_fc_wait_inode_data_all(journal);
	if (ret)
		return ret;

	/*
	 * If the file system device is different from the journal device,
	 * issue a cache flush before we start writing fast commit blocks.
	 */
	if (journal->j_fs_dev != journal->j_dev)
		blkdev_issue_flush(journal->j_fs_dev);

	blk_start_plug(&plug);
	if (sbi->s_fc_bytes == 0) {
		/*
		 * Add a head tag only if this is the first fast commit
		 * in this TID.
		 */
		head.fc_features = cpu_to_le32(EXT4_FC_SUPPORTED_FEATURES);
		head.fc_tid = cpu_to_le32(
			sbi->s_journal->j_running_transaction->t_tid);
		if (!ext4_fc_add_tlv(sb, EXT4_FC_TAG_HEAD, sizeof(head),
			(u8 *)&head, &crc)) {
			ret = -ENOSPC;
			goto out;
		}
	}

	spin_lock(&sbi->s_fc_lock);
	ret = ext4_fc_commit_dentry_updates(journal, &crc);
	if (ret) {
		spin_unlock(&sbi->s_fc_lock);
		goto out;
	}

	list_for_each_entry(iter, &sbi->s_fc_q[FC_Q_MAIN], i_fc_list) {
		inode = &iter->vfs_inode;
		if (!ext4_test_inode_state(inode, EXT4_STATE_FC_COMMITTING))
			continue;

		spin_unlock(&sbi->s_fc_lock);
		ret = ext4_fc_write_inode_data(inode, &crc);
		if (ret)
			goto out;
		ret = ext4_fc_write_inode(inode, &crc);
		if (ret)
			goto out;
		spin_lock(&sbi->s_fc_lock);
	}
	spin_unlock(&sbi->s_fc_lock);

	ret = ext4_fc_write_tail(sb, crc);

out:
	blk_finish_plug(&plug);
	return ret;
}

static void ext4_fc_update_stats(struct super_block *sb, int status,
				 u64 commit_time, int nblks, tid_t commit_tid)
{
	struct ext4_fc_stats *stats = &EXT4_SB(sb)->s_fc_stats;

	ext4_debug("Fast commit ended with status = %d for tid %u",
			status, commit_tid);
	if (status == EXT4_FC_STATUS_OK) {
		stats->fc_num_commits++;
		stats->fc_numblks += nblks;
		if (likely(stats->s_fc_avg_commit_time))
			stats->s_fc_avg_commit_time =
				(commit_time +
				 stats->s_fc_avg_commit_time * 3) / 4;
		else
			stats->s_fc_avg_commit_time = commit_time;
	} else if (status == EXT4_FC_STATUS_FAILED ||
		   status == EXT4_FC_STATUS_INELIGIBLE) {
		if (status == EXT4_FC_STATUS_FAILED)
			stats->fc_failed_commits++;
		stats->fc_ineligible_commits++;
	} else {
		stats->fc_skipped_commits++;
	}
	trace_ext4_fc_commit_stop(sb, nblks, status, commit_tid);
}
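/*
 * The average above is an exponentially weighted moving average with weight
 * 3/4 on history: avg' = (commit_time + 3 * avg) / 4. For example (made-up
 * numbers), with avg = 800ns and commit_time = 400ns the new average is
 * (400 + 2400) / 4 = 700ns, so a single fast or slow commit only moves the
 * average by a quarter of the difference.
 */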
/*
 * The main commit entry point. Performs a fast commit for transaction
 * commit_tid if needed. If it's not possible to perform a fast commit
 * due to various reasons, we fall back to full commit. Returns 0
 * on success, error otherwise.
 */
int ext4_fc_commit(journal_t *journal, tid_t commit_tid)
{
	struct super_block *sb = journal->j_private;
	struct ext4_sb_info *sbi = EXT4_SB(sb);
	int nblks = 0, ret, bsize = journal->j_blocksize;
	int subtid = atomic_read(&sbi->s_fc_subtid);
	int status = EXT4_FC_STATUS_OK, fc_bufs_before = 0;
	ktime_t start_time, commit_time;

	if (!test_opt2(sb, JOURNAL_FAST_COMMIT))
		return jbd2_complete_transaction(journal, commit_tid);

	trace_ext4_fc_commit_start(sb, commit_tid);

	start_time = ktime_get();

restart_fc:
	ret = jbd2_fc_begin_commit(journal, commit_tid);
	if (ret == -EALREADY) {
		/* There was an ongoing commit, check if we need to restart */
		if (atomic_read(&sbi->s_fc_subtid) <= subtid &&
			commit_tid > journal->j_commit_sequence)
			goto restart_fc;
		ext4_fc_update_stats(sb, EXT4_FC_STATUS_SKIPPED, 0, 0,
				commit_tid);
		return 0;
	} else if (ret) {
		/*
		 * Commit couldn't start. Just update stats and perform a
		 * full commit.
		 */
		ext4_fc_update_stats(sb, EXT4_FC_STATUS_FAILED, 0, 0,
				commit_tid);
		return jbd2_complete_transaction(journal, commit_tid);
	}

	/*
	 * After establishing journal barrier via jbd2_fc_begin_commit(), check
	 * if we are fast commit ineligible.
	 */
	if (ext4_test_mount_flag(sb, EXT4_MF_FC_INELIGIBLE)) {
		status = EXT4_FC_STATUS_INELIGIBLE;
		goto fallback;
	}

	fc_bufs_before = (sbi->s_fc_bytes + bsize - 1) / bsize;
	ret = ext4_fc_perform_commit(journal);
	if (ret < 0) {
		status = EXT4_FC_STATUS_FAILED;
		goto fallback;
	}
	nblks = (sbi->s_fc_bytes + bsize - 1) / bsize - fc_bufs_before;
	ret = jbd2_fc_wait_bufs(journal, nblks);
	if (ret < 0) {
		status = EXT4_FC_STATUS_FAILED;
		goto fallback;
	}
	atomic_inc(&sbi->s_fc_subtid);
	ret = jbd2_fc_end_commit(journal);
	/*
	 * weight the commit time higher than the average time so we
	 * don't react too strongly to vast changes in the commit time
	 */
	commit_time = ktime_to_ns(ktime_sub(ktime_get(), start_time));
	ext4_fc_update_stats(sb, status, commit_time, nblks, commit_tid);
	return ret;

fallback:
	ret = jbd2_fc_end_commit_fallback(journal);
	ext4_fc_update_stats(sb, status, 0, 0, commit_tid);
	return ret;
}
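/*
 * Sketch of the caller side (hedged; the exact call chain lives in
 * fs/ext4/fsync.c, not here): an fsync() on a fast-commit-enabled file
 * system ends up doing roughly
 *
 *	ret = ext4_fc_commit(sbi->s_journal, ei->i_sync_tid);
 *
 * which either fast commits the inode's tracked deltas or transparently
 * falls back to jbd2_complete_transaction() as seen above.
 */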
/*
 * Fast commit cleanup routine. This is called after every fast commit and
 * full commit. full is true if we are called after a full commit.
 */
static void ext4_fc_cleanup(journal_t *journal, int full, tid_t tid)
{
	struct super_block *sb = journal->j_private;
	struct ext4_sb_info *sbi = EXT4_SB(sb);
	struct ext4_inode_info *iter, *iter_n;
	struct ext4_fc_dentry_update *fc_dentry;

	if (full && sbi->s_fc_bh)
		sbi->s_fc_bh = NULL;

	trace_ext4_fc_cleanup(journal, full, tid);
	jbd2_fc_release_bufs(journal);

	spin_lock(&sbi->s_fc_lock);
	list_for_each_entry_safe(iter, iter_n, &sbi->s_fc_q[FC_Q_MAIN],
				 i_fc_list) {
		list_del_init(&iter->i_fc_list);
		ext4_clear_inode_state(&iter->vfs_inode,
				       EXT4_STATE_FC_COMMITTING);
		if (iter->i_sync_tid <= tid)
			ext4_fc_reset_inode(&iter->vfs_inode);
		/* Make sure EXT4_STATE_FC_COMMITTING bit is clear */
		smp_mb();
#if (BITS_PER_LONG < 64)
		wake_up_bit(&iter->i_state_flags, EXT4_STATE_FC_COMMITTING);
#else
		wake_up_bit(&iter->i_flags, EXT4_STATE_FC_COMMITTING);
#endif
	}

	while (!list_empty(&sbi->s_fc_dentry_q[FC_Q_MAIN])) {
		fc_dentry = list_first_entry(&sbi->s_fc_dentry_q[FC_Q_MAIN],
					     struct ext4_fc_dentry_update,
					     fcd_list);
		list_del_init(&fc_dentry->fcd_list);
		list_del_init(&fc_dentry->fcd_dilist);
		spin_unlock(&sbi->s_fc_lock);

		if (fc_dentry->fcd_name.name &&
		    fc_dentry->fcd_name.len > DNAME_INLINE_LEN)
			kfree(fc_dentry->fcd_name.name);
		kmem_cache_free(ext4_fc_dentry_cachep, fc_dentry);
		spin_lock(&sbi->s_fc_lock);
	}

	list_splice_init(&sbi->s_fc_dentry_q[FC_Q_STAGING],
				&sbi->s_fc_dentry_q[FC_Q_MAIN]);
	list_splice_init(&sbi->s_fc_q[FC_Q_STAGING],
				&sbi->s_fc_q[FC_Q_MAIN]);

	if (tid >= sbi->s_fc_ineligible_tid) {
		sbi->s_fc_ineligible_tid = 0;
		ext4_clear_mount_flag(sb, EXT4_MF_FC_INELIGIBLE);
	}

	if (full)
		sbi->s_fc_bytes = 0;
	spin_unlock(&sbi->s_fc_lock);
	trace_ext4_fc_stats(sb);
}

/* Ext4 Replay Path Routines */

/* Helper struct for dentry replay routines */
struct dentry_info_args {
	int parent_ino, dname_len, ino, inode_len;
	char *dname;
};

/* Same as struct ext4_fc_tl, but with native-endian fields */
struct ext4_fc_tl_mem {
	u16 fc_tag;
	u16 fc_len;
};

static inline void tl_to_darg(struct dentry_info_args *darg,
			      struct ext4_fc_tl_mem *tl, u8 *val)
{
	struct ext4_fc_dentry_info fcd;

	memcpy(&fcd, val, sizeof(fcd));

	darg->parent_ino = le32_to_cpu(fcd.fc_parent_ino);
	darg->ino = le32_to_cpu(fcd.fc_ino);
	darg->dname = val + offsetof(struct ext4_fc_dentry_info, fc_dname);
	darg->dname_len = tl->fc_len - sizeof(struct ext4_fc_dentry_info);
}

static inline void ext4_fc_get_tl(struct ext4_fc_tl_mem *tl, u8 *val)
{
	struct ext4_fc_tl tl_disk;

	memcpy(&tl_disk, val, EXT4_FC_TAG_BASE_LEN);
	tl->fc_len = le16_to_cpu(tl_disk.fc_len);
	tl->fc_tag = le16_to_cpu(tl_disk.fc_tag);
}

/* Unlink replay function */
static int ext4_fc_replay_unlink(struct super_block *sb,
				 struct ext4_fc_tl_mem *tl, u8 *val)
{
	struct inode *inode, *old_parent;
	struct qstr entry;
	struct dentry_info_args darg;
	int ret = 0;

	tl_to_darg(&darg, tl, val);

	trace_ext4_fc_replay(sb, EXT4_FC_TAG_UNLINK, darg.ino,
			darg.parent_ino, darg.dname_len);

	entry.name = darg.dname;
	entry.len = darg.dname_len;
	inode = ext4_iget(sb, darg.ino, EXT4_IGET_NORMAL);

	if (IS_ERR(inode)) {
		ext4_debug("Inode %d not found", darg.ino);
		return 0;
	}

	old_parent = ext4_iget(sb, darg.parent_ino,
			       EXT4_IGET_NORMAL);
	if (IS_ERR(old_parent)) {
		ext4_debug("Dir with inode %d not found", darg.parent_ino);
		iput(inode);
		return 0;
	}

	ret = __ext4_unlink(old_parent, &entry, inode, NULL);
	/* -ENOENT is OK because the entry might not exist anymore. */
	if (ret == -ENOENT)
		ret = 0;
	iput(old_parent);
	iput(inode);
	return ret;
}
static int ext4_fc_replay_link_internal(struct super_block *sb,
				struct dentry_info_args *darg,
				struct inode *inode)
{
	struct inode *dir = NULL;
	struct dentry *dentry_dir = NULL, *dentry_inode = NULL;
	struct qstr qstr_dname = QSTR_INIT(darg->dname, darg->dname_len);
	int ret = 0;

	dir = ext4_iget(sb, darg->parent_ino, EXT4_IGET_NORMAL);
	if (IS_ERR(dir)) {
		ext4_debug("Dir with inode %d not found.", darg->parent_ino);
		dir = NULL;
		goto out;
	}

	dentry_dir = d_obtain_alias(dir);
	if (IS_ERR(dentry_dir)) {
		ext4_debug("Failed to obtain dentry");
		dentry_dir = NULL;
		goto out;
	}

	dentry_inode = d_alloc(dentry_dir, &qstr_dname);
	if (!dentry_inode) {
		ext4_debug("Inode dentry not created.");
		ret = -ENOMEM;
		goto out;
	}

	ret = __ext4_link(dir, inode, dentry_inode);
	/*
	 * It's possible that the link already existed since the data
	 * blocks for the dir in question got persisted before we crashed,
	 * or we replayed this tag and crashed before the entire replay
	 * could complete.
	 */
	if (ret && ret != -EEXIST) {
		ext4_debug("Failed to link\n");
		goto out;
	}

	ret = 0;
out:
	if (dentry_dir) {
		d_drop(dentry_dir);
		dput(dentry_dir);
	} else if (dir) {
		iput(dir);
	}
	if (dentry_inode) {
		d_drop(dentry_inode);
		dput(dentry_inode);
	}

	return ret;
}
/* Link replay function */
static int ext4_fc_replay_link(struct super_block *sb,
			       struct ext4_fc_tl_mem *tl, u8 *val)
{
	struct inode *inode;
	struct dentry_info_args darg;
	int ret = 0;

	tl_to_darg(&darg, tl, val);
	trace_ext4_fc_replay(sb, EXT4_FC_TAG_LINK, darg.ino,
			darg.parent_ino, darg.dname_len);

	inode = ext4_iget(sb, darg.ino, EXT4_IGET_NORMAL);
	if (IS_ERR(inode)) {
		ext4_debug("Inode not found.");
		return 0;
	}

	ret = ext4_fc_replay_link_internal(sb, &darg, inode);
	iput(inode);
	return ret;
}
/*
 * Record all the modified inodes during replay. We use this later to set up
 * block bitmaps correctly.
 */
static int ext4_fc_record_modified_inode(struct super_block *sb, int ino)
{
	struct ext4_fc_replay_state *state;
	int i;

	state = &EXT4_SB(sb)->s_fc_replay_state;
	for (i = 0; i < state->fc_modified_inodes_used; i++)
		if (state->fc_modified_inodes[i] == ino)
			return 0;
	if (state->fc_modified_inodes_used == state->fc_modified_inodes_size) {
		int *fc_modified_inodes;

		fc_modified_inodes = krealloc(state->fc_modified_inodes,
				sizeof(int) * (state->fc_modified_inodes_size +
				EXT4_FC_REPLAY_REALLOC_INCREMENT),
				GFP_KERNEL);
		if (!fc_modified_inodes)
			return -ENOMEM;
		state->fc_modified_inodes = fc_modified_inodes;
		state->fc_modified_inodes_size +=
			EXT4_FC_REPLAY_REALLOC_INCREMENT;
	}
	state->fc_modified_inodes[state->fc_modified_inodes_used++] = ino;
	return 0;
}
/*
 * Inode replay function
 */
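/*
 * The raw inode image carried in the tag is copied over the on-disk
 * inode table entry, except for i_block: for extent-mapped inodes a
 * valid (possibly empty) extent header is ensured instead, and for
 * inline-data inodes i_block is copied verbatim. The inode is then
 * marked as in use and its block count recomputed, since the replay
 * allocator may have placed blocks differently than before the crash.
 */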
static int ext4_fc_replay_inode(struct super_block *sb,
				struct ext4_fc_tl_mem *tl, u8 *val)
{
	struct ext4_fc_inode fc_inode;
	struct ext4_inode *raw_inode;
	struct ext4_inode *raw_fc_inode;
	struct inode *inode = NULL;
	struct ext4_iloc iloc;
	int inode_len, ino, ret, tag = tl->fc_tag;
	struct ext4_extent_header *eh;
	size_t off_gen = offsetof(struct ext4_inode, i_generation);

	memcpy(&fc_inode, val, sizeof(fc_inode));

	ino = le32_to_cpu(fc_inode.fc_ino);
	trace_ext4_fc_replay(sb, tag, ino, 0, 0);

	inode = ext4_iget(sb, ino, EXT4_IGET_NORMAL);
	if (!IS_ERR(inode)) {
		ext4_ext_clear_bb(inode);
		iput(inode);
	}
	inode = NULL;

	ret = ext4_fc_record_modified_inode(sb, ino);
	if (ret)
		goto out;

	raw_fc_inode = (struct ext4_inode *)
		(val + offsetof(struct ext4_fc_inode, fc_raw_inode));
	ret = ext4_get_fc_inode_loc(sb, ino, &iloc);
	if (ret)
		goto out;

	inode_len = tl->fc_len - sizeof(struct ext4_fc_inode);
	raw_inode = ext4_raw_inode(&iloc);
	memcpy(raw_inode, raw_fc_inode, offsetof(struct ext4_inode, i_block));
	memcpy((u8 *)raw_inode + off_gen, (u8 *)raw_fc_inode + off_gen,
	       inode_len - off_gen);
	if (le32_to_cpu(raw_inode->i_flags) & EXT4_EXTENTS_FL) {
		eh = (struct ext4_extent_header *)(&raw_inode->i_block[0]);
		if (eh->eh_magic != EXT4_EXT_MAGIC) {
			memset(eh, 0, sizeof(*eh));
			eh->eh_magic = EXT4_EXT_MAGIC;
			eh->eh_max = cpu_to_le16(
				(sizeof(raw_inode->i_block) -
				 sizeof(struct ext4_extent_header))
				/ sizeof(struct ext4_extent));
		}
	} else if (le32_to_cpu(raw_inode->i_flags) & EXT4_INLINE_DATA_FL) {
		memcpy(raw_inode->i_block, raw_fc_inode->i_block,
			sizeof(raw_inode->i_block));
	}

	/* Immediately update the inode on disk. */
	ret = ext4_handle_dirty_metadata(NULL, NULL, iloc.bh);
	if (ret)
		goto out;
	ret = sync_dirty_buffer(iloc.bh);
	if (ret)
		goto out;
	ret = ext4_mark_inode_used(sb, ino);
	if (ret)
		goto out;

	/* Given that we just wrote the inode on disk, this SHOULD succeed. */
	inode = ext4_iget(sb, ino, EXT4_IGET_NORMAL);
	if (IS_ERR(inode)) {
		ext4_debug("Inode not found.");
		return -EFSCORRUPTED;
	}

	/*
	 * Our allocator could have made different decisions than before
	 * crashing. This should be fixed but until then, we calculate
	 * the number of blocks used by the inode.
	 */
	if (!ext4_test_inode_flag(inode, EXT4_INODE_INLINE_DATA))
		ext4_ext_replay_set_iblocks(inode);

	inode->i_generation = le32_to_cpu(ext4_raw_inode(&iloc)->i_generation);
	ext4_reset_inode_seed(inode);

	ext4_inode_csum_set(inode, ext4_raw_inode(&iloc), EXT4_I(inode));
	ret = ext4_handle_dirty_metadata(NULL, NULL, iloc.bh);
	sync_dirty_buffer(iloc.bh);
	brelse(iloc.bh);
out:
	iput(inode);
	if (!ret)
		blkdev_issue_flush(sb->s_bdev);

	return 0;
}
/*
 * Dentry create replay function.
 *
 * EXT4_FC_TAG_CREAT is preceded by EXT4_FC_TAG_INODE_FULL, which means the
 * inode for which we are trying to create a dentry here should already
 * have been replayed before we start here.
 */
static int ext4_fc_replay_create(struct super_block *sb,
				 struct ext4_fc_tl_mem *tl, u8 *val)
{
	int ret = 0;
	struct inode *inode = NULL;
	struct inode *dir = NULL;
	struct dentry_info_args darg;

	tl_to_darg(&darg, tl, val);

	trace_ext4_fc_replay(sb, EXT4_FC_TAG_CREAT, darg.ino,
			darg.parent_ino, darg.dname_len);

	/* This takes care of updating the group descriptor and other metadata */
	ret = ext4_mark_inode_used(sb, darg.ino);
	if (ret)
		goto out;

	inode = ext4_iget(sb, darg.ino, EXT4_IGET_NORMAL);
	if (IS_ERR(inode)) {
		ext4_debug("inode %d not found.", darg.ino);
		inode = NULL;
		ret = -EINVAL;
		goto out;
	}

	if (S_ISDIR(inode->i_mode)) {
		/*
		 * If we are creating a directory, we need to make sure that the
		 * dot and dot dot dirents are set up properly.
		 */
		dir = ext4_iget(sb, darg.parent_ino, EXT4_IGET_NORMAL);
		if (IS_ERR(dir)) {
			ext4_debug("Dir %d not found.", darg.ino);
			goto out;
		}
		ret = ext4_init_new_dir(NULL, dir, inode);
		iput(dir);
		if (ret) {
			ret = 0;
			goto out;
		}
	}
	ret = ext4_fc_replay_link_internal(sb, &darg, inode);
	if (ret)
		goto out;
	set_nlink(inode, 1);
	ext4_mark_inode_dirty(NULL, inode);
out:
	iput(inode);
	return ret;
}
/*
 * Record physical disk regions which are in use as per fast commit area,
 * and used by inodes during replay phase. Our simple replay phase
 * allocator excludes these regions from allocation.
 */
int ext4_fc_record_regions(struct super_block *sb, int ino,
		ext4_lblk_t lblk, ext4_fsblk_t pblk, int len, int replay)
{
	struct ext4_fc_replay_state *state;
	struct ext4_fc_alloc_region *region;

	state = &EXT4_SB(sb)->s_fc_replay_state;
	/*
	 * During the replay phase, fc_regions_valid may not be the same as
	 * fc_regions_used; bring it up to date before recording new regions.
	 */
	if (replay && state->fc_regions_used != state->fc_regions_valid)
		state->fc_regions_used = state->fc_regions_valid;
	if (state->fc_regions_used == state->fc_regions_size) {
		struct ext4_fc_alloc_region *fc_regions;

		fc_regions = krealloc(state->fc_regions,
				      sizeof(struct ext4_fc_alloc_region) *
				      (state->fc_regions_size +
				       EXT4_FC_REPLAY_REALLOC_INCREMENT),
				      GFP_KERNEL);
		if (!fc_regions)
			return -ENOMEM;
		state->fc_regions_size +=
			EXT4_FC_REPLAY_REALLOC_INCREMENT;
		state->fc_regions = fc_regions;
	}
	region = &state->fc_regions[state->fc_regions_used++];
	region->ino = ino;
	region->lblk = lblk;
	region->pblk = pblk;
	region->len = len;

	if (replay)
		state->fc_regions_valid++;

	return 0;
}
/* Replay add range tag */
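/*
 * For each piece of the logical range we either insert a fresh extent
 * (the range is not mapped at all), rewrite the mapping when the
 * physical block changed under a fast commit, or just toggle the
 * written/unwritten state when logical and physical already agree.
 */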
static int ext4_fc_replay_add_range(struct super_block *sb,
				    struct ext4_fc_tl_mem *tl, u8 *val)
{
	struct ext4_fc_add_range fc_add_ex;
	struct ext4_extent newex, *ex;
	struct inode *inode;
	ext4_lblk_t start, cur;
	int remaining, len;
	ext4_fsblk_t start_pblk;
	struct ext4_map_blocks map;
	struct ext4_ext_path *path = NULL;
	int ret;

	memcpy(&fc_add_ex, val, sizeof(fc_add_ex));
	ex = (struct ext4_extent *)&fc_add_ex.fc_ex;

	trace_ext4_fc_replay(sb, EXT4_FC_TAG_ADD_RANGE,
		le32_to_cpu(fc_add_ex.fc_ino), le32_to_cpu(ex->ee_block),
		ext4_ext_get_actual_len(ex));

	inode = ext4_iget(sb, le32_to_cpu(fc_add_ex.fc_ino), EXT4_IGET_NORMAL);
	if (IS_ERR(inode)) {
		ext4_debug("Inode not found.");
		return 0;
	}

	ret = ext4_fc_record_modified_inode(sb, inode->i_ino);
	if (ret)
		goto out;

	start = le32_to_cpu(ex->ee_block);
	start_pblk = ext4_ext_pblock(ex);
	len = ext4_ext_get_actual_len(ex);

	cur = start;
	remaining = len;
	ext4_debug("ADD_RANGE, lblk %d, pblk %lld, len %d, unwritten %d, inode %ld\n",
		  start, start_pblk, len, ext4_ext_is_unwritten(ex),
		  inode->i_ino);

	while (remaining > 0) {
		map.m_lblk = cur;
		map.m_len = remaining;
		map.m_pblk = 0;
		ret = ext4_map_blocks(NULL, inode, &map, 0);

		if (ret < 0)
			goto out;

		if (ret == 0) {
			/* Range is not mapped */
			path = ext4_find_extent(inode, cur, NULL, 0);
			if (IS_ERR(path))
				goto out;
			memset(&newex, 0, sizeof(newex));
			newex.ee_block = cpu_to_le32(cur);
			ext4_ext_store_pblock(
				&newex, start_pblk + cur - start);
			newex.ee_len = cpu_to_le16(map.m_len);
			if (ext4_ext_is_unwritten(ex))
				ext4_ext_mark_unwritten(&newex);
			down_write(&EXT4_I(inode)->i_data_sem);
			ret = ext4_ext_insert_extent(
				NULL, inode, &path, &newex, 0);
			up_write((&EXT4_I(inode)->i_data_sem));
			ext4_free_ext_path(path);
			if (ret)
				goto out;
			goto next;
		}

		if (start_pblk + cur - start != map.m_pblk) {
			/*
			 * Logical to physical mapping changed. This can happen
			 * if this range was removed and then reallocated to
			 * map to new physical blocks during a fast commit.
			 */
			ret = ext4_ext_replay_update_ex(inode, cur, map.m_len,
					ext4_ext_is_unwritten(ex),
					start_pblk + cur - start);
			if (ret)
				goto out;
			/*
			 * Mark the old blocks as free since they aren't used
			 * anymore. We maintain an array of all the modified
			 * inodes. In case these blocks are still used at either
			 * a different logical range in the same inode or in
			 * some different inode, we will mark them as allocated
			 * at the end of the FC replay using our array of
			 * modified inodes.
			 */
			ext4_mb_mark_bb(inode->i_sb, map.m_pblk, map.m_len, 0);
			goto next;
		}

		/* Range is mapped and needs a state change */
		ext4_debug("Converting from %ld to %d %lld",
				map.m_flags & EXT4_MAP_UNWRITTEN,
			ext4_ext_is_unwritten(ex), map.m_pblk);
		ret = ext4_ext_replay_update_ex(inode, cur, map.m_len,
					ext4_ext_is_unwritten(ex), map.m_pblk);
		if (ret)
			goto out;
		/*
		 * We may have split the extent tree while toggling the state.
		 * Try to shrink the extent tree now.
		 */
		ext4_ext_replay_shrink_inode(inode, start + len);
next:
		cur += map.m_len;
		remaining -= map.m_len;
	}

	ext4_ext_replay_shrink_inode(inode, i_size_read(inode) >>
		sb->s_blocksize_bits);
out:
	iput(inode);
	return 0;
}
/* Replay DEL_RANGE tag */
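/*
 * Free every currently mapped block in the range, then punch the whole
 * range out of the extent tree. Blocks freed here that are still in
 * use elsewhere get re-marked as allocated at the end of replay by
 * ext4_fc_set_bitmaps_and_counters().
 */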
static int
ext4_fc_replay_del_range(struct super_block *sb,
			 struct ext4_fc_tl_mem *tl, u8 *val)
{
	struct inode *inode;
	struct ext4_fc_del_range lrange;
	struct ext4_map_blocks map;
	ext4_lblk_t cur, remaining;
	int ret;

	memcpy(&lrange, val, sizeof(lrange));
	cur = le32_to_cpu(lrange.fc_lblk);
	remaining = le32_to_cpu(lrange.fc_len);

	trace_ext4_fc_replay(sb, EXT4_FC_TAG_DEL_RANGE,
		le32_to_cpu(lrange.fc_ino), cur, remaining);

	inode = ext4_iget(sb, le32_to_cpu(lrange.fc_ino), EXT4_IGET_NORMAL);
	if (IS_ERR(inode)) {
		ext4_debug("Inode %d not found", le32_to_cpu(lrange.fc_ino));
		return 0;
	}

	ret = ext4_fc_record_modified_inode(sb, inode->i_ino);
	if (ret)
		goto out;

	ext4_debug("DEL_RANGE, inode %ld, lblk %d, len %d\n",
			inode->i_ino, le32_to_cpu(lrange.fc_lblk),
			le32_to_cpu(lrange.fc_len));
	while (remaining > 0) {
		map.m_lblk = cur;
		map.m_len = remaining;

		ret = ext4_map_blocks(NULL, inode, &map, 0);
		if (ret < 0)
			goto out;
		if (ret > 0) {
			remaining -= ret;
			cur += ret;
			ext4_mb_mark_bb(inode->i_sb, map.m_pblk, map.m_len, 0);
		} else {
			remaining -= map.m_len;
			cur += map.m_len;
		}
	}

	down_write(&EXT4_I(inode)->i_data_sem);
	ret = ext4_ext_remove_space(inode, le32_to_cpu(lrange.fc_lblk),
				le32_to_cpu(lrange.fc_lblk) +
				le32_to_cpu(lrange.fc_len) - 1);
	up_write(&EXT4_I(inode)->i_data_sem);
	if (ret)
		goto out;
	ext4_ext_replay_shrink_inode(inode,
		i_size_read(inode) >> sb->s_blocksize_bits);
	ext4_mark_inode_dirty(NULL, inode);
out:
	iput(inode);
	return 0;
}
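
/*
 * Final fixup pass: for every inode recorded as modified during replay,
 * walk its mappings and mark both the data blocks and the extent tree
 * index blocks as in use in the block bitmaps, since the replay-time
 * allocations bypassed the normal allocator bookkeeping.
 */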
static void ext4_fc_set_bitmaps_and_counters(struct super_block *sb)
{
	struct ext4_fc_replay_state *state;
	struct inode *inode;
	struct ext4_ext_path *path = NULL;
	struct ext4_map_blocks map;
	int i, ret, j;
	ext4_lblk_t cur, end;

	state = &EXT4_SB(sb)->s_fc_replay_state;
	for (i = 0; i < state->fc_modified_inodes_used; i++) {
		inode = ext4_iget(sb, state->fc_modified_inodes[i],
			EXT4_IGET_NORMAL);
		if (IS_ERR(inode)) {
			ext4_debug("Inode %d not found.",
				state->fc_modified_inodes[i]);
			continue;
		}
		cur = 0;
		end = EXT_MAX_BLOCKS;
		if (ext4_test_inode_flag(inode, EXT4_INODE_INLINE_DATA)) {
			iput(inode);
			continue;
		}
		while (cur < end) {
			map.m_lblk = cur;
			map.m_len = end - cur;

			ret = ext4_map_blocks(NULL, inode, &map, 0);
			if (ret < 0)
				break;

			if (ret > 0) {
				path = ext4_find_extent(inode, map.m_lblk, NULL, 0);
				if (!IS_ERR(path)) {
					for (j = 0; j < path->p_depth; j++)
						ext4_mb_mark_bb(inode->i_sb,
							path[j].p_block, 1, 1);
					ext4_free_ext_path(path);
				}
				cur += ret;
				ext4_mb_mark_bb(inode->i_sb, map.m_pblk,
					map.m_len, 1);
			} else {
				cur = cur + (map.m_len ? map.m_len : 1);
			}
		}
		iput(inode);
	}
}
/*
 * Check if block is in excluded regions for block allocation. The simple
 * allocator that runs during the replay phase calls this function to see
 * if it is okay to use a block.
 */
bool ext4_fc_replay_check_excluded(struct super_block *sb, ext4_fsblk_t blk)
{
	int i;
	struct ext4_fc_replay_state *state;

	state = &EXT4_SB(sb)->s_fc_replay_state;
	for (i = 0; i < state->fc_regions_valid; i++) {
		if (state->fc_regions[i].ino == 0 ||
			state->fc_regions[i].len == 0)
			continue;
		if (in_range(blk, state->fc_regions[i].pblk,
					state->fc_regions[i].len))
			return true;
	}
	return false;
}
/* Cleanup function called after replay */
void ext4_fc_replay_cleanup(struct super_block *sb)
{
	struct ext4_sb_info *sbi = EXT4_SB(sb);

	sbi->s_mount_state &= ~EXT4_FC_REPLAY;
	kfree(sbi->s_fc_replay_state.fc_regions);
	kfree(sbi->s_fc_replay_state.fc_modified_inodes);
}
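
/*
 * Validate that a tag's value length is consistent with the payload the
 * tag is supposed to carry: fixed-size structs for the range and head
 * tags, a struct plus a 1..EXT4_NAME_LEN name for dentry tags, and a
 * raw inode of at least EXT4_GOOD_OLD_INODE_SIZE bytes for inode tags.
 */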
static bool ext4_fc_value_len_isvalid(struct ext4_sb_info *sbi,
				      int tag, int len)
{
	switch (tag) {
	case EXT4_FC_TAG_ADD_RANGE:
		return len == sizeof(struct ext4_fc_add_range);
	case EXT4_FC_TAG_DEL_RANGE:
		return len == sizeof(struct ext4_fc_del_range);
	case EXT4_FC_TAG_CREAT:
	case EXT4_FC_TAG_LINK:
	case EXT4_FC_TAG_UNLINK:
		len -= sizeof(struct ext4_fc_dentry_info);
		return len >= 1 && len <= EXT4_NAME_LEN;
	case EXT4_FC_TAG_INODE:
		len -= sizeof(struct ext4_fc_inode);
		return len >= EXT4_GOOD_OLD_INODE_SIZE &&
			len <= sbi->s_inode_size;
	case EXT4_FC_TAG_PAD:
		return true; /* padding can have any length */
	case EXT4_FC_TAG_TAIL:
		return len >= sizeof(struct ext4_fc_tail);
	case EXT4_FC_TAG_HEAD:
		return len == sizeof(struct ext4_fc_head);
	}
	return false;
}
/*
 * Recovery Scan phase handler
 *
 * This function is called during the scan phase and is responsible
 * for doing the following things:
 * - Make sure the fast commit area has valid tags for replay
 * - Count the number of tags that need to be replayed by the replay handler
 * - Verify the CRC
 * - Create a list of excluded blocks for allocation during the replay phase
 *
 * This function returns JBD2_FC_REPLAY_CONTINUE to indicate that SCAN is
 * incomplete and JBD2 should send more blocks. It returns JBD2_FC_REPLAY_STOP
 * to indicate that scan has finished and JBD2 can now start the replay phase.
 * It returns a negative error to indicate that there was an error. At the end
 * of a successful scan phase, sbi->s_fc_replay_state.fc_replay_num_tags is set
 * to indicate the number of tags that need to be replayed during the replay
 * phase.
 */
static int ext4_fc_replay_scan(journal_t *journal,
				struct buffer_head *bh, int off,
				tid_t expected_tid)
{
	struct super_block *sb = journal->j_private;
	struct ext4_sb_info *sbi = EXT4_SB(sb);
	struct ext4_fc_replay_state *state;
	int ret = JBD2_FC_REPLAY_CONTINUE;
	struct ext4_fc_add_range ext;
	struct ext4_fc_tl_mem tl;
	struct ext4_fc_tail tail;
	__u8 *start, *end, *cur, *val;
	struct ext4_fc_head head;
	struct ext4_extent *ex;

	state = &sbi->s_fc_replay_state;

	start = (u8 *)bh->b_data;
	end = start + journal->j_blocksize;

	if (state->fc_replay_expected_off == 0) {
		state->fc_cur_tag = 0;
		state->fc_replay_num_tags = 0;
		state->fc_crc = 0;
		state->fc_regions = NULL;
		state->fc_regions_valid = state->fc_regions_used =
			state->fc_regions_size = 0;
		/* Check if we can stop early */
		if (le16_to_cpu(((struct ext4_fc_tl *)start)->fc_tag)
			!= EXT4_FC_TAG_HEAD)
			return 0;
	}

	if (off != state->fc_replay_expected_off) {
		ret = -EFSCORRUPTED;
		goto out_err;
	}

	state->fc_replay_expected_off++;
	for (cur = start; cur <= end - EXT4_FC_TAG_BASE_LEN;
	     cur = cur + EXT4_FC_TAG_BASE_LEN + tl.fc_len) {
		ext4_fc_get_tl(&tl, cur);
		val = cur + EXT4_FC_TAG_BASE_LEN;
		if (tl.fc_len > end - val ||
		    !ext4_fc_value_len_isvalid(sbi, tl.fc_tag, tl.fc_len)) {
			ret = state->fc_replay_num_tags ?
				JBD2_FC_REPLAY_STOP : -ECANCELED;
			goto out_err;
		}
		ext4_debug("Scan phase, tag:%s, blk %lld\n",
			   tag2str(tl.fc_tag), bh->b_blocknr);
		switch (tl.fc_tag) {
		case EXT4_FC_TAG_ADD_RANGE:
			memcpy(&ext, val, sizeof(ext));
			ex = (struct ext4_extent *)&ext.fc_ex;
			ret = ext4_fc_record_regions(sb,
				le32_to_cpu(ext.fc_ino),
				le32_to_cpu(ex->ee_block), ext4_ext_pblock(ex),
				ext4_ext_get_actual_len(ex), 0);
			if (ret < 0)
				break;
			ret = JBD2_FC_REPLAY_CONTINUE;
			fallthrough;
		case EXT4_FC_TAG_DEL_RANGE:
		case EXT4_FC_TAG_LINK:
		case EXT4_FC_TAG_UNLINK:
		case EXT4_FC_TAG_CREAT:
		case EXT4_FC_TAG_INODE:
		case EXT4_FC_TAG_PAD:
			state->fc_cur_tag++;
			state->fc_crc = ext4_chksum(sbi, state->fc_crc, cur,
				EXT4_FC_TAG_BASE_LEN + tl.fc_len);
			break;
		case EXT4_FC_TAG_TAIL:
			state->fc_cur_tag++;
			memcpy(&tail, val, sizeof(tail));
			state->fc_crc = ext4_chksum(sbi, state->fc_crc, cur,
						EXT4_FC_TAG_BASE_LEN +
						offsetof(struct ext4_fc_tail,
						fc_crc));
			if (le32_to_cpu(tail.fc_tid) == expected_tid &&
				le32_to_cpu(tail.fc_crc) == state->fc_crc) {
				state->fc_replay_num_tags = state->fc_cur_tag;
				state->fc_regions_valid =
					state->fc_regions_used;
			} else {
				ret = state->fc_replay_num_tags ?
					JBD2_FC_REPLAY_STOP : -EFSBADCRC;
			}
			state->fc_crc = 0;
			break;
		case EXT4_FC_TAG_HEAD:
			memcpy(&head, val, sizeof(head));
			if (le32_to_cpu(head.fc_features) &
				~EXT4_FC_SUPPORTED_FEATURES) {
				ret = -EOPNOTSUPP;
				break;
			}
			if (le32_to_cpu(head.fc_tid) != expected_tid) {
				ret = JBD2_FC_REPLAY_STOP;
				break;
			}
			state->fc_cur_tag++;
			state->fc_crc = ext4_chksum(sbi, state->fc_crc, cur,
				EXT4_FC_TAG_BASE_LEN + tl.fc_len);
			break;
		default:
			ret = state->fc_replay_num_tags ?
				JBD2_FC_REPLAY_STOP : -ECANCELED;
		}
		if (ret < 0 || ret == JBD2_FC_REPLAY_STOP)
			break;
	}

out_err:
	trace_ext4_fc_replay_scan(sb, ret, off);
	return ret;
}
/*
 * Main recovery path entry point.
 * The meaning of the return codes is the same as for ext4_fc_replay_scan()
 * above.
 */
static int ext4_fc_replay(journal_t *journal, struct buffer_head *bh,
				enum passtype pass, int off, tid_t expected_tid)
{
	struct super_block *sb = journal->j_private;
	struct ext4_sb_info *sbi = EXT4_SB(sb);
	struct ext4_fc_tl_mem tl;
	__u8 *start, *end, *cur, *val;
	int ret = JBD2_FC_REPLAY_CONTINUE;
	struct ext4_fc_replay_state *state = &sbi->s_fc_replay_state;
	struct ext4_fc_tail tail;

	if (pass == PASS_SCAN) {
		state->fc_current_pass = PASS_SCAN;
		return ext4_fc_replay_scan(journal, bh, off, expected_tid);
	}

	if (state->fc_current_pass != pass) {
		state->fc_current_pass = pass;
		sbi->s_mount_state |= EXT4_FC_REPLAY;
	}
	if (!sbi->s_fc_replay_state.fc_replay_num_tags) {
		ext4_debug("Replay stops\n");
		ext4_fc_set_bitmaps_and_counters(sb);
		return 0;
	}

#ifdef CONFIG_EXT4_DEBUG
	if (sbi->s_fc_debug_max_replay && off >= sbi->s_fc_debug_max_replay) {
		pr_warn("Dropping fc block %d because max_replay set\n", off);
		return JBD2_FC_REPLAY_STOP;
	}
#endif

	start = (u8 *)bh->b_data;
	end = start + journal->j_blocksize;

	for (cur = start; cur <= end - EXT4_FC_TAG_BASE_LEN;
	     cur = cur + EXT4_FC_TAG_BASE_LEN + tl.fc_len) {
		ext4_fc_get_tl(&tl, cur);
		val = cur + EXT4_FC_TAG_BASE_LEN;

		if (state->fc_replay_num_tags == 0) {
			ret = JBD2_FC_REPLAY_STOP;
			ext4_fc_set_bitmaps_and_counters(sb);
			break;
		}

		ext4_debug("Replay phase, tag:%s\n", tag2str(tl.fc_tag));
		state->fc_replay_num_tags--;
		switch (tl.fc_tag) {
		case EXT4_FC_TAG_LINK:
			ret = ext4_fc_replay_link(sb, &tl, val);
			break;
		case EXT4_FC_TAG_UNLINK:
			ret = ext4_fc_replay_unlink(sb, &tl, val);
			break;
		case EXT4_FC_TAG_ADD_RANGE:
			ret = ext4_fc_replay_add_range(sb, &tl, val);
			break;
		case EXT4_FC_TAG_CREAT:
			ret = ext4_fc_replay_create(sb, &tl, val);
			break;
		case EXT4_FC_TAG_DEL_RANGE:
			ret = ext4_fc_replay_del_range(sb, &tl, val);
			break;
		case EXT4_FC_TAG_INODE:
			ret = ext4_fc_replay_inode(sb, &tl, val);
			break;
		case EXT4_FC_TAG_PAD:
			trace_ext4_fc_replay(sb, EXT4_FC_TAG_PAD, 0,
					     tl.fc_len, 0);
			break;
		case EXT4_FC_TAG_TAIL:
			trace_ext4_fc_replay(sb, EXT4_FC_TAG_TAIL,
					     0, tl.fc_len, 0);
			memcpy(&tail, val, sizeof(tail));
			WARN_ON(le32_to_cpu(tail.fc_tid) != expected_tid);
			break;
		case EXT4_FC_TAG_HEAD:
			break;
		default:
			trace_ext4_fc_replay(sb, tl.fc_tag, 0, tl.fc_len, 0);
			ret = -ECANCELED;
			break;
		}
		if (ret < 0)
			break;
		ret = JBD2_FC_REPLAY_CONTINUE;
	}
	return ret;
}
void ext4_fc_init(struct super_block *sb, journal_t *journal)
{
	/*
	 * We set the replay callback even if fast commit is disabled because
	 * we could still have fast commit blocks that need to be replayed
	 * even if fast commit has now been turned off.
	 */
	journal->j_fc_replay_callback = ext4_fc_replay;
	if (!test_opt2(sb, JOURNAL_FAST_COMMIT))
		return;
	journal->j_fc_cleanup_callback = ext4_fc_cleanup;
}
static const char * const fc_ineligible_reasons[] = {
	[EXT4_FC_REASON_XATTR] = "Extended attributes changed",
	[EXT4_FC_REASON_CROSS_RENAME] = "Cross rename",
	[EXT4_FC_REASON_JOURNAL_FLAG_CHANGE] = "Journal flag changed",
	[EXT4_FC_REASON_NOMEM] = "Insufficient memory",
	[EXT4_FC_REASON_SWAP_BOOT] = "Swap boot",
	[EXT4_FC_REASON_RESIZE] = "Resize",
	[EXT4_FC_REASON_RENAME_DIR] = "Dir renamed",
	[EXT4_FC_REASON_FALLOC_RANGE] = "Falloc range op",
	[EXT4_FC_REASON_INODE_JOURNAL_DATA] = "Data journalling",
	[EXT4_FC_REASON_ENCRYPTED_FILENAME] = "Encrypted filename",
};
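
/* Dump fast commit statistics and per-reason ineligibility counts. */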
int ext4_fc_info_show(struct seq_file *seq, void *v)
{
	struct ext4_sb_info *sbi = EXT4_SB((struct super_block *)seq->private);
	struct ext4_fc_stats *stats = &sbi->s_fc_stats;
	int i;

	if (v != SEQ_START_TOKEN)
		return 0;

	seq_printf(seq,
		"fc stats:\n%ld commits\n%ld ineligible\n%ld numblks\n%lluus avg_commit_time\n",
		   stats->fc_num_commits, stats->fc_ineligible_commits,
		   stats->fc_numblks,
		   div_u64(stats->s_fc_avg_commit_time, 1000));
	seq_puts(seq, "Ineligible reasons:\n");
	for (i = 0; i < EXT4_FC_REASON_MAX; i++)
		seq_printf(seq, "\"%s\":\t%d\n", fc_ineligible_reasons[i],
			stats->fc_ineligible_reason_count[i]);

	return 0;
}
int __init ext4_fc_init_dentry_cache(void)
{
	ext4_fc_dentry_cachep = KMEM_CACHE(ext4_fc_dentry_update,
					   SLAB_RECLAIM_ACCOUNT);

	if (ext4_fc_dentry_cachep == NULL)
		return -ENOMEM;

	return 0;
}

void ext4_fc_destroy_dentry_cache(void)
{
	kmem_cache_destroy(ext4_fc_dentry_cachep);
}