// SPDX-License-Identifier: GPL-2.0
/*
 *  linux/fs/ext4/resize.c
 *
 * Support for resizing an ext4 filesystem while it is mounted.
 *
 * Copyright (C) 2001, 2002 Andreas Dilger <[email protected]>
 *
 * This could probably be made into a module, because it is not often in use.
 */

#define EXT4FS_DEBUG

#include <linux/errno.h>
#include <linux/slab.h>

#include "ext4_jbd2.h"

struct ext4_rcu_ptr {
	struct rcu_head rcu;
	void *ptr;
};

static void ext4_rcu_ptr_callback(struct rcu_head *head)
{
	struct ext4_rcu_ptr *ptr;

	ptr = container_of(head, struct ext4_rcu_ptr, rcu);
	kvfree(ptr->ptr);
	kfree(ptr);
}

void ext4_kvfree_array_rcu(void *to_free)
{
	struct ext4_rcu_ptr *ptr = kzalloc(sizeof(*ptr), GFP_KERNEL);

	if (ptr) {
		ptr->ptr = to_free;
		call_rcu(&ptr->rcu, ext4_rcu_ptr_callback);
		return;
	}
	synchronize_rcu();
	kvfree(to_free);
}
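
/*
 * Note: readers access the group descriptor and flex group arrays under
 * rcu_read_lock() (see the rcu_dereference() users below), so a resize
 * must not free a superseded array until a grace period has elapsed.
 * ext4_kvfree_array_rcu() defers the kvfree() via call_rcu() when the
 * small wrapper above can be allocated, and falls back to a synchronous
 * synchronize_rcu() + kvfree() when it cannot.
 */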

int ext4_resize_begin(struct super_block *sb)
{
	struct ext4_sb_info *sbi = EXT4_SB(sb);
	int ret = 0;

	if (!capable(CAP_SYS_RESOURCE))
		return -EPERM;

	/*
	 * If the number of reserved GDT blocks is non-zero, the resize_inode
	 * feature should always be set.
	 */
	if (EXT4_SB(sb)->s_es->s_reserved_gdt_blocks &&
	    !ext4_has_feature_resize_inode(sb)) {
		ext4_error(sb, "resize_inode disabled but reserved GDT blocks non-zero");
		return -EFSCORRUPTED;
	}

	/*
	 * If we are not using the primary superblock/GDT copy don't resize,
	 * because the user tools have no way of handling this.  Probably a
	 * bad time to do it anyways.
	 */
	if (EXT4_B2C(sbi, sbi->s_sbh->b_blocknr) !=
	    le32_to_cpu(EXT4_SB(sb)->s_es->s_first_data_block)) {
		ext4_warning(sb, "won't resize using backup superblock at %llu",
			(unsigned long long)EXT4_SB(sb)->s_sbh->b_blocknr);
		return -EPERM;
	}

	/*
	 * We are not allowed to do online-resizing on a filesystem mounted
	 * with error, because it can destroy the filesystem easily.
	 */
	if (EXT4_SB(sb)->s_mount_state & EXT4_ERROR_FS) {
		ext4_warning(sb, "There are errors in the filesystem, "
			     "so online resizing is not allowed");
		return -EPERM;
	}

	if (ext4_has_feature_sparse_super2(sb)) {
		ext4_msg(sb, KERN_ERR, "Online resizing not supported with sparse_super2");
		return -EOPNOTSUPP;
	}

	if (test_and_set_bit_lock(EXT4_FLAGS_RESIZING,
				  &EXT4_SB(sb)->s_ext4_flags))
		ret = -EBUSY;

	return ret;
}

void ext4_resize_end(struct super_block *sb)
{
	clear_bit_unlock(EXT4_FLAGS_RESIZING, &EXT4_SB(sb)->s_ext4_flags);
	smp_mb__after_atomic();
}

static ext4_group_t ext4_meta_bg_first_group(struct super_block *sb,
					     ext4_group_t group)
{
	return (group >> EXT4_DESC_PER_BLOCK_BITS(sb)) <<
	       EXT4_DESC_PER_BLOCK_BITS(sb);
}
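
/*
 * Example (assuming 4KiB blocks and 32-byte group descriptors, so
 * EXT4_DESC_PER_BLOCK(sb) == 128): groups 0-127 all map to meta_bg
 * first group 0, groups 128-255 map to 128, and so on.  Each such span
 * of groups shares the group descriptor block of its meta_bg group.
 */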

static ext4_fsblk_t ext4_meta_bg_first_block_no(struct super_block *sb,
						ext4_group_t group)
{
	group = ext4_meta_bg_first_group(sb, group);
	return ext4_group_first_block_no(sb, group);
}

static ext4_grpblk_t ext4_group_overhead_blocks(struct super_block *sb,
						ext4_group_t group)
{
	ext4_grpblk_t overhead;

	overhead = ext4_bg_num_gdb(sb, group);
	if (ext4_bg_has_super(sb, group))
		overhead += 1 +
			le16_to_cpu(EXT4_SB(sb)->s_es->s_reserved_gdt_blocks);
	return overhead;
}
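
/*
 * For example, a group that carries backups (per ext4_bg_has_super())
 * pays for one superblock copy, s_reserved_gdt_blocks reserved GDT
 * blocks, and its GDT blocks from ext4_bg_num_gdb(); a group without
 * backups pays only for GDT blocks, which may be zero (e.g. for most
 * groups of a meta_bg filesystem).
 */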

#define outside(b, first, last)	((b) < (first) || (b) >= (last))
#define inside(b, first, last)	((b) >= (first) && (b) < (last))

static int verify_group_input(struct super_block *sb,
			      struct ext4_new_group_data *input)
{
	struct ext4_sb_info *sbi = EXT4_SB(sb);
	struct ext4_super_block *es = sbi->s_es;
	ext4_fsblk_t start = ext4_blocks_count(es);
	ext4_fsblk_t end = start + input->blocks_count;
	ext4_group_t group = input->group;
	ext4_fsblk_t itend = input->inode_table + sbi->s_itb_per_group;
	unsigned overhead;
	ext4_fsblk_t metaend;
	struct buffer_head *bh = NULL;
	ext4_grpblk_t free_blocks_count, offset;
	int err = -EINVAL;

	if (group != sbi->s_groups_count) {
		ext4_warning(sb, "Cannot add at group %u (only %u groups)",
			     input->group, sbi->s_groups_count);
		return -EINVAL;
	}

	overhead = ext4_group_overhead_blocks(sb, group);
	metaend = start + overhead;
	input->free_clusters_count = free_blocks_count =
		input->blocks_count - 2 - overhead - sbi->s_itb_per_group;

	if (test_opt(sb, DEBUG))
		printk(KERN_DEBUG "EXT4-fs: adding %s group %u: %u blocks "
		       "(%d free, %u reserved)\n",
		       ext4_bg_has_super(sb, input->group) ? "normal" :
		       "no-super", input->group, input->blocks_count,
		       free_blocks_count, input->reserved_blocks);

	ext4_get_group_no_and_offset(sb, start, NULL, &offset);
	if (offset != 0)
		ext4_warning(sb, "Last group not full");
	else if (input->reserved_blocks > input->blocks_count / 5)
		ext4_warning(sb, "Reserved blocks too high (%u)",
			     input->reserved_blocks);
	else if (free_blocks_count < 0)
		ext4_warning(sb, "Bad blocks count %u",
			     input->blocks_count);
	else if (IS_ERR(bh = ext4_sb_bread(sb, end - 1, 0))) {
		err = PTR_ERR(bh);
		bh = NULL;
		ext4_warning(sb, "Cannot read last block (%llu)",
			     end - 1);
	} else if (outside(input->block_bitmap, start, end))
		ext4_warning(sb, "Block bitmap not in group (block %llu)",
			     (unsigned long long)input->block_bitmap);
	else if (outside(input->inode_bitmap, start, end))
		ext4_warning(sb, "Inode bitmap not in group (block %llu)",
			     (unsigned long long)input->inode_bitmap);
	else if (outside(input->inode_table, start, end) ||
		 outside(itend - 1, start, end))
		ext4_warning(sb, "Inode table not in group (blocks %llu-%llu)",
			     (unsigned long long)input->inode_table, itend - 1);
	else if (input->inode_bitmap == input->block_bitmap)
		ext4_warning(sb, "Block bitmap same as inode bitmap (%llu)",
			     (unsigned long long)input->block_bitmap);
	else if (inside(input->block_bitmap, input->inode_table, itend))
		ext4_warning(sb, "Block bitmap (%llu) in inode table "
			     "(%llu-%llu)",
			     (unsigned long long)input->block_bitmap,
			     (unsigned long long)input->inode_table, itend - 1);
	else if (inside(input->inode_bitmap, input->inode_table, itend))
		ext4_warning(sb, "Inode bitmap (%llu) in inode table "
			     "(%llu-%llu)",
			     (unsigned long long)input->inode_bitmap,
			     (unsigned long long)input->inode_table, itend - 1);
	else if (inside(input->block_bitmap, start, metaend))
		ext4_warning(sb, "Block bitmap (%llu) in GDT table (%llu-%llu)",
			     (unsigned long long)input->block_bitmap,
			     start, metaend - 1);
	else if (inside(input->inode_bitmap, start, metaend))
		ext4_warning(sb, "Inode bitmap (%llu) in GDT table (%llu-%llu)",
			     (unsigned long long)input->inode_bitmap,
			     start, metaend - 1);
	else if (inside(input->inode_table, start, metaend) ||
		 inside(itend - 1, start, metaend))
		ext4_warning(sb, "Inode table (%llu-%llu) overlaps GDT table "
			     "(%llu-%llu)",
			     (unsigned long long)input->inode_table,
			     itend - 1, start, metaend - 1);
	else
		err = 0;
	brelse(bh);

	return err;
}

/*
 * ext4_new_flex_group_data is used by the 64bit-resize interface to add a
 * flex group each time.
 */
struct ext4_new_flex_group_data {
	struct ext4_new_group_data *groups;	/* new_group_data for groups
						   in the flex group */
	__u16 *bg_flags;			/* block group flags of groups
						   in @groups */
	ext4_group_t count;			/* number of groups in @groups
						 */
};

/*
 * alloc_flex_gd() allocates an ext4_new_flex_group_data of size
 * @flexbg_size.
 *
 * Returns NULL on failure otherwise address of the allocated structure.
 */
static struct ext4_new_flex_group_data *alloc_flex_gd(unsigned long flexbg_size)
{
	struct ext4_new_flex_group_data *flex_gd;

	flex_gd = kmalloc(sizeof(*flex_gd), GFP_NOFS);
	if (flex_gd == NULL)
		goto out3;

	if (flexbg_size >= UINT_MAX / sizeof(struct ext4_new_group_data))
		goto out2;
	flex_gd->count = flexbg_size;

	flex_gd->groups = kmalloc_array(flexbg_size,
					sizeof(struct ext4_new_group_data),
					GFP_NOFS);
	if (flex_gd->groups == NULL)
		goto out2;

	flex_gd->bg_flags = kmalloc_array(flexbg_size, sizeof(__u16),
					  GFP_NOFS);
	if (flex_gd->bg_flags == NULL)
		goto out1;

	return flex_gd;

out1:
	kfree(flex_gd->groups);
out2:
	kfree(flex_gd);
out3:
	return NULL;
}
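
/*
 * Note: callers derive @flexbg_size from the flex group geometry (for
 * flex_bg filesystems the 64bit resize path is expected to pass
 * 1 << sbi->s_log_groups_per_flex, and 1 otherwise), so the UINT_MAX
 * overflow check above is a defensive bound rather than a limit that
 * is hit in practice.
 */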

static void free_flex_gd(struct ext4_new_flex_group_data *flex_gd)
{
	kfree(flex_gd->bg_flags);
	kfree(flex_gd->groups);
	kfree(flex_gd);
}

/*
 * ext4_alloc_group_tables() allocates block bitmaps, inode bitmaps
 * and inode tables for a flex group.
 *
 * This function is used by 64bit-resize.  Note that this function allocates
 * group tables from the 1st group of groups contained by @flexgd, which may
 * be only part of a flex group.
 *
 * @sb: super block of fs to which the groups belong
 *
 * Returns 0 on a successful allocation of the metadata blocks in the
 * block group.
 */
static int ext4_alloc_group_tables(struct super_block *sb,
				   struct ext4_new_flex_group_data *flex_gd,
				   int flexbg_size)
{
	struct ext4_new_group_data *group_data = flex_gd->groups;
	ext4_fsblk_t start_blk;
	ext4_fsblk_t last_blk;
	ext4_group_t src_group;
	ext4_group_t bb_index = 0;
	ext4_group_t ib_index = 0;
	ext4_group_t it_index = 0;
	ext4_group_t group;
	ext4_group_t last_group;
	unsigned overhead;
	__u16 uninit_mask = (flexbg_size > 1) ? ~EXT4_BG_BLOCK_UNINIT : ~0;
	int i;

	BUG_ON(flex_gd->count == 0 || group_data == NULL);

	src_group = group_data[0].group;
	last_group = src_group + flex_gd->count - 1;

	BUG_ON((flexbg_size > 1) && ((src_group & ~(flexbg_size - 1)) !=
	       (last_group & ~(flexbg_size - 1))));
next_group:
	group = group_data[0].group;
	if (src_group >= group_data[0].group + flex_gd->count)
		return -ENOSPC;
	start_blk = ext4_group_first_block_no(sb, src_group);
	last_blk = start_blk + group_data[src_group - group].blocks_count;

	overhead = ext4_group_overhead_blocks(sb, src_group);

	start_blk += overhead;

	/* We collect contiguous blocks as much as possible. */
	src_group++;
	for (; src_group <= last_group; src_group++) {
		overhead = ext4_group_overhead_blocks(sb, src_group);
		if (overhead == 0)
			last_blk += group_data[src_group - group].blocks_count;
		else
			break;
	}

	/* Allocate block bitmaps */
	for (; bb_index < flex_gd->count; bb_index++) {
		if (start_blk >= last_blk)
			goto next_group;
		group_data[bb_index].block_bitmap = start_blk++;
		group = ext4_get_group_number(sb, start_blk - 1);
		group -= group_data[0].group;
		group_data[group].mdata_blocks++;
		flex_gd->bg_flags[group] &= uninit_mask;
	}

	/* Allocate inode bitmaps */
	for (; ib_index < flex_gd->count; ib_index++) {
		if (start_blk >= last_blk)
			goto next_group;
		group_data[ib_index].inode_bitmap = start_blk++;
		group = ext4_get_group_number(sb, start_blk - 1);
		group -= group_data[0].group;
		group_data[group].mdata_blocks++;
		flex_gd->bg_flags[group] &= uninit_mask;
	}

	/* Allocate inode tables */
	for (; it_index < flex_gd->count; it_index++) {
		unsigned int itb = EXT4_SB(sb)->s_itb_per_group;
		ext4_fsblk_t next_group_start;

		if (start_blk + itb > last_blk)
			goto next_group;
		group_data[it_index].inode_table = start_blk;
		group = ext4_get_group_number(sb, start_blk);
		next_group_start = ext4_group_first_block_no(sb, group + 1);
		group -= group_data[0].group;

		if (start_blk + itb > next_group_start) {
			flex_gd->bg_flags[group + 1] &= uninit_mask;
			overhead = start_blk + itb - next_group_start;
			group_data[group + 1].mdata_blocks += overhead;
			itb -= overhead;
		}

		group_data[group].mdata_blocks += itb;
		flex_gd->bg_flags[group] &= uninit_mask;
		start_blk += EXT4_SB(sb)->s_itb_per_group;
	}

	/* Update free clusters count to exclude metadata blocks */
	for (i = 0; i < flex_gd->count; i++) {
		group_data[i].free_clusters_count -=
				EXT4_NUM_B2C(EXT4_SB(sb),
					     group_data[i].mdata_blocks);
	}

	if (test_opt(sb, DEBUG)) {
		int i;
		group = group_data[0].group;

		printk(KERN_DEBUG "EXT4-fs: adding a flex group with "
		       "%d groups, flexbg size is %d:\n", flex_gd->count,
		       flexbg_size);

		for (i = 0; i < flex_gd->count; i++) {
			ext4_debug(
			       "adding %s group %u: %u blocks (%d free, %d mdata blocks)\n",
			       ext4_bg_has_super(sb, group + i) ? "normal" :
			       "no-super", group + i,
			       group_data[i].blocks_count,
			       group_data[i].free_clusters_count,
			       group_data[i].mdata_blocks);
		}
	}
	return 0;
}
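
/*
 * The resulting layout packs all block bitmaps, then all inode bitmaps,
 * then all inode tables contiguously, starting just past the overhead
 * blocks of the first group in @flex_gd and moving to the next
 * contiguous run (next_group:) only when the current run of data blocks
 * is exhausted.  The mdata_blocks/free_clusters_count bookkeeping above
 * charges each metadata block to the group it physically lands in, not
 * the group it serves.
 */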

static struct buffer_head *bclean(handle_t *handle, struct super_block *sb,
				  ext4_fsblk_t blk)
{
	struct buffer_head *bh;
	int err;

	bh = sb_getblk(sb, blk);
	if (unlikely(!bh))
		return ERR_PTR(-ENOMEM);
	BUFFER_TRACE(bh, "get_write_access");
	if ((err = ext4_journal_get_write_access(handle, bh))) {
		brelse(bh);
		bh = ERR_PTR(err);
	} else {
		memset(bh->b_data, 0, sb->s_blocksize);
		set_buffer_uptodate(bh);
	}

	return bh;
}

static int ext4_resize_ensure_credits_batch(handle_t *handle, int credits)
{
	return ext4_journal_ensure_credits_fn(handle, credits,
					      EXT4_MAX_TRANS_DATA, 0, 0);
}
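
/*
 * A thin wrapper: when fewer than @credits remain on @handle,
 * ext4_journal_ensure_credits_fn() tries to extend the handle up to
 * EXT4_MAX_TRANS_DATA credits, restarting the transaction if extending
 * is not possible.  Callers below treat a negative return as a hard
 * error and must not assume buffers joined before a (possible) restart
 * are still part of the running transaction.
 */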

/*
 * set_flexbg_block_bitmap() marks clusters [@first_cluster, @last_cluster]
 * used.
 *
 * Helper function for setup_new_flex_group_blocks() which marks these
 * clusters in the block bitmaps of the new groups.
 *
 * @sb: super block
 * @handle: journal handle
 * @flex_gd: flex group data
 */
static int set_flexbg_block_bitmap(struct super_block *sb, handle_t *handle,
			struct ext4_new_flex_group_data *flex_gd,
			ext4_fsblk_t first_cluster, ext4_fsblk_t last_cluster)
{
	struct ext4_sb_info *sbi = EXT4_SB(sb);
	ext4_group_t count = last_cluster - first_cluster + 1;
	ext4_group_t count2;

	ext4_debug("mark clusters [%llu-%llu] used\n", first_cluster,
		   last_cluster);
	for (count2 = count; count > 0;
	     count -= count2, first_cluster += count2) {
		ext4_fsblk_t start;
		struct buffer_head *bh;
		ext4_group_t group;
		int err;

		group = ext4_get_group_number(sb, EXT4_C2B(sbi, first_cluster));
		start = EXT4_B2C(sbi, ext4_group_first_block_no(sb, group));
		group -= flex_gd->groups[0].group;

		count2 = EXT4_CLUSTERS_PER_GROUP(sb) - (first_cluster - start);
		if (count2 > count)
			count2 = count;

		if (flex_gd->bg_flags[group] & EXT4_BG_BLOCK_UNINIT) {
			BUG_ON(flex_gd->count > 1);
			continue;
		}

		err = ext4_resize_ensure_credits_batch(handle, 1);
		if (err < 0)
			return err;

		bh = sb_getblk(sb, flex_gd->groups[group].block_bitmap);
		if (unlikely(!bh))
			return -ENOMEM;

		BUFFER_TRACE(bh, "get_write_access");
		err = ext4_journal_get_write_access(handle, bh);
		if (err) {
			brelse(bh);
			return err;
		}
		ext4_debug("mark block bitmap %#04llx (+%llu/%u)\n",
			   first_cluster, first_cluster - start, count2);
		ext4_set_bits(bh->b_data, first_cluster - start, count2);

		err = ext4_handle_dirty_metadata(handle, NULL, bh);
		brelse(bh);
		if (unlikely(err))
			return err;
	}

	return 0;
}
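
/*
 * The loop above walks [@first_cluster, @last_cluster] one block group
 * at a time: count2 is clamped to the clusters remaining in the current
 * group, the matching bits are set in that group's new block bitmap,
 * and groups still flagged EXT4_BG_BLOCK_UNINIT are skipped, since
 * their bitmaps are reconstructed from the group descriptor on first
 * use rather than read from disk.
 */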

/*
 * Set up the block and inode bitmaps, and the inode table for the new groups.
 * This doesn't need to be part of the main transaction, since we are only
 * changing blocks outside the actual filesystem.  We still do journaling to
 * ensure the recovery is correct in case of a failure just after resize.
 * If any part of this fails, we simply abort the resize.
 *
 * setup_new_flex_group_blocks handles a flex group as follows:
 *  1. copy super block and GDT, and initialize group tables if necessary.
 *     In this step, we only set bits in block bitmaps for blocks taken by
 *     super block and GDT.
 *  2. allocate group tables in block bitmaps, that is, set bits in block
 *     bitmap for blocks taken by group tables.
 */
static int setup_new_flex_group_blocks(struct super_block *sb,
				       struct ext4_new_flex_group_data *flex_gd)
{
	int group_table_count[] = {1, 1, EXT4_SB(sb)->s_itb_per_group};
	ext4_fsblk_t start;
	ext4_fsblk_t block;
	struct ext4_sb_info *sbi = EXT4_SB(sb);
	struct ext4_super_block *es = sbi->s_es;
	struct ext4_new_group_data *group_data = flex_gd->groups;
	__u16 *bg_flags = flex_gd->bg_flags;
	handle_t *handle;
	ext4_group_t group, count;
	struct buffer_head *bh = NULL;
	int reserved_gdb, i, j, err = 0, err2;
	int meta_bg;

	BUG_ON(!flex_gd->count || !group_data ||
	       group_data[0].group != sbi->s_groups_count);

	reserved_gdb = le16_to_cpu(es->s_reserved_gdt_blocks);
	meta_bg = ext4_has_feature_meta_bg(sb);

	/* This transaction may be extended/restarted along the way */
	handle = ext4_journal_start_sb(sb, EXT4_HT_RESIZE, EXT4_MAX_TRANS_DATA);
	if (IS_ERR(handle))
		return PTR_ERR(handle);

	group = group_data[0].group;
	for (i = 0; i < flex_gd->count; i++, group++) {
		unsigned long gdblocks;
		ext4_grpblk_t overhead;

		gdblocks = ext4_bg_num_gdb(sb, group);
		start = ext4_group_first_block_no(sb, group);

		if (meta_bg == 0 && !ext4_bg_has_super(sb, group))
			goto handle_itb;

		if (meta_bg == 1) {
			ext4_group_t first_group;
			first_group = ext4_meta_bg_first_group(sb, group);
			if (first_group != group + 1 &&
			    first_group != group + EXT4_DESC_PER_BLOCK(sb) - 1)
				goto handle_itb;
		}

		block = start + ext4_bg_has_super(sb, group);
		/* Copy all of the GDT blocks into the backup in this group */
		for (j = 0; j < gdblocks; j++, block++) {
			struct buffer_head *gdb;

			ext4_debug("update backup group %#04llx\n", block);
			err = ext4_resize_ensure_credits_batch(handle, 1);
			if (err < 0)
				goto out;

			gdb = sb_getblk(sb, block);
			if (unlikely(!gdb)) {
				err = -ENOMEM;
				goto out;
			}

			BUFFER_TRACE(gdb, "get_write_access");
			err = ext4_journal_get_write_access(handle, gdb);
			if (err) {
				brelse(gdb);
				goto out;
			}
			memcpy(gdb->b_data, sbi_array_rcu_deref(sbi,
				s_group_desc, j)->b_data, gdb->b_size);
			set_buffer_uptodate(gdb);

			err = ext4_handle_dirty_metadata(handle, NULL, gdb);
			if (unlikely(err)) {
				brelse(gdb);
				goto out;
			}
			brelse(gdb);
		}

		/* Zero out all of the reserved backup group descriptor
		 * table blocks
		 */
		if (ext4_bg_has_super(sb, group)) {
			err = sb_issue_zeroout(sb, gdblocks + start + 1,
					       reserved_gdb, GFP_NOFS);
			if (err)
				goto out;
		}

handle_itb:
		/* Initialize group tables of the group @group */
		if (!(bg_flags[i] & EXT4_BG_INODE_ZEROED))
			goto handle_bb;

		/* Zero out all of the inode table blocks */
		block = group_data[i].inode_table;
		ext4_debug("clear inode table blocks %#04llx -> %#04lx\n",
			   block, sbi->s_itb_per_group);
		err = sb_issue_zeroout(sb, block, sbi->s_itb_per_group,
				       GFP_NOFS);
		if (err)
			goto out;

handle_bb:
		if (bg_flags[i] & EXT4_BG_BLOCK_UNINIT)
			goto handle_ib;

		/* Initialize block bitmap of the @group */
		block = group_data[i].block_bitmap;
		err = ext4_resize_ensure_credits_batch(handle, 1);
		if (err < 0)
			goto out;

		bh = bclean(handle, sb, block);
		if (IS_ERR(bh)) {
			err = PTR_ERR(bh);
			goto out;
		}
		overhead = ext4_group_overhead_blocks(sb, group);
		if (overhead != 0) {
			ext4_debug("mark backup superblock %#04llx (+0)\n",
				   start);
			ext4_set_bits(bh->b_data, 0,
				      EXT4_NUM_B2C(sbi, overhead));
		}
		ext4_mark_bitmap_end(EXT4_B2C(sbi, group_data[i].blocks_count),
				     sb->s_blocksize * 8, bh->b_data);
		err = ext4_handle_dirty_metadata(handle, NULL, bh);
		brelse(bh);
		if (err)
			goto out;

handle_ib:
		if (bg_flags[i] & EXT4_BG_INODE_UNINIT)
			continue;

		/* Initialize inode bitmap of the @group */
		block = group_data[i].inode_bitmap;
		err = ext4_resize_ensure_credits_batch(handle, 1);
		if (err < 0)
			goto out;
		/* Mark unused entries in inode bitmap used */
		bh = bclean(handle, sb, block);
		if (IS_ERR(bh)) {
			err = PTR_ERR(bh);
			goto out;
		}

		ext4_mark_bitmap_end(EXT4_INODES_PER_GROUP(sb),
				     sb->s_blocksize * 8, bh->b_data);
		err = ext4_handle_dirty_metadata(handle, NULL, bh);
		brelse(bh);
		if (err)
			goto out;
	}

	/* Mark group tables in block bitmap */
	for (j = 0; j < GROUP_TABLE_COUNT; j++) {
		count = group_table_count[j];
		start = (&group_data[0].block_bitmap)[j];
		block = start;
		for (i = 1; i < flex_gd->count; i++) {
			block += group_table_count[j];
			if (block == (&group_data[i].block_bitmap)[j]) {
				count += group_table_count[j];
				continue;
			}
			err = set_flexbg_block_bitmap(sb, handle,
						      flex_gd,
						      EXT4_B2C(sbi, start),
						      EXT4_B2C(sbi,
							       start + count
							       - 1));
			if (err)
				goto out;
			count = group_table_count[j];
			start = (&group_data[i].block_bitmap)[j];
			block = start;
		}

		if (count) {
			err = set_flexbg_block_bitmap(sb, handle,
						      flex_gd,
						      EXT4_B2C(sbi, start),
						      EXT4_B2C(sbi,
							       start + count
							       - 1));
			if (err)
				goto out;
		}
	}

out:
	err2 = ext4_journal_stop(handle);
	if (err2 && !err)
		err = err2;

	return err;
}

/*
 * Iterate through the groups which hold BACKUP superblock/GDT copies in an
 * ext4 filesystem.  The counters should be initialized to 1, 5, and 7 before
 * calling this for the first time.  In a sparse filesystem it will be the
 * sequence of powers of 3, 5, and 7: 1, 3, 5, 7, 9, 25, 27, 49, 81, ...
 * For a non-sparse filesystem it will be every group: 1, 2, 3, 4, ...
 */
static unsigned ext4_list_backups(struct super_block *sb, unsigned *three,
				  unsigned *five, unsigned *seven)
{
	unsigned *min = three;
	int mult = 3;
	unsigned ret;

	if (!ext4_has_feature_sparse_super(sb)) {
		ret = *min;
		*min += 1;
		return ret;
	}

	if (*five < *min) {
		min = five;
		mult = 5;
	}
	if (*seven < *min) {
		min = seven;
		mult = 7;
	}

	ret = *min;
	*min *= mult;

	return ret;
}
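
/*
 * Example trace with sparse_super: starting from (three, five, seven) =
 * (1, 5, 7), successive calls return 1, 3, 5, 7, 9, 25, 27, 49, 81, ...
 * Each call returns the smallest of the three counters and multiplies
 * that counter by its base, which merges the powers of 3, 5 and 7 in
 * ascending order.
 */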

/*
 * Check that all of the backup GDT blocks are held in the primary GDT block.
 * It is assumed that they are stored in group order.  Returns the number of
 * groups in current filesystem that have BACKUPS, or -ve error code.
 */
static int verify_reserved_gdb(struct super_block *sb,
			       ext4_group_t end,
			       struct buffer_head *primary)
{
	const ext4_fsblk_t blk = primary->b_blocknr;
	unsigned three = 1;
	unsigned five = 5;
	unsigned seven = 7;
	unsigned grp;
	__le32 *p = (__le32 *)primary->b_data;
	int gdbackups = 0;

	while ((grp = ext4_list_backups(sb, &three, &five, &seven)) < end) {
		if (le32_to_cpu(*p++) !=
		    grp * EXT4_BLOCKS_PER_GROUP(sb) + blk) {
			ext4_warning(sb, "reserved GDT %llu"
				     " missing grp %d (%llu)",
				     blk, grp,
				     grp *
				     (ext4_fsblk_t)EXT4_BLOCKS_PER_GROUP(sb) +
				     blk);
			return -EINVAL;
		}
		if (++gdbackups > EXT4_ADDR_PER_BLOCK(sb))
			return -EFBIG;
	}

	return gdbackups;
}

/*
 * Called when we need to bring a reserved group descriptor table block into
 * use from the resize inode.  The primary copy of the new GDT block currently
 * is an indirect block (under the double indirect block in the resize inode).
 * The new backup GDT blocks will be stored as leaf blocks in this indirect
 * block, in group order.  Even though we know all the block numbers we need,
 * we check to ensure that the resize inode has actually reserved these blocks.
 *
 * Don't need to update the block bitmaps because the blocks are still in use.
 *
 * We get all of the error cases out of the way, so that we are sure to not
 * fail once we start modifying the data on disk, because JBD has no rollback.
 */
static int add_new_gdb(handle_t *handle, struct inode *inode,
		       ext4_group_t group)
{
	struct super_block *sb = inode->i_sb;
	struct ext4_super_block *es = EXT4_SB(sb)->s_es;
	unsigned long gdb_num = group / EXT4_DESC_PER_BLOCK(sb);
	ext4_fsblk_t gdblock = EXT4_SB(sb)->s_sbh->b_blocknr + 1 + gdb_num;
	struct buffer_head **o_group_desc, **n_group_desc = NULL;
	struct buffer_head *dind = NULL;
	struct buffer_head *gdb_bh = NULL;
	int gdbackups;
	struct ext4_iloc iloc = { .bh = NULL };
	__le32 *data;
	int err;

	if (test_opt(sb, DEBUG))
		printk(KERN_DEBUG
		       "EXT4-fs: ext4_add_new_gdb: adding group block %lu\n",
		       gdb_num);

	gdb_bh = ext4_sb_bread(sb, gdblock, 0);
	if (IS_ERR(gdb_bh))
		return PTR_ERR(gdb_bh);

	gdbackups = verify_reserved_gdb(sb, group, gdb_bh);
	if (gdbackups < 0) {
		err = gdbackups;
		goto errout;
	}

	data = EXT4_I(inode)->i_data + EXT4_DIND_BLOCK;
	dind = ext4_sb_bread(sb, le32_to_cpu(*data), 0);
	if (IS_ERR(dind)) {
		err = PTR_ERR(dind);
		dind = NULL;
		goto errout;
	}

	data = (__le32 *)dind->b_data;
	if (le32_to_cpu(data[gdb_num % EXT4_ADDR_PER_BLOCK(sb)]) != gdblock) {
		ext4_warning(sb, "new group %u GDT block %llu not reserved",
			     group, gdblock);
		err = -EINVAL;
		goto errout;
	}

	BUFFER_TRACE(EXT4_SB(sb)->s_sbh, "get_write_access");
	err = ext4_journal_get_write_access(handle, EXT4_SB(sb)->s_sbh);
	if (unlikely(err))
		goto errout;

	BUFFER_TRACE(gdb_bh, "get_write_access");
	err = ext4_journal_get_write_access(handle, gdb_bh);
	if (unlikely(err))
		goto errout;

	BUFFER_TRACE(dind, "get_write_access");
	err = ext4_journal_get_write_access(handle, dind);
	if (unlikely(err)) {
		ext4_std_error(sb, err);
		goto errout;
	}

	/* ext4_reserve_inode_write() gets a reference on the iloc */
	err = ext4_reserve_inode_write(handle, inode, &iloc);
	if (unlikely(err))
		goto errout;

	n_group_desc = kvmalloc((gdb_num + 1) * sizeof(struct buffer_head *),
				GFP_KERNEL);
	if (!n_group_desc) {
		err = -ENOMEM;
		ext4_warning(sb, "not enough memory for %lu groups",
			     gdb_num + 1);
		goto errout;
	}

	/*
	 * Finally, we have all of the possible failures behind us...
	 *
	 * Remove new GDT block from inode double-indirect block and clear out
	 * the new GDT block for use (which also "frees" the backup GDT blocks
	 * from the reserved inode).  We don't need to change the bitmaps for
	 * these blocks, because they are marked as in-use from being in the
	 * reserved inode, and will become GDT blocks (primary and backup).
	 */
	data[gdb_num % EXT4_ADDR_PER_BLOCK(sb)] = 0;
	err = ext4_handle_dirty_metadata(handle, NULL, dind);
	if (unlikely(err)) {
		ext4_std_error(sb, err);
		goto errout;
	}
	inode->i_blocks -= (gdbackups + 1) * sb->s_blocksize >>
			   (9 - EXT4_SB(sb)->s_cluster_bits);
	ext4_mark_iloc_dirty(handle, inode, &iloc);
	memset(gdb_bh->b_data, 0, sb->s_blocksize);
	err = ext4_handle_dirty_metadata(handle, NULL, gdb_bh);
	if (unlikely(err)) {
		ext4_std_error(sb, err);
		iloc.bh = NULL;
		goto errout;
	}
	brelse(dind);

	rcu_read_lock();
	o_group_desc = rcu_dereference(EXT4_SB(sb)->s_group_desc);
	memcpy(n_group_desc, o_group_desc,
	       EXT4_SB(sb)->s_gdb_count * sizeof(struct buffer_head *));
	rcu_read_unlock();
	n_group_desc[gdb_num] = gdb_bh;
	rcu_assign_pointer(EXT4_SB(sb)->s_group_desc, n_group_desc);
	EXT4_SB(sb)->s_gdb_count++;
	ext4_kvfree_array_rcu(o_group_desc);

	le16_add_cpu(&es->s_reserved_gdt_blocks, -1);
	err = ext4_handle_dirty_super(handle, sb);
	if (err)
		ext4_std_error(sb, err);
	return err;
errout:
	kvfree(n_group_desc);
	brelse(iloc.bh);
	brelse(dind);
	brelse(gdb_bh);

	ext4_debug("leaving with error %d\n", err);
	return err;
}

/*
 * add_new_gdb_meta_bg is the sister of add_new_gdb.
 */
static int add_new_gdb_meta_bg(struct super_block *sb,
			       handle_t *handle, ext4_group_t group)
{
	ext4_fsblk_t gdblock;
	struct buffer_head *gdb_bh;
	struct buffer_head **o_group_desc, **n_group_desc;
	unsigned long gdb_num = group / EXT4_DESC_PER_BLOCK(sb);
	int err;

	gdblock = ext4_meta_bg_first_block_no(sb, group) +
		   ext4_bg_has_super(sb, group);
	gdb_bh = ext4_sb_bread(sb, gdblock, 0);
	if (IS_ERR(gdb_bh))
		return PTR_ERR(gdb_bh);
	n_group_desc = kvmalloc((gdb_num + 1) * sizeof(struct buffer_head *),
				GFP_KERNEL);
	if (!n_group_desc) {
		brelse(gdb_bh);
		err = -ENOMEM;
		ext4_warning(sb, "not enough memory for %lu groups",
			     gdb_num + 1);
		return err;
	}

	rcu_read_lock();
	o_group_desc = rcu_dereference(EXT4_SB(sb)->s_group_desc);
	memcpy(n_group_desc, o_group_desc,
	       EXT4_SB(sb)->s_gdb_count * sizeof(struct buffer_head *));
	rcu_read_unlock();
	n_group_desc[gdb_num] = gdb_bh;

	BUFFER_TRACE(gdb_bh, "get_write_access");
	err = ext4_journal_get_write_access(handle, gdb_bh);
	if (err) {
		kvfree(n_group_desc);
		brelse(gdb_bh);
		return err;
	}

	rcu_assign_pointer(EXT4_SB(sb)->s_group_desc, n_group_desc);
	EXT4_SB(sb)->s_gdb_count++;
	ext4_kvfree_array_rcu(o_group_desc);
	return err;
}

/*
 * Called when we are adding a new group which has a backup copy of each of
 * the GDT blocks (i.e. sparse group) and there are reserved GDT blocks.
 * We need to add these reserved backup GDT blocks to the resize inode, so
 * that they are kept for future resizing and not allocated to files.
 *
 * Each reserved backup GDT block will go into a different indirect block.
 * The indirect blocks are actually the primary reserved GDT blocks,
 * so we know in advance what their block numbers are.  We only get the
 * double-indirect block to verify it is pointing to the primary reserved
 * GDT blocks so we don't overwrite a data block by accident.  The reserved
 * backup GDT blocks are stored in their reserved primary GDT block.
 */
static int reserve_backup_gdb(handle_t *handle, struct inode *inode,
			      ext4_group_t group)
{
	struct super_block *sb = inode->i_sb;
	int reserved_gdb = le16_to_cpu(EXT4_SB(sb)->s_es->s_reserved_gdt_blocks);
	int cluster_bits = EXT4_SB(sb)->s_cluster_bits;
	struct buffer_head **primary;
	struct buffer_head *dind;
	struct ext4_iloc iloc;
	ext4_fsblk_t blk;
	__le32 *data, *end;
	int gdbackups = 0;
	int res, i;
	int err;

	primary = kmalloc_array(reserved_gdb, sizeof(*primary), GFP_NOFS);
	if (!primary)
		return -ENOMEM;

	data = EXT4_I(inode)->i_data + EXT4_DIND_BLOCK;
	dind = ext4_sb_bread(sb, le32_to_cpu(*data), 0);
	if (IS_ERR(dind)) {
		err = PTR_ERR(dind);
		dind = NULL;
		goto exit_free;
	}

	blk = EXT4_SB(sb)->s_sbh->b_blocknr + 1 + EXT4_SB(sb)->s_gdb_count;
	data = (__le32 *)dind->b_data + (EXT4_SB(sb)->s_gdb_count %
					 EXT4_ADDR_PER_BLOCK(sb));
	end = (__le32 *)dind->b_data + EXT4_ADDR_PER_BLOCK(sb);

	/* Get each reserved primary GDT block and verify it holds backups */
	for (res = 0; res < reserved_gdb; res++, blk++) {
		if (le32_to_cpu(*data) != blk) {
			ext4_warning(sb, "reserved block %llu"
				     " not at offset %ld",
				     blk,
				     (long)(data - (__le32 *)dind->b_data));
			err = -EINVAL;
			goto exit_bh;
		}
		primary[res] = ext4_sb_bread(sb, blk, 0);
		if (IS_ERR(primary[res])) {
			err = PTR_ERR(primary[res]);
			primary[res] = NULL;
			goto exit_bh;
		}
		gdbackups = verify_reserved_gdb(sb, group, primary[res]);
		if (gdbackups < 0) {
			brelse(primary[res]);
			err = gdbackups;
			goto exit_bh;
		}
		if (++data >= end)
			data = (__le32 *)dind->b_data;
	}

	for (i = 0; i < reserved_gdb; i++) {
		BUFFER_TRACE(primary[i], "get_write_access");
		if ((err = ext4_journal_get_write_access(handle, primary[i])))
			goto exit_bh;
	}

	if ((err = ext4_reserve_inode_write(handle, inode, &iloc)))
		goto exit_bh;

	/*
	 * Finally we can add each of the reserved backup GDT blocks from
	 * the new group to its reserved primary GDT block.
	 */
	blk = group * EXT4_BLOCKS_PER_GROUP(sb);
	for (i = 0; i < reserved_gdb; i++) {
		int err2;
		data = (__le32 *)primary[i]->b_data;
		/* printk("reserving backup %lu[%u] = %lu\n",
		       primary[i]->b_blocknr, gdbackups,
		       blk + primary[i]->b_blocknr); */
		data[gdbackups] = cpu_to_le32(blk + primary[i]->b_blocknr);
		err2 = ext4_handle_dirty_metadata(handle, NULL, primary[i]);
		if (!err)
			err = err2;
	}

	inode->i_blocks += reserved_gdb * sb->s_blocksize >> (9 - cluster_bits);
	ext4_mark_iloc_dirty(handle, inode, &iloc);

exit_bh:
	while (--res >= 0)
		brelse(primary[res]);
	brelse(dind);

exit_free:
	kfree(primary);

	return err;
}

/*
 * Update the backup copies of the ext4 metadata.  These don't need to be part
 * of the main resize transaction, because e2fsck will re-write them if there
 * is a problem (basically only OOM will cause a problem).  However, we
 * _should_ update the backups if possible, in case the primary gets trashed
 * for some reason and we need to run e2fsck from a backup superblock.  The
 * important part is that the new block and inode counts are in the backup
 * superblocks, and the location of the new group metadata in the GDT backups.
 *
 * We do not need to take the s_resize_lock for this, because these
 * blocks are not otherwise touched by the filesystem code when it is
 * mounted.  We don't need to worry about last changing from
 * sbi->s_groups_count, because the worst that can happen is that we
 * do not copy the full number of backups at this time.  The resize
 * which changed s_groups_count will back up again.
 */
static void update_backups(struct super_block *sb, sector_t blk_off, char *data,
			   int size, int meta_bg)
{
	struct ext4_sb_info *sbi = EXT4_SB(sb);
	ext4_group_t last;
	const int bpg = EXT4_BLOCKS_PER_GROUP(sb);
	unsigned three = 1;
	unsigned five = 5;
	unsigned seven = 7;
	ext4_group_t group = 0;
	int rest = sb->s_blocksize - size;
	handle_t *handle;
	int err = 0, err2;

	handle = ext4_journal_start_sb(sb, EXT4_HT_RESIZE, EXT4_MAX_TRANS_DATA);
	if (IS_ERR(handle)) {
		group = 1;
		err = PTR_ERR(handle);
		goto exit_err;
	}

	if (meta_bg == 0) {
		group = ext4_list_backups(sb, &three, &five, &seven);
		last = sbi->s_groups_count;
	} else {
		group = ext4_get_group_number(sb, blk_off) + 1;
		last = (ext4_group_t)(group + EXT4_DESC_PER_BLOCK(sb) - 2);
	}

	while (group < sbi->s_groups_count) {
		struct buffer_head *bh;
		ext4_fsblk_t backup_block;

		/* Out of journal space, and can't get more - abort - so sad */
		err = ext4_resize_ensure_credits_batch(handle, 1);
		if (err < 0)
			break;

		if (meta_bg == 0)
			backup_block = ((ext4_fsblk_t)group) * bpg + blk_off;
		else
			backup_block = (ext4_group_first_block_no(sb, group) +
					ext4_bg_has_super(sb, group));

		bh = sb_getblk(sb, backup_block);
		if (unlikely(!bh)) {
			err = -ENOMEM;
			break;
		}
		ext4_debug("update metadata backup %llu(+%llu)\n",
			   backup_block, backup_block -
			   ext4_group_first_block_no(sb, group));
		BUFFER_TRACE(bh, "get_write_access");
		if ((err = ext4_journal_get_write_access(handle, bh))) {
			brelse(bh);
			break;
		}
		lock_buffer(bh);
		memcpy(bh->b_data, data, size);
		if (rest)
			memset(bh->b_data + size, 0, rest);
		set_buffer_uptodate(bh);
		unlock_buffer(bh);
		err = ext4_handle_dirty_metadata(handle, NULL, bh);
		if (unlikely(err))
			ext4_std_error(sb, err);
		brelse(bh);

		if (meta_bg == 0)
			group = ext4_list_backups(sb, &three, &five, &seven);
		else if (group == last)
			break;
		else
			group = last;
	}
	if ((err2 = ext4_journal_stop(handle)) && !err)
		err = err2;

	/*
	 * Ugh! Need to have e2fsck write the backup copies.  It is too
	 * late to revert the resize, we shouldn't fail just because of
	 * the backup copies (they are only needed in case of corruption).
	 *
	 * However, if we got here we have a journal problem too, so we
	 * can't really start a transaction to mark the superblock.
	 * Chicken out and just set the flag on the hope it will be written
	 * to disk, and if not - we will simply wait until next fsck.
	 */
exit_err:
	if (err) {
		ext4_warning(sb, "can't update backup for group %u (err %d), "
			     "forcing fsck on next reboot", group, err);
		sbi->s_mount_state &= ~EXT4_VALID_FS;
		sbi->s_es->s_state &= cpu_to_le16(~EXT4_VALID_FS);
		mark_buffer_dirty(sbi->s_sbh);
	}
}

/*
 * ext4_add_new_descs() adds @count group descriptors of groups
 * starting at @group
 *
 * @handle: journal handle
 * @sb: super block
 * @group: the group no. of the first group desc to be added
 * @resize_inode: the resize inode
 * @count: number of group descriptors to be added
 */
static int ext4_add_new_descs(handle_t *handle, struct super_block *sb,
			      ext4_group_t group, struct inode *resize_inode,
			      ext4_group_t count)
{
	struct ext4_sb_info *sbi = EXT4_SB(sb);
	struct ext4_super_block *es = sbi->s_es;
	struct buffer_head *gdb_bh;
	int i, gdb_off, gdb_num, err = 0;
	int meta_bg;

	meta_bg = ext4_has_feature_meta_bg(sb);
	for (i = 0; i < count; i++, group++) {
		int reserved_gdb = ext4_bg_has_super(sb, group) ?
			le16_to_cpu(es->s_reserved_gdt_blocks) : 0;

		gdb_off = group % EXT4_DESC_PER_BLOCK(sb);
		gdb_num = group / EXT4_DESC_PER_BLOCK(sb);

		/*
		 * We will only either add reserved group blocks to a backup group
		 * or remove reserved blocks for the first group in a new group block.
		 * Doing both would mean more complex code, and sane people don't
		 * use non-sparse filesystems anymore.  This is already checked above.
		 */
		if (gdb_off) {
			gdb_bh = sbi_array_rcu_deref(sbi, s_group_desc,
						     gdb_num);
			BUFFER_TRACE(gdb_bh, "get_write_access");
			err = ext4_journal_get_write_access(handle, gdb_bh);

			if (!err && reserved_gdb && ext4_bg_num_gdb(sb, group))
				err = reserve_backup_gdb(handle, resize_inode, group);
		} else if (meta_bg != 0) {
			err = add_new_gdb_meta_bg(sb, handle, group);
		} else {
			err = add_new_gdb(handle, resize_inode, group);
		}
		if (err)
			break;
	}
	return err;
}
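
/*
 * Three cases per group above: if the new group's descriptor lands in
 * an existing GDT block (gdb_off != 0) we only need write access to
 * that block, plus reserve_backup_gdb() when the new group itself
 * carries backups; if the descriptor starts a new GDT block, we grow
 * the in-memory descriptor array, either from the meta_bg area
 * (add_new_gdb_meta_bg) or from blocks reserved in the resize inode
 * (add_new_gdb).
 */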

static struct buffer_head *ext4_get_bitmap(struct super_block *sb, __u64 block)
{
	struct buffer_head *bh = sb_getblk(sb, block);
	if (unlikely(!bh))
		return NULL;
	if (!bh_uptodate_or_lock(bh)) {
		if (ext4_read_bh(bh, 0, NULL) < 0) {
			brelse(bh);
			return NULL;
		}
	}

	return bh;
}

static int ext4_set_bitmap_checksums(struct super_block *sb,
				     ext4_group_t group,
				     struct ext4_group_desc *gdp,
				     struct ext4_new_group_data *group_data)
{
	struct buffer_head *bh;

	if (!ext4_has_metadata_csum(sb))
		return 0;

	bh = ext4_get_bitmap(sb, group_data->inode_bitmap);
	if (!bh)
		return -EIO;
	ext4_inode_bitmap_csum_set(sb, group, gdp, bh,
				   EXT4_INODES_PER_GROUP(sb) / 8);
	brelse(bh);

	bh = ext4_get_bitmap(sb, group_data->block_bitmap);
	if (!bh)
		return -EIO;
	ext4_block_bitmap_csum_set(sb, group, gdp, bh);
	brelse(bh);

	return 0;
}

/*
 * ext4_setup_new_descs() will set up the group descriptors of a flex bg
 */
static int ext4_setup_new_descs(handle_t *handle, struct super_block *sb,
				struct ext4_new_flex_group_data *flex_gd)
{
	struct ext4_new_group_data *group_data = flex_gd->groups;
	struct ext4_group_desc *gdp;
	struct ext4_sb_info *sbi = EXT4_SB(sb);
	struct buffer_head *gdb_bh;
	ext4_group_t group;
	__u16 *bg_flags = flex_gd->bg_flags;
	int i, gdb_off, gdb_num, err = 0;

	for (i = 0; i < flex_gd->count; i++, group_data++, bg_flags++) {
		group = group_data->group;

		gdb_off = group % EXT4_DESC_PER_BLOCK(sb);
		gdb_num = group / EXT4_DESC_PER_BLOCK(sb);

		/*
		 * get_write_access() has been called on gdb_bh by
		 * ext4_add_new_descs().
		 */
		gdb_bh = sbi_array_rcu_deref(sbi, s_group_desc, gdb_num);

		/* Update group descriptor block for new group */
		gdp = (struct ext4_group_desc *)(gdb_bh->b_data +
						 gdb_off * EXT4_DESC_SIZE(sb));

		memset(gdp, 0, EXT4_DESC_SIZE(sb));
		ext4_block_bitmap_set(sb, gdp, group_data->block_bitmap);
		ext4_inode_bitmap_set(sb, gdp, group_data->inode_bitmap);
		err = ext4_set_bitmap_checksums(sb, group, gdp, group_data);
		if (err) {
			ext4_std_error(sb, err);
			break;
		}

		ext4_inode_table_set(sb, gdp, group_data->inode_table);
		ext4_free_group_clusters_set(sb, gdp,
					     group_data->free_clusters_count);
		ext4_free_inodes_set(sb, gdp, EXT4_INODES_PER_GROUP(sb));
		if (ext4_has_group_desc_csum(sb))
			ext4_itable_unused_set(sb, gdp,
					       EXT4_INODES_PER_GROUP(sb));
		gdp->bg_flags = cpu_to_le16(*bg_flags);
		ext4_group_desc_csum_set(sb, group, gdp);

		err = ext4_handle_dirty_metadata(handle, NULL, gdb_bh);
		if (unlikely(err)) {
			ext4_std_error(sb, err);
			break;
		}

		/*
		 * We can allocate memory for mb_alloc based on the new group
		 * descriptor
		 */
		err = ext4_mb_add_groupinfo(sb, group, gdp);
		if (err)
			break;
	}
	return err;
}

/*
 * ext4_update_super() updates the super block so that the newly added
 * groups can be seen by the filesystem.
 *
 * @sb: super block
 * @flex_gd: new added groups
 */
static void ext4_update_super(struct super_block *sb,
			      struct ext4_new_flex_group_data *flex_gd)
{
	ext4_fsblk_t blocks_count = 0;
	ext4_fsblk_t free_blocks = 0;
	ext4_fsblk_t reserved_blocks = 0;
	struct ext4_new_group_data *group_data = flex_gd->groups;
	struct ext4_sb_info *sbi = EXT4_SB(sb);
	struct ext4_super_block *es = sbi->s_es;
	int i;

	BUG_ON(flex_gd->count == 0 || group_data == NULL);
	/*
	 * Make the new blocks and inodes valid next.  We do this before
	 * increasing the group count so that once the group is enabled,
	 * all of its blocks and inodes are already valid.
	 *
	 * We always allocate group-by-group, then block-by-block or
	 * inode-by-inode within a group, so enabling these
	 * blocks/inodes before the group is live won't actually let us
	 * allocate the new space yet.
	 */
	for (i = 0; i < flex_gd->count; i++) {
		blocks_count += group_data[i].blocks_count;
		free_blocks += EXT4_C2B(sbi, group_data[i].free_clusters_count);
	}

	reserved_blocks = ext4_r_blocks_count(es) * 100;
	reserved_blocks = div64_u64(reserved_blocks, ext4_blocks_count(es));
	reserved_blocks *= blocks_count;
	do_div(reserved_blocks, 100);

	ext4_blocks_count_set(es, ext4_blocks_count(es) + blocks_count);
	ext4_free_blocks_count_set(es, ext4_free_blocks_count(es) + free_blocks);
	le32_add_cpu(&es->s_inodes_count, EXT4_INODES_PER_GROUP(sb) *
		     flex_gd->count);
	le32_add_cpu(&es->s_free_inodes_count, EXT4_INODES_PER_GROUP(sb) *
		     flex_gd->count);

	ext4_debug("free blocks count %llu", ext4_free_blocks_count(es));
	/*
	 * We need to protect s_groups_count against other CPUs seeing
	 * inconsistent state in the superblock.
	 *
	 * The precise rules we use are:
	 *
	 * * Writers must perform a smp_wmb() after updating all
	 *   dependent data and before modifying the groups count
	 *
	 * * Readers must perform an smp_rmb() after reading the groups
	 *   count and before reading any dependent data.
	 *
	 * NB. These rules can be relaxed when checking the group count
	 * while freeing data, as we can only allocate from a block
	 * group after serialising against the group count, and we can
	 * only then free after serialising in turn against that
	 * allocation.
	 */
	smp_wmb();

	/* Update the global fs size fields */
	sbi->s_groups_count += flex_gd->count;
	sbi->s_blockfile_groups = min_t(ext4_group_t, sbi->s_groups_count,
			(EXT4_MAX_BLOCK_FILE_PHYS / EXT4_BLOCKS_PER_GROUP(sb)));

	/* Update the reserved block counts only once the new group is
	 * active. */
	ext4_r_blocks_count_set(es, ext4_r_blocks_count(es) +
				reserved_blocks);

	/* Update the free space counts */
	percpu_counter_add(&sbi->s_freeclusters_counter,
			   EXT4_NUM_B2C(sbi, free_blocks));
	percpu_counter_add(&sbi->s_freeinodes_counter,
			   EXT4_INODES_PER_GROUP(sb) * flex_gd->count);

	ext4_debug("free blocks count %llu",
		   percpu_counter_read(&sbi->s_freeclusters_counter));
	if (ext4_has_feature_flex_bg(sb) && sbi->s_log_groups_per_flex) {
		ext4_group_t flex_group;
		struct flex_groups *fg;

		flex_group = ext4_flex_group(sbi, group_data[0].group);
		fg = sbi_array_rcu_deref(sbi, s_flex_groups, flex_group);
		atomic64_add(EXT4_NUM_B2C(sbi, free_blocks),
			     &fg->free_clusters);
		atomic_add(EXT4_INODES_PER_GROUP(sb) * flex_gd->count,
			   &fg->free_inodes);
	}

	/*
	 * Update the fs overhead information
	 */
	ext4_calculate_overhead(sb);
	es->s_overhead_clusters = cpu_to_le32(sbi->s_overhead);

	if (test_opt(sb, DEBUG))
		printk(KERN_DEBUG "EXT4-fs: added group %u:"
		       "%llu blocks(%llu free %llu reserved)\n", flex_gd->count,
		       blocks_count, free_blocks, reserved_blocks);
}

/* Add a flex group to an fs. Ensure we handle all possible error conditions
 * _before_ we start modifying the filesystem, because we cannot abort the
 * transaction and not have it write the data to disk.
 */
static int ext4_flex_group_add(struct super_block *sb,
			       struct inode *resize_inode,
			       struct ext4_new_flex_group_data *flex_gd)
{
	struct ext4_sb_info *sbi = EXT4_SB(sb);
	struct ext4_super_block *es = sbi->s_es;
	ext4_fsblk_t o_blocks_count;
	ext4_grpblk_t last;
	ext4_group_t group;
	handle_t *handle;
	unsigned reserved_gdb;
	int err = 0, err2 = 0, credit;

	BUG_ON(!flex_gd->count || !flex_gd->groups || !flex_gd->bg_flags);

	reserved_gdb = le16_to_cpu(es->s_reserved_gdt_blocks);
	o_blocks_count = ext4_blocks_count(es);
	ext4_get_group_no_and_offset(sb, o_blocks_count, &group, &last);
	BUG_ON(last);

	err = setup_new_flex_group_blocks(sb, flex_gd);
	if (err)
		goto exit;
	/*
	 * We will always be modifying at least the superblock and GDT
	 * blocks. If we are adding a group past the last current GDT block,
	 * we will also modify the inode and the dindirect block. If we
	 * are adding a group with superblock/GDT backups we will also
	 * modify each of the reserved GDT dindirect blocks.
	 */
	credit = 3;	/* sb, resize inode, resize inode dindirect */
	/* GDT blocks */
	credit += 1 + DIV_ROUND_UP(flex_gd->count, EXT4_DESC_PER_BLOCK(sb));
	credit += reserved_gdb;	/* Reserved GDT dindirect blocks */
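	/*
	 * Illustrative (not normative) numbers: for a 16-group flex group
	 * on a 4KiB-block filesystem with 32-byte descriptors (128 per
	 * GDT block), this is 3 + 1 + DIV_ROUND_UP(16, 128) + reserved_gdb
	 * = 5 + reserved_gdb journal credits.
	 */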

	handle = ext4_journal_start_sb(sb, EXT4_HT_RESIZE, credit);
	if (IS_ERR(handle)) {
		err = PTR_ERR(handle);
		goto exit;
	}

	BUFFER_TRACE(sbi->s_sbh, "get_write_access");
	err = ext4_journal_get_write_access(handle, sbi->s_sbh);
	if (err)
		goto exit_journal;

	group = flex_gd->groups[0].group;
	BUG_ON(group != sbi->s_groups_count);
	err = ext4_add_new_descs(handle, sb, group,
				 resize_inode, flex_gd->count);
	if (err)
		goto exit_journal;

	err = ext4_setup_new_descs(handle, sb, flex_gd);
	if (err)
		goto exit_journal;

	ext4_update_super(sb, flex_gd);

	err = ext4_handle_dirty_super(handle, sb);

exit_journal:
	err2 = ext4_journal_stop(handle);
	if (!err)
		err = err2;
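
	/*
	 * On success, propagate the new superblock and the affected GDT
	 * blocks to the backup copies kept in other groups. Consecutive
	 * new groups can share a descriptor block, so old_gdb lets us
	 * skip a block we have already written out.
	 */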
	if (!err) {
		int gdb_num = group / EXT4_DESC_PER_BLOCK(sb);
		int gdb_num_end = ((group + flex_gd->count - 1) /
				   EXT4_DESC_PER_BLOCK(sb));
		int meta_bg = ext4_has_feature_meta_bg(sb);
		sector_t old_gdb = 0;

		update_backups(sb, sbi->s_sbh->b_blocknr, (char *)es,
			       sizeof(struct ext4_super_block), 0);
		for (; gdb_num <= gdb_num_end; gdb_num++) {
			struct buffer_head *gdb_bh;

			gdb_bh = sbi_array_rcu_deref(sbi, s_group_desc,
						     gdb_num);
			if (old_gdb == gdb_bh->b_blocknr)
				continue;
			update_backups(sb, gdb_bh->b_blocknr, gdb_bh->b_data,
				       gdb_bh->b_size, meta_bg);
			old_gdb = gdb_bh->b_blocknr;
		}
	}
exit:
	return err;
}

static int ext4_setup_next_flex_gd(struct super_block *sb,
				   struct ext4_new_flex_group_data *flex_gd,
				   ext4_fsblk_t n_blocks_count,
				   unsigned long flexbg_size)
{
	struct ext4_sb_info *sbi = EXT4_SB(sb);
	struct ext4_super_block *es = sbi->s_es;
	struct ext4_new_group_data *group_data = flex_gd->groups;
	ext4_fsblk_t o_blocks_count;
	ext4_group_t n_group;
	ext4_group_t group;
	ext4_group_t last_group;
	ext4_grpblk_t last;
	ext4_grpblk_t clusters_per_group;
	unsigned long i;

	clusters_per_group = EXT4_CLUSTERS_PER_GROUP(sb);

	o_blocks_count = ext4_blocks_count(es);

	if (o_blocks_count == n_blocks_count)
		return 0;

	ext4_get_group_no_and_offset(sb, o_blocks_count, &group, &last);
	BUG_ON(last);
	ext4_get_group_no_and_offset(sb, n_blocks_count - 1, &n_group, &last);
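
	/*
	 * flexbg_size is a power of two, so OR-ing in (flexbg_size - 1)
	 * rounds @group up to the last group of its flex group; e.g. for
	 * flexbg_size == 16, group 5 maps to last_group 15. We then clamp
	 * to n_group so we never set up groups past the requested size.
	 */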
	last_group = group | (flexbg_size - 1);
	if (last_group > n_group)
		last_group = n_group;

	flex_gd->count = last_group - group + 1;

	for (i = 0; i < flex_gd->count; i++) {
		int overhead;

		group_data[i].group = group + i;
		group_data[i].blocks_count = EXT4_BLOCKS_PER_GROUP(sb);
		overhead = ext4_group_overhead_blocks(sb, group + i);
		group_data[i].mdata_blocks = overhead;
		group_data[i].free_clusters_count = EXT4_CLUSTERS_PER_GROUP(sb);
		if (ext4_has_group_desc_csum(sb)) {
			flex_gd->bg_flags[i] = EXT4_BG_BLOCK_UNINIT |
					       EXT4_BG_INODE_UNINIT;
			if (!test_opt(sb, INIT_INODE_TABLE))
				flex_gd->bg_flags[i] |= EXT4_BG_INODE_ZEROED;
		} else
			flex_gd->bg_flags[i] = EXT4_BG_INODE_ZEROED;
	}

	if (last_group == n_group && ext4_has_group_desc_csum(sb))
		/* We need to initialize block bitmap of last group. */
		flex_gd->bg_flags[i - 1] &= ~EXT4_BG_BLOCK_UNINIT;

	if ((last_group == n_group) && (last != clusters_per_group - 1)) {
		group_data[i - 1].blocks_count = EXT4_C2B(sbi, last + 1);
		group_data[i - 1].free_clusters_count -= clusters_per_group -
							 last - 1;
	}

	return 1;
}

/* Add group descriptor data to an existing or new group descriptor block.
 * Ensure we handle all possible error conditions _before_ we start modifying
 * the filesystem, because we cannot abort the transaction and not have it
 * write the data to disk.
 *
 * If we are on a GDT block boundary, we need to get the reserved GDT block.
 * Otherwise, we may need to add backup GDT blocks for a sparse group.
 *
 * We only need to hold the superblock lock while we are actually adding
 * in the new group's counts to the superblock. Prior to that we have
 * not really "added" the group at all. We re-check that we are still
 * adding in the last group in case things have changed since verifying.
 */
int ext4_group_add(struct super_block *sb, struct ext4_new_group_data *input)
{
	struct ext4_new_flex_group_data flex_gd;
	struct ext4_sb_info *sbi = EXT4_SB(sb);
	struct ext4_super_block *es = sbi->s_es;
	int reserved_gdb = ext4_bg_has_super(sb, input->group) ?
		le16_to_cpu(es->s_reserved_gdt_blocks) : 0;
	struct inode *inode = NULL;
	int gdb_off;
	int err;
	__u16 bg_flags = 0;

	gdb_off = input->group % EXT4_DESC_PER_BLOCK(sb);

	if (gdb_off == 0 && !ext4_has_feature_sparse_super(sb)) {
		ext4_warning(sb, "Can't resize non-sparse filesystem further");
		return -EPERM;
	}

	if (ext4_blocks_count(es) + input->blocks_count <
	    ext4_blocks_count(es)) {
		ext4_warning(sb, "blocks_count overflow");
		return -EINVAL;
	}

	if (le32_to_cpu(es->s_inodes_count) + EXT4_INODES_PER_GROUP(sb) <
	    le32_to_cpu(es->s_inodes_count)) {
		ext4_warning(sb, "inodes_count overflow");
		return -EINVAL;
	}
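
	/*
	 * The resize inode is needed whenever the new group either starts
	 * a new GDT block (gdb_off == 0) or carries superblock/GDT backups
	 * (reserved_gdb != 0): both cases consume reserved GDT blocks
	 * tracked by that inode.
	 */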
	if (reserved_gdb || gdb_off == 0) {
		if (!ext4_has_feature_resize_inode(sb) ||
		    !le16_to_cpu(es->s_reserved_gdt_blocks)) {
			ext4_warning(sb,
				     "No reserved GDT blocks, can't resize");
			return -EPERM;
		}
		inode = ext4_iget(sb, EXT4_RESIZE_INO, EXT4_IGET_SPECIAL);
		if (IS_ERR(inode)) {
			ext4_warning(sb, "Error opening resize inode");
			return PTR_ERR(inode);
		}
	}

	err = verify_group_input(sb, input);
	if (err)
		goto out;

	err = ext4_alloc_flex_bg_array(sb, input->group + 1);
	if (err)
		goto out;

	err = ext4_mb_alloc_groupinfo(sb, input->group + 1);
	if (err)
		goto out;

	flex_gd.count = 1;
	flex_gd.groups = input;
	flex_gd.bg_flags = &bg_flags;
	err = ext4_flex_group_add(sb, inode, &flex_gd);
out:
	iput(inode);
	return err;
} /* ext4_group_add */

/*
 * Extend a group without checking, assuming that checking has already
 * been done.
 */
static int ext4_group_extend_no_check(struct super_block *sb,
				      ext4_fsblk_t o_blocks_count,
				      ext4_grpblk_t add)
{
	struct ext4_super_block *es = EXT4_SB(sb)->s_es;
	handle_t *handle;
	int err = 0, err2;

	/* We will update the superblock, one block bitmap, and
	 * one group descriptor via ext4_group_add_blocks().
	 */
	handle = ext4_journal_start_sb(sb, EXT4_HT_RESIZE, 3);
	if (IS_ERR(handle)) {
		err = PTR_ERR(handle);
		ext4_warning(sb, "error %d on journal start", err);
		return err;
	}

	BUFFER_TRACE(EXT4_SB(sb)->s_sbh, "get_write_access");
	err = ext4_journal_get_write_access(handle, EXT4_SB(sb)->s_sbh);
	if (err) {
		ext4_warning(sb, "error %d on journal write access", err);
		goto errout;
	}

	ext4_blocks_count_set(es, o_blocks_count + add);
	ext4_free_blocks_count_set(es, ext4_free_blocks_count(es) + add);
	ext4_debug("freeing blocks %llu through %llu\n", o_blocks_count,
		   o_blocks_count + add);
	/* We add the blocks to the bitmap and set the group need init bit */
	err = ext4_group_add_blocks(handle, sb, o_blocks_count, add);
	if (err)
		goto errout;
	ext4_handle_dirty_super(handle, sb);
	ext4_debug("freed blocks %llu through %llu\n", o_blocks_count,
		   o_blocks_count + add);
errout:
	err2 = ext4_journal_stop(handle);
	if (err2 && !err)
		err = err2;

	if (!err) {
		if (test_opt(sb, DEBUG))
			printk(KERN_DEBUG "EXT4-fs: extended group to %llu "
			       "blocks\n", ext4_blocks_count(es));
		update_backups(sb, EXT4_SB(sb)->s_sbh->b_blocknr,
			       (char *)es, sizeof(struct ext4_super_block), 0);
	}
	return err;
}

/*
 * Extend the filesystem to the new number of blocks specified. This entry
 * point is only used to extend the current filesystem to the end of the last
 * existing group. It can be accessed via ioctl, or by "remount,resize=<size>"
 * for emergencies (because it has no dependencies on reserved blocks).
 *
 * If we _really_ wanted, we could use default values to call ext4_group_add(),
 * allowing the "remount" trick to work for arbitrary resizing, assuming enough
 * GDT blocks are reserved to grow to the desired size.
 */
int ext4_group_extend(struct super_block *sb, struct ext4_super_block *es,
		      ext4_fsblk_t n_blocks_count)
{
	ext4_fsblk_t o_blocks_count;
	ext4_grpblk_t last;
	ext4_grpblk_t add;
	struct buffer_head *bh;
	int err;
	ext4_group_t group;

	o_blocks_count = ext4_blocks_count(es);

	if (test_opt(sb, DEBUG))
		ext4_msg(sb, KERN_DEBUG,
			 "extending last group from %llu to %llu blocks",
			 o_blocks_count, n_blocks_count);

	if (n_blocks_count == 0 || n_blocks_count == o_blocks_count)
		return 0;
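
	/*
	 * Reject sizes that a sector_t cannot address: a block number must
	 * still be expressible in 512-byte sectors, so the limit is
	 * (sector_t)~0ULL >> (blocksize_bits - 9). With a 64-bit sector_t
	 * and 4KiB blocks, that caps the filesystem at 2^61 blocks.
	 */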
	if (n_blocks_count > (sector_t)(~0ULL) >> (sb->s_blocksize_bits - 9)) {
		ext4_msg(sb, KERN_ERR,
			 "filesystem too large to resize to %llu blocks safely",
			 n_blocks_count);
		return -EINVAL;
	}

	if (n_blocks_count < o_blocks_count) {
		ext4_warning(sb, "can't shrink FS - resize aborted");
		return -EINVAL;
	}

	/* Handle the remaining blocks in the last group only. */
	ext4_get_group_no_and_offset(sb, o_blocks_count, &group, &last);

	if (last == 0) {
		ext4_warning(sb, "need to use ext2online to resize further");
		return -EPERM;
	}

	add = EXT4_BLOCKS_PER_GROUP(sb) - last;

	if (o_blocks_count + add < o_blocks_count) {
		ext4_warning(sb, "blocks_count overflow");
		return -EINVAL;
	}

	if (o_blocks_count + add > n_blocks_count)
		add = n_blocks_count - o_blocks_count;

	if (o_blocks_count + add < n_blocks_count)
		ext4_warning(sb, "will only finish group (%llu blocks, %u new)",
			     o_blocks_count + add, add);

	/* See if the device is actually as big as what was requested */
	bh = ext4_sb_bread(sb, o_blocks_count + add - 1, 0);
	if (IS_ERR(bh)) {
		ext4_warning(sb, "can't read last block, resize aborted");
		return -ENOSPC;
	}
	brelse(bh);

	err = ext4_group_extend_no_check(sb, o_blocks_count, add);
	return err;
} /* ext4_group_extend */
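
/* Number of group descriptor blocks needed to describe @groups groups */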
static int num_desc_blocks(struct super_block *sb, ext4_group_t groups)
{
	return (groups + EXT4_DESC_PER_BLOCK(sb) - 1) / EXT4_DESC_PER_BLOCK(sb);
}

/*
 * Release the resize inode and drop the resize_inode feature if there
 * are no more reserved gdt blocks, and then convert the file system
 * to enable meta_bg
 */
static int ext4_convert_meta_bg(struct super_block *sb, struct inode *inode)
{
	handle_t *handle;
	struct ext4_sb_info *sbi = EXT4_SB(sb);
	struct ext4_super_block *es = sbi->s_es;
	struct ext4_inode_info *ei = EXT4_I(inode);
	ext4_fsblk_t nr;
	int i, ret, err = 0;
	int credits = 1;

	ext4_msg(sb, KERN_INFO, "Converting file system to meta_bg");
	if (inode) {
		if (es->s_reserved_gdt_blocks) {
			ext4_error(sb, "Unexpected non-zero "
				   "s_reserved_gdt_blocks");
			return -EPERM;
		}

		/* Do a quick sanity check of the resize inode */
		if (inode->i_blocks != 1 << (inode->i_blkbits -
					     (9 - sbi->s_cluster_bits)))
			goto invalid_resize_inode;
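		/*
		 * A valid resize inode stores the reserved GDT blocks
		 * behind a single double-indirect block, so only
		 * i_data[EXT4_DIND_BLOCK] may (and must) be non-zero;
		 * anything else means the inode is corrupted.
		 */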
		for (i = 0; i < EXT4_N_BLOCKS; i++) {
			if (i == EXT4_DIND_BLOCK) {
				if (ei->i_data[i])
					continue;
				else
					goto invalid_resize_inode;
			}
			if (ei->i_data[i])
				goto invalid_resize_inode;
		}
		credits += 3;	/* block bitmap, bg descriptor, resize inode */
	}

	handle = ext4_journal_start_sb(sb, EXT4_HT_RESIZE, credits);
	if (IS_ERR(handle))
		return PTR_ERR(handle);

	BUFFER_TRACE(sbi->s_sbh, "get_write_access");
	err = ext4_journal_get_write_access(handle, sbi->s_sbh);
	if (err)
		goto errout;

	ext4_clear_feature_resize_inode(sb);
	ext4_set_feature_meta_bg(sb);
	sbi->s_es->s_first_meta_bg =
		cpu_to_le32(num_desc_blocks(sb, sbi->s_groups_count));

	err = ext4_handle_dirty_super(handle, sb);
	if (err) {
		ext4_std_error(sb, err);
		goto errout;
	}

	if (inode) {
		nr = le32_to_cpu(ei->i_data[EXT4_DIND_BLOCK]);
		ext4_free_blocks(handle, inode, NULL, nr, 1,
				 EXT4_FREE_BLOCKS_METADATA |
				 EXT4_FREE_BLOCKS_FORGET);
		ei->i_data[EXT4_DIND_BLOCK] = 0;
		inode->i_blocks = 0;

		err = ext4_mark_inode_dirty(handle, inode);
		if (err)
			ext4_std_error(sb, err);
	}

errout:
	ret = ext4_journal_stop(handle);
	if (!err)
		err = ret;
	/* return err, not ret, so an earlier failure isn't masked by a
	 * successful journal stop */
	return err;

invalid_resize_inode:
	ext4_error(sb, "corrupted/inconsistent resize inode");
	return -EINVAL;
}

/*
 * ext4_resize_fs() resizes a fs to new size specified by @n_blocks_count
 *
 * @sb: super block of the fs to be resized
 * @n_blocks_count: the number of blocks in the resized fs
 */
int ext4_resize_fs(struct super_block *sb, ext4_fsblk_t n_blocks_count)
{
	struct ext4_new_flex_group_data *flex_gd = NULL;
	struct ext4_sb_info *sbi = EXT4_SB(sb);
	struct ext4_super_block *es = sbi->s_es;
	struct buffer_head *bh;
	struct inode *resize_inode = NULL;
	ext4_grpblk_t add, offset;
	unsigned long n_desc_blocks;
	unsigned long o_desc_blocks;
	ext4_group_t o_group;
	ext4_group_t n_group;
	ext4_fsblk_t o_blocks_count;
	ext4_fsblk_t n_blocks_count_retry = 0;
	unsigned long last_update_time = 0;
	int err = 0, flexbg_size = 1 << sbi->s_log_groups_per_flex;
	int meta_bg;

	/* See if the device is actually as big as what was requested */
	bh = ext4_sb_bread(sb, n_blocks_count - 1, 0);
	if (IS_ERR(bh)) {
		ext4_warning(sb, "can't read last block, resize aborted");
		return -ENOSPC;
	}
	brelse(bh);

	/*
	 * For bigalloc, trim the requested size to the nearest cluster
	 * boundary to avoid creating an unusable filesystem. We do this
	 * silently, instead of returning an error, to avoid breaking
	 * callers that blindly resize the filesystem to the full size of
	 * the underlying block device.
	 */
	if (ext4_has_feature_bigalloc(sb))
		n_blocks_count &= ~((1 << EXT4_CLUSTER_BITS(sb)) - 1);

retry:
	o_blocks_count = ext4_blocks_count(es);

	ext4_msg(sb, KERN_INFO, "resizing filesystem from %llu "
		 "to %llu blocks", o_blocks_count, n_blocks_count);

	if (n_blocks_count < o_blocks_count) {
		/* On-line shrinking not supported */
		ext4_warning(sb, "can't shrink FS - resize aborted");
		return -EINVAL;
	}
	if (n_blocks_count == o_blocks_count)
		/* Nothing to do */
		return 0;

	n_group = ext4_get_group_number(sb, n_blocks_count - 1);
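	/*
	 * s_inodes_count is a 32-bit field, so refuse any size whose
	 * group count would push the total inode count past 2^32 - 1.
	 */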
	if (n_group >= (0xFFFFFFFFUL / EXT4_INODES_PER_GROUP(sb))) {
		ext4_warning(sb, "resize would cause inodes_count overflow");
		return -EINVAL;
	}
	ext4_get_group_no_and_offset(sb, o_blocks_count - 1, &o_group, &offset);

	n_desc_blocks = num_desc_blocks(sb, n_group + 1);
	o_desc_blocks = num_desc_blocks(sb, sbi->s_groups_count);

	meta_bg = ext4_has_feature_meta_bg(sb);

	if (ext4_has_feature_resize_inode(sb)) {
		if (meta_bg) {
			ext4_error(sb, "resize_inode and meta_bg enabled "
				   "simultaneously");
			return -EINVAL;
		}
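		/*
		 * If the target size needs more descriptor blocks than the
		 * resize inode has reserved, grow only as far as the
		 * reservation allows for now, remember the full target in
		 * n_blocks_count_retry, and finish the job after converting
		 * to meta_bg below.
		 */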
		if (n_desc_blocks > o_desc_blocks +
		    le16_to_cpu(es->s_reserved_gdt_blocks)) {
			n_blocks_count_retry = n_blocks_count;
			n_desc_blocks = o_desc_blocks +
				le16_to_cpu(es->s_reserved_gdt_blocks);
			n_group = n_desc_blocks * EXT4_DESC_PER_BLOCK(sb);
			n_blocks_count = (ext4_fsblk_t)n_group *
				EXT4_BLOCKS_PER_GROUP(sb) +
				le32_to_cpu(es->s_first_data_block);
			n_group--; /* set to last group number */
		}

		if (!resize_inode)
			resize_inode = ext4_iget(sb, EXT4_RESIZE_INO,
						 EXT4_IGET_SPECIAL);
		if (IS_ERR(resize_inode)) {
			ext4_warning(sb, "Error opening resize inode");
			return PTR_ERR(resize_inode);
		}
	}

	if ((!resize_inode && !meta_bg) || n_blocks_count == o_blocks_count) {
		err = ext4_convert_meta_bg(sb, resize_inode);
		if (err)
			goto out;
		if (resize_inode) {
			iput(resize_inode);
			resize_inode = NULL;
		}
		if (n_blocks_count_retry) {
			n_blocks_count = n_blocks_count_retry;
			n_blocks_count_retry = 0;
			goto retry;
		}
	}

	/*
	 * Make sure the last group has enough space so that it's
	 * guaranteed to have enough space for all metadata blocks
	 * that it might need to hold. (We might not need to store
	 * the inode table blocks in the last block group, but there
	 * will be cases where this might be needed.)
	 */
	if ((ext4_group_first_block_no(sb, n_group) +
	     ext4_group_overhead_blocks(sb, n_group) + 2 +
	     sbi->s_itb_per_group + sbi->s_cluster_ratio) >= n_blocks_count) {
		n_blocks_count = ext4_group_first_block_no(sb, n_group);
		n_group--;
		n_blocks_count_retry = 0;
		if (resize_inode) {
			iput(resize_inode);
			resize_inode = NULL;
		}
		goto retry;
	}
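
	/*
	 * First bring the existing last group up to a full group boundary
	 * (or up to n_blocks_count, if the resize ends inside that group);
	 * whole new groups are then added flex group by flex group below.
	 */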
	/* extend the last group */
	if (n_group == o_group)
		add = n_blocks_count - o_blocks_count;
	else
		add = EXT4_C2B(sbi, EXT4_CLUSTERS_PER_GROUP(sb) - (offset + 1));
	if (add > 0) {
		err = ext4_group_extend_no_check(sb, o_blocks_count, add);
		if (err)
			goto out;
	}

	if (ext4_blocks_count(es) == n_blocks_count)
		goto out;

	err = ext4_alloc_flex_bg_array(sb, n_group + 1);
	if (err)
		goto out;

	err = ext4_mb_alloc_groupinfo(sb, n_group + 1);
	if (err)
		goto out;
	flex_gd = alloc_flex_gd(flexbg_size);
	if (flex_gd == NULL) {
		err = -ENOMEM;
		goto out;
	}

	/* Add flex groups. Note that a regular group is a
	 * flex group with 1 group.
	 */
	while (ext4_setup_next_flex_gd(sb, flex_gd, n_blocks_count,
				       flexbg_size)) {
		if (jiffies - last_update_time > HZ * 10) {
			if (last_update_time)
				ext4_msg(sb, KERN_INFO,
					 "resized to %llu blocks",
					 ext4_blocks_count(es));
			last_update_time = jiffies;
		}
		if (ext4_alloc_group_tables(sb, flex_gd, flexbg_size) != 0)
			break;
		err = ext4_flex_group_add(sb, resize_inode, flex_gd);
		if (unlikely(err))
			break;
	}

	if (!err && n_blocks_count_retry) {
		n_blocks_count = n_blocks_count_retry;
		n_blocks_count_retry = 0;
		free_flex_gd(flex_gd);
		flex_gd = NULL;
		if (resize_inode) {
			iput(resize_inode);
			resize_inode = NULL;
		}
		goto retry;
	}

out:
	if (flex_gd)
		free_flex_gd(flex_gd);
	if (resize_inode != NULL)
		iput(resize_inode);
	if (err)
		ext4_warning(sb, "error (%d) occurred during "
			     "file system resize", err);
	ext4_msg(sb, KERN_INFO, "resized filesystem to %llu",
		 ext4_blocks_count(es));
	return err;
}