fatent.c

// SPDX-License-Identifier: GPL-2.0-only
/*
 * Copyright (C) 2004, OGAWA Hirofumi
 */

#include <linux/blkdev.h>
#include <linux/sched/signal.h>
#include <linux/backing-dev-defs.h>
#include "fat.h"
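
/*
 * Each FAT variant (12/16/32) provides its own entry accessors through
 * this operations table; the generic code dispatches via
 * MSDOS_SB(sb)->fatent_ops and never touches the on-disk layout directly.
 */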
struct fatent_operations {
	void (*ent_blocknr)(struct super_block *, int, int *, sector_t *);
	void (*ent_set_ptr)(struct fat_entry *, int);
	int (*ent_bread)(struct super_block *, struct fat_entry *,
			 int, sector_t);
	int (*ent_get)(struct fat_entry *);
	void (*ent_put)(struct fat_entry *, int);
	int (*ent_next)(struct fat_entry *);
};

static DEFINE_SPINLOCK(fat12_entry_lock);
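
/*
 * A FAT12 entry is 12 bits (1.5 bytes), so the byte position of entry N
 * is N + N/2, computed below as entry + (entry >> 1). Two consecutive
 * entries share a middle byte, which is why FAT12 accesses are
 * serialized with fat12_entry_lock.
 */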
static void fat12_ent_blocknr(struct super_block *sb, int entry,
			      int *offset, sector_t *blocknr)
{
	struct msdos_sb_info *sbi = MSDOS_SB(sb);
	int bytes = entry + (entry >> 1);

	WARN_ON(!fat_valid_entry(sbi, entry));
	*offset = bytes & (sb->s_blocksize - 1);
	*blocknr = sbi->fat_start + (bytes >> sb->s_blocksize_bits);
}
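
/*
 * FAT16 and FAT32 entries are a fixed power-of-two size, so the byte
 * position is simply entry << fatent_shift (1 for FAT16, 2 for FAT32;
 * see fat_ent_access_init() below).
 */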
static void fat_ent_blocknr(struct super_block *sb, int entry,
			    int *offset, sector_t *blocknr)
{
	struct msdos_sb_info *sbi = MSDOS_SB(sb);
	int bytes = (entry << sbi->fatent_shift);

	WARN_ON(!fat_valid_entry(sbi, entry));
	*offset = bytes & (sb->s_blocksize - 1);
	*blocknr = sbi->fat_start + (bytes >> sb->s_blocksize_bits);
}

static void fat12_ent_set_ptr(struct fat_entry *fatent, int offset)
{
	struct buffer_head **bhs = fatent->bhs;

	if (fatent->nr_bhs == 1) {
		WARN_ON(offset >= (bhs[0]->b_size - 1));
		fatent->u.ent12_p[0] = bhs[0]->b_data + offset;
		fatent->u.ent12_p[1] = bhs[0]->b_data + (offset + 1);
	} else {
		WARN_ON(offset != (bhs[0]->b_size - 1));
		fatent->u.ent12_p[0] = bhs[0]->b_data + offset;
		fatent->u.ent12_p[1] = bhs[1]->b_data;
	}
}

static void fat16_ent_set_ptr(struct fat_entry *fatent, int offset)
{
	WARN_ON(offset & (2 - 1));
	fatent->u.ent16_p = (__le16 *)(fatent->bhs[0]->b_data + offset);
}

static void fat32_ent_set_ptr(struct fat_entry *fatent, int offset)
{
	WARN_ON(offset & (4 - 1));
	fatent->u.ent32_p = (__le32 *)(fatent->bhs[0]->b_data + offset);
}

static int fat12_ent_bread(struct super_block *sb, struct fat_entry *fatent,
			   int offset, sector_t blocknr)
{
	struct buffer_head **bhs = fatent->bhs;

	WARN_ON(blocknr < MSDOS_SB(sb)->fat_start);
	fatent->fat_inode = MSDOS_SB(sb)->fat_inode;

	bhs[0] = sb_bread(sb, blocknr);
	if (!bhs[0])
		goto err;

	if ((offset + 1) < sb->s_blocksize)
		fatent->nr_bhs = 1;
	else {
		/* This entry straddles a block boundary; it needs the next block too */
		blocknr++;
		bhs[1] = sb_bread(sb, blocknr);
		if (!bhs[1])
			goto err_brelse;
		fatent->nr_bhs = 2;
	}
	fat12_ent_set_ptr(fatent, offset);
	return 0;

err_brelse:
	brelse(bhs[0]);
err:
	fat_msg_ratelimit(sb, KERN_ERR, "FAT read failed (blocknr %llu)",
			  (llu)blocknr);
	return -EIO;
}

static int fat_ent_bread(struct super_block *sb, struct fat_entry *fatent,
			 int offset, sector_t blocknr)
{
	const struct fatent_operations *ops = MSDOS_SB(sb)->fatent_ops;

	WARN_ON(blocknr < MSDOS_SB(sb)->fat_start);
	fatent->fat_inode = MSDOS_SB(sb)->fat_inode;
	fatent->bhs[0] = sb_bread(sb, blocknr);
	if (!fatent->bhs[0]) {
		fat_msg_ratelimit(sb, KERN_ERR, "FAT read failed (blocknr %llu)",
				  (llu)blocknr);
		return -EIO;
	}
	fatent->nr_bhs = 1;
	ops->ent_set_ptr(fatent, offset);
	return 0;
}
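
/*
 * Two FAT12 entries are packed into three bytes. An even entry is the
 * low byte plus the low nibble of the shared middle byte; an odd entry
 * is the high nibble of the shared byte plus the following byte. E.g.
 * the bytes 0x12 0x34 0x56 decode to 0x412 (even entry) and 0x563 (odd
 * entry). fat12_ent_get()/fat12_ent_put() below implement this packing.
 */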
static int fat12_ent_get(struct fat_entry *fatent)
{
	u8 **ent12_p = fatent->u.ent12_p;
	int next;

	spin_lock(&fat12_entry_lock);
	if (fatent->entry & 1)
		next = (*ent12_p[0] >> 4) | (*ent12_p[1] << 4);
	else
		next = (*ent12_p[1] << 8) | *ent12_p[0];
	spin_unlock(&fat12_entry_lock);

	next &= 0x0fff;
	if (next >= BAD_FAT12)
		next = FAT_ENT_EOF;
	return next;
}

static int fat16_ent_get(struct fat_entry *fatent)
{
	int next = le16_to_cpu(*fatent->u.ent16_p);

	WARN_ON((unsigned long)fatent->u.ent16_p & (2 - 1));
	if (next >= BAD_FAT16)
		next = FAT_ENT_EOF;
	return next;
}

static int fat32_ent_get(struct fat_entry *fatent)
{
	int next = le32_to_cpu(*fatent->u.ent32_p) & 0x0fffffff;

	WARN_ON((unsigned long)fatent->u.ent32_p & (4 - 1));
	if (next >= BAD_FAT32)
		next = FAT_ENT_EOF;
	return next;
}

static void fat12_ent_put(struct fat_entry *fatent, int new)
{
	u8 **ent12_p = fatent->u.ent12_p;

	if (new == FAT_ENT_EOF)
		new = EOF_FAT12;

	spin_lock(&fat12_entry_lock);
	if (fatent->entry & 1) {
		*ent12_p[0] = (new << 4) | (*ent12_p[0] & 0x0f);
		*ent12_p[1] = new >> 4;
	} else {
		*ent12_p[0] = new & 0xff;
		*ent12_p[1] = (*ent12_p[1] & 0xf0) | (new >> 8);
	}
	spin_unlock(&fat12_entry_lock);

	mark_buffer_dirty_inode(fatent->bhs[0], fatent->fat_inode);
	if (fatent->nr_bhs == 2)
		mark_buffer_dirty_inode(fatent->bhs[1], fatent->fat_inode);
}

static void fat16_ent_put(struct fat_entry *fatent, int new)
{
	if (new == FAT_ENT_EOF)
		new = EOF_FAT16;

	*fatent->u.ent16_p = cpu_to_le16(new);
	mark_buffer_dirty_inode(fatent->bhs[0], fatent->fat_inode);
}
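
/*
 * FAT32 entries are really 28-bit: the top nibble is reserved on disk,
 * so a write must preserve it (hence the read-modify-write below).
 */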
static void fat32_ent_put(struct fat_entry *fatent, int new)
{
	WARN_ON(new & 0xf0000000);
	new |= le32_to_cpu(*fatent->u.ent32_p) & ~0x0fffffff;
	*fatent->u.ent32_p = cpu_to_le32(new);
	mark_buffer_dirty_inode(fatent->bhs[0], fatent->fat_inode);
}
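
/*
 * Advance the FAT12 entry pointers by one or two bytes depending on the
 * parity of the current entry (1.5 bytes on average). When the entry
 * pair straddled a block boundary, the leading buffer head is released
 * once the cursor has moved wholly into the second block.
 */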
static int fat12_ent_next(struct fat_entry *fatent)
{
	u8 **ent12_p = fatent->u.ent12_p;
	struct buffer_head **bhs = fatent->bhs;
	u8 *nextp = ent12_p[1] + 1 + (fatent->entry & 1);

	fatent->entry++;
	if (fatent->nr_bhs == 1) {
		WARN_ON(ent12_p[0] > (u8 *)(bhs[0]->b_data +
					    (bhs[0]->b_size - 2)));
		WARN_ON(ent12_p[1] > (u8 *)(bhs[0]->b_data +
					    (bhs[0]->b_size - 1)));
		if (nextp < (u8 *)(bhs[0]->b_data + (bhs[0]->b_size - 1))) {
			ent12_p[0] = nextp - 1;
			ent12_p[1] = nextp;
			return 1;
		}
	} else {
		WARN_ON(ent12_p[0] != (u8 *)(bhs[0]->b_data +
					     (bhs[0]->b_size - 1)));
		WARN_ON(ent12_p[1] != (u8 *)bhs[1]->b_data);
		ent12_p[0] = nextp - 1;
		ent12_p[1] = nextp;
		brelse(bhs[0]);
		bhs[0] = bhs[1];
		fatent->nr_bhs = 1;
		return 1;
	}
	ent12_p[0] = NULL;
	ent12_p[1] = NULL;
	return 0;
}

static int fat16_ent_next(struct fat_entry *fatent)
{
	const struct buffer_head *bh = fatent->bhs[0];

	fatent->entry++;
	if (fatent->u.ent16_p < (__le16 *)(bh->b_data + (bh->b_size - 2))) {
		fatent->u.ent16_p++;
		return 1;
	}
	fatent->u.ent16_p = NULL;
	return 0;
}

static int fat32_ent_next(struct fat_entry *fatent)
{
	const struct buffer_head *bh = fatent->bhs[0];

	fatent->entry++;
	if (fatent->u.ent32_p < (__le32 *)(bh->b_data + (bh->b_size - 4))) {
		fatent->u.ent32_p++;
		return 1;
	}
	fatent->u.ent32_p = NULL;
	return 0;
}

static const struct fatent_operations fat12_ops = {
	.ent_blocknr	= fat12_ent_blocknr,
	.ent_set_ptr	= fat12_ent_set_ptr,
	.ent_bread	= fat12_ent_bread,
	.ent_get	= fat12_ent_get,
	.ent_put	= fat12_ent_put,
	.ent_next	= fat12_ent_next,
};

static const struct fatent_operations fat16_ops = {
	.ent_blocknr	= fat_ent_blocknr,
	.ent_set_ptr	= fat16_ent_set_ptr,
	.ent_bread	= fat_ent_bread,
	.ent_get	= fat16_ent_get,
	.ent_put	= fat16_ent_put,
	.ent_next	= fat16_ent_next,
};

static const struct fatent_operations fat32_ops = {
	.ent_blocknr	= fat_ent_blocknr,
	.ent_set_ptr	= fat32_ent_set_ptr,
	.ent_bread	= fat_ent_bread,
	.ent_get	= fat32_ent_get,
	.ent_put	= fat32_ent_put,
	.ent_next	= fat32_ent_next,
};

static inline void lock_fat(struct msdos_sb_info *sbi)
{
	mutex_lock(&sbi->fat_lock);
}

static inline void unlock_fat(struct msdos_sb_info *sbi)
{
	mutex_unlock(&sbi->fat_lock);
}

void fat_ent_access_init(struct super_block *sb)
{
	struct msdos_sb_info *sbi = MSDOS_SB(sb);

	mutex_init(&sbi->fat_lock);

	if (is_fat32(sbi)) {
		sbi->fatent_shift = 2;
		sbi->fatent_ops = &fat32_ops;
	} else if (is_fat16(sbi)) {
		sbi->fatent_shift = 1;
		sbi->fatent_ops = &fat16_ops;
	} else if (is_fat12(sbi)) {
		sbi->fatent_shift = -1;
		sbi->fatent_ops = &fat12_ops;
	} else {
		fat_fs_error(sb, "invalid FAT variant, %u bits", sbi->fat_bits);
	}
}
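
/* Only FAT32 keeps a free-cluster summary in the FSINFO sector. */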
static void mark_fsinfo_dirty(struct super_block *sb)
{
	struct msdos_sb_info *sbi = MSDOS_SB(sb);

	if (sb_rdonly(sb) || !is_fat32(sbi))
		return;

	__mark_inode_dirty(sbi->fsinfo_inode, I_DIRTY_SYNC);
}

static inline int fat_ent_update_ptr(struct super_block *sb,
				     struct fat_entry *fatent,
				     int offset, sector_t blocknr)
{
	struct msdos_sb_info *sbi = MSDOS_SB(sb);
	const struct fatent_operations *ops = sbi->fatent_ops;
	struct buffer_head **bhs = fatent->bhs;

	/* Do the blocks this fatent already holds include this entry? */
	if (!fatent->nr_bhs || bhs[0]->b_blocknr != blocknr)
		return 0;
	if (is_fat12(sbi)) {
		if ((offset + 1) < sb->s_blocksize) {
			/* This entry is on bhs[0]. */
			if (fatent->nr_bhs == 2) {
				brelse(bhs[1]);
				fatent->nr_bhs = 1;
			}
		} else {
			/* This entry needs the next block. */
			if (fatent->nr_bhs != 2)
				return 0;
			if (bhs[1]->b_blocknr != (blocknr + 1))
				return 0;
		}
	}
	ops->ent_set_ptr(fatent, offset);
	return 1;
}

int fat_ent_read(struct inode *inode, struct fat_entry *fatent, int entry)
{
	struct super_block *sb = inode->i_sb;
	struct msdos_sb_info *sbi = MSDOS_SB(inode->i_sb);
	const struct fatent_operations *ops = sbi->fatent_ops;
	int err, offset;
	sector_t blocknr;

	if (!fat_valid_entry(sbi, entry)) {
		fatent_brelse(fatent);
		fat_fs_error(sb, "invalid access to FAT (entry 0x%08x)", entry);
		return -EIO;
	}

	fatent_set_entry(fatent, entry);
	ops->ent_blocknr(sb, entry, &offset, &blocknr);

	if (!fat_ent_update_ptr(sb, fatent, offset, blocknr)) {
		fatent_brelse(fatent);
		err = ops->ent_bread(sb, fatent, offset, blocknr);
		if (err)
			return err;
	}
	return ops->ent_get(fatent);
}
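
/*
 * A volume can carry several copies of the FAT (sbi->fats); after a
 * write, the dirtied blocks are copied into every backup FAT, each of
 * which lives sbi->fat_length sectors past the previous copy.
 */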
/* FIXME: We can write the blocks in bigger chunks. */
static int fat_mirror_bhs(struct super_block *sb, struct buffer_head **bhs,
			  int nr_bhs)
{
	struct msdos_sb_info *sbi = MSDOS_SB(sb);
	struct buffer_head *c_bh;
	int err, n, copy;

	err = 0;
	for (copy = 1; copy < sbi->fats; copy++) {
		sector_t backup_fat = sbi->fat_length * copy;

		for (n = 0; n < nr_bhs; n++) {
			c_bh = sb_getblk(sb, backup_fat + bhs[n]->b_blocknr);
			if (!c_bh) {
				err = -ENOMEM;
				goto error;
			}
			/* Avoid race with userspace read via bdev */
			lock_buffer(c_bh);
			memcpy(c_bh->b_data, bhs[n]->b_data, sb->s_blocksize);
			set_buffer_uptodate(c_bh);
			unlock_buffer(c_bh);
			mark_buffer_dirty_inode(c_bh, sbi->fat_inode);
			if (sb->s_flags & SB_SYNCHRONOUS)
				err = sync_dirty_buffer(c_bh);
			brelse(c_bh);
			if (err)
				goto error;
		}
	}
error:
	return err;
}

int fat_ent_write(struct inode *inode, struct fat_entry *fatent,
		  int new, int wait)
{
	struct super_block *sb = inode->i_sb;
	const struct fatent_operations *ops = MSDOS_SB(sb)->fatent_ops;
	int err;

	ops->ent_put(fatent, new);
	if (wait) {
		err = fat_sync_bhs(fatent->bhs, fatent->nr_bhs);
		if (err)
			return err;
	}
	return fat_mirror_bhs(sb, fatent->bhs, fatent->nr_bhs);
}

static inline int fat_ent_next(struct msdos_sb_info *sbi,
			       struct fat_entry *fatent)
{
	if (sbi->fatent_ops->ent_next(fatent)) {
		if (fatent->entry < sbi->max_cluster)
			return 1;
	}
	return 0;
}

static inline int fat_ent_read_block(struct super_block *sb,
				     struct fat_entry *fatent)
{
	const struct fatent_operations *ops = MSDOS_SB(sb)->fatent_ops;
	sector_t blocknr;
	int offset;

	fatent_brelse(fatent);
	ops->ent_blocknr(sb, fatent->entry, &offset, &blocknr);
	return ops->ent_bread(sb, fatent, offset, blocknr);
}
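
/*
 * Collect the buffer heads of a fat_entry into bhs[], skipping any that
 * were already gathered. Each collected bh gains a reference, so the
 * caller owes a brelse() per entry in bhs[] and the fat_entry itself
 * stays usable afterwards.
 */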
static void fat_collect_bhs(struct buffer_head **bhs, int *nr_bhs,
			    struct fat_entry *fatent)
{
	int n, i;

	for (n = 0; n < fatent->nr_bhs; n++) {
		for (i = 0; i < *nr_bhs; i++) {
			if (fatent->bhs[n] == bhs[i])
				break;
		}
		if (i == *nr_bhs) {
			get_bh(fatent->bhs[n]);
			bhs[i] = fatent->bhs[n];
			(*nr_bhs)++;
		}
	}
}
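
/*
 * Allocate nr_cluster clusters and link them into a new chain: scan the
 * FAT for free entries starting just past sbi->prev_free (a simple
 * next-free rotor, wrapping at max_cluster), mark each hit FAT_ENT_EOF,
 * and point the previous allocation at it. On failure, any partially
 * built chain is freed again.
 */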
int fat_alloc_clusters(struct inode *inode, int *cluster, int nr_cluster)
{
	struct super_block *sb = inode->i_sb;
	struct msdos_sb_info *sbi = MSDOS_SB(sb);
	const struct fatent_operations *ops = sbi->fatent_ops;
	struct fat_entry fatent, prev_ent;
	struct buffer_head *bhs[MAX_BUF_PER_PAGE];
	int i, count, err, nr_bhs, idx_clus;

	BUG_ON(nr_cluster > (MAX_BUF_PER_PAGE / 2));	/* fixed limit */

	lock_fat(sbi);
	if (sbi->free_clusters != -1 && sbi->free_clus_valid &&
	    sbi->free_clusters < nr_cluster) {
		unlock_fat(sbi);
		return -ENOSPC;
	}

	err = nr_bhs = idx_clus = 0;
	count = FAT_START_ENT;
	fatent_init(&prev_ent);
	fatent_init(&fatent);
	fatent_set_entry(&fatent, sbi->prev_free + 1);
	while (count < sbi->max_cluster) {
		if (fatent.entry >= sbi->max_cluster)
			fatent.entry = FAT_START_ENT;
		fatent_set_entry(&fatent, fatent.entry);
		err = fat_ent_read_block(sb, &fatent);
		if (err)
			goto out;

		/* Find the free entries in a block */
		do {
			if (ops->ent_get(&fatent) == FAT_ENT_FREE) {
				int entry = fatent.entry;

				/* make the cluster chain */
				ops->ent_put(&fatent, FAT_ENT_EOF);
				if (prev_ent.nr_bhs)
					ops->ent_put(&prev_ent, entry);

				fat_collect_bhs(bhs, &nr_bhs, &fatent);

				sbi->prev_free = entry;
				if (sbi->free_clusters != -1)
					sbi->free_clusters--;

				cluster[idx_clus] = entry;
				idx_clus++;
				if (idx_clus == nr_cluster)
					goto out;

				/*
				 * fat_collect_bhs() took a reference on the
				 * bhs, so prev_ent remains usable.
				 */
				prev_ent = fatent;
			}
			count++;
			if (count == sbi->max_cluster)
				break;
		} while (fat_ent_next(sbi, &fatent));
	}

	/* Couldn't allocate the free entries */
	sbi->free_clusters = 0;
	sbi->free_clus_valid = 1;
	err = -ENOSPC;

out:
	unlock_fat(sbi);
	mark_fsinfo_dirty(sb);
	fatent_brelse(&fatent);
	if (!err) {
		if (inode_needs_sync(inode))
			err = fat_sync_bhs(bhs, nr_bhs);
		if (!err)
			err = fat_mirror_bhs(sb, bhs, nr_bhs);
	}
	for (i = 0; i < nr_bhs; i++)
		brelse(bhs[i]);

	if (err && idx_clus)
		fat_free_clusters(inode, cluster[0]);

	return err;
}
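
/*
 * Walk the chain starting at @cluster, marking every entry FAT_ENT_FREE.
 * Dirty FAT blocks are batched: once more than MAX_BUF_PER_PAGE buffer
 * heads have accumulated, they are synced (if required) and mirrored
 * before the walk continues. With the discard mount option, runs of
 * contiguous freed clusters are also issued as single discard requests.
 */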
int fat_free_clusters(struct inode *inode, int cluster)
{
	struct super_block *sb = inode->i_sb;
	struct msdos_sb_info *sbi = MSDOS_SB(sb);
	const struct fatent_operations *ops = sbi->fatent_ops;
	struct fat_entry fatent;
	struct buffer_head *bhs[MAX_BUF_PER_PAGE];
	int i, err, nr_bhs;
	int first_cl = cluster, dirty_fsinfo = 0;

	nr_bhs = 0;
	fatent_init(&fatent);
	lock_fat(sbi);
	do {
		cluster = fat_ent_read(inode, &fatent, cluster);
		if (cluster < 0) {
			err = cluster;
			goto error;
		} else if (cluster == FAT_ENT_FREE) {
			fat_fs_error(sb, "%s: deleting FAT entry beyond EOF",
				     __func__);
			err = -EIO;
			goto error;
		}

		if (sbi->options.discard) {
			/*
			 * Issue discard for the sectors we no longer
			 * care about, batching contiguous clusters
			 * into one request
			 */
			if (cluster != fatent.entry + 1) {
				int nr_clus = fatent.entry - first_cl + 1;

				sb_issue_discard(sb,
					fat_clus_to_blknr(sbi, first_cl),
					nr_clus * sbi->sec_per_clus,
					GFP_NOFS, 0);

				first_cl = cluster;
			}
		}

		ops->ent_put(&fatent, FAT_ENT_FREE);
		if (sbi->free_clusters != -1) {
			sbi->free_clusters++;
			dirty_fsinfo = 1;
		}

		if (nr_bhs + fatent.nr_bhs > MAX_BUF_PER_PAGE) {
			if (sb->s_flags & SB_SYNCHRONOUS) {
				err = fat_sync_bhs(bhs, nr_bhs);
				if (err)
					goto error;
			}
			err = fat_mirror_bhs(sb, bhs, nr_bhs);
			if (err)
				goto error;
			for (i = 0; i < nr_bhs; i++)
				brelse(bhs[i]);
			nr_bhs = 0;
		}
		fat_collect_bhs(bhs, &nr_bhs, &fatent);
	} while (cluster != FAT_ENT_EOF);

	if (sb->s_flags & SB_SYNCHRONOUS) {
		err = fat_sync_bhs(bhs, nr_bhs);
		if (err)
			goto error;
	}
	err = fat_mirror_bhs(sb, bhs, nr_bhs);
error:
	fatent_brelse(&fatent);
	for (i = 0; i < nr_bhs; i++)
		brelse(bhs[i]);
	unlock_fat(sbi);
	if (dirty_fsinfo)
		mark_fsinfo_dirty(sb);

	return err;
}
EXPORT_SYMBOL_GPL(fat_free_clusters);
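
/*
 * Readahead state for the sequential FAT scans below: cur/limit track
 * progress through the FAT in blocks, while [ra_next, ra_limit) is the
 * window already submitted for readahead, advanced by ra_blocks at a
 * time once the scan cursor reaches ra_advance.
 */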
struct fatent_ra {
	sector_t cur;
	sector_t limit;

	unsigned int ra_blocks;
	sector_t ra_advance;
	sector_t ra_next;
	sector_t ra_limit;
};

static void fat_ra_init(struct super_block *sb, struct fatent_ra *ra,
			struct fat_entry *fatent, int ent_limit)
{
	struct msdos_sb_info *sbi = MSDOS_SB(sb);
	const struct fatent_operations *ops = sbi->fatent_ops;
	sector_t blocknr, block_end;
	int offset;
	/*
	 * This is a sequential read, so use ra_pages * 2 (but try to
	 * align with the optimal hardware IO size).
	 * [BTW, 128kb covers the whole sectors for FAT12 and FAT16]
	 */
	unsigned long ra_pages = sb->s_bdi->ra_pages;
	unsigned int reada_blocks;

	if (fatent->entry >= ent_limit)
		return;

	if (ra_pages > sb->s_bdi->io_pages)
		ra_pages = rounddown(ra_pages, sb->s_bdi->io_pages);
	reada_blocks = ra_pages << (PAGE_SHIFT - sb->s_blocksize_bits + 1);

	/* Initialize the range for sequential read */
	ops->ent_blocknr(sb, fatent->entry, &offset, &blocknr);
	ops->ent_blocknr(sb, ent_limit - 1, &offset, &block_end);
	ra->cur = 0;
	ra->limit = (block_end + 1) - blocknr;

	/* Advancing the window at half size */
	ra->ra_blocks = reada_blocks >> 1;
	ra->ra_advance = ra->cur;
	ra->ra_next = ra->cur;
	ra->ra_limit = ra->cur + min_t(sector_t, reada_blocks, ra->limit);
}

/* Assumed to be called before reading a new block (increments ->cur). */
static void fat_ent_reada(struct super_block *sb, struct fatent_ra *ra,
			  struct fat_entry *fatent)
{
	if (ra->ra_next >= ra->ra_limit)
		return;

	if (ra->cur >= ra->ra_advance) {
		struct msdos_sb_info *sbi = MSDOS_SB(sb);
		const struct fatent_operations *ops = sbi->fatent_ops;
		struct blk_plug plug;
		sector_t blocknr, diff;
		int offset;

		ops->ent_blocknr(sb, fatent->entry, &offset, &blocknr);

		diff = blocknr - ra->cur;
		blk_start_plug(&plug);
		/*
		 * FIXME: we would want to directly use the bio with
		 * pages to reduce the number of segments.
		 */
		for (; ra->ra_next < ra->ra_limit; ra->ra_next++)
			sb_breadahead(sb, ra->ra_next + diff);
		blk_finish_plug(&plug);

		/* Advance the readahead window */
		ra->ra_advance += ra->ra_blocks;
		ra->ra_limit += min_t(sector_t,
				      ra->ra_blocks, ra->limit - ra->ra_limit);
	}
	ra->cur++;
}

int fat_count_free_clusters(struct super_block *sb)
{
	struct msdos_sb_info *sbi = MSDOS_SB(sb);
	const struct fatent_operations *ops = sbi->fatent_ops;
	struct fat_entry fatent;
	struct fatent_ra fatent_ra;
	int err = 0, free;

	lock_fat(sbi);
	if (sbi->free_clusters != -1 && sbi->free_clus_valid)
		goto out;

	free = 0;
	fatent_init(&fatent);
	fatent_set_entry(&fatent, FAT_START_ENT);
	fat_ra_init(sb, &fatent_ra, &fatent, sbi->max_cluster);
	while (fatent.entry < sbi->max_cluster) {
		/* readahead of fat blocks */
		fat_ent_reada(sb, &fatent_ra, &fatent);

		err = fat_ent_read_block(sb, &fatent);
		if (err)
			goto out;

		do {
			if (ops->ent_get(&fatent) == FAT_ENT_FREE)
				free++;
		} while (fat_ent_next(sbi, &fatent));
		cond_resched();
	}
	sbi->free_clusters = free;
	sbi->free_clus_valid = 1;
	mark_fsinfo_dirty(sb);
	fatent_brelse(&fatent);
out:
	unlock_fat(sbi);
	return err;
}

static int fat_trim_clusters(struct super_block *sb, u32 clus, u32 nr_clus)
{
	struct msdos_sb_info *sbi = MSDOS_SB(sb);

	return sb_issue_discard(sb, fat_clus_to_blknr(sbi, clus),
				nr_clus * sbi->sec_per_clus, GFP_NOFS, 0);
}
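
/*
 * Back end for the FITRIM ioctl: scan the FAT over the requested range
 * and issue a discard for every run of free clusters at least minlen
 * clusters long, periodically dropping fat_lock to stay preemptible.
 */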
int fat_trim_fs(struct inode *inode, struct fstrim_range *range)
{
	struct super_block *sb = inode->i_sb;
	struct msdos_sb_info *sbi = MSDOS_SB(sb);
	const struct fatent_operations *ops = sbi->fatent_ops;
	struct fat_entry fatent;
	struct fatent_ra fatent_ra;
	u64 ent_start, ent_end, minlen, trimmed = 0;
	u32 free = 0;
	int err = 0;

	/*
	 * FAT data is organized as clusters, so trim at the granularity
	 * of a cluster.
	 *
	 * fstrim_range is in bytes; convert the values to cluster indexes.
	 * Treat sectors before the data region as all used, so as not to
	 * trim them.
	 */
	ent_start = max_t(u64, range->start>>sbi->cluster_bits, FAT_START_ENT);
	ent_end = ent_start + (range->len >> sbi->cluster_bits) - 1;
	minlen = range->minlen >> sbi->cluster_bits;

	if (ent_start >= sbi->max_cluster || range->len < sbi->cluster_size)
		return -EINVAL;
	if (ent_end >= sbi->max_cluster)
		ent_end = sbi->max_cluster - 1;

	fatent_init(&fatent);
	lock_fat(sbi);
	fatent_set_entry(&fatent, ent_start);
	fat_ra_init(sb, &fatent_ra, &fatent, ent_end + 1);
	while (fatent.entry <= ent_end) {
		/* readahead of fat blocks */
		fat_ent_reada(sb, &fatent_ra, &fatent);

		err = fat_ent_read_block(sb, &fatent);
		if (err)
			goto error;
		do {
			if (ops->ent_get(&fatent) == FAT_ENT_FREE) {
				free++;
			} else if (free) {
				if (free >= minlen) {
					u32 clus = fatent.entry - free;

					err = fat_trim_clusters(sb, clus, free);
					if (err && err != -EOPNOTSUPP)
						goto error;
					if (!err)
						trimmed += free;
					err = 0;
				}
				free = 0;
			}
		} while (fat_ent_next(sbi, &fatent) && fatent.entry <= ent_end);

		if (fatal_signal_pending(current)) {
			err = -ERESTARTSYS;
			goto error;
		}

		if (need_resched()) {
			fatent_brelse(&fatent);
			unlock_fat(sbi);
			cond_resched();
			lock_fat(sbi);
		}
	}
	/* handle the scenario where the tail entries are all free */
	if (free && free >= minlen) {
		u32 clus = fatent.entry - free;

		err = fat_trim_clusters(sb, clus, free);
		if (err && err != -EOPNOTSUPP)
			goto error;
		if (!err)
			trimmed += free;
		err = 0;
	}
error:
	fatent_brelse(&fatent);
	unlock_fat(sbi);
	range->len = trimmed << sbi->cluster_bits;

	return err;
}