rfd_ftl.c 18 KB

12345678910111213141516171819202122232425262728293031323334353637383940414243444546474849505152535455565758596061626364656667686970717273747576777879808182838485868788899091929394959697989910010110210310410510610710810911011111211311411511611711811912012112212312412512612712812913013113213313413513613713813914014114214314414514614714814915015115215315415515615715815916016116216316416516616716816917017117217317417517617717817918018118218318418518618718818919019119219319419519619719819920020120220320420520620720820921021121221321421521621721821922022122222322422522622722822923023123223323423523623723823924024124224324424524624724824925025125225325425525625725825926026126226326426526626726826927027127227327427527627727827928028128228328428528628728828929029129229329429529629729829930030130230330430530630730830931031131231331431531631731831932032132232332432532632732832933033133233333433533633733833934034134234334434534634734834935035135235335435535635735835936036136236336436536636736836937037137237337437537637737837938038138238338438538638738838939039139239339439539639739839940040140240340440540640740840941041141241341441541641741841942042142242342442542642742842943043143243343443543643743843944044144244344444544644744844945045145245345445545645745845946046146246346446546646746846947047147247347447547647747847948048148248348448548648748848949049149249349449549649749849950050150250350450550650750850951051151251351451551651751851952052152252352452552652752852953053153253353453553653753853954054154254354454554654754854955055155255355455555655755855956056156256356456556656756856957057157257357457557657757857958058158258358458558658758858959059159259359459559659759859960060160260360460560660760860961061161261361461561661761861962062162262362462562662762862963063163263363463563663763863964064164264364464564664764864965065165265365465565665765865966066166266366466566666766866967067167267367467567667767867968068168268368468568668768868969069169269369469569669769869970070170270
3704705706707708709710711712713714715716717718719720721722723724725726727728729730731732733734735736737738739740741742743744745746747748749750751752753754755756757758759760761762763764765766767768769770771772773774775776777778779780781782783784785786787788789790791792793794795796797798799800801802803804805806807808809810811812813814815816817818819820821822823824825826827828829830
  1. // SPDX-License-Identifier: GPL-2.0-only
  2. /*
  3. * rfd_ftl.c -- resident flash disk (flash translation layer)
  4. *
  5. * Copyright © 2005 Sean Young <[email protected]>
  6. *
  7. * This type of flash translation layer (FTL) is used by the Embedded BIOS
  8. * by General Software. It is known as the Resident Flash Disk (RFD), see:
  9. *
  10. * http://www.gensw.com/pages/prod/bios/rfd.htm
  11. *
  12. * based on ftl.c
  13. */
  14. #include <linux/hdreg.h>
  15. #include <linux/init.h>
  16. #include <linux/mtd/blktrans.h>
  17. #include <linux/mtd/mtd.h>
  18. #include <linux/vmalloc.h>
  19. #include <linux/slab.h>
  20. #include <linux/jiffies.h>
  21. #include <linux/module.h>
  22. #include <asm/types.h>
/* Optional override for the erase-unit size; 0 means "use mtd->erasesize". */
static int block_size = 0;
module_param(block_size, int, 0);
MODULE_PARM_DESC(block_size, "Block size to use by RFD, defaults to erase unit size");

#define PREFIX "rfd_ftl: "

/* This major has been assigned by [email protected] */
#ifndef RFD_FTL_MAJOR
#define RFD_FTL_MAJOR 256
#endif

/* Maximum number of partitions in an FTL region */
#define PART_BITS 4

/* An erase unit should start with this value */
#define RFD_MAGIC 0x9193

/* the second value is 0xffff or 0xffc8; function unknown */
/* the third value is always 0xffff, ignored */

/* next is an array of mapping for each corresponding sector */
#define HEADER_MAP_OFFSET 3

/* Special values stored in the on-flash sector map entries. */
#define SECTOR_DELETED	0x0000	/* entry no longer holds live data */
#define SECTOR_ZERO	0xfffe	/* stand-in for logical sector 0 (0x0000 means deleted) */
#define SECTOR_FREE	0xffff	/* never written (erased-flash state) */

#define SECTOR_SIZE	512

#define SECTORS_PER_TRACK 63
/*
 * Per-erase-unit bookkeeping, rebuilt from the on-flash headers at scan
 * time and kept current as sectors are written and deleted.
 */
struct block {
	enum {
		BLOCK_OK,	/* valid header, in service */
		BLOCK_ERASING,	/* erase operation in progress */
		BLOCK_ERASED,	/* erased but header not yet written */
		BLOCK_UNUSED,	/* no RFD magic found in the header */
		BLOCK_FAILED	/* erase or header write failed */
	} state;
	int free_sectors;	/* map entries still SECTOR_FREE */
	int used_sectors;	/* map entries holding live data */
	int erases;		/* erase count; wear penalty during reclaim */
	u_long offset;		/* byte offset of this unit within the MTD */
};
/*
 * Per-MTD FTL instance.  The embedded mbd is handed to the blktrans
 * layer; container_of() recovers the partition from it in the hooks.
 */
struct partition {
	struct mtd_blktrans_dev mbd;

	u_int block_size;		/* size of erase unit */
	u_int total_blocks;		/* number of erase units */
	u_int header_sectors_per_block;	/* header sectors in erase unit */
	u_int data_sectors_per_block;	/* data sectors in erase unit */
	u_int sector_count;		/* sectors in translated disk */
	u_int header_size;		/* bytes in header sector */
	int reserved_block;		/* block next up for reclaim */
	int current_block;		/* block to write to */
	u16 *header_cache;		/* cached header */

	int is_reclaiming;		/* non-zero while reclaim runs; blocks recursion */
	int cylinders;			/* fake CHS geometry reported by getgeo */
	int errors;			/* inconsistencies found; forces read-only */
	u_long *sector_map;		/* logical sector -> flash byte address, -1 if unmapped */

	struct block *blocks;		/* per-erase-unit state, total_blocks entries */
};
  74. static int rfd_ftl_writesect(struct mtd_blktrans_dev *dev, u_long sector, char *buf);
/*
 * Parse the header of erase unit @block_no (already loaded into
 * part->header_cache) and fold its sector map into the partition state:
 * set block->state, count free/used sectors, fill part->sector_map[]
 * for every live entry, and remember the unit as the reclaim reserve if
 * it is completely empty.
 *
 * Returns 0 on success or -ENOENT if the unit lacks the RFD magic.
 */
static int build_block_map(struct partition *part, int block_no)
{
	struct block *block = &part->blocks[block_no];
	int i;

	block->offset = part->block_size * block_no;

	if (le16_to_cpu(part->header_cache[0]) != RFD_MAGIC) {
		block->state = BLOCK_UNUSED;
		return -ENOENT;
	}

	block->state = BLOCK_OK;

	for (i=0; i<part->data_sectors_per_block; i++) {
		u16 entry;

		entry = le16_to_cpu(part->header_cache[HEADER_MAP_OFFSET + i]);

		if (entry == SECTOR_DELETED)
			continue;

		if (entry == SECTOR_FREE) {
			block->free_sectors++;
			continue;
		}

		/* sector 0 is stored as SECTOR_ZERO on flash */
		if (entry == SECTOR_ZERO)
			entry = 0;

		if (entry >= part->sector_count) {
			printk(KERN_WARNING PREFIX
				"'%s': unit #%d: entry %d corrupt, "
				"sector %d out of range\n",
				part->mbd.mtd->name, block_no, i, entry);
			continue;
		}

		/* duplicate mapping: on-flash state is inconsistent */
		if (part->sector_map[entry] != -1) {
			printk(KERN_WARNING PREFIX
				"'%s': more than one entry for sector %d\n",
				part->mbd.mtd->name, entry);
			part->errors = 1;
			continue;
		}

		part->sector_map[entry] = block->offset +
			(i + part->header_sectors_per_block) * SECTOR_SIZE;

		block->used_sectors++;
	}

	/* an entirely free unit becomes the reclaim reserve */
	if (block->free_sectors == part->data_sectors_per_block)
		part->reserved_block = block_no;

	return 0;
}
/*
 * Size the partition from the MTD geometry, allocate the in-memory state
 * (header cache, per-unit bookkeeping, logical-to-flash sector map) and
 * scan every erase unit's header to rebuild the map.
 *
 * Returns 0 on success; -ENOENT if the medium is too small or carries no
 * RFD magic at all; -ENOMEM or -EIO on allocation/read failure.
 */
static int scan_header(struct partition *part)
{
	int sectors_per_block;
	int i, rc = -ENOMEM;
	int blocks_found;
	size_t retlen;

	sectors_per_block = part->block_size / SECTOR_SIZE;
	part->total_blocks = (u32)part->mbd.mtd->size / part->block_size;

	/* need at least one data unit plus the reclaim reserve */
	if (part->total_blocks < 2)
		return -ENOENT;

	/* each erase block has three bytes header, followed by the map */
	part->header_sectors_per_block =
			((HEADER_MAP_OFFSET + sectors_per_block) *
			sizeof(u16) + SECTOR_SIZE - 1) / SECTOR_SIZE;

	part->data_sectors_per_block = sectors_per_block -
			part->header_sectors_per_block;

	part->header_size = (HEADER_MAP_OFFSET +
			part->data_sectors_per_block) * sizeof(u16);

	/* one unit is held back for reclaim, hence total_blocks - 1 */
	part->cylinders = (part->data_sectors_per_block *
			(part->total_blocks - 1) - 1) / SECTORS_PER_TRACK;

	part->sector_count = part->cylinders * SECTORS_PER_TRACK;

	part->current_block = -1;
	part->reserved_block = -1;
	part->is_reclaiming = 0;

	part->header_cache = kmalloc(part->header_size, GFP_KERNEL);
	if (!part->header_cache)
		goto err;

	part->blocks = kcalloc(part->total_blocks, sizeof(struct block),
			GFP_KERNEL);
	if (!part->blocks)
		goto err;

	part->sector_map = vmalloc(array_size(sizeof(u_long),
				part->sector_count));
	if (!part->sector_map)
		goto err;

	/* -1 means "logical sector not mapped to flash" */
	for (i=0; i<part->sector_count; i++)
		part->sector_map[i] = -1;

	for (i=0, blocks_found=0; i<part->total_blocks; i++) {
		rc = mtd_read(part->mbd.mtd, i * part->block_size,
			part->header_size, &retlen,
			(u_char *)part->header_cache);

		if (!rc && retlen != part->header_size)
			rc = -EIO;

		if (rc)
			goto err;

		if (!build_block_map(part, i))
			blocks_found++;
	}

	if (blocks_found == 0) {
		printk(KERN_NOTICE PREFIX "no RFD magic found in '%s'\n",
				part->mbd.mtd->name);
		rc = -ENOENT;
		goto err;
	}

	/* writable operation needs a spare unit; without one, go read-only */
	if (part->reserved_block == -1) {
		printk(KERN_WARNING PREFIX "'%s': no empty erase unit found\n",
				part->mbd.mtd->name);

		part->errors = 1;
	}

	return 0;

err:
	vfree(part->sector_map);
	kfree(part->header_cache);
	kfree(part->blocks);

	return rc;
}
  184. static int rfd_ftl_readsect(struct mtd_blktrans_dev *dev, u_long sector, char *buf)
  185. {
  186. struct partition *part = container_of(dev, struct partition, mbd);
  187. u_long addr;
  188. size_t retlen;
  189. int rc;
  190. if (sector >= part->sector_count)
  191. return -EIO;
  192. addr = part->sector_map[sector];
  193. if (addr != -1) {
  194. rc = mtd_read(part->mbd.mtd, addr, SECTOR_SIZE, &retlen,
  195. (u_char *)buf);
  196. if (!rc && retlen != SECTOR_SIZE)
  197. rc = -EIO;
  198. if (rc) {
  199. printk(KERN_WARNING PREFIX "error reading '%s' at "
  200. "0x%lx\n", part->mbd.mtd->name, addr);
  201. return rc;
  202. }
  203. } else
  204. memset(buf, 0, SECTOR_SIZE);
  205. return 0;
  206. }
/*
 * Erase unit @block and, on success, stamp a fresh RFD_MAGIC header so
 * the unit is recognised again.  Drives the in-memory state machine:
 * BLOCK_ERASING -> BLOCK_ERASED -> BLOCK_OK, or BLOCK_FAILED on error.
 *
 * Returns 0 on success or a negative errno.
 */
static int erase_block(struct partition *part, int block)
{
	struct erase_info *erase;
	int rc;

	erase = kmalloc(sizeof(struct erase_info), GFP_KERNEL);
	if (!erase)
		return -ENOMEM;

	erase->addr = part->blocks[block].offset;
	erase->len = part->block_size;

	part->blocks[block].state = BLOCK_ERASING;
	part->blocks[block].free_sectors = 0;

	rc = mtd_erase(part->mbd.mtd, erase);
	if (rc) {
		printk(KERN_ERR PREFIX "erase of region %llx,%llx on '%s' "
				"failed\n", (unsigned long long)erase->addr,
				(unsigned long long)erase->len, part->mbd.mtd->name);
		part->blocks[block].state = BLOCK_FAILED;
		part->blocks[block].free_sectors = 0;
		part->blocks[block].used_sectors = 0;
	} else {
		u16 magic = cpu_to_le16(RFD_MAGIC);
		size_t retlen;

		part->blocks[block].state = BLOCK_ERASED;
		part->blocks[block].free_sectors = part->data_sectors_per_block;
		part->blocks[block].used_sectors = 0;
		part->blocks[block].erases++;

		/* write the magic so build_block_map() will accept the unit */
		rc = mtd_write(part->mbd.mtd, part->blocks[block].offset,
			       sizeof(magic), &retlen, (u_char *)&magic);

		if (!rc && retlen != sizeof(magic))
			rc = -EIO;

		if (rc) {
			pr_err(PREFIX "'%s': unable to write RFD header at 0x%lx\n",
			       part->mbd.mtd->name, part->blocks[block].offset);
			part->blocks[block].state = BLOCK_FAILED;
		} else {
			part->blocks[block].state = BLOCK_OK;
		}
	}

	kfree(erase);

	return rc;
}
  248. static int move_block_contents(struct partition *part, int block_no, u_long *old_sector)
  249. {
  250. void *sector_data;
  251. u16 *map;
  252. size_t retlen;
  253. int i, rc = -ENOMEM;
  254. part->is_reclaiming = 1;
  255. sector_data = kmalloc(SECTOR_SIZE, GFP_KERNEL);
  256. if (!sector_data)
  257. goto err3;
  258. map = kmalloc(part->header_size, GFP_KERNEL);
  259. if (!map)
  260. goto err2;
  261. rc = mtd_read(part->mbd.mtd, part->blocks[block_no].offset,
  262. part->header_size, &retlen, (u_char *)map);
  263. if (!rc && retlen != part->header_size)
  264. rc = -EIO;
  265. if (rc) {
  266. printk(KERN_ERR PREFIX "error reading '%s' at "
  267. "0x%lx\n", part->mbd.mtd->name,
  268. part->blocks[block_no].offset);
  269. goto err;
  270. }
  271. for (i=0; i<part->data_sectors_per_block; i++) {
  272. u16 entry = le16_to_cpu(map[HEADER_MAP_OFFSET + i]);
  273. u_long addr;
  274. if (entry == SECTOR_FREE || entry == SECTOR_DELETED)
  275. continue;
  276. if (entry == SECTOR_ZERO)
  277. entry = 0;
  278. /* already warned about and ignored in build_block_map() */
  279. if (entry >= part->sector_count)
  280. continue;
  281. addr = part->blocks[block_no].offset +
  282. (i + part->header_sectors_per_block) * SECTOR_SIZE;
  283. if (*old_sector == addr) {
  284. *old_sector = -1;
  285. if (!part->blocks[block_no].used_sectors--) {
  286. rc = erase_block(part, block_no);
  287. break;
  288. }
  289. continue;
  290. }
  291. rc = mtd_read(part->mbd.mtd, addr, SECTOR_SIZE, &retlen,
  292. sector_data);
  293. if (!rc && retlen != SECTOR_SIZE)
  294. rc = -EIO;
  295. if (rc) {
  296. printk(KERN_ERR PREFIX "'%s': Unable to "
  297. "read sector for relocation\n",
  298. part->mbd.mtd->name);
  299. goto err;
  300. }
  301. rc = rfd_ftl_writesect((struct mtd_blktrans_dev*)part,
  302. entry, sector_data);
  303. if (rc)
  304. goto err;
  305. }
  306. err:
  307. kfree(map);
  308. err2:
  309. kfree(sector_data);
  310. err3:
  311. part->is_reclaiming = 0;
  312. return rc;
  313. }
/*
 * Pick the cheapest erase unit to reclaim (fewest live sectors, with
 * the erase count added as a wear-leveling penalty), move its live data
 * elsewhere and erase it.  The reclaimed unit becomes the new reserve.
 *
 * @old_sector: see move_block_contents().
 *
 * Returns 0 on success (including "a free sector exists, nothing to
 * do"), -ENOSPC when no candidate exists, or the move/erase error.
 */
static int reclaim_block(struct partition *part, u_long *old_sector)
{
	int block, best_block, score, old_sector_block;
	int rc;

	/* we have a race if sync doesn't exist */
	mtd_sync(part->mbd.mtd);

	score = 0x7fffffff; /* MAX_INT */
	best_block = -1;
	if (*old_sector != -1)
		old_sector_block = *old_sector / part->block_size;
	else
		old_sector_block = -1;

	for (block=0; block<part->total_blocks; block++) {
		int this_score;

		if (block == part->reserved_block)
			continue;

		/*
		 * Postpone reclaiming if there is a free sector as
		 * more removed sectors is more efficient (have to move
		 * less).
		 */
		if (part->blocks[block].free_sectors)
			return 0;

		this_score = part->blocks[block].used_sectors;

		/* the caller is about to delete one sector in this unit */
		if (block == old_sector_block)
			this_score--;
		else {
			/* no point in moving a full block */
			if (part->blocks[block].used_sectors ==
					part->data_sectors_per_block)
				continue;
		}

		this_score += part->blocks[block].erases;

		if (this_score < score) {
			best_block = block;
			score = this_score;
		}
	}

	if (best_block == -1)
		return -ENOSPC;

	part->current_block = -1;
	part->reserved_block = best_block;

	pr_debug("reclaim_block: reclaiming block #%d with %d used "
		 "%d free sectors\n", best_block,
		 part->blocks[best_block].used_sectors,
		 part->blocks[best_block].free_sectors);

	if (part->blocks[best_block].used_sectors)
		rc = move_block_contents(part, best_block, old_sector);
	else
		rc = erase_block(part, best_block);

	return rc;
}
  366. /*
  367. * IMPROVE: It would be best to choose the block with the most deleted sectors,
  368. * because if we fill that one up first it'll have the most chance of having
  369. * the least live sectors at reclaim.
  370. */
  371. static int find_free_block(struct partition *part)
  372. {
  373. int block, stop;
  374. block = part->current_block == -1 ?
  375. jiffies % part->total_blocks : part->current_block;
  376. stop = block;
  377. do {
  378. if (part->blocks[block].free_sectors &&
  379. block != part->reserved_block)
  380. return block;
  381. if (part->blocks[block].state == BLOCK_UNUSED)
  382. erase_block(part, block);
  383. if (++block >= part->total_blocks)
  384. block = 0;
  385. } while (block != stop);
  386. return -1;
  387. }
/*
 * Select an erase unit with free sectors to write into, reclaiming one
 * first if necessary, and load its header into part->header_cache.
 * On success part->current_block names the chosen unit.
 *
 * Returns 0 on success or a negative errno (-ENOSPC if nothing frees up).
 */
static int find_writable_block(struct partition *part, u_long *old_sector)
{
	int rc, block;
	size_t retlen;

	block = find_free_block(part);

	if (block == -1) {
		/* don't recurse into reclaim from within reclaim's writes */
		if (!part->is_reclaiming) {
			rc = reclaim_block(part, old_sector);
			if (rc)
				goto err;

			block = find_free_block(part);
		}

		if (block == -1) {
			rc = -ENOSPC;
			goto err;
		}
	}

	/* cache the header so find_free_sector() can consult it */
	rc = mtd_read(part->mbd.mtd, part->blocks[block].offset,
		part->header_size, &retlen,
		(u_char *)part->header_cache);

	if (!rc && retlen != part->header_size)
		rc = -EIO;

	if (rc) {
		printk(KERN_ERR PREFIX "'%s': unable to read header at "
				"0x%lx\n", part->mbd.mtd->name,
				part->blocks[block].offset);
		goto err;
	}

	part->current_block = block;

err:
	return rc;
}
/*
 * Mark the on-flash map entry for the data sector at flash address
 * @old_addr as SECTOR_DELETED, keep the cached header coherent, and
 * erase the unit once it holds neither live nor free sectors.
 *
 * Returns 0 on success or a negative errno.
 */
static int mark_sector_deleted(struct partition *part, u_long old_addr)
{
	int block, offset, rc;
	u_long addr;
	size_t retlen;
	u16 del = cpu_to_le16(SECTOR_DELETED);

	/* derive the unit index and its map slot from the flash address */
	block = old_addr / part->block_size;
	offset = (old_addr % part->block_size) / SECTOR_SIZE -
		part->header_sectors_per_block;

	addr = part->blocks[block].offset +
			(HEADER_MAP_OFFSET + offset) * sizeof(u16);
	rc = mtd_write(part->mbd.mtd, addr, sizeof(del), &retlen,
		       (u_char *)&del);

	if (!rc && retlen != sizeof(del))
		rc = -EIO;

	if (rc) {
		printk(KERN_ERR PREFIX "error writing '%s' at "
			"0x%lx\n", part->mbd.mtd->name, addr);
		goto err;
	}
	/* keep the cached copy of the current unit's header in sync */
	if (block == part->current_block)
		part->header_cache[offset + HEADER_MAP_OFFSET] = del;

	part->blocks[block].used_sectors--;

	/* unit is fully dead: recycle it right away */
	if (!part->blocks[block].used_sectors &&
	    !part->blocks[block].free_sectors)
		rc = erase_block(part, block);

err:
	return rc;
}
  449. static int find_free_sector(const struct partition *part, const struct block *block)
  450. {
  451. int i, stop;
  452. i = stop = part->data_sectors_per_block - block->free_sectors;
  453. do {
  454. if (le16_to_cpu(part->header_cache[HEADER_MAP_OFFSET + i])
  455. == SECTOR_FREE)
  456. return i;
  457. if (++i == part->data_sectors_per_block)
  458. i = 0;
  459. }
  460. while(i != stop);
  461. return -1;
  462. }
/*
 * Write one sector's payload into a free slot of the current (or a
 * freshly selected) erase unit, then commit the map entry both on flash
 * and in the cached header.  @old_addr is forwarded to reclaim so a
 * sector being superseded is not needlessly relocated.
 *
 * Returns 0 on success or a negative errno.
 */
static int do_writesect(struct mtd_blktrans_dev *dev, u_long sector, char *buf, ulong *old_addr)
{
	struct partition *part = container_of(dev, struct partition, mbd);
	struct block *block;
	u_long addr;
	int i;
	int rc;
	size_t retlen;
	u16 entry;

	if (part->current_block == -1 ||
		!part->blocks[part->current_block].free_sectors) {

		rc = find_writable_block(part, old_addr);
		if (rc)
			goto err;
	}

	block = &part->blocks[part->current_block];

	i = find_free_sector(part, block);

	if (i < 0) {
		rc = -ENOSPC;
		goto err;
	}

	/* write the data first ... */
	addr = (i + part->header_sectors_per_block) * SECTOR_SIZE +
			block->offset;
	rc = mtd_write(part->mbd.mtd, addr, SECTOR_SIZE, &retlen,
		       (u_char *)buf);

	if (!rc && retlen != SECTOR_SIZE)
		rc = -EIO;

	if (rc) {
		printk(KERN_ERR PREFIX "error writing '%s' at 0x%lx\n",
				part->mbd.mtd->name, addr);
		goto err;
	}

	part->sector_map[sector] = addr;

	/* ... then the map entry; logical sector 0 is encoded as SECTOR_ZERO */
	entry = cpu_to_le16(sector == 0 ? SECTOR_ZERO : sector);

	part->header_cache[i + HEADER_MAP_OFFSET] = entry;

	addr = block->offset + (HEADER_MAP_OFFSET + i) * sizeof(u16);
	rc = mtd_write(part->mbd.mtd, addr, sizeof(entry), &retlen,
		       (u_char *)&entry);

	if (!rc && retlen != sizeof(entry))
		rc = -EIO;

	if (rc) {
		printk(KERN_ERR PREFIX "error writing '%s' at 0x%lx\n",
				part->mbd.mtd->name, addr);
		goto err;
	}
	block->used_sectors++;
	block->free_sectors--;

err:
	return rc;
}
/*
 * blktrans writesect hook.  All-zero sectors are not stored at all:
 * their mapping is dropped and reads of unmapped sectors return zeroes.
 * Any previously mapped copy of the sector is marked deleted afterwards.
 *
 * Returns 0 on success or a negative errno (-EACCES when the scan found
 * no reserve unit, which disables writing).
 */
static int rfd_ftl_writesect(struct mtd_blktrans_dev *dev, u_long sector, char *buf)
{
	struct partition *part = container_of(dev, struct partition, mbd);
	u_long old_addr;
	int i;
	int rc = 0;

	pr_debug("rfd_ftl_writesect(sector=0x%lx)\n", sector);

	if (part->reserved_block == -1) {
		rc = -EACCES;
		goto err;
	}

	if (sector >= part->sector_count) {
		rc = -EIO;
		goto err;
	}

	old_addr = part->sector_map[sector];

	/* store the sector only if it contains at least one non-zero byte */
	for (i=0; i<SECTOR_SIZE; i++) {
		if (!buf[i])
			continue;

		rc = do_writesect(dev, sector, buf, &old_addr);
		if (rc)
			goto err;
		break;
	}

	/* all zeroes: unmap instead of writing */
	if (i == SECTOR_SIZE)
		part->sector_map[sector] = -1;

	/* retire the superseded copy, unless reclaim already dropped it */
	if (old_addr != -1)
		rc = mark_sector_deleted(part, old_addr);

err:
	return rc;
}
  544. static int rfd_ftl_discardsect(struct mtd_blktrans_dev *dev,
  545. unsigned long sector, unsigned int nr_sects)
  546. {
  547. struct partition *part = container_of(dev, struct partition, mbd);
  548. u_long addr;
  549. int rc;
  550. while (nr_sects) {
  551. if (sector >= part->sector_count)
  552. return -EIO;
  553. addr = part->sector_map[sector];
  554. if (addr != -1) {
  555. rc = mark_sector_deleted(part, addr);
  556. if (rc)
  557. return rc;
  558. part->sector_map[sector] = -1;
  559. }
  560. sector++;
  561. nr_sects--;
  562. }
  563. return 0;
  564. }
  565. static int rfd_ftl_getgeo(struct mtd_blktrans_dev *dev, struct hd_geometry *geo)
  566. {
  567. struct partition *part = container_of(dev, struct partition, mbd);
  568. geo->heads = 1;
  569. geo->sectors = SECTORS_PER_TRACK;
  570. geo->cylinders = part->cylinders;
  571. return 0;
  572. }
  573. static void rfd_ftl_add_mtd(struct mtd_blktrans_ops *tr, struct mtd_info *mtd)
  574. {
  575. struct partition *part;
  576. if ((mtd->type != MTD_NORFLASH && mtd->type != MTD_RAM) ||
  577. mtd->size > UINT_MAX)
  578. return;
  579. part = kzalloc(sizeof(struct partition), GFP_KERNEL);
  580. if (!part)
  581. return;
  582. part->mbd.mtd = mtd;
  583. if (block_size)
  584. part->block_size = block_size;
  585. else {
  586. if (!mtd->erasesize) {
  587. printk(KERN_WARNING PREFIX "please provide block_size");
  588. goto out;
  589. } else
  590. part->block_size = mtd->erasesize;
  591. }
  592. if (scan_header(part) == 0) {
  593. part->mbd.size = part->sector_count;
  594. part->mbd.tr = tr;
  595. part->mbd.devnum = -1;
  596. if (!(mtd->flags & MTD_WRITEABLE))
  597. part->mbd.readonly = 1;
  598. else if (part->errors) {
  599. printk(KERN_WARNING PREFIX "'%s': errors found, "
  600. "setting read-only\n", mtd->name);
  601. part->mbd.readonly = 1;
  602. }
  603. printk(KERN_INFO PREFIX "name: '%s' type: %d flags %x\n",
  604. mtd->name, mtd->type, mtd->flags);
  605. if (!add_mtd_blktrans_dev(&part->mbd))
  606. return;
  607. }
  608. out:
  609. kfree(part);
  610. }
/*
 * blktrans remove hook: log per-unit erase counts and release the
 * per-partition allocations.
 *
 * NOTE(review): the frees are done before del_mtd_blktrans_dev(),
 * which presumably releases @dev (and with it @part, since mbd is
 * embedded) — confirm against the blktrans core before reordering.
 */
static void rfd_ftl_remove_dev(struct mtd_blktrans_dev *dev)
{
	struct partition *part = container_of(dev, struct partition, mbd);
	int i;

	for (i=0; i<part->total_blocks; i++) {
		pr_debug("rfd_ftl_remove_dev:'%s': erase unit #%02d: %d erases\n",
			part->mbd.mtd->name, i, part->blocks[i].erases);
	}

	vfree(part->sector_map);
	kfree(part->header_cache);
	kfree(part->blocks);
	del_mtd_blktrans_dev(&part->mbd);
}
/* Hook table registering this FTL with the mtd_blktrans core. */
static struct mtd_blktrans_ops rfd_ftl_tr = {
	.name		= "rfd",
	.major		= RFD_FTL_MAJOR,
	.part_bits	= PART_BITS,
	.blksize	= SECTOR_SIZE,

	.readsect	= rfd_ftl_readsect,
	.writesect	= rfd_ftl_writesect,
	.discard	= rfd_ftl_discardsect,
	.getgeo		= rfd_ftl_getgeo,
	.add_mtd	= rfd_ftl_add_mtd,
	.remove_dev	= rfd_ftl_remove_dev,
	.owner		= THIS_MODULE,
};

module_mtd_blktrans(rfd_ftl_tr);

MODULE_LICENSE("GPL");
MODULE_AUTHOR("Sean Young <[email protected]>");
MODULE_DESCRIPTION("Support code for RFD Flash Translation Layer, "
		"used by General Software's Embedded BIOS");