// SPDX-License-Identifier: GPL-2.0-or-later
/*
 * Driver for the SWIM3 (Super Woz Integrated Machine 3)
 * floppy controller found on Power Macintoshes.
 *
 * Copyright (C) 1996 Paul Mackerras.
 */

/*
 * TODO:
 * handle 2 drives
 * handle GCR disks
 */

#undef DEBUG

#include <linux/stddef.h>
#include <linux/kernel.h>
#include <linux/sched/signal.h>
#include <linux/timer.h>
#include <linux/delay.h>
#include <linux/fd.h>
#include <linux/ioctl.h>
#include <linux/blk-mq.h>
#include <linux/interrupt.h>
#include <linux/mutex.h>
#include <linux/module.h>
#include <linux/spinlock.h>
#include <linux/wait.h>
#include <linux/major.h>
#include <asm/io.h>
#include <asm/dbdma.h>
#include <asm/prom.h>
#include <linux/uaccess.h>
#include <asm/mediabay.h>
#include <asm/machdep.h>
#include <asm/pmac_feature.h>

#define MAX_FLOPPIES 2

static DEFINE_MUTEX(swim3_mutex);
static struct gendisk *disks[MAX_FLOPPIES];

enum swim_state {
	idle,
	locating,
	seeking,
	settling,
	do_transfer,
	jogging,
	available,
	revalidating,
	ejecting
};
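
/*
 * Each SWIM3 register is accessed as a single byte; the 15 pad bytes
 * declared by REG() space the fields so that consecutive registers land
 * 16 bytes apart in the chip's MMIO window (apparently how the registers
 * are decoded on the macio bus).
 */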
#define REG(x) unsigned char x; char x ## _pad[15];

/*
 * The names for these registers mostly represent speculation on my part.
 * It will be interesting to see how close they are to the names Apple uses.
 */
struct swim3 {
	REG(data);
	REG(timer);	/* counts down at 1MHz */
	REG(error);
	REG(mode);
	REG(select);	/* controls CA0, CA1, CA2 and LSTRB signals */
	REG(setup);
	REG(control);	/* writing bits clears them */
	REG(status);	/* writing bits sets them in control */
	REG(intr);
	REG(nseek);	/* # tracks to seek */
	REG(ctrack);	/* current track number */
	REG(csect);	/* current sector number */
	REG(gap3);	/* size of gap 3 in track format */
	REG(sector);	/* sector # to read or write */
	REG(nsect);	/* # sectors to read or write */
	REG(intr_enable);
};

#define control_bic control
#define control_bis status

/* Bits in select register */
#define CA_MASK 7
#define LSTRB 8

/* Bits in control register */
#define DO_SEEK 0x80
#define FORMAT 0x40
#define SELECT 0x20
#define WRITE_SECTORS 0x10
#define DO_ACTION 0x08
#define DRIVE2_ENABLE 0x04
#define DRIVE_ENABLE 0x02
#define INTR_ENABLE 0x01

/* Bits in status register */
#define FIFO_1BYTE 0x80
#define FIFO_2BYTE 0x40
#define ERROR 0x20
#define DATA 0x08
#define RDDATA 0x04
#define INTR_PENDING 0x02
#define MARK_BYTE 0x01

/* Bits in intr and intr_enable registers */
#define ERROR_INTR 0x20
#define DATA_CHANGED 0x10
#define TRANSFER_DONE 0x08
#define SEEN_SECTOR 0x04
#define SEEK_DONE 0x02
#define TIMER_DONE 0x01

/* Bits in error register */
#define ERR_DATA_CRC 0x80
#define ERR_ADDR_CRC 0x40
#define ERR_OVERRUN 0x04
#define ERR_UNDERRUN 0x01

/* Bits in setup register */
#define S_SW_RESET 0x80
#define S_GCR_WRITE 0x40
#define S_IBM_DRIVE 0x20
#define S_TEST_MODE 0x10
#define S_FCLK_DIV2 0x08
#define S_GCR 0x04
#define S_COPY_PROT 0x02
#define S_INV_WDATA 0x01

/* Select values for swim3_action */
#define SEEK_POSITIVE 0
#define SEEK_NEGATIVE 4
#define STEP 1
#define MOTOR_ON 2
#define MOTOR_OFF 6
#define INDEX 3
#define EJECT 7
#define SETMFM 9
#define SETGCR 13

/* Select values for swim3_select and swim3_readbit */
#define STEP_DIR 0
#define STEPPING 1
#define MOTOR_ON 2
#define RELAX 3	/* also eject in progress */
#define READ_DATA_0 4
#define ONEMEG_DRIVE 5
#define SINGLE_SIDED 6	/* drive or diskette is 4MB type? */
#define DRIVE_PRESENT 7
#define DISK_IN 8
#define WRITE_PROT 9
#define TRACK_ZERO 10
#define TACHO 11
#define READ_DATA_1 12
#define GCR_MODE 13
#define SEEK_COMPLETE 14
#define TWOMEG_MEDIA 15

/* Definitions of values used in writing and formatting */
#define DATA_ESCAPE 0x99
#define GCR_SYNC_EXC 0x3f
#define GCR_SYNC_CONV 0x80
#define GCR_FIRST_MARK 0xd5
#define GCR_SECOND_MARK 0xaa
#define GCR_ADDR_MARK "\xd5\xaa\x00"
#define GCR_DATA_MARK "\xd5\xaa\x0b"
#define GCR_SLIP_BYTE "\x27\xaa"
#define GCR_SELF_SYNC "\x3f\xbf\x1e\x34\x3c\x3f"
#define DATA_99 "\x99\x99"
#define MFM_ADDR_MARK "\x99\xa1\x99\xa1\x99\xa1\x99\xfe"
#define MFM_INDEX_MARK "\x99\xc2\x99\xc2\x99\xc2\x99\xfc"
#define MFM_GAP_LEN 12

struct floppy_state {
	enum swim_state state;
	struct swim3 __iomem *swim3;	/* hardware registers */
	struct dbdma_regs __iomem *dma;	/* DMA controller registers */
	int swim3_intr;		/* interrupt number for SWIM3 */
	int dma_intr;		/* interrupt number for DMA channel */
	int cur_cyl;		/* cylinder head is on, or -1 */
	int cur_sector;		/* last sector we saw go past */
	int req_cyl;		/* the cylinder for the current r/w request */
	int head;		/* head number ditto */
	int req_sector;		/* sector number ditto */
	int scount;		/* # sectors we're transferring at present */
	int retries;
	int settle_time;
	int secpercyl;		/* disk geometry information */
	int secpertrack;
	int total_secs;
	int write_prot;		/* 1 if write-protected, 0 if not, -1 dunno */
	struct dbdma_cmd *dma_cmd;
	int ref_count;
	int expect_cyl;
	struct timer_list timeout;
	int timeout_pending;
	int ejected;
	wait_queue_head_t wait;
	int wanted;
	struct macio_dev *mdev;
	char dbdma_cmd_space[5 * sizeof(struct dbdma_cmd)];
	int index;
	struct request *cur_req;
	struct blk_mq_tag_set tag_set;
};

#define swim3_err(fmt, arg...)	dev_err(&fs->mdev->ofdev.dev, "[fd%d] " fmt, fs->index, arg)
#define swim3_warn(fmt, arg...)	dev_warn(&fs->mdev->ofdev.dev, "[fd%d] " fmt, fs->index, arg)
#define swim3_info(fmt, arg...)	dev_info(&fs->mdev->ofdev.dev, "[fd%d] " fmt, fs->index, arg)

#ifdef DEBUG
#define swim3_dbg(fmt, arg...)	dev_dbg(&fs->mdev->ofdev.dev, "[fd%d] " fmt, fs->index, arg)
#else
#define swim3_dbg(fmt, arg...)	do { } while(0)
#endif

static struct floppy_state floppy_states[MAX_FLOPPIES];
static int floppy_count = 0;
static DEFINE_SPINLOCK(swim3_lock);
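
/*
 * MFM write preamble/postamble fed to the controller by DMA around each
 * 512-byte sector.  Words whose high byte is DATA_ESCAPE (0x99) appear to
 * be in-band commands to the SWIM3 rather than literal data, as suggested
 * by the "insert CRC" / "stop writing" annotations below.
 */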
static unsigned short write_preamble[] = {
	0x4e4e, 0x4e4e, 0x4e4e, 0x4e4e, 0x4e4e,	/* gap field */
	0, 0, 0, 0, 0, 0,			/* sync field */
	0x99a1, 0x99a1, 0x99a1, 0x99fb,		/* data address mark */
	0x990f					/* no escape for 512 bytes */
};

static unsigned short write_postamble[] = {
	0x9904,					/* insert CRC */
	0x4e4e, 0x4e4e,
	0x9908,					/* stop writing */
	0, 0, 0, 0, 0, 0
};

static void seek_track(struct floppy_state *fs, int n);
static void act(struct floppy_state *fs);
static void scan_timeout(struct timer_list *t);
static void seek_timeout(struct timer_list *t);
static void settle_timeout(struct timer_list *t);
static void xfer_timeout(struct timer_list *t);
static irqreturn_t swim3_interrupt(int irq, void *dev_id);
/*static void fd_dma_interrupt(int irq, void *dev_id);*/
static int grab_drive(struct floppy_state *fs, enum swim_state state,
		      int interruptible);
static void release_drive(struct floppy_state *fs);
static int fd_eject(struct floppy_state *fs);
static int floppy_ioctl(struct block_device *bdev, fmode_t mode,
			unsigned int cmd, unsigned long param);
static int floppy_open(struct block_device *bdev, fmode_t mode);
static void floppy_release(struct gendisk *disk, fmode_t mode);
static unsigned int floppy_check_events(struct gendisk *disk,
					unsigned int clearing);
static int floppy_revalidate(struct gendisk *disk);

static bool swim3_end_request(struct floppy_state *fs, blk_status_t err, unsigned int nr_bytes)
{
	struct request *req = fs->cur_req;

	swim3_dbg(" end request, err=%d nr_bytes=%d, cur_req=%p\n",
		  err, nr_bytes, req);

	if (err)
		nr_bytes = blk_rq_cur_bytes(req);
	if (blk_update_request(req, err, nr_bytes))
		return true;
	__blk_mq_end_request(req, err);
	fs->cur_req = NULL;
	return false;
}
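
/*
 * Low-level drive signalling: swim3_select() drives the CA0-CA2 lines
 * (and the fourth select line via the SELECT control bit), swim3_action()
 * additionally pulses LSTRB to latch a command into the drive, and
 * swim3_readbit() reads the selected drive status line back through the
 * (active-low) DATA bit of the status register.
 */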
static void swim3_select(struct floppy_state *fs, int sel)
{
	struct swim3 __iomem *sw = fs->swim3;

	out_8(&sw->select, RELAX);
	if (sel & 8)
		out_8(&sw->control_bis, SELECT);
	else
		out_8(&sw->control_bic, SELECT);
	out_8(&sw->select, sel & CA_MASK);
}

static void swim3_action(struct floppy_state *fs, int action)
{
	struct swim3 __iomem *sw = fs->swim3;

	swim3_select(fs, action);
	udelay(1);
	out_8(&sw->select, sw->select | LSTRB);
	udelay(2);
	out_8(&sw->select, sw->select & ~LSTRB);
	udelay(1);
}

static int swim3_readbit(struct floppy_state *fs, int bit)
{
	struct swim3 __iomem *sw = fs->swim3;
	int stat;

	swim3_select(fs, bit);
	udelay(1);
	stat = in_8(&sw->status);
	return (stat & DATA) == 0;
}
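
/*
 * blk-mq ->queue_rq handler.  Only one request is handled at a time: if the
 * drive is busy the request is pushed back with BLK_STS_DEV_RESOURCE so that
 * blk-mq re-dispatches it later.  Otherwise the request becomes cur_req and
 * the state machine (act() plus the interrupt and timer handlers) completes
 * it asynchronously.
 */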
static blk_status_t swim3_queue_rq(struct blk_mq_hw_ctx *hctx,
				   const struct blk_mq_queue_data *bd)
{
	struct floppy_state *fs = hctx->queue->queuedata;
	struct request *req = bd->rq;
	unsigned long x;

	spin_lock_irq(&swim3_lock);
	if (fs->cur_req || fs->state != idle) {
		spin_unlock_irq(&swim3_lock);
		return BLK_STS_DEV_RESOURCE;
	}
	blk_mq_start_request(req);
	fs->cur_req = req;
	if (fs->mdev->media_bay &&
	    check_media_bay(fs->mdev->media_bay) != MB_FD) {
		swim3_dbg("%s", " media bay absent, dropping req\n");
		swim3_end_request(fs, BLK_STS_IOERR, 0);
		goto out;
	}
	if (fs->ejected) {
		swim3_dbg("%s", " disk ejected\n");
		swim3_end_request(fs, BLK_STS_IOERR, 0);
		goto out;
	}
	if (rq_data_dir(req) == WRITE) {
		if (fs->write_prot < 0)
			fs->write_prot = swim3_readbit(fs, WRITE_PROT);
		if (fs->write_prot) {
			swim3_dbg("%s", " try to write, disk write protected\n");
			swim3_end_request(fs, BLK_STS_IOERR, 0);
			goto out;
		}
	}

	/*
	 * Do not remove the cast. blk_rq_pos(req) is now a sector_t and can be
	 * 64 bits, but it will never go past 32 bits for this driver anyway, so
	 * we can safely cast it down and not have to do a 64/32 division
	 */
	fs->req_cyl = ((long)blk_rq_pos(req)) / fs->secpercyl;
	x = ((long)blk_rq_pos(req)) % fs->secpercyl;
	fs->head = x / fs->secpertrack;
	fs->req_sector = x % fs->secpertrack + 1;
	fs->state = do_transfer;
	fs->retries = 0;

	act(fs);

out:
	spin_unlock_irq(&swim3_lock);
	return BLK_STS_OK;
}

static void set_timeout(struct floppy_state *fs, int nticks,
			void (*proc)(struct timer_list *t))
{
	if (fs->timeout_pending)
		del_timer(&fs->timeout);
	fs->timeout.expires = jiffies + nticks;
	fs->timeout.function = proc;
	add_timer(&fs->timeout);
	fs->timeout_pending = 1;
}

static inline void scan_track(struct floppy_state *fs)
{
	struct swim3 __iomem *sw = fs->swim3;

	swim3_select(fs, READ_DATA_0);
	in_8(&sw->intr);	/* clear SEEN_SECTOR bit */
	in_8(&sw->error);
	out_8(&sw->intr_enable, SEEN_SECTOR);
	out_8(&sw->control_bis, DO_ACTION);
	/* enable intr when track found */
	set_timeout(fs, HZ, scan_timeout);	/* enable timeout */
}

static inline void seek_track(struct floppy_state *fs, int n)
{
	struct swim3 __iomem *sw = fs->swim3;

	if (n >= 0) {
		swim3_action(fs, SEEK_POSITIVE);
		sw->nseek = n;
	} else {
		swim3_action(fs, SEEK_NEGATIVE);
		sw->nseek = -n;
	}
	fs->expect_cyl = (fs->cur_cyl >= 0)? fs->cur_cyl + n: -1;
	swim3_select(fs, STEP);
	in_8(&sw->error);
	/* enable intr when seek finished */
	out_8(&sw->intr_enable, SEEK_DONE);
	out_8(&sw->control_bis, DO_SEEK);
	set_timeout(fs, 3*HZ, seek_timeout);	/* enable timeout */
	fs->settle_time = 0;
}

/*
 * XXX: this is a horrible hack, but at least allows ppc32 to get
 * out of defining virt_to_bus, and this driver out of using the
 * deprecated block layer bounce buffering for highmem addresses
 * for no good reason.
 */
static unsigned long swim3_phys_to_bus(phys_addr_t paddr)
{
	return paddr + PCI_DRAM_OFFSET;
}

static phys_addr_t swim3_bio_phys(struct bio *bio)
{
	return page_to_phys(bio_page(bio)) + bio_offset(bio);
}

static inline void init_dma(struct dbdma_cmd *cp, int cmd,
			    phys_addr_t paddr, int count)
{
	cp->req_count = cpu_to_le16(count);
	cp->command = cpu_to_le16(cmd);
	cp->phy_addr = cpu_to_le32(swim3_phys_to_bus(paddr));
	cp->xfer_status = 0;
}
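
/*
 * Build the DBDMA command list for the current chunk of the request and arm
 * both the DMA controller and the SWIM3.  Reads transfer up to the rest of
 * the current track in one go; writes go one sector at a time, bracketed by
 * the preamble and postamble defined above.  Completion is reported via
 * interrupt (see swim3_interrupt()), with xfer_timeout() as a backstop.
 */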
static inline void setup_transfer(struct floppy_state *fs)
{
	int n;
	struct swim3 __iomem *sw = fs->swim3;
	struct dbdma_cmd *cp = fs->dma_cmd;
	struct dbdma_regs __iomem *dr = fs->dma;
	struct request *req = fs->cur_req;

	if (blk_rq_cur_sectors(req) <= 0) {
		swim3_warn("%s", "Transfer 0 sectors ?\n");
		return;
	}
	if (rq_data_dir(req) == WRITE)
		n = 1;
	else {
		n = fs->secpertrack - fs->req_sector + 1;
		if (n > blk_rq_cur_sectors(req))
			n = blk_rq_cur_sectors(req);
	}

	swim3_dbg(" setup xfer at sect %d (of %d) head %d for %d\n",
		  fs->req_sector, fs->secpertrack, fs->head, n);

	fs->scount = n;
	swim3_select(fs, fs->head? READ_DATA_1: READ_DATA_0);
	out_8(&sw->sector, fs->req_sector);
	out_8(&sw->nsect, n);
	out_8(&sw->gap3, 0);
	out_le32(&dr->cmdptr, swim3_phys_to_bus(virt_to_phys(cp)));
	if (rq_data_dir(req) == WRITE) {
		/* Set up 3 dma commands: write preamble, data, postamble */
		init_dma(cp, OUTPUT_MORE, virt_to_phys(write_preamble),
			 sizeof(write_preamble));
		++cp;
		init_dma(cp, OUTPUT_MORE, swim3_bio_phys(req->bio), 512);
		++cp;
		init_dma(cp, OUTPUT_LAST, virt_to_phys(write_postamble),
			 sizeof(write_postamble));
	} else {
		init_dma(cp, INPUT_LAST, swim3_bio_phys(req->bio), n * 512);
	}
	++cp;
	out_le16(&cp->command, DBDMA_STOP);
	out_8(&sw->control_bic, DO_ACTION | WRITE_SECTORS);
	in_8(&sw->error);
	out_8(&sw->control_bic, DO_ACTION | WRITE_SECTORS);
	if (rq_data_dir(req) == WRITE)
		out_8(&sw->control_bis, WRITE_SECTORS);
	in_8(&sw->intr);
	out_le32(&dr->control, (RUN << 16) | RUN);
	/* enable intr when transfer complete */
	out_8(&sw->intr_enable, TRANSFER_DONE);
	out_8(&sw->control_bis, DO_ACTION);
	set_timeout(fs, 2*HZ, xfer_timeout);	/* enable timeout */
}
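
/*
 * Core of the state machine.  Loops over the current state, kicking off the
 * next step (locate, seek, settle, transfer) and returning once one has been
 * started; the matching interrupt or timeout handler then calls act() again
 * to advance to the next state.
 */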
static void act(struct floppy_state *fs)
{
	for (;;) {
		swim3_dbg(" act loop, state=%d, req_cyl=%d, cur_cyl=%d\n",
			  fs->state, fs->req_cyl, fs->cur_cyl);

		switch (fs->state) {
		case idle:
			return;	/* XXX shouldn't get here */

		case locating:
			if (swim3_readbit(fs, TRACK_ZERO)) {
				swim3_dbg("%s", " locate track 0\n");
				fs->cur_cyl = 0;
				if (fs->req_cyl == 0)
					fs->state = do_transfer;
				else
					fs->state = seeking;
				break;
			}
			scan_track(fs);
			return;

		case seeking:
			if (fs->cur_cyl < 0) {
				fs->expect_cyl = -1;
				fs->state = locating;
				break;
			}
			if (fs->req_cyl == fs->cur_cyl) {
				swim3_warn("%s", "Whoops, seeking 0\n");
				fs->state = do_transfer;
				break;
			}
			seek_track(fs, fs->req_cyl - fs->cur_cyl);
			return;

		case settling:
			/* check for SEEK_COMPLETE after 30ms */
			fs->settle_time = (HZ + 32) / 33;
			set_timeout(fs, fs->settle_time, settle_timeout);
			return;

		case do_transfer:
			if (fs->cur_cyl != fs->req_cyl) {
				if (fs->retries > 5) {
					swim3_err("Wrong cylinder in transfer, want: %d got %d\n",
						  fs->req_cyl, fs->cur_cyl);
					swim3_end_request(fs, BLK_STS_IOERR, 0);
					fs->state = idle;
					return;
				}
				fs->state = seeking;
				break;
			}
			setup_transfer(fs);
			return;

		case jogging:
			seek_track(fs, -5);
			return;

		default:
			swim3_err("Unknown state %d\n", fs->state);
			return;
		}
	}
}

static void scan_timeout(struct timer_list *t)
{
	struct floppy_state *fs = from_timer(fs, t, timeout);
	struct swim3 __iomem *sw = fs->swim3;
	unsigned long flags;

	swim3_dbg("* scan timeout, state=%d\n", fs->state);

	spin_lock_irqsave(&swim3_lock, flags);
	fs->timeout_pending = 0;
	out_8(&sw->control_bic, DO_ACTION | WRITE_SECTORS);
	out_8(&sw->select, RELAX);
	out_8(&sw->intr_enable, 0);
	fs->cur_cyl = -1;
	if (fs->retries > 5) {
		swim3_end_request(fs, BLK_STS_IOERR, 0);
		fs->state = idle;
	} else {
		fs->state = jogging;
		act(fs);
	}
	spin_unlock_irqrestore(&swim3_lock, flags);
}

static void seek_timeout(struct timer_list *t)
{
	struct floppy_state *fs = from_timer(fs, t, timeout);
	struct swim3 __iomem *sw = fs->swim3;
	unsigned long flags;

	swim3_dbg("* seek timeout, state=%d\n", fs->state);

	spin_lock_irqsave(&swim3_lock, flags);
	fs->timeout_pending = 0;
	out_8(&sw->control_bic, DO_SEEK);
	out_8(&sw->select, RELAX);
	out_8(&sw->intr_enable, 0);
	swim3_err("%s", "Seek timeout\n");
	swim3_end_request(fs, BLK_STS_IOERR, 0);
	fs->state = idle;
	spin_unlock_irqrestore(&swim3_lock, flags);
}

static void settle_timeout(struct timer_list *t)
{
	struct floppy_state *fs = from_timer(fs, t, timeout);
	struct swim3 __iomem *sw = fs->swim3;
	unsigned long flags;

	swim3_dbg("* settle timeout, state=%d\n", fs->state);

	spin_lock_irqsave(&swim3_lock, flags);
	fs->timeout_pending = 0;
	if (swim3_readbit(fs, SEEK_COMPLETE)) {
		out_8(&sw->select, RELAX);
		fs->state = locating;
		act(fs);
		goto unlock;
	}
	out_8(&sw->select, RELAX);
	if (fs->settle_time < 2*HZ) {
		++fs->settle_time;
		set_timeout(fs, 1, settle_timeout);
		goto unlock;
	}
	swim3_err("%s", "Seek settle timeout\n");
	swim3_end_request(fs, BLK_STS_IOERR, 0);
	fs->state = idle;
 unlock:
	spin_unlock_irqrestore(&swim3_lock, flags);
}

static void xfer_timeout(struct timer_list *t)
{
	struct floppy_state *fs = from_timer(fs, t, timeout);
	struct swim3 __iomem *sw = fs->swim3;
	struct dbdma_regs __iomem *dr = fs->dma;
	unsigned long flags;
	int n;

	swim3_dbg("* xfer timeout, state=%d\n", fs->state);

	spin_lock_irqsave(&swim3_lock, flags);
	fs->timeout_pending = 0;
	out_le32(&dr->control, RUN << 16);
	/* We must wait a bit for dbdma to stop */
	for (n = 0; (in_le32(&dr->status) & ACTIVE) && n < 1000; n++)
		udelay(1);
	out_8(&sw->intr_enable, 0);
	out_8(&sw->control_bic, WRITE_SECTORS | DO_ACTION);
	out_8(&sw->select, RELAX);
	swim3_err("Timeout %sing sector %ld\n",
		  (rq_data_dir(fs->cur_req)==WRITE? "writ": "read"),
		  (long)blk_rq_pos(fs->cur_req));
	swim3_end_request(fs, BLK_STS_IOERR, 0);
	fs->state = idle;
	spin_unlock_irqrestore(&swim3_lock, flags);
}
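
/*
 * Interrupt handler: decodes the SEEN_SECTOR, seek-done and TRANSFER_DONE
 * events according to the current state, checks the DBDMA status for
 * transfers, retries failed operations a few times, and either advances the
 * state machine via act() or fails the current request.
 */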
static irqreturn_t swim3_interrupt(int irq, void *dev_id)
{
	struct floppy_state *fs = (struct floppy_state *) dev_id;
	struct swim3 __iomem *sw = fs->swim3;
	int intr, err, n;
	int stat, resid;
	struct dbdma_regs __iomem *dr;
	struct dbdma_cmd *cp;
	unsigned long flags;
	struct request *req = fs->cur_req;

	swim3_dbg("* interrupt, state=%d\n", fs->state);

	spin_lock_irqsave(&swim3_lock, flags);
	intr = in_8(&sw->intr);
	err = (intr & ERROR_INTR)? in_8(&sw->error): 0;
	if ((intr & ERROR_INTR) && fs->state != do_transfer)
		swim3_err("Non-transfer error interrupt: state=%d, dir=%x, intr=%x, err=%x\n",
			  fs->state, rq_data_dir(req), intr, err);
	switch (fs->state) {
	case locating:
		if (intr & SEEN_SECTOR) {
			out_8(&sw->control_bic, DO_ACTION | WRITE_SECTORS);
			out_8(&sw->select, RELAX);
			out_8(&sw->intr_enable, 0);
			del_timer(&fs->timeout);
			fs->timeout_pending = 0;
			if (sw->ctrack == 0xff) {
				swim3_err("%s", "Seen sector but cyl=ff?\n");
				fs->cur_cyl = -1;
				if (fs->retries > 5) {
					swim3_end_request(fs, BLK_STS_IOERR, 0);
					fs->state = idle;
				} else {
					fs->state = jogging;
					act(fs);
				}
				break;
			}
			fs->cur_cyl = sw->ctrack;
			fs->cur_sector = sw->csect;
			if (fs->expect_cyl != -1 && fs->expect_cyl != fs->cur_cyl)
				swim3_err("Expected cyl %d, got %d\n",
					  fs->expect_cyl, fs->cur_cyl);
			fs->state = do_transfer;
			act(fs);
		}
		break;
	case seeking:
	case jogging:
		if (sw->nseek == 0) {
			out_8(&sw->control_bic, DO_SEEK);
			out_8(&sw->select, RELAX);
			out_8(&sw->intr_enable, 0);
			del_timer(&fs->timeout);
			fs->timeout_pending = 0;
			if (fs->state == seeking)
				++fs->retries;
			fs->state = settling;
			act(fs);
		}
		break;
	case settling:
		out_8(&sw->intr_enable, 0);
		del_timer(&fs->timeout);
		fs->timeout_pending = 0;
		act(fs);
		break;
	case do_transfer:
		if ((intr & (ERROR_INTR | TRANSFER_DONE)) == 0)
			break;
		out_8(&sw->intr_enable, 0);
		out_8(&sw->control_bic, WRITE_SECTORS | DO_ACTION);
		out_8(&sw->select, RELAX);
		del_timer(&fs->timeout);
		fs->timeout_pending = 0;
		dr = fs->dma;
		cp = fs->dma_cmd;
		if (rq_data_dir(req) == WRITE)
			++cp;
		/*
		 * Check that the main data transfer has finished.
		 * On writing, the swim3 sometimes doesn't use
		 * up all the bytes of the postamble, so we can still
		 * see DMA active here. That doesn't matter as long
		 * as all the sector data has been transferred.
		 */
		if ((intr & ERROR_INTR) == 0 && cp->xfer_status == 0) {
			/* wait a little while for DMA to complete */
			for (n = 0; n < 100; ++n) {
				if (cp->xfer_status != 0)
					break;
				udelay(1);
				barrier();
			}
		}
		/* turn off DMA */
		out_le32(&dr->control, (RUN | PAUSE) << 16);
		stat = le16_to_cpu(cp->xfer_status);
		resid = le16_to_cpu(cp->res_count);
		if (intr & ERROR_INTR) {
			n = fs->scount - 1 - resid / 512;
			if (n > 0) {
				blk_update_request(req, 0, n << 9);
				fs->req_sector += n;
			}
			if (fs->retries < 5) {
				++fs->retries;
				act(fs);
			} else {
				swim3_err("Error %sing block %ld (err=%x)\n",
					  rq_data_dir(req) == WRITE? "writ": "read",
					  (long)blk_rq_pos(req), err);
				swim3_end_request(fs, BLK_STS_IOERR, 0);
				fs->state = idle;
			}
		} else {
			if ((stat & ACTIVE) == 0 || resid != 0) {
				/* musta been an error */
				swim3_err("fd dma error: stat=%x resid=%d\n", stat, resid);
				swim3_err(" state=%d, dir=%x, intr=%x, err=%x\n",
					  fs->state, rq_data_dir(req), intr, err);
				swim3_end_request(fs, BLK_STS_IOERR, 0);
				fs->state = idle;
				break;
			}
			fs->retries = 0;
			if (swim3_end_request(fs, 0, fs->scount << 9)) {
				fs->req_sector += fs->scount;
				if (fs->req_sector > fs->secpertrack) {
					fs->req_sector -= fs->secpertrack;
					if (++fs->head > 1) {
						fs->head = 0;
						++fs->req_cyl;
					}
				}
				act(fs);
			} else
				fs->state = idle;
		}
		break;
	default:
		swim3_err("Don't know what to do in state %d\n", fs->state);
	}
	spin_unlock_irqrestore(&swim3_lock, flags);
	return IRQ_HANDLED;
}

/*
static void fd_dma_interrupt(int irq, void *dev_id)
{
}
*/

/* Called under the mutex to grab exclusive access to a drive */
static int grab_drive(struct floppy_state *fs, enum swim_state state,
		      int interruptible)
{
	unsigned long flags;

	swim3_dbg("%s", "-> grab drive\n");

	spin_lock_irqsave(&swim3_lock, flags);
	if (fs->state != idle && fs->state != available) {
		++fs->wanted;
		/* this will enable irqs in order to sleep */
		if (!interruptible)
			wait_event_lock_irq(fs->wait,
					    fs->state == available,
					    swim3_lock);
		else if (wait_event_interruptible_lock_irq(fs->wait,
							   fs->state == available,
							   swim3_lock)) {
			--fs->wanted;
			spin_unlock_irqrestore(&swim3_lock, flags);
			return -EINTR;
		}
		--fs->wanted;
	}
	fs->state = state;
	spin_unlock_irqrestore(&swim3_lock, flags);
	return 0;
}
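
/*
 * Drop exclusive access again.  The freeze/quiesce/unquiesce/unfreeze cycle
 * below kicks the blk-mq queue, presumably so that requests deferred with
 * BLK_STS_DEV_RESOURCE while the drive was grabbed get dispatched again.
 */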
static void release_drive(struct floppy_state *fs)
{
	struct request_queue *q = disks[fs->index]->queue;
	unsigned long flags;

	swim3_dbg("%s", "-> release drive\n");

	spin_lock_irqsave(&swim3_lock, flags);
	fs->state = idle;
	spin_unlock_irqrestore(&swim3_lock, flags);

	blk_mq_freeze_queue(q);
	blk_mq_quiesce_queue(q);
	blk_mq_unquiesce_queue(q);
	blk_mq_unfreeze_queue(q);
}

static int fd_eject(struct floppy_state *fs)
{
	int err, n;

	err = grab_drive(fs, ejecting, 1);
	if (err)
		return err;
	swim3_action(fs, EJECT);
	for (n = 20; n > 0; --n) {
		if (signal_pending(current)) {
			err = -EINTR;
			break;
		}
		swim3_select(fs, RELAX);
		schedule_timeout_interruptible(1);
		if (swim3_readbit(fs, DISK_IN) == 0)
			break;
	}
	swim3_select(fs, RELAX);
	udelay(150);
	fs->ejected = 1;
	release_drive(fs);
	return err;
}

static struct floppy_struct floppy_type =
	{ 2880,18,2,80,0,0x1B,0x00,0xCF,0x6C,NULL };	/* 7 1.44MB 3.5" */

static int floppy_locked_ioctl(struct block_device *bdev, fmode_t mode,
			       unsigned int cmd, unsigned long param)
{
	struct floppy_state *fs = bdev->bd_disk->private_data;
	int err;

	if ((cmd & 0x80) && !capable(CAP_SYS_ADMIN))
		return -EPERM;

	if (fs->mdev->media_bay &&
	    check_media_bay(fs->mdev->media_bay) != MB_FD)
		return -ENXIO;

	switch (cmd) {
	case FDEJECT:
		if (fs->ref_count != 1)
			return -EBUSY;
		err = fd_eject(fs);
		return err;
	case FDGETPRM:
		if (copy_to_user((void __user *) param, &floppy_type,
				 sizeof(struct floppy_struct)))
			return -EFAULT;
		return 0;
	}
	return -ENOTTY;
}

static int floppy_ioctl(struct block_device *bdev, fmode_t mode,
			unsigned int cmd, unsigned long param)
{
	int ret;

	mutex_lock(&swim3_mutex);
	ret = floppy_locked_ioctl(bdev, mode, cmd, param);
	mutex_unlock(&swim3_mutex);

	return ret;
}

static int floppy_open(struct block_device *bdev, fmode_t mode)
{
	struct floppy_state *fs = bdev->bd_disk->private_data;
	struct swim3 __iomem *sw = fs->swim3;
	int n, err = 0;

	if (fs->ref_count == 0) {
		if (fs->mdev->media_bay &&
		    check_media_bay(fs->mdev->media_bay) != MB_FD)
			return -ENXIO;
		out_8(&sw->setup, S_IBM_DRIVE | S_FCLK_DIV2);
		out_8(&sw->control_bic, 0xff);
		out_8(&sw->mode, 0x95);
		udelay(10);
		out_8(&sw->intr_enable, 0);
		out_8(&sw->control_bis, DRIVE_ENABLE | INTR_ENABLE);
		swim3_action(fs, MOTOR_ON);
		fs->write_prot = -1;
		fs->cur_cyl = -1;
		for (n = 0; n < 2 * HZ; ++n) {
			if (n >= HZ/30 && swim3_readbit(fs, SEEK_COMPLETE))
				break;
			if (signal_pending(current)) {
				err = -EINTR;
				break;
			}
			swim3_select(fs, RELAX);
			schedule_timeout_interruptible(1);
		}
		if (err == 0 && (swim3_readbit(fs, SEEK_COMPLETE) == 0
				 || swim3_readbit(fs, DISK_IN) == 0))
			err = -ENXIO;
		swim3_action(fs, SETMFM);
		swim3_select(fs, RELAX);

	} else if (fs->ref_count == -1 || mode & FMODE_EXCL)
		return -EBUSY;

	if (err == 0 && (mode & FMODE_NDELAY) == 0
	    && (mode & (FMODE_READ|FMODE_WRITE))) {
		if (bdev_check_media_change(bdev))
			floppy_revalidate(bdev->bd_disk);
		if (fs->ejected)
			err = -ENXIO;
	}

	if (err == 0 && (mode & FMODE_WRITE)) {
		if (fs->write_prot < 0)
			fs->write_prot = swim3_readbit(fs, WRITE_PROT);
		if (fs->write_prot)
			err = -EROFS;
	}

	if (err) {
		if (fs->ref_count == 0) {
			swim3_action(fs, MOTOR_OFF);
			out_8(&sw->control_bic, DRIVE_ENABLE | INTR_ENABLE);
			swim3_select(fs, RELAX);
		}
		return err;
	}

	if (mode & FMODE_EXCL)
		fs->ref_count = -1;
	else
		++fs->ref_count;

	return 0;
}

static int floppy_unlocked_open(struct block_device *bdev, fmode_t mode)
{
	int ret;

	mutex_lock(&swim3_mutex);
	ret = floppy_open(bdev, mode);
	mutex_unlock(&swim3_mutex);

	return ret;
}

static void floppy_release(struct gendisk *disk, fmode_t mode)
{
	struct floppy_state *fs = disk->private_data;
	struct swim3 __iomem *sw = fs->swim3;

	mutex_lock(&swim3_mutex);
	if (fs->ref_count > 0)
		--fs->ref_count;
	else if (fs->ref_count == -1)
		fs->ref_count = 0;
	if (fs->ref_count == 0) {
		swim3_action(fs, MOTOR_OFF);
		out_8(&sw->control_bic, 0xff);
		swim3_select(fs, RELAX);
	}
	mutex_unlock(&swim3_mutex);
}

static unsigned int floppy_check_events(struct gendisk *disk,
					unsigned int clearing)
{
	struct floppy_state *fs = disk->private_data;

	return fs->ejected ? DISK_EVENT_MEDIA_CHANGE : 0;
}
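
/*
 * Re-check the media after a media-change event: spin the drive up, wait up
 * to about a second for SEEK_COMPLETE, and use that together with DISK_IN to
 * decide whether a usable disk is present (non-zero return means no disk).
 */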
static int floppy_revalidate(struct gendisk *disk)
{
	struct floppy_state *fs = disk->private_data;
	struct swim3 __iomem *sw;
	int ret, n;

	if (fs->mdev->media_bay &&
	    check_media_bay(fs->mdev->media_bay) != MB_FD)
		return -ENXIO;

	sw = fs->swim3;
	grab_drive(fs, revalidating, 0);
	out_8(&sw->intr_enable, 0);
	out_8(&sw->control_bis, DRIVE_ENABLE);
	swim3_action(fs, MOTOR_ON);	/* necessary? */
	fs->write_prot = -1;
	fs->cur_cyl = -1;
	mdelay(1);
	for (n = HZ; n > 0; --n) {
		if (swim3_readbit(fs, SEEK_COMPLETE))
			break;
		if (signal_pending(current))
			break;
		swim3_select(fs, RELAX);
		schedule_timeout_interruptible(1);
	}
	ret = swim3_readbit(fs, SEEK_COMPLETE) == 0
		|| swim3_readbit(fs, DISK_IN) == 0;
	if (ret)
		swim3_action(fs, MOTOR_OFF);
	else {
		fs->ejected = 0;
		swim3_action(fs, SETMFM);
	}
	swim3_select(fs, RELAX);

	release_drive(fs);
	return ret;
}

static const struct block_device_operations floppy_fops = {
	.open		= floppy_unlocked_open,
	.release	= floppy_release,
	.ioctl		= floppy_ioctl,
	.check_events	= floppy_check_events,
};

static const struct blk_mq_ops swim3_mq_ops = {
	.queue_rq = swim3_queue_rq,
};

static void swim3_mb_event(struct macio_dev* mdev, int mb_state)
{
	struct floppy_state *fs = macio_get_drvdata(mdev);
	struct swim3 __iomem *sw;

	if (!fs)
		return;

	sw = fs->swim3;

	if (mb_state != MB_FD)
		return;

	/* Clear state */
	out_8(&sw->intr_enable, 0);
	in_8(&sw->intr);
	in_8(&sw->error);
}

static int swim3_add_device(struct macio_dev *mdev, int index)
{
	struct device_node *swim = mdev->ofdev.dev.of_node;
	struct floppy_state *fs = &floppy_states[index];
	int rc = -EBUSY;

	fs->mdev = mdev;
	fs->index = index;

	/* Check & Request resources */
	if (macio_resource_count(mdev) < 2) {
		swim3_err("%s", "No address in device-tree\n");
		return -ENXIO;
	}
	if (macio_irq_count(mdev) < 1) {
		swim3_err("%s", "No interrupt in device-tree\n");
		return -ENXIO;
	}
	if (macio_request_resource(mdev, 0, "swim3 (mmio)")) {
		swim3_err("%s", "Can't request mmio resource\n");
		return -EBUSY;
	}
	if (macio_request_resource(mdev, 1, "swim3 (dma)")) {
		swim3_err("%s", "Can't request dma resource\n");
		macio_release_resource(mdev, 0);
		return -EBUSY;
	}
	dev_set_drvdata(&mdev->ofdev.dev, fs);

	if (mdev->media_bay == NULL)
		pmac_call_feature(PMAC_FTR_SWIM3_ENABLE, swim, 0, 1);

	fs->state = idle;
	fs->swim3 = (struct swim3 __iomem *)
		ioremap(macio_resource_start(mdev, 0), 0x200);
	if (fs->swim3 == NULL) {
		swim3_err("%s", "Couldn't map mmio registers\n");
		rc = -ENOMEM;
		goto out_release;
	}
	fs->dma = (struct dbdma_regs __iomem *)
		ioremap(macio_resource_start(mdev, 1), 0x200);
	if (fs->dma == NULL) {
		swim3_err("%s", "Couldn't map dma registers\n");
		iounmap(fs->swim3);
		rc = -ENOMEM;
		goto out_release;
	}
	fs->swim3_intr = macio_irq(mdev, 0);
	fs->dma_intr = macio_irq(mdev, 1);
	fs->cur_cyl = -1;
	fs->cur_sector = -1;
	fs->secpercyl = 36;
	fs->secpertrack = 18;
	fs->total_secs = 2880;
	init_waitqueue_head(&fs->wait);

	fs->dma_cmd = (struct dbdma_cmd *) DBDMA_ALIGN(fs->dbdma_cmd_space);
	memset(fs->dma_cmd, 0, 2 * sizeof(struct dbdma_cmd));
	fs->dma_cmd[1].command = cpu_to_le16(DBDMA_STOP);

	if (mdev->media_bay == NULL || check_media_bay(mdev->media_bay) == MB_FD)
		swim3_mb_event(mdev, MB_FD);

	if (request_irq(fs->swim3_intr, swim3_interrupt, 0, "SWIM3", fs)) {
		swim3_err("%s", "Couldn't request interrupt\n");
		pmac_call_feature(PMAC_FTR_SWIM3_ENABLE, swim, 0, 0);
		goto out_unmap;
	}

	timer_setup(&fs->timeout, NULL, 0);

	swim3_info("SWIM3 floppy controller %s\n",
		   mdev->media_bay ? "in media bay" : "");

	return 0;

 out_unmap:
	iounmap(fs->dma);
	iounmap(fs->swim3);

 out_release:
	macio_release_resource(mdev, 0);
	macio_release_resource(mdev, 1);

	return rc;
}

static int swim3_attach(struct macio_dev *mdev,
			const struct of_device_id *match)
{
	struct floppy_state *fs;
	struct gendisk *disk;
	int rc;

	if (floppy_count >= MAX_FLOPPIES)
		return -ENXIO;

	if (floppy_count == 0) {
		rc = register_blkdev(FLOPPY_MAJOR, "fd");
		if (rc)
			return rc;
	}

	fs = &floppy_states[floppy_count];
	memset(fs, 0, sizeof(*fs));

	rc = blk_mq_alloc_sq_tag_set(&fs->tag_set, &swim3_mq_ops, 2,
				     BLK_MQ_F_SHOULD_MERGE);
	if (rc)
		goto out_unregister;

	disk = blk_mq_alloc_disk(&fs->tag_set, fs);
	if (IS_ERR(disk)) {
		rc = PTR_ERR(disk);
		goto out_free_tag_set;
	}

	rc = swim3_add_device(mdev, floppy_count);
	if (rc)
		goto out_cleanup_disk;

	disk->major = FLOPPY_MAJOR;
	disk->first_minor = floppy_count;
	disk->minors = 1;
	disk->fops = &floppy_fops;
	disk->private_data = fs;
	disk->events = DISK_EVENT_MEDIA_CHANGE;
	disk->flags |= GENHD_FL_REMOVABLE | GENHD_FL_NO_PART;
	sprintf(disk->disk_name, "fd%d", floppy_count);
	set_capacity(disk, 2880);

	rc = add_disk(disk);
	if (rc)
		goto out_cleanup_disk;

	disks[floppy_count++] = disk;
	return 0;

out_cleanup_disk:
	put_disk(disk);
out_free_tag_set:
	blk_mq_free_tag_set(&fs->tag_set);
out_unregister:
	if (floppy_count == 0)
		unregister_blkdev(FLOPPY_MAJOR, "fd");
	return rc;
}

static const struct of_device_id swim3_match[] =
{
	{
		.name		= "swim3",
	},
	{
		.compatible	= "ohare-swim3"
	},
	{
		.compatible	= "swim3"
	},
	{ /* end of list */ }
};

static struct macio_driver swim3_driver =
{
	.driver = {
		.name		= "swim3",
		.of_match_table	= swim3_match,
	},
	.probe		= swim3_attach,
#ifdef CONFIG_PMAC_MEDIABAY
	.mediabay_event	= swim3_mb_event,
#endif
#if 0
	.suspend	= swim3_suspend,
	.resume		= swim3_resume,
#endif
};

int swim3_init(void)
{
	macio_register_driver(&swim3_driver);
	return 0;
}

module_init(swim3_init)

MODULE_LICENSE("GPL");
MODULE_AUTHOR("Paul Mackerras");
MODULE_ALIAS_BLOCKDEV_MAJOR(FLOPPY_MAJOR);