pata_octeon_cf.c 27 KB

12345678910111213141516171819202122232425262728293031323334353637383940414243444546474849505152535455565758596061626364656667686970717273747576777879808182838485868788899091929394959697989910010110210310410510610710810911011111211311411511611711811912012112212312412512612712812913013113213313413513613713813914014114214314414514614714814915015115215315415515615715815916016116216316416516616716816917017117217317417517617717817918018118218318418518618718818919019119219319419519619719819920020120220320420520620720820921021121221321421521621721821922022122222322422522622722822923023123223323423523623723823924024124224324424524624724824925025125225325425525625725825926026126226326426526626726826927027127227327427527627727827928028128228328428528628728828929029129229329429529629729829930030130230330430530630730830931031131231331431531631731831932032132232332432532632732832933033133233333433533633733833934034134234334434534634734834935035135235335435535635735835936036136236336436536636736836937037137237337437537637737837938038138238338438538638738838939039139239339439539639739839940040140240340440540640740840941041141241341441541641741841942042142242342442542642742842943043143243343443543643743843944044144244344444544644744844945045145245345445545645745845946046146246346446546646746846947047147247347447547647747847948048148248348448548648748848949049149249349449549649749849950050150250350450550650750850951051151251351451551651751851952052152252352452552652752852953053153253353453553653753853954054154254354454554654754854955055155255355455555655755855956056156256356456556656756856957057157257357457557657757857958058158258358458558658758858959059159259359459559659759859960060160260360460560660760860961061161261361461561661761861962062162262362462562662762862963063163263363463563663763863964064164264364464564664764864965065165265365465565665765865966066166266366466566666766866967067167267367467567667767867968068168268368468568668768868969069169269369469569669769869970070170270
3704705706707708709710711712713714715716717718719720721722723724725726727728729730731732733734735736737738739740741742743744745746747748749750751752753754755756757758759760761762763764765766767768769770771772773774775776777778779780781782783784785786787788789790791792793794795796797798799800801802803804805806807808809810811812813814815816817818819820821822823824825826827828829830831832833834835836837838839840841842843844845846847848849850851852853854855856857858859860861862863864865866867868869870871872873874875876877878879880881882883884885886887888889890891892893894895896897898899900901902903904905906907908909910911912913914915916917918919920921922923924925926927928929930931932933934935936937938939940941942943944945946947948949950951952953954955956957958959960961962963964965966967968969970971972973974975976977978979980981982983984985986987988989990991992993994995996997998999100010011002100310041005100610071008100910101011101210131014101510161017101810191020102110221023102410251026102710281029103010311032103310341035103610371038
  1. /*
  2. * Driver for the Octeon bootbus compact flash.
  3. *
  4. * This file is subject to the terms and conditions of the GNU General Public
  5. * License. See the file "COPYING" in the main directory of this archive
  6. * for more details.
  7. *
  8. * Copyright (C) 2005 - 2012 Cavium Inc.
  9. * Copyright (C) 2008 Wind River Systems
  10. */
  11. #include <linux/kernel.h>
  12. #include <linux/module.h>
  13. #include <linux/libata.h>
  14. #include <linux/hrtimer.h>
  15. #include <linux/slab.h>
  16. #include <linux/irq.h>
  17. #include <linux/of.h>
  18. #include <linux/of_platform.h>
  19. #include <linux/platform_device.h>
  20. #include <scsi/scsi_host.h>
  21. #include <trace/events/libata.h>
  22. #include <asm/byteorder.h>
  23. #include <asm/octeon/octeon.h>
  24. /*
  25. * The Octeon bootbus compact flash interface is connected in at least
  26. * 3 different configurations on various evaluation boards:
  27. *
  28. * -- 8 bits no irq, no DMA
  29. * -- 16 bits no irq, no DMA
  30. * -- 16 bits True IDE mode with DMA, but no irq.
  31. *
  32. * In the last case the DMA engine can generate an interrupt when the
  33. * transfer is complete. For the first two cases only PIO is supported.
  34. *
  35. */
#define DRV_NAME "pata_octeon_cf"
#define DRV_VERSION "2.2"

/* Poll interval in nS. */
#define OCTEON_CF_BUSY_POLL_INTERVAL 500000

/* Register offsets from the bootbus DMA engine base (cf_port->dma_base). */
#define DMA_CFG 0
#define DMA_TIM 0x20
#define DMA_INT 0x38
#define DMA_INT_EN 0x50
/* Per-port driver state, stored in ata_port->private_data. */
struct octeon_cf_port {
	struct hrtimer delayed_finish;	/* polls for not-busy after DMA completes */
	struct ata_port *ap;		/* back pointer to our ata_port */
	int dma_finished;		/* set when the DMA engine has stopped */
	void *c0;			/* device control register address (ctl_addr) */
	unsigned int cs0;		/* bootbus chip select of the data port */
	unsigned int cs1;		/* second chip select (True IDE mode only) */
	bool is_true_ide;		/* True IDE wiring: 16 bit, optional DMA */
	u64 dma_base;			/* ioremapped bootbus DMA engine registers */
};
/* SCSI host template: stock libata PIO defaults. */
static struct scsi_host_template octeon_cf_sht = {
	ATA_PIO_SHT(DRV_NAME),
};
/* Module parameter: DMA stays off unless explicitly enabled. */
static int enable_dma;
module_param(enable_dma, int, 0444);
MODULE_PARM_DESC(enable_dma,
	 "Enable use of DMA on interfaces that support it (0=no dma [default], 1=use dma)");
  61. /**
  62. * Convert nanosecond based time to setting used in the
  63. * boot bus timing register, based on timing multiple
  64. */
  65. static unsigned int ns_to_tim_reg(unsigned int tim_mult, unsigned int nsecs)
  66. {
  67. /*
  68. * Compute # of eclock periods to get desired duration in
  69. * nanoseconds.
  70. */
  71. return DIV_ROUND_UP(nsecs * (octeon_get_io_clock_rate() / 1000000),
  72. 1000 * tim_mult);
  73. }
/*
 * Program the bootbus region configuration register for chip select
 * @cs: enable the region with the given timing multiplier and with
 * address multiplexing, DMACK and the enable extensions all off.
 */
static void octeon_cf_set_boot_reg_cfg(int cs, unsigned int multiplier)
{
	union cvmx_mio_boot_reg_cfgx reg_cfg;
	unsigned int tim_mult;

	/* Encode the multiplier into the register's tim_mult field. */
	switch (multiplier) {
	case 8:
		tim_mult = 3;
		break;
	case 4:
		tim_mult = 0;
		break;
	case 2:
		tim_mult = 2;
		break;
	default:
		tim_mult = 1;
		break;
	}

	reg_cfg.u64 = cvmx_read_csr(CVMX_MIO_BOOT_REG_CFGX(cs));
	reg_cfg.s.dmack = 0;	/* Don't assert DMACK on access */
	reg_cfg.s.tim_mult = tim_mult;	/* Timing multiplier */
	reg_cfg.s.rd_dly = 0;	/* Sample on falling edge of BOOT_OE */
	reg_cfg.s.sam = 0;	/* Don't combine write and output enable */
	reg_cfg.s.we_ext = 0;	/* No write enable extension */
	reg_cfg.s.oe_ext = 0;	/* No read enable extension */
	reg_cfg.s.en = 1;	/* Enable this region */
	reg_cfg.s.orbit = 0;	/* Don't combine with previous region */
	reg_cfg.s.ale = 0;	/* Don't do address multiplexing */
	cvmx_write_csr(CVMX_MIO_BOOT_REG_CFGX(cs), reg_cfg.u64);
}
/**
 * octeon_cf_set_piomode - program bootbus timing for the chosen PIO mode
 * @ap: ATA port information
 * @dev: ATA device
 *
 * Called after libata determines the needed PIO mode. This function
 * programs the Octeon bootbus region(s) to support the timing
 * requirements of the PIO mode.
 */
static void octeon_cf_set_piomode(struct ata_port *ap, struct ata_device *dev)
{
	struct octeon_cf_port *cf_port = ap->private_data;
	union cvmx_mio_boot_reg_timx reg_tim;
	int T;			/* one eclock period (times div) in ps */
	struct ata_timing timing;

	unsigned int div;
	int use_iordy;
	int trh;
	int pause;
	/* These names are timing parameters from the ATA spec */
	int t2;

	/*
	 * A divisor value of four will overflow the timing fields at
	 * clock rates greater than 800MHz
	 */
	if (octeon_get_io_clock_rate() <= 800000000)
		div = 4;
	else
		div = 8;
	T = (int)((1000000000000LL * div) / octeon_get_io_clock_rate());

	BUG_ON(ata_timing_compute(dev, dev->pio_mode, &timing, T, T));

	/* Register fields use minus-one notation, hence the decrements. */
	t2 = timing.active;
	if (t2)
		t2--;

	trh = ns_to_tim_reg(div, 20);
	if (trh)
		trh--;

	pause = (int)timing.cycle - (int)timing.active -
		(int)timing.setup - trh;
	if (pause < 0)
		pause = 0;
	if (pause)
		pause--;

	octeon_cf_set_boot_reg_cfg(cf_port->cs0, div);
	if (cf_port->is_true_ide)
		/* True IDE mode, program both chip selects. */
		octeon_cf_set_boot_reg_cfg(cf_port->cs1, div);

	use_iordy = ata_pio_need_iordy(dev);

	reg_tim.u64 = cvmx_read_csr(CVMX_MIO_BOOT_REG_TIMX(cf_port->cs0));
	/* Disable page mode */
	reg_tim.s.pagem = 0;
	/* Enable dynamic timing */
	reg_tim.s.waitm = use_iordy;
	/* Pages are disabled */
	reg_tim.s.pages = 0;
	/* We don't use multiplexed address mode */
	reg_tim.s.ale = 0;
	/* Not used */
	reg_tim.s.page = 0;
	/* Time after IORDY to continue to assert the data */
	reg_tim.s.wait = 0;
	/* Time to wait to complete the cycle. */
	reg_tim.s.pause = pause;
	/* How long to hold after a write to de-assert CE. */
	reg_tim.s.wr_hld = trh;
	/* How long to wait after a read to de-assert CE. */
	reg_tim.s.rd_hld = trh;
	/* How long write enable is asserted */
	reg_tim.s.we = t2;
	/* How long read enable is asserted */
	reg_tim.s.oe = t2;
	/* Time after CE that read/write starts */
	reg_tim.s.ce = ns_to_tim_reg(div, 5);
	/* Time before CE that address is valid */
	reg_tim.s.adr = 0;

	/* Program the bootbus region timing for the data port chip select. */
	cvmx_write_csr(CVMX_MIO_BOOT_REG_TIMX(cf_port->cs0), reg_tim.u64);
	if (cf_port->is_true_ide)
		/* True IDE mode, program both chip selects. */
		cvmx_write_csr(CVMX_MIO_BOOT_REG_TIMX(cf_port->cs1),
			       reg_tim.u64);
}
/**
 * octeon_cf_set_dmamode - program the bootbus DMA timing register
 * @ap: ATA port information
 * @dev: ATA device
 *
 * Translates the libata MWDMA timing for @dev into the bootbus DMA
 * engine's DMA_TIM register fields.
 */
static void octeon_cf_set_dmamode(struct ata_port *ap, struct ata_device *dev)
{
	struct octeon_cf_port *cf_port = ap->private_data;
	union cvmx_mio_boot_pin_defs pin_defs;
	union cvmx_mio_boot_dma_timx dma_tim;
	unsigned int oe_a;
	unsigned int oe_n;
	unsigned int dma_ackh;
	unsigned int dma_arq;
	unsigned int pause;
	unsigned int T0, Tkr, Td;
	unsigned int tim_mult;
	int c;

	const struct ata_timing *timing;

	timing = ata_timing_find_mode(dev->dma_mode);
	T0	= timing->cycle;
	Td	= timing->active;
	Tkr	= timing->recover;
	dma_ackh = timing->dmack_hold;

	dma_tim.u64 = 0;
	/* dma_tim.s.tim_mult = 0 --> 4x */
	tim_mult = 4;

	/* not spec'ed, value in eclocks, not affected by tim_mult */
	dma_arq = 8;
	pause = 25 - dma_arq * 1000 /
		(octeon_get_io_clock_rate() / 1000000); /* Tz */

	oe_a = Td;
	/* Tkr from cf spec, lengthened to meet T0 */
	oe_n = max(T0 - oe_a, Tkr);

	pin_defs.u64 = cvmx_read_csr(CVMX_MIO_BOOT_PIN_DEFS);

	/* DMA channel number. */
	c = (cf_port->dma_base & 8) >> 3;

	/* Invert the polarity if the default is 0 */
	dma_tim.s.dmack_pi = (pin_defs.u64 & (1ull << (11 + c))) ? 0 : 1;

	dma_tim.s.oe_n = ns_to_tim_reg(tim_mult, oe_n);
	dma_tim.s.oe_a = ns_to_tim_reg(tim_mult, oe_a);

	/*
	 * This is tI, C.F. spec. says 0, but Sony CF card requires
	 * more, we use 20 nS.
	 */
	dma_tim.s.dmack_s = ns_to_tim_reg(tim_mult, 20);
	dma_tim.s.dmack_h = ns_to_tim_reg(tim_mult, dma_ackh);

	dma_tim.s.dmarq = dma_arq;
	dma_tim.s.pause = ns_to_tim_reg(tim_mult, pause);

	dma_tim.s.rd_dly = 0;	/* Sample right on edge */

	/* writes only */
	dma_tim.s.we_n = ns_to_tim_reg(tim_mult, oe_n);
	dma_tim.s.we_a = ns_to_tim_reg(tim_mult, oe_a);

	ata_dev_dbg(dev, "ns to ticks (mult %d) of %d is: %d\n", tim_mult, 60,
		    ns_to_tim_reg(tim_mult, 60));
	ata_dev_dbg(dev, "oe_n: %d, oe_a: %d, dmack_s: %d, dmack_h: %d, dmarq: %d, pause: %d\n",
		    dma_tim.s.oe_n, dma_tim.s.oe_a, dma_tim.s.dmack_s,
		    dma_tim.s.dmack_h, dma_tim.s.dmarq, dma_tim.s.pause);

	cvmx_write_csr(cf_port->dma_base + DMA_TIM, dma_tim.u64);
}
  240. /**
  241. * Handle an 8 bit I/O request.
  242. *
  243. * @qc: Queued command
  244. * @buffer: Data buffer
  245. * @buflen: Length of the buffer.
  246. * @rw: True to write.
  247. */
  248. static unsigned int octeon_cf_data_xfer8(struct ata_queued_cmd *qc,
  249. unsigned char *buffer,
  250. unsigned int buflen,
  251. int rw)
  252. {
  253. struct ata_port *ap = qc->dev->link->ap;
  254. void __iomem *data_addr = ap->ioaddr.data_addr;
  255. unsigned long words;
  256. int count;
  257. words = buflen;
  258. if (rw) {
  259. count = 16;
  260. while (words--) {
  261. iowrite8(*buffer, data_addr);
  262. buffer++;
  263. /*
  264. * Every 16 writes do a read so the bootbus
  265. * FIFO doesn't fill up.
  266. */
  267. if (--count == 0) {
  268. ioread8(ap->ioaddr.altstatus_addr);
  269. count = 16;
  270. }
  271. }
  272. } else {
  273. ioread8_rep(data_addr, buffer, words);
  274. }
  275. return buflen;
  276. }
/**
 * Handle a 16 bit I/O request.
 *
 * @qc: Queued command
 * @buffer: Data buffer
 * @buflen: Length of the buffer.
 * @rw: True to write.
 */
static unsigned int octeon_cf_data_xfer16(struct ata_queued_cmd *qc,
					  unsigned char *buffer,
					  unsigned int buflen,
					  int rw)
{
	struct ata_port *ap = qc->dev->link->ap;
	void __iomem *data_addr = ap->ioaddr.data_addr;
	unsigned long words;
	int count;

	words = buflen / 2;
	if (rw) {
		count = 16;
		while (words--) {
			iowrite16(*(uint16_t *)buffer, data_addr);
			buffer += sizeof(uint16_t);
			/*
			 * Every 16 writes do a read so the bootbus
			 * FIFO doesn't fill up.
			 */
			if (--count == 0) {
				ioread8(ap->ioaddr.altstatus_addr);
				count = 16;
			}
		}
	} else {
		while (words--) {
			*(uint16_t *)buffer = ioread16(data_addr);
			buffer += sizeof(uint16_t);
		}
	}
	/* Transfer trailing 1 byte, if any. */
	if (unlikely(buflen & 0x01)) {
		__le16 align_buf[1] = { 0 };

		if (rw == READ) {
			align_buf[0] = cpu_to_le16(ioread16(data_addr));
			memcpy(buffer, align_buf, 1);
		} else {
			memcpy(align_buf, buffer, 1);
			iowrite16(le16_to_cpu(align_buf[0]), data_addr);
		}
		words++;	/* NOTE: vestigial; words is not read after this. */
	}
	return buflen;
}
/*
 * Read the taskfile for 16bit non-True IDE only.  In this mode the
 * taskfile registers are packed two 8-bit registers per 16-bit word,
 * starting at ioaddr.data_addr.
 */
static void octeon_cf_tf_read16(struct ata_port *ap, struct ata_taskfile *tf)
{
	u16 blob;
	/* The base of the registers is at ioaddr.data_addr. */
	void __iomem *base = ap->ioaddr.data_addr;

	blob = __raw_readw(base + 0xc);
	tf->error = blob >> 8;

	blob = __raw_readw(base + 2);
	tf->nsect = blob & 0xff;
	tf->lbal = blob >> 8;

	blob = __raw_readw(base + 4);
	tf->lbam = blob & 0xff;
	tf->lbah = blob >> 8;

	blob = __raw_readw(base + 6);
	tf->device = blob & 0xff;
	tf->status = blob >> 8;

	if (tf->flags & ATA_TFLAG_LBA48) {
		if (likely(ap->ioaddr.ctl_addr)) {
			/* Select the high-order bytes (HOB) and re-read. */
			iowrite8(tf->ctl | ATA_HOB, ap->ioaddr.ctl_addr);

			blob = __raw_readw(base + 0xc);
			tf->hob_feature = blob >> 8;

			blob = __raw_readw(base + 2);
			tf->hob_nsect = blob & 0xff;
			tf->hob_lbal = blob >> 8;

			blob = __raw_readw(base + 4);
			tf->hob_lbam = blob & 0xff;
			tf->hob_lbah = blob >> 8;

			/* Restore the original device control value. */
			iowrite8(tf->ctl, ap->ioaddr.ctl_addr);
			ap->last_ctl = tf->ctl;
		} else {
			WARN_ON(1);
		}
	}
}
  366. static u8 octeon_cf_check_status16(struct ata_port *ap)
  367. {
  368. u16 blob;
  369. void __iomem *base = ap->ioaddr.data_addr;
  370. blob = __raw_readw(base + 6);
  371. return blob >> 8;
  372. }
/*
 * Soft reset for 16bit non-True IDE mode: pulse SRST through the
 * device control register (the word at offset 0xe from the register
 * base), wait for the device, then classify it by signature.
 */
static int octeon_cf_softreset16(struct ata_link *link, unsigned int *classes,
				 unsigned long deadline)
{
	struct ata_port *ap = link->ap;
	void __iomem *base = ap->ioaddr.data_addr;
	int rc;
	u8 err;

	__raw_writew(ap->ctl, base + 0xe);
	udelay(20);
	__raw_writew(ap->ctl | ATA_SRST, base + 0xe);
	udelay(20);
	__raw_writew(ap->ctl, base + 0xe);

	rc = ata_sff_wait_after_reset(link, 1, deadline);
	if (rc) {
		ata_link_err(link, "SRST failed (errno=%d)\n", rc);
		return rc;
	}

	/* determine by signature whether we have ATA or ATAPI devices */
	classes[0] = ata_sff_dev_classify(&link->device[0], 1, &err);
	return 0;
}
/*
 * Load the taskfile for 16bit non-True IDE only.  The device_addr is
 * not loaded, we do this as part of octeon_cf_exec_command16.
 *
 * Register pairs are packed two per 16-bit word at ioaddr.data_addr,
 * so each pair is written with a single __raw_writew().
 */
static void octeon_cf_tf_load16(struct ata_port *ap,
				const struct ata_taskfile *tf)
{
	unsigned int is_addr = tf->flags & ATA_TFLAG_ISADDR;
	/* The base of the registers is at ioaddr.data_addr. */
	void __iomem *base = ap->ioaddr.data_addr;

	if (tf->ctl != ap->last_ctl) {
		iowrite8(tf->ctl, ap->ioaddr.ctl_addr);
		ap->last_ctl = tf->ctl;
		ata_wait_idle(ap);
	}
	if (is_addr && (tf->flags & ATA_TFLAG_LBA48)) {
		/* Write the high-order (HOB) bytes first. */
		__raw_writew(tf->hob_feature << 8, base + 0xc);
		__raw_writew(tf->hob_nsect | tf->hob_lbal << 8, base + 2);
		__raw_writew(tf->hob_lbam | tf->hob_lbah << 8, base + 4);
	}
	if (is_addr) {
		__raw_writew(tf->feature << 8, base + 0xc);
		__raw_writew(tf->nsect | tf->lbal << 8, base + 2);
		__raw_writew(tf->lbam | tf->lbah << 8, base + 4);
	}
	ata_wait_idle(ap);
}
  421. static void octeon_cf_dev_select(struct ata_port *ap, unsigned int device)
  422. {
  423. /* There is only one device, do nothing. */
  424. return;
  425. }
  426. /*
  427. * Issue ATA command to host controller. The device_addr is also sent
  428. * as it must be written in a combined write with the command.
  429. */
  430. static void octeon_cf_exec_command16(struct ata_port *ap,
  431. const struct ata_taskfile *tf)
  432. {
  433. /* The base of the registers is at ioaddr.data_addr. */
  434. void __iomem *base = ap->ioaddr.data_addr;
  435. u16 blob = 0;
  436. if (tf->flags & ATA_TFLAG_DEVICE)
  437. blob = tf->device;
  438. blob |= (tf->command << 8);
  439. __raw_writew(blob, base + 6);
  440. ata_wait_idle(ap);
  441. }
/* Intentionally empty: used for the sff_irq_on/sff_irq_clear hooks. */
static void octeon_cf_ata_port_noaction(struct ata_port *ap)
{
}
  445. static void octeon_cf_dma_setup(struct ata_queued_cmd *qc)
  446. {
  447. struct ata_port *ap = qc->ap;
  448. struct octeon_cf_port *cf_port;
  449. cf_port = ap->private_data;
  450. /* issue r/w command */
  451. qc->cursg = qc->sg;
  452. cf_port->dma_finished = 0;
  453. ap->ops->sff_exec_command(ap, &qc->tf);
  454. }
/**
 * Start a DMA transfer that was already setup
 *
 * @qc: Information about the DMA
 */
static void octeon_cf_dma_start(struct ata_queued_cmd *qc)
{
	struct octeon_cf_port *cf_port = qc->ap->private_data;
	union cvmx_mio_boot_dma_cfgx mio_boot_dma_cfg;
	union cvmx_mio_boot_dma_intx mio_boot_dma_int;
	struct scatterlist *sg;

	/* Get the scatter list entry we need to DMA into */
	sg = qc->cursg;
	BUG_ON(!sg);

	/*
	 * Clear the DMA complete status.
	 */
	mio_boot_dma_int.u64 = 0;
	mio_boot_dma_int.s.done = 1;
	cvmx_write_csr(cf_port->dma_base + DMA_INT, mio_boot_dma_int.u64);

	/* Enable the interrupt. */
	cvmx_write_csr(cf_port->dma_base + DMA_INT_EN, mio_boot_dma_int.u64);

	/* Set the direction of the DMA */
	mio_boot_dma_cfg.u64 = 0;
#ifdef __LITTLE_ENDIAN
	mio_boot_dma_cfg.s.endian = 1;
#endif
	mio_boot_dma_cfg.s.en = 1;
	mio_boot_dma_cfg.s.rw = ((qc->tf.flags & ATA_TFLAG_WRITE) != 0);

	/*
	 * Don't stop the DMA if the device deasserts DMARQ. Many
	 * compact flashes deassert DMARQ for a short time between
	 * sectors. Instead of stopping and restarting the DMA, we'll
	 * let the hardware do it. If the DMA is really stopped early
	 * due to an error condition, a later timeout will force us to
	 * stop.
	 */
	mio_boot_dma_cfg.s.clr = 0;

	/* Size is specified in 16bit words and minus one notation */
	mio_boot_dma_cfg.s.size = sg_dma_len(sg) / 2 - 1;

	/* We need to swap the high and low bytes of every 16 bits */
	mio_boot_dma_cfg.s.swap8 = 1;

	mio_boot_dma_cfg.s.adr = sg_dma_address(sg);

	/* Program the DMA engine configuration (en is set above). */
	cvmx_write_csr(cf_port->dma_base + DMA_CFG, mio_boot_dma_cfg.u64);
}
/**
 * Check the DMA engine state after a transfer and move the libata
 * host state machine forward.
 *
 * @ap: ATA port
 * @qc: Active queued command
 *
 * Returns 1 when the completion was handled, 0 when the port was not
 * waiting in HSM_ST_LAST.
 *
 * LOCKING:
 * spin_lock_irqsave(host lock)
 *
 */
static unsigned int octeon_cf_dma_finished(struct ata_port *ap,
					   struct ata_queued_cmd *qc)
{
	struct ata_eh_info *ehi = &ap->link.eh_info;
	struct octeon_cf_port *cf_port = ap->private_data;
	union cvmx_mio_boot_dma_cfgx dma_cfg;
	union cvmx_mio_boot_dma_intx dma_int;
	u8 status;

	trace_ata_bmdma_stop(ap, &qc->tf, qc->tag);

	if (ap->hsm_task_state != HSM_ST_LAST)
		return 0;

	dma_cfg.u64 = cvmx_read_csr(cf_port->dma_base + DMA_CFG);
	if (dma_cfg.s.size != 0xfffff) {
		/* Error, the transfer was not complete. */
		qc->err_mask |= AC_ERR_HOST_BUS;
		ap->hsm_task_state = HSM_ST_ERR;
	}

	/* Stop and clear the dma engine. */
	dma_cfg.u64 = 0;
	dma_cfg.s.size = -1;
	cvmx_write_csr(cf_port->dma_base + DMA_CFG, dma_cfg.u64);

	/* Disable the interrupt. */
	dma_int.u64 = 0;
	cvmx_write_csr(cf_port->dma_base + DMA_INT_EN, dma_int.u64);

	/* Clear the DMA complete status */
	dma_int.s.done = 1;
	cvmx_write_csr(cf_port->dma_base + DMA_INT, dma_int.u64);

	status = ap->ops->sff_check_status(ap);

	ata_sff_hsm_move(ap, qc, status, 0);

	if (unlikely(qc->err_mask) && (qc->tf.protocol == ATA_PROT_DMA))
		ata_ehi_push_desc(ehi, "DMA stat 0x%x", status);

	return 1;
}
/*
 * Check if any queued commands have more DMAs, if so start the next
 * transfer, else do end of transfer handling.
 */
static irqreturn_t octeon_cf_interrupt(int irq, void *dev_instance)
{
	struct ata_host *host = dev_instance;
	struct octeon_cf_port *cf_port;
	int i;
	unsigned int handled = 0;
	unsigned long flags;

	spin_lock_irqsave(&host->lock, flags);

	for (i = 0; i < host->n_ports; i++) {
		u8 status;
		struct ata_port *ap;
		struct ata_queued_cmd *qc;
		union cvmx_mio_boot_dma_intx dma_int;
		union cvmx_mio_boot_dma_cfgx dma_cfg;

		ap = host->ports[i];
		cf_port = ap->private_data;

		dma_int.u64 = cvmx_read_csr(cf_port->dma_base + DMA_INT);
		dma_cfg.u64 = cvmx_read_csr(cf_port->dma_base + DMA_CFG);

		qc = ata_qc_from_tag(ap, ap->link.active_tag);

		/* Polled commands are completed elsewhere. */
		if (!qc || (qc->tf.flags & ATA_TFLAG_POLLING))
			continue;

		if (dma_int.s.done && !dma_cfg.s.en) {
			/* DMA engine idle: chain to the next scatterlist
			 * segment, or note that the whole DMA is done. */
			if (!sg_is_last(qc->cursg)) {
				qc->cursg = sg_next(qc->cursg);
				handled = 1;
				trace_ata_bmdma_start(ap, &qc->tf, qc->tag);
				octeon_cf_dma_start(qc);
				continue;
			} else {
				cf_port->dma_finished = 1;
			}
		}
		if (!cf_port->dma_finished)
			continue;
		status = ioread8(ap->ioaddr.altstatus_addr);
		if (status & (ATA_BUSY | ATA_DRQ)) {
			/*
			 * We are busy, try to handle it later. This
			 * is the DMA finished interrupt, and it could
			 * take a little while for the card to be
			 * ready for more commands.
			 */
			/* Clear DMA irq. */
			dma_int.u64 = 0;
			dma_int.s.done = 1;
			cvmx_write_csr(cf_port->dma_base + DMA_INT,
				       dma_int.u64);
			hrtimer_start_range_ns(&cf_port->delayed_finish,
					       ns_to_ktime(OCTEON_CF_BUSY_POLL_INTERVAL),
					       OCTEON_CF_BUSY_POLL_INTERVAL / 5,
					       HRTIMER_MODE_REL);
			handled = 1;
		} else {
			handled |= octeon_cf_dma_finished(ap, qc);
		}
	}
	spin_unlock_irqrestore(&host->lock, flags);
	return IRQ_RETVAL(handled);
}
/*
 * Timer callback armed by octeon_cf_interrupt() when the card was
 * still busy after DMA completion: re-check BUSY/DRQ and either
 * finish the command or re-arm the timer.
 */
static enum hrtimer_restart octeon_cf_delayed_finish(struct hrtimer *hrt)
{
	struct octeon_cf_port *cf_port = container_of(hrt,
						      struct octeon_cf_port,
						      delayed_finish);
	struct ata_port *ap = cf_port->ap;
	struct ata_host *host = ap->host;
	struct ata_queued_cmd *qc;
	unsigned long flags;
	u8 status;
	enum hrtimer_restart rv = HRTIMER_NORESTART;

	spin_lock_irqsave(&host->lock, flags);

	/*
	 * If the port is not waiting for completion, it must have
	 * handled it previously.  The hsm_task_state is
	 * protected by host->lock.
	 */
	if (ap->hsm_task_state != HSM_ST_LAST || !cf_port->dma_finished)
		goto out;

	status = ioread8(ap->ioaddr.altstatus_addr);
	if (status & (ATA_BUSY | ATA_DRQ)) {
		/* Still busy, try again. */
		hrtimer_forward_now(hrt,
				    ns_to_ktime(OCTEON_CF_BUSY_POLL_INTERVAL));
		rv = HRTIMER_RESTART;
		goto out;
	}
	qc = ata_qc_from_tag(ap, ap->link.active_tag);
	if (qc && (!(qc->tf.flags & ATA_TFLAG_POLLING)))
		octeon_cf_dma_finished(ap, qc);
out:
	spin_unlock_irqrestore(&host->lock, flags);
	return rv;
}
  636. static void octeon_cf_dev_config(struct ata_device *dev)
  637. {
  638. /*
  639. * A maximum of 2^20 - 1 16 bit transfers are possible with
  640. * the bootbus DMA. So we need to throttle max_sectors to
  641. * (2^12 - 1 == 4095) to assure that this can never happen.
  642. */
  643. dev->max_sectors = min(dev->max_sectors, 4095U);
  644. }
/*
 * We don't do ATAPI DMA so return 0.
 */
static int octeon_cf_check_atapi_dma(struct ata_queued_cmd *qc)
{
	return 0;
}
/*
 * Issue a queued command: ATA DMA commands are started on the
 * bootbus DMA engine; everything else falls through to the generic
 * SFF path.  ATAPI DMA is a BUG() because this hardware does not
 * support it (see the "Error, ATAPI not supported" message).
 */
static unsigned int octeon_cf_qc_issue(struct ata_queued_cmd *qc)
{
	struct ata_port *ap = qc->ap;

	switch (qc->tf.protocol) {
	case ATA_PROT_DMA:
		WARN_ON(qc->tf.flags & ATA_TFLAG_POLLING);

		trace_ata_tf_load(ap, &qc->tf);
		ap->ops->sff_tf_load(ap, &qc->tf);  /* load tf registers */
		trace_ata_bmdma_setup(ap, &qc->tf, qc->tag);
		octeon_cf_dma_setup(qc);	    /* set up dma */
		trace_ata_bmdma_start(ap, &qc->tf, qc->tag);
		octeon_cf_dma_start(qc);	    /* initiate dma */
		ap->hsm_task_state = HSM_ST_LAST;
		break;

	case ATAPI_PROT_DMA:
		dev_err(ap->dev, "Error, ATAPI not supported\n");
		BUG();
		/* BUG() does not return; fall through to default. */

	default:
		return ata_sff_qc_issue(qc);
	}

	return 0;
}
/*
 * Port operations.  The sff_data_xfer and 16-bit non-True-IDE hooks
 * are patched at probe time depending on the board wiring.
 */
static struct ata_port_operations octeon_cf_ops = {
	.inherits		= &ata_sff_port_ops,
	.check_atapi_dma	= octeon_cf_check_atapi_dma,
	.qc_prep		= ata_noop_qc_prep,
	.qc_issue		= octeon_cf_qc_issue,
	.sff_dev_select		= octeon_cf_dev_select,
	.sff_irq_on		= octeon_cf_ata_port_noaction,
	.sff_irq_clear		= octeon_cf_ata_port_noaction,
	.cable_detect		= ata_cable_40wire,
	.set_piomode		= octeon_cf_set_piomode,
	.set_dmamode		= octeon_cf_set_dmamode,
	.dev_config		= octeon_cf_dev_config,
};
/*
 * Probe: parse the device tree node ("reg" chip selects, bus width,
 * True IDE flag, optional DMA engine phandle), map the chip select
 * regions, configure the port register layout for one of the three
 * supported wirings (8 bit, 16 bit, 16 bit True IDE), and activate
 * the libata host.
 */
static int octeon_cf_probe(struct platform_device *pdev)
{
	struct resource *res_cs0, *res_cs1;

	bool is_16bit;
	const __be32 *cs_num;
	struct property *reg_prop;
	int n_addr, n_size, reg_len;
	struct device_node *node;
	void __iomem *cs0;
	void __iomem *cs1 = NULL;
	struct ata_host *host;
	struct ata_port *ap;
	int irq = 0;
	irq_handler_t irq_handler = NULL;
	void __iomem *base;
	struct octeon_cf_port *cf_port;
	int rv = -ENOMEM;
	u32 bus_width;

	node = pdev->dev.of_node;
	if (node == NULL)
		return -EINVAL;

	cf_port = devm_kzalloc(&pdev->dev, sizeof(*cf_port), GFP_KERNEL);
	if (!cf_port)
		return -ENOMEM;

	cf_port->is_true_ide = of_property_read_bool(node, "cavium,true-ide");

	if (of_property_read_u32(node, "cavium,bus-width", &bus_width) == 0)
		is_16bit = (bus_width == 16);
	else
		is_16bit = false;	/* default to 8 bit when unspecified */

	n_addr = of_n_addr_cells(node);
	n_size = of_n_size_cells(node);

	reg_prop = of_find_property(node, "reg", &reg_len);
	if (!reg_prop || reg_len < sizeof(__be32))
		return -EINVAL;

	cs_num = reg_prop->value;
	cf_port->cs0 = be32_to_cpup(cs_num);

	if (cf_port->is_true_ide) {
		struct device_node *dma_node;
		dma_node = of_parse_phandle(node,
					    "cavium,dma-engine-handle", 0);
		if (dma_node) {
			struct platform_device *dma_dev;
			dma_dev = of_find_device_by_node(dma_node);
			if (dma_dev) {
				struct resource *res_dma;
				int i;

				/* Map the DMA engine registers; on any
				 * failure, drop both references we hold. */
				res_dma = platform_get_resource(dma_dev, IORESOURCE_MEM, 0);
				if (!res_dma) {
					put_device(&dma_dev->dev);
					of_node_put(dma_node);
					return -EINVAL;
				}
				cf_port->dma_base = (u64)devm_ioremap(&pdev->dev, res_dma->start,
								      resource_size(res_dma));
				if (!cf_port->dma_base) {
					put_device(&dma_dev->dev);
					of_node_put(dma_node);
					return -EINVAL;
				}

				/* DMA is interrupt driven only if the
				 * engine supplies an irq. */
				i = platform_get_irq(dma_dev, 0);
				if (i > 0) {
					irq = i;
					irq_handler = octeon_cf_interrupt;
				}
				put_device(&dma_dev->dev);
			}
			of_node_put(dma_node);
		}
		res_cs1 = platform_get_resource(pdev, IORESOURCE_MEM, 1);
		if (!res_cs1)
			return -EINVAL;

		cs1 = devm_ioremap(&pdev->dev, res_cs1->start,
				   resource_size(res_cs1));
		if (!cs1)
			return rv;

		/* True IDE needs a second (address, size) pair in "reg". */
		if (reg_len < (n_addr + n_size + 1) * sizeof(__be32))
			return -EINVAL;

		cs_num += n_addr + n_size;
		cf_port->cs1 = be32_to_cpup(cs_num);
	}

	res_cs0 = platform_get_resource(pdev, IORESOURCE_MEM, 0);
	if (!res_cs0)
		return -EINVAL;

	cs0 = devm_ioremap(&pdev->dev, res_cs0->start,
			   resource_size(res_cs0));
	if (!cs0)
		return rv;

	/* allocate host */
	host = ata_host_alloc(&pdev->dev, 1);
	if (!host)
		return rv;

	ap = host->ports[0];
	ap->private_data = cf_port;
	pdev->dev.platform_data = cf_port;
	cf_port->ap = ap;
	ap->ops = &octeon_cf_ops;
	ap->pio_mask = ATA_PIO6;
	ap->flags |= ATA_FLAG_NO_ATAPI | ATA_FLAG_PIO_POLLING;

	if (!is_16bit) {
		/* 8-bit wiring: standard SFF layout at cs0 + 0x800. */
		base = cs0 + 0x800;

		ap->ioaddr.cmd_addr	= base;
		ata_sff_std_ports(&ap->ioaddr);

		ap->ioaddr.altstatus_addr = base + 0xe;
		ap->ioaddr.ctl_addr	= base + 0xe;
		octeon_cf_ops.sff_data_xfer = octeon_cf_data_xfer8;
	} else if (cf_port->is_true_ide) {
		/* 16-bit True IDE: byte registers at odd offsets on cs0,
		 * control/altstatus on cs1. */
		base = cs0;
		ap->ioaddr.cmd_addr	= base + (ATA_REG_CMD << 1) + 1;
		ap->ioaddr.data_addr	= base + (ATA_REG_DATA << 1);
		ap->ioaddr.error_addr	= base + (ATA_REG_ERR << 1) + 1;
		ap->ioaddr.feature_addr	= base + (ATA_REG_FEATURE << 1) + 1;
		ap->ioaddr.nsect_addr	= base + (ATA_REG_NSECT << 1) + 1;
		ap->ioaddr.lbal_addr	= base + (ATA_REG_LBAL << 1) + 1;
		ap->ioaddr.lbam_addr	= base + (ATA_REG_LBAM << 1) + 1;
		ap->ioaddr.lbah_addr	= base + (ATA_REG_LBAH << 1) + 1;
		ap->ioaddr.device_addr	= base + (ATA_REG_DEVICE << 1) + 1;
		ap->ioaddr.status_addr	= base + (ATA_REG_STATUS << 1) + 1;
		ap->ioaddr.command_addr	= base + (ATA_REG_CMD << 1) + 1;
		ap->ioaddr.altstatus_addr = cs1 + (6 << 1) + 1;
		ap->ioaddr.ctl_addr	= cs1 + (6 << 1) + 1;
		octeon_cf_ops.sff_data_xfer = octeon_cf_data_xfer16;

		ap->mwdma_mask	= enable_dma ? ATA_MWDMA4 : 0;

		/* True IDE mode needs a timer to poll for not-busy.  */
		hrtimer_init(&cf_port->delayed_finish, CLOCK_MONOTONIC,
			     HRTIMER_MODE_REL);
		cf_port->delayed_finish.function = octeon_cf_delayed_finish;
	} else {
		/* 16 bit but not True IDE */
		base = cs0 + 0x800;
		octeon_cf_ops.sff_data_xfer	= octeon_cf_data_xfer16;
		octeon_cf_ops.softreset		= octeon_cf_softreset16;
		octeon_cf_ops.sff_check_status	= octeon_cf_check_status16;
		octeon_cf_ops.sff_tf_read	= octeon_cf_tf_read16;
		octeon_cf_ops.sff_tf_load	= octeon_cf_tf_load16;
		octeon_cf_ops.sff_exec_command	= octeon_cf_exec_command16;

		ap->ioaddr.data_addr	= base + ATA_REG_DATA;
		ap->ioaddr.nsect_addr	= base + ATA_REG_NSECT;
		ap->ioaddr.lbal_addr	= base + ATA_REG_LBAL;
		ap->ioaddr.ctl_addr	= base + 0xe;
		ap->ioaddr.altstatus_addr = base + 0xe;
	}
	cf_port->c0 = ap->ioaddr.ctl_addr;

	rv = dma_coerce_mask_and_coherent(&pdev->dev, DMA_BIT_MASK(64));
	if (rv)
		return rv;

	ata_port_desc(ap, "cmd %p ctl %p", base, ap->ioaddr.ctl_addr);

	dev_info(&pdev->dev, "version " DRV_VERSION" %d bit%s.\n",
		 is_16bit ? 16 : 8,
		 cf_port->is_true_ide ? ", True IDE" : "");

	return ata_host_activate(host, irq, irq_handler,
				 IRQF_SHARED, &octeon_cf_sht);
}
/*
 * Shutdown: stop and clear the DMA engine, disable and acknowledge
 * its interrupt, then pulse SRST through the device control register.
 */
static void octeon_cf_shutdown(struct device *dev)
{
	union cvmx_mio_boot_dma_cfgx dma_cfg;
	union cvmx_mio_boot_dma_intx dma_int;

	struct octeon_cf_port *cf_port = dev_get_platdata(dev);

	if (cf_port->dma_base) {
		/* Stop and clear the dma engine.  */
		dma_cfg.u64 = 0;
		dma_cfg.s.size = -1;
		cvmx_write_csr(cf_port->dma_base + DMA_CFG, dma_cfg.u64);

		/* Disable the interrupt.  */
		dma_int.u64 = 0;
		cvmx_write_csr(cf_port->dma_base + DMA_INT_EN, dma_int.u64);

		/* Clear the DMA complete status */
		dma_int.s.done = 1;
		cvmx_write_csr(cf_port->dma_base + DMA_INT, dma_int.u64);

		/* Soft-reset the device via the control register. */
		__raw_writeb(0, cf_port->c0);
		udelay(20);
		__raw_writeb(ATA_SRST, cf_port->c0);
		udelay(20);
		__raw_writeb(0, cf_port->c0);
		mdelay(100);
	}
}
/* Device tree match table. */
static const struct of_device_id octeon_cf_match[] = {
	{ .compatible = "cavium,ebt3000-compact-flash", },
	{ /* sentinel */ }
};
MODULE_DEVICE_TABLE(of, octeon_cf_match);
/* Platform driver; no .remove — the device is not hot-unpluggable. */
static struct platform_driver octeon_cf_driver = {
	.probe		= octeon_cf_probe,
	.driver		= {
		.name	= DRV_NAME,
		.of_match_table = octeon_cf_match,
		.shutdown = octeon_cf_shutdown
	},
};
/* Module entry point: register the platform driver. */
static int __init octeon_cf_init(void)
{
	return platform_driver_register(&octeon_cf_driver);
}


MODULE_AUTHOR("David Daney <[email protected]>");
MODULE_DESCRIPTION("low-level driver for Cavium OCTEON Compact Flash PATA");
MODULE_LICENSE("GPL");
MODULE_VERSION(DRV_VERSION);
MODULE_ALIAS("platform:" DRV_NAME);

module_init(octeon_cf_init);