  1. // SPDX-License-Identifier: GPL-2.0-only
  2. /* sun_esp.c: ESP front-end for Sparc SBUS systems.
  3. *
  4. * Copyright (C) 2007, 2008 David S. Miller ([email protected])
  5. */
  6. #include <linux/kernel.h>
  7. #include <linux/types.h>
  8. #include <linux/delay.h>
  9. #include <linux/module.h>
  10. #include <linux/mm.h>
  11. #include <linux/init.h>
  12. #include <linux/dma-mapping.h>
  13. #include <linux/of.h>
  14. #include <linux/of_device.h>
  15. #include <linux/gfp.h>
  16. #include <asm/irq.h>
  17. #include <asm/io.h>
  18. #include <asm/dma.h>
  19. #include <scsi/scsi_host.h>
  20. #include "esp_scsi.h"
#define DRV_MODULE_NAME		"sun_esp"
#define PFX			DRV_MODULE_NAME ": "
#define DRV_VERSION		"1.100"
#define DRV_MODULE_RELDATE	"August 27, 2008"

/* Accessors for the DVMA companion chip's registers.  Both expand
 * against a local variable named 'esp' with a valid dma_regs mapping,
 * so they are only usable inside functions that have one in scope.
 */
#define dma_read32(REG) \
	sbus_readl(esp->dma_regs + (REG))
#define dma_write32(VAL, REG) \
	sbus_writel((VAL), esp->dma_regs + (REG))

/* DVMA chip revisions */
enum dvma_rev {
	dvmarev0,
	dvmaesc1,
	dvmarev1,
	dvmarev2,
	dvmarev3,
	dvmarevplus,
	dvmahme
};
  39. static int esp_sbus_setup_dma(struct esp *esp, struct platform_device *dma_of)
  40. {
  41. esp->dma = dma_of;
  42. esp->dma_regs = of_ioremap(&dma_of->resource[0], 0,
  43. resource_size(&dma_of->resource[0]),
  44. "espdma");
  45. if (!esp->dma_regs)
  46. return -ENOMEM;
  47. switch (dma_read32(DMA_CSR) & DMA_DEVICE_ID) {
  48. case DMA_VERS0:
  49. esp->dmarev = dvmarev0;
  50. break;
  51. case DMA_ESCV1:
  52. esp->dmarev = dvmaesc1;
  53. break;
  54. case DMA_VERS1:
  55. esp->dmarev = dvmarev1;
  56. break;
  57. case DMA_VERS2:
  58. esp->dmarev = dvmarev2;
  59. break;
  60. case DMA_VERHME:
  61. esp->dmarev = dvmahme;
  62. break;
  63. case DMA_VERSPLUS:
  64. esp->dmarev = dvmarevplus;
  65. break;
  66. }
  67. return 0;
  68. }
  69. static int esp_sbus_map_regs(struct esp *esp, int hme)
  70. {
  71. struct platform_device *op = to_platform_device(esp->dev);
  72. struct resource *res;
  73. /* On HME, two reg sets exist, first is DVMA,
  74. * second is ESP registers.
  75. */
  76. if (hme)
  77. res = &op->resource[1];
  78. else
  79. res = &op->resource[0];
  80. esp->regs = of_ioremap(res, 0, SBUS_ESP_REG_SIZE, "ESP");
  81. if (!esp->regs)
  82. return -ENOMEM;
  83. return 0;
  84. }
  85. static int esp_sbus_map_command_block(struct esp *esp)
  86. {
  87. esp->command_block = dma_alloc_coherent(esp->dev, 16,
  88. &esp->command_block_dma,
  89. GFP_KERNEL);
  90. if (!esp->command_block)
  91. return -ENOMEM;
  92. return 0;
  93. }
  94. static int esp_sbus_register_irq(struct esp *esp)
  95. {
  96. struct Scsi_Host *host = esp->host;
  97. struct platform_device *op = to_platform_device(esp->dev);
  98. host->irq = op->archdata.irqs[0];
  99. return request_irq(host->irq, scsi_esp_intr, IRQF_SHARED, "ESP", esp);
  100. }
  101. static void esp_get_scsi_id(struct esp *esp, struct platform_device *espdma)
  102. {
  103. struct platform_device *op = to_platform_device(esp->dev);
  104. struct device_node *dp;
  105. dp = op->dev.of_node;
  106. esp->scsi_id = of_getintprop_default(dp, "initiator-id", 0xff);
  107. if (esp->scsi_id != 0xff)
  108. goto done;
  109. esp->scsi_id = of_getintprop_default(dp, "scsi-initiator-id", 0xff);
  110. if (esp->scsi_id != 0xff)
  111. goto done;
  112. esp->scsi_id = of_getintprop_default(espdma->dev.of_node,
  113. "scsi-initiator-id", 7);
  114. done:
  115. esp->host->this_id = esp->scsi_id;
  116. esp->scsi_id_mask = (1 << esp->scsi_id);
  117. }
  118. static void esp_get_differential(struct esp *esp)
  119. {
  120. struct platform_device *op = to_platform_device(esp->dev);
  121. struct device_node *dp;
  122. dp = op->dev.of_node;
  123. if (of_find_property(dp, "differential", NULL))
  124. esp->flags |= ESP_FLAG_DIFFERENTIAL;
  125. else
  126. esp->flags &= ~ESP_FLAG_DIFFERENTIAL;
  127. }
  128. static void esp_get_clock_params(struct esp *esp)
  129. {
  130. struct platform_device *op = to_platform_device(esp->dev);
  131. struct device_node *bus_dp, *dp;
  132. int fmhz;
  133. dp = op->dev.of_node;
  134. bus_dp = dp->parent;
  135. fmhz = of_getintprop_default(dp, "clock-frequency", 0);
  136. if (fmhz == 0)
  137. fmhz = of_getintprop_default(bus_dp, "clock-frequency", 0);
  138. esp->cfreq = fmhz;
  139. }
  140. static void esp_get_bursts(struct esp *esp, struct platform_device *dma_of)
  141. {
  142. struct device_node *dma_dp = dma_of->dev.of_node;
  143. struct platform_device *op = to_platform_device(esp->dev);
  144. struct device_node *dp;
  145. u8 bursts, val;
  146. dp = op->dev.of_node;
  147. bursts = of_getintprop_default(dp, "burst-sizes", 0xff);
  148. val = of_getintprop_default(dma_dp, "burst-sizes", 0xff);
  149. if (val != 0xff)
  150. bursts &= val;
  151. val = of_getintprop_default(dma_dp->parent, "burst-sizes", 0xff);
  152. if (val != 0xff)
  153. bursts &= val;
  154. if (bursts == 0xff ||
  155. (bursts & DMA_BURST16) == 0 ||
  156. (bursts & DMA_BURST32) == 0)
  157. bursts = (DMA_BURST32 - 1);
  158. esp->bursts = bursts;
  159. }
/* Gather all OF-tree configuration for this ESP instance: initiator
 * ID, differential flag, clock frequency, and burst sizes.
 */
static void esp_sbus_get_props(struct esp *esp, struct platform_device *espdma)
{
	esp_get_scsi_id(esp, espdma);
	esp_get_differential(esp);
	esp_get_clock_params(esp);
	esp_get_bursts(esp, espdma);
}
  167. static void sbus_esp_write8(struct esp *esp, u8 val, unsigned long reg)
  168. {
  169. sbus_writeb(val, esp->regs + (reg * 4UL));
  170. }
  171. static u8 sbus_esp_read8(struct esp *esp, unsigned long reg)
  172. {
  173. return sbus_readb(esp->regs + (reg * 4UL));
  174. }
  175. static int sbus_esp_irq_pending(struct esp *esp)
  176. {
  177. if (dma_read32(DMA_CSR) & (DMA_HNDL_INTR | DMA_HNDL_ERROR))
  178. return 1;
  179. return 0;
  180. }
/* Reset the DVMA engine and program it for the detected chip revision:
 * burst size, clock delay, and (on HME) 64-bit SBUS mode.  Finishes by
 * enabling DVMA interrupts.  Called by the esp_scsi core through
 * esp_driver_ops.reset_dma.
 */
static void sbus_esp_reset_dma(struct esp *esp)
{
	int can_do_burst16, can_do_burst32, can_do_burst64;
	int can_do_sbus64, lim;
	struct platform_device *op = to_platform_device(esp->dev);
	u32 val;

	can_do_burst16 = (esp->bursts & DMA_BURST16) != 0;
	can_do_burst32 = (esp->bursts & DMA_BURST32) != 0;
	can_do_burst64 = 0;
	can_do_sbus64 = 0;
	if (sbus_can_dma_64bit())
		can_do_sbus64 = 1;
	if (sbus_can_burst64())
		can_do_burst64 = (esp->bursts & DMA_BURST64) != 0;

	/* Put the DVMA into a known state. */
	if (esp->dmarev != dvmahme) {
		/* Pulse the SCSI-reset bit in DMA_CSR. */
		val = dma_read32(DMA_CSR);
		dma_write32(val | DMA_RST_SCSI, DMA_CSR);
		dma_write32(val & ~DMA_RST_SCSI, DMA_CSR);
	}
	switch (esp->dmarev) {
	case dvmahme:
		/* HME: full FAS366 reset, then build the CSR image we
		 * cache in prev_hme_dmacsr and keep reprogramming from.
		 */
		dma_write32(DMA_RESET_FAS366, DMA_CSR);
		dma_write32(DMA_RST_SCSI, DMA_CSR);

		esp->prev_hme_dmacsr = (DMA_PARITY_OFF | DMA_2CLKS |
					DMA_SCSI_DISAB | DMA_INT_ENAB);
		esp->prev_hme_dmacsr &= ~(DMA_ENABLE | DMA_ST_WRITE |
					  DMA_BRST_SZ);

		if (can_do_burst64)
			esp->prev_hme_dmacsr |= DMA_BRST64;
		else if (can_do_burst32)
			esp->prev_hme_dmacsr |= DMA_BRST32;

		if (can_do_sbus64) {
			esp->prev_hme_dmacsr |= DMA_SCSI_SBUS64;
			sbus_set_sbus64(&op->dev, esp->bursts);
		}

		/* Wait (bounded, ~1ms) for any pending read to clear
		 * before clobbering the CSR.
		 */
		lim = 1000;
		while (dma_read32(DMA_CSR) & DMA_PEND_READ) {
			if (--lim == 0) {
				printk(KERN_ALERT PFX "esp%d: DMA_PEND_READ "
				       "will not clear!\n",
				       esp->host->unique_id);
				break;
			}
			udelay(1);
		}

		dma_write32(0, DMA_CSR);
		dma_write32(esp->prev_hme_dmacsr, DMA_CSR);

		dma_write32(0, DMA_ADDR);
		break;

	case dvmarev2:
		/* Rev2 needs the 3-clock delay except with an ESP100. */
		if (esp->rev != ESP100) {
			val = dma_read32(DMA_CSR);
			dma_write32(val | DMA_3CLKS, DMA_CSR);
		}
		break;

	case dvmarev3:
		/* Rev3: 2-clock delay, and 32-byte bursts if available. */
		val = dma_read32(DMA_CSR);
		val &= ~DMA_3CLKS;
		val |= DMA_2CLKS;
		if (can_do_burst32) {
			val &= ~DMA_BRST_SZ;
			val |= DMA_BRST32;
		}
		dma_write32(val, DMA_CSR);
		break;

	case dvmaesc1:
		/* ESC1: address-latch enable, no byte-count mode, and the
		 * ESC burst bit only when 16-byte (but not 32-byte) bursts
		 * are usable.
		 */
		val = dma_read32(DMA_CSR);
		val |= DMA_ADD_ENABLE;
		val &= ~DMA_BCNT_ENAB;
		if (!can_do_burst32 && can_do_burst16) {
			val |= DMA_ESC_BURST;
		} else {
			val &= ~(DMA_ESC_BURST);
		}
		dma_write32(val, DMA_CSR);
		break;

	default:
		break;
	}

	/* Enable interrupts. */
	val = dma_read32(DMA_CSR);
	dma_write32(val | DMA_INT_ENAB, DMA_CSR);
}
/* Drain any data still sitting in the DVMA FIFO out to memory, polling
 * (bounded, ~1ms) until the drain completes.  HME drains on its own,
 * so nothing to do there.
 */
static void sbus_esp_dma_drain(struct esp *esp)
{
	u32 csr;
	int lim;

	if (esp->dmarev == dvmahme)
		return;

	csr = dma_read32(DMA_CSR);
	if (!(csr & DMA_FIFO_ISDRAIN))
		return;

	/* rev3 and ESC1 need no explicit drain trigger; the others do. */
	if (esp->dmarev != dvmarev3 && esp->dmarev != dvmaesc1)
		dma_write32(csr | DMA_FIFO_STDRAIN, DMA_CSR);

	lim = 1000;
	while (dma_read32(DMA_CSR) & DMA_FIFO_ISDRAIN) {
		if (--lim == 0) {
			printk(KERN_ALERT PFX "esp%d: DMA will not drain!\n",
			       esp->host->unique_id);
			break;
		}
		udelay(1);
	}
}
/* Abort/invalidate the current DVMA transfer.  HME gets a SCSI reset
 * and a CSR reprogram from the cached image with transfers disabled;
 * other revisions wait for pending reads, then pulse DMA_FIFO_INV.
 */
static void sbus_esp_dma_invalidate(struct esp *esp)
{
	if (esp->dmarev == dvmahme) {
		dma_write32(DMA_RST_SCSI, DMA_CSR);

		/* Rebuild the cached CSR: force parity-off/2-clock/
		 * scsi-disable/int-enable, clear transfer direction
		 * and enable bits.
		 */
		esp->prev_hme_dmacsr = ((esp->prev_hme_dmacsr |
					 (DMA_PARITY_OFF | DMA_2CLKS |
					  DMA_SCSI_DISAB | DMA_INT_ENAB)) &
					~(DMA_ST_WRITE | DMA_ENABLE));

		dma_write32(0, DMA_CSR);
		dma_write32(esp->prev_hme_dmacsr, DMA_CSR);

		/* This is necessary to avoid having the SCSI channel
		 * engine lock up on us.
		 */
		dma_write32(0, DMA_ADDR);
	} else {
		u32 val;
		int lim;

		/* Wait (bounded, ~1ms) for any pending read to finish. */
		lim = 1000;
		while ((val = dma_read32(DMA_CSR)) & DMA_PEND_READ) {
			if (--lim == 0) {
				printk(KERN_ALERT PFX "esp%d: DMA will not "
				       "invalidate!\n", esp->host->unique_id);
				break;
			}
			udelay(1);
		}

		/* Pulse FIFO_INV with enable/direction/byte-count cleared
		 * to flush the FIFO.
		 */
		val &= ~(DMA_ENABLE | DMA_ST_WRITE | DMA_BCNT_ENAB);
		val |= DMA_FIFO_INV;
		dma_write32(val, DMA_CSR);
		val &= ~DMA_FIFO_INV;

		dma_write32(val, DMA_CSR);
	}
}
/* Kick off a DMA transfer: program the ESP transfer counter, set up the
 * DVMA direction/enable bits, supply count and address, and issue the
 * ESP command.  Note the ordering difference: on FASHME the ESP command
 * is issued before the DVMA is programmed; on older DVMA it is issued
 * last.
 */
static void sbus_esp_send_dma_cmd(struct esp *esp, u32 addr, u32 esp_count,
				  u32 dma_count, int write, u8 cmd)
{
	u32 csr;

	BUG_ON(!(cmd & ESP_CMD_DMA));

	/* Low 16 bits of the transfer count go to the ESP counter. */
	sbus_esp_write8(esp, (esp_count >> 0) & 0xff, ESP_TCLOW);
	sbus_esp_write8(esp, (esp_count >> 8) & 0xff, ESP_TCMED);
	if (esp->rev == FASHME) {
		/* FASHME takes the upper count bits in FAS_RLO/FAS_RHI. */
		sbus_esp_write8(esp, (esp_count >> 16) & 0xff, FAS_RLO);
		sbus_esp_write8(esp, 0, FAS_RHI);

		scsi_esp_cmd(esp, cmd);

		/* Program the DVMA from the cached HME CSR image. */
		csr = esp->prev_hme_dmacsr;
		csr |= DMA_SCSI_DISAB | DMA_ENABLE;
		if (write)
			csr |= DMA_ST_WRITE;
		else
			csr &= ~DMA_ST_WRITE;
		esp->prev_hme_dmacsr = csr;

		dma_write32(dma_count, DMA_COUNT);
		dma_write32(addr, DMA_ADDR);
		dma_write32(csr, DMA_CSR);
	} else {
		csr = dma_read32(DMA_CSR);
		csr |= DMA_ENABLE;
		if (write)
			csr |= DMA_ST_WRITE;
		else
			csr &= ~DMA_ST_WRITE;
		dma_write32(csr, DMA_CSR);
		if (esp->dmarev == dvmaesc1) {
			/* ESC1 wants a padded, page-aligned byte count. */
			u32 end = PAGE_ALIGN(addr + dma_count + 16U);
			dma_write32(end - addr, DMA_COUNT);
		}
		dma_write32(addr, DMA_ADDR);

		scsi_esp_cmd(esp, cmd);
	}
}
  356. static int sbus_esp_dma_error(struct esp *esp)
  357. {
  358. u32 csr = dma_read32(DMA_CSR);
  359. if (csr & DMA_HNDL_ERROR)
  360. return 1;
  361. return 0;
  362. }
/* Hooks handed to the shared esp_scsi core; all chip and DVMA access
 * from the core funnels through these callbacks.
 */
static const struct esp_driver_ops sbus_esp_ops = {
	.esp_write8	=	sbus_esp_write8,
	.esp_read8	=	sbus_esp_read8,
	.irq_pending	=	sbus_esp_irq_pending,
	.reset_dma	=	sbus_esp_reset_dma,
	.dma_drain	=	sbus_esp_dma_drain,
	.dma_invalidate	=	sbus_esp_dma_invalidate,
	.send_dma_cmd	=	sbus_esp_send_dma_cmd,
	.dma_error	=	sbus_esp_dma_error,
};
  373. static int esp_sbus_probe_one(struct platform_device *op,
  374. struct platform_device *espdma, int hme)
  375. {
  376. struct scsi_host_template *tpnt = &scsi_esp_template;
  377. struct Scsi_Host *host;
  378. struct esp *esp;
  379. int err;
  380. host = scsi_host_alloc(tpnt, sizeof(struct esp));
  381. err = -ENOMEM;
  382. if (!host)
  383. goto fail;
  384. host->max_id = (hme ? 16 : 8);
  385. esp = shost_priv(host);
  386. esp->host = host;
  387. esp->dev = &op->dev;
  388. esp->ops = &sbus_esp_ops;
  389. if (hme)
  390. esp->flags |= ESP_FLAG_WIDE_CAPABLE;
  391. err = esp_sbus_setup_dma(esp, espdma);
  392. if (err < 0)
  393. goto fail_unlink;
  394. err = esp_sbus_map_regs(esp, hme);
  395. if (err < 0)
  396. goto fail_unlink;
  397. err = esp_sbus_map_command_block(esp);
  398. if (err < 0)
  399. goto fail_unmap_regs;
  400. err = esp_sbus_register_irq(esp);
  401. if (err < 0)
  402. goto fail_unmap_command_block;
  403. esp_sbus_get_props(esp, espdma);
  404. /* Before we try to touch the ESP chip, ESC1 dma can
  405. * come up with the reset bit set, so make sure that
  406. * is clear first.
  407. */
  408. if (esp->dmarev == dvmaesc1) {
  409. u32 val = dma_read32(DMA_CSR);
  410. dma_write32(val & ~DMA_RST_SCSI, DMA_CSR);
  411. }
  412. dev_set_drvdata(&op->dev, esp);
  413. err = scsi_esp_register(esp);
  414. if (err)
  415. goto fail_free_irq;
  416. return 0;
  417. fail_free_irq:
  418. free_irq(host->irq, esp);
  419. fail_unmap_command_block:
  420. dma_free_coherent(&op->dev, 16,
  421. esp->command_block,
  422. esp->command_block_dma);
  423. fail_unmap_regs:
  424. of_iounmap(&op->resource[(hme ? 1 : 0)], esp->regs, SBUS_ESP_REG_SIZE);
  425. fail_unlink:
  426. scsi_host_put(host);
  427. fail:
  428. return err;
  429. }
  430. static int esp_sbus_probe(struct platform_device *op)
  431. {
  432. struct device_node *dma_node = NULL;
  433. struct device_node *dp = op->dev.of_node;
  434. struct platform_device *dma_of = NULL;
  435. int hme = 0;
  436. int ret;
  437. if (of_node_name_eq(dp->parent, "espdma") ||
  438. of_node_name_eq(dp->parent, "dma"))
  439. dma_node = dp->parent;
  440. else if (of_node_name_eq(dp, "SUNW,fas")) {
  441. dma_node = op->dev.of_node;
  442. hme = 1;
  443. }
  444. if (dma_node)
  445. dma_of = of_find_device_by_node(dma_node);
  446. if (!dma_of)
  447. return -ENODEV;
  448. ret = esp_sbus_probe_one(op, dma_of, hme);
  449. if (ret)
  450. put_device(&dma_of->dev);
  451. return ret;
  452. }
/* Driver remove: unregister from the SCSI midlayer, quiesce DVMA
 * interrupts, then release the IRQ, command block, both register
 * mappings, and the DVMA device reference taken at probe time —
 * the reverse of esp_sbus_probe_one().
 */
static int esp_sbus_remove(struct platform_device *op)
{
	struct esp *esp = dev_get_drvdata(&op->dev);
	struct platform_device *dma_of = esp->dma;
	unsigned int irq = esp->host->irq;
	bool is_hme;
	u32 val;

	scsi_esp_unregister(esp);

	/* Disable interrupts. */
	val = dma_read32(DMA_CSR);
	dma_write32(val & ~DMA_INT_ENAB, DMA_CSR);

	free_irq(irq, esp);

	/* On HME the ESP registers live in resource[1] (see map_regs). */
	is_hme = (esp->dmarev == dvmahme);

	dma_free_coherent(&op->dev, 16,
			  esp->command_block,
			  esp->command_block_dma);
	of_iounmap(&op->resource[(is_hme ? 1 : 0)], esp->regs,
		   SBUS_ESP_REG_SIZE);
	of_iounmap(&dma_of->resource[0], esp->dma_regs,
		   resource_size(&dma_of->resource[0]));

	scsi_host_put(esp->host);

	dev_set_drvdata(&op->dev, NULL);

	put_device(&dma_of->dev);

	return 0;
}
/* OF node names this driver binds to; "SUNW,fas" is the HME/FAS366
 * variant handled specially in esp_sbus_probe().
 */
static const struct of_device_id esp_match[] = {
	{
		.name = "SUNW,esp",
	},
	{
		.name = "SUNW,fas",
	},
	{
		.name = "esp",
	},
	{},
};
MODULE_DEVICE_TABLE(of, esp_match);
/* Platform-driver glue; module_platform_driver() generates the module
 * init/exit that registers and unregisters it.
 */
static struct platform_driver esp_sbus_driver = {
	.driver = {
		.name = "esp",
		.of_match_table = esp_match,
	},
	.probe		= esp_sbus_probe,
	.remove		= esp_sbus_remove,
};
module_platform_driver(esp_sbus_driver);

MODULE_DESCRIPTION("Sun ESP SCSI driver");
MODULE_AUTHOR("David S. Miller ([email protected])");
MODULE_LICENSE("GPL");
MODULE_VERSION(DRV_VERSION);