sata_dwc_460ex.c 34 KB

12345678910111213141516171819202122232425262728293031323334353637383940414243444546474849505152535455565758596061626364656667686970717273747576777879808182838485868788899091929394959697989910010110210310410510610710810911011111211311411511611711811912012112212312412512612712812913013113213313413513613713813914014114214314414514614714814915015115215315415515615715815916016116216316416516616716816917017117217317417517617717817918018118218318418518618718818919019119219319419519619719819920020120220320420520620720820921021121221321421521621721821922022122222322422522622722822923023123223323423523623723823924024124224324424524624724824925025125225325425525625725825926026126226326426526626726826927027127227327427527627727827928028128228328428528628728828929029129229329429529629729829930030130230330430530630730830931031131231331431531631731831932032132232332432532632732832933033133233333433533633733833934034134234334434534634734834935035135235335435535635735835936036136236336436536636736836937037137237337437537637737837938038138238338438538638738838939039139239339439539639739839940040140240340440540640740840941041141241341441541641741841942042142242342442542642742842943043143243343443543643743843944044144244344444544644744844945045145245345445545645745845946046146246346446546646746846947047147247347447547647747847948048148248348448548648748848949049149249349449549649749849950050150250350450550650750850951051151251351451551651751851952052152252352452552652752852953053153253353453553653753853954054154254354454554654754854955055155255355455555655755855956056156256356456556656756856957057157257357457557657757857958058158258358458558658758858959059159259359459559659759859960060160260360460560660760860961061161261361461561661761861962062162262362462562662762862963063163263363463563663763863964064164264364464564664764864965065165265365465565665765865966066166266366466566666766866967067167267367467567667767867968068168268368468568668768868969069169269369469569669769869970070170270
370470570670770870971071171271371471571671771871972072172272372472572672772872973073173273373473573673773873974074174274374474574674774874975075175275375475575675775875976076176276376476576676776876977077177277377477577677777877978078178278378478578678778878979079179279379479579679779879980080180280380480580680780880981081181281381481581681781881982082182282382482582682782882983083183283383483583683783883984084184284384484584684784884985085185285385485585685785885986086186286386486586686786886987087187287387487587687787887988088188288388488588688788888989089189289389489589689789889990090190290390490590690790890991091191291391491591691791891992092192292392492592692792892993093193293393493593693793893994094194294394494594694794894995095195295395495595695795895996096196296396496596696796896997097197297397497597697797897998098198298398498598698798898999099199299399499599699799899910001001100210031004100510061007100810091010101110121013101410151016101710181019102010211022102310241025102610271028102910301031103210331034103510361037103810391040104110421043104410451046104710481049105010511052105310541055105610571058105910601061106210631064106510661067106810691070107110721073107410751076107710781079108010811082108310841085108610871088108910901091109210931094109510961097109810991100110111021103110411051106110711081109111011111112111311141115111611171118111911201121112211231124112511261127112811291130113111321133113411351136113711381139114011411142114311441145114611471148114911501151115211531154115511561157115811591160116111621163116411651166116711681169117011711172117311741175117611771178117911801181118211831184118511861187118811891190119111921193119411951196119711981199120012011202120312041205120612071208120912101211121212131214121512161217121812191220122112221223122412251226122712281229123012311232123312341235123612371238123912401241124212431244124512461247124812491250125112521253125412551256125712581259
  1. // SPDX-License-Identifier: GPL-2.0-or-later
  2. /*
  3. * drivers/ata/sata_dwc_460ex.c
  4. *
  5. * Synopsys DesignWare Cores (DWC) SATA host driver
  6. *
  7. * Author: Mark Miesfeld <[email protected]>
  8. *
  9. * Ported from 2.6.19.2 to 2.6.25/26 by Stefan Roese <[email protected]>
  10. * Copyright 2008 DENX Software Engineering
  11. *
  12. * Based on versions provided by AMCC and Synopsys which are:
  13. * Copyright 2006 Applied Micro Circuits Corporation
  14. * COPYRIGHT (C) 2005 SYNOPSYS, INC. ALL RIGHTS RESERVED
  15. */
  16. #include <linux/kernel.h>
  17. #include <linux/module.h>
  18. #include <linux/device.h>
  19. #include <linux/dmaengine.h>
  20. #include <linux/of_address.h>
  21. #include <linux/of_irq.h>
  22. #include <linux/of_platform.h>
  23. #include <linux/platform_device.h>
  24. #include <linux/phy/phy.h>
  25. #include <linux/libata.h>
  26. #include <linux/slab.h>
  27. #include <trace/events/libata.h>
  28. #include "libata.h"
  29. #include <scsi/scsi_host.h>
  30. #include <scsi/scsi_cmnd.h>
/* These two are defined in "libata.h" */
#undef DRV_NAME
#undef DRV_VERSION
#define DRV_NAME "sata-dwc"
#define DRV_VERSION "1.3"

/*
 * MMIO accessors. Note the argument order is (address, value) here, while
 * writel_relaxed() itself takes (value, address); the macro swaps them.
 * Relaxed variants are used: no implicit memory barriers.
 */
#define sata_dwc_writel(a, v) writel_relaxed(v, a)
#define sata_dwc_readl(a) readl_relaxed(a)

/* Some platforms do not define NO_IRQ; 0 means "no interrupt mapped". */
#ifndef NO_IRQ
#define NO_IRQ 0
#endif

#define AHB_DMA_BRST_DFLT 64 /* 16 data items burst length */
enum {
	SATA_DWC_MAX_PORTS = 1,		/* controller exposes a single port */

	SATA_DWC_SCR_OFFSET = 0x24,	/* SCR register block offset */
	SATA_DWC_REG_OFFSET = 0x64,	/* DWC-specific register block offset */
};
/*
 * DWC SATA controller register map. Accessed through sata_dwc_readl()/
 * sata_dwc_writel() on an ioremapped __iomem pointer; field order must
 * match the hardware layout exactly.
 */
struct sata_dwc_regs {
	u32 fptagr;		/* 1st party DMA tag */
	u32 fpbor;		/* 1st party DMA buffer offset */
	u32 fptcr;		/* 1st party DMA Xfr count */
	u32 dmacr;		/* DMA Control */
	u32 dbtsr;		/* DMA Burst Transac size */
	u32 intpr;		/* Interrupt Pending */
	u32 intmr;		/* Interrupt Mask */
	u32 errmr;		/* Error Mask */
	u32 llcr;		/* Link Layer Control */
	u32 phycr;		/* PHY Control */
	u32 physr;		/* PHY Status */
	u32 rxbistpd;		/* Recvd BIST pattern def register */
	u32 rxbistpd1;		/* Recvd BIST data dword1 */
	u32 rxbistpd2;		/* Recvd BIST pattern data dword2 */
	u32 txbistpd;		/* Trans BIST pattern def register */
	u32 txbistpd1;		/* Trans BIST data dword1 */
	u32 txbistpd2;		/* Trans BIST data dword2 */
	u32 bistcr;		/* BIST Control Register */
	u32 bistfctr;		/* BIST FIS Count Register */
	u32 bistsr;		/* BIST Status Register */
	u32 bistdecr;		/* BIST Dword Error count register */
	u32 res[15];		/* Reserved locations */
	u32 testr;		/* Test Register */
	u32 versionr;		/* Version Register */
	u32 idr;		/* ID Register */
	u32 unimpl[192];	/* Unimplemented */
	u32 dmadr[256];		/* FIFO Locations in DMA Mode */
};
enum {
	/* SCR (SControl/SStatus/SError) bit definitions */
	SCR_SCONTROL_DET_ENABLE	= 0x00000001,
	SCR_SSTATUS_DET_PRESENT	= 0x00000001,
	SCR_SERROR_DIAG_X	= 0x04000000,

	/* DWC SATA Register Operations */
	SATA_DWC_TXFIFO_DEPTH	= 0x01FF,
	SATA_DWC_RXFIFO_DEPTH	= 0x01FF,

	/* DMACR: TMOD_TXCHEN must stay set whenever a channel is enabled */
	SATA_DWC_DMACR_TMOD_TXCHEN = 0x00000004,
	SATA_DWC_DMACR_TXCHEN	= (0x00000001 | SATA_DWC_DMACR_TMOD_TXCHEN),
	SATA_DWC_DMACR_RXCHEN	= (0x00000002 | SATA_DWC_DMACR_TMOD_TXCHEN),
	SATA_DWC_DMACR_TXRXCH_CLEAR = SATA_DWC_DMACR_TMOD_TXCHEN,

	/* Interrupt pending register (intpr) bits */
	SATA_DWC_INTPR_DMAT	= 0x00000001,
	SATA_DWC_INTPR_NEWFP	= 0x00000002,
	SATA_DWC_INTPR_PMABRT	= 0x00000004,
	SATA_DWC_INTPR_ERR	= 0x00000008,
	SATA_DWC_INTPR_NEWBIST	= 0x00000010,
	SATA_DWC_INTPR_IPF	= 0x10000000,

	/* Interrupt mask register (intmr) bits, matching intpr layout */
	SATA_DWC_INTMR_DMATM	= 0x00000001,
	SATA_DWC_INTMR_NEWFPM	= 0x00000002,
	SATA_DWC_INTMR_PMABRTM	= 0x00000004,
	SATA_DWC_INTMR_ERRM	= 0x00000008,
	SATA_DWC_INTMR_NEWBISTM	= 0x00000010,

	/* Link layer control register (llcr) bits */
	SATA_DWC_LLCR_SCRAMEN	= 0x00000001,
	SATA_DWC_LLCR_DESCRAMEN	= 0x00000002,
	SATA_DWC_LLCR_RPDEN	= 0x00000004,

	/* This is all error bits, zero's are reserved fields. */
	SATA_DWC_SERROR_ERR_BITS = 0x0FFF0F03
};
/* Extract the SPD (negotiated speed) field from SStatus (SCR0). */
#define SATA_DWC_SCR0_SPD_GET(v)	(((v) >> 4) & 0x0000000F)
/* Disable the TX/RX DMA channel while keeping TMOD_TXCHEN asserted. */
#define SATA_DWC_DMACR_TX_CLEAR(v)	(((v) & ~SATA_DWC_DMACR_TXCHEN) |\
						SATA_DWC_DMACR_TMOD_TXCHEN)
#define SATA_DWC_DMACR_RX_CLEAR(v)	(((v) & ~SATA_DWC_DMACR_RXCHEN) |\
						SATA_DWC_DMACR_TMOD_TXCHEN)
/* DBTSR burst sizes are programmed in 32-bit words (bytes / 4). */
#define SATA_DWC_DBTSR_MWR(size)	(((size)/4) & SATA_DWC_TXFIFO_DEPTH)
#define SATA_DWC_DBTSR_MRD(size)	((((size)/4) & SATA_DWC_RXFIFO_DEPTH)\
						<< 16)
/* Per-controller state; stored in ata_host->private_data. */
struct sata_dwc_device {
	struct device		*dev;		/* generic device struct */
	struct ata_probe_ent	*pe;		/* ptr to probe-ent */
	struct ata_host		*host;
	struct sata_dwc_regs __iomem *sata_dwc_regs;	/* DW SATA specific */
	u32			sactive_issued;	/* issued NCQ tags (bitmask) */
	u32			sactive_queued;	/* queued NCQ tags (bitmask) */
	struct phy		*phy;
	phys_addr_t		dmadr;		/* physical addr of DMA FIFO */
#ifdef CONFIG_SATA_DWC_OLD_DMA
	struct dw_dma_chip	*dma;		/* legacy embedded DW DMAC */
#endif
};
/*
 * Allow one extra special slot for commands and DMA management
 * to account for libata internal commands.
 */
#define SATA_DWC_QCMD_MAX	(ATA_MAX_QUEUE + 1)

/* Per-port state; stored in ata_port->private_data. Indexed by qc tag. */
struct sata_dwc_device_port {
	struct sata_dwc_device	*hsdev;
	int			cmd_issued[SATA_DWC_QCMD_MAX];
	int			dma_pending[SATA_DWC_QCMD_MAX];

	/* DMA info */
	struct dma_chan		*chan;
	struct dma_async_tx_descriptor *desc[SATA_DWC_QCMD_MAX];
	u32			dma_interrupt_count;
};
/*
 * Commonly used DWC SATA driver macros: recover driver state from the
 * private_data pointers libata hands back to us.
 */
#define HSDEV_FROM_HOST(host)	((struct sata_dwc_device *)(host)->private_data)
#define HSDEV_FROM_AP(ap)	((struct sata_dwc_device *)(ap)->host->private_data)
#define HSDEVP_FROM_AP(ap)	((struct sata_dwc_device_port *)(ap)->private_data)
#define HSDEV_FROM_QC(qc)	((struct sata_dwc_device *)(qc)->ap->host->private_data)
#define HSDEV_FROM_HSDEVP(p)	((struct sata_dwc_device *)(p)->hsdev)
enum {
	/* cmd_issued[] per-tag state */
	SATA_DWC_CMD_ISSUED_NOT		= 0,
	SATA_DWC_CMD_ISSUED_PEND	= 1,
	SATA_DWC_CMD_ISSUED_EXEC	= 2,
	SATA_DWC_CMD_ISSUED_NODATA	= 3,

	/* dma_pending[] per-tag state (NONE == 0 so kzalloc() initializes) */
	SATA_DWC_DMA_PENDING_NONE	= 0,
	SATA_DWC_DMA_PENDING_TX		= 1,
	SATA_DWC_DMA_PENDING_RX		= 2,
};
/*
 * Prototypes
 */
/* Forward declarations for routines referenced before their definitions. */
static void sata_dwc_bmdma_start_by_tag(struct ata_queued_cmd *qc, u8 tag);
static int sata_dwc_qc_complete(struct ata_port *ap, struct ata_queued_cmd *qc);
static void sata_dwc_dma_xfer_complete(struct ata_port *ap);
static void sata_dwc_clear_dmacr(struct sata_dwc_device_port *hsdevp, u8 tag);
  164. #ifdef CONFIG_SATA_DWC_OLD_DMA
  165. #include <linux/platform_data/dma-dw.h>
  166. #include <linux/dma/dw.h>
/*
 * Slave config for the legacy embedded DW AHB DMAC; a single shared
 * instance, its dma_dev is filled in at channel-request time.
 */
static struct dw_dma_slave sata_dwc_dma_dws = {
	.src_id = 0,
	.dst_id = 0,
	.m_master = 1,
	.p_master = 0,
};
/*
 * dma_request_channel() filter: accept only channels belonging to the DMA
 * device recorded in sata_dwc_dma_dws, and attach the slave config.
 * NOTE(review): @param (the hsdevp passed by the caller) is unused; the
 * filter relies entirely on the file-scope sata_dwc_dma_dws state.
 */
static bool sata_dwc_dma_filter(struct dma_chan *chan, void *param)
{
	struct dw_dma_slave *dws = &sata_dwc_dma_dws;

	if (dws->dma_dev != chan->device->dev)
		return false;

	chan->private = dws;
	return true;
}
/*
 * Acquire a DMA channel from the legacy embedded DW AHB DMAC for this
 * port. Returns 0 on success, -EAGAIN if no channel is available.
 */
static int sata_dwc_dma_get_channel_old(struct sata_dwc_device_port *hsdevp)
{
	struct sata_dwc_device *hsdev = hsdevp->hsdev;
	struct dw_dma_slave *dws = &sata_dwc_dma_dws;
	struct device *dev = hsdev->dev;
	dma_cap_mask_t mask;

	/* Bind the shared slave config to our DMA controller device. */
	dws->dma_dev = dev;

	dma_cap_zero(mask);
	dma_cap_set(DMA_SLAVE, mask);

	/* Acquire DMA channel */
	hsdevp->chan = dma_request_channel(mask, sata_dwc_dma_filter, hsdevp);
	if (!hsdevp->chan) {
		dev_err(dev, "%s: dma channel unavailable\n", __func__);
		return -EAGAIN;
	}

	return 0;
}
/*
 * Probe the legacy embedded DW AHB DMA controller: allocate its chip
 * descriptor, map its IRQ (device-tree interrupt index 1) and register
 * space (platform resource index 1), then register it with the dw_dmac
 * core. Returns 0 or a negative errno.
 */
static int sata_dwc_dma_init_old(struct platform_device *pdev,
				 struct sata_dwc_device *hsdev)
{
	struct device *dev = &pdev->dev;
	struct device_node *np = dev->of_node;

	hsdev->dma = devm_kzalloc(dev, sizeof(*hsdev->dma), GFP_KERNEL);
	if (!hsdev->dma)
		return -ENOMEM;

	hsdev->dma->dev = dev;
	hsdev->dma->id = pdev->id;

	/* Get SATA DMA interrupt number */
	hsdev->dma->irq = irq_of_parse_and_map(np, 1);
	if (hsdev->dma->irq == NO_IRQ) {
		dev_err(dev, "no SATA DMA irq\n");
		return -ENODEV;
	}

	/* Get physical SATA DMA register base address */
	hsdev->dma->regs = devm_platform_ioremap_resource(pdev, 1);
	if (IS_ERR(hsdev->dma->regs))
		return PTR_ERR(hsdev->dma->regs);

	/* Initialize AHB DMAC */
	return dw_dma_probe(hsdev->dma);
}
  221. static void sata_dwc_dma_exit_old(struct sata_dwc_device *hsdev)
  222. {
  223. if (!hsdev->dma)
  224. return;
  225. dw_dma_remove(hsdev->dma);
  226. }
  227. #endif
  228. static const char *get_prot_descript(u8 protocol)
  229. {
  230. switch (protocol) {
  231. case ATA_PROT_NODATA:
  232. return "ATA no data";
  233. case ATA_PROT_PIO:
  234. return "ATA PIO";
  235. case ATA_PROT_DMA:
  236. return "ATA DMA";
  237. case ATA_PROT_NCQ:
  238. return "ATA NCQ";
  239. case ATA_PROT_NCQ_NODATA:
  240. return "ATA NCQ no data";
  241. case ATAPI_PROT_NODATA:
  242. return "ATAPI no data";
  243. case ATAPI_PROT_PIO:
  244. return "ATAPI PIO";
  245. case ATAPI_PROT_DMA:
  246. return "ATAPI DMA";
  247. default:
  248. return "unknown";
  249. }
  250. }
/*
 * dmaengine completion callback for a slave-sg transfer. Runs with the
 * host lock taken here (callback context, not the SATA ISR), bumps the
 * two-interrupts-per-command counter and completes the qc once both the
 * DMAC and the SATA controller interrupt have been seen.
 */
static void dma_dwc_xfer_done(void *hsdev_instance)
{
	unsigned long flags;
	struct sata_dwc_device *hsdev = hsdev_instance;
	struct ata_host *host = (struct ata_host *)hsdev->host;
	struct ata_port *ap;
	struct sata_dwc_device_port *hsdevp;
	u8 tag = 0;
	unsigned int port = 0;	/* single-port controller, see SATA_DWC_MAX_PORTS */

	spin_lock_irqsave(&host->lock, flags);
	ap = host->ports[port];
	hsdevp = HSDEVP_FROM_AP(ap);
	tag = ap->link.active_tag;

	/*
	 * Each DMA command produces 2 interrupts. Only
	 * complete the command after both interrupts have been
	 * seen. (See sata_dwc_isr())
	 */
	hsdevp->dma_interrupt_count++;
	sata_dwc_clear_dmacr(hsdevp, tag);

	if (hsdevp->dma_pending[tag] == SATA_DWC_DMA_PENDING_NONE) {
		dev_err(ap->dev, "DMA not pending tag=0x%02x pending=%d\n",
			tag, hsdevp->dma_pending[tag]);
	}

	/* Even count: both halves of the transaction have signalled. */
	if ((hsdevp->dma_interrupt_count % 2) == 0)
		sata_dwc_dma_xfer_complete(ap);

	spin_unlock_irqrestore(&host->lock, flags);
}
  279. static struct dma_async_tx_descriptor *dma_dwc_xfer_setup(struct ata_queued_cmd *qc)
  280. {
  281. struct ata_port *ap = qc->ap;
  282. struct sata_dwc_device_port *hsdevp = HSDEVP_FROM_AP(ap);
  283. struct sata_dwc_device *hsdev = HSDEV_FROM_AP(ap);
  284. struct dma_slave_config sconf;
  285. struct dma_async_tx_descriptor *desc;
  286. if (qc->dma_dir == DMA_DEV_TO_MEM) {
  287. sconf.src_addr = hsdev->dmadr;
  288. sconf.device_fc = false;
  289. } else { /* DMA_MEM_TO_DEV */
  290. sconf.dst_addr = hsdev->dmadr;
  291. sconf.device_fc = false;
  292. }
  293. sconf.direction = qc->dma_dir;
  294. sconf.src_maxburst = AHB_DMA_BRST_DFLT / 4; /* in items */
  295. sconf.dst_maxburst = AHB_DMA_BRST_DFLT / 4; /* in items */
  296. sconf.src_addr_width = DMA_SLAVE_BUSWIDTH_4_BYTES;
  297. sconf.dst_addr_width = DMA_SLAVE_BUSWIDTH_4_BYTES;
  298. dmaengine_slave_config(hsdevp->chan, &sconf);
  299. /* Convert SG list to linked list of items (LLIs) for AHB DMA */
  300. desc = dmaengine_prep_slave_sg(hsdevp->chan, qc->sg, qc->n_elem,
  301. qc->dma_dir,
  302. DMA_PREP_INTERRUPT | DMA_CTRL_ACK);
  303. if (!desc)
  304. return NULL;
  305. desc->callback = dma_dwc_xfer_done;
  306. desc->callback_param = hsdev;
  307. dev_dbg(hsdev->dev, "%s sg: 0x%p, count: %d addr: %pa\n", __func__,
  308. qc->sg, qc->n_elem, &hsdev->dmadr);
  309. return desc;
  310. }
  311. static int sata_dwc_scr_read(struct ata_link *link, unsigned int scr, u32 *val)
  312. {
  313. if (scr > SCR_NOTIFICATION) {
  314. dev_err(link->ap->dev, "%s: Incorrect SCR offset 0x%02x\n",
  315. __func__, scr);
  316. return -EINVAL;
  317. }
  318. *val = sata_dwc_readl(link->ap->ioaddr.scr_addr + (scr * 4));
  319. dev_dbg(link->ap->dev, "%s: id=%d reg=%d val=0x%08x\n", __func__,
  320. link->ap->print_id, scr, *val);
  321. return 0;
  322. }
  323. static int sata_dwc_scr_write(struct ata_link *link, unsigned int scr, u32 val)
  324. {
  325. dev_dbg(link->ap->dev, "%s: id=%d reg=%d val=0x%08x\n", __func__,
  326. link->ap->print_id, scr, val);
  327. if (scr > SCR_NOTIFICATION) {
  328. dev_err(link->ap->dev, "%s: Incorrect SCR offset 0x%02x\n",
  329. __func__, scr);
  330. return -EINVAL;
  331. }
  332. sata_dwc_writel(link->ap->ioaddr.scr_addr + (scr * 4), val);
  333. return 0;
  334. }
  335. static void clear_serror(struct ata_port *ap)
  336. {
  337. u32 val;
  338. sata_dwc_scr_read(&ap->link, SCR_ERROR, &val);
  339. sata_dwc_scr_write(&ap->link, SCR_ERROR, val);
  340. }
/*
 * Acknowledge pending controller interrupts by writing the current intpr
 * contents back to intpr.
 * NOTE(review): the @bit parameter is never used — this write-back clears
 * ALL currently-pending bits, not just @bit. Presumably intpr is
 * write-1-to-clear and this is intentional, but it does not match the
 * function name; confirm against the DWC SATA databook before changing.
 */
static void clear_interrupt_bit(struct sata_dwc_device *hsdev, u32 bit)
{
	sata_dwc_writel(&hsdev->sata_dwc_regs->intpr,
			sata_dwc_readl(&hsdev->sata_dwc_regs->intpr));
}
  346. static u32 qcmd_tag_to_mask(u8 tag)
  347. {
  348. return 0x00000001 << (tag & 0x1f);
  349. }
  350. /* See ahci.c */
/*
 * Handle an error interrupt (SATA_DWC_INTPR_ERR): capture SError and the
 * taskfile status, clear the hardware error state, then hand the failure
 * to libata EH with a reset action. Called from sata_dwc_isr() with the
 * host lock held.
 */
static void sata_dwc_error_intr(struct ata_port *ap,
				struct sata_dwc_device *hsdev, uint intpr)
{
	struct sata_dwc_device_port *hsdevp = HSDEVP_FROM_AP(ap);
	struct ata_eh_info *ehi = &ap->link.eh_info;
	unsigned int err_mask = 0, action = 0;
	struct ata_queued_cmd *qc;
	u32 serror;
	u8 status, tag;

	ata_ehi_clear_desc(ehi);

	sata_dwc_scr_read(&ap->link, SCR_ERROR, &serror);
	status = ap->ops->sff_check_status(ap);

	tag = ap->link.active_tag;

	dev_err(ap->dev,
		"%s SCR_ERROR=0x%08x intpr=0x%08x status=0x%08x dma_intp=%d pending=%d issued=%d",
		__func__, serror, intpr, status, hsdevp->dma_interrupt_count,
		hsdevp->dma_pending[tag], hsdevp->cmd_issued[tag]);

	/* Clear error register and interrupt bit */
	clear_serror(ap);
	clear_interrupt_bit(hsdev, SATA_DWC_INTPR_ERR);

	/* This is the only error happening now. TODO check for exact error */
	err_mask |= AC_ERR_HOST_BUS;
	action |= ATA_EH_RESET;

	/* Pass this on to EH */
	ehi->serror |= serror;
	ehi->action |= action;

	/* Attribute the error to the active qc if there is one. */
	qc = ata_qc_from_tag(ap, tag);
	if (qc)
		qc->err_mask |= err_mask;
	else
		ehi->err_mask |= err_mask;

	ata_port_abort(ap);
}
  384. /*
  385. * Function : sata_dwc_isr
  386. * arguments : irq, void *dev_instance, struct pt_regs *regs
  387. * Return value : irqreturn_t - status of IRQ
  388. * This Interrupt handler called via port ops registered function.
  389. * .irq_handler = sata_dwc_isr
  390. */
static irqreturn_t sata_dwc_isr(int irq, void *dev_instance)
{
	struct ata_host *host = (struct ata_host *)dev_instance;
	struct sata_dwc_device *hsdev = HSDEV_FROM_HOST(host);
	struct ata_port *ap;
	struct ata_queued_cmd *qc;
	unsigned long flags;
	u8 status, tag;
	int handled, num_processed, port = 0;
	uint intpr, sactive, sactive2, tag_mask;
	struct sata_dwc_device_port *hsdevp;

	hsdev->sactive_issued = 0;

	spin_lock_irqsave(&host->lock, flags);

	/* Read the interrupt register */
	intpr = sata_dwc_readl(&hsdev->sata_dwc_regs->intpr);

	/* Single-port controller: everything below operates on port 0. */
	ap = host->ports[port];
	hsdevp = HSDEVP_FROM_AP(ap);

	dev_dbg(ap->dev, "%s intpr=0x%08x active_tag=%d\n", __func__, intpr,
		ap->link.active_tag);

	/* Check for error interrupt */
	if (intpr & SATA_DWC_INTPR_ERR) {
		sata_dwc_error_intr(ap, hsdev, intpr);
		handled = 1;
		goto DONE;
	}

	/* Check for DMA SETUP FIS (FP DMA) interrupt */
	if (intpr & SATA_DWC_INTPR_NEWFP) {
		clear_interrupt_bit(hsdev, SATA_DWC_INTPR_NEWFP);

		/* The device selected which queued command to service. */
		tag = (u8)(sata_dwc_readl(&hsdev->sata_dwc_regs->fptagr));
		dev_dbg(ap->dev, "%s: NEWFP tag=%d\n", __func__, tag);
		if (hsdevp->cmd_issued[tag] != SATA_DWC_CMD_ISSUED_PEND)
			dev_warn(ap->dev, "CMD tag=%d not pending?\n", tag);

		hsdev->sactive_issued |= qcmd_tag_to_mask(tag);

		qc = ata_qc_from_tag(ap, tag);
		if (unlikely(!qc)) {
			dev_err(ap->dev, "failed to get qc");
			handled = 1;
			goto DONE;
		}
		/*
		 * Start FP DMA for NCQ command. At this point the tag is the
		 * active tag. It is the tag that matches the command about to
		 * be completed.
		 */
		trace_ata_bmdma_start(ap, &qc->tf, tag);
		qc->ap->link.active_tag = tag;
		sata_dwc_bmdma_start_by_tag(qc, tag);

		handled = 1;
		goto DONE;
	}

	/* Tags we issued that are no longer marked active have completed. */
	sata_dwc_scr_read(&ap->link, SCR_ACTIVE, &sactive);
	tag_mask = (hsdev->sactive_issued | sactive) ^ sactive;

	/* If no sactive issued and tag_mask is zero then this is not NCQ */
	if (hsdev->sactive_issued == 0 && tag_mask == 0) {
		if (ap->link.active_tag == ATA_TAG_POISON)
			tag = 0;
		else
			tag = ap->link.active_tag;
		qc = ata_qc_from_tag(ap, tag);

		/* DEV interrupt w/ no active qc? */
		if (unlikely(!qc || (qc->tf.flags & ATA_TFLAG_POLLING))) {
			dev_err(ap->dev,
				"%s interrupt with no active qc qc=%p\n",
				__func__, qc);
			/* Read status anyway to acknowledge the device. */
			ap->ops->sff_check_status(ap);
			handled = 1;
			goto DONE;
		}
		status = ap->ops->sff_check_status(ap);

		qc->ap->link.active_tag = tag;
		hsdevp->cmd_issued[tag] = SATA_DWC_CMD_ISSUED_NOT;

		if (status & ATA_ERR) {
			dev_dbg(ap->dev, "interrupt ATA_ERR (0x%x)\n", status);
			sata_dwc_qc_complete(ap, qc);
			handled = 1;
			goto DONE;
		}

		dev_dbg(ap->dev, "%s non-NCQ cmd interrupt, protocol: %s\n",
			__func__, get_prot_descript(qc->tf.protocol));
DRVSTILLBUSY:
		if (ata_is_dma(qc->tf.protocol)) {
			/*
			 * Each DMA transaction produces 2 interrupts. The DMAC
			 * transfer complete interrupt and the SATA controller
			 * operation done interrupt. The command should be
			 * completed only after both interrupts are seen.
			 */
			hsdevp->dma_interrupt_count++;
			if (hsdevp->dma_pending[tag] == \
					SATA_DWC_DMA_PENDING_NONE) {
				dev_err(ap->dev,
					"%s: DMA not pending intpr=0x%08x status=0x%08x pending=%d\n",
					__func__, intpr, status,
					hsdevp->dma_pending[tag]);
			}

			if ((hsdevp->dma_interrupt_count % 2) == 0)
				sata_dwc_dma_xfer_complete(ap);
		} else if (ata_is_pio(qc->tf.protocol)) {
			ata_sff_hsm_move(ap, qc, status, 0);
			handled = 1;
			goto DONE;
		} else {
			/* Non-data command: retry if still busy. */
			if (unlikely(sata_dwc_qc_complete(ap, qc)))
				goto DRVSTILLBUSY;
		}

		handled = 1;
		goto DONE;
	}

	/*
	 * This is a NCQ command. At this point we need to figure out for which
	 * tags we have gotten a completion interrupt. One interrupt may serve
	 * as completion for more than one operation when commands are queued
	 * (NCQ). We need to process each completed command.
	 */

	 /* process completed commands */
	sata_dwc_scr_read(&ap->link, SCR_ACTIVE, &sactive);
	tag_mask = (hsdev->sactive_issued | sactive) ^ sactive;

	if (sactive != 0 || hsdev->sactive_issued > 1 || tag_mask > 1) {
		dev_dbg(ap->dev,
			"%s NCQ:sactive=0x%08x  sactive_issued=0x%08x tag_mask=0x%08x\n",
			__func__, sactive, hsdev->sactive_issued, tag_mask);
	}

	/* Completed tags must be a subset of the tags we issued. */
	if ((tag_mask | hsdev->sactive_issued) != hsdev->sactive_issued) {
		dev_warn(ap->dev,
			 "Bad tag mask?  sactive=0x%08x sactive_issued=0x%08x  tag_mask=0x%08x\n",
			 sactive, hsdev->sactive_issued, tag_mask);
	}

	/* read just to clear ... not bad if currently still busy */
	status = ap->ops->sff_check_status(ap);
	dev_dbg(ap->dev, "%s ATA status register=0x%x\n", __func__, status);

	tag = 0;
	num_processed = 0;
	while (tag_mask) {
		num_processed++;
		/* Advance to the next set bit in tag_mask. */
		while (!(tag_mask & 0x00000001)) {
			tag++;
			tag_mask <<= 1;
		}

		tag_mask &= (~0x00000001);

		qc = ata_qc_from_tag(ap, tag);
		if (unlikely(!qc)) {
			dev_err(ap->dev, "failed to get qc");
			handled = 1;
			goto DONE;
		}

		/* To be picked up by completion functions */
		qc->ap->link.active_tag = tag;
		hsdevp->cmd_issued[tag] = SATA_DWC_CMD_ISSUED_NOT;

		/* Let libata/scsi layers handle error */
		if (status & ATA_ERR) {
			dev_dbg(ap->dev, "%s ATA_ERR (0x%x)\n", __func__,
				status);
			sata_dwc_qc_complete(ap, qc);
			handled = 1;
			goto DONE;
		}

		/* Process completed command */
		dev_dbg(ap->dev, "%s NCQ command, protocol: %s\n", __func__,
			get_prot_descript(qc->tf.protocol));
		if (ata_is_dma(qc->tf.protocol)) {
			hsdevp->dma_interrupt_count++;
			if (hsdevp->dma_pending[tag] == \
					SATA_DWC_DMA_PENDING_NONE)
				dev_warn(ap->dev, "%s: DMA not pending?\n",
					 __func__);
			if ((hsdevp->dma_interrupt_count % 2) == 0)
				sata_dwc_dma_xfer_complete(ap);
		} else {
			if (unlikely(sata_dwc_qc_complete(ap, qc)))
				goto STILLBUSY;
		}
		continue;

STILLBUSY:
		ap->stats.idle_irq++;
		dev_warn(ap->dev, "STILL BUSY IRQ ata%d: irq trap\n",
			 ap->print_id);
	} /* while tag_mask */

	/*
	 * Check to see if any commands completed while we were processing our
	 * initial set of completed commands (read status clears interrupts,
	 * so we might miss a completed command interrupt if one came in while
	 * we were processing --we read status as part of processing a completed
	 * command).
	 */
	sata_dwc_scr_read(&ap->link, SCR_ACTIVE, &sactive2);
	if (sactive2 != sactive) {
		dev_dbg(ap->dev,
			"More completed - sactive=0x%x sactive2=0x%x\n",
			sactive, sactive2);
	}
	handled = 1;

DONE:
	spin_unlock_irqrestore(&host->lock, flags);
	return IRQ_RETVAL(handled);
}
/*
 * Disable the DMA channel (TX or RX, per the tag's pending direction) in
 * the controller's DMACR register, preserving TMOD_TXCHEN as required by
 * the TX_CLEAR/RX_CLEAR macros.
 */
static void sata_dwc_clear_dmacr(struct sata_dwc_device_port *hsdevp, u8 tag)
{
	struct sata_dwc_device *hsdev = HSDEV_FROM_HSDEVP(hsdevp);
	u32 dmacr = sata_dwc_readl(&hsdev->sata_dwc_regs->dmacr);

	if (hsdevp->dma_pending[tag] == SATA_DWC_DMA_PENDING_RX) {
		dmacr = SATA_DWC_DMACR_RX_CLEAR(dmacr);
		sata_dwc_writel(&hsdev->sata_dwc_regs->dmacr, dmacr);
	} else if (hsdevp->dma_pending[tag] == SATA_DWC_DMA_PENDING_TX) {
		dmacr = SATA_DWC_DMACR_TX_CLEAR(dmacr);
		sata_dwc_writel(&hsdev->sata_dwc_regs->dmacr, dmacr);
	} else {
		/*
		 * This should not happen, it indicates the driver is out of
		 * sync. If it does happen, clear dmacr anyway.
		 */
		dev_err(hsdev->dev,
			"%s DMA protocol RX and TX DMA not pending tag=0x%02x pending=%d dmacr: 0x%08x\n",
			__func__, tag, hsdevp->dma_pending[tag], dmacr);

		/* Clear all transactions */
		sata_dwc_writel(&hsdev->sata_dwc_regs->dmacr,
				SATA_DWC_DMACR_TXRXCH_CLEAR);
	}
}
/*
 * Finish the qc identified by the link's active tag once both halves of
 * a DMA transaction have interrupted. Clears the tag's dma_pending state
 * and poisons active_tag for DMA protocols. Called with the host lock
 * held (from sata_dwc_isr() or dma_dwc_xfer_done()).
 */
static void sata_dwc_dma_xfer_complete(struct ata_port *ap)
{
	struct ata_queued_cmd *qc;
	struct sata_dwc_device_port *hsdevp = HSDEVP_FROM_AP(ap);
	struct sata_dwc_device *hsdev = HSDEV_FROM_AP(ap);
	u8 tag = 0;

	tag = ap->link.active_tag;
	qc = ata_qc_from_tag(ap, tag);
	if (!qc) {
		dev_err(ap->dev, "failed to get qc");
		return;
	}

	if (ata_is_dma(qc->tf.protocol)) {
		/* dma_pending should have been set by bmdma_start. */
		if (hsdevp->dma_pending[tag] == SATA_DWC_DMA_PENDING_NONE) {
			dev_err(ap->dev,
				"%s DMA protocol RX and TX DMA not pending dmacr: 0x%08x\n",
				__func__,
				sata_dwc_readl(&hsdev->sata_dwc_regs->dmacr));
		}

		hsdevp->dma_pending[tag] = SATA_DWC_DMA_PENDING_NONE;
		sata_dwc_qc_complete(ap, qc);
		ap->link.active_tag = ATA_TAG_POISON;
	} else {
		sata_dwc_qc_complete(ap, qc);
	}
}
/*
 * Clear the tag's bookkeeping bits and hand the qc back to libata.
 * Always returns 0 (callers loop on a non-zero "still busy" return that
 * can never currently occur).
 * NOTE(review): @status is logged below but never assigned, so the debug
 * message always prints status=0x00.
 */
static int sata_dwc_qc_complete(struct ata_port *ap, struct ata_queued_cmd *qc)
{
	u8 status = 0;
	u32 mask = 0x0;
	u8 tag = qc->hw_tag;
	struct sata_dwc_device *hsdev = HSDEV_FROM_AP(ap);
	struct sata_dwc_device_port *hsdevp = HSDEVP_FROM_AP(ap);

	hsdev->sactive_queued = 0;

	/* DMA for this tag should have completed before we get here. */
	if (hsdevp->dma_pending[tag] == SATA_DWC_DMA_PENDING_TX)
		dev_err(ap->dev, "TX DMA PENDING\n");
	else if (hsdevp->dma_pending[tag] == SATA_DWC_DMA_PENDING_RX)
		dev_err(ap->dev, "RX DMA PENDING\n");
	dev_dbg(ap->dev,
		"QC complete cmd=0x%02x status=0x%02x ata%u: protocol=%d\n",
		qc->tf.command, status, ap->print_id, qc->tf.protocol);

	/* clear active bit */
	mask = (~(qcmd_tag_to_mask(tag)));
	hsdev->sactive_queued = hsdev->sactive_queued & mask;
	hsdev->sactive_issued = hsdev->sactive_issued & mask;
	ata_qc_complete(qc);
	return 0;
}
  656. static void sata_dwc_enable_interrupts(struct sata_dwc_device *hsdev)
  657. {
  658. /* Enable selective interrupts by setting the interrupt maskregister*/
  659. sata_dwc_writel(&hsdev->sata_dwc_regs->intmr,
  660. SATA_DWC_INTMR_ERRM |
  661. SATA_DWC_INTMR_NEWFPM |
  662. SATA_DWC_INTMR_PMABRTM |
  663. SATA_DWC_INTMR_DMATM);
  664. /*
  665. * Unmask the error bits that should trigger an error interrupt by
  666. * setting the error mask register.
  667. */
  668. sata_dwc_writel(&hsdev->sata_dwc_regs->errmr, SATA_DWC_SERROR_ERR_BITS);
  669. dev_dbg(hsdev->dev, "%s: INTMR = 0x%08x, ERRMR = 0x%08x\n",
  670. __func__, sata_dwc_readl(&hsdev->sata_dwc_regs->intmr),
  671. sata_dwc_readl(&hsdev->sata_dwc_regs->errmr));
  672. }
  673. static void sata_dwc_setup_port(struct ata_ioports *port, void __iomem *base)
  674. {
  675. port->cmd_addr = base + 0x00;
  676. port->data_addr = base + 0x00;
  677. port->error_addr = base + 0x04;
  678. port->feature_addr = base + 0x04;
  679. port->nsect_addr = base + 0x08;
  680. port->lbal_addr = base + 0x0c;
  681. port->lbam_addr = base + 0x10;
  682. port->lbah_addr = base + 0x14;
  683. port->device_addr = base + 0x18;
  684. port->command_addr = base + 0x1c;
  685. port->status_addr = base + 0x1c;
  686. port->altstatus_addr = base + 0x20;
  687. port->ctl_addr = base + 0x20;
  688. }
/*
 * Acquire the port's DMA channel: prefer the generic dmaengine "sata-dma"
 * binding; fall back to the legacy embedded DW DMAC path when the device
 * tree has no "dmas" property. Returns 0 or a negative errno.
 */
static int sata_dwc_dma_get_channel(struct sata_dwc_device_port *hsdevp)
{
	struct sata_dwc_device *hsdev = hsdevp->hsdev;
	struct device *dev = hsdev->dev;

#ifdef CONFIG_SATA_DWC_OLD_DMA
	if (!of_find_property(dev->of_node, "dmas", NULL))
		return sata_dwc_dma_get_channel_old(hsdevp);
#endif

	hsdevp->chan = dma_request_chan(dev, "sata-dma");
	if (IS_ERR(hsdevp->chan)) {
		dev_err(dev, "failed to allocate dma channel: %ld\n",
			PTR_ERR(hsdevp->chan));
		return PTR_ERR(hsdevp->chan);
	}

	return 0;
}
  705. /*
  706. * Function : sata_dwc_port_start
  707. * arguments : struct ata_ioports *port
  708. * Return value : returns 0 if success, error code otherwise
  709. * This function allocates the scatter gather LLI table for AHB DMA
  710. */
  711. static int sata_dwc_port_start(struct ata_port *ap)
  712. {
  713. int err = 0;
  714. struct sata_dwc_device *hsdev;
  715. struct sata_dwc_device_port *hsdevp = NULL;
  716. struct device *pdev;
  717. int i;
  718. hsdev = HSDEV_FROM_AP(ap);
  719. dev_dbg(ap->dev, "%s: port_no=%d\n", __func__, ap->port_no);
  720. hsdev->host = ap->host;
  721. pdev = ap->host->dev;
  722. if (!pdev) {
  723. dev_err(ap->dev, "%s: no ap->host->dev\n", __func__);
  724. err = -ENODEV;
  725. goto CLEANUP;
  726. }
  727. /* Allocate Port Struct */
  728. hsdevp = kzalloc(sizeof(*hsdevp), GFP_KERNEL);
  729. if (!hsdevp) {
  730. err = -ENOMEM;
  731. goto CLEANUP;
  732. }
  733. hsdevp->hsdev = hsdev;
  734. err = sata_dwc_dma_get_channel(hsdevp);
  735. if (err)
  736. goto CLEANUP_ALLOC;
  737. err = phy_power_on(hsdev->phy);
  738. if (err)
  739. goto CLEANUP_ALLOC;
  740. for (i = 0; i < SATA_DWC_QCMD_MAX; i++)
  741. hsdevp->cmd_issued[i] = SATA_DWC_CMD_ISSUED_NOT;
  742. ap->bmdma_prd = NULL; /* set these so libata doesn't use them */
  743. ap->bmdma_prd_dma = 0;
  744. if (ap->port_no == 0) {
  745. dev_dbg(ap->dev, "%s: clearing TXCHEN, RXCHEN in DMAC\n",
  746. __func__);
  747. sata_dwc_writel(&hsdev->sata_dwc_regs->dmacr,
  748. SATA_DWC_DMACR_TXRXCH_CLEAR);
  749. dev_dbg(ap->dev, "%s: setting burst size in DBTSR\n",
  750. __func__);
  751. sata_dwc_writel(&hsdev->sata_dwc_regs->dbtsr,
  752. (SATA_DWC_DBTSR_MWR(AHB_DMA_BRST_DFLT) |
  753. SATA_DWC_DBTSR_MRD(AHB_DMA_BRST_DFLT)));
  754. }
  755. /* Clear any error bits before libata starts issuing commands */
  756. clear_serror(ap);
  757. ap->private_data = hsdevp;
  758. dev_dbg(ap->dev, "%s: done\n", __func__);
  759. return 0;
  760. CLEANUP_ALLOC:
  761. kfree(hsdevp);
  762. CLEANUP:
  763. dev_dbg(ap->dev, "%s: fail. ap->id = %d\n", __func__, ap->print_id);
  764. return err;
  765. }
  766. static void sata_dwc_port_stop(struct ata_port *ap)
  767. {
  768. struct sata_dwc_device_port *hsdevp = HSDEVP_FROM_AP(ap);
  769. struct sata_dwc_device *hsdev = HSDEV_FROM_AP(ap);
  770. dev_dbg(ap->dev, "%s: ap->id = %d\n", __func__, ap->print_id);
  771. dmaengine_terminate_sync(hsdevp->chan);
  772. dma_release_channel(hsdevp->chan);
  773. phy_power_off(hsdev->phy);
  774. kfree(hsdevp);
  775. ap->private_data = NULL;
  776. }
/*
 * Function : sata_dwc_exec_command_by_tag
 * arguments : ata_port *ap, ata_taskfile *tf, u8 tag, u32 cmd_issued
 * Return value : None
 * This function keeps track of individual command tag ids and calls
 * ata_exec_command in libata
 */
static void sata_dwc_exec_command_by_tag(struct ata_port *ap,
					 struct ata_taskfile *tf,
					 u8 tag, u32 cmd_issued)
{
	struct sata_dwc_device_port *hsdevp = HSDEVP_FROM_AP(ap);

	/* Record the issue state for this tag; the DMA start path reads it. */
	hsdevp->cmd_issued[tag] = cmd_issued;

	/*
	 * Clear SError before executing a new command.
	 * sata_dwc_scr_write and read can not be used here. Clearing the PM
	 * managed SError register for the disk needs to be done before the
	 * task file is loaded.
	 */
	clear_serror(ap);

	ata_sff_exec_command(ap, tf);
}
/* Mark the command for @tag as pending and hand the taskfile to the device. */
static void sata_dwc_bmdma_setup_by_tag(struct ata_queued_cmd *qc, u8 tag)
{
	sata_dwc_exec_command_by_tag(qc->ap, &qc->tf, tag,
				     SATA_DWC_CMD_ISSUED_PEND);
}
  804. static void sata_dwc_bmdma_setup(struct ata_queued_cmd *qc)
  805. {
  806. u8 tag = qc->hw_tag;
  807. if (!ata_is_ncq(qc->tf.protocol))
  808. tag = 0;
  809. sata_dwc_bmdma_setup_by_tag(qc, tag);
  810. }
  811. static void sata_dwc_bmdma_start_by_tag(struct ata_queued_cmd *qc, u8 tag)
  812. {
  813. int start_dma;
  814. u32 reg;
  815. struct sata_dwc_device *hsdev = HSDEV_FROM_QC(qc);
  816. struct ata_port *ap = qc->ap;
  817. struct sata_dwc_device_port *hsdevp = HSDEVP_FROM_AP(ap);
  818. struct dma_async_tx_descriptor *desc = hsdevp->desc[tag];
  819. int dir = qc->dma_dir;
  820. if (hsdevp->cmd_issued[tag] != SATA_DWC_CMD_ISSUED_NOT) {
  821. start_dma = 1;
  822. if (dir == DMA_TO_DEVICE)
  823. hsdevp->dma_pending[tag] = SATA_DWC_DMA_PENDING_TX;
  824. else
  825. hsdevp->dma_pending[tag] = SATA_DWC_DMA_PENDING_RX;
  826. } else {
  827. dev_err(ap->dev,
  828. "%s: Command not pending cmd_issued=%d (tag=%d) DMA NOT started\n",
  829. __func__, hsdevp->cmd_issued[tag], tag);
  830. start_dma = 0;
  831. }
  832. if (start_dma) {
  833. sata_dwc_scr_read(&ap->link, SCR_ERROR, &reg);
  834. if (reg & SATA_DWC_SERROR_ERR_BITS) {
  835. dev_err(ap->dev, "%s: ****** SError=0x%08x ******\n",
  836. __func__, reg);
  837. }
  838. if (dir == DMA_TO_DEVICE)
  839. sata_dwc_writel(&hsdev->sata_dwc_regs->dmacr,
  840. SATA_DWC_DMACR_TXCHEN);
  841. else
  842. sata_dwc_writel(&hsdev->sata_dwc_regs->dmacr,
  843. SATA_DWC_DMACR_RXCHEN);
  844. /* Enable AHB DMA transfer on the specified channel */
  845. dmaengine_submit(desc);
  846. dma_async_issue_pending(hsdevp->chan);
  847. }
  848. }
  849. static void sata_dwc_bmdma_start(struct ata_queued_cmd *qc)
  850. {
  851. u8 tag = qc->hw_tag;
  852. if (!ata_is_ncq(qc->tf.protocol))
  853. tag = 0;
  854. sata_dwc_bmdma_start_by_tag(qc, tag);
  855. }
  856. static unsigned int sata_dwc_qc_issue(struct ata_queued_cmd *qc)
  857. {
  858. u32 sactive;
  859. u8 tag = qc->hw_tag;
  860. struct ata_port *ap = qc->ap;
  861. struct sata_dwc_device_port *hsdevp = HSDEVP_FROM_AP(ap);
  862. if (!ata_is_ncq(qc->tf.protocol))
  863. tag = 0;
  864. if (ata_is_dma(qc->tf.protocol)) {
  865. hsdevp->desc[tag] = dma_dwc_xfer_setup(qc);
  866. if (!hsdevp->desc[tag])
  867. return AC_ERR_SYSTEM;
  868. } else {
  869. hsdevp->desc[tag] = NULL;
  870. }
  871. if (ata_is_ncq(qc->tf.protocol)) {
  872. sata_dwc_scr_read(&ap->link, SCR_ACTIVE, &sactive);
  873. sactive |= (0x00000001 << tag);
  874. sata_dwc_scr_write(&ap->link, SCR_ACTIVE, sactive);
  875. trace_ata_tf_load(ap, &qc->tf);
  876. ap->ops->sff_tf_load(ap, &qc->tf);
  877. trace_ata_exec_command(ap, &qc->tf, tag);
  878. sata_dwc_exec_command_by_tag(ap, &qc->tf, tag,
  879. SATA_DWC_CMD_ISSUED_PEND);
  880. } else {
  881. return ata_bmdma_qc_issue(qc);
  882. }
  883. return 0;
  884. }
/* Error handling is delegated entirely to the generic SFF error handler. */
static void sata_dwc_error_handler(struct ata_port *ap)
{
	ata_sff_error_handler(ap);
}
/*
 * Hard-reset the link via the generic SFF helper, then restore the
 * controller's interrupt enables and DMA configuration registers,
 * which are reprogrammed after every reset.
 */
static int sata_dwc_hardreset(struct ata_link *link, unsigned int *class,
			      unsigned long deadline)
{
	struct sata_dwc_device *hsdev = HSDEV_FROM_AP(link->ap);
	int ret;

	ret = sata_sff_hardreset(link, class, deadline);

	sata_dwc_enable_interrupts(hsdev);

	/* Reconfigure the DMA control register */
	sata_dwc_writel(&hsdev->sata_dwc_regs->dmacr,
			SATA_DWC_DMACR_TXRXCH_CLEAR);

	/* Reconfigure the DMA Burst Transaction Size register */
	sata_dwc_writel(&hsdev->sata_dwc_regs->dbtsr,
			SATA_DWC_DBTSR_MWR(AHB_DMA_BRST_DFLT) |
			SATA_DWC_DBTSR_MRD(AHB_DMA_BRST_DFLT));

	return ret;
}
/* Intentional no-op: there is no device selection to perform. */
static void sata_dwc_dev_select(struct ata_port *ap, unsigned int device)
{
	/* SATA DWC is master only */
}
/*
 * scsi mid-layer and libata interface structures
 */
static struct scsi_host_template sata_dwc_sht = {
	ATA_NCQ_SHT(DRV_NAME),
	/*
	 * test-only: Currently this driver doesn't handle NCQ
	 * correctly. We enable NCQ but set the queue depth to a
	 * max of 1. This will get fixed in a future release.
	 */
	.sg_tablesize		= LIBATA_MAX_PRD,
	/* .can_queue		= ATA_MAX_QUEUE, */
	/*
	 * Make sure a LLI block is not created that will span 8K max FIS
	 * boundary. If the block spans such a FIS boundary, there is a chance
	 * that a DMA burst will cross that boundary -- this results in an
	 * error in the host controller.
	 */
	.dma_boundary		= 0x1fff /* ATA_DMA_BOUNDARY */,
};
/* libata port operations; inherits defaults from the generic SFF ops. */
static struct ata_port_operations sata_dwc_ops = {
	.inherits		= &ata_sff_port_ops,

	.error_handler		= sata_dwc_error_handler,
	.hardreset		= sata_dwc_hardreset,

	.qc_issue		= sata_dwc_qc_issue,

	.scr_read		= sata_dwc_scr_read,
	.scr_write		= sata_dwc_scr_write,

	.port_start		= sata_dwc_port_start,
	.port_stop		= sata_dwc_port_stop,

	.sff_dev_select		= sata_dwc_dev_select,

	.bmdma_setup		= sata_dwc_bmdma_setup,
	.bmdma_start		= sata_dwc_bmdma_start,
};
/* Single port description: SATA with NCQ, PIO4 and up to UDMA/133. */
static const struct ata_port_info sata_dwc_port_info[] = {
	{
		.flags = ATA_FLAG_SATA | ATA_FLAG_NCQ,
		.pio_mask = ATA_PIO4,
		.udma_mask = ATA_UDMA6,
		.port_ops = &sata_dwc_ops,
	},
};
  950. static int sata_dwc_probe(struct platform_device *ofdev)
  951. {
  952. struct device *dev = &ofdev->dev;
  953. struct device_node *np = dev->of_node;
  954. struct sata_dwc_device *hsdev;
  955. u32 idr, versionr;
  956. char *ver = (char *)&versionr;
  957. void __iomem *base;
  958. int err = 0;
  959. int irq;
  960. struct ata_host *host;
  961. struct ata_port_info pi = sata_dwc_port_info[0];
  962. const struct ata_port_info *ppi[] = { &pi, NULL };
  963. struct resource *res;
  964. /* Allocate DWC SATA device */
  965. host = ata_host_alloc_pinfo(dev, ppi, SATA_DWC_MAX_PORTS);
  966. hsdev = devm_kzalloc(dev, sizeof(*hsdev), GFP_KERNEL);
  967. if (!host || !hsdev)
  968. return -ENOMEM;
  969. host->private_data = hsdev;
  970. /* Ioremap SATA registers */
  971. base = devm_platform_get_and_ioremap_resource(ofdev, 0, &res);
  972. if (IS_ERR(base))
  973. return PTR_ERR(base);
  974. dev_dbg(dev, "ioremap done for SATA register address\n");
  975. /* Synopsys DWC SATA specific Registers */
  976. hsdev->sata_dwc_regs = base + SATA_DWC_REG_OFFSET;
  977. hsdev->dmadr = res->start + SATA_DWC_REG_OFFSET + offsetof(struct sata_dwc_regs, dmadr);
  978. /* Setup port */
  979. host->ports[0]->ioaddr.cmd_addr = base;
  980. host->ports[0]->ioaddr.scr_addr = base + SATA_DWC_SCR_OFFSET;
  981. sata_dwc_setup_port(&host->ports[0]->ioaddr, base);
  982. /* Read the ID and Version Registers */
  983. idr = sata_dwc_readl(&hsdev->sata_dwc_regs->idr);
  984. versionr = sata_dwc_readl(&hsdev->sata_dwc_regs->versionr);
  985. dev_notice(dev, "id %d, controller version %c.%c%c\n", idr, ver[0], ver[1], ver[2]);
  986. /* Save dev for later use in dev_xxx() routines */
  987. hsdev->dev = dev;
  988. /* Enable SATA Interrupts */
  989. sata_dwc_enable_interrupts(hsdev);
  990. /* Get SATA interrupt number */
  991. irq = irq_of_parse_and_map(np, 0);
  992. if (irq == NO_IRQ) {
  993. dev_err(dev, "no SATA DMA irq\n");
  994. return -ENODEV;
  995. }
  996. #ifdef CONFIG_SATA_DWC_OLD_DMA
  997. if (!of_find_property(np, "dmas", NULL)) {
  998. err = sata_dwc_dma_init_old(ofdev, hsdev);
  999. if (err)
  1000. return err;
  1001. }
  1002. #endif
  1003. hsdev->phy = devm_phy_optional_get(dev, "sata-phy");
  1004. if (IS_ERR(hsdev->phy))
  1005. return PTR_ERR(hsdev->phy);
  1006. err = phy_init(hsdev->phy);
  1007. if (err)
  1008. goto error_out;
  1009. /*
  1010. * Now, register with libATA core, this will also initiate the
  1011. * device discovery process, invoking our port_start() handler &
  1012. * error_handler() to execute a dummy Softreset EH session
  1013. */
  1014. err = ata_host_activate(host, irq, sata_dwc_isr, 0, &sata_dwc_sht);
  1015. if (err)
  1016. dev_err(dev, "failed to activate host");
  1017. return 0;
  1018. error_out:
  1019. phy_exit(hsdev->phy);
  1020. return err;
  1021. }
/*
 * Unbind the driver: detach the libata host, shut down the PHY and,
 * when the legacy DMA backend was initialized, release its resources.
 */
static int sata_dwc_remove(struct platform_device *ofdev)
{
	struct device *dev = &ofdev->dev;
	struct ata_host *host = dev_get_drvdata(dev);
	struct sata_dwc_device *hsdev = host->private_data;

	ata_host_detach(host);

	phy_exit(hsdev->phy);

#ifdef CONFIG_SATA_DWC_OLD_DMA
	/* Free SATA DMA resources */
	sata_dwc_dma_exit_old(hsdev);
#endif

	dev_dbg(dev, "done\n");
	return 0;
}
/* Device-tree match table: binds to the AMCC 460EX SATA node. */
static const struct of_device_id sata_dwc_match[] = {
	{ .compatible = "amcc,sata-460ex", },
	{}
};
MODULE_DEVICE_TABLE(of, sata_dwc_match);
/* Platform driver glue and module metadata. */
static struct platform_driver sata_dwc_driver = {
	.driver = {
		.name = DRV_NAME,
		.of_match_table = sata_dwc_match,
	},
	.probe = sata_dwc_probe,
	.remove = sata_dwc_remove,
};

module_platform_driver(sata_dwc_driver);

MODULE_LICENSE("GPL");
MODULE_AUTHOR("Mark Miesfeld <[email protected]>");
MODULE_DESCRIPTION("DesignWare Cores SATA controller low level driver");
MODULE_VERSION(DRV_VERSION);