fsl_rmu.c

// SPDX-License-Identifier: GPL-2.0-or-later
/*
 * Freescale MPC85xx/MPC86xx RapidIO RMU support
 *
 * Copyright 2009 Sysgo AG
 * Thomas Moll <[email protected]>
 * - fixed maintenance access routines, check for aligned access
 *
 * Copyright 2009 Integrated Device Technology, Inc.
 * Alex Bounine <[email protected]>
 * - Added Port-Write message handling
 * - Added Machine Check exception handling
 *
 * Copyright (C) 2007, 2008, 2010, 2011 Freescale Semiconductor, Inc.
 * Zhang Wei <[email protected]>
 * Lian Minghuan-B31939 <[email protected]>
 * Liu Gang <[email protected]>
 *
 * Copyright 2005 MontaVista Software, Inc.
 * Matt Porter <[email protected]>
 */

#include <linux/types.h>
#include <linux/dma-mapping.h>
#include <linux/interrupt.h>
#include <linux/of_irq.h>
#include <linux/of_platform.h>
#include <linux/slab.h>

#include "fsl_rio.h"

#define GET_RMM_HANDLE(mport) \
        (((struct rio_priv *)(mport->priv))->rmm_handle)

/* RapidIO message unit, doorbell and port-write IRQs, read from the device tree */
#define IRQ_RIO_PW(m)           (((struct fsl_rio_pw *)(m))->pwirq)
#define IRQ_RIO_BELL(m)         (((struct fsl_rio_dbell *)(m))->bellirq)
#define IRQ_RIO_TX(m)           (((struct fsl_rmu *)(GET_RMM_HANDLE(m)))->txirq)
#define IRQ_RIO_RX(m)           (((struct fsl_rmu *)(GET_RMM_HANDLE(m)))->rxirq)

#define RIO_MIN_TX_RING_SIZE    2
#define RIO_MAX_TX_RING_SIZE    2048
#define RIO_MIN_RX_RING_SIZE    2
#define RIO_MAX_RX_RING_SIZE    2048

#define RIO_IPWMR_SEN           0x00100000
#define RIO_IPWMR_QFIE          0x00000100
#define RIO_IPWMR_EIE           0x00000020
#define RIO_IPWMR_CQ            0x00000002
#define RIO_IPWMR_PWE           0x00000001

#define RIO_IPWSR_QF            0x00100000
#define RIO_IPWSR_TE            0x00000080
#define RIO_IPWSR_QFI           0x00000010
#define RIO_IPWSR_PWD           0x00000008
#define RIO_IPWSR_PWB           0x00000004

#define RIO_EPWISR              0x10010
/* EPWISR Error match value */
#define RIO_EPWISR_PINT1        0x80000000
#define RIO_EPWISR_PINT2        0x40000000
#define RIO_EPWISR_MU           0x00000002
#define RIO_EPWISR_PW           0x00000001

#define IPWSR_CLEAR             0x98
#define OMSR_CLEAR              0x1cb3
#define IMSR_CLEAR              0x491
#define IDSR_CLEAR              0x91
#define ODSR_CLEAR              0x1c00
#define LTLEECSR_ENABLE_ALL     0xFFC000FC
#define RIO_LTLEECSR            0x060c

#define RIO_IM0SR               0x64
#define RIO_IM1SR               0x164
#define RIO_OM0SR               0x4
#define RIO_OM1SR               0x104

#define RIO_DBELL_WIN_SIZE      0x1000

#define RIO_MSG_OMR_MUI         0x00000002
#define RIO_MSG_OSR_TE          0x00000080
#define RIO_MSG_OSR_QOI         0x00000020
#define RIO_MSG_OSR_QFI         0x00000010
#define RIO_MSG_OSR_MUB         0x00000004
#define RIO_MSG_OSR_EOMI        0x00000002
#define RIO_MSG_OSR_QEI         0x00000001

#define RIO_MSG_IMR_MI          0x00000002
#define RIO_MSG_ISR_TE          0x00000080
#define RIO_MSG_ISR_QFI         0x00000010
#define RIO_MSG_ISR_DIQI        0x00000001

#define RIO_MSG_DESC_SIZE       32
#define RIO_MSG_BUFFER_SIZE     4096

#define DOORBELL_DMR_DI         0x00000002
#define DOORBELL_DSR_TE         0x00000080
#define DOORBELL_DSR_QFI        0x00000010
#define DOORBELL_DSR_DIQI       0x00000001
#define DOORBELL_MESSAGE_SIZE   0x08

static DEFINE_SPINLOCK(fsl_rio_doorbell_lock);

struct rio_msg_regs {
        u32 omr;
        u32 osr;
        u32 pad1;
        u32 odqdpar;
        u32 pad2;
        u32 osar;
        u32 odpr;
        u32 odatr;
        u32 odcr;
        u32 pad3;
        u32 odqepar;
        u32 pad4[13];
        u32 imr;
        u32 isr;
        u32 pad5;
        u32 ifqdpar;
        u32 pad6;
        u32 ifqepar;
};

struct rio_dbell_regs {
        u32 odmr;
        u32 odsr;
        u32 pad1[4];
        u32 oddpr;
        u32 oddatr;
        u32 pad2[3];
        u32 odretcr;
        u32 pad3[12];
        u32 dmr;
        u32 dsr;
        u32 pad4;
        u32 dqdpar;
        u32 pad5;
        u32 dqepar;
};

struct rio_pw_regs {
        u32 pwmr;
        u32 pwsr;
        u32 epwqbar;
        u32 pwqbar;
};

struct rio_tx_desc {
        u32 pad1;
        u32 saddr;
        u32 dport;
        u32 dattr;
        u32 pad2;
        u32 pad3;
        u32 dwcnt;
        u32 pad4;
};

struct rio_msg_tx_ring {
        void *virt;
        dma_addr_t phys;
        void *virt_buffer[RIO_MAX_TX_RING_SIZE];
        dma_addr_t phys_buffer[RIO_MAX_TX_RING_SIZE];
        int tx_slot;
        int size;
        void *dev_id;
};

struct rio_msg_rx_ring {
        void *virt;
        dma_addr_t phys;
        void *virt_buffer[RIO_MAX_RX_RING_SIZE];
        int rx_slot;
        int size;
        void *dev_id;
};

struct fsl_rmu {
        struct rio_msg_regs __iomem *msg_regs;
        struct rio_msg_tx_ring msg_tx_ring;
        struct rio_msg_rx_ring msg_rx_ring;
        int txirq;
        int rxirq;
};

struct rio_dbell_msg {
        u16 pad1;
        u16 tid;
        u16 sid;
        u16 info;
};

/**
 * fsl_rio_tx_handler - MPC85xx outbound message interrupt handler
 * @irq: Linux interrupt number
 * @dev_instance: Pointer to interrupt-specific data
 *
 * Handles outbound message interrupts. Executes a registered outbound
 * mailbox event handler and acks the interrupt occurrence.
 */
static irqreturn_t
fsl_rio_tx_handler(int irq, void *dev_instance)
{
        int osr;
        struct rio_mport *port = (struct rio_mport *)dev_instance;
        struct fsl_rmu *rmu = GET_RMM_HANDLE(port);

        osr = in_be32(&rmu->msg_regs->osr);

        if (osr & RIO_MSG_OSR_TE) {
                pr_info("RIO: outbound message transmission error\n");
                out_be32(&rmu->msg_regs->osr, RIO_MSG_OSR_TE);
                goto out;
        }

        if (osr & RIO_MSG_OSR_QOI) {
                pr_info("RIO: outbound message queue overflow\n");
                out_be32(&rmu->msg_regs->osr, RIO_MSG_OSR_QOI);
                goto out;
        }

        if (osr & RIO_MSG_OSR_EOMI) {
                u32 dqp = in_be32(&rmu->msg_regs->odqdpar);
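                /* Descriptors are RIO_MSG_DESC_SIZE (32) bytes each, so the
                 * dequeue pointer's offset into the ring, shifted right by 5,
                 * gives the slot of the just-completed message.
                 */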
                int slot = (dqp - rmu->msg_tx_ring.phys) >> 5;

                if (port->outb_msg[0].mcback != NULL) {
                        port->outb_msg[0].mcback(port, rmu->msg_tx_ring.dev_id,
                                        -1, slot);
                }
                /* Ack the end-of-message interrupt */
                out_be32(&rmu->msg_regs->osr, RIO_MSG_OSR_EOMI);
        }

out:
        return IRQ_HANDLED;
}

/**
 * fsl_rio_rx_handler - MPC85xx inbound message interrupt handler
 * @irq: Linux interrupt number
 * @dev_instance: Pointer to interrupt-specific data
 *
 * Handles inbound message interrupts. Executes a registered inbound
 * mailbox event handler and acks the interrupt occurrence.
 */
static irqreturn_t
fsl_rio_rx_handler(int irq, void *dev_instance)
{
        int isr;
        struct rio_mport *port = (struct rio_mport *)dev_instance;
        struct fsl_rmu *rmu = GET_RMM_HANDLE(port);

        isr = in_be32(&rmu->msg_regs->isr);

        if (isr & RIO_MSG_ISR_TE) {
                pr_info("RIO: inbound message reception error\n");
                out_be32((void *)&rmu->msg_regs->isr, RIO_MSG_ISR_TE);
                goto out;
        }

        /* XXX Need to check/dispatch until queue empty */
        if (isr & RIO_MSG_ISR_DIQI) {
                /*
                 * Can receive messages for any mailbox/letter to that
                 * mailbox destination. So, make the callback with an
                 * unknown/invalid mailbox number argument.
                 */
                if (port->inb_msg[0].mcback != NULL)
                        port->inb_msg[0].mcback(port, rmu->msg_rx_ring.dev_id,
                                        -1, -1);

                /* Ack the queueing interrupt */
                out_be32(&rmu->msg_regs->isr, RIO_MSG_ISR_DIQI);
        }

out:
        return IRQ_HANDLED;
}

/**
 * fsl_rio_dbell_handler - MPC85xx doorbell interrupt handler
 * @irq: Linux interrupt number
 * @dev_instance: Pointer to interrupt-specific data
 *
 * Handles doorbell interrupts. Parses a list of registered
 * doorbell event handlers and executes a matching event handler.
 */
static irqreturn_t
fsl_rio_dbell_handler(int irq, void *dev_instance)
{
        int dsr;
        struct fsl_rio_dbell *fsl_dbell = (struct fsl_rio_dbell *)dev_instance;
        int i;

        dsr = in_be32(&fsl_dbell->dbell_regs->dsr);

        if (dsr & DOORBELL_DSR_TE) {
                pr_info("RIO: doorbell reception error\n");
                out_be32(&fsl_dbell->dbell_regs->dsr, DOORBELL_DSR_TE);
                goto out;
        }

        if (dsr & DOORBELL_DSR_QFI) {
                pr_info("RIO: doorbell queue full\n");
                out_be32(&fsl_dbell->dbell_regs->dsr, DOORBELL_DSR_QFI);
        }

        /* XXX Need to check/dispatch until queue empty */
        if (dsr & DOORBELL_DSR_DIQI) {
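                /* The inbound doorbell ring holds 512 entries of
                 * DOORBELL_MESSAGE_SIZE (8) bytes, i.e. one 4 KB page, so the
                 * low 12 bits of DQDPAR locate the newest message in the ring.
                 */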
                struct rio_dbell_msg *dmsg =
                        fsl_dbell->dbell_ring.virt +
                        (in_be32(&fsl_dbell->dbell_regs->dqdpar) & 0xfff);
                struct rio_dbell *dbell;
                int found = 0;

                pr_debug("RIO: processing doorbell, sid %2.2x tid %2.2x info %4.4x\n",
                         dmsg->sid, dmsg->tid, dmsg->info);

                for (i = 0; i < MAX_PORT_NUM; i++) {
                        if (fsl_dbell->mport[i]) {
                                list_for_each_entry(dbell,
                                        &fsl_dbell->mport[i]->dbells, node) {
                                        if ((dbell->res->start <= dmsg->info) &&
                                            (dbell->res->end >= dmsg->info)) {
                                                found = 1;
                                                break;
                                        }
                                }
                                if (found && dbell->dinb) {
                                        dbell->dinb(fsl_dbell->mport[i],
                                                dbell->dev_id, dmsg->sid,
                                                dmsg->tid, dmsg->info);
                                        break;
                                }
                        }
                }
                if (!found) {
                        pr_debug("RIO: spurious doorbell, sid %2.2x tid %2.2x info %4.4x\n",
                                 dmsg->sid, dmsg->tid, dmsg->info);
                }
                setbits32(&fsl_dbell->dbell_regs->dmr, DOORBELL_DMR_DI);
                out_be32(&fsl_dbell->dbell_regs->dsr, DOORBELL_DSR_DIQI);
        }

out:
        return IRQ_HANDLED;
}
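
/**
 * msg_unit_error_handler - Clear RapidIO message unit error status
 *
 * Clears the latched logical-layer, inbound/outbound message, doorbell
 * and port-write error status registers. No recovery beyond clearing
 * the status is attempted.
 */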
void msg_unit_error_handler(void)
{
        /* XXX: Error recovery is not implemented, we just clear errors */
        out_be32((u32 *)(rio_regs_win + RIO_LTLEDCSR), 0);

        out_be32((u32 *)(rmu_regs_win + RIO_IM0SR), IMSR_CLEAR);
        out_be32((u32 *)(rmu_regs_win + RIO_IM1SR), IMSR_CLEAR);
        out_be32((u32 *)(rmu_regs_win + RIO_OM0SR), OMSR_CLEAR);
        out_be32((u32 *)(rmu_regs_win + RIO_OM1SR), OMSR_CLEAR);

        out_be32(&dbell->dbell_regs->odsr, ODSR_CLEAR);
        out_be32(&dbell->dbell_regs->dsr, IDSR_CLEAR);

        out_be32(&pw->pw_regs->pwsr, IPWSR_CLEAR);
}

/**
 * fsl_rio_port_write_handler - MPC85xx port write interrupt handler
 * @irq: Linux interrupt number
 * @dev_instance: Pointer to interrupt-specific data
 *
 * Handles port write interrupts. Parses a list of registered
 * port write event handlers and executes a matching event handler.
 */
static irqreturn_t
fsl_rio_port_write_handler(int irq, void *dev_instance)
{
        u32 ipwmr, ipwsr;
        struct fsl_rio_pw *pw = (struct fsl_rio_pw *)dev_instance;
        u32 epwisr, tmp;

        epwisr = in_be32(rio_regs_win + RIO_EPWISR);
        if (!(epwisr & RIO_EPWISR_PW))
                goto pw_done;

        ipwmr = in_be32(&pw->pw_regs->pwmr);
        ipwsr = in_be32(&pw->pw_regs->pwsr);

#ifdef DEBUG_PW
        pr_debug("PW Int->IPWMR: 0x%08x IPWSR: 0x%08x (", ipwmr, ipwsr);
        if (ipwsr & RIO_IPWSR_QF)
                pr_debug(" QF");
        if (ipwsr & RIO_IPWSR_TE)
                pr_debug(" TE");
        if (ipwsr & RIO_IPWSR_QFI)
                pr_debug(" QFI");
        if (ipwsr & RIO_IPWSR_PWD)
                pr_debug(" PWD");
        if (ipwsr & RIO_IPWSR_PWB)
                pr_debug(" PWB");
        pr_debug(" )\n");
#endif
        /* Schedule deferred processing if PW was received */
        if (ipwsr & RIO_IPWSR_QFI) {
                /* Save PW message (if there is room in FIFO),
                 * otherwise discard it.
                 */
                if (kfifo_avail(&pw->pw_fifo) >= RIO_PW_MSG_SIZE) {
                        pw->port_write_msg.msg_count++;
                        kfifo_in(&pw->pw_fifo, pw->port_write_msg.virt,
                                 RIO_PW_MSG_SIZE);
                } else {
                        pw->port_write_msg.discard_count++;
                        pr_debug("RIO: ISR Discarded Port-Write Msg(s) (%d)\n",
                                 pw->port_write_msg.discard_count);
                }
                /* Clear interrupt and issue Clear Queue command. This allows
                 * another port-write to be received.
                 */
                out_be32(&pw->pw_regs->pwsr, RIO_IPWSR_QFI);
                out_be32(&pw->pw_regs->pwmr, ipwmr | RIO_IPWMR_CQ);

                schedule_work(&pw->pw_work);
        }

        if ((ipwmr & RIO_IPWMR_EIE) && (ipwsr & RIO_IPWSR_TE)) {
                pw->port_write_msg.err_count++;
                pr_debug("RIO: Port-Write Transaction Err (%d)\n",
                         pw->port_write_msg.err_count);
                /* Clear Transaction Error: port-write controller should be
                 * disabled when clearing this error
                 */
                out_be32(&pw->pw_regs->pwmr, ipwmr & ~RIO_IPWMR_PWE);
                out_be32(&pw->pw_regs->pwsr, RIO_IPWSR_TE);
                out_be32(&pw->pw_regs->pwmr, ipwmr);
        }

        if (ipwsr & RIO_IPWSR_PWD) {
                pw->port_write_msg.discard_count++;
                pr_debug("RIO: Port Discarded Port-Write Msg(s) (%d)\n",
                         pw->port_write_msg.discard_count);
                out_be32(&pw->pw_regs->pwsr, RIO_IPWSR_PWD);
        }

pw_done:
        if (epwisr & RIO_EPWISR_PINT1) {
                tmp = in_be32(rio_regs_win + RIO_LTLEDCSR);
                pr_debug("RIO_LTLEDCSR = 0x%x\n", tmp);
                fsl_rio_port_error_handler(0);
        }

        if (epwisr & RIO_EPWISR_PINT2) {
                tmp = in_be32(rio_regs_win + RIO_LTLEDCSR);
                pr_debug("RIO_LTLEDCSR = 0x%x\n", tmp);
                fsl_rio_port_error_handler(1);
        }

        if (epwisr & RIO_EPWISR_MU) {
                tmp = in_be32(rio_regs_win + RIO_LTLEDCSR);
                pr_debug("RIO_LTLEDCSR = 0x%x\n", tmp);
                msg_unit_error_handler();
        }

        return IRQ_HANDLED;
}
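
/**
 * fsl_pw_dpc - Deferred port-write processing
 * @work: Work queue item
 *
 * Drains the port-write messages queued by the interrupt handler and
 * passes each one to the RIO core for processing.
 */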
static void fsl_pw_dpc(struct work_struct *work)
{
        struct fsl_rio_pw *pw = container_of(work, struct fsl_rio_pw, pw_work);
        union rio_pw_msg msg_buffer;
        int i;

        /*
         * Process port-write messages
         */
        while (kfifo_out_spinlocked(&pw->pw_fifo, (unsigned char *)&msg_buffer,
                        RIO_PW_MSG_SIZE, &pw->pw_fifo_lock)) {
#ifdef DEBUG_PW
                {
                        u32 i;

                        pr_debug("%s : Port-Write Message:", __func__);
                        for (i = 0; i < RIO_PW_MSG_SIZE / sizeof(u32); i++) {
                                if ((i % 4) == 0)
                                        pr_debug("\n0x%02x: 0x%08x", i * 4,
                                                 msg_buffer.raw[i]);
                                else
                                        pr_debug(" 0x%08x", msg_buffer.raw[i]);
                        }
                        pr_debug("\n");
                }
#endif
                /* Pass the port-write message to RIO core for processing */
                for (i = 0; i < MAX_PORT_NUM; i++) {
                        if (pw->mport[i])
                                rio_inb_pwrite_handler(pw->mport[i],
                                                       &msg_buffer);
                }
        }
}

/**
 * fsl_rio_pw_enable - enable/disable the port-write interface
 * @mport: Master port implementing the port write unit
 * @enable: 1=enable; 0=disable port-write message handling
 */
int fsl_rio_pw_enable(struct rio_mport *mport, int enable)
{
        u32 rval;

        rval = in_be32(&pw->pw_regs->pwmr);

        if (enable)
                rval |= RIO_IPWMR_PWE;
        else
                rval &= ~RIO_IPWMR_PWE;

        out_be32(&pw->pw_regs->pwmr, rval);

        return 0;
}

/**
 * fsl_rio_port_write_init - MPC85xx port write interface init
 * @pw: Port write unit state (struct fsl_rio_pw)
 *
 * Initializes port write unit hardware and DMA buffer
 * ring. Called from fsl_rio_setup(). Returns %0 on success
 * or a negative error code on failure.
 */
int fsl_rio_port_write_init(struct fsl_rio_pw *pw)
{
        int rc = 0;

        /* Following configurations require a disabled port write controller */
        out_be32(&pw->pw_regs->pwmr,
                 in_be32(&pw->pw_regs->pwmr) & ~RIO_IPWMR_PWE);

        /* Initialize port write */
        pw->port_write_msg.virt = dma_alloc_coherent(pw->dev,
                RIO_PW_MSG_SIZE,
                &pw->port_write_msg.phys, GFP_KERNEL);
        if (!pw->port_write_msg.virt) {
                pr_err("RIO: unable to allocate port write queue\n");
                return -ENOMEM;
        }

        pw->port_write_msg.err_count = 0;
        pw->port_write_msg.discard_count = 0;

        /* Point dequeue/enqueue pointers at first entry */
        out_be32(&pw->pw_regs->epwqbar, 0);
        out_be32(&pw->pw_regs->pwqbar, (u32) pw->port_write_msg.phys);

        pr_debug("EIPWQBAR: 0x%08x IPWQBAR: 0x%08x\n",
                 in_be32(&pw->pw_regs->epwqbar),
                 in_be32(&pw->pw_regs->pwqbar));

        /* Clear interrupt status IPWSR */
        out_be32(&pw->pw_regs->pwsr,
                 (RIO_IPWSR_TE | RIO_IPWSR_QFI | RIO_IPWSR_PWD));

        /* Configure port write controller: enable snooping and all error
         * reporting, and clear the queue full condition
         */
        out_be32(&pw->pw_regs->pwmr,
                 RIO_IPWMR_SEN | RIO_IPWMR_QFIE | RIO_IPWMR_EIE | RIO_IPWMR_CQ);

        /* Hook up port-write handler */
        rc = request_irq(IRQ_RIO_PW(pw), fsl_rio_port_write_handler,
                         IRQF_SHARED, "port-write", (void *)pw);
        if (rc < 0) {
                pr_err("MPC85xx RIO: unable to request port-write irq");
                goto err_out;
        }
        /* Enable Error Interrupt */
        out_be32((u32 *)(rio_regs_win + RIO_LTLEECSR), LTLEECSR_ENABLE_ALL);

        INIT_WORK(&pw->pw_work, fsl_pw_dpc);
        spin_lock_init(&pw->pw_fifo_lock);
        if (kfifo_alloc(&pw->pw_fifo, RIO_PW_MSG_SIZE * 32, GFP_KERNEL)) {
                pr_err("FIFO allocation failed\n");
                rc = -ENOMEM;
                goto err_out_irq;
        }

        pr_debug("IPWMR: 0x%08x IPWSR: 0x%08x\n",
                 in_be32(&pw->pw_regs->pwmr),
                 in_be32(&pw->pw_regs->pwsr));

        return rc;

err_out_irq:
        free_irq(IRQ_RIO_PW(pw), (void *)pw);
err_out:
        dma_free_coherent(pw->dev, RIO_PW_MSG_SIZE,
                          pw->port_write_msg.virt,
                          pw->port_write_msg.phys);
        return rc;
}

/**
 * fsl_rio_doorbell_send - Send a MPC85xx doorbell message
 * @mport: RapidIO master port info
 * @index: ID of RapidIO interface
 * @destid: Destination ID of target device
 * @data: 16-bit info field of RapidIO doorbell message
 *
 * Sends a MPC85xx doorbell message. Returns %0 on success or
 * %-EINVAL on failure.
 */
int fsl_rio_doorbell_send(struct rio_mport *mport,
                          int index, u16 destid, u16 data)
{
        unsigned long flags;

        pr_debug("fsl_doorbell_send: index %d destid %4.4x data %4.4x\n",
                 index, destid, data);

        spin_lock_irqsave(&fsl_rio_doorbell_lock, flags);

        /* On serial RapidIO silicon, such as the MPC8548 and MPC8641,
         * the operations below are required.
         */
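        /* Disable the outbound doorbell unit, program the retry threshold
         * (ODRETCR), destination ID and doorbell data, then re-enable the
         * unit to start transmission.
         */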
        out_be32(&dbell->dbell_regs->odmr, 0x00000000);
        out_be32(&dbell->dbell_regs->odretcr, 0x00000004);
        out_be32(&dbell->dbell_regs->oddpr, destid << 16);
        out_be32(&dbell->dbell_regs->oddatr, (index << 20) | data);
        out_be32(&dbell->dbell_regs->odmr, 0x00000001);
        spin_unlock_irqrestore(&fsl_rio_doorbell_lock, flags);

        return 0;
}

/**
 * fsl_add_outb_message - Add message to the MPC85xx outbound message queue
 * @mport: Master port with outbound message queue
 * @rdev: Target of outbound message
 * @mbox: Outbound mailbox
 * @buffer: Message to add to outbound queue
 * @len: Length of message
 *
 * Adds the @buffer message to the MPC85xx outbound message queue. Returns
 * %0 on success or %-EINVAL on failure.
 */
int
fsl_add_outb_message(struct rio_mport *mport, struct rio_dev *rdev, int mbox,
                     void *buffer, size_t len)
{
        struct fsl_rmu *rmu = GET_RMM_HANDLE(mport);
        u32 omr;
        struct rio_tx_desc *desc = (struct rio_tx_desc *)rmu->msg_tx_ring.virt
                                        + rmu->msg_tx_ring.tx_slot;
        int ret = 0;

        pr_debug("RIO: fsl_add_outb_message(): destid %4.4x mbox %d buffer %p len %8.8zx\n",
                 rdev->destid, mbox, buffer, len);
        if ((len < 8) || (len > RIO_MAX_MSG_SIZE)) {
                ret = -EINVAL;
                goto out;
        }

        /* Copy and clear rest of buffer */
        memcpy(rmu->msg_tx_ring.virt_buffer[rmu->msg_tx_ring.tx_slot], buffer,
               len);
        if (len < (RIO_MAX_MSG_SIZE - 4))
                memset(rmu->msg_tx_ring.virt_buffer[rmu->msg_tx_ring.tx_slot]
                       + len, 0, RIO_MAX_MSG_SIZE - len);

        /* Set mbox field for message, and set destid */
        desc->dport = (rdev->destid << 16) | (mbox & 0x3);

        /* Enable EOMI interrupt and priority */
        desc->dattr = 0x28000000 | ((mport->index) << 20);

        /* Set transfer size aligned to next power of 2 (in double words) */
        desc->dwcnt = is_power_of_2(len) ? len : 1 << get_bitmask_order(len);

        /* Set snooping and source buffer address */
        desc->saddr = 0x00000004
                | rmu->msg_tx_ring.phys_buffer[rmu->msg_tx_ring.tx_slot];

        /* Increment enqueue pointer */
        omr = in_be32(&rmu->msg_regs->omr);
        out_be32(&rmu->msg_regs->omr, omr | RIO_MSG_OMR_MUI);

        /* Go to next descriptor */
        if (++rmu->msg_tx_ring.tx_slot == rmu->msg_tx_ring.size)
                rmu->msg_tx_ring.tx_slot = 0;

out:
        return ret;
}

/**
 * fsl_open_outb_mbox - Initialize MPC85xx outbound mailbox
 * @mport: Master port implementing the outbound message unit
 * @dev_id: Device specific pointer to pass on event
 * @mbox: Mailbox to open
 * @entries: Number of entries in the outbound mailbox ring
 *
 * Initializes the buffer ring, requests the outbound message interrupt,
 * and enables the outbound message unit. Returns %0 on success and
 * %-EINVAL or %-ENOMEM on failure.
 */
int
fsl_open_outb_mbox(struct rio_mport *mport, void *dev_id, int mbox, int entries)
{
        int i, j, rc = 0;
        struct rio_priv *priv = mport->priv;
        struct fsl_rmu *rmu = GET_RMM_HANDLE(mport);

        if ((entries < RIO_MIN_TX_RING_SIZE) ||
            (entries > RIO_MAX_TX_RING_SIZE) || (!is_power_of_2(entries))) {
                rc = -EINVAL;
                goto out;
        }

        /* Initialize shadow copy ring */
        rmu->msg_tx_ring.dev_id = dev_id;
        rmu->msg_tx_ring.size = entries;

        for (i = 0; i < rmu->msg_tx_ring.size; i++) {
                rmu->msg_tx_ring.virt_buffer[i] =
                        dma_alloc_coherent(priv->dev, RIO_MSG_BUFFER_SIZE,
                                &rmu->msg_tx_ring.phys_buffer[i], GFP_KERNEL);
                if (!rmu->msg_tx_ring.virt_buffer[i]) {
                        rc = -ENOMEM;
                        for (j = 0; j < rmu->msg_tx_ring.size; j++)
                                if (rmu->msg_tx_ring.virt_buffer[j])
                                        dma_free_coherent(priv->dev,
                                                RIO_MSG_BUFFER_SIZE,
                                                rmu->msg_tx_ring.virt_buffer[j],
                                                rmu->msg_tx_ring.phys_buffer[j]);
                        goto out;
                }
        }

        /* Initialize outbound message descriptor ring */
        rmu->msg_tx_ring.virt = dma_alloc_coherent(priv->dev,
                                rmu->msg_tx_ring.size * RIO_MSG_DESC_SIZE,
                                &rmu->msg_tx_ring.phys,
                                GFP_KERNEL);
        if (!rmu->msg_tx_ring.virt) {
                rc = -ENOMEM;
                goto out_dma;
        }
        rmu->msg_tx_ring.tx_slot = 0;

        /* Point dequeue/enqueue pointers at first entry in ring */
        out_be32(&rmu->msg_regs->odqdpar, rmu->msg_tx_ring.phys);
        out_be32(&rmu->msg_regs->odqepar, rmu->msg_tx_ring.phys);

        /* Configure for snooping */
        out_be32(&rmu->msg_regs->osar, 0x00000004);

        /* Clear interrupt status */
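        /* 0xb3 = TE | QOI | QFI | EOMI | QEI */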
        out_be32(&rmu->msg_regs->osr, 0x000000b3);

        /* Hook up outbound message handler */
        rc = request_irq(IRQ_RIO_TX(mport), fsl_rio_tx_handler, 0,
                         "msg_tx", (void *)mport);
        if (rc < 0)
                goto out_irq;

        /*
         * Configure outbound message unit
         *      Snooping
         *      Interrupts (all enabled, except QEIE)
         *      Chaining mode
         *      Disable
         */
        out_be32(&rmu->msg_regs->omr, 0x00100220);

        /* Set number of entries */
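        /* get_bitmask_order() returns log2(entries) + 1 for a power of two,
         * so the ring size field is programmed with log2(entries) - 1.
         */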
        out_be32(&rmu->msg_regs->omr,
                 in_be32(&rmu->msg_regs->omr) |
                 ((get_bitmask_order(entries) - 2) << 12));

        /* Now enable the unit */
        out_be32(&rmu->msg_regs->omr, in_be32(&rmu->msg_regs->omr) | 0x1);

out:
        return rc;

out_irq:
        dma_free_coherent(priv->dev,
                          rmu->msg_tx_ring.size * RIO_MSG_DESC_SIZE,
                          rmu->msg_tx_ring.virt, rmu->msg_tx_ring.phys);

out_dma:
        for (i = 0; i < rmu->msg_tx_ring.size; i++)
                dma_free_coherent(priv->dev, RIO_MSG_BUFFER_SIZE,
                                  rmu->msg_tx_ring.virt_buffer[i],
                                  rmu->msg_tx_ring.phys_buffer[i]);

        return rc;
}

/**
 * fsl_close_outb_mbox - Shut down MPC85xx outbound mailbox
 * @mport: Master port implementing the outbound message unit
 * @mbox: Mailbox to close
 *
 * Disables the outbound message unit, frees all buffers, and
 * frees the outbound message interrupt.
 */
void fsl_close_outb_mbox(struct rio_mport *mport, int mbox)
{
        struct rio_priv *priv = mport->priv;
        struct fsl_rmu *rmu = GET_RMM_HANDLE(mport);

        /* Disable outbound message unit */
        out_be32(&rmu->msg_regs->omr, 0);

        /* Free ring */
        dma_free_coherent(priv->dev,
                          rmu->msg_tx_ring.size * RIO_MSG_DESC_SIZE,
                          rmu->msg_tx_ring.virt, rmu->msg_tx_ring.phys);

        /* Free interrupt */
        free_irq(IRQ_RIO_TX(mport), (void *)mport);
}

/**
 * fsl_open_inb_mbox - Initialize MPC85xx inbound mailbox
 * @mport: Master port implementing the inbound message unit
 * @dev_id: Device specific pointer to pass on event
 * @mbox: Mailbox to open
 * @entries: Number of entries in the inbound mailbox ring
 *
 * Initializes the buffer ring, requests the inbound message interrupt,
 * and enables the inbound message unit. Returns %0 on success
 * and %-EINVAL or %-ENOMEM on failure.
 */
int
fsl_open_inb_mbox(struct rio_mport *mport, void *dev_id, int mbox, int entries)
{
        int i, rc = 0;
        struct rio_priv *priv = mport->priv;
        struct fsl_rmu *rmu = GET_RMM_HANDLE(mport);

        if ((entries < RIO_MIN_RX_RING_SIZE) ||
            (entries > RIO_MAX_RX_RING_SIZE) || (!is_power_of_2(entries))) {
                rc = -EINVAL;
                goto out;
        }

        /* Initialize client buffer ring */
        rmu->msg_rx_ring.dev_id = dev_id;
        rmu->msg_rx_ring.size = entries;
        rmu->msg_rx_ring.rx_slot = 0;
        for (i = 0; i < rmu->msg_rx_ring.size; i++)
                rmu->msg_rx_ring.virt_buffer[i] = NULL;

        /* Initialize inbound message ring */
        rmu->msg_rx_ring.virt = dma_alloc_coherent(priv->dev,
                                rmu->msg_rx_ring.size * RIO_MAX_MSG_SIZE,
                                &rmu->msg_rx_ring.phys, GFP_KERNEL);
        if (!rmu->msg_rx_ring.virt) {
                rc = -ENOMEM;
                goto out;
        }

        /* Point dequeue/enqueue pointers at first entry in ring */
        out_be32(&rmu->msg_regs->ifqdpar, (u32) rmu->msg_rx_ring.phys);
        out_be32(&rmu->msg_regs->ifqepar, (u32) rmu->msg_rx_ring.phys);

        /* Clear interrupt status */
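        /* 0x91 = TE | QFI | DIQI */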
        out_be32(&rmu->msg_regs->isr, 0x00000091);

        /* Hook up inbound message handler */
        rc = request_irq(IRQ_RIO_RX(mport), fsl_rio_rx_handler, 0,
                         "msg_rx", (void *)mport);
        if (rc < 0) {
                dma_free_coherent(priv->dev,
                        rmu->msg_rx_ring.size * RIO_MAX_MSG_SIZE,
                        rmu->msg_rx_ring.virt, rmu->msg_rx_ring.phys);
                goto out;
        }

        /*
         * Configure inbound message unit:
         *      Snooping
         *      4KB max message size
         *      Unmask all interrupt sources
         *      Disable
         */
        out_be32(&rmu->msg_regs->imr, 0x001b0060);

        /* Set number of queue entries */
        setbits32(&rmu->msg_regs->imr, (get_bitmask_order(entries) - 2) << 12);

        /* Now enable the unit */
        setbits32(&rmu->msg_regs->imr, 0x1);

out:
        return rc;
}

/**
 * fsl_close_inb_mbox - Shut down MPC85xx inbound mailbox
 * @mport: Master port implementing the inbound message unit
 * @mbox: Mailbox to close
 *
 * Disables the inbound message unit, frees all buffers, and
 * frees the inbound message interrupt.
 */
void fsl_close_inb_mbox(struct rio_mport *mport, int mbox)
{
        struct rio_priv *priv = mport->priv;
        struct fsl_rmu *rmu = GET_RMM_HANDLE(mport);

        /* Disable inbound message unit */
        out_be32(&rmu->msg_regs->imr, 0);

        /* Free ring */
        dma_free_coherent(priv->dev, rmu->msg_rx_ring.size * RIO_MAX_MSG_SIZE,
                          rmu->msg_rx_ring.virt, rmu->msg_rx_ring.phys);

        /* Free interrupt */
        free_irq(IRQ_RIO_RX(mport), (void *)mport);
}

/**
 * fsl_add_inb_buffer - Add buffer to the MPC85xx inbound message queue
 * @mport: Master port implementing the inbound message unit
 * @mbox: Inbound mailbox number
 * @buf: Buffer to add to inbound queue
 *
 * Adds the @buf buffer to the MPC85xx inbound message queue. Returns
 * %0 on success or %-EINVAL on failure.
 */
int fsl_add_inb_buffer(struct rio_mport *mport, int mbox, void *buf)
{
        int rc = 0;
        struct fsl_rmu *rmu = GET_RMM_HANDLE(mport);

        pr_debug("RIO: fsl_add_inb_buffer(), msg_rx_ring.rx_slot %d\n",
                 rmu->msg_rx_ring.rx_slot);

        if (rmu->msg_rx_ring.virt_buffer[rmu->msg_rx_ring.rx_slot]) {
                printk(KERN_ERR
                        "RIO: error adding inbound buffer %d, buffer exists\n",
                        rmu->msg_rx_ring.rx_slot);
                rc = -EINVAL;
                goto out;
        }

        rmu->msg_rx_ring.virt_buffer[rmu->msg_rx_ring.rx_slot] = buf;
        if (++rmu->msg_rx_ring.rx_slot == rmu->msg_rx_ring.size)
                rmu->msg_rx_ring.rx_slot = 0;

out:
        return rc;
}

/**
 * fsl_get_inb_message - Fetch inbound message from the MPC85xx message unit
 * @mport: Master port implementing the inbound message unit
 * @mbox: Inbound mailbox number
 *
 * Gets the next available inbound message from the inbound message queue.
 * A pointer to the message is returned on success or NULL on failure.
 */
void *fsl_get_inb_message(struct rio_mport *mport, int mbox)
{
        struct fsl_rmu *rmu = GET_RMM_HANDLE(mport);
        u32 phys_buf;
        void *virt_buf;
        void *buf = NULL;
        int buf_idx;

        phys_buf = in_be32(&rmu->msg_regs->ifqdpar);

        /* If no more messages, then bail out */
        if (phys_buf == in_be32(&rmu->msg_regs->ifqepar))
                goto out2;

        virt_buf = rmu->msg_rx_ring.virt + (phys_buf
                                        - rmu->msg_rx_ring.phys);
        buf_idx = (phys_buf - rmu->msg_rx_ring.phys) / RIO_MAX_MSG_SIZE;
        buf = rmu->msg_rx_ring.virt_buffer[buf_idx];

        if (!buf) {
                printk(KERN_ERR
                        "RIO: inbound message copy failed, no buffers\n");
                goto out1;
        }

        /* Copy max message size, caller is expected to allocate that big */
        memcpy(buf, virt_buf, RIO_MAX_MSG_SIZE);

        /* Clear the available buffer */
        rmu->msg_rx_ring.virt_buffer[buf_idx] = NULL;

out1:
        setbits32(&rmu->msg_regs->imr, RIO_MSG_IMR_MI);

out2:
        return buf;
}

/**
 * fsl_rio_doorbell_init - MPC85xx doorbell interface init
 * @dbell: Doorbell unit state (struct fsl_rio_dbell)
 *
 * Initializes doorbell unit hardware and inbound DMA buffer
 * ring. Called from fsl_rio_setup(). Returns %0 on success
 * or %-ENOMEM on failure.
 */
int fsl_rio_doorbell_init(struct fsl_rio_dbell *dbell)
{
        int rc = 0;

        /* Initialize inbound doorbells */
        dbell->dbell_ring.virt = dma_alloc_coherent(dbell->dev, 512 *
                DOORBELL_MESSAGE_SIZE, &dbell->dbell_ring.phys, GFP_KERNEL);
        if (!dbell->dbell_ring.virt) {
                printk(KERN_ERR "RIO: unable to allocate inbound doorbell ring\n");
                rc = -ENOMEM;
                goto out;
        }

        /* Point dequeue/enqueue pointers at first entry in ring */
        out_be32(&dbell->dbell_regs->dqdpar, (u32) dbell->dbell_ring.phys);
        out_be32(&dbell->dbell_regs->dqepar, (u32) dbell->dbell_ring.phys);

        /* Clear interrupt status */
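        /* 0x91 = TE | QFI | DIQI */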
        out_be32(&dbell->dbell_regs->dsr, 0x00000091);

        /* Hook up doorbell handler */
        rc = request_irq(IRQ_RIO_BELL(dbell), fsl_rio_dbell_handler, 0,
                         "dbell_rx", (void *)dbell);
        if (rc < 0) {
                dma_free_coherent(dbell->dev, 512 * DOORBELL_MESSAGE_SIZE,
                        dbell->dbell_ring.virt, dbell->dbell_ring.phys);
                printk(KERN_ERR
                        "MPC85xx RIO: unable to request inbound doorbell irq");
                goto out;
        }

        /* Configure doorbells for snooping, 512 entries, and enable */
        out_be32(&dbell->dbell_regs->dmr, 0x00108161);

out:
        return rc;
}
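
/**
 * fsl_rio_setup_rmu - Set up the RapidIO message unit for a master port
 * @mport: Master port the message unit belongs to
 * @node: Device tree node of the message unit (the target of the
 *        'fsl,rmu' property)
 *
 * Maps the message unit registers, parses the unit's transmit and
 * receive interrupts, and registers the doorbell and mailbox resources
 * with the RIO core. Returns %0 on success or a negative errno on
 * failure.
 */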
int fsl_rio_setup_rmu(struct rio_mport *mport, struct device_node *node)
{
        struct rio_priv *priv;
        struct fsl_rmu *rmu;
        u64 msg_start;
        const u32 *msg_addr;
        int mlen;
        int aw;

        if (!mport || !mport->priv)
                return -EINVAL;

        priv = mport->priv;

        if (!node) {
                dev_warn(priv->dev, "Can't get %pOF property 'fsl,rmu'\n",
                        priv->dev->of_node);
                return -EINVAL;
        }

        rmu = kzalloc(sizeof(struct fsl_rmu), GFP_KERNEL);
        if (!rmu)
                return -ENOMEM;

        aw = of_n_addr_cells(node);
        msg_addr = of_get_property(node, "reg", &mlen);
        if (!msg_addr) {
                pr_err("%pOF: unable to find 'reg' property of message-unit\n",
                        node);
                kfree(rmu);
                return -ENOMEM;
        }
        msg_start = of_read_number(msg_addr, aw);

        rmu->msg_regs = (struct rio_msg_regs *)
                        (rmu_regs_win + (u32)msg_start);

        rmu->txirq = irq_of_parse_and_map(node, 0);
        rmu->rxirq = irq_of_parse_and_map(node, 1);
        printk(KERN_INFO "%pOF: txirq: %d, rxirq %d\n",
                node, rmu->txirq, rmu->rxirq);

        priv->rmm_handle = rmu;

        rio_init_dbell_res(&mport->riores[RIO_DOORBELL_RESOURCE], 0, 0xffff);
        rio_init_mbox_res(&mport->riores[RIO_INB_MBOX_RESOURCE], 0, 0);
        rio_init_mbox_res(&mport->riores[RIO_OUTB_MBOX_RESOURCE], 0, 0);

        return 0;
}