// SPDX-License-Identifier: GPL-2.0-only
/*
 * NETJet mISDN driver
 *
 * Author	Karsten Keil <[email protected]>
 *
 * Copyright 2009 by Karsten Keil <[email protected]>
 */

#include <linux/interrupt.h>
#include <linux/module.h>
#include <linux/pci.h>
#include <linux/delay.h>
#include <linux/mISDNhw.h>
#include <linux/slab.h>
#include "ipac.h"
#include "iohelper.h"
#include "netjet.h"
#include "isdnhdlc.h"

#define NETJET_REV	"2.0"

enum nj_types {
	NETJET_S_TJ300,
	NETJET_S_TJ320,
	ENTERNOW__TJ320,
};
struct tiger_dma {
	size_t		size;
	u32		*start;
	int		idx;
	u32		dmastart;
	u32		dmairq;
	u32		dmaend;
	u32		dmacur;
};

struct tiger_hw;

struct tiger_ch {
	struct bchannel		bch;
	struct tiger_hw		*nj;
	int			idx;
	int			free;
	int			lastrx;
	u16			rxstate;
	u16			txstate;
	struct isdnhdlc_vars	hsend;
	struct isdnhdlc_vars	hrecv;
	u8			*hsbuf;
	u8			*hrbuf;
};

#define TX_INIT		0x0001
#define TX_IDLE		0x0002
#define TX_RUN		0x0004
#define TX_UNDERRUN	0x0100
#define RX_OVERRUN	0x0100

#define LOG_SIZE	64
struct tiger_hw {
	struct list_head	list;
	struct pci_dev		*pdev;
	char			name[MISDN_MAX_IDLEN];
	enum nj_types		typ;
	int			irq;
	u32			irqcnt;
	u32			base;
	size_t			base_s;
	dma_addr_t		dma;
	void			*dma_p;
	spinlock_t		lock;	/* lock HW */
	struct isac_hw		isac;
	struct tiger_dma	send;
	struct tiger_dma	recv;
	struct tiger_ch		bc[2];
	u8			ctrlreg;
	u8			dmactrl;
	u8			auxd;
	u8			last_is0;
	u8			irqmask0;
	char			log[LOG_SIZE];
};
static LIST_HEAD(Cards);
static DEFINE_RWLOCK(card_lock); /* protect Cards */
static u32 debug;
static int nj_cnt;

static void
_set_debug(struct tiger_hw *card)
{
	card->isac.dch.debug = debug;
	card->bc[0].bch.debug = debug;
	card->bc[1].bch.debug = debug;
}

static int
set_debug(const char *val, const struct kernel_param *kp)
{
	int ret;
	struct tiger_hw *card;

	ret = param_set_uint(val, kp);
	if (!ret) {
		read_lock(&card_lock);
		list_for_each_entry(card, &Cards, list)
			_set_debug(card);
		read_unlock(&card_lock);
	}
	return ret;
}

MODULE_AUTHOR("Karsten Keil");
MODULE_LICENSE("GPL v2");
MODULE_VERSION(NETJET_REV);
module_param_call(debug, set_debug, param_get_uint, &debug, S_IRUGO | S_IWUSR);
MODULE_PARM_DESC(debug, "Netjet debug mask");
static void
nj_disable_hwirq(struct tiger_hw *card)
{
	outb(0, card->base + NJ_IRQMASK0);
	outb(0, card->base + NJ_IRQMASK1);
}
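
/*
 * The ISAC sits behind a small I/O window: the two low bits of the AUX data
 * register select a register page (offset bits 4-5), and the low nibble of
 * the offset picks a register inside that page, spaced four ports apart.
 * The FIFO is reached through page 0 at NJ_ISAC_OFF.
 */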
static u8
ReadISAC_nj(void *p, u8 offset)
{
	struct tiger_hw *card = p;
	u8 ret;

	card->auxd &= 0xfc;
	card->auxd |= (offset >> 4) & 3;
	outb(card->auxd, card->base + NJ_AUXDATA);
	ret = inb(card->base + NJ_ISAC_OFF + ((offset & 0x0f) << 2));
	return ret;
}

static void
WriteISAC_nj(void *p, u8 offset, u8 value)
{
	struct tiger_hw *card = p;

	card->auxd &= 0xfc;
	card->auxd |= (offset >> 4) & 3;
	outb(card->auxd, card->base + NJ_AUXDATA);
	outb(value, card->base + NJ_ISAC_OFF + ((offset & 0x0f) << 2));
}

static void
ReadFiFoISAC_nj(void *p, u8 offset, u8 *data, int size)
{
	struct tiger_hw *card = p;

	card->auxd &= 0xfc;
	outb(card->auxd, card->base + NJ_AUXDATA);
	insb(card->base + NJ_ISAC_OFF, data, size);
}

static void
WriteFiFoISAC_nj(void *p, u8 offset, u8 *data, int size)
{
	struct tiger_hw *card = p;

	card->auxd &= 0xfc;
	outb(card->auxd, card->base + NJ_AUXDATA);
	outsb(card->base + NJ_ISAC_OFF, data, size);
}
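
/*
 * Each 32-bit word in the DMA rings carries one sample per B-channel:
 * B1 data lives in the low byte, B2 data in the next byte. The helpers
 * below therefore mask and shift depending on bch.nr.
 */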
static void
fill_mem(struct tiger_ch *bc, u32 idx, u32 cnt, u32 fill)
{
	struct tiger_hw *card = bc->bch.hw;
	u32 mask = 0xff, val;

	pr_debug("%s: B%1d fill %02x len %d idx %d/%d\n", card->name,
		 bc->bch.nr, fill, cnt, idx, card->send.idx);
	if (bc->bch.nr & 2) {
		fill <<= 8;
		mask <<= 8;
	}
	mask ^= 0xffffffff;
	while (cnt--) {
		val = card->send.start[idx];
		val &= mask;
		val |= fill;
		card->send.start[idx++] = val;
		if (idx >= card->send.size)
			idx = 0;
	}
}
static int
mode_tiger(struct tiger_ch *bc, u32 protocol)
{
	struct tiger_hw *card = bc->bch.hw;

	pr_debug("%s: B%1d protocol %x-->%x\n", card->name,
		 bc->bch.nr, bc->bch.state, protocol);
	switch (protocol) {
	case ISDN_P_NONE:
		if (bc->bch.state == ISDN_P_NONE)
			break;
		fill_mem(bc, 0, card->send.size, 0xff);
		bc->bch.state = protocol;
		/* only stop DMA and interrupts if both channels are unused */
		if ((card->bc[0].bch.state == ISDN_P_NONE) &&
		    (card->bc[1].bch.state == ISDN_P_NONE)) {
			card->dmactrl = 0;
			outb(card->dmactrl, card->base + NJ_DMACTRL);
			outb(0, card->base + NJ_IRQMASK0);
		}
		test_and_clear_bit(FLG_HDLC, &bc->bch.Flags);
		test_and_clear_bit(FLG_TRANSPARENT, &bc->bch.Flags);
		bc->txstate = 0;
		bc->rxstate = 0;
		bc->lastrx = -1;
		break;
	case ISDN_P_B_RAW:
		test_and_set_bit(FLG_TRANSPARENT, &bc->bch.Flags);
		bc->bch.state = protocol;
		bc->idx = 0;
		bc->free = card->send.size / 2;
		bc->rxstate = 0;
		bc->txstate = TX_INIT | TX_IDLE;
		bc->lastrx = -1;
		if (!card->dmactrl) {
			card->dmactrl = 1;
			outb(card->dmactrl, card->base + NJ_DMACTRL);
			outb(0x0f, card->base + NJ_IRQMASK0);
		}
		break;
	case ISDN_P_B_HDLC:
		test_and_set_bit(FLG_HDLC, &bc->bch.Flags);
		bc->bch.state = protocol;
		bc->idx = 0;
		bc->free = card->send.size / 2;
		bc->rxstate = 0;
		bc->txstate = TX_INIT | TX_IDLE;
		isdnhdlc_rcv_init(&bc->hrecv, 0);
		isdnhdlc_out_init(&bc->hsend, 0);
		bc->lastrx = -1;
		if (!card->dmactrl) {
			card->dmactrl = 1;
			outb(card->dmactrl, card->base + NJ_DMACTRL);
			outb(0x0f, card->base + NJ_IRQMASK0);
		}
		break;
	default:
		pr_info("%s: %s protocol %x not handled\n", card->name,
			__func__, protocol);
		return -ENOPROTOOPT;
	}
	card->send.dmacur = inl(card->base + NJ_DMA_READ_ADR);
	card->recv.dmacur = inl(card->base + NJ_DMA_WRITE_ADR);
	card->send.idx = (card->send.dmacur - card->send.dmastart) >> 2;
	card->recv.idx = (card->recv.dmacur - card->recv.dmastart) >> 2;
	pr_debug("%s: %s ctrl %x irq %02x/%02x idx %d/%d\n",
		 card->name, __func__,
		 inb(card->base + NJ_DMACTRL),
		 inb(card->base + NJ_IRQMASK0),
		 inb(card->base + NJ_IRQSTAT0),
		 card->send.idx,
		 card->recv.idx);
	return 0;
}
static void
nj_reset(struct tiger_hw *card)
{
	outb(0xff, card->base + NJ_CTRL); /* Reset On */
	mdelay(1);

	/* now edge triggered for TJ320 GE 13/07/00 */
	/* see comment in IRQ function */
	if (card->typ == NETJET_S_TJ320) /* TJ320 */
		card->ctrlreg = 0x40;  /* Reset Off and status read clear */
	else
		card->ctrlreg = 0x00;  /* Reset Off and status read clear */
	outb(card->ctrlreg, card->base + NJ_CTRL);
	mdelay(10);

	/* configure AUX pins (all output except ISAC IRQ pin) */
	card->auxd = 0;
	card->dmactrl = 0;
	outb(~NJ_ISACIRQ, card->base + NJ_AUXCTRL);
	outb(NJ_ISACIRQ, card->base + NJ_IRQMASK1);
	outb(card->auxd, card->base + NJ_AUXDATA);
}
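
/*
 * The coherent DMA block is split into two halves: the first half is the
 * send ring (read by the controller), the second half the receive ring
 * (written by it). For each ring the controller signals when it passes the
 * middle (dmairq) and when it wraps at the last word (dmaend).
 */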
static int
inittiger(struct tiger_hw *card)
{
	int i;

	card->dma_p = dma_alloc_coherent(&card->pdev->dev, NJ_DMA_SIZE,
					 &card->dma, GFP_ATOMIC);
	if (!card->dma_p) {
		pr_info("%s: No DMA memory\n", card->name);
		return -ENOMEM;
	}
	if ((u64)card->dma > 0xffffffff) {
		pr_info("%s: DMA outside 32 bit\n", card->name);
		return -ENOMEM;
	}
	for (i = 0; i < 2; i++) {
		card->bc[i].hsbuf = kmalloc(NJ_DMA_TXSIZE, GFP_ATOMIC);
		if (!card->bc[i].hsbuf) {
			pr_info("%s: no B%d send buffer\n", card->name, i + 1);
			return -ENOMEM;
		}
		card->bc[i].hrbuf = kmalloc(NJ_DMA_RXSIZE, GFP_ATOMIC);
		if (!card->bc[i].hrbuf) {
			pr_info("%s: no B%d recv buffer\n", card->name, i + 1);
			return -ENOMEM;
		}
	}
	memset(card->dma_p, 0xff, NJ_DMA_SIZE);

	card->send.start = card->dma_p;
	card->send.dmastart = (u32)card->dma;
	card->send.dmaend = card->send.dmastart +
		(4 * (NJ_DMA_TXSIZE - 1));
	card->send.dmairq = card->send.dmastart +
		(4 * ((NJ_DMA_TXSIZE / 2) - 1));
	card->send.size = NJ_DMA_TXSIZE;
	if (debug & DEBUG_HW)
		pr_notice("%s: send buffer phy %#x - %#x - %#x virt %p"
			  " size %zu u32\n", card->name,
			  card->send.dmastart, card->send.dmairq,
			  card->send.dmaend, card->send.start, card->send.size);
	outl(card->send.dmastart, card->base + NJ_DMA_READ_START);
	outl(card->send.dmairq, card->base + NJ_DMA_READ_IRQ);
	outl(card->send.dmaend, card->base + NJ_DMA_READ_END);

	card->recv.start = card->dma_p + (NJ_DMA_SIZE / 2);
	card->recv.dmastart = (u32)card->dma + (NJ_DMA_SIZE / 2);
	card->recv.dmaend = card->recv.dmastart +
		(4 * (NJ_DMA_RXSIZE - 1));
	card->recv.dmairq = card->recv.dmastart +
		(4 * ((NJ_DMA_RXSIZE / 2) - 1));
	card->recv.size = NJ_DMA_RXSIZE;
	if (debug & DEBUG_HW)
		pr_notice("%s: recv buffer phy %#x - %#x - %#x virt %p"
			  " size %zu u32\n", card->name,
			  card->recv.dmastart, card->recv.dmairq,
			  card->recv.dmaend, card->recv.start, card->recv.size);
	outl(card->recv.dmastart, card->base + NJ_DMA_WRITE_START);
	outl(card->recv.dmairq, card->base + NJ_DMA_WRITE_IRQ);
	outl(card->recv.dmaend, card->base + NJ_DMA_WRITE_END);
	return 0;
}
static void
read_dma(struct tiger_ch *bc, u32 idx, int cnt)
{
	struct tiger_hw *card = bc->bch.hw;
	int i, stat;
	u32 val;
	u8 *p, *pn;

	if (bc->lastrx == idx) {
		bc->rxstate |= RX_OVERRUN;
		pr_info("%s: B%1d overrun at idx %d\n", card->name,
			bc->bch.nr, idx);
	}
	bc->lastrx = idx;
	if (test_bit(FLG_RX_OFF, &bc->bch.Flags)) {
		bc->bch.dropcnt += cnt;
		return;
	}
	stat = bchannel_get_rxbuf(&bc->bch, cnt);
	/* only transparent mode uses the count here; HDLC overrun is detected later */
	if (stat == -ENOMEM) {
		pr_warn("%s.B%d: No memory for %d bytes\n",
			card->name, bc->bch.nr, cnt);
		return;
	}
	if (test_bit(FLG_TRANSPARENT, &bc->bch.Flags))
		p = skb_put(bc->bch.rx_skb, cnt);
	else
		p = bc->hrbuf;

	for (i = 0; i < cnt; i++) {
		val = card->recv.start[idx++];
		if (bc->bch.nr & 2)
			val >>= 8;
		if (idx >= card->recv.size)
			idx = 0;
		p[i] = val & 0xff;
	}

	if (test_bit(FLG_TRANSPARENT, &bc->bch.Flags)) {
		recv_Bchannel(&bc->bch, 0, false);
		return;
	}

	pn = bc->hrbuf;
	while (cnt > 0) {
		stat = isdnhdlc_decode(&bc->hrecv, pn, cnt, &i,
				       bc->bch.rx_skb->data, bc->bch.maxlen);
		if (stat > 0) { /* valid frame received */
			p = skb_put(bc->bch.rx_skb, stat);
			if (debug & DEBUG_HW_BFIFO) {
				snprintf(card->log, LOG_SIZE,
					 "B%1d-recv %s %d ", bc->bch.nr,
					 card->name, stat);
				print_hex_dump_bytes(card->log,
						     DUMP_PREFIX_OFFSET, p,
						     stat);
			}
			recv_Bchannel(&bc->bch, 0, false);
			stat = bchannel_get_rxbuf(&bc->bch, bc->bch.maxlen);
			if (stat < 0) {
				pr_warn("%s.B%d: No memory for %d bytes\n",
					card->name, bc->bch.nr, cnt);
				return;
			}
		} else if (stat == -HDLC_CRC_ERROR) {
			pr_info("%s: B%1d receive frame CRC error\n",
				card->name, bc->bch.nr);
		} else if (stat == -HDLC_FRAMING_ERROR) {
			pr_info("%s: B%1d receive framing error\n",
				card->name, bc->bch.nr);
		} else if (stat == -HDLC_LENGTH_ERROR) {
			pr_info("%s: B%1d receive frame too long (> %d)\n",
				card->name, bc->bch.nr, bc->bch.maxlen);
		}
		pn += i;
		cnt -= i;
	}
}
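
/*
 * Each receive DMA interrupt signals that one half of the receive ring has
 * been filled; idx marks the half that just completed and read_dma() pulls
 * size/2 samples from the ring for every active channel.
 */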
static void
recv_tiger(struct tiger_hw *card, u8 irq_stat)
{
	u32 idx;
	int cnt = card->recv.size / 2;

	/* Note receive is via the WRITE DMA channel */
	card->last_is0 &= ~NJ_IRQM0_WR_MASK;
	card->last_is0 |= (irq_stat & NJ_IRQM0_WR_MASK);

	if (irq_stat & NJ_IRQM0_WR_END)
		idx = cnt - 1;
	else
		idx = card->recv.size - 1;

	if (test_bit(FLG_ACTIVE, &card->bc[0].bch.Flags))
		read_dma(&card->bc[0], idx, cnt);
	if (test_bit(FLG_ACTIVE, &card->bc[1].bch.Flags))
		read_dma(&card->bc[1], idx, cnt);
}
/* sync with current DMA address at start or after exception */
static void
resync(struct tiger_ch *bc, struct tiger_hw *card)
{
	card->send.dmacur = inl(card->base | NJ_DMA_READ_ADR);
	card->send.idx = (card->send.dmacur - card->send.dmastart) >> 2;
	if (bc->free > card->send.size / 2)
		bc->free = card->send.size / 2;
	/* currently we simply sync to the next completely free area;
	 * this has the advantage that we always have the maximum time to
	 * handle the TX irq
	 */
	if (card->send.idx < ((card->send.size / 2) - 1))
		bc->idx = (card->recv.size / 2) - 1;
	else
		bc->idx = card->recv.size - 1;
	bc->txstate = TX_RUN;
	pr_debug("%s: %s B%1d free %d idx %d/%d\n", card->name,
		 __func__, bc->bch.nr, bc->free, bc->idx, card->send.idx);
}
static int bc_next_frame(struct tiger_ch *);

static void
fill_hdlc_flag(struct tiger_ch *bc)
{
	struct tiger_hw *card = bc->bch.hw;
	int count, i;
	u32 m, v;
	u8 *p;

	if (bc->free == 0)
		return;
	pr_debug("%s: %s B%1d %d state %x idx %d/%d\n", card->name,
		 __func__, bc->bch.nr, bc->free, bc->txstate,
		 bc->idx, card->send.idx);
	if (bc->txstate & (TX_IDLE | TX_INIT | TX_UNDERRUN))
		resync(bc, card);
	count = isdnhdlc_encode(&bc->hsend, NULL, 0, &i,
				bc->hsbuf, bc->free);
	pr_debug("%s: B%1d hdlc encoded %d flags\n", card->name,
		 bc->bch.nr, count);
	bc->free -= count;
	p = bc->hsbuf;
	m = (bc->bch.nr & 1) ? 0xffffff00 : 0xffff00ff;
	for (i = 0; i < count; i++) {
		if (bc->idx >= card->send.size)
			bc->idx = 0;
		v = card->send.start[bc->idx];
		v &= m;
		v |= (bc->bch.nr & 1) ? (u32)(p[i]) : ((u32)(p[i])) << 8;
		card->send.start[bc->idx++] = v;
	}
	if (debug & DEBUG_HW_BFIFO) {
		snprintf(card->log, LOG_SIZE, "B%1d-send %s %d ",
			 bc->bch.nr, card->name, count);
		print_hex_dump_bytes(card->log, DUMP_PREFIX_OFFSET, p, count);
	}
}
static void
fill_dma(struct tiger_ch *bc)
{
	struct tiger_hw *card = bc->bch.hw;
	int count, i, fillempty = 0;
	u32 m, v, n = 0;
	u8 *p;

	if (bc->free == 0)
		return;
	if (!bc->bch.tx_skb) {
		if (!test_bit(FLG_TX_EMPTY, &bc->bch.Flags))
			return;
		fillempty = 1;
		count = card->send.size >> 1;
		p = bc->bch.fill;
	} else {
		count = bc->bch.tx_skb->len - bc->bch.tx_idx;
		if (count <= 0)
			return;
		pr_debug("%s: %s B%1d %d/%d/%d/%d state %x idx %d/%d\n",
			 card->name, __func__, bc->bch.nr, count, bc->free,
			 bc->bch.tx_idx, bc->bch.tx_skb->len, bc->txstate,
			 bc->idx, card->send.idx);
		p = bc->bch.tx_skb->data + bc->bch.tx_idx;
	}
	if (bc->txstate & (TX_IDLE | TX_INIT | TX_UNDERRUN))
		resync(bc, card);
	if (test_bit(FLG_HDLC, &bc->bch.Flags) && !fillempty) {
		count = isdnhdlc_encode(&bc->hsend, p, count, &i,
					bc->hsbuf, bc->free);
		pr_debug("%s: B%1d hdlc encoded %d in %d\n", card->name,
			 bc->bch.nr, i, count);
		bc->bch.tx_idx += i;
		bc->free -= count;
		p = bc->hsbuf;
	} else {
		if (count > bc->free)
			count = bc->free;
		if (!fillempty)
			bc->bch.tx_idx += count;
		bc->free -= count;
	}
	m = (bc->bch.nr & 1) ? 0xffffff00 : 0xffff00ff;
	if (fillempty) {
		n = p[0];
		if (!(bc->bch.nr & 1))
			n <<= 8;
		for (i = 0; i < count; i++) {
			if (bc->idx >= card->send.size)
				bc->idx = 0;
			v = card->send.start[bc->idx];
			v &= m;
			v |= n;
			card->send.start[bc->idx++] = v;
		}
	} else {
		for (i = 0; i < count; i++) {
			if (bc->idx >= card->send.size)
				bc->idx = 0;
			v = card->send.start[bc->idx];
			v &= m;
			n = p[i];
			v |= (bc->bch.nr & 1) ? n : n << 8;
			card->send.start[bc->idx++] = v;
		}
	}
	if (debug & DEBUG_HW_BFIFO) {
		snprintf(card->log, LOG_SIZE, "B%1d-send %s %d ",
			 bc->bch.nr, card->name, count);
		print_hex_dump_bytes(card->log, DUMP_PREFIX_OFFSET, p, count);
	}
	if (bc->free)
		bc_next_frame(bc);
}
static int
bc_next_frame(struct tiger_ch *bc)
{
	int ret = 1;

	if (bc->bch.tx_skb && bc->bch.tx_idx < bc->bch.tx_skb->len) {
		fill_dma(bc);
	} else {
		dev_kfree_skb(bc->bch.tx_skb);
		if (get_next_bframe(&bc->bch)) {
			fill_dma(bc);
			test_and_clear_bit(FLG_TX_EMPTY, &bc->bch.Flags);
		} else if (test_bit(FLG_TX_EMPTY, &bc->bch.Flags)) {
			fill_dma(bc);
		} else if (test_bit(FLG_FILLEMPTY, &bc->bch.Flags)) {
			test_and_set_bit(FLG_TX_EMPTY, &bc->bch.Flags);
			ret = 0;
		} else {
			ret = 0;
		}
	}
	return ret;
}
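
/*
 * Each transmit DMA interrupt means the controller has played out one half
 * of the send ring, so size/2 words become free again; if no new payload is
 * queued, the freed area is refilled with HDLC flags or idle (0xff) bytes.
 */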
static void
send_tiger_bc(struct tiger_hw *card, struct tiger_ch *bc)
{
	int ret;

	bc->free += card->send.size / 2;
	if (bc->free >= card->send.size) {
		if (!(bc->txstate & (TX_UNDERRUN | TX_INIT))) {
			pr_info("%s: B%1d TX underrun state %x\n", card->name,
				bc->bch.nr, bc->txstate);
			bc->txstate |= TX_UNDERRUN;
		}
		bc->free = card->send.size;
	}
	ret = bc_next_frame(bc);
	if (!ret) {
		if (test_bit(FLG_HDLC, &bc->bch.Flags)) {
			fill_hdlc_flag(bc);
			return;
		}
		pr_debug("%s: B%1d TX no data free %d idx %d/%d\n", card->name,
			 bc->bch.nr, bc->free, bc->idx, card->send.idx);
		if (!(bc->txstate & (TX_IDLE | TX_INIT))) {
			fill_mem(bc, bc->idx, bc->free, 0xff);
			if (bc->free == card->send.size)
				bc->txstate |= TX_IDLE;
		}
	}
}
static void
send_tiger(struct tiger_hw *card, u8 irq_stat)
{
	int i;

	/* Note send is via the READ DMA channel */
	if ((irq_stat & card->last_is0) & NJ_IRQM0_RD_MASK) {
		pr_info("%s: tiger warn write double dma %x/%x\n",
			card->name, irq_stat, card->last_is0);
		return;
	} else {
		card->last_is0 &= ~NJ_IRQM0_RD_MASK;
		card->last_is0 |= (irq_stat & NJ_IRQM0_RD_MASK);
	}
	for (i = 0; i < 2; i++) {
		if (test_bit(FLG_ACTIVE, &card->bc[i].bch.Flags))
			send_tiger_bc(card, &card->bc[i]);
	}
}
static irqreturn_t
nj_irq(int intno, void *dev_id)
{
	struct tiger_hw *card = dev_id;
	u8 val, s1val, s0val;

	spin_lock(&card->lock);
	s0val = inb(card->base | NJ_IRQSTAT0);
	s1val = inb(card->base | NJ_IRQSTAT1);
	if ((s1val & NJ_ISACIRQ) && (s0val == 0)) {
		/* shared IRQ */
		spin_unlock(&card->lock);
		return IRQ_NONE;
	}
	pr_debug("%s: IRQSTAT0 %02x IRQSTAT1 %02x\n", card->name, s0val, s1val);
	card->irqcnt++;
	if (!(s1val & NJ_ISACIRQ)) {
		val = ReadISAC_nj(card, ISAC_ISTA);
		if (val)
			mISDNisac_irq(&card->isac, val);
	}

	if (s0val)
		/* write to clear */
		outb(s0val, card->base | NJ_IRQSTAT0);
	else
		goto end;
	s1val = s0val;
	/* set bits in s0val to indicate which page is free */
	card->recv.dmacur = inl(card->base | NJ_DMA_WRITE_ADR);
	card->recv.idx = (card->recv.dmacur - card->recv.dmastart) >> 2;
	if (card->recv.dmacur < card->recv.dmairq)
		s0val = 0x08;	/* the 2nd write area is free */
	else
		s0val = 0x04;	/* the 1st write area is free */

	card->send.dmacur = inl(card->base | NJ_DMA_READ_ADR);
	card->send.idx = (card->send.dmacur - card->send.dmastart) >> 2;
	if (card->send.dmacur < card->send.dmairq)
		s0val |= 0x02;	/* the 2nd read area is free */
	else
		s0val |= 0x01;	/* the 1st read area is free */

	pr_debug("%s: DMA Status %02x/%02x/%02x %d/%d\n", card->name,
		 s1val, s0val, card->last_is0,
		 card->recv.idx, card->send.idx);
	/* test if we have a DMA interrupt */
	if (s0val != card->last_is0) {
		if ((s0val & NJ_IRQM0_RD_MASK) !=
		    (card->last_is0 & NJ_IRQM0_RD_MASK))
			/* read DMA changed - service the send direction */
			send_tiger(card, s0val);
		if ((s0val & NJ_IRQM0_WR_MASK) !=
		    (card->last_is0 & NJ_IRQM0_WR_MASK))
			/* write DMA changed - service the receive direction */
			recv_tiger(card, s0val);
	}
end:
	spin_unlock(&card->lock);
	return IRQ_HANDLED;
}
static int
nj_l2l1B(struct mISDNchannel *ch, struct sk_buff *skb)
{
	int ret = -EINVAL;
	struct bchannel *bch = container_of(ch, struct bchannel, ch);
	struct tiger_ch *bc = container_of(bch, struct tiger_ch, bch);
	struct tiger_hw *card = bch->hw;
	struct mISDNhead *hh = mISDN_HEAD_P(skb);
	unsigned long flags;

	switch (hh->prim) {
	case PH_DATA_REQ:
		spin_lock_irqsave(&card->lock, flags);
		ret = bchannel_senddata(bch, skb);
		if (ret > 0) { /* direct TX */
			fill_dma(bc);
			ret = 0;
		}
		spin_unlock_irqrestore(&card->lock, flags);
		return ret;
	case PH_ACTIVATE_REQ:
		spin_lock_irqsave(&card->lock, flags);
		if (!test_and_set_bit(FLG_ACTIVE, &bch->Flags))
			ret = mode_tiger(bc, ch->protocol);
		else
			ret = 0;
		spin_unlock_irqrestore(&card->lock, flags);
		if (!ret)
			_queue_data(ch, PH_ACTIVATE_IND, MISDN_ID_ANY, 0,
				    NULL, GFP_KERNEL);
		break;
	case PH_DEACTIVATE_REQ:
		spin_lock_irqsave(&card->lock, flags);
		mISDN_clear_bchannel(bch);
		mode_tiger(bc, ISDN_P_NONE);
		spin_unlock_irqrestore(&card->lock, flags);
		_queue_data(ch, PH_DEACTIVATE_IND, MISDN_ID_ANY, 0,
			    NULL, GFP_KERNEL);
		ret = 0;
		break;
	}
	if (!ret)
		dev_kfree_skb(skb);
	return ret;
}
static int
channel_bctrl(struct tiger_ch *bc, struct mISDN_ctrl_req *cq)
{
	return mISDN_ctrl_bchannel(&bc->bch, cq);
}

static int
nj_bctrl(struct mISDNchannel *ch, u32 cmd, void *arg)
{
	struct bchannel *bch = container_of(ch, struct bchannel, ch);
	struct tiger_ch *bc = container_of(bch, struct tiger_ch, bch);
	struct tiger_hw *card = bch->hw;
	int ret = -EINVAL;
	u_long flags;

	pr_debug("%s: %s cmd:%x %p\n", card->name, __func__, cmd, arg);
	switch (cmd) {
	case CLOSE_CHANNEL:
		test_and_clear_bit(FLG_OPEN, &bch->Flags);
		cancel_work_sync(&bch->workq);
		spin_lock_irqsave(&card->lock, flags);
		mISDN_clear_bchannel(bch);
		mode_tiger(bc, ISDN_P_NONE);
		spin_unlock_irqrestore(&card->lock, flags);
		ch->protocol = ISDN_P_NONE;
		ch->peer = NULL;
		module_put(THIS_MODULE);
		ret = 0;
		break;
	case CONTROL_CHANNEL:
		ret = channel_bctrl(bc, arg);
		break;
	default:
		pr_info("%s: %s unknown prim(%x)\n", card->name, __func__, cmd);
	}
	return ret;
}
static int
channel_ctrl(struct tiger_hw *card, struct mISDN_ctrl_req *cq)
{
	int ret = 0;

	switch (cq->op) {
	case MISDN_CTRL_GETOP:
		cq->op = MISDN_CTRL_LOOP | MISDN_CTRL_L1_TIMER3;
		break;
	case MISDN_CTRL_LOOP:
		/* cq->channel: 0 disable, 1 B1 loop, 2 B2 loop, 3 both */
		if (cq->channel < 0 || cq->channel > 3) {
			ret = -EINVAL;
			break;
		}
		ret = card->isac.ctrl(&card->isac, HW_TESTLOOP, cq->channel);
		break;
	case MISDN_CTRL_L1_TIMER3:
		ret = card->isac.ctrl(&card->isac, HW_TIMER3_VALUE, cq->p1);
		break;
	default:
		pr_info("%s: %s unknown Op %x\n", card->name, __func__, cq->op);
		ret = -EINVAL;
		break;
	}
	return ret;
}
static int
open_bchannel(struct tiger_hw *card, struct channel_req *rq)
{
	struct bchannel *bch;

	if (rq->adr.channel == 0 || rq->adr.channel > 2)
		return -EINVAL;
	if (rq->protocol == ISDN_P_NONE)
		return -EINVAL;
	bch = &card->bc[rq->adr.channel - 1].bch;
	if (test_and_set_bit(FLG_OPEN, &bch->Flags))
		return -EBUSY; /* b-channel can only be opened once */
	test_and_clear_bit(FLG_FILLEMPTY, &bch->Flags);
	bch->ch.protocol = rq->protocol;
	rq->ch = &bch->ch;
	return 0;
}

/*
 * device control function
 */
static int
nj_dctrl(struct mISDNchannel *ch, u32 cmd, void *arg)
{
	struct mISDNdevice *dev = container_of(ch, struct mISDNdevice, D);
	struct dchannel *dch = container_of(dev, struct dchannel, dev);
	struct tiger_hw *card = dch->hw;
	struct channel_req *rq;
	int err = 0;

	pr_debug("%s: %s cmd:%x %p\n", card->name, __func__, cmd, arg);
	switch (cmd) {
	case OPEN_CHANNEL:
		rq = arg;
		if (rq->protocol == ISDN_P_TE_S0)
			err = card->isac.open(&card->isac, rq);
		else
			err = open_bchannel(card, rq);
		if (err)
			break;
		if (!try_module_get(THIS_MODULE))
			pr_info("%s: cannot get module\n", card->name);
		break;
	case CLOSE_CHANNEL:
		pr_debug("%s: dev(%d) close from %p\n", card->name, dch->dev.id,
			 __builtin_return_address(0));
		module_put(THIS_MODULE);
		break;
	case CONTROL_CHANNEL:
		err = channel_ctrl(card, arg);
		break;
	default:
		pr_debug("%s: %s unknown command %x\n",
			 card->name, __func__, cmd);
		return -EINVAL;
	}
	return err;
}
static int
nj_init_card(struct tiger_hw *card)
{
	u_long flags;
	int ret;

	spin_lock_irqsave(&card->lock, flags);
	nj_disable_hwirq(card);
	spin_unlock_irqrestore(&card->lock, flags);

	card->irq = card->pdev->irq;
	if (request_irq(card->irq, nj_irq, IRQF_SHARED, card->name, card)) {
		pr_info("%s: couldn't get interrupt %d\n",
			card->name, card->irq);
		card->irq = -1;
		return -EIO;
	}

	spin_lock_irqsave(&card->lock, flags);
	nj_reset(card);
	ret = card->isac.init(&card->isac);
	if (ret)
		goto error;
	ret = inittiger(card);
	if (ret)
		goto error;
	mode_tiger(&card->bc[0], ISDN_P_NONE);
	mode_tiger(&card->bc[1], ISDN_P_NONE);
error:
	spin_unlock_irqrestore(&card->lock, flags);
	return ret;
}
static void
nj_release(struct tiger_hw *card)
{
	u_long flags;
	int i;

	if (card->base_s) {
		spin_lock_irqsave(&card->lock, flags);
		nj_disable_hwirq(card);
		mode_tiger(&card->bc[0], ISDN_P_NONE);
		mode_tiger(&card->bc[1], ISDN_P_NONE);
		spin_unlock_irqrestore(&card->lock, flags);
		card->isac.release(&card->isac);
		release_region(card->base, card->base_s);
		card->base_s = 0;
	}
	if (card->irq > 0)
		free_irq(card->irq, card);
	if (device_is_registered(&card->isac.dch.dev.dev))
		mISDN_unregister_device(&card->isac.dch.dev);

	for (i = 0; i < 2; i++) {
		mISDN_freebchannel(&card->bc[i].bch);
		kfree(card->bc[i].hsbuf);
		kfree(card->bc[i].hrbuf);
	}
	if (card->dma_p)
		dma_free_coherent(&card->pdev->dev, NJ_DMA_SIZE, card->dma_p,
				  card->dma);
	write_lock_irqsave(&card_lock, flags);
	list_del(&card->list);
	write_unlock_irqrestore(&card_lock, flags);
	pci_clear_master(card->pdev);
	pci_disable_device(card->pdev);
	pci_set_drvdata(card->pdev, NULL);
	kfree(card);
}
static int
nj_setup(struct tiger_hw *card)
{
	card->base = pci_resource_start(card->pdev, 0);
	card->base_s = pci_resource_len(card->pdev, 0);
	if (!request_region(card->base, card->base_s, card->name)) {
		pr_info("%s: NETjet config port %#x-%#x already in use\n",
			card->name, card->base,
			(u32)(card->base + card->base_s - 1));
		card->base_s = 0;
		return -EIO;
	}
	ASSIGN_FUNC(nj, ISAC, card->isac);
	return 0;
}
static int
setup_instance(struct tiger_hw *card)
{
	int i, err;
	u_long flags;

	snprintf(card->name, MISDN_MAX_IDLEN - 1, "netjet.%d", nj_cnt + 1);
	write_lock_irqsave(&card_lock, flags);
	list_add_tail(&card->list, &Cards);
	write_unlock_irqrestore(&card_lock, flags);

	_set_debug(card);
	card->isac.name = card->name;
	spin_lock_init(&card->lock);
	card->isac.hwlock = &card->lock;
	mISDNisac_init(&card->isac, card);

	card->isac.dch.dev.Bprotocols = (1 << (ISDN_P_B_RAW & ISDN_P_B_MASK)) |
		(1 << (ISDN_P_B_HDLC & ISDN_P_B_MASK));
	card->isac.dch.dev.D.ctrl = nj_dctrl;
	for (i = 0; i < 2; i++) {
		card->bc[i].bch.nr = i + 1;
		set_channelmap(i + 1, card->isac.dch.dev.channelmap);
		mISDN_initbchannel(&card->bc[i].bch, MAX_DATA_MEM,
				   NJ_DMA_RXSIZE >> 1);
		card->bc[i].bch.hw = card;
		card->bc[i].bch.ch.send = nj_l2l1B;
		card->bc[i].bch.ch.ctrl = nj_bctrl;
		card->bc[i].bch.ch.nr = i + 1;
		list_add(&card->bc[i].bch.ch.list,
			 &card->isac.dch.dev.bchannels);
		card->bc[i].bch.hw = card;
	}
	err = nj_setup(card);
	if (err)
		goto error;
	err = mISDN_register_device(&card->isac.dch.dev, &card->pdev->dev,
				    card->name);
	if (err)
		goto error;
	err = nj_init_card(card);
	if (!err) {
		nj_cnt++;
		pr_notice("Netjet %d cards installed\n", nj_cnt);
		return 0;
	}
error:
	nj_release(card);
	return err;
}
static int
nj_probe(struct pci_dev *pdev, const struct pci_device_id *ent)
{
	int err = -ENOMEM;
	u32 cfg;
	struct tiger_hw *card;

	if (pdev->subsystem_vendor == 0x8086 &&
	    pdev->subsystem_device == 0x0003) {
		pr_notice("Netjet: Digium X100P/X101P not handled\n");
		return -ENODEV;
	}
	if (pdev->subsystem_vendor == 0x55 &&
	    pdev->subsystem_device == 0x02) {
		pr_notice("Netjet: Enter!Now not handled yet\n");
		return -ENODEV;
	}
	if (pdev->subsystem_vendor == 0xb100 &&
	    pdev->subsystem_device == 0x0003) {
		pr_notice("Netjet: Digium TDM400P not handled yet\n");
		return -ENODEV;
	}
	card = kzalloc(sizeof(struct tiger_hw), GFP_KERNEL);
	if (!card) {
		pr_info("No kmem for Netjet\n");
		return err;
	}
	card->pdev = pdev;

	err = pci_enable_device(pdev);
	if (err) {
		kfree(card);
		return err;
	}

	printk(KERN_INFO "nj_probe(mISDN): found adapter at %s\n",
	       pci_name(pdev));

	pci_set_master(pdev);

	/* the TJ300 and TJ320 must be detected, the IRQ handling is different
	 * unfortunately the chips use the same device ID, but the TJ320 has
	 * the bit20 in status PCI cfg register set
	 */
	pci_read_config_dword(pdev, 0x04, &cfg);
	if (cfg & 0x00100000)
		card->typ = NETJET_S_TJ320;
	else
		card->typ = NETJET_S_TJ300;

	card->base = pci_resource_start(pdev, 0);
	pci_set_drvdata(pdev, card);
	err = setup_instance(card);
	if (err)
		pci_set_drvdata(pdev, NULL);
	return err;
}
static void nj_remove(struct pci_dev *pdev)
{
	struct tiger_hw *card = pci_get_drvdata(pdev);

	if (card)
		nj_release(card);
	else
		pr_info("%s drvdata already removed\n", __func__);
}
/* We cannot select cards with PCI_SUB... IDs, since there are cards with
 * SUB IDs set to PCI_ANY_ID, so we need to match all and reject the known
 * other cards which do not work with this driver - see the probe function */
static const struct pci_device_id nj_pci_ids[] = {
	{ PCI_VENDOR_ID_TIGERJET, PCI_DEVICE_ID_TIGERJET_300,
	  PCI_ANY_ID, PCI_ANY_ID, 0, 0, 0},
	{ }
};
MODULE_DEVICE_TABLE(pci, nj_pci_ids);

static struct pci_driver nj_driver = {
	.name = "netjet",
	.probe = nj_probe,
	.remove = nj_remove,
	.id_table = nj_pci_ids,
};
static int __init nj_init(void)
{
	int err;

	pr_notice("Netjet PCI driver Rev. %s\n", NETJET_REV);
	err = pci_register_driver(&nj_driver);
	return err;
}

static void __exit nj_cleanup(void)
{
	pci_unregister_driver(&nj_driver);
}

module_init(nj_init);
module_exit(nj_cleanup);