// SPDX-License-Identifier: GPL-2.0-only
/*
 * Author	Karsten Keil <[email protected]>
 *
 * Copyright 2008 by Karsten Keil <[email protected]>
 */

#include <linux/gfp.h>
#include <linux/module.h>
#include <linux/mISDNhw.h>

static void
dchannel_bh(struct work_struct *ws)
{
	struct dchannel *dch = container_of(ws, struct dchannel, workq);
	struct sk_buff *skb;
	int err;

	if (test_and_clear_bit(FLG_RECVQUEUE, &dch->Flags)) {
		while ((skb = skb_dequeue(&dch->rqueue))) {
			if (likely(dch->dev.D.peer)) {
				err = dch->dev.D.recv(dch->dev.D.peer, skb);
				if (err)
					dev_kfree_skb(skb);
			} else
				dev_kfree_skb(skb);
		}
	}
	if (test_and_clear_bit(FLG_PHCHANGE, &dch->Flags)) {
		if (dch->phfunc)
			dch->phfunc(dch);
	}
}

static void
bchannel_bh(struct work_struct *ws)
{
	struct bchannel *bch = container_of(ws, struct bchannel, workq);
	struct sk_buff *skb;
	int err;

	if (test_and_clear_bit(FLG_RECVQUEUE, &bch->Flags)) {
		while ((skb = skb_dequeue(&bch->rqueue))) {
			bch->rcount--;
			if (likely(bch->ch.peer)) {
				err = bch->ch.recv(bch->ch.peer, skb);
				if (err)
					dev_kfree_skb(skb);
			} else
				dev_kfree_skb(skb);
		}
	}
}

int
mISDN_initdchannel(struct dchannel *ch, int maxlen, void *phf)
{
	test_and_set_bit(FLG_HDLC, &ch->Flags);
	ch->maxlen = maxlen;
	ch->hw = NULL;
	ch->rx_skb = NULL;
	ch->tx_skb = NULL;
	ch->tx_idx = 0;
	ch->phfunc = phf;
	skb_queue_head_init(&ch->squeue);
	skb_queue_head_init(&ch->rqueue);
	INIT_LIST_HEAD(&ch->dev.bchannels);
	INIT_WORK(&ch->workq, dchannel_bh);
	return 0;
}
EXPORT_SYMBOL(mISDN_initdchannel);

int
mISDN_initbchannel(struct bchannel *ch, unsigned short maxlen,
		   unsigned short minlen)
{
	ch->Flags = 0;
	ch->minlen = minlen;
	ch->next_minlen = minlen;
	ch->init_minlen = minlen;
	ch->maxlen = maxlen;
	ch->next_maxlen = maxlen;
	ch->init_maxlen = maxlen;
	ch->hw = NULL;
	ch->rx_skb = NULL;
	ch->tx_skb = NULL;
	ch->tx_idx = 0;
	skb_queue_head_init(&ch->rqueue);
	ch->rcount = 0;
	ch->next_skb = NULL;
	INIT_WORK(&ch->workq, bchannel_bh);
	return 0;
}
EXPORT_SYMBOL(mISDN_initbchannel);

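/*
 * Usage sketch (assumption, not part of this file): a hypothetical card
 * driver calls the init helpers once per channel during setup.
 * "struct hypo_card" and "hypo_ph_state" are illustrative names, not
 * mISDN API; the block is guarded out so it is never built.
 */
#if 0	/* illustrative only */
struct hypo_card {			/* minimal hypothetical driver state */
	struct dchannel	dch;
	struct bchannel	bch;
	spinlock_t	lock;
};

static int
hypo_setup_channels(struct hypo_card *card)
{
	int err;

	err = mISDN_initdchannel(&card->dch, MAX_DFRAME_LEN_L1,
				 hypo_ph_state);
	if (err)
		return err;
	/* transparent audio: deliver at least 64, at most 256 bytes per skb */
	return mISDN_initbchannel(&card->bch, 256, 64);
}
#endif
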
int
mISDN_freedchannel(struct dchannel *ch)
{
	if (ch->tx_skb) {
		dev_kfree_skb(ch->tx_skb);
		ch->tx_skb = NULL;
	}
	if (ch->rx_skb) {
		dev_kfree_skb(ch->rx_skb);
		ch->rx_skb = NULL;
	}
	skb_queue_purge(&ch->squeue);
	skb_queue_purge(&ch->rqueue);
	flush_work(&ch->workq);
	return 0;
}
EXPORT_SYMBOL(mISDN_freedchannel);

void
mISDN_clear_bchannel(struct bchannel *ch)
{
	if (ch->tx_skb) {
		dev_kfree_skb(ch->tx_skb);
		ch->tx_skb = NULL;
	}
	ch->tx_idx = 0;
	if (ch->rx_skb) {
		dev_kfree_skb(ch->rx_skb);
		ch->rx_skb = NULL;
	}
	if (ch->next_skb) {
		dev_kfree_skb(ch->next_skb);
		ch->next_skb = NULL;
	}
	test_and_clear_bit(FLG_TX_BUSY, &ch->Flags);
	test_and_clear_bit(FLG_TX_NEXT, &ch->Flags);
	test_and_clear_bit(FLG_ACTIVE, &ch->Flags);
	test_and_clear_bit(FLG_FILLEMPTY, &ch->Flags);
	test_and_clear_bit(FLG_TX_EMPTY, &ch->Flags);
	test_and_clear_bit(FLG_RX_OFF, &ch->Flags);
	ch->dropcnt = 0;
	ch->minlen = ch->init_minlen;
	ch->next_minlen = ch->init_minlen;
	ch->maxlen = ch->init_maxlen;
	ch->next_maxlen = ch->init_maxlen;
	skb_queue_purge(&ch->rqueue);
	ch->rcount = 0;
}
EXPORT_SYMBOL(mISDN_clear_bchannel);

void
mISDN_freebchannel(struct bchannel *ch)
{
	cancel_work_sync(&ch->workq);
	mISDN_clear_bchannel(ch);
}
EXPORT_SYMBOL(mISDN_freebchannel);

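/*
 * Note: mISDN_clear_bchannel() only resets the channel's runtime state so
 * it can be reused (e.g. after deactivation); mISDN_freebchannel()
 * additionally cancels the pending work item and is meant for teardown.
 */
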
int
mISDN_ctrl_bchannel(struct bchannel *bch, struct mISDN_ctrl_req *cq)
{
	int ret = 0;

	switch (cq->op) {
	case MISDN_CTRL_GETOP:
		cq->op = MISDN_CTRL_RX_BUFFER | MISDN_CTRL_FILL_EMPTY |
			 MISDN_CTRL_RX_OFF;
		break;
	case MISDN_CTRL_FILL_EMPTY:
		if (cq->p1) {
			memset(bch->fill, cq->p2 & 0xff, MISDN_BCH_FILL_SIZE);
			test_and_set_bit(FLG_FILLEMPTY, &bch->Flags);
		} else {
			test_and_clear_bit(FLG_FILLEMPTY, &bch->Flags);
		}
		break;
	case MISDN_CTRL_RX_OFF:
		/* read back dropped byte count */
		cq->p2 = bch->dropcnt;
		if (cq->p1)
			test_and_set_bit(FLG_RX_OFF, &bch->Flags);
		else
			test_and_clear_bit(FLG_RX_OFF, &bch->Flags);
		bch->dropcnt = 0;
		break;
	case MISDN_CTRL_RX_BUFFER:
		if (cq->p2 > MISDN_CTRL_RX_SIZE_IGNORE)
			bch->next_maxlen = cq->p2;
		if (cq->p1 > MISDN_CTRL_RX_SIZE_IGNORE)
			bch->next_minlen = cq->p1;
		/* we return the old values */
		cq->p1 = bch->minlen;
		cq->p2 = bch->maxlen;
		break;
	default:
		pr_info("mISDN: unhandled control operation %x\n", cq->op);
		ret = -EINVAL;
		break;
	}
	return ret;
}
EXPORT_SYMBOL(mISDN_ctrl_bchannel);

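/*
 * Usage sketch (assumption, not part of this file): drivers typically
 * handle card-specific MISDN_CTRL_* operations themselves and delegate
 * the generic ones to mISDN_ctrl_bchannel(). "hypo_bctrl" is an
 * illustrative name; the block is guarded out so it is never built.
 */
#if 0	/* illustrative only */
static int
hypo_bctrl(struct bchannel *bch, struct mISDN_ctrl_req *cq)
{
	/* no card-specific operations here, fall through to the generic set */
	return mISDN_ctrl_bchannel(bch, cq);
}
#endif
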
/* pack the LAPD address: SAPI in bits 0-7, TEI in bits 8-15 */
static inline u_int
get_sapi_tei(u_char *p)
{
	u_int sapi, tei;

	sapi = *p >> 2;
	tei = p[1] >> 1;
	return sapi | (tei << 8);
}

void
recv_Dchannel(struct dchannel *dch)
{
	struct mISDNhead *hh;

	if (dch->rx_skb->len < 2) { /* at least 2 for sapi / tei */
		dev_kfree_skb(dch->rx_skb);
		dch->rx_skb = NULL;
		return;
	}
	hh = mISDN_HEAD_P(dch->rx_skb);
	hh->prim = PH_DATA_IND;
	hh->id = get_sapi_tei(dch->rx_skb->data);
	skb_queue_tail(&dch->rqueue, dch->rx_skb);
	dch->rx_skb = NULL;
	schedule_event(dch, FLG_RECVQUEUE);
}
EXPORT_SYMBOL(recv_Dchannel);

void
recv_Echannel(struct dchannel *ech, struct dchannel *dch)
{
	struct mISDNhead *hh;

	if (ech->rx_skb->len < 2) { /* at least 2 for sapi / tei */
		dev_kfree_skb(ech->rx_skb);
		ech->rx_skb = NULL;
		return;
	}
	hh = mISDN_HEAD_P(ech->rx_skb);
	hh->prim = PH_DATA_E_IND;
	hh->id = get_sapi_tei(ech->rx_skb->data);
	/* E-channel frames are delivered via the D-channel's receive queue */
	skb_queue_tail(&dch->rqueue, ech->rx_skb);
	ech->rx_skb = NULL;
	schedule_event(dch, FLG_RECVQUEUE);
}
EXPORT_SYMBOL(recv_Echannel);

void
recv_Bchannel(struct bchannel *bch, unsigned int id, bool force)
{
	struct mISDNhead *hh;

	/* upper layers may still call us even if the allocation failed */
	if (unlikely(!bch->rx_skb))
		return;
	if (unlikely(!bch->rx_skb->len)) {
		/* nothing was received - this may happen after recovery
		 * from an overflow or a too small allocation.
		 * We need to free the buffer here */
		dev_kfree_skb(bch->rx_skb);
		bch->rx_skb = NULL;
	} else {
		if (test_bit(FLG_TRANSPARENT, &bch->Flags) &&
		    (bch->rx_skb->len < bch->minlen) && !force)
			return;
		hh = mISDN_HEAD_P(bch->rx_skb);
		hh->prim = PH_DATA_IND;
		hh->id = id;
		if (bch->rcount >= 64) {
			printk(KERN_WARNING
			       "B%d receive queue overflow - flushing!\n",
			       bch->nr);
			skb_queue_purge(&bch->rqueue);
			/* keep the counter in sync with the emptied queue */
			bch->rcount = 0;
		}
		bch->rcount++;
		skb_queue_tail(&bch->rqueue, bch->rx_skb);
		bch->rx_skb = NULL;
		schedule_event(bch, FLG_RECVQUEUE);
	}
}
EXPORT_SYMBOL(recv_Bchannel);

void
recv_Dchannel_skb(struct dchannel *dch, struct sk_buff *skb)
{
	skb_queue_tail(&dch->rqueue, skb);
	schedule_event(dch, FLG_RECVQUEUE);
}
EXPORT_SYMBOL(recv_Dchannel_skb);

void
recv_Bchannel_skb(struct bchannel *bch, struct sk_buff *skb)
{
	if (bch->rcount >= 64) {
		printk(KERN_WARNING "B-channel %p receive queue overflow, flushing!\n",
		       bch);
		skb_queue_purge(&bch->rqueue);
		bch->rcount = 0;
	}
	bch->rcount++;
	skb_queue_tail(&bch->rqueue, skb);
	schedule_event(bch, FLG_RECVQUEUE);
}
EXPORT_SYMBOL(recv_Bchannel_skb);

static void
confirm_Dsend(struct dchannel *dch)
{
	struct sk_buff *skb;

	skb = _alloc_mISDN_skb(PH_DATA_CNF, mISDN_HEAD_ID(dch->tx_skb),
			       0, NULL, GFP_ATOMIC);
	if (!skb) {
		printk(KERN_ERR "%s: no skb id %x\n", __func__,
		       mISDN_HEAD_ID(dch->tx_skb));
		return;
	}
	skb_queue_tail(&dch->rqueue, skb);
	schedule_event(dch, FLG_RECVQUEUE);
}

int
get_next_dframe(struct dchannel *dch)
{
	dch->tx_idx = 0;
	dch->tx_skb = skb_dequeue(&dch->squeue);
	if (dch->tx_skb) {
		confirm_Dsend(dch);
		return 1;
	}
	dch->tx_skb = NULL;
	test_and_clear_bit(FLG_TX_BUSY, &dch->Flags);
	return 0;
}
EXPORT_SYMBOL(get_next_dframe);

static void
confirm_Bsend(struct bchannel *bch)
{
	struct sk_buff *skb;

	if (bch->rcount >= 64) {
		printk(KERN_WARNING "B-channel %p receive queue overflow, flushing!\n",
		       bch);
		skb_queue_purge(&bch->rqueue);
		bch->rcount = 0;
	}
	skb = _alloc_mISDN_skb(PH_DATA_CNF, mISDN_HEAD_ID(bch->tx_skb),
			       0, NULL, GFP_ATOMIC);
	if (!skb) {
		printk(KERN_ERR "%s: no skb id %x\n", __func__,
		       mISDN_HEAD_ID(bch->tx_skb));
		return;
	}
	bch->rcount++;
	skb_queue_tail(&bch->rqueue, skb);
	schedule_event(bch, FLG_RECVQUEUE);
}

int
get_next_bframe(struct bchannel *bch)
{
	bch->tx_idx = 0;
	if (test_bit(FLG_TX_NEXT, &bch->Flags)) {
		bch->tx_skb = bch->next_skb;
		if (bch->tx_skb) {
			bch->next_skb = NULL;
			test_and_clear_bit(FLG_TX_NEXT, &bch->Flags);
			/* confirm immediately to allow next data */
			confirm_Bsend(bch);
			return 1;
		} else {
			test_and_clear_bit(FLG_TX_NEXT, &bch->Flags);
			printk(KERN_WARNING "B TX_NEXT without skb\n");
		}
	}
	bch->tx_skb = NULL;
	test_and_clear_bit(FLG_TX_BUSY, &bch->Flags);
	return 0;
}
EXPORT_SYMBOL(get_next_bframe);

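/*
 * Usage sketch (assumption): a driver's transmit-complete path, run under
 * the hardware lock. A nonzero return from get_next_bframe() means
 * bch->tx_skb holds fresh data for the FIFO. "hypo_fill_fifo" is an
 * illustrative name; the block is guarded out so it is never built.
 */
#if 0	/* illustrative only */
static void
hypo_b_tx_done(struct bchannel *bch)
{
	if (bch->tx_skb && bch->tx_idx < bch->tx_skb->len) {
		hypo_fill_fifo(bch);	/* more of the current frame */
		return;
	}
	dev_kfree_skb(bch->tx_skb);
	if (get_next_bframe(bch))
		hypo_fill_fifo(bch);	/* start the next frame */
}
#endif
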
void
queue_ch_frame(struct mISDNchannel *ch, u_int pr, int id, struct sk_buff *skb)
{
	struct mISDNhead *hh;

	if (!skb) {
		_queue_data(ch, pr, id, 0, NULL, GFP_ATOMIC);
	} else {
		if (ch->peer) {
			hh = mISDN_HEAD_P(skb);
			hh->prim = pr;
			hh->id = id;
			if (!ch->recv(ch->peer, skb))
				return;
		}
		dev_kfree_skb(skb);
	}
}
EXPORT_SYMBOL(queue_ch_frame);

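/*
 * Usage sketch (assumption): queue_ch_frame() is how drivers push
 * indications and confirms upstream, e.g. once a B-channel is active:
 *
 *	queue_ch_frame(&bch->ch, PH_ACTIVATE_IND, MISDN_ID_ANY, NULL);
 */
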
int
dchannel_senddata(struct dchannel *ch, struct sk_buff *skb)
{
	/* check frame size limits */
	if (skb->len <= 0) {
		printk(KERN_WARNING "%s: skb too small\n", __func__);
		return -EINVAL;
	}
	if (skb->len > ch->maxlen) {
		printk(KERN_WARNING "%s: skb too large (%d/%d)\n",
		       __func__, skb->len, ch->maxlen);
		return -EINVAL;
	}
	/* HW lock must be obtained */
	if (test_and_set_bit(FLG_TX_BUSY, &ch->Flags)) {
		skb_queue_tail(&ch->squeue, skb);
		return 0;
	} else {
		/* write to fifo */
		ch->tx_skb = skb;
		ch->tx_idx = 0;
		return 1;
	}
}
EXPORT_SYMBOL(dchannel_senddata);

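/*
 * Usage sketch (assumption): a driver's D-channel send path. A return of
 * 1 from dchannel_senddata() means the frame became the current tx_skb
 * and must be pushed to the hardware; 0 means it was queued behind a busy
 * transmitter. "hypo_d_fill_fifo" and "struct hypo_card" are illustrative;
 * real drivers also queue a PH_DATA_CNF after a direct transmit. Guarded
 * out so it is never built.
 */
#if 0	/* illustrative only */
static int
hypo_d_send(struct hypo_card *card, struct sk_buff *skb)
{
	int ret;

	spin_lock(&card->lock);		/* the required HW lock */
	ret = dchannel_senddata(&card->dch, skb);
	if (ret > 0) {			/* direct TX */
		hypo_d_fill_fifo(&card->dch);
		ret = 0;
	}
	spin_unlock(&card->lock);
	return ret;
}
#endif
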
int
bchannel_senddata(struct bchannel *ch, struct sk_buff *skb)
{
	/* check frame size limits */
	if (skb->len <= 0) {
		printk(KERN_WARNING "%s: skb too small\n", __func__);
		return -EINVAL;
	}
	if (skb->len > ch->maxlen) {
		printk(KERN_WARNING "%s: skb too large (%d/%d)\n",
		       __func__, skb->len, ch->maxlen);
		return -EINVAL;
	}
	/* HW lock must be obtained */
	/* check for pending next_skb */
	if (ch->next_skb) {
		printk(KERN_WARNING
		       "%s: next_skb already pending (skb->len=%d next_skb->len=%d)\n",
		       __func__, skb->len, ch->next_skb->len);
		return -EBUSY;
	}
	if (test_and_set_bit(FLG_TX_BUSY, &ch->Flags)) {
		test_and_set_bit(FLG_TX_NEXT, &ch->Flags);
		ch->next_skb = skb;
		return 0;
	} else {
		/* write to fifo */
		ch->tx_skb = skb;
		ch->tx_idx = 0;
		confirm_Bsend(ch);
		return 1;
	}
}
EXPORT_SYMBOL(bchannel_senddata);

/* Allocate a new receive skb on demand, sized for the requirements of
 * the current protocol. Returns the tailroom of the receive skb or a
 * negative error code.
 */
int
bchannel_get_rxbuf(struct bchannel *bch, int reqlen)
{
	int len;

	if (bch->rx_skb) {
		len = skb_tailroom(bch->rx_skb);
		if (len < reqlen) {
			pr_warn("B%d no space for %d (only %d) bytes\n",
				bch->nr, reqlen, len);
			if (test_bit(FLG_TRANSPARENT, &bch->Flags)) {
				/* send what we have now and try a new buffer */
				recv_Bchannel(bch, 0, true);
			} else {
				/* on HDLC we have to drop too big frames */
				return -EMSGSIZE;
			}
		} else {
			return len;
		}
	}
	/* update current min/max length first */
	if (unlikely(bch->maxlen != bch->next_maxlen))
		bch->maxlen = bch->next_maxlen;
	if (unlikely(bch->minlen != bch->next_minlen))
		bch->minlen = bch->next_minlen;
	if (unlikely(reqlen > bch->maxlen))
		return -EMSGSIZE;
	if (test_bit(FLG_TRANSPARENT, &bch->Flags)) {
		if (reqlen >= bch->minlen) {
			len = reqlen;
		} else {
			len = 2 * bch->minlen;
			if (len > bch->maxlen)
				len = bch->maxlen;
		}
	} else {
		/* with HDLC we do not know the length yet */
		len = bch->maxlen;
	}
	bch->rx_skb = mI_alloc_skb(len, GFP_ATOMIC);
	if (!bch->rx_skb) {
		pr_warn("B%d receive no memory for %d bytes\n", bch->nr, len);
		len = -ENOMEM;
	}
	return len;
}
EXPORT_SYMBOL(bchannel_get_rxbuf);
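
/*
 * Usage sketch (assumption): a receive interrupt path. The driver asks
 * bchannel_get_rxbuf() for tailroom, copies the FIFO contents into the
 * skb and hands the data upstream via recv_Bchannel(), which delivers it
 * once enough bytes have accumulated. "hypo_read_fifo" is an illustrative
 * name; the block is guarded out so it is never built.
 */
#if 0	/* illustrative only */
static void
hypo_b_rx(struct bchannel *bch, int count)
{
	if (bchannel_get_rxbuf(bch, count) < 0) {
		pr_warn("B%d: no receive buffer for %d bytes\n",
			bch->nr, count);
		return;
	}
	hypo_read_fifo(bch, skb_put(bch->rx_skb, count), count);
	recv_Bchannel(bch, 0, false);
}
#endif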