layer2.c 51 KB

12345678910111213141516171819202122232425262728293031323334353637383940414243444546474849505152535455565758596061626364656667686970717273747576777879808182838485868788899091929394959697989910010110210310410510610710810911011111211311411511611711811912012112212312412512612712812913013113213313413513613713813914014114214314414514614714814915015115215315415515615715815916016116216316416516616716816917017117217317417517617717817918018118218318418518618718818919019119219319419519619719819920020120220320420520620720820921021121221321421521621721821922022122222322422522622722822923023123223323423523623723823924024124224324424524624724824925025125225325425525625725825926026126226326426526626726826927027127227327427527627727827928028128228328428528628728828929029129229329429529629729829930030130230330430530630730830931031131231331431531631731831932032132232332432532632732832933033133233333433533633733833934034134234334434534634734834935035135235335435535635735835936036136236336436536636736836937037137237337437537637737837938038138238338438538638738838939039139239339439539639739839940040140240340440540640740840941041141241341441541641741841942042142242342442542642742842943043143243343443543643743843944044144244344444544644744844945045145245345445545645745845946046146246346446546646746846947047147247347447547647747847948048148248348448548648748848949049149249349449549649749849950050150250350450550650750850951051151251351451551651751851952052152252352452552652752852953053153253353453553653753853954054154254354454554654754854955055155255355455555655755855956056156256356456556656756856957057157257357457557657757857958058158258358458558658758858959059159259359459559659759859960060160260360460560660760860961061161261361461561661761861962062162262362462562662762862963063163263363463563663763863964064164264364464564664764864965065165265365465565665765865966066166266366466566666766866967067167267367467567667767867968068168268368468568668768868969069169269369469569669769869970070170270
37047057067077087097107117127137147157167177187197207217227237247257267277287297307317327337347357367377387397407417427437447457467477487497507517527537547557567577587597607617627637647657667677687697707717727737747757767777787797807817827837847857867877887897907917927937947957967977987998008018028038048058068078088098108118128138148158168178188198208218228238248258268278288298308318328338348358368378388398408418428438448458468478488498508518528538548558568578588598608618628638648658668678688698708718728738748758768778788798808818828838848858868878888898908918928938948958968978988999009019029039049059069079089099109119129139149159169179189199209219229239249259269279289299309319329339349359369379389399409419429439449459469479489499509519529539549559569579589599609619629639649659669679689699709719729739749759769779789799809819829839849859869879889899909919929939949959969979989991000100110021003100410051006100710081009101010111012101310141015101610171018101910201021102210231024102510261027102810291030103110321033103410351036103710381039104010411042104310441045104610471048104910501051105210531054105510561057105810591060106110621063106410651066106710681069107010711072107310741075107610771078107910801081108210831084108510861087108810891090109110921093109410951096109710981099110011011102110311041105110611071108110911101111111211131114111511161117111811191120112111221123112411251126112711281129113011311132113311341135113611371138113911401141114211431144114511461147114811491150115111521153115411551156115711581159116011611162116311641165116611671168116911701171117211731174117511761177117811791180118111821183118411851186118711881189119011911192119311941195119611971198119912001201120212031204120512061207120812091210121112121213121412151216121712181219122012211222122312241225122612271228122912301231123212331234123512361237123812391240124112421243124412451246124712481249125012511252125312541255125612571258125912601261126212631264126512661267126812691270127112721273127412751276127
71278127912801281128212831284128512861287128812891290129112921293129412951296129712981299130013011302130313041305130613071308130913101311131213131314131513161317131813191320132113221323132413251326132713281329133013311332133313341335133613371338133913401341134213431344134513461347134813491350135113521353135413551356135713581359136013611362136313641365136613671368136913701371137213731374137513761377137813791380138113821383138413851386138713881389139013911392139313941395139613971398139914001401140214031404140514061407140814091410141114121413141414151416141714181419142014211422142314241425142614271428142914301431143214331434143514361437143814391440144114421443144414451446144714481449145014511452145314541455145614571458145914601461146214631464146514661467146814691470147114721473147414751476147714781479148014811482148314841485148614871488148914901491149214931494149514961497149814991500150115021503150415051506150715081509151015111512151315141515151615171518151915201521152215231524152515261527152815291530153115321533153415351536153715381539154015411542154315441545154615471548154915501551155215531554155515561557155815591560156115621563156415651566156715681569157015711572157315741575157615771578157915801581158215831584158515861587158815891590159115921593159415951596159715981599160016011602160316041605160616071608160916101611161216131614161516161617161816191620162116221623162416251626162716281629163016311632163316341635163616371638163916401641164216431644164516461647164816491650165116521653165416551656165716581659166016611662166316641665166616671668166916701671167216731674167516761677167816791680168116821683168416851686168716881689169016911692169316941695169616971698169917001701170217031704170517061707170817091710171117121713171417151716171717181719172017211722172317241725172617271728172917301731173217331734173517361737173817391740174117421743174417451746174717481749175017511752175317541755175617571758175917601761176217631764176517661767176817691770177117721773177417751776177
7177817791780178117821783178417851786178717881789179017911792179317941795179617971798179918001801180218031804180518061807180818091810181118121813181418151816181718181819182018211822182318241825182618271828182918301831183218331834183518361837183818391840184118421843184418451846184718481849185018511852185318541855185618571858185918601861186218631864186518661867186818691870187118721873187418751876187718781879188018811882188318841885188618871888188918901891189218931894189518961897189818991900190119021903190419051906190719081909191019111912191319141915191619171918191919201921192219231924192519261927192819291930193119321933193419351936193719381939194019411942194319441945194619471948194919501951195219531954195519561957195819591960196119621963196419651966196719681969197019711972197319741975197619771978197919801981198219831984198519861987198819891990199119921993199419951996199719981999200020012002200320042005200620072008200920102011201220132014201520162017201820192020202120222023202420252026202720282029203020312032203320342035203620372038203920402041204220432044204520462047204820492050205120522053205420552056205720582059206020612062206320642065206620672068206920702071207220732074207520762077207820792080208120822083208420852086208720882089209020912092209320942095209620972098209921002101210221032104210521062107210821092110211121122113211421152116211721182119212021212122212321242125212621272128212921302131213221332134213521362137213821392140214121422143214421452146214721482149215021512152215321542155215621572158215921602161216221632164216521662167216821692170217121722173217421752176217721782179218021812182218321842185218621872188218921902191219221932194219521962197219821992200220122022203220422052206220722082209221022112212221322142215221622172218221922202221222222232224222522262227222822292230223122322233223422352236223722382239224022412242224322442245224622472248224922502251225222532254225522562257225822592260226122622263226422652266
  1. // SPDX-License-Identifier: GPL-2.0-only
  2. /*
  3. *
  4. * Author Karsten Keil <[email protected]>
  5. *
  6. * Copyright 2008 by Karsten Keil <[email protected]>
  7. */
  8. #include <linux/mISDNif.h>
  9. #include <linux/slab.h>
  10. #include "core.h"
  11. #include "fsm.h"
  12. #include "layer2.h"
/* pointer to the mISDN core's debug level (shared module parameter) */
static u_int *debug;

/* FSM description shared by all layer2 instances; tables filled in at init */
static
struct Fsm l2fsm = {NULL, 0, 0, NULL, NULL};
/* human-readable names of the ST_L2_* states, used for FSM debug output */
static char *strL2State[] =
{
	"ST_L2_1",
	"ST_L2_2",
	"ST_L2_3",
	"ST_L2_4",
	"ST_L2_5",
	"ST_L2_6",
	"ST_L2_7",
	"ST_L2_8",
};
/* events fed into the layer2 state machine (order must match strL2Event) */
enum {
	EV_L2_UI,
	EV_L2_SABME,
	EV_L2_DISC,
	EV_L2_DM,
	EV_L2_UA,
	EV_L2_FRMR,
	EV_L2_SUPER,
	EV_L2_I,
	EV_L2_DL_DATA,
	EV_L2_ACK_PULL,
	EV_L2_DL_UNITDATA,
	EV_L2_DL_ESTABLISH_REQ,
	EV_L2_DL_RELEASE_REQ,
	EV_L2_MDL_ASSIGN,
	EV_L2_MDL_REMOVE,
	EV_L2_MDL_ERROR,
	EV_L1_DEACTIVATE,
	EV_L2_T200,
	EV_L2_T203,
	EV_L2_T200I,
	EV_L2_T203I,
	EV_L2_SET_OWN_BUSY,
	EV_L2_CLEAR_OWN_BUSY,
	EV_L2_FRAME_ERROR,
};

/* total number of FSM events (last enumerator + 1) */
#define L2_EVENT_COUNT (EV_L2_FRAME_ERROR + 1)
/* human-readable names of the EV_L2_* events (same order as the enum above) */
static char *strL2Event[] =
{
	"EV_L2_UI",
	"EV_L2_SABME",
	"EV_L2_DISC",
	"EV_L2_DM",
	"EV_L2_UA",
	"EV_L2_FRMR",
	"EV_L2_SUPER",
	"EV_L2_I",
	"EV_L2_DL_DATA",
	"EV_L2_ACK_PULL",
	"EV_L2_DL_UNITDATA",
	"EV_L2_DL_ESTABLISH_REQ",
	"EV_L2_DL_RELEASE_REQ",
	"EV_L2_MDL_ASSIGN",
	"EV_L2_MDL_REMOVE",
	"EV_L2_MDL_ERROR",
	"EV_L1_DEACTIVATE",
	"EV_L2_T200",
	"EV_L2_T203",
	"EV_L2_T200I",
	"EV_L2_T203I",
	"EV_L2_SET_OWN_BUSY",
	"EV_L2_CLEAR_OWN_BUSY",
	"EV_L2_FRAME_ERROR",
};
/* printf-style FSM debug helper, gated on the DEBUG_L2_FSM debug bit */
static void
l2m_debug(struct FsmInst *fi, char *fmt, ...)
{
	struct layer2 *l2 = fi->userdata;
	struct va_format vaf;
	va_list va;

	if (!(*debug & DEBUG_L2_FSM))
		return;
	va_start(va, fmt);
	vaf.fmt = fmt;
	vaf.va = &va;
	/* %pV prints the deferred va_format in one printk call */
	printk(KERN_DEBUG "%s l2 (sapi %d tei %d): %pV\n",
	       mISDNDevName4ch(&l2->ch), l2->sapi, l2->tei, &vaf);
	va_end(va);
}
  96. inline u_int
  97. l2headersize(struct layer2 *l2, int ui)
  98. {
  99. return ((test_bit(FLG_MOD128, &l2->flag) && (!ui)) ? 2 : 1) +
  100. (test_bit(FLG_LAPD, &l2->flag) ? 2 : 1);
  101. }
  102. inline u_int
  103. l2addrsize(struct layer2 *l2)
  104. {
  105. return test_bit(FLG_LAPD, &l2->flag) ? 2 : 1;
  106. }
  107. static u_int
  108. l2_newid(struct layer2 *l2)
  109. {
  110. u_int id;
  111. id = l2->next_id++;
  112. if (id == 0x7fff)
  113. l2->next_id = 1;
  114. id <<= 16;
  115. id |= l2->tei << 8;
  116. id |= l2->sapi;
  117. return id;
  118. }
/*
 * Deliver an existing skb to the attached upper layer with primitive
 * @prim.  Stamps prim and the channel id into the mISDN header.  On
 * send failure the skb is freed here.
 * NOTE(review): when no upper layer is attached (!l2->up) the skb is
 * neither sent nor freed — confirm callers account for that.
 */
static void
l2up(struct layer2 *l2, u_int prim, struct sk_buff *skb)
{
	int err;

	if (!l2->up)
		return;
	mISDN_HEAD_PRIM(skb) = prim;
	mISDN_HEAD_ID(skb) = (l2->ch.nr << 16) | l2->ch.addr;
	err = l2->up->send(l2->up, skb);
	if (err) {
		printk(KERN_WARNING "%s: dev %s err=%d\n", __func__,
		       mISDNDevName4ch(&l2->ch), err);
		dev_kfree_skb(skb);
	}
}
/*
 * Allocate a fresh skb carrying @len bytes of @arg and send it to the
 * upper layer with primitive @prim.  Silently does nothing if no upper
 * layer is attached or allocation fails; frees the skb on send failure.
 */
static void
l2up_create(struct layer2 *l2, u_int prim, int len, void *arg)
{
	struct sk_buff *skb;
	struct mISDNhead *hh;
	int err;

	if (!l2->up)
		return;
	skb = mI_alloc_skb(len, GFP_ATOMIC);
	if (!skb)
		return;
	hh = mISDN_HEAD_P(skb);
	hh->prim = prim;
	hh->id = (l2->ch.nr << 16) | l2->ch.addr;
	if (len)
		skb_put_data(skb, arg, len);
	err = l2->up->send(l2->up, skb);
	if (err) {
		printk(KERN_WARNING "%s: dev %s err=%d\n", __func__,
		       mISDNDevName4ch(&l2->ch), err);
		dev_kfree_skb(skb);
	}
}
  157. static int
  158. l2down_skb(struct layer2 *l2, struct sk_buff *skb) {
  159. int ret;
  160. ret = l2->ch.recv(l2->ch.peer, skb);
  161. if (ret && (*debug & DEBUG_L2_RECV))
  162. printk(KERN_DEBUG "l2down_skb: dev %s ret(%d)\n",
  163. mISDNDevName4ch(&l2->ch), ret);
  164. return ret;
  165. }
/*
 * Transmit path with layer1 flow control: only one PH_DATA_REQ may be
 * in flight at a time.  While FLG_L1_NOTREADY is set further data
 * frames are parked on down_queue; otherwise the frame id is recorded
 * in down_id so ph_data_confirm() can match the confirm.
 */
static int
l2down_raw(struct layer2 *l2, struct sk_buff *skb)
{
	struct mISDNhead *hh = mISDN_HEAD_P(skb);

	if (hh->prim == PH_DATA_REQ) {
		if (test_and_set_bit(FLG_L1_NOTREADY, &l2->flag)) {
			/* a frame is already outstanding — queue this one */
			skb_queue_tail(&l2->down_queue, skb);
			return 0;
		}
		l2->down_id = mISDN_HEAD_ID(skb);
	}
	return l2down_skb(l2, skb);
}
  179. static int
  180. l2down(struct layer2 *l2, u_int prim, u_int id, struct sk_buff *skb)
  181. {
  182. struct mISDNhead *hh = mISDN_HEAD_P(skb);
  183. hh->prim = prim;
  184. hh->id = id;
  185. return l2down_raw(l2, skb);
  186. }
/*
 * Allocate a new skb with @len bytes of @arg and push it down with the
 * given primitive and id.  Returns -ENOMEM on allocation failure or the
 * send result; the skb is freed here if the send fails.
 */
static int
l2down_create(struct layer2 *l2, u_int prim, u_int id, int len, void *arg)
{
	struct sk_buff *skb;
	int err;
	struct mISDNhead *hh;

	skb = mI_alloc_skb(len, GFP_ATOMIC);
	if (!skb)
		return -ENOMEM;
	hh = mISDN_HEAD_P(skb);
	hh->prim = prim;
	hh->id = id;
	if (len)
		skb_put_data(skb, arg, len);
	err = l2down_raw(l2, skb);
	if (err)
		dev_kfree_skb(skb);
	return err;
}
/*
 * Handle a PH_DATA confirm from layer1.  If the confirm matches the
 * outstanding frame (down_id), push the next queued frame down (or clear
 * the not-ready gate and kick the FSM with EV_L2_ACK_PULL when the queue
 * is empty).  The second block re-arms transmission in case the gate was
 * already clear.  Returns 0 when the confirm skb was consumed here,
 * -EAGAIN when the caller still owns it.
 */
static int
ph_data_confirm(struct layer2 *l2, struct mISDNhead *hh, struct sk_buff *skb) {
	struct sk_buff *nskb = skb;
	int ret = -EAGAIN;

	if (test_bit(FLG_L1_NOTREADY, &l2->flag)) {
		if (hh->id == l2->down_id) {
			nskb = skb_dequeue(&l2->down_queue);
			if (nskb) {
				l2->down_id = mISDN_HEAD_ID(nskb);
				if (l2down_skb(l2, nskb)) {
					dev_kfree_skb(nskb);
					l2->down_id = MISDN_ID_NONE;
				}
			} else
				l2->down_id = MISDN_ID_NONE;
			/* ret is still -EAGAIN here, so the matched confirm
			 * skb is always freed and consumed */
			if (ret) {
				dev_kfree_skb(skb);
				ret = 0;
			}
			if (l2->down_id == MISDN_ID_NONE) {
				test_and_clear_bit(FLG_L1_NOTREADY, &l2->flag);
				mISDN_FsmEvent(&l2->l2m, EV_L2_ACK_PULL, NULL);
			}
		}
	}
	/* gate was clear: claim it and try to launch the next queued frame */
	if (!test_and_set_bit(FLG_L1_NOTREADY, &l2->flag)) {
		nskb = skb_dequeue(&l2->down_queue);
		if (nskb) {
			l2->down_id = mISDN_HEAD_ID(nskb);
			if (l2down_skb(l2, nskb)) {
				dev_kfree_skb(nskb);
				l2->down_id = MISDN_ID_NONE;
				test_and_clear_bit(FLG_L1_NOTREADY, &l2->flag);
			}
		} else
			test_and_clear_bit(FLG_L1_NOTREADY, &l2->flag);
	}
	return ret;
}
/*
 * FSM timer callback for T200/T203 expiry.  Builds an empty skb carrying
 * DL_TIMER200_IND or DL_TIMER203_IND and injects it into the stack's own
 * receive path so the event is processed in the stack context.
 */
static void
l2_timeout(struct FsmInst *fi, int event, void *arg)
{
	struct layer2 *l2 = fi->userdata;
	struct sk_buff *skb;
	struct mISDNhead *hh;

	skb = mI_alloc_skb(0, GFP_ATOMIC);
	if (!skb) {
		printk(KERN_WARNING "%s: L2(%d,%d) nr:%x timer %s no skb\n",
		       mISDNDevName4ch(&l2->ch), l2->sapi, l2->tei,
		       l2->ch.nr, event == EV_L2_T200 ? "T200" : "T203");
		return;
	}
	hh = mISDN_HEAD_P(skb);
	hh->prim = event == EV_L2_T200 ? DL_TIMER200_IND : DL_TIMER203_IND;
	hh->id = l2->ch.nr;
	if (*debug & DEBUG_TIMER)
		printk(KERN_DEBUG "%s: L2(%d,%d) nr:%x timer %s expired\n",
		       mISDNDevName4ch(&l2->ch), l2->sapi, l2->tei,
		       l2->ch.nr, event == EV_L2_T200 ? "T200" : "T203");
	if (l2->ch.st)
		l2->ch.st->own.recv(&l2->ch.st->own, skb);
}
/*
 * Report an MDL error (single-character code in @arg) to the TEI
 * manager.  For LAPD links without a fixed TEI, codes C/D/G/H are
 * forwarded to the TEI state machine.
 */
static int
l2mgr(struct layer2 *l2, u_int prim, void *arg) {
	long c = (long)arg;

	printk(KERN_WARNING "l2mgr: dev %s addr:%x prim %x %c\n",
	       mISDNDevName4ch(&l2->ch), l2->id, prim, (char)c);
	if (test_bit(FLG_LAPD, &l2->flag) &&
	    !test_bit(FLG_FIXED_TEI, &l2->flag)) {
		switch (c) {
		case 'C':
		case 'D':
		case 'G':
		case 'H':
			l2_tei(l2, prim, (u_long)arg);
			break;
		}
	}
	return 0;
}
/*
 * Mark the peer as busy (received RNR); additionally block layer2
 * transmission if frames are already waiting in the I or UI queue.
 */
static void
set_peer_busy(struct layer2 *l2) {
	test_and_set_bit(FLG_PEER_BUSY, &l2->flag);
	if (skb_queue_len(&l2->i_queue) || skb_queue_len(&l2->ui_queue))
		test_and_set_bit(FLG_L2BLOCK, &l2->flag);
}
/* clear peer-busy; the L2BLOCK flag is only dropped if busy was set */
static void
clear_peer_busy(struct layer2 *l2) {
	if (test_and_clear_bit(FLG_PEER_BUSY, &l2->flag))
		test_and_clear_bit(FLG_L2BLOCK, &l2->flag);
}
  297. static void
  298. InitWin(struct layer2 *l2)
  299. {
  300. int i;
  301. for (i = 0; i < MAX_WINDOW; i++)
  302. l2->windowar[i] = NULL;
  303. }
  304. static int
  305. freewin(struct layer2 *l2)
  306. {
  307. int i, cnt = 0;
  308. for (i = 0; i < MAX_WINDOW; i++) {
  309. if (l2->windowar[i]) {
  310. cnt++;
  311. dev_kfree_skb(l2->windowar[i]);
  312. l2->windowar[i] = NULL;
  313. }
  314. }
  315. return cnt;
  316. }
  317. static void
  318. ReleaseWin(struct layer2 *l2)
  319. {
  320. int cnt = freewin(l2);
  321. if (cnt)
  322. printk(KERN_WARNING
  323. "isdnl2 freed %d skbuffs in release\n", cnt);
  324. }
  325. inline unsigned int
  326. cansend(struct layer2 *l2)
  327. {
  328. unsigned int p1;
  329. if (test_bit(FLG_MOD128, &l2->flag))
  330. p1 = (l2->vs - l2->va) % 128;
  331. else
  332. p1 = (l2->vs - l2->va) % 8;
  333. return (p1 < l2->window) && !test_bit(FLG_PEER_BUSY, &l2->flag);
  334. }
/* reset all exception conditions: ack pending, REJ, own busy, peer busy */
inline void
clear_exception(struct layer2 *l2)
{
	test_and_clear_bit(FLG_ACK_PEND, &l2->flag);
	test_and_clear_bit(FLG_REJEXC, &l2->flag);
	test_and_clear_bit(FLG_OWN_BUSY, &l2->flag);
	clear_peer_busy(l2);
}
  343. static int
  344. sethdraddr(struct layer2 *l2, u_char *header, int rsp)
  345. {
  346. u_char *ptr = header;
  347. int crbit = rsp;
  348. if (test_bit(FLG_LAPD, &l2->flag)) {
  349. if (test_bit(FLG_LAPD_NET, &l2->flag))
  350. crbit = !crbit;
  351. *ptr++ = (l2->sapi << 2) | (crbit ? 2 : 0);
  352. *ptr++ = (l2->tei << 1) | 1;
  353. return 2;
  354. } else {
  355. if (test_bit(FLG_ORIG, &l2->flag))
  356. crbit = !crbit;
  357. if (crbit)
  358. *ptr++ = l2->addr.B;
  359. else
  360. *ptr++ = l2->addr.A;
  361. return 1;
  362. }
  363. }
  364. static inline void
  365. enqueue_super(struct layer2 *l2, struct sk_buff *skb)
  366. {
  367. if (l2down(l2, PH_DATA_REQ, l2_newid(l2), skb))
  368. dev_kfree_skb(skb);
  369. }
  370. static inline void
  371. enqueue_ui(struct layer2 *l2, struct sk_buff *skb)
  372. {
  373. if (l2->tm)
  374. l2_tei(l2, MDL_STATUS_UI_IND, 0);
  375. if (l2down(l2, PH_DATA_REQ, l2_newid(l2), skb))
  376. dev_kfree_skb(skb);
  377. }
  378. inline int
  379. IsUI(u_char *data)
  380. {
  381. return (data[0] & 0xef) == UI;
  382. }
  383. inline int
  384. IsUA(u_char *data)
  385. {
  386. return (data[0] & 0xef) == UA;
  387. }
  388. inline int
  389. IsDM(u_char *data)
  390. {
  391. return (data[0] & 0xef) == DM;
  392. }
  393. inline int
  394. IsDISC(u_char *data)
  395. {
  396. return (data[0] & 0xef) == DISC;
  397. }
  398. inline int
  399. IsRR(u_char *data, struct layer2 *l2)
  400. {
  401. if (test_bit(FLG_MOD128, &l2->flag))
  402. return data[0] == RR;
  403. else
  404. return (data[0] & 0xf) == 1;
  405. }
  406. inline int
  407. IsSFrame(u_char *data, struct layer2 *l2)
  408. {
  409. register u_char d = *data;
  410. if (!test_bit(FLG_MOD128, &l2->flag))
  411. d &= 0xf;
  412. return ((d & 0xf3) == 1) && ((d & 0x0c) != 0x0c);
  413. }
  414. inline int
  415. IsSABME(u_char *data, struct layer2 *l2)
  416. {
  417. u_char d = data[0] & ~0x10;
  418. return test_bit(FLG_MOD128, &l2->flag) ? d == SABME : d == SABM;
  419. }
  420. inline int
  421. IsREJ(u_char *data, struct layer2 *l2)
  422. {
  423. return test_bit(FLG_MOD128, &l2->flag) ?
  424. data[0] == REJ : (data[0] & 0xf) == REJ;
  425. }
  426. inline int
  427. IsFRMR(u_char *data)
  428. {
  429. return (data[0] & 0xef) == FRMR;
  430. }
  431. inline int
  432. IsRNR(u_char *data, struct layer2 *l2)
  433. {
  434. return test_bit(FLG_MOD128, &l2->flag) ?
  435. data[0] == RNR : (data[0] & 0xf) == RNR;
  436. }
  437. static int
  438. iframe_error(struct layer2 *l2, struct sk_buff *skb)
  439. {
  440. u_int i;
  441. int rsp = *skb->data & 0x2;
  442. i = l2addrsize(l2) + (test_bit(FLG_MOD128, &l2->flag) ? 2 : 1);
  443. if (test_bit(FLG_ORIG, &l2->flag))
  444. rsp = !rsp;
  445. if (rsp)
  446. return 'L';
  447. if (skb->len < i)
  448. return 'N';
  449. if ((skb->len - i) > l2->maxlen)
  450. return 'O';
  451. return 0;
  452. }
  453. static int
  454. super_error(struct layer2 *l2, struct sk_buff *skb)
  455. {
  456. if (skb->len != l2addrsize(l2) +
  457. (test_bit(FLG_MOD128, &l2->flag) ? 2 : 1))
  458. return 'N';
  459. return 0;
  460. }
  461. static int
  462. unnum_error(struct layer2 *l2, struct sk_buff *skb, int wantrsp)
  463. {
  464. int rsp = (*skb->data & 0x2) >> 1;
  465. if (test_bit(FLG_ORIG, &l2->flag))
  466. rsp = !rsp;
  467. if (rsp != wantrsp)
  468. return 'L';
  469. if (skb->len != l2addrsize(l2) + 1)
  470. return 'N';
  471. return 0;
  472. }
  473. static int
  474. UI_error(struct layer2 *l2, struct sk_buff *skb)
  475. {
  476. int rsp = *skb->data & 0x2;
  477. if (test_bit(FLG_ORIG, &l2->flag))
  478. rsp = !rsp;
  479. if (rsp)
  480. return 'L';
  481. if (skb->len > l2->maxlen + l2addrsize(l2) + 1)
  482. return 'O';
  483. return 0;
  484. }
/*
 * Validate a received FRMR.  FRMR must be a response ('L' otherwise)
 * and carry 5 (mod-128) or 3 (mod-8) information octets after the
 * header ('N' if too short).  Valid frames are dumped when DEBUG_L2
 * is enabled; returns 0 when OK.
 */
static int
FRMR_error(struct layer2 *l2, struct sk_buff *skb)
{
	u_int headers = l2addrsize(l2) + 1;
	u_char *datap = skb->data + headers;
	int rsp = *skb->data & 0x2;

	if (test_bit(FLG_ORIG, &l2->flag))
		rsp = !rsp;
	if (!rsp)
		return 'L';
	if (test_bit(FLG_MOD128, &l2->flag)) {
		if (skb->len < headers + 5)
			return 'N';
		else if (*debug & DEBUG_L2)
			l2m_debug(&l2->l2m,
				  "FRMR information %2x %2x %2x %2x %2x",
				  datap[0], datap[1], datap[2], datap[3], datap[4]);
	} else {
		if (skb->len < headers + 3)
			return 'N';
		else if (*debug & DEBUG_L2)
			l2m_debug(&l2->l2m,
				  "FRMR information %2x %2x %2x",
				  datap[0], datap[1], datap[2]);
	}
	return 0;
}
  512. static unsigned int
  513. legalnr(struct layer2 *l2, unsigned int nr)
  514. {
  515. if (test_bit(FLG_MOD128, &l2->flag))
  516. return ((nr - l2->va) % 128) <= ((l2->vs - l2->va) % 128);
  517. else
  518. return ((nr - l2->va) % 8) <= ((l2->vs - l2->va) % 8);
  519. }
/*
 * Advance the acknowledge pointer va up to @nr, releasing each
 * acknowledged I frame from the window array.  Acknowledged skbs are
 * first collected (trimmed) on tmp_queue and only freed afterwards,
 * keeping the window/queue state consistent before any free happens.
 */
static void
setva(struct layer2 *l2, unsigned int nr)
{
	struct sk_buff *skb;

	while (l2->va != nr) {
		l2->va++;
		if (test_bit(FLG_MOD128, &l2->flag))
			l2->va %= 128;
		else
			l2->va %= 8;
		if (l2->windowar[l2->sow]) {
			skb_trim(l2->windowar[l2->sow], 0);
			skb_queue_tail(&l2->tmp_queue, l2->windowar[l2->sow]);
			l2->windowar[l2->sow] = NULL;
		}
		/* sow = start of window, advances with each ack */
		l2->sow = (l2->sow + 1) % l2->window;
	}
	skb = skb_dequeue(&l2->tmp_queue);
	while (skb) {
		dev_kfree_skb(skb);
		skb = skb_dequeue(&l2->tmp_queue);
	}
}
/*
 * Build and transmit an unnumbered frame with control octet @cmd and
 * command/response flag @cr.  If @skb is given it is reused (trimmed to
 * zero length); otherwise a fresh skb is allocated.
 */
static void
send_uframe(struct layer2 *l2, struct sk_buff *skb, u_char cmd, u_char cr)
{
	u_char tmp[MAX_L2HEADER_LEN];
	int i;

	i = sethdraddr(l2, tmp, cr);
	tmp[i++] = cmd;
	if (skb)
		skb_trim(skb, 0);
	else {
		skb = mI_alloc_skb(i, GFP_ATOMIC);
		if (!skb) {
			printk(KERN_WARNING "%s: can't alloc skbuff in %s\n",
			       mISDNDevName4ch(&l2->ch), __func__);
			return;
		}
	}
	skb_put_data(skb, tmp, i);
	enqueue_super(l2, skb);
}
/* extract the P/F bit (0x10) from the control octet after the address */
inline u_char
get_PollFlag(struct layer2 *l2, struct sk_buff *skb)
{
	return skb->data[l2addrsize(l2)] & 0x10;
}
  568. inline u_char
  569. get_PollFlagFree(struct layer2 *l2, struct sk_buff *skb)
  570. {
  571. u_char PF;
  572. PF = get_PollFlag(l2, skb);
  573. dev_kfree_skb(skb);
  574. return PF;
  575. }
/* start T200 (retransmission timer) and remember that it is running */
inline void
start_t200(struct layer2 *l2, int i)
{
	mISDN_FsmAddTimer(&l2->t200, l2->T200, EV_L2_T200, NULL, i);
	test_and_set_bit(FLG_T200_RUN, &l2->flag);
}
/* restart T200 from zero and mark it running; @i tags the debug origin */
inline void
restart_t200(struct layer2 *l2, int i)
{
	mISDN_FsmRestartTimer(&l2->t200, l2->T200, EV_L2_T200, NULL, i);
	test_and_set_bit(FLG_T200_RUN, &l2->flag);
}
/* stop T200 if it was running (flag test avoids a useless del-timer) */
inline void
stop_t200(struct layer2 *l2, int i)
{
	if (test_and_clear_bit(FLG_T200_RUN, &l2->flag))
		mISDN_FsmDelTimer(&l2->t200, i);
}
  594. inline void
  595. st5_dl_release_l2l3(struct layer2 *l2)
  596. {
  597. int pr;
  598. if (test_and_clear_bit(FLG_PEND_REL, &l2->flag))
  599. pr = DL_RELEASE_CNF;
  600. else
  601. pr = DL_RELEASE_IND;
  602. l2up_create(l2, pr, 0, NULL);
  603. }
/*
 * Signal link release upward with primitive @f; for LAPB links also
 * request layer1 deactivation first.
 */
inline void
lapb_dl_release_l2l3(struct layer2 *l2, int f)
{
	if (test_bit(FLG_LAPB, &l2->flag))
		l2down_create(l2, PH_DEACTIVATE_REQ, l2_newid(l2), 0, NULL);
	l2up_create(l2, f, 0, NULL);
}
/*
 * Start (re)establishment: clear exception conditions, send SABME/SABM
 * with the P bit set, switch T203 off and T200 on, flush the transmit
 * window and enter the awaiting-establishment state ST_L2_5.
 */
static void
establishlink(struct FsmInst *fi)
{
	struct layer2 *l2 = fi->userdata;
	u_char cmd;

	clear_exception(l2);
	l2->rc = 0;	/* retransmission counter */
	cmd = (test_bit(FLG_MOD128, &l2->flag) ? SABME : SABM) | 0x10;
	send_uframe(l2, NULL, cmd, CMD);
	mISDN_FsmDelTimer(&l2->t203, 1);
	restart_t200(l2, 1);
	test_and_clear_bit(FLG_PEND_REL, &l2->flag);
	freewin(l2);
	mISDN_FsmChangeState(fi, ST_L2_5);
}
  626. static void
  627. l2_mdl_error_ua(struct FsmInst *fi, int event, void *arg)
  628. {
  629. struct sk_buff *skb = arg;
  630. struct layer2 *l2 = fi->userdata;
  631. if (get_PollFlagFree(l2, skb))
  632. l2mgr(l2, MDL_ERROR_IND, (void *) 'C');
  633. else
  634. l2mgr(l2, MDL_ERROR_IND, (void *) 'D');
  635. }
  636. static void
  637. l2_mdl_error_dm(struct FsmInst *fi, int event, void *arg)
  638. {
  639. struct sk_buff *skb = arg;
  640. struct layer2 *l2 = fi->userdata;
  641. if (get_PollFlagFree(l2, skb))
  642. l2mgr(l2, MDL_ERROR_IND, (void *) 'B');
  643. else {
  644. l2mgr(l2, MDL_ERROR_IND, (void *) 'E');
  645. establishlink(fi);
  646. test_and_clear_bit(FLG_L3_INIT, &l2->flag);
  647. }
  648. }
  649. static void
  650. l2_st8_mdl_error_dm(struct FsmInst *fi, int event, void *arg)
  651. {
  652. struct sk_buff *skb = arg;
  653. struct layer2 *l2 = fi->userdata;
  654. if (get_PollFlagFree(l2, skb))
  655. l2mgr(l2, MDL_ERROR_IND, (void *) 'B');
  656. else
  657. l2mgr(l2, MDL_ERROR_IND, (void *) 'E');
  658. establishlink(fi);
  659. test_and_clear_bit(FLG_L3_INIT, &l2->flag);
  660. }
  661. static void
  662. l2_go_st3(struct FsmInst *fi, int event, void *arg)
  663. {
  664. dev_kfree_skb((struct sk_buff *)arg);
  665. mISDN_FsmChangeState(fi, ST_L2_3);
  666. }
  667. static void
  668. l2_mdl_assign(struct FsmInst *fi, int event, void *arg)
  669. {
  670. struct layer2 *l2 = fi->userdata;
  671. mISDN_FsmChangeState(fi, ST_L2_3);
  672. dev_kfree_skb((struct sk_buff *)arg);
  673. l2_tei(l2, MDL_ASSIGN_IND, 0);
  674. }
  675. static void
  676. l2_queue_ui_assign(struct FsmInst *fi, int event, void *arg)
  677. {
  678. struct layer2 *l2 = fi->userdata;
  679. struct sk_buff *skb = arg;
  680. skb_queue_tail(&l2->ui_queue, skb);
  681. mISDN_FsmChangeState(fi, ST_L2_2);
  682. l2_tei(l2, MDL_ASSIGN_IND, 0);
  683. }
/* queue a UI frame until the link is ready to transmit it */
static void
l2_queue_ui(struct FsmInst *fi, int event, void *arg)
{
	struct layer2 *l2 = fi->userdata;
	struct sk_buff *skb = arg;

	skb_queue_tail(&l2->ui_queue, skb);
}
  691. static void
  692. tx_ui(struct layer2 *l2)
  693. {
  694. struct sk_buff *skb;
  695. u_char header[MAX_L2HEADER_LEN];
  696. int i;
  697. i = sethdraddr(l2, header, CMD);
  698. if (test_bit(FLG_LAPD_NET, &l2->flag))
  699. header[1] = 0xff; /* tei 127 */
  700. header[i++] = UI;
  701. while ((skb = skb_dequeue(&l2->ui_queue))) {
  702. memcpy(skb_push(skb, i), header, i);
  703. enqueue_ui(l2, skb);
  704. }
  705. }
  706. static void
  707. l2_send_ui(struct FsmInst *fi, int event, void *arg)
  708. {
  709. struct layer2 *l2 = fi->userdata;
  710. struct sk_buff *skb = arg;
  711. skb_queue_tail(&l2->ui_queue, skb);
  712. tx_ui(l2);
  713. }
  714. static void
  715. l2_got_ui(struct FsmInst *fi, int event, void *arg)
  716. {
  717. struct layer2 *l2 = fi->userdata;
  718. struct sk_buff *skb = arg;
  719. skb_pull(skb, l2headersize(l2, 1));
  720. /*
  721. * in states 1-3 for broadcast
  722. */
  723. if (l2->tm)
  724. l2_tei(l2, MDL_STATUS_UI_IND, 0);
  725. l2up(l2, DL_UNITDATA_IND, skb);
  726. }
  727. static void
  728. l2_establish(struct FsmInst *fi, int event, void *arg)
  729. {
  730. struct sk_buff *skb = arg;
  731. struct layer2 *l2 = fi->userdata;
  732. establishlink(fi);
  733. test_and_set_bit(FLG_L3_INIT, &l2->flag);
  734. dev_kfree_skb(skb);
  735. }
  736. static void
  737. l2_discard_i_setl3(struct FsmInst *fi, int event, void *arg)
  738. {
  739. struct sk_buff *skb = arg;
  740. struct layer2 *l2 = fi->userdata;
  741. skb_queue_purge(&l2->i_queue);
  742. test_and_set_bit(FLG_L3_INIT, &l2->flag);
  743. test_and_clear_bit(FLG_PEND_REL, &l2->flag);
  744. dev_kfree_skb(skb);
  745. }
  746. static void
  747. l2_l3_reestablish(struct FsmInst *fi, int event, void *arg)
  748. {
  749. struct sk_buff *skb = arg;
  750. struct layer2 *l2 = fi->userdata;
  751. skb_queue_purge(&l2->i_queue);
  752. establishlink(fi);
  753. test_and_set_bit(FLG_L3_INIT, &l2->flag);
  754. dev_kfree_skb(skb);
  755. }
  756. static void
  757. l2_release(struct FsmInst *fi, int event, void *arg)
  758. {
  759. struct layer2 *l2 = fi->userdata;
  760. struct sk_buff *skb = arg;
  761. skb_trim(skb, 0);
  762. l2up(l2, DL_RELEASE_CNF, skb);
  763. }
  764. static void
  765. l2_pend_rel(struct FsmInst *fi, int event, void *arg)
  766. {
  767. struct sk_buff *skb = arg;
  768. struct layer2 *l2 = fi->userdata;
  769. test_and_set_bit(FLG_PEND_REL, &l2->flag);
  770. dev_kfree_skb(skb);
  771. }
/*
 * Start link disconnection: flush I queue and window, enter the
 * awaiting-release state ST_L2_6, send DISC with the P bit set and arm
 * T200.  @arg may be NULL (called that way from l2_connected);
 * dev_kfree_skb() on NULL is a no-op.
 */
static void
l2_disconnect(struct FsmInst *fi, int event, void *arg)
{
	struct layer2 *l2 = fi->userdata;
	struct sk_buff *skb = arg;

	skb_queue_purge(&l2->i_queue);
	freewin(l2);
	mISDN_FsmChangeState(fi, ST_L2_6);
	l2->rc = 0;
	send_uframe(l2, NULL, DISC | 0x10, CMD);
	mISDN_FsmDelTimer(&l2->t203, 1);
	restart_t200(l2, 2);
	dev_kfree_skb(skb);
}
/*
 * SABME/SABM received while down: reset all sequence variables, answer
 * with UA (echoing the P bit as F), enter the multiframe-established
 * state ST_L2_7, start T203 and signal DL_ESTABLISH_IND upward (reusing
 * the received skb, trimmed to zero).
 */
static void
l2_start_multi(struct FsmInst *fi, int event, void *arg)
{
	struct layer2 *l2 = fi->userdata;
	struct sk_buff *skb = arg;

	l2->vs = 0;
	l2->va = 0;
	l2->vr = 0;
	l2->sow = 0;
	clear_exception(l2);
	send_uframe(l2, NULL, UA | get_PollFlag(l2, skb), RSP);
	mISDN_FsmChangeState(fi, ST_L2_7);
	mISDN_FsmAddTimer(&l2->t203, l2->T203, EV_L2_T203, NULL, 3);
	skb_trim(skb, 0);
	l2up(l2, DL_ESTABLISH_IND, skb);
	if (l2->tm)
		l2_tei(l2, MDL_STATUS_UP_IND, 0);
}
/* answer the received U frame with UA, echoing the P bit as F */
static void
l2_send_UA(struct FsmInst *fi, int event, void *arg)
{
	struct layer2 *l2 = fi->userdata;
	struct sk_buff *skb = arg;

	send_uframe(l2, skb, UA | get_PollFlag(l2, skb), RSP);
}
/* answer the received U frame with DM, echoing the P bit as F */
static void
l2_send_DM(struct FsmInst *fi, int event, void *arg)
{
	struct layer2 *l2 = fi->userdata;
	struct sk_buff *skb = arg;

	send_uframe(l2, skb, DM | get_PollFlag(l2, skb), RSP);
}
/*
 * SABME/SABM received while already established (peer re-establish):
 * answer UA, report MDL error 'F', reset the sequence variables and
 * timers and stay in ST_L2_7.  If unacknowledged I frames existed they
 * are discarded and a fresh DL_ESTABLISH_IND is signalled upward.
 */
static void
l2_restart_multi(struct FsmInst *fi, int event, void *arg)
{
	struct layer2 *l2 = fi->userdata;
	struct sk_buff *skb = arg;
	int est = 0;

	send_uframe(l2, skb, UA | get_PollFlag(l2, skb), RSP);
	l2mgr(l2, MDL_ERROR_IND, (void *) 'F');
	if (l2->vs != l2->va) {
		skb_queue_purge(&l2->i_queue);
		est = 1;
	}
	clear_exception(l2);
	l2->vs = 0;
	l2->va = 0;
	l2->vr = 0;
	l2->sow = 0;
	mISDN_FsmChangeState(fi, ST_L2_7);
	stop_t200(l2, 3);
	mISDN_FsmRestartTimer(&l2->t203, l2->T203, EV_L2_T203, NULL, 3);
	if (est)
		l2up_create(l2, DL_ESTABLISH_IND, 0, NULL);
/* mISDN_queue_data(&l2->inst, l2->inst.id | MSG_BROADCAST,
 * MGR_SHORTSTATUS | INDICATION, SSTATUS_L2_ESTABLISHED,
 * 0, NULL, 0);
 */
	if (skb_queue_len(&l2->i_queue) && cansend(l2))
		mISDN_FsmEvent(fi, EV_L2_ACK_PULL, NULL);
}
/*
 * DISC received in ST_L2_7: the peer terminates multiple-frame
 * operation.  Stop both timers, acknowledge with UA, flush all
 * pending data and signal DL_RELEASE_IND upward.
 */
static void
l2_stop_multi(struct FsmInst *fi, int event, void *arg)
{
	struct layer2 *l2 = fi->userdata;
	struct sk_buff *skb = arg;

	mISDN_FsmChangeState(fi, ST_L2_4);
	mISDN_FsmDelTimer(&l2->t203, 3);
	stop_t200(l2, 4);
	send_uframe(l2, skb, UA | get_PollFlag(l2, skb), RSP);
	skb_queue_purge(&l2->i_queue);
	freewin(l2);
	lapb_dl_release_l2l3(l2, DL_RELEASE_IND);
	if (l2->tm)
		l2_tei(l2, MDL_STATUS_DOWN_IND, 0);
}
/*
 * UA received in ST_L2_5 (awaiting establishment): link establishment
 * completed.  Reset sequence state, enter ST_L2_7 and deliver
 * DL_ESTABLISH_CNF (we initiated) or DL_ESTABLISH_IND (re-establish
 * with lost data) upward.  UA without F bit is an 'C'/'D' MDL error.
 */
static void
l2_connected(struct FsmInst *fi, int event, void *arg)
{
	struct layer2 *l2 = fi->userdata;
	struct sk_buff *skb = arg;
	int pr = -1;		/* primitive to send up, -1 = none */

	if (!get_PollFlag(l2, skb)) {
		l2_mdl_error_ua(fi, event, arg);
		return;
	}
	dev_kfree_skb(skb);
	if (test_and_clear_bit(FLG_PEND_REL, &l2->flag))
		l2_disconnect(fi, event, NULL);
	/* NOTE(review): after a pending release the code still falls
	 * through and re-enters ST_L2_7 below - confirm against Q.921 */
	if (test_and_clear_bit(FLG_L3_INIT, &l2->flag)) {
		pr = DL_ESTABLISH_CNF;
	} else if (l2->vs != l2->va) {
		/* re-establishment lost unacknowledged I-frames */
		skb_queue_purge(&l2->i_queue);
		pr = DL_ESTABLISH_IND;
	}
	stop_t200(l2, 5);
	l2->vr = 0;
	l2->vs = 0;
	l2->va = 0;
	l2->sow = 0;
	mISDN_FsmChangeState(fi, ST_L2_7);
	mISDN_FsmAddTimer(&l2->t203, l2->T203, EV_L2_T203, NULL, 4);
	if (pr != -1)
		l2up_create(l2, pr, 0, NULL);
	if (skb_queue_len(&l2->i_queue) && cansend(l2))
		mISDN_FsmEvent(fi, EV_L2_ACK_PULL, NULL);
	if (l2->tm)
		l2_tei(l2, MDL_STATUS_UP_IND, 0);
}
/*
 * UA received in ST_L2_6 (awaiting release): our DISC was
 * acknowledged.  Stop T200, confirm the release upward and return
 * to ST_L2_4.  UA without F bit is treated as an MDL error.
 */
static void
l2_released(struct FsmInst *fi, int event, void *arg)
{
	struct layer2 *l2 = fi->userdata;
	struct sk_buff *skb = arg;

	if (!get_PollFlag(l2, skb)) {
		l2_mdl_error_ua(fi, event, arg);
		return;
	}
	dev_kfree_skb(skb);
	stop_t200(l2, 6);
	lapb_dl_release_l2l3(l2, DL_RELEASE_CNF);
	mISDN_FsmChangeState(fi, ST_L2_4);
	if (l2->tm)
		l2_tei(l2, MDL_STATUS_DOWN_IND, 0);
}
  911. static void
  912. l2_reestablish(struct FsmInst *fi, int event, void *arg)
  913. {
  914. struct layer2 *l2 = fi->userdata;
  915. struct sk_buff *skb = arg;
  916. if (!get_PollFlagFree(l2, skb)) {
  917. establishlink(fi);
  918. test_and_set_bit(FLG_L3_INIT, &l2->flag);
  919. }
  920. }
/*
 * DM(F=1) received in ST_L2_5: the peer refuses our SABM(E).
 * Abort establishment, release upward and fall back to ST_L2_4.
 */
static void
l2_st5_dm_release(struct FsmInst *fi, int event, void *arg)
{
	struct layer2 *l2 = fi->userdata;
	struct sk_buff *skb = arg;

	if (get_PollFlagFree(l2, skb)) {
		stop_t200(l2, 7);
		if (!test_bit(FLG_L3_INIT, &l2->flag))
			skb_queue_purge(&l2->i_queue);
		if (test_bit(FLG_LAPB, &l2->flag))
			/* LAPB: also deactivate the physical layer */
			l2down_create(l2, PH_DEACTIVATE_REQ,
				      l2_newid(l2), 0, NULL);
		st5_dl_release_l2l3(l2);
		mISDN_FsmChangeState(fi, ST_L2_4);
		if (l2->tm)
			l2_tei(l2, MDL_STATUS_DOWN_IND, 0);
	}
}
/*
 * DM(F=1) received in ST_L2_6: the peer answers our DISC with DM,
 * which also completes the release.  Confirm upward, go to ST_L2_4.
 */
static void
l2_st6_dm_release(struct FsmInst *fi, int event, void *arg)
{
	struct layer2 *l2 = fi->userdata;
	struct sk_buff *skb = arg;

	if (get_PollFlagFree(l2, skb)) {
		stop_t200(l2, 8);
		lapb_dl_release_l2l3(l2, DL_RELEASE_CNF);
		mISDN_FsmChangeState(fi, ST_L2_4);
		if (l2->tm)
			l2_tei(l2, MDL_STATUS_DOWN_IND, 0);
	}
}
/*
 * Build and queue a supervisory frame (RR/RNR/REJ).
 * @typ: S-frame type (RR, RNR or REJ)
 * @cr:  command/response selector for the address field
 * @pf:  poll/final bit
 * The frame carries the current receive sequence number V(R).
 * Allocation failure is logged and the frame silently dropped.
 */
static void
enquiry_cr(struct layer2 *l2, u_char typ, u_char cr, u_char pf)
{
	struct sk_buff *skb;
	u_char tmp[MAX_L2HEADER_LEN];
	int i;

	i = sethdraddr(l2, tmp, cr);
	if (test_bit(FLG_MOD128, &l2->flag)) {
		/* modulo-128: two control octets, N(R) in the second */
		tmp[i++] = typ;
		tmp[i++] = (l2->vr << 1) | (pf ? 1 : 0);
	} else
		/* modulo-8: single control octet with N(R) in bits 5-7 */
		tmp[i++] = (l2->vr << 5) | typ | (pf ? 0x10 : 0);
	skb = mI_alloc_skb(i, GFP_ATOMIC);
	if (!skb) {
		printk(KERN_WARNING "%s: isdnl2 can't alloc sbbuff in %s\n",
		       mISDNDevName4ch(&l2->ch), __func__);
		return;
	}
	skb_put_data(skb, tmp, i);
	enqueue_super(l2, skb);
}
  973. inline void
  974. enquiry_response(struct layer2 *l2)
  975. {
  976. if (test_bit(FLG_OWN_BUSY, &l2->flag))
  977. enquiry_cr(l2, RNR, RSP, 1);
  978. else
  979. enquiry_cr(l2, RR, RSP, 1);
  980. test_and_clear_bit(FLG_ACK_PEND, &l2->flag);
  981. }
  982. inline void
  983. transmit_enquiry(struct layer2 *l2)
  984. {
  985. if (test_bit(FLG_OWN_BUSY, &l2->flag))
  986. enquiry_cr(l2, RNR, CMD, 1);
  987. else
  988. enquiry_cr(l2, RR, CMD, 1);
  989. test_and_clear_bit(FLG_ACK_PEND, &l2->flag);
  990. start_t200(l2, 9);
  991. }
/*
 * Recovery after receiving an invalid N(R): report MDL error 'J'
 * and re-establish the link.
 */
static void
nrerrorrecovery(struct FsmInst *fi)
{
	struct layer2 *l2 = fi->userdata;

	l2mgr(l2, MDL_ERROR_IND, (void *) 'J');
	establishlink(fi);
	test_and_clear_bit(FLG_L3_INIT, &l2->flag);
}
/*
 * Prepare retransmission of all I-frames sent after sequence number
 * @nr (requested by the peer via REJ or a poll response).  Walk V(S)
 * backwards to @nr, moving each frame from the ack-window ring back
 * to the head of i_queue, then kick transmission via EV_L2_ACK_PULL.
 */
static void
invoke_retransmission(struct layer2 *l2, unsigned int nr)
{
	u_int p1;

	if (l2->vs != nr) {
		while (l2->vs != nr) {
			(l2->vs)--;
			if (test_bit(FLG_MOD128, &l2->flag)) {
				l2->vs %= 128;
				p1 = (l2->vs - l2->va) % 128;
			} else {
				l2->vs %= 8;
				p1 = (l2->vs - l2->va) % 8;
			}
			/* map window offset to ring-buffer slot */
			p1 = (p1 + l2->sow) % l2->window;
			if (l2->windowar[p1])
				skb_queue_head(&l2->i_queue, l2->windowar[p1]);
			else
				printk(KERN_WARNING
				       "%s: windowar[%d] is NULL\n",
				       mISDNDevName4ch(&l2->ch), p1);
			l2->windowar[p1] = NULL;
		}
		mISDN_FsmEvent(&l2->l2m, EV_L2_ACK_PULL, NULL);
	}
}
/*
 * Supervisory frame (RR/RNR/REJ) received in ST_L2_7.
 * Updates peer-busy state, answers polls, validates N(R) and either
 * acknowledges (setva), retransmits (REJ) or restarts timers.
 * An S-frame response with P/F set is an 'A' MDL error here.
 */
static void
l2_st7_got_super(struct FsmInst *fi, int event, void *arg)
{
	struct layer2 *l2 = fi->userdata;
	struct sk_buff *skb = arg;
	int PollFlag, rsp, typ = RR;
	unsigned int nr;

	rsp = *skb->data & 0x2;		/* C/R bit of the address field */
	if (test_bit(FLG_ORIG, &l2->flag))
		rsp = !rsp;		/* C/R meaning depends on side */
	skb_pull(skb, l2addrsize(l2));
	if (IsRNR(skb->data, l2)) {
		set_peer_busy(l2);
		typ = RNR;
	} else
		clear_peer_busy(l2);
	if (IsREJ(skb->data, l2))
		typ = REJ;
	if (test_bit(FLG_MOD128, &l2->flag)) {
		PollFlag = (skb->data[1] & 0x1) == 0x1;
		nr = skb->data[1] >> 1;
	} else {
		PollFlag = (skb->data[0] & 0x10);
		nr = (skb->data[0] >> 5) & 0x7;
	}
	dev_kfree_skb(skb);
	if (PollFlag) {
		if (rsp)
			l2mgr(l2, MDL_ERROR_IND, (void *) 'A');
		else
			enquiry_response(l2);
	}
	if (legalnr(l2, nr)) {
		if (typ == REJ) {
			/* peer asks for retransmission from nr on */
			setva(l2, nr);
			invoke_retransmission(l2, nr);
			stop_t200(l2, 10);
			if (mISDN_FsmAddTimer(&l2->t203, l2->T203,
					      EV_L2_T203, NULL, 6))
				l2m_debug(&l2->l2m, "Restart T203 ST7 REJ");
		} else if ((nr == l2->vs) && (typ == RR)) {
			/* everything acknowledged: idle-supervise with T203 */
			setva(l2, nr);
			stop_t200(l2, 11);
			mISDN_FsmRestartTimer(&l2->t203, l2->T203,
					      EV_L2_T203, NULL, 7);
		} else if ((l2->va != nr) || (typ == RNR)) {
			/* partial ack or peer busy: keep T200 running */
			setva(l2, nr);
			if (typ != RR)
				mISDN_FsmDelTimer(&l2->t203, 9);
			restart_t200(l2, 12);
		}
		if (skb_queue_len(&l2->i_queue) && (typ == RR))
			mISDN_FsmEvent(fi, EV_L2_ACK_PULL, NULL);
	} else
		nrerrorrecovery(fi);
}
  1082. static void
  1083. l2_feed_i_if_reest(struct FsmInst *fi, int event, void *arg)
  1084. {
  1085. struct layer2 *l2 = fi->userdata;
  1086. struct sk_buff *skb = arg;
  1087. if (!test_bit(FLG_L3_INIT, &l2->flag))
  1088. skb_queue_tail(&l2->i_queue, skb);
  1089. else
  1090. dev_kfree_skb(skb);
  1091. }
  1092. static void
  1093. l2_feed_i_pull(struct FsmInst *fi, int event, void *arg)
  1094. {
  1095. struct layer2 *l2 = fi->userdata;
  1096. struct sk_buff *skb = arg;
  1097. skb_queue_tail(&l2->i_queue, skb);
  1098. mISDN_FsmEvent(fi, EV_L2_ACK_PULL, NULL);
  1099. }
  1100. static void
  1101. l2_feed_iqueue(struct FsmInst *fi, int event, void *arg)
  1102. {
  1103. struct layer2 *l2 = fi->userdata;
  1104. struct sk_buff *skb = arg;
  1105. skb_queue_tail(&l2->i_queue, skb);
  1106. }
/*
 * I-frame received (ST_L2_7/8).  Extract N(S)/N(R) and the P bit,
 * deliver in-sequence data upward as DL_DATA_IND, send REJ on a
 * sequence gap (once, guarded by FLG_REJEXC), and process the
 * piggybacked acknowledge N(R) like an RR.
 */
static void
l2_got_iframe(struct FsmInst *fi, int event, void *arg)
{
	struct layer2 *l2 = fi->userdata;
	struct sk_buff *skb = arg;
	int PollFlag, i;
	u_int ns, nr;

	i = l2addrsize(l2);
	if (test_bit(FLG_MOD128, &l2->flag)) {
		PollFlag = ((skb->data[i + 1] & 0x1) == 0x1);
		ns = skb->data[i] >> 1;
		nr = (skb->data[i + 1] >> 1) & 0x7f;
	} else {
		PollFlag = (skb->data[i] & 0x10);
		ns = (skb->data[i] >> 1) & 0x7;
		nr = (skb->data[i] >> 5) & 0x7;
	}
	if (test_bit(FLG_OWN_BUSY, &l2->flag)) {
		/* receiver busy: drop the data, but still answer a poll */
		dev_kfree_skb(skb);
		if (PollFlag)
			enquiry_response(l2);
	} else {
		if (l2->vr == ns) {
			/* in-sequence frame: advance V(R), pass data up */
			l2->vr++;
			if (test_bit(FLG_MOD128, &l2->flag))
				l2->vr %= 128;
			else
				l2->vr %= 8;
			test_and_clear_bit(FLG_REJEXC, &l2->flag);
			if (PollFlag)
				enquiry_response(l2);
			else
				test_and_set_bit(FLG_ACK_PEND, &l2->flag);
			skb_pull(skb, l2headersize(l2, 0));
			l2up(l2, DL_DATA_IND, skb);
		} else {
			/* n(s)!=v(r) */
			dev_kfree_skb(skb);
			if (test_and_set_bit(FLG_REJEXC, &l2->flag)) {
				/* REJ already outstanding; only answer poll */
				if (PollFlag)
					enquiry_response(l2);
			} else {
				enquiry_cr(l2, REJ, RSP, PollFlag);
				test_and_clear_bit(FLG_ACK_PEND, &l2->flag);
			}
		}
	}
	if (legalnr(l2, nr)) {
		if (!test_bit(FLG_PEER_BUSY, &l2->flag) &&
		    (fi->state == ST_L2_7)) {
			if (nr == l2->vs) {
				/* all outstanding I-frames acknowledged */
				stop_t200(l2, 13);
				mISDN_FsmRestartTimer(&l2->t203, l2->T203,
						      EV_L2_T203, NULL, 7);
			} else if (nr != l2->va)
				restart_t200(l2, 14);
		}
		setva(l2, nr);
	} else {
		nrerrorrecovery(fi);
		return;
	}
	if (skb_queue_len(&l2->i_queue) && (fi->state == ST_L2_7))
		mISDN_FsmEvent(fi, EV_L2_ACK_PULL, NULL);
	if (test_and_clear_bit(FLG_ACK_PEND, &l2->flag))
		enquiry_cr(l2, RR, RSP, 0);
}
/*
 * MDL_ASSIGN: the TEI manager assigned a TEI (passed encoded in arg).
 * Record it, inform layer 3, and either continue a pending establish
 * (ST_L2_3) or just enter ST_L2_4.  Queued UI frames can now be sent.
 */
static void
l2_got_tei(struct FsmInst *fi, int event, void *arg)
{
	struct layer2 *l2 = fi->userdata;
	u_int info;

	l2->tei = (signed char)(long)arg;	/* TEI value smuggled in the pointer */
	set_channel_address(&l2->ch, l2->sapi, l2->tei);
	info = DL_INFO_L2_CONNECT;
	l2up_create(l2, DL_INFORMATION_IND, sizeof(info), &info);
	if (fi->state == ST_L2_3) {
		/* an establish request was waiting for the TEI */
		establishlink(fi);
		test_and_set_bit(FLG_L3_INIT, &l2->flag);
	} else
		mISDN_FsmChangeState(fi, ST_L2_4);
	if (skb_queue_len(&l2->ui_queue))
		tx_ui(l2);
}
/*
 * T200 expiry in ST_L2_5 (awaiting establishment).  While the
 * D-channel is busy just re-arm the timer; after N200 retries give
 * up and release; otherwise retransmit SABM(E) with P=1.
 */
static void
l2_st5_tout_200(struct FsmInst *fi, int event, void *arg)
{
	struct layer2 *l2 = fi->userdata;

	if (test_bit(FLG_LAPD, &l2->flag) &&
	    test_bit(FLG_DCHAN_BUSY, &l2->flag)) {
		mISDN_FsmAddTimer(&l2->t200, l2->T200, EV_L2_T200, NULL, 9);
	} else if (l2->rc == l2->N200) {
		/* retry limit reached: abandon establishment */
		mISDN_FsmChangeState(fi, ST_L2_4);
		test_and_clear_bit(FLG_T200_RUN, &l2->flag);
		skb_queue_purge(&l2->i_queue);
		l2mgr(l2, MDL_ERROR_IND, (void *) 'G');
		if (test_bit(FLG_LAPB, &l2->flag))
			l2down_create(l2, PH_DEACTIVATE_REQ,
				      l2_newid(l2), 0, NULL);
		st5_dl_release_l2l3(l2);
		if (l2->tm)
			l2_tei(l2, MDL_STATUS_DOWN_IND, 0);
	} else {
		l2->rc++;
		mISDN_FsmAddTimer(&l2->t200, l2->T200, EV_L2_T200, NULL, 9);
		send_uframe(l2, NULL, (test_bit(FLG_MOD128, &l2->flag) ?
				       SABME : SABM) | 0x10, CMD);
	}
}
/*
 * T200 expiry in ST_L2_6 (awaiting release).  While the D-channel is
 * busy just re-arm the timer; after N200 retries report error 'H'
 * and force the release; otherwise retransmit DISC with P=1.
 */
static void
l2_st6_tout_200(struct FsmInst *fi, int event, void *arg)
{
	struct layer2 *l2 = fi->userdata;

	if (test_bit(FLG_LAPD, &l2->flag) &&
	    test_bit(FLG_DCHAN_BUSY, &l2->flag)) {
		mISDN_FsmAddTimer(&l2->t200, l2->T200, EV_L2_T200, NULL, 9);
	} else if (l2->rc == l2->N200) {
		/* retry limit reached: release anyway */
		mISDN_FsmChangeState(fi, ST_L2_4);
		test_and_clear_bit(FLG_T200_RUN, &l2->flag);
		l2mgr(l2, MDL_ERROR_IND, (void *) 'H');
		lapb_dl_release_l2l3(l2, DL_RELEASE_CNF);
		if (l2->tm)
			l2_tei(l2, MDL_STATUS_DOWN_IND, 0);
	} else {
		l2->rc++;
		mISDN_FsmAddTimer(&l2->t200, l2->T200, EV_L2_T200,
				  NULL, 9);
		send_uframe(l2, NULL, DISC | 0x10, CMD);
	}
}
/*
 * T200 expiry in ST_L2_7: no acknowledge arrived in time.  Enter
 * timer-recovery state ST_L2_8 and poll the peer (first retry).
 * While the D-channel is busy only re-arm the timer.
 */
static void
l2_st7_tout_200(struct FsmInst *fi, int event, void *arg)
{
	struct layer2 *l2 = fi->userdata;

	if (test_bit(FLG_LAPD, &l2->flag) &&
	    test_bit(FLG_DCHAN_BUSY, &l2->flag)) {
		mISDN_FsmAddTimer(&l2->t200, l2->T200, EV_L2_T200, NULL, 9);
		return;
	}
	test_and_clear_bit(FLG_T200_RUN, &l2->flag);
	l2->rc = 0;
	mISDN_FsmChangeState(fi, ST_L2_8);
	transmit_enquiry(l2);	/* poll with P=1, restarts T200 */
	l2->rc++;
}
/*
 * T200 expiry in ST_L2_8 (timer recovery): the poll went unanswered.
 * After N200 retries report error 'I' and re-establish the link;
 * otherwise poll again.  While the D-channel is busy only re-arm.
 */
static void
l2_st8_tout_200(struct FsmInst *fi, int event, void *arg)
{
	struct layer2 *l2 = fi->userdata;

	if (test_bit(FLG_LAPD, &l2->flag) &&
	    test_bit(FLG_DCHAN_BUSY, &l2->flag)) {
		mISDN_FsmAddTimer(&l2->t200, l2->T200, EV_L2_T200, NULL, 9);
		return;
	}
	test_and_clear_bit(FLG_T200_RUN, &l2->flag);
	if (l2->rc == l2->N200) {
		l2mgr(l2, MDL_ERROR_IND, (void *) 'I');
		establishlink(fi);
		test_and_clear_bit(FLG_L3_INIT, &l2->flag);
	} else {
		transmit_enquiry(l2);
		l2->rc++;
	}
}
/*
 * T203 (idle supervision) expiry in ST_L2_7: no activity for the
 * idle period, so enter timer recovery and poll the peer to check
 * the link is still alive.  While the D-channel is busy only re-arm.
 */
static void
l2_st7_tout_203(struct FsmInst *fi, int event, void *arg)
{
	struct layer2 *l2 = fi->userdata;

	if (test_bit(FLG_LAPD, &l2->flag) &&
	    test_bit(FLG_DCHAN_BUSY, &l2->flag)) {
		mISDN_FsmAddTimer(&l2->t203, l2->T203, EV_L2_T203, NULL, 9);
		return;
	}
	mISDN_FsmChangeState(fi, ST_L2_8);
	transmit_enquiry(l2);
	l2->rc = 0;
}
/*
 * Transmit the next I-frame from i_queue (EV_L2_ACK_PULL).
 * Builds the I-frame header with the current V(S)/V(R), stores the
 * original skb in the ack-window ring for possible retransmission,
 * sends a headroom-extended copy down, and starts T200 (stopping the
 * idle timer T203) if it is not already running.
 */
static void
l2_pull_iqueue(struct FsmInst *fi, int event, void *arg)
{
	struct layer2 *l2 = fi->userdata;
	struct sk_buff *skb, *nskb;
	u_char header[MAX_L2HEADER_LEN];
	u_int i, p1;

	if (!cansend(l2))	/* window full or peer busy */
		return;
	skb = skb_dequeue(&l2->i_queue);
	if (!skb)
		return;
	i = sethdraddr(l2, header, CMD);
	if (test_bit(FLG_MOD128, &l2->flag)) {
		header[i++] = l2->vs << 1;
		header[i++] = l2->vr << 1;
	} else
		header[i++] = (l2->vr << 5) | (l2->vs << 1);
	/* copy with room for the header; original stays for retransmit */
	nskb = skb_realloc_headroom(skb, i);
	if (!nskb) {
		printk(KERN_WARNING "%s: no headroom(%d) copy for IFrame\n",
		       mISDNDevName4ch(&l2->ch), i);
		skb_queue_head(&l2->i_queue, skb);
		return;
	}
	if (test_bit(FLG_MOD128, &l2->flag)) {
		p1 = (l2->vs - l2->va) % 128;
		l2->vs = (l2->vs + 1) % 128;
	} else {
		p1 = (l2->vs - l2->va) % 8;
		l2->vs = (l2->vs + 1) % 8;
	}
	/* map window offset to ring-buffer slot */
	p1 = (p1 + l2->sow) % l2->window;
	if (l2->windowar[p1]) {
		/* should not happen - cansend() guards the window */
		printk(KERN_WARNING "%s: l2 try overwrite ack queue entry %d\n",
		       mISDNDevName4ch(&l2->ch), p1);
		dev_kfree_skb(l2->windowar[p1]);
	}
	l2->windowar[p1] = skb;
	memcpy(skb_push(nskb, i), header, i);
	l2down(l2, PH_DATA_REQ, l2_newid(l2), nskb);
	test_and_clear_bit(FLG_ACK_PEND, &l2->flag);
	if (!test_and_set_bit(FLG_T200_RUN, &l2->flag)) {
		mISDN_FsmDelTimer(&l2->t203, 13);
		mISDN_FsmAddTimer(&l2->t200, l2->T200, EV_L2_T200, NULL, 11);
	}
}
/*
 * Supervisory frame received in ST_L2_8 (timer recovery).  A response
 * with the F bit set answers our poll: leave recovery, retransmit
 * outstanding frames and return to ST_L2_7.  Other frames only update
 * V(A)/peer-busy state and answer polls.
 */
static void
l2_st8_got_super(struct FsmInst *fi, int event, void *arg)
{
	struct layer2 *l2 = fi->userdata;
	struct sk_buff *skb = arg;
	int PollFlag, rsp, rnr = 0;
	unsigned int nr;

	rsp = *skb->data & 0x2;		/* C/R bit of the address field */
	if (test_bit(FLG_ORIG, &l2->flag))
		rsp = !rsp;
	skb_pull(skb, l2addrsize(l2));
	if (IsRNR(skb->data, l2)) {
		set_peer_busy(l2);
		rnr = 1;
	} else
		clear_peer_busy(l2);
	if (test_bit(FLG_MOD128, &l2->flag)) {
		PollFlag = (skb->data[1] & 0x1) == 0x1;
		nr = skb->data[1] >> 1;
	} else {
		PollFlag = (skb->data[0] & 0x10);
		nr = (skb->data[0] >> 5) & 0x7;
	}
	dev_kfree_skb(skb);
	if (rsp && PollFlag) {
		/* final-bit response to our enquiry */
		if (legalnr(l2, nr)) {
			if (rnr) {
				restart_t200(l2, 15);
			} else {
				stop_t200(l2, 16);
				mISDN_FsmAddTimer(&l2->t203, l2->T203,
						  EV_L2_T203, NULL, 5);
				setva(l2, nr);
			}
			invoke_retransmission(l2, nr);
			mISDN_FsmChangeState(fi, ST_L2_7);
			if (skb_queue_len(&l2->i_queue) && cansend(l2))
				mISDN_FsmEvent(fi, EV_L2_ACK_PULL, NULL);
		} else
			nrerrorrecovery(fi);
	} else {
		if (!rsp && PollFlag)
			enquiry_response(l2);
		if (legalnr(l2, nr))
			setva(l2, nr);
		else
			nrerrorrecovery(fi);
	}
}
/*
 * FRMR (frame reject) received.  If the rejected-frame field reports
 * an I- or S-frame, or a UA while established, report error 'K' and
 * re-establish the link; other cases are ignored.
 */
static void
l2_got_FRMR(struct FsmInst *fi, int event, void *arg)
{
	struct layer2 *l2 = fi->userdata;
	struct sk_buff *skb = arg;

	/* skip address and FRMR control octet to the rejected control field */
	skb_pull(skb, l2addrsize(l2) + 1);
	if (!(skb->data[0] & 1) || ((skb->data[0] & 3) == 1) || /* I or S */
	    (IsUA(skb->data) && (fi->state == ST_L2_7))) {
		l2mgr(l2, MDL_ERROR_IND, (void *) 'K');
		establishlink(fi);
		test_and_clear_bit(FLG_L3_INIT, &l2->flag);
	}
	dev_kfree_skb(skb);
}
/*
 * TEI removed in ST_L2_2/4 (no data link active): flush queued UI
 * frames and fall back to the TEI-unassigned state.
 */
static void
l2_st24_tei_remove(struct FsmInst *fi, int event, void *arg)
{
	struct layer2 *l2 = fi->userdata;

	skb_queue_purge(&l2->ui_queue);
	l2->tei = GROUP_TEI;
	mISDN_FsmChangeState(fi, ST_L2_1);
}
/*
 * TEI removed in ST_L2_3 (establish awaiting TEI): additionally tell
 * layer 3 the pending establishment failed via DL_RELEASE_IND.
 */
static void
l2_st3_tei_remove(struct FsmInst *fi, int event, void *arg)
{
	struct layer2 *l2 = fi->userdata;

	skb_queue_purge(&l2->ui_queue);
	l2->tei = GROUP_TEI;
	l2up_create(l2, DL_RELEASE_IND, 0, NULL);
	mISDN_FsmChangeState(fi, ST_L2_1);
}
/*
 * TEI removed in ST_L2_5 (awaiting establishment): abort the attempt,
 * flush all queues, stop T200 and release upward.
 */
static void
l2_st5_tei_remove(struct FsmInst *fi, int event, void *arg)
{
	struct layer2 *l2 = fi->userdata;

	skb_queue_purge(&l2->i_queue);
	skb_queue_purge(&l2->ui_queue);
	freewin(l2);
	l2->tei = GROUP_TEI;
	stop_t200(l2, 17);
	st5_dl_release_l2l3(l2);
	mISDN_FsmChangeState(fi, ST_L2_1);
}
/*
 * TEI removed in ST_L2_6 (awaiting release): stop T200 and report
 * the release upward; the DISC exchange cannot complete anymore.
 */
static void
l2_st6_tei_remove(struct FsmInst *fi, int event, void *arg)
{
	struct layer2 *l2 = fi->userdata;

	skb_queue_purge(&l2->ui_queue);
	l2->tei = GROUP_TEI;
	stop_t200(l2, 18);
	l2up_create(l2, DL_RELEASE_IND, 0, NULL);
	mISDN_FsmChangeState(fi, ST_L2_1);
}
/*
 * TEI removed while the link is up (ST_L2_7/8): tear everything
 * down - queues, window, both timers - signal DL_RELEASE_IND and
 * return to the TEI-unassigned state.
 */
static void
l2_tei_remove(struct FsmInst *fi, int event, void *arg)
{
	struct layer2 *l2 = fi->userdata;

	skb_queue_purge(&l2->i_queue);
	skb_queue_purge(&l2->ui_queue);
	freewin(l2);
	l2->tei = GROUP_TEI;
	stop_t200(l2, 17);
	mISDN_FsmDelTimer(&l2->t203, 19);
	l2up_create(l2, DL_RELEASE_IND, 0, NULL);
	/* mISDN_queue_data(&l2->inst, l2->inst.id | MSG_BROADCAST,
	 * MGR_SHORTSTATUS_IND, SSTATUS_L2_RELEASED,
	 * 0, NULL, 0);
	 */
	mISDN_FsmChangeState(fi, ST_L2_1);
}
/*
 * Layer 1 deactivated in ST_L2_1/4: flush queues; if an establish
 * was pending, reuse the skb to signal DL_RELEASE_IND upward,
 * otherwise just free it.
 */
static void
l2_st14_persistent_da(struct FsmInst *fi, int event, void *arg)
{
	struct layer2 *l2 = fi->userdata;
	struct sk_buff *skb = arg;

	skb_queue_purge(&l2->i_queue);
	skb_queue_purge(&l2->ui_queue);
	if (test_and_clear_bit(FLG_ESTAB_PEND, &l2->flag))
		l2up(l2, DL_RELEASE_IND, skb);
	else
		dev_kfree_skb(skb);
}
/*
 * Layer 1 deactivated in ST_L2_5 (awaiting establishment): abort the
 * attempt, release upward and fall back to ST_L2_4.
 */
static void
l2_st5_persistent_da(struct FsmInst *fi, int event, void *arg)
{
	struct layer2 *l2 = fi->userdata;
	struct sk_buff *skb = arg;

	skb_queue_purge(&l2->i_queue);
	skb_queue_purge(&l2->ui_queue);
	freewin(l2);
	stop_t200(l2, 19);
	st5_dl_release_l2l3(l2);
	mISDN_FsmChangeState(fi, ST_L2_4);
	if (l2->tm)
		l2_tei(l2, MDL_STATUS_DOWN_IND, 0);
	dev_kfree_skb(skb);
}
/*
 * Layer 1 deactivated in ST_L2_6 (awaiting release): treat it as a
 * completed release and confirm upward (reusing the event skb).
 */
static void
l2_st6_persistent_da(struct FsmInst *fi, int event, void *arg)
{
	struct layer2 *l2 = fi->userdata;
	struct sk_buff *skb = arg;

	skb_queue_purge(&l2->ui_queue);
	stop_t200(l2, 20);
	l2up(l2, DL_RELEASE_CNF, skb);
	mISDN_FsmChangeState(fi, ST_L2_4);
	if (l2->tm)
		l2_tei(l2, MDL_STATUS_DOWN_IND, 0);
}
/*
 * Layer 1 deactivated while the link is up (ST_L2_7/8): tear down
 * queues, window and both timers, signal DL_RELEASE_IND upward
 * (reusing the event skb) and fall back to ST_L2_4.
 */
static void
l2_persistent_da(struct FsmInst *fi, int event, void *arg)
{
	struct layer2 *l2 = fi->userdata;
	struct sk_buff *skb = arg;

	skb_queue_purge(&l2->i_queue);
	skb_queue_purge(&l2->ui_queue);
	freewin(l2);
	stop_t200(l2, 19);
	mISDN_FsmDelTimer(&l2->t203, 19);
	l2up(l2, DL_RELEASE_IND, skb);
	mISDN_FsmChangeState(fi, ST_L2_4);
	if (l2->tm)
		l2_tei(l2, MDL_STATUS_DOWN_IND, 0);
}
/*
 * Upper layer signals local receiver busy: on the busy transition
 * (flag was clear) announce it to the peer with RNR.
 */
static void
l2_set_own_busy(struct FsmInst *fi, int event, void *arg)
{
	struct layer2 *l2 = fi->userdata;
	struct sk_buff *skb = arg;

	if (!test_and_set_bit(FLG_OWN_BUSY, &l2->flag)) {
		enquiry_cr(l2, RNR, RSP, 0);
		test_and_clear_bit(FLG_ACK_PEND, &l2->flag);
	}
	dev_kfree_skb(skb);
}
/*
 * Upper layer clears local receiver busy: announce the change to the
 * peer with RR.
 * NOTE(review): the condition sends RR only when FLG_OWN_BUSY was
 * already clear (test_and_clear_bit returns the old value) - the
 * inverse of l2_set_own_busy's pattern.  This matches the historical
 * i4l code; confirm against Q.921 before changing.
 */
static void
l2_clear_own_busy(struct FsmInst *fi, int event, void *arg)
{
	struct layer2 *l2 = fi->userdata;
	struct sk_buff *skb = arg;

	if (!test_and_clear_bit(FLG_OWN_BUSY, &l2->flag)) {
		enquiry_cr(l2, RR, RSP, 0);
		test_and_clear_bit(FLG_ACK_PEND, &l2->flag);
	}
	dev_kfree_skb(skb);
}
/*
 * Report a received frame error (error code passed in arg) to the
 * management entity; no link recovery in these states.
 */
static void
l2_frame_error(struct FsmInst *fi, int event, void *arg)
{
	struct layer2 *l2 = fi->userdata;

	l2mgr(l2, MDL_ERROR_IND, arg);
}
/*
 * Frame error while the link is up: report it and re-establish the
 * link (used in ST_L2_7/8).
 */
static void
l2_frame_error_reest(struct FsmInst *fi, int event, void *arg)
{
	struct layer2 *l2 = fi->userdata;

	l2mgr(l2, MDL_ERROR_IND, arg);
	establishlink(fi);
	test_and_clear_bit(FLG_L3_INIT, &l2->flag);
}
/*
 * Layer 2 state machine transition table: {state, event, handler}.
 * States ST_L2_1..8 follow Q.921 (1: TEI unassigned, 3: establish
 * awaiting TEI, 4: TEI assigned, 5: awaiting establishment,
 * 6: awaiting release, 7: multiple frame established, 8: timer
 * recovery).
 */
static struct FsmNode L2FnList[] =
{
	/* requests from layer 3 */
	{ST_L2_1, EV_L2_DL_ESTABLISH_REQ, l2_mdl_assign},
	{ST_L2_2, EV_L2_DL_ESTABLISH_REQ, l2_go_st3},
	{ST_L2_4, EV_L2_DL_ESTABLISH_REQ, l2_establish},
	{ST_L2_5, EV_L2_DL_ESTABLISH_REQ, l2_discard_i_setl3},
	{ST_L2_7, EV_L2_DL_ESTABLISH_REQ, l2_l3_reestablish},
	{ST_L2_8, EV_L2_DL_ESTABLISH_REQ, l2_l3_reestablish},
	{ST_L2_4, EV_L2_DL_RELEASE_REQ, l2_release},
	{ST_L2_5, EV_L2_DL_RELEASE_REQ, l2_pend_rel},
	{ST_L2_7, EV_L2_DL_RELEASE_REQ, l2_disconnect},
	{ST_L2_8, EV_L2_DL_RELEASE_REQ, l2_disconnect},
	{ST_L2_5, EV_L2_DL_DATA, l2_feed_i_if_reest},
	{ST_L2_7, EV_L2_DL_DATA, l2_feed_i_pull},
	{ST_L2_8, EV_L2_DL_DATA, l2_feed_iqueue},
	{ST_L2_1, EV_L2_DL_UNITDATA, l2_queue_ui_assign},
	{ST_L2_2, EV_L2_DL_UNITDATA, l2_queue_ui},
	{ST_L2_3, EV_L2_DL_UNITDATA, l2_queue_ui},
	{ST_L2_4, EV_L2_DL_UNITDATA, l2_send_ui},
	{ST_L2_5, EV_L2_DL_UNITDATA, l2_send_ui},
	{ST_L2_6, EV_L2_DL_UNITDATA, l2_send_ui},
	{ST_L2_7, EV_L2_DL_UNITDATA, l2_send_ui},
	{ST_L2_8, EV_L2_DL_UNITDATA, l2_send_ui},
	/* TEI management */
	{ST_L2_1, EV_L2_MDL_ASSIGN, l2_got_tei},
	{ST_L2_2, EV_L2_MDL_ASSIGN, l2_got_tei},
	{ST_L2_3, EV_L2_MDL_ASSIGN, l2_got_tei},
	{ST_L2_2, EV_L2_MDL_ERROR, l2_st24_tei_remove},
	{ST_L2_3, EV_L2_MDL_ERROR, l2_st3_tei_remove},
	{ST_L2_4, EV_L2_MDL_REMOVE, l2_st24_tei_remove},
	{ST_L2_5, EV_L2_MDL_REMOVE, l2_st5_tei_remove},
	{ST_L2_6, EV_L2_MDL_REMOVE, l2_st6_tei_remove},
	{ST_L2_7, EV_L2_MDL_REMOVE, l2_tei_remove},
	{ST_L2_8, EV_L2_MDL_REMOVE, l2_tei_remove},
	/* received U-frames */
	{ST_L2_4, EV_L2_SABME, l2_start_multi},
	{ST_L2_5, EV_L2_SABME, l2_send_UA},
	{ST_L2_6, EV_L2_SABME, l2_send_DM},
	{ST_L2_7, EV_L2_SABME, l2_restart_multi},
	{ST_L2_8, EV_L2_SABME, l2_restart_multi},
	{ST_L2_4, EV_L2_DISC, l2_send_DM},
	{ST_L2_5, EV_L2_DISC, l2_send_DM},
	{ST_L2_6, EV_L2_DISC, l2_send_UA},
	{ST_L2_7, EV_L2_DISC, l2_stop_multi},
	{ST_L2_8, EV_L2_DISC, l2_stop_multi},
	{ST_L2_4, EV_L2_UA, l2_mdl_error_ua},
	{ST_L2_5, EV_L2_UA, l2_connected},
	{ST_L2_6, EV_L2_UA, l2_released},
	{ST_L2_7, EV_L2_UA, l2_mdl_error_ua},
	{ST_L2_8, EV_L2_UA, l2_mdl_error_ua},
	{ST_L2_4, EV_L2_DM, l2_reestablish},
	{ST_L2_5, EV_L2_DM, l2_st5_dm_release},
	{ST_L2_6, EV_L2_DM, l2_st6_dm_release},
	{ST_L2_7, EV_L2_DM, l2_mdl_error_dm},
	{ST_L2_8, EV_L2_DM, l2_st8_mdl_error_dm},
	{ST_L2_1, EV_L2_UI, l2_got_ui},
	{ST_L2_2, EV_L2_UI, l2_got_ui},
	{ST_L2_3, EV_L2_UI, l2_got_ui},
	{ST_L2_4, EV_L2_UI, l2_got_ui},
	{ST_L2_5, EV_L2_UI, l2_got_ui},
	{ST_L2_6, EV_L2_UI, l2_got_ui},
	{ST_L2_7, EV_L2_UI, l2_got_ui},
	{ST_L2_8, EV_L2_UI, l2_got_ui},
	{ST_L2_7, EV_L2_FRMR, l2_got_FRMR},
	{ST_L2_8, EV_L2_FRMR, l2_got_FRMR},
	/* received S- and I-frames */
	{ST_L2_7, EV_L2_SUPER, l2_st7_got_super},
	{ST_L2_8, EV_L2_SUPER, l2_st8_got_super},
	{ST_L2_7, EV_L2_I, l2_got_iframe},
	{ST_L2_8, EV_L2_I, l2_got_iframe},
	/* timers */
	{ST_L2_5, EV_L2_T200, l2_timeout},
	{ST_L2_6, EV_L2_T200, l2_timeout},
	{ST_L2_7, EV_L2_T200, l2_timeout},
	{ST_L2_8, EV_L2_T200, l2_timeout},
	{ST_L2_7, EV_L2_T203, l2_timeout},
	{ST_L2_5, EV_L2_T200I, l2_st5_tout_200},
	{ST_L2_6, EV_L2_T200I, l2_st6_tout_200},
	{ST_L2_7, EV_L2_T200I, l2_st7_tout_200},
	{ST_L2_8, EV_L2_T200I, l2_st8_tout_200},
	{ST_L2_7, EV_L2_T203I, l2_st7_tout_203},
	/* internal events */
	{ST_L2_7, EV_L2_ACK_PULL, l2_pull_iqueue},
	{ST_L2_7, EV_L2_SET_OWN_BUSY, l2_set_own_busy},
	{ST_L2_8, EV_L2_SET_OWN_BUSY, l2_set_own_busy},
	{ST_L2_7, EV_L2_CLEAR_OWN_BUSY, l2_clear_own_busy},
	{ST_L2_8, EV_L2_CLEAR_OWN_BUSY, l2_clear_own_busy},
	{ST_L2_4, EV_L2_FRAME_ERROR, l2_frame_error},
	{ST_L2_5, EV_L2_FRAME_ERROR, l2_frame_error},
	{ST_L2_6, EV_L2_FRAME_ERROR, l2_frame_error},
	{ST_L2_7, EV_L2_FRAME_ERROR, l2_frame_error_reest},
	{ST_L2_8, EV_L2_FRAME_ERROR, l2_frame_error_reest},
	/* layer 1 deactivation */
	{ST_L2_1, EV_L1_DEACTIVATE, l2_st14_persistent_da},
	{ST_L2_2, EV_L1_DEACTIVATE, l2_st24_tei_remove},
	{ST_L2_3, EV_L1_DEACTIVATE, l2_st3_tei_remove},
	{ST_L2_4, EV_L1_DEACTIVATE, l2_st14_persistent_da},
	{ST_L2_5, EV_L1_DEACTIVATE, l2_st5_persistent_da},
	{ST_L2_6, EV_L1_DEACTIVATE, l2_st6_persistent_da},
	{ST_L2_7, EV_L1_DEACTIVATE, l2_persistent_da},
	{ST_L2_8, EV_L1_DEACTIVATE, l2_persistent_da},
};
/*
 * Parse a frame received from layer 1.  For LAPD, validate the
 * address field (EA bits, SAPI/TEI match) first; then classify the
 * control field (I/S/UI/SABME/UA/DISC/DM/FRMR), run the per-type
 * sanity check and feed the matching event into the state machine.
 * Returns 0 or a negative error; a non-zero return tells the caller
 * to free the skb.
 */
static int
ph_data_indication(struct layer2 *l2, struct mISDNhead *hh, struct sk_buff *skb)
{
	u_char *datap = skb->data;
	int ret = -EINVAL;
	int psapi, ptei;
	u_int l;
	int c = 0;		/* frame error code, 0 = no error */

	l = l2addrsize(l2);
	if (skb->len <= l) {
		/* too short to hold address + control field */
		mISDN_FsmEvent(&l2->l2m, EV_L2_FRAME_ERROR, (void *) 'N');
		return ret;
	}
	if (test_bit(FLG_LAPD, &l2->flag)) { /* Maybe not needed */
		psapi = *datap++;
		ptei = *datap++;
		if ((psapi & 1) || !(ptei & 1)) {
			/* EA0 must be 0, EA1 must be 1 */
			printk(KERN_WARNING
			       "%s l2 D-channel frame wrong EA0/EA1\n",
			       mISDNDevName4ch(&l2->ch));
			return ret;
		}
		psapi >>= 2;
		ptei >>= 1;
		if (psapi != l2->sapi) {
			/* not our business */
			if (*debug & DEBUG_L2)
				printk(KERN_DEBUG "%s: sapi %d/%d mismatch\n",
				       mISDNDevName4ch(&l2->ch), psapi,
				       l2->sapi);
			dev_kfree_skb(skb);
			return 0;
		}
		if ((ptei != l2->tei) && (ptei != GROUP_TEI)) {
			/* not our business */
			if (*debug & DEBUG_L2)
				printk(KERN_DEBUG "%s: tei %d/%d mismatch\n",
				       mISDNDevName4ch(&l2->ch), ptei, l2->tei);
			dev_kfree_skb(skb);
			return 0;
		}
	} else
		datap += l;
	/* classify by control field and dispatch to the FSM */
	if (!(*datap & 1)) {	/* I-Frame */
		c = iframe_error(l2, skb);
		if (!c)
			ret = mISDN_FsmEvent(&l2->l2m, EV_L2_I, skb);
	} else if (IsSFrame(datap, l2)) {	/* S-Frame */
		c = super_error(l2, skb);
		if (!c)
			ret = mISDN_FsmEvent(&l2->l2m, EV_L2_SUPER, skb);
	} else if (IsUI(datap)) {
		c = UI_error(l2, skb);
		if (!c)
			ret = mISDN_FsmEvent(&l2->l2m, EV_L2_UI, skb);
	} else if (IsSABME(datap, l2)) {
		c = unnum_error(l2, skb, CMD);
		if (!c)
			ret = mISDN_FsmEvent(&l2->l2m, EV_L2_SABME, skb);
	} else if (IsUA(datap)) {
		c = unnum_error(l2, skb, RSP);
		if (!c)
			ret = mISDN_FsmEvent(&l2->l2m, EV_L2_UA, skb);
	} else if (IsDISC(datap)) {
		c = unnum_error(l2, skb, CMD);
		if (!c)
			ret = mISDN_FsmEvent(&l2->l2m, EV_L2_DISC, skb);
	} else if (IsDM(datap)) {
		c = unnum_error(l2, skb, RSP);
		if (!c)
			ret = mISDN_FsmEvent(&l2->l2m, EV_L2_DM, skb);
	} else if (IsFRMR(datap)) {
		c = FRMR_error(l2, skb);
		if (!c)
			ret = mISDN_FsmEvent(&l2->l2m, EV_L2_FRMR, skb);
	} else
		c = 'L';	/* unknown frame type */
	if (c) {
		printk(KERN_WARNING "%s:l2 D-channel frame error %c\n",
		       mISDNDevName4ch(&l2->ch), c);
		mISDN_FsmEvent(&l2->l2m, EV_L2_FRAME_ERROR, (void *)(long)c);
	}
	return ret;
}
/*
 * mISDNchannel send entry point: dispatch primitives arriving from
 * layer 1 (PH_*) and layer 3 (DL_*) into the layer 2 state machine.
 * If the dispatched path did not consume the skb (non-zero ret),
 * free it here and report success to the caller.
 */
static int
l2_send(struct mISDNchannel *ch, struct sk_buff *skb)
{
	struct layer2 *l2 = container_of(ch, struct layer2, ch);
	struct mISDNhead *hh = mISDN_HEAD_P(skb);
	int ret = -EINVAL;

	if (*debug & DEBUG_L2_RECV)
		printk(KERN_DEBUG "%s: %s prim(%x) id(%x) sapi(%d) tei(%d)\n",
		       __func__, mISDNDevName4ch(&l2->ch), hh->prim, hh->id,
		       l2->sapi, l2->tei);
	if (hh->prim == DL_INTERN_MSG) {
		/* unwrap an internally re-queued message */
		struct mISDNhead *chh = hh + 1; /* saved copy */
		*hh = *chh;
		if (*debug & DEBUG_L2_RECV)
			printk(KERN_DEBUG "%s: prim(%x) id(%x) internal msg\n",
			       mISDNDevName4ch(&l2->ch), hh->prim, hh->id);
	}
	switch (hh->prim) {
	case PH_DATA_IND:
		ret = ph_data_indication(l2, hh, skb);
		break;
	case PH_DATA_CNF:
		ret = ph_data_confirm(l2, hh, skb);
		break;
	case PH_ACTIVATE_IND:
		test_and_set_bit(FLG_L1_ACTIV, &l2->flag);
		l2up_create(l2, MPH_ACTIVATE_IND, 0, NULL);
		if (test_and_clear_bit(FLG_ESTAB_PEND, &l2->flag))
			/* establish request was waiting for layer 1 */
			ret = mISDN_FsmEvent(&l2->l2m,
					     EV_L2_DL_ESTABLISH_REQ, skb);
		break;
	case PH_DEACTIVATE_IND:
		test_and_clear_bit(FLG_L1_ACTIV, &l2->flag);
		l2up_create(l2, MPH_DEACTIVATE_IND, 0, NULL);
		ret = mISDN_FsmEvent(&l2->l2m, EV_L1_DEACTIVATE, skb);
		break;
	case MPH_INFORMATION_IND:
		/* forward transparently to the upper layer */
		if (!l2->up)
			break;
		ret = l2->up->send(l2->up, skb);
		break;
	case DL_DATA_REQ:
		ret = mISDN_FsmEvent(&l2->l2m, EV_L2_DL_DATA, skb);
		break;
	case DL_UNITDATA_REQ:
		ret = mISDN_FsmEvent(&l2->l2m, EV_L2_DL_UNITDATA, skb);
		break;
	case DL_ESTABLISH_REQ:
		if (test_bit(FLG_LAPB, &l2->flag))
			test_and_set_bit(FLG_ORIG, &l2->flag);
		if (test_bit(FLG_L1_ACTIV, &l2->flag)) {
			if (test_bit(FLG_LAPD, &l2->flag) ||
			    test_bit(FLG_ORIG, &l2->flag))
				ret = mISDN_FsmEvent(&l2->l2m,
						     EV_L2_DL_ESTABLISH_REQ, skb);
		} else {
			/* layer 1 down: activate it first, establish later */
			if (test_bit(FLG_LAPD, &l2->flag) ||
			    test_bit(FLG_ORIG, &l2->flag)) {
				test_and_set_bit(FLG_ESTAB_PEND,
						 &l2->flag);
			}
			ret = l2down(l2, PH_ACTIVATE_REQ, l2_newid(l2),
				     skb);
		}
		break;
	case DL_RELEASE_REQ:
		if (test_bit(FLG_LAPB, &l2->flag))
			l2down_create(l2, PH_DEACTIVATE_REQ,
				      l2_newid(l2), 0, NULL);
		ret = mISDN_FsmEvent(&l2->l2m, EV_L2_DL_RELEASE_REQ,
				     skb);
		break;
	case DL_TIMER200_IND:
		mISDN_FsmEvent(&l2->l2m, EV_L2_T200I, NULL);
		break;
	case DL_TIMER203_IND:
		mISDN_FsmEvent(&l2->l2m, EV_L2_T203I, NULL);
		break;
	default:
		if (*debug & DEBUG_L2)
			l2m_debug(&l2->l2m, "l2 unknown pr %04x",
				  hh->prim);
	}
	if (ret) {
		/* skb was not consumed by the FSM: drop it */
		dev_kfree_skb(skb);
		ret = 0;
	}
	return ret;
}
  1809. int
  1810. tei_l2(struct layer2 *l2, u_int cmd, u_long arg)
  1811. {
  1812. int ret = -EINVAL;
  1813. if (*debug & DEBUG_L2_TEI)
  1814. printk(KERN_DEBUG "%s: cmd(%x) in %s\n",
  1815. mISDNDevName4ch(&l2->ch), cmd, __func__);
  1816. switch (cmd) {
  1817. case (MDL_ASSIGN_REQ):
  1818. ret = mISDN_FsmEvent(&l2->l2m, EV_L2_MDL_ASSIGN, (void *)arg);
  1819. break;
  1820. case (MDL_REMOVE_REQ):
  1821. ret = mISDN_FsmEvent(&l2->l2m, EV_L2_MDL_REMOVE, NULL);
  1822. break;
  1823. case (MDL_ERROR_IND):
  1824. ret = mISDN_FsmEvent(&l2->l2m, EV_L2_MDL_ERROR, NULL);
  1825. break;
  1826. case (MDL_ERROR_RSP):
  1827. /* ETS 300-125 5.3.2.1 Test: TC13010 */
  1828. printk(KERN_NOTICE "%s: MDL_ERROR|REQ (tei_l2)\n",
  1829. mISDNDevName4ch(&l2->ch));
  1830. ret = mISDN_FsmEvent(&l2->l2m, EV_L2_MDL_ERROR, NULL);
  1831. break;
  1832. }
  1833. return ret;
  1834. }
  1835. static void
  1836. release_l2(struct layer2 *l2)
  1837. {
  1838. mISDN_FsmDelTimer(&l2->t200, 21);
  1839. mISDN_FsmDelTimer(&l2->t203, 16);
  1840. skb_queue_purge(&l2->i_queue);
  1841. skb_queue_purge(&l2->ui_queue);
  1842. skb_queue_purge(&l2->down_queue);
  1843. ReleaseWin(l2);
  1844. if (test_bit(FLG_LAPD, &l2->flag)) {
  1845. TEIrelease(l2);
  1846. if (l2->ch.st)
  1847. l2->ch.st->dev->D.ctrl(&l2->ch.st->dev->D,
  1848. CLOSE_CHANNEL, NULL);
  1849. }
  1850. kfree(l2);
  1851. }
  1852. static int
  1853. l2_ctrl(struct mISDNchannel *ch, u_int cmd, void *arg)
  1854. {
  1855. struct layer2 *l2 = container_of(ch, struct layer2, ch);
  1856. u_int info;
  1857. if (*debug & DEBUG_L2_CTRL)
  1858. printk(KERN_DEBUG "%s: %s cmd(%x)\n",
  1859. mISDNDevName4ch(ch), __func__, cmd);
  1860. switch (cmd) {
  1861. case OPEN_CHANNEL:
  1862. if (test_bit(FLG_LAPD, &l2->flag)) {
  1863. set_channel_address(&l2->ch, l2->sapi, l2->tei);
  1864. info = DL_INFO_L2_CONNECT;
  1865. l2up_create(l2, DL_INFORMATION_IND,
  1866. sizeof(info), &info);
  1867. }
  1868. break;
  1869. case CLOSE_CHANNEL:
  1870. if (l2->ch.peer)
  1871. l2->ch.peer->ctrl(l2->ch.peer, CLOSE_CHANNEL, NULL);
  1872. release_l2(l2);
  1873. break;
  1874. }
  1875. return 0;
  1876. }
  1877. struct layer2 *
  1878. create_l2(struct mISDNchannel *ch, u_int protocol, u_long options, int tei,
  1879. int sapi)
  1880. {
  1881. struct layer2 *l2;
  1882. struct channel_req rq;
  1883. l2 = kzalloc(sizeof(struct layer2), GFP_KERNEL);
  1884. if (!l2) {
  1885. printk(KERN_ERR "kzalloc layer2 failed\n");
  1886. return NULL;
  1887. }
  1888. l2->next_id = 1;
  1889. l2->down_id = MISDN_ID_NONE;
  1890. l2->up = ch;
  1891. l2->ch.st = ch->st;
  1892. l2->ch.send = l2_send;
  1893. l2->ch.ctrl = l2_ctrl;
  1894. switch (protocol) {
  1895. case ISDN_P_LAPD_NT:
  1896. test_and_set_bit(FLG_LAPD, &l2->flag);
  1897. test_and_set_bit(FLG_LAPD_NET, &l2->flag);
  1898. test_and_set_bit(FLG_MOD128, &l2->flag);
  1899. l2->sapi = sapi;
  1900. l2->maxlen = MAX_DFRAME_LEN;
  1901. if (test_bit(OPTION_L2_PMX, &options))
  1902. l2->window = 7;
  1903. else
  1904. l2->window = 1;
  1905. if (test_bit(OPTION_L2_PTP, &options))
  1906. test_and_set_bit(FLG_PTP, &l2->flag);
  1907. if (test_bit(OPTION_L2_FIXEDTEI, &options))
  1908. test_and_set_bit(FLG_FIXED_TEI, &l2->flag);
  1909. l2->tei = tei;
  1910. l2->T200 = 1000;
  1911. l2->N200 = 3;
  1912. l2->T203 = 10000;
  1913. if (test_bit(OPTION_L2_PMX, &options))
  1914. rq.protocol = ISDN_P_NT_E1;
  1915. else
  1916. rq.protocol = ISDN_P_NT_S0;
  1917. rq.adr.channel = 0;
  1918. l2->ch.st->dev->D.ctrl(&l2->ch.st->dev->D, OPEN_CHANNEL, &rq);
  1919. break;
  1920. case ISDN_P_LAPD_TE:
  1921. test_and_set_bit(FLG_LAPD, &l2->flag);
  1922. test_and_set_bit(FLG_MOD128, &l2->flag);
  1923. test_and_set_bit(FLG_ORIG, &l2->flag);
  1924. l2->sapi = sapi;
  1925. l2->maxlen = MAX_DFRAME_LEN;
  1926. if (test_bit(OPTION_L2_PMX, &options))
  1927. l2->window = 7;
  1928. else
  1929. l2->window = 1;
  1930. if (test_bit(OPTION_L2_PTP, &options))
  1931. test_and_set_bit(FLG_PTP, &l2->flag);
  1932. if (test_bit(OPTION_L2_FIXEDTEI, &options))
  1933. test_and_set_bit(FLG_FIXED_TEI, &l2->flag);
  1934. l2->tei = tei;
  1935. l2->T200 = 1000;
  1936. l2->N200 = 3;
  1937. l2->T203 = 10000;
  1938. if (test_bit(OPTION_L2_PMX, &options))
  1939. rq.protocol = ISDN_P_TE_E1;
  1940. else
  1941. rq.protocol = ISDN_P_TE_S0;
  1942. rq.adr.channel = 0;
  1943. l2->ch.st->dev->D.ctrl(&l2->ch.st->dev->D, OPEN_CHANNEL, &rq);
  1944. break;
  1945. case ISDN_P_B_X75SLP:
  1946. test_and_set_bit(FLG_LAPB, &l2->flag);
  1947. l2->window = 7;
  1948. l2->maxlen = MAX_DATA_SIZE;
  1949. l2->T200 = 1000;
  1950. l2->N200 = 4;
  1951. l2->T203 = 5000;
  1952. l2->addr.A = 3;
  1953. l2->addr.B = 1;
  1954. break;
  1955. default:
  1956. printk(KERN_ERR "layer2 create failed prt %x\n",
  1957. protocol);
  1958. kfree(l2);
  1959. return NULL;
  1960. }
  1961. skb_queue_head_init(&l2->i_queue);
  1962. skb_queue_head_init(&l2->ui_queue);
  1963. skb_queue_head_init(&l2->down_queue);
  1964. skb_queue_head_init(&l2->tmp_queue);
  1965. InitWin(l2);
  1966. l2->l2m.fsm = &l2fsm;
  1967. if (test_bit(FLG_LAPB, &l2->flag) ||
  1968. test_bit(FLG_FIXED_TEI, &l2->flag) ||
  1969. test_bit(FLG_LAPD_NET, &l2->flag))
  1970. l2->l2m.state = ST_L2_4;
  1971. else
  1972. l2->l2m.state = ST_L2_1;
  1973. l2->l2m.debug = *debug;
  1974. l2->l2m.userdata = l2;
  1975. l2->l2m.userint = 0;
  1976. l2->l2m.printdebug = l2m_debug;
  1977. mISDN_FsmInitTimer(&l2->l2m, &l2->t200);
  1978. mISDN_FsmInitTimer(&l2->l2m, &l2->t203);
  1979. return l2;
  1980. }
  1981. static int
  1982. x75create(struct channel_req *crq)
  1983. {
  1984. struct layer2 *l2;
  1985. if (crq->protocol != ISDN_P_B_X75SLP)
  1986. return -EPROTONOSUPPORT;
  1987. l2 = create_l2(crq->ch, crq->protocol, 0, 0, 0);
  1988. if (!l2)
  1989. return -ENOMEM;
  1990. crq->ch = &l2->ch;
  1991. crq->protocol = ISDN_P_B_HDLC;
  1992. return 0;
  1993. }
  1994. static struct Bprotocol X75SLP = {
  1995. .Bprotocols = (1 << (ISDN_P_B_X75SLP & ISDN_P_B_MASK)),
  1996. .name = "X75SLP",
  1997. .create = x75create
  1998. };
  1999. int
  2000. Isdnl2_Init(u_int *deb)
  2001. {
  2002. int res;
  2003. debug = deb;
  2004. mISDN_register_Bprotocol(&X75SLP);
  2005. l2fsm.state_count = L2_STATE_COUNT;
  2006. l2fsm.event_count = L2_EVENT_COUNT;
  2007. l2fsm.strEvent = strL2Event;
  2008. l2fsm.strState = strL2State;
  2009. res = mISDN_FsmNew(&l2fsm, L2FnList, ARRAY_SIZE(L2FnList));
  2010. if (res)
  2011. goto error;
  2012. res = TEIInit(deb);
  2013. if (res)
  2014. goto error_fsm;
  2015. return 0;
  2016. error_fsm:
  2017. mISDN_FsmFree(&l2fsm);
  2018. error:
  2019. mISDN_unregister_Bprotocol(&X75SLP);
  2020. return res;
  2021. }
  2022. void
  2023. Isdnl2_cleanup(void)
  2024. {
  2025. mISDN_unregister_Bprotocol(&X75SLP);
  2026. TEIFree();
  2027. mISDN_FsmFree(&l2fsm);
  2028. }