/* hdlc_x25.c */
  1. // SPDX-License-Identifier: GPL-2.0-only
  2. /*
  3. * Generic HDLC support routines for Linux
  4. * X.25 support
  5. *
  6. * Copyright (C) 1999 - 2006 Krzysztof Halasa <[email protected]>
  7. */
  8. #include <linux/errno.h>
  9. #include <linux/gfp.h>
  10. #include <linux/hdlc.h>
  11. #include <linux/if_arp.h>
  12. #include <linux/inetdevice.h>
  13. #include <linux/init.h>
  14. #include <linux/kernel.h>
  15. #include <linux/lapb.h>
  16. #include <linux/module.h>
  17. #include <linux/pkt_sched.h>
  18. #include <linux/poll.h>
  19. #include <linux/rtnetlink.h>
  20. #include <linux/skbuff.h>
  21. #include <net/x25device.h>
/* Per-device X.25 protocol state, attached to the generic HDLC device. */
struct x25_state {
	x25_hdlc_proto settings;	/* user-supplied LAPB parameters (ioctl) */
	bool up;			/* true between x25_open() and x25_close() */
	spinlock_t up_lock;		/* Protects "up" */
	struct sk_buff_head rx_queue;	/* skbs waiting to be handed to the stack */
	struct tasklet_struct rx_tasklet; /* drains rx_queue (x25_rx_queue_kick) */
};
static int x25_ioctl(struct net_device *dev, struct if_settings *ifs);

/* Shorthand accessor: the x25_state hung off the generic HDLC device. */
static struct x25_state *state(hdlc_device *hdlc)
{
	return hdlc->state;
}
  34. static void x25_rx_queue_kick(struct tasklet_struct *t)
  35. {
  36. struct x25_state *x25st = from_tasklet(x25st, t, rx_tasklet);
  37. struct sk_buff *skb = skb_dequeue(&x25st->rx_queue);
  38. while (skb) {
  39. netif_receive_skb_core(skb);
  40. skb = skb_dequeue(&x25st->rx_queue);
  41. }
  42. }
  43. /* These functions are callbacks called by LAPB layer */
  44. static void x25_connect_disconnect(struct net_device *dev, int reason, int code)
  45. {
  46. struct x25_state *x25st = state(dev_to_hdlc(dev));
  47. struct sk_buff *skb;
  48. unsigned char *ptr;
  49. skb = __dev_alloc_skb(1, GFP_ATOMIC | __GFP_NOMEMALLOC);
  50. if (!skb)
  51. return;
  52. ptr = skb_put(skb, 1);
  53. *ptr = code;
  54. skb->protocol = x25_type_trans(skb, dev);
  55. skb_queue_tail(&x25st->rx_queue, skb);
  56. tasklet_schedule(&x25st->rx_tasklet);
  57. }
/* LAPB callback: connection established - signal layer 3. */
static void x25_connected(struct net_device *dev, int reason)
{
	x25_connect_disconnect(dev, reason, X25_IFACE_CONNECT);
}
/* LAPB callback: connection released - signal layer 3. */
static void x25_disconnected(struct net_device *dev, int reason)
{
	x25_connect_disconnect(dev, reason, X25_IFACE_DISCONNECT);
}
  66. static int x25_data_indication(struct net_device *dev, struct sk_buff *skb)
  67. {
  68. struct x25_state *x25st = state(dev_to_hdlc(dev));
  69. unsigned char *ptr;
  70. if (skb_cow(skb, 1)) {
  71. kfree_skb(skb);
  72. return NET_RX_DROP;
  73. }
  74. skb_push(skb, 1);
  75. ptr = skb->data;
  76. *ptr = X25_IFACE_DATA;
  77. skb->protocol = x25_type_trans(skb, dev);
  78. skb_queue_tail(&x25st->rx_queue, skb);
  79. tasklet_schedule(&x25st->rx_tasklet);
  80. return NET_RX_SUCCESS;
  81. }
/* LAPB callback: transmit a fully built LAPB frame on the HDLC hardware. */
static void x25_data_transmit(struct net_device *dev, struct sk_buff *skb)
{
	hdlc_device *hdlc = dev_to_hdlc(dev);

	skb_reset_network_header(skb);
	skb->protocol = hdlc_type_trans(skb, dev);

	/* Feed packet taps (tcpdump etc.) before the frame goes out. */
	if (dev_nit_active(dev))
		dev_queue_xmit_nit(skb, dev);

	hdlc->xmit(skb, dev); /* Ignore return value :-( */
}
/* ndo_start_xmit handler.  The first byte of the skb is a pseudo
 * header set by layer 3 selecting the action: data, connect request
 * or disconnect request.  Always returns NETDEV_TX_OK; the skb is
 * consumed on every path.
 */
static netdev_tx_t x25_xmit(struct sk_buff *skb, struct net_device *dev)
{
	hdlc_device *hdlc = dev_to_hdlc(dev);
	struct x25_state *x25st = state(hdlc);
	int result;

	/* There should be a pseudo header of 1 byte added by upper layers.
	 * Check to make sure it is there before reading it.
	 */
	if (skb->len < 1) {
		kfree_skb(skb);
		return NETDEV_TX_OK;
	}

	spin_lock_bh(&x25st->up_lock);
	if (!x25st->up) {
		/* Interface is (being) closed - drop silently. */
		spin_unlock_bh(&x25st->up_lock);
		kfree_skb(skb);
		return NETDEV_TX_OK;
	}

	switch (skb->data[0]) {
	case X25_IFACE_DATA:	/* Data to be transmitted */
		skb_pull(skb, 1);	/* strip the pseudo header */
		/* LAPB takes ownership of the skb on success; free it
		 * ourselves otherwise.
		 */
		result = lapb_data_request(dev, skb);
		if (result != LAPB_OK)
			dev_kfree_skb(skb);
		spin_unlock_bh(&x25st->up_lock);
		return NETDEV_TX_OK;

	case X25_IFACE_CONNECT:
		result = lapb_connect_request(dev);
		if (result != LAPB_OK) {
			if (result == LAPB_CONNECTED)
				/* Send connect confirm. msg to level 3 */
				x25_connected(dev, 0);
			else
				netdev_err(dev, "LAPB connect request failed, error code = %i\n",
					   result);
		}
		break;

	case X25_IFACE_DISCONNECT:
		result = lapb_disconnect_request(dev);
		if (result != LAPB_OK) {
			if (result == LAPB_NOTCONNECTED)
				/* Send disconnect confirm. msg to level 3 */
				x25_disconnected(dev, 0);
			else
				netdev_err(dev, "LAPB disconnect request failed, error code = %i\n",
					   result);
		}
		break;

	default:		/* to be defined */
		break;
	}

	spin_unlock_bh(&x25st->up_lock);
	dev_kfree_skb(skb);
	return NETDEV_TX_OK;
}
/* ndo open path: register this device with the LAPB layer, push the
 * user-configured parameters (DCE/DTE, modulo, window, timers, retry
 * count) down to LAPB, then mark the interface up.
 *
 * Returns 0 on success or a negative errno.
 */
static int x25_open(struct net_device *dev)
{
	static const struct lapb_register_struct cb = {
		.connect_confirmation = x25_connected,
		.connect_indication = x25_connected,
		.disconnect_confirmation = x25_disconnected,
		.disconnect_indication = x25_disconnected,
		.data_indication = x25_data_indication,
		.data_transmit = x25_data_transmit,
	};
	hdlc_device *hdlc = dev_to_hdlc(dev);
	struct x25_state *x25st = state(hdlc);
	struct lapb_parms_struct params;
	int result;

	result = lapb_register(dev, &cb);
	if (result != LAPB_OK)
		/* NOTE(review): every registration failure is mapped to
		 * -ENOMEM regardless of the LAPB error code.
		 */
		return -ENOMEM;

	result = lapb_getparms(dev, &params);
	if (result != LAPB_OK)
		return -EINVAL;

	if (state(hdlc)->settings.dce)
		params.mode = params.mode | LAPB_DCE;

	/* Modulo 128 selects extended (7-bit) sequence numbering. */
	if (state(hdlc)->settings.modulo == 128)
		params.mode = params.mode | LAPB_EXTENDED;

	params.window = state(hdlc)->settings.window;
	params.t1 = state(hdlc)->settings.t1;
	params.t2 = state(hdlc)->settings.t2;
	params.n2 = state(hdlc)->settings.n2;

	result = lapb_setparms(dev, &params);
	if (result != LAPB_OK)
		return -EINVAL;

	/* Publish "up" under the lock so rx/xmit paths see it. */
	spin_lock_bh(&x25st->up_lock);
	x25st->up = true;
	spin_unlock_bh(&x25st->up_lock);

	return 0;
}
/* ndo stop path: clear "up" under the lock first so concurrent
 * x25_rx()/x25_xmit() stop calling into LAPB, then unregister from
 * LAPB and wait for the rx tasklet to finish.
 */
static void x25_close(struct net_device *dev)
{
	hdlc_device *hdlc = dev_to_hdlc(dev);
	struct x25_state *x25st = state(hdlc);

	spin_lock_bh(&x25st->up_lock);
	x25st->up = false;
	spin_unlock_bh(&x25st->up_lock);

	lapb_unregister(dev);
	tasklet_kill(&x25st->rx_tasklet);
}
/* Receive hook called by the generic HDLC layer for every incoming
 * frame; hand the frame to LAPB unless the interface is (being)
 * closed.  The skb is consumed on every path.
 */
static int x25_rx(struct sk_buff *skb)
{
	struct net_device *dev = skb->dev;
	hdlc_device *hdlc = dev_to_hdlc(dev);
	struct x25_state *x25st = state(hdlc);

	/* Get a private copy if the skb is shared. */
	skb = skb_share_check(skb, GFP_ATOMIC);
	if (!skb) {
		dev->stats.rx_dropped++;
		return NET_RX_DROP;
	}

	spin_lock_bh(&x25st->up_lock);
	if (!x25st->up) {
		spin_unlock_bh(&x25st->up_lock);
		kfree_skb(skb);
		dev->stats.rx_dropped++;
		return NET_RX_DROP;
	}

	/* LAPB takes ownership of the skb on success. */
	if (lapb_data_received(dev, skb) == LAPB_OK) {
		spin_unlock_bh(&x25st->up_lock);
		return NET_RX_SUCCESS;
	}

	spin_unlock_bh(&x25st->up_lock);
	dev->stats.rx_errors++;
	dev_kfree_skb_any(skb);
	return NET_RX_DROP;
}
/* Hooks the generic HDLC core calls into this protocol. */
static struct hdlc_proto proto = {
	.open = x25_open,
	.close = x25_close,
	.ioctl = x25_ioctl,
	.netif_rx = x25_rx,
	.xmit = x25_xmit,
	.module = THIS_MODULE,
};
/* Protocol ioctl handler.
 * IF_GET_PROTO: copy the active settings to userspace.
 * IF_PROTO_X25: validate new settings (or apply legacy defaults when
 * ifs->size == 0), attach the hardware and this protocol to the
 * device, and initialize the per-device state.
 * Returns 0 or a negative errno.
 */
static int x25_ioctl(struct net_device *dev, struct if_settings *ifs)
{
	x25_hdlc_proto __user *x25_s = ifs->ifs_ifsu.x25;
	const size_t size = sizeof(x25_hdlc_proto);
	hdlc_device *hdlc = dev_to_hdlc(dev);
	x25_hdlc_proto new_settings;
	int result;

	switch (ifs->type) {
	case IF_GET_PROTO:
		/* Only meaningful when X.25 is the active protocol. */
		if (dev_to_hdlc(dev)->proto != &proto)
			return -EINVAL;
		ifs->type = IF_PROTO_X25;
		if (ifs->size < size) {
			ifs->size = size; /* data size wanted */
			return -ENOBUFS;
		}
		if (copy_to_user(x25_s, &state(hdlc)->settings, size))
			return -EFAULT;
		return 0;

	case IF_PROTO_X25:
		if (!capable(CAP_NET_ADMIN))
			return -EPERM;

		if (dev->flags & IFF_UP)
			return -EBUSY;

		/* backward compatibility */
		if (ifs->size == 0) {
			/* Legacy defaults: DTE, modulo 8, window 7. */
			new_settings.dce = 0;
			new_settings.modulo = 8;
			new_settings.window = 7;
			new_settings.t1 = 3;
			new_settings.t2 = 1;
			new_settings.n2 = 10;
		} else {
			if (copy_from_user(&new_settings, x25_s, size))
				return -EFAULT;

			/* Range-check the user-supplied values; the
			 * window limit depends on the modulo (7 / 127).
			 */
			if ((new_settings.dce != 0 &&
			     new_settings.dce != 1) ||
			    (new_settings.modulo != 8 &&
			     new_settings.modulo != 128) ||
			    new_settings.window < 1 ||
			    (new_settings.modulo == 8 &&
			     new_settings.window > 7) ||
			    (new_settings.modulo == 128 &&
			     new_settings.window > 127) ||
			    new_settings.t1 < 1 ||
			    new_settings.t1 > 255 ||
			    new_settings.t2 < 1 ||
			    new_settings.t2 > 255 ||
			    new_settings.n2 < 1 ||
			    new_settings.n2 > 255)
				return -EINVAL;
		}

		result = hdlc->attach(dev, ENCODING_NRZ,
				      PARITY_CRC16_PR1_CCITT);
		if (result)
			return result;

		/* Allocates and attaches the x25_state. */
		result = attach_hdlc_protocol(dev, &proto,
					      sizeof(struct x25_state));
		if (result)
			return result;

		memcpy(&state(hdlc)->settings, &new_settings, size);
		state(hdlc)->up = false;
		spin_lock_init(&state(hdlc)->up_lock);
		skb_queue_head_init(&state(hdlc)->rx_queue);
		tasklet_setup(&state(hdlc)->rx_tasklet, x25_rx_queue_kick);

		/* There's no header_ops so hard_header_len should be 0. */
		dev->hard_header_len = 0;
		/* When transmitting data:
		 * first we'll remove a pseudo header of 1 byte,
		 * then we'll prepend an LAPB header of at most 3 bytes.
		 */
		dev->needed_headroom = 3 - 1;

		dev->type = ARPHRD_X25;
		call_netdevice_notifiers(NETDEV_POST_TYPE_CHANGE, dev);
		netif_dormant_off(dev);
		return 0;
	}

	return -EINVAL;
}
/* Module init: register the X.25 protocol with the generic HDLC core. */
static int __init hdlc_x25_init(void)
{
	register_hdlc_protocol(&proto);
	return 0;
}
/* Module exit: unregister the protocol from the generic HDLC core. */
static void __exit hdlc_x25_exit(void)
{
	unregister_hdlc_protocol(&proto);
}

module_init(hdlc_x25_init);
module_exit(hdlc_x25_exit);

MODULE_AUTHOR("Krzysztof Halasa <[email protected]>");
MODULE_DESCRIPTION("X.25 protocol support for generic HDLC");
MODULE_LICENSE("GPL v2");