// SPDX-License-Identifier: GPL-2.0-or-later
/*
 *
 * Copyright (C) Alan Cox GW4PTS ([email protected])
 * Copyright (C) Jonathan Naylor G4KLX ([email protected])
 * Copyright (C) Joerg Reuter DL1BKE ([email protected])
 */
#include <linux/errno.h>
#include <linux/types.h>
#include <linux/socket.h>
#include <linux/in.h>
#include <linux/kernel.h>
#include <linux/module.h>
#include <linux/timer.h>
#include <linux/string.h>
#include <linux/sockios.h>
#include <linux/spinlock.h>
#include <linux/net.h>
#include <linux/slab.h>
#include <net/ax25.h>
#include <linux/inet.h>
#include <linux/netdevice.h>
#include <linux/skbuff.h>
#include <net/sock.h>
#include <linux/uaccess.h>
#include <linux/fcntl.h>
#include <linux/mm.h>
#include <linux/interrupt.h>
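
/*
 * ax25_frag_lock guards the alloc_skb()/skb_set_owner_w() pair inside the
 * fragmentation loop of ax25_output() below; the _bh locking variants are
 * used, so the critical section is safe against softirq context.
 */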
static DEFINE_SPINLOCK(ax25_frag_lock);

ax25_cb *ax25_send_frame(struct sk_buff *skb, int paclen, const ax25_address *src, ax25_address *dest, ax25_digi *digi, struct net_device *dev)
{
        ax25_dev *ax25_dev;
        ax25_cb *ax25;

        /*
         * Take the default packet length for the device if zero is
         * specified.
         */
        if (paclen == 0) {
                if ((ax25_dev = ax25_dev_ax25dev(dev)) == NULL)
                        return NULL;

                paclen = ax25_dev->values[AX25_VALUES_PACLEN];
        }

        /*
         * Look for an existing connection.
         */
        if ((ax25 = ax25_find_cb(src, dest, digi, dev)) != NULL) {
                ax25_output(ax25, paclen, skb);
                return ax25;            /* It already existed */
        }

        if ((ax25_dev = ax25_dev_ax25dev(dev)) == NULL)
                return NULL;

        if ((ax25 = ax25_create_cb()) == NULL)
                return NULL;

        ax25_fillin_cb(ax25, ax25_dev);

        ax25->source_addr = *src;
        ax25->dest_addr   = *dest;

        if (digi != NULL) {
                ax25->digipeat = kmemdup(digi, sizeof(*digi), GFP_ATOMIC);
                if (ax25->digipeat == NULL) {
                        ax25_cb_put(ax25);
                        return NULL;
                }
        }

        switch (ax25->ax25_dev->values[AX25_VALUES_PROTOCOL]) {
        case AX25_PROTO_STD_SIMPLEX:
        case AX25_PROTO_STD_DUPLEX:
                ax25_std_establish_data_link(ax25);
                break;

#ifdef CONFIG_AX25_DAMA_SLAVE
        case AX25_PROTO_DAMA_SLAVE:
                if (ax25_dev->dama.slave)
                        ax25_ds_establish_data_link(ax25);
                else
                        ax25_std_establish_data_link(ax25);
                break;
#endif
        }

        /*
         * There is one ref for the state machine; a caller needs
         * one more to put it back, just like with the existing one.
         */
        ax25_cb_hold(ax25);
        ax25_cb_add(ax25);

        ax25->state = AX25_STATE_1;

        ax25_start_heartbeat(ax25);

        ax25_output(ax25, paclen, skb);

        return ax25;                    /* We had to create it */
}

EXPORT_SYMBOL(ax25_send_frame);
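
/*
 * Usage note: on success ax25_send_frame() returns the ax25_cb that has
 * taken the skb, with an extra reference held for the caller (see the
 * comment above ax25_cb_hold() above); the caller drops it with
 * ax25_cb_put() when the pointer is no longer needed.  On a NULL return
 * the skb has not been queued and remains the caller's responsibility.
 * A minimal caller sketch, with hypothetical src/dest/dev and a paclen
 * of 0 to pick up the device default:
 *
 *      ax25_cb *ax25 = ax25_send_frame(skb, 0, &src, &dest, NULL, dev);
 *      if (ax25 == NULL)
 *              kfree_skb(skb);
 *      ...
 *      ax25_cb_put(ax25);
 */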

/*
 * All outgoing AX.25 I frames pass via this routine, so this is where
 * fragmentation of frames takes place.  Frames longer than paclen are
 * split; paclen values below the minimum of 16 are rejected.
 */
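/*
 * Two cases are handled when a frame needs splitting.  If the PID is
 * AX25_P_TEXT the PID byte is stripped and each piece is requeued as an
 * ordinary AX25_P_TEXT frame.  For any other PID the KA9Q segmentation
 * format is used: each piece gets an AX25_P_SEGMENT PID plus a one-byte
 * segment header holding the number of segments still to follow, with
 * AX25_SEG_FIRST set on the first segment of the series.
 */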
void ax25_output(ax25_cb *ax25, int paclen, struct sk_buff *skb)
{
        struct sk_buff *skbn;
        unsigned char *p;
        int frontlen, len, fragno, ka9qfrag, first = 1;

        if (paclen < 16) {
                WARN_ON_ONCE(1);
                kfree_skb(skb);
                return;
        }

        if ((skb->len - 1) > paclen) {
                if (*skb->data == AX25_P_TEXT) {
                        skb_pull(skb, 1); /* skip PID */
                        ka9qfrag = 0;
                } else {
                        paclen -= 2;    /* Allow for fragment control info */
                        ka9qfrag = 1;
                }

                fragno = skb->len / paclen;
                if (skb->len % paclen == 0) fragno--;

                frontlen = skb_headroom(skb);   /* Address space + CTRL */

                while (skb->len > 0) {
                        spin_lock_bh(&ax25_frag_lock);
                        if ((skbn = alloc_skb(paclen + 2 + frontlen, GFP_ATOMIC)) == NULL) {
                                spin_unlock_bh(&ax25_frag_lock);
                                printk(KERN_CRIT "AX.25: ax25_output - out of memory\n");
                                return;
                        }

                        if (skb->sk != NULL)
                                skb_set_owner_w(skbn, skb->sk);

                        spin_unlock_bh(&ax25_frag_lock);

                        len = (paclen > skb->len) ? skb->len : paclen;

                        if (ka9qfrag == 1) {
                                skb_reserve(skbn, frontlen + 2);
                                skb_set_network_header(skbn,
                                                       skb_network_offset(skb));
                                skb_copy_from_linear_data(skb, skb_put(skbn, len), len);
                                p = skb_push(skbn, 2);

                                *p++ = AX25_P_SEGMENT;

                                *p = fragno--;
                                if (first) {
                                        *p |= AX25_SEG_FIRST;
                                        first = 0;
                                }
                        } else {
                                skb_reserve(skbn, frontlen + 1);
                                skb_set_network_header(skbn,
                                                       skb_network_offset(skb));
                                skb_copy_from_linear_data(skb, skb_put(skbn, len), len);
                                p = skb_push(skbn, 1);
                                *p = AX25_P_TEXT;
                        }

                        skb_pull(skb, len);
                        skb_queue_tail(&ax25->write_queue, skbn); /* Throw it on the queue */
                }

                kfree_skb(skb);
        } else {
                skb_queue_tail(&ax25->write_queue, skb);  /* Throw it on the queue */
        }

        switch (ax25->ax25_dev->values[AX25_VALUES_PROTOCOL]) {
        case AX25_PROTO_STD_SIMPLEX:
        case AX25_PROTO_STD_DUPLEX:
                ax25_kick(ax25);
                break;

#ifdef CONFIG_AX25_DAMA_SLAVE
        /*
         * A DAMA slave is _required_ to work as normal AX.25L2V2
         * if no DAMA master is available.
         */
        case AX25_PROTO_DAMA_SLAVE:
                if (!ax25->ax25_dev->dama.slave) ax25_kick(ax25);
                break;
#endif
        }
}

/*
 * This procedure is passed a buffer descriptor for an iframe. It builds
 * the rest of the control part of the frame and then writes it out.
 */
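/*
 * With the standard modulus (8) the control field is a single byte:
 * N(R) in bits 5-7, the P/F bit in bit 4, N(S) in bits 1-3 and bit 0
 * clear to mark an I frame.  With the extended modulus (128) two
 * control bytes are built: N(S) shifted left by one in the first and
 * N(R) shifted left by one plus the EPF bit in the second.
 */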
static void ax25_send_iframe(ax25_cb *ax25, struct sk_buff *skb, int poll_bit)
{
        unsigned char *frame;

        if (skb == NULL)
                return;

        skb_reset_network_header(skb);

        if (ax25->modulus == AX25_MODULUS) {
                frame = skb_push(skb, 1);

                *frame = AX25_I;
                *frame |= (poll_bit) ? AX25_PF : 0;
                *frame |= (ax25->vr << 5);
                *frame |= (ax25->vs << 1);
        } else {
                frame = skb_push(skb, 2);

                frame[0] = AX25_I;
                frame[0] |= (ax25->vs << 1);
                frame[1] = (poll_bit) ? AX25_EPF : 0;
                frame[1] |= (ax25->vr << 1);
        }

        ax25_start_idletimer(ax25);

        ax25_transmit_buffer(ax25, skb, AX25_COMMAND);
}
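
/*
 * ax25_kick() drives transmission: while the link is in state 3 or 4
 * and the peer has not reported receiver-busy, it dequeues frames from
 * the write queue, clones each one, sends the clone as an I frame and
 * moves the original to the ack queue, stopping when V(S) reaches
 * V(A) + window, i.e. the send window is full.  With the standard
 * protocol the poll bit is set on the last frame of the burst; in DAMA
 * slave mode it is left clear.
 */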
void ax25_kick(ax25_cb *ax25)
{
        struct sk_buff *skb, *skbn;
        int last = 1;
        unsigned short start, end, next;

        if (ax25->state != AX25_STATE_3 && ax25->state != AX25_STATE_4)
                return;

        if (ax25->condition & AX25_COND_PEER_RX_BUSY)
                return;

        if (skb_peek(&ax25->write_queue) == NULL)
                return;

        start = (skb_peek(&ax25->ack_queue) == NULL) ? ax25->va : ax25->vs;
        end   = (ax25->va + ax25->window) % ax25->modulus;

        if (start == end)
                return;

        /*
         * Transmit data until either we're out of data to send or
         * the window is full. Send a poll on the final I frame if
         * the window is filled.
         */

        /*
         * Dequeue the frame and copy it.
         * Check for race with ax25_clear_queues().
         */
        skb = skb_dequeue(&ax25->write_queue);
        if (!skb)
                return;

        ax25->vs = start;

        do {
                if ((skbn = skb_clone(skb, GFP_ATOMIC)) == NULL) {
                        skb_queue_head(&ax25->write_queue, skb);
                        break;
                }

                if (skb->sk != NULL)
                        skb_set_owner_w(skbn, skb->sk);

                next = (ax25->vs + 1) % ax25->modulus;
                last = (next == end);

                /*
                 * Transmit the frame copy.
                 * bke 960114: do not set the Poll bit on the last frame
                 * in DAMA mode.
                 */
                switch (ax25->ax25_dev->values[AX25_VALUES_PROTOCOL]) {
                case AX25_PROTO_STD_SIMPLEX:
                case AX25_PROTO_STD_DUPLEX:
                        ax25_send_iframe(ax25, skbn, (last) ? AX25_POLLON : AX25_POLLOFF);
                        break;

#ifdef CONFIG_AX25_DAMA_SLAVE
                case AX25_PROTO_DAMA_SLAVE:
                        ax25_send_iframe(ax25, skbn, AX25_POLLOFF);
                        break;
#endif
                }

                ax25->vs = next;

                /*
                 * Requeue the original data frame.
                 */
                skb_queue_tail(&ax25->ack_queue, skb);

        } while (!last && (skb = skb_dequeue(&ax25->write_queue)) != NULL);

        ax25->condition &= ~AX25_COND_ACK_PENDING;

        if (!ax25_t1timer_running(ax25)) {
                ax25_stop_t3timer(ax25);
                ax25_calculate_t1(ax25);
                ax25_start_t1timer(ax25);
        }
}
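
/*
 * ax25_transmit_buffer() prepends the AX.25 address field (destination,
 * source and any digipeater path) to the frame, expanding the skb head
 * with skb_expand_head() when there is not enough headroom, and hands
 * the result to ax25_queue_xmit() on the control block's device.  If
 * the device has gone away the connection is torn down instead.
 */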
void ax25_transmit_buffer(ax25_cb *ax25, struct sk_buff *skb, int type)
{
        unsigned char *ptr;
        int headroom;

        if (ax25->ax25_dev == NULL) {
                ax25_disconnect(ax25, ENETUNREACH);
                return;
        }

        headroom = ax25_addr_size(ax25->digipeat);

        if (unlikely(skb_headroom(skb) < headroom)) {
                skb = skb_expand_head(skb, headroom);
                if (!skb) {
                        printk(KERN_CRIT "AX.25: ax25_transmit_buffer - out of memory\n");
                        return;
                }
        }

        ptr = skb_push(skb, headroom);

        ax25_addr_build(ptr, &ax25->source_addr, &ax25->dest_addr, ax25->digipeat, type, ax25->modulus);

        ax25_queue_xmit(skb, ax25->ax25_dev->dev);
}

/*
 * A small shim around dev_queue_xmit() that adds the KISS control byte
 * and applies any packet forwarding that is in operation.
 */
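/*
 * The byte pushed in front of the frame is the KISS command byte;
 * 0x00 is the "data frame" command for KISS port 0, which the AX.25
 * interface drivers expect to see before the frame leaves the device.
 */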
void ax25_queue_xmit(struct sk_buff *skb, struct net_device *dev)
{
        unsigned char *ptr;

        skb->protocol = ax25_type_trans(skb, ax25_fwd_dev(dev));

        ptr  = skb_push(skb, 1);
        *ptr = 0x00;                    /* KISS */

        dev_queue_xmit(skb);
}
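
/*
 * ax25_check_iframes_acked() handles an incoming N(R).  If it
 * acknowledges everything outstanding (nr == vs) the round-trip time is
 * updated, T1 is stopped and T3 restarted; if it acknowledges at least
 * one new frame (nr != va) T1 is recalculated and restarted.  Returns 1
 * if any frames were acknowledged, otherwise 0.
 */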
int ax25_check_iframes_acked(ax25_cb *ax25, unsigned short nr)
{
        if (ax25->vs == nr) {
                ax25_frames_acked(ax25, nr);
                ax25_calculate_rtt(ax25);
                ax25_stop_t1timer(ax25);
                ax25_start_t3timer(ax25);
                return 1;
        } else {
                if (ax25->va != nr) {
                        ax25_frames_acked(ax25, nr);
                        ax25_calculate_t1(ax25);
                        ax25_start_t1timer(ax25);
                        return 1;
                }
        }

        return 0;
}