tx.c

// SPDX-License-Identifier: GPL-2.0-only

#include <net/6lowpan.h>
#include <net/ndisc.h>
#include <net/ieee802154_netdev.h>
#include <net/mac802154.h>

#include "6lowpan_i.h"

#define LOWPAN_FRAG1_HEAD_SIZE	0x4
#define LOWPAN_FRAGN_HEAD_SIZE	0x5

struct lowpan_addr_info {
	struct ieee802154_addr daddr;
	struct ieee802154_addr saddr;
};
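
/* Fetch the 802.15.4 source/destination addresses that
 * lowpan_header_create() stashed in the skb headroom.
 */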
static inline struct
lowpan_addr_info *lowpan_skb_priv(const struct sk_buff *skb)
{
	WARN_ON_ONCE(skb_headroom(skb) < sizeof(struct lowpan_addr_info));
	return (struct lowpan_addr_info *)(skb->data -
			sizeof(struct lowpan_addr_info));
}

/* This callback will be called from the AF_PACKET and IPv6 stacks; AF_PACKET
 * sockets give an 8 byte array for addresses only!
 *
 * TODO I think AF_PACKET DGRAM (sending/receiving) and RAW (sending) make no
 * sense here. We should disable them; the right use-case would be AF_INET6
 * RAW/DGRAM sockets.
 */
int lowpan_header_create(struct sk_buff *skb, struct net_device *ldev,
			 unsigned short type, const void *daddr,
			 const void *saddr, unsigned int len)
{
	struct wpan_dev *wpan_dev = lowpan_802154_dev(ldev)->wdev->ieee802154_ptr;
	struct lowpan_addr_info *info = lowpan_skb_priv(skb);
	struct lowpan_802154_neigh *llneigh = NULL;
	const struct ipv6hdr *hdr = ipv6_hdr(skb);
	struct neighbour *n;

	if (!daddr)
		return -EINVAL;

	/* TODO:
	 * if this packet isn't an IPv6 one, where should it be routed?
	 */
	if (type != ETH_P_IPV6)
		return 0;

	/* intra-pan communication */
	info->saddr.pan_id = wpan_dev->pan_id;
	info->daddr.pan_id = info->saddr.pan_id;

	if (!memcmp(daddr, ldev->broadcast, EUI64_ADDR_LEN)) {
		info->daddr.short_addr = cpu_to_le16(IEEE802154_ADDR_BROADCAST);
		info->daddr.mode = IEEE802154_ADDR_SHORT;
	} else {
		__le16 short_addr = cpu_to_le16(IEEE802154_ADDR_SHORT_UNSPEC);

		n = neigh_lookup(&nd_tbl, &hdr->daddr, ldev);
		if (n) {
			llneigh = lowpan_802154_neigh(neighbour_priv(n));
			read_lock_bh(&n->lock);
			short_addr = llneigh->short_addr;
			read_unlock_bh(&n->lock);
		}

		if (llneigh &&
		    lowpan_802154_is_valid_src_short_addr(short_addr)) {
			info->daddr.short_addr = short_addr;
			info->daddr.mode = IEEE802154_ADDR_SHORT;
		} else {
			info->daddr.mode = IEEE802154_ADDR_LONG;
			ieee802154_be64_to_le64(&info->daddr.extended_addr,
						daddr);
		}
		if (n)
			neigh_release(n);
	}

	if (!saddr) {
		if (lowpan_802154_is_valid_src_short_addr(wpan_dev->short_addr)) {
			info->saddr.mode = IEEE802154_ADDR_SHORT;
			info->saddr.short_addr = wpan_dev->short_addr;
		} else {
			info->saddr.mode = IEEE802154_ADDR_LONG;
			info->saddr.extended_addr = wpan_dev->extended_addr;
		}
	} else {
		info->saddr.mode = IEEE802154_ADDR_LONG;
		ieee802154_be64_to_le64(&info->saddr.extended_addr, saddr);
	}

	return 0;
}
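
/* Allocate an skb for a single fragment: reuse the master frame's MAC
 * header as-is for FRAG1, build a fresh one for every following FRAGN.
 */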
static struct sk_buff*
lowpan_alloc_frag(struct sk_buff *skb, int size,
		  const struct ieee802154_hdr *master_hdr, bool frag1)
{
	struct net_device *wdev = lowpan_802154_dev(skb->dev)->wdev;
	struct sk_buff *frag;
	int rc;

	frag = alloc_skb(wdev->needed_headroom + wdev->needed_tailroom + size,
			 GFP_ATOMIC);
	if (likely(frag)) {
		frag->dev = wdev;
		frag->priority = skb->priority;
		skb_reserve(frag, wdev->needed_headroom);
		skb_reset_network_header(frag);
		*mac_cb(frag) = *mac_cb(skb);

		if (frag1) {
			skb_put_data(frag, skb_mac_header(skb), skb->mac_len);
		} else {
			rc = wpan_dev_hard_header(frag, wdev,
						  &master_hdr->dest,
						  &master_hdr->source, size);
			if (rc < 0) {
				kfree_skb(frag);
				return ERR_PTR(rc);
			}
		}
	} else {
		frag = ERR_PTR(-ENOMEM);
	}

	return frag;
}
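
/* Copy the fragment header plus one slice of the (compressed) datagram
 * into a freshly allocated skb and queue it for transmission.
 */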
static int
lowpan_xmit_fragment(struct sk_buff *skb, const struct ieee802154_hdr *wpan_hdr,
		     u8 *frag_hdr, int frag_hdrlen,
		     int offset, int len, bool frag1)
{
	struct sk_buff *frag;

	raw_dump_inline(__func__, " fragment header", frag_hdr, frag_hdrlen);

	frag = lowpan_alloc_frag(skb, frag_hdrlen + len, wpan_hdr, frag1);
	if (IS_ERR(frag))
		return PTR_ERR(frag);

	skb_put_data(frag, frag_hdr, frag_hdrlen);
	skb_put_data(frag, skb_network_header(skb) + offset, len);

	raw_dump_table(__func__, " fragment dump", frag->data, frag->len);

	return dev_queue_xmit(frag);
}
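
/* Split a datagram that exceeds the 802.15.4 payload into an RFC 4944
 * FRAG1 fragment followed by as many FRAGN fragments as needed.
 */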
static int
lowpan_xmit_fragmented(struct sk_buff *skb, struct net_device *ldev,
		       const struct ieee802154_hdr *wpan_hdr, u16 dgram_size,
		       u16 dgram_offset)
{
	__be16 frag_tag;
	u8 frag_hdr[5];
	int frag_cap, frag_len, payload_cap, rc;
	int skb_unprocessed, skb_offset;

	frag_tag = htons(lowpan_802154_dev(ldev)->fragment_tag);
	lowpan_802154_dev(ldev)->fragment_tag++;
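
	/* FRAG1 header: 5-bit dispatch (11000), 11-bit datagram size and a
	 * 16-bit datagram tag (RFC 4944, section 5.3).
	 */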
	frag_hdr[0] = LOWPAN_DISPATCH_FRAG1 | ((dgram_size >> 8) & 0x07);
	frag_hdr[1] = dgram_size & 0xff;
	memcpy(frag_hdr + 2, &frag_tag, sizeof(frag_tag));

	payload_cap = ieee802154_max_payload(wpan_hdr);

	frag_len = round_down(payload_cap - LOWPAN_FRAG1_HEAD_SIZE -
			      skb_network_header_len(skb), 8);

	skb_offset = skb_network_header_len(skb);
	skb_unprocessed = skb->len - skb->mac_len - skb_offset;

	rc = lowpan_xmit_fragment(skb, wpan_hdr, frag_hdr,
				  LOWPAN_FRAG1_HEAD_SIZE, 0,
				  frag_len + skb_network_header_len(skb),
				  true);
	if (rc) {
		pr_debug("%s unable to send FRAG1 packet (tag: %d)\n",
			 __func__, ntohs(frag_tag));
		goto err;
	}

	frag_hdr[0] &= ~LOWPAN_DISPATCH_FRAG1;
	frag_hdr[0] |= LOWPAN_DISPATCH_FRAGN;
	frag_cap = round_down(payload_cap - LOWPAN_FRAGN_HEAD_SIZE, 8);

	do {
		dgram_offset += frag_len;
		skb_offset += frag_len;
		skb_unprocessed -= frag_len;
		frag_len = min(frag_cap, skb_unprocessed);
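
		/* FRAGN additionally carries the datagram offset,
		 * expressed in units of eight octets.
		 */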
		frag_hdr[4] = dgram_offset >> 3;

		rc = lowpan_xmit_fragment(skb, wpan_hdr, frag_hdr,
					  LOWPAN_FRAGN_HEAD_SIZE, skb_offset,
					  frag_len, false);
		if (rc) {
			pr_debug("%s unable to send a FRAGN packet. (tag: %d, offset: %d)\n",
				 __func__, ntohs(frag_tag), skb_offset);
			goto err;
		}
	} while (skb_unprocessed > frag_cap);

	ldev->stats.tx_packets++;
	ldev->stats.tx_bytes += dgram_size;
	consume_skb(skb);
	return NET_XMIT_SUCCESS;

err:
	kfree_skb(skb);
	return rc;
}
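
/* Compress the IPv6 header in place and push the 802.15.4 MAC header,
 * reporting the uncompressed datagram size and offset that the
 * fragmentation path needs.
 */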
static int lowpan_header(struct sk_buff *skb, struct net_device *ldev,
			 u16 *dgram_size, u16 *dgram_offset)
{
	struct wpan_dev *wpan_dev = lowpan_802154_dev(ldev)->wdev->ieee802154_ptr;
	struct ieee802154_mac_cb *cb = mac_cb_init(skb);
	struct lowpan_addr_info info;

	memcpy(&info, lowpan_skb_priv(skb), sizeof(info));

	*dgram_size = skb->len;
	lowpan_header_compress(skb, ldev, &info.daddr, &info.saddr);
	/* dgram_offset = (saved bytes after compression) + lowpan header len */
	*dgram_offset = (*dgram_size - skb->len) + skb_network_header_len(skb);

	cb->type = IEEE802154_FC_TYPE_DATA;
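
	/* broadcast frames must never request an ACK */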
	if (info.daddr.mode == IEEE802154_ADDR_SHORT &&
	    ieee802154_is_broadcast_short_addr(info.daddr.short_addr))
		cb->ackreq = false;
	else
		cb->ackreq = wpan_dev->ackreq;

	return wpan_dev_hard_header(skb, lowpan_802154_dev(ldev)->wdev,
				    &info.daddr, &info.saddr, 0);
}
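
/* ndo_start_xmit handler for the 6LoWPAN interface: compress the IPv6
 * header, then either send the datagram in a single 802.15.4 frame or
 * hand it to the fragmentation path.
 */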
netdev_tx_t lowpan_xmit(struct sk_buff *skb, struct net_device *ldev)
{
	struct ieee802154_hdr wpan_hdr;
	int max_single, ret;
	u16 dgram_size, dgram_offset;

	pr_debug("packet xmit\n");

	WARN_ON_ONCE(skb->len > IPV6_MIN_MTU);

	/* We must take a copy of the skb before we modify/replace the ipv6
	 * header as the header could be used elsewhere.
	 */
	if (unlikely(skb_headroom(skb) < ldev->needed_headroom ||
		     skb_tailroom(skb) < ldev->needed_tailroom)) {
		struct sk_buff *nskb;

		nskb = skb_copy_expand(skb, ldev->needed_headroom,
				       ldev->needed_tailroom, GFP_ATOMIC);
		if (likely(nskb)) {
			consume_skb(skb);
			skb = nskb;
		} else {
			kfree_skb(skb);
			return NET_XMIT_DROP;
		}
	} else {
		skb = skb_unshare(skb, GFP_ATOMIC);
		if (!skb)
			return NET_XMIT_DROP;
	}

	ret = lowpan_header(skb, ldev, &dgram_size, &dgram_offset);
	if (ret < 0) {
		kfree_skb(skb);
		return NET_XMIT_DROP;
	}

	if (ieee802154_hdr_peek(skb, &wpan_hdr) < 0) {
		kfree_skb(skb);
		return NET_XMIT_DROP;
	}
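
	/* If the compressed datagram fits into a single 802.15.4 frame,
	 * send it directly; otherwise fall back to RFC 4944 fragmentation.
	 */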
	max_single = ieee802154_max_payload(&wpan_hdr);

	if (skb_tail_pointer(skb) - skb_network_header(skb) <= max_single) {
		skb->dev = lowpan_802154_dev(ldev)->wdev;
		ldev->stats.tx_packets++;
		ldev->stats.tx_bytes += dgram_size;
		return dev_queue_xmit(skb);
	} else {
		netdev_tx_t rc;

		pr_debug("frame is too big, fragmentation is needed\n");
		rc = lowpan_xmit_fragmented(skb, ldev, &wpan_hdr, dgram_size,
					    dgram_offset);
		return rc < 0 ? NET_XMIT_DROP : rc;
	}
}