  1. // SPDX-License-Identifier: GPL-2.0+
  2. /*
  3. * IPv6 IOAM Lightweight Tunnel implementation
  4. *
  5. * Author:
  6. * Justin Iurman <[email protected]>
  7. */
  8. #include <linux/kernel.h>
  9. #include <linux/skbuff.h>
  10. #include <linux/net.h>
  11. #include <linux/in6.h>
  12. #include <linux/ioam6.h>
  13. #include <linux/ioam6_iptunnel.h>
  14. #include <net/dst.h>
  15. #include <net/sock.h>
  16. #include <net/lwtunnel.h>
  17. #include <net/ioam6.h>
  18. #include <net/netlink.h>
  19. #include <net/ipv6.h>
  20. #include <net/dst_cache.h>
  21. #include <net/ip6_route.h>
  22. #include <net/addrconf.h>
/* Bitmasks over the (host byte order) trace type: fields whose node data
 * is 4 octets ("short") vs 8 octets ("wide"), used to derive nodelen in
 * ioam6_validate_trace_hdr().
 */
#define IOAM6_MASK_SHORT_FIELDS 0xff100000
#define IOAM6_MASK_WIDE_FIELDS 0xe00000
/* Pre-built header chain inserted on output: the Hop-by-Hop header, a
 * 2-octet PadN (so the IOAM option data is 4n-aligned), the IOAM option
 * header and the pre-allocated trace header. The trace data space is
 * appended right behind this struct (see ioam6_build_state()).
 */
struct ioam6_lwt_encap {
	struct ipv6_hopopt_hdr eh;
	u8 pad[2];	/* 2-octet padding for 4n-alignment */
	struct ioam6_hdr ioamh;
	struct ioam6_trace_hdr traceh;
} __packed;
/* Insertion frequency: IOAM is inserted in "k" packets out of every "n"
 * (see the pkt_cnt check in ioam6_output()).
 */
struct ioam6_lwt_freq {
	u32 k;
	u32 n;
};
/* Per-tunnel state, stored in lwtunnel_state::data. */
struct ioam6_lwt {
	struct dst_cache cache;		/* cached route after encap rewrite */
	struct ioam6_lwt_freq freq;	/* "k over n" insertion frequency */
	atomic_t pkt_cnt;		/* packet counter for frequency check */
	u8 mode;			/* IOAM6_IPTUNNEL_MODE_* */
	struct in6_addr tundst;		/* tunnel dst (non-inline modes) */
	struct ioam6_lwt_encap tuninfo;	/* pre-built headers to insert */
};
/* Accepted range for both the "k" and "n" frequency attributes. */
static struct netlink_range_validation freq_range = {
	.min = IOAM6_IPTUNNEL_FREQ_MIN,
	.max = IOAM6_IPTUNNEL_FREQ_MAX,
};
/* Per-tunnel state lives in the lwtunnel private data area. */
static struct ioam6_lwt *ioam6_lwt_state(struct lwtunnel_state *lwt)
{
	return (struct ioam6_lwt *)lwt->data;
}
  51. static struct ioam6_lwt_encap *ioam6_lwt_info(struct lwtunnel_state *lwt)
  52. {
  53. return &ioam6_lwt_state(lwt)->tuninfo;
  54. }
  55. static struct ioam6_trace_hdr *ioam6_lwt_trace(struct lwtunnel_state *lwt)
  56. {
  57. return &(ioam6_lwt_state(lwt)->tuninfo.traceh);
  58. }
/* Netlink policy for the IOAM6_IPTUNNEL_* attributes. */
static const struct nla_policy ioam6_iptunnel_policy[IOAM6_IPTUNNEL_MAX + 1] = {
	[IOAM6_IPTUNNEL_FREQ_K] = NLA_POLICY_FULL_RANGE(NLA_U32, &freq_range),
	[IOAM6_IPTUNNEL_FREQ_N] = NLA_POLICY_FULL_RANGE(NLA_U32, &freq_range),
	[IOAM6_IPTUNNEL_MODE] = NLA_POLICY_RANGE(NLA_U8,
						 IOAM6_IPTUNNEL_MODE_MIN,
						 IOAM6_IPTUNNEL_MODE_MAX),
	[IOAM6_IPTUNNEL_DST] = NLA_POLICY_EXACT_LEN(sizeof(struct in6_addr)),
	[IOAM6_IPTUNNEL_TRACE] = NLA_POLICY_EXACT_LEN(sizeof(struct ioam6_trace_hdr)),
};
/* Validate a user-supplied trace header: the trace type and remaining
 * length must be non-zero, remlen (in 4-octet units) must be bounded,
 * and trace type bits 12-21 must all be zero (not supported here).
 * On success, nodelen is (re)computed, in 4-octet units, from the
 * requested fields; returns false on any validation failure.
 */
static bool ioam6_validate_trace_hdr(struct ioam6_trace_hdr *trace)
{
	u32 fields;

	if (!trace->type_be32 || !trace->remlen ||
	    trace->remlen > IOAM6_TRACE_DATA_SIZE_MAX / 4 ||
	    trace->type.bit12 | trace->type.bit13 | trace->type.bit14 |
	    trace->type.bit15 | trace->type.bit16 | trace->type.bit17 |
	    trace->type.bit18 | trace->type.bit19 | trace->type.bit20 |
	    trace->type.bit21)
		return false;

	trace->nodelen = 0;
	fields = be32_to_cpu(trace->type_be32);

	/* short fields carry 4 octets of node data each, wide fields 8 */
	trace->nodelen += hweight32(fields & IOAM6_MASK_SHORT_FIELDS)
			  * (sizeof(__be32) / 4);
	trace->nodelen += hweight32(fields & IOAM6_MASK_WIDE_FIELDS)
			  * (sizeof(__be64) / 4);

	return true;
}
/* Build a new IOAM6 lwtunnel state from netlink attributes.
 *
 * Parses and validates the IOAM6_IPTUNNEL_* attributes, allocates the
 * lwtunnel state with room for the pre-allocated trace data appended
 * (rounded up to a multiple of 8 octets), and pre-builds the
 * Hop-by-Hop/IOAM headers so that the output path only has to copy
 * them into each packet.
 *
 * Returns 0 and sets *ts on success, or a negative errno.
 */
static int ioam6_build_state(struct net *net, struct nlattr *nla,
			     unsigned int family, const void *cfg,
			     struct lwtunnel_state **ts,
			     struct netlink_ext_ack *extack)
{
	struct nlattr *tb[IOAM6_IPTUNNEL_MAX + 1];
	struct ioam6_lwt_encap *tuninfo;
	struct ioam6_trace_hdr *trace;
	struct lwtunnel_state *lwt;
	struct ioam6_lwt *ilwt;
	int len_aligned, err;
	u32 freq_k, freq_n;
	u8 mode;

	/* IOAM insertion is IPv6-only */
	if (family != AF_INET6)
		return -EINVAL;

	err = nla_parse_nested(tb, IOAM6_IPTUNNEL_MAX, nla,
			       ioam6_iptunnel_policy, extack);
	if (err < 0)
		return err;

	/* "k over n" frequency: both attributes or neither; defaults to
	 * the minimum (1/1, i.e. every packet) when omitted
	 */
	if ((!tb[IOAM6_IPTUNNEL_FREQ_K] && tb[IOAM6_IPTUNNEL_FREQ_N]) ||
	    (tb[IOAM6_IPTUNNEL_FREQ_K] && !tb[IOAM6_IPTUNNEL_FREQ_N])) {
		NL_SET_ERR_MSG(extack, "freq: missing parameter");
		return -EINVAL;
	} else if (!tb[IOAM6_IPTUNNEL_FREQ_K] && !tb[IOAM6_IPTUNNEL_FREQ_N]) {
		freq_k = IOAM6_IPTUNNEL_FREQ_MIN;
		freq_n = IOAM6_IPTUNNEL_FREQ_MIN;
	} else {
		freq_k = nla_get_u32(tb[IOAM6_IPTUNNEL_FREQ_K]);
		freq_n = nla_get_u32(tb[IOAM6_IPTUNNEL_FREQ_N]);

		if (freq_k > freq_n) {
			NL_SET_ERR_MSG(extack, "freq: k > n is forbidden");
			return -EINVAL;
		}
	}

	if (!tb[IOAM6_IPTUNNEL_MODE])
		mode = IOAM6_IPTUNNEL_MODE_INLINE;
	else
		mode = nla_get_u8(tb[IOAM6_IPTUNNEL_MODE]);

	/* non-inline modes encapsulate (ip6ip6) and therefore need an
	 * explicit tunnel destination address
	 */
	if (!tb[IOAM6_IPTUNNEL_DST] && mode != IOAM6_IPTUNNEL_MODE_INLINE) {
		NL_SET_ERR_MSG(extack, "this mode needs a tunnel destination");
		return -EINVAL;
	}

	if (!tb[IOAM6_IPTUNNEL_TRACE]) {
		NL_SET_ERR_MSG(extack, "missing trace");
		return -EINVAL;
	}

	trace = nla_data(tb[IOAM6_IPTUNNEL_TRACE]);
	if (!ioam6_validate_trace_hdr(trace)) {
		NL_SET_ERR_MSG_ATTR(extack, tb[IOAM6_IPTUNNEL_TRACE],
				    "invalid trace validation");
		return -EINVAL;
	}

	/* remlen is in 4-octet units; round the trace data area up to a
	 * multiple of 8 so the whole Hop-by-Hop header is 8n octets
	 */
	len_aligned = ALIGN(trace->remlen * 4, 8);
	lwt = lwtunnel_state_alloc(sizeof(*ilwt) + len_aligned);
	if (!lwt)
		return -ENOMEM;

	ilwt = ioam6_lwt_state(lwt);
	err = dst_cache_init(&ilwt->cache, GFP_ATOMIC);
	if (err) {
		kfree(lwt);
		return err;
	}

	atomic_set(&ilwt->pkt_cnt, 0);
	ilwt->freq.k = freq_k;
	ilwt->freq.n = freq_n;

	ilwt->mode = mode;
	if (tb[IOAM6_IPTUNNEL_DST])
		ilwt->tundst = nla_get_in6_addr(tb[IOAM6_IPTUNNEL_DST]);

	tuninfo = ioam6_lwt_info(lwt);
	/* hdrlen is in 8-octet units, not counting the first 8 octets */
	tuninfo->eh.hdrlen = ((sizeof(*tuninfo) + len_aligned) >> 3) - 1;
	tuninfo->pad[0] = IPV6_TLV_PADN;
	tuninfo->ioamh.type = IOAM6_TYPE_PREALLOC;
	tuninfo->ioamh.opt_type = IPV6_TLV_IOAM;
	tuninfo->ioamh.opt_len = sizeof(tuninfo->ioamh) - 2 + sizeof(*trace)
				 + trace->remlen * 4;

	memcpy(&tuninfo->traceh, trace, sizeof(*trace));

	/* fill the trailing 8-octet alignment gap, if any, with a PadN */
	if (len_aligned - trace->remlen * 4) {
		tuninfo->traceh.data[trace->remlen * 4] = IPV6_TLV_PADN;
		tuninfo->traceh.data[trace->remlen * 4 + 1] = 2;
	}

	lwt->type = LWTUNNEL_ENCAP_IOAM6;
	lwt->flags |= LWTUNNEL_STATE_OUTPUT_REDIRECT;

	*ts = lwt;

	return 0;
}
/* Fill this node's data into the pre-allocated trace space of the
 * already-inserted IOAM option, if the trace's namespace is known;
 * unknown namespaces are silently left unfilled. Always returns 0.
 */
static int ioam6_do_fill(struct net *net, struct sk_buff *skb)
{
	struct ioam6_trace_hdr *trace;
	struct ioam6_namespace *ns;

	/* skip the Hop-by-Hop header, its 2-octet PadN and the IOAM
	 * option header to reach the trace header (see ioam6_lwt_encap)
	 */
	trace = (struct ioam6_trace_hdr *)(skb_transport_header(skb)
					   + sizeof(struct ipv6_hopopt_hdr) + 2
					   + sizeof(struct ioam6_hdr));

	ns = ioam6_namespace(net, trace->namespace_id);
	if (ns)
		ioam6_fill_trace_data(skb, ns, trace, false);

	return 0;
}
/* Direct insertion: place the pre-built Hop-by-Hop + IOAM headers right
 * after the packet's existing IPv6 header, then fill the trace data.
 * Returns 0 on success or a negative errno from skb_cow_head().
 */
static int ioam6_do_inline(struct net *net, struct sk_buff *skb,
			   struct ioam6_lwt_encap *tuninfo)
{
	struct ipv6hdr *oldhdr, *hdr;
	int hdrlen, err;

	/* total Hop-by-Hop header length, in octets */
	hdrlen = (tuninfo->eh.hdrlen + 1) << 3;

	err = skb_cow_head(skb, hdrlen + skb->mac_len);
	if (unlikely(err))
		return err;

	/* pull the IPv6 header, push it back up with room for the new
	 * headers behind it, keeping the checksum in sync
	 */
	oldhdr = ipv6_hdr(skb);
	skb_pull(skb, sizeof(*oldhdr));
	skb_postpull_rcsum(skb, skb_network_header(skb), sizeof(*oldhdr));

	skb_push(skb, sizeof(*oldhdr) + hdrlen);
	skb_reset_network_header(skb);
	skb_mac_header_rebuild(skb);

	hdr = ipv6_hdr(skb);
	memmove(hdr, oldhdr, sizeof(*oldhdr));
	/* the Hop-by-Hop header inherits the original next header */
	tuninfo->eh.nexthdr = hdr->nexthdr;

	skb_set_transport_header(skb, sizeof(*hdr));
	skb_postpush_rcsum(skb, hdr, sizeof(*hdr) + hdrlen);

	memcpy(skb_transport_header(skb), (u8 *)tuninfo, hdrlen);

	hdr->nexthdr = NEXTHDR_HOP;
	hdr->payload_len = cpu_to_be16(skb->len - sizeof(*hdr));

	return ioam6_do_fill(net, skb);
}
/* ip6ip6 encapsulation: prepend a new outer IPv6 header (destination
 * @tundst) carrying the pre-built Hop-by-Hop + IOAM headers, then fill
 * the trace data. Returns 0 on success or a negative errno from
 * skb_cow_head().
 */
static int ioam6_do_encap(struct net *net, struct sk_buff *skb,
			  struct ioam6_lwt_encap *tuninfo,
			  struct in6_addr *tundst)
{
	struct dst_entry *dst = skb_dst(skb);
	struct ipv6hdr *hdr, *inner_hdr;
	int hdrlen, len, err;

	/* total Hop-by-Hop header length, in octets */
	hdrlen = (tuninfo->eh.hdrlen + 1) << 3;
	len = sizeof(*hdr) + hdrlen;

	err = skb_cow_head(skb, len + skb->mac_len);
	if (unlikely(err))
		return err;

	inner_hdr = ipv6_hdr(skb);

	skb_push(skb, len);
	skb_reset_network_header(skb);
	skb_mac_header_rebuild(skb);
	skb_set_transport_header(skb, sizeof(*hdr));

	/* the Hop-by-Hop header is followed by the inner IPv6 packet */
	tuninfo->eh.nexthdr = NEXTHDR_IPV6;
	memcpy(skb_transport_header(skb), (u8 *)tuninfo, hdrlen);

	/* the outer header starts as a copy of the inner one, then gets
	 * its own next header, length and addresses
	 */
	hdr = ipv6_hdr(skb);
	memcpy(hdr, inner_hdr, sizeof(*hdr));

	hdr->nexthdr = NEXTHDR_HOP;
	hdr->payload_len = cpu_to_be16(skb->len - sizeof(*hdr));
	hdr->daddr = *tundst;
	/* pick a source address on the egress device for the new daddr */
	ipv6_dev_get_saddr(net, dst->dev, &hdr->daddr,
			   IPV6_PREFER_SRC_PUBLIC, &hdr->saddr);

	skb_postpush_rcsum(skb, hdr, len);

	return ioam6_do_fill(net, skb);
}
/* lwtunnel output hook: insert IOAM per the configured mode and
 * frequency, re-route the packet if encapsulation changed its
 * destination, and hand it to the next output handler. Frees the skb
 * and returns a negative errno on failure.
 */
static int ioam6_output(struct net *net, struct sock *sk, struct sk_buff *skb)
{
	struct dst_entry *dst = skb_dst(skb);
	struct in6_addr orig_daddr;
	struct ioam6_lwt *ilwt;
	int err = -EINVAL;
	u32 pkt_cnt;

	if (skb->protocol != htons(ETH_P_IPV6))
		goto drop;

	ilwt = ioam6_lwt_state(dst->lwtstate);

	/* Check for insertion frequency (i.e., "k over n" insertions) */
	pkt_cnt = atomic_fetch_inc(&ilwt->pkt_cnt);
	if (pkt_cnt % ilwt->freq.n >= ilwt->freq.k)
		goto out;

	/* remember the original destination to detect an encap rewrite */
	orig_daddr = ipv6_hdr(skb)->daddr;

	switch (ilwt->mode) {
	case IOAM6_IPTUNNEL_MODE_INLINE:
do_inline:
		/* Direct insertion - if there is no Hop-by-Hop yet */
		if (ipv6_hdr(skb)->nexthdr == NEXTHDR_HOP)
			goto out;

		err = ioam6_do_inline(net, skb, &ilwt->tuninfo);
		if (unlikely(err))
			goto drop;

		break;
	case IOAM6_IPTUNNEL_MODE_ENCAP:
do_encap:
		/* Encapsulation (ip6ip6) */
		err = ioam6_do_encap(net, skb, &ilwt->tuninfo, &ilwt->tundst);
		if (unlikely(err))
			goto drop;

		break;
	case IOAM6_IPTUNNEL_MODE_AUTO:
		/* Automatic (RFC8200 compliant):
		 * - local packets -> INLINE mode
		 * - in-transit packets -> ENCAP mode
		 */
		if (!skb->dev)
			goto do_inline;

		goto do_encap;
	default:
		goto drop;
	}

	err = skb_cow_head(skb, LL_RESERVED_SPACE(dst->dev));
	if (unlikely(err))
		goto drop;

	/* encap changed the destination: use the cached route (or do a
	 * fresh lookup) instead of the original one
	 */
	if (!ipv6_addr_equal(&orig_daddr, &ipv6_hdr(skb)->daddr)) {
		/* NOTE(review): dst_cache_* is usually called with BHs
		 * disabled; confirm preempt_disable() is sufficient here.
		 */
		preempt_disable();
		dst = dst_cache_get(&ilwt->cache);
		preempt_enable();

		if (unlikely(!dst)) {
			struct ipv6hdr *hdr = ipv6_hdr(skb);
			struct flowi6 fl6;

			memset(&fl6, 0, sizeof(fl6));
			fl6.daddr = hdr->daddr;
			fl6.saddr = hdr->saddr;
			fl6.flowlabel = ip6_flowinfo(hdr);
			fl6.flowi6_mark = skb->mark;
			fl6.flowi6_proto = hdr->nexthdr;

			dst = ip6_route_output(net, NULL, &fl6);
			if (dst->error) {
				err = dst->error;
				dst_release(dst);
				goto drop;
			}

			preempt_disable();
			dst_cache_set_ip6(&ilwt->cache, dst, &fl6.saddr);
			preempt_enable();
		}

		skb_dst_drop(skb);
		skb_dst_set(skb, dst);

		return dst_output(net, sk, skb);
	}
out:
	return dst->lwtstate->orig_output(net, sk, skb);
drop:
	kfree_skb(skb);
	return err;
}
  316. static void ioam6_destroy_state(struct lwtunnel_state *lwt)
  317. {
  318. dst_cache_destroy(&ioam6_lwt_state(lwt)->cache);
  319. }
  320. static int ioam6_fill_encap_info(struct sk_buff *skb,
  321. struct lwtunnel_state *lwtstate)
  322. {
  323. struct ioam6_lwt *ilwt = ioam6_lwt_state(lwtstate);
  324. int err;
  325. err = nla_put_u32(skb, IOAM6_IPTUNNEL_FREQ_K, ilwt->freq.k);
  326. if (err)
  327. goto ret;
  328. err = nla_put_u32(skb, IOAM6_IPTUNNEL_FREQ_N, ilwt->freq.n);
  329. if (err)
  330. goto ret;
  331. err = nla_put_u8(skb, IOAM6_IPTUNNEL_MODE, ilwt->mode);
  332. if (err)
  333. goto ret;
  334. if (ilwt->mode != IOAM6_IPTUNNEL_MODE_INLINE) {
  335. err = nla_put_in6_addr(skb, IOAM6_IPTUNNEL_DST, &ilwt->tundst);
  336. if (err)
  337. goto ret;
  338. }
  339. err = nla_put(skb, IOAM6_IPTUNNEL_TRACE, sizeof(ilwt->tuninfo.traceh),
  340. &ilwt->tuninfo.traceh);
  341. ret:
  342. return err;
  343. }
  344. static int ioam6_encap_nlsize(struct lwtunnel_state *lwtstate)
  345. {
  346. struct ioam6_lwt *ilwt = ioam6_lwt_state(lwtstate);
  347. int nlsize;
  348. nlsize = nla_total_size(sizeof(ilwt->freq.k)) +
  349. nla_total_size(sizeof(ilwt->freq.n)) +
  350. nla_total_size(sizeof(ilwt->mode)) +
  351. nla_total_size(sizeof(ilwt->tuninfo.traceh));
  352. if (ilwt->mode != IOAM6_IPTUNNEL_MODE_INLINE)
  353. nlsize += nla_total_size(sizeof(ilwt->tundst));
  354. return nlsize;
  355. }
  356. static int ioam6_encap_cmp(struct lwtunnel_state *a, struct lwtunnel_state *b)
  357. {
  358. struct ioam6_trace_hdr *trace_a = ioam6_lwt_trace(a);
  359. struct ioam6_trace_hdr *trace_b = ioam6_lwt_trace(b);
  360. struct ioam6_lwt *ilwt_a = ioam6_lwt_state(a);
  361. struct ioam6_lwt *ilwt_b = ioam6_lwt_state(b);
  362. return (ilwt_a->freq.k != ilwt_b->freq.k ||
  363. ilwt_a->freq.n != ilwt_b->freq.n ||
  364. ilwt_a->mode != ilwt_b->mode ||
  365. (ilwt_a->mode != IOAM6_IPTUNNEL_MODE_INLINE &&
  366. !ipv6_addr_equal(&ilwt_a->tundst, &ilwt_b->tundst)) ||
  367. trace_a->namespace_id != trace_b->namespace_id);
  368. }
/* lwtunnel operations registered for the IOAM6 encap type. */
static const struct lwtunnel_encap_ops ioam6_iptun_ops = {
	.build_state = ioam6_build_state,
	.destroy_state = ioam6_destroy_state,
	.output = ioam6_output,
	.fill_encap = ioam6_fill_encap_info,
	.get_encap_size = ioam6_encap_nlsize,
	.cmp_encap = ioam6_encap_cmp,
	.owner = THIS_MODULE,
};
/* Register the IOAM6 lwtunnel encap type; returns 0 or a negative errno. */
int __init ioam6_iptunnel_init(void)
{
	return lwtunnel_encap_add_ops(&ioam6_iptun_ops, LWTUNNEL_ENCAP_IOAM6);
}
/* Unregister the IOAM6 lwtunnel encap type. */
void ioam6_iptunnel_exit(void)
{
	lwtunnel_encap_del_ops(&ioam6_iptun_ops, LWTUNNEL_ENCAP_IOAM6);
}