/* NOTE(review): removed non-code residue from source extraction (file-size
 * banner and concatenated navigation line numbers) — not part of the header.
 */
  1. /* SPDX-License-Identifier: GPL-2.0-or-later */
  2. /*
  3. * VLAN An implementation of 802.1Q VLAN tagging.
  4. *
  5. * Authors: Ben Greear <[email protected]>
  6. */
  7. #ifndef _LINUX_IF_VLAN_H_
  8. #define _LINUX_IF_VLAN_H_
  9. #include <linux/netdevice.h>
  10. #include <linux/etherdevice.h>
  11. #include <linux/rtnetlink.h>
  12. #include <linux/bug.h>
  13. #include <uapi/linux/if_vlan.h>
  14. #define VLAN_HLEN 4 /* The additional bytes required by VLAN
  15. * (in addition to the Ethernet header)
  16. */
  17. #define VLAN_ETH_HLEN 18 /* Total octets in header. */
  18. #define VLAN_ETH_ZLEN 64 /* Min. octets in frame sans FCS */
  19. /*
  20. * According to 802.3ac, the packet can be 4 bytes longer. --Klika Jan
  21. */
  22. #define VLAN_ETH_DATA_LEN 1500 /* Max. octets in payload */
  23. #define VLAN_ETH_FRAME_LEN 1518 /* Max. octets in frame sans FCS */
  24. #define VLAN_MAX_DEPTH 8 /* Max. number of nested VLAN tags parsed */
  25. /*
  26. * struct vlan_hdr - vlan header
  27. * @h_vlan_TCI: priority and VLAN ID
  28. * @h_vlan_encapsulated_proto: packet type ID or len
  29. */
  30. struct vlan_hdr {
  31. __be16 h_vlan_TCI;
  32. __be16 h_vlan_encapsulated_proto;
  33. };
  34. /**
  35. * struct vlan_ethhdr - vlan ethernet header (ethhdr + vlan_hdr)
  36. * @h_dest: destination ethernet address
  37. * @h_source: source ethernet address
  38. * @h_vlan_proto: ethernet protocol
  39. * @h_vlan_TCI: priority and VLAN ID
  40. * @h_vlan_encapsulated_proto: packet type ID or len
  41. */
  42. struct vlan_ethhdr {
  43. struct_group(addrs,
  44. unsigned char h_dest[ETH_ALEN];
  45. unsigned char h_source[ETH_ALEN];
  46. );
  47. __be16 h_vlan_proto;
  48. __be16 h_vlan_TCI;
  49. __be16 h_vlan_encapsulated_proto;
  50. };
  51. #include <linux/skbuff.h>
  52. static inline struct vlan_ethhdr *vlan_eth_hdr(const struct sk_buff *skb)
  53. {
  54. return (struct vlan_ethhdr *)skb_mac_header(skb);
  55. }
  56. #define VLAN_PRIO_MASK 0xe000 /* Priority Code Point */
  57. #define VLAN_PRIO_SHIFT 13
  58. #define VLAN_CFI_MASK 0x1000 /* Canonical Format Indicator / Drop Eligible Indicator */
  59. #define VLAN_VID_MASK 0x0fff /* VLAN Identifier */
  60. #define VLAN_N_VID 4096
  61. /* found in socket.c */
  62. extern void vlan_ioctl_set(int (*hook)(struct net *, void __user *));
  63. static inline bool is_vlan_dev(const struct net_device *dev)
  64. {
  65. return dev->priv_flags & IFF_802_1Q_VLAN;
  66. }
  67. #define skb_vlan_tag_present(__skb) ((__skb)->vlan_present)
  68. #define skb_vlan_tag_get(__skb) ((__skb)->vlan_tci)
  69. #define skb_vlan_tag_get_id(__skb) ((__skb)->vlan_tci & VLAN_VID_MASK)
  70. #define skb_vlan_tag_get_cfi(__skb) (!!((__skb)->vlan_tci & VLAN_CFI_MASK))
  71. #define skb_vlan_tag_get_prio(__skb) (((__skb)->vlan_tci & VLAN_PRIO_MASK) >> VLAN_PRIO_SHIFT)
  72. static inline int vlan_get_rx_ctag_filter_info(struct net_device *dev)
  73. {
  74. ASSERT_RTNL();
  75. return notifier_to_errno(call_netdevice_notifiers(NETDEV_CVLAN_FILTER_PUSH_INFO, dev));
  76. }
  77. static inline void vlan_drop_rx_ctag_filter_info(struct net_device *dev)
  78. {
  79. ASSERT_RTNL();
  80. call_netdevice_notifiers(NETDEV_CVLAN_FILTER_DROP_INFO, dev);
  81. }
  82. static inline int vlan_get_rx_stag_filter_info(struct net_device *dev)
  83. {
  84. ASSERT_RTNL();
  85. return notifier_to_errno(call_netdevice_notifiers(NETDEV_SVLAN_FILTER_PUSH_INFO, dev));
  86. }
  87. static inline void vlan_drop_rx_stag_filter_info(struct net_device *dev)
  88. {
  89. ASSERT_RTNL();
  90. call_netdevice_notifiers(NETDEV_SVLAN_FILTER_DROP_INFO, dev);
  91. }
  92. /**
  93. * struct vlan_pcpu_stats - VLAN percpu rx/tx stats
  94. * @rx_packets: number of received packets
  95. * @rx_bytes: number of received bytes
  96. * @rx_multicast: number of received multicast packets
  97. * @tx_packets: number of transmitted packets
  98. * @tx_bytes: number of transmitted bytes
  99. * @syncp: synchronization point for 64bit counters
  100. * @rx_errors: number of rx errors
  101. * @tx_dropped: number of tx drops
  102. */
  103. struct vlan_pcpu_stats {
  104. u64_stats_t rx_packets;
  105. u64_stats_t rx_bytes;
  106. u64_stats_t rx_multicast;
  107. u64_stats_t tx_packets;
  108. u64_stats_t tx_bytes;
  109. struct u64_stats_sync syncp;
  110. u32 rx_errors;
  111. u32 tx_dropped;
  112. };
  113. #if defined(CONFIG_VLAN_8021Q) || defined(CONFIG_VLAN_8021Q_MODULE)
  114. extern struct net_device *__vlan_find_dev_deep_rcu(struct net_device *real_dev,
  115. __be16 vlan_proto, u16 vlan_id);
  116. extern int vlan_for_each(struct net_device *dev,
  117. int (*action)(struct net_device *dev, int vid,
  118. void *arg), void *arg);
  119. extern struct net_device *vlan_dev_real_dev(const struct net_device *dev);
  120. extern u16 vlan_dev_vlan_id(const struct net_device *dev);
  121. extern __be16 vlan_dev_vlan_proto(const struct net_device *dev);
  122. /**
  123. * struct vlan_priority_tci_mapping - vlan egress priority mappings
  124. * @priority: skb priority
  125. * @vlan_qos: vlan priority: (skb->priority << 13) & 0xE000
  126. * @next: pointer to next struct
  127. */
  128. struct vlan_priority_tci_mapping {
  129. u32 priority;
  130. u16 vlan_qos;
  131. struct vlan_priority_tci_mapping *next;
  132. };
  133. struct proc_dir_entry;
  134. struct netpoll;
  135. /**
  136. * struct vlan_dev_priv - VLAN private device data
  137. * @nr_ingress_mappings: number of ingress priority mappings
  138. * @ingress_priority_map: ingress priority mappings
  139. * @nr_egress_mappings: number of egress priority mappings
  140. * @egress_priority_map: hash of egress priority mappings
  141. * @vlan_proto: VLAN encapsulation protocol
  142. * @vlan_id: VLAN identifier
  143. * @flags: device flags
  144. * @real_dev: underlying netdevice
  145. * @dev_tracker: refcount tracker for @real_dev reference
  146. * @real_dev_addr: address of underlying netdevice
  147. * @dent: proc dir entry
  148. * @vlan_pcpu_stats: ptr to percpu rx stats
  149. */
  150. struct vlan_dev_priv {
  151. unsigned int nr_ingress_mappings;
  152. u32 ingress_priority_map[8];
  153. unsigned int nr_egress_mappings;
  154. struct vlan_priority_tci_mapping *egress_priority_map[16];
  155. __be16 vlan_proto;
  156. u16 vlan_id;
  157. u16 flags;
  158. struct net_device *real_dev;
  159. netdevice_tracker dev_tracker;
  160. unsigned char real_dev_addr[ETH_ALEN];
  161. struct proc_dir_entry *dent;
  162. struct vlan_pcpu_stats __percpu *vlan_pcpu_stats;
  163. #ifdef CONFIG_NET_POLL_CONTROLLER
  164. struct netpoll *netpoll;
  165. #endif
  166. };
  167. static inline struct vlan_dev_priv *vlan_dev_priv(const struct net_device *dev)
  168. {
  169. return netdev_priv(dev);
  170. }
  171. static inline u16
  172. vlan_dev_get_egress_qos_mask(struct net_device *dev, u32 skprio)
  173. {
  174. struct vlan_priority_tci_mapping *mp;
  175. smp_rmb(); /* coupled with smp_wmb() in vlan_dev_set_egress_priority() */
  176. mp = vlan_dev_priv(dev)->egress_priority_map[(skprio & 0xF)];
  177. while (mp) {
  178. if (mp->priority == skprio) {
  179. return mp->vlan_qos; /* This should already be shifted
  180. * to mask correctly with the
  181. * VLAN's TCI */
  182. }
  183. mp = mp->next;
  184. }
  185. return 0;
  186. }
  187. extern bool vlan_do_receive(struct sk_buff **skb);
  188. extern int vlan_vid_add(struct net_device *dev, __be16 proto, u16 vid);
  189. extern void vlan_vid_del(struct net_device *dev, __be16 proto, u16 vid);
  190. extern int vlan_vids_add_by_dev(struct net_device *dev,
  191. const struct net_device *by_dev);
  192. extern void vlan_vids_del_by_dev(struct net_device *dev,
  193. const struct net_device *by_dev);
  194. extern bool vlan_uses_dev(const struct net_device *dev);
  195. #else
  196. static inline struct net_device *
  197. __vlan_find_dev_deep_rcu(struct net_device *real_dev,
  198. __be16 vlan_proto, u16 vlan_id)
  199. {
  200. return NULL;
  201. }
  202. static inline int
  203. vlan_for_each(struct net_device *dev,
  204. int (*action)(struct net_device *dev, int vid, void *arg),
  205. void *arg)
  206. {
  207. return 0;
  208. }
  209. static inline struct net_device *vlan_dev_real_dev(const struct net_device *dev)
  210. {
  211. BUG();
  212. return NULL;
  213. }
  214. static inline u16 vlan_dev_vlan_id(const struct net_device *dev)
  215. {
  216. BUG();
  217. return 0;
  218. }
  219. static inline __be16 vlan_dev_vlan_proto(const struct net_device *dev)
  220. {
  221. BUG();
  222. return 0;
  223. }
  224. static inline u16 vlan_dev_get_egress_qos_mask(struct net_device *dev,
  225. u32 skprio)
  226. {
  227. return 0;
  228. }
  229. static inline bool vlan_do_receive(struct sk_buff **skb)
  230. {
  231. return false;
  232. }
  233. static inline int vlan_vid_add(struct net_device *dev, __be16 proto, u16 vid)
  234. {
  235. return 0;
  236. }
  237. static inline void vlan_vid_del(struct net_device *dev, __be16 proto, u16 vid)
  238. {
  239. }
  240. static inline int vlan_vids_add_by_dev(struct net_device *dev,
  241. const struct net_device *by_dev)
  242. {
  243. return 0;
  244. }
  245. static inline void vlan_vids_del_by_dev(struct net_device *dev,
  246. const struct net_device *by_dev)
  247. {
  248. }
  249. static inline bool vlan_uses_dev(const struct net_device *dev)
  250. {
  251. return false;
  252. }
  253. #endif
  254. /**
  255. * eth_type_vlan - check for valid vlan ether type.
  256. * @ethertype: ether type to check
  257. *
  258. * Returns true if the ether type is a vlan ether type.
  259. */
  260. static inline bool eth_type_vlan(__be16 ethertype)
  261. {
  262. switch (ethertype) {
  263. case htons(ETH_P_8021Q):
  264. case htons(ETH_P_8021AD):
  265. return true;
  266. default:
  267. return false;
  268. }
  269. }
  270. static inline bool vlan_hw_offload_capable(netdev_features_t features,
  271. __be16 proto)
  272. {
  273. if (proto == htons(ETH_P_8021Q) && features & NETIF_F_HW_VLAN_CTAG_TX)
  274. return true;
  275. if (proto == htons(ETH_P_8021AD) && features & NETIF_F_HW_VLAN_STAG_TX)
  276. return true;
  277. return false;
  278. }
  279. /**
  280. * __vlan_insert_inner_tag - inner VLAN tag inserting
  281. * @skb: skbuff to tag
  282. * @vlan_proto: VLAN encapsulation protocol
  283. * @vlan_tci: VLAN TCI to insert
  284. * @mac_len: MAC header length including outer vlan headers
  285. *
  286. * Inserts the VLAN tag into @skb as part of the payload at offset mac_len
  287. * Returns error if skb_cow_head fails.
  288. *
  289. * Does not change skb->protocol so this function can be used during receive.
  290. */
  291. static inline int __vlan_insert_inner_tag(struct sk_buff *skb,
  292. __be16 vlan_proto, u16 vlan_tci,
  293. unsigned int mac_len)
  294. {
  295. struct vlan_ethhdr *veth;
  296. if (skb_cow_head(skb, VLAN_HLEN) < 0)
  297. return -ENOMEM;
  298. skb_push(skb, VLAN_HLEN);
  299. /* Move the mac header sans proto to the beginning of the new header. */
  300. if (likely(mac_len > ETH_TLEN))
  301. memmove(skb->data, skb->data + VLAN_HLEN, mac_len - ETH_TLEN);
  302. skb->mac_header -= VLAN_HLEN;
  303. veth = (struct vlan_ethhdr *)(skb->data + mac_len - ETH_HLEN);
  304. /* first, the ethernet type */
  305. if (likely(mac_len >= ETH_TLEN)) {
  306. /* h_vlan_encapsulated_proto should already be populated, and
  307. * skb->data has space for h_vlan_proto
  308. */
  309. veth->h_vlan_proto = vlan_proto;
  310. } else {
  311. /* h_vlan_encapsulated_proto should not be populated, and
  312. * skb->data has no space for h_vlan_proto
  313. */
  314. veth->h_vlan_encapsulated_proto = skb->protocol;
  315. }
  316. /* now, the TCI */
  317. veth->h_vlan_TCI = htons(vlan_tci);
  318. return 0;
  319. }
  320. /**
  321. * __vlan_insert_tag - regular VLAN tag inserting
  322. * @skb: skbuff to tag
  323. * @vlan_proto: VLAN encapsulation protocol
  324. * @vlan_tci: VLAN TCI to insert
  325. *
  326. * Inserts the VLAN tag into @skb as part of the payload
  327. * Returns error if skb_cow_head fails.
  328. *
  329. * Does not change skb->protocol so this function can be used during receive.
  330. */
  331. static inline int __vlan_insert_tag(struct sk_buff *skb,
  332. __be16 vlan_proto, u16 vlan_tci)
  333. {
  334. return __vlan_insert_inner_tag(skb, vlan_proto, vlan_tci, ETH_HLEN);
  335. }
  336. /**
  337. * vlan_insert_inner_tag - inner VLAN tag inserting
  338. * @skb: skbuff to tag
  339. * @vlan_proto: VLAN encapsulation protocol
  340. * @vlan_tci: VLAN TCI to insert
  341. * @mac_len: MAC header length including outer vlan headers
  342. *
  343. * Inserts the VLAN tag into @skb as part of the payload at offset mac_len
  344. * Returns a VLAN tagged skb. If a new skb is created, @skb is freed.
  345. *
  346. * Following the skb_unshare() example, in case of error, the calling function
  347. * doesn't have to worry about freeing the original skb.
  348. *
  349. * Does not change skb->protocol so this function can be used during receive.
  350. */
  351. static inline struct sk_buff *vlan_insert_inner_tag(struct sk_buff *skb,
  352. __be16 vlan_proto,
  353. u16 vlan_tci,
  354. unsigned int mac_len)
  355. {
  356. int err;
  357. err = __vlan_insert_inner_tag(skb, vlan_proto, vlan_tci, mac_len);
  358. if (err) {
  359. dev_kfree_skb_any(skb);
  360. return NULL;
  361. }
  362. return skb;
  363. }
  364. /**
  365. * vlan_insert_tag - regular VLAN tag inserting
  366. * @skb: skbuff to tag
  367. * @vlan_proto: VLAN encapsulation protocol
  368. * @vlan_tci: VLAN TCI to insert
  369. *
  370. * Inserts the VLAN tag into @skb as part of the payload
  371. * Returns a VLAN tagged skb. If a new skb is created, @skb is freed.
  372. *
  373. * Following the skb_unshare() example, in case of error, the calling function
  374. * doesn't have to worry about freeing the original skb.
  375. *
  376. * Does not change skb->protocol so this function can be used during receive.
  377. */
  378. static inline struct sk_buff *vlan_insert_tag(struct sk_buff *skb,
  379. __be16 vlan_proto, u16 vlan_tci)
  380. {
  381. return vlan_insert_inner_tag(skb, vlan_proto, vlan_tci, ETH_HLEN);
  382. }
  383. /**
  384. * vlan_insert_tag_set_proto - regular VLAN tag inserting
  385. * @skb: skbuff to tag
  386. * @vlan_proto: VLAN encapsulation protocol
  387. * @vlan_tci: VLAN TCI to insert
  388. *
  389. * Inserts the VLAN tag into @skb as part of the payload
  390. * Returns a VLAN tagged skb. If a new skb is created, @skb is freed.
  391. *
  392. * Following the skb_unshare() example, in case of error, the calling function
  393. * doesn't have to worry about freeing the original skb.
  394. */
  395. static inline struct sk_buff *vlan_insert_tag_set_proto(struct sk_buff *skb,
  396. __be16 vlan_proto,
  397. u16 vlan_tci)
  398. {
  399. skb = vlan_insert_tag(skb, vlan_proto, vlan_tci);
  400. if (skb)
  401. skb->protocol = vlan_proto;
  402. return skb;
  403. }
  404. /**
  405. * __vlan_hwaccel_clear_tag - clear hardware accelerated VLAN info
  406. * @skb: skbuff to clear
  407. *
  408. * Clears the VLAN information from @skb
  409. */
  410. static inline void __vlan_hwaccel_clear_tag(struct sk_buff *skb)
  411. {
  412. skb->vlan_present = 0;
  413. }
  414. /**
  415. * __vlan_hwaccel_copy_tag - copy hardware accelerated VLAN info from another skb
  416. * @dst: skbuff to copy to
  417. * @src: skbuff to copy from
  418. *
  419. * Copies VLAN information from @src to @dst (for branchless code)
  420. */
  421. static inline void __vlan_hwaccel_copy_tag(struct sk_buff *dst, const struct sk_buff *src)
  422. {
  423. dst->vlan_present = src->vlan_present;
  424. dst->vlan_proto = src->vlan_proto;
  425. dst->vlan_tci = src->vlan_tci;
  426. }
  427. /*
  428. * __vlan_hwaccel_push_inside - pushes vlan tag to the payload
  429. * @skb: skbuff to tag
  430. *
  431. * Pushes the VLAN tag from @skb->vlan_tci inside to the payload.
  432. *
  433. * Following the skb_unshare() example, in case of error, the calling function
  434. * doesn't have to worry about freeing the original skb.
  435. */
  436. static inline struct sk_buff *__vlan_hwaccel_push_inside(struct sk_buff *skb)
  437. {
  438. skb = vlan_insert_tag_set_proto(skb, skb->vlan_proto,
  439. skb_vlan_tag_get(skb));
  440. if (likely(skb))
  441. __vlan_hwaccel_clear_tag(skb);
  442. return skb;
  443. }
  444. /**
  445. * __vlan_hwaccel_put_tag - hardware accelerated VLAN inserting
  446. * @skb: skbuff to tag
  447. * @vlan_proto: VLAN encapsulation protocol
  448. * @vlan_tci: VLAN TCI to insert
  449. *
  450. * Puts the VLAN TCI in @skb->vlan_tci and lets the device do the rest
  451. */
  452. static inline void __vlan_hwaccel_put_tag(struct sk_buff *skb,
  453. __be16 vlan_proto, u16 vlan_tci)
  454. {
  455. skb->vlan_proto = vlan_proto;
  456. skb->vlan_tci = vlan_tci;
  457. skb->vlan_present = 1;
  458. }
  459. /**
  460. * __vlan_get_tag - get the VLAN ID that is part of the payload
  461. * @skb: skbuff to query
  462. * @vlan_tci: buffer to store value
  463. *
  464. * Returns error if the skb is not of VLAN type
  465. */
  466. static inline int __vlan_get_tag(const struct sk_buff *skb, u16 *vlan_tci)
  467. {
  468. struct vlan_ethhdr *veth = (struct vlan_ethhdr *)skb->data;
  469. if (!eth_type_vlan(veth->h_vlan_proto))
  470. return -EINVAL;
  471. *vlan_tci = ntohs(veth->h_vlan_TCI);
  472. return 0;
  473. }
  474. /**
  475. * __vlan_hwaccel_get_tag - get the VLAN ID that is in @skb->cb[]
  476. * @skb: skbuff to query
  477. * @vlan_tci: buffer to store value
  478. *
  479. * Returns error if @skb->vlan_tci is not set correctly
  480. */
  481. static inline int __vlan_hwaccel_get_tag(const struct sk_buff *skb,
  482. u16 *vlan_tci)
  483. {
  484. if (skb_vlan_tag_present(skb)) {
  485. *vlan_tci = skb_vlan_tag_get(skb);
  486. return 0;
  487. } else {
  488. *vlan_tci = 0;
  489. return -EINVAL;
  490. }
  491. }
  492. /**
  493. * vlan_get_tag - get the VLAN ID from the skb
  494. * @skb: skbuff to query
  495. * @vlan_tci: buffer to store value
  496. *
  497. * Returns error if the skb is not VLAN tagged
  498. */
  499. static inline int vlan_get_tag(const struct sk_buff *skb, u16 *vlan_tci)
  500. {
  501. if (skb->dev->features & NETIF_F_HW_VLAN_CTAG_TX) {
  502. return __vlan_hwaccel_get_tag(skb, vlan_tci);
  503. } else {
  504. return __vlan_get_tag(skb, vlan_tci);
  505. }
  506. }
  507. /**
  508. * vlan_get_protocol - get protocol EtherType.
  509. * @skb: skbuff to query
  510. * @type: first vlan protocol
  511. * @depth: buffer to store length of eth and vlan tags in bytes
  512. *
  513. * Returns the EtherType of the packet, regardless of whether it is
  514. * vlan encapsulated (normal or hardware accelerated) or not.
  515. */
  516. static inline __be16 __vlan_get_protocol(const struct sk_buff *skb, __be16 type,
  517. int *depth)
  518. {
  519. unsigned int vlan_depth = skb->mac_len, parse_depth = VLAN_MAX_DEPTH;
  520. /* if type is 802.1Q/AD then the header should already be
  521. * present at mac_len - VLAN_HLEN (if mac_len > 0), or at
  522. * ETH_HLEN otherwise
  523. */
  524. if (eth_type_vlan(type)) {
  525. if (vlan_depth) {
  526. if (WARN_ON(vlan_depth < VLAN_HLEN))
  527. return 0;
  528. vlan_depth -= VLAN_HLEN;
  529. } else {
  530. vlan_depth = ETH_HLEN;
  531. }
  532. do {
  533. struct vlan_hdr vhdr, *vh;
  534. vh = skb_header_pointer(skb, vlan_depth, sizeof(vhdr), &vhdr);
  535. if (unlikely(!vh || !--parse_depth))
  536. return 0;
  537. type = vh->h_vlan_encapsulated_proto;
  538. vlan_depth += VLAN_HLEN;
  539. } while (eth_type_vlan(type));
  540. }
  541. if (depth)
  542. *depth = vlan_depth;
  543. return type;
  544. }
  545. /**
  546. * vlan_get_protocol - get protocol EtherType.
  547. * @skb: skbuff to query
  548. *
  549. * Returns the EtherType of the packet, regardless of whether it is
  550. * vlan encapsulated (normal or hardware accelerated) or not.
  551. */
  552. static inline __be16 vlan_get_protocol(const struct sk_buff *skb)
  553. {
  554. return __vlan_get_protocol(skb, skb->protocol, NULL);
  555. }
  556. /* This version of __vlan_get_protocol() also pulls mac header in skb->head */
  557. static inline __be16 vlan_get_protocol_and_depth(struct sk_buff *skb,
  558. __be16 type, int *depth)
  559. {
  560. int maclen;
  561. type = __vlan_get_protocol(skb, type, &maclen);
  562. if (type) {
  563. if (!pskb_may_pull(skb, maclen))
  564. type = 0;
  565. else if (depth)
  566. *depth = maclen;
  567. }
  568. return type;
  569. }
  570. /* A getter for the SKB protocol field which will handle VLAN tags consistently
  571. * whether VLAN acceleration is enabled or not.
  572. */
  573. static inline __be16 skb_protocol(const struct sk_buff *skb, bool skip_vlan)
  574. {
  575. if (!skip_vlan)
  576. /* VLAN acceleration strips the VLAN header from the skb and
  577. * moves it to skb->vlan_proto
  578. */
  579. return skb_vlan_tag_present(skb) ? skb->vlan_proto : skb->protocol;
  580. return vlan_get_protocol(skb);
  581. }
  582. static inline void vlan_set_encap_proto(struct sk_buff *skb,
  583. struct vlan_hdr *vhdr)
  584. {
  585. __be16 proto;
  586. unsigned short *rawp;
  587. /*
  588. * Was a VLAN packet, grab the encapsulated protocol, which the layer
  589. * three protocols care about.
  590. */
  591. proto = vhdr->h_vlan_encapsulated_proto;
  592. if (eth_proto_is_802_3(proto)) {
  593. skb->protocol = proto;
  594. return;
  595. }
  596. rawp = (unsigned short *)(vhdr + 1);
  597. if (*rawp == 0xFFFF)
  598. /*
  599. * This is a magic hack to spot IPX packets. Older Novell
  600. * breaks the protocol design and runs IPX over 802.3 without
  601. * an 802.2 LLC layer. We look for FFFF which isn't a used
  602. * 802.2 SSAP/DSAP. This won't work for fault tolerant netware
  603. * but does for the rest.
  604. */
  605. skb->protocol = htons(ETH_P_802_3);
  606. else
  607. /*
  608. * Real 802.2 LLC
  609. */
  610. skb->protocol = htons(ETH_P_802_2);
  611. }
  612. /**
  613. * skb_vlan_tagged - check if skb is vlan tagged.
  614. * @skb: skbuff to query
  615. *
  616. * Returns true if the skb is tagged, regardless of whether it is hardware
  617. * accelerated or not.
  618. */
  619. static inline bool skb_vlan_tagged(const struct sk_buff *skb)
  620. {
  621. if (!skb_vlan_tag_present(skb) &&
  622. likely(!eth_type_vlan(skb->protocol)))
  623. return false;
  624. return true;
  625. }
  626. /**
  627. * skb_vlan_tagged_multi - check if skb is vlan tagged with multiple headers.
  628. * @skb: skbuff to query
  629. *
  630. * Returns true if the skb is tagged with multiple vlan headers, regardless
  631. * of whether it is hardware accelerated or not.
  632. */
  633. static inline bool skb_vlan_tagged_multi(struct sk_buff *skb)
  634. {
  635. __be16 protocol = skb->protocol;
  636. if (!skb_vlan_tag_present(skb)) {
  637. struct vlan_ethhdr *veh;
  638. if (likely(!eth_type_vlan(protocol)))
  639. return false;
  640. if (unlikely(!pskb_may_pull(skb, VLAN_ETH_HLEN)))
  641. return false;
  642. veh = (struct vlan_ethhdr *)skb->data;
  643. protocol = veh->h_vlan_encapsulated_proto;
  644. }
  645. if (!eth_type_vlan(protocol))
  646. return false;
  647. return true;
  648. }
  649. /**
  650. * vlan_features_check - drop unsafe features for skb with multiple tags.
  651. * @skb: skbuff to query
  652. * @features: features to be checked
  653. *
  654. * Returns features without unsafe ones if the skb has multiple tags.
  655. */
  656. static inline netdev_features_t vlan_features_check(struct sk_buff *skb,
  657. netdev_features_t features)
  658. {
  659. if (skb_vlan_tagged_multi(skb)) {
  660. /* In the case of multi-tagged packets, use a direct mask
  661. * instead of using netdev_interesect_features(), to make
  662. * sure that only devices supporting NETIF_F_HW_CSUM will
  663. * have checksum offloading support.
  664. */
  665. features &= NETIF_F_SG | NETIF_F_HIGHDMA | NETIF_F_HW_CSUM |
  666. NETIF_F_FRAGLIST | NETIF_F_HW_VLAN_CTAG_TX |
  667. NETIF_F_HW_VLAN_STAG_TX;
  668. }
  669. return features;
  670. }
  671. /**
  672. * compare_vlan_header - Compare two vlan headers
  673. * @h1: Pointer to vlan header
  674. * @h2: Pointer to vlan header
  675. *
  676. * Compare two vlan headers, returns 0 if equal.
  677. *
  678. * Please note that alignment of h1 & h2 are only guaranteed to be 16 bits.
  679. */
  680. static inline unsigned long compare_vlan_header(const struct vlan_hdr *h1,
  681. const struct vlan_hdr *h2)
  682. {
  683. #if defined(CONFIG_HAVE_EFFICIENT_UNALIGNED_ACCESS)
  684. return *(u32 *)h1 ^ *(u32 *)h2;
  685. #else
  686. return ((__force u32)h1->h_vlan_TCI ^ (__force u32)h2->h_vlan_TCI) |
  687. ((__force u32)h1->h_vlan_encapsulated_proto ^
  688. (__force u32)h2->h_vlan_encapsulated_proto);
  689. #endif
  690. }
  691. #endif /* !(_LINUX_IF_VLAN_H_) */