hsr_forward.c

// SPDX-License-Identifier: GPL-2.0
/* Copyright 2011-2014 Autronica Fire and Security AS
 *
 * Author(s):
 *	2011-2014 Arvid Brodin, [email protected]
 *
 * Frame router for HSR and PRP.
 */

#include "hsr_forward.h"
#include <linux/types.h>
#include <linux/skbuff.h>
#include <linux/etherdevice.h>
#include <linux/if_vlan.h>
#include "hsr_main.h"
#include "hsr_framereg.h"

struct hsr_node;
/* The uses I can see for these HSR supervision frames are:
 * 1) Use the frames that are sent after node initialization ("HSR_TLV.Type =
 *    22") to reset any sequence_nr counters belonging to that node. Useful if
 *    the other node's counter has been reset for some reason.
 *    --
 *    Or not - resetting the counter and bridging the frame would create a
 *    loop, unfortunately.
 *
 * 2) Use the LifeCheck frames to detect ring breaks. I.e. if no LifeCheck
 *    frame is received from a particular node, we know something is wrong.
 *    We just register these (as with normal frames) and throw them away.
 *
 * 3) Allow different MAC addresses for the two slave interfaces, using the
 *    MacAddressA field.
 */
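/* Check whether the frame is a well-formed HSR/PRP supervision frame:
 * correct supervision multicast destination, HSR or PRP ethertype, and a
 * plausible TLV chain (a known announce/life-check TLV of the right length,
 * an optional RedBox MAC TLV, and a zero-length end-of-TLVs marker if one
 * follows).
 */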
static bool is_supervision_frame(struct hsr_priv *hsr, struct sk_buff *skb)
{
        struct ethhdr *eth_hdr;
        struct hsr_sup_tag *hsr_sup_tag;
        struct hsrv1_ethhdr_sp *hsr_V1_hdr;
        struct hsr_sup_tlv *hsr_sup_tlv;
        u16 total_length = 0;

        WARN_ON_ONCE(!skb_mac_header_was_set(skb));
        eth_hdr = (struct ethhdr *)skb_mac_header(skb);

        /* Correct addr? */
        if (!ether_addr_equal(eth_hdr->h_dest,
                              hsr->sup_multicast_addr))
                return false;

        /* Correct ether type? */
        if (!(eth_hdr->h_proto == htons(ETH_P_PRP) ||
              eth_hdr->h_proto == htons(ETH_P_HSR)))
                return false;

        /* Get the supervision header from correct location. */
        if (eth_hdr->h_proto == htons(ETH_P_HSR)) { /* Okay HSRv1. */
                total_length = sizeof(struct hsrv1_ethhdr_sp);
                if (!pskb_may_pull(skb, total_length))
                        return false;

                hsr_V1_hdr = (struct hsrv1_ethhdr_sp *)skb_mac_header(skb);
                if (hsr_V1_hdr->hsr.encap_proto != htons(ETH_P_PRP))
                        return false;

                hsr_sup_tag = &hsr_V1_hdr->hsr_sup;
        } else {
                total_length = sizeof(struct hsrv0_ethhdr_sp);
                if (!pskb_may_pull(skb, total_length))
                        return false;

                hsr_sup_tag =
                        &((struct hsrv0_ethhdr_sp *)skb_mac_header(skb))->hsr_sup;
        }

        if (hsr_sup_tag->tlv.HSR_TLV_type != HSR_TLV_ANNOUNCE &&
            hsr_sup_tag->tlv.HSR_TLV_type != HSR_TLV_LIFE_CHECK &&
            hsr_sup_tag->tlv.HSR_TLV_type != PRP_TLV_LIFE_CHECK_DD &&
            hsr_sup_tag->tlv.HSR_TLV_type != PRP_TLV_LIFE_CHECK_DA)
                return false;

        if (hsr_sup_tag->tlv.HSR_TLV_length != 12 &&
            hsr_sup_tag->tlv.HSR_TLV_length != sizeof(struct hsr_sup_payload))
                return false;

        /* Get next tlv */
        total_length += sizeof(struct hsr_sup_tlv) + hsr_sup_tag->tlv.HSR_TLV_length;
        if (!pskb_may_pull(skb, total_length))
                return false;
        skb_pull(skb, total_length);
        hsr_sup_tlv = (struct hsr_sup_tlv *)skb->data;
        skb_push(skb, total_length);

        /* if this is a redbox supervision frame we need to verify
         * that more data is available
         */
        if (hsr_sup_tlv->HSR_TLV_type == PRP_TLV_REDBOX_MAC) {
                /* tlv length must be a length of a mac address */
                if (hsr_sup_tlv->HSR_TLV_length != sizeof(struct hsr_sup_payload))
                        return false;

                /* make sure another tlv follows */
                total_length += sizeof(struct hsr_sup_tlv) + hsr_sup_tlv->HSR_TLV_length;
                if (!pskb_may_pull(skb, total_length))
                        return false;

                /* get next tlv */
                skb_pull(skb, total_length);
                hsr_sup_tlv = (struct hsr_sup_tlv *)skb->data;
                skb_push(skb, total_length);
        }

        /* end of tlvs must follow at the end */
        if (hsr_sup_tlv->HSR_TLV_type == HSR_TLV_EOT &&
            hsr_sup_tlv->HSR_TLV_length != 0)
                return false;

        return true;
}
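/* Create a copy of an HSR-tagged frame with the 6-byte HSR tag removed:
 * the Ethernet addresses (and VLAN header, if present) are copied over so
 * that the encapsulated protocol becomes the frame's ethertype again.
 */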
static struct sk_buff *create_stripped_skb_hsr(struct sk_buff *skb_in,
                                               struct hsr_frame_info *frame)
{
        struct sk_buff *skb;
        int copylen;
        unsigned char *dst, *src;

        skb_pull(skb_in, HSR_HLEN);
        skb = __pskb_copy(skb_in, skb_headroom(skb_in) - HSR_HLEN, GFP_ATOMIC);
        skb_push(skb_in, HSR_HLEN);
        if (!skb)
                return NULL;

        skb_reset_mac_header(skb);

        if (skb->ip_summed == CHECKSUM_PARTIAL)
                skb->csum_start -= HSR_HLEN;

        copylen = 2 * ETH_ALEN;
        if (frame->is_vlan)
                copylen += VLAN_HLEN;
        src = skb_mac_header(skb_in);
        dst = skb_mac_header(skb);
        memcpy(dst, src, copylen);

        skb->protocol = eth_hdr(skb)->h_proto;
        return skb;
}
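/* Return a clone of the untagged version of the frame, creating
 * frame->skb_std from the HSR-tagged skb on first use.
 */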
struct sk_buff *hsr_get_untagged_frame(struct hsr_frame_info *frame,
                                       struct hsr_port *port)
{
        if (!frame->skb_std) {
                if (frame->skb_hsr)
                        frame->skb_std =
                                create_stripped_skb_hsr(frame->skb_hsr, frame);
                else
                        netdev_warn_once(port->dev,
                                         "Unexpected frame received in hsr_get_untagged_frame()\n");

                if (!frame->skb_std)
                        return NULL;
        }

        return skb_clone(frame->skb_std, GFP_ATOMIC);
}
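/* PRP counterpart: the Redundancy Control Trailer (RCT) sits at the end of
 * the frame, so the untagged version is produced by trimming the last
 * HSR_HLEN bytes.
 */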
struct sk_buff *prp_get_untagged_frame(struct hsr_frame_info *frame,
                                       struct hsr_port *port)
{
        if (!frame->skb_std) {
                if (frame->skb_prp) {
                        /* trim the skb by len - HSR_HLEN to exclude RCT */
                        skb_trim(frame->skb_prp,
                                 frame->skb_prp->len - HSR_HLEN);
                        frame->skb_std =
                                __pskb_copy(frame->skb_prp,
                                            skb_headroom(frame->skb_prp),
                                            GFP_ATOMIC);
                } else {
                        /* Unexpected */
                        WARN_ONCE(1, "%s:%d: Unexpected frame received (port_src %s)\n",
                                  __FILE__, __LINE__, port->dev->name);
                        return NULL;
                }
        }

        return skb_clone(frame->skb_std, GFP_ATOMIC);
}
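/* The PRP LAN ID is 0 for frames sent through slave A and 1 for slave B,
 * with net_id carried in the upper bits.
 */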
static void prp_set_lan_id(struct prp_rct *trailer,
                           struct hsr_port *port)
{
        int lane_id;

        if (port->type == HSR_PT_SLAVE_A)
                lane_id = 0;
        else
                lane_id = 1;

        /* Add net_id in the upper 3 bits of lane_id */
        lane_id |= port->hsr->net_id;

        set_prp_lan_id(trailer, lane_id);
}
/* Tailroom for PRP rct should have been created before calling this */
static struct sk_buff *prp_fill_rct(struct sk_buff *skb,
                                    struct hsr_frame_info *frame,
                                    struct hsr_port *port)
{
        struct prp_rct *trailer;
        int min_size = ETH_ZLEN;
        int lsdu_size;

        if (!skb)
                return skb;

        if (frame->is_vlan)
                min_size = VLAN_ETH_ZLEN;

        if (skb_put_padto(skb, min_size))
                return NULL;

        trailer = (struct prp_rct *)skb_put(skb, HSR_HLEN);
        lsdu_size = skb->len - 14;
        if (frame->is_vlan)
                lsdu_size -= 4;
        prp_set_lan_id(trailer, port);
        set_prp_LSDU_size(trailer, lsdu_size);
        trailer->sequence_nr = htons(frame->sequence_nr);
        trailer->PRP_suffix = htons(ETH_P_PRP);
        skb->protocol = eth_hdr(skb)->h_proto;

        return skb;
}
static void hsr_set_path_id(struct hsr_ethhdr *hsr_ethhdr,
                            struct hsr_port *port)
{
        int path_id;

        if (port->type == HSR_PT_SLAVE_A)
                path_id = 0;
        else
                path_id = 1;

        set_hsr_tag_path(&hsr_ethhdr->hsr_tag, path_id);
}
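/* Fill in the HSR tag fields (path, LSDU size, sequence number, encapsulated
 * protocol). Room for the tag must already have been made in front of the
 * payload, see hsr_create_tagged_frame().
 */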
static struct sk_buff *hsr_fill_tag(struct sk_buff *skb,
                                    struct hsr_frame_info *frame,
                                    struct hsr_port *port, u8 proto_version)
{
        struct hsr_ethhdr *hsr_ethhdr;
        int lsdu_size;

        /* pad to minimum packet size which is 60 + 6 (HSR tag) */
        if (skb_put_padto(skb, ETH_ZLEN + HSR_HLEN))
                return NULL;

        lsdu_size = skb->len - 14;
        if (frame->is_vlan)
                lsdu_size -= 4;

        hsr_ethhdr = (struct hsr_ethhdr *)skb_mac_header(skb);

        hsr_set_path_id(hsr_ethhdr, port);
        set_hsr_tag_LSDU_size(&hsr_ethhdr->hsr_tag, lsdu_size);
        hsr_ethhdr->hsr_tag.sequence_nr = htons(frame->sequence_nr);
        hsr_ethhdr->hsr_tag.encap_proto = hsr_ethhdr->ethhdr.h_proto;
        hsr_ethhdr->ethhdr.h_proto = htons(proto_version ?
                        ETH_P_HSR : ETH_P_PRP);
        skb->protocol = hsr_ethhdr->ethhdr.h_proto;

        return skb;
}
/* If the original frame was an HSR tagged frame, just clone it to be sent
 * unchanged. Otherwise, create a private frame especially tagged for 'port'.
 */
struct sk_buff *hsr_create_tagged_frame(struct hsr_frame_info *frame,
                                        struct hsr_port *port)
{
        unsigned char *dst, *src;
        struct sk_buff *skb;
        int movelen;

        if (frame->skb_hsr) {
                struct hsr_ethhdr *hsr_ethhdr =
                        (struct hsr_ethhdr *)skb_mac_header(frame->skb_hsr);

                /* set the lane id properly */
                hsr_set_path_id(hsr_ethhdr, port);
                return skb_clone(frame->skb_hsr, GFP_ATOMIC);
        } else if (port->dev->features & NETIF_F_HW_HSR_TAG_INS) {
                return skb_clone(frame->skb_std, GFP_ATOMIC);
        }

        /* Create the new skb with enough headroom to fit the HSR tag */
        skb = __pskb_copy(frame->skb_std,
                          skb_headroom(frame->skb_std) + HSR_HLEN, GFP_ATOMIC);
        if (!skb)
                return NULL;
        skb_reset_mac_header(skb);

        if (skb->ip_summed == CHECKSUM_PARTIAL)
                skb->csum_start += HSR_HLEN;

        movelen = ETH_HLEN;
        if (frame->is_vlan)
                movelen += VLAN_HLEN;

        src = skb_mac_header(skb);
        dst = skb_push(skb, HSR_HLEN);
        memmove(dst, src, movelen);
        skb_reset_mac_header(skb);

        /* skb_put_padto() frees the skb on error, and hsr_fill_tag() returns
         * NULL in that case.
         */
        return hsr_fill_tag(skb, frame, port, port->hsr->prot_version);
}
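/* PRP counterpart of hsr_create_tagged_frame(): update the LAN ID if the
 * frame already carries an RCT, otherwise copy it with extra tailroom and
 * append a new RCT.
 */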
struct sk_buff *prp_create_tagged_frame(struct hsr_frame_info *frame,
                                        struct hsr_port *port)
{
        struct sk_buff *skb;

        if (frame->skb_prp) {
                struct prp_rct *trailer = skb_get_PRP_rct(frame->skb_prp);

                if (trailer) {
                        prp_set_lan_id(trailer, port);
                } else {
                        WARN_ONCE(!trailer, "errored PRP skb");
                        return NULL;
                }
                return skb_clone(frame->skb_prp, GFP_ATOMIC);
        } else if (port->dev->features & NETIF_F_HW_HSR_TAG_INS) {
                return skb_clone(frame->skb_std, GFP_ATOMIC);
        }

        skb = skb_copy_expand(frame->skb_std, 0,
                              skb_tailroom(frame->skb_std) + HSR_HLEN,
                              GFP_ATOMIC);
        return prp_fill_rct(skb, frame, port);
}
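/* Deliver the frame locally through the master (hsr) device: substitute the
 * source address (hsr_addr_subst_source()), strip the Ethernet header and
 * hand the skb to netif_rx(), updating the master's rx statistics.
 */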
static void hsr_deliver_master(struct sk_buff *skb, struct net_device *dev,
                               struct hsr_node *node_src)
{
        bool was_multicast_frame;
        int res, recv_len;

        was_multicast_frame = (skb->pkt_type == PACKET_MULTICAST);
        hsr_addr_subst_source(node_src, skb);
        skb_pull(skb, ETH_HLEN);
        recv_len = skb->len;
        res = netif_rx(skb);
        if (res == NET_RX_DROP) {
                dev->stats.rx_dropped++;
        } else {
                dev->stats.rx_packets++;
                dev->stats.rx_bytes += recv_len;
                if (was_multicast_frame)
                        dev->stats.multicast++;
        }
}
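/* Transmit the frame on a slave port. Frames that originate from the local
 * master first get destination and source address substitution (see the
 * IEC 62439-3 note below).
 */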
static int hsr_xmit(struct sk_buff *skb, struct hsr_port *port,
                    struct hsr_frame_info *frame)
{
        if (frame->port_rcv->type == HSR_PT_MASTER) {
                hsr_addr_subst_dest(frame->node_src, skb, port);

                /* Address substitution (IEC62439-3 pp 26, 50): replace the
                 * source MAC address of the outgoing frame with that of the
                 * outgoing slave.
                 */
                ether_addr_copy(eth_hdr(skb)->h_source, port->dev->dev_addr);
        }
        return dev_queue_xmit(skb);
}
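/* A PRP node never forwards frames between its two slave ports; the two
 * LANs are completely separate networks.
 */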
bool prp_drop_frame(struct hsr_frame_info *frame, struct hsr_port *port)
{
        return ((frame->port_rcv->type == HSR_PT_SLAVE_A &&
                 port->type == HSR_PT_SLAVE_B) ||
                (frame->port_rcv->type == HSR_PT_SLAVE_B &&
                 port->type == HSR_PT_SLAVE_A));
}
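/* When the hardware forwards between the ring ports itself
 * (NETIF_F_HW_HSR_FWD), the software path must not forward slave-to-slave
 * again, so apply the same rule as PRP.
 */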
bool hsr_drop_frame(struct hsr_frame_info *frame, struct hsr_port *port)
{
        if (port->dev->features & NETIF_F_HW_HSR_FWD)
                return prp_drop_frame(frame, port);

        return false;
}
/* Forward the frame through all devices except:
 * - Back through the receiving device
 * - If it's an HSR frame: through a device where it has passed before
 * - If it's a PRP frame: through another PRP slave device (no bridge)
 * - To the local HSR master only if the frame is directly addressed to it, or
 *   is a non-supervision multicast or broadcast frame.
 *
 * HSR slave devices should insert an HSR tag into the frame, or forward the
 * frame unchanged if it's already tagged. Interlink devices should strip HSR
 * tags if they're of the non-HSR type (but only after duplicate discard). The
 * master device always strips HSR tags.
 */
static void hsr_forward_do(struct hsr_frame_info *frame)
{
        struct hsr_port *port;
        struct sk_buff *skb;
        bool sent = false;

        hsr_for_each_port(frame->port_rcv->hsr, port) {
                struct hsr_priv *hsr = port->hsr;

                /* Don't send frame back the way it came */
                if (port == frame->port_rcv)
                        continue;

                /* Don't deliver locally unless we should */
                if (port->type == HSR_PT_MASTER && !frame->is_local_dest)
                        continue;

                /* Deliver frames directly addressed to us to master only */
                if (port->type != HSR_PT_MASTER && frame->is_local_exclusive)
                        continue;

                /* If hardware duplicate generation is enabled, only send out
                 * one port.
                 */
                if ((port->dev->features & NETIF_F_HW_HSR_DUP) && sent)
                        continue;

                /* Don't send frame over port where it has been sent before.
                 * Also, for SAN frames, this shouldn't be done.
                 */
                if (!frame->is_from_san &&
                    hsr_register_frame_out(port, frame->node_src,
                                           frame->sequence_nr))
                        continue;

                if (frame->is_supervision && port->type == HSR_PT_MASTER) {
                        hsr_handle_sup_frame(frame);
                        continue;
                }

                /* Check if frame is to be dropped. E.g. for PRP there is no
                 * forwarding between the slave ports.
                 */
                if (hsr->proto_ops->drop_frame &&
                    hsr->proto_ops->drop_frame(frame, port))
                        continue;

                if (port->type != HSR_PT_MASTER)
                        skb = hsr->proto_ops->create_tagged_frame(frame, port);
                else
                        skb = hsr->proto_ops->get_untagged_frame(frame, port);

                if (!skb) {
                        frame->port_rcv->dev->stats.rx_dropped++;
                        continue;
                }

                skb->dev = port->dev;
                if (port->type == HSR_PT_MASTER) {
                        hsr_deliver_master(skb, port->dev, frame->node_src);
                } else {
                        if (!hsr_xmit(skb, port, frame))
                                sent = true;
                }
        }
}
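/* Decide whether the frame must be delivered to the local master
 * (is_local_dest) and whether it is addressed exclusively to this host
 * (is_local_exclusive), in which case the slaves do not forward it.
 */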
static void check_local_dest(struct hsr_priv *hsr, struct sk_buff *skb,
                             struct hsr_frame_info *frame)
{
        if (hsr_addr_is_self(hsr, eth_hdr(skb)->h_dest)) {
                frame->is_local_exclusive = true;
                skb->pkt_type = PACKET_HOST;
        } else {
                frame->is_local_exclusive = false;
        }

        if (skb->pkt_type == PACKET_HOST ||
            skb->pkt_type == PACKET_MULTICAST ||
            skb->pkt_type == PACKET_BROADCAST) {
                frame->is_local_dest = true;
        } else {
                frame->is_local_dest = false;
        }
}
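/* Untagged (standard) frame: it was either received from a SAN on a slave
 * port, or originated by the local host through the master port, in which
 * case it is assigned the next sequence number.
 */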
static void handle_std_frame(struct sk_buff *skb,
                             struct hsr_frame_info *frame)
{
        struct hsr_port *port = frame->port_rcv;
        struct hsr_priv *hsr = port->hsr;

        frame->skb_hsr = NULL;
        frame->skb_prp = NULL;
        frame->skb_std = skb;

        if (port->type != HSR_PT_MASTER) {
                frame->is_from_san = true;
        } else {
                /* Sequence nr for the master node */
                lockdep_assert_held(&hsr->seqnr_lock);
                frame->sequence_nr = hsr->sequence_nr;
                hsr->sequence_nr++;
        }
}
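/* HSR classification: tagged frames carry a struct hsr_ethhdr and take their
 * sequence number from the tag; everything else goes through
 * handle_std_frame().
 */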
int hsr_fill_frame_info(__be16 proto, struct sk_buff *skb,
                        struct hsr_frame_info *frame)
{
        struct hsr_port *port = frame->port_rcv;
        struct hsr_priv *hsr = port->hsr;

        /* HSRv0 supervisory frames double as a tag so treat them as tagged. */
        if ((!hsr->prot_version && proto == htons(ETH_P_PRP)) ||
            proto == htons(ETH_P_HSR)) {
                /* Check if skb contains hsr_ethhdr */
                if (skb->mac_len < sizeof(struct hsr_ethhdr))
                        return -EINVAL;

                /* HSR tagged frame - data or supervision */
                frame->skb_std = NULL;
                frame->skb_prp = NULL;
                frame->skb_hsr = skb;
                frame->sequence_nr = hsr_get_skb_sequence_nr(skb);
                return 0;
        }

        /* Standard frame or PRP from master port */
        handle_std_frame(skb, frame);
        return 0;
}
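/* PRP classification: the frame is treated as tagged only if a valid RCT
 * with a matching LSDU size is found at the tail; otherwise it is handled
 * as a standard frame.
 */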
int prp_fill_frame_info(__be16 proto, struct sk_buff *skb,
                        struct hsr_frame_info *frame)
{
        /* Supervision frame */
        struct prp_rct *rct = skb_get_PRP_rct(skb);

        if (rct &&
            prp_check_lsdu_size(skb, rct, frame->is_supervision)) {
                frame->skb_hsr = NULL;
                frame->skb_std = NULL;
                frame->skb_prp = skb;
                frame->sequence_nr = prp_get_skb_sequence_nr(rct);
                return 0;
        }
        handle_std_frame(skb, frame);
        return 0;
}
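/* Parse the received skb and fill in 'frame': detect supervision frames,
 * look up (or create) the sending node, classify the frame via the protocol
 * ops and decide on local delivery.
 */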
static int fill_frame_info(struct hsr_frame_info *frame,
                           struct sk_buff *skb, struct hsr_port *port)
{
        struct hsr_priv *hsr = port->hsr;
        struct hsr_vlan_ethhdr *vlan_hdr;
        struct ethhdr *ethhdr;
        __be16 proto;
        int ret;

        /* Check if skb contains ethhdr */
        if (skb->mac_len < sizeof(struct ethhdr))
                return -EINVAL;

        memset(frame, 0, sizeof(*frame));
        frame->is_supervision = is_supervision_frame(port->hsr, skb);
        frame->node_src = hsr_get_node(port, &hsr->node_db, skb,
                                       frame->is_supervision,
                                       port->type);
        if (!frame->node_src)
                return -1; /* Unknown node and !is_supervision, or no mem */

        ethhdr = (struct ethhdr *)skb_mac_header(skb);
        frame->is_vlan = false;
        proto = ethhdr->h_proto;

        if (proto == htons(ETH_P_8021Q))
                frame->is_vlan = true;

        if (frame->is_vlan) {
                vlan_hdr = (struct hsr_vlan_ethhdr *)ethhdr;
                proto = vlan_hdr->vlanhdr.h_vlan_encapsulated_proto;
                /* FIXME: */
                netdev_warn_once(skb->dev, "VLAN not yet supported");
                return -EINVAL;
        }

        frame->is_from_san = false;
        frame->port_rcv = port;
        ret = hsr->proto_ops->fill_frame_info(proto, skb, frame);
        if (ret)
                return ret;

        check_local_dest(port->hsr, skb, frame);

        return 0;
}
/* Must be called holding rcu read lock (because of the port parameter) */
void hsr_forward_skb(struct sk_buff *skb, struct hsr_port *port)
{
        struct hsr_frame_info frame;

        rcu_read_lock();
        if (fill_frame_info(&frame, skb, port) < 0)
                goto out_drop;

        hsr_register_frame_in(frame.node_src, port, frame.sequence_nr);
        hsr_forward_do(&frame);
        rcu_read_unlock();

        /* Gets called for ingress frames as well as egress from master port.
         * So check and increment stats for master port only here.
         */
        if (port->type == HSR_PT_MASTER) {
                port->dev->stats.tx_packets++;
                port->dev->stats.tx_bytes += skb->len;
        }

        kfree_skb(frame.skb_hsr);
        kfree_skb(frame.skb_prp);
        kfree_skb(frame.skb_std);
        return;

out_drop:
        rcu_read_unlock();
        port->dev->stats.tx_dropped++;
        kfree_skb(skb);
}