hsr_netlink.c

// SPDX-License-Identifier: GPL-2.0
/* Copyright 2011-2014 Autronica Fire and Security AS
 *
 * Author(s):
 *	2011-2014 Arvid Brodin, [email protected]
 *
 * Routines for handling Netlink messages for HSR and PRP.
 */

#include "hsr_netlink.h"
#include <linux/kernel.h>
#include <net/rtnetlink.h>
#include <net/genetlink.h>
#include "hsr_main.h"
#include "hsr_device.h"
#include "hsr_framereg.h"

static const struct nla_policy hsr_policy[IFLA_HSR_MAX + 1] = {
	[IFLA_HSR_SLAVE1]		= { .type = NLA_U32 },
	[IFLA_HSR_SLAVE2]		= { .type = NLA_U32 },
	[IFLA_HSR_MULTICAST_SPEC]	= { .type = NLA_U8 },
	[IFLA_HSR_VERSION]		= { .type = NLA_U8 },
	[IFLA_HSR_SUPERVISION_ADDR]	= { .len = ETH_ALEN },
	[IFLA_HSR_SEQ_NR]		= { .type = NLA_U16 },
	[IFLA_HSR_PROTOCOL]		= { .type = NLA_U8 },
};
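
/* The IFLA_HSR_* attributes parsed above are uAPI (include/uapi/linux/if_link.h),
 * so new attributes may only be appended, never renumbered or reused.
 */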

/* Here, it seems a netdevice has already been allocated for us, and the
 * hsr_dev_setup routine has been executed. Nice!
 */
static int hsr_newlink(struct net *src_net, struct net_device *dev,
		       struct nlattr *tb[], struct nlattr *data[],
		       struct netlink_ext_ack *extack)
{
	enum hsr_version proto_version;
	unsigned char multicast_spec;
	u8 proto = HSR_PROTOCOL_HSR;
	struct net_device *link[2];

	if (!data) {
		NL_SET_ERR_MSG_MOD(extack, "No slave devices specified");
		return -EINVAL;
	}
	if (!data[IFLA_HSR_SLAVE1]) {
		NL_SET_ERR_MSG_MOD(extack, "Slave1 device not specified");
		return -EINVAL;
	}
	link[0] = __dev_get_by_index(src_net,
				     nla_get_u32(data[IFLA_HSR_SLAVE1]));
	if (!link[0]) {
		NL_SET_ERR_MSG_MOD(extack, "Slave1 does not exist");
		return -EINVAL;
	}

	if (!data[IFLA_HSR_SLAVE2]) {
		NL_SET_ERR_MSG_MOD(extack, "Slave2 device not specified");
		return -EINVAL;
	}
	link[1] = __dev_get_by_index(src_net,
				     nla_get_u32(data[IFLA_HSR_SLAVE2]));
	if (!link[1]) {
		NL_SET_ERR_MSG_MOD(extack, "Slave2 does not exist");
		return -EINVAL;
	}

	if (link[0] == link[1]) {
		NL_SET_ERR_MSG_MOD(extack, "Slave1 and Slave2 are same");
		return -EINVAL;
	}

	if (!data[IFLA_HSR_MULTICAST_SPEC])
		multicast_spec = 0;
	else
		multicast_spec = nla_get_u8(data[IFLA_HSR_MULTICAST_SPEC]);

	if (data[IFLA_HSR_PROTOCOL])
		proto = nla_get_u8(data[IFLA_HSR_PROTOCOL]);

	if (proto >= HSR_PROTOCOL_MAX) {
		NL_SET_ERR_MSG_MOD(extack, "Unsupported protocol");
		return -EINVAL;
	}

	if (!data[IFLA_HSR_VERSION]) {
		proto_version = HSR_V0;
	} else {
		if (proto == HSR_PROTOCOL_PRP) {
			NL_SET_ERR_MSG_MOD(extack, "PRP version unsupported");
			return -EINVAL;
		}

		proto_version = nla_get_u8(data[IFLA_HSR_VERSION]);
		if (proto_version > HSR_V1) {
			NL_SET_ERR_MSG_MOD(extack,
					   "Only HSR version 0/1 supported");
			return -EINVAL;
		}
	}

	if (proto == HSR_PROTOCOL_PRP)
		proto_version = PRP_V1;

	return hsr_dev_finalize(dev, link, multicast_spec, proto_version, extack);
}

static void hsr_dellink(struct net_device *dev, struct list_head *head)
{
	struct hsr_priv *hsr = netdev_priv(dev);

	del_timer_sync(&hsr->prune_timer);
	del_timer_sync(&hsr->announce_timer);

	hsr_debugfs_term(hsr);
	hsr_del_ports(hsr);

	hsr_del_self_node(hsr);
	hsr_del_nodes(&hsr->node_db);

	unregister_netdevice_queue(dev, head);
}

static int hsr_fill_info(struct sk_buff *skb, const struct net_device *dev)
{
	struct hsr_priv *hsr = netdev_priv(dev);
	u8 proto = HSR_PROTOCOL_HSR;
	struct hsr_port *port;

	port = hsr_port_get_hsr(hsr, HSR_PT_SLAVE_A);
	if (port) {
		if (nla_put_u32(skb, IFLA_HSR_SLAVE1, port->dev->ifindex))
			goto nla_put_failure;
	}

	port = hsr_port_get_hsr(hsr, HSR_PT_SLAVE_B);
	if (port) {
		if (nla_put_u32(skb, IFLA_HSR_SLAVE2, port->dev->ifindex))
			goto nla_put_failure;
	}

	if (nla_put(skb, IFLA_HSR_SUPERVISION_ADDR, ETH_ALEN,
		    hsr->sup_multicast_addr) ||
	    nla_put_u16(skb, IFLA_HSR_SEQ_NR, hsr->sequence_nr))
		goto nla_put_failure;
	if (hsr->prot_version == PRP_V1)
		proto = HSR_PROTOCOL_PRP;
	if (nla_put_u8(skb, IFLA_HSR_PROTOCOL, proto))
		goto nla_put_failure;

	return 0;

nla_put_failure:
	return -EMSGSIZE;
}
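
/* hsr_fill_info() backs IFLA_INFO_DATA for this link kind, so the attributes
 * put above (slave ifindexes, supervision address, sequence number, protocol)
 * are what e.g. "ip -d link show" reports for an HSR/PRP device.
 */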

static struct rtnl_link_ops hsr_link_ops __read_mostly = {
	.kind		= "hsr",
	.maxtype	= IFLA_HSR_MAX,
	.policy		= hsr_policy,
	.priv_size	= sizeof(struct hsr_priv),
	.setup		= hsr_dev_setup,
	.newlink	= hsr_newlink,
	.dellink	= hsr_dellink,
	.fill_info	= hsr_fill_info,
};
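
/* With the link ops registered, an HSR or PRP device is created from
 * userspace, e.g. with iproute2 (device names here are only examples):
 *
 *   ip link add name hsr0 type hsr slave1 eth0 slave2 eth1 supervision 45 version 1
 *
 * which lands in hsr_newlink() with the slaves, multicast spec and version
 * carried as IFLA_HSR_* attributes validated against hsr_policy.
 */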

/* attribute policy */
static const struct nla_policy hsr_genl_policy[HSR_A_MAX + 1] = {
	[HSR_A_NODE_ADDR] = { .len = ETH_ALEN },
	[HSR_A_NODE_ADDR_B] = { .len = ETH_ALEN },
	[HSR_A_IFINDEX] = { .type = NLA_U32 },
	[HSR_A_IF1_AGE] = { .type = NLA_U32 },
	[HSR_A_IF2_AGE] = { .type = NLA_U32 },
	[HSR_A_IF1_SEQ] = { .type = NLA_U16 },
	[HSR_A_IF2_SEQ] = { .type = NLA_U16 },
};

static struct genl_family hsr_genl_family;

static const struct genl_multicast_group hsr_mcgrps[] = {
	{ .name = "hsr-network", },
};
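
/* hsr_nl_ringerror() and hsr_nl_nodedown() below multicast their
 * notifications to this "hsr-network" group; a userspace monitor subscribes
 * by resolving the "HSR" generic netlink family and joining the group.
 */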

/* This is called if for some node with MAC address addr, we only get frames
 * over one of the slave interfaces. This would indicate an open network ring
 * (i.e. a link has failed somewhere).
 */
void hsr_nl_ringerror(struct hsr_priv *hsr, unsigned char addr[ETH_ALEN],
		      struct hsr_port *port)
{
	struct sk_buff *skb;
	void *msg_head;
	struct hsr_port *master;
	int res;

	skb = genlmsg_new(NLMSG_GOODSIZE, GFP_ATOMIC);
	if (!skb)
		goto fail;

	msg_head = genlmsg_put(skb, 0, 0, &hsr_genl_family, 0,
			       HSR_C_RING_ERROR);
	if (!msg_head)
		goto nla_put_failure;

	res = nla_put(skb, HSR_A_NODE_ADDR, ETH_ALEN, addr);
	if (res < 0)
		goto nla_put_failure;

	res = nla_put_u32(skb, HSR_A_IFINDEX, port->dev->ifindex);
	if (res < 0)
		goto nla_put_failure;

	genlmsg_end(skb, msg_head);
	genlmsg_multicast(&hsr_genl_family, skb, 0, 0, GFP_ATOMIC);

	return;

nla_put_failure:
	kfree_skb(skb);

fail:
	rcu_read_lock();
	master = hsr_port_get_hsr(hsr, HSR_PT_MASTER);
	netdev_warn(master->dev, "Could not send HSR ring error message\n");
	rcu_read_unlock();
}

/* This is called when we haven't heard from the node with MAC address addr for
 * some time (just before the node is removed from the node table/list).
 */
void hsr_nl_nodedown(struct hsr_priv *hsr, unsigned char addr[ETH_ALEN])
{
	struct sk_buff *skb;
	void *msg_head;
	struct hsr_port *master;
	int res;

	skb = genlmsg_new(NLMSG_GOODSIZE, GFP_ATOMIC);
	if (!skb)
		goto fail;

	msg_head = genlmsg_put(skb, 0, 0, &hsr_genl_family, 0, HSR_C_NODE_DOWN);
	if (!msg_head)
		goto nla_put_failure;

	res = nla_put(skb, HSR_A_NODE_ADDR, ETH_ALEN, addr);
	if (res < 0)
		goto nla_put_failure;

	genlmsg_end(skb, msg_head);
	genlmsg_multicast(&hsr_genl_family, skb, 0, 0, GFP_ATOMIC);

	return;

nla_put_failure:
	kfree_skb(skb);

fail:
	rcu_read_lock();
	master = hsr_port_get_hsr(hsr, HSR_PT_MASTER);
	netdev_warn(master->dev, "Could not send HSR node down\n");
	rcu_read_unlock();
}

/* HSR_C_GET_NODE_STATUS lets userspace query the internal HSR node table
 * about the status of a specific node in the network, defined by its MAC
 * address.
 *
 * Input: hsr ifindex, node mac address
 * Output: hsr ifindex, node mac address (copied from request),
 *	   age of latest frame from node over slave 1, slave 2 [ms]
 */
static int hsr_get_node_status(struct sk_buff *skb_in, struct genl_info *info)
{
	/* For receiving */
	struct nlattr *na;
	struct net_device *hsr_dev;

	/* For sending */
	struct sk_buff *skb_out;
	void *msg_head;
	struct hsr_priv *hsr;
	struct hsr_port *port;
	unsigned char hsr_node_addr_b[ETH_ALEN];
	int hsr_node_if1_age;
	u16 hsr_node_if1_seq;
	int hsr_node_if2_age;
	u16 hsr_node_if2_seq;
	int addr_b_ifindex;
	int res;

	if (!info)
		goto invalid;

	na = info->attrs[HSR_A_IFINDEX];
	if (!na)
		goto invalid;
	na = info->attrs[HSR_A_NODE_ADDR];
	if (!na)
		goto invalid;

	rcu_read_lock();
	hsr_dev = dev_get_by_index_rcu(genl_info_net(info),
				       nla_get_u32(info->attrs[HSR_A_IFINDEX]));
	if (!hsr_dev)
		goto rcu_unlock;
	if (!is_hsr_master(hsr_dev))
		goto rcu_unlock;

	/* Send reply */
	skb_out = genlmsg_new(NLMSG_GOODSIZE, GFP_ATOMIC);
	if (!skb_out) {
		res = -ENOMEM;
		goto fail;
	}

	msg_head = genlmsg_put(skb_out, NETLINK_CB(skb_in).portid,
			       info->snd_seq, &hsr_genl_family, 0,
			       HSR_C_SET_NODE_STATUS);
	if (!msg_head) {
		res = -ENOMEM;
		goto nla_put_failure;
	}

	res = nla_put_u32(skb_out, HSR_A_IFINDEX, hsr_dev->ifindex);
	if (res < 0)
		goto nla_put_failure;

	hsr = netdev_priv(hsr_dev);
	res = hsr_get_node_data(hsr,
				(unsigned char *)
				nla_data(info->attrs[HSR_A_NODE_ADDR]),
				hsr_node_addr_b,
				&addr_b_ifindex,
				&hsr_node_if1_age,
				&hsr_node_if1_seq,
				&hsr_node_if2_age,
				&hsr_node_if2_seq);
	if (res < 0)
		goto nla_put_failure;

	res = nla_put(skb_out, HSR_A_NODE_ADDR, ETH_ALEN,
		      nla_data(info->attrs[HSR_A_NODE_ADDR]));
	if (res < 0)
		goto nla_put_failure;

	if (addr_b_ifindex > -1) {
		res = nla_put(skb_out, HSR_A_NODE_ADDR_B, ETH_ALEN,
			      hsr_node_addr_b);
		if (res < 0)
			goto nla_put_failure;

		res = nla_put_u32(skb_out, HSR_A_ADDR_B_IFINDEX,
				  addr_b_ifindex);
		if (res < 0)
			goto nla_put_failure;
	}

	res = nla_put_u32(skb_out, HSR_A_IF1_AGE, hsr_node_if1_age);
	if (res < 0)
		goto nla_put_failure;
	res = nla_put_u16(skb_out, HSR_A_IF1_SEQ, hsr_node_if1_seq);
	if (res < 0)
		goto nla_put_failure;
	port = hsr_port_get_hsr(hsr, HSR_PT_SLAVE_A);
	if (port)
		res = nla_put_u32(skb_out, HSR_A_IF1_IFINDEX,
				  port->dev->ifindex);
	if (res < 0)
		goto nla_put_failure;

	res = nla_put_u32(skb_out, HSR_A_IF2_AGE, hsr_node_if2_age);
	if (res < 0)
		goto nla_put_failure;
	res = nla_put_u16(skb_out, HSR_A_IF2_SEQ, hsr_node_if2_seq);
	if (res < 0)
		goto nla_put_failure;
	port = hsr_port_get_hsr(hsr, HSR_PT_SLAVE_B);
	if (port)
		res = nla_put_u32(skb_out, HSR_A_IF2_IFINDEX,
				  port->dev->ifindex);
	if (res < 0)
		goto nla_put_failure;

	rcu_read_unlock();

	genlmsg_end(skb_out, msg_head);
	genlmsg_unicast(genl_info_net(info), skb_out, info->snd_portid);

	return 0;

rcu_unlock:
	rcu_read_unlock();
invalid:
	netlink_ack(skb_in, nlmsg_hdr(skb_in), -EINVAL, NULL);
	return 0;

nla_put_failure:
	kfree_skb(skb_out);
	/* Fall through */

fail:
	rcu_read_unlock();
	return res;
}
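
/* The reply above is sent as HSR_C_SET_NODE_STATUS, echoing the requester's
 * portid and sequence number. HSR_A_NODE_ADDR_B/HSR_A_ADDR_B_IFINDEX are only
 * included when a second MAC address is known for the node, and the IF1/IF2
 * ages are in milliseconds (see the comment above the function).
 */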

/* Get a list of MacAddressA of all nodes known to this node (including self).
 */
static int hsr_get_node_list(struct sk_buff *skb_in, struct genl_info *info)
{
	unsigned char addr[ETH_ALEN];
	struct net_device *hsr_dev;
	struct sk_buff *skb_out;
	struct hsr_priv *hsr;
	bool restart = false;
	struct nlattr *na;
	void *pos = NULL;
	void *msg_head;
	int res;

	if (!info)
		goto invalid;

	na = info->attrs[HSR_A_IFINDEX];
	if (!na)
		goto invalid;

	rcu_read_lock();
	hsr_dev = dev_get_by_index_rcu(genl_info_net(info),
				       nla_get_u32(info->attrs[HSR_A_IFINDEX]));
	if (!hsr_dev)
		goto rcu_unlock;
	if (!is_hsr_master(hsr_dev))
		goto rcu_unlock;

restart:
	/* Send reply */
	skb_out = genlmsg_new(GENLMSG_DEFAULT_SIZE, GFP_ATOMIC);
	if (!skb_out) {
		res = -ENOMEM;
		goto fail;
	}

	msg_head = genlmsg_put(skb_out, NETLINK_CB(skb_in).portid,
			       info->snd_seq, &hsr_genl_family, 0,
			       HSR_C_SET_NODE_LIST);
	if (!msg_head) {
		res = -ENOMEM;
		goto nla_put_failure;
	}

	if (!restart) {
		res = nla_put_u32(skb_out, HSR_A_IFINDEX, hsr_dev->ifindex);
		if (res < 0)
			goto nla_put_failure;
	}

	hsr = netdev_priv(hsr_dev);

	if (!pos)
		pos = hsr_get_next_node(hsr, NULL, addr);
	while (pos) {
		res = nla_put(skb_out, HSR_A_NODE_ADDR, ETH_ALEN, addr);
		if (res < 0) {
			if (res == -EMSGSIZE) {
				genlmsg_end(skb_out, msg_head);
				genlmsg_unicast(genl_info_net(info), skb_out,
						info->snd_portid);
				restart = true;
				goto restart;
			}
			goto nla_put_failure;
		}
		pos = hsr_get_next_node(hsr, pos, addr);
	}
	rcu_read_unlock();

	genlmsg_end(skb_out, msg_head);
	genlmsg_unicast(genl_info_net(info), skb_out, info->snd_portid);

	return 0;

rcu_unlock:
	rcu_read_unlock();
invalid:
	netlink_ack(skb_in, nlmsg_hdr(skb_in), -EINVAL, NULL);
	return 0;

nla_put_failure:
	nlmsg_free(skb_out);
	/* Fall through */

fail:
	rcu_read_unlock();
	return res;
}
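
/* If the node table does not fit in one GENLMSG_DEFAULT_SIZE skb, the
 * -EMSGSIZE handling above flushes what has been built so far and jumps back
 * to the restart label, so the full list reaches userspace as a series of
 * HSR_C_SET_NODE_LIST messages.
 */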

static const struct genl_small_ops hsr_ops[] = {
	{
		.cmd = HSR_C_GET_NODE_STATUS,
		.validate = GENL_DONT_VALIDATE_STRICT | GENL_DONT_VALIDATE_DUMP,
		.flags = 0,
		.doit = hsr_get_node_status,
		.dumpit = NULL,
	},
	{
		.cmd = HSR_C_GET_NODE_LIST,
		.validate = GENL_DONT_VALIDATE_STRICT | GENL_DONT_VALIDATE_DUMP,
		.flags = 0,
		.doit = hsr_get_node_list,
		.dumpit = NULL,
	},
};

static struct genl_family hsr_genl_family __ro_after_init = {
	.hdrsize = 0,
	.name = "HSR",
	.version = 1,
	.maxattr = HSR_A_MAX,
	.policy = hsr_genl_policy,
	.netnsok = true,
	.module = THIS_MODULE,
	.small_ops = hsr_ops,
	.n_small_ops = ARRAY_SIZE(hsr_ops),
	.resv_start_op = HSR_C_SET_NODE_LIST + 1,
	.mcgrps = hsr_mcgrps,
	.n_mcgrps = ARRAY_SIZE(hsr_mcgrps),
};
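
/* Userspace finds this family by its name "HSR" through the generic netlink
 * controller; attribute parsing for the two commands above is driven by
 * hsr_genl_policy, capped at HSR_A_MAX.
 */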

int __init hsr_netlink_init(void)
{
	int rc;

	rc = rtnl_link_register(&hsr_link_ops);
	if (rc)
		goto fail_rtnl_link_register;

	rc = genl_register_family(&hsr_genl_family);
	if (rc)
		goto fail_genl_register_family;

	hsr_debugfs_create_root();
	return 0;

fail_genl_register_family:
	rtnl_link_unregister(&hsr_link_ops);
fail_rtnl_link_register:

	return rc;
}

void __exit hsr_netlink_exit(void)
{
	genl_unregister_family(&hsr_genl_family);
	rtnl_link_unregister(&hsr_link_ops);
}

MODULE_ALIAS_RTNL_LINK("hsr");
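
/* MODULE_ALIAS_RTNL_LINK("hsr") lets the kernel request this module
 * automatically the first time userspace asks for a link of kind "hsr"
 * (e.g. via "ip link add ... type hsr ...").
 */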