br_cfm.c

// SPDX-License-Identifier: GPL-2.0-or-later

#include <linux/cfm_bridge.h>
#include <uapi/linux/cfm_bridge.h>
#include "br_private_cfm.h"

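/* MEP/peer MEP/port lookup helpers. br_mep_find() and br_mep_get_port() walk
 * the plain lists and are used by the RTNL-protected configuration paths; the
 * _rcu variants below may additionally be called from the RX path under
 * rcu_read_lock.
 */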
static struct br_cfm_mep *br_mep_find(struct net_bridge *br, u32 instance)
{
	struct br_cfm_mep *mep;

	hlist_for_each_entry(mep, &br->mep_list, head)
		if (mep->instance == instance)
			return mep;

	return NULL;
}

static struct br_cfm_mep *br_mep_find_ifindex(struct net_bridge *br,
					      u32 ifindex)
{
	struct br_cfm_mep *mep;

	hlist_for_each_entry_rcu(mep, &br->mep_list, head,
				 lockdep_rtnl_is_held())
		if (mep->create.ifindex == ifindex)
			return mep;

	return NULL;
}

static struct br_cfm_peer_mep *br_peer_mep_find(struct br_cfm_mep *mep,
						u32 mepid)
{
	struct br_cfm_peer_mep *peer_mep;

	hlist_for_each_entry_rcu(peer_mep, &mep->peer_mep_list, head,
				 lockdep_rtnl_is_held())
		if (peer_mep->mepid == mepid)
			return peer_mep;

	return NULL;
}

static struct net_bridge_port *br_mep_get_port(struct net_bridge *br,
					       u32 ifindex)
{
	struct net_bridge_port *port;

	list_for_each_entry(port, &br->port_list, list)
		if (port->dev->ifindex == ifindex)
			return port;

	return NULL;
}

/* Calculate the CCM interval in us. */
static u32 interval_to_us(enum br_cfm_ccm_interval interval)
{
	switch (interval) {
	case BR_CFM_CCM_INTERVAL_NONE:
		return 0;
	case BR_CFM_CCM_INTERVAL_3_3_MS:
		return 3300;
	case BR_CFM_CCM_INTERVAL_10_MS:
		return 10 * 1000;
	case BR_CFM_CCM_INTERVAL_100_MS:
		return 100 * 1000;
	case BR_CFM_CCM_INTERVAL_1_SEC:
		return 1000 * 1000;
	case BR_CFM_CCM_INTERVAL_10_SEC:
		return 10 * 1000 * 1000;
	case BR_CFM_CCM_INTERVAL_1_MIN:
		return 60 * 1000 * 1000;
	case BR_CFM_CCM_INTERVAL_10_MIN:
		return 10 * 60 * 1000 * 1000;
	}
	return 0;
}

/* Convert the interface interval to CCM PDU value. */
static u32 interval_to_pdu(enum br_cfm_ccm_interval interval)
{
	switch (interval) {
	case BR_CFM_CCM_INTERVAL_NONE:
		return 0;
	case BR_CFM_CCM_INTERVAL_3_3_MS:
		return 1;
	case BR_CFM_CCM_INTERVAL_10_MS:
		return 2;
	case BR_CFM_CCM_INTERVAL_100_MS:
		return 3;
	case BR_CFM_CCM_INTERVAL_1_SEC:
		return 4;
	case BR_CFM_CCM_INTERVAL_10_SEC:
		return 5;
	case BR_CFM_CCM_INTERVAL_1_MIN:
		return 6;
	case BR_CFM_CCM_INTERVAL_10_MIN:
		return 7;
	}
	return 0;
}

/* Convert the CCM PDU value to interval on interface. */
static u32 pdu_to_interval(u32 value)
{
	switch (value) {
	case 0:
		return BR_CFM_CCM_INTERVAL_NONE;
	case 1:
		return BR_CFM_CCM_INTERVAL_3_3_MS;
	case 2:
		return BR_CFM_CCM_INTERVAL_10_MS;
	case 3:
		return BR_CFM_CCM_INTERVAL_100_MS;
	case 4:
		return BR_CFM_CCM_INTERVAL_1_SEC;
	case 5:
		return BR_CFM_CCM_INTERVAL_10_SEC;
	case 6:
		return BR_CFM_CCM_INTERVAL_1_MIN;
	case 7:
		return BR_CFM_CCM_INTERVAL_10_MIN;
	}
	return BR_CFM_CCM_INTERVAL_NONE;
}

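/* Arm the CCM receive miss timer for a peer MEP. The delayed work fires at
 * 1/4 of the expected CC interval; ccm_rx_work_expired() counts the misses
 * and declares a CCM defect after 3.25 intervals without a received CCM.
 */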
static void ccm_rx_timer_start(struct br_cfm_peer_mep *peer_mep)
{
	u32 interval_us;

	interval_us = interval_to_us(peer_mep->mep->cc_config.exp_interval);
	/* The ccm_rx_dwork work function must run at 1/4
	 * of the configured CC 'expected_interval'
	 * in order to detect a CCM defect after 3.25 intervals.
	 */
	queue_delayed_work(system_wq, &peer_mep->ccm_rx_dwork,
			   usecs_to_jiffies(interval_us / 4));
}

static void br_cfm_notify(int event, const struct net_bridge_port *port)
{
	u32 filter = RTEXT_FILTER_CFM_STATUS;

	br_info_notify(event, port->br, NULL, filter);
}

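/* Reset the peer MEP CC status and (re)start CCM reception monitoring. */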
static void cc_peer_enable(struct br_cfm_peer_mep *peer_mep)
{
	memset(&peer_mep->cc_status, 0, sizeof(peer_mep->cc_status));
	peer_mep->ccm_rx_count_miss = 0;

	ccm_rx_timer_start(peer_mep);
}

static void cc_peer_disable(struct br_cfm_peer_mep *peer_mep)
{
	cancel_delayed_work_sync(&peer_mep->ccm_rx_dwork);
}

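/* Build a CCM PDU ready for transmission on the MEP's residence port:
 * Ethernet header, common CFM header, sequence number, MEP ID, MAID,
 * ITU reserved octets and the optional port/interface status TLVs,
 * terminated by the End TLV. Returns NULL if the skb cannot be allocated
 * or the bridge port is gone.
 */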
static struct sk_buff *ccm_frame_build(struct br_cfm_mep *mep,
				       const struct br_cfm_cc_ccm_tx_info *const tx_info)
{
	struct br_cfm_common_hdr *common_hdr;
	struct net_bridge_port *b_port;
	struct br_cfm_maid *maid;
	u8 *itu_reserved, *e_tlv;
	struct ethhdr *eth_hdr;
	struct sk_buff *skb;
	__be32 *status_tlv;
	__be32 *snumber;
	__be16 *mepid;

	skb = dev_alloc_skb(CFM_CCM_MAX_FRAME_LENGTH);
	if (!skb)
		return NULL;

	rcu_read_lock();
	b_port = rcu_dereference(mep->b_port);
	if (!b_port) {
		kfree_skb(skb);
		rcu_read_unlock();
		return NULL;
	}
	skb->dev = b_port->dev;
	rcu_read_unlock();
	/* The device cannot be deleted until the work_queue function has
	 * completed. This function is called from ccm_tx_work_expired(),
	 * which is a work_queue function.
	 */

	skb->protocol = htons(ETH_P_CFM);
	skb->priority = CFM_FRAME_PRIO;

	/* Ethernet header */
	eth_hdr = skb_put(skb, sizeof(*eth_hdr));
	ether_addr_copy(eth_hdr->h_dest, tx_info->dmac.addr);
	ether_addr_copy(eth_hdr->h_source, mep->config.unicast_mac.addr);
	eth_hdr->h_proto = htons(ETH_P_CFM);

	/* Common CFM Header */
	common_hdr = skb_put(skb, sizeof(*common_hdr));
	common_hdr->mdlevel_version = mep->config.mdlevel << 5;
	common_hdr->opcode = BR_CFM_OPCODE_CCM;
	common_hdr->flags = (mep->rdi << 7) |
			    interval_to_pdu(mep->cc_config.exp_interval);
	common_hdr->tlv_offset = CFM_CCM_TLV_OFFSET;

	/* Sequence number */
	snumber = skb_put(skb, sizeof(*snumber));
	if (tx_info->seq_no_update) {
		*snumber = cpu_to_be32(mep->ccm_tx_snumber);
		mep->ccm_tx_snumber += 1;
	} else {
		*snumber = 0;
	}

	mepid = skb_put(skb, sizeof(*mepid));
	*mepid = cpu_to_be16((u16)mep->config.mepid);

	maid = skb_put(skb, sizeof(*maid));
	memcpy(maid->data, mep->cc_config.exp_maid.data, sizeof(maid->data));

	/* ITU reserved (CFM_CCM_ITU_RESERVED_SIZE octets) */
	itu_reserved = skb_put(skb, CFM_CCM_ITU_RESERVED_SIZE);
	memset(itu_reserved, 0, CFM_CCM_ITU_RESERVED_SIZE);

	/* General CFM TLV format:
	 * TLV type:		one byte
	 * TLV value length:	two bytes
	 * TLV value:		'TLV value length' bytes
	 */

	/* Port status TLV. The value length is 1. Total of 4 bytes. */
	if (tx_info->port_tlv) {
		status_tlv = skb_put(skb, sizeof(*status_tlv));
		*status_tlv = cpu_to_be32((CFM_PORT_STATUS_TLV_TYPE << 24) |
					  (1 << 8) |	/* Value length */
					  (tx_info->port_tlv_value & 0xFF));
	}

	/* Interface status TLV. The value length is 1. Total of 4 bytes. */
	if (tx_info->if_tlv) {
		status_tlv = skb_put(skb, sizeof(*status_tlv));
		*status_tlv = cpu_to_be32((CFM_IF_STATUS_TLV_TYPE << 24) |
					  (1 << 8) |	/* Value length */
					  (tx_info->if_tlv_value & 0xFF));
	}

	/* End TLV */
	e_tlv = skb_put(skb, sizeof(*e_tlv));
	*e_tlv = CFM_ENDE_TLV_TYPE;

	return skb;
}

static void ccm_frame_tx(struct sk_buff *skb)
{
	skb_reset_network_header(skb);
	dev_queue_xmit(skb);
}

/* This function is called with the configured CC 'expected_interval'
 * in order to drive CCM transmission when enabled.
 */
static void ccm_tx_work_expired(struct work_struct *work)
{
	struct delayed_work *del_work;
	struct br_cfm_mep *mep;
	struct sk_buff *skb;
	u32 interval_us;

	del_work = to_delayed_work(work);
	mep = container_of(del_work, struct br_cfm_mep, ccm_tx_dwork);

	if (time_before_eq(mep->ccm_tx_end, jiffies)) {
		/* Transmission period has ended */
		mep->cc_ccm_tx_info.period = 0;
		return;
	}

	skb = ccm_frame_build(mep, &mep->cc_ccm_tx_info);
	if (skb)
		ccm_frame_tx(skb);

	interval_us = interval_to_us(mep->cc_config.exp_interval);
	queue_delayed_work(system_wq, &mep->ccm_tx_dwork,
			   usecs_to_jiffies(interval_us));
}

/* This function is called with 1/4 of the configured CC 'expected_interval'
 * in order to detect a CCM defect after 3.25 intervals.
 */
static void ccm_rx_work_expired(struct work_struct *work)
{
	struct br_cfm_peer_mep *peer_mep;
	struct net_bridge_port *b_port;
	struct delayed_work *del_work;

	del_work = to_delayed_work(work);
	peer_mep = container_of(del_work, struct br_cfm_peer_mep, ccm_rx_dwork);

	/* After 13 counts (4 * 3.25), 3.25 intervals have expired */
	if (peer_mep->ccm_rx_count_miss < 13) {
		/* 3.25 intervals are NOT expired without CCM reception */
		peer_mep->ccm_rx_count_miss++;

		/* Start timer again */
		ccm_rx_timer_start(peer_mep);
	} else {
		/* 3.25 intervals are expired without CCM reception.
		 * CCM defect detected
		 */
		peer_mep->cc_status.ccm_defect = true;

		/* Change in CCM defect status - notify */
		rcu_read_lock();
		b_port = rcu_dereference(peer_mep->mep->b_port);
		if (b_port)
			br_cfm_notify(RTM_NEWLINK, b_port);
		rcu_read_unlock();
	}
}

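/* Extract one CCM TLV at 'index' and record port/interface status TLV values
 * in the peer MEP CC status. Returns the full TLV length (value length plus
 * three bytes for the type and length fields), or 0 when there are no more
 * TLVs to process.
 */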
static u32 ccm_tlv_extract(struct sk_buff *skb, u32 index,
			   struct br_cfm_peer_mep *peer_mep)
{
	__be32 *s_tlv;
	__be32 _s_tlv;
	u32 h_s_tlv;
	u8 *e_tlv;
	u8 _e_tlv;

	e_tlv = skb_header_pointer(skb, index, sizeof(_e_tlv), &_e_tlv);
	if (!e_tlv)
		return 0;

	/* The TLV sequence is terminated by the End TLV - stop parsing there */
	if (*e_tlv == CFM_ENDE_TLV_TYPE)
		return 0;

	/* TLV is present - get the status TLV */
	s_tlv = skb_header_pointer(skb,
				   index,
				   sizeof(_s_tlv), &_s_tlv);
	if (!s_tlv)
		return 0;

	h_s_tlv = ntohl(*s_tlv);
	if ((h_s_tlv >> 24) == CFM_IF_STATUS_TLV_TYPE) {
		/* Interface status TLV */
		peer_mep->cc_status.tlv_seen = true;
		peer_mep->cc_status.if_tlv_value = (h_s_tlv & 0xFF);
	}

	if ((h_s_tlv >> 24) == CFM_PORT_STATUS_TLV_TYPE) {
		/* Port status TLV */
		peer_mep->cc_status.tlv_seen = true;
		peer_mep->cc_status.port_tlv_value = (h_s_tlv & 0xFF);
	}

	/* The Sender ID TLV is not handled */
	/* The Organization-Specific TLV is not handled */

	/* Return the length of this TLV.
	 * This is the length of the value field plus 3 bytes for size of type
	 * field and length field
	 */
	return ((h_s_tlv >> 8) & 0xFFFF) + 3;
}

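/* Returns 0 when the frame is not handled by a MEP here and must be forwarded
 * by the bridge, 1 when the frame is consumed by this function.
 */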
/* note: already called with rcu_read_lock */
static int br_cfm_frame_rx(struct net_bridge_port *port, struct sk_buff *skb)
{
	u32 mdlevel, interval, size, index, max;
	const struct br_cfm_common_hdr *hdr;
	struct br_cfm_peer_mep *peer_mep;
	const struct br_cfm_maid *maid;
	struct br_cfm_common_hdr _hdr;
	struct br_cfm_maid _maid;
	struct br_cfm_mep *mep;
	struct net_bridge *br;
	__be32 *snumber;
	__be32 _snumber;
	__be16 *mepid;
	__be16 _mepid;

	if (port->state == BR_STATE_DISABLED)
		return 0;

	hdr = skb_header_pointer(skb, 0, sizeof(_hdr), &_hdr);
	if (!hdr)
		return 1;

	br = port->br;
	mep = br_mep_find_ifindex(br, port->dev->ifindex);
	if (unlikely(!mep))
		/* No MEP on this port - must be forwarded */
		return 0;

	mdlevel = hdr->mdlevel_version >> 5;
	if (mdlevel > mep->config.mdlevel)
		/* The level is above this MEP level - must be forwarded */
		return 0;

	if ((hdr->mdlevel_version & 0x1F) != 0) {
		/* Invalid version */
		mep->status.version_unexp_seen = true;
		return 1;
	}

	if (mdlevel < mep->config.mdlevel) {
		/* The level is below this MEP level */
		mep->status.rx_level_low_seen = true;
		return 1;
	}

	if (hdr->opcode == BR_CFM_OPCODE_CCM) {
		/* CCM PDU received. */
		/* MA ID is after common header + sequence number + MEP ID */
		maid = skb_header_pointer(skb,
					  CFM_CCM_PDU_MAID_OFFSET,
					  sizeof(_maid), &_maid);
		if (!maid)
			return 1;
		if (memcmp(maid->data, mep->cc_config.exp_maid.data,
			   sizeof(maid->data)))
			/* MA ID not as expected */
			return 1;

		/* MEP ID is after common header + sequence number */
		mepid = skb_header_pointer(skb,
					   CFM_CCM_PDU_MEPID_OFFSET,
					   sizeof(_mepid), &_mepid);
		if (!mepid)
			return 1;
		peer_mep = br_peer_mep_find(mep, (u32)ntohs(*mepid));
		if (!peer_mep)
			return 1;

		/* Interval is in common header flags */
		interval = hdr->flags & 0x07;
		if (mep->cc_config.exp_interval != pdu_to_interval(interval))
			/* Interval not as expected */
			return 1;

		/* A valid CCM frame is received */
		if (peer_mep->cc_status.ccm_defect) {
			peer_mep->cc_status.ccm_defect = false;

			/* Change in CCM defect status - notify */
			br_cfm_notify(RTM_NEWLINK, port);

			/* Start CCM RX timer */
			ccm_rx_timer_start(peer_mep);
		}

		peer_mep->cc_status.seen = true;
		peer_mep->ccm_rx_count_miss = 0;

		/* RDI is in common header flags */
		peer_mep->cc_status.rdi = (hdr->flags & 0x80) ? true : false;

		/* Sequence number is after common header */
		snumber = skb_header_pointer(skb,
					     CFM_CCM_PDU_SEQNR_OFFSET,
					     sizeof(_snumber), &_snumber);
		if (!snumber)
			return 1;
		if (ntohl(*snumber) != (mep->ccm_rx_snumber + 1))
			/* Unexpected sequence number */
			peer_mep->cc_status.seq_unexp_seen = true;

		mep->ccm_rx_snumber = ntohl(*snumber);

		/* TLV start is after common header + sequence number + MEP ID +
		 * MA ID + ITU reserved
		 */
		index = CFM_CCM_PDU_TLV_OFFSET;
		max = 0;
		do { /* Handle all TLVs */
			size = ccm_tlv_extract(skb, index, peer_mep);
			index += size;
			max += 1;
		} while (size != 0 && max < 4); /* Max four TLVs possible */

		return 1;
	}

	mep->status.opcode_unexp_seen = true;

	return 1;
}

static struct br_frame_type cfm_frame_type __read_mostly = {
	.type = cpu_to_be16(ETH_P_CFM),
	.frame_handler = br_cfm_frame_rx,
};

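/* Create a MEP instance on a bridge port. Only Down-MEPs in the port domain
 * are supported, and only one port MEP is allowed per port. The CFM frame
 * type handler is registered when the first MEP on the bridge is created.
 */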
int br_cfm_mep_create(struct net_bridge *br,
		      const u32 instance,
		      struct br_cfm_mep_create *const create,
		      struct netlink_ext_ack *extack)
{
	struct net_bridge_port *p;
	struct br_cfm_mep *mep;

	ASSERT_RTNL();

	if (create->domain == BR_CFM_VLAN) {
		NL_SET_ERR_MSG_MOD(extack,
				   "VLAN domain not supported");
		return -EINVAL;
	}
	if (create->domain != BR_CFM_PORT) {
		NL_SET_ERR_MSG_MOD(extack,
				   "Invalid domain value");
		return -EINVAL;
	}
	if (create->direction == BR_CFM_MEP_DIRECTION_UP) {
		NL_SET_ERR_MSG_MOD(extack,
				   "Up-MEP not supported");
		return -EINVAL;
	}
	if (create->direction != BR_CFM_MEP_DIRECTION_DOWN) {
		NL_SET_ERR_MSG_MOD(extack,
				   "Invalid direction value");
		return -EINVAL;
	}

	p = br_mep_get_port(br, create->ifindex);
	if (!p) {
		NL_SET_ERR_MSG_MOD(extack,
				   "Port is not related to bridge");
		return -EINVAL;
	}

	mep = br_mep_find(br, instance);
	if (mep) {
		NL_SET_ERR_MSG_MOD(extack,
				   "MEP instance already exists");
		return -EEXIST;
	}

	/* In PORT domain only one instance can be created per port */
	if (create->domain == BR_CFM_PORT) {
		mep = br_mep_find_ifindex(br, create->ifindex);
		if (mep) {
			NL_SET_ERR_MSG_MOD(extack,
					   "Only one Port MEP on a port allowed");
			return -EINVAL;
		}
	}

	mep = kzalloc(sizeof(*mep), GFP_KERNEL);
	if (!mep)
		return -ENOMEM;

	mep->create = *create;
	mep->instance = instance;
	rcu_assign_pointer(mep->b_port, p);

	INIT_HLIST_HEAD(&mep->peer_mep_list);
	INIT_DELAYED_WORK(&mep->ccm_tx_dwork, ccm_tx_work_expired);

	if (hlist_empty(&br->mep_list))
		br_add_frame(br, &cfm_frame_type);

	hlist_add_tail_rcu(&mep->head, &br->mep_list);

	return 0;
}

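/* Tear down a MEP: free its peer MEPs, stop the CCM TX/RX work items and
 * unregister the CFM frame type handler when the last MEP on the bridge is
 * removed. Called with RTNL held.
 */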
static void mep_delete_implementation(struct net_bridge *br,
				      struct br_cfm_mep *mep)
{
	struct br_cfm_peer_mep *peer_mep;
	struct hlist_node *n_store;

	ASSERT_RTNL();

	/* Empty and free peer MEP list */
	hlist_for_each_entry_safe(peer_mep, n_store, &mep->peer_mep_list, head) {
		cancel_delayed_work_sync(&peer_mep->ccm_rx_dwork);
		hlist_del_rcu(&peer_mep->head);
		kfree_rcu(peer_mep, rcu);
	}

	cancel_delayed_work_sync(&mep->ccm_tx_dwork);

	RCU_INIT_POINTER(mep->b_port, NULL);
	hlist_del_rcu(&mep->head);
	kfree_rcu(mep, rcu);

	if (hlist_empty(&br->mep_list))
		br_del_frame(br, &cfm_frame_type);
}

int br_cfm_mep_delete(struct net_bridge *br,
		      const u32 instance,
		      struct netlink_ext_ack *extack)
{
	struct br_cfm_mep *mep;

	ASSERT_RTNL();

	mep = br_mep_find(br, instance);
	if (!mep) {
		NL_SET_ERR_MSG_MOD(extack,
				   "MEP instance does not exist");
		return -ENOENT;
	}

	mep_delete_implementation(br, mep);

	return 0;
}

int br_cfm_mep_config_set(struct net_bridge *br,
			  const u32 instance,
			  const struct br_cfm_mep_config *const config,
			  struct netlink_ext_ack *extack)
{
	struct br_cfm_mep *mep;

	ASSERT_RTNL();

	mep = br_mep_find(br, instance);
	if (!mep) {
		NL_SET_ERR_MSG_MOD(extack,
				   "MEP instance does not exist");
		return -ENOENT;
	}

	mep->config = *config;

	return 0;
}

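/* Apply a Continuity Check configuration. Enabling CC starts the CCM RX
 * monitoring timer for every configured peer MEP; disabling it cancels the
 * timers. The CCM RX/TX sequence numbers are reset on any change.
 */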
int br_cfm_cc_config_set(struct net_bridge *br,
			 const u32 instance,
			 const struct br_cfm_cc_config *const config,
			 struct netlink_ext_ack *extack)
{
	struct br_cfm_peer_mep *peer_mep;
	struct br_cfm_mep *mep;

	ASSERT_RTNL();

	mep = br_mep_find(br, instance);
	if (!mep) {
		NL_SET_ERR_MSG_MOD(extack,
				   "MEP instance does not exist");
		return -ENOENT;
	}

	/* Check for no change in configuration */
	if (memcmp(config, &mep->cc_config, sizeof(*config)) == 0)
		return 0;

	if (config->enable && !mep->cc_config.enable)
		/* CC is enabled */
		hlist_for_each_entry(peer_mep, &mep->peer_mep_list, head)
			cc_peer_enable(peer_mep);

	if (!config->enable && mep->cc_config.enable)
		/* CC is disabled */
		hlist_for_each_entry(peer_mep, &mep->peer_mep_list, head)
			cc_peer_disable(peer_mep);

	mep->cc_config = *config;
	mep->ccm_rx_snumber = 0;
	mep->ccm_tx_snumber = 1;

	return 0;
}

int br_cfm_cc_peer_mep_add(struct net_bridge *br, const u32 instance,
			   u32 mepid,
			   struct netlink_ext_ack *extack)
{
	struct br_cfm_peer_mep *peer_mep;
	struct br_cfm_mep *mep;

	ASSERT_RTNL();

	mep = br_mep_find(br, instance);
	if (!mep) {
		NL_SET_ERR_MSG_MOD(extack,
				   "MEP instance does not exist");
		return -ENOENT;
	}

	peer_mep = br_peer_mep_find(mep, mepid);
	if (peer_mep) {
		NL_SET_ERR_MSG_MOD(extack,
				   "Peer MEP-ID already exists");
		return -EEXIST;
	}

	peer_mep = kzalloc(sizeof(*peer_mep), GFP_KERNEL);
	if (!peer_mep)
		return -ENOMEM;

	peer_mep->mepid = mepid;
	peer_mep->mep = mep;
	INIT_DELAYED_WORK(&peer_mep->ccm_rx_dwork, ccm_rx_work_expired);

	if (mep->cc_config.enable)
		cc_peer_enable(peer_mep);

	hlist_add_tail_rcu(&peer_mep->head, &mep->peer_mep_list);

	return 0;
}

int br_cfm_cc_peer_mep_remove(struct net_bridge *br, const u32 instance,
			      u32 mepid,
			      struct netlink_ext_ack *extack)
{
	struct br_cfm_peer_mep *peer_mep;
	struct br_cfm_mep *mep;

	ASSERT_RTNL();

	mep = br_mep_find(br, instance);
	if (!mep) {
		NL_SET_ERR_MSG_MOD(extack,
				   "MEP instance does not exist");
		return -ENOENT;
	}

	peer_mep = br_peer_mep_find(mep, mepid);
	if (!peer_mep) {
		NL_SET_ERR_MSG_MOD(extack,
				   "Peer MEP-ID does not exist");
		return -ENOENT;
	}

	cc_peer_disable(peer_mep);

	hlist_del_rcu(&peer_mep->head);
	kfree_rcu(peer_mep, rcu);

	return 0;
}

int br_cfm_cc_rdi_set(struct net_bridge *br, const u32 instance,
		      const bool rdi, struct netlink_ext_ack *extack)
{
	struct br_cfm_mep *mep;

	ASSERT_RTNL();

	mep = br_mep_find(br, instance);
	if (!mep) {
		NL_SET_ERR_MSG_MOD(extack,
				   "MEP instance does not exist");
		return -ENOENT;
	}

	mep->rdi = rdi;

	return 0;
}

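/* Start, stop or update CCM frame transmission. tx_info->period is the
 * transmission duration in seconds; a period of zero means transmission is
 * disabled. The first frame is sent immediately, subsequent frames at the
 * configured CC 'expected_interval' until ccm_tx_end is reached.
 */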
int br_cfm_cc_ccm_tx(struct net_bridge *br, const u32 instance,
		     const struct br_cfm_cc_ccm_tx_info *const tx_info,
		     struct netlink_ext_ack *extack)
{
	struct br_cfm_mep *mep;

	ASSERT_RTNL();

	mep = br_mep_find(br, instance);
	if (!mep) {
		NL_SET_ERR_MSG_MOD(extack,
				   "MEP instance does not exist");
		return -ENOENT;
	}

	if (memcmp(tx_info, &mep->cc_ccm_tx_info, sizeof(*tx_info)) == 0) {
		/* No change in tx_info. */
		if (mep->cc_ccm_tx_info.period == 0)
			/* Transmission is not enabled - just return */
			return 0;

		/* Transmission is ongoing, the end time is recalculated */
		mep->ccm_tx_end = jiffies +
				  usecs_to_jiffies(tx_info->period * 1000000);
		return 0;
	}

	if (tx_info->period == 0 && mep->cc_ccm_tx_info.period == 0)
		/* Some change in info and transmission is not ongoing */
		goto save;

	if (tx_info->period != 0 && mep->cc_ccm_tx_info.period != 0) {
		/* Some change in info and transmission is ongoing
		 * The end time is recalculated
		 */
		mep->ccm_tx_end = jiffies +
				  usecs_to_jiffies(tx_info->period * 1000000);

		goto save;
	}

	if (tx_info->period == 0 && mep->cc_ccm_tx_info.period != 0) {
		cancel_delayed_work_sync(&mep->ccm_tx_dwork);
		goto save;
	}

	/* Start delayed work to transmit CCM frames. It is done with zero delay
	 * to send the first frame immediately
	 */
	mep->ccm_tx_end = jiffies + usecs_to_jiffies(tx_info->period * 1000000);
	queue_delayed_work(system_wq, &mep->ccm_tx_dwork, 0);

save:
	mep->cc_ccm_tx_info = *tx_info;

	return 0;
}

int br_cfm_mep_count(struct net_bridge *br, u32 *count)
{
	struct br_cfm_mep *mep;

	*count = 0;

	rcu_read_lock();
	hlist_for_each_entry_rcu(mep, &br->mep_list, head)
		*count += 1;
	rcu_read_unlock();

	return 0;
}

int br_cfm_peer_mep_count(struct net_bridge *br, u32 *count)
{
	struct br_cfm_peer_mep *peer_mep;
	struct br_cfm_mep *mep;

	*count = 0;

	rcu_read_lock();
	hlist_for_each_entry_rcu(mep, &br->mep_list, head)
		hlist_for_each_entry_rcu(peer_mep, &mep->peer_mep_list, head)
			*count += 1;
	rcu_read_unlock();

	return 0;
}

bool br_cfm_created(struct net_bridge *br)
{
	return !hlist_empty(&br->mep_list);
}

/* Deletes the CFM instances on a specific bridge port */
void br_cfm_port_del(struct net_bridge *br, struct net_bridge_port *port)
{
	struct hlist_node *n_store;
	struct br_cfm_mep *mep;

	ASSERT_RTNL();

	hlist_for_each_entry_safe(mep, n_store, &br->mep_list, head)
		if (mep->create.ifindex == port->dev->ifindex)
			mep_delete_implementation(br, mep);
}