// SPDX-License-Identifier: GPL-2.0-or-later

#include <linux/mrp_bridge.h>
#include "br_private_mrp.h"
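
/* The destination MACs below are the link-local multicast addresses that
 * IEC 62439-2 reserves for MRP: 01:15:4E:00:00:01 for MRP_Test frames and
 * 01:15:4E:00:00:03 for MRP_InTest frames.
 */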
static const u8 mrp_test_dmac[ETH_ALEN] = { 0x1, 0x15, 0x4e, 0x0, 0x0, 0x1 };
static const u8 mrp_in_test_dmac[ETH_ALEN] = { 0x1, 0x15, 0x4e, 0x0, 0x0, 0x3 };

static int br_mrp_process(struct net_bridge_port *p, struct sk_buff *skb);

static struct br_frame_type mrp_frame_type __read_mostly = {
	.type = cpu_to_be16(ETH_P_MRP),
	.frame_handler = br_mrp_process,
};

static bool br_mrp_is_ring_port(struct net_bridge_port *p_port,
				struct net_bridge_port *s_port,
				struct net_bridge_port *port)
{
	if (port == p_port ||
	    port == s_port)
		return true;

	return false;
}

static bool br_mrp_is_in_port(struct net_bridge_port *i_port,
			      struct net_bridge_port *port)
{
	if (port == i_port)
		return true;

	return false;
}

static struct net_bridge_port *br_mrp_get_port(struct net_bridge *br,
					       u32 ifindex)
{
	struct net_bridge_port *res = NULL;
	struct net_bridge_port *port;

	list_for_each_entry(port, &br->port_list, list) {
		if (port->dev->ifindex == ifindex) {
			res = port;
			break;
		}
	}

	return res;
}

static struct br_mrp *br_mrp_find_id(struct net_bridge *br, u32 ring_id)
{
	struct br_mrp *res = NULL;
	struct br_mrp *mrp;

	hlist_for_each_entry_rcu(mrp, &br->mrp_list, list,
				 lockdep_rtnl_is_held()) {
		if (mrp->ring_id == ring_id) {
			res = mrp;
			break;
		}
	}

	return res;
}

static struct br_mrp *br_mrp_find_in_id(struct net_bridge *br, u32 in_id)
{
	struct br_mrp *res = NULL;
	struct br_mrp *mrp;

	hlist_for_each_entry_rcu(mrp, &br->mrp_list, list,
				 lockdep_rtnl_is_held()) {
		if (mrp->in_id == in_id) {
			res = mrp;
			break;
		}
	}

	return res;
}

static bool br_mrp_unique_ifindex(struct net_bridge *br, u32 ifindex)
{
	struct br_mrp *mrp;

	hlist_for_each_entry_rcu(mrp, &br->mrp_list, list,
				 lockdep_rtnl_is_held()) {
		struct net_bridge_port *p;

		p = rtnl_dereference(mrp->p_port);
		if (p && p->dev->ifindex == ifindex)
			return false;

		p = rtnl_dereference(mrp->s_port);
		if (p && p->dev->ifindex == ifindex)
			return false;

		p = rtnl_dereference(mrp->i_port);
		if (p && p->dev->ifindex == ifindex)
			return false;
	}

	return true;
}

static struct br_mrp *br_mrp_find_port(struct net_bridge *br,
				       struct net_bridge_port *p)
{
	struct br_mrp *res = NULL;
	struct br_mrp *mrp;

	hlist_for_each_entry_rcu(mrp, &br->mrp_list, list,
				 lockdep_rtnl_is_held()) {
		if (rcu_access_pointer(mrp->p_port) == p ||
		    rcu_access_pointer(mrp->s_port) == p ||
		    rcu_access_pointer(mrp->i_port) == p) {
			res = mrp;
			break;
		}
	}

	return res;
}

static int br_mrp_next_seq(struct br_mrp *mrp)
{
	mrp->seq_id++;
	return mrp->seq_id;
}
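
/* Note: only the low 16 bits of seq_id go on the wire; br_mrp_skb_common()
 * below stores the value with cpu_to_be16(), so the sequence number simply
 * wraps around after 0xffff.
 */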

static struct sk_buff *br_mrp_skb_alloc(struct net_bridge_port *p,
					const u8 *src, const u8 *dst)
{
	struct ethhdr *eth_hdr;
	struct sk_buff *skb;
	__be16 *version;

	skb = dev_alloc_skb(MRP_MAX_FRAME_LENGTH);
	if (!skb)
		return NULL;

	skb->dev = p->dev;
	skb->protocol = htons(ETH_P_MRP);
	skb->priority = MRP_FRAME_PRIO;
	skb_reserve(skb, sizeof(*eth_hdr));

	eth_hdr = skb_push(skb, sizeof(*eth_hdr));
	ether_addr_copy(eth_hdr->h_dest, dst);
	ether_addr_copy(eth_hdr->h_source, src);
	eth_hdr->h_proto = htons(ETH_P_MRP);

	version = skb_put(skb, sizeof(*version));
	*version = cpu_to_be16(MRP_VERSION);

	return skb;
}
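
/* At this point the frame built by br_mrp_skb_alloc() is just:
 *
 *	| eth hdr | version (2 bytes) |
 *
 * The helpers below append the TLVs that make up the rest of the PDU.
 */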

static void br_mrp_skb_tlv(struct sk_buff *skb,
			   enum br_mrp_tlv_header_type type,
			   u8 length)
{
	struct br_mrp_tlv_hdr *hdr;

	hdr = skb_put(skb, sizeof(*hdr));
	hdr->type = type;
	hdr->length = length;
}

static void br_mrp_skb_common(struct sk_buff *skb, struct br_mrp *mrp)
{
	struct br_mrp_common_hdr *hdr;

	br_mrp_skb_tlv(skb, BR_MRP_TLV_HEADER_COMMON, sizeof(*hdr));

	hdr = skb_put(skb, sizeof(*hdr));
	hdr->seq_id = cpu_to_be16(br_mrp_next_seq(mrp));
	memset(hdr->domain, 0xff, MRP_DOMAIN_UUID_LENGTH);
}

static struct sk_buff *br_mrp_alloc_test_skb(struct br_mrp *mrp,
					     struct net_bridge_port *p,
					     enum br_mrp_port_role_type port_role)
{
	struct br_mrp_ring_test_hdr *hdr = NULL;
	struct sk_buff *skb = NULL;

	if (!p)
		return NULL;

	skb = br_mrp_skb_alloc(p, p->dev->dev_addr, mrp_test_dmac);
	if (!skb)
		return NULL;

	br_mrp_skb_tlv(skb, BR_MRP_TLV_HEADER_RING_TEST, sizeof(*hdr));
	hdr = skb_put(skb, sizeof(*hdr));

	hdr->prio = cpu_to_be16(mrp->prio);
	ether_addr_copy(hdr->sa, p->br->dev->dev_addr);
	hdr->port_role = cpu_to_be16(port_role);
	hdr->state = cpu_to_be16(mrp->ring_state);
	hdr->transitions = cpu_to_be16(mrp->ring_transitions);
	hdr->timestamp = cpu_to_be32(jiffies_to_msecs(jiffies));

	br_mrp_skb_common(skb, mrp);

	/* If the node behaves as an MRA, the Test frame needs to carry an
	 * Option TLV, which in turn includes a sub-option TLV of type
	 * AUTO_MGR.
	 */
	if (mrp->ring_role == BR_MRP_RING_ROLE_MRA) {
		struct br_mrp_sub_option1_hdr *sub_opt = NULL;
		struct br_mrp_tlv_hdr *sub_tlv = NULL;
		struct br_mrp_oui_hdr *oui = NULL;
		u8 length;

		length = sizeof(*sub_opt) + sizeof(*sub_tlv) + sizeof(*oui) +
			MRP_OPT_PADDING;
		br_mrp_skb_tlv(skb, BR_MRP_TLV_HEADER_OPTION, length);

		oui = skb_put(skb, sizeof(*oui));
		memset(oui, 0x0, sizeof(*oui));
		sub_opt = skb_put(skb, sizeof(*sub_opt));
		memset(sub_opt, 0x0, sizeof(*sub_opt));

		sub_tlv = skb_put(skb, sizeof(*sub_tlv));
		sub_tlv->type = BR_MRP_SUB_TLV_HEADER_TEST_AUTO_MGR;

		/* 32 bit alignment shall be ensured, therefore add 2 bytes */
		skb_put(skb, MRP_OPT_PADDING);
	}

	br_mrp_skb_tlv(skb, BR_MRP_TLV_HEADER_END, 0x0);

	return skb;
}
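
/* The resulting MRP_Test frame layout is therefore:
 *
 *	| eth hdr | version | ring test TLV | common TLV |
 *	| option TLV (MRA only) | end TLV |
 */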

static struct sk_buff *br_mrp_alloc_in_test_skb(struct br_mrp *mrp,
						struct net_bridge_port *p,
						enum br_mrp_port_role_type port_role)
{
	struct br_mrp_in_test_hdr *hdr = NULL;
	struct sk_buff *skb = NULL;

	if (!p)
		return NULL;

	skb = br_mrp_skb_alloc(p, p->dev->dev_addr, mrp_in_test_dmac);
	if (!skb)
		return NULL;

	br_mrp_skb_tlv(skb, BR_MRP_TLV_HEADER_IN_TEST, sizeof(*hdr));
	hdr = skb_put(skb, sizeof(*hdr));

	hdr->id = cpu_to_be16(mrp->in_id);
	ether_addr_copy(hdr->sa, p->br->dev->dev_addr);
	hdr->port_role = cpu_to_be16(port_role);
	hdr->state = cpu_to_be16(mrp->in_state);
	hdr->transitions = cpu_to_be16(mrp->in_transitions);
	hdr->timestamp = cpu_to_be32(jiffies_to_msecs(jiffies));

	br_mrp_skb_common(skb, mrp);
	br_mrp_skb_tlv(skb, BR_MRP_TLV_HEADER_END, 0x0);

	return skb;
}

/* This function is continuously called in the following cases:
 * - when node role is MRM, in this case test_monitor is always set to false
 *   because it needs to notify the userspace that the ring is open and needs
 *   to send MRP_Test frames
 * - when node role is MRA, there are 2 subcases:
 *     - when MRA behaves as MRM, in which case it acts like the MRM role
 *     - when MRA behaves as MRC, in this case test_monitor is set to true,
 *       because it needs to detect when it stops seeing MRP_Test frames
 *       from the MRM node but it doesn't need to send MRP_Test frames.
 */
static void br_mrp_test_work_expired(struct work_struct *work)
{
	struct delayed_work *del_work = to_delayed_work(work);
	struct br_mrp *mrp = container_of(del_work, struct br_mrp, test_work);
	struct net_bridge_port *p;
	bool notify_open = false;
	struct sk_buff *skb;

	if (time_before_eq(mrp->test_end, jiffies))
		return;

	if (mrp->test_count_miss < mrp->test_max_miss) {
		mrp->test_count_miss++;
	} else {
		/* Notify that the ring is open only if the ring state is
		 * closed, otherwise it would continue to notify at every
		 * interval.
		 * Also notify that the ring is open when the node has the
		 * role MRA and behaves as MRC. The reason is that the
		 * userspace needs to know when the MRM stopped sending
		 * MRP_Test frames so that the current node can try to take
		 * over the MRM role.
		 */
		if (mrp->ring_state == BR_MRP_RING_STATE_CLOSED ||
		    mrp->test_monitor)
			notify_open = true;
	}

	rcu_read_lock();

	p = rcu_dereference(mrp->p_port);
	if (p) {
		if (!mrp->test_monitor) {
			skb = br_mrp_alloc_test_skb(mrp, p,
						    BR_MRP_PORT_ROLE_PRIMARY);
			if (!skb)
				goto out;

			skb_reset_network_header(skb);
			dev_queue_xmit(skb);
		}

		if (notify_open && !mrp->ring_role_offloaded)
			br_mrp_ring_port_open(p->dev, true);
	}

	p = rcu_dereference(mrp->s_port);
	if (p) {
		if (!mrp->test_monitor) {
			skb = br_mrp_alloc_test_skb(mrp, p,
						    BR_MRP_PORT_ROLE_SECONDARY);
			if (!skb)
				goto out;

			skb_reset_network_header(skb);
			dev_queue_xmit(skb);
		}

		if (notify_open && !mrp->ring_role_offloaded)
			br_mrp_ring_port_open(p->dev, true);
	}

out:
	rcu_read_unlock();

	queue_delayed_work(system_wq, &mrp->test_work,
			   usecs_to_jiffies(mrp->test_interval));
}
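
/* The miss counter above works together with the receive path below:
 * br_mrp_mrm_process() and br_mrp_mra_process() reset test_count_miss
 * whenever a valid MRP_Test frame is seen, so the ring is reported open
 * only after test_max_miss consecutive intervals without one.
 */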

/* This function is continuously called when the node has the interconnect
 * role MIM. It generates interconnect test frames and sends them on all 3
 * ports. It also checks whether the node has stopped receiving interconnect
 * test frames.
 */
static void br_mrp_in_test_work_expired(struct work_struct *work)
{
	struct delayed_work *del_work = to_delayed_work(work);
	struct br_mrp *mrp = container_of(del_work, struct br_mrp,
					  in_test_work);
	struct net_bridge_port *p;
	bool notify_open = false;
	struct sk_buff *skb;

	if (time_before_eq(mrp->in_test_end, jiffies))
		return;

	if (mrp->in_test_count_miss < mrp->in_test_max_miss) {
		mrp->in_test_count_miss++;
	} else {
		/* Notify that the interconnect ring is open only if the
		 * interconnect ring state is closed, otherwise it would
		 * continue to notify at every interval.
		 */
		if (mrp->in_state == BR_MRP_IN_STATE_CLOSED)
			notify_open = true;
	}

	rcu_read_lock();

	p = rcu_dereference(mrp->p_port);
	if (p) {
		skb = br_mrp_alloc_in_test_skb(mrp, p,
					       BR_MRP_PORT_ROLE_PRIMARY);
		if (!skb)
			goto out;

		skb_reset_network_header(skb);
		dev_queue_xmit(skb);

		if (notify_open && !mrp->in_role_offloaded)
			br_mrp_in_port_open(p->dev, true);
	}

	p = rcu_dereference(mrp->s_port);
	if (p) {
		skb = br_mrp_alloc_in_test_skb(mrp, p,
					       BR_MRP_PORT_ROLE_SECONDARY);
		if (!skb)
			goto out;

		skb_reset_network_header(skb);
		dev_queue_xmit(skb);

		if (notify_open && !mrp->in_role_offloaded)
			br_mrp_in_port_open(p->dev, true);
	}

	p = rcu_dereference(mrp->i_port);
	if (p) {
		skb = br_mrp_alloc_in_test_skb(mrp, p,
					       BR_MRP_PORT_ROLE_INTER);
		if (!skb)
			goto out;

		skb_reset_network_header(skb);
		dev_queue_xmit(skb);

		if (notify_open && !mrp->in_role_offloaded)
			br_mrp_in_port_open(p->dev, true);
	}

out:
	rcu_read_unlock();

	queue_delayed_work(system_wq, &mrp->in_test_work,
			   usecs_to_jiffies(mrp->in_test_interval));
}

/* Deletes the MRP instance.
 * note: called under rtnl_lock
 */
static void br_mrp_del_impl(struct net_bridge *br, struct br_mrp *mrp)
{
	struct net_bridge_port *p;
	u8 state;

	/* Stop sending MRP_Test frames */
	cancel_delayed_work_sync(&mrp->test_work);
	br_mrp_switchdev_send_ring_test(br, mrp, 0, 0, 0, 0);

	/* Stop sending MRP_InTest frames if it has an interconnect role */
	cancel_delayed_work_sync(&mrp->in_test_work);
	br_mrp_switchdev_send_in_test(br, mrp, 0, 0, 0);

	/* Disable the roles */
	br_mrp_switchdev_set_ring_role(br, mrp, BR_MRP_RING_ROLE_DISABLED);
	p = rtnl_dereference(mrp->i_port);
	if (p)
		br_mrp_switchdev_set_in_role(br, mrp, mrp->in_id, mrp->ring_id,
					     BR_MRP_IN_ROLE_DISABLED);

	br_mrp_switchdev_del(br, mrp);

	/* Reset the ports */
	p = rtnl_dereference(mrp->p_port);
	if (p) {
		spin_lock_bh(&br->lock);
		state = netif_running(br->dev) ?
				BR_STATE_FORWARDING : BR_STATE_DISABLED;
		p->state = state;
		p->flags &= ~BR_MRP_AWARE;
		spin_unlock_bh(&br->lock);
		br_mrp_port_switchdev_set_state(p, state);
		rcu_assign_pointer(mrp->p_port, NULL);
	}

	p = rtnl_dereference(mrp->s_port);
	if (p) {
		spin_lock_bh(&br->lock);
		state = netif_running(br->dev) ?
				BR_STATE_FORWARDING : BR_STATE_DISABLED;
		p->state = state;
		p->flags &= ~BR_MRP_AWARE;
		spin_unlock_bh(&br->lock);
		br_mrp_port_switchdev_set_state(p, state);
		rcu_assign_pointer(mrp->s_port, NULL);
	}

	p = rtnl_dereference(mrp->i_port);
	if (p) {
		spin_lock_bh(&br->lock);
		state = netif_running(br->dev) ?
				BR_STATE_FORWARDING : BR_STATE_DISABLED;
		p->state = state;
		p->flags &= ~BR_MRP_AWARE;
		spin_unlock_bh(&br->lock);
		br_mrp_port_switchdev_set_state(p, state);
		rcu_assign_pointer(mrp->i_port, NULL);
	}

	hlist_del_rcu(&mrp->list);
	kfree_rcu(mrp, rcu);

	if (hlist_empty(&br->mrp_list))
		br_del_frame(br, &mrp_frame_type);
}

/* Adds a new MRP instance.
 * note: called under rtnl_lock
 */
int br_mrp_add(struct net_bridge *br, struct br_mrp_instance *instance)
{
	struct net_bridge_port *p;
	struct br_mrp *mrp;
	int err;

	/* If the ring exists, it is not possible to create another one with
	 * the same ring_id
	 */
	mrp = br_mrp_find_id(br, instance->ring_id);
	if (mrp)
		return -EINVAL;

	if (!br_mrp_get_port(br, instance->p_ifindex) ||
	    !br_mrp_get_port(br, instance->s_ifindex))
		return -EINVAL;

	/* It is not possible to have the same port part of multiple rings */
	if (!br_mrp_unique_ifindex(br, instance->p_ifindex) ||
	    !br_mrp_unique_ifindex(br, instance->s_ifindex))
		return -EINVAL;

	mrp = kzalloc(sizeof(*mrp), GFP_KERNEL);
	if (!mrp)
		return -ENOMEM;

	mrp->ring_id = instance->ring_id;
	mrp->prio = instance->prio;

	p = br_mrp_get_port(br, instance->p_ifindex);
	spin_lock_bh(&br->lock);
	p->state = BR_STATE_FORWARDING;
	p->flags |= BR_MRP_AWARE;
	spin_unlock_bh(&br->lock);
	rcu_assign_pointer(mrp->p_port, p);

	p = br_mrp_get_port(br, instance->s_ifindex);
	spin_lock_bh(&br->lock);
	p->state = BR_STATE_FORWARDING;
	p->flags |= BR_MRP_AWARE;
	spin_unlock_bh(&br->lock);
	rcu_assign_pointer(mrp->s_port, p);

	if (hlist_empty(&br->mrp_list))
		br_add_frame(br, &mrp_frame_type);

	INIT_DELAYED_WORK(&mrp->test_work, br_mrp_test_work_expired);
	INIT_DELAYED_WORK(&mrp->in_test_work, br_mrp_in_test_work_expired);
	hlist_add_tail_rcu(&mrp->list, &br->mrp_list);

	err = br_mrp_switchdev_add(br, mrp);
	if (err)
		goto delete_mrp;

	return 0;

delete_mrp:
	br_mrp_del_impl(br, mrp);

	return err;
}
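
/* br_mrp_add() and the exported setters below are driven from the bridge's
 * MRP netlink code (br_mrp_netlink.c in the kernel tree): the userspace MRP
 * daemon owns the protocol state machines, while the kernel handles only
 * the timing-critical Test frame generation and forwarding.
 */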

/* Deletes the MRP instance that the port is part of
 * note: called under rtnl_lock
 */
void br_mrp_port_del(struct net_bridge *br, struct net_bridge_port *p)
{
	struct br_mrp *mrp = br_mrp_find_port(br, p);

	/* If the port is not part of an MRP instance just bail out */
	if (!mrp)
		return;

	br_mrp_del_impl(br, mrp);
}

/* Deletes an existing MRP instance based on the ring_id
 * note: called under rtnl_lock
 */
int br_mrp_del(struct net_bridge *br, struct br_mrp_instance *instance)
{
	struct br_mrp *mrp = br_mrp_find_id(br, instance->ring_id);

	if (!mrp)
		return -EINVAL;

	br_mrp_del_impl(br, mrp);

	return 0;
}

/* Set port state, port state can be forwarding, blocked or disabled
 * note: already called with rtnl_lock
 */
int br_mrp_set_port_state(struct net_bridge_port *p,
			  enum br_mrp_port_state_type state)
{
	u32 port_state;

	if (!p || !(p->flags & BR_MRP_AWARE))
		return -EINVAL;

	spin_lock_bh(&p->br->lock);

	if (state == BR_MRP_PORT_STATE_FORWARDING)
		port_state = BR_STATE_FORWARDING;
	else
		port_state = BR_STATE_BLOCKING;

	p->state = port_state;
	spin_unlock_bh(&p->br->lock);

	br_mrp_port_switchdev_set_state(p, port_state);

	return 0;
}

/* Set port role, port role can be primary or secondary
 * note: already called with rtnl_lock
 */
int br_mrp_set_port_role(struct net_bridge_port *p,
			 enum br_mrp_port_role_type role)
{
	struct br_mrp *mrp;

	if (!p || !(p->flags & BR_MRP_AWARE))
		return -EINVAL;

	mrp = br_mrp_find_port(p->br, p);

	if (!mrp)
		return -EINVAL;

	switch (role) {
	case BR_MRP_PORT_ROLE_PRIMARY:
		rcu_assign_pointer(mrp->p_port, p);
		break;
	case BR_MRP_PORT_ROLE_SECONDARY:
		rcu_assign_pointer(mrp->s_port, p);
		break;
	default:
		return -EINVAL;
	}

	br_mrp_port_switchdev_set_role(p, role);

	return 0;
}

/* Set ring state, ring state can be only Open or Closed
 * note: already called with rtnl_lock
 */
int br_mrp_set_ring_state(struct net_bridge *br,
			  struct br_mrp_ring_state *state)
{
	struct br_mrp *mrp = br_mrp_find_id(br, state->ring_id);

	if (!mrp)
		return -EINVAL;

	if (mrp->ring_state != state->ring_state)
		mrp->ring_transitions++;

	mrp->ring_state = state->ring_state;

	br_mrp_switchdev_set_ring_state(br, mrp, state->ring_state);

	return 0;
}

/* Set ring role, ring role can be only MRM (Media Redundancy Manager) or
 * MRC (Media Redundancy Client).
 * note: already called with rtnl_lock
 */
int br_mrp_set_ring_role(struct net_bridge *br,
			 struct br_mrp_ring_role *role)
{
	struct br_mrp *mrp = br_mrp_find_id(br, role->ring_id);
	enum br_mrp_hw_support support;

	if (!mrp)
		return -EINVAL;

	mrp->ring_role = role->ring_role;

	/* If there is an error just bail out */
	support = br_mrp_switchdev_set_ring_role(br, mrp, role->ring_role);
	if (support == BR_MRP_NONE)
		return -EOPNOTSUPP;

	/* Now detect if the HW actually applied the role or not. If the HW
	 * applied the role it means that the SW will not need to do those
	 * operations anymore. For example if the role is MRM then the HW
	 * will notify the SW when the ring is open, but if the role is not
	 * pushed to the HW the SW will need to detect when the ring is open.
	 */
	mrp->ring_role_offloaded = support == BR_MRP_SW ? 0 : 1;

	return 0;
}
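
/* The br_mrp_switchdev_* calls return a tri-state: BR_MRP_NONE means
 * neither HW nor SW can take over (an error is propagated), BR_MRP_HW means
 * the operation is fully offloaded, and BR_MRP_SW means the SW must emulate
 * it, e.g. via the delayed work items that generate the Test frames.
 */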

/* Start to generate or monitor MRP test frames. The frames are generated by
 * the HW and, if that fails, they are generated by the SW.
 * note: already called with rtnl_lock
 */
int br_mrp_start_test(struct net_bridge *br,
		      struct br_mrp_start_test *test)
{
	struct br_mrp *mrp = br_mrp_find_id(br, test->ring_id);
	enum br_mrp_hw_support support;

	if (!mrp)
		return -EINVAL;

	/* Try to push it to the HW and if it fails then continue with SW
	 * implementation and if that also fails then return error.
	 */
	support = br_mrp_switchdev_send_ring_test(br, mrp, test->interval,
						  test->max_miss, test->period,
						  test->monitor);
	if (support == BR_MRP_NONE)
		return -EOPNOTSUPP;

	if (support == BR_MRP_HW)
		return 0;

	mrp->test_interval = test->interval;
	mrp->test_end = jiffies + usecs_to_jiffies(test->period);
	mrp->test_max_miss = test->max_miss;
	mrp->test_monitor = test->monitor;
	mrp->test_count_miss = 0;
	queue_delayed_work(system_wq, &mrp->test_work,
			   usecs_to_jiffies(test->interval));

	return 0;
}

/* Set in state, in state can be only Open or Closed
 * note: already called with rtnl_lock
 */
int br_mrp_set_in_state(struct net_bridge *br, struct br_mrp_in_state *state)
{
	struct br_mrp *mrp = br_mrp_find_in_id(br, state->in_id);

	if (!mrp)
		return -EINVAL;

	if (mrp->in_state != state->in_state)
		mrp->in_transitions++;

	mrp->in_state = state->in_state;

	br_mrp_switchdev_set_in_state(br, mrp, state->in_state);

	return 0;
}

/* Set in role, in role can be only MIM (Media Interconnection Manager) or
 * MIC (Media Interconnection Client).
 * note: already called with rtnl_lock
 */
int br_mrp_set_in_role(struct net_bridge *br, struct br_mrp_in_role *role)
{
	struct br_mrp *mrp = br_mrp_find_id(br, role->ring_id);
	enum br_mrp_hw_support support;
	struct net_bridge_port *p;

	if (!mrp)
		return -EINVAL;

	if (!br_mrp_get_port(br, role->i_ifindex))
		return -EINVAL;

	if (role->in_role == BR_MRP_IN_ROLE_DISABLED) {
		u8 state;

		/* It is not allowed to disable a port that doesn't exist */
		p = rtnl_dereference(mrp->i_port);
		if (!p)
			return -EINVAL;

		/* Stop generating MRP_InTest frames */
		cancel_delayed_work_sync(&mrp->in_test_work);
		br_mrp_switchdev_send_in_test(br, mrp, 0, 0, 0);

		/* Remove the port */
		spin_lock_bh(&br->lock);
		state = netif_running(br->dev) ?
				BR_STATE_FORWARDING : BR_STATE_DISABLED;
		p->state = state;
		p->flags &= ~BR_MRP_AWARE;
		spin_unlock_bh(&br->lock);
		br_mrp_port_switchdev_set_state(p, state);
		rcu_assign_pointer(mrp->i_port, NULL);

		mrp->in_role = role->in_role;
		mrp->in_id = 0;

		return 0;
	}

	/* It is not possible to have the same port part of multiple rings */
	if (!br_mrp_unique_ifindex(br, role->i_ifindex))
		return -EINVAL;

	/* It is not allowed to set a different interconnect port if the mrp
	 * instance already has one. It first needs to be disabled, and only
	 * after that can the new port be set.
	 */
	if (rcu_access_pointer(mrp->i_port))
		return -EINVAL;

	p = br_mrp_get_port(br, role->i_ifindex);
	spin_lock_bh(&br->lock);
	p->state = BR_STATE_FORWARDING;
	p->flags |= BR_MRP_AWARE;
	spin_unlock_bh(&br->lock);
	rcu_assign_pointer(mrp->i_port, p);

	mrp->in_role = role->in_role;
	mrp->in_id = role->in_id;

	/* If there is an error just bail out */
	support = br_mrp_switchdev_set_in_role(br, mrp, role->in_id,
					       role->ring_id, role->in_role);
	if (support == BR_MRP_NONE)
		return -EOPNOTSUPP;

	/* Now detect if the HW actually applied the role or not. If the HW
	 * applied the role it means that the SW will not need to do those
	 * operations anymore. For example if the role is MIM then the HW
	 * will notify the SW when the interconnect ring is open, but if the
	 * role is not pushed to the HW the SW will need to detect when the
	 * interconnect ring is open.
	 */
	mrp->in_role_offloaded = support == BR_MRP_SW ? 0 : 1;

	return 0;
}

/* Start to generate MRP_InTest frames. The frames are generated by the HW
 * and, if that fails, they are generated by the SW.
 * note: already called with rtnl_lock
 */
int br_mrp_start_in_test(struct net_bridge *br,
			 struct br_mrp_start_in_test *in_test)
{
	struct br_mrp *mrp = br_mrp_find_in_id(br, in_test->in_id);
	enum br_mrp_hw_support support;

	if (!mrp)
		return -EINVAL;

	if (mrp->in_role != BR_MRP_IN_ROLE_MIM)
		return -EINVAL;

	/* Try to push it to the HW and if it fails then continue with SW
	 * implementation and if that also fails then return error.
	 */
	support = br_mrp_switchdev_send_in_test(br, mrp, in_test->interval,
						in_test->max_miss,
						in_test->period);
	if (support == BR_MRP_NONE)
		return -EOPNOTSUPP;

	if (support == BR_MRP_HW)
		return 0;

	mrp->in_test_interval = in_test->interval;
	mrp->in_test_end = jiffies + usecs_to_jiffies(in_test->period);
	mrp->in_test_max_miss = in_test->max_miss;
	mrp->in_test_count_miss = 0;
	queue_delayed_work(system_wq, &mrp->in_test_work,
			   usecs_to_jiffies(in_test->interval));

	return 0;
}

/* Determine if the frame type is a ring frame */
static bool br_mrp_ring_frame(struct sk_buff *skb)
{
	const struct br_mrp_tlv_hdr *hdr;
	struct br_mrp_tlv_hdr _hdr;

	hdr = skb_header_pointer(skb, sizeof(uint16_t), sizeof(_hdr), &_hdr);
	if (!hdr)
		return false;

	if (hdr->type == BR_MRP_TLV_HEADER_RING_TEST ||
	    hdr->type == BR_MRP_TLV_HEADER_RING_TOPO ||
	    hdr->type == BR_MRP_TLV_HEADER_RING_LINK_DOWN ||
	    hdr->type == BR_MRP_TLV_HEADER_RING_LINK_UP ||
	    hdr->type == BR_MRP_TLV_HEADER_OPTION)
		return true;

	return false;
}

/* Determine if the frame type is an interconnect frame */
static bool br_mrp_in_frame(struct sk_buff *skb)
{
	const struct br_mrp_tlv_hdr *hdr;
	struct br_mrp_tlv_hdr _hdr;

	hdr = skb_header_pointer(skb, sizeof(uint16_t), sizeof(_hdr), &_hdr);
	if (!hdr)
		return false;

	if (hdr->type == BR_MRP_TLV_HEADER_IN_TEST ||
	    hdr->type == BR_MRP_TLV_HEADER_IN_TOPO ||
	    hdr->type == BR_MRP_TLV_HEADER_IN_LINK_DOWN ||
	    hdr->type == BR_MRP_TLV_HEADER_IN_LINK_UP ||
	    hdr->type == BR_MRP_TLV_HEADER_IN_LINK_STATUS)
		return true;

	return false;
}

/* Process only MRP Test frames. All the other MRP frames are processed by
 * the userspace application.
 * note: already called with rcu_read_lock
 */
static void br_mrp_mrm_process(struct br_mrp *mrp, struct net_bridge_port *port,
			       struct sk_buff *skb)
{
	const struct br_mrp_tlv_hdr *hdr;
	struct br_mrp_tlv_hdr _hdr;

	/* Each MRP header starts with a version field which is 16 bits.
	 * Therefore skip the version and get directly the TLV header.
	 */
	hdr = skb_header_pointer(skb, sizeof(uint16_t), sizeof(_hdr), &_hdr);
	if (!hdr)
		return;

	if (hdr->type != BR_MRP_TLV_HEADER_RING_TEST)
		return;

	mrp->test_count_miss = 0;

	/* Notify the userspace that the ring is closed only if the ring is
	 * not already in the closed state, otherwise it would notify on
	 * every received Test frame.
	 */
	if (mrp->ring_state != BR_MRP_RING_STATE_CLOSED)
		br_mrp_ring_port_open(port->dev, false);
}

/* Determine if the test hdr has a better priority than the node */
static bool br_mrp_test_better_than_own(struct br_mrp *mrp,
					struct net_bridge *br,
					const struct br_mrp_ring_test_hdr *hdr)
{
	u16 prio = be16_to_cpu(hdr->prio);

	if (prio < mrp->prio ||
	    (prio == mrp->prio &&
	     ether_addr_to_u64(hdr->sa) < ether_addr_to_u64(br->dev->dev_addr)))
		return true;

	return false;
}
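
/* A lower numeric prio value wins the comparison above, with the source MAC
 * address as a tie-breaker; this mirrors the manager election between MRA
 * nodes, where the best candidate has the lowest priority value.
 */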

/* Process only MRP Test frames. All the other MRP frames are processed by
 * the userspace application.
 * note: already called with rcu_read_lock
 */
static void br_mrp_mra_process(struct br_mrp *mrp, struct net_bridge *br,
			       struct net_bridge_port *port,
			       struct sk_buff *skb)
{
	const struct br_mrp_ring_test_hdr *test_hdr;
	struct br_mrp_ring_test_hdr _test_hdr;
	const struct br_mrp_tlv_hdr *hdr;
	struct br_mrp_tlv_hdr _hdr;

	/* Each MRP header starts with a version field which is 16 bits.
	 * Therefore skip the version and get directly the TLV header.
	 */
	hdr = skb_header_pointer(skb, sizeof(uint16_t), sizeof(_hdr), &_hdr);
	if (!hdr)
		return;

	if (hdr->type != BR_MRP_TLV_HEADER_RING_TEST)
		return;

	test_hdr = skb_header_pointer(skb, sizeof(uint16_t) + sizeof(_hdr),
				      sizeof(_test_hdr), &_test_hdr);
	if (!test_hdr)
		return;

	/* Only frames that have a better priority than this node clear the
	 * miss counter; otherwise the node needs to take over and behave
	 * as MRM.
	 */
	if (br_mrp_test_better_than_own(mrp, br, test_hdr))
		mrp->test_count_miss = 0;
}

/* Process only MRP InTest frames. All the other MRP frames are processed by
 * the userspace application.
 * note: already called with rcu_read_lock
 */
static bool br_mrp_mim_process(struct br_mrp *mrp, struct net_bridge_port *port,
			       struct sk_buff *skb)
{
	const struct br_mrp_in_test_hdr *in_hdr;
	struct br_mrp_in_test_hdr _in_hdr;
	const struct br_mrp_tlv_hdr *hdr;
	struct br_mrp_tlv_hdr _hdr;

	/* Each MRP header starts with a version field which is 16 bits.
	 * Therefore skip the version and get directly the TLV header.
	 */
	hdr = skb_header_pointer(skb, sizeof(uint16_t), sizeof(_hdr), &_hdr);
	if (!hdr)
		return false;

	/* The check for the InTest frame type was already done */
	in_hdr = skb_header_pointer(skb, sizeof(uint16_t) + sizeof(_hdr),
				    sizeof(_in_hdr), &_in_hdr);
	if (!in_hdr)
		return false;

	/* It needs to process only its own InTest frames. */
	if (mrp->in_id != ntohs(in_hdr->id))
		return false;

	mrp->in_test_count_miss = 0;

	/* Notify the userspace that the interconnect ring is closed only if
	 * it is not already in the closed state, otherwise it would notify
	 * on every received InTest frame.
	 */
	if (mrp->in_state != BR_MRP_IN_STATE_CLOSED)
		br_mrp_in_port_open(port->dev, false);

	return true;
}

/* Get the MRP frame type
 * note: already called with rcu_read_lock
 */
static u8 br_mrp_get_frame_type(struct sk_buff *skb)
{
	const struct br_mrp_tlv_hdr *hdr;
	struct br_mrp_tlv_hdr _hdr;

	/* Each MRP header starts with a version field which is 16 bits.
	 * Therefore skip the version and get directly the TLV header.
	 */
	hdr = skb_header_pointer(skb, sizeof(uint16_t), sizeof(_hdr), &_hdr);
	if (!hdr)
		return 0xff;

	return hdr->type;
}

static bool br_mrp_mrm_behaviour(struct br_mrp *mrp)
{
	if (mrp->ring_role == BR_MRP_RING_ROLE_MRM ||
	    (mrp->ring_role == BR_MRP_RING_ROLE_MRA && !mrp->test_monitor))
		return true;

	return false;
}

static bool br_mrp_mrc_behaviour(struct br_mrp *mrp)
{
	if (mrp->ring_role == BR_MRP_RING_ROLE_MRC ||
	    (mrp->ring_role == BR_MRP_RING_ROLE_MRA && mrp->test_monitor))
		return true;

	return false;
}

/* This will just forward the frame to the other MRP ring ports, depending on
 * the frame type, ring role and interconnect role
 * note: already called with rcu_read_lock
 */
static int br_mrp_rcv(struct net_bridge_port *p,
		      struct sk_buff *skb, struct net_device *dev)
{
	struct net_bridge_port *p_port, *s_port, *i_port = NULL;
	struct net_bridge_port *p_dst, *s_dst, *i_dst = NULL;
	struct net_bridge *br;
	struct br_mrp *mrp;

	/* If port is disabled don't accept any frames */
	if (p->state == BR_STATE_DISABLED)
		return 0;

	br = p->br;
	mrp = br_mrp_find_port(br, p);
	if (unlikely(!mrp))
		return 0;

	p_port = rcu_dereference(mrp->p_port);
	if (!p_port)
		return 0;
	p_dst = p_port;

	s_port = rcu_dereference(mrp->s_port);
	if (!s_port)
		return 0;
	s_dst = s_port;

	/* If the frame is a ring frame then it is not required to check the
	 * interconnect role and ports to process or forward the frame
	 */
	if (br_mrp_ring_frame(skb)) {
		/* If the role is MRM then don't forward the frames */
		if (mrp->ring_role == BR_MRP_RING_ROLE_MRM) {
			br_mrp_mrm_process(mrp, p, skb);
			goto no_forward;
		}

		/* If the role is MRA then don't forward the frames if it
		 * behaves as an MRM node
		 */
		if (mrp->ring_role == BR_MRP_RING_ROLE_MRA) {
			if (!mrp->test_monitor) {
				br_mrp_mrm_process(mrp, p, skb);
				goto no_forward;
			}

			br_mrp_mra_process(mrp, br, p, skb);
		}

		goto forward;
	}

	if (br_mrp_in_frame(skb)) {
		u8 in_type = br_mrp_get_frame_type(skb);

		i_port = rcu_dereference(mrp->i_port);
		i_dst = i_port;

		/* If the ring port is in blocking state it should not
		 * forward In_Test frames
		 */
		if (br_mrp_is_ring_port(p_port, s_port, p) &&
		    p->state == BR_STATE_BLOCKING &&
		    in_type == BR_MRP_TLV_HEADER_IN_TEST)
			goto no_forward;

		/* A node that behaves as MRM needs to stop forwarding the
		 * frames when the ring is closed, otherwise there would be
		 * a loop. In this case the frame is not forwarded between
		 * the ring ports.
		 */
		if (br_mrp_mrm_behaviour(mrp) &&
		    br_mrp_is_ring_port(p_port, s_port, p) &&
		    (s_port->state != BR_STATE_FORWARDING ||
		     p_port->state != BR_STATE_FORWARDING)) {
			p_dst = NULL;
			s_dst = NULL;
		}

		/* A node that behaves as MRC and doesn't have an interconnect
		 * role should forward all frames between the ring ports,
		 * because it doesn't have an interconnect port
		 */
		if (br_mrp_mrc_behaviour(mrp) &&
		    mrp->in_role == BR_MRP_IN_ROLE_DISABLED)
			goto forward;

		if (mrp->in_role == BR_MRP_IN_ROLE_MIM) {
			if (in_type == BR_MRP_TLV_HEADER_IN_TEST) {
				/* MIM should not forward its own InTest
				 * frames
				 */
				if (br_mrp_mim_process(mrp, p, skb)) {
					goto no_forward;
				} else {
					if (br_mrp_is_ring_port(p_port, s_port,
								p))
						i_dst = NULL;

					if (br_mrp_is_in_port(i_port, p))
						goto no_forward;
				}
			} else {
				/* MIM should forward IntLinkChange/Status and
				 * IntTopoChange between ring ports but MIM
				 * should not forward IntLinkChange/Status and
				 * IntTopoChange if the frame was received at
				 * the interconnect port
				 */
				if (br_mrp_is_ring_port(p_port, s_port, p))
					i_dst = NULL;

				if (br_mrp_is_in_port(i_port, p))
					goto no_forward;
			}
		}

		if (mrp->in_role == BR_MRP_IN_ROLE_MIC) {
			/* MIC should forward InTest frames on all ports
			 * regardless of the port on which they were received
			 */
			if (in_type == BR_MRP_TLV_HEADER_IN_TEST)
				goto forward;

			/* MIC should forward IntLinkChange frames to all the
			 * ports only if they were received on ring ports
			 */
			if (br_mrp_is_ring_port(p_port, s_port, p) &&
			    (in_type == BR_MRP_TLV_HEADER_IN_LINK_UP ||
			     in_type == BR_MRP_TLV_HEADER_IN_LINK_DOWN))
				goto forward;

			/* MIC should forward IntLinkStatus frames only to the
			 * interconnect port if they were received on a ring
			 * port. If they are received on the interconnect port
			 * they should be forwarded to both ring ports.
			 */
			if (br_mrp_is_ring_port(p_port, s_port, p) &&
			    in_type == BR_MRP_TLV_HEADER_IN_LINK_STATUS) {
				p_dst = NULL;
				s_dst = NULL;
			}

			/* Should forward the InTopo frames only between the
			 * ring ports
			 */
			if (in_type == BR_MRP_TLV_HEADER_IN_TOPO) {
				i_dst = NULL;
				goto forward;
			}

			/* In all the other cases don't forward the frames */
			goto no_forward;
		}
	}

forward:
	if (p_dst)
		br_forward(p_dst, skb, true, false);
	if (s_dst)
		br_forward(s_dst, skb, true, false);
	if (i_dst)
		br_forward(i_dst, skb, true, false);

no_forward:
	return 1;
}
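
/* br_mrp_process() below is the frame_handler registered via mrp_frame_type:
 * returning 1 means the frame was consumed by MRP, returning 0 hands it back
 * to normal bridge processing.
 */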

/* Check if the frame was received on a port that is part of an MRP ring
 * and if the frame has the MRP EtherType. In that case process the frame,
 * otherwise do normal forwarding.
 * note: already called with rcu_read_lock
 */
static int br_mrp_process(struct net_bridge_port *p, struct sk_buff *skb)
{
	/* If there is no MRP instance do normal forwarding */
	if (likely(!(p->flags & BR_MRP_AWARE)))
		goto out;

	return br_mrp_rcv(p, skb, p->dev);
out:
	return 0;
}

bool br_mrp_enabled(struct net_bridge *br)
{
	return !hlist_empty(&br->mrp_list);
}