bond_alb.c 49 KB

12345678910111213141516171819202122232425262728293031323334353637383940414243444546474849505152535455565758596061626364656667686970717273747576777879808182838485868788899091929394959697989910010110210310410510610710810911011111211311411511611711811912012112212312412512612712812913013113213313413513613713813914014114214314414514614714814915015115215315415515615715815916016116216316416516616716816917017117217317417517617717817918018118218318418518618718818919019119219319419519619719819920020120220320420520620720820921021121221321421521621721821922022122222322422522622722822923023123223323423523623723823924024124224324424524624724824925025125225325425525625725825926026126226326426526626726826927027127227327427527627727827928028128228328428528628728828929029129229329429529629729829930030130230330430530630730830931031131231331431531631731831932032132232332432532632732832933033133233333433533633733833934034134234334434534634734834935035135235335435535635735835936036136236336436536636736836937037137237337437537637737837938038138238338438538638738838939039139239339439539639739839940040140240340440540640740840941041141241341441541641741841942042142242342442542642742842943043143243343443543643743843944044144244344444544644744844945045145245345445545645745845946046146246346446546646746846947047147247347447547647747847948048148248348448548648748848949049149249349449549649749849950050150250350450550650750850951051151251351451551651751851952052152252352452552652752852953053153253353453553653753853954054154254354454554654754854955055155255355455555655755855956056156256356456556656756856957057157257357457557657757857958058158258358458558658758858959059159259359459559659759859960060160260360460560660760860961061161261361461561661761861962062162262362462562662762862963063163263363463563663763863964064164264364464564664764864965065165265365465565665765865966066166266366466566666766866967067167267367467567667767867968068168268368468568668768868969069169269369469569669769869970070170270
37047057067077087097107117127137147157167177187197207217227237247257267277287297307317327337347357367377387397407417427437447457467477487497507517527537547557567577587597607617627637647657667677687697707717727737747757767777787797807817827837847857867877887897907917927937947957967977987998008018028038048058068078088098108118128138148158168178188198208218228238248258268278288298308318328338348358368378388398408418428438448458468478488498508518528538548558568578588598608618628638648658668678688698708718728738748758768778788798808818828838848858868878888898908918928938948958968978988999009019029039049059069079089099109119129139149159169179189199209219229239249259269279289299309319329339349359369379389399409419429439449459469479489499509519529539549559569579589599609619629639649659669679689699709719729739749759769779789799809819829839849859869879889899909919929939949959969979989991000100110021003100410051006100710081009101010111012101310141015101610171018101910201021102210231024102510261027102810291030103110321033103410351036103710381039104010411042104310441045104610471048104910501051105210531054105510561057105810591060106110621063106410651066106710681069107010711072107310741075107610771078107910801081108210831084108510861087108810891090109110921093109410951096109710981099110011011102110311041105110611071108110911101111111211131114111511161117111811191120112111221123112411251126112711281129113011311132113311341135113611371138113911401141114211431144114511461147114811491150115111521153115411551156115711581159116011611162116311641165116611671168116911701171117211731174117511761177117811791180118111821183118411851186118711881189119011911192119311941195119611971198119912001201120212031204120512061207120812091210121112121213121412151216121712181219122012211222122312241225122612271228122912301231123212331234123512361237123812391240124112421243124412451246124712481249125012511252125312541255125612571258125912601261126212631264126512661267126812691270127112721273127412751276127
71278127912801281128212831284128512861287128812891290129112921293129412951296129712981299130013011302130313041305130613071308130913101311131213131314131513161317131813191320132113221323132413251326132713281329133013311332133313341335133613371338133913401341134213431344134513461347134813491350135113521353135413551356135713581359136013611362136313641365136613671368136913701371137213731374137513761377137813791380138113821383138413851386138713881389139013911392139313941395139613971398139914001401140214031404140514061407140814091410141114121413141414151416141714181419142014211422142314241425142614271428142914301431143214331434143514361437143814391440144114421443144414451446144714481449145014511452145314541455145614571458145914601461146214631464146514661467146814691470147114721473147414751476147714781479148014811482148314841485148614871488148914901491149214931494149514961497149814991500150115021503150415051506150715081509151015111512151315141515151615171518151915201521152215231524152515261527152815291530153115321533153415351536153715381539154015411542154315441545154615471548154915501551155215531554155515561557155815591560156115621563156415651566156715681569157015711572157315741575157615771578157915801581158215831584158515861587158815891590159115921593159415951596159715981599160016011602160316041605160616071608160916101611161216131614161516161617161816191620162116221623162416251626162716281629163016311632163316341635163616371638163916401641164216431644164516461647164816491650165116521653165416551656165716581659166016611662166316641665166616671668166916701671167216731674167516761677167816791680168116821683168416851686168716881689169016911692169316941695169616971698169917001701170217031704170517061707170817091710171117121713171417151716171717181719172017211722172317241725172617271728172917301731173217331734173517361737173817391740174117421743174417451746174717481749175017511752175317541755175617571758175917601761176217631764176517661767176817691770177117721773177417751776177
717781779178017811782178317841785178617871788178917901791179217931794179517961797179817991800180118021803180418051806180718081809181018111812181318141815181618171818181918201821182218231824182518261827182818291830183118321833183418351836
  1. // SPDX-License-Identifier: GPL-2.0-or-later
  2. /*
  3. * Copyright(c) 1999 - 2004 Intel Corporation. All rights reserved.
  4. */
  5. #include <linux/skbuff.h>
  6. #include <linux/netdevice.h>
  7. #include <linux/etherdevice.h>
  8. #include <linux/pkt_sched.h>
  9. #include <linux/spinlock.h>
  10. #include <linux/slab.h>
  11. #include <linux/timer.h>
  12. #include <linux/ip.h>
  13. #include <linux/ipv6.h>
  14. #include <linux/if_arp.h>
  15. #include <linux/if_ether.h>
  16. #include <linux/if_bonding.h>
  17. #include <linux/if_vlan.h>
  18. #include <linux/in.h>
  19. #include <net/arp.h>
  20. #include <net/ipv6.h>
  21. #include <net/ndisc.h>
  22. #include <asm/byteorder.h>
  23. #include <net/bonding.h>
  24. #include <net/bond_alb.h>
/* IPv6 all-nodes multicast MAC (33:33:00:00:00:01); padded to 8 bytes so it
 * can be compared with the 64-bit ether-address helpers.
 */
static const u8 mac_v6_allmcast[ETH_ALEN + 2] __long_aligned = {
	0x33, 0x33, 0x00, 0x00, 0x00, 0x01
};

/* ALB monitor tick interval, in jiffies */
static const int alb_delta_in_ticks = HZ / ALB_TIMER_TICKS_PER_SEC;
#pragma pack(1)
/* Minimum-size (ETH_ZLEN) learning frame used to teach switches where a MAC
 * address lives; the payload is zero padding.
 */
struct learning_pkt {
	u8 mac_dst[ETH_ALEN];
	u8 mac_src[ETH_ALEN];
	__be16 type;
	u8 padding[ETH_ZLEN - ETH_HLEN];
};

/* On-the-wire Ethernet/IPv4 ARP packet layout (packed: no padding between
 * the 6-byte MACs and 4-byte IPs).
 */
struct arp_pkt {
	__be16 hw_addr_space;
	__be16 prot_addr_space;
	u8 hw_addr_len;
	u8 prot_addr_len;
	__be16 op_code;
	u8 mac_src[ETH_ALEN];	/* sender hardware address */
	__be32 ip_src;		/* sender IP address */
	u8 mac_dst[ETH_ALEN];	/* target hardware address */
	__be32 ip_dst;		/* target IP address */
};
#pragma pack()
/* Forward declarations */
static void alb_send_learning_packets(struct slave *slave, const u8 mac_addr[],
				      bool strict_match);
static void rlb_purge_src_ip(struct bonding *bond, struct arp_pkt *arp);
static void rlb_src_unlink(struct bonding *bond, u32 index);
static void rlb_src_link(struct bonding *bond, u32 ip_src_hash,
			 u32 ip_dst_hash);
  55. static inline u8 _simple_hash(const u8 *hash_start, int hash_size)
  56. {
  57. int i;
  58. u8 hash = 0;
  59. for (i = 0; i < hash_size; i++)
  60. hash ^= hash_start[i];
  61. return hash;
  62. }
  63. /*********************** tlb specific functions ***************************/
  64. static inline void tlb_init_table_entry(struct tlb_client_info *entry, int save_load)
  65. {
  66. if (save_load) {
  67. entry->load_history = 1 + entry->tx_bytes /
  68. BOND_TLB_REBALANCE_INTERVAL;
  69. entry->tx_bytes = 0;
  70. }
  71. entry->tx_slave = NULL;
  72. entry->next = TLB_NULL_INDEX;
  73. entry->prev = TLB_NULL_INDEX;
  74. }
  75. static inline void tlb_init_slave(struct slave *slave)
  76. {
  77. SLAVE_TLB_INFO(slave).load = 0;
  78. SLAVE_TLB_INFO(slave).head = TLB_NULL_INDEX;
  79. }
  80. static void __tlb_clear_slave(struct bonding *bond, struct slave *slave,
  81. int save_load)
  82. {
  83. struct tlb_client_info *tx_hash_table;
  84. u32 index;
  85. /* clear slave from tx_hashtbl */
  86. tx_hash_table = BOND_ALB_INFO(bond).tx_hashtbl;
  87. /* skip this if we've already freed the tx hash table */
  88. if (tx_hash_table) {
  89. index = SLAVE_TLB_INFO(slave).head;
  90. while (index != TLB_NULL_INDEX) {
  91. u32 next_index = tx_hash_table[index].next;
  92. tlb_init_table_entry(&tx_hash_table[index], save_load);
  93. index = next_index;
  94. }
  95. }
  96. tlb_init_slave(slave);
  97. }
  98. static void tlb_clear_slave(struct bonding *bond, struct slave *slave,
  99. int save_load)
  100. {
  101. spin_lock_bh(&bond->mode_lock);
  102. __tlb_clear_slave(bond, slave, save_load);
  103. spin_unlock_bh(&bond->mode_lock);
  104. }
  105. /* Must be called before starting the monitor timer */
  106. static int tlb_initialize(struct bonding *bond)
  107. {
  108. struct alb_bond_info *bond_info = &(BOND_ALB_INFO(bond));
  109. int size = TLB_HASH_TABLE_SIZE * sizeof(struct tlb_client_info);
  110. struct tlb_client_info *new_hashtbl;
  111. int i;
  112. new_hashtbl = kzalloc(size, GFP_KERNEL);
  113. if (!new_hashtbl)
  114. return -ENOMEM;
  115. spin_lock_bh(&bond->mode_lock);
  116. bond_info->tx_hashtbl = new_hashtbl;
  117. for (i = 0; i < TLB_HASH_TABLE_SIZE; i++)
  118. tlb_init_table_entry(&bond_info->tx_hashtbl[i], 0);
  119. spin_unlock_bh(&bond->mode_lock);
  120. return 0;
  121. }
  122. /* Must be called only after all slaves have been released */
  123. static void tlb_deinitialize(struct bonding *bond)
  124. {
  125. struct alb_bond_info *bond_info = &(BOND_ALB_INFO(bond));
  126. spin_lock_bh(&bond->mode_lock);
  127. kfree(bond_info->tx_hashtbl);
  128. bond_info->tx_hashtbl = NULL;
  129. spin_unlock_bh(&bond->mode_lock);
  130. }
  131. static long long compute_gap(struct slave *slave)
  132. {
  133. return (s64) (slave->speed << 20) - /* Convert to Megabit per sec */
  134. (s64) (SLAVE_TLB_INFO(slave).load << 3); /* Bytes to bits */
  135. }
  136. static struct slave *tlb_get_least_loaded_slave(struct bonding *bond)
  137. {
  138. struct slave *slave, *least_loaded;
  139. struct list_head *iter;
  140. long long max_gap;
  141. least_loaded = NULL;
  142. max_gap = LLONG_MIN;
  143. /* Find the slave with the largest gap */
  144. bond_for_each_slave_rcu(bond, slave, iter) {
  145. if (bond_slave_can_tx(slave)) {
  146. long long gap = compute_gap(slave);
  147. if (max_gap < gap) {
  148. least_loaded = slave;
  149. max_gap = gap;
  150. }
  151. }
  152. }
  153. return least_loaded;
  154. }
/* Map @hash_index to a tx slave.  On a miss, assign the least-loaded slave,
 * splice the entry onto the head of that slave's entry list, and charge the
 * entry's historic load to the slave.  Always accounts @skb_len to the entry
 * when a slave is available.  Caller must hold bond->mode_lock.
 */
static struct slave *__tlb_choose_channel(struct bonding *bond, u32 hash_index,
					  u32 skb_len)
{
	struct alb_bond_info *bond_info = &(BOND_ALB_INFO(bond));
	struct tlb_client_info *hash_table;
	struct slave *assigned_slave;

	hash_table = bond_info->tx_hashtbl;
	assigned_slave = hash_table[hash_index].tx_slave;
	if (!assigned_slave) {
		assigned_slave = tlb_get_least_loaded_slave(bond);

		if (assigned_slave) {
			struct tlb_slave_info *slave_info =
				&(SLAVE_TLB_INFO(assigned_slave));
			u32 next_index = slave_info->head;

			/* push the entry at the head of the slave's list */
			hash_table[hash_index].tx_slave = assigned_slave;
			hash_table[hash_index].next = next_index;
			hash_table[hash_index].prev = TLB_NULL_INDEX;

			if (next_index != TLB_NULL_INDEX)
				hash_table[next_index].prev = hash_index;

			slave_info->head = hash_index;
			/* the entry keeps its last-interval load history;
			 * charge it to the newly assigned slave
			 */
			slave_info->load +=
				hash_table[hash_index].load_history;
		}
	}

	if (assigned_slave)
		hash_table[hash_index].tx_bytes += skb_len;

	return assigned_slave;
}
  183. static struct slave *tlb_choose_channel(struct bonding *bond, u32 hash_index,
  184. u32 skb_len)
  185. {
  186. struct slave *tx_slave;
  187. /* We don't need to disable softirq here, because
  188. * tlb_choose_channel() is only called by bond_alb_xmit()
  189. * which already has softirq disabled.
  190. */
  191. spin_lock(&bond->mode_lock);
  192. tx_slave = __tlb_choose_channel(bond, hash_index, skb_len);
  193. spin_unlock(&bond->mode_lock);
  194. return tx_slave;
  195. }
  196. /*********************** rlb specific functions ***************************/
  197. /* when an ARP REPLY is received from a client update its info
  198. * in the rx_hashtbl
  199. */
  200. static void rlb_update_entry_from_arp(struct bonding *bond, struct arp_pkt *arp)
  201. {
  202. struct alb_bond_info *bond_info = &(BOND_ALB_INFO(bond));
  203. struct rlb_client_info *client_info;
  204. u32 hash_index;
  205. spin_lock_bh(&bond->mode_lock);
  206. hash_index = _simple_hash((u8 *)&(arp->ip_src), sizeof(arp->ip_src));
  207. client_info = &(bond_info->rx_hashtbl[hash_index]);
  208. if ((client_info->assigned) &&
  209. (client_info->ip_src == arp->ip_dst) &&
  210. (client_info->ip_dst == arp->ip_src) &&
  211. (!ether_addr_equal_64bits(client_info->mac_dst, arp->mac_src))) {
  212. /* update the clients MAC address */
  213. ether_addr_copy(client_info->mac_dst, arp->mac_src);
  214. client_info->ntt = 1;
  215. bond_info->rx_ntt = 1;
  216. }
  217. spin_unlock_bh(&bond->mode_lock);
  218. }
  219. static int rlb_arp_recv(const struct sk_buff *skb, struct bonding *bond,
  220. struct slave *slave)
  221. {
  222. struct arp_pkt *arp, _arp;
  223. if (skb->protocol != cpu_to_be16(ETH_P_ARP))
  224. goto out;
  225. arp = skb_header_pointer(skb, 0, sizeof(_arp), &_arp);
  226. if (!arp)
  227. goto out;
  228. /* We received an ARP from arp->ip_src.
  229. * We might have used this IP address previously (on the bonding host
  230. * itself or on a system that is bridged together with the bond).
  231. * However, if arp->mac_src is different than what is stored in
  232. * rx_hashtbl, some other host is now using the IP and we must prevent
  233. * sending out client updates with this IP address and the old MAC
  234. * address.
  235. * Clean up all hash table entries that have this address as ip_src but
  236. * have a different mac_src.
  237. */
  238. rlb_purge_src_ip(bond, arp);
  239. if (arp->op_code == htons(ARPOP_REPLY)) {
  240. /* update rx hash table for this ARP */
  241. rlb_update_entry_from_arp(bond, arp);
  242. slave_dbg(bond->dev, slave->dev, "Server received an ARP Reply from client\n");
  243. }
  244. out:
  245. return RX_HANDLER_ANOTHER;
  246. }
  247. /* Caller must hold rcu_read_lock() */
  248. static struct slave *__rlb_next_rx_slave(struct bonding *bond)
  249. {
  250. struct alb_bond_info *bond_info = &(BOND_ALB_INFO(bond));
  251. struct slave *before = NULL, *rx_slave = NULL, *slave;
  252. struct list_head *iter;
  253. bool found = false;
  254. bond_for_each_slave_rcu(bond, slave, iter) {
  255. if (!bond_slave_can_tx(slave))
  256. continue;
  257. if (!found) {
  258. if (!before || before->speed < slave->speed)
  259. before = slave;
  260. } else {
  261. if (!rx_slave || rx_slave->speed < slave->speed)
  262. rx_slave = slave;
  263. }
  264. if (slave == bond_info->rx_slave)
  265. found = true;
  266. }
  267. /* we didn't find anything after the current or we have something
  268. * better before and up to the current slave
  269. */
  270. if (!rx_slave || (before && rx_slave->speed < before->speed))
  271. rx_slave = before;
  272. if (rx_slave)
  273. bond_info->rx_slave = rx_slave;
  274. return rx_slave;
  275. }
  276. /* Caller must hold RTNL, rcu_read_lock is obtained only to silence checkers */
  277. static struct slave *rlb_next_rx_slave(struct bonding *bond)
  278. {
  279. struct slave *rx_slave;
  280. ASSERT_RTNL();
  281. rcu_read_lock();
  282. rx_slave = __rlb_next_rx_slave(bond);
  283. rcu_read_unlock();
  284. return rx_slave;
  285. }
  286. /* teach the switch the mac of a disabled slave
  287. * on the primary for fault tolerance
  288. *
  289. * Caller must hold RTNL
  290. */
  291. static void rlb_teach_disabled_mac_on_primary(struct bonding *bond,
  292. const u8 addr[])
  293. {
  294. struct slave *curr_active = rtnl_dereference(bond->curr_active_slave);
  295. if (!curr_active)
  296. return;
  297. if (!bond->alb_info.primary_is_promisc) {
  298. if (!dev_set_promiscuity(curr_active->dev, 1))
  299. bond->alb_info.primary_is_promisc = 1;
  300. else
  301. bond->alb_info.primary_is_promisc = 0;
  302. }
  303. bond->alb_info.rlb_promisc_timeout_counter = 0;
  304. alb_send_learning_packets(curr_active, addr, true);
  305. }
/* slave being removed should not be active at this point
 *
 * Walks the "used" list and reassigns every rx hash entry that points at
 * @slave to a freshly chosen rx slave (or NULL when none is available),
 * flagging affected clients for an ARP update.  Finally teaches the switch
 * the removed slave's MAC via the primary, unless the removed slave IS the
 * current active slave.
 *
 * Caller must hold rtnl.
 */
static void rlb_clear_slave(struct bonding *bond, struct slave *slave)
{
	struct alb_bond_info *bond_info = &(BOND_ALB_INFO(bond));
	struct rlb_client_info *rx_hash_table;
	u32 index, next_index;

	/* clear slave from rx_hashtbl */
	spin_lock_bh(&bond->mode_lock);

	rx_hash_table = bond_info->rx_hashtbl;
	index = bond_info->rx_hashtbl_used_head;
	for (; index != RLB_NULL_INDEX; index = next_index) {
		next_index = rx_hash_table[index].used_next;
		if (rx_hash_table[index].slave == slave) {
			struct slave *assigned_slave = rlb_next_rx_slave(bond);

			if (assigned_slave) {
				rx_hash_table[index].slave = assigned_slave;
				if (is_valid_ether_addr(rx_hash_table[index].mac_dst)) {
					bond_info->rx_hashtbl[index].ntt = 1;
					bond_info->rx_ntt = 1;
					/* A slave has been removed from the
					 * table because it is either disabled
					 * or being released. We must retry the
					 * update to avoid clients from not
					 * being updated & disconnecting when
					 * there is stress
					 */
					bond_info->rlb_update_retry_counter =
						RLB_UPDATE_RETRY;
				}
			} else {  /* there is no active slave */
				rx_hash_table[index].slave = NULL;
			}
		}
	}

	spin_unlock_bh(&bond->mode_lock);

	if (slave != rtnl_dereference(bond->curr_active_slave))
		rlb_teach_disabled_mac_on_primary(bond, slave->dev->dev_addr);
}
/* Send a burst of unsolicited ARP replies to one client so it re-learns
 * which slave MAC currently serves its IP.  No-op when the entry has no
 * slave or no valid client MAC yet (arp requests store a broadcast dst).
 */
static void rlb_update_client(struct rlb_client_info *client_info)
{
	int i;

	if (!client_info->slave || !is_valid_ether_addr(client_info->mac_dst))
		return;

	for (i = 0; i < RLB_ARP_BURST_SIZE; i++) {
		struct sk_buff *skb;

		/* reply claims ip_dst is at the assigned slave's MAC,
		 * addressed directly to the client's MAC
		 */
		skb = arp_create(ARPOP_REPLY, ETH_P_ARP,
				 client_info->ip_dst,
				 client_info->slave->dev,
				 client_info->ip_src,
				 client_info->mac_dst,
				 client_info->slave->dev->dev_addr,
				 client_info->mac_dst);

		if (!skb) {
			slave_err(client_info->slave->bond->dev,
				  client_info->slave->dev,
				  "failed to create an ARP packet\n");
			continue;
		}

		skb->dev = client_info->slave->dev;

		/* re-apply the client's VLAN tag when one was recorded */
		if (client_info->vlan_id) {
			__vlan_hwaccel_put_tag(skb, htons(ETH_P_8021Q),
					       client_info->vlan_id);
		}

		arp_xmit(skb);
	}
}
  375. /* sends ARP REPLIES that update the clients that need updating */
  376. static void rlb_update_rx_clients(struct bonding *bond)
  377. {
  378. struct alb_bond_info *bond_info = &(BOND_ALB_INFO(bond));
  379. struct rlb_client_info *client_info;
  380. u32 hash_index;
  381. spin_lock_bh(&bond->mode_lock);
  382. hash_index = bond_info->rx_hashtbl_used_head;
  383. for (; hash_index != RLB_NULL_INDEX;
  384. hash_index = client_info->used_next) {
  385. client_info = &(bond_info->rx_hashtbl[hash_index]);
  386. if (client_info->ntt) {
  387. rlb_update_client(client_info);
  388. if (bond_info->rlb_update_retry_counter == 0)
  389. client_info->ntt = 0;
  390. }
  391. }
  392. /* do not update the entries again until this counter is zero so that
  393. * not to confuse the clients.
  394. */
  395. bond_info->rlb_update_delay_counter = RLB_UPDATE_DELAY;
  396. spin_unlock_bh(&bond->mode_lock);
  397. }
  398. /* The slave was assigned a new mac address - update the clients */
  399. static void rlb_req_update_slave_clients(struct bonding *bond, struct slave *slave)
  400. {
  401. struct alb_bond_info *bond_info = &(BOND_ALB_INFO(bond));
  402. struct rlb_client_info *client_info;
  403. int ntt = 0;
  404. u32 hash_index;
  405. spin_lock_bh(&bond->mode_lock);
  406. hash_index = bond_info->rx_hashtbl_used_head;
  407. for (; hash_index != RLB_NULL_INDEX;
  408. hash_index = client_info->used_next) {
  409. client_info = &(bond_info->rx_hashtbl[hash_index]);
  410. if ((client_info->slave == slave) &&
  411. is_valid_ether_addr(client_info->mac_dst)) {
  412. client_info->ntt = 1;
  413. ntt = 1;
  414. }
  415. }
  416. /* update the team's flag only after the whole iteration */
  417. if (ntt) {
  418. bond_info->rx_ntt = 1;
  419. /* fasten the change */
  420. bond_info->rlb_update_retry_counter = RLB_UPDATE_RETRY;
  421. }
  422. spin_unlock_bh(&bond->mode_lock);
  423. }
  424. /* mark all clients using src_ip to be updated */
  425. static void rlb_req_update_subnet_clients(struct bonding *bond, __be32 src_ip)
  426. {
  427. struct alb_bond_info *bond_info = &(BOND_ALB_INFO(bond));
  428. struct rlb_client_info *client_info;
  429. u32 hash_index;
  430. spin_lock(&bond->mode_lock);
  431. hash_index = bond_info->rx_hashtbl_used_head;
  432. for (; hash_index != RLB_NULL_INDEX;
  433. hash_index = client_info->used_next) {
  434. client_info = &(bond_info->rx_hashtbl[hash_index]);
  435. if (!client_info->slave) {
  436. netdev_err(bond->dev, "found a client with no channel in the client's hash table\n");
  437. continue;
  438. }
  439. /* update all clients using this src_ip, that are not assigned
  440. * to the team's address (curr_active_slave) and have a known
  441. * unicast mac address.
  442. */
  443. if ((client_info->ip_src == src_ip) &&
  444. !ether_addr_equal_64bits(client_info->slave->dev->dev_addr,
  445. bond->dev->dev_addr) &&
  446. is_valid_ether_addr(client_info->mac_dst)) {
  447. client_info->ntt = 1;
  448. bond_info->rx_ntt = 1;
  449. }
  450. }
  451. spin_unlock(&bond->mode_lock);
  452. }
/* Select (and record) the rx slave for the client addressed by @arp.
 *
 * Looks up the rx hash entry keyed by hash(arp->ip_dst).  If the entry
 * already belongs to this client with a slave assigned, reuse it; if it
 * belongs to a different client, hand the old client over to the primary
 * first.  Otherwise (re)populate the entry with a newly chosen slave,
 * keep the src-hash cross-links consistent, and insert a fresh entry at
 * the head of the "used" list.  Returns the assigned slave or NULL.
 *
 * Runs under rcu (curr_active_slave deref); takes mode_lock with plain
 * spin_lock — callers are in the xmit path.
 */
static struct slave *rlb_choose_channel(struct sk_buff *skb,
					struct bonding *bond,
					const struct arp_pkt *arp)
{
	struct alb_bond_info *bond_info = &(BOND_ALB_INFO(bond));
	struct slave *assigned_slave, *curr_active_slave;
	struct rlb_client_info *client_info;
	u32 hash_index = 0;

	spin_lock(&bond->mode_lock);

	curr_active_slave = rcu_dereference(bond->curr_active_slave);

	hash_index = _simple_hash((u8 *)&arp->ip_dst, sizeof(arp->ip_dst));
	client_info = &(bond_info->rx_hashtbl[hash_index]);

	if (client_info->assigned) {
		if ((client_info->ip_src == arp->ip_src) &&
		    (client_info->ip_dst == arp->ip_dst)) {
			/* the entry is already assigned to this client */
			if (!is_broadcast_ether_addr(arp->mac_dst)) {
				/* update mac address from arp */
				ether_addr_copy(client_info->mac_dst, arp->mac_dst);
			}
			ether_addr_copy(client_info->mac_src, arp->mac_src);

			assigned_slave = client_info->slave;
			if (assigned_slave) {
				spin_unlock(&bond->mode_lock);
				return assigned_slave;
			}
		} else {
			/* the entry is already assigned to some other client,
			 * move the old client to primary (curr_active_slave) so
			 * that the new client can be assigned to this entry.
			 */
			if (curr_active_slave &&
			    client_info->slave != curr_active_slave) {
				client_info->slave = curr_active_slave;
				rlb_update_client(client_info);
			}
		}
	}
	/* assign a new slave */
	assigned_slave = __rlb_next_rx_slave(bond);

	if (assigned_slave) {
		if (!(client_info->assigned &&
		      client_info->ip_src == arp->ip_src)) {
			/* ip_src is going to be updated,
			 * fix the src hash list
			 */
			u32 hash_src = _simple_hash((u8 *)&arp->ip_src,
						    sizeof(arp->ip_src));
			rlb_src_unlink(bond, hash_index);
			rlb_src_link(bond, hash_src, hash_index);
		}

		client_info->ip_src = arp->ip_src;
		client_info->ip_dst = arp->ip_dst;
		/* arp->mac_dst is broadcast for arp requests.
		 * will be updated with clients actual unicast mac address
		 * upon receiving an arp reply.
		 */
		ether_addr_copy(client_info->mac_dst, arp->mac_dst);
		ether_addr_copy(client_info->mac_src, arp->mac_src);
		client_info->slave = assigned_slave;

		if (is_valid_ether_addr(client_info->mac_dst)) {
			client_info->ntt = 1;
			bond->alb_info.rx_ntt = 1;
		} else {
			client_info->ntt = 0;
		}

		if (vlan_get_tag(skb, &client_info->vlan_id))
			client_info->vlan_id = 0;

		if (!client_info->assigned) {
			/* insert at the head of the "used" list */
			u32 prev_tbl_head = bond_info->rx_hashtbl_used_head;

			bond_info->rx_hashtbl_used_head = hash_index;
			client_info->used_next = prev_tbl_head;
			if (prev_tbl_head != RLB_NULL_INDEX) {
				bond_info->rx_hashtbl[prev_tbl_head].used_prev =
					hash_index;
			}
			client_info->assigned = 1;
		}
	}

	spin_unlock(&bond->mode_lock);

	return assigned_slave;
}
/* chooses (and returns) transmit channel for arp reply
 * does not choose channel for other arp types since they are
 * sent on the curr_active_slave
 *
 * Returns NULL (caller transmits on the active slave) when the ARP does
 * not originate from the bond itself, when its source IP belongs to a
 * bridge master, or when the op code is neither REPLY nor REQUEST.
 */
static struct slave *rlb_arp_xmit(struct sk_buff *skb, struct bonding *bond)
{
	struct slave *tx_slave = NULL;
	struct net_device *dev;
	struct arp_pkt *arp;

	if (!pskb_network_may_pull(skb, sizeof(*arp)))
		return NULL;
	arp = (struct arp_pkt *)skb_network_header(skb);

	/* Don't modify or load balance ARPs that do not originate
	 * from the bond itself or a VLAN directly above the bond.
	 */
	if (!bond_slave_has_mac_rcu(bond, arp->mac_src))
		return NULL;

	/* don't load-balance when the source IP is owned by a bridge
	 * master on this host
	 */
	dev = ip_dev_find(dev_net(bond->dev), arp->ip_src);
	if (dev) {
		if (netif_is_bridge_master(dev)) {
			dev_put(dev);
			return NULL;
		}
		dev_put(dev);
	}

	if (arp->op_code == htons(ARPOP_REPLY)) {
		/* the arp must be sent on the selected rx channel */
		tx_slave = rlb_choose_channel(skb, bond, arp);
		if (tx_slave)
			bond_hw_addr_copy(arp->mac_src, tx_slave->dev->dev_addr,
					  tx_slave->dev->addr_len);
		netdev_dbg(bond->dev, "(slave %s): Server sent ARP Reply packet\n",
			   tx_slave ? tx_slave->dev->name : "NULL");
	} else if (arp->op_code == htons(ARPOP_REQUEST)) {
		/* Create an entry in the rx_hashtbl for this client as a
		 * place holder.
		 * When the arp reply is received the entry will be updated
		 * with the correct unicast address of the client.
		 */
		tx_slave = rlb_choose_channel(skb, bond, arp);

		/* The ARP reply packets must be delayed so that
		 * they can cancel out the influence of the ARP request.
		 */
		bond->alb_info.rlb_update_delay_counter = RLB_UPDATE_DELAY;

		/* arp requests are broadcast and are sent on the primary
		 * the arp request will collapse all clients on the subnet to
		 * the primary slave. We must register these clients to be
		 * updated with their assigned mac.
		 */
		rlb_req_update_subnet_clients(bond, arp->ip_src);
		netdev_dbg(bond->dev, "(slave %s): Server sent ARP Request packet\n",
			   tx_slave ? tx_slave->dev->name : "NULL");
	}

	return tx_slave;
}
  590. static void rlb_rebalance(struct bonding *bond)
  591. {
  592. struct alb_bond_info *bond_info = &(BOND_ALB_INFO(bond));
  593. struct slave *assigned_slave;
  594. struct rlb_client_info *client_info;
  595. int ntt;
  596. u32 hash_index;
  597. spin_lock_bh(&bond->mode_lock);
  598. ntt = 0;
  599. hash_index = bond_info->rx_hashtbl_used_head;
  600. for (; hash_index != RLB_NULL_INDEX;
  601. hash_index = client_info->used_next) {
  602. client_info = &(bond_info->rx_hashtbl[hash_index]);
  603. assigned_slave = __rlb_next_rx_slave(bond);
  604. if (assigned_slave && (client_info->slave != assigned_slave)) {
  605. client_info->slave = assigned_slave;
  606. if (!is_zero_ether_addr(client_info->mac_dst)) {
  607. client_info->ntt = 1;
  608. ntt = 1;
  609. }
  610. }
  611. }
  612. /* update the team's flag only after the whole iteration */
  613. if (ntt)
  614. bond_info->rx_ntt = 1;
  615. spin_unlock_bh(&bond->mode_lock);
  616. }
  617. /* Caller must hold mode_lock */
  618. static void rlb_init_table_entry_dst(struct rlb_client_info *entry)
  619. {
  620. entry->used_next = RLB_NULL_INDEX;
  621. entry->used_prev = RLB_NULL_INDEX;
  622. entry->assigned = 0;
  623. entry->slave = NULL;
  624. entry->vlan_id = 0;
  625. }
  626. static void rlb_init_table_entry_src(struct rlb_client_info *entry)
  627. {
  628. entry->src_first = RLB_NULL_INDEX;
  629. entry->src_prev = RLB_NULL_INDEX;
  630. entry->src_next = RLB_NULL_INDEX;
  631. }
  632. static void rlb_init_table_entry(struct rlb_client_info *entry)
  633. {
  634. memset(entry, 0, sizeof(struct rlb_client_info));
  635. rlb_init_table_entry_dst(entry);
  636. rlb_init_table_entry_src(entry);
  637. }
/* Unlink rx_hashtbl[index] from the "used" doubly linked list, fixing the
 * list head when the entry was at the front.  Caller must hold mode_lock.
 */
static void rlb_delete_table_entry_dst(struct bonding *bond, u32 index)
{
	struct alb_bond_info *bond_info = &(BOND_ALB_INFO(bond));
	u32 next_index = bond_info->rx_hashtbl[index].used_next;
	u32 prev_index = bond_info->rx_hashtbl[index].used_prev;

	if (index == bond_info->rx_hashtbl_used_head)
		bond_info->rx_hashtbl_used_head = next_index;

	if (prev_index != RLB_NULL_INDEX)
		bond_info->rx_hashtbl[prev_index].used_next = next_index;

	if (next_index != RLB_NULL_INDEX)
		bond_info->rx_hashtbl[next_index].used_prev = prev_index;
}
/* unlink a rlb hash table entry from the src list
 *
 * Note: for the head of a src list, src_prev names the BUCKET whose
 * src_first points at this entry (see rlb_src_link()), not a list node;
 * the src_first check below distinguishes the two cases.
 */
static void rlb_src_unlink(struct bonding *bond, u32 index)
{
	struct alb_bond_info *bond_info = &(BOND_ALB_INFO(bond));
	u32 next_index = bond_info->rx_hashtbl[index].src_next;
	u32 prev_index = bond_info->rx_hashtbl[index].src_prev;

	bond_info->rx_hashtbl[index].src_next = RLB_NULL_INDEX;
	bond_info->rx_hashtbl[index].src_prev = RLB_NULL_INDEX;

	if (next_index != RLB_NULL_INDEX)
		bond_info->rx_hashtbl[next_index].src_prev = prev_index;

	if (prev_index == RLB_NULL_INDEX)
		return;

	/* is prev_index pointing to the head of this list? */
	if (bond_info->rx_hashtbl[prev_index].src_first == index)
		bond_info->rx_hashtbl[prev_index].src_first = next_index;
	else
		bond_info->rx_hashtbl[prev_index].src_next = next_index;
}
/* Fully remove rx_hashtbl[index]: drop it from the "used" list, reset its
 * dst-side fields, and unlink it from its src list.
 * Caller must hold mode_lock.
 */
static void rlb_delete_table_entry(struct bonding *bond, u32 index)
{
	struct alb_bond_info *bond_info = &(BOND_ALB_INFO(bond));
	struct rlb_client_info *entry = &(bond_info->rx_hashtbl[index]);

	rlb_delete_table_entry_dst(bond, index);
	rlb_init_table_entry_dst(entry);

	rlb_src_unlink(bond, index);
}
/* add the rx_hashtbl[ip_dst_hash] entry to the list
 * of entries with identical ip_src_hash
 */
static void rlb_src_link(struct bonding *bond, u32 ip_src_hash, u32 ip_dst_hash)
{
	struct alb_bond_info *bond_info = &(BOND_ALB_INFO(bond));
	u32 next;

	/* insert at the head; the bucket at ip_src_hash anchors the list
	 * through its src_first field, so the new entry's src_prev points
	 * back at the bucket, not at a list node
	 */
	bond_info->rx_hashtbl[ip_dst_hash].src_prev = ip_src_hash;
	next = bond_info->rx_hashtbl[ip_src_hash].src_first;
	bond_info->rx_hashtbl[ip_dst_hash].src_next = next;
	if (next != RLB_NULL_INDEX)
		bond_info->rx_hashtbl[next].src_prev = ip_dst_hash;
	bond_info->rx_hashtbl[ip_src_hash].src_first = ip_dst_hash;
}
/* deletes all rx_hashtbl entries with arp->ip_src if their mac_src does
 * not match arp->mac_src
 */
static void rlb_purge_src_ip(struct bonding *bond, struct arp_pkt *arp)
{
	struct alb_bond_info *bond_info = &(BOND_ALB_INFO(bond));
	u32 ip_src_hash = _simple_hash((u8 *)&(arp->ip_src), sizeof(arp->ip_src));
	u32 index;

	spin_lock_bh(&bond->mode_lock);

	/* walk the src list anchored at the ip_src hash bucket */
	index = bond_info->rx_hashtbl[ip_src_hash].src_first;
	while (index != RLB_NULL_INDEX) {
		struct rlb_client_info *entry = &(bond_info->rx_hashtbl[index]);
		/* save the successor now: deleting an entry resets its links */
		u32 next_index = entry->src_next;

		/* hash collisions can put other src IPs on this list, so
		 * re-check ip_src before comparing the MAC
		 */
		if (entry->ip_src == arp->ip_src &&
		    !ether_addr_equal_64bits(arp->mac_src, entry->mac_src))
			rlb_delete_table_entry(bond, index);
		index = next_index;
	}
	spin_unlock_bh(&bond->mode_lock);
}
  710. static int rlb_initialize(struct bonding *bond)
  711. {
  712. struct alb_bond_info *bond_info = &(BOND_ALB_INFO(bond));
  713. struct rlb_client_info *new_hashtbl;
  714. int size = RLB_HASH_TABLE_SIZE * sizeof(struct rlb_client_info);
  715. int i;
  716. new_hashtbl = kmalloc(size, GFP_KERNEL);
  717. if (!new_hashtbl)
  718. return -1;
  719. spin_lock_bh(&bond->mode_lock);
  720. bond_info->rx_hashtbl = new_hashtbl;
  721. bond_info->rx_hashtbl_used_head = RLB_NULL_INDEX;
  722. for (i = 0; i < RLB_HASH_TABLE_SIZE; i++)
  723. rlb_init_table_entry(bond_info->rx_hashtbl + i);
  724. spin_unlock_bh(&bond->mode_lock);
  725. /* register to receive ARPs */
  726. bond->recv_probe = rlb_arp_recv;
  727. return 0;
  728. }
  729. static void rlb_deinitialize(struct bonding *bond)
  730. {
  731. struct alb_bond_info *bond_info = &(BOND_ALB_INFO(bond));
  732. spin_lock_bh(&bond->mode_lock);
  733. kfree(bond_info->rx_hashtbl);
  734. bond_info->rx_hashtbl = NULL;
  735. bond_info->rx_hashtbl_used_head = RLB_NULL_INDEX;
  736. spin_unlock_bh(&bond->mode_lock);
  737. }
  738. static void rlb_clear_vlan(struct bonding *bond, unsigned short vlan_id)
  739. {
  740. struct alb_bond_info *bond_info = &(BOND_ALB_INFO(bond));
  741. u32 curr_index;
  742. spin_lock_bh(&bond->mode_lock);
  743. curr_index = bond_info->rx_hashtbl_used_head;
  744. while (curr_index != RLB_NULL_INDEX) {
  745. struct rlb_client_info *curr = &(bond_info->rx_hashtbl[curr_index]);
  746. u32 next_index = bond_info->rx_hashtbl[curr_index].used_next;
  747. if (curr->vlan_id == vlan_id)
  748. rlb_delete_table_entry(bond, curr_index);
  749. curr_index = next_index;
  750. }
  751. spin_unlock_bh(&bond->mode_lock);
  752. }
  753. /*********************** tlb/rlb shared functions *********************/
/* Build and transmit one learning packet on @slave with @mac_addr as both
 * source and destination, optionally VLAN-tagged with @vlan_proto/@vid.
 * Best effort: silently returns when the skb cannot be allocated.
 */
static void alb_send_lp_vid(struct slave *slave, const u8 mac_addr[],
			    __be16 vlan_proto, u16 vid)
{
	struct learning_pkt pkt;
	struct sk_buff *skb;
	int size = sizeof(struct learning_pkt);

	memset(&pkt, 0, size);
	ether_addr_copy(pkt.mac_dst, mac_addr);
	ether_addr_copy(pkt.mac_src, mac_addr);
	/* loopback ethertype: the frame only exists to teach the switch */
	pkt.type = cpu_to_be16(ETH_P_LOOPBACK);

	skb = dev_alloc_skb(size);
	if (!skb)
		return;

	skb_put_data(skb, &pkt, size);

	skb_reset_mac_header(skb);
	skb->network_header = skb->mac_header + ETH_HLEN;
	skb->protocol = pkt.type;
	skb->priority = TC_PRIO_CONTROL;
	skb->dev = slave->dev;

	slave_dbg(slave->bond->dev, slave->dev,
		  "Send learning packet: mac %pM vlan %d\n", mac_addr, vid);

	if (vid)
		__vlan_hwaccel_put_tag(skb, vlan_proto, vid);

	dev_queue_xmit(skb);
}
/* Context handed to alb_upper_dev_walk() through netdev_nested_priv. */
struct alb_walk_data {
	struct bonding *bond;
	struct slave *slave;	/* slave to send learning packets on */
	const u8 *mac_addr;	/* MAC address to advertise */
	bool strict_match;	/* suppress macvlan updates when true */
};
/* netdev_walk_all_upper_dev_rcu() callback: emit learning packets for
 * VLAN and macvlan devices stacked above the bond.  Always returns 0 so
 * the walk visits every upper device.
 */
static int alb_upper_dev_walk(struct net_device *upper,
			      struct netdev_nested_priv *priv)
{
	struct alb_walk_data *data = (struct alb_walk_data *)priv->data;
	bool strict_match = data->strict_match;
	const u8 *mac_addr = data->mac_addr;
	struct bonding *bond = data->bond;
	struct slave *slave = data->slave;
	struct bond_vlan_tag *tags;

	/* only VLANs stacked directly on the bond device (one level up) */
	if (is_vlan_dev(upper) &&
	    bond->dev->lower_level == upper->lower_level - 1) {
		if (upper->addr_assign_type == NET_ADDR_STOLEN) {
			/* VLAN address was taken from the lower device,
			 * so advertise the caller-supplied mac
			 */
			alb_send_lp_vid(slave, mac_addr,
					vlan_dev_vlan_proto(upper),
					vlan_dev_vlan_id(upper));
		} else {
			alb_send_lp_vid(slave, upper->dev_addr,
					vlan_dev_vlan_proto(upper),
					vlan_dev_vlan_id(upper));
		}
	}

	/* If this is a macvlan device, then only send updates
	 * when strict_match is turned off.
	 */
	if (netif_is_macvlan(upper) && !strict_match) {
		tags = bond_verify_device_path(bond->dev, upper, 0);
		if (IS_ERR_OR_NULL(tags))
			BUG();
		alb_send_lp_vid(slave, upper->dev_addr,
				tags[0].vlan_proto, tags[0].vlan_id);
		kfree(tags);
	}

	return 0;
}
  819. static void alb_send_learning_packets(struct slave *slave, const u8 mac_addr[],
  820. bool strict_match)
  821. {
  822. struct bonding *bond = bond_get_bond_by_slave(slave);
  823. struct netdev_nested_priv priv;
  824. struct alb_walk_data data = {
  825. .strict_match = strict_match,
  826. .mac_addr = mac_addr,
  827. .slave = slave,
  828. .bond = bond,
  829. };
  830. priv.data = (void *)&data;
  831. /* send untagged */
  832. alb_send_lp_vid(slave, mac_addr, 0, 0);
  833. /* loop through all devices and see if we need to send a packet
  834. * for that device.
  835. */
  836. rcu_read_lock();
  837. netdev_walk_all_upper_dev_rcu(bond->dev, alb_upper_dev_walk, &priv);
  838. rcu_read_unlock();
  839. }
/* Set @slave's hardware address to @addr (@len bytes long).
 *
 * In TLB mode only the net_device's copy of the address is changed; in
 * RLB mode the address is pushed down to the driver as well, because each
 * slave must own a unique MAC to receive its share of the traffic.
 *
 * Returns 0 on success, -EOPNOTSUPP if the driver rejects the change.
 */
static int alb_set_slave_mac_addr(struct slave *slave, const u8 addr[],
				  unsigned int len)
{
	struct net_device *dev = slave->dev;
	struct sockaddr_storage ss;

	if (BOND_MODE(slave->bond) == BOND_MODE_TLB) {
		__dev_addr_set(dev, addr, len);
		return 0;
	}

	/* for rlb each slave must have a unique hw mac addresses so that
	 * each slave will receive packets destined to a different mac
	 */
	memcpy(ss.__data, addr, len);
	ss.ss_family = dev->type;
	if (dev_set_mac_address(dev, (struct sockaddr *)&ss, NULL)) {
		slave_err(slave->bond->dev, dev, "dev_set_mac_address on slave failed! ALB mode requires that the base driver support setting the hw address also when the network device's interface is open\n");
		return -EOPNOTSUPP;
	}
	return 0;
}
/* Swap MAC addresses between two slaves.
 *
 * Called with RTNL held, and no other locks.
 */
static void alb_swap_mac_addr(struct slave *slave1, struct slave *slave2)
{
	u8 tmp_mac_addr[MAX_ADDR_LEN];

	/* stash slave1's address, then cross-assign; tmp_mac_addr holds
	 * slave1's address so the final copy uses slave1's addr_len
	 */
	bond_hw_addr_copy(tmp_mac_addr, slave1->dev->dev_addr,
			  slave1->dev->addr_len);
	alb_set_slave_mac_addr(slave1, slave2->dev->dev_addr,
			       slave2->dev->addr_len);
	alb_set_slave_mac_addr(slave2, tmp_mac_addr,
			       slave1->dev->addr_len);
}
/* Send learning packets after MAC address swap.
 *
 * Called with RTNL and no other locks
 */
static void alb_fasten_mac_swap(struct bonding *bond, struct slave *slave1,
				struct slave *slave2)
{
	/* true when exactly one of the two slaves can transmit */
	int slaves_state_differ = (bond_slave_can_tx(slave1) != bond_slave_can_tx(slave2));
	struct slave *disabled_slave = NULL;

	ASSERT_RTNL();

	/* fasten the change in the switch */
	if (bond_slave_can_tx(slave1)) {
		alb_send_learning_packets(slave1, slave1->dev->dev_addr, false);
		if (bond->alb_info.rlb_enabled) {
			/* inform the clients that the mac address
			 * has changed
			 */
			rlb_req_update_slave_clients(bond, slave1);
		}
	} else {
		disabled_slave = slave1;
	}

	if (bond_slave_can_tx(slave2)) {
		alb_send_learning_packets(slave2, slave2->dev->dev_addr, false);
		if (bond->alb_info.rlb_enabled) {
			/* inform the clients that the mac address
			 * has changed
			 */
			rlb_req_update_slave_clients(bond, slave2);
		}
	} else {
		disabled_slave = slave2;
	}

	/* only meaningful when exactly one slave is down, in which case
	 * disabled_slave identifies it
	 */
	if (bond->alb_info.rlb_enabled && slaves_state_differ) {
		/* A disabled slave was assigned an active mac addr */
		rlb_teach_disabled_mac_on_primary(bond,
						  disabled_slave->dev->dev_addr);
	}
}
/**
 * alb_change_hw_addr_on_detach
 * @bond: bonding we're working on
 * @slave: the slave that was just detached
 *
 * We assume that @slave was already detached from the slave list.
 *
 * If @slave's permanent hw address is different both from its current
 * address and from @bond's address, then somewhere in the bond there's
 * a slave that has @slave's permanent address as its current address.
 * We'll make sure that slave no longer uses @slave's permanent address.
 *
 * Caller must hold RTNL and no other locks
 */
static void alb_change_hw_addr_on_detach(struct bonding *bond, struct slave *slave)
{
	int perm_curr_diff;
	int perm_bond_diff;
	struct slave *found_slave;

	perm_curr_diff = !ether_addr_equal_64bits(slave->perm_hwaddr,
						  slave->dev->dev_addr);
	perm_bond_diff = !ether_addr_equal_64bits(slave->perm_hwaddr,
						  bond->dev->dev_addr);

	if (perm_curr_diff && perm_bond_diff) {
		found_slave = bond_slave_has_mac(bond, slave->perm_hwaddr);

		if (found_slave) {
			/* give the borrowed address back to its owner */
			alb_swap_mac_addr(slave, found_slave);
			alb_fasten_mac_swap(bond, slave, found_slave);
		}
	}
}
/**
 * alb_handle_addr_collision_on_attach
 * @bond: bonding we're working on
 * @slave: the slave that was just attached
 *
 * checks uniqueness of slave's mac address and handles the case the
 * new slave uses the bonds mac address.
 *
 * If the permanent hw address of @slave is @bond's hw address, we need to
 * find a different hw address to give @slave, that isn't in use by any other
 * slave in the bond. This address must be, of course, one of the permanent
 * addresses of the other slaves.
 *
 * We go over the slave list, and for each slave there we compare its
 * permanent hw address with the current address of all the other slaves.
 * If no match was found, then we've found a slave with a permanent address
 * that isn't used by any other slave in the bond, so we can assign it to
 * @slave.
 *
 * assumption: this function is called before @slave is attached to the
 * bond slave list.
 */
static int alb_handle_addr_collision_on_attach(struct bonding *bond, struct slave *slave)
{
	struct slave *has_bond_addr = rcu_access_pointer(bond->curr_active_slave);
	struct slave *tmp_slave1, *free_mac_slave = NULL;
	struct list_head *iter;

	if (!bond_has_slaves(bond)) {
		/* this is the first slave */
		return 0;
	}

	/* if slave's mac address differs from bond's mac address
	 * check uniqueness of slave's mac address against the other
	 * slaves in the bond.
	 */
	if (!ether_addr_equal_64bits(slave->perm_hwaddr, bond->dev->dev_addr)) {
		if (!bond_slave_has_mac(bond, slave->dev->dev_addr))
			return 0;

		/* Try setting slave mac to bond address and fall-through
		 * to code handling that situation below...
		 */
		alb_set_slave_mac_addr(slave, bond->dev->dev_addr,
				       bond->dev->addr_len);
	}

	/* The slave's address is equal to the address of the bond.
	 * Search for a spare address in the bond for this slave.
	 */
	bond_for_each_slave(bond, tmp_slave1, iter) {
		if (!bond_slave_has_mac(bond, tmp_slave1->perm_hwaddr)) {
			/* no slave has tmp_slave1's perm addr
			 * as its curr addr
			 */
			free_mac_slave = tmp_slave1;
			break;
		}

		if (!has_bond_addr) {
			if (ether_addr_equal_64bits(tmp_slave1->dev->dev_addr,
						    bond->dev->dev_addr)) {
				has_bond_addr = tmp_slave1;
			}
		}
	}

	if (free_mac_slave) {
		alb_set_slave_mac_addr(slave, free_mac_slave->perm_hwaddr,
				       free_mac_slave->dev->addr_len);

		slave_warn(bond->dev, slave->dev, "the slave hw address is in use by the bond; giving it the hw address of %s\n",
			   free_mac_slave->dev->name);

	} else if (has_bond_addr) {
		/* no spare permanent address available: two slaves would
		 * otherwise share the bond's address
		 */
		slave_err(bond->dev, slave->dev, "the slave hw address is in use by the bond; couldn't find a slave with a free hw address to give it (this should not have happened)\n");
		return -EFAULT;
	}

	return 0;
}
/**
 * alb_set_mac_address
 * @bond: bonding we're working on
 * @addr: MAC address to set
 *
 * In TLB mode all slaves are configured to the bond's hw address, but set
 * their dev_addr field to different addresses (based on their permanent hw
 * addresses).
 *
 * For each slave, this function sets the interface to the new address and then
 * changes its dev_addr field to its previous value.
 *
 * Unwinding assumes bond's mac address has not yet changed.
 */
static int alb_set_mac_address(struct bonding *bond, void *addr)
{
	struct slave *slave, *rollback_slave;
	struct list_head *iter;
	struct sockaddr_storage ss;
	char tmp_addr[MAX_ADDR_LEN];
	int res;

	/* in RLB mode slaves keep their own unique addresses */
	if (bond->alb_info.rlb_enabled)
		return 0;

	bond_for_each_slave(bond, slave, iter) {
		/* save net_device's current hw address */
		bond_hw_addr_copy(tmp_addr, slave->dev->dev_addr,
				  slave->dev->addr_len);

		res = dev_set_mac_address(slave->dev, addr, NULL);

		/* restore net_device's hw address */
		dev_addr_set(slave->dev, tmp_addr);

		if (res)
			goto unwind;
	}

	return 0;

unwind:
	/* rollback uses the bond's (still unchanged) address */
	memcpy(ss.__data, bond->dev->dev_addr, bond->dev->addr_len);
	ss.ss_family = bond->dev->type;

	/* unwind from head to the slave that failed */
	bond_for_each_slave(bond, rollback_slave, iter) {
		if (rollback_slave == slave)
			break;

		/* same save/set/restore dance as in the forward pass */
		bond_hw_addr_copy(tmp_addr, rollback_slave->dev->dev_addr,
				  rollback_slave->dev->addr_len);
		dev_set_mac_address(rollback_slave->dev,
				    (struct sockaddr *)&ss, NULL);
		dev_addr_set(rollback_slave->dev, tmp_addr);
	}

	return res;
}
  1066. /* determine if the packet is NA or NS */
  1067. static bool alb_determine_nd(struct sk_buff *skb, struct bonding *bond)
  1068. {
  1069. struct ipv6hdr *ip6hdr;
  1070. struct icmp6hdr *hdr;
  1071. if (!pskb_network_may_pull(skb, sizeof(*ip6hdr)))
  1072. return true;
  1073. ip6hdr = ipv6_hdr(skb);
  1074. if (ip6hdr->nexthdr != IPPROTO_ICMPV6)
  1075. return false;
  1076. if (!pskb_network_may_pull(skb, sizeof(*ip6hdr) + sizeof(*hdr)))
  1077. return true;
  1078. hdr = icmp6_hdr(skb);
  1079. return hdr->icmp6_type == NDISC_NEIGHBOUR_ADVERTISEMENT ||
  1080. hdr->icmp6_type == NDISC_NEIGHBOUR_SOLICITATION;
  1081. }
  1082. /************************ exported alb functions ************************/
  1083. int bond_alb_initialize(struct bonding *bond, int rlb_enabled)
  1084. {
  1085. int res;
  1086. res = tlb_initialize(bond);
  1087. if (res)
  1088. return res;
  1089. if (rlb_enabled) {
  1090. res = rlb_initialize(bond);
  1091. if (res) {
  1092. tlb_deinitialize(bond);
  1093. return res;
  1094. }
  1095. bond->alb_info.rlb_enabled = 1;
  1096. } else {
  1097. bond->alb_info.rlb_enabled = 0;
  1098. }
  1099. return 0;
  1100. }
  1101. void bond_alb_deinitialize(struct bonding *bond)
  1102. {
  1103. struct alb_bond_info *bond_info = &(BOND_ALB_INFO(bond));
  1104. tlb_deinitialize(bond);
  1105. if (bond_info->rlb_enabled)
  1106. rlb_deinitialize(bond);
  1107. }
/* Transmit @skb on @tx_slave; a NULL @tx_slave falls back to the current
 * active slave.  Frames sent on a non-active slave get that slave's MAC as
 * source so the switch learns the right port.  Drops the frame when no
 * usable slave exists.
 */
static netdev_tx_t bond_do_alb_xmit(struct sk_buff *skb, struct bonding *bond,
				    struct slave *tx_slave)
{
	struct alb_bond_info *bond_info = &(BOND_ALB_INFO(bond));
	struct ethhdr *eth_data = eth_hdr(skb);

	if (!tx_slave) {
		/* unbalanced or unassigned, send through primary */
		tx_slave = rcu_dereference(bond->curr_active_slave);
		if (bond->params.tlb_dynamic_lb)
			bond_info->unbalanced_load += skb->len;
	}

	if (tx_slave && bond_slave_can_tx(tx_slave)) {
		if (tx_slave != rcu_access_pointer(bond->curr_active_slave)) {
			ether_addr_copy(eth_data->h_source,
					tx_slave->dev->dev_addr);
		}

		return bond_dev_queue_xmit(bond, skb, tx_slave->dev);
	}

	if (tx_slave && bond->params.tlb_dynamic_lb) {
		/* chosen slave can't tx: drop its hash bucket assignments */
		spin_lock(&bond->mode_lock);
		__tlb_clear_slave(bond, tx_slave, 0);
		spin_unlock(&bond->mode_lock);
	}

	/* no suitable interface, frame not sent */
	return bond_tx_drop(bond->dev, skb);
}
/* Pick the TLB transmit slave for @skb, or NULL when the frame should go
 * through the primary (multicast/broadcast and non-IP traffic are never
 * balanced).
 */
struct slave *bond_xmit_tlb_slave_get(struct bonding *bond,
				      struct sk_buff *skb)
{
	struct slave *tx_slave = NULL;
	struct ethhdr *eth_data;
	u32 hash_index;

	skb_reset_mac_header(skb);
	eth_data = eth_hdr(skb);

	/* Do not TX balance any multicast or broadcast */
	if (!is_multicast_ether_addr(eth_data->h_dest)) {
		switch (skb->protocol) {
		case htons(ETH_P_IPV6):
			/* ND frames must go out the primary so neighbour
			 * caches see a consistent MAC
			 */
			if (alb_determine_nd(skb, bond))
				break;
			fallthrough;
		case htons(ETH_P_IP):
			hash_index = bond_xmit_hash(bond, skb);
			if (bond->params.tlb_dynamic_lb) {
				tx_slave = tlb_choose_channel(bond,
							      hash_index & 0xFF,
							      skb->len);
			} else {
				/* static mode: hash straight into the
				 * usable-slaves array
				 */
				struct bond_up_slave *slaves;
				unsigned int count;

				slaves = rcu_dereference(bond->usable_slaves);
				count = slaves ? READ_ONCE(slaves->count) : 0;
				if (likely(count))
					tx_slave = slaves->arr[hash_index %
							       count];
			}
			break;
		}
	}
	return tx_slave;
}
  1169. netdev_tx_t bond_tlb_xmit(struct sk_buff *skb, struct net_device *bond_dev)
  1170. {
  1171. struct bonding *bond = netdev_priv(bond_dev);
  1172. struct slave *tx_slave;
  1173. tx_slave = bond_xmit_tlb_slave_get(bond, skb);
  1174. return bond_do_alb_xmit(skb, bond, tx_slave);
  1175. }
/* Pick the ALB transmit slave for @skb.  Broadcast-like traffic (ethernet
 * broadcast, IPv4 broadcast/IGMP, IPv6 all-nodes, ND, DAD probes) and
 * unknown protocols are not balanced; ARP is handed to the RLB engine when
 * enabled.  Returns NULL when the frame should go out the primary.
 */
struct slave *bond_xmit_alb_slave_get(struct bonding *bond,
				      struct sk_buff *skb)
{
	struct alb_bond_info *bond_info = &(BOND_ALB_INFO(bond));
	static const __be32 ip_bcast = htonl(0xffffffff);
	struct slave *tx_slave = NULL;
	const u8 *hash_start = NULL;
	bool do_tx_balance = true;
	struct ethhdr *eth_data;
	u32 hash_index = 0;
	int hash_size = 0;

	skb_reset_mac_header(skb);
	eth_data = eth_hdr(skb);

	switch (ntohs(skb->protocol)) {
	case ETH_P_IP: {
		const struct iphdr *iph;

		if (is_broadcast_ether_addr(eth_data->h_dest) ||
		    !pskb_network_may_pull(skb, sizeof(*iph))) {
			do_tx_balance = false;
			break;
		}
		iph = ip_hdr(skb);
		if (iph->daddr == ip_bcast || iph->protocol == IPPROTO_IGMP) {
			do_tx_balance = false;
			break;
		}
		/* balance on the destination IP */
		hash_start = (char *)&(iph->daddr);
		hash_size = sizeof(iph->daddr);
		break;
	}
	case ETH_P_IPV6: {
		const struct ipv6hdr *ip6hdr;

		/* IPv6 doesn't really use broadcast mac address, but leave
		 * that here just in case.
		 */
		if (is_broadcast_ether_addr(eth_data->h_dest)) {
			do_tx_balance = false;
			break;
		}

		/* IPv6 uses all-nodes multicast as an equivalent to
		 * broadcasts in IPv4.
		 */
		if (ether_addr_equal_64bits(eth_data->h_dest, mac_v6_allmcast)) {
			do_tx_balance = false;
			break;
		}

		if (alb_determine_nd(skb, bond)) {
			do_tx_balance = false;
			break;
		}

		/* The IPv6 header is pulled by alb_determine_nd */
		/* Additionally, DAD probes should not be tx-balanced as that
		 * will lead to false positives for duplicate addresses and
		 * prevent address configuration from working.
		 */
		ip6hdr = ipv6_hdr(skb);
		if (ipv6_addr_any(&ip6hdr->saddr)) {
			do_tx_balance = false;
			break;
		}

		/* balance on the destination IPv6 address */
		hash_start = (char *)&ip6hdr->daddr;
		hash_size = sizeof(ip6hdr->daddr);
		break;
	}
	case ETH_P_ARP:
		do_tx_balance = false;
		if (bond_info->rlb_enabled)
			tx_slave = rlb_arp_xmit(skb, bond);
		break;
	default:
		do_tx_balance = false;
		break;
	}

	if (do_tx_balance) {
		if (bond->params.tlb_dynamic_lb) {
			hash_index = _simple_hash(hash_start, hash_size);
			tx_slave = tlb_choose_channel(bond, hash_index, skb->len);
		} else {
			/*
			 * do_tx_balance means we are free to select the tx_slave
			 * So we do exactly what tlb would do for hash selection
			 */

			struct bond_up_slave *slaves;
			unsigned int count;

			slaves = rcu_dereference(bond->usable_slaves);
			count = slaves ? READ_ONCE(slaves->count) : 0;
			if (likely(count))
				tx_slave = slaves->arr[bond_xmit_hash(bond, skb) %
						       count];
		}
	}
	return tx_slave;
}
  1269. netdev_tx_t bond_alb_xmit(struct sk_buff *skb, struct net_device *bond_dev)
  1270. {
  1271. struct bonding *bond = netdev_priv(bond_dev);
  1272. struct slave *tx_slave = NULL;
  1273. tx_slave = bond_xmit_alb_slave_get(bond, skb);
  1274. return bond_do_alb_xmit(skb, bond, tx_slave);
  1275. }
/* Periodic ALB worker: sends learning packets, rebalances tx traffic, and
 * drives the RLB state machine (promiscuity timeout, rebalance, client
 * updates).  Re-arms itself every alb_delta_in_ticks.
 */
void bond_alb_monitor(struct work_struct *work)
{
	struct bonding *bond = container_of(work, struct bonding,
					    alb_work.work);
	struct alb_bond_info *bond_info = &(BOND_ALB_INFO(bond));
	struct list_head *iter;
	struct slave *slave;

	if (!bond_has_slaves(bond)) {
		/* nothing to do; reset counters and wait for slaves */
		atomic_set(&bond_info->tx_rebalance_counter, 0);
		bond_info->lp_counter = 0;
		goto re_arm;
	}

	rcu_read_lock();

	atomic_inc(&bond_info->tx_rebalance_counter);
	bond_info->lp_counter++;

	/* send learning packets */
	if (bond_info->lp_counter >= BOND_ALB_LP_TICKS(bond)) {
		bool strict_match;

		bond_for_each_slave_rcu(bond, slave, iter) {
			/* If updating current_active, use all currently
			 * user mac addresses (!strict_match).  Otherwise, only
			 * use mac of the slave device.
			 * In RLB mode, we always use strict matches.
			 */
			strict_match = (slave != rcu_access_pointer(bond->curr_active_slave) ||
					bond_info->rlb_enabled);
			alb_send_learning_packets(slave, slave->dev->dev_addr,
						  strict_match);
		}
		bond_info->lp_counter = 0;
	}

	/* rebalance tx traffic */
	if (atomic_read(&bond_info->tx_rebalance_counter) >= BOND_TLB_REBALANCE_TICKS) {
		bond_for_each_slave_rcu(bond, slave, iter) {
			tlb_clear_slave(bond, slave, 1);
			if (slave == rcu_access_pointer(bond->curr_active_slave)) {
				SLAVE_TLB_INFO(slave).load =
					bond_info->unbalanced_load /
						BOND_TLB_REBALANCE_INTERVAL;
				bond_info->unbalanced_load = 0;
			}
		}
		atomic_set(&bond_info->tx_rebalance_counter, 0);
	}

	if (bond_info->rlb_enabled) {
		if (bond_info->primary_is_promisc &&
		    (++bond_info->rlb_promisc_timeout_counter >= RLB_PROMISC_TIMEOUT)) {
			/* dev_set_promiscuity requires rtnl and
			 * nothing else. Avoid race with bond_close.
			 */
			rcu_read_unlock();
			if (!rtnl_trylock())
				goto re_arm;

			bond_info->rlb_promisc_timeout_counter = 0;

			/* If the primary was set to promiscuous mode
			 * because a slave was disabled then
			 * it can now leave promiscuous mode.
			 */
			dev_set_promiscuity(rtnl_dereference(bond->curr_active_slave)->dev,
					    -1);
			bond_info->primary_is_promisc = 0;

			rtnl_unlock();
			/* re-enter the RCU section dropped above */
			rcu_read_lock();
		}

		if (bond_info->rlb_rebalance) {
			bond_info->rlb_rebalance = 0;
			rlb_rebalance(bond);
		}

		/* check if clients need updating */
		if (bond_info->rx_ntt) {
			if (bond_info->rlb_update_delay_counter) {
				--bond_info->rlb_update_delay_counter;
			} else {
				rlb_update_rx_clients(bond);
				if (bond_info->rlb_update_retry_counter)
					--bond_info->rlb_update_retry_counter;
				else
					bond_info->rx_ntt = 0;
			}
		}
	}
	rcu_read_unlock();
re_arm:
	queue_delayed_work(bond->wq, &bond->alb_work, alb_delta_in_ticks);
}
  1361. /* assumption: called before the slave is attached to the bond
  1362. * and not locked by the bond lock
  1363. */
  1364. int bond_alb_init_slave(struct bonding *bond, struct slave *slave)
  1365. {
  1366. int res;
  1367. res = alb_set_slave_mac_addr(slave, slave->perm_hwaddr,
  1368. slave->dev->addr_len);
  1369. if (res)
  1370. return res;
  1371. res = alb_handle_addr_collision_on_attach(bond, slave);
  1372. if (res)
  1373. return res;
  1374. tlb_init_slave(slave);
  1375. /* order a rebalance ASAP */
  1376. atomic_set(&bond->alb_info.tx_rebalance_counter,
  1377. BOND_TLB_REBALANCE_TICKS);
  1378. if (bond->alb_info.rlb_enabled)
  1379. bond->alb_info.rlb_rebalance = 1;
  1380. return 0;
  1381. }
  1382. /* Remove slave from tlb and rlb hash tables, and fix up MAC addresses
  1383. * if necessary.
  1384. *
  1385. * Caller must hold RTNL and no other locks
  1386. */
  1387. void bond_alb_deinit_slave(struct bonding *bond, struct slave *slave)
  1388. {
  1389. if (bond_has_slaves(bond))
  1390. alb_change_hw_addr_on_detach(bond, slave);
  1391. tlb_clear_slave(bond, slave, 0);
  1392. if (bond->alb_info.rlb_enabled) {
  1393. bond->alb_info.rx_slave = NULL;
  1394. rlb_clear_slave(bond, slave);
  1395. }
  1396. }
  1397. void bond_alb_handle_link_change(struct bonding *bond, struct slave *slave, char link)
  1398. {
  1399. struct alb_bond_info *bond_info = &(BOND_ALB_INFO(bond));
  1400. if (link == BOND_LINK_DOWN) {
  1401. tlb_clear_slave(bond, slave, 0);
  1402. if (bond->alb_info.rlb_enabled)
  1403. rlb_clear_slave(bond, slave);
  1404. } else if (link == BOND_LINK_UP) {
  1405. /* order a rebalance ASAP */
  1406. atomic_set(&bond_info->tx_rebalance_counter,
  1407. BOND_TLB_REBALANCE_TICKS);
  1408. if (bond->alb_info.rlb_enabled) {
  1409. bond->alb_info.rlb_rebalance = 1;
  1410. /* If the updelay module parameter is smaller than the
  1411. * forwarding delay of the switch the rebalance will
  1412. * not work because the rebalance arp replies will
  1413. * not be forwarded to the clients..
  1414. */
  1415. }
  1416. }
  1417. if (bond_is_nondyn_tlb(bond)) {
  1418. if (bond_update_slave_arr(bond, NULL))
  1419. pr_err("Failed to build slave-array for TLB mode.\n");
  1420. }
  1421. }
/**
 * bond_alb_handle_active_change - assign new curr_active_slave
 * @bond: our bonding struct
 * @new_slave: new slave to assign
 *
 * Set the bond->curr_active_slave to @new_slave and handle
 * mac address swapping and promiscuity changes as needed.
 *
 * Caller must hold RTNL
 */
void bond_alb_handle_active_change(struct bonding *bond, struct slave *new_slave)
{
	struct slave *swap_slave;
	struct slave *curr_active;

	curr_active = rtnl_dereference(bond->curr_active_slave);
	if (curr_active == new_slave)
		return;

	/* the old primary no longer needs to cover for disabled slaves */
	if (curr_active && bond->alb_info.primary_is_promisc) {
		dev_set_promiscuity(curr_active->dev, -1);
		bond->alb_info.primary_is_promisc = 0;
		bond->alb_info.rlb_promisc_timeout_counter = 0;
	}

	swap_slave = curr_active;
	rcu_assign_pointer(bond->curr_active_slave, new_slave);

	if (!new_slave || !bond_has_slaves(bond))
		return;

	/* set the new curr_active_slave to the bonds mac address
	 * i.e. swap mac addresses of old curr_active_slave and new curr_active_slave
	 */
	if (!swap_slave)
		swap_slave = bond_slave_has_mac(bond, bond->dev->dev_addr);

	/* Arrange for swap_slave and new_slave to temporarily be
	 * ignored so we can mess with their MAC addresses without
	 * fear of interference from transmit activity.
	 */
	if (swap_slave)
		tlb_clear_slave(bond, swap_slave, 1);
	tlb_clear_slave(bond, new_slave, 1);

	/* in TLB mode, the slave might flip down/up with the old dev_addr,
	 * and thus filter bond->dev_addr's packets, so force bond's mac
	 */
	if (BOND_MODE(bond) == BOND_MODE_TLB) {
		struct sockaddr_storage ss;
		u8 tmp_addr[MAX_ADDR_LEN];

		/* save/restore dance: push the bond mac to the driver but
		 * keep the net_device's recorded address unchanged
		 */
		bond_hw_addr_copy(tmp_addr, new_slave->dev->dev_addr,
				  new_slave->dev->addr_len);

		bond_hw_addr_copy(ss.__data, bond->dev->dev_addr,
				  bond->dev->addr_len);
		ss.ss_family = bond->dev->type;
		/* we don't care if it can't change its mac, best effort */
		dev_set_mac_address(new_slave->dev, (struct sockaddr *)&ss,
				    NULL);

		dev_addr_set(new_slave->dev, tmp_addr);
	}

	/* curr_active_slave must be set before calling alb_swap_mac_addr */
	if (swap_slave) {
		/* swap mac address */
		alb_swap_mac_addr(swap_slave, new_slave);
		alb_fasten_mac_swap(bond, swap_slave, new_slave);
	} else {
		/* set the new_slave to the bond mac address */
		alb_set_slave_mac_addr(new_slave, bond->dev->dev_addr,
				       bond->dev->addr_len);
		alb_send_learning_packets(new_slave, bond->dev->dev_addr,
					  false);
	}
}
/* Called with RTNL */
/* Set the bond's MAC address in ALB/TLB mode: validate it, propagate it to
 * the slaves (TLB only, via alb_set_mac_address), and resolve any resulting
 * address duplication with the active slave.
 */
int bond_alb_set_mac_address(struct net_device *bond_dev, void *addr)
{
	struct bonding *bond = netdev_priv(bond_dev);
	struct sockaddr_storage *ss = addr;
	struct slave *curr_active;
	struct slave *swap_slave;
	int res;

	if (!is_valid_ether_addr(ss->__data))
		return -EADDRNOTAVAIL;

	res = alb_set_mac_address(bond, addr);
	if (res)
		return res;

	dev_addr_set(bond_dev, ss->__data);

	/* If there is no curr_active_slave there is nothing else to do.
	 * Otherwise we'll need to pass the new address to it and handle
	 * duplications.
	 */
	curr_active = rtnl_dereference(bond->curr_active_slave);
	if (!curr_active)
		return 0;

	/* a slave already owning the new address must swap with the
	 * active slave, which gets the bond's address
	 */
	swap_slave = bond_slave_has_mac(bond, bond_dev->dev_addr);

	if (swap_slave) {
		alb_swap_mac_addr(swap_slave, curr_active);
		alb_fasten_mac_swap(bond, swap_slave, curr_active);
	} else {
		alb_set_slave_mac_addr(curr_active, bond_dev->dev_addr,
				       bond_dev->addr_len);

		alb_send_learning_packets(curr_active,
					  bond_dev->dev_addr, false);
		if (bond->alb_info.rlb_enabled) {
			/* inform clients mac address has changed */
			rlb_req_update_slave_clients(bond, curr_active);
		}
	}

	return 0;
}
  1526. void bond_alb_clear_vlan(struct bonding *bond, unsigned short vlan_id)
  1527. {
  1528. if (bond->alb_info.rlb_enabled)
  1529. rlb_clear_vlan(bond, vlan_id);
  1530. }