br_multicast.c

// SPDX-License-Identifier: GPL-2.0-or-later
/*
 * Bridge multicast support.
 *
 * Copyright (c) 2010 Herbert Xu <[email protected]>
 */

#include <linux/err.h>
#include <linux/export.h>
#include <linux/if_ether.h>
#include <linux/igmp.h>
#include <linux/in.h>
#include <linux/jhash.h>
#include <linux/kernel.h>
#include <linux/log2.h>
#include <linux/netdevice.h>
#include <linux/netfilter_bridge.h>
#include <linux/random.h>
#include <linux/rculist.h>
#include <linux/skbuff.h>
#include <linux/slab.h>
#include <linux/timer.h>
#include <linux/inetdevice.h>
#include <linux/mroute.h>
#include <net/ip.h>
#include <net/switchdev.h>
#if IS_ENABLED(CONFIG_IPV6)
#include <linux/icmpv6.h>
#include <net/ipv6.h>
#include <net/mld.h>
#include <net/ip6_checksum.h>
#include <net/addrconf.h>
#endif

#include "br_private.h"
#include "br_private_mcast_eht.h"
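
/* The bridge keeps two rhashtables for multicast state: mdb_hash_tbl maps
 * a group address (struct br_ip, i.e. protocol/destination/source/vid) to
 * its MDB entry, while sg_port_tbl maps a {port, S,G address} key to the
 * per-port group entry so S,G installs and removals can be resolved
 * without walking the port list. Both tables shrink automatically.
 */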
static const struct rhashtable_params br_mdb_rht_params = {
	.head_offset = offsetof(struct net_bridge_mdb_entry, rhnode),
	.key_offset = offsetof(struct net_bridge_mdb_entry, addr),
	.key_len = sizeof(struct br_ip),
	.automatic_shrinking = true,
};

static const struct rhashtable_params br_sg_port_rht_params = {
	.head_offset = offsetof(struct net_bridge_port_group, rhnode),
	.key_offset = offsetof(struct net_bridge_port_group, key),
	.key_len = sizeof(struct net_bridge_port_group_sg_key),
	.automatic_shrinking = true,
};

static void br_multicast_start_querier(struct net_bridge_mcast *brmctx,
				       struct bridge_mcast_own_query *query);
static void br_ip4_multicast_add_router(struct net_bridge_mcast *brmctx,
					struct net_bridge_mcast_port *pmctx);
static void br_ip4_multicast_leave_group(struct net_bridge_mcast *brmctx,
					 struct net_bridge_mcast_port *pmctx,
					 __be32 group,
					 __u16 vid,
					 const unsigned char *src);
static void br_multicast_port_group_rexmit(struct timer_list *t);

static void
br_multicast_rport_del_notify(struct net_bridge_mcast_port *pmctx, bool deleted);
static void br_ip6_multicast_add_router(struct net_bridge_mcast *brmctx,
					struct net_bridge_mcast_port *pmctx);
#if IS_ENABLED(CONFIG_IPV6)
static void br_ip6_multicast_leave_group(struct net_bridge_mcast *brmctx,
					 struct net_bridge_mcast_port *pmctx,
					 const struct in6_addr *group,
					 __u16 vid, const unsigned char *src);
#endif
static struct net_bridge_port_group *
__br_multicast_add_group(struct net_bridge_mcast *brmctx,
			 struct net_bridge_mcast_port *pmctx,
			 struct br_ip *group,
			 const unsigned char *src,
			 u8 filter_mode,
			 bool igmpv2_mldv1,
			 bool blocked);
static void br_multicast_find_del_pg(struct net_bridge *br,
				     struct net_bridge_port_group *pg);
static void __br_multicast_stop(struct net_bridge_mcast *brmctx);
static int br_mc_disabled_update(struct net_device *dev, bool value,
				 struct netlink_ext_ack *extack);

static struct net_bridge_port_group *
br_sg_port_find(struct net_bridge *br,
		struct net_bridge_port_group_sg_key *sg_p)
{
	lockdep_assert_held_once(&br->multicast_lock);
	return rhashtable_lookup_fast(&br->sg_port_tbl, sg_p,
				      br_sg_port_rht_params);
}
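
/* MDB lookups come in two flavours: br_mdb_ip_get_rcu() for callers that
 * are already in an RCU read-side section (the forwarding fast path), and
 * br_mdb_ip_get() for callers holding multicast_lock, which takes a short
 * RCU read lock just for the hash lookup. The ip4/ip6 wrappers below
 * simply build a zeroed br_ip key before delegating.
 */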
static struct net_bridge_mdb_entry *br_mdb_ip_get_rcu(struct net_bridge *br,
						      struct br_ip *dst)
{
	return rhashtable_lookup(&br->mdb_hash_tbl, dst, br_mdb_rht_params);
}

struct net_bridge_mdb_entry *br_mdb_ip_get(struct net_bridge *br,
					   struct br_ip *dst)
{
	struct net_bridge_mdb_entry *ent;

	lockdep_assert_held_once(&br->multicast_lock);

	rcu_read_lock();
	ent = rhashtable_lookup(&br->mdb_hash_tbl, dst, br_mdb_rht_params);
	rcu_read_unlock();

	return ent;
}

static struct net_bridge_mdb_entry *br_mdb_ip4_get(struct net_bridge *br,
						   __be32 dst, __u16 vid)
{
	struct br_ip br_dst;

	memset(&br_dst, 0, sizeof(br_dst));
	br_dst.dst.ip4 = dst;
	br_dst.proto = htons(ETH_P_IP);
	br_dst.vid = vid;

	return br_mdb_ip_get(br, &br_dst);
}

#if IS_ENABLED(CONFIG_IPV6)
static struct net_bridge_mdb_entry *br_mdb_ip6_get(struct net_bridge *br,
						   const struct in6_addr *dst,
						   __u16 vid)
{
	struct br_ip br_dst;

	memset(&br_dst, 0, sizeof(br_dst));
	br_dst.dst.ip6 = *dst;
	br_dst.proto = htons(ETH_P_IPV6);
	br_dst.vid = vid;

	return br_mdb_ip_get(br, &br_dst);
}
#endif
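
/* br_mdb_get() resolves the forwarding entry for an ingress skb. When the
 * source-specific protocol versions are in use (IGMPv3 for IPv4, MLDv2
 * for IPv6) it first tries an exact S,G match on the packet's source and
 * destination, then falls back to the *,G entry by clearing the source;
 * everything else, including non-IP traffic, is matched on destination
 * alone.
 */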
struct net_bridge_mdb_entry *br_mdb_get(struct net_bridge_mcast *brmctx,
					struct sk_buff *skb, u16 vid)
{
	struct net_bridge *br = brmctx->br;
	struct br_ip ip;

	if (!br_opt_get(br, BROPT_MULTICAST_ENABLED) ||
	    br_multicast_ctx_vlan_global_disabled(brmctx))
		return NULL;

	if (BR_INPUT_SKB_CB(skb)->igmp)
		return NULL;

	memset(&ip, 0, sizeof(ip));
	ip.proto = skb->protocol;
	ip.vid = vid;

	switch (skb->protocol) {
	case htons(ETH_P_IP):
		ip.dst.ip4 = ip_hdr(skb)->daddr;
		if (brmctx->multicast_igmp_version == 3) {
			struct net_bridge_mdb_entry *mdb;

			ip.src.ip4 = ip_hdr(skb)->saddr;
			mdb = br_mdb_ip_get_rcu(br, &ip);
			if (mdb)
				return mdb;
			ip.src.ip4 = 0;
		}
		break;
#if IS_ENABLED(CONFIG_IPV6)
	case htons(ETH_P_IPV6):
		ip.dst.ip6 = ipv6_hdr(skb)->daddr;
		if (brmctx->multicast_mld_version == 2) {
			struct net_bridge_mdb_entry *mdb;

			ip.src.ip6 = ipv6_hdr(skb)->saddr;
			mdb = br_mdb_ip_get_rcu(br, &ip);
			if (mdb)
				return mdb;
			memset(&ip.src.ip6, 0, sizeof(ip.src.ip6));
		}
		break;
#endif
	default:
		ip.proto = 0;
		ether_addr_copy(ip.dst.mac_addr, eth_hdr(skb)->h_dest);
	}

	return br_mdb_ip_get_rcu(br, &ip);
}

/* IMPORTANT: this function must be used only when the contexts cannot be
 * passed down (e.g. timer) and must be used for read-only purposes because
 * the vlan snooping option can change, so it can return any context
 * (non-vlan or vlan). Its initial intended purpose is to read timer values
 * from the *current* context based on the option. At worst that could lead
 * to inconsistent timers when the contexts are changed, i.e. src timer
 * which needs to re-arm with a specific delay taken from the old context
 */
static struct net_bridge_mcast_port *
br_multicast_pg_to_port_ctx(const struct net_bridge_port_group *pg)
{
	struct net_bridge_mcast_port *pmctx = &pg->key.port->multicast_ctx;
	struct net_bridge_vlan *vlan;

	lockdep_assert_held_once(&pg->key.port->br->multicast_lock);

	/* if vlan snooping is disabled use the port's multicast context */
	if (!pg->key.addr.vid ||
	    !br_opt_get(pg->key.port->br, BROPT_MCAST_VLAN_SNOOPING_ENABLED))
		goto out;

	/* locking is tricky here, due to different rules for multicast and
	 * vlans we need to take rcu to find the vlan and make sure it has
	 * the BR_VLFLAG_MCAST_ENABLED flag set, it can only change under
	 * multicast_lock which must be already held here, so the vlan's pmctx
	 * can safely be used on return
	 */
	rcu_read_lock();
	vlan = br_vlan_find(nbp_vlan_group_rcu(pg->key.port), pg->key.addr.vid);
	if (vlan && !br_multicast_port_ctx_vlan_disabled(&vlan->port_mcast_ctx))
		pmctx = &vlan->port_mcast_ctx;
	else
		pmctx = NULL;
	rcu_read_unlock();
out:
	return pmctx;
}

/* when snooping we need to check if the contexts should be used
 * in the following order:
 * - if pmctx is non-NULL (port), check if it should be used
 * - if pmctx is NULL (bridge), check if brmctx should be used
 */
static bool
br_multicast_ctx_should_use(const struct net_bridge_mcast *brmctx,
			    const struct net_bridge_mcast_port *pmctx)
{
	if (!netif_running(brmctx->br->dev))
		return false;

	if (pmctx)
		return !br_multicast_port_ctx_state_disabled(pmctx);
	else
		return !br_multicast_ctx_vlan_disabled(brmctx);
}
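
/* With BR_MULTICAST_TO_UNICAST the bridge keeps a port group entry per
 * listening host (the host's MAC is stored in eth_addr, see
 * br_multicast_new_port_group() below), so equality must also compare
 * the source address; without that flag the port match alone suffices.
 */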
static bool br_port_group_equal(struct net_bridge_port_group *p,
				struct net_bridge_port *port,
				const unsigned char *src)
{
	if (p->key.port != port)
		return false;

	if (!(port->flags & BR_MULTICAST_TO_UNICAST))
		return true;

	return ether_addr_equal(src, p->eth_addr);
}

static void __fwd_add_star_excl(struct net_bridge_mcast_port *pmctx,
				struct net_bridge_port_group *pg,
				struct br_ip *sg_ip)
{
	struct net_bridge_port_group_sg_key sg_key;
	struct net_bridge_port_group *src_pg;
	struct net_bridge_mcast *brmctx;

	memset(&sg_key, 0, sizeof(sg_key));
	brmctx = br_multicast_port_ctx_get_global(pmctx);
	sg_key.port = pg->key.port;
	sg_key.addr = *sg_ip;
	if (br_sg_port_find(brmctx->br, &sg_key))
		return;

	src_pg = __br_multicast_add_group(brmctx, pmctx,
					  sg_ip, pg->eth_addr,
					  MCAST_INCLUDE, false, false);
	if (IS_ERR_OR_NULL(src_pg) ||
	    src_pg->rt_protocol != RTPROT_KERNEL)
		return;

	src_pg->flags |= MDB_PG_FLAGS_STAR_EXCL;
}

static void __fwd_del_star_excl(struct net_bridge_port_group *pg,
				struct br_ip *sg_ip)
{
	struct net_bridge_port_group_sg_key sg_key;
	struct net_bridge *br = pg->key.port->br;
	struct net_bridge_port_group *src_pg;

	memset(&sg_key, 0, sizeof(sg_key));
	sg_key.port = pg->key.port;
	sg_key.addr = *sg_ip;
	src_pg = br_sg_port_find(br, &sg_key);
	if (!src_pg || !(src_pg->flags & MDB_PG_FLAGS_STAR_EXCL) ||
	    src_pg->rt_protocol != RTPROT_KERNEL)
		return;

	br_multicast_find_del_pg(br, src_pg);
}

/* When a port group transitions to (or is added as) EXCLUDE we need to add it
 * to all other ports' S,G entries which are not blocked by the current group
 * for proper replication, the assumption is that any S,G blocked entries
 * are already added so the S,G,port lookup should skip them.
 * When a port group transitions from EXCLUDE -> INCLUDE mode or is being
 * deleted we need to remove it from all ports' S,G entries where it was
 * automatically installed before (i.e. where it's MDB_PG_FLAGS_STAR_EXCL).
 */
void br_multicast_star_g_handle_mode(struct net_bridge_port_group *pg,
				     u8 filter_mode)
{
	struct net_bridge *br = pg->key.port->br;
	struct net_bridge_port_group *pg_lst;
	struct net_bridge_mcast_port *pmctx;
	struct net_bridge_mdb_entry *mp;
	struct br_ip sg_ip;

	if (WARN_ON(!br_multicast_is_star_g(&pg->key.addr)))
		return;

	mp = br_mdb_ip_get(br, &pg->key.addr);
	if (!mp)
		return;
	pmctx = br_multicast_pg_to_port_ctx(pg);
	if (!pmctx)
		return;

	memset(&sg_ip, 0, sizeof(sg_ip));
	sg_ip = pg->key.addr;

	for (pg_lst = mlock_dereference(mp->ports, br);
	     pg_lst;
	     pg_lst = mlock_dereference(pg_lst->next, br)) {
		struct net_bridge_group_src *src_ent;

		if (pg_lst == pg)
			continue;
		hlist_for_each_entry(src_ent, &pg_lst->src_list, node) {
			if (!(src_ent->flags & BR_SGRP_F_INSTALLED))
				continue;
			sg_ip.src = src_ent->addr.src;
			switch (filter_mode) {
			case MCAST_INCLUDE:
				__fwd_del_star_excl(pg, &sg_ip);
				break;
			case MCAST_EXCLUDE:
				__fwd_add_star_excl(pmctx, pg, &sg_ip);
				break;
			}
		}
	}
}

/* called when adding a new S,G with host_joined == false by default */
static void br_multicast_sg_host_state(struct net_bridge_mdb_entry *star_mp,
				       struct net_bridge_port_group *sg)
{
	struct net_bridge_mdb_entry *sg_mp;

	if (WARN_ON(!br_multicast_is_star_g(&star_mp->addr)))
		return;
	if (!star_mp->host_joined)
		return;

	sg_mp = br_mdb_ip_get(star_mp->br, &sg->key.addr);
	if (!sg_mp)
		return;
	sg_mp->host_joined = true;
}

/* set the host_joined state of all of *,G's S,G entries */
static void br_multicast_star_g_host_state(struct net_bridge_mdb_entry *star_mp)
{
	struct net_bridge *br = star_mp->br;
	struct net_bridge_mdb_entry *sg_mp;
	struct net_bridge_port_group *pg;
	struct br_ip sg_ip;

	if (WARN_ON(!br_multicast_is_star_g(&star_mp->addr)))
		return;

	memset(&sg_ip, 0, sizeof(sg_ip));
	sg_ip = star_mp->addr;
	for (pg = mlock_dereference(star_mp->ports, br);
	     pg;
	     pg = mlock_dereference(pg->next, br)) {
		struct net_bridge_group_src *src_ent;

		hlist_for_each_entry(src_ent, &pg->src_list, node) {
			if (!(src_ent->flags & BR_SGRP_F_INSTALLED))
				continue;
			sg_ip.src = src_ent->addr.src;
			sg_mp = br_mdb_ip_get(br, &sg_ip);
			if (!sg_mp)
				continue;
			sg_mp->host_joined = star_mp->host_joined;
		}
	}
}

static void br_multicast_sg_del_exclude_ports(struct net_bridge_mdb_entry *sgmp)
{
	struct net_bridge_port_group __rcu **pp;
	struct net_bridge_port_group *p;

	/* *,G exclude ports are only added to S,G entries */
	if (WARN_ON(br_multicast_is_star_g(&sgmp->addr)))
		return;

	/* we need the STAR_EXCLUDE ports if there are non-STAR_EXCLUDE ports
	 * we should ignore perm entries since they're managed by user-space
	 */
	for (pp = &sgmp->ports;
	     (p = mlock_dereference(*pp, sgmp->br)) != NULL;
	     pp = &p->next)
		if (!(p->flags & (MDB_PG_FLAGS_STAR_EXCL |
				  MDB_PG_FLAGS_PERMANENT)))
			return;

	/* currently the host can only have joined the *,G which means
	 * we treat it as EXCLUDE {}, so for an S,G it's considered a
	 * STAR_EXCLUDE entry and we can safely leave it
	 */
	sgmp->host_joined = false;

	for (pp = &sgmp->ports;
	     (p = mlock_dereference(*pp, sgmp->br)) != NULL;) {
		if (!(p->flags & MDB_PG_FLAGS_PERMANENT))
			br_multicast_del_pg(sgmp, p, pp);
		else
			pp = &p->next;
	}
}
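
/* Mirror image of the *,G EXCLUDE handling above: when a new S,G entry is
 * created, every port in EXCLUDE mode on the corresponding *,G (which does
 * not already have its own S,G entry) gets a kernel-managed STAR_EXCL S,G
 * entry so traffic from S keeps being replicated to those ports.
 */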
void br_multicast_sg_add_exclude_ports(struct net_bridge_mdb_entry *star_mp,
				       struct net_bridge_port_group *sg)
{
	struct net_bridge_port_group_sg_key sg_key;
	struct net_bridge *br = star_mp->br;
	struct net_bridge_mcast_port *pmctx;
	struct net_bridge_port_group *pg;
	struct net_bridge_mcast *brmctx;

	if (WARN_ON(br_multicast_is_star_g(&sg->key.addr)))
		return;
	if (WARN_ON(!br_multicast_is_star_g(&star_mp->addr)))
		return;

	br_multicast_sg_host_state(star_mp, sg);
	memset(&sg_key, 0, sizeof(sg_key));
	sg_key.addr = sg->key.addr;
	/* we need to add all exclude ports to the S,G */
	for (pg = mlock_dereference(star_mp->ports, br);
	     pg;
	     pg = mlock_dereference(pg->next, br)) {
		struct net_bridge_port_group *src_pg;

		if (pg == sg || pg->filter_mode == MCAST_INCLUDE)
			continue;

		sg_key.port = pg->key.port;
		if (br_sg_port_find(br, &sg_key))
			continue;

		pmctx = br_multicast_pg_to_port_ctx(pg);
		if (!pmctx)
			continue;
		brmctx = br_multicast_port_ctx_get_global(pmctx);

		src_pg = __br_multicast_add_group(brmctx, pmctx,
						  &sg->key.addr,
						  sg->eth_addr,
						  MCAST_INCLUDE, false, false);
		if (IS_ERR_OR_NULL(src_pg) ||
		    src_pg->rt_protocol != RTPROT_KERNEL)
			continue;
		src_pg->flags |= MDB_PG_FLAGS_STAR_EXCL;
	}
}
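
/* Install the S,G port group entry backing a single source record. Once
 * installed the source is marked BR_SGRP_F_INSTALLED and, unless the S,G
 * was added as permanent by user-space, the kernel takes over its
 * lifetime: the group timer is stopped and the entry follows the source
 * timer instead.
 */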
static void br_multicast_fwd_src_add(struct net_bridge_group_src *src)
{
	struct net_bridge_mdb_entry *star_mp;
	struct net_bridge_mcast_port *pmctx;
	struct net_bridge_port_group *sg;
	struct net_bridge_mcast *brmctx;
	struct br_ip sg_ip;

	if (src->flags & BR_SGRP_F_INSTALLED)
		return;

	memset(&sg_ip, 0, sizeof(sg_ip));
	pmctx = br_multicast_pg_to_port_ctx(src->pg);
	if (!pmctx)
		return;
	brmctx = br_multicast_port_ctx_get_global(pmctx);
	sg_ip = src->pg->key.addr;
	sg_ip.src = src->addr.src;

	sg = __br_multicast_add_group(brmctx, pmctx, &sg_ip,
				      src->pg->eth_addr, MCAST_INCLUDE, false,
				      !timer_pending(&src->timer));
	if (IS_ERR_OR_NULL(sg))
		return;
	src->flags |= BR_SGRP_F_INSTALLED;
	sg->flags &= ~MDB_PG_FLAGS_STAR_EXCL;

	/* if it was added by user-space as perm we can skip next steps */
	if (sg->rt_protocol != RTPROT_KERNEL &&
	    (sg->flags & MDB_PG_FLAGS_PERMANENT))
		return;

	/* the kernel is now responsible for removing this S,G */
	del_timer(&sg->timer);
	star_mp = br_mdb_ip_get(src->br, &src->pg->key.addr);
	if (!star_mp)
		return;

	br_multicast_sg_add_exclude_ports(star_mp, sg);
}

static void br_multicast_fwd_src_remove(struct net_bridge_group_src *src,
					bool fastleave)
{
	struct net_bridge_port_group *p, *pg = src->pg;
	struct net_bridge_port_group __rcu **pp;
	struct net_bridge_mdb_entry *mp;
	struct br_ip sg_ip;

	memset(&sg_ip, 0, sizeof(sg_ip));
	sg_ip = pg->key.addr;
	sg_ip.src = src->addr.src;

	mp = br_mdb_ip_get(src->br, &sg_ip);
	if (!mp)
		return;

	for (pp = &mp->ports;
	     (p = mlock_dereference(*pp, src->br)) != NULL;
	     pp = &p->next) {
		if (!br_port_group_equal(p, pg->key.port, pg->eth_addr))
			continue;

		if (p->rt_protocol != RTPROT_KERNEL &&
		    (p->flags & MDB_PG_FLAGS_PERMANENT))
			break;

		if (fastleave)
			p->flags |= MDB_PG_FLAGS_FAST_LEAVE;
		br_multicast_del_pg(mp, p, pp);
		break;
	}
	src->flags &= ~BR_SGRP_F_INSTALLED;
}

/* install S,G and based on src's timer enable or disable forwarding */
static void br_multicast_fwd_src_handle(struct net_bridge_group_src *src)
{
	struct net_bridge_port_group_sg_key sg_key;
	struct net_bridge_port_group *sg;
	u8 old_flags;

	br_multicast_fwd_src_add(src);

	memset(&sg_key, 0, sizeof(sg_key));
	sg_key.addr = src->pg->key.addr;
	sg_key.addr.src = src->addr.src;
	sg_key.port = src->pg->key.port;

	sg = br_sg_port_find(src->br, &sg_key);
	if (!sg || (sg->flags & MDB_PG_FLAGS_PERMANENT))
		return;

	old_flags = sg->flags;
	if (timer_pending(&src->timer))
		sg->flags &= ~MDB_PG_FLAGS_BLOCKED;
	else
		sg->flags |= MDB_PG_FLAGS_BLOCKED;

	if (old_flags != sg->flags) {
		struct net_bridge_mdb_entry *sg_mp;

		sg_mp = br_mdb_ip_get(src->br, &sg_key.addr);
		if (!sg_mp)
			return;
		br_mdb_notify(src->br->dev, sg_mp, sg, RTM_NEWMDB);
	}
}
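
/* Deferred destruction: multicast objects are unlinked under
 * multicast_lock, queued on br->mcast_gc_list and destroyed later by
 * mcast_gc_work on system_long_wq. Each destroy callback below syncs the
 * object's timers and then frees it with kfree_rcu(), so RCU readers on
 * the fast path never see a half-torn-down entry.
 */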
static void br_multicast_destroy_mdb_entry(struct net_bridge_mcast_gc *gc)
{
	struct net_bridge_mdb_entry *mp;

	mp = container_of(gc, struct net_bridge_mdb_entry, mcast_gc);
	WARN_ON(!hlist_unhashed(&mp->mdb_node));
	WARN_ON(mp->ports);

	del_timer_sync(&mp->timer);
	kfree_rcu(mp, rcu);
}

static void br_multicast_del_mdb_entry(struct net_bridge_mdb_entry *mp)
{
	struct net_bridge *br = mp->br;

	rhashtable_remove_fast(&br->mdb_hash_tbl, &mp->rhnode,
			       br_mdb_rht_params);
	hlist_del_init_rcu(&mp->mdb_node);
	hlist_add_head(&mp->mcast_gc.gc_node, &br->mcast_gc_list);
	queue_work(system_long_wq, &br->mcast_gc_work);
}

static void br_multicast_group_expired(struct timer_list *t)
{
	struct net_bridge_mdb_entry *mp = from_timer(mp, t, timer);
	struct net_bridge *br = mp->br;

	spin_lock(&br->multicast_lock);
	if (hlist_unhashed(&mp->mdb_node) || !netif_running(br->dev) ||
	    timer_pending(&mp->timer))
		goto out;

	br_multicast_host_leave(mp, true);

	if (mp->ports)
		goto out;
	br_multicast_del_mdb_entry(mp);
out:
	spin_unlock(&br->multicast_lock);
}

static void br_multicast_destroy_group_src(struct net_bridge_mcast_gc *gc)
{
	struct net_bridge_group_src *src;

	src = container_of(gc, struct net_bridge_group_src, mcast_gc);
	WARN_ON(!hlist_unhashed(&src->node));

	del_timer_sync(&src->timer);
	kfree_rcu(src, rcu);
}

void br_multicast_del_group_src(struct net_bridge_group_src *src,
				bool fastleave)
{
	struct net_bridge *br = src->pg->key.port->br;

	br_multicast_fwd_src_remove(src, fastleave);
	hlist_del_init_rcu(&src->node);
	src->pg->src_ents--;
	hlist_add_head(&src->mcast_gc.gc_node, &br->mcast_gc_list);
	queue_work(system_long_wq, &br->mcast_gc_work);
}

static void br_multicast_destroy_port_group(struct net_bridge_mcast_gc *gc)
{
	struct net_bridge_port_group *pg;

	pg = container_of(gc, struct net_bridge_port_group, mcast_gc);
	WARN_ON(!hlist_unhashed(&pg->mglist));
	WARN_ON(!hlist_empty(&pg->src_list));

	del_timer_sync(&pg->rexmit_timer);
	del_timer_sync(&pg->timer);
	kfree_rcu(pg, rcu);
}

void br_multicast_del_pg(struct net_bridge_mdb_entry *mp,
			 struct net_bridge_port_group *pg,
			 struct net_bridge_port_group __rcu **pp)
{
	struct net_bridge *br = pg->key.port->br;
	struct net_bridge_group_src *ent;
	struct hlist_node *tmp;

	rcu_assign_pointer(*pp, pg->next);
	hlist_del_init(&pg->mglist);
	br_multicast_eht_clean_sets(pg);
	hlist_for_each_entry_safe(ent, tmp, &pg->src_list, node)
		br_multicast_del_group_src(ent, false);
	br_mdb_notify(br->dev, mp, pg, RTM_DELMDB);
	if (!br_multicast_is_star_g(&mp->addr)) {
		rhashtable_remove_fast(&br->sg_port_tbl, &pg->rhnode,
				       br_sg_port_rht_params);
		br_multicast_sg_del_exclude_ports(mp);
	} else {
		br_multicast_star_g_handle_mode(pg, MCAST_INCLUDE);
	}
	hlist_add_head(&pg->mcast_gc.gc_node, &br->mcast_gc_list);
	queue_work(system_long_wq, &br->mcast_gc_work);

	if (!mp->ports && !mp->host_joined && netif_running(br->dev))
		mod_timer(&mp->timer, jiffies);
}

static void br_multicast_find_del_pg(struct net_bridge *br,
				     struct net_bridge_port_group *pg)
{
	struct net_bridge_port_group __rcu **pp;
	struct net_bridge_mdb_entry *mp;
	struct net_bridge_port_group *p;

	mp = br_mdb_ip_get(br, &pg->key.addr);
	if (WARN_ON(!mp))
		return;

	for (pp = &mp->ports;
	     (p = mlock_dereference(*pp, br)) != NULL;
	     pp = &p->next) {
		if (p != pg)
			continue;

		br_multicast_del_pg(mp, pg, pp);
		return;
	}

	WARN_ON(1);
}

static void br_multicast_port_group_expired(struct timer_list *t)
{
	struct net_bridge_port_group *pg = from_timer(pg, t, timer);
	struct net_bridge_group_src *src_ent;
	struct net_bridge *br = pg->key.port->br;
	struct hlist_node *tmp;
	bool changed;

	spin_lock(&br->multicast_lock);
	if (!netif_running(br->dev) || timer_pending(&pg->timer) ||
	    hlist_unhashed(&pg->mglist) || pg->flags & MDB_PG_FLAGS_PERMANENT)
		goto out;

	changed = !!(pg->filter_mode == MCAST_EXCLUDE);
	pg->filter_mode = MCAST_INCLUDE;
	hlist_for_each_entry_safe(src_ent, tmp, &pg->src_list, node) {
		if (!timer_pending(&src_ent->timer)) {
			br_multicast_del_group_src(src_ent, false);
			changed = true;
		}
	}

	if (hlist_empty(&pg->src_list)) {
		br_multicast_find_del_pg(br, pg);
	} else if (changed) {
		struct net_bridge_mdb_entry *mp = br_mdb_ip_get(br, &pg->key.addr);

		if (changed && br_multicast_is_star_g(&pg->key.addr))
			br_multicast_star_g_handle_mode(pg, MCAST_INCLUDE);

		if (WARN_ON(!mp))
			goto out;
		br_mdb_notify(br->dev, mp, pg, RTM_NEWMDB);
	}
out:
	spin_unlock(&br->multicast_lock);
}

static void br_multicast_gc(struct hlist_head *head)
{
	struct net_bridge_mcast_gc *gcent;
	struct hlist_node *tmp;

	hlist_for_each_entry_safe(gcent, tmp, head, gc_node) {
		hlist_del_init(&gcent->gc_node);
		gcent->destroy(gcent);
	}
}
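
/* The query builders below construct complete membership query frames
 * (Ethernet + IP + IGMP/MLD) in a freshly allocated skb. For the
 * source-specific variants, only sources whose timers fall on the
 * requested side of the last member query time (over_lmqt/over_llqt) and
 * which still have retransmissions pending are included; if no source
 * qualifies, no skb is allocated at all.
 */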
static void __br_multicast_query_handle_vlan(struct net_bridge_mcast *brmctx,
					     struct net_bridge_mcast_port *pmctx,
					     struct sk_buff *skb)
{
	struct net_bridge_vlan *vlan = NULL;

	if (pmctx && br_multicast_port_ctx_is_vlan(pmctx))
		vlan = pmctx->vlan;
	else if (br_multicast_ctx_is_vlan(brmctx))
		vlan = brmctx->vlan;

	if (vlan && !(vlan->flags & BRIDGE_VLAN_INFO_UNTAGGED)) {
		u16 vlan_proto;

		if (br_vlan_get_proto(brmctx->br->dev, &vlan_proto) != 0)
			return;
		__vlan_hwaccel_put_tag(skb, htons(vlan_proto), vlan->vid);
	}
}

static struct sk_buff *br_ip4_multicast_alloc_query(struct net_bridge_mcast *brmctx,
						    struct net_bridge_mcast_port *pmctx,
						    struct net_bridge_port_group *pg,
						    __be32 ip_dst, __be32 group,
						    bool with_srcs, bool over_lmqt,
						    u8 sflag, u8 *igmp_type,
						    bool *need_rexmit)
{
	struct net_bridge_port *p = pg ? pg->key.port : NULL;
	struct net_bridge_group_src *ent;
	size_t pkt_size, igmp_hdr_size;
	unsigned long now = jiffies;
	struct igmpv3_query *ihv3;
	void *csum_start = NULL;
	__sum16 *csum = NULL;
	struct sk_buff *skb;
	struct igmphdr *ih;
	struct ethhdr *eth;
	unsigned long lmqt;
	struct iphdr *iph;
	u16 lmqt_srcs = 0;

	igmp_hdr_size = sizeof(*ih);
	if (brmctx->multicast_igmp_version == 3) {
		igmp_hdr_size = sizeof(*ihv3);
		if (pg && with_srcs) {
			lmqt = now + (brmctx->multicast_last_member_interval *
				      brmctx->multicast_last_member_count);
			hlist_for_each_entry(ent, &pg->src_list, node) {
				if (over_lmqt == time_after(ent->timer.expires,
							    lmqt) &&
				    ent->src_query_rexmit_cnt > 0)
					lmqt_srcs++;
			}

			if (!lmqt_srcs)
				return NULL;
			igmp_hdr_size += lmqt_srcs * sizeof(__be32);
		}
	}

	pkt_size = sizeof(*eth) + sizeof(*iph) + 4 + igmp_hdr_size;
	if ((p && pkt_size > p->dev->mtu) ||
	    pkt_size > brmctx->br->dev->mtu)
		return NULL;

	skb = netdev_alloc_skb_ip_align(brmctx->br->dev, pkt_size);
	if (!skb)
		goto out;

	__br_multicast_query_handle_vlan(brmctx, pmctx, skb);
	skb->protocol = htons(ETH_P_IP);

	skb_reset_mac_header(skb);
	eth = eth_hdr(skb);

	ether_addr_copy(eth->h_source, brmctx->br->dev->dev_addr);
	ip_eth_mc_map(ip_dst, eth->h_dest);
	eth->h_proto = htons(ETH_P_IP);
	skb_put(skb, sizeof(*eth));

	skb_set_network_header(skb, skb->len);
	iph = ip_hdr(skb);
	iph->tot_len = htons(pkt_size - sizeof(*eth));

	iph->version = 4;
	iph->ihl = 6;
	iph->tos = 0xc0;
	iph->id = 0;
	iph->frag_off = htons(IP_DF);
	iph->ttl = 1;
	iph->protocol = IPPROTO_IGMP;
	iph->saddr = br_opt_get(brmctx->br, BROPT_MULTICAST_QUERY_USE_IFADDR) ?
		     inet_select_addr(brmctx->br->dev, 0, RT_SCOPE_LINK) : 0;
	iph->daddr = ip_dst;
	((u8 *)&iph[1])[0] = IPOPT_RA;
	((u8 *)&iph[1])[1] = 4;
	((u8 *)&iph[1])[2] = 0;
	((u8 *)&iph[1])[3] = 0;
	ip_send_check(iph);
	skb_put(skb, 24);

	skb_set_transport_header(skb, skb->len);
	*igmp_type = IGMP_HOST_MEMBERSHIP_QUERY;

	switch (brmctx->multicast_igmp_version) {
	case 2:
		ih = igmp_hdr(skb);
		ih->type = IGMP_HOST_MEMBERSHIP_QUERY;
		ih->code = (group ? brmctx->multicast_last_member_interval :
				    brmctx->multicast_query_response_interval) /
			   (HZ / IGMP_TIMER_SCALE);
		ih->group = group;
		ih->csum = 0;
		csum = &ih->csum;
		csum_start = (void *)ih;
		break;
	case 3:
		ihv3 = igmpv3_query_hdr(skb);
		ihv3->type = IGMP_HOST_MEMBERSHIP_QUERY;
		ihv3->code = (group ? brmctx->multicast_last_member_interval :
				      brmctx->multicast_query_response_interval) /
			     (HZ / IGMP_TIMER_SCALE);
		ihv3->group = group;
		ihv3->qqic = brmctx->multicast_query_interval / HZ;
		ihv3->nsrcs = htons(lmqt_srcs);
		ihv3->resv = 0;
		ihv3->suppress = sflag;
		ihv3->qrv = 2;
		ihv3->csum = 0;
		csum = &ihv3->csum;
		csum_start = (void *)ihv3;

		if (!pg || !with_srcs)
			break;

		lmqt_srcs = 0;
		hlist_for_each_entry(ent, &pg->src_list, node) {
			if (over_lmqt == time_after(ent->timer.expires,
						    lmqt) &&
			    ent->src_query_rexmit_cnt > 0) {
				ihv3->srcs[lmqt_srcs++] = ent->addr.src.ip4;
				ent->src_query_rexmit_cnt--;
				if (need_rexmit && ent->src_query_rexmit_cnt)
					*need_rexmit = true;
			}
		}
		if (WARN_ON(lmqt_srcs != ntohs(ihv3->nsrcs))) {
			kfree_skb(skb);
			return NULL;
		}
		break;
	}

	if (WARN_ON(!csum || !csum_start)) {
		kfree_skb(skb);
		return NULL;
	}

	*csum = ip_compute_csum(csum_start, igmp_hdr_size);
	skb_put(skb, igmp_hdr_size);
	__skb_pull(skb, sizeof(*eth));

out:
	return skb;
}

#if IS_ENABLED(CONFIG_IPV6)
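/* IPv6 counterpart of the IGMP query builder: an MLD(v2) query carried in
 * ICMPv6, preceded by a Hop-by-Hop extension header holding the Router
 * Alert option (hence the extra 8 bytes in pkt_size).
 */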
static struct sk_buff *br_ip6_multicast_alloc_query(struct net_bridge_mcast *brmctx,
						    struct net_bridge_mcast_port *pmctx,
						    struct net_bridge_port_group *pg,
						    const struct in6_addr *ip6_dst,
						    const struct in6_addr *group,
						    bool with_srcs, bool over_llqt,
						    u8 sflag, u8 *igmp_type,
						    bool *need_rexmit)
{
	struct net_bridge_port *p = pg ? pg->key.port : NULL;
	struct net_bridge_group_src *ent;
	size_t pkt_size, mld_hdr_size;
	unsigned long now = jiffies;
	struct mld2_query *mld2q;
	void *csum_start = NULL;
	unsigned long interval;
	__sum16 *csum = NULL;
	struct ipv6hdr *ip6h;
	struct mld_msg *mldq;
	struct sk_buff *skb;
	unsigned long llqt;
	struct ethhdr *eth;
	u16 llqt_srcs = 0;
	u8 *hopopt;

	mld_hdr_size = sizeof(*mldq);
	if (brmctx->multicast_mld_version == 2) {
		mld_hdr_size = sizeof(*mld2q);
		if (pg && with_srcs) {
			llqt = now + (brmctx->multicast_last_member_interval *
				      brmctx->multicast_last_member_count);
			hlist_for_each_entry(ent, &pg->src_list, node) {
				if (over_llqt == time_after(ent->timer.expires,
							    llqt) &&
				    ent->src_query_rexmit_cnt > 0)
					llqt_srcs++;
			}

			if (!llqt_srcs)
				return NULL;
			mld_hdr_size += llqt_srcs * sizeof(struct in6_addr);
		}
	}

	pkt_size = sizeof(*eth) + sizeof(*ip6h) + 8 + mld_hdr_size;
	if ((p && pkt_size > p->dev->mtu) ||
	    pkt_size > brmctx->br->dev->mtu)
		return NULL;

	skb = netdev_alloc_skb_ip_align(brmctx->br->dev, pkt_size);
	if (!skb)
		goto out;

	__br_multicast_query_handle_vlan(brmctx, pmctx, skb);
	skb->protocol = htons(ETH_P_IPV6);

	/* Ethernet header */
	skb_reset_mac_header(skb);
	eth = eth_hdr(skb);

	ether_addr_copy(eth->h_source, brmctx->br->dev->dev_addr);
	eth->h_proto = htons(ETH_P_IPV6);
	skb_put(skb, sizeof(*eth));

	/* IPv6 header + HbH option */
	skb_set_network_header(skb, skb->len);
	ip6h = ipv6_hdr(skb);

	*(__force __be32 *)ip6h = htonl(0x60000000);
	ip6h->payload_len = htons(8 + mld_hdr_size);
	ip6h->nexthdr = IPPROTO_HOPOPTS;
	ip6h->hop_limit = 1;
	ip6h->daddr = *ip6_dst;
	if (ipv6_dev_get_saddr(dev_net(brmctx->br->dev), brmctx->br->dev,
			       &ip6h->daddr, 0, &ip6h->saddr)) {
		kfree_skb(skb);
		br_opt_toggle(brmctx->br, BROPT_HAS_IPV6_ADDR, false);
		return NULL;
	}

	br_opt_toggle(brmctx->br, BROPT_HAS_IPV6_ADDR, true);
	ipv6_eth_mc_map(&ip6h->daddr, eth->h_dest);

	hopopt = (u8 *)(ip6h + 1);
	hopopt[0] = IPPROTO_ICMPV6;		/* next hdr */
	hopopt[1] = 0;				/* length of HbH */
	hopopt[2] = IPV6_TLV_ROUTERALERT;	/* Router Alert */
	hopopt[3] = 2;				/* Length of RA Option */
	hopopt[4] = 0;				/* Type = 0x0000 (MLD) */
	hopopt[5] = 0;
	hopopt[6] = IPV6_TLV_PAD1;		/* Pad1 */
	hopopt[7] = IPV6_TLV_PAD1;		/* Pad1 */

	skb_put(skb, sizeof(*ip6h) + 8);

	/* ICMPv6 */
	skb_set_transport_header(skb, skb->len);
	interval = ipv6_addr_any(group) ?
			brmctx->multicast_query_response_interval :
			brmctx->multicast_last_member_interval;
	*igmp_type = ICMPV6_MGM_QUERY;
	switch (brmctx->multicast_mld_version) {
	case 1:
		mldq = (struct mld_msg *)icmp6_hdr(skb);
		mldq->mld_type = ICMPV6_MGM_QUERY;
		mldq->mld_code = 0;
		mldq->mld_cksum = 0;
		mldq->mld_maxdelay = htons((u16)jiffies_to_msecs(interval));
		mldq->mld_reserved = 0;
		mldq->mld_mca = *group;
		csum = &mldq->mld_cksum;
		csum_start = (void *)mldq;
		break;
	case 2:
		mld2q = (struct mld2_query *)icmp6_hdr(skb);
		mld2q->mld2q_mrc = htons((u16)jiffies_to_msecs(interval));
		mld2q->mld2q_type = ICMPV6_MGM_QUERY;
		mld2q->mld2q_code = 0;
		mld2q->mld2q_cksum = 0;
		mld2q->mld2q_resv1 = 0;
		mld2q->mld2q_resv2 = 0;
		mld2q->mld2q_suppress = sflag;
		mld2q->mld2q_qrv = 2;
		mld2q->mld2q_nsrcs = htons(llqt_srcs);
		mld2q->mld2q_qqic = brmctx->multicast_query_interval / HZ;
		mld2q->mld2q_mca = *group;
		csum = &mld2q->mld2q_cksum;
		csum_start = (void *)mld2q;

		if (!pg || !with_srcs)
			break;

		llqt_srcs = 0;
		hlist_for_each_entry(ent, &pg->src_list, node) {
			if (over_llqt == time_after(ent->timer.expires,
						    llqt) &&
			    ent->src_query_rexmit_cnt > 0) {
				mld2q->mld2q_srcs[llqt_srcs++] = ent->addr.src.ip6;
				ent->src_query_rexmit_cnt--;
				if (need_rexmit && ent->src_query_rexmit_cnt)
					*need_rexmit = true;
			}
		}
		if (WARN_ON(llqt_srcs != ntohs(mld2q->mld2q_nsrcs))) {
			kfree_skb(skb);
			return NULL;
		}
		break;
	}

	if (WARN_ON(!csum || !csum_start)) {
		kfree_skb(skb);
		return NULL;
	}

	*csum = csum_ipv6_magic(&ip6h->saddr, &ip6h->daddr, mld_hdr_size,
				IPPROTO_ICMPV6,
				csum_partial(csum_start, mld_hdr_size, 0));
	skb_put(skb, mld_hdr_size);
	__skb_pull(skb, sizeof(*eth));

out:
	return skb;
}
#endif
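
/* Protocol dispatch for query allocation. When no explicit destination is
 * given, general queries default to the all-hosts groups: 224.0.0.1
 * (INADDR_ALLHOSTS_GROUP) for IPv4 and ff02::1 for IPv6.
 */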
static struct sk_buff *br_multicast_alloc_query(struct net_bridge_mcast *brmctx,
						struct net_bridge_mcast_port *pmctx,
						struct net_bridge_port_group *pg,
						struct br_ip *ip_dst,
						struct br_ip *group,
						bool with_srcs, bool over_lmqt,
						u8 sflag, u8 *igmp_type,
						bool *need_rexmit)
{
	__be32 ip4_dst;

	switch (group->proto) {
	case htons(ETH_P_IP):
		ip4_dst = ip_dst ? ip_dst->dst.ip4 : htonl(INADDR_ALLHOSTS_GROUP);
		return br_ip4_multicast_alloc_query(brmctx, pmctx, pg,
						    ip4_dst, group->dst.ip4,
						    with_srcs, over_lmqt,
						    sflag, igmp_type,
						    need_rexmit);
#if IS_ENABLED(CONFIG_IPV6)
	case htons(ETH_P_IPV6): {
		struct in6_addr ip6_dst;

		if (ip_dst)
			ip6_dst = ip_dst->dst.ip6;
		else
			ipv6_addr_set(&ip6_dst, htonl(0xff020000), 0, 0,
				      htonl(1));

		return br_ip6_multicast_alloc_query(brmctx, pmctx, pg,
						    &ip6_dst, &group->dst.ip6,
						    with_srcs, over_lmqt,
						    sflag, igmp_type,
						    need_rexmit);
	}
#endif
	}
	return NULL;
}
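
/* Look up or create the MDB entry for a group. Note the hash_max guard:
 * once the table holds hash_max entries, multicast snooping is switched
 * off for the whole bridge (user-space is notified via
 * br_mc_disabled_update()) and -E2BIG is returned.
 */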
struct net_bridge_mdb_entry *br_multicast_new_group(struct net_bridge *br,
						    struct br_ip *group)
{
	struct net_bridge_mdb_entry *mp;
	int err;

	mp = br_mdb_ip_get(br, group);
	if (mp)
		return mp;

	if (atomic_read(&br->mdb_hash_tbl.nelems) >= br->hash_max) {
		br_mc_disabled_update(br->dev, false, NULL);
		br_opt_toggle(br, BROPT_MULTICAST_ENABLED, false);
		return ERR_PTR(-E2BIG);
	}

	mp = kzalloc(sizeof(*mp), GFP_ATOMIC);
	if (unlikely(!mp))
		return ERR_PTR(-ENOMEM);

	mp->br = br;
	mp->addr = *group;
	mp->mcast_gc.destroy = br_multicast_destroy_mdb_entry;
	timer_setup(&mp->timer, br_multicast_group_expired, 0);
	err = rhashtable_lookup_insert_fast(&br->mdb_hash_tbl, &mp->rhnode,
					    br_mdb_rht_params);
	if (err) {
		kfree(mp);
		mp = ERR_PTR(err);
	} else {
		hlist_add_head_rcu(&mp->mdb_node, &br->mdb_list);
	}

	return mp;
}

static void br_multicast_group_src_expired(struct timer_list *t)
{
	struct net_bridge_group_src *src = from_timer(src, t, timer);
	struct net_bridge_port_group *pg;
	struct net_bridge *br = src->br;

	spin_lock(&br->multicast_lock);
	if (hlist_unhashed(&src->node) || !netif_running(br->dev) ||
	    timer_pending(&src->timer))
		goto out;

	pg = src->pg;
	if (pg->filter_mode == MCAST_INCLUDE) {
		br_multicast_del_group_src(src, false);
		if (!hlist_empty(&pg->src_list))
			goto out;
		br_multicast_find_del_pg(br, pg);
	} else {
		br_multicast_fwd_src_handle(src);
	}

out:
	spin_unlock(&br->multicast_lock);
}

struct net_bridge_group_src *
br_multicast_find_group_src(struct net_bridge_port_group *pg, struct br_ip *ip)
{
	struct net_bridge_group_src *ent;

	switch (ip->proto) {
	case htons(ETH_P_IP):
		hlist_for_each_entry(ent, &pg->src_list, node)
			if (ip->src.ip4 == ent->addr.src.ip4)
				return ent;
		break;
#if IS_ENABLED(CONFIG_IPV6)
	case htons(ETH_P_IPV6):
		hlist_for_each_entry(ent, &pg->src_list, node)
			if (!ipv6_addr_cmp(&ent->addr.src.ip6, &ip->src.ip6))
				return ent;
		break;
#endif
	}

	return NULL;
}
  1057. static struct net_bridge_group_src *
  1058. br_multicast_new_group_src(struct net_bridge_port_group *pg, struct br_ip *src_ip)
  1059. {
  1060. struct net_bridge_group_src *grp_src;
  1061. if (unlikely(pg->src_ents >= PG_SRC_ENT_LIMIT))
  1062. return NULL;
  1063. switch (src_ip->proto) {
  1064. case htons(ETH_P_IP):
  1065. if (ipv4_is_zeronet(src_ip->src.ip4) ||
  1066. ipv4_is_multicast(src_ip->src.ip4))
  1067. return NULL;
  1068. break;
  1069. #if IS_ENABLED(CONFIG_IPV6)
  1070. case htons(ETH_P_IPV6):
  1071. if (ipv6_addr_any(&src_ip->src.ip6) ||
  1072. ipv6_addr_is_multicast(&src_ip->src.ip6))
  1073. return NULL;
  1074. break;
  1075. #endif
  1076. }
  1077. grp_src = kzalloc(sizeof(*grp_src), GFP_ATOMIC);
  1078. if (unlikely(!grp_src))
  1079. return NULL;
  1080. grp_src->pg = pg;
  1081. grp_src->br = pg->key.port->br;
  1082. grp_src->addr = *src_ip;
  1083. grp_src->mcast_gc.destroy = br_multicast_destroy_group_src;
  1084. timer_setup(&grp_src->timer, br_multicast_group_src_expired, 0);
  1085. hlist_add_head_rcu(&grp_src->node, &pg->src_list);
  1086. pg->src_ents++;
  1087. return grp_src;
  1088. }
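
/* Allocate and link a new port group entry. (S,G) entries are additionally
 * inserted into the bridge's sg_port rhashtable so they can be found by
 * their (port, address) key; (*,G) entries are not. If @src is NULL the
 * entry matches any source MAC, represented by the broadcast address.
 */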
struct net_bridge_port_group *br_multicast_new_port_group(
                        struct net_bridge_port *port,
                        struct br_ip *group,
                        struct net_bridge_port_group __rcu *next,
                        unsigned char flags,
                        const unsigned char *src,
                        u8 filter_mode,
                        u8 rt_protocol)
{
        struct net_bridge_port_group *p;

        p = kzalloc(sizeof(*p), GFP_ATOMIC);
        if (unlikely(!p))
                return NULL;

        p->key.addr = *group;
        p->key.port = port;
        p->flags = flags;
        p->filter_mode = filter_mode;
        p->rt_protocol = rt_protocol;
        p->eht_host_tree = RB_ROOT;
        p->eht_set_tree = RB_ROOT;
        p->mcast_gc.destroy = br_multicast_destroy_port_group;
        INIT_HLIST_HEAD(&p->src_list);

        if (!br_multicast_is_star_g(group) &&
            rhashtable_lookup_insert_fast(&port->br->sg_port_tbl, &p->rhnode,
                                          br_sg_port_rht_params)) {
                kfree(p);
                return NULL;
        }

        rcu_assign_pointer(p->next, next);
        timer_setup(&p->timer, br_multicast_port_group_expired, 0);
        timer_setup(&p->rexmit_timer, br_multicast_port_group_rexmit, 0);
        hlist_add_head(&p->mglist, &port->mglist);

        if (src)
                memcpy(p->eth_addr, src, ETH_ALEN);
        else
                eth_broadcast_addr(p->eth_addr);

        return p;
}

void br_multicast_host_join(const struct net_bridge_mcast *brmctx,
                            struct net_bridge_mdb_entry *mp, bool notify)
{
        if (!mp->host_joined) {
                mp->host_joined = true;
                if (br_multicast_is_star_g(&mp->addr))
                        br_multicast_star_g_host_state(mp);
                if (notify)
                        br_mdb_notify(mp->br->dev, mp, NULL, RTM_NEWMDB);
        }

        if (br_group_is_l2(&mp->addr))
                return;

        mod_timer(&mp->timer, jiffies + brmctx->multicast_membership_interval);
}

void br_multicast_host_leave(struct net_bridge_mdb_entry *mp, bool notify)
{
        if (!mp->host_joined)
                return;

        mp->host_joined = false;
        if (br_multicast_is_star_g(&mp->addr))
                br_multicast_star_g_host_state(mp);
        if (notify)
                br_mdb_notify(mp->br->dev, mp, NULL, RTM_DELMDB);
}
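
/* Core join handling. A NULL @pmctx means the join is for the bridge
 * device itself (host join). The per-group port list is kept sorted in
 * descending port pointer order, so lookup can stop early and a new
 * entry has a deterministic insertion point.
 */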
static struct net_bridge_port_group *
__br_multicast_add_group(struct net_bridge_mcast *brmctx,
                         struct net_bridge_mcast_port *pmctx,
                         struct br_ip *group,
                         const unsigned char *src,
                         u8 filter_mode,
                         bool igmpv2_mldv1,
                         bool blocked)
{
        struct net_bridge_port_group __rcu **pp;
        struct net_bridge_port_group *p = NULL;
        struct net_bridge_mdb_entry *mp;
        unsigned long now = jiffies;

        if (!br_multicast_ctx_should_use(brmctx, pmctx))
                goto out;

        mp = br_multicast_new_group(brmctx->br, group);
        if (IS_ERR(mp))
                return ERR_CAST(mp);

        if (!pmctx) {
                br_multicast_host_join(brmctx, mp, true);
                goto out;
        }

        for (pp = &mp->ports;
             (p = mlock_dereference(*pp, brmctx->br)) != NULL;
             pp = &p->next) {
                if (br_port_group_equal(p, pmctx->port, src))
                        goto found;
                if ((unsigned long)p->key.port < (unsigned long)pmctx->port)
                        break;
        }

        p = br_multicast_new_port_group(pmctx->port, group, *pp, 0, src,
                                        filter_mode, RTPROT_KERNEL);
        if (unlikely(!p)) {
                p = ERR_PTR(-ENOMEM);
                goto out;
        }
        rcu_assign_pointer(*pp, p);
        if (blocked)
                p->flags |= MDB_PG_FLAGS_BLOCKED;
        br_mdb_notify(brmctx->br->dev, mp, p, RTM_NEWMDB);

found:
        if (igmpv2_mldv1)
                mod_timer(&p->timer,
                          now + brmctx->multicast_membership_interval);

out:
        return p;
}

static int br_multicast_add_group(struct net_bridge_mcast *brmctx,
                                  struct net_bridge_mcast_port *pmctx,
                                  struct br_ip *group,
                                  const unsigned char *src,
                                  u8 filter_mode,
                                  bool igmpv2_mldv1)
{
        struct net_bridge_port_group *pg;
        int err;

        spin_lock(&brmctx->br->multicast_lock);
        pg = __br_multicast_add_group(brmctx, pmctx, group, src, filter_mode,
                                      igmpv2_mldv1, false);
        /* NULL is considered valid for host joined groups */
        err = PTR_ERR_OR_ZERO(pg);
        spin_unlock(&brmctx->br->multicast_lock);

        return err;
}

static int br_ip4_multicast_add_group(struct net_bridge_mcast *brmctx,
                                      struct net_bridge_mcast_port *pmctx,
                                      __be32 group,
                                      __u16 vid,
                                      const unsigned char *src,
                                      bool igmpv2)
{
        struct br_ip br_group;
        u8 filter_mode;

        if (ipv4_is_local_multicast(group))
                return 0;

        memset(&br_group, 0, sizeof(br_group));
        br_group.dst.ip4 = group;
        br_group.proto = htons(ETH_P_IP);
        br_group.vid = vid;
        filter_mode = igmpv2 ? MCAST_EXCLUDE : MCAST_INCLUDE;

        return br_multicast_add_group(brmctx, pmctx, &br_group, src,
                                      filter_mode, igmpv2);
}

#if IS_ENABLED(CONFIG_IPV6)
static int br_ip6_multicast_add_group(struct net_bridge_mcast *brmctx,
                                      struct net_bridge_mcast_port *pmctx,
                                      const struct in6_addr *group,
                                      __u16 vid,
                                      const unsigned char *src,
                                      bool mldv1)
{
        struct br_ip br_group;
        u8 filter_mode;

        if (ipv6_addr_is_ll_all_nodes(group))
                return 0;

        memset(&br_group, 0, sizeof(br_group));
        br_group.dst.ip6 = *group;
        br_group.proto = htons(ETH_P_IPV6);
        br_group.vid = vid;
        filter_mode = mldv1 ? MCAST_EXCLUDE : MCAST_INCLUDE;

        return br_multicast_add_group(brmctx, pmctx, &br_group, src,
                                      filter_mode, mldv1);
}
#endif
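
/* Remove a port from a router list. Returns true if the port was actually
 * on the list, so callers know whether a notification is needed.
 */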
static bool br_multicast_rport_del(struct hlist_node *rlist)
{
        if (hlist_unhashed(rlist))
                return false;

        hlist_del_init_rcu(rlist);
        return true;
}

static bool br_ip4_multicast_rport_del(struct net_bridge_mcast_port *pmctx)
{
        return br_multicast_rport_del(&pmctx->ip4_rlist);
}

static bool br_ip6_multicast_rport_del(struct net_bridge_mcast_port *pmctx)
{
#if IS_ENABLED(CONFIG_IPV6)
        return br_multicast_rport_del(&pmctx->ip6_rlist);
#else
        return false;
#endif
}

static void br_multicast_router_expired(struct net_bridge_mcast_port *pmctx,
                                        struct timer_list *t,
                                        struct hlist_node *rlist)
{
        struct net_bridge *br = pmctx->port->br;
        bool del;

        spin_lock(&br->multicast_lock);
        if (pmctx->multicast_router == MDB_RTR_TYPE_DISABLED ||
            pmctx->multicast_router == MDB_RTR_TYPE_PERM ||
            timer_pending(t))
                goto out;

        del = br_multicast_rport_del(rlist);
        br_multicast_rport_del_notify(pmctx, del);
out:
        spin_unlock(&br->multicast_lock);
}

static void br_ip4_multicast_router_expired(struct timer_list *t)
{
        struct net_bridge_mcast_port *pmctx = from_timer(pmctx, t,
                                                         ip4_mc_router_timer);

        br_multicast_router_expired(pmctx, t, &pmctx->ip4_rlist);
}

#if IS_ENABLED(CONFIG_IPV6)
static void br_ip6_multicast_router_expired(struct timer_list *t)
{
        struct net_bridge_mcast_port *pmctx = from_timer(pmctx, t,
                                                         ip6_mc_router_timer);

        br_multicast_router_expired(pmctx, t, &pmctx->ip6_rlist);
}
#endif

static void br_mc_router_state_change(struct net_bridge *p,
                                      bool is_mc_router)
{
        struct switchdev_attr attr = {
                .orig_dev = p->dev,
                .id = SWITCHDEV_ATTR_ID_BRIDGE_MROUTER,
                .flags = SWITCHDEV_F_DEFER,
                .u.mrouter = is_mc_router,
        };

        switchdev_port_attr_set(p->dev, &attr, NULL);
}

static void br_multicast_local_router_expired(struct net_bridge_mcast *brmctx,
                                              struct timer_list *timer)
{
        spin_lock(&brmctx->br->multicast_lock);
        if (brmctx->multicast_router == MDB_RTR_TYPE_DISABLED ||
            brmctx->multicast_router == MDB_RTR_TYPE_PERM ||
            br_ip4_multicast_is_router(brmctx) ||
            br_ip6_multicast_is_router(brmctx))
                goto out;

        br_mc_router_state_change(brmctx->br, false);
out:
        spin_unlock(&brmctx->br->multicast_lock);
}

static void br_ip4_multicast_local_router_expired(struct timer_list *t)
{
        struct net_bridge_mcast *brmctx = from_timer(brmctx, t,
                                                     ip4_mc_router_timer);

        br_multicast_local_router_expired(brmctx, t);
}

#if IS_ENABLED(CONFIG_IPV6)
static void br_ip6_multicast_local_router_expired(struct timer_list *t)
{
        struct net_bridge_mcast *brmctx = from_timer(brmctx, t,
                                                     ip6_mc_router_timer);

        br_multicast_local_router_expired(brmctx, t);
}
#endif

static void br_multicast_querier_expired(struct net_bridge_mcast *brmctx,
                                         struct bridge_mcast_own_query *query)
{
        spin_lock(&brmctx->br->multicast_lock);
        if (!netif_running(brmctx->br->dev) ||
            br_multicast_ctx_vlan_global_disabled(brmctx) ||
            !br_opt_get(brmctx->br, BROPT_MULTICAST_ENABLED))
                goto out;

        br_multicast_start_querier(brmctx, query);

out:
        spin_unlock(&brmctx->br->multicast_lock);
}

static void br_ip4_multicast_querier_expired(struct timer_list *t)
{
        struct net_bridge_mcast *brmctx = from_timer(brmctx, t,
                                                     ip4_other_query.timer);

        br_multicast_querier_expired(brmctx, &brmctx->ip4_own_query);
}

#if IS_ENABLED(CONFIG_IPV6)
static void br_ip6_multicast_querier_expired(struct timer_list *t)
{
        struct net_bridge_mcast *brmctx = from_timer(brmctx, t,
                                                     ip6_other_query.timer);

        br_multicast_querier_expired(brmctx, &brmctx->ip6_own_query);
}
#endif

static void br_multicast_select_own_querier(struct net_bridge_mcast *brmctx,
                                            struct br_ip *ip,
                                            struct sk_buff *skb)
{
        if (ip->proto == htons(ETH_P_IP))
                brmctx->ip4_querier.addr.src.ip4 = ip_hdr(skb)->saddr;
#if IS_ENABLED(CONFIG_IPV6)
        else
                brmctx->ip6_querier.addr.src.ip6 = ipv6_hdr(skb)->saddr;
#endif
}
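
/* Send a query for this context. When a group-and-source specific query
 * is requested with @sflag set, the source list is built twice: once for
 * sources whose timers are over the Last Member Query Time and once for
 * the remaining sources (the again_under_lmqt loop), so both sets are
 * queried separately as the source-specific query rules require.
 */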
static void __br_multicast_send_query(struct net_bridge_mcast *brmctx,
                                      struct net_bridge_mcast_port *pmctx,
                                      struct net_bridge_port_group *pg,
                                      struct br_ip *ip_dst,
                                      struct br_ip *group,
                                      bool with_srcs,
                                      u8 sflag,
                                      bool *need_rexmit)
{
        bool over_lmqt = !!sflag;
        struct sk_buff *skb;
        u8 igmp_type;

        if (!br_multicast_ctx_should_use(brmctx, pmctx) ||
            !br_multicast_ctx_matches_vlan_snooping(brmctx))
                return;

again_under_lmqt:
        skb = br_multicast_alloc_query(brmctx, pmctx, pg, ip_dst, group,
                                       with_srcs, over_lmqt, sflag, &igmp_type,
                                       need_rexmit);
        if (!skb)
                return;

        if (pmctx) {
                skb->dev = pmctx->port->dev;
                br_multicast_count(brmctx->br, pmctx->port, skb, igmp_type,
                                   BR_MCAST_DIR_TX);
                NF_HOOK(NFPROTO_BRIDGE, NF_BR_LOCAL_OUT,
                        dev_net(pmctx->port->dev), NULL, skb, NULL, skb->dev,
                        br_dev_queue_push_xmit);

                if (over_lmqt && with_srcs && sflag) {
                        over_lmqt = false;
                        goto again_under_lmqt;
                }
        } else {
                br_multicast_select_own_querier(brmctx, group, skb);
                br_multicast_count(brmctx->br, NULL, skb, igmp_type,
                                   BR_MCAST_DIR_RX);
                netif_rx(skb);
        }
}
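
/* Read a consistent snapshot of the querier state. Readers may run
 * lockless, so the address and port ifindex are protected by a seqcount
 * and re-read if a writer raced with us.
 */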
static void br_multicast_read_querier(const struct bridge_mcast_querier *querier,
                                      struct bridge_mcast_querier *dest)
{
        unsigned int seq;

        memset(dest, 0, sizeof(*dest));
        do {
                seq = read_seqcount_begin(&querier->seq);
                dest->port_ifidx = querier->port_ifidx;
                memcpy(&dest->addr, &querier->addr, sizeof(struct br_ip));
        } while (read_seqcount_retry(&querier->seq, seq));
}

static void br_multicast_update_querier(struct net_bridge_mcast *brmctx,
                                        struct bridge_mcast_querier *querier,
                                        int ifindex,
                                        struct br_ip *saddr)
{
        write_seqcount_begin(&querier->seq);
        querier->port_ifidx = ifindex;
        memcpy(&querier->addr, saddr, sizeof(*saddr));
        write_seqcount_end(&querier->seq);
}
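
/* Send a general query unless another querier is currently known (its
 * timer is pending). Before a bridge-level query goes out, the recorded
 * querier port and address are cleared since we are about to select
 * ourselves as querier.
 */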
static void br_multicast_send_query(struct net_bridge_mcast *brmctx,
                                    struct net_bridge_mcast_port *pmctx,
                                    struct bridge_mcast_own_query *own_query)
{
        struct bridge_mcast_other_query *other_query = NULL;
        struct bridge_mcast_querier *querier;
        struct br_ip br_group;
        unsigned long time;

        if (!br_multicast_ctx_should_use(brmctx, pmctx) ||
            !br_opt_get(brmctx->br, BROPT_MULTICAST_ENABLED) ||
            !brmctx->multicast_querier)
                return;

        memset(&br_group.dst, 0, sizeof(br_group.dst));

        if (pmctx ? (own_query == &pmctx->ip4_own_query) :
                    (own_query == &brmctx->ip4_own_query)) {
                querier = &brmctx->ip4_querier;
                other_query = &brmctx->ip4_other_query;
                br_group.proto = htons(ETH_P_IP);
#if IS_ENABLED(CONFIG_IPV6)
        } else {
                querier = &brmctx->ip6_querier;
                other_query = &brmctx->ip6_other_query;
                br_group.proto = htons(ETH_P_IPV6);
#endif
        }

        if (!other_query || timer_pending(&other_query->timer))
                return;

        /* we're about to select ourselves as querier */
        if (!pmctx && querier->port_ifidx) {
                struct br_ip zeroip = {};

                br_multicast_update_querier(brmctx, querier, 0, &zeroip);
        }

        __br_multicast_send_query(brmctx, pmctx, NULL, NULL, &br_group, false,
                                  0, NULL);

        time = jiffies;
        time += own_query->startup_sent < brmctx->multicast_startup_query_count ?
                brmctx->multicast_startup_query_interval :
                brmctx->multicast_query_interval;
        mod_timer(&own_query->timer, time);
}

static void
br_multicast_port_query_expired(struct net_bridge_mcast_port *pmctx,
                                struct bridge_mcast_own_query *query)
{
        struct net_bridge *br = pmctx->port->br;
        struct net_bridge_mcast *brmctx;

        spin_lock(&br->multicast_lock);
        if (br_multicast_port_ctx_state_stopped(pmctx))
                goto out;

        brmctx = br_multicast_port_ctx_get_global(pmctx);
        if (query->startup_sent < brmctx->multicast_startup_query_count)
                query->startup_sent++;

        br_multicast_send_query(brmctx, pmctx, query);

out:
        spin_unlock(&br->multicast_lock);
}

static void br_ip4_multicast_port_query_expired(struct timer_list *t)
{
        struct net_bridge_mcast_port *pmctx = from_timer(pmctx, t,
                                                         ip4_own_query.timer);

        br_multicast_port_query_expired(pmctx, &pmctx->ip4_own_query);
}

#if IS_ENABLED(CONFIG_IPV6)
static void br_ip6_multicast_port_query_expired(struct timer_list *t)
{
        struct net_bridge_mcast_port *pmctx = from_timer(pmctx, t,
                                                         ip6_own_query.timer);

        br_multicast_port_query_expired(pmctx, &pmctx->ip6_own_query);
}
#endif
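
/* Retransmit pending group specific and group-and-source specific queries
 * for a port group. grp_query_rexmit_cnt tracks the remaining group
 * queries; per-source retransmit counters are consumed inside query
 * allocation (reported back via need_rexmit). The timer is re-armed while
 * anything is left to send.
 */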
static void br_multicast_port_group_rexmit(struct timer_list *t)
{
        struct net_bridge_port_group *pg = from_timer(pg, t, rexmit_timer);
        struct bridge_mcast_other_query *other_query = NULL;
        struct net_bridge *br = pg->key.port->br;
        struct net_bridge_mcast_port *pmctx;
        struct net_bridge_mcast *brmctx;
        bool need_rexmit = false;

        spin_lock(&br->multicast_lock);
        if (!netif_running(br->dev) || hlist_unhashed(&pg->mglist) ||
            !br_opt_get(br, BROPT_MULTICAST_ENABLED))
                goto out;

        pmctx = br_multicast_pg_to_port_ctx(pg);
        if (!pmctx)
                goto out;
        brmctx = br_multicast_port_ctx_get_global(pmctx);
        if (!brmctx->multicast_querier)
                goto out;

        if (pg->key.addr.proto == htons(ETH_P_IP))
                other_query = &brmctx->ip4_other_query;
#if IS_ENABLED(CONFIG_IPV6)
        else
                other_query = &brmctx->ip6_other_query;
#endif

        if (!other_query || timer_pending(&other_query->timer))
                goto out;

        if (pg->grp_query_rexmit_cnt) {
                pg->grp_query_rexmit_cnt--;
                __br_multicast_send_query(brmctx, pmctx, pg, &pg->key.addr,
                                          &pg->key.addr, false, 1, NULL);
        }
        __br_multicast_send_query(brmctx, pmctx, pg, &pg->key.addr,
                                  &pg->key.addr, true, 0, &need_rexmit);

        if (pg->grp_query_rexmit_cnt || need_rexmit)
                mod_timer(&pg->rexmit_timer, jiffies +
                          brmctx->multicast_last_member_interval);
out:
        spin_unlock(&br->multicast_lock);
}

static int br_mc_disabled_update(struct net_device *dev, bool value,
                                 struct netlink_ext_ack *extack)
{
        struct switchdev_attr attr = {
                .orig_dev = dev,
                .id = SWITCHDEV_ATTR_ID_BRIDGE_MC_DISABLED,
                .flags = SWITCHDEV_F_DEFER,
                .u.mc_disabled = !value,
        };

        return switchdev_port_attr_set(dev, &attr, extack);
}

void br_multicast_port_ctx_init(struct net_bridge_port *port,
                                struct net_bridge_vlan *vlan,
                                struct net_bridge_mcast_port *pmctx)
{
        pmctx->port = port;
        pmctx->vlan = vlan;
        pmctx->multicast_router = MDB_RTR_TYPE_TEMP_QUERY;
        timer_setup(&pmctx->ip4_mc_router_timer,
                    br_ip4_multicast_router_expired, 0);
        timer_setup(&pmctx->ip4_own_query.timer,
                    br_ip4_multicast_port_query_expired, 0);
#if IS_ENABLED(CONFIG_IPV6)
        timer_setup(&pmctx->ip6_mc_router_timer,
                    br_ip6_multicast_router_expired, 0);
        timer_setup(&pmctx->ip6_own_query.timer,
                    br_ip6_multicast_port_query_expired, 0);
#endif
}

void br_multicast_port_ctx_deinit(struct net_bridge_mcast_port *pmctx)
{
#if IS_ENABLED(CONFIG_IPV6)
        del_timer_sync(&pmctx->ip6_mc_router_timer);
#endif
        del_timer_sync(&pmctx->ip4_mc_router_timer);
}

int br_multicast_add_port(struct net_bridge_port *port)
{
        int err;

        port->multicast_eht_hosts_limit = BR_MCAST_DEFAULT_EHT_HOSTS_LIMIT;
        br_multicast_port_ctx_init(port, NULL, &port->multicast_ctx);

        err = br_mc_disabled_update(port->dev,
                                    br_opt_get(port->br,
                                               BROPT_MULTICAST_ENABLED),
                                    NULL);
        if (err && err != -EOPNOTSUPP)
                return err;

        port->mcast_stats = netdev_alloc_pcpu_stats(struct bridge_mcast_stats);
        if (!port->mcast_stats)
                return -ENOMEM;

        return 0;
}

void br_multicast_del_port(struct net_bridge_port *port)
{
        struct net_bridge *br = port->br;
        struct net_bridge_port_group *pg;
        HLIST_HEAD(deleted_head);
        struct hlist_node *n;

        /* Take care of the remaining groups, only perm ones should be left */
        spin_lock_bh(&br->multicast_lock);
        hlist_for_each_entry_safe(pg, n, &port->mglist, mglist)
                br_multicast_find_del_pg(br, pg);
        hlist_move_list(&br->mcast_gc_list, &deleted_head);
        spin_unlock_bh(&br->multicast_lock);
        br_multicast_gc(&deleted_head);
        br_multicast_port_ctx_deinit(&port->multicast_ctx);
        free_percpu(port->mcast_stats);
}

static void br_multicast_enable(struct bridge_mcast_own_query *query)
{
        query->startup_sent = 0;

        if (try_to_del_timer_sync(&query->timer) >= 0 ||
            del_timer(&query->timer))
                mod_timer(&query->timer, jiffies);
}

static void __br_multicast_enable_port_ctx(struct net_bridge_mcast_port *pmctx)
{
        struct net_bridge *br = pmctx->port->br;
        struct net_bridge_mcast *brmctx;

        brmctx = br_multicast_port_ctx_get_global(pmctx);
        if (!br_opt_get(br, BROPT_MULTICAST_ENABLED) ||
            !netif_running(br->dev))
                return;

        br_multicast_enable(&pmctx->ip4_own_query);
#if IS_ENABLED(CONFIG_IPV6)
        br_multicast_enable(&pmctx->ip6_own_query);
#endif
        if (pmctx->multicast_router == MDB_RTR_TYPE_PERM) {
                br_ip4_multicast_add_router(brmctx, pmctx);
                br_ip6_multicast_add_router(brmctx, pmctx);
        }
}

void br_multicast_enable_port(struct net_bridge_port *port)
{
        struct net_bridge *br = port->br;

        spin_lock_bh(&br->multicast_lock);
        __br_multicast_enable_port_ctx(&port->multicast_ctx);
        spin_unlock_bh(&br->multicast_lock);
}

static void __br_multicast_disable_port_ctx(struct net_bridge_mcast_port *pmctx)
{
        struct net_bridge_port_group *pg;
        struct hlist_node *n;
        bool del = false;

        hlist_for_each_entry_safe(pg, n, &pmctx->port->mglist, mglist)
                if (!(pg->flags & MDB_PG_FLAGS_PERMANENT) &&
                    (!br_multicast_port_ctx_is_vlan(pmctx) ||
                     pg->key.addr.vid == pmctx->vlan->vid))
                        br_multicast_find_del_pg(pmctx->port->br, pg);

        del |= br_ip4_multicast_rport_del(pmctx);
        del_timer(&pmctx->ip4_mc_router_timer);
        del_timer(&pmctx->ip4_own_query.timer);
        del |= br_ip6_multicast_rport_del(pmctx);
#if IS_ENABLED(CONFIG_IPV6)
        del_timer(&pmctx->ip6_mc_router_timer);
        del_timer(&pmctx->ip6_own_query.timer);
#endif
        br_multicast_rport_del_notify(pmctx, del);
}

void br_multicast_disable_port(struct net_bridge_port *port)
{
        spin_lock_bh(&port->br->multicast_lock);
        __br_multicast_disable_port_ctx(&port->multicast_ctx);
        spin_unlock_bh(&port->br->multicast_lock);
}

static int __grp_src_delete_marked(struct net_bridge_port_group *pg)
{
        struct net_bridge_group_src *ent;
        struct hlist_node *tmp;
        int deleted = 0;

        hlist_for_each_entry_safe(ent, tmp, &pg->src_list, node)
                if (ent->flags & BR_SGRP_F_DELETE) {
                        br_multicast_del_group_src(ent, false);
                        deleted++;
                }

        return deleted;
}

static void __grp_src_mod_timer(struct net_bridge_group_src *src,
                                unsigned long expires)
{
        mod_timer(&src->timer, expires);
        br_multicast_fwd_src_handle(src);
}
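
/* Lower the timers of all sources marked BR_SGRP_F_SEND to the Last Member
 * Query Time and, if we are the active querier, arm their retransmit
 * counters and schedule group-and-source specific queries.
 */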
static void __grp_src_query_marked_and_rexmit(struct net_bridge_mcast *brmctx,
                                              struct net_bridge_mcast_port *pmctx,
                                              struct net_bridge_port_group *pg)
{
        struct bridge_mcast_other_query *other_query = NULL;
        u32 lmqc = brmctx->multicast_last_member_count;
        unsigned long lmqt, lmi, now = jiffies;
        struct net_bridge_group_src *ent;

        if (!netif_running(brmctx->br->dev) ||
            !br_opt_get(brmctx->br, BROPT_MULTICAST_ENABLED))
                return;

        if (pg->key.addr.proto == htons(ETH_P_IP))
                other_query = &brmctx->ip4_other_query;
#if IS_ENABLED(CONFIG_IPV6)
        else
                other_query = &brmctx->ip6_other_query;
#endif

        lmqt = now + br_multicast_lmqt(brmctx);
        hlist_for_each_entry(ent, &pg->src_list, node) {
                if (ent->flags & BR_SGRP_F_SEND) {
                        ent->flags &= ~BR_SGRP_F_SEND;
                        if (ent->timer.expires > lmqt) {
                                if (brmctx->multicast_querier &&
                                    other_query &&
                                    !timer_pending(&other_query->timer))
                                        ent->src_query_rexmit_cnt = lmqc;
                                __grp_src_mod_timer(ent, lmqt);
                        }
                }
        }

        if (!brmctx->multicast_querier ||
            !other_query || timer_pending(&other_query->timer))
                return;

        __br_multicast_send_query(brmctx, pmctx, pg, &pg->key.addr,
                                  &pg->key.addr, true, 1, NULL);

        lmi = now + brmctx->multicast_last_member_interval;
        if (!timer_pending(&pg->rexmit_timer) ||
            time_after(pg->rexmit_timer.expires, lmi))
                mod_timer(&pg->rexmit_timer, lmi);
}
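
/* Send a group specific query and arm its retransmit counter. In EXCLUDE
 * mode the group timer is also lowered to the Last Member Query Time so a
 * group with no remaining listeners ages out quickly.
 */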
static void __grp_send_query_and_rexmit(struct net_bridge_mcast *brmctx,
                                        struct net_bridge_mcast_port *pmctx,
                                        struct net_bridge_port_group *pg)
{
        struct bridge_mcast_other_query *other_query = NULL;
        unsigned long now = jiffies, lmi;

        if (!netif_running(brmctx->br->dev) ||
            !br_opt_get(brmctx->br, BROPT_MULTICAST_ENABLED))
                return;

        if (pg->key.addr.proto == htons(ETH_P_IP))
                other_query = &brmctx->ip4_other_query;
#if IS_ENABLED(CONFIG_IPV6)
        else
                other_query = &brmctx->ip6_other_query;
#endif

        if (brmctx->multicast_querier &&
            other_query && !timer_pending(&other_query->timer)) {
                lmi = now + brmctx->multicast_last_member_interval;
                pg->grp_query_rexmit_cnt = brmctx->multicast_last_member_count - 1;
                __br_multicast_send_query(brmctx, pmctx, pg, &pg->key.addr,
                                          &pg->key.addr, false, 0, NULL);
                if (!timer_pending(&pg->rexmit_timer) ||
                    time_after(pg->rexmit_timer.expires, lmi))
                        mod_timer(&pg->rexmit_timer, lmi);
        }

        if (pg->filter_mode == MCAST_EXCLUDE &&
            (!timer_pending(&pg->timer) ||
             time_after(pg->timer.expires, now + br_multicast_lmqt(brmctx))))
                mod_timer(&pg->timer, now + br_multicast_lmqt(brmctx));
}

/* State          Msg type      New state         Actions
 * INCLUDE (A)    IS_IN (B)     INCLUDE (A+B)     (B)=GMI
 * INCLUDE (A)    ALLOW (B)     INCLUDE (A+B)     (B)=GMI
 * EXCLUDE (X,Y)  ALLOW (A)     EXCLUDE (X+A,Y-A) (A)=GMI
 */
static bool br_multicast_isinc_allow(const struct net_bridge_mcast *brmctx,
                                     struct net_bridge_port_group *pg, void *h_addr,
                                     void *srcs, u32 nsrcs, size_t addr_size,
                                     int grec_type)
{
        struct net_bridge_group_src *ent;
        unsigned long now = jiffies;
        bool changed = false;
        struct br_ip src_ip;
        u32 src_idx;

        memset(&src_ip, 0, sizeof(src_ip));
        src_ip.proto = pg->key.addr.proto;
        for (src_idx = 0; src_idx < nsrcs; src_idx++) {
                memcpy(&src_ip.src, srcs + (src_idx * addr_size), addr_size);
                ent = br_multicast_find_group_src(pg, &src_ip);
                if (!ent) {
                        ent = br_multicast_new_group_src(pg, &src_ip);
                        if (ent)
                                changed = true;
                }

                if (ent)
                        __grp_src_mod_timer(ent, now + br_multicast_gmi(brmctx));
        }

        if (br_multicast_eht_handle(brmctx, pg, h_addr, srcs, nsrcs, addr_size,
                                    grec_type))
                changed = true;

        return changed;
}

/* State          Msg type      New state         Actions
 * INCLUDE (A)    IS_EX (B)     EXCLUDE (A*B,B-A) (B-A)=0
 *                                                Delete (A-B)
 *                                                Group Timer=GMI
 */
static void __grp_src_isexc_incl(const struct net_bridge_mcast *brmctx,
                                 struct net_bridge_port_group *pg, void *h_addr,
                                 void *srcs, u32 nsrcs, size_t addr_size,
                                 int grec_type)
{
        struct net_bridge_group_src *ent;
        struct br_ip src_ip;
        u32 src_idx;

        hlist_for_each_entry(ent, &pg->src_list, node)
                ent->flags |= BR_SGRP_F_DELETE;

        memset(&src_ip, 0, sizeof(src_ip));
        src_ip.proto = pg->key.addr.proto;
        for (src_idx = 0; src_idx < nsrcs; src_idx++) {
                memcpy(&src_ip.src, srcs + (src_idx * addr_size), addr_size);
                ent = br_multicast_find_group_src(pg, &src_ip);
                if (ent)
                        ent->flags &= ~BR_SGRP_F_DELETE;
                else
                        ent = br_multicast_new_group_src(pg, &src_ip);
                if (ent)
                        br_multicast_fwd_src_handle(ent);
        }

        br_multicast_eht_handle(brmctx, pg, h_addr, srcs, nsrcs, addr_size,
                                grec_type);

        __grp_src_delete_marked(pg);
}

/* State          Msg type      New state         Actions
 * EXCLUDE (X,Y)  IS_EX (A)     EXCLUDE (A-Y,Y*A) (A-X-Y)=GMI
 *                                                Delete (X-A)
 *                                                Delete (Y-A)
 *                                                Group Timer=GMI
 */
static bool __grp_src_isexc_excl(const struct net_bridge_mcast *brmctx,
                                 struct net_bridge_port_group *pg, void *h_addr,
                                 void *srcs, u32 nsrcs, size_t addr_size,
                                 int grec_type)
{
        struct net_bridge_group_src *ent;
        unsigned long now = jiffies;
        bool changed = false;
        struct br_ip src_ip;
        u32 src_idx;

        hlist_for_each_entry(ent, &pg->src_list, node)
                ent->flags |= BR_SGRP_F_DELETE;

        memset(&src_ip, 0, sizeof(src_ip));
        src_ip.proto = pg->key.addr.proto;
        for (src_idx = 0; src_idx < nsrcs; src_idx++) {
                memcpy(&src_ip.src, srcs + (src_idx * addr_size), addr_size);
                ent = br_multicast_find_group_src(pg, &src_ip);
                if (ent) {
                        ent->flags &= ~BR_SGRP_F_DELETE;
                } else {
                        ent = br_multicast_new_group_src(pg, &src_ip);
                        if (ent) {
                                __grp_src_mod_timer(ent,
                                                    now + br_multicast_gmi(brmctx));
                                changed = true;
                        }
                }
        }

        if (br_multicast_eht_handle(brmctx, pg, h_addr, srcs, nsrcs, addr_size,
                                    grec_type))
                changed = true;

        if (__grp_src_delete_marked(pg))
                changed = true;

        return changed;
}

static bool br_multicast_isexc(const struct net_bridge_mcast *brmctx,
                               struct net_bridge_port_group *pg, void *h_addr,
                               void *srcs, u32 nsrcs, size_t addr_size,
                               int grec_type)
{
        bool changed = false;

        switch (pg->filter_mode) {
        case MCAST_INCLUDE:
                __grp_src_isexc_incl(brmctx, pg, h_addr, srcs, nsrcs, addr_size,
                                     grec_type);
                br_multicast_star_g_handle_mode(pg, MCAST_EXCLUDE);
                changed = true;
                break;
        case MCAST_EXCLUDE:
                changed = __grp_src_isexc_excl(brmctx, pg, h_addr, srcs, nsrcs,
                                               addr_size, grec_type);
                break;
        }

        pg->filter_mode = MCAST_EXCLUDE;
        mod_timer(&pg->timer, jiffies + br_multicast_gmi(brmctx));

        return changed;
}

/* State          Msg type      New state         Actions
 * INCLUDE (A)    TO_IN (B)     INCLUDE (A+B)     (B)=GMI
 *                                                Send Q(G,A-B)
 */
static bool __grp_src_toin_incl(struct net_bridge_mcast *brmctx,
                                struct net_bridge_mcast_port *pmctx,
                                struct net_bridge_port_group *pg, void *h_addr,
                                void *srcs, u32 nsrcs, size_t addr_size,
                                int grec_type)
{
        u32 src_idx, to_send = pg->src_ents;
        struct net_bridge_group_src *ent;
        unsigned long now = jiffies;
        bool changed = false;
        struct br_ip src_ip;

        hlist_for_each_entry(ent, &pg->src_list, node)
                ent->flags |= BR_SGRP_F_SEND;

        memset(&src_ip, 0, sizeof(src_ip));
        src_ip.proto = pg->key.addr.proto;
        for (src_idx = 0; src_idx < nsrcs; src_idx++) {
                memcpy(&src_ip.src, srcs + (src_idx * addr_size), addr_size);
                ent = br_multicast_find_group_src(pg, &src_ip);
                if (ent) {
                        ent->flags &= ~BR_SGRP_F_SEND;
                        to_send--;
                } else {
                        ent = br_multicast_new_group_src(pg, &src_ip);
                        if (ent)
                                changed = true;
                }
                if (ent)
                        __grp_src_mod_timer(ent, now + br_multicast_gmi(brmctx));
        }

        if (br_multicast_eht_handle(brmctx, pg, h_addr, srcs, nsrcs, addr_size,
                                    grec_type))
                changed = true;

        if (to_send)
                __grp_src_query_marked_and_rexmit(brmctx, pmctx, pg);

        return changed;
}

/* State          Msg type      New state         Actions
 * EXCLUDE (X,Y)  TO_IN (A)     EXCLUDE (X+A,Y-A) (A)=GMI
 *                                                Send Q(G,X-A)
 *                                                Send Q(G)
 */
static bool __grp_src_toin_excl(struct net_bridge_mcast *brmctx,
                                struct net_bridge_mcast_port *pmctx,
                                struct net_bridge_port_group *pg, void *h_addr,
                                void *srcs, u32 nsrcs, size_t addr_size,
                                int grec_type)
{
        u32 src_idx, to_send = pg->src_ents;
        struct net_bridge_group_src *ent;
        unsigned long now = jiffies;
        bool changed = false;
        struct br_ip src_ip;

        hlist_for_each_entry(ent, &pg->src_list, node)
                if (timer_pending(&ent->timer))
                        ent->flags |= BR_SGRP_F_SEND;

        memset(&src_ip, 0, sizeof(src_ip));
        src_ip.proto = pg->key.addr.proto;
        for (src_idx = 0; src_idx < nsrcs; src_idx++) {
                memcpy(&src_ip.src, srcs + (src_idx * addr_size), addr_size);
                ent = br_multicast_find_group_src(pg, &src_ip);
                if (ent) {
                        if (timer_pending(&ent->timer)) {
                                ent->flags &= ~BR_SGRP_F_SEND;
                                to_send--;
                        }
                } else {
                        ent = br_multicast_new_group_src(pg, &src_ip);
                        if (ent)
                                changed = true;
                }
                if (ent)
                        __grp_src_mod_timer(ent, now + br_multicast_gmi(brmctx));
        }

        if (br_multicast_eht_handle(brmctx, pg, h_addr, srcs, nsrcs, addr_size,
                                    grec_type))
                changed = true;

        if (to_send)
                __grp_src_query_marked_and_rexmit(brmctx, pmctx, pg);

        __grp_send_query_and_rexmit(brmctx, pmctx, pg);

        return changed;
}

static bool br_multicast_toin(struct net_bridge_mcast *brmctx,
                              struct net_bridge_mcast_port *pmctx,
                              struct net_bridge_port_group *pg, void *h_addr,
                              void *srcs, u32 nsrcs, size_t addr_size,
                              int grec_type)
{
        bool changed = false;

        switch (pg->filter_mode) {
        case MCAST_INCLUDE:
                changed = __grp_src_toin_incl(brmctx, pmctx, pg, h_addr, srcs,
                                              nsrcs, addr_size, grec_type);
                break;
        case MCAST_EXCLUDE:
                changed = __grp_src_toin_excl(brmctx, pmctx, pg, h_addr, srcs,
                                              nsrcs, addr_size, grec_type);
                break;
        }

        if (br_multicast_eht_should_del_pg(pg)) {
                pg->flags |= MDB_PG_FLAGS_FAST_LEAVE;
                br_multicast_find_del_pg(pg->key.port->br, pg);
                /* a notification has already been sent and we shouldn't
                 * access pg after the delete so we have to return false
                 */
                changed = false;
        }

        return changed;
}

/* State          Msg type      New state         Actions
 * INCLUDE (A)    TO_EX (B)     EXCLUDE (A*B,B-A) (B-A)=0
 *                                                Delete (A-B)
 *                                                Send Q(G,A*B)
 *                                                Group Timer=GMI
 */
static void __grp_src_toex_incl(struct net_bridge_mcast *brmctx,
                                struct net_bridge_mcast_port *pmctx,
                                struct net_bridge_port_group *pg, void *h_addr,
                                void *srcs, u32 nsrcs, size_t addr_size,
                                int grec_type)
{
        struct net_bridge_group_src *ent;
        u32 src_idx, to_send = 0;
        struct br_ip src_ip;

        hlist_for_each_entry(ent, &pg->src_list, node)
                ent->flags = (ent->flags & ~BR_SGRP_F_SEND) | BR_SGRP_F_DELETE;

        memset(&src_ip, 0, sizeof(src_ip));
        src_ip.proto = pg->key.addr.proto;
        for (src_idx = 0; src_idx < nsrcs; src_idx++) {
                memcpy(&src_ip.src, srcs + (src_idx * addr_size), addr_size);
                ent = br_multicast_find_group_src(pg, &src_ip);
                if (ent) {
                        ent->flags = (ent->flags & ~BR_SGRP_F_DELETE) |
                                     BR_SGRP_F_SEND;
                        to_send++;
                } else {
                        ent = br_multicast_new_group_src(pg, &src_ip);
                }
                if (ent)
                        br_multicast_fwd_src_handle(ent);
        }

        br_multicast_eht_handle(brmctx, pg, h_addr, srcs, nsrcs, addr_size,
                                grec_type);

        __grp_src_delete_marked(pg);
        if (to_send)
                __grp_src_query_marked_and_rexmit(brmctx, pmctx, pg);
}

/* State          Msg type      New state         Actions
 * EXCLUDE (X,Y)  TO_EX (A)     EXCLUDE (A-Y,Y*A) (A-X-Y)=Group Timer
 *                                                Delete (X-A)
 *                                                Delete (Y-A)
 *                                                Send Q(G,A-Y)
 *                                                Group Timer=GMI
 */
static bool __grp_src_toex_excl(struct net_bridge_mcast *brmctx,
                                struct net_bridge_mcast_port *pmctx,
                                struct net_bridge_port_group *pg, void *h_addr,
                                void *srcs, u32 nsrcs, size_t addr_size,
                                int grec_type)
{
        struct net_bridge_group_src *ent;
        u32 src_idx, to_send = 0;
        bool changed = false;
        struct br_ip src_ip;

        hlist_for_each_entry(ent, &pg->src_list, node)
                ent->flags = (ent->flags & ~BR_SGRP_F_SEND) | BR_SGRP_F_DELETE;

        memset(&src_ip, 0, sizeof(src_ip));
        src_ip.proto = pg->key.addr.proto;
        for (src_idx = 0; src_idx < nsrcs; src_idx++) {
                memcpy(&src_ip.src, srcs + (src_idx * addr_size), addr_size);
                ent = br_multicast_find_group_src(pg, &src_ip);
                if (ent) {
                        ent->flags &= ~BR_SGRP_F_DELETE;
                } else {
                        ent = br_multicast_new_group_src(pg, &src_ip);
                        if (ent) {
                                __grp_src_mod_timer(ent, pg->timer.expires);
                                changed = true;
                        }
                }
                if (ent && timer_pending(&ent->timer)) {
                        ent->flags |= BR_SGRP_F_SEND;
                        to_send++;
                }
        }

        if (br_multicast_eht_handle(brmctx, pg, h_addr, srcs, nsrcs, addr_size,
                                    grec_type))
                changed = true;

        if (__grp_src_delete_marked(pg))
                changed = true;
        if (to_send)
                __grp_src_query_marked_and_rexmit(brmctx, pmctx, pg);

        return changed;
}

static bool br_multicast_toex(struct net_bridge_mcast *brmctx,
                              struct net_bridge_mcast_port *pmctx,
                              struct net_bridge_port_group *pg, void *h_addr,
                              void *srcs, u32 nsrcs, size_t addr_size,
                              int grec_type)
{
        bool changed = false;

        switch (pg->filter_mode) {
        case MCAST_INCLUDE:
                __grp_src_toex_incl(brmctx, pmctx, pg, h_addr, srcs, nsrcs,
                                    addr_size, grec_type);
                br_multicast_star_g_handle_mode(pg, MCAST_EXCLUDE);
                changed = true;
                break;
        case MCAST_EXCLUDE:
                changed = __grp_src_toex_excl(brmctx, pmctx, pg, h_addr, srcs,
                                              nsrcs, addr_size, grec_type);
                break;
        }

        pg->filter_mode = MCAST_EXCLUDE;
        mod_timer(&pg->timer, jiffies + br_multicast_gmi(brmctx));

        return changed;
}

/* State          Msg type      New state         Actions
 * INCLUDE (A)    BLOCK (B)     INCLUDE (A)       Send Q(G,A*B)
 */
static bool __grp_src_block_incl(struct net_bridge_mcast *brmctx,
                                 struct net_bridge_mcast_port *pmctx,
                                 struct net_bridge_port_group *pg, void *h_addr,
                                 void *srcs, u32 nsrcs, size_t addr_size, int grec_type)
{
        struct net_bridge_group_src *ent;
        u32 src_idx, to_send = 0;
        bool changed = false;
        struct br_ip src_ip;

        hlist_for_each_entry(ent, &pg->src_list, node)
                ent->flags &= ~BR_SGRP_F_SEND;

        memset(&src_ip, 0, sizeof(src_ip));
        src_ip.proto = pg->key.addr.proto;
        for (src_idx = 0; src_idx < nsrcs; src_idx++) {
                memcpy(&src_ip.src, srcs + (src_idx * addr_size), addr_size);
                ent = br_multicast_find_group_src(pg, &src_ip);
                if (ent) {
                        ent->flags |= BR_SGRP_F_SEND;
                        to_send++;
                }
        }

        if (br_multicast_eht_handle(brmctx, pg, h_addr, srcs, nsrcs, addr_size,
                                    grec_type))
                changed = true;

        if (to_send)
                __grp_src_query_marked_and_rexmit(brmctx, pmctx, pg);

        return changed;
}

/* State          Msg type      New state           Actions
 * EXCLUDE (X,Y)  BLOCK (A)     EXCLUDE (X+(A-Y),Y) (A-X-Y)=Group Timer
 *                                                  Send Q(G,A-Y)
 */
static bool __grp_src_block_excl(struct net_bridge_mcast *brmctx,
                                 struct net_bridge_mcast_port *pmctx,
                                 struct net_bridge_port_group *pg, void *h_addr,
                                 void *srcs, u32 nsrcs, size_t addr_size, int grec_type)
{
        struct net_bridge_group_src *ent;
        u32 src_idx, to_send = 0;
        bool changed = false;
        struct br_ip src_ip;

        hlist_for_each_entry(ent, &pg->src_list, node)
                ent->flags &= ~BR_SGRP_F_SEND;

        memset(&src_ip, 0, sizeof(src_ip));
        src_ip.proto = pg->key.addr.proto;
        for (src_idx = 0; src_idx < nsrcs; src_idx++) {
                memcpy(&src_ip.src, srcs + (src_idx * addr_size), addr_size);
                ent = br_multicast_find_group_src(pg, &src_ip);
                if (!ent) {
                        ent = br_multicast_new_group_src(pg, &src_ip);
                        if (ent) {
                                __grp_src_mod_timer(ent, pg->timer.expires);
                                changed = true;
                        }
                }
                if (ent && timer_pending(&ent->timer)) {
                        ent->flags |= BR_SGRP_F_SEND;
                        to_send++;
                }
        }

        if (br_multicast_eht_handle(brmctx, pg, h_addr, srcs, nsrcs, addr_size,
                                    grec_type))
                changed = true;

        if (to_send)
                __grp_src_query_marked_and_rexmit(brmctx, pmctx, pg);

        return changed;
}

static bool br_multicast_block(struct net_bridge_mcast *brmctx,
                               struct net_bridge_mcast_port *pmctx,
                               struct net_bridge_port_group *pg, void *h_addr,
                               void *srcs, u32 nsrcs, size_t addr_size, int grec_type)
{
        bool changed = false;

        switch (pg->filter_mode) {
        case MCAST_INCLUDE:
                changed = __grp_src_block_incl(brmctx, pmctx, pg, h_addr, srcs,
                                               nsrcs, addr_size, grec_type);
                break;
        case MCAST_EXCLUDE:
                changed = __grp_src_block_excl(brmctx, pmctx, pg, h_addr, srcs,
                                               nsrcs, addr_size, grec_type);
                break;
        }

        if ((pg->filter_mode == MCAST_INCLUDE && hlist_empty(&pg->src_list)) ||
            br_multicast_eht_should_del_pg(pg)) {
                if (br_multicast_eht_should_del_pg(pg))
                        pg->flags |= MDB_PG_FLAGS_FAST_LEAVE;
                br_multicast_find_del_pg(pg->key.port->br, pg);
                /* a notification has already been sent and we shouldn't
                 * access pg after the delete so we have to return false
                 */
                changed = false;
        }

        return changed;
}
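
/* Find @p's port group entry for @mp. Caller must hold multicast_lock,
 * since mlock_dereference() is used to walk the port list.
 */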
static struct net_bridge_port_group *
br_multicast_find_port(struct net_bridge_mdb_entry *mp,
                       struct net_bridge_port *p,
                       const unsigned char *src)
{
        struct net_bridge *br __maybe_unused = mp->br;
        struct net_bridge_port_group *pg;

        for (pg = mlock_dereference(mp->ports, br);
             pg;
             pg = mlock_dereference(pg->next, br))
                if (br_port_group_equal(pg, p, src))
                        return pg;

        return NULL;
}
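
/* Parse an IGMPv3 report. Each group record is bounds checked before use.
 * When the bridge is forced to IGMPv2 mode, or the report targets the
 * bridge itself (NULL pmctx), only plain join/leave semantics apply;
 * otherwise the full v3 source-list state machine runs under
 * multicast_lock.
 */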
static int br_ip4_multicast_igmp3_report(struct net_bridge_mcast *brmctx,
                                         struct net_bridge_mcast_port *pmctx,
                                         struct sk_buff *skb,
                                         u16 vid)
{
        bool igmpv2 = brmctx->multicast_igmp_version == 2;
        struct net_bridge_mdb_entry *mdst;
        struct net_bridge_port_group *pg;
        const unsigned char *src;
        struct igmpv3_report *ih;
        struct igmpv3_grec *grec;
        int i, len, num, type;
        __be32 group, *h_addr;
        bool changed = false;
        int err = 0;
        u16 nsrcs;

        ih = igmpv3_report_hdr(skb);
        num = ntohs(ih->ngrec);
        len = skb_transport_offset(skb) + sizeof(*ih);

        for (i = 0; i < num; i++) {
                len += sizeof(*grec);
                if (!ip_mc_may_pull(skb, len))
                        return -EINVAL;

                grec = (void *)(skb->data + len - sizeof(*grec));
                group = grec->grec_mca;
                type = grec->grec_type;
                nsrcs = ntohs(grec->grec_nsrcs);

                len += nsrcs * 4;
                if (!ip_mc_may_pull(skb, len))
                        return -EINVAL;

                switch (type) {
                case IGMPV3_MODE_IS_INCLUDE:
                case IGMPV3_MODE_IS_EXCLUDE:
                case IGMPV3_CHANGE_TO_INCLUDE:
                case IGMPV3_CHANGE_TO_EXCLUDE:
                case IGMPV3_ALLOW_NEW_SOURCES:
                case IGMPV3_BLOCK_OLD_SOURCES:
                        break;

                default:
                        continue;
                }

                src = eth_hdr(skb)->h_source;
                if (nsrcs == 0 &&
                    (type == IGMPV3_CHANGE_TO_INCLUDE ||
                     type == IGMPV3_MODE_IS_INCLUDE)) {
                        if (!pmctx || igmpv2) {
                                br_ip4_multicast_leave_group(brmctx, pmctx,
                                                             group, vid, src);
                                continue;
                        }
                } else {
                        err = br_ip4_multicast_add_group(brmctx, pmctx, group,
                                                         vid, src, igmpv2);
                        if (err)
                                break;
                }

                if (!pmctx || igmpv2)
                        continue;

                spin_lock_bh(&brmctx->br->multicast_lock);
                if (!br_multicast_ctx_should_use(brmctx, pmctx))
                        goto unlock_continue;

                mdst = br_mdb_ip4_get(brmctx->br, group, vid);
                if (!mdst)
                        goto unlock_continue;
                pg = br_multicast_find_port(mdst, pmctx->port, src);
                if (!pg || (pg->flags & MDB_PG_FLAGS_PERMANENT))
                        goto unlock_continue;
                /* reload grec and host addr */
                grec = (void *)(skb->data + len - sizeof(*grec) - (nsrcs * 4));
                h_addr = &ip_hdr(skb)->saddr;
                switch (type) {
                case IGMPV3_ALLOW_NEW_SOURCES:
                        changed = br_multicast_isinc_allow(brmctx, pg, h_addr,
                                                           grec->grec_src,
                                                           nsrcs, sizeof(__be32), type);
                        break;
                case IGMPV3_MODE_IS_INCLUDE:
                        changed = br_multicast_isinc_allow(brmctx, pg, h_addr,
                                                           grec->grec_src,
                                                           nsrcs, sizeof(__be32), type);
                        break;
                case IGMPV3_MODE_IS_EXCLUDE:
                        changed = br_multicast_isexc(brmctx, pg, h_addr,
                                                     grec->grec_src,
                                                     nsrcs, sizeof(__be32), type);
                        break;
                case IGMPV3_CHANGE_TO_INCLUDE:
                        changed = br_multicast_toin(brmctx, pmctx, pg, h_addr,
                                                    grec->grec_src,
                                                    nsrcs, sizeof(__be32), type);
                        break;
                case IGMPV3_CHANGE_TO_EXCLUDE:
                        changed = br_multicast_toex(brmctx, pmctx, pg, h_addr,
                                                    grec->grec_src,
                                                    nsrcs, sizeof(__be32), type);
                        break;
                case IGMPV3_BLOCK_OLD_SOURCES:
                        changed = br_multicast_block(brmctx, pmctx, pg, h_addr,
                                                     grec->grec_src,
                                                     nsrcs, sizeof(__be32), type);
                        break;
                }
                if (changed)
                        br_mdb_notify(brmctx->br->dev, mdst, pg, RTM_NEWMDB);
unlock_continue:
                spin_unlock_bh(&brmctx->br->multicast_lock);
        }

        return err;
}

#if IS_ENABLED(CONFIG_IPV6)
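/* MLDv2 counterpart of the IGMPv3 report parser. Group records are of
 * variable size, so grec_nsrcs is read via skb_header_pointer() before
 * the full record is pulled and validated.
 */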
static int br_ip6_multicast_mld2_report(struct net_bridge_mcast *brmctx,
                                        struct net_bridge_mcast_port *pmctx,
                                        struct sk_buff *skb,
                                        u16 vid)
{
        bool mldv1 = brmctx->multicast_mld_version == 1;
        struct net_bridge_mdb_entry *mdst;
        struct net_bridge_port_group *pg;
        unsigned int nsrcs_offset;
        struct mld2_report *mld2r;
        const unsigned char *src;
        struct in6_addr *h_addr;
        struct mld2_grec *grec;
        unsigned int grec_len;
        bool changed = false;
        int i, len, num;
        int err = 0;

        if (!ipv6_mc_may_pull(skb, sizeof(*mld2r)))
                return -EINVAL;

        mld2r = (struct mld2_report *)icmp6_hdr(skb);
        num = ntohs(mld2r->mld2r_ngrec);
        len = skb_transport_offset(skb) + sizeof(*mld2r);

        for (i = 0; i < num; i++) {
                __be16 *_nsrcs, __nsrcs;
                u16 nsrcs;

                nsrcs_offset = len + offsetof(struct mld2_grec, grec_nsrcs);

                if (skb_transport_offset(skb) + ipv6_transport_len(skb) <
                    nsrcs_offset + sizeof(__nsrcs))
                        return -EINVAL;

                _nsrcs = skb_header_pointer(skb, nsrcs_offset,
                                            sizeof(__nsrcs), &__nsrcs);
                if (!_nsrcs)
                        return -EINVAL;

                nsrcs = ntohs(*_nsrcs);
                grec_len = struct_size(grec, grec_src, nsrcs);

                if (!ipv6_mc_may_pull(skb, len + grec_len))
                        return -EINVAL;

                grec = (struct mld2_grec *)(skb->data + len);
                len += grec_len;

                switch (grec->grec_type) {
                case MLD2_MODE_IS_INCLUDE:
                case MLD2_MODE_IS_EXCLUDE:
                case MLD2_CHANGE_TO_INCLUDE:
                case MLD2_CHANGE_TO_EXCLUDE:
                case MLD2_ALLOW_NEW_SOURCES:
                case MLD2_BLOCK_OLD_SOURCES:
                        break;

                default:
                        continue;
                }

                src = eth_hdr(skb)->h_source;
                if ((grec->grec_type == MLD2_CHANGE_TO_INCLUDE ||
                     grec->grec_type == MLD2_MODE_IS_INCLUDE) &&
                    nsrcs == 0) {
                        if (!pmctx || mldv1) {
                                br_ip6_multicast_leave_group(brmctx, pmctx,
                                                             &grec->grec_mca,
                                                             vid, src);
                                continue;
                        }
                } else {
                        err = br_ip6_multicast_add_group(brmctx, pmctx,
                                                         &grec->grec_mca, vid,
                                                         src, mldv1);
                        if (err)
                                break;
                }

                if (!pmctx || mldv1)
                        continue;

                spin_lock_bh(&brmctx->br->multicast_lock);
                if (!br_multicast_ctx_should_use(brmctx, pmctx))
                        goto unlock_continue;

                mdst = br_mdb_ip6_get(brmctx->br, &grec->grec_mca, vid);
                if (!mdst)
                        goto unlock_continue;
                pg = br_multicast_find_port(mdst, pmctx->port, src);
                if (!pg || (pg->flags & MDB_PG_FLAGS_PERMANENT))
                        goto unlock_continue;
                h_addr = &ipv6_hdr(skb)->saddr;
                switch (grec->grec_type) {
                case MLD2_ALLOW_NEW_SOURCES:
                        changed = br_multicast_isinc_allow(brmctx, pg, h_addr,
                                                           grec->grec_src, nsrcs,
                                                           sizeof(struct in6_addr),
                                                           grec->grec_type);
                        break;
                case MLD2_MODE_IS_INCLUDE:
                        changed = br_multicast_isinc_allow(brmctx, pg, h_addr,
                                                           grec->grec_src, nsrcs,
                                                           sizeof(struct in6_addr),
                                                           grec->grec_type);
                        break;
                case MLD2_MODE_IS_EXCLUDE:
                        changed = br_multicast_isexc(brmctx, pg, h_addr,
                                                     grec->grec_src, nsrcs,
                                                     sizeof(struct in6_addr),
                                                     grec->grec_type);
                        break;
                case MLD2_CHANGE_TO_INCLUDE:
                        changed = br_multicast_toin(brmctx, pmctx, pg, h_addr,
                                                    grec->grec_src, nsrcs,
                                                    sizeof(struct in6_addr),
                                                    grec->grec_type);
                        break;
                case MLD2_CHANGE_TO_EXCLUDE:
                        changed = br_multicast_toex(brmctx, pmctx, pg, h_addr,
                                                    grec->grec_src, nsrcs,
                                                    sizeof(struct in6_addr),
                                                    grec->grec_type);
                        break;
                case MLD2_BLOCK_OLD_SOURCES:
                        changed = br_multicast_block(brmctx, pmctx, pg, h_addr,
                                                     grec->grec_src, nsrcs,
                                                     sizeof(struct in6_addr),
                                                     grec->grec_type);
                        break;
                }
                if (changed)
                        br_mdb_notify(brmctx->br->dev, mdst, pg, RTM_NEWMDB);
unlock_continue:
                spin_unlock_bh(&brmctx->br->multicast_lock);
        }

        return err;
}
#endif
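
/* Querier election: per the IGMP/MLD querier election rules (RFC 2236/3376
 * and RFC 2710/3810) the querier with the lowest source address wins. An
 * all-zero recorded address means no querier has been seen yet, in which
 * case the candidate is accepted as well.
 */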
static bool br_multicast_select_querier(struct net_bridge_mcast *brmctx,
                                        struct net_bridge_mcast_port *pmctx,
                                        struct br_ip *saddr)
{
        int port_ifidx = pmctx ? pmctx->port->dev->ifindex : 0;
        struct timer_list *own_timer, *other_timer;
        struct bridge_mcast_querier *querier;

        switch (saddr->proto) {
        case htons(ETH_P_IP):
                querier = &brmctx->ip4_querier;
                own_timer = &brmctx->ip4_own_query.timer;
                other_timer = &brmctx->ip4_other_query.timer;
                if (!querier->addr.src.ip4 ||
                    ntohl(saddr->src.ip4) <= ntohl(querier->addr.src.ip4))
                        goto update;
                break;
#if IS_ENABLED(CONFIG_IPV6)
        case htons(ETH_P_IPV6):
                querier = &brmctx->ip6_querier;
                own_timer = &brmctx->ip6_own_query.timer;
                other_timer = &brmctx->ip6_other_query.timer;
                if (ipv6_addr_cmp(&saddr->src.ip6, &querier->addr.src.ip6) <= 0)
                        goto update;
                break;
#endif
        default:
                return false;
        }

        if (!timer_pending(own_timer) && !timer_pending(other_timer))
                goto update;

        return false;

update:
        br_multicast_update_querier(brmctx, querier, port_ifidx, saddr);

        return true;
}

static struct net_bridge_port *
__br_multicast_get_querier_port(struct net_bridge *br,
                                const struct bridge_mcast_querier *querier)
{
        int port_ifidx = READ_ONCE(querier->port_ifidx);
        struct net_bridge_port *p;
        struct net_device *dev;

        if (port_ifidx == 0)
                return NULL;

        dev = dev_get_by_index_rcu(dev_net(br->dev), port_ifidx);
        if (!dev)
                return NULL;

        p = br_port_get_rtnl_rcu(dev);
        if (!p || p->br != br)
                return NULL;

        return p;
}

size_t br_multicast_querier_state_size(void)
{
        return nla_total_size(0) +              /* nest attribute */
               nla_total_size(sizeof(__be32)) + /* BRIDGE_QUERIER_IP_ADDRESS */
               nla_total_size(sizeof(int)) +    /* BRIDGE_QUERIER_IP_PORT */
               nla_total_size_64bit(sizeof(u64)) + /* BRIDGE_QUERIER_IP_OTHER_TIMER */
#if IS_ENABLED(CONFIG_IPV6)
               nla_total_size(sizeof(struct in6_addr)) + /* BRIDGE_QUERIER_IPV6_ADDRESS */
               nla_total_size(sizeof(int)) +    /* BRIDGE_QUERIER_IPV6_PORT */
               nla_total_size_64bit(sizeof(u64)) + /* BRIDGE_QUERIER_IPV6_OTHER_TIMER */
#endif
               0;
}

/* protected by rtnl or rcu */
int br_multicast_dump_querier_state(struct sk_buff *skb,
                                    const struct net_bridge_mcast *brmctx,
                                    int nest_attr)
{
        struct bridge_mcast_querier querier = {};
        struct net_bridge_port *p;
        struct nlattr *nest;

        if (!br_opt_get(brmctx->br, BROPT_MULTICAST_ENABLED) ||
            br_multicast_ctx_vlan_global_disabled(brmctx))
                return 0;

        nest = nla_nest_start(skb, nest_attr);
        if (!nest)
                return -EMSGSIZE;

        rcu_read_lock();
        if (!brmctx->multicast_querier &&
            !timer_pending(&brmctx->ip4_other_query.timer))
                goto out_v6;

        br_multicast_read_querier(&brmctx->ip4_querier, &querier);
        if (nla_put_in_addr(skb, BRIDGE_QUERIER_IP_ADDRESS,
                            querier.addr.src.ip4)) {
                rcu_read_unlock();
                goto out_err;
        }

        p = __br_multicast_get_querier_port(brmctx->br, &querier);
        if (timer_pending(&brmctx->ip4_other_query.timer) &&
            (nla_put_u64_64bit(skb, BRIDGE_QUERIER_IP_OTHER_TIMER,
                               br_timer_value(&brmctx->ip4_other_query.timer),
                               BRIDGE_QUERIER_PAD) ||
             (p && nla_put_u32(skb, BRIDGE_QUERIER_IP_PORT, p->dev->ifindex)))) {
                rcu_read_unlock();
                goto out_err;
        }

out_v6:
#if IS_ENABLED(CONFIG_IPV6)
        if (!brmctx->multicast_querier &&
            !timer_pending(&brmctx->ip6_other_query.timer))
                goto out;

        br_multicast_read_querier(&brmctx->ip6_querier, &querier);
        if (nla_put_in6_addr(skb, BRIDGE_QUERIER_IPV6_ADDRESS,
                             &querier.addr.src.ip6)) {
                rcu_read_unlock();
                goto out_err;
        }

        p = __br_multicast_get_querier_port(brmctx->br, &querier);
        if (timer_pending(&brmctx->ip6_other_query.timer) &&
            (nla_put_u64_64bit(skb, BRIDGE_QUERIER_IPV6_OTHER_TIMER,
                               br_timer_value(&brmctx->ip6_other_query.timer),
                               BRIDGE_QUERIER_PAD) ||
             (p && nla_put_u32(skb, BRIDGE_QUERIER_IPV6_PORT,
                               p->dev->ifindex)))) {
                rcu_read_unlock();
                goto out_err;
        }
out:
#endif
        rcu_read_unlock();
        nla_nest_end(skb, nest);
        if (!nla_len(nest))
                nla_nest_cancel(skb, nest);

        return 0;

out_err:
        nla_nest_cancel(skb, nest);
        return -EMSGSIZE;
}
static void
br_multicast_update_query_timer(struct net_bridge_mcast *brmctx,
				struct bridge_mcast_other_query *query,
				unsigned long max_delay)
{
	if (!timer_pending(&query->timer))
		query->delay_time = jiffies + max_delay;

	mod_timer(&query->timer, jiffies + brmctx->multicast_querier_interval);
}

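/* Propagate the port's multicast router state to switchdev drivers; the
 * attribute is set with SWITCHDEV_F_DEFER so offloading drivers handle it
 * in deferred (sleepable) context.
 */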
static void br_port_mc_router_state_change(struct net_bridge_port *p,
					   bool is_mc_router)
{
	struct switchdev_attr attr = {
		.orig_dev = p->dev,
		.id = SWITCHDEV_ATTR_ID_PORT_MROUTER,
		.flags = SWITCHDEV_F_DEFER,
		.u.mrouter = is_mc_router,
	};

	switchdev_port_attr_set(p->dev, &attr, NULL);
}

static struct net_bridge_port *
br_multicast_rport_from_node(struct net_bridge_mcast *brmctx,
			     struct hlist_head *mc_router_list,
			     struct hlist_node *rlist)
{
	struct net_bridge_mcast_port *pmctx;

#if IS_ENABLED(CONFIG_IPV6)
	if (mc_router_list == &brmctx->ip6_mc_router_list)
		pmctx = hlist_entry(rlist, struct net_bridge_mcast_port,
				    ip6_rlist);
	else
#endif
		pmctx = hlist_entry(rlist, struct net_bridge_mcast_port,
				    ip4_rlist);

	return pmctx->port;
}

static struct hlist_node *
br_multicast_get_rport_slot(struct net_bridge_mcast *brmctx,
			    struct net_bridge_port *port,
			    struct hlist_head *mc_router_list)
{
	struct hlist_node *slot = NULL;
	struct net_bridge_port *p;
	struct hlist_node *rlist;

	hlist_for_each(rlist, mc_router_list) {
		p = br_multicast_rport_from_node(brmctx, mc_router_list, rlist);

		if ((unsigned long)port >= (unsigned long)p)
			break;

		slot = rlist;
	}

	return slot;
}

static bool br_multicast_no_router_otherpf(struct net_bridge_mcast_port *pmctx,
					   struct hlist_node *rnode)
{
#if IS_ENABLED(CONFIG_IPV6)
	if (rnode != &pmctx->ip6_rlist)
		return hlist_unhashed(&pmctx->ip6_rlist);
	else
		return hlist_unhashed(&pmctx->ip4_rlist);
#else
	return true;
#endif
}

/* Add port to router_list
 *  list is maintained ordered by pointer value
 *  and locked by br->multicast_lock and RCU
 */
static void br_multicast_add_router(struct net_bridge_mcast *brmctx,
				    struct net_bridge_mcast_port *pmctx,
				    struct hlist_node *rlist,
				    struct hlist_head *mc_router_list)
{
	struct hlist_node *slot;

	if (!hlist_unhashed(rlist))
		return;

	slot = br_multicast_get_rport_slot(brmctx, pmctx->port, mc_router_list);

	if (slot)
		hlist_add_behind_rcu(rlist, slot);
	else
		hlist_add_head_rcu(rlist, mc_router_list);

	/* For backwards compatibility for now, only notify if we
	 * switched from no IPv4/IPv6 multicast router to a new
	 * IPv4 or IPv6 multicast router.
	 */
	if (br_multicast_no_router_otherpf(pmctx, rlist)) {
		br_rtr_notify(pmctx->port->br->dev, pmctx, RTM_NEWMDB);
		br_port_mc_router_state_change(pmctx->port, true);
	}
}

/* Add port to router_list
 *  list is maintained ordered by pointer value
 *  and locked by br->multicast_lock and RCU
 */
static void br_ip4_multicast_add_router(struct net_bridge_mcast *brmctx,
					struct net_bridge_mcast_port *pmctx)
{
	br_multicast_add_router(brmctx, pmctx, &pmctx->ip4_rlist,
				&brmctx->ip4_mc_router_list);
}

/* Add port to router_list
 *  list is maintained ordered by pointer value
 *  and locked by br->multicast_lock and RCU
 */
static void br_ip6_multicast_add_router(struct net_bridge_mcast *brmctx,
					struct net_bridge_mcast_port *pmctx)
{
#if IS_ENABLED(CONFIG_IPV6)
	br_multicast_add_router(brmctx, pmctx, &pmctx->ip6_rlist,
				&brmctx->ip6_mc_router_list);
#endif
}

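/* Note a multicast router on the given port (or on the bridge itself when
 * pmctx is NULL) and (re-)arm its expiry timer, honouring the configured
 * multicast_router mode: disabled and permanent router ports are left
 * untouched.
 */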
static void br_multicast_mark_router(struct net_bridge_mcast *brmctx,
				     struct net_bridge_mcast_port *pmctx,
				     struct timer_list *timer,
				     struct hlist_node *rlist,
				     struct hlist_head *mc_router_list)
{
	unsigned long now = jiffies;

	if (!br_multicast_ctx_should_use(brmctx, pmctx))
		return;

	if (!pmctx) {
		if (brmctx->multicast_router == MDB_RTR_TYPE_TEMP_QUERY) {
			if (!br_ip4_multicast_is_router(brmctx) &&
			    !br_ip6_multicast_is_router(brmctx))
				br_mc_router_state_change(brmctx->br, true);
			mod_timer(timer, now + brmctx->multicast_querier_interval);
		}
		return;
	}

	if (pmctx->multicast_router == MDB_RTR_TYPE_DISABLED ||
	    pmctx->multicast_router == MDB_RTR_TYPE_PERM)
		return;

	br_multicast_add_router(brmctx, pmctx, rlist, mc_router_list);
	mod_timer(timer, now + brmctx->multicast_querier_interval);
}

static void br_ip4_multicast_mark_router(struct net_bridge_mcast *brmctx,
					 struct net_bridge_mcast_port *pmctx)
{
	struct timer_list *timer = &brmctx->ip4_mc_router_timer;
	struct hlist_node *rlist = NULL;

	if (pmctx) {
		timer = &pmctx->ip4_mc_router_timer;
		rlist = &pmctx->ip4_rlist;
	}

	br_multicast_mark_router(brmctx, pmctx, timer, rlist,
				 &brmctx->ip4_mc_router_list);
}

static void br_ip6_multicast_mark_router(struct net_bridge_mcast *brmctx,
					 struct net_bridge_mcast_port *pmctx)
{
#if IS_ENABLED(CONFIG_IPV6)
	struct timer_list *timer = &brmctx->ip6_mc_router_timer;
	struct hlist_node *rlist = NULL;

	if (pmctx) {
		timer = &pmctx->ip6_mc_router_timer;
		rlist = &pmctx->ip6_rlist;
	}

	br_multicast_mark_router(brmctx, pmctx, timer, rlist,
				 &brmctx->ip6_mc_router_list);
#endif
}

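/* A query was received from another querier: run querier election and, if
 * the sender wins, refresh the other-querier timer and mark the receiving
 * port as a multicast router port.
 */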
static void
br_ip4_multicast_query_received(struct net_bridge_mcast *brmctx,
				struct net_bridge_mcast_port *pmctx,
				struct bridge_mcast_other_query *query,
				struct br_ip *saddr,
				unsigned long max_delay)
{
	if (!br_multicast_select_querier(brmctx, pmctx, saddr))
		return;

	br_multicast_update_query_timer(brmctx, query, max_delay);
	br_ip4_multicast_mark_router(brmctx, pmctx);
}

#if IS_ENABLED(CONFIG_IPV6)
static void
br_ip6_multicast_query_received(struct net_bridge_mcast *brmctx,
				struct net_bridge_mcast_port *pmctx,
				struct bridge_mcast_other_query *query,
				struct br_ip *saddr,
				unsigned long max_delay)
{
	if (!br_multicast_select_querier(brmctx, pmctx, saddr))
		return;

	br_multicast_update_query_timer(brmctx, query, max_delay);
	br_ip6_multicast_mark_router(brmctx, pmctx);
}
#endif

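/* Process an IGMPv2/v3 query: derive the maximum response delay from the
 * header, hand general queries (group == 0) to querier election and, for
 * group-specific queries, lower the matching entry and port group timers
 * so that non-responding hosts expire within last_member_count intervals.
 */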
static void br_ip4_multicast_query(struct net_bridge_mcast *brmctx,
				   struct net_bridge_mcast_port *pmctx,
				   struct sk_buff *skb,
				   u16 vid)
{
	unsigned int transport_len = ip_transport_len(skb);
	const struct iphdr *iph = ip_hdr(skb);
	struct igmphdr *ih = igmp_hdr(skb);
	struct net_bridge_mdb_entry *mp;
	struct igmpv3_query *ih3;
	struct net_bridge_port_group *p;
	struct net_bridge_port_group __rcu **pp;
	struct br_ip saddr = {};
	unsigned long max_delay;
	unsigned long now = jiffies;
	__be32 group;

	spin_lock(&brmctx->br->multicast_lock);
	if (!br_multicast_ctx_should_use(brmctx, pmctx))
		goto out;

	group = ih->group;

	if (transport_len == sizeof(*ih)) {
		max_delay = ih->code * (HZ / IGMP_TIMER_SCALE);

		if (!max_delay) {
			max_delay = 10 * HZ;
			group = 0;
		}
	} else if (transport_len >= sizeof(*ih3)) {
		ih3 = igmpv3_query_hdr(skb);
		if (ih3->nsrcs ||
		    (brmctx->multicast_igmp_version == 3 && group &&
		     ih3->suppress))
			goto out;

		max_delay = ih3->code ?
			    IGMPV3_MRC(ih3->code) * (HZ / IGMP_TIMER_SCALE) : 1;
	} else {
		goto out;
	}

	if (!group) {
		saddr.proto = htons(ETH_P_IP);
		saddr.src.ip4 = iph->saddr;

		br_ip4_multicast_query_received(brmctx, pmctx,
						&brmctx->ip4_other_query,
						&saddr, max_delay);
		goto out;
	}

	mp = br_mdb_ip4_get(brmctx->br, group, vid);
	if (!mp)
		goto out;

	max_delay *= brmctx->multicast_last_member_count;
	if (mp->host_joined &&
	    (timer_pending(&mp->timer) ?
	     time_after(mp->timer.expires, now + max_delay) :
	     try_to_del_timer_sync(&mp->timer) >= 0))
		mod_timer(&mp->timer, now + max_delay);

	for (pp = &mp->ports;
	     (p = mlock_dereference(*pp, brmctx->br)) != NULL;
	     pp = &p->next) {
		if (timer_pending(&p->timer) ?
		    time_after(p->timer.expires, now + max_delay) :
		    try_to_del_timer_sync(&p->timer) >= 0 &&
		    (brmctx->multicast_igmp_version == 2 ||
		     p->filter_mode == MCAST_EXCLUDE))
			mod_timer(&p->timer, now + max_delay);
	}

out:
	spin_unlock(&brmctx->br->multicast_lock);
}

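/* MLDv1/v2 counterpart of the above: validate the message with
 * pskb_may_pull() before touching it, then apply the same general vs.
 * group-specific query handling for IPv6 groups.
 */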
#if IS_ENABLED(CONFIG_IPV6)
static int br_ip6_multicast_query(struct net_bridge_mcast *brmctx,
				  struct net_bridge_mcast_port *pmctx,
				  struct sk_buff *skb,
				  u16 vid)
{
	unsigned int transport_len = ipv6_transport_len(skb);
	struct mld_msg *mld;
	struct net_bridge_mdb_entry *mp;
	struct mld2_query *mld2q;
	struct net_bridge_port_group *p;
	struct net_bridge_port_group __rcu **pp;
	struct br_ip saddr = {};
	unsigned long max_delay;
	unsigned long now = jiffies;
	unsigned int offset = skb_transport_offset(skb);
	const struct in6_addr *group = NULL;
	bool is_general_query;
	int err = 0;

	spin_lock(&brmctx->br->multicast_lock);
	if (!br_multicast_ctx_should_use(brmctx, pmctx))
		goto out;

	if (transport_len == sizeof(*mld)) {
		if (!pskb_may_pull(skb, offset + sizeof(*mld))) {
			err = -EINVAL;
			goto out;
		}
		mld = (struct mld_msg *) icmp6_hdr(skb);
		max_delay = msecs_to_jiffies(ntohs(mld->mld_maxdelay));
		if (max_delay)
			group = &mld->mld_mca;
	} else {
		if (!pskb_may_pull(skb, offset + sizeof(*mld2q))) {
			err = -EINVAL;
			goto out;
		}
		mld2q = (struct mld2_query *)icmp6_hdr(skb);
		if (!mld2q->mld2q_nsrcs)
			group = &mld2q->mld2q_mca;
		if (brmctx->multicast_mld_version == 2 &&
		    !ipv6_addr_any(&mld2q->mld2q_mca) &&
		    mld2q->mld2q_suppress)
			goto out;

		max_delay = max(msecs_to_jiffies(mldv2_mrc(mld2q)), 1UL);
	}

	is_general_query = group && ipv6_addr_any(group);

	if (is_general_query) {
		saddr.proto = htons(ETH_P_IPV6);
		saddr.src.ip6 = ipv6_hdr(skb)->saddr;

		br_ip6_multicast_query_received(brmctx, pmctx,
						&brmctx->ip6_other_query,
						&saddr, max_delay);
		goto out;
	} else if (!group) {
		goto out;
	}

	mp = br_mdb_ip6_get(brmctx->br, group, vid);
	if (!mp)
		goto out;

	max_delay *= brmctx->multicast_last_member_count;
	if (mp->host_joined &&
	    (timer_pending(&mp->timer) ?
	     time_after(mp->timer.expires, now + max_delay) :
	     try_to_del_timer_sync(&mp->timer) >= 0))
		mod_timer(&mp->timer, now + max_delay);

	for (pp = &mp->ports;
	     (p = mlock_dereference(*pp, brmctx->br)) != NULL;
	     pp = &p->next) {
		if (timer_pending(&p->timer) ?
		    time_after(p->timer.expires, now + max_delay) :
		    try_to_del_timer_sync(&p->timer) >= 0 &&
		    (brmctx->multicast_mld_version == 1 ||
		     p->filter_mode == MCAST_EXCLUDE))
			mod_timer(&p->timer, now + max_delay);
	}

out:
	spin_unlock(&brmctx->br->multicast_lock);
	return err;
}
#endif

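/* Handle an IGMP leave / MLD done message: with fast-leave the matching
 * port group is deleted immediately; otherwise, when no other querier is
 * active, optionally send a group-specific query and shorten the group and
 * port group timers to last_member_count * last_member_interval.
 */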
static void
br_multicast_leave_group(struct net_bridge_mcast *brmctx,
			 struct net_bridge_mcast_port *pmctx,
			 struct br_ip *group,
			 struct bridge_mcast_other_query *other_query,
			 struct bridge_mcast_own_query *own_query,
			 const unsigned char *src)
{
	struct net_bridge_mdb_entry *mp;
	struct net_bridge_port_group *p;
	unsigned long now;
	unsigned long time;

	spin_lock(&brmctx->br->multicast_lock);
	if (!br_multicast_ctx_should_use(brmctx, pmctx))
		goto out;

	mp = br_mdb_ip_get(brmctx->br, group);
	if (!mp)
		goto out;

	if (pmctx && (pmctx->port->flags & BR_MULTICAST_FAST_LEAVE)) {
		struct net_bridge_port_group __rcu **pp;

		for (pp = &mp->ports;
		     (p = mlock_dereference(*pp, brmctx->br)) != NULL;
		     pp = &p->next) {
			if (!br_port_group_equal(p, pmctx->port, src))
				continue;

			if (p->flags & MDB_PG_FLAGS_PERMANENT)
				break;

			p->flags |= MDB_PG_FLAGS_FAST_LEAVE;
			br_multicast_del_pg(mp, p, pp);
		}
		goto out;
	}

	if (timer_pending(&other_query->timer))
		goto out;

	if (brmctx->multicast_querier) {
		__br_multicast_send_query(brmctx, pmctx, NULL, NULL, &mp->addr,
					  false, 0, NULL);

		time = jiffies + brmctx->multicast_last_member_count *
				 brmctx->multicast_last_member_interval;

		mod_timer(&own_query->timer, time);

		for (p = mlock_dereference(mp->ports, brmctx->br);
		     p != NULL && pmctx != NULL;
		     p = mlock_dereference(p->next, brmctx->br)) {
			if (!br_port_group_equal(p, pmctx->port, src))
				continue;

			if (!hlist_unhashed(&p->mglist) &&
			    (timer_pending(&p->timer) ?
			     time_after(p->timer.expires, time) :
			     try_to_del_timer_sync(&p->timer) >= 0)) {
				mod_timer(&p->timer, time);
			}

			break;
		}
	}

	now = jiffies;
	time = now + brmctx->multicast_last_member_count *
		     brmctx->multicast_last_member_interval;
	if (!pmctx) {
		if (mp->host_joined &&
		    (timer_pending(&mp->timer) ?
		     time_after(mp->timer.expires, time) :
		     try_to_del_timer_sync(&mp->timer) >= 0)) {
			mod_timer(&mp->timer, time);
		}

		goto out;
	}

	for (p = mlock_dereference(mp->ports, brmctx->br);
	     p != NULL;
	     p = mlock_dereference(p->next, brmctx->br)) {
		if (p->key.port != pmctx->port)
			continue;

		if (!hlist_unhashed(&p->mglist) &&
		    (timer_pending(&p->timer) ?
		     time_after(p->timer.expires, time) :
		     try_to_del_timer_sync(&p->timer) >= 0)) {
			mod_timer(&p->timer, time);
		}

		break;
	}
out:
	spin_unlock(&brmctx->br->multicast_lock);
}

static void br_ip4_multicast_leave_group(struct net_bridge_mcast *brmctx,
					 struct net_bridge_mcast_port *pmctx,
					 __be32 group,
					 __u16 vid,
					 const unsigned char *src)
{
	struct br_ip br_group;
	struct bridge_mcast_own_query *own_query;

	if (ipv4_is_local_multicast(group))
		return;

	own_query = pmctx ? &pmctx->ip4_own_query : &brmctx->ip4_own_query;

	memset(&br_group, 0, sizeof(br_group));
	br_group.dst.ip4 = group;
	br_group.proto = htons(ETH_P_IP);
	br_group.vid = vid;

	br_multicast_leave_group(brmctx, pmctx, &br_group,
				 &brmctx->ip4_other_query,
				 own_query, src);
}

#if IS_ENABLED(CONFIG_IPV6)
static void br_ip6_multicast_leave_group(struct net_bridge_mcast *brmctx,
					 struct net_bridge_mcast_port *pmctx,
					 const struct in6_addr *group,
					 __u16 vid,
					 const unsigned char *src)
{
	struct br_ip br_group;
	struct bridge_mcast_own_query *own_query;

	if (ipv6_addr_is_ll_all_nodes(group))
		return;

	own_query = pmctx ? &pmctx->ip6_own_query : &brmctx->ip6_own_query;

	memset(&br_group, 0, sizeof(br_group));
	br_group.dst.ip6 = *group;
	br_group.proto = htons(ETH_P_IPV6);
	br_group.vid = vid;

	br_multicast_leave_group(brmctx, pmctx, &br_group,
				 &brmctx->ip6_other_query,
				 own_query, src);
}
#endif

static void br_multicast_err_count(const struct net_bridge *br,
				   const struct net_bridge_port *p,
				   __be16 proto)
{
	struct bridge_mcast_stats __percpu *stats;
	struct bridge_mcast_stats *pstats;

	if (!br_opt_get(br, BROPT_MULTICAST_STATS_ENABLED))
		return;

	if (p)
		stats = p->mcast_stats;
	else
		stats = br->mcast_stats;
	if (WARN_ON(!stats))
		return;

	pstats = this_cpu_ptr(stats);

	u64_stats_update_begin(&pstats->syncp);
	switch (proto) {
	case htons(ETH_P_IP):
		pstats->mstats.igmp_parse_errors++;
		break;
#if IS_ENABLED(CONFIG_IPV6)
	case htons(ETH_P_IPV6):
		pstats->mstats.mld_parse_errors++;
		break;
#endif
	}
	u64_stats_update_end(&pstats->syncp);
}

static void br_multicast_pim(struct net_bridge_mcast *brmctx,
			     struct net_bridge_mcast_port *pmctx,
			     const struct sk_buff *skb)
{
	unsigned int offset = skb_transport_offset(skb);
	struct pimhdr *pimhdr, _pimhdr;

	pimhdr = skb_header_pointer(skb, offset, sizeof(_pimhdr), &_pimhdr);
	if (!pimhdr || pim_hdr_version(pimhdr) != PIM_VERSION ||
	    pim_hdr_type(pimhdr) != PIM_TYPE_HELLO)
		return;

	spin_lock(&brmctx->br->multicast_lock);
	br_ip4_multicast_mark_router(brmctx, pmctx);
	spin_unlock(&brmctx->br->multicast_lock);
}

static int br_ip4_multicast_mrd_rcv(struct net_bridge_mcast *brmctx,
				    struct net_bridge_mcast_port *pmctx,
				    struct sk_buff *skb)
{
	if (ip_hdr(skb)->protocol != IPPROTO_IGMP ||
	    igmp_hdr(skb)->type != IGMP_MRDISC_ADV)
		return -ENOMSG;

	spin_lock(&brmctx->br->multicast_lock);
	br_ip4_multicast_mark_router(brmctx, pmctx);
	spin_unlock(&brmctx->br->multicast_lock);

	return 0;
}

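/* IPv4 demux for snooped traffic: non-IGMP packets only get their
 * forwarding flags set (plus PIM hello and multicast router discovery
 * handling), while IGMP reports, queries and leaves are dispatched to the
 * handlers above and counted in the per-CPU stats.
 */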
static int br_multicast_ipv4_rcv(struct net_bridge_mcast *brmctx,
				 struct net_bridge_mcast_port *pmctx,
				 struct sk_buff *skb,
				 u16 vid)
{
	struct net_bridge_port *p = pmctx ? pmctx->port : NULL;
	const unsigned char *src;
	struct igmphdr *ih;
	int err;

	err = ip_mc_check_igmp(skb);

	if (err == -ENOMSG) {
		if (!ipv4_is_local_multicast(ip_hdr(skb)->daddr)) {
			BR_INPUT_SKB_CB(skb)->mrouters_only = 1;
		} else if (pim_ipv4_all_pim_routers(ip_hdr(skb)->daddr)) {
			if (ip_hdr(skb)->protocol == IPPROTO_PIM)
				br_multicast_pim(brmctx, pmctx, skb);
		} else if (ipv4_is_all_snoopers(ip_hdr(skb)->daddr)) {
			br_ip4_multicast_mrd_rcv(brmctx, pmctx, skb);
		}

		return 0;
	} else if (err < 0) {
		br_multicast_err_count(brmctx->br, p, skb->protocol);
		return err;
	}

	ih = igmp_hdr(skb);
	src = eth_hdr(skb)->h_source;
	BR_INPUT_SKB_CB(skb)->igmp = ih->type;

	switch (ih->type) {
	case IGMP_HOST_MEMBERSHIP_REPORT:
	case IGMPV2_HOST_MEMBERSHIP_REPORT:
		BR_INPUT_SKB_CB(skb)->mrouters_only = 1;
		err = br_ip4_multicast_add_group(brmctx, pmctx, ih->group, vid,
						 src, true);
		break;
	case IGMPV3_HOST_MEMBERSHIP_REPORT:
		err = br_ip4_multicast_igmp3_report(brmctx, pmctx, skb, vid);
		break;
	case IGMP_HOST_MEMBERSHIP_QUERY:
		br_ip4_multicast_query(brmctx, pmctx, skb, vid);
		break;
	case IGMP_HOST_LEAVE_MESSAGE:
		br_ip4_multicast_leave_group(brmctx, pmctx, ih->group, vid, src);
		break;
	}

	br_multicast_count(brmctx->br, p, skb, BR_INPUT_SKB_CB(skb)->igmp,
			   BR_MCAST_DIR_RX);

	return err;
}

#if IS_ENABLED(CONFIG_IPV6)
static void br_ip6_multicast_mrd_rcv(struct net_bridge_mcast *brmctx,
				     struct net_bridge_mcast_port *pmctx,
				     struct sk_buff *skb)
{
	if (icmp6_hdr(skb)->icmp6_type != ICMPV6_MRDISC_ADV)
		return;

	spin_lock(&brmctx->br->multicast_lock);
	br_ip6_multicast_mark_router(brmctx, pmctx);
	spin_unlock(&brmctx->br->multicast_lock);
}

static int br_multicast_ipv6_rcv(struct net_bridge_mcast *brmctx,
				 struct net_bridge_mcast_port *pmctx,
				 struct sk_buff *skb,
				 u16 vid)
{
	struct net_bridge_port *p = pmctx ? pmctx->port : NULL;
	const unsigned char *src;
	struct mld_msg *mld;
	int err;

	err = ipv6_mc_check_mld(skb);

	if (err == -ENOMSG || err == -ENODATA) {
		if (!ipv6_addr_is_ll_all_nodes(&ipv6_hdr(skb)->daddr))
			BR_INPUT_SKB_CB(skb)->mrouters_only = 1;
		if (err == -ENODATA &&
		    ipv6_addr_is_all_snoopers(&ipv6_hdr(skb)->daddr))
			br_ip6_multicast_mrd_rcv(brmctx, pmctx, skb);

		return 0;
	} else if (err < 0) {
		br_multicast_err_count(brmctx->br, p, skb->protocol);
		return err;
	}

	mld = (struct mld_msg *)skb_transport_header(skb);
	BR_INPUT_SKB_CB(skb)->igmp = mld->mld_type;

	switch (mld->mld_type) {
	case ICMPV6_MGM_REPORT:
		src = eth_hdr(skb)->h_source;
		BR_INPUT_SKB_CB(skb)->mrouters_only = 1;
		err = br_ip6_multicast_add_group(brmctx, pmctx, &mld->mld_mca,
						 vid, src, true);
		break;
	case ICMPV6_MLD2_REPORT:
		err = br_ip6_multicast_mld2_report(brmctx, pmctx, skb, vid);
		break;
	case ICMPV6_MGM_QUERY:
		err = br_ip6_multicast_query(brmctx, pmctx, skb, vid);
		break;
	case ICMPV6_MGM_REDUCTION:
		src = eth_hdr(skb)->h_source;
		br_ip6_multicast_leave_group(brmctx, pmctx, &mld->mld_mca, vid,
					     src);
		break;
	}

	br_multicast_count(brmctx->br, p, skb, BR_INPUT_SKB_CB(skb)->igmp,
			   BR_MCAST_DIR_RX);

	return err;
}
#endif

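/* Snooping entry point called from the bridge input path. When per-vlan
 * snooping is enabled, *brmctx/*pmctx are redirected to the vlan's own
 * multicast contexts before the protocol-specific handler runs.
 */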
int br_multicast_rcv(struct net_bridge_mcast **brmctx,
		     struct net_bridge_mcast_port **pmctx,
		     struct net_bridge_vlan *vlan,
		     struct sk_buff *skb, u16 vid)
{
	int ret = 0;

	BR_INPUT_SKB_CB(skb)->igmp = 0;
	BR_INPUT_SKB_CB(skb)->mrouters_only = 0;

	if (!br_opt_get((*brmctx)->br, BROPT_MULTICAST_ENABLED))
		return 0;

	if (br_opt_get((*brmctx)->br, BROPT_MCAST_VLAN_SNOOPING_ENABLED) && vlan) {
		const struct net_bridge_vlan *masterv;

		/* the vlan has the master flag set only when transmitting
		 * through the bridge device
		 */
		if (br_vlan_is_master(vlan)) {
			masterv = vlan;
			*brmctx = &vlan->br_mcast_ctx;
			*pmctx = NULL;
		} else {
			masterv = vlan->brvlan;
			*brmctx = &vlan->brvlan->br_mcast_ctx;
			*pmctx = &vlan->port_mcast_ctx;
		}

		if (!(masterv->priv_flags & BR_VLFLAG_GLOBAL_MCAST_ENABLED))
			return 0;
	}

	switch (skb->protocol) {
	case htons(ETH_P_IP):
		ret = br_multicast_ipv4_rcv(*brmctx, *pmctx, skb, vid);
		break;
#if IS_ENABLED(CONFIG_IPV6)
	case htons(ETH_P_IPV6):
		ret = br_multicast_ipv6_rcv(*brmctx, *pmctx, skb, vid);
		break;
#endif
	}

	return ret;
}

static void br_multicast_query_expired(struct net_bridge_mcast *brmctx,
				       struct bridge_mcast_own_query *query,
				       struct bridge_mcast_querier *querier)
{
	spin_lock(&brmctx->br->multicast_lock);
	if (br_multicast_ctx_vlan_disabled(brmctx))
		goto out;

	if (query->startup_sent < brmctx->multicast_startup_query_count)
		query->startup_sent++;

	br_multicast_send_query(brmctx, NULL, query);
out:
	spin_unlock(&brmctx->br->multicast_lock);
}

static void br_ip4_multicast_query_expired(struct timer_list *t)
{
	struct net_bridge_mcast *brmctx = from_timer(brmctx, t,
						     ip4_own_query.timer);

	br_multicast_query_expired(brmctx, &brmctx->ip4_own_query,
				   &brmctx->ip4_querier);
}

#if IS_ENABLED(CONFIG_IPV6)
static void br_ip6_multicast_query_expired(struct timer_list *t)
{
	struct net_bridge_mcast *brmctx = from_timer(brmctx, t,
						     ip6_own_query.timer);

	br_multicast_query_expired(brmctx, &brmctx->ip6_own_query,
				   &brmctx->ip6_querier);
}
#endif

static void br_multicast_gc_work(struct work_struct *work)
{
	struct net_bridge *br = container_of(work, struct net_bridge,
					     mcast_gc_work);
	HLIST_HEAD(deleted_head);

	spin_lock_bh(&br->multicast_lock);
	hlist_move_list(&br->mcast_gc_list, &deleted_head);
	spin_unlock_bh(&br->multicast_lock);

	br_multicast_gc(&deleted_head);
}

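/* Initialize a bridge or per-vlan multicast context with the protocol
 * default timing (125s query interval, robustness count of 2, etc.) and
 * wire up the router, other-querier and own-query timers.
 */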
void br_multicast_ctx_init(struct net_bridge *br,
			   struct net_bridge_vlan *vlan,
			   struct net_bridge_mcast *brmctx)
{
	brmctx->br = br;
	brmctx->vlan = vlan;
	brmctx->multicast_router = MDB_RTR_TYPE_TEMP_QUERY;
	brmctx->multicast_last_member_count = 2;
	brmctx->multicast_startup_query_count = 2;

	brmctx->multicast_last_member_interval = HZ;
	brmctx->multicast_query_response_interval = 10 * HZ;
	brmctx->multicast_startup_query_interval = 125 * HZ / 4;
	brmctx->multicast_query_interval = 125 * HZ;
	brmctx->multicast_querier_interval = 255 * HZ;
	brmctx->multicast_membership_interval = 260 * HZ;

	brmctx->ip4_other_query.delay_time = 0;
	brmctx->ip4_querier.port_ifidx = 0;
	seqcount_spinlock_init(&brmctx->ip4_querier.seq, &br->multicast_lock);
	brmctx->multicast_igmp_version = 2;
#if IS_ENABLED(CONFIG_IPV6)
	brmctx->multicast_mld_version = 1;
	brmctx->ip6_other_query.delay_time = 0;
	brmctx->ip6_querier.port_ifidx = 0;
	seqcount_spinlock_init(&brmctx->ip6_querier.seq, &br->multicast_lock);
#endif

	timer_setup(&brmctx->ip4_mc_router_timer,
		    br_ip4_multicast_local_router_expired, 0);
	timer_setup(&brmctx->ip4_other_query.timer,
		    br_ip4_multicast_querier_expired, 0);
	timer_setup(&brmctx->ip4_own_query.timer,
		    br_ip4_multicast_query_expired, 0);
#if IS_ENABLED(CONFIG_IPV6)
	timer_setup(&brmctx->ip6_mc_router_timer,
		    br_ip6_multicast_local_router_expired, 0);
	timer_setup(&brmctx->ip6_other_query.timer,
		    br_ip6_multicast_querier_expired, 0);
	timer_setup(&brmctx->ip6_own_query.timer,
		    br_ip6_multicast_query_expired, 0);
#endif
}

void br_multicast_ctx_deinit(struct net_bridge_mcast *brmctx)
{
	__br_multicast_stop(brmctx);
}

void br_multicast_init(struct net_bridge *br)
{
	br->hash_max = BR_MULTICAST_DEFAULT_HASH_MAX;

	br_multicast_ctx_init(br, NULL, &br->multicast_ctx);

	br_opt_toggle(br, BROPT_MULTICAST_ENABLED, true);
	br_opt_toggle(br, BROPT_HAS_IPV6_ADDR, true);

	spin_lock_init(&br->multicast_lock);
	INIT_HLIST_HEAD(&br->mdb_list);
	INIT_HLIST_HEAD(&br->mcast_gc_list);
	INIT_WORK(&br->mcast_gc_work, br_multicast_gc_work);
}

static void br_ip4_multicast_join_snoopers(struct net_bridge *br)
{
	struct in_device *in_dev = in_dev_get(br->dev);

	if (!in_dev)
		return;

	__ip_mc_inc_group(in_dev, htonl(INADDR_ALLSNOOPERS_GROUP), GFP_ATOMIC);
	in_dev_put(in_dev);
}

#if IS_ENABLED(CONFIG_IPV6)
static void br_ip6_multicast_join_snoopers(struct net_bridge *br)
{
	struct in6_addr addr;

	ipv6_addr_set(&addr, htonl(0xff020000), 0, 0, htonl(0x6a));
	ipv6_dev_mc_inc(br->dev, &addr);
}
#else
static inline void br_ip6_multicast_join_snoopers(struct net_bridge *br)
{
}
#endif

void br_multicast_join_snoopers(struct net_bridge *br)
{
	br_ip4_multicast_join_snoopers(br);
	br_ip6_multicast_join_snoopers(br);
}

static void br_ip4_multicast_leave_snoopers(struct net_bridge *br)
{
	struct in_device *in_dev = in_dev_get(br->dev);

	if (WARN_ON(!in_dev))
		return;

	__ip_mc_dec_group(in_dev, htonl(INADDR_ALLSNOOPERS_GROUP), GFP_ATOMIC);
	in_dev_put(in_dev);
}

#if IS_ENABLED(CONFIG_IPV6)
static void br_ip6_multicast_leave_snoopers(struct net_bridge *br)
{
	struct in6_addr addr;

	ipv6_addr_set(&addr, htonl(0xff020000), 0, 0, htonl(0x6a));
	ipv6_dev_mc_dec(br->dev, &addr);
}
#else
static inline void br_ip6_multicast_leave_snoopers(struct net_bridge *br)
{
}
#endif

void br_multicast_leave_snoopers(struct net_bridge *br)
{
	br_ip4_multicast_leave_snoopers(br);
	br_ip6_multicast_leave_snoopers(br);
}

static void __br_multicast_open_query(struct net_bridge *br,
				      struct bridge_mcast_own_query *query)
{
	query->startup_sent = 0;

	if (!br_opt_get(br, BROPT_MULTICAST_ENABLED))
		return;

	mod_timer(&query->timer, jiffies);
}

static void __br_multicast_open(struct net_bridge_mcast *brmctx)
{
	__br_multicast_open_query(brmctx->br, &brmctx->ip4_own_query);
#if IS_ENABLED(CONFIG_IPV6)
	__br_multicast_open_query(brmctx->br, &brmctx->ip6_own_query);
#endif
}

void br_multicast_open(struct net_bridge *br)
{
	ASSERT_RTNL();

	if (br_opt_get(br, BROPT_MCAST_VLAN_SNOOPING_ENABLED)) {
		struct net_bridge_vlan_group *vg;
		struct net_bridge_vlan *vlan;

		vg = br_vlan_group(br);
		if (vg) {
			list_for_each_entry(vlan, &vg->vlan_list, vlist) {
				struct net_bridge_mcast *brmctx;

				brmctx = &vlan->br_mcast_ctx;
				if (br_vlan_is_brentry(vlan) &&
				    !br_multicast_ctx_vlan_disabled(brmctx))
					__br_multicast_open(&vlan->br_mcast_ctx);
			}
		}
	} else {
		__br_multicast_open(&br->multicast_ctx);
	}
}

static void __br_multicast_stop(struct net_bridge_mcast *brmctx)
{
	del_timer_sync(&brmctx->ip4_mc_router_timer);
	del_timer_sync(&brmctx->ip4_other_query.timer);
	del_timer_sync(&brmctx->ip4_own_query.timer);
#if IS_ENABLED(CONFIG_IPV6)
	del_timer_sync(&brmctx->ip6_mc_router_timer);
	del_timer_sync(&brmctx->ip6_other_query.timer);
	del_timer_sync(&brmctx->ip6_own_query.timer);
#endif
}

void br_multicast_toggle_one_vlan(struct net_bridge_vlan *vlan, bool on)
{
	struct net_bridge *br;

	/* it's okay to check for the flag without the multicast lock because it
	 * can only change under RTNL -> multicast_lock, we need the latter to
	 * sync with timers and packets
	 */
	if (on == !!(vlan->priv_flags & BR_VLFLAG_MCAST_ENABLED))
		return;

	if (br_vlan_is_master(vlan)) {
		br = vlan->br;

		if (!br_vlan_is_brentry(vlan) ||
		    (on &&
		     br_multicast_ctx_vlan_global_disabled(&vlan->br_mcast_ctx)))
			return;

		spin_lock_bh(&br->multicast_lock);
		vlan->priv_flags ^= BR_VLFLAG_MCAST_ENABLED;
		spin_unlock_bh(&br->multicast_lock);

		if (on)
			__br_multicast_open(&vlan->br_mcast_ctx);
		else
			__br_multicast_stop(&vlan->br_mcast_ctx);
	} else {
		struct net_bridge_mcast *brmctx;

		brmctx = br_multicast_port_ctx_get_global(&vlan->port_mcast_ctx);
		if (on && br_multicast_ctx_vlan_global_disabled(brmctx))
			return;

		br = vlan->port->br;
		spin_lock_bh(&br->multicast_lock);
		vlan->priv_flags ^= BR_VLFLAG_MCAST_ENABLED;
		if (on)
			__br_multicast_enable_port_ctx(&vlan->port_mcast_ctx);
		else
			__br_multicast_disable_port_ctx(&vlan->port_mcast_ctx);
		spin_unlock_bh(&br->multicast_lock);
	}
}

static void br_multicast_toggle_vlan(struct net_bridge_vlan *vlan, bool on)
{
	struct net_bridge_port *p;

	if (WARN_ON_ONCE(!br_vlan_is_master(vlan)))
		return;

	list_for_each_entry(p, &vlan->br->port_list, list) {
		struct net_bridge_vlan *vport;

		vport = br_vlan_find(nbp_vlan_group(p), vlan->vid);
		if (!vport)
			continue;
		br_multicast_toggle_one_vlan(vport, on);
	}

	if (br_vlan_is_brentry(vlan))
		br_multicast_toggle_one_vlan(vlan, on);
}

int br_multicast_toggle_vlan_snooping(struct net_bridge *br, bool on,
				      struct netlink_ext_ack *extack)
{
	struct net_bridge_vlan_group *vg;
	struct net_bridge_vlan *vlan;
	struct net_bridge_port *p;

	if (br_opt_get(br, BROPT_MCAST_VLAN_SNOOPING_ENABLED) == on)
		return 0;

	if (on && !br_opt_get(br, BROPT_VLAN_ENABLED)) {
		NL_SET_ERR_MSG_MOD(extack, "Cannot enable multicast vlan snooping with vlan filtering disabled");
		return -EINVAL;
	}

	vg = br_vlan_group(br);
	if (!vg)
		return 0;

	br_opt_toggle(br, BROPT_MCAST_VLAN_SNOOPING_ENABLED, on);

	/* disable/enable non-vlan mcast contexts based on vlan snooping */
	if (on)
		__br_multicast_stop(&br->multicast_ctx);
	else
		__br_multicast_open(&br->multicast_ctx);
	list_for_each_entry(p, &br->port_list, list) {
		if (on)
			br_multicast_disable_port(p);
		else
			br_multicast_enable_port(p);
	}

	list_for_each_entry(vlan, &vg->vlan_list, vlist)
		br_multicast_toggle_vlan(vlan, on);

	return 0;
}

bool br_multicast_toggle_global_vlan(struct net_bridge_vlan *vlan, bool on)
{
	ASSERT_RTNL();

	/* BR_VLFLAG_GLOBAL_MCAST_ENABLED relies on eventual consistency and
	 * requires only RTNL to change
	 */
	if (on == !!(vlan->priv_flags & BR_VLFLAG_GLOBAL_MCAST_ENABLED))
		return false;

	vlan->priv_flags ^= BR_VLFLAG_GLOBAL_MCAST_ENABLED;
	br_multicast_toggle_vlan(vlan, on);

	return true;
}

void br_multicast_stop(struct net_bridge *br)
{
	ASSERT_RTNL();

	if (br_opt_get(br, BROPT_MCAST_VLAN_SNOOPING_ENABLED)) {
		struct net_bridge_vlan_group *vg;
		struct net_bridge_vlan *vlan;

		vg = br_vlan_group(br);
		if (vg) {
			list_for_each_entry(vlan, &vg->vlan_list, vlist) {
				struct net_bridge_mcast *brmctx;

				brmctx = &vlan->br_mcast_ctx;
				if (br_vlan_is_brentry(vlan) &&
				    !br_multicast_ctx_vlan_disabled(brmctx))
					__br_multicast_stop(&vlan->br_mcast_ctx);
			}
		}
	} else {
		__br_multicast_stop(&br->multicast_ctx);
	}
}

void br_multicast_dev_del(struct net_bridge *br)
{
	struct net_bridge_mdb_entry *mp;
	HLIST_HEAD(deleted_head);
	struct hlist_node *tmp;

	spin_lock_bh(&br->multicast_lock);
	hlist_for_each_entry_safe(mp, tmp, &br->mdb_list, mdb_node)
		br_multicast_del_mdb_entry(mp);
	hlist_move_list(&br->mcast_gc_list, &deleted_head);
	spin_unlock_bh(&br->multicast_lock);

	br_multicast_ctx_deinit(&br->multicast_ctx);
	br_multicast_gc(&deleted_head);
	cancel_work_sync(&br->mcast_gc_work);

	rcu_barrier();
}

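/* Set the bridge-level multicast_router mode; disabled/permanent modes
 * stop the dynamic router timers, while temp-query re-enables learning.
 */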
int br_multicast_set_router(struct net_bridge_mcast *brmctx, unsigned long val)
{
	int err = -EINVAL;

	spin_lock_bh(&brmctx->br->multicast_lock);

	switch (val) {
	case MDB_RTR_TYPE_DISABLED:
	case MDB_RTR_TYPE_PERM:
		br_mc_router_state_change(brmctx->br, val == MDB_RTR_TYPE_PERM);
		del_timer(&brmctx->ip4_mc_router_timer);
#if IS_ENABLED(CONFIG_IPV6)
		del_timer(&brmctx->ip6_mc_router_timer);
#endif
		brmctx->multicast_router = val;
		err = 0;
		break;
	case MDB_RTR_TYPE_TEMP_QUERY:
		if (brmctx->multicast_router != MDB_RTR_TYPE_TEMP_QUERY)
			br_mc_router_state_change(brmctx->br, false);
		brmctx->multicast_router = val;
		err = 0;
		break;
	}

	spin_unlock_bh(&brmctx->br->multicast_lock);

	return err;
}

static void
br_multicast_rport_del_notify(struct net_bridge_mcast_port *pmctx, bool deleted)
{
	if (!deleted)
		return;

	/* For backwards compatibility for now, only notify if there is
	 * no multicast router anymore for both IPv4 and IPv6.
	 */
	if (!hlist_unhashed(&pmctx->ip4_rlist))
		return;
#if IS_ENABLED(CONFIG_IPV6)
	if (!hlist_unhashed(&pmctx->ip6_rlist))
		return;
#endif

	br_rtr_notify(pmctx->port->br->dev, pmctx, RTM_DELMDB);
	br_port_mc_router_state_change(pmctx->port, false);

	/* don't allow timer refresh */
	if (pmctx->multicast_router == MDB_RTR_TYPE_TEMP)
		pmctx->multicast_router = MDB_RTR_TYPE_TEMP_QUERY;
}

int br_multicast_set_port_router(struct net_bridge_mcast_port *pmctx,
				 unsigned long val)
{
	struct net_bridge_mcast *brmctx;
	unsigned long now = jiffies;
	int err = -EINVAL;
	bool del = false;

	brmctx = br_multicast_port_ctx_get_global(pmctx);
	spin_lock_bh(&brmctx->br->multicast_lock);
	if (pmctx->multicast_router == val) {
		/* Refresh the temp router port timer */
		if (pmctx->multicast_router == MDB_RTR_TYPE_TEMP) {
			mod_timer(&pmctx->ip4_mc_router_timer,
				  now + brmctx->multicast_querier_interval);
#if IS_ENABLED(CONFIG_IPV6)
			mod_timer(&pmctx->ip6_mc_router_timer,
				  now + brmctx->multicast_querier_interval);
#endif
		}
		err = 0;
		goto unlock;
	}
	switch (val) {
	case MDB_RTR_TYPE_DISABLED:
		pmctx->multicast_router = MDB_RTR_TYPE_DISABLED;
		del |= br_ip4_multicast_rport_del(pmctx);
		del_timer(&pmctx->ip4_mc_router_timer);
		del |= br_ip6_multicast_rport_del(pmctx);
#if IS_ENABLED(CONFIG_IPV6)
		del_timer(&pmctx->ip6_mc_router_timer);
#endif
		br_multicast_rport_del_notify(pmctx, del);
		break;
	case MDB_RTR_TYPE_TEMP_QUERY:
		pmctx->multicast_router = MDB_RTR_TYPE_TEMP_QUERY;
		del |= br_ip4_multicast_rport_del(pmctx);
		del |= br_ip6_multicast_rport_del(pmctx);
		br_multicast_rport_del_notify(pmctx, del);
		break;
	case MDB_RTR_TYPE_PERM:
		pmctx->multicast_router = MDB_RTR_TYPE_PERM;
		del_timer(&pmctx->ip4_mc_router_timer);
		br_ip4_multicast_add_router(brmctx, pmctx);
#if IS_ENABLED(CONFIG_IPV6)
		del_timer(&pmctx->ip6_mc_router_timer);
#endif
		br_ip6_multicast_add_router(brmctx, pmctx);
		break;
	case MDB_RTR_TYPE_TEMP:
		pmctx->multicast_router = MDB_RTR_TYPE_TEMP;
		br_ip4_multicast_mark_router(brmctx, pmctx);
		br_ip6_multicast_mark_router(brmctx, pmctx);
		break;
	default:
		goto unlock;
	}
	err = 0;
unlock:
	spin_unlock_bh(&brmctx->br->multicast_lock);

	return err;
}

int br_multicast_set_vlan_router(struct net_bridge_vlan *v, u8 mcast_router)
{
	int err;

	if (br_vlan_is_master(v))
		err = br_multicast_set_router(&v->br_mcast_ctx, mcast_router);
	else
		err = br_multicast_set_port_router(&v->port_mcast_ctx,
						   mcast_router);

	return err;
}

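/* (Re-)start own queries for this context and re-enable the own-query
 * timers of all member ports, skipping ports (or their vlan contexts)
 * that are in a stopped state.
 */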
static void br_multicast_start_querier(struct net_bridge_mcast *brmctx,
				       struct bridge_mcast_own_query *query)
{
	struct net_bridge_port *port;

	if (!br_multicast_ctx_matches_vlan_snooping(brmctx))
		return;

	__br_multicast_open_query(brmctx->br, query);

	rcu_read_lock();
	list_for_each_entry_rcu(port, &brmctx->br->port_list, list) {
		struct bridge_mcast_own_query *ip4_own_query;
#if IS_ENABLED(CONFIG_IPV6)
		struct bridge_mcast_own_query *ip6_own_query;
#endif

		if (br_multicast_port_ctx_state_stopped(&port->multicast_ctx))
			continue;

		if (br_multicast_ctx_is_vlan(brmctx)) {
			struct net_bridge_vlan *vlan;

			vlan = br_vlan_find(nbp_vlan_group_rcu(port),
					    brmctx->vlan->vid);
			if (!vlan ||
			    br_multicast_port_ctx_state_stopped(&vlan->port_mcast_ctx))
				continue;

			ip4_own_query = &vlan->port_mcast_ctx.ip4_own_query;
#if IS_ENABLED(CONFIG_IPV6)
			ip6_own_query = &vlan->port_mcast_ctx.ip6_own_query;
#endif
		} else {
			ip4_own_query = &port->multicast_ctx.ip4_own_query;
#if IS_ENABLED(CONFIG_IPV6)
			ip6_own_query = &port->multicast_ctx.ip6_own_query;
#endif
		}

		if (query == &brmctx->ip4_own_query)
			br_multicast_enable(ip4_own_query);
#if IS_ENABLED(CONFIG_IPV6)
		else
			br_multicast_enable(ip6_own_query);
#endif
	}
	rcu_read_unlock();
}

int br_multicast_toggle(struct net_bridge *br, unsigned long val,
			struct netlink_ext_ack *extack)
{
	struct net_bridge_port *port;
	bool change_snoopers = false;
	int err = 0;

	spin_lock_bh(&br->multicast_lock);
	if (!!br_opt_get(br, BROPT_MULTICAST_ENABLED) == !!val)
		goto unlock;

	err = br_mc_disabled_update(br->dev, val, extack);
	if (err == -EOPNOTSUPP)
		err = 0;
	if (err)
		goto unlock;

	br_opt_toggle(br, BROPT_MULTICAST_ENABLED, !!val);
	if (!br_opt_get(br, BROPT_MULTICAST_ENABLED)) {
		change_snoopers = true;
		goto unlock;
	}

	if (!netif_running(br->dev))
		goto unlock;

	br_multicast_open(br);
	list_for_each_entry(port, &br->port_list, list)
		__br_multicast_enable_port_ctx(&port->multicast_ctx);

	change_snoopers = true;

unlock:
	spin_unlock_bh(&br->multicast_lock);

	/* br_multicast_join_snoopers has the potential to cause
	 * an MLD Report/Leave to be delivered to br_multicast_rcv,
	 * which would in turn call br_multicast_add_group, which would
	 * attempt to acquire multicast_lock. This function should be
	 * called after the lock has been released to avoid deadlocks on
	 * multicast_lock.
	 *
	 * br_multicast_leave_snoopers does not have the problem since
	 * br_multicast_rcv first checks BROPT_MULTICAST_ENABLED, and
	 * returns without calling br_multicast_ipv4/6_rcv if it's not
	 * enabled. Moved both functions out just for symmetry.
	 */
	if (change_snoopers) {
		if (br_opt_get(br, BROPT_MULTICAST_ENABLED))
			br_multicast_join_snoopers(br);
		else
			br_multicast_leave_snoopers(br);
	}

	return err;
}

bool br_multicast_enabled(const struct net_device *dev)
{
	struct net_bridge *br = netdev_priv(dev);

	return !!br_opt_get(br, BROPT_MULTICAST_ENABLED);
}
EXPORT_SYMBOL_GPL(br_multicast_enabled);

bool br_multicast_router(const struct net_device *dev)
{
	struct net_bridge *br = netdev_priv(dev);
	bool is_router;

	spin_lock_bh(&br->multicast_lock);
	is_router = br_multicast_is_router(&br->multicast_ctx, NULL);
	spin_unlock_bh(&br->multicast_lock);

	return is_router;
}
EXPORT_SYMBOL_GPL(br_multicast_router);

int br_multicast_set_querier(struct net_bridge_mcast *brmctx, unsigned long val)
{
	unsigned long max_delay;

	val = !!val;

	spin_lock_bh(&brmctx->br->multicast_lock);
	if (brmctx->multicast_querier == val)
		goto unlock;

	WRITE_ONCE(brmctx->multicast_querier, val);
	if (!val)
		goto unlock;

	max_delay = brmctx->multicast_query_response_interval;

	if (!timer_pending(&brmctx->ip4_other_query.timer))
		brmctx->ip4_other_query.delay_time = jiffies + max_delay;

	br_multicast_start_querier(brmctx, &brmctx->ip4_own_query);

#if IS_ENABLED(CONFIG_IPV6)
	if (!timer_pending(&brmctx->ip6_other_query.timer))
		brmctx->ip6_other_query.delay_time = jiffies + max_delay;

	br_multicast_start_querier(brmctx, &brmctx->ip6_own_query);
#endif

unlock:
	spin_unlock_bh(&brmctx->br->multicast_lock);

	return 0;
}

int br_multicast_set_igmp_version(struct net_bridge_mcast *brmctx,
				  unsigned long val)
{
	/* Currently we support only version 2 and 3 */
	switch (val) {
	case 2:
	case 3:
		break;
	default:
		return -EINVAL;
	}

	spin_lock_bh(&brmctx->br->multicast_lock);
	brmctx->multicast_igmp_version = val;
	spin_unlock_bh(&brmctx->br->multicast_lock);

	return 0;
}

#if IS_ENABLED(CONFIG_IPV6)
int br_multicast_set_mld_version(struct net_bridge_mcast *brmctx,
				 unsigned long val)
{
	/* Currently we support version 1 and 2 */
	switch (val) {
	case 1:
	case 2:
		break;
	default:
		return -EINVAL;
	}

	spin_lock_bh(&brmctx->br->multicast_lock);
	brmctx->multicast_mld_version = val;
	spin_unlock_bh(&brmctx->br->multicast_lock);

	return 0;
}
#endif

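/* Both interval setters below clamp the user-supplied value to a sane
 * minimum and log an informational message when they do.
 */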
void br_multicast_set_query_intvl(struct net_bridge_mcast *brmctx,
				  unsigned long val)
{
	unsigned long intvl_jiffies = clock_t_to_jiffies(val);

	if (intvl_jiffies < BR_MULTICAST_QUERY_INTVL_MIN) {
		br_info(brmctx->br,
			"trying to set multicast query interval below minimum, setting to %lu (%ums)\n",
			jiffies_to_clock_t(BR_MULTICAST_QUERY_INTVL_MIN),
			jiffies_to_msecs(BR_MULTICAST_QUERY_INTVL_MIN));
		intvl_jiffies = BR_MULTICAST_QUERY_INTVL_MIN;
	}

	brmctx->multicast_query_interval = intvl_jiffies;
}

void br_multicast_set_startup_query_intvl(struct net_bridge_mcast *brmctx,
					  unsigned long val)
{
	unsigned long intvl_jiffies = clock_t_to_jiffies(val);

	if (intvl_jiffies < BR_MULTICAST_STARTUP_QUERY_INTVL_MIN) {
		br_info(brmctx->br,
			"trying to set multicast startup query interval below minimum, setting to %lu (%ums)\n",
			jiffies_to_clock_t(BR_MULTICAST_STARTUP_QUERY_INTVL_MIN),
			jiffies_to_msecs(BR_MULTICAST_STARTUP_QUERY_INTVL_MIN));
		intvl_jiffies = BR_MULTICAST_STARTUP_QUERY_INTVL_MIN;
	}

	brmctx->multicast_startup_query_interval = intvl_jiffies;
}

/**
 * br_multicast_list_adjacent - Returns snooped multicast addresses
 * @dev:	The bridge port adjacent to which to retrieve addresses
 * @br_ip_list:	The list to store found, snooped multicast IP addresses in
 *
 * Creates a list of IP addresses (struct br_ip_list) sensed by the multicast
 * snooping feature on all bridge ports of dev's bridge device, excluding
 * the addresses from dev itself.
 *
 * Returns the number of items added to br_ip_list.
 *
 * Notes:
 * - br_ip_list needs to be initialized by caller
 * - br_ip_list might contain duplicates in the end
 *   (needs to be taken care of by caller)
 * - br_ip_list needs to be freed by caller
 */
int br_multicast_list_adjacent(struct net_device *dev,
			       struct list_head *br_ip_list)
{
	struct net_bridge *br;
	struct net_bridge_port *port;
	struct net_bridge_port_group *group;
	struct br_ip_list *entry;
	int count = 0;

	rcu_read_lock();
	if (!br_ip_list || !netif_is_bridge_port(dev))
		goto unlock;

	port = br_port_get_rcu(dev);
	if (!port || !port->br)
		goto unlock;

	br = port->br;

	list_for_each_entry_rcu(port, &br->port_list, list) {
		if (!port->dev || port->dev == dev)
			continue;

		hlist_for_each_entry_rcu(group, &port->mglist, mglist) {
			entry = kmalloc(sizeof(*entry), GFP_ATOMIC);
			if (!entry)
				goto unlock;

			entry->addr = group->key.addr;
			list_add(&entry->list, br_ip_list);
			count++;
		}
	}

unlock:
	rcu_read_unlock();
	return count;
}
EXPORT_SYMBOL_GPL(br_multicast_list_adjacent);

/**
 * br_multicast_has_querier_anywhere - Checks for a querier on a bridge
 * @dev: The bridge port providing the bridge on which to check for a querier
 * @proto: The protocol family to check for: IGMP -> ETH_P_IP, MLD -> ETH_P_IPV6
 *
 * Checks whether the given interface has a bridge on top and if so returns
 * true if a valid querier exists anywhere on the bridged link layer.
 * Otherwise returns false.
 */
bool br_multicast_has_querier_anywhere(struct net_device *dev, int proto)
{
	struct net_bridge *br;
	struct net_bridge_port *port;
	struct ethhdr eth;
	bool ret = false;

	rcu_read_lock();
	if (!netif_is_bridge_port(dev))
		goto unlock;

	port = br_port_get_rcu(dev);
	if (!port || !port->br)
		goto unlock;

	br = port->br;

	memset(&eth, 0, sizeof(eth));
	eth.h_proto = htons(proto);

	ret = br_multicast_querier_exists(&br->multicast_ctx, &eth, NULL);

unlock:
	rcu_read_unlock();
	return ret;
}
EXPORT_SYMBOL_GPL(br_multicast_has_querier_anywhere);

/**
 * br_multicast_has_querier_adjacent - Checks for a querier behind a bridge port
 * @dev: The bridge port adjacent to which to check for a querier
 * @proto: The protocol family to check for: IGMP -> ETH_P_IP, MLD -> ETH_P_IPV6
 *
 * Checks whether the given interface has a bridge on top and if so returns
 * true if a selected querier is behind one of the other ports of this
 * bridge. Otherwise returns false.
 */
bool br_multicast_has_querier_adjacent(struct net_device *dev, int proto)
{
	struct net_bridge_mcast *brmctx;
	struct net_bridge *br;
	struct net_bridge_port *port;
	bool ret = false;
	int port_ifidx;

	rcu_read_lock();
	if (!netif_is_bridge_port(dev))
		goto unlock;

	port = br_port_get_rcu(dev);
	if (!port || !port->br)
		goto unlock;

	br = port->br;
	brmctx = &br->multicast_ctx;

	switch (proto) {
	case ETH_P_IP:
		port_ifidx = brmctx->ip4_querier.port_ifidx;
		if (!timer_pending(&brmctx->ip4_other_query.timer) ||
		    port_ifidx == port->dev->ifindex)
			goto unlock;
		break;
#if IS_ENABLED(CONFIG_IPV6)
	case ETH_P_IPV6:
		port_ifidx = brmctx->ip6_querier.port_ifidx;
		if (!timer_pending(&brmctx->ip6_other_query.timer) ||
		    port_ifidx == port->dev->ifindex)
			goto unlock;
		break;
#endif
	default:
		goto unlock;
	}

	ret = true;
unlock:
	rcu_read_unlock();
	return ret;
}
EXPORT_SYMBOL_GPL(br_multicast_has_querier_adjacent);

/**
 * br_multicast_has_router_adjacent - Checks for a router behind a bridge port
 * @dev: The bridge port adjacent to which to check for a multicast router
 * @proto: The protocol family to check for: IGMP -> ETH_P_IP, MLD -> ETH_P_IPV6
 *
 * Checks whether the given interface has a bridge on top and if so returns
 * true if a multicast router is behind one of the other ports of this
 * bridge. Otherwise returns false.
 */
bool br_multicast_has_router_adjacent(struct net_device *dev, int proto)
{
	struct net_bridge_mcast_port *pmctx;
	struct net_bridge_mcast *brmctx;
	struct net_bridge_port *port;
	bool ret = false;

	rcu_read_lock();
	port = br_port_get_check_rcu(dev);
	if (!port)
		goto unlock;

	brmctx = &port->br->multicast_ctx;
	switch (proto) {
	case ETH_P_IP:
		hlist_for_each_entry_rcu(pmctx, &brmctx->ip4_mc_router_list,
					 ip4_rlist) {
			if (pmctx->port == port)
				continue;

			ret = true;
			goto unlock;
		}
		break;
#if IS_ENABLED(CONFIG_IPV6)
	case ETH_P_IPV6:
		hlist_for_each_entry_rcu(pmctx, &brmctx->ip6_mc_router_list,
					 ip6_rlist) {
			if (pmctx->port == port)
				continue;

			ret = true;
			goto unlock;
		}
		break;
#endif
	default:
		/* when compiled without IPv6 support, be conservative and
		 * always assume presence of an IPv6 multicast router
		 */
		ret = true;
	}

unlock:
	rcu_read_unlock();
	return ret;
}
EXPORT_SYMBOL_GPL(br_multicast_has_router_adjacent);

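/* Per-CPU RX/TX accounting of IGMP/MLD packets by type; query versions are
 * told apart by header length (and, for IGMPv1 vs. v2, by a zero max
 * response code).
 */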
static void br_mcast_stats_add(struct bridge_mcast_stats __percpu *stats,
			       const struct sk_buff *skb, u8 type, u8 dir)
{
	struct bridge_mcast_stats *pstats = this_cpu_ptr(stats);
	__be16 proto = skb->protocol;
	unsigned int t_len;

	u64_stats_update_begin(&pstats->syncp);
	switch (proto) {
	case htons(ETH_P_IP):
		t_len = ntohs(ip_hdr(skb)->tot_len) - ip_hdrlen(skb);
		switch (type) {
		case IGMP_HOST_MEMBERSHIP_REPORT:
			pstats->mstats.igmp_v1reports[dir]++;
			break;
		case IGMPV2_HOST_MEMBERSHIP_REPORT:
			pstats->mstats.igmp_v2reports[dir]++;
			break;
		case IGMPV3_HOST_MEMBERSHIP_REPORT:
			pstats->mstats.igmp_v3reports[dir]++;
			break;
		case IGMP_HOST_MEMBERSHIP_QUERY:
			if (t_len != sizeof(struct igmphdr)) {
				pstats->mstats.igmp_v3queries[dir]++;
			} else {
				unsigned int offset = skb_transport_offset(skb);
				struct igmphdr *ih, _ihdr;

				ih = skb_header_pointer(skb, offset,
							sizeof(_ihdr), &_ihdr);
				if (!ih)
					break;
				if (!ih->code)
					pstats->mstats.igmp_v1queries[dir]++;
				else
					pstats->mstats.igmp_v2queries[dir]++;
			}
			break;
		case IGMP_HOST_LEAVE_MESSAGE:
			pstats->mstats.igmp_leaves[dir]++;
			break;
		}
		break;
#if IS_ENABLED(CONFIG_IPV6)
	case htons(ETH_P_IPV6):
		t_len = ntohs(ipv6_hdr(skb)->payload_len) +
			sizeof(struct ipv6hdr);
		t_len -= skb_network_header_len(skb);
		switch (type) {
		case ICMPV6_MGM_REPORT:
			pstats->mstats.mld_v1reports[dir]++;
			break;
		case ICMPV6_MLD2_REPORT:
			pstats->mstats.mld_v2reports[dir]++;
			break;
		case ICMPV6_MGM_QUERY:
			if (t_len != sizeof(struct mld_msg))
				pstats->mstats.mld_v2queries[dir]++;
			else
				pstats->mstats.mld_v1queries[dir]++;
			break;
		case ICMPV6_MGM_REDUCTION:
			pstats->mstats.mld_leaves[dir]++;
			break;
		}
		break;
#endif /* CONFIG_IPV6 */
	}
	u64_stats_update_end(&pstats->syncp);
}


void br_multicast_count(struct net_bridge *br,
			const struct net_bridge_port *p,
			const struct sk_buff *skb, u8 type, u8 dir)
{
	struct bridge_mcast_stats __percpu *stats;

	/* if multicast_disabled is true then igmp type can't be set */
	if (!type || !br_opt_get(br, BROPT_MULTICAST_STATS_ENABLED))
		return;

	if (p)
		stats = p->mcast_stats;
	else
		stats = br->mcast_stats;
	if (WARN_ON(!stats))
		return;

	br_mcast_stats_add(stats, skb, type, dir);
}
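
/* Call-site sketch (illustrative): the bridge data path would account a
 * snooped packet after the IGMP/MLD type has been classified; the variable
 * name igmp_type below is hypothetical.
 *
 *	br_multicast_count(br, p, skb, igmp_type, BR_MCAST_DIR_RX);
 */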

int br_multicast_init_stats(struct net_bridge *br)
{
	br->mcast_stats = netdev_alloc_pcpu_stats(struct bridge_mcast_stats);
	if (!br->mcast_stats)
		return -ENOMEM;

	return 0;
}

void br_multicast_uninit_stats(struct net_bridge *br)
{
	free_percpu(br->mcast_stats);
}
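
/* Lifetime sketch (illustrative): the per-cpu stats are allocated once
 * when the bridge device is set up and freed on teardown, so a setup path
 * would pair the two calls:
 *
 *	err = br_multicast_init_stats(br);
 *	if (err)
 *		return err;
 *	...
 *	br_multicast_uninit_stats(br);
 */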

/* noinline for https://bugs.llvm.org/show_bug.cgi?id=45802#c9 */
static noinline_for_stack void mcast_stats_add_dir(u64 *dst, u64 *src)
{
	dst[BR_MCAST_DIR_RX] += src[BR_MCAST_DIR_RX];
	dst[BR_MCAST_DIR_TX] += src[BR_MCAST_DIR_TX];
}

void br_multicast_get_stats(const struct net_bridge *br,
			    const struct net_bridge_port *p,
			    struct br_mcast_stats *dest)
{
	struct bridge_mcast_stats __percpu *stats;
	struct br_mcast_stats tdst;
	int i;

	memset(dest, 0, sizeof(*dest));
	if (p)
		stats = p->mcast_stats;
	else
		stats = br->mcast_stats;
	if (WARN_ON(!stats))
		return;

	memset(&tdst, 0, sizeof(tdst));
	for_each_possible_cpu(i) {
		struct bridge_mcast_stats *cpu_stats = per_cpu_ptr(stats, i);
		struct br_mcast_stats temp;
		unsigned int start;

		do {
			start = u64_stats_fetch_begin_irq(&cpu_stats->syncp);
			memcpy(&temp, &cpu_stats->mstats, sizeof(temp));
		} while (u64_stats_fetch_retry_irq(&cpu_stats->syncp, start));

		mcast_stats_add_dir(tdst.igmp_v1queries, temp.igmp_v1queries);
		mcast_stats_add_dir(tdst.igmp_v2queries, temp.igmp_v2queries);
		mcast_stats_add_dir(tdst.igmp_v3queries, temp.igmp_v3queries);
		mcast_stats_add_dir(tdst.igmp_leaves, temp.igmp_leaves);
		mcast_stats_add_dir(tdst.igmp_v1reports, temp.igmp_v1reports);
		mcast_stats_add_dir(tdst.igmp_v2reports, temp.igmp_v2reports);
		mcast_stats_add_dir(tdst.igmp_v3reports, temp.igmp_v3reports);
		tdst.igmp_parse_errors += temp.igmp_parse_errors;

		mcast_stats_add_dir(tdst.mld_v1queries, temp.mld_v1queries);
		mcast_stats_add_dir(tdst.mld_v2queries, temp.mld_v2queries);
		mcast_stats_add_dir(tdst.mld_leaves, temp.mld_leaves);
		mcast_stats_add_dir(tdst.mld_v1reports, temp.mld_v1reports);
		mcast_stats_add_dir(tdst.mld_v2reports, temp.mld_v2reports);
		tdst.mld_parse_errors += temp.mld_parse_errors;
	}
	memcpy(dest, &tdst, sizeof(*dest));
}
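
/* Consumption sketch (illustrative): a netlink stats dump could snapshot
 * the aggregated counters into a flat struct and emit it as an attribute.
 * The attribute ids below are hypothetical.
 *
 *	struct br_mcast_stats mstats;
 *
 *	br_multicast_get_stats(br, p, &mstats);
 *	if (nla_put_64bit(skb, EXAMPLE_ATTR_MCAST_STATS,
 *			  sizeof(mstats), &mstats, EXAMPLE_ATTR_PAD))
 *		return -EMSGSIZE;
 */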

int br_mdb_hash_init(struct net_bridge *br)
{
	int err;

	err = rhashtable_init(&br->sg_port_tbl, &br_sg_port_rht_params);
	if (err)
		return err;

	err = rhashtable_init(&br->mdb_hash_tbl, &br_mdb_rht_params);
	if (err) {
		rhashtable_destroy(&br->sg_port_tbl);
		return err;
	}

	return 0;
}

void br_mdb_hash_fini(struct net_bridge *br)
{
	rhashtable_destroy(&br->sg_port_tbl);
	rhashtable_destroy(&br->mdb_hash_tbl);
}
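
/* Pairing sketch (illustrative): the two rhashtables are treated as a
 * unit -- br_mdb_hash_init() either initializes both or neither, so a
 * caller only ever needs the matching teardown on the success path:
 *
 *	err = br_mdb_hash_init(br);
 *	if (err)
 *		return err;
 *	...
 *	br_mdb_hash_fini(br);
 */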