// SPDX-License-Identifier: GPL-2.0-or-later
/* Copyright (c) 2021 Taehee Yoo <[email protected]> */

#define pr_fmt(fmt) KBUILD_MODNAME ": " fmt

#include <linux/module.h>
#include <linux/skbuff.h>
#include <linux/udp.h>
#include <linux/jhash.h>
#include <linux/if_tunnel.h>
#include <linux/net.h>
#include <linux/igmp.h>
#include <linux/workqueue.h>
#include <net/sch_generic.h>
#include <net/net_namespace.h>
#include <net/ip.h>
#include <net/udp.h>
#include <net/udp_tunnel.h>
#include <net/icmp.h>
#include <net/mld.h>
#include <net/amt.h>
#include <uapi/linux/amt.h>
#include <linux/security.h>
#include <net/gro_cells.h>
#include <net/ipv6.h>
#include <net/if_inet6.h>
#include <net/ndisc.h>
#include <net/addrconf.h>
#include <net/ip6_route.h>
#include <net/inet_common.h>
#include <net/ip6_checksum.h>

static struct workqueue_struct *amt_wq;
static HLIST_HEAD(source_gc_list);
/* Lock for source_gc_list */
static spinlock_t source_gc_lock;
static struct delayed_work source_gc_wq;

static char *status_str[] = {
	"AMT_STATUS_INIT",
	"AMT_STATUS_SENT_DISCOVERY",
	"AMT_STATUS_RECEIVED_DISCOVERY",
	"AMT_STATUS_SENT_ADVERTISEMENT",
	"AMT_STATUS_RECEIVED_ADVERTISEMENT",
	"AMT_STATUS_SENT_REQUEST",
	"AMT_STATUS_RECEIVED_REQUEST",
	"AMT_STATUS_SENT_QUERY",
	"AMT_STATUS_RECEIVED_QUERY",
	"AMT_STATUS_SENT_UPDATE",
	"AMT_STATUS_RECEIVED_UPDATE",
};

static char *type_str[] = {
	"", /* Type 0 is not defined */
	"AMT_MSG_DISCOVERY",
	"AMT_MSG_ADVERTISEMENT",
	"AMT_MSG_REQUEST",
	"AMT_MSG_MEMBERSHIP_QUERY",
	"AMT_MSG_MEMBERSHIP_UPDATE",
	"AMT_MSG_MULTICAST_DATA",
	"AMT_MSG_TEARDOWN",
};

static char *action_str[] = {
	"AMT_ACT_GMI",
	"AMT_ACT_GMI_ZERO",
	"AMT_ACT_GT",
	"AMT_ACT_STATUS_FWD_NEW",
	"AMT_ACT_STATUS_D_FWD_NEW",
	"AMT_ACT_STATUS_NONE_NEW",
};

static struct igmpv3_grec igmpv3_zero_grec;

#if IS_ENABLED(CONFIG_IPV6)
#define MLD2_ALL_NODE_INIT { { { 0xff, 0x02, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0x01 } } }
static struct in6_addr mld2_all_node = MLD2_ALL_NODE_INIT;
static struct mld2_grec mldv2_zero_grec;
#endif

static struct amt_skb_cb *amt_skb_cb(struct sk_buff *skb)
{
	BUILD_BUG_ON(sizeof(struct amt_skb_cb) + sizeof(struct qdisc_skb_cb) >
		     sizeof_field(struct sk_buff, cb));

	return (struct amt_skb_cb *)((void *)skb->cb +
				     sizeof(struct qdisc_skb_cb));
}
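
/* Source nodes are not freed in place: amt_destroy_source() moves them onto
 * source_gc_list, and this worker drains that list under source_gc_lock,
 * releasing each node after an RCU grace period via kfree_rcu().
 */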
static void __amt_source_gc_work(void)
{
	struct amt_source_node *snode;
	struct hlist_head gc_list;
	struct hlist_node *t;

	spin_lock_bh(&source_gc_lock);
	hlist_move_list(&source_gc_list, &gc_list);
	spin_unlock_bh(&source_gc_lock);

	hlist_for_each_entry_safe(snode, t, &gc_list, node) {
		hlist_del_rcu(&snode->node);
		kfree_rcu(snode, rcu);
	}
}

static void amt_source_gc_work(struct work_struct *work)
{
	__amt_source_gc_work();

	spin_lock_bh(&source_gc_lock);
	mod_delayed_work(amt_wq, &source_gc_wq,
			 msecs_to_jiffies(AMT_GC_INTERVAL));
	spin_unlock_bh(&source_gc_lock);
}

static bool amt_addr_equal(union amt_addr *a, union amt_addr *b)
{
	return !memcmp(a, b, sizeof(union amt_addr));
}

static u32 amt_source_hash(struct amt_tunnel_list *tunnel, union amt_addr *src)
{
	u32 hash = jhash(src, sizeof(*src), tunnel->amt->hash_seed);

	return reciprocal_scale(hash, tunnel->amt->hash_buckets);
}
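
/* Test whether a source node matches the given filter: each amt_filter
 * selects a combination of forwarding status (FWD/D_FWD/NONE) and record
 * age (OLD vs. NEW), with AMT_FILTER_ALL matching unconditionally.
 */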
static bool amt_status_filter(struct amt_source_node *snode,
			      enum amt_filter filter)
{
	bool rc = false;

	switch (filter) {
	case AMT_FILTER_FWD:
		if (snode->status == AMT_SOURCE_STATUS_FWD &&
		    snode->flags == AMT_SOURCE_OLD)
			rc = true;
		break;
	case AMT_FILTER_D_FWD:
		if (snode->status == AMT_SOURCE_STATUS_D_FWD &&
		    snode->flags == AMT_SOURCE_OLD)
			rc = true;
		break;
	case AMT_FILTER_FWD_NEW:
		if (snode->status == AMT_SOURCE_STATUS_FWD &&
		    snode->flags == AMT_SOURCE_NEW)
			rc = true;
		break;
	case AMT_FILTER_D_FWD_NEW:
		if (snode->status == AMT_SOURCE_STATUS_D_FWD &&
		    snode->flags == AMT_SOURCE_NEW)
			rc = true;
		break;
	case AMT_FILTER_ALL:
		rc = true;
		break;
	case AMT_FILTER_NONE_NEW:
		if (snode->status == AMT_SOURCE_STATUS_NONE &&
		    snode->flags == AMT_SOURCE_NEW)
			rc = true;
		break;
	case AMT_FILTER_BOTH:
		if ((snode->status == AMT_SOURCE_STATUS_D_FWD ||
		     snode->status == AMT_SOURCE_STATUS_FWD) &&
		    snode->flags == AMT_SOURCE_OLD)
			rc = true;
		break;
	case AMT_FILTER_BOTH_NEW:
		if ((snode->status == AMT_SOURCE_STATUS_D_FWD ||
		     snode->status == AMT_SOURCE_STATUS_FWD) &&
		    snode->flags == AMT_SOURCE_NEW)
			rc = true;
		break;
	default:
		WARN_ON_ONCE(1);
		break;
	}

	return rc;
}

static struct amt_source_node *amt_lookup_src(struct amt_tunnel_list *tunnel,
					      struct amt_group_node *gnode,
					      enum amt_filter filter,
					      union amt_addr *src)
{
	u32 hash = amt_source_hash(tunnel, src);
	struct amt_source_node *snode;

	hlist_for_each_entry_rcu(snode, &gnode->sources[hash], node)
		if (amt_status_filter(snode, filter) &&
		    amt_addr_equal(&snode->source_addr, src))
			return snode;

	return NULL;
}

static u32 amt_group_hash(struct amt_tunnel_list *tunnel, union amt_addr *group)
{
	u32 hash = jhash(group, sizeof(*group), tunnel->amt->hash_seed);

	return reciprocal_scale(hash, tunnel->amt->hash_buckets);
}
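
/* Look up a group node by group address, host address, and address family
 * in the tunnel's group hash table; iteration is RCU-protected.
 */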
static struct amt_group_node *amt_lookup_group(struct amt_tunnel_list *tunnel,
					       union amt_addr *group,
					       union amt_addr *host,
					       bool v6)
{
	u32 hash = amt_group_hash(tunnel, group);
	struct amt_group_node *gnode;

	hlist_for_each_entry_rcu(gnode, &tunnel->groups[hash], node) {
		if (amt_addr_equal(&gnode->group_addr, group) &&
		    amt_addr_equal(&gnode->host_addr, host) &&
		    gnode->v6 == v6)
			return gnode;
	}

	return NULL;
}

static void amt_destroy_source(struct amt_source_node *snode)
{
	struct amt_group_node *gnode = snode->gnode;
	struct amt_tunnel_list *tunnel;

	tunnel = gnode->tunnel_list;

	if (!gnode->v6) {
		netdev_dbg(snode->gnode->amt->dev,
			   "Delete source %pI4 from %pI4\n",
			   &snode->source_addr.ip4,
			   &gnode->group_addr.ip4);
#if IS_ENABLED(CONFIG_IPV6)
	} else {
		netdev_dbg(snode->gnode->amt->dev,
			   "Delete source %pI6 from %pI6\n",
			   &snode->source_addr.ip6,
			   &gnode->group_addr.ip6);
#endif
	}

	cancel_delayed_work(&snode->source_timer);
	hlist_del_init_rcu(&snode->node);
	tunnel->nr_sources--;
	gnode->nr_sources--;
	spin_lock_bh(&source_gc_lock);
	hlist_add_head_rcu(&snode->node, &source_gc_list);
	spin_unlock_bh(&source_gc_lock);
}
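
/* Remove a group from its tunnel: stop the group timer (dropping the device
 * reference it held), unlink the node, destroy every remaining source, and
 * free the group after an RCU grace period.
 */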
static void amt_del_group(struct amt_dev *amt, struct amt_group_node *gnode)
{
	struct amt_source_node *snode;
	struct hlist_node *t;
	int i;

	if (cancel_delayed_work(&gnode->group_timer))
		dev_put(amt->dev);
	hlist_del_rcu(&gnode->node);
	gnode->tunnel_list->nr_groups--;

	if (!gnode->v6)
		netdev_dbg(amt->dev, "Leave group %pI4\n",
			   &gnode->group_addr.ip4);
#if IS_ENABLED(CONFIG_IPV6)
	else
		netdev_dbg(amt->dev, "Leave group %pI6\n",
			   &gnode->group_addr.ip6);
#endif
	for (i = 0; i < amt->hash_buckets; i++)
		hlist_for_each_entry_safe(snode, t, &gnode->sources[i], node)
			amt_destroy_source(snode);

	/* tunnel->lock was acquired outside of amt_del_group(),
	 * but rcu_read_lock() was acquired too, so it's safe.
	 */
	kfree_rcu(gnode, rcu);
}

/* If a source timer expires with a router filter-mode for the group of
 * INCLUDE, the router concludes that traffic from this particular
 * source is no longer desired on the attached network, and deletes the
 * associated source record.
 */
static void amt_source_work(struct work_struct *work)
{
	struct amt_source_node *snode = container_of(to_delayed_work(work),
						     struct amt_source_node,
						     source_timer);
	struct amt_group_node *gnode = snode->gnode;
	struct amt_dev *amt = gnode->amt;
	struct amt_tunnel_list *tunnel;

	tunnel = gnode->tunnel_list;
	spin_lock_bh(&tunnel->lock);
	rcu_read_lock();
	if (gnode->filter_mode == MCAST_INCLUDE) {
		amt_destroy_source(snode);
		if (!gnode->nr_sources)
			amt_del_group(amt, gnode);
	} else {
		/* When a router filter-mode for a group is EXCLUDE,
		 * source records are only deleted when the group timer expires.
		 */
		snode->status = AMT_SOURCE_STATUS_D_FWD;
	}
	rcu_read_unlock();
	spin_unlock_bh(&tunnel->lock);
}

static void amt_act_src(struct amt_tunnel_list *tunnel,
			struct amt_group_node *gnode,
			struct amt_source_node *snode,
			enum amt_act act)
{
	struct amt_dev *amt = tunnel->amt;

	switch (act) {
	case AMT_ACT_GMI:
		mod_delayed_work(amt_wq, &snode->source_timer,
				 msecs_to_jiffies(amt_gmi(amt)));
		break;
	case AMT_ACT_GMI_ZERO:
		cancel_delayed_work(&snode->source_timer);
		break;
	case AMT_ACT_GT:
		mod_delayed_work(amt_wq, &snode->source_timer,
				 gnode->group_timer.timer.expires);
		break;
	case AMT_ACT_STATUS_FWD_NEW:
		snode->status = AMT_SOURCE_STATUS_FWD;
		snode->flags = AMT_SOURCE_NEW;
		break;
	case AMT_ACT_STATUS_D_FWD_NEW:
		snode->status = AMT_SOURCE_STATUS_D_FWD;
		snode->flags = AMT_SOURCE_NEW;
		break;
	case AMT_ACT_STATUS_NONE_NEW:
		cancel_delayed_work(&snode->source_timer);
		snode->status = AMT_SOURCE_STATUS_NONE;
		snode->flags = AMT_SOURCE_NEW;
		break;
	default:
		WARN_ON_ONCE(1);
		return;
	}

	if (!gnode->v6)
		netdev_dbg(amt->dev, "Source %pI4 from %pI4 Acted %s\n",
			   &snode->source_addr.ip4,
			   &gnode->group_addr.ip4,
			   action_str[act]);
#if IS_ENABLED(CONFIG_IPV6)
	else
		netdev_dbg(amt->dev, "Source %pI6 from %pI6 Acted %s\n",
			   &snode->source_addr.ip6,
			   &gnode->group_addr.ip6,
			   action_str[act]);
#endif
}
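
/* Allocate a source node in the NONE/NEW state with its expiry timer
 * initialized; the caller links it into the group's source hash table.
 */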
static struct amt_source_node *amt_alloc_snode(struct amt_group_node *gnode,
					       union amt_addr *src)
{
	struct amt_source_node *snode;

	snode = kzalloc(sizeof(*snode), GFP_ATOMIC);
	if (!snode)
		return NULL;

	memcpy(&snode->source_addr, src, sizeof(union amt_addr));
	snode->gnode = gnode;
	snode->status = AMT_SOURCE_STATUS_NONE;
	snode->flags = AMT_SOURCE_NEW;
	INIT_HLIST_NODE(&snode->node);
	INIT_DELAYED_WORK(&snode->source_timer, amt_source_work);

	return snode;
}

/* RFC 3810 - 7.2.2. Definition of Filter Timers
 *
 *  Router Mode          Filter Timer         Actions/Comments
 *  -----------       -----------------       ----------------
 *
 *    INCLUDE             Not Used            All listeners in
 *                                            INCLUDE mode.
 *
 *    EXCLUDE             Timer > 0           At least one listener
 *                                            in EXCLUDE mode.
 *
 *    EXCLUDE             Timer == 0          No more listeners in
 *                                            EXCLUDE mode for the
 *                                            multicast address.
 *                                            If the Requested List
 *                                            is empty, delete
 *                                            Multicast Address
 *                                            Record.  If not, switch
 *                                            to INCLUDE filter mode;
 *                                            the sources in the
 *                                            Requested List are
 *                                            moved to the Include
 *                                            List, and the Exclude
 *                                            List is deleted.
 */
static void amt_group_work(struct work_struct *work)
{
	struct amt_group_node *gnode = container_of(to_delayed_work(work),
						    struct amt_group_node,
						    group_timer);
	struct amt_tunnel_list *tunnel = gnode->tunnel_list;
	struct amt_dev *amt = gnode->amt;
	struct amt_source_node *snode;
	bool delete_group = true;
	struct hlist_node *t;
	int i, buckets;

	buckets = amt->hash_buckets;

	spin_lock_bh(&tunnel->lock);
	if (gnode->filter_mode == MCAST_INCLUDE) {
		/* Not Used */
		spin_unlock_bh(&tunnel->lock);
		goto out;
	}

	rcu_read_lock();
	for (i = 0; i < buckets; i++) {
		hlist_for_each_entry_safe(snode, t,
					  &gnode->sources[i], node) {
			if (!delayed_work_pending(&snode->source_timer) ||
			    snode->status == AMT_SOURCE_STATUS_D_FWD) {
				amt_destroy_source(snode);
			} else {
				delete_group = false;
				snode->status = AMT_SOURCE_STATUS_FWD;
			}
		}
	}
	if (delete_group)
		amt_del_group(amt, gnode);
	else
		gnode->filter_mode = MCAST_INCLUDE;
	rcu_read_unlock();
	spin_unlock_bh(&tunnel->lock);
out:
	dev_put(amt->dev);
}

/* Non-existent group is created as INCLUDE {empty}:
 *
 * RFC 3376 - 5.1. Action on Change of Interface State
 *
 * If no interface state existed for that multicast address before
 * the change (i.e., the change consisted of creating a new
 * per-interface record), or if no state exists after the change
 * (i.e., the change consisted of deleting a per-interface record),
 * then the "non-existent" state is considered to have a filter mode
 * of INCLUDE and an empty source list.
 */
static struct amt_group_node *amt_add_group(struct amt_dev *amt,
					    struct amt_tunnel_list *tunnel,
					    union amt_addr *group,
					    union amt_addr *host,
					    bool v6)
{
	struct amt_group_node *gnode;
	u32 hash;
	int i;

	if (tunnel->nr_groups >= amt->max_groups)
		return ERR_PTR(-ENOSPC);

	gnode = kzalloc(sizeof(*gnode) +
			(sizeof(struct hlist_head) * amt->hash_buckets),
			GFP_ATOMIC);
	if (unlikely(!gnode))
		return ERR_PTR(-ENOMEM);

	gnode->amt = amt;
	gnode->group_addr = *group;
	gnode->host_addr = *host;
	gnode->v6 = v6;
	gnode->tunnel_list = tunnel;
	gnode->filter_mode = MCAST_INCLUDE;
	INIT_HLIST_NODE(&gnode->node);
	INIT_DELAYED_WORK(&gnode->group_timer, amt_group_work);
	for (i = 0; i < amt->hash_buckets; i++)
		INIT_HLIST_HEAD(&gnode->sources[i]);

	hash = amt_group_hash(tunnel, group);
	hlist_add_head_rcu(&gnode->node, &tunnel->groups[hash]);
	tunnel->nr_groups++;

	if (!gnode->v6)
		netdev_dbg(amt->dev, "Join group %pI4\n",
			   &gnode->group_addr.ip4);
#if IS_ENABLED(CONFIG_IPV6)
	else
		netdev_dbg(amt->dev, "Join group %pI6\n",
			   &gnode->group_addr.ip6);
#endif

	return gnode;
}
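
/* Build a self-contained IGMPv3 general query: an Ethernet header, an IPv4
 * header carrying the Router Alert option (AMT_IPHDR_OPTS), and an
 * igmpv3_query addressed to 224.0.0.1.  The skb is later looped through
 * dev_queue_xmit() so that amt_dev_xmit() tunnels it as a membership query.
 */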
static struct sk_buff *amt_build_igmp_gq(struct amt_dev *amt)
{
	u8 ra[AMT_IPHDR_OPTS] = { IPOPT_RA, 4, 0, 0 };
	int hlen = LL_RESERVED_SPACE(amt->dev);
	int tlen = amt->dev->needed_tailroom;
	struct igmpv3_query *ihv3;
	void *csum_start = NULL;
	__sum16 *csum = NULL;
	struct sk_buff *skb;
	struct ethhdr *eth;
	struct iphdr *iph;
	unsigned int len;
	int offset;

	len = hlen + tlen + sizeof(*iph) + AMT_IPHDR_OPTS + sizeof(*ihv3);
	skb = netdev_alloc_skb_ip_align(amt->dev, len);
	if (!skb)
		return NULL;

	skb_reserve(skb, hlen);
	skb_push(skb, sizeof(*eth));
	skb->protocol = htons(ETH_P_IP);
	skb_reset_mac_header(skb);
	skb->priority = TC_PRIO_CONTROL;
	skb_put(skb, sizeof(*iph));
	skb_put_data(skb, ra, sizeof(ra));
	skb_put(skb, sizeof(*ihv3));
	skb_pull(skb, sizeof(*eth));
	skb_reset_network_header(skb);

	iph		= ip_hdr(skb);
	iph->version	= 4;
	iph->ihl	= (sizeof(struct iphdr) + AMT_IPHDR_OPTS) >> 2;
	iph->tos	= AMT_TOS;
	iph->tot_len	= htons(sizeof(*iph) + AMT_IPHDR_OPTS + sizeof(*ihv3));
	iph->frag_off	= htons(IP_DF);
	iph->ttl	= 1;
	iph->id		= 0;
	iph->protocol	= IPPROTO_IGMP;
	iph->daddr	= htonl(INADDR_ALLHOSTS_GROUP);
	iph->saddr	= htonl(INADDR_ANY);
	ip_send_check(iph);

	eth = eth_hdr(skb);
	ether_addr_copy(eth->h_source, amt->dev->dev_addr);
	ip_eth_mc_map(htonl(INADDR_ALLHOSTS_GROUP), eth->h_dest);
	eth->h_proto = htons(ETH_P_IP);

	ihv3		= skb_pull(skb, sizeof(*iph) + AMT_IPHDR_OPTS);
	skb_reset_transport_header(skb);
	ihv3->type	= IGMP_HOST_MEMBERSHIP_QUERY;
	ihv3->code	= 1;
	ihv3->group	= 0;
	ihv3->qqic	= amt->qi;
	ihv3->nsrcs	= 0;
	ihv3->resv	= 0;
	ihv3->suppress	= false;
	ihv3->qrv	= READ_ONCE(amt->net->ipv4.sysctl_igmp_qrv);
	ihv3->csum	= 0;
	csum		= &ihv3->csum;
	csum_start	= (void *)ihv3;
	*csum		= ip_compute_csum(csum_start, sizeof(*ihv3));
	offset		= skb_transport_offset(skb);
	skb->csum	= skb_checksum(skb, offset, skb->len - offset, 0);
	skb->ip_summed	= CHECKSUM_NONE;

	skb_push(skb, sizeof(*eth) + sizeof(*iph) + AMT_IPHDR_OPTS);

	return skb;
}

static void amt_update_gw_status(struct amt_dev *amt, enum amt_status status,
				 bool validate)
{
	if (validate && amt->status >= status)
		return;

	netdev_dbg(amt->dev, "Update GW status %s -> %s",
		   status_str[amt->status], status_str[status]);
	WRITE_ONCE(amt->status, status);
}

static void __amt_update_relay_status(struct amt_tunnel_list *tunnel,
				      enum amt_status status,
				      bool validate)
{
	if (validate && tunnel->status >= status)
		return;

	netdev_dbg(tunnel->amt->dev,
		   "Update Tunnel(IP = %pI4, PORT = %u) status %s -> %s",
		   &tunnel->ip4, ntohs(tunnel->source_port),
		   status_str[tunnel->status], status_str[status]);
	tunnel->status = status;
}

static void amt_update_relay_status(struct amt_tunnel_list *tunnel,
				    enum amt_status status, bool validate)
{
	spin_lock_bh(&tunnel->lock);
	__amt_update_relay_status(tunnel, status, validate);
	spin_unlock_bh(&tunnel->lock);
}
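
/* Gateway side: build and send an AMT Discovery message carrying the
 * current nonce to the configured relay discovery address, which the relay
 * is expected to answer with a Relay Advertisement.
 */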
static void amt_send_discovery(struct amt_dev *amt)
{
	struct amt_header_discovery *amtd;
	int hlen, tlen, offset;
	struct socket *sock;
	struct udphdr *udph;
	struct sk_buff *skb;
	struct iphdr *iph;
	struct rtable *rt;
	struct flowi4 fl4;
	u32 len;
	int err;

	rcu_read_lock();
	sock = rcu_dereference(amt->sock);
	if (!sock)
		goto out;

	if (!netif_running(amt->stream_dev) || !netif_running(amt->dev))
		goto out;

	rt = ip_route_output_ports(amt->net, &fl4, sock->sk,
				   amt->discovery_ip, amt->local_ip,
				   amt->gw_port, amt->relay_port,
				   IPPROTO_UDP, 0,
				   amt->stream_dev->ifindex);
	if (IS_ERR(rt)) {
		amt->dev->stats.tx_errors++;
		goto out;
	}

	hlen = LL_RESERVED_SPACE(amt->dev);
	tlen = amt->dev->needed_tailroom;
	len = hlen + tlen + sizeof(*iph) + sizeof(*udph) + sizeof(*amtd);
	skb = netdev_alloc_skb_ip_align(amt->dev, len);
	if (!skb) {
		ip_rt_put(rt);
		amt->dev->stats.tx_errors++;
		goto out;
	}

	skb->priority = TC_PRIO_CONTROL;
	skb_dst_set(skb, &rt->dst);

	len = sizeof(*iph) + sizeof(*udph) + sizeof(*amtd);
	skb_reset_network_header(skb);
	skb_put(skb, len);
	amtd = skb_pull(skb, sizeof(*iph) + sizeof(*udph));
	amtd->version	= 0;
	amtd->type	= AMT_MSG_DISCOVERY;
	amtd->reserved	= 0;
	amtd->nonce	= amt->nonce;
	skb_push(skb, sizeof(*udph));
	skb_reset_transport_header(skb);
	udph		= udp_hdr(skb);
	udph->source	= amt->gw_port;
	udph->dest	= amt->relay_port;
	udph->len	= htons(sizeof(*udph) + sizeof(*amtd));
	udph->check	= 0;
	offset = skb_transport_offset(skb);
	skb->csum = skb_checksum(skb, offset, skb->len - offset, 0);
	udph->check = csum_tcpudp_magic(amt->local_ip, amt->discovery_ip,
					sizeof(*udph) + sizeof(*amtd),
					IPPROTO_UDP, skb->csum);

	skb_push(skb, sizeof(*iph));
	iph		= ip_hdr(skb);
	iph->version	= 4;
	iph->ihl	= (sizeof(struct iphdr)) >> 2;
	iph->tos	= AMT_TOS;
	iph->frag_off	= 0;
	iph->ttl	= ip4_dst_hoplimit(&rt->dst);
	iph->daddr	= amt->discovery_ip;
	iph->saddr	= amt->local_ip;
	iph->protocol	= IPPROTO_UDP;
	iph->tot_len	= htons(len);

	skb->ip_summed = CHECKSUM_NONE;
	ip_select_ident(amt->net, skb, NULL);
	ip_send_check(iph);
	err = ip_local_out(amt->net, sock->sk, skb);
	if (unlikely(net_xmit_eval(err)))
		amt->dev->stats.tx_errors++;

	amt_update_gw_status(amt, AMT_STATUS_SENT_DISCOVERY, true);
out:
	rcu_read_unlock();
}

static void amt_send_request(struct amt_dev *amt, bool v6)
{
	struct amt_header_request *amtrh;
	int hlen, tlen, offset;
	struct socket *sock;
	struct udphdr *udph;
	struct sk_buff *skb;
	struct iphdr *iph;
	struct rtable *rt;
	struct flowi4 fl4;
	u32 len;
	int err;

	rcu_read_lock();
	sock = rcu_dereference(amt->sock);
	if (!sock)
		goto out;

	if (!netif_running(amt->stream_dev) || !netif_running(amt->dev))
		goto out;

	rt = ip_route_output_ports(amt->net, &fl4, sock->sk,
				   amt->remote_ip, amt->local_ip,
				   amt->gw_port, amt->relay_port,
				   IPPROTO_UDP, 0,
				   amt->stream_dev->ifindex);
	if (IS_ERR(rt)) {
		amt->dev->stats.tx_errors++;
		goto out;
	}

	hlen = LL_RESERVED_SPACE(amt->dev);
	tlen = amt->dev->needed_tailroom;
	len = hlen + tlen + sizeof(*iph) + sizeof(*udph) + sizeof(*amtrh);
	skb = netdev_alloc_skb_ip_align(amt->dev, len);
	if (!skb) {
		ip_rt_put(rt);
		amt->dev->stats.tx_errors++;
		goto out;
	}

	skb->priority = TC_PRIO_CONTROL;
	skb_dst_set(skb, &rt->dst);

	len = sizeof(*iph) + sizeof(*udph) + sizeof(*amtrh);
	skb_reset_network_header(skb);
	skb_put(skb, len);
	amtrh = skb_pull(skb, sizeof(*iph) + sizeof(*udph));
	amtrh->version	 = 0;
	amtrh->type	 = AMT_MSG_REQUEST;
	amtrh->reserved1 = 0;
	amtrh->p	 = v6;
	amtrh->reserved2 = 0;
	amtrh->nonce	 = amt->nonce;
	skb_push(skb, sizeof(*udph));
	skb_reset_transport_header(skb);
	udph		= udp_hdr(skb);
	udph->source	= amt->gw_port;
	udph->dest	= amt->relay_port;
	udph->len	= htons(sizeof(*amtrh) + sizeof(*udph));
	udph->check	= 0;
	offset = skb_transport_offset(skb);
	skb->csum = skb_checksum(skb, offset, skb->len - offset, 0);
	udph->check = csum_tcpudp_magic(amt->local_ip, amt->remote_ip,
					sizeof(*udph) + sizeof(*amtrh),
					IPPROTO_UDP, skb->csum);

	skb_push(skb, sizeof(*iph));
	iph		= ip_hdr(skb);
	iph->version	= 4;
	iph->ihl	= (sizeof(struct iphdr)) >> 2;
	iph->tos	= AMT_TOS;
	iph->frag_off	= 0;
	iph->ttl	= ip4_dst_hoplimit(&rt->dst);
	iph->daddr	= amt->remote_ip;
	iph->saddr	= amt->local_ip;
	iph->protocol	= IPPROTO_UDP;
	iph->tot_len	= htons(len);

	skb->ip_summed = CHECKSUM_NONE;
	ip_select_ident(amt->net, skb, NULL);
	ip_send_check(iph);
	err = ip_local_out(amt->net, sock->sk, skb);
	if (unlikely(net_xmit_eval(err)))
		amt->dev->stats.tx_errors++;

out:
	rcu_read_unlock();
}
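
/* Relay side: inject a locally built IGMPv3 general query into the device's
 * transmit path; amt_dev_xmit() recognizes it as a query and encapsulates
 * it as an AMT Membership Query for the tunnel stored in the skb control
 * block.
 */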
static void amt_send_igmp_gq(struct amt_dev *amt,
			     struct amt_tunnel_list *tunnel)
{
	struct sk_buff *skb;

	skb = amt_build_igmp_gq(amt);
	if (!skb)
		return;

	amt_skb_cb(skb)->tunnel = tunnel;
	dev_queue_xmit(skb);
}

#if IS_ENABLED(CONFIG_IPV6)
static struct sk_buff *amt_build_mld_gq(struct amt_dev *amt)
{
	u8 ra[AMT_IP6HDR_OPTS] = { IPPROTO_ICMPV6, 0, IPV6_TLV_ROUTERALERT,
				   2, 0, 0, IPV6_TLV_PAD1, IPV6_TLV_PAD1 };
	int hlen = LL_RESERVED_SPACE(amt->dev);
	int tlen = amt->dev->needed_tailroom;
	struct mld2_query *mld2q;
	void *csum_start = NULL;
	struct ipv6hdr *ip6h;
	struct sk_buff *skb;
	struct ethhdr *eth;
	u32 len;

	len = hlen + tlen + sizeof(*ip6h) + sizeof(ra) + sizeof(*mld2q);
	skb = netdev_alloc_skb_ip_align(amt->dev, len);
	if (!skb)
		return NULL;

	skb_reserve(skb, hlen);
	skb_push(skb, sizeof(*eth));
	skb_reset_mac_header(skb);
	eth = eth_hdr(skb);
	skb->priority = TC_PRIO_CONTROL;
	skb->protocol = htons(ETH_P_IPV6);
	skb_put_zero(skb, sizeof(*ip6h));
	skb_put_data(skb, ra, sizeof(ra));
	skb_put_zero(skb, sizeof(*mld2q));
	skb_pull(skb, sizeof(*eth));
	skb_reset_network_header(skb);
	ip6h			= ipv6_hdr(skb);
	ip6h->payload_len	= htons(sizeof(ra) + sizeof(*mld2q));
	ip6h->nexthdr		= NEXTHDR_HOP;
	ip6h->hop_limit		= 1;
	ip6h->daddr		= mld2_all_node;
	ip6_flow_hdr(ip6h, 0, 0);

	if (ipv6_dev_get_saddr(amt->net, amt->dev, &ip6h->daddr, 0,
			       &ip6h->saddr)) {
		amt->dev->stats.tx_errors++;
		kfree_skb(skb);
		return NULL;
	}

	eth->h_proto = htons(ETH_P_IPV6);
	ether_addr_copy(eth->h_source, amt->dev->dev_addr);
	ipv6_eth_mc_map(&mld2_all_node, eth->h_dest);

	skb_pull(skb, sizeof(*ip6h) + sizeof(ra));
	skb_reset_transport_header(skb);
	mld2q			= (struct mld2_query *)icmp6_hdr(skb);
	mld2q->mld2q_mrc	= htons(1);
	mld2q->mld2q_type	= ICMPV6_MGM_QUERY;
	mld2q->mld2q_code	= 0;
	mld2q->mld2q_cksum	= 0;
	mld2q->mld2q_resv1	= 0;
	mld2q->mld2q_resv2	= 0;
	mld2q->mld2q_suppress	= 0;
	mld2q->mld2q_qrv	= amt->qrv;
	mld2q->mld2q_nsrcs	= 0;
	mld2q->mld2q_qqic	= amt->qi;
	csum_start		= (void *)mld2q;
	mld2q->mld2q_cksum = csum_ipv6_magic(&ip6h->saddr, &ip6h->daddr,
					     sizeof(*mld2q),
					     IPPROTO_ICMPV6,
					     csum_partial(csum_start,
							  sizeof(*mld2q), 0));

	skb->ip_summed = CHECKSUM_NONE;
	skb_push(skb, sizeof(*eth) + sizeof(*ip6h) + sizeof(ra));

	return skb;
}

static void amt_send_mld_gq(struct amt_dev *amt, struct amt_tunnel_list *tunnel)
{
	struct sk_buff *skb;

	skb = amt_build_mld_gq(amt);
	if (!skb)
		return;

	amt_skb_cb(skb)->tunnel = tunnel;
	dev_queue_xmit(skb);
}
#else
static void amt_send_mld_gq(struct amt_dev *amt, struct amt_tunnel_list *tunnel)
{
}
#endif
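
/* Enqueue an event for the event worker.  The events array is a fixed-size
 * ring of AMT_MAX_EVENTS entries protected by amt->lock; returns true if
 * the ring is full and the event could not be queued.
 */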
static bool amt_queue_event(struct amt_dev *amt, enum amt_event event,
			    struct sk_buff *skb)
{
	int index;

	spin_lock_bh(&amt->lock);
	if (amt->nr_events >= AMT_MAX_EVENTS) {
		spin_unlock_bh(&amt->lock);
		return true;
	}

	index = (amt->event_idx + amt->nr_events) % AMT_MAX_EVENTS;
	amt->events[index].event = event;
	amt->events[index].skb = skb;
	amt->nr_events++;
	amt->event_idx %= AMT_MAX_EVENTS;
	queue_work(amt_wq, &amt->event_wq);
	spin_unlock_bh(&amt->lock);

	return false;
}

static void amt_secret_work(struct work_struct *work)
{
	struct amt_dev *amt = container_of(to_delayed_work(work),
					   struct amt_dev,
					   secret_wq);

	spin_lock_bh(&amt->lock);
	get_random_bytes(&amt->key, sizeof(siphash_key_t));
	spin_unlock_bh(&amt->lock);
	mod_delayed_work(amt_wq, &amt->secret_wq,
			 msecs_to_jiffies(AMT_SECRET_TIMEOUT));
}

static void amt_event_send_discovery(struct amt_dev *amt)
{
	if (amt->status > AMT_STATUS_SENT_DISCOVERY)
		goto out;
	get_random_bytes(&amt->nonce, sizeof(__be32));

	amt_send_discovery(amt);
out:
	mod_delayed_work(amt_wq, &amt->discovery_wq,
			 msecs_to_jiffies(AMT_DISCOVERY_TIMEOUT));
}

static void amt_discovery_work(struct work_struct *work)
{
	struct amt_dev *amt = container_of(to_delayed_work(work),
					   struct amt_dev,
					   discovery_wq);

	if (amt_queue_event(amt, AMT_EVENT_SEND_DISCOVERY, NULL))
		mod_delayed_work(amt_wq, &amt->discovery_wq,
				 msecs_to_jiffies(AMT_DISCOVERY_TIMEOUT));
}

static void amt_event_send_request(struct amt_dev *amt)
{
	u32 exp;

	if (amt->status < AMT_STATUS_RECEIVED_ADVERTISEMENT)
		goto out;

	if (amt->req_cnt > AMT_MAX_REQ_COUNT) {
		netdev_dbg(amt->dev, "Gateway is not ready");
		amt->qi = AMT_INIT_REQ_TIMEOUT;
		WRITE_ONCE(amt->ready4, false);
		WRITE_ONCE(amt->ready6, false);
		amt->remote_ip = 0;
		amt_update_gw_status(amt, AMT_STATUS_INIT, false);
		amt->req_cnt = 0;
		amt->nonce = 0;
		goto out;
	}

	if (!amt->req_cnt) {
		WRITE_ONCE(amt->ready4, false);
		WRITE_ONCE(amt->ready6, false);
		get_random_bytes(&amt->nonce, sizeof(__be32));
	}

	amt_send_request(amt, false);
	amt_send_request(amt, true);
	amt_update_gw_status(amt, AMT_STATUS_SENT_REQUEST, true);
	amt->req_cnt++;
out:
	exp = min_t(u32, (1 * (1 << amt->req_cnt)), AMT_MAX_REQ_TIMEOUT);
	mod_delayed_work(amt_wq, &amt->req_wq, msecs_to_jiffies(exp * 1000));
}

static void amt_req_work(struct work_struct *work)
{
	struct amt_dev *amt = container_of(to_delayed_work(work),
					   struct amt_dev,
					   req_wq);

	if (amt_queue_event(amt, AMT_EVENT_SEND_REQUEST, NULL))
		mod_delayed_work(amt_wq, &amt->req_wq,
				 msecs_to_jiffies(100));
}
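
/* Gateway side: wrap a locally generated IGMP/MLD report in an AMT
 * Membership Update header (echoing the relay's nonce and response MAC)
 * and UDP-tunnel it to the relay.  Returns true on failure so the caller
 * can free the skb.
 */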
static bool amt_send_membership_update(struct amt_dev *amt,
				       struct sk_buff *skb,
				       bool v6)
{
	struct amt_header_membership_update *amtmu;
	struct socket *sock;
	struct iphdr *iph;
	struct flowi4 fl4;
	struct rtable *rt;
	int err;

	sock = rcu_dereference_bh(amt->sock);
	if (!sock)
		return true;

	err = skb_cow_head(skb, LL_RESERVED_SPACE(amt->dev) + sizeof(*amtmu) +
			   sizeof(*iph) + sizeof(struct udphdr));
	if (err)
		return true;

	skb_reset_inner_headers(skb);
	memset(&fl4, 0, sizeof(struct flowi4));
	fl4.flowi4_oif	 = amt->stream_dev->ifindex;
	fl4.daddr	 = amt->remote_ip;
	fl4.saddr	 = amt->local_ip;
	fl4.flowi4_tos	 = AMT_TOS;
	fl4.flowi4_proto = IPPROTO_UDP;
	rt = ip_route_output_key(amt->net, &fl4);
	if (IS_ERR(rt)) {
		netdev_dbg(amt->dev, "no route to %pI4\n", &amt->remote_ip);
		return true;
	}

	amtmu			= skb_push(skb, sizeof(*amtmu));
	amtmu->version		= 0;
	amtmu->type		= AMT_MSG_MEMBERSHIP_UPDATE;
	amtmu->reserved		= 0;
	amtmu->nonce		= amt->nonce;
	amtmu->response_mac	= amt->mac;

	if (!v6)
		skb_set_inner_protocol(skb, htons(ETH_P_IP));
	else
		skb_set_inner_protocol(skb, htons(ETH_P_IPV6));
	udp_tunnel_xmit_skb(rt, sock->sk, skb,
			    fl4.saddr,
			    fl4.daddr,
			    AMT_TOS,
			    ip4_dst_hoplimit(&rt->dst),
			    0,
			    amt->gw_port,
			    amt->relay_port,
			    false,
			    false);
	amt_update_gw_status(amt, AMT_STATUS_SENT_UPDATE, true);
	return false;
}
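
/* Relay side: copy the forwarded multicast packet, prepend an AMT Multicast
 * Data header, and UDP-tunnel the copy to the gateway behind this tunnel
 * entry.  The original skb is left untouched so it can be replicated to
 * other tunnels as well.
 */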
static void amt_send_multicast_data(struct amt_dev *amt,
				    const struct sk_buff *oskb,
				    struct amt_tunnel_list *tunnel,
				    bool v6)
{
	struct amt_header_mcast_data *amtmd;
	struct socket *sock;
	struct sk_buff *skb;
	struct iphdr *iph;
	struct flowi4 fl4;
	struct rtable *rt;

	sock = rcu_dereference_bh(amt->sock);
	if (!sock)
		return;

	skb = skb_copy_expand(oskb, sizeof(*amtmd) + sizeof(*iph) +
			      sizeof(struct udphdr), 0, GFP_ATOMIC);
	if (!skb)
		return;

	skb_reset_inner_headers(skb);
	memset(&fl4, 0, sizeof(struct flowi4));
	fl4.flowi4_oif	 = amt->stream_dev->ifindex;
	fl4.daddr	 = tunnel->ip4;
	fl4.saddr	 = amt->local_ip;
	fl4.flowi4_proto = IPPROTO_UDP;
	rt = ip_route_output_key(amt->net, &fl4);
	if (IS_ERR(rt)) {
		netdev_dbg(amt->dev, "no route to %pI4\n", &tunnel->ip4);
		kfree_skb(skb);
		return;
	}

	amtmd		= skb_push(skb, sizeof(*amtmd));
	amtmd->version	= 0;
	amtmd->reserved	= 0;
	amtmd->type	= AMT_MSG_MULTICAST_DATA;

	if (!v6)
		skb_set_inner_protocol(skb, htons(ETH_P_IP));
	else
		skb_set_inner_protocol(skb, htons(ETH_P_IPV6));
	udp_tunnel_xmit_skb(rt, sock->sk, skb,
			    fl4.saddr,
			    fl4.daddr,
			    AMT_TOS,
			    ip4_dst_hoplimit(&rt->dst),
			    0,
			    amt->relay_port,
			    tunnel->source_port,
			    false,
			    false);
}

static bool amt_send_membership_query(struct amt_dev *amt,
				      struct sk_buff *skb,
				      struct amt_tunnel_list *tunnel,
				      bool v6)
{
	struct amt_header_membership_query *amtmq;
	struct socket *sock;
	struct rtable *rt;
	struct flowi4 fl4;
	int err;

	sock = rcu_dereference_bh(amt->sock);
	if (!sock)
		return true;

	err = skb_cow_head(skb, LL_RESERVED_SPACE(amt->dev) + sizeof(*amtmq) +
			   sizeof(struct iphdr) + sizeof(struct udphdr));
	if (err)
		return true;

	skb_reset_inner_headers(skb);
	memset(&fl4, 0, sizeof(struct flowi4));
	fl4.flowi4_oif	 = amt->stream_dev->ifindex;
	fl4.daddr	 = tunnel->ip4;
	fl4.saddr	 = amt->local_ip;
	fl4.flowi4_tos	 = AMT_TOS;
	fl4.flowi4_proto = IPPROTO_UDP;
	rt = ip_route_output_key(amt->net, &fl4);
	if (IS_ERR(rt)) {
		netdev_dbg(amt->dev, "no route to %pI4\n", &tunnel->ip4);
		return true;
	}

	amtmq		= skb_push(skb, sizeof(*amtmq));
	amtmq->version	= 0;
	amtmq->type	= AMT_MSG_MEMBERSHIP_QUERY;
	amtmq->reserved	= 0;
	amtmq->l	= 0;
	amtmq->g	= 0;
	amtmq->nonce	= tunnel->nonce;
	amtmq->response_mac = tunnel->mac;

	if (!v6)
		skb_set_inner_protocol(skb, htons(ETH_P_IP));
	else
		skb_set_inner_protocol(skb, htons(ETH_P_IPV6));
	udp_tunnel_xmit_skb(rt, sock->sk, skb,
			    fl4.saddr,
			    fl4.daddr,
			    AMT_TOS,
			    ip4_dst_hoplimit(&rt->dst),
			    0,
			    amt->relay_port,
			    tunnel->source_port,
			    false,
			    false);
	amt_update_relay_status(tunnel, AMT_STATUS_SENT_QUERY, true);
	return false;
}
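
/* ndo_start_xmit handler.  Classifies the outgoing packet as an IGMP/MLD
 * report, a query, or multicast data, then either encapsulates it toward
 * the relay (gateway mode) or replicates it to every tunnel that has a
 * matching group entry (relay mode).  Anything else is dropped.
 */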
static netdev_tx_t amt_dev_xmit(struct sk_buff *skb, struct net_device *dev)
{
	struct amt_dev *amt = netdev_priv(dev);
	struct amt_tunnel_list *tunnel;
	struct amt_group_node *gnode;
	union amt_addr group = {0,};
#if IS_ENABLED(CONFIG_IPV6)
	struct ipv6hdr *ip6h;
	struct mld_msg *mld;
#endif
	bool report = false;
	struct igmphdr *ih;
	bool query = false;
	struct iphdr *iph;
	bool data = false;
	bool v6 = false;
	u32 hash;

	iph = ip_hdr(skb);
	if (iph->version == 4) {
		if (!ipv4_is_multicast(iph->daddr))
			goto free;

		if (!ip_mc_check_igmp(skb)) {
			ih = igmp_hdr(skb);
			switch (ih->type) {
			case IGMPV3_HOST_MEMBERSHIP_REPORT:
			case IGMP_HOST_MEMBERSHIP_REPORT:
				report = true;
				break;
			case IGMP_HOST_MEMBERSHIP_QUERY:
				query = true;
				break;
			default:
				goto free;
			}
		} else {
			data = true;
		}
		v6 = false;
		group.ip4 = iph->daddr;
#if IS_ENABLED(CONFIG_IPV6)
	} else if (iph->version == 6) {
		ip6h = ipv6_hdr(skb);
		if (!ipv6_addr_is_multicast(&ip6h->daddr))
			goto free;

		if (!ipv6_mc_check_mld(skb)) {
			mld = (struct mld_msg *)skb_transport_header(skb);
			switch (mld->mld_type) {
			case ICMPV6_MGM_REPORT:
			case ICMPV6_MLD2_REPORT:
				report = true;
				break;
			case ICMPV6_MGM_QUERY:
				query = true;
				break;
			default:
				goto free;
			}
		} else {
			data = true;
		}
		v6 = true;
		group.ip6 = ip6h->daddr;
#endif
	} else {
		dev->stats.tx_errors++;
		goto free;
	}

	if (!pskb_may_pull(skb, sizeof(struct ethhdr)))
		goto free;

	skb_pull(skb, sizeof(struct ethhdr));

	if (amt->mode == AMT_MODE_GATEWAY) {
		/* Gateway only passes IGMP/MLD packets */
		if (!report)
			goto free;
		if ((!v6 && !READ_ONCE(amt->ready4)) ||
		    (v6 && !READ_ONCE(amt->ready6)))
			goto free;
		if (amt_send_membership_update(amt, skb, v6))
			goto free;
		goto unlock;
	} else if (amt->mode == AMT_MODE_RELAY) {
		if (query) {
			tunnel = amt_skb_cb(skb)->tunnel;
			if (!tunnel) {
				WARN_ON(1);
				goto free;
			}

			/* Do not forward unexpected query */
			if (amt_send_membership_query(amt, skb, tunnel, v6))
				goto free;
			goto unlock;
		}

		if (!data)
			goto free;
		list_for_each_entry_rcu(tunnel, &amt->tunnel_list, list) {
			hash = amt_group_hash(tunnel, &group);
			hlist_for_each_entry_rcu(gnode, &tunnel->groups[hash],
						 node) {
				if (!v6) {
					if (gnode->group_addr.ip4 == iph->daddr)
						goto found;
#if IS_ENABLED(CONFIG_IPV6)
				} else {
					if (ipv6_addr_equal(&gnode->group_addr.ip6,
							    &ip6h->daddr))
						goto found;
#endif
				}
			}
			continue;
found:
			amt_send_multicast_data(amt, skb, tunnel, v6);
		}
	}

	dev_kfree_skb(skb);
	return NETDEV_TX_OK;
free:
	dev_kfree_skb(skb);
unlock:
	dev->stats.tx_dropped++;
	return NETDEV_TX_OK;
}
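
/* Validate the AMT header that follows the UDP header and return its
 * message type, or -1 if the packet is too short, has a non-zero version,
 * or carries an unknown type.
 */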
static int amt_parse_type(struct sk_buff *skb)
{
	struct amt_header *amth;

	if (!pskb_may_pull(skb, sizeof(struct udphdr) +
			   sizeof(struct amt_header)))
		return -1;

	amth = (struct amt_header *)(udp_hdr(skb) + 1);

	if (amth->version != 0)
		return -1;

	if (amth->type >= __AMT_MSG_MAX || !amth->type)
		return -1;
	return amth->type;
}

static void amt_clear_groups(struct amt_tunnel_list *tunnel)
{
	struct amt_dev *amt = tunnel->amt;
	struct amt_group_node *gnode;
	struct hlist_node *t;
	int i;

	spin_lock_bh(&tunnel->lock);
	rcu_read_lock();
	for (i = 0; i < amt->hash_buckets; i++)
		hlist_for_each_entry_safe(gnode, t, &tunnel->groups[i], node)
			amt_del_group(amt, gnode);
	rcu_read_unlock();
	spin_unlock_bh(&tunnel->lock);
}

static void amt_tunnel_expire(struct work_struct *work)
{
	struct amt_tunnel_list *tunnel = container_of(to_delayed_work(work),
						      struct amt_tunnel_list,
						      gc_wq);
	struct amt_dev *amt = tunnel->amt;

	spin_lock_bh(&amt->lock);
	rcu_read_lock();
	list_del_rcu(&tunnel->list);
	amt->nr_tunnels--;
	amt_clear_groups(tunnel);
	rcu_read_unlock();
	spin_unlock_bh(&amt->lock);
	kfree_rcu(tunnel, rcu);
}
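
/* After a membership report has been processed, drop every source still
 * marked OLD and then age the surviving NEW sources to OLD, so the next
 * report starts from a clean OLD/NEW split.
 */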
static void amt_cleanup_srcs(struct amt_dev *amt,
			     struct amt_tunnel_list *tunnel,
			     struct amt_group_node *gnode)
{
	struct amt_source_node *snode;
	struct hlist_node *t;
	int i;

	/* Delete old sources */
	for (i = 0; i < amt->hash_buckets; i++) {
		hlist_for_each_entry_safe(snode, t, &gnode->sources[i], node) {
			if (snode->flags == AMT_SOURCE_OLD)
				amt_destroy_source(snode);
		}
	}

	/* switch from new to old */
	for (i = 0; i < amt->hash_buckets; i++) {
		hlist_for_each_entry_rcu(snode, &gnode->sources[i], node) {
			snode->flags = AMT_SOURCE_OLD;
			if (!gnode->v6)
				netdev_dbg(snode->gnode->amt->dev,
					   "Add source as OLD %pI4 from %pI4\n",
					   &snode->source_addr.ip4,
					   &gnode->group_addr.ip4);
#if IS_ENABLED(CONFIG_IPV6)
			else
				netdev_dbg(snode->gnode->amt->dev,
					   "Add source as OLD %pI6 from %pI6\n",
					   &snode->source_addr.ip6,
					   &gnode->group_addr.ip6);
#endif
		}
	}
}
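
/* Walk the source list of an IGMPv3/MLDv2 group record and allocate a NEW
 * source node for every address not already known, bounded by the
 * per-tunnel max_sources limit.
 */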
static void amt_add_srcs(struct amt_dev *amt, struct amt_tunnel_list *tunnel,
			 struct amt_group_node *gnode, void *grec,
			 bool v6)
{
	struct igmpv3_grec *igmp_grec;
	struct amt_source_node *snode;
#if IS_ENABLED(CONFIG_IPV6)
	struct mld2_grec *mld_grec;
#endif
	union amt_addr src = {0,};
	u16 nsrcs;
	u32 hash;
	int i;

	if (!v6) {
		igmp_grec = grec;
		nsrcs = ntohs(igmp_grec->grec_nsrcs);
	} else {
#if IS_ENABLED(CONFIG_IPV6)
		mld_grec = grec;
		nsrcs = ntohs(mld_grec->grec_nsrcs);
#else
		return;
#endif
	}
	for (i = 0; i < nsrcs; i++) {
		if (tunnel->nr_sources >= amt->max_sources)
			return;

		if (!v6)
			src.ip4 = igmp_grec->grec_src[i];
#if IS_ENABLED(CONFIG_IPV6)
		else
			memcpy(&src.ip6, &mld_grec->grec_src[i],
			       sizeof(struct in6_addr));
#endif
		if (amt_lookup_src(tunnel, gnode, AMT_FILTER_ALL, &src))
			continue;

		snode = amt_alloc_snode(gnode, &src);
		if (snode) {
			hash = amt_source_hash(tunnel, &snode->source_addr);
			hlist_add_head_rcu(&snode->node, &gnode->sources[hash]);
			tunnel->nr_sources++;
			gnode->nr_sources++;

			if (!gnode->v6)
				netdev_dbg(snode->gnode->amt->dev,
					   "Add source as NEW %pI4 from %pI4\n",
					   &snode->source_addr.ip4,
					   &gnode->group_addr.ip4);
#if IS_ENABLED(CONFIG_IPV6)
			else
				netdev_dbg(snode->gnode->amt->dev,
					   "Add source as NEW %pI6 from %pI6\n",
					   &snode->source_addr.ip6,
					   &gnode->group_addr.ip6);
#endif
		}
	}
}
  1286. /* Router State Report Rec'd New Router State
  1287. * ------------ ------------ ----------------
  1288. * EXCLUDE (X,Y) IS_IN (A) EXCLUDE (X+A,Y-A)
  1289. *
  1290. * -----------+-----------+-----------+
  1291. * | OLD | NEW |
  1292. * -----------+-----------+-----------+
  1293. * FWD | X | X+A |
  1294. * -----------+-----------+-----------+
  1295. * D_FWD | Y | Y-A |
  1296. * -----------+-----------+-----------+
  1297. * NONE | | A |
  1298. * -----------+-----------+-----------+
  1299. *
  1300. * a) Received sources are NONE/NEW
  1301. * b) All NONE will be deleted by amt_cleanup_srcs().
  1302. * c) All OLD will be deleted by amt_cleanup_srcs().
  1303. * d) After delete, NEW source will be switched to OLD.
  1304. */
static void amt_lookup_act_srcs(struct amt_tunnel_list *tunnel,
				struct amt_group_node *gnode,
				void *grec,
				enum amt_ops ops,
				enum amt_filter filter,
				enum amt_act act,
				bool v6)
{
	struct amt_dev *amt = tunnel->amt;
	struct amt_source_node *snode;
	struct igmpv3_grec *igmp_grec;
#if IS_ENABLED(CONFIG_IPV6)
	struct mld2_grec *mld_grec;
#endif
	union amt_addr src = {0,};
	struct hlist_node *t;
	u16 nsrcs;
	int i, j;

	if (!v6) {
		igmp_grec = grec;
		nsrcs = ntohs(igmp_grec->grec_nsrcs);
	} else {
#if IS_ENABLED(CONFIG_IPV6)
		mld_grec = grec;
		nsrcs = ntohs(mld_grec->grec_nsrcs);
#else
		return;
#endif
	}

	switch (ops) {
	case AMT_OPS_INT:
		/* A*B */
		for (i = 0; i < nsrcs; i++) {
			if (!v6)
				src.ip4 = igmp_grec->grec_src[i];
#if IS_ENABLED(CONFIG_IPV6)
			else
				memcpy(&src.ip6, &mld_grec->grec_src[i],
				       sizeof(struct in6_addr));
#endif
			snode = amt_lookup_src(tunnel, gnode, filter, &src);
			if (!snode)
				continue;
			amt_act_src(tunnel, gnode, snode, act);
		}
		break;
	case AMT_OPS_UNI:
		/* A+B */
		for (i = 0; i < amt->hash_buckets; i++) {
			hlist_for_each_entry_safe(snode, t, &gnode->sources[i],
						  node) {
				if (amt_status_filter(snode, filter))
					amt_act_src(tunnel, gnode, snode, act);
			}
		}
		for (i = 0; i < nsrcs; i++) {
			if (!v6)
				src.ip4 = igmp_grec->grec_src[i];
#if IS_ENABLED(CONFIG_IPV6)
			else
				memcpy(&src.ip6, &mld_grec->grec_src[i],
				       sizeof(struct in6_addr));
#endif
			snode = amt_lookup_src(tunnel, gnode, filter, &src);
			if (!snode)
				continue;
			amt_act_src(tunnel, gnode, snode, act);
		}
		break;
	case AMT_OPS_SUB:
		/* A-B */
		for (i = 0; i < amt->hash_buckets; i++) {
			hlist_for_each_entry_safe(snode, t, &gnode->sources[i],
						  node) {
				if (!amt_status_filter(snode, filter))
					continue;
				for (j = 0; j < nsrcs; j++) {
					if (!v6)
						src.ip4 = igmp_grec->grec_src[j];
#if IS_ENABLED(CONFIG_IPV6)
					else
						memcpy(&src.ip6,
						       &mld_grec->grec_src[j],
						       sizeof(struct in6_addr));
#endif
					if (amt_addr_equal(&snode->source_addr,
							   &src))
						goto out_sub;
				}
				amt_act_src(tunnel, gnode, snode, act);
				continue;
out_sub:;
			}
		}
		break;
	case AMT_OPS_SUB_REV:
		/* B-A */
		for (i = 0; i < nsrcs; i++) {
			if (!v6)
				src.ip4 = igmp_grec->grec_src[i];
#if IS_ENABLED(CONFIG_IPV6)
			else
				memcpy(&src.ip6, &mld_grec->grec_src[i],
				       sizeof(struct in6_addr));
#endif
			snode = amt_lookup_src(tunnel, gnode, AMT_FILTER_ALL,
					       &src);
			if (!snode) {
				snode = amt_lookup_src(tunnel, gnode,
						       filter, &src);
				if (snode)
					amt_act_src(tunnel, gnode, snode, act);
			}
		}
		break;
	default:
		netdev_dbg(amt->dev, "Invalid type\n");
		return;
	}
}
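
/* The amt_mcast_*_handler() functions below decompose each row of the
 * RFC 3376/RFC 3810 state transition tables into a short sequence of
 * amt_lookup_act_srcs() calls: the set operation (INT/UNI/SUB/SUB_REV)
 * selects which sources are visited, the filter restricts the visit to
 * the table row they currently occupy, and the act moves them to their
 * new row or updates their timers.
 */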
static void amt_mcast_is_in_handler(struct amt_dev *amt,
				    struct amt_tunnel_list *tunnel,
				    struct amt_group_node *gnode,
				    void *grec, void *zero_grec, bool v6)
{
	if (gnode->filter_mode == MCAST_INCLUDE) {
/* Router State   Report Rec'd New Router State        Actions
 * ------------   ------------ ----------------        -------
 * INCLUDE (A)    IS_IN (B)    INCLUDE (A+B)           (B)=GMI
 */
		/* Update IS_IN (B) as FWD/NEW */
		amt_lookup_act_srcs(tunnel, gnode, grec, AMT_OPS_UNI,
				    AMT_FILTER_NONE_NEW,
				    AMT_ACT_STATUS_FWD_NEW,
				    v6);
		/* Update INCLUDE (A) as NEW */
		amt_lookup_act_srcs(tunnel, gnode, grec, AMT_OPS_UNI,
				    AMT_FILTER_FWD,
				    AMT_ACT_STATUS_FWD_NEW,
				    v6);
		/* (B)=GMI */
		amt_lookup_act_srcs(tunnel, gnode, grec, AMT_OPS_INT,
				    AMT_FILTER_FWD_NEW,
				    AMT_ACT_GMI,
				    v6);
	} else {
/* Router State   Report Rec'd New Router State        Actions
 * ------------   ------------ ----------------        -------
 * EXCLUDE (X,Y)  IS_IN (A)    EXCLUDE (X+A,Y-A)       (A)=GMI
 */
		/* Update (A) in (X, Y) as NONE/NEW */
		amt_lookup_act_srcs(tunnel, gnode, grec, AMT_OPS_INT,
				    AMT_FILTER_BOTH,
				    AMT_ACT_STATUS_NONE_NEW,
				    v6);
		/* Update FWD/OLD as FWD/NEW */
		amt_lookup_act_srcs(tunnel, gnode, zero_grec, AMT_OPS_UNI,
				    AMT_FILTER_FWD,
				    AMT_ACT_STATUS_FWD_NEW,
				    v6);
		/* Update IS_IN (A) as FWD/NEW */
		amt_lookup_act_srcs(tunnel, gnode, grec, AMT_OPS_INT,
				    AMT_FILTER_NONE_NEW,
				    AMT_ACT_STATUS_FWD_NEW,
				    v6);
		/* Update EXCLUDE (, Y-A) as D_FWD/NEW */
		amt_lookup_act_srcs(tunnel, gnode, grec, AMT_OPS_SUB,
				    AMT_FILTER_D_FWD,
				    AMT_ACT_STATUS_D_FWD_NEW,
				    v6);
	}
}

static void amt_mcast_is_ex_handler(struct amt_dev *amt,
				    struct amt_tunnel_list *tunnel,
				    struct amt_group_node *gnode,
				    void *grec, void *zero_grec, bool v6)
{
	if (gnode->filter_mode == MCAST_INCLUDE) {
/* Router State   Report Rec'd New Router State        Actions
 * ------------   ------------ ----------------        -------
 * INCLUDE (A)    IS_EX (B)    EXCLUDE (A*B,B-A)       (B-A)=0
 *                                                     Delete (A-B)
 *                                                     Group Timer=GMI
 */
		/* EXCLUDE (A*B, ) */
		amt_lookup_act_srcs(tunnel, gnode, grec, AMT_OPS_INT,
				    AMT_FILTER_FWD,
				    AMT_ACT_STATUS_FWD_NEW,
				    v6);
		/* EXCLUDE (, B-A) */
		amt_lookup_act_srcs(tunnel, gnode, grec, AMT_OPS_SUB_REV,
				    AMT_FILTER_FWD,
				    AMT_ACT_STATUS_D_FWD_NEW,
				    v6);
		/* (B-A)=0 */
		amt_lookup_act_srcs(tunnel, gnode, zero_grec, AMT_OPS_UNI,
				    AMT_FILTER_D_FWD_NEW,
				    AMT_ACT_GMI_ZERO,
				    v6);
		/* Group Timer=GMI */
		if (!mod_delayed_work(amt_wq, &gnode->group_timer,
				      msecs_to_jiffies(amt_gmi(amt))))
			dev_hold(amt->dev);
		gnode->filter_mode = MCAST_EXCLUDE;
		/* Deleting (A-B) is handled by amt_cleanup_srcs(). */
	} else {
/* Router State   Report Rec'd New Router State        Actions
 * ------------   ------------ ----------------        -------
 * EXCLUDE (X,Y)  IS_EX (A)    EXCLUDE (A-Y,Y*A)       (A-X-Y)=GMI
 *                                                     Delete (X-A)
 *                                                     Delete (Y-A)
 *                                                     Group Timer=GMI
 */
		/* EXCLUDE (A-Y, ) */
		amt_lookup_act_srcs(tunnel, gnode, grec, AMT_OPS_SUB_REV,
				    AMT_FILTER_D_FWD,
				    AMT_ACT_STATUS_FWD_NEW,
				    v6);
		/* EXCLUDE (, Y*A) */
		amt_lookup_act_srcs(tunnel, gnode, grec, AMT_OPS_INT,
				    AMT_FILTER_D_FWD,
				    AMT_ACT_STATUS_D_FWD_NEW,
				    v6);
		/* (A-X-Y)=GMI */
		amt_lookup_act_srcs(tunnel, gnode, grec, AMT_OPS_SUB_REV,
				    AMT_FILTER_BOTH_NEW,
				    AMT_ACT_GMI,
				    v6);
		/* Group Timer=GMI */
		if (!mod_delayed_work(amt_wq, &gnode->group_timer,
				      msecs_to_jiffies(amt_gmi(amt))))
			dev_hold(amt->dev);
		/* Deleting (X-A) and (Y-A) is handled by amt_cleanup_srcs(). */
	}
}

static void amt_mcast_to_in_handler(struct amt_dev *amt,
				    struct amt_tunnel_list *tunnel,
				    struct amt_group_node *gnode,
				    void *grec, void *zero_grec, bool v6)
{
	if (gnode->filter_mode == MCAST_INCLUDE) {
/* Router State   Report Rec'd New Router State        Actions
 * ------------   ------------ ----------------        -------
 * INCLUDE (A)    TO_IN (B)    INCLUDE (A+B)           (B)=GMI
 *                                                     Send Q(G,A-B)
 */
		/* Update TO_IN (B) sources as FWD/NEW */
		amt_lookup_act_srcs(tunnel, gnode, grec, AMT_OPS_UNI,
				    AMT_FILTER_NONE_NEW,
				    AMT_ACT_STATUS_FWD_NEW,
				    v6);
		/* Update INCLUDE (A) sources as NEW */
		amt_lookup_act_srcs(tunnel, gnode, grec, AMT_OPS_UNI,
				    AMT_FILTER_FWD,
				    AMT_ACT_STATUS_FWD_NEW,
				    v6);
		/* (B)=GMI */
		amt_lookup_act_srcs(tunnel, gnode, grec, AMT_OPS_INT,
				    AMT_FILTER_FWD_NEW,
				    AMT_ACT_GMI,
				    v6);
	} else {
/* Router State   Report Rec'd New Router State        Actions
 * ------------   ------------ ----------------        -------
 * EXCLUDE (X,Y)  TO_IN (A)    EXCLUDE (X+A,Y-A)       (A)=GMI
 *                                                     Send Q(G,X-A)
 *                                                     Send Q(G)
 */
		/* Update TO_IN (A) sources as FWD/NEW */
		amt_lookup_act_srcs(tunnel, gnode, grec, AMT_OPS_UNI,
				    AMT_FILTER_NONE_NEW,
				    AMT_ACT_STATUS_FWD_NEW,
				    v6);
		/* Update EXCLUDE(X,) sources as FWD/NEW */
		amt_lookup_act_srcs(tunnel, gnode, grec, AMT_OPS_UNI,
				    AMT_FILTER_FWD,
				    AMT_ACT_STATUS_FWD_NEW,
				    v6);
		/* EXCLUDE (, Y-A)
		 * (A) are already switched to FWD_NEW.
		 * So, D_FWD/OLD -> D_FWD/NEW is okay.
		 */
		amt_lookup_act_srcs(tunnel, gnode, zero_grec, AMT_OPS_UNI,
				    AMT_FILTER_D_FWD,
				    AMT_ACT_STATUS_D_FWD_NEW,
				    v6);
		/* (A)=GMI
		 * Only FWD_NEW will have (A) sources.
		 */
		amt_lookup_act_srcs(tunnel, gnode, grec, AMT_OPS_INT,
				    AMT_FILTER_FWD_NEW,
				    AMT_ACT_GMI,
				    v6);
	}
}

static void amt_mcast_to_ex_handler(struct amt_dev *amt,
				    struct amt_tunnel_list *tunnel,
				    struct amt_group_node *gnode,
				    void *grec, void *zero_grec, bool v6)
{
	if (gnode->filter_mode == MCAST_INCLUDE) {
/* Router State   Report Rec'd New Router State        Actions
 * ------------   ------------ ----------------        -------
 * INCLUDE (A)    TO_EX (B)    EXCLUDE (A*B,B-A)       (B-A)=0
 *                                                     Delete (A-B)
 *                                                     Send Q(G,A*B)
 *                                                     Group Timer=GMI
 */
		/* EXCLUDE (A*B, ) */
		amt_lookup_act_srcs(tunnel, gnode, grec, AMT_OPS_INT,
				    AMT_FILTER_FWD,
				    AMT_ACT_STATUS_FWD_NEW,
				    v6);
		/* EXCLUDE (, B-A) */
		amt_lookup_act_srcs(tunnel, gnode, grec, AMT_OPS_SUB_REV,
				    AMT_FILTER_FWD,
				    AMT_ACT_STATUS_D_FWD_NEW,
				    v6);
		/* (B-A)=0 */
		amt_lookup_act_srcs(tunnel, gnode, zero_grec, AMT_OPS_UNI,
				    AMT_FILTER_D_FWD_NEW,
				    AMT_ACT_GMI_ZERO,
				    v6);
		/* Group Timer=GMI */
		if (!mod_delayed_work(amt_wq, &gnode->group_timer,
				      msecs_to_jiffies(amt_gmi(amt))))
			dev_hold(amt->dev);
		gnode->filter_mode = MCAST_EXCLUDE;
		/* Deleting (A-B) is handled by amt_cleanup_srcs(). */
	} else {
/* Router State   Report Rec'd New Router State        Actions
 * ------------   ------------ ----------------        -------
 * EXCLUDE (X,Y)  TO_EX (A)    EXCLUDE (A-Y,Y*A)       (A-X-Y)=Group Timer
 *                                                     Delete (X-A)
 *                                                     Delete (Y-A)
 *                                                     Send Q(G,A-Y)
 *                                                     Group Timer=GMI
 */
		/* Update (A-X-Y) as NONE/OLD */
		amt_lookup_act_srcs(tunnel, gnode, grec, AMT_OPS_SUB_REV,
				    AMT_FILTER_BOTH,
				    AMT_ACT_GT,
				    v6);
		/* EXCLUDE (A-Y, ) */
		amt_lookup_act_srcs(tunnel, gnode, grec, AMT_OPS_SUB_REV,
				    AMT_FILTER_D_FWD,
				    AMT_ACT_STATUS_FWD_NEW,
				    v6);
		/* EXCLUDE (, Y*A) */
		amt_lookup_act_srcs(tunnel, gnode, grec, AMT_OPS_INT,
				    AMT_FILTER_D_FWD,
				    AMT_ACT_STATUS_D_FWD_NEW,
				    v6);
		/* Group Timer=GMI */
		if (!mod_delayed_work(amt_wq, &gnode->group_timer,
				      msecs_to_jiffies(amt_gmi(amt))))
			dev_hold(amt->dev);
		/* Deleting (X-A) and (Y-A) is handled by amt_cleanup_srcs(). */
	}
}

static void amt_mcast_allow_handler(struct amt_dev *amt,
				    struct amt_tunnel_list *tunnel,
				    struct amt_group_node *gnode,
				    void *grec, void *zero_grec, bool v6)
{
	if (gnode->filter_mode == MCAST_INCLUDE) {
/* Router State   Report Rec'd New Router State        Actions
 * ------------   ------------ ----------------        -------
 * INCLUDE (A)    ALLOW (B)    INCLUDE (A+B)           (B)=GMI
 */
		/* INCLUDE (A+B) */
		amt_lookup_act_srcs(tunnel, gnode, grec, AMT_OPS_UNI,
				    AMT_FILTER_FWD,
				    AMT_ACT_STATUS_FWD_NEW,
				    v6);
		/* (B)=GMI */
		amt_lookup_act_srcs(tunnel, gnode, grec, AMT_OPS_INT,
				    AMT_FILTER_FWD_NEW,
				    AMT_ACT_GMI,
				    v6);
	} else {
/* Router State   Report Rec'd New Router State        Actions
 * ------------   ------------ ----------------        -------
 * EXCLUDE (X,Y)  ALLOW (A)    EXCLUDE (X+A,Y-A)       (A)=GMI
 */
		/* EXCLUDE (X+A, ) */
		amt_lookup_act_srcs(tunnel, gnode, grec, AMT_OPS_UNI,
				    AMT_FILTER_FWD,
				    AMT_ACT_STATUS_FWD_NEW,
				    v6);
		/* EXCLUDE (, Y-A) */
		amt_lookup_act_srcs(tunnel, gnode, grec, AMT_OPS_SUB,
				    AMT_FILTER_D_FWD,
				    AMT_ACT_STATUS_D_FWD_NEW,
				    v6);
		/* (A)=GMI
		 * All (A) sources now have FWD/NEW status.
		 */
		amt_lookup_act_srcs(tunnel, gnode, grec, AMT_OPS_INT,
				    AMT_FILTER_FWD_NEW,
				    AMT_ACT_GMI,
				    v6);
	}
}

static void amt_mcast_block_handler(struct amt_dev *amt,
				    struct amt_tunnel_list *tunnel,
				    struct amt_group_node *gnode,
				    void *grec, void *zero_grec, bool v6)
{
	if (gnode->filter_mode == MCAST_INCLUDE) {
/* Router State   Report Rec'd New Router State        Actions
 * ------------   ------------ ----------------        -------
 * INCLUDE (A)    BLOCK (B)    INCLUDE (A)             Send Q(G,A*B)
 */
		/* INCLUDE (A) */
		amt_lookup_act_srcs(tunnel, gnode, zero_grec, AMT_OPS_UNI,
				    AMT_FILTER_FWD,
				    AMT_ACT_STATUS_FWD_NEW,
				    v6);
	} else {
/* Router State   Report Rec'd New Router State        Actions
 * ------------   ------------ ----------------        -------
 * EXCLUDE (X,Y)  BLOCK (A)    EXCLUDE (X+(A-Y),Y)     (A-X-Y)=Group Timer
 *                                                     Send Q(G,A-Y)
 */
		/* (A-X-Y)=Group Timer */
		amt_lookup_act_srcs(tunnel, gnode, grec, AMT_OPS_SUB_REV,
				    AMT_FILTER_BOTH,
				    AMT_ACT_GT,
				    v6);
		/* EXCLUDE (X, ) */
		amt_lookup_act_srcs(tunnel, gnode, grec, AMT_OPS_UNI,
				    AMT_FILTER_FWD,
				    AMT_ACT_STATUS_FWD_NEW,
				    v6);
		/* EXCLUDE (X+(A-Y), ) */
		amt_lookup_act_srcs(tunnel, gnode, grec, AMT_OPS_SUB_REV,
				    AMT_FILTER_D_FWD,
				    AMT_ACT_STATUS_FWD_NEW,
				    v6);
		/* EXCLUDE (, Y) */
		amt_lookup_act_srcs(tunnel, gnode, grec, AMT_OPS_UNI,
				    AMT_FILTER_D_FWD,
				    AMT_ACT_STATUS_D_FWD_NEW,
				    v6);
	}
}

/* RFC 3376
 * 7.3.2. In the Presence of Older Version Group Members
 *
 * When Group Compatibility Mode is IGMPv2, a router internally
 * translates the following IGMPv2 messages for that group to their
 * IGMPv3 equivalents:
 *
 * IGMPv2 Message                 IGMPv3 Equivalent
 * --------------                 -----------------
 * Report                         IS_EX( {} )
 * Leave                          TO_IN( {} )
 */
static void amt_igmpv2_report_handler(struct amt_dev *amt, struct sk_buff *skb,
				      struct amt_tunnel_list *tunnel)
{
	struct igmphdr *ih = igmp_hdr(skb);
	struct iphdr *iph = ip_hdr(skb);
	struct amt_group_node *gnode;
	union amt_addr group, host;

	memset(&group, 0, sizeof(union amt_addr));
	group.ip4 = ih->group;
	memset(&host, 0, sizeof(union amt_addr));
	host.ip4 = iph->saddr;

	gnode = amt_lookup_group(tunnel, &group, &host, false);
	if (!gnode) {
		gnode = amt_add_group(amt, tunnel, &group, &host, false);
		if (!IS_ERR(gnode)) {
			gnode->filter_mode = MCAST_EXCLUDE;
			if (!mod_delayed_work(amt_wq, &gnode->group_timer,
					      msecs_to_jiffies(amt_gmi(amt))))
				dev_hold(amt->dev);
		}
	}
}

/* RFC 3376
 * 7.3.2. In the Presence of Older Version Group Members
 *
 * When Group Compatibility Mode is IGMPv2, a router internally
 * translates the following IGMPv2 messages for that group to their
 * IGMPv3 equivalents:
 *
 * IGMPv2 Message                 IGMPv3 Equivalent
 * --------------                 -----------------
 * Report                         IS_EX( {} )
 * Leave                          TO_IN( {} )
 */
static void amt_igmpv2_leave_handler(struct amt_dev *amt, struct sk_buff *skb,
				     struct amt_tunnel_list *tunnel)
{
	struct igmphdr *ih = igmp_hdr(skb);
	struct iphdr *iph = ip_hdr(skb);
	struct amt_group_node *gnode;
	union amt_addr group, host;

	memset(&group, 0, sizeof(union amt_addr));
	group.ip4 = ih->group;
	memset(&host, 0, sizeof(union amt_addr));
	host.ip4 = iph->saddr;

	gnode = amt_lookup_group(tunnel, &group, &host, false);
	if (gnode)
		amt_del_group(amt, gnode);
}

static void amt_igmpv3_report_handler(struct amt_dev *amt, struct sk_buff *skb,
				      struct amt_tunnel_list *tunnel)
{
	struct igmpv3_report *ihrv3 = igmpv3_report_hdr(skb);
	int len = skb_transport_offset(skb) + sizeof(*ihrv3);
	void *zero_grec = (void *)&igmpv3_zero_grec;
	struct iphdr *iph = ip_hdr(skb);
	struct amt_group_node *gnode;
	union amt_addr group, host;
	struct igmpv3_grec *grec;
	u16 nsrcs;
	int i;

	for (i = 0; i < ntohs(ihrv3->ngrec); i++) {
		len += sizeof(*grec);
		if (!ip_mc_may_pull(skb, len))
			break;

		grec = (void *)(skb->data + len - sizeof(*grec));
		nsrcs = ntohs(grec->grec_nsrcs);

		len += nsrcs * sizeof(__be32);
		if (!ip_mc_may_pull(skb, len))
			break;

		memset(&group, 0, sizeof(union amt_addr));
		group.ip4 = grec->grec_mca;
		memset(&host, 0, sizeof(union amt_addr));
		host.ip4 = iph->saddr;
		gnode = amt_lookup_group(tunnel, &group, &host, false);
		if (!gnode) {
			gnode = amt_add_group(amt, tunnel, &group, &host,
					      false);
			if (IS_ERR(gnode))
				continue;
		}

		amt_add_srcs(amt, tunnel, gnode, grec, false);
		switch (grec->grec_type) {
		case IGMPV3_MODE_IS_INCLUDE:
			amt_mcast_is_in_handler(amt, tunnel, gnode, grec,
						zero_grec, false);
			break;
		case IGMPV3_MODE_IS_EXCLUDE:
			amt_mcast_is_ex_handler(amt, tunnel, gnode, grec,
						zero_grec, false);
			break;
		case IGMPV3_CHANGE_TO_INCLUDE:
			amt_mcast_to_in_handler(amt, tunnel, gnode, grec,
						zero_grec, false);
			break;
		case IGMPV3_CHANGE_TO_EXCLUDE:
			amt_mcast_to_ex_handler(amt, tunnel, gnode, grec,
						zero_grec, false);
			break;
		case IGMPV3_ALLOW_NEW_SOURCES:
			amt_mcast_allow_handler(amt, tunnel, gnode, grec,
						zero_grec, false);
			break;
		case IGMPV3_BLOCK_OLD_SOURCES:
			amt_mcast_block_handler(amt, tunnel, gnode, grec,
						zero_grec, false);
			break;
		default:
			break;
		}
		amt_cleanup_srcs(amt, tunnel, gnode);
	}
}

/* caller must hold tunnel->lock */
static void amt_igmp_report_handler(struct amt_dev *amt, struct sk_buff *skb,
				    struct amt_tunnel_list *tunnel)
{
	struct igmphdr *ih = igmp_hdr(skb);

	switch (ih->type) {
	case IGMPV3_HOST_MEMBERSHIP_REPORT:
		amt_igmpv3_report_handler(amt, skb, tunnel);
		break;
	case IGMPV2_HOST_MEMBERSHIP_REPORT:
		amt_igmpv2_report_handler(amt, skb, tunnel);
		break;
	case IGMP_HOST_LEAVE_MESSAGE:
		amt_igmpv2_leave_handler(amt, skb, tunnel);
		break;
	default:
		break;
	}
}

#if IS_ENABLED(CONFIG_IPV6)
/* RFC 3810
 * 8.3.2. In the Presence of MLDv1 Multicast Address Listeners
 *
 * When Multicast Address Compatibility Mode is MLDv2, a router acts
 * using the MLDv2 protocol for that multicast address. When Multicast
 * Address Compatibility Mode is MLDv1, a router internally translates
 * the following MLDv1 messages for that multicast address to their
 * MLDv2 equivalents:
 *
 * MLDv1 Message                 MLDv2 Equivalent
 * --------------                -----------------
 * Report                        IS_EX( {} )
 * Done                          TO_IN( {} )
 */
static void amt_mldv1_report_handler(struct amt_dev *amt, struct sk_buff *skb,
				     struct amt_tunnel_list *tunnel)
{
	struct mld_msg *mld = (struct mld_msg *)icmp6_hdr(skb);
	struct ipv6hdr *ip6h = ipv6_hdr(skb);
	struct amt_group_node *gnode;
	union amt_addr group, host;

	memcpy(&group.ip6, &mld->mld_mca, sizeof(struct in6_addr));
	memcpy(&host.ip6, &ip6h->saddr, sizeof(struct in6_addr));

	gnode = amt_lookup_group(tunnel, &group, &host, true);
	if (!gnode) {
		gnode = amt_add_group(amt, tunnel, &group, &host, true);
		if (!IS_ERR(gnode)) {
			gnode->filter_mode = MCAST_EXCLUDE;
			if (!mod_delayed_work(amt_wq, &gnode->group_timer,
					      msecs_to_jiffies(amt_gmi(amt))))
				dev_hold(amt->dev);
		}
	}
}

/* RFC 3810
 * 8.3.2. In the Presence of MLDv1 Multicast Address Listeners
 *
 * When Multicast Address Compatibility Mode is MLDv2, a router acts
 * using the MLDv2 protocol for that multicast address. When Multicast
 * Address Compatibility Mode is MLDv1, a router internally translates
 * the following MLDv1 messages for that multicast address to their
 * MLDv2 equivalents:
 *
 * MLDv1 Message                 MLDv2 Equivalent
 * --------------                -----------------
 * Report                        IS_EX( {} )
 * Done                          TO_IN( {} )
 */
static void amt_mldv1_leave_handler(struct amt_dev *amt, struct sk_buff *skb,
				    struct amt_tunnel_list *tunnel)
{
	struct mld_msg *mld = (struct mld_msg *)icmp6_hdr(skb);
	struct ipv6hdr *ip6h = ipv6_hdr(skb);
	struct amt_group_node *gnode;
	union amt_addr group, host;

	/* Build the host key from the IPv6 source address, matching how
	 * amt_mldv1_report_handler() stored it, so the lookup can find
	 * the group that the report created.
	 */
	memcpy(&group.ip6, &mld->mld_mca, sizeof(struct in6_addr));
	memcpy(&host.ip6, &ip6h->saddr, sizeof(struct in6_addr));

	gnode = amt_lookup_group(tunnel, &group, &host, true);
	if (gnode)
		amt_del_group(amt, gnode);
}

static void amt_mldv2_report_handler(struct amt_dev *amt, struct sk_buff *skb,
				     struct amt_tunnel_list *tunnel)
{
	struct mld2_report *mld2r = (struct mld2_report *)icmp6_hdr(skb);
	int len = skb_transport_offset(skb) + sizeof(*mld2r);
	void *zero_grec = (void *)&mldv2_zero_grec;
	struct ipv6hdr *ip6h = ipv6_hdr(skb);
	struct amt_group_node *gnode;
	union amt_addr group, host;
	struct mld2_grec *grec;
	u16 nsrcs;
	int i;

	for (i = 0; i < ntohs(mld2r->mld2r_ngrec); i++) {
		len += sizeof(*grec);
		if (!ipv6_mc_may_pull(skb, len))
			break;

		grec = (void *)(skb->data + len - sizeof(*grec));
		nsrcs = ntohs(grec->grec_nsrcs);

		len += nsrcs * sizeof(struct in6_addr);
		if (!ipv6_mc_may_pull(skb, len))
			break;

		memset(&group, 0, sizeof(union amt_addr));
		group.ip6 = grec->grec_mca;
		memset(&host, 0, sizeof(union amt_addr));
		host.ip6 = ip6h->saddr;
		gnode = amt_lookup_group(tunnel, &group, &host, true);
		if (!gnode) {
			gnode = amt_add_group(amt, tunnel, &group, &host,
					      true);
			if (IS_ERR(gnode))
				continue;
		}

		amt_add_srcs(amt, tunnel, gnode, grec, true);
		switch (grec->grec_type) {
		case MLD2_MODE_IS_INCLUDE:
			amt_mcast_is_in_handler(amt, tunnel, gnode, grec,
						zero_grec, true);
			break;
		case MLD2_MODE_IS_EXCLUDE:
			amt_mcast_is_ex_handler(amt, tunnel, gnode, grec,
						zero_grec, true);
			break;
		case MLD2_CHANGE_TO_INCLUDE:
			amt_mcast_to_in_handler(amt, tunnel, gnode, grec,
						zero_grec, true);
			break;
		case MLD2_CHANGE_TO_EXCLUDE:
			amt_mcast_to_ex_handler(amt, tunnel, gnode, grec,
						zero_grec, true);
			break;
		case MLD2_ALLOW_NEW_SOURCES:
			amt_mcast_allow_handler(amt, tunnel, gnode, grec,
						zero_grec, true);
			break;
		case MLD2_BLOCK_OLD_SOURCES:
			amt_mcast_block_handler(amt, tunnel, gnode, grec,
						zero_grec, true);
			break;
		default:
			break;
		}
		amt_cleanup_srcs(amt, tunnel, gnode);
	}
}

/* caller must hold tunnel->lock */
static void amt_mld_report_handler(struct amt_dev *amt, struct sk_buff *skb,
				   struct amt_tunnel_list *tunnel)
{
	struct mld_msg *mld = (struct mld_msg *)icmp6_hdr(skb);

	switch (mld->mld_type) {
	case ICMPV6_MGM_REPORT:
		amt_mldv1_report_handler(amt, skb, tunnel);
		break;
	case ICMPV6_MLD2_REPORT:
		amt_mldv2_report_handler(amt, skb, tunnel);
		break;
	case ICMPV6_MGM_REDUCTION:
		amt_mldv1_leave_handler(amt, skb, tunnel);
		break;
	default:
		break;
	}
}
#endif
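
/* Each amt_*_handler() below returns true when the packet is malformed
 * or otherwise must be dropped by the caller and false once it has been
 * handled; see the err handling in amt_gw_rcv() and amt_rcv().
 */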
static bool amt_advertisement_handler(struct amt_dev *amt, struct sk_buff *skb)
{
	struct amt_header_advertisement *amta;
	int hdr_size;

	hdr_size = sizeof(*amta) + sizeof(struct udphdr);
	if (!pskb_may_pull(skb, hdr_size))
		return true;

	amta = (struct amt_header_advertisement *)(udp_hdr(skb) + 1);
	if (!amta->ip4)
		return true;

	if (amta->reserved || amta->version)
		return true;

	if (ipv4_is_loopback(amta->ip4) || ipv4_is_multicast(amta->ip4) ||
	    ipv4_is_zeronet(amta->ip4))
		return true;

	if (amt->status != AMT_STATUS_SENT_DISCOVERY ||
	    amt->nonce != amta->nonce)
		return true;

	amt->remote_ip = amta->ip4;
	netdev_dbg(amt->dev, "advertised remote ip = %pI4\n", &amt->remote_ip);
	mod_delayed_work(amt_wq, &amt->req_wq, 0);

	amt_update_gw_status(amt, AMT_STATUS_RECEIVED_ADVERTISEMENT, true);
	return false;
}

static bool amt_multicast_data_handler(struct amt_dev *amt, struct sk_buff *skb)
{
	struct amt_header_mcast_data *amtmd;
	int hdr_size, len, err;
	struct ethhdr *eth;
	struct iphdr *iph;

	if (READ_ONCE(amt->status) != AMT_STATUS_SENT_UPDATE)
		return true;

	hdr_size = sizeof(*amtmd) + sizeof(struct udphdr);
	if (!pskb_may_pull(skb, hdr_size))
		return true;

	amtmd = (struct amt_header_mcast_data *)(udp_hdr(skb) + 1);
	if (amtmd->reserved || amtmd->version)
		return true;

	if (iptunnel_pull_header(skb, hdr_size, htons(ETH_P_IP), false))
		return true;

	skb_reset_network_header(skb);
	skb_push(skb, sizeof(*eth));
	skb_reset_mac_header(skb);
	skb_pull(skb, sizeof(*eth));
	eth = eth_hdr(skb);

	if (!pskb_may_pull(skb, sizeof(*iph)))
		return true;
	iph = ip_hdr(skb);

	if (iph->version == 4) {
		if (!ipv4_is_multicast(iph->daddr))
			return true;
		skb->protocol = htons(ETH_P_IP);
		eth->h_proto = htons(ETH_P_IP);
		ip_eth_mc_map(iph->daddr, eth->h_dest);
#if IS_ENABLED(CONFIG_IPV6)
	} else if (iph->version == 6) {
		struct ipv6hdr *ip6h;

		if (!pskb_may_pull(skb, sizeof(*ip6h)))
			return true;

		ip6h = ipv6_hdr(skb);
		if (!ipv6_addr_is_multicast(&ip6h->daddr))
			return true;
		skb->protocol = htons(ETH_P_IPV6);
		eth->h_proto = htons(ETH_P_IPV6);
		ipv6_eth_mc_map(&ip6h->daddr, eth->h_dest);
#endif
	} else {
		return true;
	}

	skb->pkt_type = PACKET_MULTICAST;
	skb->ip_summed = CHECKSUM_NONE;
	len = skb->len;
	err = gro_cells_receive(&amt->gro_cells, skb);
	if (likely(err == NET_RX_SUCCESS))
		dev_sw_netstats_rx_add(amt->dev, len);
	else
		amt->dev->stats.rx_dropped++;

	return false;
}
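
/* A gateway accepts one membership query per address family until
 * ready4/ready6 are cleared for the next request cycle. The query
 * carries the relay's response MAC and QQIC, which are cached here for
 * building the subsequent membership update messages.
 */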
static bool amt_membership_query_handler(struct amt_dev *amt,
					 struct sk_buff *skb)
{
	struct amt_header_membership_query *amtmq;
	struct igmpv3_query *ihv3;
	struct ethhdr *eth, *oeth;
	struct iphdr *iph;
	int hdr_size, len;

	hdr_size = sizeof(*amtmq) + sizeof(struct udphdr);
	if (!pskb_may_pull(skb, hdr_size))
		return true;

	amtmq = (struct amt_header_membership_query *)(udp_hdr(skb) + 1);
	if (amtmq->reserved || amtmq->version)
		return true;

	if (amtmq->nonce != amt->nonce)
		return true;

	hdr_size -= sizeof(*eth);
	if (iptunnel_pull_header(skb, hdr_size, htons(ETH_P_TEB), false))
		return true;

	oeth = eth_hdr(skb);
	skb_reset_mac_header(skb);
	skb_pull(skb, sizeof(*eth));
	skb_reset_network_header(skb);
	eth = eth_hdr(skb);
	if (!pskb_may_pull(skb, sizeof(*iph)))
		return true;

	iph = ip_hdr(skb);
	if (iph->version == 4) {
		if (READ_ONCE(amt->ready4))
			return true;

		if (!pskb_may_pull(skb, sizeof(*iph) + AMT_IPHDR_OPTS +
				   sizeof(*ihv3)))
			return true;

		if (!ipv4_is_multicast(iph->daddr))
			return true;

		ihv3 = skb_pull(skb, sizeof(*iph) + AMT_IPHDR_OPTS);
		skb_reset_transport_header(skb);
		skb_push(skb, sizeof(*iph) + AMT_IPHDR_OPTS);
		WRITE_ONCE(amt->ready4, true);
		amt->mac = amtmq->response_mac;
		amt->req_cnt = 0;
		amt->qi = ihv3->qqic;
		skb->protocol = htons(ETH_P_IP);
		eth->h_proto = htons(ETH_P_IP);
		ip_eth_mc_map(iph->daddr, eth->h_dest);
#if IS_ENABLED(CONFIG_IPV6)
	} else if (iph->version == 6) {
		struct mld2_query *mld2q;
		struct ipv6hdr *ip6h;

		if (READ_ONCE(amt->ready6))
			return true;

		if (!pskb_may_pull(skb, sizeof(*ip6h) + AMT_IP6HDR_OPTS +
				   sizeof(*mld2q)))
			return true;

		ip6h = ipv6_hdr(skb);
		if (!ipv6_addr_is_multicast(&ip6h->daddr))
			return true;

		mld2q = skb_pull(skb, sizeof(*ip6h) + AMT_IP6HDR_OPTS);
		skb_reset_transport_header(skb);
		skb_push(skb, sizeof(*ip6h) + AMT_IP6HDR_OPTS);
		WRITE_ONCE(amt->ready6, true);
		amt->mac = amtmq->response_mac;
		amt->req_cnt = 0;
		amt->qi = mld2q->mld2q_qqic;
		skb->protocol = htons(ETH_P_IPV6);
		eth->h_proto = htons(ETH_P_IPV6);
		ipv6_eth_mc_map(&ip6h->daddr, eth->h_dest);
#endif
	} else {
		return true;
	}

	ether_addr_copy(eth->h_source, oeth->h_source);
	skb->pkt_type = PACKET_MULTICAST;
	skb->ip_summed = CHECKSUM_NONE;
	len = skb->len;
	local_bh_disable();
	if (__netif_rx(skb) == NET_RX_SUCCESS) {
		amt_update_gw_status(amt, AMT_STATUS_RECEIVED_QUERY, true);
		dev_sw_netstats_rx_add(amt->dev, len);
	} else {
		amt->dev->stats.rx_dropped++;
	}
	local_bh_enable();

	return false;
}
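
/* A membership update is accepted only from a known tunnel whose stored
 * nonce and response MAC both match the packet, so a spoofed update
 * from another source address or with a stale MAC is rejected before
 * the inner IGMP/MLD report is parsed.
 */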
static bool amt_update_handler(struct amt_dev *amt, struct sk_buff *skb)
{
	struct amt_header_membership_update *amtmu;
	struct amt_tunnel_list *tunnel;
	struct ethhdr *eth;
	struct iphdr *iph;
	int len, hdr_size;

	iph = ip_hdr(skb);

	hdr_size = sizeof(*amtmu) + sizeof(struct udphdr);
	if (!pskb_may_pull(skb, hdr_size))
		return true;

	amtmu = (struct amt_header_membership_update *)(udp_hdr(skb) + 1);
	if (amtmu->reserved || amtmu->version)
		return true;

	if (iptunnel_pull_header(skb, hdr_size, skb->protocol, false))
		return true;

	skb_reset_network_header(skb);

	list_for_each_entry_rcu(tunnel, &amt->tunnel_list, list) {
		if (tunnel->ip4 == iph->saddr) {
			if (amtmu->nonce == tunnel->nonce &&
			    amtmu->response_mac == tunnel->mac) {
				mod_delayed_work(amt_wq, &tunnel->gc_wq,
						 msecs_to_jiffies(amt_gmi(amt))
						 * 3);
				goto report;
			} else {
				netdev_dbg(amt->dev, "Invalid MAC\n");
				return true;
			}
		}
	}

	return true;

report:
	if (!pskb_may_pull(skb, sizeof(*iph)))
		return true;

	iph = ip_hdr(skb);
	if (iph->version == 4) {
		if (ip_mc_check_igmp(skb)) {
			netdev_dbg(amt->dev, "Invalid IGMP\n");
			return true;
		}

		spin_lock_bh(&tunnel->lock);
		amt_igmp_report_handler(amt, skb, tunnel);
		spin_unlock_bh(&tunnel->lock);

		skb_push(skb, sizeof(struct ethhdr));
		skb_reset_mac_header(skb);
		eth = eth_hdr(skb);
		skb->protocol = htons(ETH_P_IP);
		eth->h_proto = htons(ETH_P_IP);
		ip_eth_mc_map(iph->daddr, eth->h_dest);
#if IS_ENABLED(CONFIG_IPV6)
	} else if (iph->version == 6) {
		struct ipv6hdr *ip6h = ipv6_hdr(skb);

		if (ipv6_mc_check_mld(skb)) {
			netdev_dbg(amt->dev, "Invalid MLD\n");
			return true;
		}

		spin_lock_bh(&tunnel->lock);
		amt_mld_report_handler(amt, skb, tunnel);
		spin_unlock_bh(&tunnel->lock);

		skb_push(skb, sizeof(struct ethhdr));
		skb_reset_mac_header(skb);
		eth = eth_hdr(skb);
		skb->protocol = htons(ETH_P_IPV6);
		eth->h_proto = htons(ETH_P_IPV6);
		ipv6_eth_mc_map(&ip6h->daddr, eth->h_dest);
#endif
	} else {
		netdev_dbg(amt->dev, "Unsupported Protocol\n");
		return true;
	}

	skb_pull(skb, sizeof(struct ethhdr));
	skb->pkt_type = PACKET_MULTICAST;
	skb->ip_summed = CHECKSUM_NONE;
	len = skb->len;
	if (__netif_rx(skb) == NET_RX_SUCCESS) {
		amt_update_relay_status(tunnel, AMT_STATUS_RECEIVED_UPDATE,
					true);
		dev_sw_netstats_rx_add(amt->dev, len);
	} else {
		amt->dev->stats.rx_dropped++;
	}

	return false;
}
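
/* The advertisement is built from the inside out on a freshly allocated
 * skb: put the whole packet length, fill the AMT payload, prepend the
 * UDP header and checksum it over the payload, then prepend and
 * checksum the IPv4 header before handing the result to ip_local_out().
 */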
static void amt_send_advertisement(struct amt_dev *amt, __be32 nonce,
				   __be32 daddr, __be16 dport)
{
	struct amt_header_advertisement *amta;
	int hlen, tlen, offset;
	struct socket *sock;
	struct udphdr *udph;
	struct sk_buff *skb;
	struct iphdr *iph;
	struct rtable *rt;
	struct flowi4 fl4;
	u32 len;
	int err;

	rcu_read_lock();
	sock = rcu_dereference(amt->sock);
	if (!sock)
		goto out;

	if (!netif_running(amt->stream_dev) || !netif_running(amt->dev))
		goto out;

	rt = ip_route_output_ports(amt->net, &fl4, sock->sk,
				   daddr, amt->local_ip,
				   dport, amt->relay_port,
				   IPPROTO_UDP, 0,
				   amt->stream_dev->ifindex);
	if (IS_ERR(rt)) {
		amt->dev->stats.tx_errors++;
		goto out;
	}

	hlen = LL_RESERVED_SPACE(amt->dev);
	tlen = amt->dev->needed_tailroom;
	len = hlen + tlen + sizeof(*iph) + sizeof(*udph) + sizeof(*amta);
	skb = netdev_alloc_skb_ip_align(amt->dev, len);
	if (!skb) {
		ip_rt_put(rt);
		amt->dev->stats.tx_errors++;
		goto out;
	}

	skb->priority = TC_PRIO_CONTROL;
	skb_dst_set(skb, &rt->dst);

	len = sizeof(*iph) + sizeof(*udph) + sizeof(*amta);
	skb_reset_network_header(skb);
	skb_put(skb, len);
	amta = skb_pull(skb, sizeof(*iph) + sizeof(*udph));
	amta->version = 0;
	amta->type = AMT_MSG_ADVERTISEMENT;
	amta->reserved = 0;
	amta->nonce = nonce;
	amta->ip4 = amt->local_ip;
	skb_push(skb, sizeof(*udph));
	skb_reset_transport_header(skb);
	udph = udp_hdr(skb);
	udph->source = amt->relay_port;
	udph->dest = dport;
	udph->len = htons(sizeof(*amta) + sizeof(*udph));
	udph->check = 0;
	offset = skb_transport_offset(skb);
	skb->csum = skb_checksum(skb, offset, skb->len - offset, 0);
	udph->check = csum_tcpudp_magic(amt->local_ip, daddr,
					sizeof(*udph) + sizeof(*amta),
					IPPROTO_UDP, skb->csum);

	skb_push(skb, sizeof(*iph));
	iph = ip_hdr(skb);
	iph->version = 4;
	iph->ihl = (sizeof(struct iphdr)) >> 2;
	iph->tos = AMT_TOS;
	iph->frag_off = 0;
	iph->ttl = ip4_dst_hoplimit(&rt->dst);
	iph->daddr = daddr;
	iph->saddr = amt->local_ip;
	iph->protocol = IPPROTO_UDP;
	iph->tot_len = htons(len);

	skb->ip_summed = CHECKSUM_NONE;
	ip_select_ident(amt->net, skb, NULL);
	ip_send_check(iph);
	err = ip_local_out(amt->net, sock->sk, skb);
	if (unlikely(net_xmit_eval(err)))
		amt->dev->stats.tx_errors++;

out:
	rcu_read_unlock();
}

static bool amt_discovery_handler(struct amt_dev *amt, struct sk_buff *skb)
{
	struct amt_header_discovery *amtd;
	struct udphdr *udph;
	struct iphdr *iph;

	if (!pskb_may_pull(skb, sizeof(*udph) + sizeof(*amtd)))
		return true;

	iph = ip_hdr(skb);
	udph = udp_hdr(skb);
	amtd = (struct amt_header_discovery *)(udp_hdr(skb) + 1);

	if (amtd->reserved || amtd->version)
		return true;

	amt_send_advertisement(amt, amtd->nonce, iph->saddr, udph->source);

	return false;
}
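
/* The first request from a new source address allocates a tunnel entry,
 * bounded by max_tunnels. The response MAC returned to the gateway is
 * the upper 48 bits of a siphash over the tunnel's address, source port
 * and nonce, keyed with the relay's current secret (tunnel->key).
 */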
static bool amt_request_handler(struct amt_dev *amt, struct sk_buff *skb)
{
	struct amt_header_request *amtrh;
	struct amt_tunnel_list *tunnel;
	unsigned long long key;
	struct udphdr *udph;
	struct iphdr *iph;
	u64 mac;
	int i;

	if (!pskb_may_pull(skb, sizeof(*udph) + sizeof(*amtrh)))
		return true;

	iph = ip_hdr(skb);
	udph = udp_hdr(skb);
	amtrh = (struct amt_header_request *)(udp_hdr(skb) + 1);

	if (amtrh->reserved1 || amtrh->reserved2 || amtrh->version)
		return true;

	list_for_each_entry_rcu(tunnel, &amt->tunnel_list, list)
		if (tunnel->ip4 == iph->saddr)
			goto send;

	spin_lock_bh(&amt->lock);
	if (amt->nr_tunnels >= amt->max_tunnels) {
		spin_unlock_bh(&amt->lock);
		icmp_ndo_send(skb, ICMP_DEST_UNREACH, ICMP_HOST_UNREACH, 0);
		return true;
	}

	tunnel = kzalloc(sizeof(*tunnel) +
			 (sizeof(struct hlist_head) * amt->hash_buckets),
			 GFP_ATOMIC);
	if (!tunnel) {
		spin_unlock_bh(&amt->lock);
		return true;
	}

	tunnel->source_port = udph->source;
	tunnel->ip4 = iph->saddr;

	memcpy(&key, &tunnel->key, sizeof(unsigned long long));
	tunnel->amt = amt;
	spin_lock_init(&tunnel->lock);
	for (i = 0; i < amt->hash_buckets; i++)
		INIT_HLIST_HEAD(&tunnel->groups[i]);

	INIT_DELAYED_WORK(&tunnel->gc_wq, amt_tunnel_expire);

	list_add_tail_rcu(&tunnel->list, &amt->tunnel_list);
	tunnel->key = amt->key;
	__amt_update_relay_status(tunnel, AMT_STATUS_RECEIVED_REQUEST, true);
	amt->nr_tunnels++;
	mod_delayed_work(amt_wq, &tunnel->gc_wq,
			 msecs_to_jiffies(amt_gmi(amt)));
	spin_unlock_bh(&amt->lock);

send:
	tunnel->nonce = amtrh->nonce;
	mac = siphash_3u32((__force u32)tunnel->ip4,
			   (__force u32)tunnel->source_port,
			   (__force u32)tunnel->nonce,
			   &tunnel->key);
	tunnel->mac = mac >> 16;

	if (!netif_running(amt->dev) || !netif_running(amt->stream_dev))
		return true;

	if (!amtrh->p)
		amt_send_igmp_gq(amt, tunnel);
	else
		amt_send_mld_gq(amt, tunnel);

	return false;
}

static void amt_gw_rcv(struct amt_dev *amt, struct sk_buff *skb)
{
	int type = amt_parse_type(skb);
	int err = 1;

	if (type == -1)
		goto drop;

	if (amt->mode == AMT_MODE_GATEWAY) {
		switch (type) {
		case AMT_MSG_ADVERTISEMENT:
			err = amt_advertisement_handler(amt, skb);
			break;
		case AMT_MSG_MEMBERSHIP_QUERY:
			err = amt_membership_query_handler(amt, skb);
			if (!err)
				return;
			break;
		default:
			netdev_dbg(amt->dev, "Invalid type of Gateway\n");
			break;
		}
	}
drop:
	if (err) {
		amt->dev->stats.rx_dropped++;
		kfree_skb(skb);
	} else {
		consume_skb(skb);
	}
}
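
/* amt_rcv() is the UDP tunnel encap_rcv callback registered in
 * amt_socket_create(). It always returns 0, so the skb never falls
 * back to the regular UDP receive path and every branch must queue,
 * consume or free the skb itself.
 */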
static int amt_rcv(struct sock *sk, struct sk_buff *skb)
{
	struct amt_dev *amt;
	struct iphdr *iph;
	int type;
	bool err;

	rcu_read_lock_bh();
	amt = rcu_dereference_sk_user_data(sk);
	if (!amt) {
		err = true;
		kfree_skb(skb);
		goto out;
	}

	skb->dev = amt->dev;
	iph = ip_hdr(skb);
	type = amt_parse_type(skb);
	if (type == -1) {
		err = true;
		goto drop;
	}

	if (amt->mode == AMT_MODE_GATEWAY) {
		switch (type) {
		case AMT_MSG_ADVERTISEMENT:
			if (iph->saddr != amt->discovery_ip) {
				netdev_dbg(amt->dev, "Invalid Relay IP\n");
				err = true;
				goto drop;
			}
			if (amt_queue_event(amt, AMT_EVENT_RECEIVE, skb)) {
				netdev_dbg(amt->dev, "AMT Event queue full\n");
				err = true;
				goto drop;
			}
			goto out;
		case AMT_MSG_MULTICAST_DATA:
			if (iph->saddr != amt->remote_ip) {
				netdev_dbg(amt->dev, "Invalid Relay IP\n");
				err = true;
				goto drop;
			}
			err = amt_multicast_data_handler(amt, skb);
			if (err)
				goto drop;
			else
				goto out;
		case AMT_MSG_MEMBERSHIP_QUERY:
			if (iph->saddr != amt->remote_ip) {
				netdev_dbg(amt->dev, "Invalid Relay IP\n");
				err = true;
				goto drop;
			}
			if (amt_queue_event(amt, AMT_EVENT_RECEIVE, skb)) {
				netdev_dbg(amt->dev, "AMT Event queue full\n");
				err = true;
				goto drop;
			}
			goto out;
		default:
			err = true;
			netdev_dbg(amt->dev, "Invalid type of Gateway\n");
			break;
		}
	} else {
		switch (type) {
		case AMT_MSG_DISCOVERY:
			err = amt_discovery_handler(amt, skb);
			break;
		case AMT_MSG_REQUEST:
			err = amt_request_handler(amt, skb);
			break;
		case AMT_MSG_MEMBERSHIP_UPDATE:
			err = amt_update_handler(amt, skb);
			if (err)
				goto drop;
			else
				goto out;
		default:
			err = true;
			netdev_dbg(amt->dev, "Invalid type of relay\n");
			break;
		}
	}
drop:
	if (err) {
		amt->dev->stats.rx_dropped++;
		kfree_skb(skb);
	} else {
		consume_skb(skb);
	}
out:
	rcu_read_unlock_bh();
	return 0;
}
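
/* amt->events[] is a fixed ring of AMT_MAX_EVENTS entries protected by
 * amt->lock: amt_queue_event() produces at the tail while this work
 * item consumes from event_idx, draining at most AMT_MAX_EVENTS entries
 * per scheduled run.
 */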
static void amt_event_work(struct work_struct *work)
{
	struct amt_dev *amt = container_of(work, struct amt_dev, event_wq);
	struct sk_buff *skb;
	u8 event;
	int i;

	for (i = 0; i < AMT_MAX_EVENTS; i++) {
		spin_lock_bh(&amt->lock);
		if (amt->nr_events == 0) {
			spin_unlock_bh(&amt->lock);
			return;
		}
		event = amt->events[amt->event_idx].event;
		skb = amt->events[amt->event_idx].skb;
		amt->events[amt->event_idx].event = AMT_EVENT_NONE;
		amt->events[amt->event_idx].skb = NULL;
		amt->nr_events--;
		amt->event_idx++;
		amt->event_idx %= AMT_MAX_EVENTS;
		spin_unlock_bh(&amt->lock);

		switch (event) {
		case AMT_EVENT_RECEIVE:
			amt_gw_rcv(amt, skb);
			break;
		case AMT_EVENT_SEND_DISCOVERY:
			amt_event_send_discovery(amt);
			break;
		case AMT_EVENT_SEND_REQUEST:
			amt_event_send_request(amt);
			break;
		default:
			kfree_skb(skb);
			break;
		}
	}
}

static int amt_err_lookup(struct sock *sk, struct sk_buff *skb)
{
	struct amt_dev *amt;
	int type;

	rcu_read_lock_bh();
	amt = rcu_dereference_sk_user_data(sk);
	if (!amt)
		goto out;

	if (amt->mode != AMT_MODE_GATEWAY)
		goto drop;

	type = amt_parse_type(skb);
	if (type == -1)
		goto drop;

	netdev_dbg(amt->dev, "Received IGMP Unreachable of %s\n",
		   type_str[type]);
	switch (type) {
	case AMT_MSG_DISCOVERY:
		break;
	case AMT_MSG_REQUEST:
	case AMT_MSG_MEMBERSHIP_UPDATE:
		if (READ_ONCE(amt->status) >= AMT_STATUS_RECEIVED_ADVERTISEMENT)
			mod_delayed_work(amt_wq, &amt->req_wq, 0);
		break;
	default:
		goto drop;
	}
out:
	rcu_read_unlock_bh();
	return 0;
drop:
	rcu_read_unlock_bh();
	amt->dev->stats.rx_dropped++;
	return 0;
}
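
/* amt_err_lookup() above is the encap_err_lookup callback, invoked for
 * ICMP errors received on the tunnel socket. An unreachable
 * notification for a sent REQUEST or MEMBERSHIP_UPDATE reschedules
 * req_wq so the gateway retries its handshake with the relay.
 */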
static struct socket *amt_create_sock(struct net *net, __be16 port)
{
	struct udp_port_cfg udp_conf;
	struct socket *sock;
	int err;

	memset(&udp_conf, 0, sizeof(udp_conf));
	udp_conf.family = AF_INET;
	udp_conf.local_ip.s_addr = htonl(INADDR_ANY);
	udp_conf.local_udp_port = port;

	err = udp_sock_create(net, &udp_conf, &sock);
	if (err < 0)
		return ERR_PTR(err);

	return sock;
}

static int amt_socket_create(struct amt_dev *amt)
{
	struct udp_tunnel_sock_cfg tunnel_cfg;
	struct socket *sock;

	sock = amt_create_sock(amt->net, amt->relay_port);
	if (IS_ERR(sock))
		return PTR_ERR(sock);

	/* Mark socket as an encapsulation socket */
	memset(&tunnel_cfg, 0, sizeof(tunnel_cfg));
	tunnel_cfg.sk_user_data = amt;
	tunnel_cfg.encap_type = 1;
	tunnel_cfg.encap_rcv = amt_rcv;
	tunnel_cfg.encap_err_lookup = amt_err_lookup;
	tunnel_cfg.encap_destroy = NULL;
	setup_udp_tunnel_sock(amt->net, sock, &tunnel_cfg);

	rcu_assign_pointer(amt->sock, sock);
	return 0;
}

static int amt_dev_open(struct net_device *dev)
{
	struct amt_dev *amt = netdev_priv(dev);
	int err;

	amt->ready4 = false;
	amt->ready6 = false;
	amt->event_idx = 0;
	amt->nr_events = 0;

	err = amt_socket_create(amt);
	if (err)
		return err;

	amt->req_cnt = 0;
	amt->remote_ip = 0;
	amt->nonce = 0;
	get_random_bytes(&amt->key, sizeof(siphash_key_t));

	amt->status = AMT_STATUS_INIT;
	if (amt->mode == AMT_MODE_GATEWAY) {
		mod_delayed_work(amt_wq, &amt->discovery_wq, 0);
		mod_delayed_work(amt_wq, &amt->req_wq, 0);
	} else if (amt->mode == AMT_MODE_RELAY) {
		mod_delayed_work(amt_wq, &amt->secret_wq,
				 msecs_to_jiffies(AMT_SECRET_TIMEOUT));
	}
	return err;
}

static int amt_dev_stop(struct net_device *dev)
{
	struct amt_dev *amt = netdev_priv(dev);
	struct amt_tunnel_list *tunnel, *tmp;
	struct socket *sock;
	struct sk_buff *skb;
	int i;

	cancel_delayed_work_sync(&amt->req_wq);
	cancel_delayed_work_sync(&amt->discovery_wq);
	cancel_delayed_work_sync(&amt->secret_wq);

	/* shutdown */
	sock = rtnl_dereference(amt->sock);
	RCU_INIT_POINTER(amt->sock, NULL);
	synchronize_net();
	if (sock)
		udp_tunnel_sock_release(sock);

	cancel_work_sync(&amt->event_wq);
	for (i = 0; i < AMT_MAX_EVENTS; i++) {
		skb = amt->events[i].skb;
		kfree_skb(skb);
		amt->events[i].event = AMT_EVENT_NONE;
		amt->events[i].skb = NULL;
	}

	amt->ready4 = false;
	amt->ready6 = false;
	amt->req_cnt = 0;
	amt->remote_ip = 0;

	list_for_each_entry_safe(tunnel, tmp, &amt->tunnel_list, list) {
		list_del_rcu(&tunnel->list);
		amt->nr_tunnels--;
		cancel_delayed_work_sync(&tunnel->gc_wq);
		amt_clear_groups(tunnel);
		kfree_rcu(tunnel, rcu);
	}

	return 0;
}

static const struct device_type amt_type = {
	.name = "amt",
};

static int amt_dev_init(struct net_device *dev)
{
	struct amt_dev *amt = netdev_priv(dev);
	int err;

	amt->dev = dev;
	dev->tstats = netdev_alloc_pcpu_stats(struct pcpu_sw_netstats);
	if (!dev->tstats)
		return -ENOMEM;

	err = gro_cells_init(&amt->gro_cells, dev);
	if (err) {
		free_percpu(dev->tstats);
		return err;
	}

	return 0;
}

static void amt_dev_uninit(struct net_device *dev)
{
	struct amt_dev *amt = netdev_priv(dev);

	gro_cells_destroy(&amt->gro_cells);
	free_percpu(dev->tstats);
}

static const struct net_device_ops amt_netdev_ops = {
	.ndo_init		= amt_dev_init,
	.ndo_uninit		= amt_dev_uninit,
	.ndo_open		= amt_dev_open,
	.ndo_stop		= amt_dev_stop,
	.ndo_start_xmit		= amt_dev_xmit,
	.ndo_get_stats64	= dev_get_tstats64,
};

static void amt_link_setup(struct net_device *dev)
{
	dev->netdev_ops		= &amt_netdev_ops;
	dev->needs_free_netdev	= true;
	SET_NETDEV_DEVTYPE(dev, &amt_type);
	dev->min_mtu		= ETH_MIN_MTU;
	dev->max_mtu		= ETH_MAX_MTU;
	dev->type		= ARPHRD_NONE;
	dev->flags		= IFF_POINTOPOINT | IFF_NOARP | IFF_MULTICAST;
	dev->hard_header_len	= 0;
	dev->addr_len		= 0;
	dev->priv_flags		|= IFF_NO_QUEUE;
	dev->features		|= NETIF_F_LLTX;
	dev->features		|= NETIF_F_GSO_SOFTWARE;
	dev->features		|= NETIF_F_NETNS_LOCAL;
	dev->hw_features	|= NETIF_F_SG | NETIF_F_HW_CSUM;
	dev->hw_features	|= NETIF_F_FRAGLIST | NETIF_F_RXCSUM;
	dev->hw_features	|= NETIF_F_GSO_SOFTWARE;
	eth_hw_addr_random(dev);
	eth_zero_addr(dev->broadcast);
	ether_setup(dev);
}

static const struct nla_policy amt_policy[IFLA_AMT_MAX + 1] = {
	[IFLA_AMT_MODE]		= { .type = NLA_U32 },
	[IFLA_AMT_RELAY_PORT]	= { .type = NLA_U16 },
	[IFLA_AMT_GATEWAY_PORT]	= { .type = NLA_U16 },
	[IFLA_AMT_LINK]		= { .type = NLA_U32 },
	[IFLA_AMT_LOCAL_IP]	= { .len = sizeof_field(struct iphdr, daddr) },
	[IFLA_AMT_REMOTE_IP]	= { .len = sizeof_field(struct iphdr, daddr) },
	[IFLA_AMT_DISCOVERY_IP]	= { .len = sizeof_field(struct iphdr, daddr) },
	[IFLA_AMT_MAX_TUNNELS]	= { .type = NLA_U32 },
};

static int amt_validate(struct nlattr *tb[], struct nlattr *data[],
			struct netlink_ext_ack *extack)
{
	if (!data)
		return -EINVAL;

	if (!data[IFLA_AMT_LINK]) {
		NL_SET_ERR_MSG_ATTR(extack, data[IFLA_AMT_LINK],
				    "Link attribute is required");
		return -EINVAL;
	}

	if (!data[IFLA_AMT_MODE]) {
		NL_SET_ERR_MSG_ATTR(extack, data[IFLA_AMT_MODE],
				    "Mode attribute is required");
		return -EINVAL;
	}

	if (nla_get_u32(data[IFLA_AMT_MODE]) > AMT_MODE_MAX) {
		NL_SET_ERR_MSG_ATTR(extack, data[IFLA_AMT_MODE],
				    "Mode attribute is not valid");
		return -EINVAL;
	}

	if (!data[IFLA_AMT_LOCAL_IP]) {
		NL_SET_ERR_MSG_ATTR(extack, data[IFLA_AMT_DISCOVERY_IP],
				    "Local attribute is required");
		return -EINVAL;
	}

	if (!data[IFLA_AMT_DISCOVERY_IP] &&
	    nla_get_u32(data[IFLA_AMT_MODE]) == AMT_MODE_GATEWAY) {
		NL_SET_ERR_MSG_ATTR(extack, data[IFLA_AMT_LOCAL_IP],
				    "Discovery attribute is required");
		return -EINVAL;
	}

	return 0;
}

static int amt_newlink(struct net *net, struct net_device *dev,
		       struct nlattr *tb[], struct nlattr *data[],
		       struct netlink_ext_ack *extack)
{
	struct amt_dev *amt = netdev_priv(dev);
	int err = -EINVAL;

	amt->net = net;
	amt->mode = nla_get_u32(data[IFLA_AMT_MODE]);

	if (data[IFLA_AMT_MAX_TUNNELS] &&
	    nla_get_u32(data[IFLA_AMT_MAX_TUNNELS]))
		amt->max_tunnels = nla_get_u32(data[IFLA_AMT_MAX_TUNNELS]);
	else
		amt->max_tunnels = AMT_MAX_TUNNELS;

	spin_lock_init(&amt->lock);
	amt->max_groups = AMT_MAX_GROUP;
	amt->max_sources = AMT_MAX_SOURCE;
	amt->hash_buckets = AMT_HSIZE;
	amt->nr_tunnels = 0;
	get_random_bytes(&amt->hash_seed, sizeof(amt->hash_seed));
	amt->stream_dev = dev_get_by_index(net,
					   nla_get_u32(data[IFLA_AMT_LINK]));
	if (!amt->stream_dev) {
		NL_SET_ERR_MSG_ATTR(extack, tb[IFLA_AMT_LINK],
				    "Can't find stream device");
		return -ENODEV;
	}

	if (amt->stream_dev->type != ARPHRD_ETHER) {
		NL_SET_ERR_MSG_ATTR(extack, tb[IFLA_AMT_LINK],
				    "Invalid stream device type");
		goto err;
	}

	amt->local_ip = nla_get_in_addr(data[IFLA_AMT_LOCAL_IP]);
	if (ipv4_is_loopback(amt->local_ip) ||
	    ipv4_is_zeronet(amt->local_ip) ||
	    ipv4_is_multicast(amt->local_ip)) {
		NL_SET_ERR_MSG_ATTR(extack, tb[IFLA_AMT_LOCAL_IP],
				    "Invalid Local address");
		goto err;
	}

	if (data[IFLA_AMT_RELAY_PORT])
		amt->relay_port = nla_get_be16(data[IFLA_AMT_RELAY_PORT]);
	else
		amt->relay_port = htons(IANA_AMT_UDP_PORT);

	if (data[IFLA_AMT_GATEWAY_PORT])
		amt->gw_port = nla_get_be16(data[IFLA_AMT_GATEWAY_PORT]);
	else
		amt->gw_port = htons(IANA_AMT_UDP_PORT);

	if (!amt->relay_port) {
		NL_SET_ERR_MSG_ATTR(extack, tb[IFLA_AMT_DISCOVERY_IP],
				    "relay port must not be 0");
		goto err;
	}

	if (amt->mode == AMT_MODE_RELAY) {
		amt->qrv = READ_ONCE(amt->net->ipv4.sysctl_igmp_qrv);
		amt->qri = 10;
		dev->needed_headroom = amt->stream_dev->needed_headroom +
				       AMT_RELAY_HLEN;
		dev->mtu = amt->stream_dev->mtu - AMT_RELAY_HLEN;
		dev->max_mtu = dev->mtu;
		dev->min_mtu = ETH_MIN_MTU + AMT_RELAY_HLEN;
	} else {
		if (!data[IFLA_AMT_DISCOVERY_IP]) {
			NL_SET_ERR_MSG_ATTR(extack, tb[IFLA_AMT_DISCOVERY_IP],
					    "discovery must be set in gateway mode");
			goto err;
		}

		if (!amt->gw_port) {
			NL_SET_ERR_MSG_ATTR(extack, tb[IFLA_AMT_DISCOVERY_IP],
					    "gateway port must not be 0");
			goto err;
		}

		amt->remote_ip = 0;
		amt->discovery_ip = nla_get_in_addr(data[IFLA_AMT_DISCOVERY_IP]);
		if (ipv4_is_loopback(amt->discovery_ip) ||
		    ipv4_is_zeronet(amt->discovery_ip) ||
		    ipv4_is_multicast(amt->discovery_ip)) {
			NL_SET_ERR_MSG_ATTR(extack, tb[IFLA_AMT_DISCOVERY_IP],
					    "discovery must be unicast");
			goto err;
		}

		dev->needed_headroom = amt->stream_dev->needed_headroom +
				       AMT_GW_HLEN;
		dev->mtu = amt->stream_dev->mtu - AMT_GW_HLEN;
		dev->max_mtu = dev->mtu;
		dev->min_mtu = ETH_MIN_MTU + AMT_GW_HLEN;
	}

	amt->qi = AMT_INIT_QUERY_INTERVAL;

	err = register_netdevice(dev);
	if (err < 0) {
		netdev_dbg(dev, "failed to register new netdev %d\n", err);
		goto err;
	}

	err = netdev_upper_dev_link(amt->stream_dev, dev, extack);
	if (err < 0) {
		unregister_netdevice(dev);
		goto err;
	}

	INIT_DELAYED_WORK(&amt->discovery_wq, amt_discovery_work);
	INIT_DELAYED_WORK(&amt->req_wq, amt_req_work);
	INIT_DELAYED_WORK(&amt->secret_wq, amt_secret_work);
	INIT_WORK(&amt->event_wq, amt_event_work);
	INIT_LIST_HEAD(&amt->tunnel_list);
	return 0;
err:
	dev_put(amt->stream_dev);
	return err;
}
  2940. static void amt_dellink(struct net_device *dev, struct list_head *head)
  2941. {
  2942. struct amt_dev *amt = netdev_priv(dev);
  2943. unregister_netdevice_queue(dev, head);
  2944. netdev_upper_dev_unlink(amt->stream_dev, dev);
  2945. dev_put(amt->stream_dev);
  2946. }

static size_t amt_get_size(const struct net_device *dev)
{
	return nla_total_size(sizeof(__u32)) + /* IFLA_AMT_MODE */
	       nla_total_size(sizeof(__u16)) + /* IFLA_AMT_RELAY_PORT */
	       nla_total_size(sizeof(__u16)) + /* IFLA_AMT_GATEWAY_PORT */
	       nla_total_size(sizeof(__u32)) + /* IFLA_AMT_LINK */
	       nla_total_size(sizeof(__u32)) + /* IFLA_AMT_MAX_TUNNELS */
	       nla_total_size(sizeof(struct iphdr)) + /* IFLA_AMT_DISCOVERY_IP */
	       nla_total_size(sizeof(struct iphdr)) + /* IFLA_AMT_REMOTE_IP */
	       nla_total_size(sizeof(struct iphdr)); /* IFLA_AMT_LOCAL_IP */
}
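
/* amt_get_size() reserves the worst-case netlink payload up front so the
 * nla_put_*() calls in amt_fill_info() below cannot run out of room.  Note
 * the address attributes are budgeted as sizeof(struct iphdr) even though
 * nla_put_in_addr() only emits a 4-byte __be32; the over-reservation is
 * harmless.
 */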
static int amt_fill_info(struct sk_buff *skb, const struct net_device *dev)
{
	struct amt_dev *amt = netdev_priv(dev);

	if (nla_put_u32(skb, IFLA_AMT_MODE, amt->mode))
		goto nla_put_failure;
	if (nla_put_be16(skb, IFLA_AMT_RELAY_PORT, amt->relay_port))
		goto nla_put_failure;
	if (nla_put_be16(skb, IFLA_AMT_GATEWAY_PORT, amt->gw_port))
		goto nla_put_failure;
	if (nla_put_u32(skb, IFLA_AMT_LINK, amt->stream_dev->ifindex))
		goto nla_put_failure;
	if (nla_put_in_addr(skb, IFLA_AMT_LOCAL_IP, amt->local_ip))
		goto nla_put_failure;
	if (nla_put_in_addr(skb, IFLA_AMT_DISCOVERY_IP, amt->discovery_ip))
		goto nla_put_failure;
	if (amt->remote_ip &&
	    nla_put_in_addr(skb, IFLA_AMT_REMOTE_IP, amt->remote_ip))
		goto nla_put_failure;
	if (nla_put_u32(skb, IFLA_AMT_MAX_TUNNELS, amt->max_tunnels))
		goto nla_put_failure;

	return 0;

nla_put_failure:
	return -EMSGSIZE;
}

static struct rtnl_link_ops amt_link_ops __read_mostly = {
	.kind		= "amt",
	.maxtype	= IFLA_AMT_MAX,
	.policy		= amt_policy,
	.priv_size	= sizeof(struct amt_dev),
	.setup		= amt_link_setup,
	.validate	= amt_validate,
	.newlink	= amt_newlink,
	.dellink	= amt_dellink,
	.get_size	= amt_get_size,
	.fill_info	= amt_fill_info,
};
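
/* .priv_size tells the rtnetlink core to allocate struct amt_dev together
 * with the struct net_device, which is why the handlers above can reach it
 * with a plain netdev_priv(); .kind = "amt" is the string matched by
 * "ip link add ... type amt".
 */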

static struct net_device *amt_lookup_upper_dev(struct net_device *dev)
{
	struct net_device *upper_dev;
	struct amt_dev *amt;

	for_each_netdev(dev_net(dev), upper_dev) {
		if (netif_is_amt(upper_dev)) {
			amt = netdev_priv(upper_dev);
			if (amt->stream_dev == dev)
				return upper_dev;
		}
	}

	return NULL;
}
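
/* The lookup above is a linear walk over the namespace's device list; that
 * is acceptable here because it only runs from the netdevice notifier,
 * which is invoked with the RTNL lock held and only on infrequent events.
 */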

static int amt_device_event(struct notifier_block *unused,
			    unsigned long event, void *ptr)
{
	struct net_device *dev = netdev_notifier_info_to_dev(ptr);
	struct net_device *upper_dev;
	struct amt_dev *amt;
	LIST_HEAD(list);
	int new_mtu;

	upper_dev = amt_lookup_upper_dev(dev);
	if (!upper_dev)
		return NOTIFY_DONE;
	amt = netdev_priv(upper_dev);

	switch (event) {
	case NETDEV_UNREGISTER:
		amt_dellink(amt->dev, &list);
		unregister_netdevice_many(&list);
		break;
	case NETDEV_CHANGEMTU:
		if (amt->mode == AMT_MODE_RELAY)
			new_mtu = dev->mtu - AMT_RELAY_HLEN;
		else
			new_mtu = dev->mtu - AMT_GW_HLEN;

		dev_set_mtu(amt->dev, new_mtu);
		break;
	}

	return NOTIFY_DONE;
}
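
/* The notifier keeps the tunnel coupled to its underlay: when the stream
 * device is unregistered the amt device is torn down with it, and an MTU
 * change on the stream device is propagated to the tunnel, again net of
 * the encapsulation overhead.
 */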

static struct notifier_block amt_notifier_block __read_mostly = {
	.notifier_call = amt_device_event,
};

static int __init amt_init(void)
{
	int err;

	err = register_netdevice_notifier(&amt_notifier_block);
	if (err < 0)
		goto err;

	err = rtnl_link_register(&amt_link_ops);
	if (err < 0)
		goto unregister_notifier;

	amt_wq = alloc_workqueue("amt", WQ_UNBOUND, 0);
	if (!amt_wq) {
		err = -ENOMEM;
		goto rtnl_unregister;
	}

	spin_lock_init(&source_gc_lock);
	spin_lock_bh(&source_gc_lock);
	INIT_DELAYED_WORK(&source_gc_wq, amt_source_gc_work);
	mod_delayed_work(amt_wq, &source_gc_wq,
			 msecs_to_jiffies(AMT_GC_INTERVAL));
	spin_unlock_bh(&source_gc_lock);

	return 0;

rtnl_unregister:
	rtnl_link_unregister(&amt_link_ops);
unregister_notifier:
	unregister_netdevice_notifier(&amt_notifier_block);
err:
	pr_err("error loading AMT module\n");
	return err;
}
late_initcall(amt_init);
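
/* Module unload reverses amt_init(): the rtnl ops and notifier go first so
 * no new work can be queued, the pending source GC work is cancelled, a
 * final synchronous __amt_source_gc_work() pass frees any sources still
 * awaiting collection, and only then is the workqueue destroyed.
 */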
static void __exit amt_fini(void)
{
	rtnl_link_unregister(&amt_link_ops);
	unregister_netdevice_notifier(&amt_notifier_block);
	cancel_delayed_work_sync(&source_gc_wq);
	__amt_source_gc_work();
	destroy_workqueue(amt_wq);
}
module_exit(amt_fini);

MODULE_LICENSE("GPL");
MODULE_AUTHOR("Taehee Yoo <[email protected]>");
MODULE_ALIAS_RTNL_LINK("amt");