xfrm_policy.c

  1. // SPDX-License-Identifier: GPL-2.0-only
  2. /*
  3. * xfrm_policy.c
  4. *
  5. * Changes:
  6. * Mitsuru KANDA @USAGI
  7. * Kazunori MIYAZAWA @USAGI
  8. * Kunihiro Ishiguro <kunihiro@ipinfusion.com>
  9. * IPv6 support
  10. * Kazunori MIYAZAWA @USAGI
  11. * YOSHIFUJI Hideaki
  12. * Split up af-specific portion
  13. * Derek Atkins <derek@ihtfp.com> Add the post_input processor
  14. *
  15. */
  16. #include <linux/err.h>
  17. #include <linux/slab.h>
  18. #include <linux/kmod.h>
  19. #include <linux/list.h>
  20. #include <linux/spinlock.h>
  21. #include <linux/workqueue.h>
  22. #include <linux/notifier.h>
  23. #include <linux/netdevice.h>
  24. #include <linux/netfilter.h>
  25. #include <linux/module.h>
  26. #include <linux/cache.h>
  27. #include <linux/cpu.h>
  28. #include <linux/audit.h>
  29. #include <linux/rhashtable.h>
  30. #include <linux/if_tunnel.h>
  31. #include <net/dst.h>
  32. #include <net/flow.h>
  33. #include <net/inet_ecn.h>
  34. #include <net/xfrm.h>
  35. #include <net/ip.h>
  36. #include <net/gre.h>
  37. #if IS_ENABLED(CONFIG_IPV6_MIP6)
  38. #include <net/mip6.h>
  39. #endif
  40. #ifdef CONFIG_XFRM_STATISTICS
  41. #include <net/snmp.h>
  42. #endif
  43. #ifdef CONFIG_XFRM_ESPINTCP
  44. #include <net/espintcp.h>
  45. #endif
  46. #include "xfrm_hash.h"
  47. #define XFRM_QUEUE_TMO_MIN ((unsigned)(HZ/10))
  48. #define XFRM_QUEUE_TMO_MAX ((unsigned)(60*HZ))
  49. #define XFRM_MAX_QUEUE_LEN 100
  50. struct xfrm_flo {
  51. struct dst_entry *dst_orig;
  52. u8 flags;
  53. };
  54. /* prefixes smaller than this are stored in lists, not trees. */
  55. #define INEXACT_PREFIXLEN_IPV4 16
  56. #define INEXACT_PREFIXLEN_IPV6 48
  57. struct xfrm_pol_inexact_node {
  58. struct rb_node node;
  59. union {
  60. xfrm_address_t addr;
  61. struct rcu_head rcu;
  62. };
  63. u8 prefixlen;
  64. struct rb_root root;
  65. /* the policies matching this node, can be empty list */
  66. struct hlist_head hhead;
  67. };
  68. /* xfrm inexact policy search tree:
  69. * xfrm_pol_inexact_bin = hash(dir,type,family,if_id);
  70. * |
  71. * +---- root_d: sorted by daddr:prefix
  72. * | |
  73. * | xfrm_pol_inexact_node
  74. * | |
  75. * | +- root: sorted by saddr/prefix
  76. * | | |
  77. * | | xfrm_pol_inexact_node
  78. * | | |
  79. * | | + root: unused
  80. * | | |
  81. * | | + hhead: saddr:daddr policies
  82. * | |
  83. * | +- coarse policies and all any:daddr policies
  84. * |
  85. * +---- root_s: sorted by saddr:prefix
  86. * | |
  87. * | xfrm_pol_inexact_node
  88. * | |
  89. * | + root: unused
  90. * | |
  91. * | + hhead: saddr:any policies
  92. * |
  93. * +---- coarse policies and all any:any policies
  94. *
  95. * Lookups return four candidate lists:
  96. * 1. any:any list from top-level xfrm_pol_inexact_bin
  97. * 2. any:daddr list from daddr tree
  98. * 3. saddr:daddr list from 2nd level daddr tree
  99. * 4. saddr:any list from saddr tree
  100. *
  101. * This result set then needs to be searched for the policy with
  102. * the lowest priority. If two results have the same priority, the one with the lower pos (the policy inserted first) wins.
  103. */
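/* Worked example of the layout above (illustrative; the addresses are assumed,
 * not taken from the source): an outbound IPv4 bin could hold policies with
 * the following selectors,
 *
 *   A: src 0.0.0.0/0     dst 0.0.0.0/0      -> bin->hhead            (any:any)
 *   B: src 0.0.0.0/0     dst 192.0.2.0/24   -> root_d /24 node hhead (any:daddr)
 *   C: src 10.0.0.0/16   dst 192.0.2.0/24   -> root_d /24 node, then its
 *                                              second-level /16 node (saddr:daddr)
 *   D: src 10.0.0.0/16   dst 0.0.0.0/0      -> root_s /16 node hhead (saddr:any)
 *
 * A lookup for 10.0.1.1 -> 192.0.2.5 collects all four candidate lists (here
 * A, B, C and D all match) and returns the matching policy with the lowest
 * priority, breaking ties by pos as described above.
 */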
  104. struct xfrm_pol_inexact_key {
  105. possible_net_t net;
  106. u32 if_id;
  107. u16 family;
  108. u8 dir, type;
  109. };
  110. struct xfrm_pol_inexact_bin {
  111. struct xfrm_pol_inexact_key k;
  112. struct rhash_head head;
  113. /* list containing '*:*' policies */
  114. struct hlist_head hhead;
  115. seqcount_spinlock_t count;
  116. /* tree sorted by daddr/prefix */
  117. struct rb_root root_d;
  118. /* tree sorted by saddr/prefix */
  119. struct rb_root root_s;
  120. /* slow path below */
  121. struct list_head inexact_bins;
  122. struct rcu_head rcu;
  123. };
  124. enum xfrm_pol_inexact_candidate_type {
  125. XFRM_POL_CAND_BOTH,
  126. XFRM_POL_CAND_SADDR,
  127. XFRM_POL_CAND_DADDR,
  128. XFRM_POL_CAND_ANY,
  129. XFRM_POL_CAND_MAX,
  130. };
  131. struct xfrm_pol_inexact_candidates {
  132. struct hlist_head *res[XFRM_POL_CAND_MAX];
  133. };
  134. static DEFINE_SPINLOCK(xfrm_if_cb_lock);
  135. static struct xfrm_if_cb const __rcu *xfrm_if_cb __read_mostly;
  136. static DEFINE_SPINLOCK(xfrm_policy_afinfo_lock);
  137. static struct xfrm_policy_afinfo const __rcu *xfrm_policy_afinfo[AF_INET6 + 1]
  138. __read_mostly;
  139. static struct kmem_cache *xfrm_dst_cache __ro_after_init;
  140. static struct rhashtable xfrm_policy_inexact_table;
  141. static const struct rhashtable_params xfrm_pol_inexact_params;
  142. static void xfrm_init_pmtu(struct xfrm_dst **bundle, int nr);
  143. static int stale_bundle(struct dst_entry *dst);
  144. static int xfrm_bundle_ok(struct xfrm_dst *xdst);
  145. static void xfrm_policy_queue_process(struct timer_list *t);
  146. static void __xfrm_policy_link(struct xfrm_policy *pol, int dir);
  147. static struct xfrm_policy *__xfrm_policy_unlink(struct xfrm_policy *pol,
  148. int dir);
  149. static struct xfrm_pol_inexact_bin *
  150. xfrm_policy_inexact_lookup(struct net *net, u8 type, u16 family, u8 dir,
  151. u32 if_id);
  152. static struct xfrm_pol_inexact_bin *
  153. xfrm_policy_inexact_lookup_rcu(struct net *net,
  154. u8 type, u16 family, u8 dir, u32 if_id);
  155. static struct xfrm_policy *
  156. xfrm_policy_insert_list(struct hlist_head *chain, struct xfrm_policy *policy,
  157. bool excl);
  158. static void xfrm_policy_insert_inexact_list(struct hlist_head *chain,
  159. struct xfrm_policy *policy);
  160. static bool
  161. xfrm_policy_find_inexact_candidates(struct xfrm_pol_inexact_candidates *cand,
  162. struct xfrm_pol_inexact_bin *b,
  163. const xfrm_address_t *saddr,
  164. const xfrm_address_t *daddr);
  165. static inline bool xfrm_pol_hold_rcu(struct xfrm_policy *policy)
  166. {
  167. return refcount_inc_not_zero(&policy->refcnt);
  168. }
  169. static inline bool
  170. __xfrm4_selector_match(const struct xfrm_selector *sel, const struct flowi *fl)
  171. {
  172. const struct flowi4 *fl4 = &fl->u.ip4;
  173. return addr4_match(fl4->daddr, sel->daddr.a4, sel->prefixlen_d) &&
  174. addr4_match(fl4->saddr, sel->saddr.a4, sel->prefixlen_s) &&
  175. !((xfrm_flowi_dport(fl, &fl4->uli) ^ sel->dport) & sel->dport_mask) &&
  176. !((xfrm_flowi_sport(fl, &fl4->uli) ^ sel->sport) & sel->sport_mask) &&
  177. (fl4->flowi4_proto == sel->proto || !sel->proto) &&
  178. (fl4->flowi4_oif == sel->ifindex || !sel->ifindex);
  179. }
  180. static inline bool
  181. __xfrm6_selector_match(const struct xfrm_selector *sel, const struct flowi *fl)
  182. {
  183. const struct flowi6 *fl6 = &fl->u.ip6;
  184. return addr_match(&fl6->daddr, &sel->daddr, sel->prefixlen_d) &&
  185. addr_match(&fl6->saddr, &sel->saddr, sel->prefixlen_s) &&
  186. !((xfrm_flowi_dport(fl, &fl6->uli) ^ sel->dport) & sel->dport_mask) &&
  187. !((xfrm_flowi_sport(fl, &fl6->uli) ^ sel->sport) & sel->sport_mask) &&
  188. (fl6->flowi6_proto == sel->proto || !sel->proto) &&
  189. (fl6->flowi6_oif == sel->ifindex || !sel->ifindex);
  190. }
  191. bool xfrm_selector_match(const struct xfrm_selector *sel, const struct flowi *fl,
  192. unsigned short family)
  193. {
  194. switch (family) {
  195. case AF_INET:
  196. return __xfrm4_selector_match(sel, fl);
  197. case AF_INET6:
  198. return __xfrm6_selector_match(sel, fl);
  199. }
  200. return false;
  201. }
  202. static const struct xfrm_policy_afinfo *xfrm_policy_get_afinfo(unsigned short family)
  203. {
  204. const struct xfrm_policy_afinfo *afinfo;
  205. if (unlikely(family >= ARRAY_SIZE(xfrm_policy_afinfo)))
  206. return NULL;
  207. rcu_read_lock();
  208. afinfo = rcu_dereference(xfrm_policy_afinfo[family]);
  209. if (unlikely(!afinfo))
  210. rcu_read_unlock();
  211. return afinfo;
  212. }
  213. /* Called with rcu_read_lock(). */
  214. static const struct xfrm_if_cb *xfrm_if_get_cb(void)
  215. {
  216. return rcu_dereference(xfrm_if_cb);
  217. }
  218. struct dst_entry *__xfrm_dst_lookup(struct net *net, int tos, int oif,
  219. const xfrm_address_t *saddr,
  220. const xfrm_address_t *daddr,
  221. int family, u32 mark)
  222. {
  223. const struct xfrm_policy_afinfo *afinfo;
  224. struct dst_entry *dst;
  225. afinfo = xfrm_policy_get_afinfo(family);
  226. if (unlikely(afinfo == NULL))
  227. return ERR_PTR(-EAFNOSUPPORT);
  228. dst = afinfo->dst_lookup(net, tos, oif, saddr, daddr, mark);
  229. rcu_read_unlock();
  230. return dst;
  231. }
  232. EXPORT_SYMBOL(__xfrm_dst_lookup);
  233. static inline struct dst_entry *xfrm_dst_lookup(struct xfrm_state *x,
  234. int tos, int oif,
  235. xfrm_address_t *prev_saddr,
  236. xfrm_address_t *prev_daddr,
  237. int family, u32 mark)
  238. {
  239. struct net *net = xs_net(x);
  240. xfrm_address_t *saddr = &x->props.saddr;
  241. xfrm_address_t *daddr = &x->id.daddr;
  242. struct dst_entry *dst;
  243. if (x->type->flags & XFRM_TYPE_LOCAL_COADDR) {
  244. saddr = x->coaddr;
  245. daddr = prev_daddr;
  246. }
  247. if (x->type->flags & XFRM_TYPE_REMOTE_COADDR) {
  248. saddr = prev_saddr;
  249. daddr = x->coaddr;
  250. }
  251. dst = __xfrm_dst_lookup(net, tos, oif, saddr, daddr, family, mark);
  252. if (!IS_ERR(dst)) {
  253. if (prev_saddr != saddr)
  254. memcpy(prev_saddr, saddr, sizeof(*prev_saddr));
  255. if (prev_daddr != daddr)
  256. memcpy(prev_daddr, daddr, sizeof(*prev_daddr));
  257. }
  258. return dst;
  259. }
  260. static inline unsigned long make_jiffies(long secs)
  261. {
  262. if (secs >= (MAX_SCHEDULE_TIMEOUT-1)/HZ)
  263. return MAX_SCHEDULE_TIMEOUT-1;
  264. else
  265. return secs*HZ;
  266. }
  267. static void xfrm_policy_timer(struct timer_list *t)
  268. {
  269. struct xfrm_policy *xp = from_timer(xp, t, timer);
  270. time64_t now = ktime_get_real_seconds();
  271. time64_t next = TIME64_MAX;
  272. int warn = 0;
  273. int dir;
  274. read_lock(&xp->lock);
  275. if (unlikely(xp->walk.dead))
  276. goto out;
  277. dir = xfrm_policy_id2dir(xp->index);
  278. if (xp->lft.hard_add_expires_seconds) {
  279. time64_t tmo = xp->lft.hard_add_expires_seconds +
  280. xp->curlft.add_time - now;
  281. if (tmo <= 0)
  282. goto expired;
  283. if (tmo < next)
  284. next = tmo;
  285. }
  286. if (xp->lft.hard_use_expires_seconds) {
  287. time64_t tmo = xp->lft.hard_use_expires_seconds +
  288. (READ_ONCE(xp->curlft.use_time) ? : xp->curlft.add_time) - now;
  289. if (tmo <= 0)
  290. goto expired;
  291. if (tmo < next)
  292. next = tmo;
  293. }
  294. if (xp->lft.soft_add_expires_seconds) {
  295. time64_t tmo = xp->lft.soft_add_expires_seconds +
  296. xp->curlft.add_time - now;
  297. if (tmo <= 0) {
  298. warn = 1;
  299. tmo = XFRM_KM_TIMEOUT;
  300. }
  301. if (tmo < next)
  302. next = tmo;
  303. }
  304. if (xp->lft.soft_use_expires_seconds) {
  305. time64_t tmo = xp->lft.soft_use_expires_seconds +
  306. (READ_ONCE(xp->curlft.use_time) ? : xp->curlft.add_time) - now;
  307. if (tmo <= 0) {
  308. warn = 1;
  309. tmo = XFRM_KM_TIMEOUT;
  310. }
  311. if (tmo < next)
  312. next = tmo;
  313. }
  314. if (warn)
  315. km_policy_expired(xp, dir, 0, 0);
  316. if (next != TIME64_MAX &&
  317. !mod_timer(&xp->timer, jiffies + make_jiffies(next)))
  318. xfrm_pol_hold(xp);
  319. out:
  320. read_unlock(&xp->lock);
  321. xfrm_pol_put(xp);
  322. return;
  323. expired:
  324. read_unlock(&xp->lock);
  325. if (!xfrm_policy_delete(xp, dir))
  326. km_policy_expired(xp, dir, 1, 0);
  327. xfrm_pol_put(xp);
  328. }
  329. /* Allocate xfrm_policy. Not used here, it is supposed to be used by pfkeyv2
  330. * SPD calls.
  331. */
  332. struct xfrm_policy *xfrm_policy_alloc(struct net *net, gfp_t gfp)
  333. {
  334. struct xfrm_policy *policy;
  335. policy = kzalloc(sizeof(struct xfrm_policy), gfp);
  336. if (policy) {
  337. write_pnet(&policy->xp_net, net);
  338. INIT_LIST_HEAD(&policy->walk.all);
  339. INIT_HLIST_NODE(&policy->bydst_inexact_list);
  340. INIT_HLIST_NODE(&policy->bydst);
  341. INIT_HLIST_NODE(&policy->byidx);
  342. rwlock_init(&policy->lock);
  343. refcount_set(&policy->refcnt, 1);
  344. skb_queue_head_init(&policy->polq.hold_queue);
  345. timer_setup(&policy->timer, xfrm_policy_timer, 0);
  346. timer_setup(&policy->polq.hold_timer,
  347. xfrm_policy_queue_process, 0);
  348. }
  349. return policy;
  350. }
  351. EXPORT_SYMBOL(xfrm_policy_alloc);
  352. static void xfrm_policy_destroy_rcu(struct rcu_head *head)
  353. {
  354. struct xfrm_policy *policy = container_of(head, struct xfrm_policy, rcu);
  355. security_xfrm_policy_free(policy->security);
  356. kfree(policy);
  357. }
  358. /* Destroy xfrm_policy: descendant resources must already have been released by this point. */
  359. void xfrm_policy_destroy(struct xfrm_policy *policy)
  360. {
  361. BUG_ON(!policy->walk.dead);
  362. if (del_timer(&policy->timer) || del_timer(&policy->polq.hold_timer))
  363. BUG();
  364. call_rcu(&policy->rcu, xfrm_policy_destroy_rcu);
  365. }
  366. EXPORT_SYMBOL(xfrm_policy_destroy);
  367. /* Rule must be locked. Release descendant resources, announce
  368. * entry dead. The rule must already be unlinked from all lists.
  369. */
  370. static void xfrm_policy_kill(struct xfrm_policy *policy)
  371. {
  372. write_lock_bh(&policy->lock);
  373. policy->walk.dead = 1;
  374. write_unlock_bh(&policy->lock);
  375. atomic_inc(&policy->genid);
  376. if (del_timer(&policy->polq.hold_timer))
  377. xfrm_pol_put(policy);
  378. skb_queue_purge(&policy->polq.hold_queue);
  379. if (del_timer(&policy->timer))
  380. xfrm_pol_put(policy);
  381. xfrm_pol_put(policy);
  382. }
  383. static unsigned int xfrm_policy_hashmax __read_mostly = 1 * 1024 * 1024;
  384. static inline unsigned int idx_hash(struct net *net, u32 index)
  385. {
  386. return __idx_hash(index, net->xfrm.policy_idx_hmask);
  387. }
  388. /* calculate policy hash thresholds */
  389. static void __get_hash_thresh(struct net *net,
  390. unsigned short family, int dir,
  391. u8 *dbits, u8 *sbits)
  392. {
  393. switch (family) {
  394. case AF_INET:
  395. *dbits = net->xfrm.policy_bydst[dir].dbits4;
  396. *sbits = net->xfrm.policy_bydst[dir].sbits4;
  397. break;
  398. case AF_INET6:
  399. *dbits = net->xfrm.policy_bydst[dir].dbits6;
  400. *sbits = net->xfrm.policy_bydst[dir].sbits6;
  401. break;
  402. default:
  403. *dbits = 0;
  404. *sbits = 0;
  405. }
  406. }
  407. static struct hlist_head *policy_hash_bysel(struct net *net,
  408. const struct xfrm_selector *sel,
  409. unsigned short family, int dir)
  410. {
  411. unsigned int hmask = net->xfrm.policy_bydst[dir].hmask;
  412. unsigned int hash;
  413. u8 dbits;
  414. u8 sbits;
  415. __get_hash_thresh(net, family, dir, &dbits, &sbits);
  416. hash = __sel_hash(sel, family, hmask, dbits, sbits);
  417. if (hash == hmask + 1)
  418. return NULL;
  419. return rcu_dereference_check(net->xfrm.policy_bydst[dir].table,
  420. lockdep_is_held(&net->xfrm.xfrm_policy_lock)) + hash;
  421. }
  422. static struct hlist_head *policy_hash_direct(struct net *net,
  423. const xfrm_address_t *daddr,
  424. const xfrm_address_t *saddr,
  425. unsigned short family, int dir)
  426. {
  427. unsigned int hmask = net->xfrm.policy_bydst[dir].hmask;
  428. unsigned int hash;
  429. u8 dbits;
  430. u8 sbits;
  431. __get_hash_thresh(net, family, dir, &dbits, &sbits);
  432. hash = __addr_hash(daddr, saddr, family, hmask, dbits, sbits);
  433. return rcu_dereference_check(net->xfrm.policy_bydst[dir].table,
  434. lockdep_is_held(&net->xfrm.xfrm_policy_lock)) + hash;
  435. }
  436. static void xfrm_dst_hash_transfer(struct net *net,
  437. struct hlist_head *list,
  438. struct hlist_head *ndsttable,
  439. unsigned int nhashmask,
  440. int dir)
  441. {
  442. struct hlist_node *tmp, *entry0 = NULL;
  443. struct xfrm_policy *pol;
  444. unsigned int h0 = 0;
  445. u8 dbits;
  446. u8 sbits;
  447. redo:
  448. hlist_for_each_entry_safe(pol, tmp, list, bydst) {
  449. unsigned int h;
  450. __get_hash_thresh(net, pol->family, dir, &dbits, &sbits);
  451. h = __addr_hash(&pol->selector.daddr, &pol->selector.saddr,
  452. pol->family, nhashmask, dbits, sbits);
  453. if (!entry0) {
  454. hlist_del_rcu(&pol->bydst);
  455. hlist_add_head_rcu(&pol->bydst, ndsttable + h);
  456. h0 = h;
  457. } else {
  458. if (h != h0)
  459. continue;
  460. hlist_del_rcu(&pol->bydst);
  461. hlist_add_behind_rcu(&pol->bydst, entry0);
  462. }
  463. entry0 = &pol->bydst;
  464. }
  465. if (!hlist_empty(list)) {
  466. entry0 = NULL;
  467. goto redo;
  468. }
  469. }
  470. static void xfrm_idx_hash_transfer(struct hlist_head *list,
  471. struct hlist_head *nidxtable,
  472. unsigned int nhashmask)
  473. {
  474. struct hlist_node *tmp;
  475. struct xfrm_policy *pol;
  476. hlist_for_each_entry_safe(pol, tmp, list, byidx) {
  477. unsigned int h;
  478. h = __idx_hash(pol->index, nhashmask);
  479. hlist_add_head(&pol->byidx, nidxtable+h);
  480. }
  481. }
  482. static unsigned long xfrm_new_hash_mask(unsigned int old_hmask)
  483. {
  484. return ((old_hmask + 1) << 1) - 1;
  485. }
  486. static void xfrm_bydst_resize(struct net *net, int dir)
  487. {
  488. unsigned int hmask = net->xfrm.policy_bydst[dir].hmask;
  489. unsigned int nhashmask = xfrm_new_hash_mask(hmask);
  490. unsigned int nsize = (nhashmask + 1) * sizeof(struct hlist_head);
  491. struct hlist_head *ndst = xfrm_hash_alloc(nsize);
  492. struct hlist_head *odst;
  493. int i;
  494. if (!ndst)
  495. return;
  496. spin_lock_bh(&net->xfrm.xfrm_policy_lock);
  497. write_seqcount_begin(&net->xfrm.xfrm_policy_hash_generation);
  498. odst = rcu_dereference_protected(net->xfrm.policy_bydst[dir].table,
  499. lockdep_is_held(&net->xfrm.xfrm_policy_lock));
  500. for (i = hmask; i >= 0; i--)
  501. xfrm_dst_hash_transfer(net, odst + i, ndst, nhashmask, dir);
  502. rcu_assign_pointer(net->xfrm.policy_bydst[dir].table, ndst);
  503. net->xfrm.policy_bydst[dir].hmask = nhashmask;
  504. write_seqcount_end(&net->xfrm.xfrm_policy_hash_generation);
  505. spin_unlock_bh(&net->xfrm.xfrm_policy_lock);
  506. synchronize_rcu();
  507. xfrm_hash_free(odst, (hmask + 1) * sizeof(struct hlist_head));
  508. }
  509. static void xfrm_byidx_resize(struct net *net, int total)
  510. {
  511. unsigned int hmask = net->xfrm.policy_idx_hmask;
  512. unsigned int nhashmask = xfrm_new_hash_mask(hmask);
  513. unsigned int nsize = (nhashmask + 1) * sizeof(struct hlist_head);
  514. struct hlist_head *oidx = net->xfrm.policy_byidx;
  515. struct hlist_head *nidx = xfrm_hash_alloc(nsize);
  516. int i;
  517. if (!nidx)
  518. return;
  519. spin_lock_bh(&net->xfrm.xfrm_policy_lock);
  520. for (i = hmask; i >= 0; i--)
  521. xfrm_idx_hash_transfer(oidx + i, nidx, nhashmask);
  522. net->xfrm.policy_byidx = nidx;
  523. net->xfrm.policy_idx_hmask = nhashmask;
  524. spin_unlock_bh(&net->xfrm.xfrm_policy_lock);
  525. xfrm_hash_free(oidx, (hmask + 1) * sizeof(struct hlist_head));
  526. }
  527. static inline int xfrm_bydst_should_resize(struct net *net, int dir, int *total)
  528. {
  529. unsigned int cnt = net->xfrm.policy_count[dir];
  530. unsigned int hmask = net->xfrm.policy_bydst[dir].hmask;
  531. if (total)
  532. *total += cnt;
  533. if ((hmask + 1) < xfrm_policy_hashmax &&
  534. cnt > hmask)
  535. return 1;
  536. return 0;
  537. }
  538. static inline int xfrm_byidx_should_resize(struct net *net, int total)
  539. {
  540. unsigned int hmask = net->xfrm.policy_idx_hmask;
  541. if ((hmask + 1) < xfrm_policy_hashmax &&
  542. total > hmask)
  543. return 1;
  544. return 0;
  545. }
  546. void xfrm_spd_getinfo(struct net *net, struct xfrmk_spdinfo *si)
  547. {
  548. si->incnt = net->xfrm.policy_count[XFRM_POLICY_IN];
  549. si->outcnt = net->xfrm.policy_count[XFRM_POLICY_OUT];
  550. si->fwdcnt = net->xfrm.policy_count[XFRM_POLICY_FWD];
  551. si->inscnt = net->xfrm.policy_count[XFRM_POLICY_IN+XFRM_POLICY_MAX];
  552. si->outscnt = net->xfrm.policy_count[XFRM_POLICY_OUT+XFRM_POLICY_MAX];
  553. si->fwdscnt = net->xfrm.policy_count[XFRM_POLICY_FWD+XFRM_POLICY_MAX];
  554. si->spdhcnt = net->xfrm.policy_idx_hmask;
  555. si->spdhmcnt = xfrm_policy_hashmax;
  556. }
  557. EXPORT_SYMBOL(xfrm_spd_getinfo);
  558. static DEFINE_MUTEX(hash_resize_mutex);
  559. static void xfrm_hash_resize(struct work_struct *work)
  560. {
  561. struct net *net = container_of(work, struct net, xfrm.policy_hash_work);
  562. int dir, total;
  563. mutex_lock(&hash_resize_mutex);
  564. total = 0;
  565. for (dir = 0; dir < XFRM_POLICY_MAX; dir++) {
  566. if (xfrm_bydst_should_resize(net, dir, &total))
  567. xfrm_bydst_resize(net, dir);
  568. }
  569. if (xfrm_byidx_should_resize(net, total))
  570. xfrm_byidx_resize(net, total);
  571. mutex_unlock(&hash_resize_mutex);
  572. }
  573. /* Make sure *pol can be inserted into fastbin.
  574. * Useful to check that later insert requests will be successful
  575. * (provided xfrm_policy_lock is held throughout).
  576. */
  577. static struct xfrm_pol_inexact_bin *
  578. xfrm_policy_inexact_alloc_bin(const struct xfrm_policy *pol, u8 dir)
  579. {
  580. struct xfrm_pol_inexact_bin *bin, *prev;
  581. struct xfrm_pol_inexact_key k = {
  582. .family = pol->family,
  583. .type = pol->type,
  584. .dir = dir,
  585. .if_id = pol->if_id,
  586. };
  587. struct net *net = xp_net(pol);
  588. lockdep_assert_held(&net->xfrm.xfrm_policy_lock);
  589. write_pnet(&k.net, net);
  590. bin = rhashtable_lookup_fast(&xfrm_policy_inexact_table, &k,
  591. xfrm_pol_inexact_params);
  592. if (bin)
  593. return bin;
  594. bin = kzalloc(sizeof(*bin), GFP_ATOMIC);
  595. if (!bin)
  596. return NULL;
  597. bin->k = k;
  598. INIT_HLIST_HEAD(&bin->hhead);
  599. bin->root_d = RB_ROOT;
  600. bin->root_s = RB_ROOT;
  601. seqcount_spinlock_init(&bin->count, &net->xfrm.xfrm_policy_lock);
  602. prev = rhashtable_lookup_get_insert_key(&xfrm_policy_inexact_table,
  603. &bin->k, &bin->head,
  604. xfrm_pol_inexact_params);
  605. if (!prev) {
  606. list_add(&bin->inexact_bins, &net->xfrm.inexact_bins);
  607. return bin;
  608. }
  609. kfree(bin);
  610. return IS_ERR(prev) ? NULL : prev;
  611. }
  612. static bool xfrm_pol_inexact_addr_use_any_list(const xfrm_address_t *addr,
  613. int family, u8 prefixlen)
  614. {
  615. if (xfrm_addr_any(addr, family))
  616. return true;
  617. if (family == AF_INET6 && prefixlen < INEXACT_PREFIXLEN_IPV6)
  618. return true;
  619. if (family == AF_INET && prefixlen < INEXACT_PREFIXLEN_IPV4)
  620. return true;
  621. return false;
  622. }
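/* Illustrative examples (assumed addresses): with the thresholds above, an
 * AF_INET selector of 0.0.0.0/0 or 10.0.0.0/8 (prefixlen < 16) stays on the
 * plain "any" list, while 10.1.0.0/16 or anything more specific goes into the
 * rbtree; for AF_INET6 the corresponding cut-off is a /48 prefix.
 */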
  623. static bool
  624. xfrm_policy_inexact_insert_use_any_list(const struct xfrm_policy *policy)
  625. {
  626. const xfrm_address_t *addr;
  627. bool saddr_any, daddr_any;
  628. u8 prefixlen;
  629. addr = &policy->selector.saddr;
  630. prefixlen = policy->selector.prefixlen_s;
  631. saddr_any = xfrm_pol_inexact_addr_use_any_list(addr,
  632. policy->family,
  633. prefixlen);
  634. addr = &policy->selector.daddr;
  635. prefixlen = policy->selector.prefixlen_d;
  636. daddr_any = xfrm_pol_inexact_addr_use_any_list(addr,
  637. policy->family,
  638. prefixlen);
  639. return saddr_any && daddr_any;
  640. }
  641. static void xfrm_pol_inexact_node_init(struct xfrm_pol_inexact_node *node,
  642. const xfrm_address_t *addr, u8 prefixlen)
  643. {
  644. node->addr = *addr;
  645. node->prefixlen = prefixlen;
  646. }
  647. static struct xfrm_pol_inexact_node *
  648. xfrm_pol_inexact_node_alloc(const xfrm_address_t *addr, u8 prefixlen)
  649. {
  650. struct xfrm_pol_inexact_node *node;
  651. node = kzalloc(sizeof(*node), GFP_ATOMIC);
  652. if (node)
  653. xfrm_pol_inexact_node_init(node, addr, prefixlen);
  654. return node;
  655. }
  656. static int xfrm_policy_addr_delta(const xfrm_address_t *a,
  657. const xfrm_address_t *b,
  658. u8 prefixlen, u16 family)
  659. {
  660. u32 ma, mb, mask;
  661. unsigned int pdw, pbi;
  662. int delta = 0;
  663. switch (family) {
  664. case AF_INET:
  665. if (prefixlen == 0)
  666. return 0;
  667. mask = ~0U << (32 - prefixlen);
  668. ma = ntohl(a->a4) & mask;
  669. mb = ntohl(b->a4) & mask;
  670. if (ma < mb)
  671. delta = -1;
  672. else if (ma > mb)
  673. delta = 1;
  674. break;
  675. case AF_INET6:
  676. pdw = prefixlen >> 5;
  677. pbi = prefixlen & 0x1f;
  678. if (pdw) {
  679. delta = memcmp(a->a6, b->a6, pdw << 2);
  680. if (delta)
  681. return delta;
  682. }
  683. if (pbi) {
  684. mask = ~0U << (32 - pbi);
  685. ma = ntohl(a->a6[pdw]) & mask;
  686. mb = ntohl(b->a6[pdw]) & mask;
  687. if (ma < mb)
  688. delta = -1;
  689. else if (ma > mb)
  690. delta = 1;
  691. }
  692. break;
  693. default:
  694. break;
  695. }
  696. return delta;
  697. }
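/* Worked example of the comparison above (assumed IPv4 addresses):
 *
 *   xfrm_address_t a = { .a4 = htonl(0x0a010203) };    a is 10.1.2.3
 *   xfrm_address_t b = { .a4 = htonl(0x0a020000) };    b is 10.2.0.0
 *
 *   xfrm_policy_addr_delta(&a, &b,  8, AF_INET) ->  0  (both in 10.0.0.0/8)
 *   xfrm_policy_addr_delta(&a, &b, 16, AF_INET) -> -1  (10.1.0.0 < 10.2.0.0)
 *
 * Only the first 'prefixlen' bits take part in the ordering, which is what
 * allows the rbtree nodes to be keyed by address/prefix.
 */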
  698. static void xfrm_policy_inexact_list_reinsert(struct net *net,
  699. struct xfrm_pol_inexact_node *n,
  700. u16 family)
  701. {
  702. unsigned int matched_s, matched_d;
  703. struct xfrm_policy *policy, *p;
  704. matched_s = 0;
  705. matched_d = 0;
  706. list_for_each_entry_reverse(policy, &net->xfrm.policy_all, walk.all) {
  707. struct hlist_node *newpos = NULL;
  708. bool matches_s, matches_d;
  709. if (policy->walk.dead || !policy->bydst_reinsert)
  710. continue;
  711. WARN_ON_ONCE(policy->family != family);
  712. policy->bydst_reinsert = false;
  713. hlist_for_each_entry(p, &n->hhead, bydst) {
  714. if (policy->priority > p->priority)
  715. newpos = &p->bydst;
  716. else if (policy->priority == p->priority &&
  717. policy->pos > p->pos)
  718. newpos = &p->bydst;
  719. else
  720. break;
  721. }
  722. if (newpos)
  723. hlist_add_behind_rcu(&policy->bydst, newpos);
  724. else
  725. hlist_add_head_rcu(&policy->bydst, &n->hhead);
  726. /* paranoia checks follow.
  727. * Check that the reinserted policy matches at least
  728. * saddr or daddr for current node prefix.
  729. *
  730. * Matching both is fine, matching saddr in one policy
  731. * (but not daddr) and then matching only daddr in another
  732. * is a bug.
  733. */
  734. matches_s = xfrm_policy_addr_delta(&policy->selector.saddr,
  735. &n->addr,
  736. n->prefixlen,
  737. family) == 0;
  738. matches_d = xfrm_policy_addr_delta(&policy->selector.daddr,
  739. &n->addr,
  740. n->prefixlen,
  741. family) == 0;
  742. if (matches_s && matches_d)
  743. continue;
  744. WARN_ON_ONCE(!matches_s && !matches_d);
  745. if (matches_s)
  746. matched_s++;
  747. if (matches_d)
  748. matched_d++;
  749. WARN_ON_ONCE(matched_s && matched_d);
  750. }
  751. }
  752. static void xfrm_policy_inexact_node_reinsert(struct net *net,
  753. struct xfrm_pol_inexact_node *n,
  754. struct rb_root *new,
  755. u16 family)
  756. {
  757. struct xfrm_pol_inexact_node *node;
  758. struct rb_node **p, *parent;
  759. /* we should not have another subtree here */
  760. WARN_ON_ONCE(!RB_EMPTY_ROOT(&n->root));
  761. restart:
  762. parent = NULL;
  763. p = &new->rb_node;
  764. while (*p) {
  765. u8 prefixlen;
  766. int delta;
  767. parent = *p;
  768. node = rb_entry(*p, struct xfrm_pol_inexact_node, node);
  769. prefixlen = min(node->prefixlen, n->prefixlen);
  770. delta = xfrm_policy_addr_delta(&n->addr, &node->addr,
  771. prefixlen, family);
  772. if (delta < 0) {
  773. p = &parent->rb_left;
  774. } else if (delta > 0) {
  775. p = &parent->rb_right;
  776. } else {
  777. bool same_prefixlen = node->prefixlen == n->prefixlen;
  778. struct xfrm_policy *tmp;
  779. hlist_for_each_entry(tmp, &n->hhead, bydst) {
  780. tmp->bydst_reinsert = true;
  781. hlist_del_rcu(&tmp->bydst);
  782. }
  783. node->prefixlen = prefixlen;
  784. xfrm_policy_inexact_list_reinsert(net, node, family);
  785. if (same_prefixlen) {
  786. kfree_rcu(n, rcu);
  787. return;
  788. }
  789. rb_erase(*p, new);
  790. kfree_rcu(n, rcu);
  791. n = node;
  792. goto restart;
  793. }
  794. }
  795. rb_link_node_rcu(&n->node, parent, p);
  796. rb_insert_color(&n->node, new);
  797. }
  798. /* merge nodes v and n */
  799. static void xfrm_policy_inexact_node_merge(struct net *net,
  800. struct xfrm_pol_inexact_node *v,
  801. struct xfrm_pol_inexact_node *n,
  802. u16 family)
  803. {
  804. struct xfrm_pol_inexact_node *node;
  805. struct xfrm_policy *tmp;
  806. struct rb_node *rnode;
  807. /* To-be-merged node v has a subtree.
  808. *
  809. * Dismantle it and insert its nodes to n->root.
  810. */
  811. while ((rnode = rb_first(&v->root)) != NULL) {
  812. node = rb_entry(rnode, struct xfrm_pol_inexact_node, node);
  813. rb_erase(&node->node, &v->root);
  814. xfrm_policy_inexact_node_reinsert(net, node, &n->root,
  815. family);
  816. }
  817. hlist_for_each_entry(tmp, &v->hhead, bydst) {
  818. tmp->bydst_reinsert = true;
  819. hlist_del_rcu(&tmp->bydst);
  820. }
  821. xfrm_policy_inexact_list_reinsert(net, n, family);
  822. }
  823. static struct xfrm_pol_inexact_node *
  824. xfrm_policy_inexact_insert_node(struct net *net,
  825. struct rb_root *root,
  826. xfrm_address_t *addr,
  827. u16 family, u8 prefixlen, u8 dir)
  828. {
  829. struct xfrm_pol_inexact_node *cached = NULL;
  830. struct rb_node **p, *parent = NULL;
  831. struct xfrm_pol_inexact_node *node;
  832. p = &root->rb_node;
  833. while (*p) {
  834. int delta;
  835. parent = *p;
  836. node = rb_entry(*p, struct xfrm_pol_inexact_node, node);
  837. delta = xfrm_policy_addr_delta(addr, &node->addr,
  838. node->prefixlen,
  839. family);
  840. if (delta == 0 && prefixlen >= node->prefixlen) {
  841. WARN_ON_ONCE(cached); /* ipsec policies got lost */
  842. return node;
  843. }
  844. if (delta < 0)
  845. p = &parent->rb_left;
  846. else
  847. p = &parent->rb_right;
  848. if (prefixlen < node->prefixlen) {
  849. delta = xfrm_policy_addr_delta(addr, &node->addr,
  850. prefixlen,
  851. family);
  852. if (delta)
  853. continue;
  854. /* This node is a subnet of the new prefix. It needs
  855. * to be removed and re-inserted with the smaller
  856. * prefix and all nodes that are now also covered
  857. * by the reduced prefixlen.
  858. */
  859. rb_erase(&node->node, root);
  860. if (!cached) {
  861. xfrm_pol_inexact_node_init(node, addr,
  862. prefixlen);
  863. cached = node;
  864. } else {
  865. /* This node also falls within the new
  866. * prefixlen. Merge the to-be-reinserted
  867. * node and this one.
  868. */
  869. xfrm_policy_inexact_node_merge(net, node,
  870. cached, family);
  871. kfree_rcu(node, rcu);
  872. }
  873. /* restart */
  874. p = &root->rb_node;
  875. parent = NULL;
  876. }
  877. }
  878. node = cached;
  879. if (!node) {
  880. node = xfrm_pol_inexact_node_alloc(addr, prefixlen);
  881. if (!node)
  882. return NULL;
  883. }
  884. rb_link_node_rcu(&node->node, parent, p);
  885. rb_insert_color(&node->node, root);
  886. return node;
  887. }
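/* Example of the subnet handling above (assumed addresses): if root_d already
 * contains a node for 10.0.1.0/24 and a policy for daddr 10.0.0.0/16 is
 * inserted, the existing /24 node falls inside the new /16 prefix. It is
 * taken out of the tree, re-initialized as the 10.0.0.0/16 node (keeping its
 * policies and subtree), and any further nodes that are also covered by the
 * /16 are merged into it before the node is linked back into the tree.
 */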
  888. static void xfrm_policy_inexact_gc_tree(struct rb_root *r, bool rm)
  889. {
  890. struct xfrm_pol_inexact_node *node;
  891. struct rb_node *rn = rb_first(r);
  892. while (rn) {
  893. node = rb_entry(rn, struct xfrm_pol_inexact_node, node);
  894. xfrm_policy_inexact_gc_tree(&node->root, rm);
  895. rn = rb_next(rn);
  896. if (!hlist_empty(&node->hhead) || !RB_EMPTY_ROOT(&node->root)) {
  897. WARN_ON_ONCE(rm);
  898. continue;
  899. }
  900. rb_erase(&node->node, r);
  901. kfree_rcu(node, rcu);
  902. }
  903. }
  904. static void __xfrm_policy_inexact_prune_bin(struct xfrm_pol_inexact_bin *b, bool net_exit)
  905. {
  906. write_seqcount_begin(&b->count);
  907. xfrm_policy_inexact_gc_tree(&b->root_d, net_exit);
  908. xfrm_policy_inexact_gc_tree(&b->root_s, net_exit);
  909. write_seqcount_end(&b->count);
  910. if (!RB_EMPTY_ROOT(&b->root_d) || !RB_EMPTY_ROOT(&b->root_s) ||
  911. !hlist_empty(&b->hhead)) {
  912. WARN_ON_ONCE(net_exit);
  913. return;
  914. }
  915. if (rhashtable_remove_fast(&xfrm_policy_inexact_table, &b->head,
  916. xfrm_pol_inexact_params) == 0) {
  917. list_del(&b->inexact_bins);
  918. kfree_rcu(b, rcu);
  919. }
  920. }
  921. static void xfrm_policy_inexact_prune_bin(struct xfrm_pol_inexact_bin *b)
  922. {
  923. struct net *net = read_pnet(&b->k.net);
  924. spin_lock_bh(&net->xfrm.xfrm_policy_lock);
  925. __xfrm_policy_inexact_prune_bin(b, false);
  926. spin_unlock_bh(&net->xfrm.xfrm_policy_lock);
  927. }
  928. static void __xfrm_policy_inexact_flush(struct net *net)
  929. {
  930. struct xfrm_pol_inexact_bin *bin, *t;
  931. lockdep_assert_held(&net->xfrm.xfrm_policy_lock);
  932. list_for_each_entry_safe(bin, t, &net->xfrm.inexact_bins, inexact_bins)
  933. __xfrm_policy_inexact_prune_bin(bin, false);
  934. }
  935. static struct hlist_head *
  936. xfrm_policy_inexact_alloc_chain(struct xfrm_pol_inexact_bin *bin,
  937. struct xfrm_policy *policy, u8 dir)
  938. {
  939. struct xfrm_pol_inexact_node *n;
  940. struct net *net;
  941. net = xp_net(policy);
  942. lockdep_assert_held(&net->xfrm.xfrm_policy_lock);
  943. if (xfrm_policy_inexact_insert_use_any_list(policy))
  944. return &bin->hhead;
  945. if (xfrm_pol_inexact_addr_use_any_list(&policy->selector.daddr,
  946. policy->family,
  947. policy->selector.prefixlen_d)) {
  948. write_seqcount_begin(&bin->count);
  949. n = xfrm_policy_inexact_insert_node(net,
  950. &bin->root_s,
  951. &policy->selector.saddr,
  952. policy->family,
  953. policy->selector.prefixlen_s,
  954. dir);
  955. write_seqcount_end(&bin->count);
  956. if (!n)
  957. return NULL;
  958. return &n->hhead;
  959. }
  960. /* daddr is fixed */
  961. write_seqcount_begin(&bin->count);
  962. n = xfrm_policy_inexact_insert_node(net,
  963. &bin->root_d,
  964. &policy->selector.daddr,
  965. policy->family,
  966. policy->selector.prefixlen_d, dir);
  967. write_seqcount_end(&bin->count);
  968. if (!n)
  969. return NULL;
  970. /* saddr is wildcard */
  971. if (xfrm_pol_inexact_addr_use_any_list(&policy->selector.saddr,
  972. policy->family,
  973. policy->selector.prefixlen_s))
  974. return &n->hhead;
  975. write_seqcount_begin(&bin->count);
  976. n = xfrm_policy_inexact_insert_node(net,
  977. &n->root,
  978. &policy->selector.saddr,
  979. policy->family,
  980. policy->selector.prefixlen_s, dir);
  981. write_seqcount_end(&bin->count);
  982. if (!n)
  983. return NULL;
  984. return &n->hhead;
  985. }
  986. static struct xfrm_policy *
  987. xfrm_policy_inexact_insert(struct xfrm_policy *policy, u8 dir, int excl)
  988. {
  989. struct xfrm_pol_inexact_bin *bin;
  990. struct xfrm_policy *delpol;
  991. struct hlist_head *chain;
  992. struct net *net;
  993. bin = xfrm_policy_inexact_alloc_bin(policy, dir);
  994. if (!bin)
  995. return ERR_PTR(-ENOMEM);
  996. net = xp_net(policy);
  997. lockdep_assert_held(&net->xfrm.xfrm_policy_lock);
  998. chain = xfrm_policy_inexact_alloc_chain(bin, policy, dir);
  999. if (!chain) {
  1000. __xfrm_policy_inexact_prune_bin(bin, false);
  1001. return ERR_PTR(-ENOMEM);
  1002. }
  1003. delpol = xfrm_policy_insert_list(chain, policy, excl);
  1004. if (delpol && excl) {
  1005. __xfrm_policy_inexact_prune_bin(bin, false);
  1006. return ERR_PTR(-EEXIST);
  1007. }
  1008. chain = &net->xfrm.policy_inexact[dir];
  1009. xfrm_policy_insert_inexact_list(chain, policy);
  1010. if (delpol)
  1011. __xfrm_policy_inexact_prune_bin(bin, false);
  1012. return delpol;
  1013. }
  1014. static void xfrm_hash_rebuild(struct work_struct *work)
  1015. {
  1016. struct net *net = container_of(work, struct net,
  1017. xfrm.policy_hthresh.work);
  1018. unsigned int hmask;
  1019. struct xfrm_policy *pol;
  1020. struct xfrm_policy *policy;
  1021. struct hlist_head *chain;
  1022. struct hlist_head *odst;
  1023. struct hlist_node *newpos;
  1024. int i;
  1025. int dir;
  1026. unsigned seq;
  1027. u8 lbits4, rbits4, lbits6, rbits6;
  1028. mutex_lock(&hash_resize_mutex);
  1029. /* read selector prefixlen thresholds */
  1030. do {
  1031. seq = read_seqbegin(&net->xfrm.policy_hthresh.lock);
  1032. lbits4 = net->xfrm.policy_hthresh.lbits4;
  1033. rbits4 = net->xfrm.policy_hthresh.rbits4;
  1034. lbits6 = net->xfrm.policy_hthresh.lbits6;
  1035. rbits6 = net->xfrm.policy_hthresh.rbits6;
  1036. } while (read_seqretry(&net->xfrm.policy_hthresh.lock, seq));
  1037. spin_lock_bh(&net->xfrm.xfrm_policy_lock);
  1038. write_seqcount_begin(&net->xfrm.xfrm_policy_hash_generation);
  1039. /* make sure that we can insert the indirect policies again before
  1040. * we start with destructive action.
  1041. */
  1042. list_for_each_entry(policy, &net->xfrm.policy_all, walk.all) {
  1043. struct xfrm_pol_inexact_bin *bin;
  1044. u8 dbits, sbits;
  1045. if (policy->walk.dead)
  1046. continue;
  1047. dir = xfrm_policy_id2dir(policy->index);
  1048. if (dir >= XFRM_POLICY_MAX)
  1049. continue;
  1050. if ((dir & XFRM_POLICY_MASK) == XFRM_POLICY_OUT) {
  1051. if (policy->family == AF_INET) {
  1052. dbits = rbits4;
  1053. sbits = lbits4;
  1054. } else {
  1055. dbits = rbits6;
  1056. sbits = lbits6;
  1057. }
  1058. } else {
  1059. if (policy->family == AF_INET) {
  1060. dbits = lbits4;
  1061. sbits = rbits4;
  1062. } else {
  1063. dbits = lbits6;
  1064. sbits = rbits6;
  1065. }
  1066. }
  1067. if (policy->selector.prefixlen_d < dbits ||
  1068. policy->selector.prefixlen_s < sbits)
  1069. continue;
  1070. bin = xfrm_policy_inexact_alloc_bin(policy, dir);
  1071. if (!bin)
  1072. goto out_unlock;
  1073. if (!xfrm_policy_inexact_alloc_chain(bin, policy, dir))
  1074. goto out_unlock;
  1075. }
  1076. /* reset the bydst and inexact table in all directions */
  1077. for (dir = 0; dir < XFRM_POLICY_MAX; dir++) {
  1078. struct hlist_node *n;
  1079. hlist_for_each_entry_safe(policy, n,
  1080. &net->xfrm.policy_inexact[dir],
  1081. bydst_inexact_list) {
  1082. hlist_del_rcu(&policy->bydst);
  1083. hlist_del_init(&policy->bydst_inexact_list);
  1084. }
  1085. hmask = net->xfrm.policy_bydst[dir].hmask;
  1086. odst = net->xfrm.policy_bydst[dir].table;
  1087. for (i = hmask; i >= 0; i--) {
  1088. hlist_for_each_entry_safe(policy, n, odst + i, bydst)
  1089. hlist_del_rcu(&policy->bydst);
  1090. }
  1091. if ((dir & XFRM_POLICY_MASK) == XFRM_POLICY_OUT) {
  1092. /* dir out => dst = remote, src = local */
  1093. net->xfrm.policy_bydst[dir].dbits4 = rbits4;
  1094. net->xfrm.policy_bydst[dir].sbits4 = lbits4;
  1095. net->xfrm.policy_bydst[dir].dbits6 = rbits6;
  1096. net->xfrm.policy_bydst[dir].sbits6 = lbits6;
  1097. } else {
  1098. /* dir in/fwd => dst = local, src = remote */
  1099. net->xfrm.policy_bydst[dir].dbits4 = lbits4;
  1100. net->xfrm.policy_bydst[dir].sbits4 = rbits4;
  1101. net->xfrm.policy_bydst[dir].dbits6 = lbits6;
  1102. net->xfrm.policy_bydst[dir].sbits6 = rbits6;
  1103. }
  1104. }
  1105. /* re-insert all policies by order of creation */
  1106. list_for_each_entry_reverse(policy, &net->xfrm.policy_all, walk.all) {
  1107. if (policy->walk.dead)
  1108. continue;
  1109. dir = xfrm_policy_id2dir(policy->index);
  1110. if (dir >= XFRM_POLICY_MAX) {
  1111. /* skip socket policies */
  1112. continue;
  1113. }
  1114. newpos = NULL;
  1115. chain = policy_hash_bysel(net, &policy->selector,
  1116. policy->family, dir);
  1117. if (!chain) {
  1118. void *p = xfrm_policy_inexact_insert(policy, dir, 0);
  1119. WARN_ONCE(IS_ERR(p), "reinsert: %ld\n", PTR_ERR(p));
  1120. continue;
  1121. }
  1122. hlist_for_each_entry(pol, chain, bydst) {
  1123. if (policy->priority >= pol->priority)
  1124. newpos = &pol->bydst;
  1125. else
  1126. break;
  1127. }
  1128. if (newpos)
  1129. hlist_add_behind_rcu(&policy->bydst, newpos);
  1130. else
  1131. hlist_add_head_rcu(&policy->bydst, chain);
  1132. }
  1133. out_unlock:
  1134. __xfrm_policy_inexact_flush(net);
  1135. write_seqcount_end(&net->xfrm.xfrm_policy_hash_generation);
  1136. spin_unlock_bh(&net->xfrm.xfrm_policy_lock);
  1137. mutex_unlock(&hash_resize_mutex);
  1138. }
  1139. void xfrm_policy_hash_rebuild(struct net *net)
  1140. {
  1141. schedule_work(&net->xfrm.policy_hthresh.work);
  1142. }
  1143. EXPORT_SYMBOL(xfrm_policy_hash_rebuild);
  1144. /* Generate new index... KAME seems to generate them ordered by cost
  1145. * of an absolute unpredictability of ordering of rules. This will not pass. */
  1146. static u32 xfrm_gen_index(struct net *net, int dir, u32 index)
  1147. {
  1148. static u32 idx_generator;
  1149. for (;;) {
  1150. struct hlist_head *list;
  1151. struct xfrm_policy *p;
  1152. u32 idx;
  1153. int found;
  1154. if (!index) {
  1155. idx = (idx_generator | dir);
  1156. idx_generator += 8;
  1157. } else {
  1158. idx = index;
  1159. index = 0;
  1160. }
  1161. if (idx == 0)
  1162. idx = 8;
  1163. list = net->xfrm.policy_byidx + idx_hash(net, idx);
  1164. found = 0;
  1165. hlist_for_each_entry(p, list, byidx) {
  1166. if (p->index == idx) {
  1167. found = 1;
  1168. break;
  1169. }
  1170. }
  1171. if (!found)
  1172. return idx;
  1173. }
  1174. }
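/* Word-by-word comparison of two selectors: returns 0 when they are identical
 * and 1 as soon as any word differs. Used to detect equivalent policies on
 * insert and lookup.
 */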
  1175. static inline int selector_cmp(struct xfrm_selector *s1, struct xfrm_selector *s2)
  1176. {
  1177. u32 *p1 = (u32 *) s1;
  1178. u32 *p2 = (u32 *) s2;
  1179. int len = sizeof(struct xfrm_selector) / sizeof(u32);
  1180. int i;
  1181. for (i = 0; i < len; i++) {
  1182. if (p1[i] != p2[i])
  1183. return 1;
  1184. }
  1185. return 0;
  1186. }
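/* Move any packets still parked on the old policy's hold queue over to the
 * new policy and re-arm the new policy's hold timer so they are re-evaluated
 * promptly.
 */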
  1187. static void xfrm_policy_requeue(struct xfrm_policy *old,
  1188. struct xfrm_policy *new)
  1189. {
  1190. struct xfrm_policy_queue *pq = &old->polq;
  1191. struct sk_buff_head list;
  1192. if (skb_queue_empty(&pq->hold_queue))
  1193. return;
  1194. __skb_queue_head_init(&list);
  1195. spin_lock_bh(&pq->hold_queue.lock);
  1196. skb_queue_splice_init(&pq->hold_queue, &list);
  1197. if (del_timer(&pq->hold_timer))
  1198. xfrm_pol_put(old);
  1199. spin_unlock_bh(&pq->hold_queue.lock);
  1200. pq = &new->polq;
  1201. spin_lock_bh(&pq->hold_queue.lock);
  1202. skb_queue_splice(&list, &pq->hold_queue);
  1203. pq->timeout = XFRM_QUEUE_TMO_MIN;
  1204. if (!mod_timer(&pq->hold_timer, jiffies))
  1205. xfrm_pol_hold(new);
  1206. spin_unlock_bh(&pq->hold_queue.lock);
  1207. }
  1208. static inline bool xfrm_policy_mark_match(const struct xfrm_mark *mark,
  1209. struct xfrm_policy *pol)
  1210. {
  1211. return mark->v == pol->mark.v && mark->m == pol->mark.m;
  1212. }
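/* rhashtable helpers for the inexact-policy bins: the hash key is built from
 * the policy type, direction, family, if_id and the owning netns.
 */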
  1213. static u32 xfrm_pol_bin_key(const void *data, u32 len, u32 seed)
  1214. {
  1215. const struct xfrm_pol_inexact_key *k = data;
  1216. u32 a = k->type << 24 | k->dir << 16 | k->family;
  1217. return jhash_3words(a, k->if_id, net_hash_mix(read_pnet(&k->net)),
  1218. seed);
  1219. }
  1220. static u32 xfrm_pol_bin_obj(const void *data, u32 len, u32 seed)
  1221. {
  1222. const struct xfrm_pol_inexact_bin *b = data;
  1223. return xfrm_pol_bin_key(&b->k, 0, seed);
  1224. }
  1225. static int xfrm_pol_bin_cmp(struct rhashtable_compare_arg *arg,
  1226. const void *ptr)
  1227. {
  1228. const struct xfrm_pol_inexact_key *key = arg->key;
  1229. const struct xfrm_pol_inexact_bin *b = ptr;
  1230. int ret;
  1231. if (!net_eq(read_pnet(&b->k.net), read_pnet(&key->net)))
  1232. return -1;
  1233. ret = b->k.dir ^ key->dir;
  1234. if (ret)
  1235. return ret;
  1236. ret = b->k.type ^ key->type;
  1237. if (ret)
  1238. return ret;
  1239. ret = b->k.family ^ key->family;
  1240. if (ret)
  1241. return ret;
  1242. return b->k.if_id ^ key->if_id;
  1243. }
  1244. static const struct rhashtable_params xfrm_pol_inexact_params = {
  1245. .head_offset = offsetof(struct xfrm_pol_inexact_bin, head),
  1246. .hashfn = xfrm_pol_bin_key,
  1247. .obj_hashfn = xfrm_pol_bin_obj,
  1248. .obj_cmpfn = xfrm_pol_bin_cmp,
  1249. .automatic_shrinking = true,
  1250. };
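/* Insert @policy into an inexact bin chain, keeping the chain ordered by
 * priority, then renumber the per-entry ->pos values that preserve insertion
 * order for later lookups.
 */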
  1251. static void xfrm_policy_insert_inexact_list(struct hlist_head *chain,
  1252. struct xfrm_policy *policy)
  1253. {
  1254. struct xfrm_policy *pol, *delpol = NULL;
  1255. struct hlist_node *newpos = NULL;
  1256. int i = 0;
  1257. hlist_for_each_entry(pol, chain, bydst_inexact_list) {
  1258. if (pol->type == policy->type &&
  1259. pol->if_id == policy->if_id &&
  1260. !selector_cmp(&pol->selector, &policy->selector) &&
  1261. xfrm_policy_mark_match(&policy->mark, pol) &&
  1262. xfrm_sec_ctx_match(pol->security, policy->security) &&
  1263. !WARN_ON(delpol)) {
  1264. delpol = pol;
  1265. if (policy->priority > pol->priority)
  1266. continue;
  1267. } else if (policy->priority >= pol->priority) {
  1268. newpos = &pol->bydst_inexact_list;
  1269. continue;
  1270. }
  1271. if (delpol)
  1272. break;
  1273. }
  1274. if (newpos)
  1275. hlist_add_behind_rcu(&policy->bydst_inexact_list, newpos);
  1276. else
  1277. hlist_add_head_rcu(&policy->bydst_inexact_list, chain);
  1278. hlist_for_each_entry(pol, chain, bydst_inexact_list) {
  1279. pol->pos = i;
  1280. i++;
  1281. }
  1282. }
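/* Insert @policy into a hashed bydst chain ordered by priority. If an
 * equivalent policy (same type, if_id, selector, mark and security context)
 * already exists, it is returned so the caller can replace it, or
 * ERR_PTR(-EEXIST) is returned when @excl is set.
 */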
  1283. static struct xfrm_policy *xfrm_policy_insert_list(struct hlist_head *chain,
  1284. struct xfrm_policy *policy,
  1285. bool excl)
  1286. {
  1287. struct xfrm_policy *pol, *newpos = NULL, *delpol = NULL;
  1288. hlist_for_each_entry(pol, chain, bydst) {
  1289. if (pol->type == policy->type &&
  1290. pol->if_id == policy->if_id &&
  1291. !selector_cmp(&pol->selector, &policy->selector) &&
  1292. xfrm_policy_mark_match(&policy->mark, pol) &&
  1293. xfrm_sec_ctx_match(pol->security, policy->security) &&
  1294. !WARN_ON(delpol)) {
  1295. if (excl)
  1296. return ERR_PTR(-EEXIST);
  1297. delpol = pol;
  1298. if (policy->priority > pol->priority)
  1299. continue;
  1300. } else if (policy->priority >= pol->priority) {
  1301. newpos = pol;
  1302. continue;
  1303. }
  1304. if (delpol)
  1305. break;
  1306. }
  1307. if (newpos)
  1308. hlist_add_behind_rcu(&policy->bydst, &newpos->bydst);
  1309. else
  1310. hlist_add_head_rcu(&policy->bydst, chain);
  1311. return delpol;
  1312. }
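/* Link a new policy into the exact (hashed) or inexact tables, replace any
 * equivalent old policy, assign or reuse an index, bump the route genid and
 * arm the policy timer.
 */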
  1313. int xfrm_policy_insert(int dir, struct xfrm_policy *policy, int excl)
  1314. {
  1315. struct net *net = xp_net(policy);
  1316. struct xfrm_policy *delpol;
  1317. struct hlist_head *chain;
  1318. spin_lock_bh(&net->xfrm.xfrm_policy_lock);
  1319. chain = policy_hash_bysel(net, &policy->selector, policy->family, dir);
  1320. if (chain)
  1321. delpol = xfrm_policy_insert_list(chain, policy, excl);
  1322. else
  1323. delpol = xfrm_policy_inexact_insert(policy, dir, excl);
  1324. if (IS_ERR(delpol)) {
  1325. spin_unlock_bh(&net->xfrm.xfrm_policy_lock);
  1326. return PTR_ERR(delpol);
  1327. }
  1328. __xfrm_policy_link(policy, dir);
1329. /* After the previous checks, family can only be AF_INET or AF_INET6 */
  1330. if (policy->family == AF_INET)
  1331. rt_genid_bump_ipv4(net);
  1332. else
  1333. rt_genid_bump_ipv6(net);
  1334. if (delpol) {
  1335. xfrm_policy_requeue(delpol, policy);
  1336. __xfrm_policy_unlink(delpol, dir);
  1337. }
  1338. policy->index = delpol ? delpol->index : xfrm_gen_index(net, dir, policy->index);
  1339. hlist_add_head(&policy->byidx, net->xfrm.policy_byidx+idx_hash(net, policy->index));
  1340. policy->curlft.add_time = ktime_get_real_seconds();
  1341. policy->curlft.use_time = 0;
  1342. if (!mod_timer(&policy->timer, jiffies + HZ))
  1343. xfrm_pol_hold(policy);
  1344. spin_unlock_bh(&net->xfrm.xfrm_policy_lock);
  1345. if (delpol)
  1346. xfrm_policy_kill(delpol);
  1347. else if (xfrm_bydst_should_resize(net, dir, NULL))
  1348. schedule_work(&net->xfrm.policy_hash_work);
  1349. return 0;
  1350. }
  1351. EXPORT_SYMBOL(xfrm_policy_insert);
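/* Walk one bydst chain looking for the policy that exactly matches the given
 * type, if_id, mark, selector and security context.
 */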
  1352. static struct xfrm_policy *
  1353. __xfrm_policy_bysel_ctx(struct hlist_head *chain, const struct xfrm_mark *mark,
  1354. u32 if_id, u8 type, int dir, struct xfrm_selector *sel,
  1355. struct xfrm_sec_ctx *ctx)
  1356. {
  1357. struct xfrm_policy *pol;
  1358. if (!chain)
  1359. return NULL;
  1360. hlist_for_each_entry(pol, chain, bydst) {
  1361. if (pol->type == type &&
  1362. pol->if_id == if_id &&
  1363. xfrm_policy_mark_match(mark, pol) &&
  1364. !selector_cmp(sel, &pol->selector) &&
  1365. xfrm_sec_ctx_match(ctx, pol->security))
  1366. return pol;
  1367. }
  1368. return NULL;
  1369. }
  1370. struct xfrm_policy *
  1371. xfrm_policy_bysel_ctx(struct net *net, const struct xfrm_mark *mark, u32 if_id,
  1372. u8 type, int dir, struct xfrm_selector *sel,
  1373. struct xfrm_sec_ctx *ctx, int delete, int *err)
  1374. {
  1375. struct xfrm_pol_inexact_bin *bin = NULL;
  1376. struct xfrm_policy *pol, *ret = NULL;
  1377. struct hlist_head *chain;
  1378. *err = 0;
  1379. spin_lock_bh(&net->xfrm.xfrm_policy_lock);
  1380. chain = policy_hash_bysel(net, sel, sel->family, dir);
  1381. if (!chain) {
  1382. struct xfrm_pol_inexact_candidates cand;
  1383. int i;
  1384. bin = xfrm_policy_inexact_lookup(net, type,
  1385. sel->family, dir, if_id);
  1386. if (!bin) {
  1387. spin_unlock_bh(&net->xfrm.xfrm_policy_lock);
  1388. return NULL;
  1389. }
  1390. if (!xfrm_policy_find_inexact_candidates(&cand, bin,
  1391. &sel->saddr,
  1392. &sel->daddr)) {
  1393. spin_unlock_bh(&net->xfrm.xfrm_policy_lock);
  1394. return NULL;
  1395. }
  1396. pol = NULL;
  1397. for (i = 0; i < ARRAY_SIZE(cand.res); i++) {
  1398. struct xfrm_policy *tmp;
  1399. tmp = __xfrm_policy_bysel_ctx(cand.res[i], mark,
  1400. if_id, type, dir,
  1401. sel, ctx);
  1402. if (!tmp)
  1403. continue;
  1404. if (!pol || tmp->pos < pol->pos)
  1405. pol = tmp;
  1406. }
  1407. } else {
  1408. pol = __xfrm_policy_bysel_ctx(chain, mark, if_id, type, dir,
  1409. sel, ctx);
  1410. }
  1411. if (pol) {
  1412. xfrm_pol_hold(pol);
  1413. if (delete) {
  1414. *err = security_xfrm_policy_delete(pol->security);
  1415. if (*err) {
  1416. spin_unlock_bh(&net->xfrm.xfrm_policy_lock);
  1417. return pol;
  1418. }
  1419. __xfrm_policy_unlink(pol, dir);
  1420. }
  1421. ret = pol;
  1422. }
  1423. spin_unlock_bh(&net->xfrm.xfrm_policy_lock);
  1424. if (ret && delete)
  1425. xfrm_policy_kill(ret);
  1426. if (bin && delete)
  1427. xfrm_policy_inexact_prune_bin(bin);
  1428. return ret;
  1429. }
  1430. EXPORT_SYMBOL(xfrm_policy_bysel_ctx);
  1431. struct xfrm_policy *
  1432. xfrm_policy_byid(struct net *net, const struct xfrm_mark *mark, u32 if_id,
  1433. u8 type, int dir, u32 id, int delete, int *err)
  1434. {
  1435. struct xfrm_policy *pol, *ret;
  1436. struct hlist_head *chain;
  1437. *err = -ENOENT;
  1438. if (xfrm_policy_id2dir(id) != dir)
  1439. return NULL;
  1440. *err = 0;
  1441. spin_lock_bh(&net->xfrm.xfrm_policy_lock);
  1442. chain = net->xfrm.policy_byidx + idx_hash(net, id);
  1443. ret = NULL;
  1444. hlist_for_each_entry(pol, chain, byidx) {
  1445. if (pol->type == type && pol->index == id &&
  1446. pol->if_id == if_id && xfrm_policy_mark_match(mark, pol)) {
  1447. xfrm_pol_hold(pol);
  1448. if (delete) {
  1449. *err = security_xfrm_policy_delete(
  1450. pol->security);
  1451. if (*err) {
  1452. spin_unlock_bh(&net->xfrm.xfrm_policy_lock);
  1453. return pol;
  1454. }
  1455. __xfrm_policy_unlink(pol, dir);
  1456. }
  1457. ret = pol;
  1458. break;
  1459. }
  1460. }
  1461. spin_unlock_bh(&net->xfrm.xfrm_policy_lock);
  1462. if (ret && delete)
  1463. xfrm_policy_kill(ret);
  1464. return ret;
  1465. }
  1466. EXPORT_SYMBOL(xfrm_policy_byid);
  1467. #ifdef CONFIG_SECURITY_NETWORK_XFRM
  1468. static inline int
  1469. xfrm_policy_flush_secctx_check(struct net *net, u8 type, bool task_valid)
  1470. {
  1471. struct xfrm_policy *pol;
  1472. int err = 0;
  1473. list_for_each_entry(pol, &net->xfrm.policy_all, walk.all) {
  1474. if (pol->walk.dead ||
  1475. xfrm_policy_id2dir(pol->index) >= XFRM_POLICY_MAX ||
  1476. pol->type != type)
  1477. continue;
  1478. err = security_xfrm_policy_delete(pol->security);
  1479. if (err) {
  1480. xfrm_audit_policy_delete(pol, 0, task_valid);
  1481. return err;
  1482. }
  1483. }
  1484. return err;
  1485. }
  1486. #else
  1487. static inline int
  1488. xfrm_policy_flush_secctx_check(struct net *net, u8 type, bool task_valid)
  1489. {
  1490. return 0;
  1491. }
  1492. #endif
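/* Delete all policies of the given type in this netns, restarting the list
 * walk after each deletion because the policy lock is dropped while the
 * policy is being killed.
 */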
  1493. int xfrm_policy_flush(struct net *net, u8 type, bool task_valid)
  1494. {
  1495. int dir, err = 0, cnt = 0;
  1496. struct xfrm_policy *pol;
  1497. spin_lock_bh(&net->xfrm.xfrm_policy_lock);
  1498. err = xfrm_policy_flush_secctx_check(net, type, task_valid);
  1499. if (err)
  1500. goto out;
  1501. again:
  1502. list_for_each_entry(pol, &net->xfrm.policy_all, walk.all) {
  1503. if (pol->walk.dead)
  1504. continue;
  1505. dir = xfrm_policy_id2dir(pol->index);
  1506. if (dir >= XFRM_POLICY_MAX ||
  1507. pol->type != type)
  1508. continue;
  1509. __xfrm_policy_unlink(pol, dir);
  1510. spin_unlock_bh(&net->xfrm.xfrm_policy_lock);
  1511. cnt++;
  1512. xfrm_audit_policy_delete(pol, 1, task_valid);
  1513. xfrm_policy_kill(pol);
  1514. spin_lock_bh(&net->xfrm.xfrm_policy_lock);
  1515. goto again;
  1516. }
  1517. if (cnt)
  1518. __xfrm_policy_inexact_flush(net);
  1519. else
  1520. err = -ESRCH;
  1521. out:
  1522. spin_unlock_bh(&net->xfrm.xfrm_policy_lock);
  1523. return err;
  1524. }
  1525. EXPORT_SYMBOL(xfrm_policy_flush);
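/* Resumable dump over net->xfrm.policy_all: when the callback stops the
 * iteration, the walker entry is spliced into the list so a later call can
 * resume from that point.
 */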
  1526. int xfrm_policy_walk(struct net *net, struct xfrm_policy_walk *walk,
  1527. int (*func)(struct xfrm_policy *, int, int, void*),
  1528. void *data)
  1529. {
  1530. struct xfrm_policy *pol;
  1531. struct xfrm_policy_walk_entry *x;
  1532. int error = 0;
  1533. if (walk->type >= XFRM_POLICY_TYPE_MAX &&
  1534. walk->type != XFRM_POLICY_TYPE_ANY)
  1535. return -EINVAL;
  1536. if (list_empty(&walk->walk.all) && walk->seq != 0)
  1537. return 0;
  1538. spin_lock_bh(&net->xfrm.xfrm_policy_lock);
  1539. if (list_empty(&walk->walk.all))
  1540. x = list_first_entry(&net->xfrm.policy_all, struct xfrm_policy_walk_entry, all);
  1541. else
  1542. x = list_first_entry(&walk->walk.all,
  1543. struct xfrm_policy_walk_entry, all);
  1544. list_for_each_entry_from(x, &net->xfrm.policy_all, all) {
  1545. if (x->dead)
  1546. continue;
  1547. pol = container_of(x, struct xfrm_policy, walk);
  1548. if (walk->type != XFRM_POLICY_TYPE_ANY &&
  1549. walk->type != pol->type)
  1550. continue;
  1551. error = func(pol, xfrm_policy_id2dir(pol->index),
  1552. walk->seq, data);
  1553. if (error) {
  1554. list_move_tail(&walk->walk.all, &x->all);
  1555. goto out;
  1556. }
  1557. walk->seq++;
  1558. }
  1559. if (walk->seq == 0) {
  1560. error = -ENOENT;
  1561. goto out;
  1562. }
  1563. list_del_init(&walk->walk.all);
  1564. out:
  1565. spin_unlock_bh(&net->xfrm.xfrm_policy_lock);
  1566. return error;
  1567. }
  1568. EXPORT_SYMBOL(xfrm_policy_walk);
  1569. void xfrm_policy_walk_init(struct xfrm_policy_walk *walk, u8 type)
  1570. {
  1571. INIT_LIST_HEAD(&walk->walk.all);
  1572. walk->walk.dead = 1;
  1573. walk->type = type;
  1574. walk->seq = 0;
  1575. }
  1576. EXPORT_SYMBOL(xfrm_policy_walk_init);
  1577. void xfrm_policy_walk_done(struct xfrm_policy_walk *walk, struct net *net)
  1578. {
  1579. if (list_empty(&walk->walk.all))
  1580. return;
1581. spin_lock_bh(&net->xfrm.xfrm_policy_lock);
  1582. list_del(&walk->walk.all);
  1583. spin_unlock_bh(&net->xfrm.xfrm_policy_lock);
  1584. }
  1585. EXPORT_SYMBOL(xfrm_policy_walk_done);
1586. /*
1587. * Check whether a policy applies to this flow.
1588. *
1589. * Returns 0 if the policy matches, otherwise a negative errno.
1590. */
  1591. static int xfrm_policy_match(const struct xfrm_policy *pol,
  1592. const struct flowi *fl,
  1593. u8 type, u16 family, u32 if_id)
  1594. {
  1595. const struct xfrm_selector *sel = &pol->selector;
  1596. int ret = -ESRCH;
  1597. bool match;
  1598. if (pol->family != family ||
  1599. pol->if_id != if_id ||
  1600. (fl->flowi_mark & pol->mark.m) != pol->mark.v ||
  1601. pol->type != type)
  1602. return ret;
  1603. match = xfrm_selector_match(sel, fl, family);
  1604. if (match)
  1605. ret = security_xfrm_policy_lookup(pol->security, fl->flowi_secid);
  1606. return ret;
  1607. }
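/* Lockless search of an inexact-policy prefix tree for the node whose prefix
 * covers @addr; the seqcount retry protects against concurrent tree changes.
 */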
  1608. static struct xfrm_pol_inexact_node *
  1609. xfrm_policy_lookup_inexact_addr(const struct rb_root *r,
  1610. seqcount_spinlock_t *count,
  1611. const xfrm_address_t *addr, u16 family)
  1612. {
  1613. const struct rb_node *parent;
  1614. int seq;
  1615. again:
  1616. seq = read_seqcount_begin(count);
  1617. parent = rcu_dereference_raw(r->rb_node);
  1618. while (parent) {
  1619. struct xfrm_pol_inexact_node *node;
  1620. int delta;
  1621. node = rb_entry(parent, struct xfrm_pol_inexact_node, node);
  1622. delta = xfrm_policy_addr_delta(addr, &node->addr,
  1623. node->prefixlen, family);
  1624. if (delta < 0) {
  1625. parent = rcu_dereference_raw(parent->rb_left);
  1626. continue;
  1627. } else if (delta > 0) {
  1628. parent = rcu_dereference_raw(parent->rb_right);
  1629. continue;
  1630. }
  1631. return node;
  1632. }
  1633. if (read_seqcount_retry(count, seq))
  1634. goto again;
  1635. return NULL;
  1636. }
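/* Collect the candidate chains for this flow from an inexact bin: the bin's
 * "any" chain plus the chains keyed by daddr, by saddr, and by daddr+saddr
 * when matching tree nodes exist.
 */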
  1637. static bool
  1638. xfrm_policy_find_inexact_candidates(struct xfrm_pol_inexact_candidates *cand,
  1639. struct xfrm_pol_inexact_bin *b,
  1640. const xfrm_address_t *saddr,
  1641. const xfrm_address_t *daddr)
  1642. {
  1643. struct xfrm_pol_inexact_node *n;
  1644. u16 family;
  1645. if (!b)
  1646. return false;
  1647. family = b->k.family;
  1648. memset(cand, 0, sizeof(*cand));
  1649. cand->res[XFRM_POL_CAND_ANY] = &b->hhead;
  1650. n = xfrm_policy_lookup_inexact_addr(&b->root_d, &b->count, daddr,
  1651. family);
  1652. if (n) {
  1653. cand->res[XFRM_POL_CAND_DADDR] = &n->hhead;
  1654. n = xfrm_policy_lookup_inexact_addr(&n->root, &b->count, saddr,
  1655. family);
  1656. if (n)
  1657. cand->res[XFRM_POL_CAND_BOTH] = &n->hhead;
  1658. }
  1659. n = xfrm_policy_lookup_inexact_addr(&b->root_s, &b->count, saddr,
  1660. family);
  1661. if (n)
  1662. cand->res[XFRM_POL_CAND_SADDR] = &n->hhead;
  1663. return true;
  1664. }
  1665. static struct xfrm_pol_inexact_bin *
  1666. xfrm_policy_inexact_lookup_rcu(struct net *net, u8 type, u16 family,
  1667. u8 dir, u32 if_id)
  1668. {
  1669. struct xfrm_pol_inexact_key k = {
  1670. .family = family,
  1671. .type = type,
  1672. .dir = dir,
  1673. .if_id = if_id,
  1674. };
  1675. write_pnet(&k.net, net);
  1676. return rhashtable_lookup(&xfrm_policy_inexact_table, &k,
  1677. xfrm_pol_inexact_params);
  1678. }
  1679. static struct xfrm_pol_inexact_bin *
  1680. xfrm_policy_inexact_lookup(struct net *net, u8 type, u16 family,
  1681. u8 dir, u32 if_id)
  1682. {
  1683. struct xfrm_pol_inexact_bin *bin;
  1684. lockdep_assert_held(&net->xfrm.xfrm_policy_lock);
  1685. rcu_read_lock();
  1686. bin = xfrm_policy_inexact_lookup_rcu(net, type, family, dir, if_id);
  1687. rcu_read_unlock();
  1688. return bin;
  1689. }
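/* Scan one candidate chain for a policy matching the flow that is preferable
 * to @prefer: a lower priority value, or the same priority but an earlier
 * insertion position.
 */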
  1690. static struct xfrm_policy *
  1691. __xfrm_policy_eval_candidates(struct hlist_head *chain,
  1692. struct xfrm_policy *prefer,
  1693. const struct flowi *fl,
  1694. u8 type, u16 family, u32 if_id)
  1695. {
  1696. u32 priority = prefer ? prefer->priority : ~0u;
  1697. struct xfrm_policy *pol;
  1698. if (!chain)
  1699. return NULL;
  1700. hlist_for_each_entry_rcu(pol, chain, bydst) {
  1701. int err;
  1702. if (pol->priority > priority)
  1703. break;
  1704. err = xfrm_policy_match(pol, fl, type, family, if_id);
  1705. if (err) {
  1706. if (err != -ESRCH)
  1707. return ERR_PTR(err);
  1708. continue;
  1709. }
  1710. if (prefer) {
  1711. /* matches. Is it older than *prefer? */
  1712. if (pol->priority == priority &&
  1713. prefer->pos < pol->pos)
  1714. return prefer;
  1715. }
  1716. return pol;
  1717. }
  1718. return NULL;
  1719. }
  1720. static struct xfrm_policy *
  1721. xfrm_policy_eval_candidates(struct xfrm_pol_inexact_candidates *cand,
  1722. struct xfrm_policy *prefer,
  1723. const struct flowi *fl,
  1724. u8 type, u16 family, u32 if_id)
  1725. {
  1726. struct xfrm_policy *tmp;
  1727. int i;
  1728. for (i = 0; i < ARRAY_SIZE(cand->res); i++) {
  1729. tmp = __xfrm_policy_eval_candidates(cand->res[i],
  1730. prefer,
  1731. fl, type, family, if_id);
  1732. if (!tmp)
  1733. continue;
  1734. if (IS_ERR(tmp))
  1735. return tmp;
  1736. prefer = tmp;
  1737. }
  1738. return prefer;
  1739. }
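/* Main policy lookup for a flow: search the exact bydst hash table first,
 * then the inexact candidate chains, retrying if the hash tables were rebuilt
 * underneath us, and take a reference on the result.
 */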
  1740. static struct xfrm_policy *xfrm_policy_lookup_bytype(struct net *net, u8 type,
  1741. const struct flowi *fl,
  1742. u16 family, u8 dir,
  1743. u32 if_id)
  1744. {
  1745. struct xfrm_pol_inexact_candidates cand;
  1746. const xfrm_address_t *daddr, *saddr;
  1747. struct xfrm_pol_inexact_bin *bin;
  1748. struct xfrm_policy *pol, *ret;
  1749. struct hlist_head *chain;
  1750. unsigned int sequence;
  1751. int err;
  1752. daddr = xfrm_flowi_daddr(fl, family);
  1753. saddr = xfrm_flowi_saddr(fl, family);
  1754. if (unlikely(!daddr || !saddr))
  1755. return NULL;
  1756. rcu_read_lock();
  1757. retry:
  1758. do {
  1759. sequence = read_seqcount_begin(&net->xfrm.xfrm_policy_hash_generation);
  1760. chain = policy_hash_direct(net, daddr, saddr, family, dir);
  1761. } while (read_seqcount_retry(&net->xfrm.xfrm_policy_hash_generation, sequence));
  1762. ret = NULL;
  1763. hlist_for_each_entry_rcu(pol, chain, bydst) {
  1764. err = xfrm_policy_match(pol, fl, type, family, if_id);
  1765. if (err) {
  1766. if (err == -ESRCH)
  1767. continue;
  1768. else {
  1769. ret = ERR_PTR(err);
  1770. goto fail;
  1771. }
  1772. } else {
  1773. ret = pol;
  1774. break;
  1775. }
  1776. }
  1777. bin = xfrm_policy_inexact_lookup_rcu(net, type, family, dir, if_id);
  1778. if (!bin || !xfrm_policy_find_inexact_candidates(&cand, bin, saddr,
  1779. daddr))
  1780. goto skip_inexact;
  1781. pol = xfrm_policy_eval_candidates(&cand, ret, fl, type,
  1782. family, if_id);
  1783. if (pol) {
  1784. ret = pol;
  1785. if (IS_ERR(pol))
  1786. goto fail;
  1787. }
  1788. skip_inexact:
  1789. if (read_seqcount_retry(&net->xfrm.xfrm_policy_hash_generation, sequence))
  1790. goto retry;
  1791. if (ret && !xfrm_pol_hold_rcu(ret))
  1792. goto retry;
  1793. fail:
  1794. rcu_read_unlock();
  1795. return ret;
  1796. }
  1797. static struct xfrm_policy *xfrm_policy_lookup(struct net *net,
  1798. const struct flowi *fl,
  1799. u16 family, u8 dir, u32 if_id)
  1800. {
  1801. #ifdef CONFIG_XFRM_SUB_POLICY
  1802. struct xfrm_policy *pol;
  1803. pol = xfrm_policy_lookup_bytype(net, XFRM_POLICY_TYPE_SUB, fl, family,
  1804. dir, if_id);
  1805. if (pol != NULL)
  1806. return pol;
  1807. #endif
  1808. return xfrm_policy_lookup_bytype(net, XFRM_POLICY_TYPE_MAIN, fl, family,
  1809. dir, if_id);
  1810. }
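/* Per-socket policy lookup: under RCU, check the socket's cached policy
 * against the flow's family, selector, mark and if_id and take a reference
 * on it if it matches.
 */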
  1811. static struct xfrm_policy *xfrm_sk_policy_lookup(const struct sock *sk, int dir,
  1812. const struct flowi *fl,
  1813. u16 family, u32 if_id)
  1814. {
  1815. struct xfrm_policy *pol;
  1816. rcu_read_lock();
  1817. again:
  1818. pol = rcu_dereference(sk->sk_policy[dir]);
  1819. if (pol != NULL) {
  1820. bool match;
  1821. int err = 0;
  1822. if (pol->family != family) {
  1823. pol = NULL;
  1824. goto out;
  1825. }
  1826. match = xfrm_selector_match(&pol->selector, fl, family);
  1827. if (match) {
  1828. if ((READ_ONCE(sk->sk_mark) & pol->mark.m) != pol->mark.v ||
  1829. pol->if_id != if_id) {
  1830. pol = NULL;
  1831. goto out;
  1832. }
  1833. err = security_xfrm_policy_lookup(pol->security,
  1834. fl->flowi_secid);
  1835. if (!err) {
  1836. if (!xfrm_pol_hold_rcu(pol))
  1837. goto again;
  1838. } else if (err == -ESRCH) {
  1839. pol = NULL;
  1840. } else {
  1841. pol = ERR_PTR(err);
  1842. }
  1843. } else
  1844. pol = NULL;
  1845. }
  1846. out:
  1847. rcu_read_unlock();
  1848. return pol;
  1849. }
  1850. static void __xfrm_policy_link(struct xfrm_policy *pol, int dir)
  1851. {
  1852. struct net *net = xp_net(pol);
  1853. list_add(&pol->walk.all, &net->xfrm.policy_all);
  1854. net->xfrm.policy_count[dir]++;
  1855. xfrm_pol_hold(pol);
  1856. }
  1857. static struct xfrm_policy *__xfrm_policy_unlink(struct xfrm_policy *pol,
  1858. int dir)
  1859. {
  1860. struct net *net = xp_net(pol);
  1861. if (list_empty(&pol->walk.all))
  1862. return NULL;
  1863. /* Socket policies are not hashed. */
  1864. if (!hlist_unhashed(&pol->bydst)) {
  1865. hlist_del_rcu(&pol->bydst);
  1866. hlist_del_init(&pol->bydst_inexact_list);
  1867. hlist_del(&pol->byidx);
  1868. }
  1869. list_del_init(&pol->walk.all);
  1870. net->xfrm.policy_count[dir]--;
  1871. return pol;
  1872. }
  1873. static void xfrm_sk_policy_link(struct xfrm_policy *pol, int dir)
  1874. {
  1875. __xfrm_policy_link(pol, XFRM_POLICY_MAX + dir);
  1876. }
  1877. static void xfrm_sk_policy_unlink(struct xfrm_policy *pol, int dir)
  1878. {
  1879. __xfrm_policy_unlink(pol, XFRM_POLICY_MAX + dir);
  1880. }
  1881. int xfrm_policy_delete(struct xfrm_policy *pol, int dir)
  1882. {
  1883. struct net *net = xp_net(pol);
  1884. spin_lock_bh(&net->xfrm.xfrm_policy_lock);
  1885. pol = __xfrm_policy_unlink(pol, dir);
  1886. spin_unlock_bh(&net->xfrm.xfrm_policy_lock);
  1887. if (pol) {
  1888. xfrm_policy_kill(pol);
  1889. return 0;
  1890. }
  1891. return -ENOENT;
  1892. }
  1893. EXPORT_SYMBOL(xfrm_policy_delete);
  1894. int xfrm_sk_policy_insert(struct sock *sk, int dir, struct xfrm_policy *pol)
  1895. {
  1896. struct net *net = sock_net(sk);
  1897. struct xfrm_policy *old_pol;
  1898. #ifdef CONFIG_XFRM_SUB_POLICY
  1899. if (pol && pol->type != XFRM_POLICY_TYPE_MAIN)
  1900. return -EINVAL;
  1901. #endif
  1902. spin_lock_bh(&net->xfrm.xfrm_policy_lock);
  1903. old_pol = rcu_dereference_protected(sk->sk_policy[dir],
  1904. lockdep_is_held(&net->xfrm.xfrm_policy_lock));
  1905. if (pol) {
  1906. pol->curlft.add_time = ktime_get_real_seconds();
  1907. pol->index = xfrm_gen_index(net, XFRM_POLICY_MAX+dir, 0);
  1908. xfrm_sk_policy_link(pol, dir);
  1909. }
  1910. rcu_assign_pointer(sk->sk_policy[dir], pol);
  1911. if (old_pol) {
  1912. if (pol)
  1913. xfrm_policy_requeue(old_pol, pol);
1914. /* Unlinking always succeeds. This is the only function
1915. * allowed to delete or replace a socket policy.
1916. */
  1917. xfrm_sk_policy_unlink(old_pol, dir);
  1918. }
  1919. spin_unlock_bh(&net->xfrm.xfrm_policy_lock);
  1920. if (old_pol) {
  1921. xfrm_policy_kill(old_pol);
  1922. }
  1923. return 0;
  1924. }
  1925. static struct xfrm_policy *clone_policy(const struct xfrm_policy *old, int dir)
  1926. {
  1927. struct xfrm_policy *newp = xfrm_policy_alloc(xp_net(old), GFP_ATOMIC);
  1928. struct net *net = xp_net(old);
  1929. if (newp) {
  1930. newp->selector = old->selector;
  1931. if (security_xfrm_policy_clone(old->security,
  1932. &newp->security)) {
  1933. kfree(newp);
  1934. return NULL; /* ENOMEM */
  1935. }
  1936. newp->lft = old->lft;
  1937. newp->curlft = old->curlft;
  1938. newp->mark = old->mark;
  1939. newp->if_id = old->if_id;
  1940. newp->action = old->action;
  1941. newp->flags = old->flags;
  1942. newp->xfrm_nr = old->xfrm_nr;
  1943. newp->index = old->index;
  1944. newp->type = old->type;
  1945. newp->family = old->family;
  1946. memcpy(newp->xfrm_vec, old->xfrm_vec,
  1947. newp->xfrm_nr*sizeof(struct xfrm_tmpl));
  1948. spin_lock_bh(&net->xfrm.xfrm_policy_lock);
  1949. xfrm_sk_policy_link(newp, dir);
  1950. spin_unlock_bh(&net->xfrm.xfrm_policy_lock);
  1951. xfrm_pol_put(newp);
  1952. }
  1953. return newp;
  1954. }
  1955. int __xfrm_sk_clone_policy(struct sock *sk, const struct sock *osk)
  1956. {
  1957. const struct xfrm_policy *p;
  1958. struct xfrm_policy *np;
  1959. int i, ret = 0;
  1960. rcu_read_lock();
  1961. for (i = 0; i < 2; i++) {
  1962. p = rcu_dereference(osk->sk_policy[i]);
  1963. if (p) {
  1964. np = clone_policy(p, i);
  1965. if (unlikely(!np)) {
  1966. ret = -ENOMEM;
  1967. break;
  1968. }
  1969. rcu_assign_pointer(sk->sk_policy[i], np);
  1970. }
  1971. }
  1972. rcu_read_unlock();
  1973. return ret;
  1974. }
  1975. static int
  1976. xfrm_get_saddr(struct net *net, int oif, xfrm_address_t *local,
  1977. xfrm_address_t *remote, unsigned short family, u32 mark)
  1978. {
  1979. int err;
  1980. const struct xfrm_policy_afinfo *afinfo = xfrm_policy_get_afinfo(family);
  1981. if (unlikely(afinfo == NULL))
  1982. return -EINVAL;
  1983. err = afinfo->get_saddr(net, oif, local, remote, mark);
  1984. rcu_read_unlock();
  1985. return err;
  1986. }
  1987. /* Resolve list of templates for the flow, given policy. */
  1988. static int
  1989. xfrm_tmpl_resolve_one(struct xfrm_policy *policy, const struct flowi *fl,
  1990. struct xfrm_state **xfrm, unsigned short family)
  1991. {
  1992. struct net *net = xp_net(policy);
  1993. int nx;
  1994. int i, error;
  1995. xfrm_address_t *daddr = xfrm_flowi_daddr(fl, family);
  1996. xfrm_address_t *saddr = xfrm_flowi_saddr(fl, family);
  1997. xfrm_address_t tmp;
  1998. for (nx = 0, i = 0; i < policy->xfrm_nr; i++) {
  1999. struct xfrm_state *x;
  2000. xfrm_address_t *remote = daddr;
  2001. xfrm_address_t *local = saddr;
  2002. struct xfrm_tmpl *tmpl = &policy->xfrm_vec[i];
  2003. if (tmpl->mode == XFRM_MODE_TUNNEL ||
  2004. tmpl->mode == XFRM_MODE_BEET) {
  2005. remote = &tmpl->id.daddr;
  2006. local = &tmpl->saddr;
  2007. if (xfrm_addr_any(local, tmpl->encap_family)) {
  2008. error = xfrm_get_saddr(net, fl->flowi_oif,
  2009. &tmp, remote,
  2010. tmpl->encap_family, 0);
  2011. if (error)
  2012. goto fail;
  2013. local = &tmp;
  2014. }
  2015. }
  2016. x = xfrm_state_find(remote, local, fl, tmpl, policy, &error,
  2017. family, policy->if_id);
  2018. if (x && x->km.state == XFRM_STATE_VALID) {
  2019. xfrm[nx++] = x;
  2020. daddr = remote;
  2021. saddr = local;
  2022. continue;
  2023. }
  2024. if (x) {
  2025. error = (x->km.state == XFRM_STATE_ERROR ?
  2026. -EINVAL : -EAGAIN);
  2027. xfrm_state_put(x);
  2028. } else if (error == -ESRCH) {
  2029. error = -EAGAIN;
  2030. }
  2031. if (!tmpl->optional)
  2032. goto fail;
  2033. }
  2034. return nx;
  2035. fail:
  2036. for (nx--; nx >= 0; nx--)
  2037. xfrm_state_put(xfrm[nx]);
  2038. return error;
  2039. }
  2040. static int
  2041. xfrm_tmpl_resolve(struct xfrm_policy **pols, int npols, const struct flowi *fl,
  2042. struct xfrm_state **xfrm, unsigned short family)
  2043. {
  2044. struct xfrm_state *tp[XFRM_MAX_DEPTH];
  2045. struct xfrm_state **tpp = (npols > 1) ? tp : xfrm;
  2046. int cnx = 0;
  2047. int error;
  2048. int ret;
  2049. int i;
  2050. for (i = 0; i < npols; i++) {
  2051. if (cnx + pols[i]->xfrm_nr >= XFRM_MAX_DEPTH) {
  2052. error = -ENOBUFS;
  2053. goto fail;
  2054. }
  2055. ret = xfrm_tmpl_resolve_one(pols[i], fl, &tpp[cnx], family);
  2056. if (ret < 0) {
  2057. error = ret;
  2058. goto fail;
  2059. } else
  2060. cnx += ret;
  2061. }
  2062. /* found states are sorted for outbound processing */
  2063. if (npols > 1)
  2064. xfrm_state_sort(xfrm, tpp, cnx, family);
  2065. return cnx;
  2066. fail:
  2067. for (cnx--; cnx >= 0; cnx--)
  2068. xfrm_state_put(tpp[cnx]);
  2069. return error;
  2070. }
  2071. static int xfrm_get_tos(const struct flowi *fl, int family)
  2072. {
  2073. if (family == AF_INET)
  2074. return IPTOS_RT_MASK & fl->u.ip4.flowi4_tos;
  2075. return 0;
  2076. }
  2077. static inline struct xfrm_dst *xfrm_alloc_dst(struct net *net, int family)
  2078. {
  2079. const struct xfrm_policy_afinfo *afinfo = xfrm_policy_get_afinfo(family);
  2080. struct dst_ops *dst_ops;
  2081. struct xfrm_dst *xdst;
  2082. if (!afinfo)
  2083. return ERR_PTR(-EINVAL);
  2084. switch (family) {
  2085. case AF_INET:
  2086. dst_ops = &net->xfrm.xfrm4_dst_ops;
  2087. break;
  2088. #if IS_ENABLED(CONFIG_IPV6)
  2089. case AF_INET6:
  2090. dst_ops = &net->xfrm.xfrm6_dst_ops;
  2091. break;
  2092. #endif
  2093. default:
  2094. BUG();
  2095. }
  2096. xdst = dst_alloc(dst_ops, NULL, 1, DST_OBSOLETE_NONE, 0);
  2097. if (likely(xdst)) {
  2098. memset_after(xdst, 0, u.dst);
  2099. } else
  2100. xdst = ERR_PTR(-ENOBUFS);
  2101. rcu_read_unlock();
  2102. return xdst;
  2103. }
  2104. static void xfrm_init_path(struct xfrm_dst *path, struct dst_entry *dst,
  2105. int nfheader_len)
  2106. {
  2107. if (dst->ops->family == AF_INET6) {
  2108. struct rt6_info *rt = (struct rt6_info *)dst;
  2109. path->path_cookie = rt6_get_cookie(rt);
  2110. path->u.rt6.rt6i_nfheader_len = nfheader_len;
  2111. }
  2112. }
  2113. static inline int xfrm_fill_dst(struct xfrm_dst *xdst, struct net_device *dev,
  2114. const struct flowi *fl)
  2115. {
  2116. const struct xfrm_policy_afinfo *afinfo =
  2117. xfrm_policy_get_afinfo(xdst->u.dst.ops->family);
  2118. int err;
  2119. if (!afinfo)
  2120. return -EINVAL;
  2121. err = afinfo->fill_dst(xdst, dev, fl);
  2122. rcu_read_unlock();
  2123. return err;
  2124. }
2125. /* Allocate a chain of dst_entry's, attach the known xfrm states, and calculate
2126. * all the metrics... In short, bundle a bundle.
2127. */
  2128. static struct dst_entry *xfrm_bundle_create(struct xfrm_policy *policy,
  2129. struct xfrm_state **xfrm,
  2130. struct xfrm_dst **bundle,
  2131. int nx,
  2132. const struct flowi *fl,
  2133. struct dst_entry *dst)
  2134. {
  2135. const struct xfrm_state_afinfo *afinfo;
  2136. const struct xfrm_mode *inner_mode;
  2137. struct net *net = xp_net(policy);
  2138. unsigned long now = jiffies;
  2139. struct net_device *dev;
  2140. struct xfrm_dst *xdst_prev = NULL;
  2141. struct xfrm_dst *xdst0 = NULL;
  2142. int i = 0;
  2143. int err;
  2144. int header_len = 0;
  2145. int nfheader_len = 0;
  2146. int trailer_len = 0;
  2147. int tos;
  2148. int family = policy->selector.family;
  2149. xfrm_address_t saddr, daddr;
  2150. xfrm_flowi_addr_get(fl, &saddr, &daddr, family);
  2151. tos = xfrm_get_tos(fl, family);
  2152. dst_hold(dst);
  2153. for (; i < nx; i++) {
  2154. struct xfrm_dst *xdst = xfrm_alloc_dst(net, family);
  2155. struct dst_entry *dst1 = &xdst->u.dst;
  2156. err = PTR_ERR(xdst);
  2157. if (IS_ERR(xdst)) {
  2158. dst_release(dst);
  2159. goto put_states;
  2160. }
  2161. bundle[i] = xdst;
  2162. if (!xdst_prev)
  2163. xdst0 = xdst;
  2164. else
2165. /* The reference count is taken during xfrm_alloc_dst(),
2166. * so there is no need to do dst_clone() on dst1.
2167. */
  2168. xfrm_dst_set_child(xdst_prev, &xdst->u.dst);
  2169. if (xfrm[i]->sel.family == AF_UNSPEC) {
  2170. inner_mode = xfrm_ip2inner_mode(xfrm[i],
  2171. xfrm_af2proto(family));
  2172. if (!inner_mode) {
  2173. err = -EAFNOSUPPORT;
  2174. dst_release(dst);
  2175. goto put_states;
  2176. }
  2177. } else
  2178. inner_mode = &xfrm[i]->inner_mode;
  2179. xdst->route = dst;
  2180. dst_copy_metrics(dst1, dst);
  2181. if (xfrm[i]->props.mode != XFRM_MODE_TRANSPORT) {
  2182. __u32 mark = 0;
  2183. int oif;
  2184. if (xfrm[i]->props.smark.v || xfrm[i]->props.smark.m)
  2185. mark = xfrm_smark_get(fl->flowi_mark, xfrm[i]);
  2186. family = xfrm[i]->props.family;
  2187. oif = fl->flowi_oif ? : fl->flowi_l3mdev;
  2188. dst = xfrm_dst_lookup(xfrm[i], tos, oif,
  2189. &saddr, &daddr, family, mark);
  2190. err = PTR_ERR(dst);
  2191. if (IS_ERR(dst))
  2192. goto put_states;
  2193. } else
  2194. dst_hold(dst);
  2195. dst1->xfrm = xfrm[i];
  2196. xdst->xfrm_genid = xfrm[i]->genid;
  2197. dst1->obsolete = DST_OBSOLETE_FORCE_CHK;
  2198. dst1->lastuse = now;
  2199. dst1->input = dst_discard;
  2200. rcu_read_lock();
  2201. afinfo = xfrm_state_afinfo_get_rcu(inner_mode->family);
  2202. if (likely(afinfo))
  2203. dst1->output = afinfo->output;
  2204. else
  2205. dst1->output = dst_discard_out;
  2206. rcu_read_unlock();
  2207. xdst_prev = xdst;
  2208. header_len += xfrm[i]->props.header_len;
  2209. if (xfrm[i]->type->flags & XFRM_TYPE_NON_FRAGMENT)
  2210. nfheader_len += xfrm[i]->props.header_len;
  2211. trailer_len += xfrm[i]->props.trailer_len;
  2212. }
  2213. xfrm_dst_set_child(xdst_prev, dst);
  2214. xdst0->path = dst;
  2215. err = -ENODEV;
  2216. dev = dst->dev;
  2217. if (!dev)
  2218. goto free_dst;
  2219. xfrm_init_path(xdst0, dst, nfheader_len);
  2220. xfrm_init_pmtu(bundle, nx);
  2221. for (xdst_prev = xdst0; xdst_prev != (struct xfrm_dst *)dst;
  2222. xdst_prev = (struct xfrm_dst *) xfrm_dst_child(&xdst_prev->u.dst)) {
  2223. err = xfrm_fill_dst(xdst_prev, dev, fl);
  2224. if (err)
  2225. goto free_dst;
  2226. xdst_prev->u.dst.header_len = header_len;
  2227. xdst_prev->u.dst.trailer_len = trailer_len;
  2228. header_len -= xdst_prev->u.dst.xfrm->props.header_len;
  2229. trailer_len -= xdst_prev->u.dst.xfrm->props.trailer_len;
  2230. }
  2231. return &xdst0->u.dst;
  2232. put_states:
  2233. for (; i < nx; i++)
  2234. xfrm_state_put(xfrm[i]);
  2235. free_dst:
  2236. if (xdst0)
  2237. dst_release_immediate(&xdst0->u.dst);
  2238. return ERR_PTR(err);
  2239. }
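/* Given the policy found for a flow, optionally add the matching main-type
 * policy (when sub-policies are enabled) and compute how many templates the
 * resulting bundle would need; *num_xfrms is set to -1 when a policy blocks
 * the flow.
 */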
  2240. static int xfrm_expand_policies(const struct flowi *fl, u16 family,
  2241. struct xfrm_policy **pols,
  2242. int *num_pols, int *num_xfrms)
  2243. {
  2244. int i;
  2245. if (*num_pols == 0 || !pols[0]) {
  2246. *num_pols = 0;
  2247. *num_xfrms = 0;
  2248. return 0;
  2249. }
  2250. if (IS_ERR(pols[0])) {
  2251. *num_pols = 0;
  2252. return PTR_ERR(pols[0]);
  2253. }
  2254. *num_xfrms = pols[0]->xfrm_nr;
  2255. #ifdef CONFIG_XFRM_SUB_POLICY
  2256. if (pols[0]->action == XFRM_POLICY_ALLOW &&
  2257. pols[0]->type != XFRM_POLICY_TYPE_MAIN) {
  2258. pols[1] = xfrm_policy_lookup_bytype(xp_net(pols[0]),
  2259. XFRM_POLICY_TYPE_MAIN,
  2260. fl, family,
  2261. XFRM_POLICY_OUT,
  2262. pols[0]->if_id);
  2263. if (pols[1]) {
  2264. if (IS_ERR(pols[1])) {
  2265. xfrm_pols_put(pols, *num_pols);
  2266. *num_pols = 0;
  2267. return PTR_ERR(pols[1]);
  2268. }
  2269. (*num_pols)++;
  2270. (*num_xfrms) += pols[1]->xfrm_nr;
  2271. }
  2272. }
  2273. #endif
  2274. for (i = 0; i < *num_pols; i++) {
  2275. if (pols[i]->action != XFRM_POLICY_ALLOW) {
  2276. *num_xfrms = -1;
  2277. break;
  2278. }
  2279. }
  2280. return 0;
  2281. }
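/* Resolve the xfrm_states for all templates of the given policies and, if
 * that succeeds, build the xfrm_dst bundle on top of dst_orig.
 */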
  2282. static struct xfrm_dst *
  2283. xfrm_resolve_and_create_bundle(struct xfrm_policy **pols, int num_pols,
  2284. const struct flowi *fl, u16 family,
  2285. struct dst_entry *dst_orig)
  2286. {
  2287. struct net *net = xp_net(pols[0]);
  2288. struct xfrm_state *xfrm[XFRM_MAX_DEPTH];
  2289. struct xfrm_dst *bundle[XFRM_MAX_DEPTH];
  2290. struct xfrm_dst *xdst;
  2291. struct dst_entry *dst;
  2292. int err;
  2293. /* Try to instantiate a bundle */
  2294. err = xfrm_tmpl_resolve(pols, num_pols, fl, xfrm, family);
  2295. if (err <= 0) {
  2296. if (err == 0)
  2297. return NULL;
  2298. if (err != -EAGAIN)
  2299. XFRM_INC_STATS(net, LINUX_MIB_XFRMOUTPOLERROR);
  2300. return ERR_PTR(err);
  2301. }
  2302. dst = xfrm_bundle_create(pols[0], xfrm, bundle, err, fl, dst_orig);
  2303. if (IS_ERR(dst)) {
  2304. XFRM_INC_STATS(net, LINUX_MIB_XFRMOUTBUNDLEGENERROR);
  2305. return ERR_CAST(dst);
  2306. }
  2307. xdst = (struct xfrm_dst *)dst;
  2308. xdst->num_xfrms = err;
  2309. xdst->num_pols = num_pols;
  2310. memcpy(xdst->pols, pols, sizeof(struct xfrm_policy *) * num_pols);
  2311. xdst->policy_genid = atomic_read(&pols[0]->genid);
  2312. return xdst;
  2313. }
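/* Hold-queue timer handler: try to resolve the bundle again for queued
 * packets and either transmit them, back off exponentially, or purge the
 * queue.
 */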
  2314. static void xfrm_policy_queue_process(struct timer_list *t)
  2315. {
  2316. struct sk_buff *skb;
  2317. struct sock *sk;
  2318. struct dst_entry *dst;
  2319. struct xfrm_policy *pol = from_timer(pol, t, polq.hold_timer);
  2320. struct net *net = xp_net(pol);
  2321. struct xfrm_policy_queue *pq = &pol->polq;
  2322. struct flowi fl;
  2323. struct sk_buff_head list;
  2324. __u32 skb_mark;
  2325. spin_lock(&pq->hold_queue.lock);
  2326. skb = skb_peek(&pq->hold_queue);
  2327. if (!skb) {
  2328. spin_unlock(&pq->hold_queue.lock);
  2329. goto out;
  2330. }
  2331. dst = skb_dst(skb);
  2332. sk = skb->sk;
  2333. /* Fixup the mark to support VTI. */
  2334. skb_mark = skb->mark;
  2335. skb->mark = pol->mark.v;
  2336. xfrm_decode_session(skb, &fl, dst->ops->family);
  2337. skb->mark = skb_mark;
  2338. spin_unlock(&pq->hold_queue.lock);
  2339. dst_hold(xfrm_dst_path(dst));
  2340. dst = xfrm_lookup(net, xfrm_dst_path(dst), &fl, sk, XFRM_LOOKUP_QUEUE);
  2341. if (IS_ERR(dst))
  2342. goto purge_queue;
  2343. if (dst->flags & DST_XFRM_QUEUE) {
  2344. dst_release(dst);
  2345. if (pq->timeout >= XFRM_QUEUE_TMO_MAX)
  2346. goto purge_queue;
  2347. pq->timeout = pq->timeout << 1;
  2348. if (!mod_timer(&pq->hold_timer, jiffies + pq->timeout))
  2349. xfrm_pol_hold(pol);
  2350. goto out;
  2351. }
  2352. dst_release(dst);
  2353. __skb_queue_head_init(&list);
  2354. spin_lock(&pq->hold_queue.lock);
  2355. pq->timeout = 0;
  2356. skb_queue_splice_init(&pq->hold_queue, &list);
  2357. spin_unlock(&pq->hold_queue.lock);
  2358. while (!skb_queue_empty(&list)) {
  2359. skb = __skb_dequeue(&list);
  2360. /* Fixup the mark to support VTI. */
  2361. skb_mark = skb->mark;
  2362. skb->mark = pol->mark.v;
  2363. xfrm_decode_session(skb, &fl, skb_dst(skb)->ops->family);
  2364. skb->mark = skb_mark;
  2365. dst_hold(xfrm_dst_path(skb_dst(skb)));
  2366. dst = xfrm_lookup(net, xfrm_dst_path(skb_dst(skb)), &fl, skb->sk, 0);
  2367. if (IS_ERR(dst)) {
  2368. kfree_skb(skb);
  2369. continue;
  2370. }
  2371. nf_reset_ct(skb);
  2372. skb_dst_drop(skb);
  2373. skb_dst_set(skb, dst);
  2374. dst_output(net, skb->sk, skb);
  2375. }
  2376. out:
  2377. xfrm_pol_put(pol);
  2378. return;
  2379. purge_queue:
  2380. pq->timeout = 0;
  2381. skb_queue_purge(&pq->hold_queue);
  2382. xfrm_pol_put(pol);
  2383. }
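/* Output handler for dummy bundles: park the skb on the policy's hold queue
 * until the needed xfrm_states have been negotiated.
 */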
  2384. static int xdst_queue_output(struct net *net, struct sock *sk, struct sk_buff *skb)
  2385. {
  2386. unsigned long sched_next;
  2387. struct dst_entry *dst = skb_dst(skb);
  2388. struct xfrm_dst *xdst = (struct xfrm_dst *) dst;
  2389. struct xfrm_policy *pol = xdst->pols[0];
  2390. struct xfrm_policy_queue *pq = &pol->polq;
  2391. if (unlikely(skb_fclone_busy(sk, skb))) {
  2392. kfree_skb(skb);
  2393. return 0;
  2394. }
  2395. if (pq->hold_queue.qlen > XFRM_MAX_QUEUE_LEN) {
  2396. kfree_skb(skb);
  2397. return -EAGAIN;
  2398. }
  2399. skb_dst_force(skb);
  2400. spin_lock_bh(&pq->hold_queue.lock);
  2401. if (!pq->timeout)
  2402. pq->timeout = XFRM_QUEUE_TMO_MIN;
  2403. sched_next = jiffies + pq->timeout;
  2404. if (del_timer(&pq->hold_timer)) {
  2405. if (time_before(pq->hold_timer.expires, sched_next))
  2406. sched_next = pq->hold_timer.expires;
  2407. xfrm_pol_put(pol);
  2408. }
  2409. __skb_queue_tail(&pq->hold_queue, skb);
  2410. if (!mod_timer(&pq->hold_timer, sched_next))
  2411. xfrm_pol_hold(pol);
  2412. spin_unlock_bh(&pq->hold_queue.lock);
  2413. return 0;
  2414. }
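/* Build a "dummy" bundle with no transformations whose output function just
 * queues packets while we wait for the key manager to negotiate states.
 */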
  2415. static struct xfrm_dst *xfrm_create_dummy_bundle(struct net *net,
  2416. struct xfrm_flo *xflo,
  2417. const struct flowi *fl,
  2418. int num_xfrms,
  2419. u16 family)
  2420. {
  2421. int err;
  2422. struct net_device *dev;
  2423. struct dst_entry *dst;
  2424. struct dst_entry *dst1;
  2425. struct xfrm_dst *xdst;
  2426. xdst = xfrm_alloc_dst(net, family);
  2427. if (IS_ERR(xdst))
  2428. return xdst;
  2429. if (!(xflo->flags & XFRM_LOOKUP_QUEUE) ||
  2430. net->xfrm.sysctl_larval_drop ||
  2431. num_xfrms <= 0)
  2432. return xdst;
  2433. dst = xflo->dst_orig;
  2434. dst1 = &xdst->u.dst;
  2435. dst_hold(dst);
  2436. xdst->route = dst;
  2437. dst_copy_metrics(dst1, dst);
  2438. dst1->obsolete = DST_OBSOLETE_FORCE_CHK;
  2439. dst1->flags |= DST_XFRM_QUEUE;
  2440. dst1->lastuse = jiffies;
  2441. dst1->input = dst_discard;
  2442. dst1->output = xdst_queue_output;
  2443. dst_hold(dst);
  2444. xfrm_dst_set_child(xdst, dst);
  2445. xdst->path = dst;
  2446. xfrm_init_path((struct xfrm_dst *)dst1, dst, 0);
  2447. err = -ENODEV;
  2448. dev = dst->dev;
  2449. if (!dev)
  2450. goto free_dst;
  2451. err = xfrm_fill_dst(xdst, dev, fl);
  2452. if (err)
  2453. goto free_dst;
  2454. out:
  2455. return xdst;
  2456. free_dst:
  2457. dst_release(dst1);
  2458. xdst = ERR_PTR(err);
  2459. goto out;
  2460. }
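/* Look up the policies for a flow and turn them into an xfrm_dst bundle,
 * falling back to a dummy (packet-queueing) bundle when the needed states
 * are not available yet.
 */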
  2461. static struct xfrm_dst *xfrm_bundle_lookup(struct net *net,
  2462. const struct flowi *fl,
  2463. u16 family, u8 dir,
  2464. struct xfrm_flo *xflo, u32 if_id)
  2465. {
  2466. struct xfrm_policy *pols[XFRM_POLICY_TYPE_MAX];
  2467. int num_pols = 0, num_xfrms = 0, err;
  2468. struct xfrm_dst *xdst;
  2469. /* Resolve policies to use if we couldn't get them from
  2470. * previous cache entry */
  2471. num_pols = 1;
  2472. pols[0] = xfrm_policy_lookup(net, fl, family, dir, if_id);
  2473. err = xfrm_expand_policies(fl, family, pols,
  2474. &num_pols, &num_xfrms);
  2475. if (err < 0)
  2476. goto inc_error;
  2477. if (num_pols == 0)
  2478. return NULL;
  2479. if (num_xfrms <= 0)
  2480. goto make_dummy_bundle;
  2481. xdst = xfrm_resolve_and_create_bundle(pols, num_pols, fl, family,
  2482. xflo->dst_orig);
  2483. if (IS_ERR(xdst)) {
  2484. err = PTR_ERR(xdst);
  2485. if (err == -EREMOTE) {
  2486. xfrm_pols_put(pols, num_pols);
  2487. return NULL;
  2488. }
  2489. if (err != -EAGAIN)
  2490. goto error;
  2491. goto make_dummy_bundle;
  2492. } else if (xdst == NULL) {
  2493. num_xfrms = 0;
  2494. goto make_dummy_bundle;
  2495. }
  2496. return xdst;
  2497. make_dummy_bundle:
2498. /* We found policies, but there are no bundles to instantiate:
2499. * either because the policy blocks, has no transformations, or
2500. * we could not build a template (no xfrm_states). */
  2501. xdst = xfrm_create_dummy_bundle(net, xflo, fl, num_xfrms, family);
  2502. if (IS_ERR(xdst)) {
  2503. xfrm_pols_put(pols, num_pols);
  2504. return ERR_CAST(xdst);
  2505. }
  2506. xdst->num_pols = num_pols;
  2507. xdst->num_xfrms = num_xfrms;
  2508. memcpy(xdst->pols, pols, sizeof(struct xfrm_policy *) * num_pols);
  2509. return xdst;
  2510. inc_error:
  2511. XFRM_INC_STATS(net, LINUX_MIB_XFRMOUTPOLERROR);
  2512. error:
  2513. xfrm_pols_put(pols, num_pols);
  2514. return ERR_PTR(err);
  2515. }
  2516. static struct dst_entry *make_blackhole(struct net *net, u16 family,
  2517. struct dst_entry *dst_orig)
  2518. {
  2519. const struct xfrm_policy_afinfo *afinfo = xfrm_policy_get_afinfo(family);
  2520. struct dst_entry *ret;
  2521. if (!afinfo) {
  2522. dst_release(dst_orig);
  2523. return ERR_PTR(-EINVAL);
  2524. } else {
  2525. ret = afinfo->blackhole_route(net, dst_orig);
  2526. }
  2527. rcu_read_unlock();
  2528. return ret;
  2529. }
2530. /* Finds/creates a bundle for a given flow and if_id.
2531. *
2532. * At the moment we eat a raw IP route, mostly to speed up lookups
2533. * on interfaces with IPsec disabled.
2534. *
2535. * xfrm_lookup() uses an if_id of 0 by default and is provided for
2536. * compatibility.
2537. */
  2538. struct dst_entry *xfrm_lookup_with_ifid(struct net *net,
  2539. struct dst_entry *dst_orig,
  2540. const struct flowi *fl,
  2541. const struct sock *sk,
  2542. int flags, u32 if_id)
  2543. {
  2544. struct xfrm_policy *pols[XFRM_POLICY_TYPE_MAX];
  2545. struct xfrm_dst *xdst;
  2546. struct dst_entry *dst, *route;
  2547. u16 family = dst_orig->ops->family;
  2548. u8 dir = XFRM_POLICY_OUT;
  2549. int i, err, num_pols, num_xfrms = 0, drop_pols = 0;
  2550. dst = NULL;
  2551. xdst = NULL;
  2552. route = NULL;
  2553. sk = sk_const_to_full_sk(sk);
  2554. if (sk && sk->sk_policy[XFRM_POLICY_OUT]) {
  2555. num_pols = 1;
  2556. pols[0] = xfrm_sk_policy_lookup(sk, XFRM_POLICY_OUT, fl, family,
  2557. if_id);
  2558. err = xfrm_expand_policies(fl, family, pols,
  2559. &num_pols, &num_xfrms);
  2560. if (err < 0)
  2561. goto dropdst;
  2562. if (num_pols) {
  2563. if (num_xfrms <= 0) {
  2564. drop_pols = num_pols;
  2565. goto no_transform;
  2566. }
  2567. xdst = xfrm_resolve_and_create_bundle(
  2568. pols, num_pols, fl,
  2569. family, dst_orig);
  2570. if (IS_ERR(xdst)) {
  2571. xfrm_pols_put(pols, num_pols);
  2572. err = PTR_ERR(xdst);
  2573. if (err == -EREMOTE)
  2574. goto nopol;
  2575. goto dropdst;
  2576. } else if (xdst == NULL) {
  2577. num_xfrms = 0;
  2578. drop_pols = num_pols;
  2579. goto no_transform;
  2580. }
  2581. route = xdst->route;
  2582. }
  2583. }
  2584. if (xdst == NULL) {
  2585. struct xfrm_flo xflo;
  2586. xflo.dst_orig = dst_orig;
  2587. xflo.flags = flags;
  2588. /* To accelerate a bit... */
  2589. if (!if_id && ((dst_orig->flags & DST_NOXFRM) ||
  2590. !net->xfrm.policy_count[XFRM_POLICY_OUT]))
  2591. goto nopol;
  2592. xdst = xfrm_bundle_lookup(net, fl, family, dir, &xflo, if_id);
  2593. if (xdst == NULL)
  2594. goto nopol;
  2595. if (IS_ERR(xdst)) {
  2596. err = PTR_ERR(xdst);
  2597. goto dropdst;
  2598. }
  2599. num_pols = xdst->num_pols;
  2600. num_xfrms = xdst->num_xfrms;
  2601. memcpy(pols, xdst->pols, sizeof(struct xfrm_policy *) * num_pols);
  2602. route = xdst->route;
  2603. }
  2604. dst = &xdst->u.dst;
  2605. if (route == NULL && num_xfrms > 0) {
2606. /* The only case in which xfrm_bundle_lookup() returns a
2607. * bundle with a null route is when the template could
2608. * not be resolved. It means the policies are there, but
2609. * the bundle could not be created, since we don't yet
2610. * have the xfrm_states. We need to wait for the KM to
2611. * negotiate new SAs or bail out with an error. */
  2612. if (net->xfrm.sysctl_larval_drop) {
  2613. XFRM_INC_STATS(net, LINUX_MIB_XFRMOUTNOSTATES);
  2614. err = -EREMOTE;
  2615. goto error;
  2616. }
  2617. err = -EAGAIN;
  2618. XFRM_INC_STATS(net, LINUX_MIB_XFRMOUTNOSTATES);
  2619. goto error;
  2620. }
  2621. no_transform:
  2622. if (num_pols == 0)
  2623. goto nopol;
  2624. if ((flags & XFRM_LOOKUP_ICMP) &&
  2625. !(pols[0]->flags & XFRM_POLICY_ICMP)) {
  2626. err = -ENOENT;
  2627. goto error;
  2628. }
  2629. for (i = 0; i < num_pols; i++)
  2630. WRITE_ONCE(pols[i]->curlft.use_time, ktime_get_real_seconds());
  2631. if (num_xfrms < 0) {
  2632. /* Prohibit the flow */
  2633. XFRM_INC_STATS(net, LINUX_MIB_XFRMOUTPOLBLOCK);
  2634. err = -EPERM;
  2635. goto error;
  2636. } else if (num_xfrms > 0) {
  2637. /* Flow transformed */
  2638. dst_release(dst_orig);
  2639. } else {
  2640. /* Flow passes untransformed */
  2641. dst_release(dst);
  2642. dst = dst_orig;
  2643. }
  2644. ok:
  2645. xfrm_pols_put(pols, drop_pols);
  2646. if (dst && dst->xfrm &&
  2647. dst->xfrm->props.mode == XFRM_MODE_TUNNEL)
  2648. dst->flags |= DST_XFRM_TUNNEL;
  2649. return dst;
  2650. nopol:
  2651. if ((!dst_orig->dev || !(dst_orig->dev->flags & IFF_LOOPBACK)) &&
  2652. net->xfrm.policy_default[dir] == XFRM_USERPOLICY_BLOCK) {
  2653. err = -EPERM;
  2654. goto error;
  2655. }
  2656. if (!(flags & XFRM_LOOKUP_ICMP)) {
  2657. dst = dst_orig;
  2658. goto ok;
  2659. }
  2660. err = -ENOENT;
  2661. error:
  2662. dst_release(dst);
  2663. dropdst:
  2664. if (!(flags & XFRM_LOOKUP_KEEP_DST_REF))
  2665. dst_release(dst_orig);
  2666. xfrm_pols_put(pols, drop_pols);
  2667. return ERR_PTR(err);
  2668. }
  2669. EXPORT_SYMBOL(xfrm_lookup_with_ifid);
2670. /* Main function: finds/creates a bundle for a given flow.
2671. *
2672. * At the moment we eat a raw IP route, mostly to speed up lookups
2673. * on interfaces with IPsec disabled.
2674. */
  2675. struct dst_entry *xfrm_lookup(struct net *net, struct dst_entry *dst_orig,
  2676. const struct flowi *fl, const struct sock *sk,
  2677. int flags)
  2678. {
  2679. return xfrm_lookup_with_ifid(net, dst_orig, fl, sk, flags, 0);
  2680. }
  2681. EXPORT_SYMBOL(xfrm_lookup);
  2682. /* Callers of xfrm_lookup_route() must ensure a call to dst_output().
  2683. * Otherwise we may send out blackholed packets.
  2684. */
  2685. struct dst_entry *xfrm_lookup_route(struct net *net, struct dst_entry *dst_orig,
  2686. const struct flowi *fl,
  2687. const struct sock *sk, int flags)
  2688. {
  2689. struct dst_entry *dst = xfrm_lookup(net, dst_orig, fl, sk,
  2690. flags | XFRM_LOOKUP_QUEUE |
  2691. XFRM_LOOKUP_KEEP_DST_REF);
  2692. if (PTR_ERR(dst) == -EREMOTE)
  2693. return make_blackhole(net, dst_orig->ops->family, dst_orig);
  2694. if (IS_ERR(dst))
  2695. dst_release(dst_orig);
  2696. return dst;
  2697. }
  2698. EXPORT_SYMBOL(xfrm_lookup_route);
  2699. static inline int
  2700. xfrm_secpath_reject(int idx, struct sk_buff *skb, const struct flowi *fl)
  2701. {
  2702. struct sec_path *sp = skb_sec_path(skb);
  2703. struct xfrm_state *x;
  2704. if (!sp || idx < 0 || idx >= sp->len)
  2705. return 0;
  2706. x = sp->xvec[idx];
  2707. if (!x->type->reject)
  2708. return 0;
  2709. return x->type->reject(x, skb, fl);
  2710. }
2711. /* When the skb is transformed back to its "native" form, we have to
2712. * check policy restrictions. At the moment we do this in a maximally
2713. * stupid way. Shame on me. :-) Of course, connected sockets must
2714. * have the policy cached at them.
2715. */
  2716. static inline int
  2717. xfrm_state_ok(const struct xfrm_tmpl *tmpl, const struct xfrm_state *x,
  2718. unsigned short family, u32 if_id)
  2719. {
  2720. if (xfrm_state_kern(x))
  2721. return tmpl->optional && !xfrm_state_addr_cmp(tmpl, x, tmpl->encap_family);
  2722. return x->id.proto == tmpl->id.proto &&
  2723. (x->id.spi == tmpl->id.spi || !tmpl->id.spi) &&
  2724. (x->props.reqid == tmpl->reqid || !tmpl->reqid) &&
  2725. x->props.mode == tmpl->mode &&
  2726. (tmpl->allalgs || (tmpl->aalgos & (1<<x->props.aalgo)) ||
  2727. !(xfrm_id_proto_match(tmpl->id.proto, IPSEC_PROTO_ANY))) &&
  2728. !(x->props.mode != XFRM_MODE_TRANSPORT &&
  2729. xfrm_state_addr_cmp(tmpl, x, family)) &&
  2730. (if_id == 0 || if_id == x->if_id);
  2731. }
2732. /*
2733. * 0 or more is returned when validation succeeds (either a bypass because
2734. * of an optional transport-mode template, or the next index after the
2735. * secpath state that matched the template).
2736. * -1 is returned when no matching template is found.
2737. * Otherwise "-2 - errored_index" is returned.
2738. */
  2739. static inline int
  2740. xfrm_policy_ok(const struct xfrm_tmpl *tmpl, const struct sec_path *sp, int start,
  2741. unsigned short family, u32 if_id)
  2742. {
  2743. int idx = start;
  2744. if (tmpl->optional) {
  2745. if (tmpl->mode == XFRM_MODE_TRANSPORT)
  2746. return start;
  2747. } else
  2748. start = -1;
  2749. for (; idx < sp->len; idx++) {
  2750. if (xfrm_state_ok(tmpl, sp->xvec[idx], family, if_id))
  2751. return ++idx;
  2752. if (sp->xvec[idx]->props.mode != XFRM_MODE_TRANSPORT) {
  2753. if (idx < sp->verified_cnt) {
  2754. /* Secpath entry previously verified, consider optional and
  2755. * continue searching
  2756. */
  2757. continue;
  2758. }
  2759. if (start == -1)
  2760. start = -2-idx;
  2761. break;
  2762. }
  2763. }
  2764. return start;
  2765. }
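/* Extract a flowi4 from the packet headers (addresses, protocol, ports,
 * ICMP type/code or GRE key), optionally swapping source and destination
 * for reverse lookups.
 */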
  2766. static void
  2767. decode_session4(struct sk_buff *skb, struct flowi *fl, bool reverse)
  2768. {
  2769. const struct iphdr *iph = ip_hdr(skb);
  2770. int ihl = iph->ihl;
  2771. u8 *xprth = skb_network_header(skb) + ihl * 4;
  2772. struct flowi4 *fl4 = &fl->u.ip4;
  2773. int oif = 0;
  2774. if (skb_dst(skb) && skb_dst(skb)->dev)
  2775. oif = skb_dst(skb)->dev->ifindex;
  2776. memset(fl4, 0, sizeof(struct flowi4));
  2777. fl4->flowi4_mark = skb->mark;
  2778. fl4->flowi4_oif = reverse ? skb->skb_iif : oif;
  2779. fl4->flowi4_proto = iph->protocol;
  2780. fl4->daddr = reverse ? iph->saddr : iph->daddr;
  2781. fl4->saddr = reverse ? iph->daddr : iph->saddr;
  2782. fl4->flowi4_tos = iph->tos & ~INET_ECN_MASK;
  2783. if (!ip_is_fragment(iph)) {
  2784. switch (iph->protocol) {
  2785. case IPPROTO_UDP:
  2786. case IPPROTO_UDPLITE:
  2787. case IPPROTO_TCP:
  2788. case IPPROTO_SCTP:
  2789. case IPPROTO_DCCP:
  2790. if (xprth + 4 < skb->data ||
  2791. pskb_may_pull(skb, xprth + 4 - skb->data)) {
  2792. __be16 *ports;
  2793. xprth = skb_network_header(skb) + ihl * 4;
  2794. ports = (__be16 *)xprth;
  2795. fl4->fl4_sport = ports[!!reverse];
  2796. fl4->fl4_dport = ports[!reverse];
  2797. }
  2798. break;
  2799. case IPPROTO_ICMP:
  2800. if (xprth + 2 < skb->data ||
  2801. pskb_may_pull(skb, xprth + 2 - skb->data)) {
  2802. u8 *icmp;
  2803. xprth = skb_network_header(skb) + ihl * 4;
  2804. icmp = xprth;
  2805. fl4->fl4_icmp_type = icmp[0];
  2806. fl4->fl4_icmp_code = icmp[1];
  2807. }
  2808. break;
  2809. case IPPROTO_GRE:
  2810. if (xprth + 12 < skb->data ||
  2811. pskb_may_pull(skb, xprth + 12 - skb->data)) {
  2812. __be16 *greflags;
  2813. __be32 *gre_hdr;
  2814. xprth = skb_network_header(skb) + ihl * 4;
  2815. greflags = (__be16 *)xprth;
  2816. gre_hdr = (__be32 *)xprth;
  2817. if (greflags[0] & GRE_KEY) {
  2818. if (greflags[0] & GRE_CSUM)
  2819. gre_hdr++;
  2820. fl4->fl4_gre_key = gre_hdr[1];
  2821. }
  2822. }
  2823. break;
  2824. default:
  2825. break;
  2826. }
  2827. }
  2828. }
  2829. #if IS_ENABLED(CONFIG_IPV6)
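/* IPv6 counterpart of decode_session4(): walk the extension header chain to
 * find the upper-layer protocol and fill in the flowi6 fields.
 */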
static void
decode_session6(struct sk_buff *skb, struct flowi *fl, bool reverse)
{
	struct flowi6 *fl6 = &fl->u.ip6;
	int onlyproto = 0;
	const struct ipv6hdr *hdr = ipv6_hdr(skb);
	u32 offset = sizeof(*hdr);
	struct ipv6_opt_hdr *exthdr;
	const unsigned char *nh = skb_network_header(skb);
	u16 nhoff = IP6CB(skb)->nhoff;
	int oif = 0;
	u8 nexthdr;

	if (!nhoff)
		nhoff = offsetof(struct ipv6hdr, nexthdr);

	nexthdr = nh[nhoff];

	if (skb_dst(skb) && skb_dst(skb)->dev)
		oif = skb_dst(skb)->dev->ifindex;

	memset(fl6, 0, sizeof(struct flowi6));
	fl6->flowi6_mark = skb->mark;
	fl6->flowi6_oif = reverse ? skb->skb_iif : oif;

	fl6->daddr = reverse ? hdr->saddr : hdr->daddr;
	fl6->saddr = reverse ? hdr->daddr : hdr->saddr;

	while (nh + offset + sizeof(*exthdr) < skb->data ||
	       pskb_may_pull(skb, nh + offset + sizeof(*exthdr) - skb->data)) {
		nh = skb_network_header(skb);
		exthdr = (struct ipv6_opt_hdr *)(nh + offset);

		switch (nexthdr) {
		case NEXTHDR_FRAGMENT:
			onlyproto = 1;
			fallthrough;
		case NEXTHDR_ROUTING:
		case NEXTHDR_HOP:
		case NEXTHDR_DEST:
			offset += ipv6_optlen(exthdr);
			nexthdr = exthdr->nexthdr;
			break;
		case IPPROTO_UDP:
		case IPPROTO_UDPLITE:
		case IPPROTO_TCP:
		case IPPROTO_SCTP:
		case IPPROTO_DCCP:
			if (!onlyproto && (nh + offset + 4 < skb->data ||
			     pskb_may_pull(skb, nh + offset + 4 - skb->data))) {
				__be16 *ports;

				nh = skb_network_header(skb);
				ports = (__be16 *)(nh + offset);
				fl6->fl6_sport = ports[!!reverse];
				fl6->fl6_dport = ports[!reverse];
			}
			fl6->flowi6_proto = nexthdr;
			return;
		case IPPROTO_ICMPV6:
			if (!onlyproto && (nh + offset + 2 < skb->data ||
			    pskb_may_pull(skb, nh + offset + 2 - skb->data))) {
				u8 *icmp;

				nh = skb_network_header(skb);
				icmp = (u8 *)(nh + offset);
				fl6->fl6_icmp_type = icmp[0];
				fl6->fl6_icmp_code = icmp[1];
			}
			fl6->flowi6_proto = nexthdr;
			return;
		case IPPROTO_GRE:
			if (!onlyproto &&
			    (nh + offset + 12 < skb->data ||
			     pskb_may_pull(skb, nh + offset + 12 - skb->data))) {
				struct gre_base_hdr *gre_hdr;
				__be32 *gre_key;

				nh = skb_network_header(skb);
				gre_hdr = (struct gre_base_hdr *)(nh + offset);
				gre_key = (__be32 *)(gre_hdr + 1);

				if (gre_hdr->flags & GRE_KEY) {
					if (gre_hdr->flags & GRE_CSUM)
						gre_key++;
					fl6->fl6_gre_key = *gre_key;
				}
			}
			fl6->flowi6_proto = nexthdr;
			return;
#if IS_ENABLED(CONFIG_IPV6_MIP6)
		case IPPROTO_MH:
			offset += ipv6_optlen(exthdr);
			if (!onlyproto && (nh + offset + 3 < skb->data ||
			    pskb_may_pull(skb, nh + offset + 3 - skb->data))) {
				struct ip6_mh *mh;

				nh = skb_network_header(skb);
				mh = (struct ip6_mh *)(nh + offset);
				fl6->fl6_mh_type = mh->ip6mh_type;
			}
			fl6->flowi6_proto = nexthdr;
			return;
#endif
		default:
			fl6->flowi6_proto = nexthdr;
			return;
		}
	}
}
#endif

int __xfrm_decode_session(struct sk_buff *skb, struct flowi *fl,
			  unsigned int family, int reverse)
{
	switch (family) {
	case AF_INET:
		decode_session4(skb, fl, reverse);
		break;
#if IS_ENABLED(CONFIG_IPV6)
	case AF_INET6:
		decode_session6(skb, fl, reverse);
		break;
#endif
	default:
		return -EAFNOSUPPORT;
	}

	return security_xfrm_decode_session(skb, &fl->flowi_secid);
}
EXPORT_SYMBOL(__xfrm_decode_session);

static inline int secpath_has_nontransport(const struct sec_path *sp, int k, int *idxp)
{
	for (; k < sp->len; k++) {
		if (sp->xvec[k]->props.mode != XFRM_MODE_TRANSPORT) {
			*idxp = k;
			return 1;
		}
	}

	return 0;
}
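
/* Inbound policy check: decode the flow from the packet, verify each SA in
 * the secpath against its selector, look up a socket policy and then a main
 * (and optionally sub) policy, fall back to the per-netns default action when
 * no policy matches, and finally check the policy templates against the
 * secpath.  Returns 1 when the packet may pass, 0 when it must be dropped.
 */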
int __xfrm_policy_check(struct sock *sk, int dir, struct sk_buff *skb,
			unsigned short family)
{
	struct net *net = dev_net(skb->dev);
	struct xfrm_policy *pol;
	struct xfrm_policy *pols[XFRM_POLICY_TYPE_MAX];
	int npols = 0;
	int xfrm_nr;
	int pi;
	int reverse;
	struct flowi fl;
	int xerr_idx = -1;
	const struct xfrm_if_cb *ifcb;
	struct sec_path *sp;
	u32 if_id = 0;

	rcu_read_lock();
	ifcb = xfrm_if_get_cb();

	if (ifcb) {
		struct xfrm_if_decode_session_result r;

		if (ifcb->decode_session(skb, family, &r)) {
			if_id = r.if_id;
			net = r.net;
		}
	}
	rcu_read_unlock();

	reverse = dir & ~XFRM_POLICY_MASK;
	dir &= XFRM_POLICY_MASK;

	if (__xfrm_decode_session(skb, &fl, family, reverse) < 0) {
		XFRM_INC_STATS(net, LINUX_MIB_XFRMINHDRERROR);
		return 0;
	}

	nf_nat_decode_session(skb, &fl, family);

	/* First, check used SA against their selectors. */
	sp = skb_sec_path(skb);
	if (sp) {
		int i;

		for (i = sp->len - 1; i >= 0; i--) {
			struct xfrm_state *x = sp->xvec[i];

			if (!xfrm_selector_match(&x->sel, &fl, family)) {
				XFRM_INC_STATS(net, LINUX_MIB_XFRMINSTATEMISMATCH);
				return 0;
			}
		}
	}

	pol = NULL;
	sk = sk_to_full_sk(sk);
	if (sk && sk->sk_policy[dir]) {
		pol = xfrm_sk_policy_lookup(sk, dir, &fl, family, if_id);
		if (IS_ERR(pol)) {
			XFRM_INC_STATS(net, LINUX_MIB_XFRMINPOLERROR);
			return 0;
		}
	}

	if (!pol)
		pol = xfrm_policy_lookup(net, &fl, family, dir, if_id);

	if (IS_ERR(pol)) {
		XFRM_INC_STATS(net, LINUX_MIB_XFRMINPOLERROR);
		return 0;
	}

	if (!pol) {
		if (net->xfrm.policy_default[dir] == XFRM_USERPOLICY_BLOCK) {
			XFRM_INC_STATS(net, LINUX_MIB_XFRMINNOPOLS);
			return 0;
		}

		if (sp && secpath_has_nontransport(sp, 0, &xerr_idx)) {
			xfrm_secpath_reject(xerr_idx, skb, &fl);
			XFRM_INC_STATS(net, LINUX_MIB_XFRMINNOPOLS);
			return 0;
		}
		return 1;
	}

	/* This lockless write can happen from different cpus. */
	WRITE_ONCE(pol->curlft.use_time, ktime_get_real_seconds());

	pols[0] = pol;
	npols++;
#ifdef CONFIG_XFRM_SUB_POLICY
	if (pols[0]->type != XFRM_POLICY_TYPE_MAIN) {
		pols[1] = xfrm_policy_lookup_bytype(net, XFRM_POLICY_TYPE_MAIN,
						    &fl, family,
						    XFRM_POLICY_IN, if_id);
		if (pols[1]) {
			if (IS_ERR(pols[1])) {
				XFRM_INC_STATS(net, LINUX_MIB_XFRMINPOLERROR);
				xfrm_pol_put(pols[0]);
				return 0;
			}
			/* This write can happen from different cpus. */
			WRITE_ONCE(pols[1]->curlft.use_time,
				   ktime_get_real_seconds());
			npols++;
		}
	}
#endif

	if (pol->action == XFRM_POLICY_ALLOW) {
		static struct sec_path dummy;
		struct xfrm_tmpl *tp[XFRM_MAX_DEPTH];
		struct xfrm_tmpl *stp[XFRM_MAX_DEPTH];
		struct xfrm_tmpl **tpp = tp;
		int ti = 0;
		int i, k;

		sp = skb_sec_path(skb);
		if (!sp)
			sp = &dummy;

		for (pi = 0; pi < npols; pi++) {
			if (pols[pi] != pol &&
			    pols[pi]->action != XFRM_POLICY_ALLOW) {
				XFRM_INC_STATS(net, LINUX_MIB_XFRMINPOLBLOCK);
				goto reject;
			}
			if (ti + pols[pi]->xfrm_nr >= XFRM_MAX_DEPTH) {
				XFRM_INC_STATS(net, LINUX_MIB_XFRMINBUFFERERROR);
				goto reject_error;
			}
			for (i = 0; i < pols[pi]->xfrm_nr; i++)
				tpp[ti++] = &pols[pi]->xfrm_vec[i];
		}
		xfrm_nr = ti;

		if (npols > 1) {
			xfrm_tmpl_sort(stp, tpp, xfrm_nr, family);
			tpp = stp;
		}

		/* For each tunnel xfrm, find the first matching tmpl.
		 * For each tmpl before that, find corresponding xfrm.
		 * Order is _important_. Later we will implement
		 * some barriers, but at the moment barriers
		 * are implied between each two transformations.
		 * Upon success, marks secpath entries as having been
		 * verified to allow them to be skipped in future policy
		 * checks (e.g. nested tunnels).
		 */
		for (i = xfrm_nr-1, k = 0; i >= 0; i--) {
			k = xfrm_policy_ok(tpp[i], sp, k, family, if_id);
			if (k < 0) {
				if (k < -1)
					/* "-2 - errored_index" returned */
					xerr_idx = -(2+k);
				XFRM_INC_STATS(net, LINUX_MIB_XFRMINTMPLMISMATCH);
				goto reject;
			}
		}

		if (secpath_has_nontransport(sp, k, &xerr_idx)) {
			XFRM_INC_STATS(net, LINUX_MIB_XFRMINTMPLMISMATCH);
			goto reject;
		}

		xfrm_pols_put(pols, npols);
		sp->verified_cnt = k;

		return 1;
	}
	XFRM_INC_STATS(net, LINUX_MIB_XFRMINPOLBLOCK);

reject:
	xfrm_secpath_reject(xerr_idx, skb, &fl);
reject_error:
	xfrm_pols_put(pols, npols);
	return 0;
}
EXPORT_SYMBOL(__xfrm_policy_check);
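
/* Replace the route of a forwarded packet with an XFRM bundle looked up for
 * its flow.  On header-decode or lookup failure the packet is dropped by
 * returning 0 (the dst is cleared in the lookup-error case).
 */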
int __xfrm_route_forward(struct sk_buff *skb, unsigned short family)
{
	struct net *net = dev_net(skb->dev);
	struct flowi fl;
	struct dst_entry *dst;
	int res = 1;

	if (xfrm_decode_session(skb, &fl, family) < 0) {
		XFRM_INC_STATS(net, LINUX_MIB_XFRMFWDHDRERROR);
		return 0;
	}

	skb_dst_force(skb);
	if (!skb_dst(skb)) {
		XFRM_INC_STATS(net, LINUX_MIB_XFRMFWDHDRERROR);
		return 0;
	}

	dst = xfrm_lookup(net, skb_dst(skb), &fl, NULL, XFRM_LOOKUP_QUEUE);
	if (IS_ERR(dst)) {
		res = 0;
		dst = NULL;
	}
	skb_dst_set(skb, dst);
	return res;
}
EXPORT_SYMBOL(__xfrm_route_forward);

/* Optimize later using cookies and generation ids. */
static struct dst_entry *xfrm_dst_check(struct dst_entry *dst, u32 cookie)
{
	/* Code (such as __xfrm4_bundle_create()) sets dst->obsolete
	 * to DST_OBSOLETE_FORCE_CHK to force all XFRM destinations to
	 * get validated by dst_ops->check on every use. We do this
	 * because when a normal route referenced by an XFRM dst is
	 * obsoleted we do not go looking around for all parent
	 * referencing XFRM dsts so that we can invalidate them. It
	 * is just too much work. Instead we make the checks here on
	 * every use. For example:
	 *
	 *	XFRM dst A --> IPv4 dst X
	 *
	 * X is the "xdst->route" of A (X is also the "dst->path" of A
	 * in this example). If X is marked obsolete, "A" will not
	 * notice. That's what we are validating here via the
	 * stale_bundle() check.
	 *
	 * When a dst is removed from the fib tree, DST_OBSOLETE_DEAD will
	 * be marked on it.
	 * This will force stale_bundle() to fail on any xdst bundle with
	 * this dst linked in it.
	 */
	if (dst->obsolete < 0 && !stale_bundle(dst))
		return dst;

	return NULL;
}

static int stale_bundle(struct dst_entry *dst)
{
	return !xfrm_bundle_ok((struct xfrm_dst *)dst);
}
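
/* Detach a bundle from a device that is going away: every child xdst that
 * still points at @dev is switched over to blackhole_netdev, with the device
 * reference counts adjusted accordingly.
 */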
void xfrm_dst_ifdown(struct dst_entry *dst, struct net_device *dev)
{
	while ((dst = xfrm_dst_child(dst)) && dst->xfrm && dst->dev == dev) {
		dst->dev = blackhole_netdev;
		dev_hold(dst->dev);
		dev_put(dev);
	}
}
EXPORT_SYMBOL(xfrm_dst_ifdown);

static void xfrm_link_failure(struct sk_buff *skb)
{
	/* Impossible. Such dst must be popped before it reaches the point of failure. */
}
static void xfrm_negative_advice(struct sock *sk, struct dst_entry *dst)
{
	if (dst->obsolete)
		sk_dst_reset(sk);
}

static void xfrm_init_pmtu(struct xfrm_dst **bundle, int nr)
{
	while (nr--) {
		struct xfrm_dst *xdst = bundle[nr];
		u32 pmtu, route_mtu_cached;
		struct dst_entry *dst;

		dst = &xdst->u.dst;
		pmtu = dst_mtu(xfrm_dst_child(dst));
		xdst->child_mtu_cached = pmtu;

		pmtu = xfrm_state_mtu(dst->xfrm, pmtu);

		route_mtu_cached = dst_mtu(xdst->route);
		xdst->route_mtu_cached = route_mtu_cached;

		if (pmtu > route_mtu_cached)
			pmtu = route_mtu_cached;

		dst_metric_set(dst, RTAX_MTU, pmtu);
	}
}

/* Check that the bundle accepts the flow and its components are
 * still valid.
 */
static int xfrm_bundle_ok(struct xfrm_dst *first)
{
	struct xfrm_dst *bundle[XFRM_MAX_DEPTH];
	struct dst_entry *dst = &first->u.dst;
	struct xfrm_dst *xdst;
	int start_from, nr;
	u32 mtu;

	if (!dst_check(xfrm_dst_path(dst), ((struct xfrm_dst *)dst)->path_cookie) ||
	    (dst->dev && !netif_running(dst->dev)))
		return 0;

	if (dst->flags & DST_XFRM_QUEUE)
		return 1;

	start_from = nr = 0;
	do {
		struct xfrm_dst *xdst = (struct xfrm_dst *)dst;

		if (dst->xfrm->km.state != XFRM_STATE_VALID)
			return 0;
		if (xdst->xfrm_genid != dst->xfrm->genid)
			return 0;
		if (xdst->num_pols > 0 &&
		    xdst->policy_genid != atomic_read(&xdst->pols[0]->genid))
			return 0;

		bundle[nr++] = xdst;

		mtu = dst_mtu(xfrm_dst_child(dst));
		if (xdst->child_mtu_cached != mtu) {
			start_from = nr;
			xdst->child_mtu_cached = mtu;
		}

		if (!dst_check(xdst->route, xdst->route_cookie))
			return 0;
		mtu = dst_mtu(xdst->route);
		if (xdst->route_mtu_cached != mtu) {
			start_from = nr;
			xdst->route_mtu_cached = mtu;
		}

		dst = xfrm_dst_child(dst);
	} while (dst->xfrm);

	if (likely(!start_from))
		return 1;

	xdst = bundle[start_from - 1];
	mtu = xdst->child_mtu_cached;
	while (start_from--) {
		dst = &xdst->u.dst;

		mtu = xfrm_state_mtu(dst->xfrm, mtu);
		if (mtu > xdst->route_mtu_cached)
			mtu = xdst->route_mtu_cached;
		dst_metric_set(dst, RTAX_MTU, mtu);
		if (!start_from)
			break;

		xdst = bundle[start_from - 1];
		xdst->child_mtu_cached = mtu;
	}

	return 1;
}

static unsigned int xfrm_default_advmss(const struct dst_entry *dst)
{
	return dst_metric_advmss(xfrm_dst_path(dst));
}

static unsigned int xfrm_mtu(const struct dst_entry *dst)
{
	unsigned int mtu = dst_metric_raw(dst, RTAX_MTU);

	return mtu ? : dst_mtu(xfrm_dst_path(dst));
}
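
/* Walk the bundle to find the address to resolve the neighbour for: states in
 * transport mode are skipped, a remote care-of address (XFRM_TYPE_REMOTE_COADDR)
 * overrides the destination, and otherwise (unless the state only carries a
 * local care-of address) the tunnel endpoint of the state is used.
 */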
static const void *xfrm_get_dst_nexthop(const struct dst_entry *dst,
					const void *daddr)
{
	while (dst->xfrm) {
		const struct xfrm_state *xfrm = dst->xfrm;

		dst = xfrm_dst_child(dst);

		if (xfrm->props.mode == XFRM_MODE_TRANSPORT)
			continue;
		if (xfrm->type->flags & XFRM_TYPE_REMOTE_COADDR)
			daddr = xfrm->coaddr;
		else if (!(xfrm->type->flags & XFRM_TYPE_LOCAL_COADDR))
			daddr = &xfrm->id.daddr;
	}
	return daddr;
}

static struct neighbour *xfrm_neigh_lookup(const struct dst_entry *dst,
					   struct sk_buff *skb,
					   const void *daddr)
{
	const struct dst_entry *path = xfrm_dst_path(dst);

	if (!skb)
		daddr = xfrm_get_dst_nexthop(dst, daddr);
	return path->ops->neigh_lookup(path, skb, daddr);
}

static void xfrm_confirm_neigh(const struct dst_entry *dst, const void *daddr)
{
	const struct dst_entry *path = xfrm_dst_path(dst);

	daddr = xfrm_get_dst_nexthop(dst, daddr);
	path->ops->confirm_neigh(path, daddr);
}
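
/* Register per-family policy ops.  Any dst_ops hook the caller left NULL is
 * filled in with the generic xfrm implementation before the afinfo pointer is
 * published under RCU.
 */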
int xfrm_policy_register_afinfo(const struct xfrm_policy_afinfo *afinfo, int family)
{
	int err = 0;

	if (WARN_ON(family >= ARRAY_SIZE(xfrm_policy_afinfo)))
		return -EAFNOSUPPORT;

	spin_lock(&xfrm_policy_afinfo_lock);
	if (unlikely(xfrm_policy_afinfo[family] != NULL))
		err = -EEXIST;
	else {
		struct dst_ops *dst_ops = afinfo->dst_ops;

		if (likely(dst_ops->kmem_cachep == NULL))
			dst_ops->kmem_cachep = xfrm_dst_cache;
		if (likely(dst_ops->check == NULL))
			dst_ops->check = xfrm_dst_check;
		if (likely(dst_ops->default_advmss == NULL))
			dst_ops->default_advmss = xfrm_default_advmss;
		if (likely(dst_ops->mtu == NULL))
			dst_ops->mtu = xfrm_mtu;
		if (likely(dst_ops->negative_advice == NULL))
			dst_ops->negative_advice = (void *)xfrm_negative_advice;
		if (likely(dst_ops->link_failure == NULL))
			dst_ops->link_failure = xfrm_link_failure;
		if (likely(dst_ops->neigh_lookup == NULL))
			dst_ops->neigh_lookup = xfrm_neigh_lookup;
		if (likely(!dst_ops->confirm_neigh))
			dst_ops->confirm_neigh = xfrm_confirm_neigh;
		rcu_assign_pointer(xfrm_policy_afinfo[family], afinfo);
	}
	spin_unlock(&xfrm_policy_afinfo_lock);

	return err;
}
EXPORT_SYMBOL(xfrm_policy_register_afinfo);

void xfrm_policy_unregister_afinfo(const struct xfrm_policy_afinfo *afinfo)
{
	struct dst_ops *dst_ops = afinfo->dst_ops;
	int i;

	for (i = 0; i < ARRAY_SIZE(xfrm_policy_afinfo); i++) {
		if (xfrm_policy_afinfo[i] != afinfo)
			continue;
		RCU_INIT_POINTER(xfrm_policy_afinfo[i], NULL);
		break;
	}

	synchronize_rcu();

	dst_ops->kmem_cachep = NULL;
	dst_ops->check = NULL;
	dst_ops->negative_advice = NULL;
	dst_ops->link_failure = NULL;
}
EXPORT_SYMBOL(xfrm_policy_unregister_afinfo);

void xfrm_if_register_cb(const struct xfrm_if_cb *ifcb)
{
	spin_lock(&xfrm_if_cb_lock);
	rcu_assign_pointer(xfrm_if_cb, ifcb);
	spin_unlock(&xfrm_if_cb_lock);
}
EXPORT_SYMBOL(xfrm_if_register_cb);

void xfrm_if_unregister_cb(void)
{
	RCU_INIT_POINTER(xfrm_if_cb, NULL);
	synchronize_rcu();
}
EXPORT_SYMBOL(xfrm_if_unregister_cb);

#ifdef CONFIG_XFRM_STATISTICS
static int __net_init xfrm_statistics_init(struct net *net)
{
	int rv;

	net->mib.xfrm_statistics = alloc_percpu(struct linux_xfrm_mib);
	if (!net->mib.xfrm_statistics)
		return -ENOMEM;
	rv = xfrm_proc_init(net);
	if (rv < 0)
		free_percpu(net->mib.xfrm_statistics);
	return rv;
}

static void xfrm_statistics_fini(struct net *net)
{
	xfrm_proc_fini(net);
	free_percpu(net->mib.xfrm_statistics);
}
#else
static int __net_init xfrm_statistics_init(struct net *net)
{
	return 0;
}

static void xfrm_statistics_fini(struct net *net)
{
}
#endif
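
/* Per-netns policy infrastructure setup: the global dst cache and inexact
 * rhashtable are created once for init_net, then each namespace gets its
 * by-index and per-direction by-destination hash tables (8 buckets to start,
 * grown later by the resize work) plus the rebuild/resize work items.
 */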
static int __net_init xfrm_policy_init(struct net *net)
{
	unsigned int hmask, sz;
	int dir, err;

	if (net_eq(net, &init_net)) {
		xfrm_dst_cache = kmem_cache_create("xfrm_dst_cache",
					   sizeof(struct xfrm_dst),
					   0, SLAB_HWCACHE_ALIGN|SLAB_PANIC,
					   NULL);
		err = rhashtable_init(&xfrm_policy_inexact_table,
				      &xfrm_pol_inexact_params);
		BUG_ON(err);
	}

	hmask = 8 - 1;
	sz = (hmask+1) * sizeof(struct hlist_head);

	net->xfrm.policy_byidx = xfrm_hash_alloc(sz);
	if (!net->xfrm.policy_byidx)
		goto out_byidx;
	net->xfrm.policy_idx_hmask = hmask;

	for (dir = 0; dir < XFRM_POLICY_MAX; dir++) {
		struct xfrm_policy_hash *htab;

		net->xfrm.policy_count[dir] = 0;
		net->xfrm.policy_count[XFRM_POLICY_MAX + dir] = 0;
		INIT_HLIST_HEAD(&net->xfrm.policy_inexact[dir]);

		htab = &net->xfrm.policy_bydst[dir];
		htab->table = xfrm_hash_alloc(sz);
		if (!htab->table)
			goto out_bydst;
		htab->hmask = hmask;
		htab->dbits4 = 32;
		htab->sbits4 = 32;
		htab->dbits6 = 128;
		htab->sbits6 = 128;
	}
	net->xfrm.policy_hthresh.lbits4 = 32;
	net->xfrm.policy_hthresh.rbits4 = 32;
	net->xfrm.policy_hthresh.lbits6 = 128;
	net->xfrm.policy_hthresh.rbits6 = 128;

	seqlock_init(&net->xfrm.policy_hthresh.lock);

	INIT_LIST_HEAD(&net->xfrm.policy_all);
	INIT_LIST_HEAD(&net->xfrm.inexact_bins);
	INIT_WORK(&net->xfrm.policy_hash_work, xfrm_hash_resize);
	INIT_WORK(&net->xfrm.policy_hthresh.work, xfrm_hash_rebuild);
	return 0;

out_bydst:
	for (dir--; dir >= 0; dir--) {
		struct xfrm_policy_hash *htab;

		htab = &net->xfrm.policy_bydst[dir];
		xfrm_hash_free(htab->table, sz);
	}
	xfrm_hash_free(net->xfrm.policy_byidx, sz);
out_byidx:
	return -ENOMEM;
}

static void xfrm_policy_fini(struct net *net)
{
	struct xfrm_pol_inexact_bin *b, *t;
	unsigned int sz;
	int dir;

	flush_work(&net->xfrm.policy_hash_work);
#ifdef CONFIG_XFRM_SUB_POLICY
	xfrm_policy_flush(net, XFRM_POLICY_TYPE_SUB, false);
#endif
	xfrm_policy_flush(net, XFRM_POLICY_TYPE_MAIN, false);

	WARN_ON(!list_empty(&net->xfrm.policy_all));

	for (dir = 0; dir < XFRM_POLICY_MAX; dir++) {
		struct xfrm_policy_hash *htab;

		WARN_ON(!hlist_empty(&net->xfrm.policy_inexact[dir]));

		htab = &net->xfrm.policy_bydst[dir];
		sz = (htab->hmask + 1) * sizeof(struct hlist_head);
		WARN_ON(!hlist_empty(htab->table));
		xfrm_hash_free(htab->table, sz);
	}

	sz = (net->xfrm.policy_idx_hmask + 1) * sizeof(struct hlist_head);
	WARN_ON(!hlist_empty(net->xfrm.policy_byidx));
	xfrm_hash_free(net->xfrm.policy_byidx, sz);

	spin_lock_bh(&net->xfrm.xfrm_policy_lock);
	list_for_each_entry_safe(b, t, &net->xfrm.inexact_bins, inexact_bins)
		__xfrm_policy_inexact_prune_bin(b, true);
	spin_unlock_bh(&net->xfrm.xfrm_policy_lock);
}

static int __net_init xfrm_net_init(struct net *net)
{
	int rv;

	/* Initialize the per-net locks here */
	spin_lock_init(&net->xfrm.xfrm_state_lock);
	spin_lock_init(&net->xfrm.xfrm_policy_lock);
	seqcount_spinlock_init(&net->xfrm.xfrm_policy_hash_generation, &net->xfrm.xfrm_policy_lock);
	mutex_init(&net->xfrm.xfrm_cfg_mutex);
	net->xfrm.policy_default[XFRM_POLICY_IN] = XFRM_USERPOLICY_ACCEPT;
	net->xfrm.policy_default[XFRM_POLICY_FWD] = XFRM_USERPOLICY_ACCEPT;
	net->xfrm.policy_default[XFRM_POLICY_OUT] = XFRM_USERPOLICY_ACCEPT;

	rv = xfrm_statistics_init(net);
	if (rv < 0)
		goto out_statistics;
	rv = xfrm_state_init(net);
	if (rv < 0)
		goto out_state;
	rv = xfrm_policy_init(net);
	if (rv < 0)
		goto out_policy;
	rv = xfrm_sysctl_init(net);
	if (rv < 0)
		goto out_sysctl;

	return 0;

out_sysctl:
	xfrm_policy_fini(net);
out_policy:
	xfrm_state_fini(net);
out_state:
	xfrm_statistics_fini(net);
out_statistics:
	return rv;
}

static void __net_exit xfrm_net_exit(struct net *net)
{
	xfrm_sysctl_fini(net);
	xfrm_policy_fini(net);
	xfrm_state_fini(net);
	xfrm_statistics_fini(net);
}

static struct pernet_operations __net_initdata xfrm_net_ops = {
	.init = xfrm_net_init,
	.exit = xfrm_net_exit,
};

void __init xfrm_init(void)
{
	register_pernet_subsys(&xfrm_net_ops);
	xfrm_dev_init();
	xfrm_input_init();

#ifdef CONFIG_XFRM_ESPINTCP
	espintcp_init();
#endif
}

#ifdef CONFIG_AUDITSYSCALL
static void xfrm_audit_common_policyinfo(struct xfrm_policy *xp,
					 struct audit_buffer *audit_buf)
{
	struct xfrm_sec_ctx *ctx = xp->security;
	struct xfrm_selector *sel = &xp->selector;

	if (ctx)
		audit_log_format(audit_buf, " sec_alg=%u sec_doi=%u sec_obj=%s",
				 ctx->ctx_alg, ctx->ctx_doi, ctx->ctx_str);

	switch (sel->family) {
	case AF_INET:
		audit_log_format(audit_buf, " src=%pI4", &sel->saddr.a4);
		if (sel->prefixlen_s != 32)
			audit_log_format(audit_buf, " src_prefixlen=%d",
					 sel->prefixlen_s);
		audit_log_format(audit_buf, " dst=%pI4", &sel->daddr.a4);
		if (sel->prefixlen_d != 32)
			audit_log_format(audit_buf, " dst_prefixlen=%d",
					 sel->prefixlen_d);
		break;
	case AF_INET6:
		audit_log_format(audit_buf, " src=%pI6", sel->saddr.a6);
		if (sel->prefixlen_s != 128)
			audit_log_format(audit_buf, " src_prefixlen=%d",
					 sel->prefixlen_s);
		audit_log_format(audit_buf, " dst=%pI6", sel->daddr.a6);
		if (sel->prefixlen_d != 128)
			audit_log_format(audit_buf, " dst_prefixlen=%d",
					 sel->prefixlen_d);
		break;
	}
}

void xfrm_audit_policy_add(struct xfrm_policy *xp, int result, bool task_valid)
{
	struct audit_buffer *audit_buf;

	audit_buf = xfrm_audit_start("SPD-add");
	if (audit_buf == NULL)
		return;
	xfrm_audit_helper_usrinfo(task_valid, audit_buf);
	audit_log_format(audit_buf, " res=%u", result);
	xfrm_audit_common_policyinfo(xp, audit_buf);
	audit_log_end(audit_buf);
}
EXPORT_SYMBOL_GPL(xfrm_audit_policy_add);

void xfrm_audit_policy_delete(struct xfrm_policy *xp, int result,
			      bool task_valid)
{
	struct audit_buffer *audit_buf;

	audit_buf = xfrm_audit_start("SPD-delete");
	if (audit_buf == NULL)
		return;
	xfrm_audit_helper_usrinfo(task_valid, audit_buf);
	audit_log_format(audit_buf, " res=%u", result);
	xfrm_audit_common_policyinfo(xp, audit_buf);
	audit_log_end(audit_buf);
}
EXPORT_SYMBOL_GPL(xfrm_audit_policy_delete);
#endif

#ifdef CONFIG_XFRM_MIGRATE
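/* A selector with a wildcard upper-layer protocol matches on family,
 * addresses and prefix lengths only; otherwise the two selectors must be
 * byte-for-byte identical.
 */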
static bool xfrm_migrate_selector_match(const struct xfrm_selector *sel_cmp,
					const struct xfrm_selector *sel_tgt)
{
	if (sel_cmp->proto == IPSEC_ULPROTO_ANY) {
		if (sel_tgt->family == sel_cmp->family &&
		    xfrm_addr_equal(&sel_tgt->daddr, &sel_cmp->daddr,
				    sel_cmp->family) &&
		    xfrm_addr_equal(&sel_tgt->saddr, &sel_cmp->saddr,
				    sel_cmp->family) &&
		    sel_tgt->prefixlen_d == sel_cmp->prefixlen_d &&
		    sel_tgt->prefixlen_s == sel_cmp->prefixlen_s) {
			return true;
		}
	} else {
		if (memcmp(sel_tgt, sel_cmp, sizeof(*sel_tgt)) == 0) {
			return true;
		}
	}
	return false;
}
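
/* Locate the policy a MIGRATE request refers to: check the exact-match hash
 * chain first, then fall back to the inexact list, stopping there as soon as
 * a candidate can no longer beat the priority of the hashed match already
 * found.
 */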
static struct xfrm_policy *xfrm_migrate_policy_find(const struct xfrm_selector *sel,
						    u8 dir, u8 type, struct net *net, u32 if_id)
{
	struct xfrm_policy *pol, *ret = NULL;
	struct hlist_head *chain;
	u32 priority = ~0U;

	spin_lock_bh(&net->xfrm.xfrm_policy_lock);
	chain = policy_hash_direct(net, &sel->daddr, &sel->saddr, sel->family, dir);
	hlist_for_each_entry(pol, chain, bydst) {
		if ((if_id == 0 || pol->if_id == if_id) &&
		    xfrm_migrate_selector_match(sel, &pol->selector) &&
		    pol->type == type) {
			ret = pol;
			priority = ret->priority;
			break;
		}
	}
	chain = &net->xfrm.policy_inexact[dir];
	hlist_for_each_entry(pol, chain, bydst_inexact_list) {
		if ((pol->priority >= priority) && ret)
			break;

		if ((if_id == 0 || pol->if_id == if_id) &&
		    xfrm_migrate_selector_match(sel, &pol->selector) &&
		    pol->type == type) {
			ret = pol;
			break;
		}
	}

	xfrm_pol_hold(ret);

	spin_unlock_bh(&net->xfrm.xfrm_policy_lock);

	return ret;
}

static int migrate_tmpl_match(const struct xfrm_migrate *m, const struct xfrm_tmpl *t)
{
	int match = 0;

	if (t->mode == m->mode && t->id.proto == m->proto &&
	    (m->reqid == 0 || t->reqid == m->reqid)) {
		switch (t->mode) {
		case XFRM_MODE_TUNNEL:
		case XFRM_MODE_BEET:
			if (xfrm_addr_equal(&t->id.daddr, &m->old_daddr,
					    m->old_family) &&
			    xfrm_addr_equal(&t->saddr, &m->old_saddr,
					    m->old_family)) {
				match = 1;
			}
			break;
		case XFRM_MODE_TRANSPORT:
			/* in case of transport mode, template does not store
			   any IP addresses, hence we just compare mode and
			   protocol */
			match = 1;
			break;
		default:
			break;
		}
	}
	return match;
}

/* update endpoint address(es) of template(s) */
static int xfrm_policy_migrate(struct xfrm_policy *pol,
			       struct xfrm_migrate *m, int num_migrate)
{
	struct xfrm_migrate *mp;
	int i, j, n = 0;

	write_lock_bh(&pol->lock);
	if (unlikely(pol->walk.dead)) {
		/* target policy has been deleted */
		write_unlock_bh(&pol->lock);
		return -ENOENT;
	}

	for (i = 0; i < pol->xfrm_nr; i++) {
		for (j = 0, mp = m; j < num_migrate; j++, mp++) {
			if (!migrate_tmpl_match(mp, &pol->xfrm_vec[i]))
				continue;
			n++;
			if (pol->xfrm_vec[i].mode != XFRM_MODE_TUNNEL &&
			    pol->xfrm_vec[i].mode != XFRM_MODE_BEET)
				continue;
			/* update endpoints */
			memcpy(&pol->xfrm_vec[i].id.daddr, &mp->new_daddr,
			       sizeof(pol->xfrm_vec[i].id.daddr));
			memcpy(&pol->xfrm_vec[i].saddr, &mp->new_saddr,
			       sizeof(pol->xfrm_vec[i].saddr));
			pol->xfrm_vec[i].encap_family = mp->new_family;
			/* flush bundles */
			atomic_inc(&pol->genid);
		}
	}

	write_unlock_bh(&pol->lock);

	if (!n)
		return -ENODATA;

	return 0;
}
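
/* Sanity-check a MIGRATE request: at least one and at most XFRM_MAX_DEPTH
 * entries, no wildcard new addresses, and no duplicated old endpoint tuples.
 */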
static int xfrm_migrate_check(const struct xfrm_migrate *m, int num_migrate)
{
	int i, j;

	if (num_migrate < 1 || num_migrate > XFRM_MAX_DEPTH)
		return -EINVAL;

	for (i = 0; i < num_migrate; i++) {
		if (xfrm_addr_any(&m[i].new_daddr, m[i].new_family) ||
		    xfrm_addr_any(&m[i].new_saddr, m[i].new_family))
			return -EINVAL;

		/* check if there is any duplicated entry */
		for (j = i + 1; j < num_migrate; j++) {
			if (!memcmp(&m[i].old_daddr, &m[j].old_daddr,
				    sizeof(m[i].old_daddr)) &&
			    !memcmp(&m[i].old_saddr, &m[j].old_saddr,
				    sizeof(m[i].old_saddr)) &&
			    m[i].proto == m[j].proto &&
			    m[i].mode == m[j].mode &&
			    m[i].reqid == m[j].reqid &&
			    m[i].old_family == m[j].old_family)
				return -EINVAL;
		}
	}

	return 0;
}
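
/* Carry out a MIGRATE in stages: validate the request, find the affected
 * policy, clone the matching states towards the new endpoints, rewrite the
 * policy templates, delete the old states and finally announce the migration
 * via km_migrate().  On failure after states were cloned, the new states are
 * deleted again (restore_state).
 */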
int xfrm_migrate(const struct xfrm_selector *sel, u8 dir, u8 type,
		 struct xfrm_migrate *m, int num_migrate,
		 struct xfrm_kmaddress *k, struct net *net,
		 struct xfrm_encap_tmpl *encap, u32 if_id)
{
	int i, err, nx_cur = 0, nx_new = 0;
	struct xfrm_policy *pol = NULL;
	struct xfrm_state *x, *xc;
	struct xfrm_state *x_cur[XFRM_MAX_DEPTH];
	struct xfrm_state *x_new[XFRM_MAX_DEPTH];
	struct xfrm_migrate *mp;

	/* Stage 0 - sanity checks */
	if ((err = xfrm_migrate_check(m, num_migrate)) < 0)
		goto out;

	if (dir >= XFRM_POLICY_MAX) {
		err = -EINVAL;
		goto out;
	}

	/* Stage 1 - find policy */
	if ((pol = xfrm_migrate_policy_find(sel, dir, type, net, if_id)) == NULL) {
		err = -ENOENT;
		goto out;
	}

	/* Stage 2 - find and update state(s) */
	for (i = 0, mp = m; i < num_migrate; i++, mp++) {
		if ((x = xfrm_migrate_state_find(mp, net, if_id))) {
			x_cur[nx_cur] = x;
			nx_cur++;
			xc = xfrm_state_migrate(x, mp, encap);
			if (xc) {
				x_new[nx_new] = xc;
				nx_new++;
			} else {
				err = -ENODATA;
				goto restore_state;
			}
		}
	}

	/* Stage 3 - update policy */
	if ((err = xfrm_policy_migrate(pol, m, num_migrate)) < 0)
		goto restore_state;

	/* Stage 4 - delete old state(s) */
	if (nx_cur) {
		xfrm_states_put(x_cur, nx_cur);
		xfrm_states_delete(x_cur, nx_cur);
	}

	/* Stage 5 - announce */
	km_migrate(sel, dir, type, m, num_migrate, k, encap);

	xfrm_pol_put(pol);

	return 0;

out:
	return err;

restore_state:
	if (pol)
		xfrm_pol_put(pol);
	if (nx_cur)
		xfrm_states_put(x_cur, nx_cur);
	if (nx_new)
		xfrm_states_delete(x_new, nx_new);

	return err;
}
EXPORT_SYMBOL(xfrm_migrate);
#endif