// SPDX-License-Identifier: GPL-2.0
/*
 * Completely Fair Scheduling (CFS) Class (SCHED_NORMAL/SCHED_BATCH)
 *
 * Copyright (C) 2007 Red Hat, Inc., Ingo Molnar <[email protected]>
 *
 * Interactivity improvements by Mike Galbraith
 * (C) 2007 Mike Galbraith <[email protected]>
 *
 * Various enhancements by Dmitry Adamushko.
 * (C) 2007 Dmitry Adamushko <[email protected]>
 *
 * Group scheduling enhancements by Srivatsa Vaddagiri
 * Copyright IBM Corporation, 2007
 * Author: Srivatsa Vaddagiri <[email protected]>
 *
 * Scaled math optimizations by Thomas Gleixner
 * Copyright (C) 2007, Thomas Gleixner <[email protected]>
 *
 * Adaptive scheduling granularity, math enhancements by Peter Zijlstra
 * Copyright (C) 2007 Red Hat, Inc., Peter Zijlstra
 */
#include <linux/energy_model.h>
#include <linux/mmap_lock.h>
#include <linux/hugetlb_inline.h>
#include <linux/jiffies.h>
#include <linux/mm_api.h>
#include <linux/highmem.h>
#include <linux/spinlock_api.h>
#include <linux/cpumask_api.h>
#include <linux/lockdep_api.h>
#include <linux/softirq.h>
#include <linux/refcount_api.h>
#include <linux/topology.h>
#include <linux/sched/clock.h>
#include <linux/sched/cond_resched.h>
#include <linux/sched/cputime.h>
#include <linux/sched/isolation.h>
#include <linux/sched/nohz.h>

#include <linux/cpuidle.h>
#include <linux/interrupt.h>
#include <linux/memory-tiers.h>
#include <linux/mempolicy.h>
#include <linux/mutex_api.h>
#include <linux/profile.h>
#include <linux/psi.h>
#include <linux/ratelimit.h>
#include <linux/task_work.h>

#include <asm/switch_to.h>

#include <linux/sched/cond_resched.h>

#include "sched.h"
#include "stats.h"
#include "autogroup.h"

#include <trace/hooks/sched.h>

EXPORT_TRACEPOINT_SYMBOL_GPL(sched_stat_runtime);
/*
 * Targeted preemption latency for CPU-bound tasks:
 *
 * NOTE: this latency value is not the same as the concept of
 * 'timeslice length' - timeslices in CFS are of variable length
 * and have no persistent notion like in traditional, time-slice
 * based scheduling concepts.
 *
 * (to see the precise effective timeslice length of your workload,
 *  run vmstat and monitor the context-switches (cs) field)
 *
 * (default: 6ms * (1 + ilog(ncpus)), units: nanoseconds)
 */
unsigned int sysctl_sched_latency = 6000000ULL;
EXPORT_SYMBOL_GPL(sysctl_sched_latency);
static unsigned int normalized_sysctl_sched_latency = 6000000ULL;

/*
 * The initial- and re-scaling of tunables is configurable
 *
 * Options are:
 *
 *   SCHED_TUNABLESCALING_NONE - unscaled, always *1
 *   SCHED_TUNABLESCALING_LOG - scaled logarithmically, *1+ilog(ncpus)
 *   SCHED_TUNABLESCALING_LINEAR - scaled linearly, *ncpus
 *
 * (default SCHED_TUNABLESCALING_LOG = *(1+ilog(ncpus)))
 */
unsigned int sysctl_sched_tunable_scaling = SCHED_TUNABLESCALING_LOG;

/*
 * Minimal preemption granularity for CPU-bound tasks:
 *
 * (default: 0.75 msec * (1 + ilog(ncpus)), units: nanoseconds)
 */
unsigned int sysctl_sched_min_granularity = 750000ULL;
EXPORT_SYMBOL_GPL(sysctl_sched_min_granularity);
static unsigned int normalized_sysctl_sched_min_granularity = 750000ULL;

/*
 * Minimal preemption granularity for CPU-bound SCHED_IDLE tasks.
 * Applies only when SCHED_IDLE tasks compete with normal tasks.
 *
 * (default: 0.75 msec)
 */
unsigned int sysctl_sched_idle_min_granularity = 750000ULL;
EXPORT_SYMBOL_GPL(sysctl_sched_idle_min_granularity);

/*
 * This value is kept at sysctl_sched_latency/sysctl_sched_min_granularity
 */
static unsigned int sched_nr_latency = 8;
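
/* With the default values above: 6 msec / 0.75 msec = 8. */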

/*
 * After fork, child runs first. If set to 0 (default) then
 * parent will (try to) run first.
 */
unsigned int sysctl_sched_child_runs_first __read_mostly;

/*
 * SCHED_OTHER wake-up granularity.
 *
 * This option delays the preemption effects of decoupled workloads
 * and reduces their over-scheduling. Synchronous workloads will still
 * have immediate wakeup/sleep latencies.
 *
 * (default: 1 msec * (1 + ilog(ncpus)), units: nanoseconds)
 */
unsigned int sysctl_sched_wakeup_granularity = 1000000UL;
static unsigned int normalized_sysctl_sched_wakeup_granularity = 1000000UL;

const_debug unsigned int sysctl_sched_migration_cost = 500000UL;

int sched_thermal_decay_shift;
static int __init setup_sched_thermal_decay_shift(char *str)
{
	int _shift = 0;

	if (kstrtoint(str, 0, &_shift))
		pr_warn("Unable to set scheduler thermal pressure decay shift parameter\n");

	sched_thermal_decay_shift = clamp(_shift, 0, 10);
	return 1;
}
__setup("sched_thermal_decay_shift=", setup_sched_thermal_decay_shift);
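
/*
 * Example (illustrative value): booting with "sched_thermal_decay_shift=3"
 * on the kernel command line sets the shift to 3; values outside the
 * 0..10 range are clamped by the handler above.
 */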
- #ifdef CONFIG_SMP
- /*
- * For asym packing, by default the lower numbered CPU has higher priority.
- */
- int __weak arch_asym_cpu_priority(int cpu)
- {
- return -cpu;
- }
- /*
- * The margin used when comparing utilization with CPU capacity.
- *
- * (default: ~20%)
- */
- #define fits_capacity(cap, max) ((cap) * 1280 < (max) * 1024)
- /*
- * The margin used when comparing CPU capacities.
- * Is 'cap1' noticeably greater than 'cap2'?
- *
- * (default: ~5%)
- */
- #define capacity_greater(cap1, cap2) ((cap1) * 1024 > (cap2) * 1078)
- #endif
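The two margins above are plain fixed-point comparisons: fits_capacity() holds when util grown by 1280/1024 still fits under the capacity, which is equivalent to asking for roughly 20% headroom, and capacity_greater() holds when cap1 exceeds cap2 by more than about 5% (1078/1024). A minimal userspace sketch of the arithmetic (a standalone illustration, not kernel code):

#include <stdio.h>

#define fits_capacity(cap, max)      ((cap) * 1280 < (max) * 1024)   /* i.e. cap < max * 0.8 */
#define capacity_greater(cap1, cap2) ((cap1) * 1024 > (cap2) * 1078) /* i.e. cap1 > cap2 * ~1.053 */

int main(void)
{
	printf("util 800 fits capacity 1024: %d\n", fits_capacity(800, 1024));     /* 1 */
	printf("util 850 fits capacity 1024: %d\n", fits_capacity(850, 1024));     /* 0 */
	printf("1024 noticeably above 1000 : %d\n", capacity_greater(1024, 1000)); /* 0: within ~5% */
	printf("1024 noticeably above 900  : %d\n", capacity_greater(1024, 900));  /* 1 */
	return 0;
}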
- #ifdef CONFIG_CFS_BANDWIDTH
- /*
- * Amount of runtime to allocate from global (tg) to local (per-cfs_rq) pool
- * each time a cfs_rq requests quota.
- *
- * Note: in the case that the slice exceeds the runtime remaining (either due
- * to consumption or the quota being specified to be smaller than the slice)
- * we will always only issue the remaining available time.
- *
- * (default: 5 msec, units: microseconds)
- */
- static unsigned int sysctl_sched_cfs_bandwidth_slice = 5000UL;
- #endif
- #ifdef CONFIG_SYSCTL
- static struct ctl_table sched_fair_sysctls[] = {
- {
- .procname = "sched_child_runs_first",
- .data = &sysctl_sched_child_runs_first,
- .maxlen = sizeof(unsigned int),
- .mode = 0644,
- .proc_handler = proc_dointvec,
- },
- #ifdef CONFIG_CFS_BANDWIDTH
- {
- .procname = "sched_cfs_bandwidth_slice_us",
- .data = &sysctl_sched_cfs_bandwidth_slice,
- .maxlen = sizeof(unsigned int),
- .mode = 0644,
- .proc_handler = proc_dointvec_minmax,
- .extra1 = SYSCTL_ONE,
- },
- #endif
- {}
- };
- static int __init sched_fair_sysctl_init(void)
- {
- register_sysctl_init("kernel", sched_fair_sysctls);
- return 0;
- }
- late_initcall(sched_fair_sysctl_init);
- #endif
- static inline void update_load_add(struct load_weight *lw, unsigned long inc)
- {
- lw->weight += inc;
- lw->inv_weight = 0;
- }
- static inline void update_load_sub(struct load_weight *lw, unsigned long dec)
- {
- lw->weight -= dec;
- lw->inv_weight = 0;
- }
- static inline void update_load_set(struct load_weight *lw, unsigned long w)
- {
- lw->weight = w;
- lw->inv_weight = 0;
- }
- /*
- * Increase the granularity value when there are more CPUs,
- * because with more CPUs the 'effective latency' as visible
- * to users decreases. But the relationship is not linear,
- * so pick a second-best guess by going with the log2 of the
- * number of CPUs.
- *
- * This idea comes from the SD scheduler of Con Kolivas:
- */
- static unsigned int get_update_sysctl_factor(void)
- {
- unsigned int cpus = min_t(unsigned int, num_online_cpus(), 8);
- unsigned int factor;
- switch (sysctl_sched_tunable_scaling) {
- case SCHED_TUNABLESCALING_NONE:
- factor = 1;
- break;
- case SCHED_TUNABLESCALING_LINEAR:
- factor = cpus;
- break;
- case SCHED_TUNABLESCALING_LOG:
- default:
- factor = 1 + ilog2(cpus);
- break;
- }
- return factor;
- }
- static void update_sysctl(void)
- {
- unsigned int factor = get_update_sysctl_factor();
- #define SET_SYSCTL(name) \
- (sysctl_##name = (factor) * normalized_sysctl_##name)
- SET_SYSCTL(sched_min_granularity);
- SET_SYSCTL(sched_latency);
- SET_SYSCTL(sched_wakeup_granularity);
- #undef SET_SYSCTL
- }
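To make the scaling above concrete: with the default logarithmic scaling, a machine with 8 online CPUs gets factor = 1 + ilog2(8) = 4, so the normalized 6ms latency becomes 24ms and the 0.75ms minimum granularity becomes 3ms (the CPU count is clamped at 8 for this purpose). A standalone sketch of that arithmetic, with ilog2 open-coded purely for illustration:

#include <stdio.h>

static unsigned int ilog2_u(unsigned int x)   /* integer log2, stand-in for the kernel's ilog2() */
{
	unsigned int r = 0;
	while (x >>= 1)
		r++;
	return r;
}

int main(void)
{
	const unsigned int normalized_latency = 6000000, normalized_min_gran = 750000;

	for (unsigned int cpus = 1; cpus <= 8; cpus *= 2) {
		unsigned int factor = 1 + ilog2_u(cpus);   /* SCHED_TUNABLESCALING_LOG */
		printf("%u CPUs: factor=%u latency=%uns min_granularity=%uns\n",
		       cpus, factor, factor * normalized_latency, factor * normalized_min_gran);
	}
	return 0;
}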
- void __init sched_init_granularity(void)
- {
- update_sysctl();
- }
- #define WMULT_CONST (~0U)
- #define WMULT_SHIFT 32
- static void __update_inv_weight(struct load_weight *lw)
- {
- unsigned long w;
- if (likely(lw->inv_weight))
- return;
- w = scale_load_down(lw->weight);
- if (BITS_PER_LONG > 32 && unlikely(w >= WMULT_CONST))
- lw->inv_weight = 1;
- else if (unlikely(!w))
- lw->inv_weight = WMULT_CONST;
- else
- lw->inv_weight = WMULT_CONST / w;
- }
- /*
- * delta_exec * weight / lw.weight
- * OR
- * (delta_exec * (weight * lw->inv_weight)) >> WMULT_SHIFT
- *
- * Either weight := NICE_0_LOAD and lw \e sched_prio_to_wmult[], in which case
- * we're guaranteed shift stays positive because inv_weight is guaranteed to
- * fit 32 bits, and NICE_0_LOAD gives another 10 bits; therefore shift >= 22.
- *
- * Or, weight <= lw.weight (because lw.weight is the runqueue weight), thus
- * weight/lw.weight <= 1, and therefore our shift will also be positive.
- */
- static u64 __calc_delta(u64 delta_exec, unsigned long weight, struct load_weight *lw)
- {
- u64 fact = scale_load_down(weight);
- u32 fact_hi = (u32)(fact >> 32);
- int shift = WMULT_SHIFT;
- int fs;
- __update_inv_weight(lw);
- if (unlikely(fact_hi)) {
- fs = fls(fact_hi);
- shift -= fs;
- fact >>= fs;
- }
- fact = mul_u32_u32(fact, lw->inv_weight);
- fact_hi = (u32)(fact >> 32);
- if (fact_hi) {
- fs = fls(fact_hi);
- shift -= fs;
- fact >>= fs;
- }
- return mul_u64_u32_shr(delta_exec, fact, shift);
- }
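__calc_delta() avoids a 64-bit division on every call by multiplying with the cached reciprocal inv_weight ~= 2^32 / lw->weight and shifting right by WMULT_SHIFT, renormalizing the factor whenever it overflows 32 bits. A standalone sketch of the same reciprocal-multiply trick, simplified to assume the weights fit in 32 bits and using __uint128_t (gcc/clang) in place of mul_u64_u32_shr():

#include <stdio.h>
#include <stdint.h>

/* delta * weight / lw_weight computed as (delta * (weight * (2^32 / lw_weight))) >> 32 */
static uint64_t calc_delta(uint64_t delta, uint32_t weight, uint32_t lw_weight)
{
	uint32_t inv_weight = (uint32_t)(~0U / lw_weight);   /* cached reciprocal */
	uint64_t fact = (uint64_t)weight * inv_weight;

	return (uint64_t)(((__uint128_t)delta * fact) >> 32);
}

int main(void)
{
	/* A nice-0 entity (weight 1024) on a queue of total weight 3072 gets ~1/3 of the delta. */
	printf("%llu\n", (unsigned long long)calc_delta(6000000, 1024, 3072)); /* ~2000000 */
	return 0;
}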
- const struct sched_class fair_sched_class;
- /**************************************************************
- * CFS operations on generic schedulable entities:
- */
- #ifdef CONFIG_FAIR_GROUP_SCHED
- /* Walk up scheduling entities hierarchy */
- #define for_each_sched_entity(se) \
- for (; se; se = se->parent)
- static inline bool list_add_leaf_cfs_rq(struct cfs_rq *cfs_rq)
- {
- struct rq *rq = rq_of(cfs_rq);
- int cpu = cpu_of(rq);
- if (cfs_rq->on_list)
- return rq->tmp_alone_branch == &rq->leaf_cfs_rq_list;
- cfs_rq->on_list = 1;
- /*
- * Ensure we either appear before our parent (if already
- * enqueued) or force our parent to appear after us when it is
- * enqueued. The fact that we always enqueue bottom-up
- * reduces this to two cases and a special case for the root
- * cfs_rq. Furthermore, it also means that we will always reset
- * tmp_alone_branch either when the branch is connected
- * to a tree or when we reach the top of the tree
- */
- if (cfs_rq->tg->parent &&
- cfs_rq->tg->parent->cfs_rq[cpu]->on_list) {
- /*
- * If parent is already on the list, we add the child
- * just before. Thanks to circular linked property of
- * the list, this means to put the child at the tail
- * of the list that starts by parent.
- */
- list_add_tail_rcu(&cfs_rq->leaf_cfs_rq_list,
- &(cfs_rq->tg->parent->cfs_rq[cpu]->leaf_cfs_rq_list));
- /*
- * The branch is now connected to its tree so we can
- * reset tmp_alone_branch to the beginning of the
- * list.
- */
- rq->tmp_alone_branch = &rq->leaf_cfs_rq_list;
- return true;
- }
- if (!cfs_rq->tg->parent) {
- /*
- * cfs rq without parent should be put
- * at the tail of the list.
- */
- list_add_tail_rcu(&cfs_rq->leaf_cfs_rq_list,
- &rq->leaf_cfs_rq_list);
- /*
- * We have reached the top of a tree so we can reset
- * tmp_alone_branch to the beginning of the list.
- */
- rq->tmp_alone_branch = &rq->leaf_cfs_rq_list;
- return true;
- }
- /*
- * The parent has not already been added so we want to
- * make sure that it will be put after us.
- * tmp_alone_branch points to the beginning of the branch
- * where we will add parent.
- */
- list_add_rcu(&cfs_rq->leaf_cfs_rq_list, rq->tmp_alone_branch);
- /*
- * Update tmp_alone_branch to point to the new beginning
- * of the branch.
- */
- rq->tmp_alone_branch = &cfs_rq->leaf_cfs_rq_list;
- return false;
- }
- static inline void list_del_leaf_cfs_rq(struct cfs_rq *cfs_rq)
- {
- if (cfs_rq->on_list) {
- struct rq *rq = rq_of(cfs_rq);
- /*
- * With cfs_rq being unthrottled/throttled during an enqueue,
- * it can happen that tmp_alone_branch points to a leaf that
- * we ultimately want to delete. In this case, tmp_alone_branch moves
- * to the prev element but it will point to rq->leaf_cfs_rq_list
- * at the end of the enqueue.
- */
- if (rq->tmp_alone_branch == &cfs_rq->leaf_cfs_rq_list)
- rq->tmp_alone_branch = cfs_rq->leaf_cfs_rq_list.prev;
- list_del_rcu(&cfs_rq->leaf_cfs_rq_list);
- cfs_rq->on_list = 0;
- }
- }
- static inline void assert_list_leaf_cfs_rq(struct rq *rq)
- {
- SCHED_WARN_ON(rq->tmp_alone_branch != &rq->leaf_cfs_rq_list);
- }
- /* Iterate through all leaf cfs_rqs on a runqueue */
- #define for_each_leaf_cfs_rq_safe(rq, cfs_rq, pos) \
- list_for_each_entry_safe(cfs_rq, pos, &rq->leaf_cfs_rq_list, \
- leaf_cfs_rq_list)
- /* Do the two (enqueued) entities belong to the same group ? */
- static inline struct cfs_rq *
- is_same_group(struct sched_entity *se, struct sched_entity *pse)
- {
- if (se->cfs_rq == pse->cfs_rq)
- return se->cfs_rq;
- return NULL;
- }
- static inline struct sched_entity *parent_entity(struct sched_entity *se)
- {
- return se->parent;
- }
- static void
- find_matching_se(struct sched_entity **se, struct sched_entity **pse)
- {
- int se_depth, pse_depth;
- /*
- * A preemption test can only be made between sibling entities that are in
- * the same cfs_rq, i.e. that share a common parent. Walk up the hierarchy
- * of both tasks until we find ancestors that are siblings under a common
- * parent.
- */
- /* First walk up until both entities are at same depth */
- se_depth = (*se)->depth;
- pse_depth = (*pse)->depth;
- while (se_depth > pse_depth) {
- se_depth--;
- *se = parent_entity(*se);
- }
- while (pse_depth > se_depth) {
- pse_depth--;
- *pse = parent_entity(*pse);
- }
- while (!is_same_group(*se, *pse)) {
- *se = parent_entity(*se);
- *pse = parent_entity(*pse);
- }
- }
- static int tg_is_idle(struct task_group *tg)
- {
- return tg->idle > 0;
- }
- static int cfs_rq_is_idle(struct cfs_rq *cfs_rq)
- {
- return cfs_rq->idle > 0;
- }
- static int se_is_idle(struct sched_entity *se)
- {
- if (entity_is_task(se))
- return task_has_idle_policy(task_of(se));
- return cfs_rq_is_idle(group_cfs_rq(se));
- }
- #else /* !CONFIG_FAIR_GROUP_SCHED */
- #define for_each_sched_entity(se) \
- for (; se; se = NULL)
- static inline bool list_add_leaf_cfs_rq(struct cfs_rq *cfs_rq)
- {
- return true;
- }
- static inline void list_del_leaf_cfs_rq(struct cfs_rq *cfs_rq)
- {
- }
- static inline void assert_list_leaf_cfs_rq(struct rq *rq)
- {
- }
- #define for_each_leaf_cfs_rq_safe(rq, cfs_rq, pos) \
- for (cfs_rq = &rq->cfs, pos = NULL; cfs_rq; cfs_rq = pos)
- static inline struct sched_entity *parent_entity(struct sched_entity *se)
- {
- return NULL;
- }
- static inline void
- find_matching_se(struct sched_entity **se, struct sched_entity **pse)
- {
- }
- static inline int tg_is_idle(struct task_group *tg)
- {
- return 0;
- }
- static int cfs_rq_is_idle(struct cfs_rq *cfs_rq)
- {
- return 0;
- }
- static int se_is_idle(struct sched_entity *se)
- {
- return 0;
- }
- #endif /* CONFIG_FAIR_GROUP_SCHED */
- static __always_inline
- void account_cfs_rq_runtime(struct cfs_rq *cfs_rq, u64 delta_exec);
- /**************************************************************
- * Scheduling class tree data structure manipulation methods:
- */
- static inline u64 max_vruntime(u64 max_vruntime, u64 vruntime)
- {
- s64 delta = (s64)(vruntime - max_vruntime);
- if (delta > 0)
- max_vruntime = vruntime;
- return max_vruntime;
- }
- static inline u64 min_vruntime(u64 min_vruntime, u64 vruntime)
- {
- s64 delta = (s64)(vruntime - min_vruntime);
- if (delta < 0)
- min_vruntime = vruntime;
- return min_vruntime;
- }
- static inline bool entity_before(struct sched_entity *a,
- struct sched_entity *b)
- {
- return (s64)(a->vruntime - b->vruntime) < 0;
- }
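entity_before() and the min/max_vruntime() helpers compare vruntimes through a signed 64-bit difference rather than with '<' directly, so the ordering stays correct even if the unsigned counters wrap. A small standalone demonstration of why the naive comparison breaks at the wrap point:

#include <stdio.h>
#include <stdint.h>

static int before(uint64_t a, uint64_t b)
{
	return (int64_t)(a - b) < 0;    /* same idiom as entity_before() */
}

int main(void)
{
	uint64_t a = UINT64_MAX - 10;   /* just before the counter wraps */
	uint64_t b = a + 100;           /* wraps around to a small value */

	printf("naive a < b : %d\n", a < b);        /* 0: wrap breaks the ordering */
	printf("before(a, b): %d\n", before(a, b)); /* 1: a is still seen as earlier */
	return 0;
}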
- #define __node_2_se(node) \
- rb_entry((node), struct sched_entity, run_node)
- static void update_min_vruntime(struct cfs_rq *cfs_rq)
- {
- struct sched_entity *curr = cfs_rq->curr;
- struct rb_node *leftmost = rb_first_cached(&cfs_rq->tasks_timeline);
- u64 vruntime = cfs_rq->min_vruntime;
- if (curr) {
- if (curr->on_rq)
- vruntime = curr->vruntime;
- else
- curr = NULL;
- }
- if (leftmost) { /* non-empty tree */
- struct sched_entity *se = __node_2_se(leftmost);
- if (!curr)
- vruntime = se->vruntime;
- else
- vruntime = min_vruntime(vruntime, se->vruntime);
- }
- /* ensure we never gain time by being placed backwards. */
- u64_u32_store(cfs_rq->min_vruntime,
- max_vruntime(cfs_rq->min_vruntime, vruntime));
- }
- static inline bool __entity_less(struct rb_node *a, const struct rb_node *b)
- {
- return entity_before(__node_2_se(a), __node_2_se(b));
- }
- /*
- * Enqueue an entity into the rb-tree:
- */
- static void __enqueue_entity(struct cfs_rq *cfs_rq, struct sched_entity *se)
- {
- trace_android_rvh_enqueue_entity(cfs_rq, se);
- rb_add_cached(&se->run_node, &cfs_rq->tasks_timeline, __entity_less);
- }
- static void __dequeue_entity(struct cfs_rq *cfs_rq, struct sched_entity *se)
- {
- trace_android_rvh_dequeue_entity(cfs_rq, se);
- rb_erase_cached(&se->run_node, &cfs_rq->tasks_timeline);
- }
- struct sched_entity *__pick_first_entity(struct cfs_rq *cfs_rq)
- {
- struct rb_node *left = rb_first_cached(&cfs_rq->tasks_timeline);
- if (!left)
- return NULL;
- return __node_2_se(left);
- }
- static struct sched_entity *__pick_next_entity(struct sched_entity *se)
- {
- struct rb_node *next = rb_next(&se->run_node);
- if (!next)
- return NULL;
- return __node_2_se(next);
- }
- #ifdef CONFIG_SCHED_DEBUG
- struct sched_entity *__pick_last_entity(struct cfs_rq *cfs_rq)
- {
- struct rb_node *last = rb_last(&cfs_rq->tasks_timeline.rb_root);
- if (!last)
- return NULL;
- return __node_2_se(last);
- }
- /**************************************************************
- * Scheduling class statistics methods:
- */
- int sched_update_scaling(void)
- {
- unsigned int factor = get_update_sysctl_factor();
- sched_nr_latency = DIV_ROUND_UP(sysctl_sched_latency,
- sysctl_sched_min_granularity);
- #define WRT_SYSCTL(name) \
- (normalized_sysctl_##name = sysctl_##name / (factor))
- WRT_SYSCTL(sched_min_granularity);
- WRT_SYSCTL(sched_latency);
- WRT_SYSCTL(sched_wakeup_granularity);
- #undef WRT_SYSCTL
- return 0;
- }
- #endif
- /*
- * delta /= w
- */
- static inline u64 calc_delta_fair(u64 delta, struct sched_entity *se)
- {
- if (unlikely(se->load.weight != NICE_0_LOAD))
- delta = __calc_delta(delta, NICE_0_LOAD, &se->load);
- return delta;
- }
- /*
- * The idea is to set a period in which each task runs once.
- *
- * When there are too many tasks (sched_nr_latency) we have to stretch
- * this period because otherwise the slices get too small.
- *
- * p = (nr <= nl) ? l : l*nr/nl
- */
- static u64 __sched_period(unsigned long nr_running)
- {
- if (unlikely(nr_running > sched_nr_latency))
- return nr_running * sysctl_sched_min_granularity;
- else
- return sysctl_sched_latency;
- }
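With the defaults (6ms latency, 0.75ms minimum granularity, sched_nr_latency = 8) the period therefore stays at 6ms for up to 8 runnable tasks and grows by 0.75ms per extra task beyond that, so every task can still get at least one minimum-granularity slice per period. A standalone sketch of the formula:

#include <stdio.h>

static unsigned long long sched_period(unsigned long nr_running)
{
	const unsigned long long latency  = 6000000ULL;  /* sysctl_sched_latency         */
	const unsigned long long min_gran = 750000ULL;   /* sysctl_sched_min_granularity */
	const unsigned long nr_latency    = 8;           /* latency / min_granularity    */

	return nr_running > nr_latency ? nr_running * min_gran : latency;
}

int main(void)
{
	for (unsigned long nr = 1; nr <= 12; nr++)
		printf("nr_running=%2lu period=%llu ns\n", nr, sched_period(nr));
	return 0;
}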
- static bool sched_idle_cfs_rq(struct cfs_rq *cfs_rq);
- /*
- * We calculate the wall-time slice from the period by taking a part
- * proportional to the weight.
- *
- * s = p*P[w/rw]
- */
- static u64 sched_slice(struct cfs_rq *cfs_rq, struct sched_entity *se)
- {
- unsigned int nr_running = cfs_rq->nr_running;
- struct sched_entity *init_se = se;
- unsigned int min_gran;
- u64 slice;
- if (sched_feat(ALT_PERIOD))
- nr_running = rq_of(cfs_rq)->cfs.h_nr_running;
- slice = __sched_period(nr_running + !se->on_rq);
- for_each_sched_entity(se) {
- struct load_weight *load;
- struct load_weight lw;
- struct cfs_rq *qcfs_rq;
- qcfs_rq = cfs_rq_of(se);
- load = &qcfs_rq->load;
- if (unlikely(!se->on_rq)) {
- lw = qcfs_rq->load;
- update_load_add(&lw, se->load.weight);
- load = &lw;
- }
- slice = __calc_delta(slice, se->load.weight, load);
- }
- if (sched_feat(BASE_SLICE)) {
- if (se_is_idle(init_se) && !sched_idle_cfs_rq(cfs_rq))
- min_gran = sysctl_sched_idle_min_granularity;
- else
- min_gran = sysctl_sched_min_granularity;
- slice = max_t(u64, slice, min_gran);
- }
- return slice;
- }
- /*
- * We calculate the vruntime slice of a to-be-inserted task.
- *
- * vs = s/w
- */
- static u64 sched_vslice(struct cfs_rq *cfs_rq, struct sched_entity *se)
- {
- return calc_delta_fair(sched_slice(cfs_rq, se), se);
- }
- #include "pelt.h"
- #ifdef CONFIG_SMP
- static int select_idle_sibling(struct task_struct *p, int prev_cpu, int cpu);
- static unsigned long task_h_load(struct task_struct *p);
- static unsigned long capacity_of(int cpu);
- /* Give a new sched_entity initial runnable values so its load looks heavy while it is young */
- void init_entity_runnable_average(struct sched_entity *se)
- {
- struct sched_avg *sa = &se->avg;
- memset(sa, 0, sizeof(*sa));
- /*
- * Tasks are initialized with full load to be seen as heavy tasks until
- * they get a chance to stabilize to their real load level.
- * Group entities are initialized with zero load to reflect the fact that
- * nothing has been attached to the task group yet.
- */
- if (entity_is_task(se))
- sa->load_avg = scale_load_down(se->load.weight);
- /* when this task is enqueued, it will contribute to its cfs_rq's load_avg */
- }
- /*
- * With new tasks being created, their initial util_avgs are extrapolated
- * based on the cfs_rq's current util_avg:
- *
- * util_avg = cfs_rq->util_avg / (cfs_rq->load_avg + 1) * se.load.weight
- *
- * However, in many cases, the above util_avg does not give a desired
- * value. Moreover, the sum of the util_avgs may be divergent, such
- * as when the series is a harmonic series.
- *
- * To solve this problem, we also cap the util_avg of successive tasks to
- * only 1/2 of the left utilization budget:
- *
- * util_avg_cap = (cpu_scale - cfs_rq->avg.util_avg) / 2^n
- *
- * where n denotes the nth task and cpu_scale the CPU capacity.
- *
- * For example, for a CPU with a capacity of 1024, the simplest series from
- * the beginning would be like:
- *
- * task util_avg: 512, 256, 128, 64, 32, 16, 8, ...
- * cfs_rq util_avg: 512, 768, 896, 960, 992, 1008, 1016, ...
- *
- * Finally, that extrapolated util_avg is clamped to the cap (util_avg_cap)
- * if util_avg > util_avg_cap.
- */
- void post_init_entity_util_avg(struct task_struct *p)
- {
- struct sched_entity *se = &p->se;
- struct cfs_rq *cfs_rq = cfs_rq_of(se);
- struct sched_avg *sa = &se->avg;
- long cpu_scale = arch_scale_cpu_capacity(cpu_of(rq_of(cfs_rq)));
- long cap = (long)(cpu_scale - cfs_rq->avg.util_avg) / 2;
- if (p->sched_class != &fair_sched_class) {
- /*
- * For !fair tasks do:
- *
- update_cfs_rq_load_avg(now, cfs_rq);
- attach_entity_load_avg(cfs_rq, se);
- switched_from_fair(rq, p);
- *
- * such that the next switched_to_fair() has the
- * expected state.
- */
- se->avg.last_update_time = cfs_rq_clock_pelt(cfs_rq);
- return;
- }
- if (cap > 0) {
- if (cfs_rq->avg.util_avg != 0) {
- sa->util_avg = cfs_rq->avg.util_avg * se->load.weight;
- sa->util_avg /= (cfs_rq->avg.load_avg + 1);
- if (sa->util_avg > cap)
- sa->util_avg = cap;
- } else {
- sa->util_avg = cap;
- }
- }
- sa->runnable_avg = sa->util_avg;
- /* Hook before this se's util is attached to cfs_rq's util */
- trace_android_rvh_post_init_entity_util_avg(se);
- }
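The geometric series in the comment above is easy to reproduce: each newly forked task takes half of the utilization budget left on the CPU, so successive forks on an otherwise quiet 1024-capacity CPU get 512, 256, 128, and so on. A standalone sketch (ignoring decay and assuming the cfs_rq starts at zero utilization):

#include <stdio.h>

int main(void)
{
	long cpu_scale = 1024;   /* stand-in for arch_scale_cpu_capacity() */
	long cfs_util  = 0;      /* cfs_rq->avg.util_avg                   */

	for (int n = 1; n <= 7; n++) {
		long cap = (cpu_scale - cfs_util) / 2;   /* util_avg_cap for the nth task          */
		cfs_util += cap;                         /* new task's util attaches to the cfs_rq */
		printf("task %d: util_avg=%4ld  cfs_rq util_avg=%4ld\n", n, cap, cfs_util);
	}
	return 0;
}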
- #else /* !CONFIG_SMP */
- void init_entity_runnable_average(struct sched_entity *se)
- {
- }
- void post_init_entity_util_avg(struct task_struct *p)
- {
- }
- static void update_tg_load_avg(struct cfs_rq *cfs_rq)
- {
- }
- #endif /* CONFIG_SMP */
- /*
- * Update the current task's runtime statistics.
- */
- static void update_curr(struct cfs_rq *cfs_rq)
- {
- struct sched_entity *curr = cfs_rq->curr;
- u64 now = rq_clock_task(rq_of(cfs_rq));
- u64 delta_exec;
- if (unlikely(!curr))
- return;
- delta_exec = now - curr->exec_start;
- if (unlikely((s64)delta_exec <= 0))
- return;
- curr->exec_start = now;
- if (schedstat_enabled()) {
- struct sched_statistics *stats;
- stats = __schedstats_from_se(curr);
- __schedstat_set(stats->exec_max,
- max(delta_exec, stats->exec_max));
- }
- curr->sum_exec_runtime += delta_exec;
- schedstat_add(cfs_rq->exec_clock, delta_exec);
- curr->vruntime += calc_delta_fair(delta_exec, curr);
- update_min_vruntime(cfs_rq);
- if (entity_is_task(curr)) {
- struct task_struct *curtask = task_of(curr);
- trace_sched_stat_runtime(curtask, delta_exec, curr->vruntime);
- cgroup_account_cputime(curtask, delta_exec);
- account_group_exec_runtime(curtask, delta_exec);
- }
- account_cfs_rq_runtime(cfs_rq, delta_exec);
- }
- static void update_curr_fair(struct rq *rq)
- {
- update_curr(cfs_rq_of(&rq->curr->se));
- }
- static inline void
- update_stats_wait_start_fair(struct cfs_rq *cfs_rq, struct sched_entity *se)
- {
- struct sched_statistics *stats;
- struct task_struct *p = NULL;
- if (!schedstat_enabled())
- return;
- stats = __schedstats_from_se(se);
- if (entity_is_task(se))
- p = task_of(se);
- __update_stats_wait_start(rq_of(cfs_rq), p, stats);
- }
- static inline void
- update_stats_wait_end_fair(struct cfs_rq *cfs_rq, struct sched_entity *se)
- {
- struct sched_statistics *stats;
- struct task_struct *p = NULL;
- if (!schedstat_enabled())
- return;
- stats = __schedstats_from_se(se);
- /*
- * When sched_schedstat changes from 0 to 1, some sched entities may
- * already be on the runqueue with se->statistics.wait_start == 0,
- * which would make the computed delta wrong. We need to avoid this
- * scenario.
- */
- if (unlikely(!schedstat_val(stats->wait_start)))
- return;
- if (entity_is_task(se))
- p = task_of(se);
- __update_stats_wait_end(rq_of(cfs_rq), p, stats);
- }
- static inline void
- update_stats_enqueue_sleeper_fair(struct cfs_rq *cfs_rq, struct sched_entity *se)
- {
- struct sched_statistics *stats;
- struct task_struct *tsk = NULL;
- if (!schedstat_enabled())
- return;
- stats = __schedstats_from_se(se);
- if (entity_is_task(se))
- tsk = task_of(se);
- __update_stats_enqueue_sleeper(rq_of(cfs_rq), tsk, stats);
- }
- /*
- * Task is being enqueued - update stats:
- */
- static inline void
- update_stats_enqueue_fair(struct cfs_rq *cfs_rq, struct sched_entity *se, int flags)
- {
- if (!schedstat_enabled())
- return;
- /*
- * Are we enqueueing a waiting task? (for current tasks
- * a dequeue/enqueue event is a NOP)
- */
- if (se != cfs_rq->curr)
- update_stats_wait_start_fair(cfs_rq, se);
- if (flags & ENQUEUE_WAKEUP)
- update_stats_enqueue_sleeper_fair(cfs_rq, se);
- }
- static inline void
- update_stats_dequeue_fair(struct cfs_rq *cfs_rq, struct sched_entity *se, int flags)
- {
- if (!schedstat_enabled())
- return;
- /*
- * Mark the end of the wait period if dequeueing a
- * waiting task:
- */
- if (se != cfs_rq->curr)
- update_stats_wait_end_fair(cfs_rq, se);
- if ((flags & DEQUEUE_SLEEP) && entity_is_task(se)) {
- struct task_struct *tsk = task_of(se);
- unsigned int state;
- /* XXX racy against TTWU */
- state = READ_ONCE(tsk->__state);
- if (state & TASK_INTERRUPTIBLE)
- __schedstat_set(tsk->stats.sleep_start,
- rq_clock(rq_of(cfs_rq)));
- if (state & TASK_UNINTERRUPTIBLE)
- __schedstat_set(tsk->stats.block_start,
- rq_clock(rq_of(cfs_rq)));
- }
- }
- /*
- * We are picking a new current task - update its stats:
- */
- static inline void
- update_stats_curr_start(struct cfs_rq *cfs_rq, struct sched_entity *se)
- {
- /*
- * We are starting a new run period:
- */
- se->exec_start = rq_clock_task(rq_of(cfs_rq));
- }
- /**************************************************
- * Scheduling class queueing methods:
- */
- #ifdef CONFIG_NUMA
- #define NUMA_IMBALANCE_MIN 2
- static inline long
- adjust_numa_imbalance(int imbalance, int dst_running, int imb_numa_nr)
- {
- /*
- * Allow a NUMA imbalance if the number of busy CPUs is less than the maximum
- * threshold. Above this threshold, individual tasks may be contending
- * for both memory bandwidth and any shared HT resources. This is an
- * approximation as the number of running tasks may not be related to
- * the number of busy CPUs due to sched_setaffinity.
- */
- if (dst_running > imb_numa_nr)
- return imbalance;
- /*
- * Allow a small imbalance based on a simple pair of communicating
- * tasks that remain local when the destination is lightly loaded.
- */
- if (imbalance <= NUMA_IMBALANCE_MIN)
- return 0;
- return imbalance;
- }
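The effect is that while the destination node has no more than imb_numa_nr busy CPUs, an imbalance of up to NUMA_IMBALANCE_MIN (2) tasks is reported as zero, so a pair of communicating tasks is allowed to stay on one node. A standalone sketch with made-up inputs:

#include <stdio.h>

#define NUMA_IMBALANCE_MIN 2

static long adjust_numa_imbalance(int imbalance, int dst_running, int imb_numa_nr)
{
	if (dst_running > imb_numa_nr)
		return imbalance;
	if (imbalance <= NUMA_IMBALANCE_MIN)
		return 0;
	return imbalance;
}

int main(void)
{
	/* imb_numa_nr = 4 for this illustration */
	printf("%ld\n", adjust_numa_imbalance(2, 3, 4)); /* 0: small imbalance tolerated */
	printf("%ld\n", adjust_numa_imbalance(2, 5, 4)); /* 2: destination already busy  */
	printf("%ld\n", adjust_numa_imbalance(3, 3, 4)); /* 3: above NUMA_IMBALANCE_MIN  */
	return 0;
}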
- #endif /* CONFIG_NUMA */
- #ifdef CONFIG_NUMA_BALANCING
- /*
- * Approximate time to scan a full NUMA task in ms. The task scan period is
- * calculated based on the task's virtual memory size and
- * numa_balancing_scan_size.
- */
- unsigned int sysctl_numa_balancing_scan_period_min = 1000;
- unsigned int sysctl_numa_balancing_scan_period_max = 60000;
- /* Portion of address space to scan in MB */
- unsigned int sysctl_numa_balancing_scan_size = 256;
- /* Scan @scan_size MB every @scan_period after an initial @scan_delay in ms */
- unsigned int sysctl_numa_balancing_scan_delay = 1000;
- /* A page with hint page fault latency below the threshold (in ms) is considered hot */
- unsigned int sysctl_numa_balancing_hot_threshold = MSEC_PER_SEC;
- /* Restrict the NUMA promotion throughput (MB/s) for each target node. */
- unsigned int sysctl_numa_balancing_promote_rate_limit = 65536;
- struct numa_group {
- refcount_t refcount;
- spinlock_t lock; /* nr_tasks, tasks */
- int nr_tasks;
- pid_t gid;
- int active_nodes;
- struct rcu_head rcu;
- unsigned long total_faults;
- unsigned long max_faults_cpu;
- /*
- * faults[] array is split into two regions: faults_mem and faults_cpu.
- *
- * Faults_cpu is used to decide whether memory should move
- * towards the CPU. As a consequence, these stats are weighted
- * more by CPU use than by memory faults.
- */
- unsigned long faults[];
- };
- /*
- * For functions that can be called in multiple contexts that permit reading
- * ->numa_group (see struct task_struct for locking rules).
- */
- static struct numa_group *deref_task_numa_group(struct task_struct *p)
- {
- return rcu_dereference_check(p->numa_group, p == current ||
- (lockdep_is_held(__rq_lockp(task_rq(p))) && !READ_ONCE(p->on_cpu)));
- }
- static struct numa_group *deref_curr_numa_group(struct task_struct *p)
- {
- return rcu_dereference_protected(p->numa_group, p == current);
- }
- static inline unsigned long group_faults_priv(struct numa_group *ng);
- static inline unsigned long group_faults_shared(struct numa_group *ng);
- static unsigned int task_nr_scan_windows(struct task_struct *p)
- {
- unsigned long rss = 0;
- unsigned long nr_scan_pages;
- /*
- * Calculations based on RSS as non-present and empty pages are skipped
- * by the PTE scanner and NUMA hinting faults should be trapped based
- * on resident pages
- */
- nr_scan_pages = sysctl_numa_balancing_scan_size << (20 - PAGE_SHIFT);
- rss = get_mm_rss(p->mm);
- if (!rss)
- rss = nr_scan_pages;
- rss = round_up(rss, nr_scan_pages);
- return rss / nr_scan_pages;
- }
- /* For sanity's sake, never scan more PTEs than MAX_SCAN_WINDOW MB/sec. */
- #define MAX_SCAN_WINDOW 2560
- static unsigned int task_scan_min(struct task_struct *p)
- {
- unsigned int scan_size = READ_ONCE(sysctl_numa_balancing_scan_size);
- unsigned int scan, floor;
- unsigned int windows = 1;
- if (scan_size < MAX_SCAN_WINDOW)
- windows = MAX_SCAN_WINDOW / scan_size;
- floor = 1000 / windows;
- scan = sysctl_numa_balancing_scan_period_min / task_nr_scan_windows(p);
- return max_t(unsigned int, floor, scan);
- }
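Putting the two helpers together with the defaults (256MB scan size, 1000ms minimum period, MAX_SCAN_WINDOW of 2560MB/s): a task with 2.5GB resident is split into 10 scan windows, so its minimum scan period becomes 1000/10 = 100ms, which also happens to be the floor imposed by MAX_SCAN_WINDOW. A standalone sketch of the arithmetic, working directly in MB with made-up RSS:

#include <stdio.h>

#define MAX_SCAN_WINDOW 2560 /* MB/sec */

int main(void)
{
	unsigned int scan_size_mb = 256;          /* sysctl_numa_balancing_scan_size        */
	unsigned int scan_period_min_ms = 1000;   /* sysctl_numa_balancing_scan_period_min  */
	unsigned long rss_mb = 2560;              /* 2.5GB resident set, illustrative value */

	unsigned long windows = (rss_mb + scan_size_mb - 1) / scan_size_mb;  /* round up */
	if (!windows)
		windows = 1;

	unsigned int floor_ms = 1000 / (MAX_SCAN_WINDOW / scan_size_mb);
	unsigned int scan_ms = scan_period_min_ms / windows;

	printf("windows=%lu scan=%ums floor=%ums -> min period=%ums\n",
	       windows, scan_ms, floor_ms, scan_ms > floor_ms ? scan_ms : floor_ms);
	return 0;
}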
- static unsigned int task_scan_start(struct task_struct *p)
- {
- unsigned long smin = task_scan_min(p);
- unsigned long period = smin;
- struct numa_group *ng;
- /* Scale the maximum scan period with the amount of shared memory. */
- rcu_read_lock();
- ng = rcu_dereference(p->numa_group);
- if (ng) {
- unsigned long shared = group_faults_shared(ng);
- unsigned long private = group_faults_priv(ng);
- period *= refcount_read(&ng->refcount);
- period *= shared + 1;
- period /= private + shared + 1;
- }
- rcu_read_unlock();
- return max(smin, period);
- }
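The scaling above stretches a task's minimum scan period by the amount of sharing in its numa_group: period = smin * refcount * (shared + 1) / (private + shared + 1). A quick standalone check of that arithmetic (all group values are made-up inputs, not kernel data):

#include <stdio.h>

int main(void)
{
	unsigned long smin = 100;        /* ms, task_scan_min() result */
	unsigned long refcount = 4;      /* tasks in the numa_group    */
	unsigned long shared = 300;      /* group_faults_shared()      */
	unsigned long private = 100;     /* group_faults_priv()        */

	unsigned long period = smin;
	period *= refcount;
	period *= shared + 1;
	period /= private + shared + 1;

	/* 100 * 4 * 301 / 401 = 300: heavy sharing slows scanning down */
	printf("scan period = %lums\n", period > smin ? period : smin);
	return 0;
}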
- static unsigned int task_scan_max(struct task_struct *p)
- {
- unsigned long smin = task_scan_min(p);
- unsigned long smax;
- struct numa_group *ng;
- /* Watch for min being lower than max due to floor calculations */
- smax = sysctl_numa_balancing_scan_period_max / task_nr_scan_windows(p);
- /* Scale the maximum scan period with the amount of shared memory. */
- ng = deref_curr_numa_group(p);
- if (ng) {
- unsigned long shared = group_faults_shared(ng);
- unsigned long private = group_faults_priv(ng);
- unsigned long period = smax;
- period *= refcount_read(&ng->refcount);
- period *= shared + 1;
- period /= private + shared + 1;
- smax = max(smax, period);
- }
- return max(smin, smax);
- }
- static void account_numa_enqueue(struct rq *rq, struct task_struct *p)
- {
- rq->nr_numa_running += (p->numa_preferred_nid != NUMA_NO_NODE);
- rq->nr_preferred_running += (p->numa_preferred_nid == task_node(p));
- }
- static void account_numa_dequeue(struct rq *rq, struct task_struct *p)
- {
- rq->nr_numa_running -= (p->numa_preferred_nid != NUMA_NO_NODE);
- rq->nr_preferred_running -= (p->numa_preferred_nid == task_node(p));
- }
- /* Shared or private faults. */
- #define NR_NUMA_HINT_FAULT_TYPES 2
- /* Memory and CPU locality */
- #define NR_NUMA_HINT_FAULT_STATS (NR_NUMA_HINT_FAULT_TYPES * 2)
- /* Averaged statistics, and temporary buffers. */
- #define NR_NUMA_HINT_FAULT_BUCKETS (NR_NUMA_HINT_FAULT_STATS * 2)
- pid_t task_numa_group_id(struct task_struct *p)
- {
- struct numa_group *ng;
- pid_t gid = 0;
- rcu_read_lock();
- ng = rcu_dereference(p->numa_group);
- if (ng)
- gid = ng->gid;
- rcu_read_unlock();
- return gid;
- }
- /*
- * The averaged statistics, shared & private, memory & CPU,
- * occupy the first half of the array. The second half of the
- * array is for current counters, which are averaged into the
- * first set by task_numa_placement.
- */
- static inline int task_faults_idx(enum numa_faults_stats s, int nid, int priv)
- {
- return NR_NUMA_HINT_FAULT_TYPES * (s * nr_node_ids + nid) + priv;
- }
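So the faults array is laid out as [stat][node][priv]: for each statistic, every node gets a shared slot (priv = 0) followed by a private slot (priv = 1), with the averaged statistics in the first half of the array and the per-window buffers in the second. A standalone sketch of the layout for a 2-node machine, assuming the NUMA_MEM/NUMA_CPU/NUMA_MEMBUF/NUMA_CPUBUF ordering of enum numa_faults_stats:

#include <stdio.h>

enum numa_faults_stats { NUMA_MEM = 0, NUMA_CPU, NUMA_MEMBUF, NUMA_CPUBUF };
#define NR_NUMA_HINT_FAULT_TYPES 2

static int nr_node_ids = 2;

static int task_faults_idx(enum numa_faults_stats s, int nid, int priv)
{
	return NR_NUMA_HINT_FAULT_TYPES * (s * nr_node_ids + nid) + priv;
}

int main(void)
{
	static const char *names[] = { "MEM   ", "CPU   ", "MEMBUF", "CPUBUF" };

	for (int s = NUMA_MEM; s <= NUMA_CPUBUF; s++)
		for (int nid = 0; nid < nr_node_ids; nid++)
			for (int priv = 0; priv < 2; priv++)
				printf("faults[%2d] = %s node%d %s\n",
				       task_faults_idx(s, nid, priv),
				       names[s], nid, priv ? "private" : "shared");
	return 0;
}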
- static inline unsigned long task_faults(struct task_struct *p, int nid)
- {
- if (!p->numa_faults)
- return 0;
- return p->numa_faults[task_faults_idx(NUMA_MEM, nid, 0)] +
- p->numa_faults[task_faults_idx(NUMA_MEM, nid, 1)];
- }
- static inline unsigned long group_faults(struct task_struct *p, int nid)
- {
- struct numa_group *ng = deref_task_numa_group(p);
- if (!ng)
- return 0;
- return ng->faults[task_faults_idx(NUMA_MEM, nid, 0)] +
- ng->faults[task_faults_idx(NUMA_MEM, nid, 1)];
- }
- static inline unsigned long group_faults_cpu(struct numa_group *group, int nid)
- {
- return group->faults[task_faults_idx(NUMA_CPU, nid, 0)] +
- group->faults[task_faults_idx(NUMA_CPU, nid, 1)];
- }
- static inline unsigned long group_faults_priv(struct numa_group *ng)
- {
- unsigned long faults = 0;
- int node;
- for_each_online_node(node) {
- faults += ng->faults[task_faults_idx(NUMA_MEM, node, 1)];
- }
- return faults;
- }
- static inline unsigned long group_faults_shared(struct numa_group *ng)
- {
- unsigned long faults = 0;
- int node;
- for_each_online_node(node) {
- faults += ng->faults[task_faults_idx(NUMA_MEM, node, 0)];
- }
- return faults;
- }
- /*
- * A node triggering more than 1/3 as many NUMA faults as the maximum is
- * considered part of a numa group's pseudo-interleaving set. Migrations
- * between these nodes are slowed down, to allow things to settle down.
- */
- #define ACTIVE_NODE_FRACTION 3
- static bool numa_is_active_node(int nid, struct numa_group *ng)
- {
- return group_faults_cpu(ng, nid) * ACTIVE_NODE_FRACTION > ng->max_faults_cpu;
- }
- /* Handle placement on systems where not all nodes are directly connected. */
- static unsigned long score_nearby_nodes(struct task_struct *p, int nid,
- int lim_dist, bool task)
- {
- unsigned long score = 0;
- int node, max_dist;
- /*
- * All nodes are directly connected, and the same distance
- * from each other. No need for fancy placement algorithms.
- */
- if (sched_numa_topology_type == NUMA_DIRECT)
- return 0;
- /* sched_max_numa_distance may be changed in parallel. */
- max_dist = READ_ONCE(sched_max_numa_distance);
- /*
- * This code is called for each node, introducing N^2 complexity,
- * which should be ok given the number of nodes rarely exceeds 8.
- */
- for_each_online_node(node) {
- unsigned long faults;
- int dist = node_distance(nid, node);
- /*
- * The furthest away nodes in the system are not interesting
- * for placement; nid was already counted.
- */
- if (dist >= max_dist || node == nid)
- continue;
- /*
- * On systems with a backplane NUMA topology, compare groups
- * of nodes, and move tasks towards the group with the most
- * memory accesses. When comparing two nodes at distance
- * "hoplimit", only nodes closer by than "hoplimit" are part
- * of each group. Skip other nodes.
- */
- if (sched_numa_topology_type == NUMA_BACKPLANE && dist >= lim_dist)
- continue;
- /* Add up the faults from nearby nodes. */
- if (task)
- faults = task_faults(p, node);
- else
- faults = group_faults(p, node);
- /*
- * On systems with a glueless mesh NUMA topology, there are
- * no fixed "groups of nodes". Instead, nodes that are not
- * directly connected bounce traffic through intermediate
- * nodes; a numa_group can occupy any set of nodes.
- * The further away a node is, the less the faults count.
- * This seems to result in good task placement.
- */
- if (sched_numa_topology_type == NUMA_GLUELESS_MESH) {
- faults *= (max_dist - dist);
- faults /= (max_dist - LOCAL_DISTANCE);
- }
- score += faults;
- }
- return score;
- }
- /*
- * These return the fraction of accesses done by a particular task, or
- * task group, on a particular numa node. The group weight is given a
- * larger multiplier, in order to group tasks together that are almost
- * evenly spread out between numa nodes.
- */
- static inline unsigned long task_weight(struct task_struct *p, int nid,
- int dist)
- {
- unsigned long faults, total_faults;
- if (!p->numa_faults)
- return 0;
- total_faults = p->total_numa_faults;
- if (!total_faults)
- return 0;
- faults = task_faults(p, nid);
- faults += score_nearby_nodes(p, nid, dist, true);
- return 1000 * faults / total_faults;
- }
- static inline unsigned long group_weight(struct task_struct *p, int nid,
- int dist)
- {
- struct numa_group *ng = deref_task_numa_group(p);
- unsigned long faults, total_faults;
- if (!ng)
- return 0;
- total_faults = ng->total_faults;
- if (!total_faults)
- return 0;
- faults = group_faults(p, nid);
- faults += score_nearby_nodes(p, nid, dist, false);
- return 1000 * faults / total_faults;
- }
- /*
- * If memory tiering mode is enabled, cpupid of slow memory page is
- * used to record scan time instead of CPU and PID. When tiering mode
- * is disabled at run time, the scan time (in cpupid) will be
- * interpreted as CPU and PID. So the CPU needs to be checked to avoid
- * out-of-bounds array access.
- */
- static inline bool cpupid_valid(int cpupid)
- {
- return cpupid_to_cpu(cpupid) < nr_cpu_ids;
- }
- /*
- * For memory tiering mode, if there are enough free pages (more than the
- * watermark defined here) in the fast memory node, then to take full
- * advantage of fast memory capacity, all recently accessed slow
- * memory pages will be migrated to the fast memory node without
- * considering the hot threshold.
- */
- static bool pgdat_free_space_enough(struct pglist_data *pgdat)
- {
- int z;
- unsigned long enough_wmark;
- enough_wmark = max(1UL * 1024 * 1024 * 1024 >> PAGE_SHIFT,
- pgdat->node_present_pages >> 4);
- for (z = pgdat->nr_zones - 1; z >= 0; z--) {
- struct zone *zone = pgdat->node_zones + z;
- if (!populated_zone(zone))
- continue;
- if (zone_watermark_ok(zone, 0,
- wmark_pages(zone, WMARK_PROMO) + enough_wmark,
- ZONE_MOVABLE, 0))
- return true;
- }
- return false;
- }
- /*
- * For memory tiering mode, when page tables are scanned, the scan
- * time will be recorded in struct page in addition to making the page
- * PROT_NONE for slow memory pages. So when the page is accessed, in
- * hint page fault handler, the hint page fault latency is calculated
- * via,
- *
- * hint page fault latency = hint page fault time - scan time
- *
- * The smaller the hint page fault latency, the higher the possibility
- * for the page to be hot.
- */
- static int numa_hint_fault_latency(struct page *page)
- {
- int last_time, time;
- time = jiffies_to_msecs(jiffies);
- last_time = xchg_page_access_time(page, time);
- return (time - last_time) & PAGE_ACCESS_TIME_MASK;
- }
- /*
- * For memory tiering mode, too high promotion/demotion throughput may
- * hurt application latency. So we provide a mechanism to rate limit
- * the number of pages that we try to promote.
- */
- static bool numa_promotion_rate_limit(struct pglist_data *pgdat,
- unsigned long rate_limit, int nr)
- {
- unsigned long nr_cand;
- unsigned int now, start;
- now = jiffies_to_msecs(jiffies);
- mod_node_page_state(pgdat, PGPROMOTE_CANDIDATE, nr);
- nr_cand = node_page_state(pgdat, PGPROMOTE_CANDIDATE);
- start = pgdat->nbp_rl_start;
- if (now - start > MSEC_PER_SEC &&
- cmpxchg(&pgdat->nbp_rl_start, start, now) == start)
- pgdat->nbp_rl_nr_cand = nr_cand;
- if (nr_cand - pgdat->nbp_rl_nr_cand >= rate_limit)
- return true;
- return false;
- }
- #define NUMA_MIGRATION_ADJUST_STEPS 16
- static void numa_promotion_adjust_threshold(struct pglist_data *pgdat,
- unsigned long rate_limit,
- unsigned int ref_th)
- {
- unsigned int now, start, th_period, unit_th, th;
- unsigned long nr_cand, ref_cand, diff_cand;
- now = jiffies_to_msecs(jiffies);
- th_period = sysctl_numa_balancing_scan_period_max;
- start = pgdat->nbp_th_start;
- if (now - start > th_period &&
- cmpxchg(&pgdat->nbp_th_start, start, now) == start) {
- ref_cand = rate_limit *
- sysctl_numa_balancing_scan_period_max / MSEC_PER_SEC;
- nr_cand = node_page_state(pgdat, PGPROMOTE_CANDIDATE);
- diff_cand = nr_cand - pgdat->nbp_th_nr_cand;
- unit_th = ref_th * 2 / NUMA_MIGRATION_ADJUST_STEPS;
- th = pgdat->nbp_threshold ? : ref_th;
- if (diff_cand > ref_cand * 11 / 10)
- th = max(th - unit_th, unit_th);
- else if (diff_cand < ref_cand * 9 / 10)
- th = min(th + unit_th, ref_th * 2);
- pgdat->nbp_th_nr_cand = nr_cand;
- pgdat->nbp_threshold = th;
- }
- }
- bool should_numa_migrate_memory(struct task_struct *p, struct page * page,
- int src_nid, int dst_cpu)
- {
- struct numa_group *ng = deref_curr_numa_group(p);
- int dst_nid = cpu_to_node(dst_cpu);
- int last_cpupid, this_cpupid;
- /*
- * The pages in slow memory node should be migrated according
- * to hot/cold instead of private/shared.
- */
- if (sysctl_numa_balancing_mode & NUMA_BALANCING_MEMORY_TIERING &&
- !node_is_toptier(src_nid)) {
- struct pglist_data *pgdat;
- unsigned long rate_limit;
- unsigned int latency, th, def_th;
- pgdat = NODE_DATA(dst_nid);
- if (pgdat_free_space_enough(pgdat)) {
- /* workload changed, reset hot threshold */
- pgdat->nbp_threshold = 0;
- return true;
- }
- def_th = sysctl_numa_balancing_hot_threshold;
- rate_limit = sysctl_numa_balancing_promote_rate_limit << \
- (20 - PAGE_SHIFT);
- numa_promotion_adjust_threshold(pgdat, rate_limit, def_th);
- th = pgdat->nbp_threshold ? : def_th;
- latency = numa_hint_fault_latency(page);
- if (latency >= th)
- return false;
- return !numa_promotion_rate_limit(pgdat, rate_limit,
- thp_nr_pages(page));
- }
- this_cpupid = cpu_pid_to_cpupid(dst_cpu, current->pid);
- last_cpupid = page_cpupid_xchg_last(page, this_cpupid);
- if (!(sysctl_numa_balancing_mode & NUMA_BALANCING_MEMORY_TIERING) &&
- !node_is_toptier(src_nid) && !cpupid_valid(last_cpupid))
- return false;
- /*
- * Allow first faults or private faults to migrate immediately early in
- * the lifetime of a task. The magic number 4 is based on waiting for
- * two full passes of the "multi-stage node selection" test that is
- * executed below.
- */
- if ((p->numa_preferred_nid == NUMA_NO_NODE || p->numa_scan_seq <= 4) &&
- (cpupid_pid_unset(last_cpupid) || cpupid_match_pid(p, last_cpupid)))
- return true;
- /*
- * Multi-stage node selection is used in conjunction with a periodic
- * migration fault to build a temporal task<->page relation. By using
- * a two-stage filter we remove short/unlikely relations.
- *
- * Using P(p) ~ n_p / n_t as per frequentist probability, we can equate
- * a task's usage of a particular page (n_p) per total usage of this
- * page (n_t) (in a given time-span) to a probability.
- *
- * Our periodic faults will sample this probability and getting the
- * same result twice in a row, given these samples are fully
- * independent, is then given by P(n)^2, provided our sample period
- * is sufficiently short compared to the usage pattern.
- *
- * This quadratic squishes small probabilities, making it less likely we
- * act on an unlikely task<->page relation.
- */
- if (!cpupid_pid_unset(last_cpupid) &&
- cpupid_to_nid(last_cpupid) != dst_nid)
- return false;
- /* Always allow migrate on private faults */
- if (cpupid_match_pid(p, last_cpupid))
- return true;
- /* A shared fault, but p->numa_group has not been set up yet. */
- if (!ng)
- return true;
- /*
- * Destination node is much more heavily used than the source
- * node? Allow migration.
- */
- if (group_faults_cpu(ng, dst_nid) > group_faults_cpu(ng, src_nid) *
- ACTIVE_NODE_FRACTION)
- return true;
- /*
- * Distribute memory according to CPU & memory use on each node,
- * with 3/4 hysteresis to avoid unnecessary memory migrations:
- *
- * faults_cpu(dst) 3 faults_cpu(src)
- * --------------- * - > ---------------
- * faults_mem(dst) 4 faults_mem(src)
- */
- return group_faults_cpu(ng, dst_nid) * group_faults(p, src_nid) * 3 >
- group_faults_cpu(ng, src_nid) * group_faults(p, dst_nid) * 4;
- }
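The final comparison implements the 3/4 hysteresis from the comment: cross-multiplied so no division is needed, the destination node's CPU-use-to-memory-fault ratio must beat the source's by more than a third before the migration is allowed. A standalone sketch with made-up fault counts:

#include <stdio.h>

/* Allow migration iff (cpu_dst / mem_dst) * 3/4 > (cpu_src / mem_src), cross-multiplied. */
static int allow_migration(unsigned long cpu_dst, unsigned long mem_dst,
			   unsigned long cpu_src, unsigned long mem_src)
{
	return cpu_dst * mem_src * 3 > cpu_src * mem_dst * 4;
}

int main(void)
{
	/* dst ratio 2.0 vs src ratio 1.0: 2.0 * 3/4 = 1.5 > 1.0, so migrate */
	printf("%d\n", allow_migration(200, 100, 100, 100)); /* 1 */
	/* dst ratio 1.2 vs src ratio 1.0: 1.2 * 3/4 = 0.9, not enough of a win */
	printf("%d\n", allow_migration(120, 100, 100, 100)); /* 0 */
	return 0;
}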
- /*
- * 'numa_type' describes the node at the moment of load balancing.
- */
- enum numa_type {
- /* The node has spare capacity that can be used to run more tasks. */
- node_has_spare = 0,
- /*
- * The node is fully used and the tasks don't compete for more CPU
- * cycles. Nevertheless, some tasks might wait before running.
- */
- node_fully_busy,
- /*
- * The node is overloaded and can't provide expected CPU cycles to all
- * tasks.
- */
- node_overloaded
- };
- /* Cached statistics for all CPUs within a node */
- struct numa_stats {
- unsigned long load;
- unsigned long runnable;
- unsigned long util;
- /* Total compute capacity of CPUs on a node */
- unsigned long compute_capacity;
- unsigned int nr_running;
- unsigned int weight;
- enum numa_type node_type;
- int idle_cpu;
- };
- static inline bool is_core_idle(int cpu)
- {
- #ifdef CONFIG_SCHED_SMT
- int sibling;
- for_each_cpu(sibling, cpu_smt_mask(cpu)) {
- if (cpu == sibling)
- continue;
- if (!idle_cpu(sibling))
- return false;
- }
- #endif
- return true;
- }
- struct task_numa_env {
- struct task_struct *p;
- int src_cpu, src_nid;
- int dst_cpu, dst_nid;
- int imb_numa_nr;
- struct numa_stats src_stats, dst_stats;
- int imbalance_pct;
- int dist;
- struct task_struct *best_task;
- long best_imp;
- int best_cpu;
- };
- static unsigned long cpu_load(struct rq *rq);
- static unsigned long cpu_runnable(struct rq *rq);
- static inline enum
- numa_type numa_classify(unsigned int imbalance_pct,
- struct numa_stats *ns)
- {
- if ((ns->nr_running > ns->weight) &&
- (((ns->compute_capacity * 100) < (ns->util * imbalance_pct)) ||
- ((ns->compute_capacity * imbalance_pct) < (ns->runnable * 100))))
- return node_overloaded;
- if ((ns->nr_running < ns->weight) ||
- (((ns->compute_capacity * 100) > (ns->util * imbalance_pct)) &&
- ((ns->compute_capacity * imbalance_pct) > (ns->runnable * 100))))
- return node_has_spare;
- return node_fully_busy;
- }
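numa_classify() reduces to three buckets: a node with more runnable tasks than CPUs whose utilization or runnable load exceeds its capacity by the imbalance_pct margin is overloaded, a node with idle CPUs or comfortable headroom has spare capacity, and everything else is fully busy. A standalone sketch with made-up per-node numbers (imbalance_pct 112, i.e. a ~12% margin):

#include <stdio.h>

struct ns { unsigned long util, runnable, capacity; unsigned int nr_running, weight; };

static const char *classify(unsigned int imbalance_pct, const struct ns *ns)
{
	if (ns->nr_running > ns->weight &&
	    ((ns->capacity * 100 < ns->util * imbalance_pct) ||
	     (ns->capacity * imbalance_pct < ns->runnable * 100)))
		return "overloaded";

	if (ns->nr_running < ns->weight ||
	    ((ns->capacity * 100 > ns->util * imbalance_pct) &&
	     (ns->capacity * imbalance_pct > ns->runnable * 100)))
		return "has spare";

	return "fully busy";
}

int main(void)
{
	struct ns spare = { .util = 1000, .runnable = 1200, .capacity = 4096, .nr_running = 3, .weight = 4 };
	struct ns over  = { .util = 4000, .runnable = 5000, .capacity = 4096, .nr_running = 6, .weight = 4 };

	printf("%s\n", classify(112, &spare)); /* has spare  */
	printf("%s\n", classify(112, &over));  /* overloaded */
	return 0;
}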
- #ifdef CONFIG_SCHED_SMT
- /* Forward declarations of select_idle_sibling helpers */
- static inline bool test_idle_cores(int cpu);
- static inline int numa_idle_core(int idle_core, int cpu)
- {
- if (!static_branch_likely(&sched_smt_present) ||
- idle_core >= 0 || !test_idle_cores(cpu))
- return idle_core;
- /*
- * Prefer cores instead of packing HT siblings
- * and triggering future load balancing.
- */
- if (is_core_idle(cpu))
- idle_core = cpu;
- return idle_core;
- }
- #else
- static inline int numa_idle_core(int idle_core, int cpu)
- {
- return idle_core;
- }
- #endif
- /*
- * Gather all necessary information to make NUMA balancing placement
- * decisions that are compatible with the standard load balancer. This
- * borrows code and logic from update_sg_lb_stats but sharing a
- * common implementation is impractical.
- */
- static void update_numa_stats(struct task_numa_env *env,
- struct numa_stats *ns, int nid,
- bool find_idle)
- {
- int cpu, idle_core = -1;
- memset(ns, 0, sizeof(*ns));
- ns->idle_cpu = -1;
- rcu_read_lock();
- for_each_cpu(cpu, cpumask_of_node(nid)) {
- struct rq *rq = cpu_rq(cpu);
- ns->load += cpu_load(rq);
- ns->runnable += cpu_runnable(rq);
- ns->util += cpu_util_cfs(cpu);
- ns->nr_running += rq->cfs.h_nr_running;
- ns->compute_capacity += capacity_of(cpu);
- if (find_idle && !rq->nr_running && idle_cpu(cpu)) {
- if (READ_ONCE(rq->numa_migrate_on) ||
- !cpumask_test_cpu(cpu, env->p->cpus_ptr))
- continue;
- if (ns->idle_cpu == -1)
- ns->idle_cpu = cpu;
- idle_core = numa_idle_core(idle_core, cpu);
- }
- }
- rcu_read_unlock();
- ns->weight = cpumask_weight(cpumask_of_node(nid));
- ns->node_type = numa_classify(env->imbalance_pct, ns);
- if (idle_core >= 0)
- ns->idle_cpu = idle_core;
- }
- static void task_numa_assign(struct task_numa_env *env,
- struct task_struct *p, long imp)
- {
- struct rq *rq = cpu_rq(env->dst_cpu);
- /* Check if the run-queue is part of an active NUMA balance. */
- if (env->best_cpu != env->dst_cpu && xchg(&rq->numa_migrate_on, 1)) {
- int cpu;
- int start = env->dst_cpu;
- /* Find alternative idle CPU. */
- for_each_cpu_wrap(cpu, cpumask_of_node(env->dst_nid), start) {
- if (cpu == env->best_cpu || !idle_cpu(cpu) ||
- !cpumask_test_cpu(cpu, env->p->cpus_ptr)) {
- continue;
- }
- env->dst_cpu = cpu;
- rq = cpu_rq(env->dst_cpu);
- if (!xchg(&rq->numa_migrate_on, 1))
- goto assign;
- }
- /* Failed to find an alternative idle CPU */
- return;
- }
- assign:
- /*
- * Clear previous best_cpu/rq numa-migrate flag, since task now
- * found a better CPU to move/swap.
- */
- if (env->best_cpu != -1 && env->best_cpu != env->dst_cpu) {
- rq = cpu_rq(env->best_cpu);
- WRITE_ONCE(rq->numa_migrate_on, 0);
- }
- if (env->best_task)
- put_task_struct(env->best_task);
- if (p)
- get_task_struct(p);
- env->best_task = p;
- env->best_imp = imp;
- env->best_cpu = env->dst_cpu;
- }
- static bool load_too_imbalanced(long src_load, long dst_load,
- struct task_numa_env *env)
- {
- long imb, old_imb;
- long orig_src_load, orig_dst_load;
- long src_capacity, dst_capacity;
- /*
- * The load is corrected for the CPU capacity available on each node.
- *
- * src_load dst_load
- * ------------ vs ---------
- * src_capacity dst_capacity
- */
- src_capacity = env->src_stats.compute_capacity;
- dst_capacity = env->dst_stats.compute_capacity;
- imb = abs(dst_load * src_capacity - src_load * dst_capacity);
- orig_src_load = env->src_stats.load;
- orig_dst_load = env->dst_stats.load;
- old_imb = abs(orig_dst_load * src_capacity - orig_src_load * dst_capacity);
- /* Would this change make things worse? */
- return (imb > old_imb);
- }
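The capacity correction is again a cross-multiplication: rather than dividing each node's load by its compute capacity, load_too_imbalanced() compares |dst_load * src_capacity - src_load * dst_capacity| before and after the proposed move and rejects the move if the difference grows. A standalone sketch with made-up loads and capacities:

#include <stdio.h>
#include <stdlib.h>

static long weighted_imb(long src_load, long dst_load, long src_cap, long dst_cap)
{
	return labs(dst_load * src_cap - src_load * dst_cap);
}

int main(void)
{
	long src_cap = 4096, dst_cap = 2048;    /* destination node has half the capacity */
	long src_load = 3000, dst_load = 1000;
	long task_load = 600;                   /* load of the task we would move         */

	long before = weighted_imb(src_load, dst_load, src_cap, dst_cap);
	long after  = weighted_imb(src_load - task_load, dst_load + task_load, src_cap, dst_cap);

	printf("before=%ld after=%ld -> %s\n", before, after,
	       after > before ? "too imbalanced, reject" : "ok, allow the move");
	return 0;
}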
- /*
- * Maximum NUMA importance can be 1998 (2*999);
- * SMALLIMP @ 30 would be close to 1998/64.
- * Used to deter task migration.
- */
- #define SMALLIMP 30
- /*
- * This checks if the overall compute and NUMA accesses of the system would
- * be improved if the source task was migrated to the target dst_cpu, taking
- * into account that it might be best if the task running on the dst_cpu is
- * exchanged with the source task.
- */
- static bool task_numa_compare(struct task_numa_env *env,
- long taskimp, long groupimp, bool maymove)
- {
- struct numa_group *cur_ng, *p_ng = deref_curr_numa_group(env->p);
- struct rq *dst_rq = cpu_rq(env->dst_cpu);
- long imp = p_ng ? groupimp : taskimp;
- struct task_struct *cur;
- long src_load, dst_load;
- int dist = env->dist;
- long moveimp = imp;
- long load;
- bool stopsearch = false;
- if (READ_ONCE(dst_rq->numa_migrate_on))
- return false;
- rcu_read_lock();
- cur = rcu_dereference(dst_rq->curr);
- if (cur && ((cur->flags & PF_EXITING) || is_idle_task(cur)))
- cur = NULL;
- /*
- * Because we have preemption enabled we can get migrated around and
- * end up trying to select ourselves (current == env->p) as a swap candidate.
- */
- if (cur == env->p) {
- stopsearch = true;
- goto unlock;
- }
- if (!cur) {
- if (maymove && moveimp >= env->best_imp)
- goto assign;
- else
- goto unlock;
- }
- /* Skip this swap candidate if it cannot move to the source CPU. */
- if (!cpumask_test_cpu(env->src_cpu, cur->cpus_ptr))
- goto unlock;
- /*
- * Skip this swap candidate if it is not moving to its preferred
- * node and the best task is.
- */
- if (env->best_task &&
- env->best_task->numa_preferred_nid == env->src_nid &&
- cur->numa_preferred_nid != env->src_nid) {
- goto unlock;
- }
- /*
- * "imp" is the fault differential for the source task between the
- * source and destination node. Calculate the total differential for
- * the source task and potential destination task. The more negative
- * the value is, the more remote accesses would be expected to be
- * incurred if the tasks were swapped.
- *
- * If dst and source tasks are in the same NUMA group, or not
- * in any group then look only at task weights.
- */
- cur_ng = rcu_dereference(cur->numa_group);
- if (cur_ng == p_ng) {
- /*
- * Do not swap within a group or between tasks that have
- * no group if there is spare capacity. Swapping does
- * not address the load imbalance and helps one task at
- * the cost of punishing another.
- */
- if (env->dst_stats.node_type == node_has_spare)
- goto unlock;
- imp = taskimp + task_weight(cur, env->src_nid, dist) -
- task_weight(cur, env->dst_nid, dist);
- /*
- * Add some hysteresis to prevent swapping the
- * tasks within a group over tiny differences.
- */
- if (cur_ng)
- imp -= imp / 16;
- } else {
- /*
- * Compare the group weights. If a task is all by itself
- * (not part of a group), use the task weight instead.
- */
- if (cur_ng && p_ng)
- imp += group_weight(cur, env->src_nid, dist) -
- group_weight(cur, env->dst_nid, dist);
- else
- imp += task_weight(cur, env->src_nid, dist) -
- task_weight(cur, env->dst_nid, dist);
- }
- /* Discourage picking a task already on its preferred node */
- if (cur->numa_preferred_nid == env->dst_nid)
- imp -= imp / 16;
- /*
- * Encourage picking a task that moves to its preferred node.
- * This potentially makes imp larger than its maximum of
- * 1998 (see SMALLIMP and task_weight for why) but in this
- * case, it does not matter.
- */
- if (cur->numa_preferred_nid == env->src_nid)
- imp += imp / 8;
- if (maymove && moveimp > imp && moveimp > env->best_imp) {
- imp = moveimp;
- cur = NULL;
- goto assign;
- }
- /*
- * Prefer swapping with a task moving to its preferred node over a
- * task that is not.
- */
- if (env->best_task && cur->numa_preferred_nid == env->src_nid &&
- env->best_task->numa_preferred_nid != env->src_nid) {
- goto assign;
- }
- /*
- * If the NUMA importance is less than SMALLIMP,
- * task migration might only result in ping pong
- * of tasks and also hurt performance due to cache
- * misses.
- */
- if (imp < SMALLIMP || imp <= env->best_imp + SMALLIMP / 2)
- goto unlock;
- /*
- * In the overloaded case, try and keep the load balanced.
- */
- load = task_h_load(env->p) - task_h_load(cur);
- if (!load)
- goto assign;
- dst_load = env->dst_stats.load + load;
- src_load = env->src_stats.load - load;
- if (load_too_imbalanced(src_load, dst_load, env))
- goto unlock;
- assign:
- /* Evaluate an idle CPU for a task numa move. */
- if (!cur) {
- int cpu = env->dst_stats.idle_cpu;
- /* Nothing cached so current CPU went idle since the search. */
- if (cpu < 0)
- cpu = env->dst_cpu;
- /*
- * If the CPU is no longer truly idle and the previous best CPU
- * is, keep using it.
- */
- if (!idle_cpu(cpu) && env->best_cpu >= 0 &&
- idle_cpu(env->best_cpu)) {
- cpu = env->best_cpu;
- }
- env->dst_cpu = cpu;
- }
- task_numa_assign(env, cur, imp);
- /*
- * If a move to idle is allowed because there is capacity or load
- * balance improves then stop the search. While a better swap
- * candidate may exist, a search is not free.
- */
- if (maymove && !cur && env->best_cpu >= 0 && idle_cpu(env->best_cpu))
- stopsearch = true;
- /*
- * If a swap candidate must be identified and the current best task
- * moves to its preferred node then stop the search.
- */
- if (!maymove && env->best_task &&
- env->best_task->numa_preferred_nid == env->src_nid) {
- stopsearch = true;
- }
- unlock:
- rcu_read_unlock();
- return stopsearch;
- }
- static void task_numa_find_cpu(struct task_numa_env *env,
- long taskimp, long groupimp)
- {
- bool maymove = false;
- int cpu;
- /*
- * If dst node has spare capacity, then check if there is an
- * imbalance that would be overruled by the load balancer.
- */
- if (env->dst_stats.node_type == node_has_spare) {
- unsigned int imbalance;
- int src_running, dst_running;
- /*
- * Would movement cause an imbalance? Note that if src has
- * more running tasks, the imbalance is ignored as the
- * move improves the imbalance from the perspective of the
- * CPU load balancer.
- */
- src_running = env->src_stats.nr_running - 1;
- dst_running = env->dst_stats.nr_running + 1;
- imbalance = max(0, dst_running - src_running);
- imbalance = adjust_numa_imbalance(imbalance, dst_running,
- env->imb_numa_nr);
- /* Use idle CPU if there is no imbalance */
- if (!imbalance) {
- maymove = true;
- if (env->dst_stats.idle_cpu >= 0) {
- env->dst_cpu = env->dst_stats.idle_cpu;
- task_numa_assign(env, NULL, 0);
- return;
- }
- }
- } else {
- long src_load, dst_load, load;
- /*
- * If the improvement from just moving env->p in this direction is better
- * than swapping tasks around, check if a move is possible.
- */
- load = task_h_load(env->p);
- dst_load = env->dst_stats.load + load;
- src_load = env->src_stats.load - load;
- maymove = !load_too_imbalanced(src_load, dst_load, env);
- }
- for_each_cpu(cpu, cpumask_of_node(env->dst_nid)) {
- /* Skip this CPU if the source task cannot migrate */
- if (!cpumask_test_cpu(cpu, env->p->cpus_ptr))
- continue;
- env->dst_cpu = cpu;
- if (task_numa_compare(env, taskimp, groupimp, maymove))
- break;
- }
- }
- static int task_numa_migrate(struct task_struct *p)
- {
- struct task_numa_env env = {
- .p = p,
- .src_cpu = task_cpu(p),
- .src_nid = task_node(p),
- .imbalance_pct = 112,
- .best_task = NULL,
- .best_imp = 0,
- .best_cpu = -1,
- };
- unsigned long taskweight, groupweight;
- struct sched_domain *sd;
- long taskimp, groupimp;
- struct numa_group *ng;
- struct rq *best_rq;
- int nid, ret, dist;
- /*
- * Pick the lowest SD_NUMA domain, as that would have the smallest
- * imbalance and would be the first to start moving tasks about.
- *
- * And we want to avoid any moving of tasks about, as that would create
- * random movement of tasks -- counter to the numa conditions we're trying
- * to satisfy here.
- */
- rcu_read_lock();
- sd = rcu_dereference(per_cpu(sd_numa, env.src_cpu));
- if (sd) {
- env.imbalance_pct = 100 + (sd->imbalance_pct - 100) / 2;
- env.imb_numa_nr = sd->imb_numa_nr;
- }
- rcu_read_unlock();
- /*
- * Cpusets can break the scheduler domain tree into smaller
- * balance domains, some of which do not cross NUMA boundaries.
- * Tasks that are "trapped" in such domains cannot be migrated
- * elsewhere, so there is no point in (re)trying.
- */
- if (unlikely(!sd)) {
- sched_setnuma(p, task_node(p));
- return -EINVAL;
- }
- env.dst_nid = p->numa_preferred_nid;
- dist = env.dist = node_distance(env.src_nid, env.dst_nid);
- taskweight = task_weight(p, env.src_nid, dist);
- groupweight = group_weight(p, env.src_nid, dist);
- update_numa_stats(&env, &env.src_stats, env.src_nid, false);
- taskimp = task_weight(p, env.dst_nid, dist) - taskweight;
- groupimp = group_weight(p, env.dst_nid, dist) - groupweight;
- update_numa_stats(&env, &env.dst_stats, env.dst_nid, true);
- /* Try to find a spot on the preferred nid. */
- task_numa_find_cpu(&env, taskimp, groupimp);
- /*
- * Look at other nodes in these cases:
- * - there is no space available on the preferred_nid
- * - the task is part of a numa_group that is interleaved across
- * multiple NUMA nodes; in order to better consolidate the group,
- * we need to check other locations.
- */
- ng = deref_curr_numa_group(p);
- if (env.best_cpu == -1 || (ng && ng->active_nodes > 1)) {
- for_each_node_state(nid, N_CPU) {
- if (nid == env.src_nid || nid == p->numa_preferred_nid)
- continue;
- dist = node_distance(env.src_nid, env.dst_nid);
- if (sched_numa_topology_type == NUMA_BACKPLANE &&
- dist != env.dist) {
- taskweight = task_weight(p, env.src_nid, dist);
- groupweight = group_weight(p, env.src_nid, dist);
- }
- /* Only consider nodes where both task and groups benefit */
- taskimp = task_weight(p, nid, dist) - taskweight;
- groupimp = group_weight(p, nid, dist) - groupweight;
- if (taskimp < 0 && groupimp < 0)
- continue;
- env.dist = dist;
- env.dst_nid = nid;
- update_numa_stats(&env, &env.dst_stats, env.dst_nid, true);
- task_numa_find_cpu(&env, taskimp, groupimp);
- }
- }
- /*
- * If the task is part of a workload that spans multiple NUMA nodes,
- * and is migrating into one of the workload's active nodes, remember
- * this node as the task's preferred numa node, so the workload can
- * settle down.
- * A task that migrated to a second choice node will be better off
- * trying for a better one later. Do not set the preferred node here.
- */
- if (ng) {
- if (env.best_cpu == -1)
- nid = env.src_nid;
- else
- nid = cpu_to_node(env.best_cpu);
- if (nid != p->numa_preferred_nid)
- sched_setnuma(p, nid);
- }
- /* No better CPU than the current one was found. */
- if (env.best_cpu == -1) {
- trace_sched_stick_numa(p, env.src_cpu, NULL, -1);
- return -EAGAIN;
- }
- best_rq = cpu_rq(env.best_cpu);
- if (env.best_task == NULL) {
- ret = migrate_task_to(p, env.best_cpu);
- WRITE_ONCE(best_rq->numa_migrate_on, 0);
- if (ret != 0)
- trace_sched_stick_numa(p, env.src_cpu, NULL, env.best_cpu);
- return ret;
- }
- ret = migrate_swap(p, env.best_task, env.best_cpu, env.src_cpu);
- WRITE_ONCE(best_rq->numa_migrate_on, 0);
- if (ret != 0)
- trace_sched_stick_numa(p, env.src_cpu, env.best_task, env.best_cpu);
- put_task_struct(env.best_task);
- return ret;
- }
- /* Attempt to migrate a task to a CPU on the preferred node. */
- static void numa_migrate_preferred(struct task_struct *p)
- {
- unsigned long interval = HZ;
- /* This task has no NUMA fault statistics yet */
- if (unlikely(p->numa_preferred_nid == NUMA_NO_NODE || !p->numa_faults))
- return;
- /* Periodically retry migrating the task to the preferred node */
- interval = min(interval, msecs_to_jiffies(p->numa_scan_period) / 16);
- p->numa_migrate_retry = jiffies + interval;
- /* Success if task is already running on preferred CPU */
- if (task_node(p) == p->numa_preferred_nid)
- return;
- /* Otherwise, try migrate to a CPU on the preferred node */
- task_numa_migrate(p);
- }
- /*
- * Find out how many nodes the workload is actively running on. Do this by
- * tracking the nodes from which NUMA hinting faults are triggered. This can
- * be different from the set of nodes where the workload's memory is currently
- * located.
- */
- static void numa_group_count_active_nodes(struct numa_group *numa_group)
- {
- unsigned long faults, max_faults = 0;
- int nid, active_nodes = 0;
- for_each_node_state(nid, N_CPU) {
- faults = group_faults_cpu(numa_group, nid);
- if (faults > max_faults)
- max_faults = faults;
- }
- for_each_node_state(nid, N_CPU) {
- faults = group_faults_cpu(numa_group, nid);
- if (faults * ACTIVE_NODE_FRACTION > max_faults)
- active_nodes++;
- }
- numa_group->max_faults_cpu = max_faults;
- numa_group->active_nodes = active_nodes;
- }
- /*
- * When adapting the scan rate, the period is divided into NUMA_PERIOD_SLOTS
- * increments. The more local the fault statistics are, the higher the scan
- * period will be for the next scan window. If local/(local+remote) ratio is
- * below NUMA_PERIOD_THRESHOLD (where range of ratio is 1..NUMA_PERIOD_SLOTS)
- * the scan period will decrease. Aim for 70% local accesses.
- */
- #define NUMA_PERIOD_SLOTS 10
- #define NUMA_PERIOD_THRESHOLD 7
- /*
- * Increase the scan period (slow down scanning) if the majority of
- * our memory is already on our local node, or if the majority of
- * the page accesses are shared with other processes.
- * Otherwise, decrease the scan period.
- */
- static void update_task_scan_period(struct task_struct *p,
- unsigned long shared, unsigned long private)
- {
- unsigned int period_slot;
- int lr_ratio, ps_ratio;
- int diff;
- unsigned long remote = p->numa_faults_locality[0];
- unsigned long local = p->numa_faults_locality[1];
- /*
- * If there were no record hinting faults then either the task is
- * completely idle or all activity is in areas that are not of interest
- * to automatic numa balancing. Related to that, if there were failed
- * migrations then it implies we are migrating too quickly or the local
- * node is overloaded. In either case, scan slower.
- */
- if (local + shared == 0 || p->numa_faults_locality[2]) {
- p->numa_scan_period = min(p->numa_scan_period_max,
- p->numa_scan_period << 1);
- p->mm->numa_next_scan = jiffies +
- msecs_to_jiffies(p->numa_scan_period);
- return;
- }
- /*
- * Prepare to scale scan period relative to the current period.
- * == NUMA_PERIOD_THRESHOLD scan period stays the same
- * < NUMA_PERIOD_THRESHOLD scan period decreases (scan faster)
- * >= NUMA_PERIOD_THRESHOLD scan period increases (scan slower)
- */
- period_slot = DIV_ROUND_UP(p->numa_scan_period, NUMA_PERIOD_SLOTS);
- lr_ratio = (local * NUMA_PERIOD_SLOTS) / (local + remote);
- ps_ratio = (private * NUMA_PERIOD_SLOTS) / (private + shared);
- if (ps_ratio >= NUMA_PERIOD_THRESHOLD) {
- /*
- * Most memory accesses are local. There is no need to
- * do fast NUMA scanning, since memory is already local.
- */
- int slot = ps_ratio - NUMA_PERIOD_THRESHOLD;
- if (!slot)
- slot = 1;
- diff = slot * period_slot;
- } else if (lr_ratio >= NUMA_PERIOD_THRESHOLD) {
- /*
- * Most memory accesses are shared with other tasks.
- * There is no point in continuing fast NUMA scanning,
- * since other tasks may just move the memory elsewhere.
- */
- int slot = lr_ratio - NUMA_PERIOD_THRESHOLD;
- if (!slot)
- slot = 1;
- diff = slot * period_slot;
- } else {
- /*
- * Private memory faults exceed (SLOTS-THRESHOLD)/SLOTS,
- * yet they are not on the local NUMA node. Speed up
- * NUMA scanning to get the memory moved over.
- */
- int ratio = max(lr_ratio, ps_ratio);
- diff = -(NUMA_PERIOD_THRESHOLD - ratio) * period_slot;
- }
- p->numa_scan_period = clamp(p->numa_scan_period + diff,
- task_scan_min(p), task_scan_max(p));
- memset(p->numa_faults_locality, 0, sizeof(p->numa_faults_locality));
- }
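- /*
- * A worked example for update_task_scan_period() above, using hypothetical
- * fault counts (not taken from any real trace). Assume p->numa_scan_period
- * is 1000ms, so period_slot = DIV_ROUND_UP(1000, 10) = 100ms.
- *
- * - private = 80, shared = 20, local = 90, remote = 10:
- * ps_ratio = 8, so slot = 8 - 7 = 1 and diff = +100ms; the period grows to
- * 1100ms (mostly local/private accesses, scan slower).
- *
- * - private = 50, shared = 50, local = 40, remote = 60:
- * ps_ratio = 5 and lr_ratio = 4, both below NUMA_PERIOD_THRESHOLD, so
- * diff = -(7 - 5) * 100 = -200ms; the period shrinks to 800ms (plenty of
- * private but remote accesses, scan faster), subject to the
- * task_scan_min()/task_scan_max() clamp.
- */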
- /*
- * Get the fraction of time the task has been running since the last
- * NUMA placement cycle. The scheduler keeps similar statistics, but
- * decays those on a 32ms period, which is orders of magnitude off
- * from the dozens-of-seconds NUMA balancing period. Use the scheduler
- * stats only if the task is so new there are no NUMA statistics yet.
- */
- static u64 numa_get_avg_runtime(struct task_struct *p, u64 *period)
- {
- u64 runtime, delta, now;
- /* Use the start of this time slice to avoid calculations. */
- now = p->se.exec_start;
- runtime = p->se.sum_exec_runtime;
- if (p->last_task_numa_placement) {
- delta = runtime - p->last_sum_exec_runtime;
- *period = now - p->last_task_numa_placement;
- /* Avoid time going backwards, prevent potential divide error: */
- if (unlikely((s64)*period < 0))
- *period = 0;
- } else {
- delta = p->se.avg.load_sum;
- *period = LOAD_AVG_MAX;
- }
- p->last_sum_exec_runtime = runtime;
- p->last_task_numa_placement = now;
- return delta;
- }
- /*
- * Determine the preferred nid for a task in a numa_group. This needs to
- * be done in a way that produces consistent results with group_weight,
- * otherwise workloads might not converge.
- */
- static int preferred_group_nid(struct task_struct *p, int nid)
- {
- nodemask_t nodes;
- int dist;
- /* Direct connections between all NUMA nodes. */
- if (sched_numa_topology_type == NUMA_DIRECT)
- return nid;
- /*
- * On a system with glueless mesh NUMA topology, group_weight
- * scores nodes according to the number of NUMA hinting faults on
- * both the node itself, and on nearby nodes.
- */
- if (sched_numa_topology_type == NUMA_GLUELESS_MESH) {
- unsigned long score, max_score = 0;
- int node, max_node = nid;
- dist = sched_max_numa_distance;
- for_each_node_state(node, N_CPU) {
- score = group_weight(p, node, dist);
- if (score > max_score) {
- max_score = score;
- max_node = node;
- }
- }
- return max_node;
- }
- /*
- * Finding the preferred nid in a system with NUMA backplane
- * interconnect topology is more involved. The goal is to locate
- * tasks from numa_groups near each other in the system, and
- * untangle workloads from different sides of the system. This requires
- * searching down the hierarchy of node groups, recursively searching
- * inside the highest scoring group of nodes. The nodemask tricks
- * keep the complexity of the search down.
- */
- nodes = node_states[N_CPU];
- for (dist = sched_max_numa_distance; dist > LOCAL_DISTANCE; dist--) {
- unsigned long max_faults = 0;
- nodemask_t max_group = NODE_MASK_NONE;
- int a, b;
- /* Are there nodes at this distance from each other? */
- if (!find_numa_distance(dist))
- continue;
- for_each_node_mask(a, nodes) {
- unsigned long faults = 0;
- nodemask_t this_group;
- nodes_clear(this_group);
- /* Sum group's NUMA faults; includes a==b case. */
- for_each_node_mask(b, nodes) {
- if (node_distance(a, b) < dist) {
- faults += group_faults(p, b);
- node_set(b, this_group);
- node_clear(b, nodes);
- }
- }
- /* Remember the top group. */
- if (faults > max_faults) {
- max_faults = faults;
- max_group = this_group;
- /*
- * subtle: at the smallest distance there is
- * just one node left in each "group", the
- * winner is the preferred nid.
- */
- nid = a;
- }
- }
- /* Next round, evaluate the nodes within max_group. */
- if (!max_faults)
- break;
- nodes = max_group;
- }
- return nid;
- }
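- /*
- * A compact example of the backplane search above, on a hypothetical
- * 4-node system: node_distance() is 10 locally, 20 within a pair
- * ({0,1} and {2,3}) and 40 across the backplane, with group faults of
- * 20/15/30/0 on nodes 0..3. At dist = 40 the candidate groups are {0,1}
- * with 35 faults and {2,3} with 30, so the search narrows to {0,1}; at
- * dist = 20 each "group" is a single node and node 0 wins with 20 faults.
- * Node 0 is returned even though node 2 alone has 30 faults, because the
- * heavier half of the machine is selected first and then refined.
- */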
- static void task_numa_placement(struct task_struct *p)
- {
- int seq, nid, max_nid = NUMA_NO_NODE;
- unsigned long max_faults = 0;
- unsigned long fault_types[2] = { 0, 0 };
- unsigned long total_faults;
- u64 runtime, period;
- spinlock_t *group_lock = NULL;
- struct numa_group *ng;
- /*
- * The p->mm->numa_scan_seq field gets updated without
- * exclusive access. Use READ_ONCE() here to ensure
- * that the field is read in a single access:
- */
- seq = READ_ONCE(p->mm->numa_scan_seq);
- if (p->numa_scan_seq == seq)
- return;
- p->numa_scan_seq = seq;
- p->numa_scan_period_max = task_scan_max(p);
- total_faults = p->numa_faults_locality[0] +
- p->numa_faults_locality[1];
- runtime = numa_get_avg_runtime(p, &period);
- /* If the task is part of a group prevent parallel updates to group stats */
- ng = deref_curr_numa_group(p);
- if (ng) {
- group_lock = &ng->lock;
- spin_lock_irq(group_lock);
- }
- /* Find the node with the highest number of faults */
- for_each_online_node(nid) {
- /* Keep track of the offsets in numa_faults array */
- int mem_idx, membuf_idx, cpu_idx, cpubuf_idx;
- unsigned long faults = 0, group_faults = 0;
- int priv;
- for (priv = 0; priv < NR_NUMA_HINT_FAULT_TYPES; priv++) {
- long diff, f_diff, f_weight;
- mem_idx = task_faults_idx(NUMA_MEM, nid, priv);
- membuf_idx = task_faults_idx(NUMA_MEMBUF, nid, priv);
- cpu_idx = task_faults_idx(NUMA_CPU, nid, priv);
- cpubuf_idx = task_faults_idx(NUMA_CPUBUF, nid, priv);
- /* Decay existing window, copy faults since last scan */
- diff = p->numa_faults[membuf_idx] - p->numa_faults[mem_idx] / 2;
- fault_types[priv] += p->numa_faults[membuf_idx];
- p->numa_faults[membuf_idx] = 0;
- /*
- * Normalize the faults_from, so all tasks in a group
- * count according to CPU use, instead of by the raw
- * number of faults. Tasks with little runtime have
- * little over-all impact on throughput, and thus their
- * faults are less important.
- */
- f_weight = div64_u64(runtime << 16, period + 1);
- f_weight = (f_weight * p->numa_faults[cpubuf_idx]) /
- (total_faults + 1);
- f_diff = f_weight - p->numa_faults[cpu_idx] / 2;
- p->numa_faults[cpubuf_idx] = 0;
- p->numa_faults[mem_idx] += diff;
- p->numa_faults[cpu_idx] += f_diff;
- faults += p->numa_faults[mem_idx];
- p->total_numa_faults += diff;
- if (ng) {
- /*
- * safe because we can only change our own group
- *
- * mem_idx represents the offset for a given
- * nid and priv in a specific region because it
- * is at the beginning of the numa_faults array.
- */
- ng->faults[mem_idx] += diff;
- ng->faults[cpu_idx] += f_diff;
- ng->total_faults += diff;
- group_faults += ng->faults[mem_idx];
- }
- }
- if (!ng) {
- if (faults > max_faults) {
- max_faults = faults;
- max_nid = nid;
- }
- } else if (group_faults > max_faults) {
- max_faults = group_faults;
- max_nid = nid;
- }
- }
- /* Cannot migrate task to CPU-less node */
- if (max_nid != NUMA_NO_NODE && !node_state(max_nid, N_CPU)) {
- int near_nid = max_nid;
- int distance, near_distance = INT_MAX;
- for_each_node_state(nid, N_CPU) {
- distance = node_distance(max_nid, nid);
- if (distance < near_distance) {
- near_nid = nid;
- near_distance = distance;
- }
- }
- max_nid = near_nid;
- }
- if (ng) {
- numa_group_count_active_nodes(ng);
- spin_unlock_irq(group_lock);
- max_nid = preferred_group_nid(p, max_nid);
- }
- if (max_faults) {
- /* Set the new preferred node */
- if (max_nid != p->numa_preferred_nid)
- sched_setnuma(p, max_nid);
- }
- update_task_scan_period(p, fault_types[0], fault_types[1]);
- }
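- /*
- * A small worked example (hypothetical numbers) for the decay step in
- * task_numa_placement() above: with p->numa_faults[mem_idx] == 400 from
- * earlier windows and p->numa_faults[membuf_idx] == 100 new faults,
- * diff = 100 - 400 / 2 = -100, so the long-term counter becomes
- * 400 + (-100) = 300, i.e. half of the old value plus the new faults.
- * Each scan window therefore halves the weight of older fault history.
- */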
- static inline int get_numa_group(struct numa_group *grp)
- {
- return refcount_inc_not_zero(&grp->refcount);
- }
- static inline void put_numa_group(struct numa_group *grp)
- {
- if (refcount_dec_and_test(&grp->refcount))
- kfree_rcu(grp, rcu);
- }
- static void task_numa_group(struct task_struct *p, int cpupid, int flags,
- int *priv)
- {
- struct numa_group *grp, *my_grp;
- struct task_struct *tsk;
- bool join = false;
- int cpu = cpupid_to_cpu(cpupid);
- int i;
- if (unlikely(!deref_curr_numa_group(p))) {
- unsigned int size = sizeof(struct numa_group) +
- NR_NUMA_HINT_FAULT_STATS *
- nr_node_ids * sizeof(unsigned long);
- grp = kzalloc(size, GFP_KERNEL | __GFP_NOWARN);
- if (!grp)
- return;
- refcount_set(&grp->refcount, 1);
- grp->active_nodes = 1;
- grp->max_faults_cpu = 0;
- spin_lock_init(&grp->lock);
- grp->gid = p->pid;
- for (i = 0; i < NR_NUMA_HINT_FAULT_STATS * nr_node_ids; i++)
- grp->faults[i] = p->numa_faults[i];
- grp->total_faults = p->total_numa_faults;
- grp->nr_tasks++;
- rcu_assign_pointer(p->numa_group, grp);
- }
- rcu_read_lock();
- tsk = READ_ONCE(cpu_rq(cpu)->curr);
- if (!cpupid_match_pid(tsk, cpupid))
- goto no_join;
- grp = rcu_dereference(tsk->numa_group);
- if (!grp)
- goto no_join;
- my_grp = deref_curr_numa_group(p);
- if (grp == my_grp)
- goto no_join;
- /*
- * Only join the other group if it's bigger; if we're the bigger group,
- * the other task will join us.
- */
- if (my_grp->nr_tasks > grp->nr_tasks)
- goto no_join;
- /*
- * Tie-break on the grp address.
- */
- if (my_grp->nr_tasks == grp->nr_tasks && my_grp > grp)
- goto no_join;
- /* Always join threads in the same process. */
- if (tsk->mm == current->mm)
- join = true;
- /* Simple filter to avoid false positives due to PID collisions */
- if (flags & TNF_SHARED)
- join = true;
- /* Update priv based on whether false sharing was detected */
- *priv = !join;
- if (join && !get_numa_group(grp))
- goto no_join;
- rcu_read_unlock();
- if (!join)
- return;
- WARN_ON_ONCE(irqs_disabled());
- double_lock_irq(&my_grp->lock, &grp->lock);
- for (i = 0; i < NR_NUMA_HINT_FAULT_STATS * nr_node_ids; i++) {
- my_grp->faults[i] -= p->numa_faults[i];
- grp->faults[i] += p->numa_faults[i];
- }
- my_grp->total_faults -= p->total_numa_faults;
- grp->total_faults += p->total_numa_faults;
- my_grp->nr_tasks--;
- grp->nr_tasks++;
- spin_unlock(&my_grp->lock);
- spin_unlock_irq(&grp->lock);
- rcu_assign_pointer(p->numa_group, grp);
- put_numa_group(my_grp);
- return;
- no_join:
- rcu_read_unlock();
- return;
- }
- /*
- * Get rid of NUMA statistics associated with a task (either current or dead).
- * If @final is set, the task is dead and has reached refcount zero, so we can
- * safely free all relevant data structures. Otherwise, there might be
- * concurrent reads from places like load balancing and procfs, and we should
- * reset the data back to default state without freeing ->numa_faults.
- */
- void task_numa_free(struct task_struct *p, bool final)
- {
- /* safe: p either is current or is being freed by current */
- struct numa_group *grp = rcu_dereference_raw(p->numa_group);
- unsigned long *numa_faults = p->numa_faults;
- unsigned long flags;
- int i;
- if (!numa_faults)
- return;
- if (grp) {
- spin_lock_irqsave(&grp->lock, flags);
- for (i = 0; i < NR_NUMA_HINT_FAULT_STATS * nr_node_ids; i++)
- grp->faults[i] -= p->numa_faults[i];
- grp->total_faults -= p->total_numa_faults;
- grp->nr_tasks--;
- spin_unlock_irqrestore(&grp->lock, flags);
- RCU_INIT_POINTER(p->numa_group, NULL);
- put_numa_group(grp);
- }
- if (final) {
- p->numa_faults = NULL;
- kfree(numa_faults);
- } else {
- p->total_numa_faults = 0;
- for (i = 0; i < NR_NUMA_HINT_FAULT_STATS * nr_node_ids; i++)
- numa_faults[i] = 0;
- }
- }
- /*
- * Got a PROT_NONE fault for a page on @node.
- */
- void task_numa_fault(int last_cpupid, int mem_node, int pages, int flags)
- {
- struct task_struct *p = current;
- bool migrated = flags & TNF_MIGRATED;
- int cpu_node = task_node(current);
- int local = !!(flags & TNF_FAULT_LOCAL);
- struct numa_group *ng;
- int priv;
- if (!static_branch_likely(&sched_numa_balancing))
- return;
- /* for example, ksmd faulting in a user's mm */
- if (!p->mm)
- return;
- /*
- * NUMA faults statistics are unnecessary for the slow memory
- * node for memory tiering mode.
- */
- if (!node_is_toptier(mem_node) &&
- (sysctl_numa_balancing_mode & NUMA_BALANCING_MEMORY_TIERING ||
- !cpupid_valid(last_cpupid)))
- return;
- /* Allocate buffer to track faults on a per-node basis */
- if (unlikely(!p->numa_faults)) {
- int size = sizeof(*p->numa_faults) *
- NR_NUMA_HINT_FAULT_BUCKETS * nr_node_ids;
- p->numa_faults = kzalloc(size, GFP_KERNEL|__GFP_NOWARN);
- if (!p->numa_faults)
- return;
- p->total_numa_faults = 0;
- memset(p->numa_faults_locality, 0, sizeof(p->numa_faults_locality));
- }
- /*
- * First accesses are treated as private, otherwise consider accesses
- * to be private if the accessing pid has not changed
- */
- if (unlikely(last_cpupid == (-1 & LAST_CPUPID_MASK))) {
- priv = 1;
- } else {
- priv = cpupid_match_pid(p, last_cpupid);
- if (!priv && !(flags & TNF_NO_GROUP))
- task_numa_group(p, last_cpupid, flags, &priv);
- }
- /*
- * If a workload spans multiple NUMA nodes, a shared fault that
- * occurs wholly within the set of nodes that the workload is
- * actively using should be counted as local. This allows the
- * scan rate to slow down when a workload has settled down.
- */
- ng = deref_curr_numa_group(p);
- if (!priv && !local && ng && ng->active_nodes > 1 &&
- numa_is_active_node(cpu_node, ng) &&
- numa_is_active_node(mem_node, ng))
- local = 1;
- /*
- * Retry to migrate task to preferred node periodically, in case it
- * previously failed, or the scheduler moved us.
- */
- if (time_after(jiffies, p->numa_migrate_retry)) {
- task_numa_placement(p);
- numa_migrate_preferred(p);
- }
- if (migrated)
- p->numa_pages_migrated += pages;
- if (flags & TNF_MIGRATE_FAIL)
- p->numa_faults_locality[2] += pages;
- p->numa_faults[task_faults_idx(NUMA_MEMBUF, mem_node, priv)] += pages;
- p->numa_faults[task_faults_idx(NUMA_CPUBUF, cpu_node, priv)] += pages;
- p->numa_faults_locality[local] += pages;
- }
- static void reset_ptenuma_scan(struct task_struct *p)
- {
- /*
- * We only did a read acquisition of the mmap sem, so
- * p->mm->numa_scan_seq is written to without exclusive access
- * and the update is not guaranteed to be atomic. That's not
- * much of an issue though, since this is just used for
- * statistical sampling. Use READ_ONCE/WRITE_ONCE, which are not
- * expensive, to avoid any form of compiler optimizations:
- */
- WRITE_ONCE(p->mm->numa_scan_seq, READ_ONCE(p->mm->numa_scan_seq) + 1);
- p->mm->numa_scan_offset = 0;
- }
- /*
- * The expensive part of numa migration is done from task_work context.
- * Triggered from task_tick_numa().
- */
- static void task_numa_work(struct callback_head *work)
- {
- unsigned long migrate, next_scan, now = jiffies;
- struct task_struct *p = current;
- struct mm_struct *mm = p->mm;
- u64 runtime = p->se.sum_exec_runtime;
- MA_STATE(mas, &mm->mm_mt, 0, 0);
- struct vm_area_struct *vma;
- unsigned long start, end;
- unsigned long nr_pte_updates = 0;
- long pages, virtpages;
- SCHED_WARN_ON(p != container_of(work, struct task_struct, numa_work));
- work->next = work;
- /*
- * Who cares about NUMA placement when they're dying.
- *
- * NOTE: make sure not to dereference p->mm before this check,
- * exit_task_work() happens _after_ exit_mm() so we could be called
- * without p->mm even though we still had it when we enqueued this
- * work.
- */
- if (p->flags & PF_EXITING)
- return;
- if (!mm->numa_next_scan) {
- mm->numa_next_scan = now +
- msecs_to_jiffies(sysctl_numa_balancing_scan_delay);
- }
- /*
- * Enforce maximal scan/migration frequency..
- */
- migrate = mm->numa_next_scan;
- if (time_before(now, migrate))
- return;
- if (p->numa_scan_period == 0) {
- p->numa_scan_period_max = task_scan_max(p);
- p->numa_scan_period = task_scan_start(p);
- }
- next_scan = now + msecs_to_jiffies(p->numa_scan_period);
- if (cmpxchg(&mm->numa_next_scan, migrate, next_scan) != migrate)
- return;
- /*
- * Delay this task enough that another task of this mm will likely win
- * the next time around.
- */
- p->node_stamp += 2 * TICK_NSEC;
- start = mm->numa_scan_offset;
- pages = sysctl_numa_balancing_scan_size;
- pages <<= 20 - PAGE_SHIFT; /* MB in pages */
- virtpages = pages * 8; /* Scan up to this much virtual space */
- if (!pages)
- return;
- if (!mmap_read_trylock(mm))
- return;
- mas_set(&mas, start);
- vma = mas_find(&mas, ULONG_MAX);
- if (!vma) {
- reset_ptenuma_scan(p);
- start = 0;
- mas_set(&mas, start);
- vma = mas_find(&mas, ULONG_MAX);
- }
- for (; vma; vma = mas_find(&mas, ULONG_MAX)) {
- if (!vma_migratable(vma) || !vma_policy_mof(vma) ||
- is_vm_hugetlb_page(vma) || (vma->vm_flags & VM_MIXEDMAP)) {
- continue;
- }
- /*
- * Shared library pages mapped by multiple processes are not
- * migrated as it is expected they are cache replicated. Avoid
- * hinting faults in read-only file-backed mappings or the vdso
- * as migrating the pages will be of marginal benefit.
- */
- if (!vma->vm_mm ||
- (vma->vm_file && (vma->vm_flags & (VM_READ|VM_WRITE)) == (VM_READ)))
- continue;
- /*
- * Skip inaccessible VMAs to avoid any confusion between
- * PROT_NONE and NUMA hinting ptes
- */
- if (!vma_is_accessible(vma))
- continue;
- do {
- start = max(start, vma->vm_start);
- end = ALIGN(start + (pages << PAGE_SHIFT), HPAGE_SIZE);
- end = min(end, vma->vm_end);
- nr_pte_updates = change_prot_numa(vma, start, end);
- /*
- * Try to scan sysctl_numa_balancing_scan_size worth of
- * hpages that have at least one present PTE that
- * is not already pte-numa. If the VMA contains
- * areas that are unused or already full of prot_numa
- * PTEs, scan up to virtpages, to skip through those
- * areas faster.
- */
- if (nr_pte_updates)
- pages -= (end - start) >> PAGE_SHIFT;
- virtpages -= (end - start) >> PAGE_SHIFT;
- start = end;
- if (pages <= 0 || virtpages <= 0)
- goto out;
- cond_resched();
- } while (end != vma->vm_end);
- }
- out:
- /*
- * It is possible to reach the end of the VMA list but the last few
- * VMAs are not guaranteed to be vma_migratable. If they are not, we
- * would find the !migratable VMA on the next scan but not reset the
- * scanner to the start so check it now.
- */
- if (vma)
- mm->numa_scan_offset = start;
- else
- reset_ptenuma_scan(p);
- mmap_read_unlock(mm);
- /*
- * Make sure tasks use at least 32x as much time to run other code
- * than they used here, to limit NUMA PTE scanning overhead to 3% max.
- * Usually update_task_scan_period slows down scanning enough; on an
- * overloaded system we need to limit overhead on a per task basis.
- */
- if (unlikely(p->se.sum_exec_runtime != runtime)) {
- u64 diff = p->se.sum_exec_runtime - runtime;
- p->node_stamp += 32 * diff;
- }
- }
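- /*
- * Two illustrative numbers for task_numa_work() above (hypothetical,
- * assuming 4KiB pages and a 256MB scan size): pages = 256 << (20 - 12)
- * = 65536 pages per scan pass, and virtpages = 8 * 65536 = 524288 pages of
- * virtual space that may be skipped over. For the runtime limit at the end:
- * if the PTE updates consumed 1ms of runtime, node_stamp advances by 32ms,
- * so roughly 1/(1 + 32) ~= 3% of the task's time can go to scanning.
- */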
- void init_numa_balancing(unsigned long clone_flags, struct task_struct *p)
- {
- int mm_users = 0;
- struct mm_struct *mm = p->mm;
- if (mm) {
- mm_users = atomic_read(&mm->mm_users);
- if (mm_users == 1) {
- mm->numa_next_scan = jiffies + msecs_to_jiffies(sysctl_numa_balancing_scan_delay);
- mm->numa_scan_seq = 0;
- }
- }
- p->node_stamp = 0;
- p->numa_scan_seq = mm ? mm->numa_scan_seq : 0;
- p->numa_scan_period = sysctl_numa_balancing_scan_delay;
- p->numa_migrate_retry = 0;
- /* Protect against double add, see task_tick_numa and task_numa_work */
- p->numa_work.next = &p->numa_work;
- p->numa_faults = NULL;
- p->numa_pages_migrated = 0;
- p->total_numa_faults = 0;
- RCU_INIT_POINTER(p->numa_group, NULL);
- p->last_task_numa_placement = 0;
- p->last_sum_exec_runtime = 0;
- init_task_work(&p->numa_work, task_numa_work);
- /* New address space, reset the preferred nid */
- if (!(clone_flags & CLONE_VM)) {
- p->numa_preferred_nid = NUMA_NO_NODE;
- return;
- }
- /*
- * New thread, keep existing numa_preferred_nid which should be copied
- * already by arch_dup_task_struct but stagger when scans start.
- */
- if (mm) {
- unsigned int delay;
- delay = min_t(unsigned int, task_scan_max(current),
- current->numa_scan_period * mm_users * NSEC_PER_MSEC);
- delay += 2 * TICK_NSEC;
- p->node_stamp = delay;
- }
- }
- /*
- * Drive the periodic memory faults..
- */
- static void task_tick_numa(struct rq *rq, struct task_struct *curr)
- {
- struct callback_head *work = &curr->numa_work;
- u64 period, now;
- /*
- * We don't care about NUMA placement if we don't have memory.
- */
- if (!curr->mm || (curr->flags & (PF_EXITING | PF_KTHREAD)) || work->next != work)
- return;
- /*
- * Using runtime rather than walltime has the dual advantage that
- * we (mostly) drive the selection from busy threads and that the
- * task needs to have done some actual work before we bother with
- * NUMA placement.
- */
- now = curr->se.sum_exec_runtime;
- period = (u64)curr->numa_scan_period * NSEC_PER_MSEC;
- if (now > curr->node_stamp + period) {
- if (!curr->node_stamp)
- curr->numa_scan_period = task_scan_start(curr);
- curr->node_stamp += period;
- if (!time_before(jiffies, curr->mm->numa_next_scan))
- task_work_add(curr, work, TWA_RESUME);
- }
- }
- static void update_scan_period(struct task_struct *p, int new_cpu)
- {
- int src_nid = cpu_to_node(task_cpu(p));
- int dst_nid = cpu_to_node(new_cpu);
- if (!static_branch_likely(&sched_numa_balancing))
- return;
- if (!p->mm || !p->numa_faults || (p->flags & PF_EXITING))
- return;
- if (src_nid == dst_nid)
- return;
- /*
- * Allow resets if faults have been trapped before one scan
- * has completed. This is most likely due to a new task that
- * is pulled cross-node due to wakeups or load balancing.
- */
- if (p->numa_scan_seq) {
- /*
- * Avoid scan adjustments if moving to the preferred
- * node or if the task was not previously running on
- * the preferred node.
- */
- if (dst_nid == p->numa_preferred_nid ||
- (p->numa_preferred_nid != NUMA_NO_NODE &&
- src_nid != p->numa_preferred_nid))
- return;
- }
- p->numa_scan_period = task_scan_start(p);
- }
- #else
- static void task_tick_numa(struct rq *rq, struct task_struct *curr)
- {
- }
- static inline void account_numa_enqueue(struct rq *rq, struct task_struct *p)
- {
- }
- static inline void account_numa_dequeue(struct rq *rq, struct task_struct *p)
- {
- }
- static inline void update_scan_period(struct task_struct *p, int new_cpu)
- {
- }
- #endif /* CONFIG_NUMA_BALANCING */
- static void
- account_entity_enqueue(struct cfs_rq *cfs_rq, struct sched_entity *se)
- {
- update_load_add(&cfs_rq->load, se->load.weight);
- #ifdef CONFIG_SMP
- if (entity_is_task(se)) {
- struct rq *rq = rq_of(cfs_rq);
- account_numa_enqueue(rq, task_of(se));
- list_add(&se->group_node, &rq->cfs_tasks);
- }
- #endif
- cfs_rq->nr_running++;
- if (se_is_idle(se))
- cfs_rq->idle_nr_running++;
- }
- static void
- account_entity_dequeue(struct cfs_rq *cfs_rq, struct sched_entity *se)
- {
- update_load_sub(&cfs_rq->load, se->load.weight);
- #ifdef CONFIG_SMP
- if (entity_is_task(se)) {
- account_numa_dequeue(rq_of(cfs_rq), task_of(se));
- list_del_init(&se->group_node);
- }
- #endif
- cfs_rq->nr_running--;
- if (se_is_idle(se))
- cfs_rq->idle_nr_running--;
- }
- /*
- * Signed add and clamp on underflow.
- *
- * Explicitly do a load-store to ensure the intermediate value never hits
- * memory. This allows lockless observations without ever seeing the negative
- * values.
- */
- #define add_positive(_ptr, _val) do { \
- typeof(_ptr) ptr = (_ptr); \
- typeof(_val) val = (_val); \
- typeof(*ptr) res, var = READ_ONCE(*ptr); \
- \
- res = var + val; \
- \
- if (val < 0 && res > var) \
- res = 0; \
- \
- WRITE_ONCE(*ptr, res); \
- } while (0)
- /*
- * Unsigned subtract and clamp on underflow.
- *
- * Explicitly do a load-store to ensure the intermediate value never hits
- * memory. This allows lockless observations without ever seeing the negative
- * values.
- */
- #define sub_positive(_ptr, _val) do { \
- typeof(_ptr) ptr = (_ptr); \
- typeof(*ptr) val = (_val); \
- typeof(*ptr) res, var = READ_ONCE(*ptr); \
- res = var - val; \
- if (res > var) \
- res = 0; \
- WRITE_ONCE(*ptr, res); \
- } while (0)
- /*
- * Remove and clamp on negative, from a local variable.
- *
- * A variant of sub_positive(), which does not use explicit load-store
- * and is thus optimized for local variable updates.
- */
- #define lsub_positive(_ptr, _val) do { \
- typeof(_ptr) ptr = (_ptr); \
- *ptr -= min_t(typeof(*ptr), *ptr, _val); \
- } while (0)
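- /*
- * A minimal usage sketch for the clamping helpers above; the function and
- * values are hypothetical, not scheduler code. Starting from avg = 5,
- * "removing" a contribution of 9 would wrap an unsigned subtraction, so
- * sub_positive() stores 0 instead, and lsub_positive() does the same for a
- * plain local variable without the READ_ONCE()/WRITE_ONCE() pair.
- */
- static inline void clamp_helpers_sketch(void)
- {
- unsigned long avg = 5;
- unsigned long local_avg = 5;
- sub_positive(&avg, 9); /* avg is now 0, not a huge wrapped value */
- lsub_positive(&local_avg, 9); /* local_avg is now 0 as well */
- add_positive(&avg, -3); /* a signed decrement of an empty sum also clamps to 0 */
- }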
- #ifdef CONFIG_SMP
- static inline void
- enqueue_load_avg(struct cfs_rq *cfs_rq, struct sched_entity *se)
- {
- cfs_rq->avg.load_avg += se->avg.load_avg;
- cfs_rq->avg.load_sum += se_weight(se) * se->avg.load_sum;
- }
- static inline void
- dequeue_load_avg(struct cfs_rq *cfs_rq, struct sched_entity *se)
- {
- sub_positive(&cfs_rq->avg.load_avg, se->avg.load_avg);
- sub_positive(&cfs_rq->avg.load_sum, se_weight(se) * se->avg.load_sum);
- /* See update_cfs_rq_load_avg() */
- cfs_rq->avg.load_sum = max_t(u32, cfs_rq->avg.load_sum,
- cfs_rq->avg.load_avg * PELT_MIN_DIVIDER);
- }
- #else
- static inline void
- enqueue_load_avg(struct cfs_rq *cfs_rq, struct sched_entity *se) { }
- static inline void
- dequeue_load_avg(struct cfs_rq *cfs_rq, struct sched_entity *se) { }
- #endif
- static void reweight_entity(struct cfs_rq *cfs_rq, struct sched_entity *se,
- unsigned long weight)
- {
- if (se->on_rq) {
- /* commit outstanding execution time */
- if (cfs_rq->curr == se)
- update_curr(cfs_rq);
- update_load_sub(&cfs_rq->load, se->load.weight);
- }
- dequeue_load_avg(cfs_rq, se);
- update_load_set(&se->load, weight);
- #ifdef CONFIG_SMP
- do {
- u32 divider = get_pelt_divider(&se->avg);
- se->avg.load_avg = div_u64(se_weight(se) * se->avg.load_sum, divider);
- } while (0);
- #endif
- enqueue_load_avg(cfs_rq, se);
- if (se->on_rq)
- update_load_add(&cfs_rq->load, se->load.weight);
- }
- void reweight_task(struct task_struct *p, int prio)
- {
- struct sched_entity *se = &p->se;
- struct cfs_rq *cfs_rq = cfs_rq_of(se);
- struct load_weight *load = &se->load;
- unsigned long weight = scale_load(sched_prio_to_weight[prio]);
- reweight_entity(cfs_rq, se, weight);
- load->inv_weight = sched_prio_to_wmult[prio];
- }
- EXPORT_SYMBOL_GPL(reweight_task);
- static inline int throttled_hierarchy(struct cfs_rq *cfs_rq);
- #ifdef CONFIG_FAIR_GROUP_SCHED
- #ifdef CONFIG_SMP
- /*
- * All this does is approximate the hierarchical proportion which includes that
- * global sum we all love to hate.
- *
- * That is, the weight of a group entity, is the proportional share of the
- * group weight based on the group runqueue weights. That is:
- *
- * tg->weight * grq->load.weight
- * ge->load.weight = ----------------------------- (1)
- * \Sum grq->load.weight
- *
- * Now, because computing that sum is prohibitively expensive to compute (been
- * there, done that) we approximate it with this average stuff. The average
- * moves slower and therefore the approximation is cheaper and more stable.
- *
- * So instead of the above, we substitute:
- *
- * grq->load.weight -> grq->avg.load_avg (2)
- *
- * which yields the following:
- *
- * tg->weight * grq->avg.load_avg
- * ge->load.weight = ------------------------------ (3)
- * tg->load_avg
- *
- * Where: tg->load_avg ~= \Sum grq->avg.load_avg
- *
- * That is shares_avg, and it is right (given the approximation (2)).
- *
- * The problem with it is that because the average is slow -- it was designed
- * to be exactly that of course -- this leads to transients in boundary
- * conditions. In specific, the case where the group was idle and we start the
- * one task. It takes time for our CPU's grq->avg.load_avg to build up,
- * yielding bad latency etc..
- *
- * Now, in that special case (1) reduces to:
- *
- * tg->weight * grq->load.weight
- * ge->load.weight = ----------------------------- = tg->weight (4)
- * grq->load.weight
- *
- * That is, the sum collapses because all other CPUs are idle; the UP scenario.
- *
- * So what we do is modify our approximation (3) to approach (4) in the (near)
- * UP case, like:
- *
- * ge->load.weight =
- *
- * tg->weight * grq->load.weight
- * --------------------------------------------------- (5)
- * tg->load_avg - grq->avg.load_avg + grq->load.weight
- *
- * But because grq->load.weight can drop to 0, resulting in a divide by zero,
- * we need to use grq->avg.load_avg as its lower bound, which then gives:
- *
- *
- * tg->weight * grq->load.weight
- * ge->load.weight = ----------------------------- (6)
- * tg_load_avg'
- *
- * Where:
- *
- * tg_load_avg' = tg->load_avg - grq->avg.load_avg +
- * max(grq->load.weight, grq->avg.load_avg)
- *
- * And that is shares_weight and is icky. In the (near) UP case it approaches
- * (4) while in the normal case it approaches (3). It consistently
- * overestimates the ge->load.weight and therefore:
- *
- * \Sum ge->load.weight >= tg->weight
- *
- * hence icky!
- */
- static long calc_group_shares(struct cfs_rq *cfs_rq)
- {
- long tg_weight, tg_shares, load, shares;
- struct task_group *tg = cfs_rq->tg;
- tg_shares = READ_ONCE(tg->shares);
- load = max(scale_load_down(cfs_rq->load.weight), cfs_rq->avg.load_avg);
- tg_weight = atomic_long_read(&tg->load_avg);
- /* Ensure tg_weight >= load */
- tg_weight -= cfs_rq->tg_load_avg_contrib;
- tg_weight += load;
- shares = (tg_shares * load);
- if (tg_weight)
- shares /= tg_weight;
- /*
- * MIN_SHARES has to be unscaled here to support per-CPU partitioning
- * of a group with small tg->shares value. It is a floor value which is
- * assigned as a minimum load.weight to the sched_entity representing
- * the group on a CPU.
- *
- * E.g. on 64-bit for a group with tg->shares of scale_load(15)=15*1024
- * on an 8-core system with 8 tasks each runnable on one CPU shares has
- * to be 15*1024*1/8=1920 instead of scale_load(MIN_SHARES)=2*1024. In
- * case no task is runnable on a CPU MIN_SHARES=2 should be returned
- * instead of 0.
- */
- return clamp_t(long, shares, MIN_SHARES, tg_shares);
- }
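- /*
- * A worked example for calc_group_shares() with hypothetical values
- * (scale_load()/scale_load_down() factors ignored for readability):
- * tg->shares = 1024, tg->load_avg = 2048, and on this CPU
- * tg_load_avg_contrib = 512, load.weight = 1024, avg.load_avg = 512.
- * Then load = max(1024, 512) = 1024,
- * tg_weight = 2048 - 512 + 1024 = 2560, and
- * shares = 1024 * 1024 / 2560 = 409, clamped to [MIN_SHARES, 1024].
- * With the group completely idle elsewhere (tg->load_avg == contrib == 512)
- * the same runqueue would instead get 1024 * 1024 / 1024 = 1024, i.e. the
- * full tg->weight, which is exactly the UP case (4) above.
- */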
- #endif /* CONFIG_SMP */
- /*
- * Recomputes the group entity based on the current state of its group
- * runqueue.
- */
- static void update_cfs_group(struct sched_entity *se)
- {
- struct cfs_rq *gcfs_rq = group_cfs_rq(se);
- long shares;
- if (!gcfs_rq)
- return;
- if (throttled_hierarchy(gcfs_rq))
- return;
- #ifndef CONFIG_SMP
- shares = READ_ONCE(gcfs_rq->tg->shares);
- if (likely(se->load.weight == shares))
- return;
- #else
- shares = calc_group_shares(gcfs_rq);
- #endif
- reweight_entity(cfs_rq_of(se), se, shares);
- }
- #else /* CONFIG_FAIR_GROUP_SCHED */
- static inline void update_cfs_group(struct sched_entity *se)
- {
- }
- #endif /* CONFIG_FAIR_GROUP_SCHED */
- static inline void cfs_rq_util_change(struct cfs_rq *cfs_rq, int flags)
- {
- struct rq *rq = rq_of(cfs_rq);
- if (&rq->cfs == cfs_rq) {
- /*
- * There are a few boundary cases this might miss but it should
- * get called often enough that that should (hopefully) not be
- * a real problem.
- *
- * It will not get called when we go idle, because the idle
- * thread is a different class (!fair), nor will the utilization
- * number include things like RT tasks.
- *
- * As is, the util number is not freq-invariant (we'd have to
- * implement arch_scale_freq_capacity() for that).
- *
- * See cpu_util_cfs().
- */
- cpufreq_update_util(rq, flags);
- }
- }
- #ifdef CONFIG_SMP
- static inline bool load_avg_is_decayed(struct sched_avg *sa)
- {
- if (sa->load_sum)
- return false;
- if (sa->util_sum)
- return false;
- if (sa->runnable_sum)
- return false;
- /*
- * _avg must be null when _sum are null because _avg = _sum / divider
- * Make sure that rounding and/or propagation of PELT values never
- * break this.
- */
- SCHED_WARN_ON(sa->load_avg ||
- sa->util_avg ||
- sa->runnable_avg);
- return true;
- }
- static inline u64 cfs_rq_last_update_time(struct cfs_rq *cfs_rq)
- {
- return u64_u32_load_copy(cfs_rq->avg.last_update_time,
- cfs_rq->last_update_time_copy);
- }
- #ifdef CONFIG_FAIR_GROUP_SCHED
- /*
- * Because list_add_leaf_cfs_rq always places a child cfs_rq on the list
- * immediately before a parent cfs_rq, and cfs_rqs are removed from the list
- * bottom-up, we only have to test whether the cfs_rq before us on the list
- * is our child.
- * If cfs_rq is not on the list, test whether a child needs to be added to
- * connect a branch to the tree (see list_add_leaf_cfs_rq() for details).
- */
- static inline bool child_cfs_rq_on_list(struct cfs_rq *cfs_rq)
- {
- struct cfs_rq *prev_cfs_rq;
- struct list_head *prev;
- if (cfs_rq->on_list) {
- prev = cfs_rq->leaf_cfs_rq_list.prev;
- } else {
- struct rq *rq = rq_of(cfs_rq);
- prev = rq->tmp_alone_branch;
- }
- prev_cfs_rq = container_of(prev, struct cfs_rq, leaf_cfs_rq_list);
- return (prev_cfs_rq->tg->parent == cfs_rq->tg);
- }
- static inline bool cfs_rq_is_decayed(struct cfs_rq *cfs_rq)
- {
- if (cfs_rq->load.weight)
- return false;
- if (!load_avg_is_decayed(&cfs_rq->avg))
- return false;
- if (child_cfs_rq_on_list(cfs_rq))
- return false;
- return true;
- }
- /**
- * update_tg_load_avg - update the tg's load avg
- * @cfs_rq: the cfs_rq whose avg changed
- *
- * This function 'ensures': tg->load_avg := \Sum tg->cfs_rq[]->avg.load.
- * However, because tg->load_avg is a global value there are performance
- * considerations.
- *
- * In order to avoid having to look at the other cfs_rq's, we use a
- * differential update where we store the last value we propagated. This in
- * turn allows skipping updates if the differential is 'small'.
- *
- * Updating tg's load_avg is necessary before update_cfs_share().
- */
- static inline void update_tg_load_avg(struct cfs_rq *cfs_rq)
- {
- long delta = cfs_rq->avg.load_avg - cfs_rq->tg_load_avg_contrib;
- /*
- * No need to update load_avg for root_task_group as it is not used.
- */
- if (cfs_rq->tg == &root_task_group)
- return;
- if (abs(delta) > cfs_rq->tg_load_avg_contrib / 64) {
- atomic_long_add(delta, &cfs_rq->tg->load_avg);
- cfs_rq->tg_load_avg_contrib = cfs_rq->avg.load_avg;
- }
- }
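- /*
- * For illustration (hypothetical numbers): if tg_load_avg_contrib is 6400,
- * update_tg_load_avg() only touches the global tg->load_avg once the local
- * cfs_rq's load_avg has drifted by more than 6400 / 64 = 100, which limits
- * how often the shared tg->load_avg cacheline is written to real load
- * changes rather than every PELT update.
- */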
- /*
- * Called within set_task_rq() right before setting a task's CPU. The
- * caller only guarantees p->pi_lock is held; no other assumptions,
- * including the state of rq->lock, should be made.
- */
- void set_task_rq_fair(struct sched_entity *se,
- struct cfs_rq *prev, struct cfs_rq *next)
- {
- u64 p_last_update_time;
- u64 n_last_update_time;
- if (!sched_feat(ATTACH_AGE_LOAD))
- return;
- /*
- * We are supposed to update the task to "current" time, so that it is up to
- * date and ready to go to a new CPU/cfs_rq. But we have difficulty in
- * getting what the current time is, so simply throw away the out-of-date
- * time. This will result in the wakee task being less decayed, but giving
- * the wakee more load is not a bad thing.
- */
- if (!(se->avg.last_update_time && prev))
- return;
- p_last_update_time = cfs_rq_last_update_time(prev);
- n_last_update_time = cfs_rq_last_update_time(next);
- __update_load_avg_blocked_se(p_last_update_time, se);
- se->avg.last_update_time = n_last_update_time;
- }
- /*
- * When on migration a sched_entity joins/leaves the PELT hierarchy, we need to
- * propagate its contribution. The key to this propagation is the invariant
- * that for each group:
- *
- * ge->avg == grq->avg (1)
- *
- * _IFF_ we look at the pure running and runnable sums. Because they
- * represent the very same entity, just at different points in the hierarchy.
- *
- * Per the above update_tg_cfs_util() and update_tg_cfs_runnable() are trivial
- * and simply copies the running/runnable sum over (but still wrong, because
- * the group entity and group rq do not have their PELT windows aligned).
- *
- * However, update_tg_cfs_load() is more complex. So we have:
- *
- * ge->avg.load_avg = ge->load.weight * ge->avg.runnable_avg (2)
- *
- * And since, like util, the runnable part should be directly transferable,
- * the following would _appear_ to be the straightforward approach:
- *
- * grq->avg.load_avg = grq->load.weight * grq->avg.runnable_avg (3)
- *
- * And per (1) we have:
- *
- * ge->avg.runnable_avg == grq->avg.runnable_avg
- *
- * Which gives:
- *
- * ge->load.weight * grq->avg.load_avg
- * ge->avg.load_avg = ----------------------------------- (4)
- * grq->load.weight
- *
- * Except that is wrong!
- *
- * Because while for entities historical weight is not important and we
- * really only care about our future and therefore can consider a pure
- * runnable sum, runqueues can NOT do this.
- *
- * We specifically want runqueues to have a load_avg that includes
- * historical weights. Those represent the blocked load, the load we expect
- * to (shortly) return to us. This only works by keeping the weights as
- * integral part of the sum. We therefore cannot decompose as per (3).
- *
- * Another reason this doesn't work is that runnable isn't a 0-sum entity.
- * Imagine a rq with 2 tasks that each are runnable 2/3 of the time. Then the
- * rq itself is runnable anywhere between 2/3 and 1 depending on how the
- * runnable section of these tasks overlap (or not). If they were to perfectly
- * align the rq as a whole would be runnable 2/3 of the time. If however we
- * always have at least 1 runnable task, the rq as a whole is always runnable.
- *
- * So we'll have to approximate.. :/
- *
- * Given the constraint:
- *
- * ge->avg.running_sum <= ge->avg.runnable_sum <= LOAD_AVG_MAX
- *
- * We can construct a rule that adds runnable to a rq by assuming minimal
- * overlap.
- *
- * On removal, we'll assume each task is equally runnable; which yields:
- *
- * grq->avg.runnable_sum = grq->avg.load_sum / grq->load.weight
- *
- * XXX: only do this for the part of runnable > running ?
- *
- */
- static inline void
- update_tg_cfs_util(struct cfs_rq *cfs_rq, struct sched_entity *se, struct cfs_rq *gcfs_rq)
- {
- long delta_sum, delta_avg = gcfs_rq->avg.util_avg - se->avg.util_avg;
- u32 new_sum, divider;
- /* Nothing to update */
- if (!delta_avg)
- return;
- /*
- * cfs_rq->avg.period_contrib can be used for both cfs_rq and se.
- * See ___update_load_avg() for details.
- */
- divider = get_pelt_divider(&cfs_rq->avg);
- /* Set new sched_entity's utilization */
- se->avg.util_avg = gcfs_rq->avg.util_avg;
- new_sum = se->avg.util_avg * divider;
- delta_sum = (long)new_sum - (long)se->avg.util_sum;
- se->avg.util_sum = new_sum;
- /* Update parent cfs_rq utilization */
- add_positive(&cfs_rq->avg.util_avg, delta_avg);
- add_positive(&cfs_rq->avg.util_sum, delta_sum);
- /* See update_cfs_rq_load_avg() */
- cfs_rq->avg.util_sum = max_t(u32, cfs_rq->avg.util_sum,
- cfs_rq->avg.util_avg * PELT_MIN_DIVIDER);
- }
- static inline void
- update_tg_cfs_runnable(struct cfs_rq *cfs_rq, struct sched_entity *se, struct cfs_rq *gcfs_rq)
- {
- long delta_sum, delta_avg = gcfs_rq->avg.runnable_avg - se->avg.runnable_avg;
- u32 new_sum, divider;
- /* Nothing to update */
- if (!delta_avg)
- return;
- /*
- * cfs_rq->avg.period_contrib can be used for both cfs_rq and se.
- * See ___update_load_avg() for details.
- */
- divider = get_pelt_divider(&cfs_rq->avg);
- /* Set new sched_entity's runnable */
- se->avg.runnable_avg = gcfs_rq->avg.runnable_avg;
- new_sum = se->avg.runnable_avg * divider;
- delta_sum = (long)new_sum - (long)se->avg.runnable_sum;
- se->avg.runnable_sum = new_sum;
- /* Update parent cfs_rq runnable */
- add_positive(&cfs_rq->avg.runnable_avg, delta_avg);
- add_positive(&cfs_rq->avg.runnable_sum, delta_sum);
- /* See update_cfs_rq_load_avg() */
- cfs_rq->avg.runnable_sum = max_t(u32, cfs_rq->avg.runnable_sum,
- cfs_rq->avg.runnable_avg * PELT_MIN_DIVIDER);
- }
- static inline void
- update_tg_cfs_load(struct cfs_rq *cfs_rq, struct sched_entity *se, struct cfs_rq *gcfs_rq)
- {
- long delta_avg, running_sum, runnable_sum = gcfs_rq->prop_runnable_sum;
- unsigned long load_avg;
- u64 load_sum = 0;
- s64 delta_sum;
- u32 divider;
- if (!runnable_sum)
- return;
- gcfs_rq->prop_runnable_sum = 0;
- /*
- * cfs_rq->avg.period_contrib can be used for both cfs_rq and se.
- * See ___update_load_avg() for details.
- */
- divider = get_pelt_divider(&cfs_rq->avg);
- if (runnable_sum >= 0) {
- /*
- * Add runnable; clip at LOAD_AVG_MAX. Reflects that until
- * the CPU is saturated running == runnable.
- */
- runnable_sum += se->avg.load_sum;
- runnable_sum = min_t(long, runnable_sum, divider);
- } else {
- /*
- * Estimate the new unweighted runnable_sum of the gcfs_rq by
- * assuming all tasks are equally runnable.
- */
- if (scale_load_down(gcfs_rq->load.weight)) {
- load_sum = div_u64(gcfs_rq->avg.load_sum,
- scale_load_down(gcfs_rq->load.weight));
- }
- /* But make sure to not inflate se's runnable */
- runnable_sum = min(se->avg.load_sum, load_sum);
- }
- /*
- * runnable_sum can't be lower than running_sum
- * Rescale running sum to be in the same range as runnable sum
- * running_sum is in [0 : LOAD_AVG_MAX << SCHED_CAPACITY_SHIFT]
- * runnable_sum is in [0 : LOAD_AVG_MAX]
- */
- running_sum = se->avg.util_sum >> SCHED_CAPACITY_SHIFT;
- runnable_sum = max(runnable_sum, running_sum);
- load_sum = se_weight(se) * runnable_sum;
- load_avg = div_u64(load_sum, divider);
- delta_avg = load_avg - se->avg.load_avg;
- if (!delta_avg)
- return;
- delta_sum = load_sum - (s64)se_weight(se) * se->avg.load_sum;
- se->avg.load_sum = runnable_sum;
- se->avg.load_avg = load_avg;
- add_positive(&cfs_rq->avg.load_avg, delta_avg);
- add_positive(&cfs_rq->avg.load_sum, delta_sum);
- /* See update_cfs_rq_load_avg() */
- cfs_rq->avg.load_sum = max_t(u32, cfs_rq->avg.load_sum,
- cfs_rq->avg.load_avg * PELT_MIN_DIVIDER);
- }
- static inline void add_tg_cfs_propagate(struct cfs_rq *cfs_rq, long runnable_sum)
- {
- cfs_rq->propagate = 1;
- cfs_rq->prop_runnable_sum += runnable_sum;
- }
- /* Update task and its cfs_rq load average */
- static inline int propagate_entity_load_avg(struct sched_entity *se)
- {
- struct cfs_rq *cfs_rq, *gcfs_rq;
- if (entity_is_task(se))
- return 0;
- gcfs_rq = group_cfs_rq(se);
- if (!gcfs_rq->propagate)
- return 0;
- gcfs_rq->propagate = 0;
- cfs_rq = cfs_rq_of(se);
- add_tg_cfs_propagate(cfs_rq, gcfs_rq->prop_runnable_sum);
- update_tg_cfs_util(cfs_rq, se, gcfs_rq);
- update_tg_cfs_runnable(cfs_rq, se, gcfs_rq);
- update_tg_cfs_load(cfs_rq, se, gcfs_rq);
- trace_pelt_cfs_tp(cfs_rq);
- trace_pelt_se_tp(se);
- return 1;
- }
- /*
- * Check if we need to update the load and the utilization of a blocked
- * group_entity:
- */
- static inline bool skip_blocked_update(struct sched_entity *se)
- {
- struct cfs_rq *gcfs_rq = group_cfs_rq(se);
- /*
- * If sched_entity still have not zero load or utilization, we have to
- * decay it:
- */
- if (se->avg.load_avg || se->avg.util_avg)
- return false;
- /*
- * If there is a pending propagation, we have to update the load and
- * the utilization of the sched_entity:
- */
- if (gcfs_rq->propagate)
- return false;
- /*
- * Otherwise, the load and the utilization of the sched_entity is
- * already zero and there is no pending propagation, so it will be a
- * waste of time to try to decay it:
- */
- return true;
- }
- #else /* CONFIG_FAIR_GROUP_SCHED */
- static inline void update_tg_load_avg(struct cfs_rq *cfs_rq) {}
- static inline int propagate_entity_load_avg(struct sched_entity *se)
- {
- return 0;
- }
- static inline void add_tg_cfs_propagate(struct cfs_rq *cfs_rq, long runnable_sum) {}
- #endif /* CONFIG_FAIR_GROUP_SCHED */
- #ifdef CONFIG_NO_HZ_COMMON
- static inline void migrate_se_pelt_lag(struct sched_entity *se)
- {
- u64 throttled = 0, now, lut;
- struct cfs_rq *cfs_rq;
- struct rq *rq;
- bool is_idle;
- if (load_avg_is_decayed(&se->avg))
- return;
- cfs_rq = cfs_rq_of(se);
- rq = rq_of(cfs_rq);
- rcu_read_lock();
- is_idle = is_idle_task(rcu_dereference(rq->curr));
- rcu_read_unlock();
- /*
- * The lag estimation comes with a cost we don't want to pay all the
- * time. Hence, limiting to the case where the source CPU is idle and
- * we know we are at the greatest risk to have an outdated clock.
- */
- if (!is_idle)
- return;
- /*
- * Estimated "now" is: last_update_time + cfs_idle_lag + rq_idle_lag, where:
- *
- * last_update_time (the cfs_rq's last_update_time)
- * = cfs_rq_clock_pelt()@cfs_rq_idle
- * = rq_clock_pelt()@cfs_rq_idle
- * - cfs->throttled_clock_pelt_time@cfs_rq_idle
- *
- * cfs_idle_lag (delta between rq's update and cfs_rq's update)
- * = rq_clock_pelt()@rq_idle - rq_clock_pelt()@cfs_rq_idle
- *
- * rq_idle_lag (delta between now and rq's update)
- * = sched_clock_cpu() - rq_clock()@rq_idle
- *
- * We can then write:
- *
- * now = rq_clock_pelt()@rq_idle - cfs->throttled_clock_pelt_time +
- * sched_clock_cpu() - rq_clock()@rq_idle
- * Where:
- * rq_clock_pelt()@rq_idle is rq->clock_pelt_idle
- * rq_clock()@rq_idle is rq->clock_idle
- * cfs->throttled_clock_pelt_time@cfs_rq_idle
- * is cfs_rq->throttled_pelt_idle
- */
- #ifdef CONFIG_CFS_BANDWIDTH
- throttled = u64_u32_load(cfs_rq->throttled_pelt_idle);
- /* The clock has been stopped for throttling */
- if (throttled == U64_MAX)
- return;
- #endif
- now = u64_u32_load(rq->clock_pelt_idle);
- /*
- * Paired with _update_idle_rq_clock_pelt(). It ensures that, in the worst
- * case, we observe the old clock_pelt_idle value together with the new
- * clock_idle, which leads to an underestimation. The opposite would lead
- * to an overestimation.
- */
- smp_rmb();
- lut = cfs_rq_last_update_time(cfs_rq);
- now -= throttled;
- if (now < lut)
- /*
- * cfs_rq->avg.last_update_time is more recent than our
- * estimation, let's use it.
- */
- now = lut;
- else
- now += sched_clock_cpu(cpu_of(rq)) - u64_u32_load(rq->clock_idle);
- __update_load_avg_blocked_se(now, se);
- }
- #else
- static void migrate_se_pelt_lag(struct sched_entity *se) {}
- #endif
- /**
- * update_cfs_rq_load_avg - update the cfs_rq's load/util averages
- * @now: current time, as per cfs_rq_clock_pelt()
- * @cfs_rq: cfs_rq to update
- *
- * The cfs_rq avg is the direct sum of all its entities (blocked and runnable)
- * avg. The immediate corollary is that all (fair) tasks must be attached.
- *
- * cfs_rq->avg is used for task_h_load() and update_cfs_share() for example.
- *
- * Return: true if the load decayed or we removed load.
- *
- * Since both these conditions indicate a changed cfs_rq->avg.load we should
- * call update_tg_load_avg() when this function returns true.
- */
- static inline int
- update_cfs_rq_load_avg(u64 now, struct cfs_rq *cfs_rq)
- {
- unsigned long removed_load = 0, removed_util = 0, removed_runnable = 0;
- struct sched_avg *sa = &cfs_rq->avg;
- int decayed = 0;
- if (cfs_rq->removed.nr) {
- unsigned long r;
- u32 divider = get_pelt_divider(&cfs_rq->avg);
- raw_spin_lock(&cfs_rq->removed.lock);
- swap(cfs_rq->removed.util_avg, removed_util);
- swap(cfs_rq->removed.load_avg, removed_load);
- swap(cfs_rq->removed.runnable_avg, removed_runnable);
- cfs_rq->removed.nr = 0;
- raw_spin_unlock(&cfs_rq->removed.lock);
- r = removed_load;
- sub_positive(&sa->load_avg, r);
- sub_positive(&sa->load_sum, r * divider);
- /* See sa->util_sum below */
- sa->load_sum = max_t(u32, sa->load_sum, sa->load_avg * PELT_MIN_DIVIDER);
- r = removed_util;
- sub_positive(&sa->util_avg, r);
- sub_positive(&sa->util_sum, r * divider);
- /*
- * Because of rounding, se->util_sum might end up being +1 more than
- * cfs->util_sum. Although this is not a problem by itself, detaching
- * a lot of tasks with the rounding problem between 2 updates of
- * util_avg (~1ms) can make cfs->util_sum become null whereas
- * cfs->util_avg is not.
- * Check that util_sum is still above its lower bound for the new
- * util_avg. Given that period_contrib might have moved since the last
- * sync, we are only sure that util_sum must be above or equal to
- * util_avg * minimum possible divider
- */
- sa->util_sum = max_t(u32, sa->util_sum, sa->util_avg * PELT_MIN_DIVIDER);
- r = removed_runnable;
- sub_positive(&sa->runnable_avg, r);
- sub_positive(&sa->runnable_sum, r * divider);
- /* See sa->util_sum above */
- sa->runnable_sum = max_t(u32, sa->runnable_sum,
- sa->runnable_avg * PELT_MIN_DIVIDER);
- /*
- * removed_runnable is the unweighted version of removed_load so we
- * can use it to estimate removed_load_sum.
- */
- add_tg_cfs_propagate(cfs_rq,
- -(long)(removed_runnable * divider) >> SCHED_CAPACITY_SHIFT);
- decayed = 1;
- }
- decayed |= __update_load_avg_cfs_rq(now, cfs_rq);
- u64_u32_store_copy(sa->last_update_time,
- cfs_rq->last_update_time_copy,
- sa->last_update_time);
- return decayed;
- }
- /**
- * attach_entity_load_avg - attach this entity to its cfs_rq load avg
- * @cfs_rq: cfs_rq to attach to
- * @se: sched_entity to attach
- *
- * Must call update_cfs_rq_load_avg() before this, since we rely on
- * cfs_rq->avg.last_update_time being current.
- */
- static void attach_entity_load_avg(struct cfs_rq *cfs_rq, struct sched_entity *se)
- {
- /*
- * cfs_rq->avg.period_contrib can be used for both cfs_rq and se.
- * See ___update_load_avg() for details.
- */
- u32 divider = get_pelt_divider(&cfs_rq->avg);
- /*
- * When we attach the @se to the @cfs_rq, we must align the decay
- * window because without that, really weird and wonderful things can
- * happen.
- *
- * XXX illustrate
- */
- se->avg.last_update_time = cfs_rq->avg.last_update_time;
- se->avg.period_contrib = cfs_rq->avg.period_contrib;
- /*
- * Hell(o) Nasty stuff.. we need to recompute _sum based on the new
- * period_contrib. This isn't strictly correct, but since we're
- * entirely outside of the PELT hierarchy, nobody cares if we truncate
- * _sum a little.
- */
- se->avg.util_sum = se->avg.util_avg * divider;
- se->avg.runnable_sum = se->avg.runnable_avg * divider;
- se->avg.load_sum = se->avg.load_avg * divider;
- if (se_weight(se) < se->avg.load_sum)
- se->avg.load_sum = div_u64(se->avg.load_sum, se_weight(se));
- else
- se->avg.load_sum = 1;
- trace_android_rvh_attach_entity_load_avg(cfs_rq, se);
- enqueue_load_avg(cfs_rq, se);
- cfs_rq->avg.util_avg += se->avg.util_avg;
- cfs_rq->avg.util_sum += se->avg.util_sum;
- cfs_rq->avg.runnable_avg += se->avg.runnable_avg;
- cfs_rq->avg.runnable_sum += se->avg.runnable_sum;
- add_tg_cfs_propagate(cfs_rq, se->avg.load_sum);
- cfs_rq_util_change(cfs_rq, 0);
- trace_pelt_cfs_tp(cfs_rq);
- }
- /**
- * detach_entity_load_avg - detach this entity from its cfs_rq load avg
- * @cfs_rq: cfs_rq to detach from
- * @se: sched_entity to detach
- *
- * Must call update_cfs_rq_load_avg() before this, since we rely on
- * cfs_rq->avg.last_update_time being current.
- */
- static void detach_entity_load_avg(struct cfs_rq *cfs_rq, struct sched_entity *se)
- {
- trace_android_rvh_detach_entity_load_avg(cfs_rq, se);
- dequeue_load_avg(cfs_rq, se);
- sub_positive(&cfs_rq->avg.util_avg, se->avg.util_avg);
- sub_positive(&cfs_rq->avg.util_sum, se->avg.util_sum);
- /* See update_cfs_rq_load_avg() */
- cfs_rq->avg.util_sum = max_t(u32, cfs_rq->avg.util_sum,
- cfs_rq->avg.util_avg * PELT_MIN_DIVIDER);
- sub_positive(&cfs_rq->avg.runnable_avg, se->avg.runnable_avg);
- sub_positive(&cfs_rq->avg.runnable_sum, se->avg.runnable_sum);
- /* See update_cfs_rq_load_avg() */
- cfs_rq->avg.runnable_sum = max_t(u32, cfs_rq->avg.runnable_sum,
- cfs_rq->avg.runnable_avg * PELT_MIN_DIVIDER);
- add_tg_cfs_propagate(cfs_rq, -se->avg.load_sum);
- cfs_rq_util_change(cfs_rq, 0);
- trace_pelt_cfs_tp(cfs_rq);
- }
- /*
- * Optional action to be done while updating the load average
- */
- #define UPDATE_TG 0x1
- #define SKIP_AGE_LOAD 0x2
- #define DO_ATTACH 0x4
- #define DO_DETACH 0x8
- /* Update task and its cfs_rq load average */
- static inline void update_load_avg(struct cfs_rq *cfs_rq, struct sched_entity *se, int flags)
- {
- u64 now = cfs_rq_clock_pelt(cfs_rq);
- int decayed;
- /*
- * Track task load average for carrying it to new CPU after migrated, and
- * track group sched_entity load average for task_h_load calc in migration
- */
- if (se->avg.last_update_time && !(flags & SKIP_AGE_LOAD))
- __update_load_avg_se(now, cfs_rq, se);
- decayed = update_cfs_rq_load_avg(now, cfs_rq);
- decayed |= propagate_entity_load_avg(se);
- trace_android_rvh_update_load_avg(now, cfs_rq, se);
- if (!se->avg.last_update_time && (flags & DO_ATTACH)) {
- /*
- * DO_ATTACH means we're here from enqueue_entity().
- * !last_update_time means we've passed through
- * migrate_task_rq_fair() indicating we migrated.
- *
- * IOW we're enqueueing a task on a new CPU.
- */
- attach_entity_load_avg(cfs_rq, se);
- update_tg_load_avg(cfs_rq);
- } else if (flags & DO_DETACH) {
- /*
- * DO_DETACH means we're here from dequeue_entity()
- * and we are migrating task out of the CPU.
- */
- detach_entity_load_avg(cfs_rq, se);
- update_tg_load_avg(cfs_rq);
- } else if (decayed) {
- cfs_rq_util_change(cfs_rq, 0);
- if (flags & UPDATE_TG)
- update_tg_load_avg(cfs_rq);
- }
- }
- /*
- * Synchronize entity load avg of dequeued entity without locking
- * the previous rq.
- */
- static void sync_entity_load_avg(struct sched_entity *se)
- {
- struct cfs_rq *cfs_rq = cfs_rq_of(se);
- u64 last_update_time;
- last_update_time = cfs_rq_last_update_time(cfs_rq);
- __update_load_avg_blocked_se(last_update_time, se);
- }
- /*
- * Task first catches up with cfs_rq, and then subtracts
- * itself from the cfs_rq (task must be off the queue now).
- */
- static void remove_entity_load_avg(struct sched_entity *se)
- {
- struct cfs_rq *cfs_rq = cfs_rq_of(se);
- unsigned long flags;
- /*
- * tasks cannot exit without having gone through wake_up_new_task() ->
- * enqueue_task_fair() which will have added things to the cfs_rq,
- * so we can remove unconditionally.
- */
- sync_entity_load_avg(se);
- trace_android_rvh_remove_entity_load_avg(cfs_rq, se);
- raw_spin_lock_irqsave(&cfs_rq->removed.lock, flags);
- ++cfs_rq->removed.nr;
- cfs_rq->removed.util_avg += se->avg.util_avg;
- cfs_rq->removed.load_avg += se->avg.load_avg;
- cfs_rq->removed.runnable_avg += se->avg.runnable_avg;
- raw_spin_unlock_irqrestore(&cfs_rq->removed.lock, flags);
- }
- static inline unsigned long cfs_rq_runnable_avg(struct cfs_rq *cfs_rq)
- {
- return cfs_rq->avg.runnable_avg;
- }
- static inline unsigned long cfs_rq_load_avg(struct cfs_rq *cfs_rq)
- {
- return cfs_rq->avg.load_avg;
- }
- static int newidle_balance(struct rq *this_rq, struct rq_flags *rf);
- static inline unsigned long task_util(struct task_struct *p)
- {
- return READ_ONCE(p->se.avg.util_avg);
- }
- static inline unsigned long _task_util_est(struct task_struct *p)
- {
- struct util_est ue = READ_ONCE(p->se.avg.util_est);
- return max(ue.ewma, (ue.enqueued & ~UTIL_AVG_UNCHANGED));
- }
- static inline unsigned long task_util_est(struct task_struct *p)
- {
- return max(task_util(p), _task_util_est(p));
- }
- static inline void util_est_enqueue(struct cfs_rq *cfs_rq,
- struct task_struct *p)
- {
- unsigned int enqueued;
- if (!sched_feat(UTIL_EST))
- return;
- /* Update root cfs_rq's estimated utilization */
- enqueued = cfs_rq->avg.util_est.enqueued;
- enqueued += _task_util_est(p);
- WRITE_ONCE(cfs_rq->avg.util_est.enqueued, enqueued);
- trace_sched_util_est_cfs_tp(cfs_rq);
- }
- static inline void util_est_dequeue(struct cfs_rq *cfs_rq,
- struct task_struct *p)
- {
- unsigned int enqueued;
- if (!sched_feat(UTIL_EST))
- return;
- /* Update root cfs_rq's estimated utilization */
- enqueued = cfs_rq->avg.util_est.enqueued;
- enqueued -= min_t(unsigned int, enqueued, _task_util_est(p));
- WRITE_ONCE(cfs_rq->avg.util_est.enqueued, enqueued);
- trace_sched_util_est_cfs_tp(cfs_rq);
- }
- #define UTIL_EST_MARGIN (SCHED_CAPACITY_SCALE / 100)
- /*
- * Check if a (signed) value is within a specified (unsigned) margin,
- * based on the observation that:
- *
- * abs(x) < y := (unsigned)(x + y - 1) < (2 * y - 1)
- *
- * NOTE: this only works when value + margin < INT_MAX.
- */
- static inline bool within_margin(int value, int margin)
- {
- return ((unsigned int)(value + margin - 1) < (2 * margin - 1));
- }
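- /*
-  * Worked example for within_margin(), with illustrative values: for
-  * value = -3 and margin = 5, (unsigned)(-3 + 5 - 1) = 1 < 9, so the
-  * value is accepted, matching abs(-3) < 5. For value = 7, the sum is
-  * 11, which is not < 9, so it is rejected (abs(7) >= 5). A large
-  * negative value such as -8 wraps to a huge unsigned number and is
-  * rejected as well.
-  */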
- static inline void util_est_update(struct cfs_rq *cfs_rq,
- struct task_struct *p,
- bool task_sleep)
- {
- long last_ewma_diff, last_enqueued_diff;
- struct util_est ue;
- int ret = 0;
- trace_android_rvh_util_est_update(cfs_rq, p, task_sleep, &ret);
- if (ret)
- return;
- if (!sched_feat(UTIL_EST))
- return;
- /*
- * Skip update of task's estimated utilization when the task has not
- * yet completed an activation, e.g. being migrated.
- */
- if (!task_sleep)
- return;
- /*
- * If the PELT values haven't changed since enqueue time,
- * skip the util_est update.
- */
- ue = p->se.avg.util_est;
- if (ue.enqueued & UTIL_AVG_UNCHANGED)
- return;
- last_enqueued_diff = ue.enqueued;
- /*
- * Reset EWMA on utilization increases; the moving average is used only
- * to smooth utilization decreases.
- */
- ue.enqueued = task_util(p);
- if (sched_feat(UTIL_EST_FASTUP)) {
- if (ue.ewma < ue.enqueued) {
- ue.ewma = ue.enqueued;
- goto done;
- }
- }
- /*
- * Skip update of task's estimated utilization when its members are
- * already ~1% close to its last activation value.
- */
- last_ewma_diff = ue.enqueued - ue.ewma;
- last_enqueued_diff -= ue.enqueued;
- if (within_margin(last_ewma_diff, UTIL_EST_MARGIN)) {
- if (!within_margin(last_enqueued_diff, UTIL_EST_MARGIN))
- goto done;
- return;
- }
- /*
- * To avoid overestimation of actual task utilization, skip updates if
- * we cannot guarantee there is idle time on this CPU.
- */
- if (task_util(p) > capacity_orig_of(cpu_of(rq_of(cfs_rq))))
- return;
- /*
- * Update Task's estimated utilization
- *
- * When *p completes an activation we can consolidate another sample
- * of the task size. This is done by storing the current PELT value
- * as ue.enqueued and by using this value to update the Exponential
- * Weighted Moving Average (EWMA):
- *
- * ewma(t) = w * task_util(p) + (1-w) * ewma(t-1)
- * = w * task_util(p) + ewma(t-1) - w * ewma(t-1)
- * = w * (task_util(p) - ewma(t-1)) + ewma(t-1)
- * = w * ( last_ewma_diff ) + ewma(t-1)
- * = w * (last_ewma_diff + ewma(t-1) / w)
- *
- * Where 'w' is the weight of new samples, which is configured to be
- * 0.25, thus making w=1/4 ( >>= UTIL_EST_WEIGHT_SHIFT)
- */
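- /*
-  * Worked example with illustrative numbers, assuming w = 1/4 as noted
-  * above: if ewma(t-1) = 200 and the new sample task_util(p) = 100,
-  * then last_ewma_diff = -100 and the shifts below compute
-  * ((200 << 2) - 100) >> 2 = 700 >> 2 = 175, i.e. 0.25 * 100 + 0.75 * 200,
-  * smoothing the decrease rather than tracking it in one step.
-  */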
- ue.ewma <<= UTIL_EST_WEIGHT_SHIFT;
- ue.ewma += last_ewma_diff;
- ue.ewma >>= UTIL_EST_WEIGHT_SHIFT;
- done:
- ue.enqueued |= UTIL_AVG_UNCHANGED;
- WRITE_ONCE(p->se.avg.util_est, ue);
- trace_sched_util_est_se_tp(&p->se);
- }
- static inline int util_fits_cpu(unsigned long util,
- unsigned long uclamp_min,
- unsigned long uclamp_max,
- int cpu)
- {
- unsigned long capacity_orig, capacity_orig_thermal;
- unsigned long capacity = capacity_of(cpu);
- bool fits, uclamp_max_fits, done = false;
- trace_android_rvh_util_fits_cpu(util, uclamp_min, uclamp_max, cpu, &fits, &done);
- if (done)
- return fits;
- /*
- * Check if the real util fits without any uclamp boost/cap applied.
- */
- fits = fits_capacity(util, capacity);
- if (!uclamp_is_used())
- return fits;
- /*
- * We must use capacity_orig_of() for comparing against uclamp_min and
- * uclamp_max. We only care about capacity pressure (by using
- * capacity_of()) for comparing against the real util.
- *
- * If a task is boosted to 1024 for example, we don't want a tiny
- * pressure to skew the check whether it fits a CPU or not.
- *
- * Similarly if a task is capped to capacity_orig_of(little_cpu), it
- * should fit a little cpu even if there's some pressure.
- *
- * Only exception is for thermal pressure since it has a direct impact
- * on available OPP of the system.
- *
- * We honour it for uclamp_min only as a drop in performance level
- * could result in not getting the requested minimum performance level.
- *
- * For uclamp_max, we can tolerate a drop in performance level as the
- * goal is to cap the task. So it's okay if it's getting less.
- */
- capacity_orig = capacity_orig_of(cpu);
- capacity_orig_thermal = capacity_orig - arch_scale_thermal_pressure(cpu);
- /*
- * We want to force a task to fit a cpu as implied by uclamp_max.
- * But we do have some corner cases to cater for..
- *
- *
- * C=z
- * | ___
- * | C=y | |
- * |_ _ _ _ _ _ _ _ _ ___ _ _ _ | _ | _ _ _ _ _ uclamp_max
- * | C=x | | | |
- * | ___ | | | |
- * | | | | | | | (util somewhere in this region)
- * | | | | | | |
- * | | | | | | |
- * +----------------------------------------
- * cpu0 cpu1 cpu2
- *
- * In the above example if a task is capped to a specific performance
- * point, y, then when:
- *
- * * util = 80% of x then it does not fit on cpu0 and should migrate
- * to cpu1
- * * util = 80% of y then it is forced to fit on cpu1 to honour
- * uclamp_max request.
- *
- * which is what we're enforcing here. A task always fits if
- * uclamp_max <= capacity_orig. But when uclamp_max > capacity_orig,
- * the normal upmigration rules should still apply.
- *
- * Only exception is when we are on max capacity, then we need to be
- * careful not to block overutilized state. This is so because:
- *
- * 1. There's no concept of capping at max_capacity! We can't go
- * beyond this performance level anyway.
- * 2. The system is being saturated when we're operating near
- * max capacity, it doesn't make sense to block overutilized.
- */
- uclamp_max_fits = (capacity_orig == SCHED_CAPACITY_SCALE) && (uclamp_max == SCHED_CAPACITY_SCALE);
- uclamp_max_fits = !uclamp_max_fits && (uclamp_max <= capacity_orig);
- fits = fits || uclamp_max_fits;
- /*
- *
- * C=z
- * | ___ (region a, capped, util >= uclamp_max)
- * | C=y | |
- * |_ _ _ _ _ _ _ _ _ ___ _ _ _ | _ | _ _ _ _ _ uclamp_max
- * | C=x | | | |
- * | ___ | | | | (region b, uclamp_min <= util <= uclamp_max)
- * |_ _ _|_ _|_ _ _ _| _ | _ _ _| _ | _ _ _ _ _ uclamp_min
- * | | | | | | |
- * | | | | | | | (region c, boosted, util < uclamp_min)
- * +----------------------------------------
- * cpu0 cpu1 cpu2
- *
- * a) If util > uclamp_max, then we're capped, we don't care about
- * actual fitness value here. We only care if uclamp_max fits
- * capacity without taking margin/pressure into account.
- * See comment above.
- *
- * b) If uclamp_min <= util <= uclamp_max, then the normal
- * fits_capacity() rules apply. Except we need to ensure that we
- * enforce we remain within uclamp_max, see comment above.
- *
- * c) If util < uclamp_min, then we are boosted. Same as (b) but we
- * need to take into account the boosted value fits the CPU without
- * taking margin/pressure into account.
- *
- * Cases (a) and (b) are handled in the 'fits' variable already. We
- * just need to consider an extra check for case (c) after ensuring we
- * handle the case uclamp_min > uclamp_max.
- */
- uclamp_min = min(uclamp_min, uclamp_max);
- if (fits && (util < uclamp_min) && (uclamp_min > capacity_orig_thermal))
- return -1;
- return fits;
- }
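- /*
-  * Illustrative example of the return values: on a CPU with
-  * capacity_orig = 800 whose thermal pressure leaves
-  * capacity_orig_thermal = 700, a task with util = 300, uclamp_min = 760
-  * and uclamp_max = 1024 fits by utilization, but its boost cannot be
-  * honoured (uclamp_min > capacity_orig_thermal), so -1 is returned.
-  * Callers such as task_fits_cpu() below treat only a strictly positive
-  * return as a real fit.
-  */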
- static inline int task_fits_cpu(struct task_struct *p, int cpu)
- {
- unsigned long uclamp_min = uclamp_eff_value(p, UCLAMP_MIN);
- unsigned long uclamp_max = uclamp_eff_value(p, UCLAMP_MAX);
- unsigned long util = task_util_est(p);
- /*
- * Return true only if the cpu fully fits the task requirements, which
- * include the utilization but also the performance hints.
- */
- return (util_fits_cpu(util, uclamp_min, uclamp_max, cpu) > 0);
- }
- inline void update_misfit_status(struct task_struct *p, struct rq *rq)
- {
- bool need_update = true;
- trace_android_rvh_update_misfit_status(p, rq, &need_update);
- if (!sched_asym_cpucap_active() || !need_update)
- return;
- if (!p || p->nr_cpus_allowed == 1) {
- rq->misfit_task_load = 0;
- return;
- }
- if (task_fits_cpu(p, cpu_of(rq))) {
- rq->misfit_task_load = 0;
- return;
- }
- /*
- * Make sure that misfit_task_load will not be null even if
- * task_h_load() returns 0.
- */
- rq->misfit_task_load = max_t(unsigned long, task_h_load(p), 1);
- }
- EXPORT_SYMBOL_GPL(update_misfit_status);
- #else /* CONFIG_SMP */
- static inline bool cfs_rq_is_decayed(struct cfs_rq *cfs_rq)
- {
- return !cfs_rq->nr_running;
- }
- #define UPDATE_TG 0x0
- #define SKIP_AGE_LOAD 0x0
- #define DO_ATTACH 0x0
- #define DO_DETACH 0x0
- static inline void update_load_avg(struct cfs_rq *cfs_rq, struct sched_entity *se, int not_used1)
- {
- cfs_rq_util_change(cfs_rq, 0);
- }
- static inline void remove_entity_load_avg(struct sched_entity *se) {}
- static inline void
- attach_entity_load_avg(struct cfs_rq *cfs_rq, struct sched_entity *se) {}
- static inline void
- detach_entity_load_avg(struct cfs_rq *cfs_rq, struct sched_entity *se) {}
- static inline int newidle_balance(struct rq *rq, struct rq_flags *rf)
- {
- return 0;
- }
- static inline void
- util_est_enqueue(struct cfs_rq *cfs_rq, struct task_struct *p) {}
- static inline void
- util_est_dequeue(struct cfs_rq *cfs_rq, struct task_struct *p) {}
- static inline void
- util_est_update(struct cfs_rq *cfs_rq, struct task_struct *p,
- bool task_sleep) {}
- static inline void update_misfit_status(struct task_struct *p, struct rq *rq) {}
- #endif /* CONFIG_SMP */
- static void check_spread(struct cfs_rq *cfs_rq, struct sched_entity *se)
- {
- #ifdef CONFIG_SCHED_DEBUG
- s64 d = se->vruntime - cfs_rq->min_vruntime;
- if (d < 0)
- d = -d;
- if (d > 3*sysctl_sched_latency)
- schedstat_inc(cfs_rq->nr_spread_over);
- #endif
- }
- static inline bool entity_is_long_sleeper(struct sched_entity *se)
- {
- struct cfs_rq *cfs_rq;
- u64 sleep_time;
- if (se->exec_start == 0)
- return false;
- cfs_rq = cfs_rq_of(se);
- sleep_time = rq_clock_task(rq_of(cfs_rq));
- /* Can happen while migrating because of clock task divergence */
- if (sleep_time <= se->exec_start)
- return false;
- sleep_time -= se->exec_start;
- if (sleep_time > ((1ULL << 63) / scale_load_down(NICE_0_LOAD)))
- return true;
- return false;
- }
- static void
- place_entity(struct cfs_rq *cfs_rq, struct sched_entity *se, int initial)
- {
- u64 vruntime = cfs_rq->min_vruntime;
- /*
- * The 'current' period is already promised to the current tasks,
- * however the extra weight of the new task will slow them down a
- * little, place the new task so that it fits in the slot that
- * stays open at the end.
- */
- if (initial && sched_feat(START_DEBIT))
- vruntime += sched_vslice(cfs_rq, se);
- /* sleeps up to a single latency don't count. */
- if (!initial) {
- unsigned long thresh;
- if (se_is_idle(se))
- thresh = sysctl_sched_min_granularity;
- else
- thresh = sysctl_sched_latency;
- /*
- * Halve their sleep time's effect, to allow
- * for a gentler effect of sleepers:
- */
- if (sched_feat(GENTLE_FAIR_SLEEPERS))
- thresh >>= 1;
- vruntime -= thresh;
- }
- trace_android_rvh_place_entity(cfs_rq, se, initial, &vruntime);
- /*
- * Pull vruntime of the entity being placed to the base level of
- * cfs_rq, to prevent boosting it if placed backwards.
- * However, min_vruntime can advance much faster than real time, with
- * the extreme being when an entity with the minimal weight always runs
- * on the cfs_rq. If the waking entity slept for a long time, its
- * vruntime difference from min_vruntime may overflow s64 and their
- * comparison may get inversed, so ignore the entity's original
- * vruntime in that case.
- * The maximal vruntime speedup is given by the ratio of normal to
- * minimal weight: scale_load_down(NICE_0_LOAD) / MIN_SHARES.
- * When placing a migrated waking entity, its exec_start has been set
- * from a different rq. In order to take into account a possible
- * divergence between new and prev rq's clocks task because of irq and
- * stolen time, we take an additional margin.
- * So, cutting off on the sleep time of
- * 2^63 / scale_load_down(NICE_0_LOAD) ~ 104 days
- * should be safe.
- */
- if (entity_is_long_sleeper(se))
- se->vruntime = vruntime;
- else
- se->vruntime = max_vruntime(se->vruntime, vruntime);
- }
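- /*
-  * Worked example for place_entity() above, with illustrative numbers
-  * and the unscaled default sysctl_sched_latency of 6ms: for a waking
-  * non-idle entity with GENTLE_FAIR_SLEEPERS, the candidate vruntime is
-  * min_vruntime - 3ms. With min_vruntime = 10ms, a task whose own
-  * vruntime is still 9.5ms keeps 9.5ms (max_vruntime() wins), while a
-  * task that slept long enough to fall to 2ms is placed at 7ms, so a
-  * sleeper gets at most half a latency of credit.
-  */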
- static void check_enqueue_throttle(struct cfs_rq *cfs_rq);
- static inline bool cfs_bandwidth_used(void);
- /*
- * MIGRATION
- *
- * dequeue
- * update_curr()
- * update_min_vruntime()
- * vruntime -= min_vruntime
- *
- * enqueue
- * update_curr()
- * update_min_vruntime()
- * vruntime += min_vruntime
- *
- * this way the vruntime transition between RQs is done when both
- * min_vruntime are up-to-date.
- *
- * WAKEUP (remote)
- *
- * ->migrate_task_rq_fair() (p->state == TASK_WAKING)
- * vruntime -= min_vruntime
- *
- * enqueue
- * update_curr()
- * update_min_vruntime()
- * vruntime += min_vruntime
- *
- * this way we don't have the most up-to-date min_vruntime on the originating
- * CPU and an up-to-date min_vruntime on the destination CPU.
- */
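- /*
-  * Worked example with illustrative numbers: a task dequeued on CPU0
-  * with vruntime = 1000us while CPU0's min_vruntime = 900us leaves
-  * carrying the relative value 100us; when it is enqueued on CPU1 whose
-  * min_vruntime = 5000us, the renormalisation below yields 5100us, so
-  * the task competes fairly on CPU1 no matter how far the two queues'
-  * clocks have drifted apart.
-  */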
- static void
- enqueue_entity(struct cfs_rq *cfs_rq, struct sched_entity *se, int flags)
- {
- bool renorm = !(flags & ENQUEUE_WAKEUP) || (flags & ENQUEUE_MIGRATED);
- bool curr = cfs_rq->curr == se;
- /*
- * If we're the current task, we must renormalise before calling
- * update_curr().
- */
- if (renorm && curr)
- se->vruntime += cfs_rq->min_vruntime;
- update_curr(cfs_rq);
- /*
- * Otherwise, renormalise after, such that we're placed at the current
- * moment in time, instead of some random moment in the past. Being
- * placed in the past could significantly boost this task to the
- * fairness detriment of existing tasks.
- */
- if (renorm && !curr)
- se->vruntime += cfs_rq->min_vruntime;
- /*
- * When enqueuing a sched_entity, we must:
- * - Update loads to have both entity and cfs_rq synced with now.
- * - For group_entity, update its runnable_weight to reflect the new
- * h_nr_running of its group cfs_rq.
- * - For group_entity, update its weight to reflect the new share of
- * its group cfs_rq
- * - Add its new weight to cfs_rq->load.weight
- */
- update_load_avg(cfs_rq, se, UPDATE_TG | DO_ATTACH);
- se_update_runnable(se);
- update_cfs_group(se);
- account_entity_enqueue(cfs_rq, se);
- if (flags & ENQUEUE_WAKEUP)
- place_entity(cfs_rq, se, 0);
- /* Entity has migrated, no longer consider this task hot */
- if (flags & ENQUEUE_MIGRATED)
- se->exec_start = 0;
- check_schedstat_required();
- update_stats_enqueue_fair(cfs_rq, se, flags);
- check_spread(cfs_rq, se);
- if (!curr)
- __enqueue_entity(cfs_rq, se);
- se->on_rq = 1;
- if (cfs_rq->nr_running == 1) {
- check_enqueue_throttle(cfs_rq);
- if (!throttled_hierarchy(cfs_rq))
- list_add_leaf_cfs_rq(cfs_rq);
- }
- }
- static void __clear_buddies_last(struct sched_entity *se)
- {
- for_each_sched_entity(se) {
- struct cfs_rq *cfs_rq = cfs_rq_of(se);
- if (cfs_rq->last != se)
- break;
- cfs_rq->last = NULL;
- }
- }
- static void __clear_buddies_next(struct sched_entity *se)
- {
- for_each_sched_entity(se) {
- struct cfs_rq *cfs_rq = cfs_rq_of(se);
- if (cfs_rq->next != se)
- break;
- cfs_rq->next = NULL;
- }
- }
- static void __clear_buddies_skip(struct sched_entity *se)
- {
- for_each_sched_entity(se) {
- struct cfs_rq *cfs_rq = cfs_rq_of(se);
- if (cfs_rq->skip != se)
- break;
- cfs_rq->skip = NULL;
- }
- }
- static void clear_buddies(struct cfs_rq *cfs_rq, struct sched_entity *se)
- {
- if (cfs_rq->last == se)
- __clear_buddies_last(se);
- if (cfs_rq->next == se)
- __clear_buddies_next(se);
- if (cfs_rq->skip == se)
- __clear_buddies_skip(se);
- }
- static __always_inline void return_cfs_rq_runtime(struct cfs_rq *cfs_rq);
- static void
- dequeue_entity(struct cfs_rq *cfs_rq, struct sched_entity *se, int flags)
- {
- int action = UPDATE_TG;
- if (entity_is_task(se) && task_on_rq_migrating(task_of(se)))
- action |= DO_DETACH;
- /*
- * Update run-time statistics of the 'current'.
- */
- update_curr(cfs_rq);
- /*
- * When dequeuing a sched_entity, we must:
- * - Update loads to have both entity and cfs_rq synced with now.
- * - For group_entity, update its runnable_weight to reflect the new
- * h_nr_running of its group cfs_rq.
- * - Subtract its previous weight from cfs_rq->load.weight.
- * - For group entity, update its weight to reflect the new share
- * of its group cfs_rq.
- */
- update_load_avg(cfs_rq, se, action);
- se_update_runnable(se);
- update_stats_dequeue_fair(cfs_rq, se, flags);
- clear_buddies(cfs_rq, se);
- if (se != cfs_rq->curr)
- __dequeue_entity(cfs_rq, se);
- se->on_rq = 0;
- account_entity_dequeue(cfs_rq, se);
- /*
- * Normalize after update_curr(); which will also have moved
- * min_vruntime if @se is the one holding it back. But before doing
- * update_min_vruntime() again, which will discount @se's position and
- * can move min_vruntime forward still more.
- */
- if (!(flags & DEQUEUE_SLEEP))
- se->vruntime -= cfs_rq->min_vruntime;
- /* return excess runtime on last dequeue */
- return_cfs_rq_runtime(cfs_rq);
- update_cfs_group(se);
- /*
- * Now advance min_vruntime if @se was the entity holding it back,
- * except when: DEQUEUE_SAVE && !DEQUEUE_MOVE, in this case we'll be
- * put back on, and if we advance min_vruntime, we'll be placed back
- * further than we started -- ie. we'll be penalized.
- */
- if ((flags & (DEQUEUE_SAVE | DEQUEUE_MOVE)) != DEQUEUE_SAVE)
- update_min_vruntime(cfs_rq);
- if (cfs_rq->nr_running == 0)
- update_idle_cfs_rq_clock_pelt(cfs_rq);
- }
- /*
- * Preempt the current task with a newly woken task if needed:
- */
- static void
- check_preempt_tick(struct cfs_rq *cfs_rq, struct sched_entity *curr)
- {
- unsigned long ideal_runtime, delta_exec;
- struct sched_entity *se;
- s64 delta;
- bool skip_preempt = false;
- /*
- * When many tasks blow up the sched_period, it is possible that
- * sched_slice() reports unusually large results (when many tasks are
- * very light for example). Therefore impose a maximum.
- */
- ideal_runtime = min_t(u64, sched_slice(cfs_rq, curr), sysctl_sched_latency);
- delta_exec = curr->sum_exec_runtime - curr->prev_sum_exec_runtime;
- trace_android_rvh_check_preempt_tick(current, &ideal_runtime, &skip_preempt,
- delta_exec, cfs_rq, curr, sysctl_sched_min_granularity);
- if (skip_preempt)
- return;
- if (delta_exec > ideal_runtime) {
- resched_curr(rq_of(cfs_rq));
- /*
- * The current task ran long enough, ensure it doesn't get
- * re-elected due to buddy favours.
- */
- clear_buddies(cfs_rq, curr);
- return;
- }
- /*
- * Ensure that a task that missed wakeup preemption by a
- * narrow margin doesn't have to wait for a full slice.
- * This also mitigates buddy induced latencies under load.
- */
- if (delta_exec < sysctl_sched_min_granularity)
- return;
- se = __pick_first_entity(cfs_rq);
- delta = curr->vruntime - se->vruntime;
- if (delta < 0)
- return;
- if (delta > ideal_runtime)
- resched_curr(rq_of(cfs_rq));
- }
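- /*
-  * Illustrative numbers, using the unscaled default tunables: with
-  * sysctl_sched_latency = 6ms and two runnable tasks of equal weight,
-  * sched_slice() gives an ideal_runtime of about 3ms, so the current
-  * task is rescheduled once it has run roughly 3ms since last being
-  * picked. With very many light tasks, the min_t() clamp above caps
-  * ideal_runtime at the 6ms latency, so a blown-up sched_slice() cannot
-  * hand one task an arbitrarily long slice.
-  */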
- void set_next_entity(struct cfs_rq *cfs_rq, struct sched_entity *se)
- {
- clear_buddies(cfs_rq, se);
- /* 'current' is not kept within the tree. */
- if (se->on_rq) {
- /*
- * Any task has to be enqueued before it gets to execute on
- * a CPU. So account for the time it spent waiting on the
- * runqueue.
- */
- update_stats_wait_end_fair(cfs_rq, se);
- __dequeue_entity(cfs_rq, se);
- update_load_avg(cfs_rq, se, UPDATE_TG);
- }
- update_stats_curr_start(cfs_rq, se);
- cfs_rq->curr = se;
- /*
- * Track our maximum slice length, if the CPU's load is at
- * least twice that of our own weight (i.e. dont track it
- * when there are only lesser-weight tasks around):
- */
- if (schedstat_enabled() &&
- rq_of(cfs_rq)->cfs.load.weight >= 2*se->load.weight) {
- struct sched_statistics *stats;
- stats = __schedstats_from_se(se);
- __schedstat_set(stats->slice_max,
- max((u64)stats->slice_max,
- se->sum_exec_runtime - se->prev_sum_exec_runtime));
- }
- se->prev_sum_exec_runtime = se->sum_exec_runtime;
- }
- EXPORT_SYMBOL_GPL(set_next_entity);
- static int
- wakeup_preempt_entity(struct sched_entity *curr, struct sched_entity *se);
- /*
- * Pick the next process, keeping these things in mind, in this order:
- * 1) keep things fair between processes/task groups
- * 2) pick the "next" process, since someone really wants that to run
- * 3) pick the "last" process, for cache locality
- * 4) do not run the "skip" process, if something else is available
- */
- static struct sched_entity *
- pick_next_entity(struct cfs_rq *cfs_rq, struct sched_entity *curr)
- {
- struct sched_entity *left = __pick_first_entity(cfs_rq);
- struct sched_entity *se = NULL;
- trace_android_rvh_pick_next_entity(cfs_rq, curr, &se);
- if (se)
- goto done;
- /*
- * If curr is set we have to see if its left of the leftmost entity
- * still in the tree, provided there was anything in the tree at all.
- */
- if (!left || (curr && entity_before(curr, left)))
- left = curr;
- se = left; /* ideally we run the leftmost entity */
- /*
- * Avoid running the skip buddy, if running something else can
- * be done without getting too unfair.
- */
- if (cfs_rq->skip && cfs_rq->skip == se) {
- struct sched_entity *second;
- if (se == curr) {
- second = __pick_first_entity(cfs_rq);
- } else {
- second = __pick_next_entity(se);
- if (!second || (curr && entity_before(curr, second)))
- second = curr;
- }
- if (second && (!left || wakeup_preempt_entity(second, left) < 1))
- se = second;
- }
- if (cfs_rq->next && (!left || wakeup_preempt_entity(cfs_rq->next, left) < 1)) {
- /*
- * Someone really wants this to run. If it's not unfair, run it.
- */
- se = cfs_rq->next;
- } else if (cfs_rq->last && (!left || wakeup_preempt_entity(cfs_rq->last, left) < 1)) {
- /*
- * Prefer last buddy, try to return the CPU to a preempted task.
- */
- se = cfs_rq->last;
- }
- done:
- return se;
- }
- static bool check_cfs_rq_runtime(struct cfs_rq *cfs_rq);
- static void put_prev_entity(struct cfs_rq *cfs_rq, struct sched_entity *prev)
- {
- /*
- * If still on the runqueue then deactivate_task()
- * was not called and update_curr() has to be done:
- */
- if (prev->on_rq)
- update_curr(cfs_rq);
- /* throttle cfs_rqs exceeding runtime */
- check_cfs_rq_runtime(cfs_rq);
- check_spread(cfs_rq, prev);
- if (prev->on_rq) {
- update_stats_wait_start_fair(cfs_rq, prev);
- /* Put 'current' back into the tree. */
- __enqueue_entity(cfs_rq, prev);
- /* in !on_rq case, update occurred at dequeue */
- update_load_avg(cfs_rq, prev, 0);
- }
- cfs_rq->curr = NULL;
- }
- static void
- entity_tick(struct cfs_rq *cfs_rq, struct sched_entity *curr, int queued)
- {
- /*
- * Update run-time statistics of the 'current'.
- */
- update_curr(cfs_rq);
- /*
- * Ensure that runnable average is periodically updated.
- */
- update_load_avg(cfs_rq, curr, UPDATE_TG);
- update_cfs_group(curr);
- #ifdef CONFIG_SCHED_HRTICK
- /*
- * queued ticks are scheduled to match the slice, so don't bother
- * validating it and just reschedule.
- */
- if (queued) {
- resched_curr(rq_of(cfs_rq));
- return;
- }
- /*
- * don't let the period tick interfere with the hrtick preemption
- */
- if (!sched_feat(DOUBLE_TICK) &&
- hrtimer_active(&rq_of(cfs_rq)->hrtick_timer))
- return;
- #endif
- if (cfs_rq->nr_running > 1)
- check_preempt_tick(cfs_rq, curr);
- trace_android_rvh_entity_tick(cfs_rq, curr);
- }
- /**************************************************
- * CFS bandwidth control machinery
- */
- #ifdef CONFIG_CFS_BANDWIDTH
- #ifdef CONFIG_JUMP_LABEL
- static struct static_key __cfs_bandwidth_used;
- static inline bool cfs_bandwidth_used(void)
- {
- return static_key_false(&__cfs_bandwidth_used);
- }
- void cfs_bandwidth_usage_inc(void)
- {
- static_key_slow_inc_cpuslocked(&__cfs_bandwidth_used);
- }
- void cfs_bandwidth_usage_dec(void)
- {
- static_key_slow_dec_cpuslocked(&__cfs_bandwidth_used);
- }
- #else /* CONFIG_JUMP_LABEL */
- static bool cfs_bandwidth_used(void)
- {
- return true;
- }
- void cfs_bandwidth_usage_inc(void) {}
- void cfs_bandwidth_usage_dec(void) {}
- #endif /* CONFIG_JUMP_LABEL */
- /*
- * default period for cfs group bandwidth.
- * default: 0.1s, units: nanoseconds
- */
- static inline u64 default_cfs_period(void)
- {
- return 100000000ULL;
- }
- static inline u64 sched_cfs_bandwidth_slice(void)
- {
- return (u64)sysctl_sched_cfs_bandwidth_slice * NSEC_PER_USEC;
- }
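- /*
-  * Illustrative numbers: with the default 100ms period above and a quota
-  * of 50ms, a group may consume at most 50ms of CPU time per period
-  * (plus any configured burst), i.e. roughly half of one CPU spread
-  * across however many CPUs it runs on. Runtime is handed to individual
-  * cfs_rqs in slices of sysctl_sched_cfs_bandwidth_slice (5ms by
-  * default), amortizing accesses to the global pool under cfs_b->lock.
-  */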
- /*
- * Replenish runtime according to assigned quota. We use sched_clock_cpu
- * directly instead of rq->clock to avoid adding additional synchronization
- * around rq->lock.
- *
- * requires cfs_b->lock
- */
- void __refill_cfs_bandwidth_runtime(struct cfs_bandwidth *cfs_b)
- {
- s64 runtime;
- if (unlikely(cfs_b->quota == RUNTIME_INF))
- return;
- cfs_b->runtime += cfs_b->quota;
- runtime = cfs_b->runtime_snap - cfs_b->runtime;
- if (runtime > 0) {
- cfs_b->burst_time += runtime;
- cfs_b->nr_burst++;
- }
- cfs_b->runtime = min(cfs_b->runtime, cfs_b->quota + cfs_b->burst);
- cfs_b->runtime_snap = cfs_b->runtime;
- }
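- /*
-  * Worked example with illustrative numbers: quota = 10ms, burst = 5ms,
-  * and runtime_snap = 15ms after the previous refill. If 12ms were
-  * consumed, runtime is 3ms on entry; adding the quota makes it 13ms,
-  * runtime_snap - runtime = 2ms > 0, so 2ms of burst usage is recorded
-  * (burst_time, nr_burst). runtime is then capped at quota + burst =
-  * 15ms (it stays 13ms here) and runtime_snap is reset to the new value.
-  */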
- static inline struct cfs_bandwidth *tg_cfs_bandwidth(struct task_group *tg)
- {
- return &tg->cfs_bandwidth;
- }
- /* returns 0 on failure to allocate runtime */
- static int __assign_cfs_rq_runtime(struct cfs_bandwidth *cfs_b,
- struct cfs_rq *cfs_rq, u64 target_runtime)
- {
- u64 min_amount, amount = 0;
- lockdep_assert_held(&cfs_b->lock);
- /* note: this is a positive sum as runtime_remaining <= 0 */
- min_amount = target_runtime - cfs_rq->runtime_remaining;
- if (cfs_b->quota == RUNTIME_INF)
- amount = min_amount;
- else {
- start_cfs_bandwidth(cfs_b);
- if (cfs_b->runtime > 0) {
- amount = min(cfs_b->runtime, min_amount);
- cfs_b->runtime -= amount;
- cfs_b->idle = 0;
- }
- }
- cfs_rq->runtime_remaining += amount;
- return cfs_rq->runtime_remaining > 0;
- }
- /* returns 0 on failure to allocate runtime */
- static int assign_cfs_rq_runtime(struct cfs_rq *cfs_rq)
- {
- struct cfs_bandwidth *cfs_b = tg_cfs_bandwidth(cfs_rq->tg);
- int ret;
- raw_spin_lock(&cfs_b->lock);
- ret = __assign_cfs_rq_runtime(cfs_b, cfs_rq, sched_cfs_bandwidth_slice());
- raw_spin_unlock(&cfs_b->lock);
- return ret;
- }
- static void __account_cfs_rq_runtime(struct cfs_rq *cfs_rq, u64 delta_exec)
- {
- /* dock delta_exec before expiring quota (as it could span periods) */
- cfs_rq->runtime_remaining -= delta_exec;
- if (likely(cfs_rq->runtime_remaining > 0))
- return;
- if (cfs_rq->throttled)
- return;
- /*
- * if we're unable to extend our runtime we resched so that the active
- * hierarchy can be throttled
- */
- if (!assign_cfs_rq_runtime(cfs_rq) && likely(cfs_rq->curr))
- resched_curr(rq_of(cfs_rq));
- }
- static __always_inline
- void account_cfs_rq_runtime(struct cfs_rq *cfs_rq, u64 delta_exec)
- {
- if (!cfs_bandwidth_used() || !cfs_rq->runtime_enabled)
- return;
- __account_cfs_rq_runtime(cfs_rq, delta_exec);
- }
- static inline int cfs_rq_throttled(struct cfs_rq *cfs_rq)
- {
- return cfs_bandwidth_used() && cfs_rq->throttled;
- }
- /* check whether cfs_rq, or any parent, is throttled */
- static inline int throttled_hierarchy(struct cfs_rq *cfs_rq)
- {
- return cfs_bandwidth_used() && cfs_rq->throttle_count;
- }
- /*
- * Ensure that neither of the group entities corresponding to src_cpu or
- * dest_cpu are members of a throttled hierarchy when performing group
- * load-balance operations.
- */
- static inline int throttled_lb_pair(struct task_group *tg,
- int src_cpu, int dest_cpu)
- {
- struct cfs_rq *src_cfs_rq, *dest_cfs_rq;
- src_cfs_rq = tg->cfs_rq[src_cpu];
- dest_cfs_rq = tg->cfs_rq[dest_cpu];
- return throttled_hierarchy(src_cfs_rq) ||
- throttled_hierarchy(dest_cfs_rq);
- }
- static int tg_unthrottle_up(struct task_group *tg, void *data)
- {
- struct rq *rq = data;
- struct cfs_rq *cfs_rq = tg->cfs_rq[cpu_of(rq)];
- cfs_rq->throttle_count--;
- if (!cfs_rq->throttle_count) {
- cfs_rq->throttled_clock_pelt_time += rq_clock_pelt(rq) -
- cfs_rq->throttled_clock_pelt;
- /* Add cfs_rq with load or one or more already running entities to the list */
- if (!cfs_rq_is_decayed(cfs_rq))
- list_add_leaf_cfs_rq(cfs_rq);
- }
- return 0;
- }
- static int tg_throttle_down(struct task_group *tg, void *data)
- {
- struct rq *rq = data;
- struct cfs_rq *cfs_rq = tg->cfs_rq[cpu_of(rq)];
- /* group is entering throttled state, stop time */
- if (!cfs_rq->throttle_count) {
- cfs_rq->throttled_clock_pelt = rq_clock_pelt(rq);
- list_del_leaf_cfs_rq(cfs_rq);
- }
- cfs_rq->throttle_count++;
- return 0;
- }
- static bool throttle_cfs_rq(struct cfs_rq *cfs_rq)
- {
- struct rq *rq = rq_of(cfs_rq);
- struct cfs_bandwidth *cfs_b = tg_cfs_bandwidth(cfs_rq->tg);
- struct sched_entity *se;
- long task_delta, idle_task_delta, dequeue = 1;
- raw_spin_lock(&cfs_b->lock);
- /* This will start the period timer if necessary */
- if (__assign_cfs_rq_runtime(cfs_b, cfs_rq, 1)) {
- /*
- * We have raced with bandwidth becoming available, and if we
- * actually throttled the timer might not unthrottle us for an
- * entire period. We additionally needed to make sure that any
- * subsequent check_cfs_rq_runtime calls agree not to throttle
- * us, as we may commit to do cfs put_prev+pick_next, so we ask
- * for 1ns of runtime rather than just check cfs_b.
- */
- dequeue = 0;
- } else {
- list_add_tail_rcu(&cfs_rq->throttled_list,
- &cfs_b->throttled_cfs_rq);
- }
- raw_spin_unlock(&cfs_b->lock);
- if (!dequeue)
- return false; /* Throttle no longer required. */
- se = cfs_rq->tg->se[cpu_of(rq_of(cfs_rq))];
- /* freeze hierarchy runnable averages while throttled */
- rcu_read_lock();
- walk_tg_tree_from(cfs_rq->tg, tg_throttle_down, tg_nop, (void *)rq);
- rcu_read_unlock();
- task_delta = cfs_rq->h_nr_running;
- idle_task_delta = cfs_rq->idle_h_nr_running;
- for_each_sched_entity(se) {
- struct cfs_rq *qcfs_rq = cfs_rq_of(se);
- /* throttled entity or throttle-on-deactivate */
- if (!se->on_rq)
- goto done;
- dequeue_entity(qcfs_rq, se, DEQUEUE_SLEEP);
- if (cfs_rq_is_idle(group_cfs_rq(se)))
- idle_task_delta = cfs_rq->h_nr_running;
- qcfs_rq->h_nr_running -= task_delta;
- qcfs_rq->idle_h_nr_running -= idle_task_delta;
- if (qcfs_rq->load.weight) {
- /* Avoid re-evaluating load for this entity: */
- se = parent_entity(se);
- break;
- }
- }
- for_each_sched_entity(se) {
- struct cfs_rq *qcfs_rq = cfs_rq_of(se);
- /* throttled entity or throttle-on-deactivate */
- if (!se->on_rq)
- goto done;
- update_load_avg(qcfs_rq, se, 0);
- se_update_runnable(se);
- if (cfs_rq_is_idle(group_cfs_rq(se)))
- idle_task_delta = cfs_rq->h_nr_running;
- qcfs_rq->h_nr_running -= task_delta;
- qcfs_rq->idle_h_nr_running -= idle_task_delta;
- }
- /* At this point se is NULL and we are at root level */
- sub_nr_running(rq, task_delta);
- done:
- /*
- * Note: distribution will already see us throttled via the
- * throttled-list. rq->lock protects completion.
- */
- cfs_rq->throttled = 1;
- cfs_rq->throttled_clock = rq_clock(rq);
- return true;
- }
- void unthrottle_cfs_rq(struct cfs_rq *cfs_rq)
- {
- struct rq *rq = rq_of(cfs_rq);
- struct cfs_bandwidth *cfs_b = tg_cfs_bandwidth(cfs_rq->tg);
- struct sched_entity *se;
- long task_delta, idle_task_delta;
- se = cfs_rq->tg->se[cpu_of(rq)];
- cfs_rq->throttled = 0;
- update_rq_clock(rq);
- raw_spin_lock(&cfs_b->lock);
- cfs_b->throttled_time += rq_clock(rq) - cfs_rq->throttled_clock;
- list_del_rcu(&cfs_rq->throttled_list);
- raw_spin_unlock(&cfs_b->lock);
- /* update hierarchical throttle state */
- walk_tg_tree_from(cfs_rq->tg, tg_nop, tg_unthrottle_up, (void *)rq);
- if (!cfs_rq->load.weight) {
- if (!cfs_rq->on_list)
- return;
- /*
- * Nothing to run but something to decay (on_list)?
- * Complete the branch.
- */
- for_each_sched_entity(se) {
- if (list_add_leaf_cfs_rq(cfs_rq_of(se)))
- break;
- }
- goto unthrottle_throttle;
- }
- task_delta = cfs_rq->h_nr_running;
- idle_task_delta = cfs_rq->idle_h_nr_running;
- for_each_sched_entity(se) {
- struct cfs_rq *qcfs_rq = cfs_rq_of(se);
- if (se->on_rq)
- break;
- enqueue_entity(qcfs_rq, se, ENQUEUE_WAKEUP);
- if (cfs_rq_is_idle(group_cfs_rq(se)))
- idle_task_delta = cfs_rq->h_nr_running;
- qcfs_rq->h_nr_running += task_delta;
- qcfs_rq->idle_h_nr_running += idle_task_delta;
- /* end evaluation on encountering a throttled cfs_rq */
- if (cfs_rq_throttled(qcfs_rq))
- goto unthrottle_throttle;
- }
- for_each_sched_entity(se) {
- struct cfs_rq *qcfs_rq = cfs_rq_of(se);
- update_load_avg(qcfs_rq, se, UPDATE_TG);
- se_update_runnable(se);
- if (cfs_rq_is_idle(group_cfs_rq(se)))
- idle_task_delta = cfs_rq->h_nr_running;
- qcfs_rq->h_nr_running += task_delta;
- qcfs_rq->idle_h_nr_running += idle_task_delta;
- /* end evaluation on encountering a throttled cfs_rq */
- if (cfs_rq_throttled(qcfs_rq))
- goto unthrottle_throttle;
- }
- /* At this point se is NULL and we are at root level */
- add_nr_running(rq, task_delta);
- unthrottle_throttle:
- assert_list_leaf_cfs_rq(rq);
- /* Determine whether we need to wake up potentially idle CPU: */
- if (rq->curr == rq->idle && rq->cfs.nr_running)
- resched_curr(rq);
- }
- static void distribute_cfs_runtime(struct cfs_bandwidth *cfs_b)
- {
- struct cfs_rq *cfs_rq;
- u64 runtime, remaining = 1;
- rcu_read_lock();
- list_for_each_entry_rcu(cfs_rq, &cfs_b->throttled_cfs_rq,
- throttled_list) {
- struct rq *rq = rq_of(cfs_rq);
- struct rq_flags rf;
- rq_lock_irqsave(rq, &rf);
- if (!cfs_rq_throttled(cfs_rq))
- goto next;
- /* By the above check, this should never be true */
- SCHED_WARN_ON(cfs_rq->runtime_remaining > 0);
- raw_spin_lock(&cfs_b->lock);
- runtime = -cfs_rq->runtime_remaining + 1;
- if (runtime > cfs_b->runtime)
- runtime = cfs_b->runtime;
- cfs_b->runtime -= runtime;
- remaining = cfs_b->runtime;
- raw_spin_unlock(&cfs_b->lock);
- cfs_rq->runtime_remaining += runtime;
- /* we check whether we're throttled above */
- if (cfs_rq->runtime_remaining > 0)
- unthrottle_cfs_rq(cfs_rq);
- next:
- rq_unlock_irqrestore(rq, &rf);
- if (!remaining)
- break;
- }
- rcu_read_unlock();
- }
- /*
- * Responsible for refilling a task_group's bandwidth and unthrottling its
- * cfs_rqs as appropriate. If there has been no activity within the last
- * period the timer is deactivated until scheduling resumes; cfs_b->idle is
- * used to track this state.
- */
- static int do_sched_cfs_period_timer(struct cfs_bandwidth *cfs_b, int overrun, unsigned long flags)
- {
- int throttled;
- /* no need to continue the timer with no bandwidth constraint */
- if (cfs_b->quota == RUNTIME_INF)
- goto out_deactivate;
- throttled = !list_empty(&cfs_b->throttled_cfs_rq);
- cfs_b->nr_periods += overrun;
- /* Refill extra burst quota even if cfs_b->idle */
- __refill_cfs_bandwidth_runtime(cfs_b);
- /*
- * idle depends on !throttled (for the case of a large deficit), and if
- * we're going inactive then everything else can be deferred
- */
- if (cfs_b->idle && !throttled)
- goto out_deactivate;
- if (!throttled) {
- /* mark as potentially idle for the upcoming period */
- cfs_b->idle = 1;
- return 0;
- }
- /* account preceding periods in which throttling occurred */
- cfs_b->nr_throttled += overrun;
- /*
- * This check is repeated as we release cfs_b->lock while we unthrottle.
- */
- while (throttled && cfs_b->runtime > 0) {
- raw_spin_unlock_irqrestore(&cfs_b->lock, flags);
- /* we can't nest cfs_b->lock while distributing bandwidth */
- distribute_cfs_runtime(cfs_b);
- raw_spin_lock_irqsave(&cfs_b->lock, flags);
- throttled = !list_empty(&cfs_b->throttled_cfs_rq);
- }
- /*
- * While we are ensured activity in the period following an
- * unthrottle, this also covers the case in which the new bandwidth is
- * insufficient to cover the existing bandwidth deficit. (Forcing the
- * timer to remain active while there are any throttled entities.)
- */
- cfs_b->idle = 0;
- return 0;
- out_deactivate:
- return 1;
- }
- /* a cfs_rq won't donate quota below this amount */
- static const u64 min_cfs_rq_runtime = 1 * NSEC_PER_MSEC;
- /* minimum remaining period time to redistribute slack quota */
- static const u64 min_bandwidth_expiration = 2 * NSEC_PER_MSEC;
- /* how long we wait to gather additional slack before distributing */
- static const u64 cfs_bandwidth_slack_period = 5 * NSEC_PER_MSEC;
- /*
- * Are we near the end of the current quota period?
- *
- * Requires cfs_b->lock for hrtimer_expires_remaining to be safe against the
- * hrtimer base being cleared by hrtimer_start. In the case of
- * migrate_hrtimers, base is never cleared, so we are fine.
- */
- static int runtime_refresh_within(struct cfs_bandwidth *cfs_b, u64 min_expire)
- {
- struct hrtimer *refresh_timer = &cfs_b->period_timer;
- s64 remaining;
- /* if the call-back is running a quota refresh is already occurring */
- if (hrtimer_callback_running(refresh_timer))
- return 1;
- /* is a quota refresh about to occur? */
- remaining = ktime_to_ns(hrtimer_expires_remaining(refresh_timer));
- if (remaining < (s64)min_expire)
- return 1;
- return 0;
- }
- static void start_cfs_slack_bandwidth(struct cfs_bandwidth *cfs_b)
- {
- u64 min_left = cfs_bandwidth_slack_period + min_bandwidth_expiration;
- /* if there's a quota refresh soon don't bother with slack */
- if (runtime_refresh_within(cfs_b, min_left))
- return;
- /* don't push forwards an existing deferred unthrottle */
- if (cfs_b->slack_started)
- return;
- cfs_b->slack_started = true;
- hrtimer_start(&cfs_b->slack_timer,
- ns_to_ktime(cfs_bandwidth_slack_period),
- HRTIMER_MODE_REL);
- }
- /* we know any runtime found here is valid as update_curr() precedes return */
- static void __return_cfs_rq_runtime(struct cfs_rq *cfs_rq)
- {
- struct cfs_bandwidth *cfs_b = tg_cfs_bandwidth(cfs_rq->tg);
- s64 slack_runtime = cfs_rq->runtime_remaining - min_cfs_rq_runtime;
- if (slack_runtime <= 0)
- return;
- raw_spin_lock(&cfs_b->lock);
- if (cfs_b->quota != RUNTIME_INF) {
- cfs_b->runtime += slack_runtime;
- /* we are under rq->lock, defer unthrottling using a timer */
- if (cfs_b->runtime > sched_cfs_bandwidth_slice() &&
- !list_empty(&cfs_b->throttled_cfs_rq))
- start_cfs_slack_bandwidth(cfs_b);
- }
- raw_spin_unlock(&cfs_b->lock);
- /* even if it's not valid for return we don't want to try again */
- cfs_rq->runtime_remaining -= slack_runtime;
- }
- static __always_inline void return_cfs_rq_runtime(struct cfs_rq *cfs_rq)
- {
- if (!cfs_bandwidth_used())
- return;
- if (!cfs_rq->runtime_enabled || cfs_rq->nr_running)
- return;
- __return_cfs_rq_runtime(cfs_rq);
- }
- /*
- * This is done with a timer (instead of inline with bandwidth return) since
- * it's necessary to juggle rq->locks to unthrottle their respective cfs_rqs.
- */
- static void do_sched_cfs_slack_timer(struct cfs_bandwidth *cfs_b)
- {
- u64 runtime = 0, slice = sched_cfs_bandwidth_slice();
- unsigned long flags;
- /* confirm we're still not at a refresh boundary */
- raw_spin_lock_irqsave(&cfs_b->lock, flags);
- cfs_b->slack_started = false;
- if (runtime_refresh_within(cfs_b, min_bandwidth_expiration)) {
- raw_spin_unlock_irqrestore(&cfs_b->lock, flags);
- return;
- }
- if (cfs_b->quota != RUNTIME_INF && cfs_b->runtime > slice)
- runtime = cfs_b->runtime;
- raw_spin_unlock_irqrestore(&cfs_b->lock, flags);
- if (!runtime)
- return;
- distribute_cfs_runtime(cfs_b);
- }
- /*
- * When a group wakes up we want to make sure that its quota is not already
- * expired/exceeded, otherwise it may be allowed to steal additional ticks of
- * runtime as update_curr() throttling can not trigger until it's on-rq.
- */
- static void check_enqueue_throttle(struct cfs_rq *cfs_rq)
- {
- if (!cfs_bandwidth_used())
- return;
- /* an active group must be handled by the update_curr()->put() path */
- if (!cfs_rq->runtime_enabled || cfs_rq->curr)
- return;
- /* ensure the group is not already throttled */
- if (cfs_rq_throttled(cfs_rq))
- return;
- /* update runtime allocation */
- account_cfs_rq_runtime(cfs_rq, 0);
- if (cfs_rq->runtime_remaining <= 0)
- throttle_cfs_rq(cfs_rq);
- }
- static void sync_throttle(struct task_group *tg, int cpu)
- {
- struct cfs_rq *pcfs_rq, *cfs_rq;
- if (!cfs_bandwidth_used())
- return;
- if (!tg->parent)
- return;
- cfs_rq = tg->cfs_rq[cpu];
- pcfs_rq = tg->parent->cfs_rq[cpu];
- cfs_rq->throttle_count = pcfs_rq->throttle_count;
- cfs_rq->throttled_clock_pelt = rq_clock_pelt(cpu_rq(cpu));
- }
- /* conditionally throttle active cfs_rq's from put_prev_entity() */
- static bool check_cfs_rq_runtime(struct cfs_rq *cfs_rq)
- {
- if (!cfs_bandwidth_used())
- return false;
- if (likely(!cfs_rq->runtime_enabled || cfs_rq->runtime_remaining > 0))
- return false;
- /*
- * it's possible for a throttled entity to be forced into a running
- * state (e.g. set_curr_task); in this case we're finished.
- */
- if (cfs_rq_throttled(cfs_rq))
- return true;
- return throttle_cfs_rq(cfs_rq);
- }
- static enum hrtimer_restart sched_cfs_slack_timer(struct hrtimer *timer)
- {
- struct cfs_bandwidth *cfs_b =
- container_of(timer, struct cfs_bandwidth, slack_timer);
- do_sched_cfs_slack_timer(cfs_b);
- return HRTIMER_NORESTART;
- }
- extern const u64 max_cfs_quota_period;
- static enum hrtimer_restart sched_cfs_period_timer(struct hrtimer *timer)
- {
- struct cfs_bandwidth *cfs_b =
- container_of(timer, struct cfs_bandwidth, period_timer);
- unsigned long flags;
- int overrun;
- int idle = 0;
- int count = 0;
- raw_spin_lock_irqsave(&cfs_b->lock, flags);
- for (;;) {
- overrun = hrtimer_forward_now(timer, cfs_b->period);
- if (!overrun)
- break;
- idle = do_sched_cfs_period_timer(cfs_b, overrun, flags);
- if (++count > 3) {
- u64 new, old = ktime_to_ns(cfs_b->period);
- /*
- * Grow period by a factor of 2 to avoid losing precision.
- * Precision loss in the quota/period ratio can cause __cfs_schedulable
- * to fail.
- */
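- /*
-  * For example, a 1ms period with a 0.5ms quota becomes a 2ms period
-  * with a 1ms quota (and a doubled burst), leaving the quota/period
-  * ratio, and therefore the group's bandwidth, unchanged.
-  */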
- new = old * 2;
- if (new < max_cfs_quota_period) {
- cfs_b->period = ns_to_ktime(new);
- cfs_b->quota *= 2;
- cfs_b->burst *= 2;
- pr_warn_ratelimited(
- "cfs_period_timer[cpu%d]: period too short, scaling up (new cfs_period_us = %lld, cfs_quota_us = %lld)\n",
- smp_processor_id(),
- div_u64(new, NSEC_PER_USEC),
- div_u64(cfs_b->quota, NSEC_PER_USEC));
- } else {
- pr_warn_ratelimited(
- "cfs_period_timer[cpu%d]: period too short, but cannot scale up without losing precision (cfs_period_us = %lld, cfs_quota_us = %lld)\n",
- smp_processor_id(),
- div_u64(old, NSEC_PER_USEC),
- div_u64(cfs_b->quota, NSEC_PER_USEC));
- }
- /* reset count so we don't come right back in here */
- count = 0;
- }
- }
- if (idle)
- cfs_b->period_active = 0;
- raw_spin_unlock_irqrestore(&cfs_b->lock, flags);
- return idle ? HRTIMER_NORESTART : HRTIMER_RESTART;
- }
- void init_cfs_bandwidth(struct cfs_bandwidth *cfs_b)
- {
- raw_spin_lock_init(&cfs_b->lock);
- cfs_b->runtime = 0;
- cfs_b->quota = RUNTIME_INF;
- cfs_b->period = ns_to_ktime(default_cfs_period());
- cfs_b->burst = 0;
- INIT_LIST_HEAD(&cfs_b->throttled_cfs_rq);
- hrtimer_init(&cfs_b->period_timer, CLOCK_MONOTONIC, HRTIMER_MODE_ABS_PINNED);
- cfs_b->period_timer.function = sched_cfs_period_timer;
- hrtimer_init(&cfs_b->slack_timer, CLOCK_MONOTONIC, HRTIMER_MODE_REL);
- cfs_b->slack_timer.function = sched_cfs_slack_timer;
- cfs_b->slack_started = false;
- }
- static void init_cfs_rq_runtime(struct cfs_rq *cfs_rq)
- {
- cfs_rq->runtime_enabled = 0;
- INIT_LIST_HEAD(&cfs_rq->throttled_list);
- }
- void start_cfs_bandwidth(struct cfs_bandwidth *cfs_b)
- {
- lockdep_assert_held(&cfs_b->lock);
- if (cfs_b->period_active)
- return;
- cfs_b->period_active = 1;
- hrtimer_forward_now(&cfs_b->period_timer, cfs_b->period);
- hrtimer_start_expires(&cfs_b->period_timer, HRTIMER_MODE_ABS_PINNED);
- }
- static void destroy_cfs_bandwidth(struct cfs_bandwidth *cfs_b)
- {
- /* init_cfs_bandwidth() was not called */
- if (!cfs_b->throttled_cfs_rq.next)
- return;
- hrtimer_cancel(&cfs_b->period_timer);
- hrtimer_cancel(&cfs_b->slack_timer);
- }
- /*
- * Both these CPU hotplug callbacks race against unregister_fair_sched_group()
- *
- * The race is harmless, since modifying bandwidth settings of unhooked group
- * bits doesn't do much.
- */
- /* cpu online callback */
- static void __maybe_unused update_runtime_enabled(struct rq *rq)
- {
- struct task_group *tg;
- lockdep_assert_rq_held(rq);
- rcu_read_lock();
- list_for_each_entry_rcu(tg, &task_groups, list) {
- struct cfs_bandwidth *cfs_b = &tg->cfs_bandwidth;
- struct cfs_rq *cfs_rq = tg->cfs_rq[cpu_of(rq)];
- raw_spin_lock(&cfs_b->lock);
- cfs_rq->runtime_enabled = cfs_b->quota != RUNTIME_INF;
- raw_spin_unlock(&cfs_b->lock);
- }
- rcu_read_unlock();
- }
- /* cpu offline callback */
- static void __maybe_unused unthrottle_offline_cfs_rqs(struct rq *rq)
- {
- struct task_group *tg;
- lockdep_assert_rq_held(rq);
- rcu_read_lock();
- list_for_each_entry_rcu(tg, &task_groups, list) {
- struct cfs_rq *cfs_rq = tg->cfs_rq[cpu_of(rq)];
- if (!cfs_rq->runtime_enabled)
- continue;
- /*
- * clock_task is not advancing so we just need to make sure
- * there's some valid quota amount
- */
- cfs_rq->runtime_remaining = 1;
- /*
- * Offline rq is schedulable till CPU is completely disabled
- * in take_cpu_down(), so we prevent new cfs throttling here.
- */
- cfs_rq->runtime_enabled = 0;
- if (cfs_rq_throttled(cfs_rq))
- unthrottle_cfs_rq(cfs_rq);
- }
- rcu_read_unlock();
- }
- #else /* CONFIG_CFS_BANDWIDTH */
- static inline bool cfs_bandwidth_used(void)
- {
- return false;
- }
- static void account_cfs_rq_runtime(struct cfs_rq *cfs_rq, u64 delta_exec) {}
- static bool check_cfs_rq_runtime(struct cfs_rq *cfs_rq) { return false; }
- static void check_enqueue_throttle(struct cfs_rq *cfs_rq) {}
- static inline void sync_throttle(struct task_group *tg, int cpu) {}
- static __always_inline void return_cfs_rq_runtime(struct cfs_rq *cfs_rq) {}
- static inline int cfs_rq_throttled(struct cfs_rq *cfs_rq)
- {
- return 0;
- }
- static inline int throttled_hierarchy(struct cfs_rq *cfs_rq)
- {
- return 0;
- }
- static inline int throttled_lb_pair(struct task_group *tg,
- int src_cpu, int dest_cpu)
- {
- return 0;
- }
- void init_cfs_bandwidth(struct cfs_bandwidth *cfs_b) {}
- #ifdef CONFIG_FAIR_GROUP_SCHED
- static void init_cfs_rq_runtime(struct cfs_rq *cfs_rq) {}
- #endif
- static inline struct cfs_bandwidth *tg_cfs_bandwidth(struct task_group *tg)
- {
- return NULL;
- }
- static inline void destroy_cfs_bandwidth(struct cfs_bandwidth *cfs_b) {}
- static inline void update_runtime_enabled(struct rq *rq) {}
- static inline void unthrottle_offline_cfs_rqs(struct rq *rq) {}
- #endif /* CONFIG_CFS_BANDWIDTH */
- /**************************************************
- * CFS operations on tasks:
- */
- #ifdef CONFIG_SCHED_HRTICK
- static void hrtick_start_fair(struct rq *rq, struct task_struct *p)
- {
- struct sched_entity *se = &p->se;
- struct cfs_rq *cfs_rq = cfs_rq_of(se);
- SCHED_WARN_ON(task_rq(p) != rq);
- if (rq->cfs.h_nr_running > 1) {
- u64 slice = sched_slice(cfs_rq, se);
- u64 ran = se->sum_exec_runtime - se->prev_sum_exec_runtime;
- s64 delta = slice - ran;
- if (delta < 0) {
- if (task_current(rq, p))
- resched_curr(rq);
- return;
- }
- hrtick_start(rq, delta);
- }
- }
- /*
- * called from enqueue/dequeue and updates the hrtick when the
- * current task is from our class and nr_running is low enough
- * to matter.
- */
- static void hrtick_update(struct rq *rq)
- {
- struct task_struct *curr = rq->curr;
- if (!hrtick_enabled_fair(rq) || curr->sched_class != &fair_sched_class)
- return;
- if (cfs_rq_of(&curr->se)->nr_running < sched_nr_latency)
- hrtick_start_fair(rq, curr);
- }
- #else /* !CONFIG_SCHED_HRTICK */
- static inline void
- hrtick_start_fair(struct rq *rq, struct task_struct *p)
- {
- }
- static inline void hrtick_update(struct rq *rq)
- {
- }
- #endif
- #ifdef CONFIG_SMP
- static inline bool cpu_overutilized(int cpu)
- {
- unsigned long rq_util_min = uclamp_rq_get(cpu_rq(cpu), UCLAMP_MIN);
- unsigned long rq_util_max = uclamp_rq_get(cpu_rq(cpu), UCLAMP_MAX);
- int overutilized = -1;
- trace_android_rvh_cpu_overutilized(cpu, &overutilized);
- if (overutilized != -1)
- return overutilized;
- /* Return true only if the utilization doesn't fit CPU's capacity */
- return !util_fits_cpu(cpu_util_cfs(cpu), rq_util_min, rq_util_max, cpu);
- }
- static inline void update_overutilized_status(struct rq *rq)
- {
- if (!READ_ONCE(rq->rd->overutilized) && cpu_overutilized(rq->cpu)) {
- WRITE_ONCE(rq->rd->overutilized, SG_OVERUTILIZED);
- trace_sched_overutilized_tp(rq->rd, SG_OVERUTILIZED);
- }
- }
- #else
- static inline void update_overutilized_status(struct rq *rq) { }
- #endif
- /* Runqueue only has SCHED_IDLE tasks enqueued */
- static int sched_idle_rq(struct rq *rq)
- {
- return unlikely(rq->nr_running == rq->cfs.idle_h_nr_running &&
- rq->nr_running);
- }
- /*
- * Returns true if cfs_rq only has SCHED_IDLE entities enqueued. Note the use
- * of idle_nr_running, which does not consider idle descendants of normal
- * entities.
- */
- static bool sched_idle_cfs_rq(struct cfs_rq *cfs_rq)
- {
- return cfs_rq->nr_running &&
- cfs_rq->nr_running == cfs_rq->idle_nr_running;
- }
- #ifdef CONFIG_SMP
- static int sched_idle_cpu(int cpu)
- {
- return sched_idle_rq(cpu_rq(cpu));
- }
- #endif
- /*
- * The enqueue_task method is called before nr_running is
- * increased. Here we update the fair scheduling stats and
- * then put the task into the rbtree:
- */
- static void
- enqueue_task_fair(struct rq *rq, struct task_struct *p, int flags)
- {
- struct cfs_rq *cfs_rq;
- struct sched_entity *se = &p->se;
- int idle_h_nr_running = task_has_idle_policy(p);
- int task_new = !(flags & ENQUEUE_WAKEUP);
- int should_iowait_boost;
- /*
- * The code below (indirectly) updates schedutil which looks at
- * the cfs_rq utilization to select a frequency.
- * Let's add the task's estimated utilization to the cfs_rq's
- * estimated utilization, before we update schedutil.
- */
- util_est_enqueue(&rq->cfs, p);
- /*
- * If in_iowait is set, the code below may not trigger any cpufreq
- * utilization updates, so do it here explicitly with the IOWAIT flag
- * passed.
- */
- should_iowait_boost = p->in_iowait;
- trace_android_rvh_set_iowait(p, rq, &should_iowait_boost);
- if (should_iowait_boost)
- cpufreq_update_util(rq, SCHED_CPUFREQ_IOWAIT);
- for_each_sched_entity(se) {
- if (se->on_rq)
- break;
- cfs_rq = cfs_rq_of(se);
- enqueue_entity(cfs_rq, se, flags);
- cfs_rq->h_nr_running++;
- cfs_rq->idle_h_nr_running += idle_h_nr_running;
- if (cfs_rq_is_idle(cfs_rq))
- idle_h_nr_running = 1;
- /* end evaluation on encountering a throttled cfs_rq */
- if (cfs_rq_throttled(cfs_rq))
- goto enqueue_throttle;
- flags = ENQUEUE_WAKEUP;
- }
- trace_android_rvh_enqueue_task_fair(rq, p, flags);
- for_each_sched_entity(se) {
- cfs_rq = cfs_rq_of(se);
- update_load_avg(cfs_rq, se, UPDATE_TG);
- se_update_runnable(se);
- update_cfs_group(se);
- cfs_rq->h_nr_running++;
- cfs_rq->idle_h_nr_running += idle_h_nr_running;
- if (cfs_rq_is_idle(cfs_rq))
- idle_h_nr_running = 1;
- /* end evaluation on encountering a throttled cfs_rq */
- if (cfs_rq_throttled(cfs_rq))
- goto enqueue_throttle;
- }
- /* At this point se is NULL and we are at root level */
- add_nr_running(rq, 1);
- /*
- * Since new tasks are assigned an initial util_avg equal to
- * half of the spare capacity of their CPU, tiny tasks have the
- * ability to cross the overutilized threshold, which will
- * result in the load balancer ruining all the task placement
- * done by EAS. As a way to mitigate that effect, do not account
- * for the first enqueue operation of new tasks during the
- * overutilized flag detection.
- *
- * A better way of solving this problem would be to wait for
- * the PELT signals of tasks to converge before taking them
- * into account, but that is not straightforward to implement,
- * and the following generally works well enough in practice.
- */
- if (!task_new)
- update_overutilized_status(rq);
- enqueue_throttle:
- assert_list_leaf_cfs_rq(rq);
- hrtick_update(rq);
- }
- static void set_next_buddy(struct sched_entity *se);
- /*
- * The dequeue_task method is called before nr_running is
- * decreased. We remove the task from the rbtree and
- * update the fair scheduling stats:
- */
- static void dequeue_task_fair(struct rq *rq, struct task_struct *p, int flags)
- {
- struct cfs_rq *cfs_rq;
- struct sched_entity *se = &p->se;
- int task_sleep = flags & DEQUEUE_SLEEP;
- int idle_h_nr_running = task_has_idle_policy(p);
- bool was_sched_idle = sched_idle_rq(rq);
- util_est_dequeue(&rq->cfs, p);
- for_each_sched_entity(se) {
- cfs_rq = cfs_rq_of(se);
- dequeue_entity(cfs_rq, se, flags);
- cfs_rq->h_nr_running--;
- cfs_rq->idle_h_nr_running -= idle_h_nr_running;
- if (cfs_rq_is_idle(cfs_rq))
- idle_h_nr_running = 1;
- /* end evaluation on encountering a throttled cfs_rq */
- if (cfs_rq_throttled(cfs_rq))
- goto dequeue_throttle;
- /* Don't dequeue parent if it has other entities besides us */
- if (cfs_rq->load.weight) {
- /* Avoid re-evaluating load for this entity: */
- se = parent_entity(se);
- /*
- * Bias pick_next to pick a task from this cfs_rq, as
- * p is sleeping when it is within its sched_slice.
- */
- if (task_sleep && se && !throttled_hierarchy(cfs_rq))
- set_next_buddy(se);
- break;
- }
- flags |= DEQUEUE_SLEEP;
- }
- trace_android_rvh_dequeue_task_fair(rq, p, flags);
- for_each_sched_entity(se) {
- cfs_rq = cfs_rq_of(se);
- update_load_avg(cfs_rq, se, UPDATE_TG);
- se_update_runnable(se);
- update_cfs_group(se);
- cfs_rq->h_nr_running--;
- cfs_rq->idle_h_nr_running -= idle_h_nr_running;
- if (cfs_rq_is_idle(cfs_rq))
- idle_h_nr_running = 1;
- /* end evaluation on encountering a throttled cfs_rq */
- if (cfs_rq_throttled(cfs_rq))
- goto dequeue_throttle;
- }
- /* At this point se is NULL and we are at root level */
- sub_nr_running(rq, 1);
- /* balance early to pull high priority tasks */
- if (unlikely(!was_sched_idle && sched_idle_rq(rq)))
- rq->next_balance = jiffies;
- dequeue_throttle:
- util_est_update(&rq->cfs, p, task_sleep);
- hrtick_update(rq);
- }
- #ifdef CONFIG_SMP
- /* Working cpumask for: load_balance, load_balance_newidle. */
- static DEFINE_PER_CPU(cpumask_var_t, load_balance_mask);
- static DEFINE_PER_CPU(cpumask_var_t, select_rq_mask);
- #ifdef CONFIG_NO_HZ_COMMON
- static struct {
- cpumask_var_t idle_cpus_mask;
- atomic_t nr_cpus;
- int has_blocked; /* Idle CPUs have blocked load */
- int needs_update; /* Newly idle CPUs need their next_balance collated */
- unsigned long next_balance; /* in jiffy units */
- unsigned long next_blocked; /* Next update of blocked load in jiffies */
- } nohz ____cacheline_aligned;
- #endif /* CONFIG_NO_HZ_COMMON */
- static unsigned long cpu_load(struct rq *rq)
- {
- return cfs_rq_load_avg(&rq->cfs);
- }
- /*
- * cpu_load_without - compute CPU load without any contributions from *p
- * @cpu: the CPU which load is requested
- * @p: the task which load should be discounted
- *
- * The load of a CPU is defined by the load of tasks currently enqueued on that
- * CPU as well as tasks which are currently sleeping after an execution on that
- * CPU.
- *
- * This method returns the load of the specified CPU by discounting the load of
- * the specified task, whenever the task is currently contributing to the CPU
- * load.
- */
- static unsigned long cpu_load_without(struct rq *rq, struct task_struct *p)
- {
- struct cfs_rq *cfs_rq;
- unsigned int load;
- /* Task has no contribution or is new */
- if (cpu_of(rq) != task_cpu(p) || !READ_ONCE(p->se.avg.last_update_time))
- return cpu_load(rq);
- cfs_rq = &rq->cfs;
- load = READ_ONCE(cfs_rq->avg.load_avg);
- /* Discount task's util from CPU's util */
- lsub_positive(&load, task_h_load(p));
- return load;
- }
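- /*
-  * Illustrative sketch (not part of the original source): the discount above
-  * relies on lsub_positive(), which clamps at zero instead of letting the
-  * unsigned subtraction wrap. Assuming a plain-C model of that helper, a CPU
-  * with load_avg 900 and a task whose task_h_load() is 300 reports 600 once
-  * the task is discounted; a stale contribution larger than the CPU sum
-  * (say 1000) would clamp the result to 0 rather than underflow.
-  */
- static inline unsigned long example_load_without(unsigned long cpu_load, unsigned long task_load)
- {
- /* mirrors lsub_positive(&cpu_load, task_load): subtract, but never wrap */
- return cpu_load > task_load ? cpu_load - task_load : 0;
- }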
- static unsigned long cpu_runnable(struct rq *rq)
- {
- return cfs_rq_runnable_avg(&rq->cfs);
- }
- static unsigned long cpu_runnable_without(struct rq *rq, struct task_struct *p)
- {
- struct cfs_rq *cfs_rq;
- unsigned int runnable;
- /* Task has no contribution or is new */
- if (cpu_of(rq) != task_cpu(p) || !READ_ONCE(p->se.avg.last_update_time))
- return cpu_runnable(rq);
- cfs_rq = &rq->cfs;
- runnable = READ_ONCE(cfs_rq->avg.runnable_avg);
- /* Discount task's runnable from CPU's runnable */
- lsub_positive(&runnable, p->se.avg.runnable_avg);
- return runnable;
- }
- static unsigned long capacity_of(int cpu)
- {
- return cpu_rq(cpu)->cpu_capacity;
- }
- static void record_wakee(struct task_struct *p)
- {
- /*
- * Only decay a single time; tasks that have less than 1 wakeup per
- * jiffy will not have built up many flips.
- */
- if (time_after(jiffies, current->wakee_flip_decay_ts + HZ)) {
- current->wakee_flips >>= 1;
- current->wakee_flip_decay_ts = jiffies;
- }
- if (current->last_wakee != p) {
- current->last_wakee = p;
- current->wakee_flips++;
- }
- }
- /*
- * Detect M:N waker/wakee relationships via a switching-frequency heuristic.
- *
- * A waker of many should wake a different task than the one last awakened
- * at a frequency roughly N times higher than one of its wakees.
- *
- * In order to determine whether we should let the load spread vs. consolidate
- * onto shared cache, we look for a minimum 'flip' frequency of llc_size in one
- * partner, and a factor of llc_size higher frequency in the other.
- *
- * With both conditions met, we can be relatively sure that the relationship is
- * non-monogamous, with partner count exceeding socket size.
- *
- * Whether the waker/wakee pair is client/server, worker/dispatcher, interrupt
- * source or whatever is irrelevant; the spread criterion is simply that the
- * apparent partner count exceeds the socket size.
- */
- static int wake_wide(struct task_struct *p)
- {
- unsigned int master = current->wakee_flips;
- unsigned int slave = p->wakee_flips;
- int factor = __this_cpu_read(sd_llc_size);
- if (master < slave)
- swap(master, slave);
- if (slave < factor || master < slave * factor)
- return 0;
- return 1;
- }
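- /*
-  * Worked example for the heuristic above (illustrative numbers, not from the
-  * source): with an LLC of 8 CPUs (factor = 8), a server that has accumulated
-  * 80 wakee_flips waking a worker with 10 flips gives master = 80, slave = 10;
-  * slave >= factor and master >= slave * factor, so wake_wide() returns 1 and
-  * the wakeup is allowed to spread. A 1:1 pair with, say, 3 flips each fails
-  * the slave >= factor test and stays cache affine.
-  */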
- /*
- * The purpose of wake_affine() is to quickly determine on which CPU we can run
- * soonest. For the purpose of speed we only consider the waking and previous
- * CPU.
- *
- * wake_affine_idle() - only considers 'now'; it checks whether the waking CPU is
- * cache-affine and is (or will be) idle.
- *
- * wake_affine_weight() - considers the weight to reflect the average
- * scheduling latency of the CPUs. This seems to work
- * for the overloaded case.
- */
- static int
- wake_affine_idle(int this_cpu, int prev_cpu, int sync)
- {
- /*
- * If this_cpu is idle, it implies the wakeup is from interrupt
- * context. Only allow the move if cache is shared. Otherwise an
- * interrupt intensive workload could force all tasks onto one
- * node depending on the IO topology or IRQ affinity settings.
- *
- * If the prev_cpu is idle and cache affine then avoid a migration.
- * There is no guarantee that the cache hot data from an interrupt
- * is more important than cache hot data on the prev_cpu and from
- * a cpufreq perspective, it's better to have higher utilisation
- * on one CPU.
- */
- if (available_idle_cpu(this_cpu) && cpus_share_cache(this_cpu, prev_cpu))
- return available_idle_cpu(prev_cpu) ? prev_cpu : this_cpu;
- if (sync && cpu_rq(this_cpu)->nr_running == 1)
- return this_cpu;
- if (available_idle_cpu(prev_cpu))
- return prev_cpu;
- return nr_cpumask_bits;
- }
- static int
- wake_affine_weight(struct sched_domain *sd, struct task_struct *p,
- int this_cpu, int prev_cpu, int sync)
- {
- s64 this_eff_load, prev_eff_load;
- unsigned long task_load;
- this_eff_load = cpu_load(cpu_rq(this_cpu));
- if (sync) {
- unsigned long current_load = task_h_load(current);
- if (current_load > this_eff_load)
- return this_cpu;
- this_eff_load -= current_load;
- }
- task_load = task_h_load(p);
- this_eff_load += task_load;
- if (sched_feat(WA_BIAS))
- this_eff_load *= 100;
- this_eff_load *= capacity_of(prev_cpu);
- prev_eff_load = cpu_load(cpu_rq(prev_cpu));
- prev_eff_load -= task_load;
- if (sched_feat(WA_BIAS))
- prev_eff_load *= 100 + (sd->imbalance_pct - 100) / 2;
- prev_eff_load *= capacity_of(this_cpu);
- /*
- * If sync, adjust the weight of prev_eff_load such that if
- * prev_eff == this_eff that select_idle_sibling() will consider
- * stacking the wakee on top of the waker if no other CPU is
- * idle.
- */
- if (sync)
- prev_eff_load += 1;
- return this_eff_load < prev_eff_load ? this_cpu : nr_cpumask_bits;
- }
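- /*
-  * Worked example (illustrative numbers, assuming equal CPU capacities of 1024,
-  * WA_BIAS enabled and an imbalance_pct of 117, i.e. a prev multiplier of 108
-  * vs. 100 for the waking CPU): with cpu_load(this) = 300, cpu_load(prev) = 500
-  * and task_h_load(p) = 200, this_eff_load = (300 + 200) * 100 * 1024 and
-  * prev_eff_load = (500 - 200) * 108 * 1024. Since 50000 > 32400, prev_cpu wins
-  * and nr_cpumask_bits is returned, i.e. the ~8% bias keeps the task on
-  * prev_cpu unless the waking CPU is clearly less loaded.
-  */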
- static int wake_affine(struct sched_domain *sd, struct task_struct *p,
- int this_cpu, int prev_cpu, int sync)
- {
- int target = nr_cpumask_bits;
- if (sched_feat(WA_IDLE))
- target = wake_affine_idle(this_cpu, prev_cpu, sync);
- if (sched_feat(WA_WEIGHT) && target == nr_cpumask_bits)
- target = wake_affine_weight(sd, p, this_cpu, prev_cpu, sync);
- schedstat_inc(p->stats.nr_wakeups_affine_attempts);
- if (target != this_cpu)
- return prev_cpu;
- schedstat_inc(sd->ttwu_move_affine);
- schedstat_inc(p->stats.nr_wakeups_affine);
- return target;
- }
- static struct sched_group *
- find_idlest_group(struct sched_domain *sd, struct task_struct *p, int this_cpu);
- /*
- * find_idlest_group_cpu - find the idlest CPU among the CPUs in the group.
- */
- static int
- find_idlest_group_cpu(struct sched_group *group, struct task_struct *p, int this_cpu)
- {
- unsigned long load, min_load = ULONG_MAX;
- unsigned int min_exit_latency = UINT_MAX;
- u64 latest_idle_timestamp = 0;
- int least_loaded_cpu = this_cpu;
- int shallowest_idle_cpu = -1;
- int i;
- /* Check if we have any choice: */
- if (group->group_weight == 1)
- return cpumask_first(sched_group_span(group));
- /* Traverse only the allowed CPUs */
- for_each_cpu_and(i, sched_group_span(group), p->cpus_ptr) {
- struct rq *rq = cpu_rq(i);
- if (!sched_core_cookie_match(rq, p))
- continue;
- if (sched_idle_cpu(i))
- return i;
- if (available_idle_cpu(i)) {
- struct cpuidle_state *idle = idle_get_state(rq);
- if (idle && idle->exit_latency < min_exit_latency) {
- /*
- * We give priority to a CPU whose idle state
- * has the smallest exit latency irrespective
- * of any idle timestamp.
- */
- min_exit_latency = idle->exit_latency;
- latest_idle_timestamp = rq->idle_stamp;
- shallowest_idle_cpu = i;
- } else if ((!idle || idle->exit_latency == min_exit_latency) &&
- rq->idle_stamp > latest_idle_timestamp) {
- /*
- * If equal or no active idle state, then
- * the most recently idled CPU might have
- * a warmer cache.
- */
- latest_idle_timestamp = rq->idle_stamp;
- shallowest_idle_cpu = i;
- }
- } else if (shallowest_idle_cpu == -1) {
- load = cpu_load(cpu_rq(i));
- if (load < min_load) {
- min_load = load;
- least_loaded_cpu = i;
- }
- }
- }
- return shallowest_idle_cpu != -1 ? shallowest_idle_cpu : least_loaded_cpu;
- }
- static inline int find_idlest_cpu(struct sched_domain *sd, struct task_struct *p,
- int cpu, int prev_cpu, int sd_flag)
- {
- int new_cpu = cpu;
- if (!cpumask_intersects(sched_domain_span(sd), p->cpus_ptr))
- return prev_cpu;
- /*
- * We need task's util for cpu_util_without, sync it up to
- * prev_cpu's last_update_time.
- */
- if (!(sd_flag & SD_BALANCE_FORK))
- sync_entity_load_avg(&p->se);
- while (sd) {
- struct sched_group *group;
- struct sched_domain *tmp;
- int weight;
- if (!(sd->flags & sd_flag)) {
- sd = sd->child;
- continue;
- }
- group = find_idlest_group(sd, p, cpu);
- if (!group) {
- sd = sd->child;
- continue;
- }
- new_cpu = find_idlest_group_cpu(group, p, cpu);
- if (new_cpu == cpu) {
- /* Now try balancing at a lower domain level of 'cpu': */
- sd = sd->child;
- continue;
- }
- /* Now try balancing at a lower domain level of 'new_cpu': */
- cpu = new_cpu;
- weight = sd->span_weight;
- sd = NULL;
- for_each_domain(cpu, tmp) {
- if (weight <= tmp->span_weight)
- break;
- if (tmp->flags & sd_flag)
- sd = tmp;
- }
- }
- return new_cpu;
- }
- static inline int __select_idle_cpu(int cpu, struct task_struct *p)
- {
- if ((available_idle_cpu(cpu) || sched_idle_cpu(cpu)) &&
- sched_cpu_cookie_match(cpu_rq(cpu), p))
- return cpu;
- return -1;
- }
- #ifdef CONFIG_SCHED_SMT
- DEFINE_STATIC_KEY_FALSE(sched_smt_present);
- EXPORT_SYMBOL_GPL(sched_smt_present);
- static inline void set_idle_cores(int cpu, int val)
- {
- struct sched_domain_shared *sds;
- sds = rcu_dereference(per_cpu(sd_llc_shared, cpu));
- if (sds)
- WRITE_ONCE(sds->has_idle_cores, val);
- }
- static inline bool test_idle_cores(int cpu)
- {
- struct sched_domain_shared *sds;
- sds = rcu_dereference(per_cpu(sd_llc_shared, cpu));
- if (sds)
- return READ_ONCE(sds->has_idle_cores);
- return false;
- }
- /*
- * Scans the local SMT mask to see if the entire core is idle, and records this
- * information in sd_llc_shared->has_idle_cores.
- *
- * Since SMT siblings share all cache levels, inspecting this limited remote
- * state should be fairly cheap.
- */
- void __update_idle_core(struct rq *rq)
- {
- int core = cpu_of(rq);
- int cpu;
- rcu_read_lock();
- if (test_idle_cores(core))
- goto unlock;
- for_each_cpu(cpu, cpu_smt_mask(core)) {
- if (cpu == core)
- continue;
- if (!available_idle_cpu(cpu))
- goto unlock;
- }
- set_idle_cores(core, 1);
- unlock:
- rcu_read_unlock();
- }
- /*
- * Scan the entire LLC domain for idle cores; this dynamically switches off if
- * there are no idle cores left in the system; tracked through
- * sd_llc->shared->has_idle_cores and enabled through update_idle_core() above.
- */
- static int select_idle_core(struct task_struct *p, int core, struct cpumask *cpus, int *idle_cpu)
- {
- bool idle = true;
- int cpu;
- for_each_cpu(cpu, cpu_smt_mask(core)) {
- if (!available_idle_cpu(cpu)) {
- idle = false;
- if (*idle_cpu == -1) {
- if (sched_idle_cpu(cpu) && cpumask_test_cpu(cpu, p->cpus_ptr)) {
- *idle_cpu = cpu;
- break;
- }
- continue;
- }
- break;
- }
- if (*idle_cpu == -1 && cpumask_test_cpu(cpu, p->cpus_ptr))
- *idle_cpu = cpu;
- }
- if (idle)
- return core;
- cpumask_andnot(cpus, cpus, cpu_smt_mask(core));
- return -1;
- }
- /*
- * Scan the local SMT mask for idle CPUs.
- */
- static int select_idle_smt(struct task_struct *p, int target)
- {
- int cpu;
- for_each_cpu_and(cpu, cpu_smt_mask(target), p->cpus_ptr) {
- if (cpu == target)
- continue;
- if (available_idle_cpu(cpu) || sched_idle_cpu(cpu))
- return cpu;
- }
- return -1;
- }
- #else /* CONFIG_SCHED_SMT */
- static inline void set_idle_cores(int cpu, int val)
- {
- }
- static inline bool test_idle_cores(int cpu)
- {
- return false;
- }
- static inline int select_idle_core(struct task_struct *p, int core, struct cpumask *cpus, int *idle_cpu)
- {
- return __select_idle_cpu(core, p);
- }
- static inline int select_idle_smt(struct task_struct *p, int target)
- {
- return -1;
- }
- #endif /* CONFIG_SCHED_SMT */
- /*
- * Scan the LLC domain for idle CPUs; this is dynamically regulated by
- * comparing the average scan cost (tracked in sd->avg_scan_cost) against the
- * average idle time for this rq (as found in rq->avg_idle).
- */
- static int select_idle_cpu(struct task_struct *p, struct sched_domain *sd, bool has_idle_core, int target)
- {
- struct cpumask *cpus = this_cpu_cpumask_var_ptr(select_rq_mask);
- int i, cpu, idle_cpu = -1, nr = INT_MAX;
- struct sched_domain_shared *sd_share;
- struct rq *this_rq = this_rq();
- int this = smp_processor_id();
- struct sched_domain *this_sd = NULL;
- u64 time = 0;
- cpumask_and(cpus, sched_domain_span(sd), p->cpus_ptr);
- if (sched_feat(SIS_PROP) && !has_idle_core) {
- u64 avg_cost, avg_idle, span_avg;
- unsigned long now = jiffies;
- this_sd = rcu_dereference(*this_cpu_ptr(&sd_llc));
- if (!this_sd)
- return -1;
- /*
- * If we're busy, the assumption that the last idle period
- * predicts the future is flawed; age away the remaining
- * predicted idle time.
- */
- if (unlikely(this_rq->wake_stamp < now)) {
- while (this_rq->wake_stamp < now && this_rq->wake_avg_idle) {
- this_rq->wake_stamp++;
- this_rq->wake_avg_idle >>= 1;
- }
- }
- avg_idle = this_rq->wake_avg_idle;
- avg_cost = this_sd->avg_scan_cost + 1;
- span_avg = sd->span_weight * avg_idle;
- if (span_avg > 4*avg_cost)
- nr = div_u64(span_avg, avg_cost);
- else
- nr = 4;
- time = cpu_clock(this);
- }
- if (sched_feat(SIS_UTIL)) {
- sd_share = rcu_dereference(per_cpu(sd_llc_shared, target));
- if (sd_share) {
- /* because !--nr is the condition to stop scan */
- nr = READ_ONCE(sd_share->nr_idle_scan) + 1;
- /* overloaded LLC is unlikely to have idle cpu/core */
- if (nr == 1)
- return -1;
- }
- }
- for_each_cpu_wrap(cpu, cpus, target + 1) {
- if (has_idle_core) {
- i = select_idle_core(p, cpu, cpus, &idle_cpu);
- if ((unsigned int)i < nr_cpumask_bits)
- return i;
- } else {
- if (!--nr)
- return -1;
- idle_cpu = __select_idle_cpu(cpu, p);
- if ((unsigned int)idle_cpu < nr_cpumask_bits)
- break;
- }
- }
- if (has_idle_core)
- set_idle_cores(target, false);
- if (sched_feat(SIS_PROP) && this_sd && !has_idle_core) {
- time = cpu_clock(this) - time;
- /*
- * Account for the scan cost of wakeups against the average
- * idle time.
- */
- this_rq->wake_avg_idle -= min(this_rq->wake_avg_idle, time);
- update_avg(&this_sd->avg_scan_cost, time);
- }
- return idle_cpu;
- }
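- /*
-  * Worked example of the SIS_PROP clamp above (illustrative values, both in
-  * nanoseconds): with sd->span_weight = 16, wake_avg_idle = 1500 and
-  * avg_scan_cost = 5000, span_avg = 16 * 1500 = 24000, which exceeds
-  * 4 * avg_cost = 20004, so nr = 24000 / 5001 ~= 4 CPUs are scanned. A mostly
-  * idle rq (wake_avg_idle = 25000) yields span_avg = 400000 and nr ~= 79,
-  * i.e. effectively the whole LLC.
-  */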
- /*
- * Scan the asym_capacity domain for idle CPUs; pick the first idle one on which
- * the task fits. If no CPU is big enough, but there are idle ones, try to
- * maximize capacity.
- */
- static int
- select_idle_capacity(struct task_struct *p, struct sched_domain *sd, int target)
- {
- unsigned long task_util, util_min, util_max, best_cap = 0;
- int fits, best_fits = 0;
- int cpu, best_cpu = -1;
- struct cpumask *cpus;
- cpus = this_cpu_cpumask_var_ptr(select_rq_mask);
- cpumask_and(cpus, sched_domain_span(sd), p->cpus_ptr);
- task_util = task_util_est(p);
- util_min = uclamp_eff_value(p, UCLAMP_MIN);
- util_max = uclamp_eff_value(p, UCLAMP_MAX);
- for_each_cpu_wrap(cpu, cpus, target) {
- unsigned long cpu_cap = capacity_of(cpu);
- if (!available_idle_cpu(cpu) && !sched_idle_cpu(cpu))
- continue;
- fits = util_fits_cpu(task_util, util_min, util_max, cpu);
- /* This CPU fits with all requirements */
- if (fits > 0)
- return cpu;
- /*
- * Only the min performance hint (i.e. uclamp_min) doesn't fit.
- * Look for the CPU with best capacity.
- */
- else if (fits < 0)
- cpu_cap = capacity_orig_of(cpu) - thermal_load_avg(cpu_rq(cpu));
- /*
- * First, select CPU which fits better (-1 being better than 0).
- * Then, select the one with best capacity at same level.
- */
- if ((fits < best_fits) ||
- ((fits == best_fits) && (cpu_cap > best_cap))) {
- best_cap = cpu_cap;
- best_cpu = cpu;
- best_fits = fits;
- }
- }
- return best_cpu;
- }
- static inline bool asym_fits_cpu(unsigned long util,
- unsigned long util_min,
- unsigned long util_max,
- int cpu)
- {
- if (sched_asym_cpucap_active())
- /*
- * Return true only if the cpu fully fits the task requirements
- * which include the utilization and the performance hints.
- */
- return (util_fits_cpu(util, util_min, util_max, cpu) > 0);
- return true;
- }
- /*
- * Try and locate an idle core/thread in the LLC cache domain.
- */
- static int select_idle_sibling(struct task_struct *p, int prev, int target)
- {
- bool has_idle_core = false;
- struct sched_domain *sd;
- unsigned long task_util, util_min, util_max;
- int i, recent_used_cpu;
- /*
- * On an asymmetric system, update the task utilization because we will check
- * that the task fits within the CPU's capacity.
- */
- if (sched_asym_cpucap_active()) {
- sync_entity_load_avg(&p->se);
- task_util = task_util_est(p);
- util_min = uclamp_eff_value(p, UCLAMP_MIN);
- util_max = uclamp_eff_value(p, UCLAMP_MAX);
- }
- /*
- * per-cpu select_rq_mask usage
- */
- lockdep_assert_irqs_disabled();
- if ((available_idle_cpu(target) || sched_idle_cpu(target)) &&
- asym_fits_cpu(task_util, util_min, util_max, target))
- return target;
- /*
- * If the previous CPU is cache affine and idle, don't be stupid:
- */
- if (prev != target && cpus_share_cache(prev, target) &&
- (available_idle_cpu(prev) || sched_idle_cpu(prev)) &&
- asym_fits_cpu(task_util, util_min, util_max, prev))
- return prev;
- /*
- * Allow a per-cpu kthread to stack with the wakee if the
- * kworker thread and the task's previous CPU are the same.
- * The assumption is that the wakee queued work for the
- * per-cpu kthread that is now complete and the wakeup is
- * essentially a sync wakeup. An obvious example of this
- * pattern is IO completions.
- */
- if (is_per_cpu_kthread(current) &&
- in_task() &&
- prev == smp_processor_id() &&
- this_rq()->nr_running <= 1 &&
- asym_fits_cpu(task_util, util_min, util_max, prev)) {
- return prev;
- }
- /* Check a recently used CPU as a potential idle candidate: */
- recent_used_cpu = p->recent_used_cpu;
- p->recent_used_cpu = prev;
- if (recent_used_cpu != prev &&
- recent_used_cpu != target &&
- cpus_share_cache(recent_used_cpu, target) &&
- (available_idle_cpu(recent_used_cpu) || sched_idle_cpu(recent_used_cpu)) &&
- cpumask_test_cpu(recent_used_cpu, p->cpus_ptr) &&
- asym_fits_cpu(task_util, util_min, util_max, recent_used_cpu)) {
- return recent_used_cpu;
- }
- /*
- * For asymmetric CPU capacity systems, our domain of interest is
- * sd_asym_cpucapacity rather than sd_llc.
- */
- if (sched_asym_cpucap_active()) {
- sd = rcu_dereference(per_cpu(sd_asym_cpucapacity, target));
- /*
- * On an asymmetric CPU capacity system where an exclusive
- * cpuset defines a symmetric island (i.e. one unique
- * capacity_orig value through the cpuset), the key will be set
- * but the CPUs within that cpuset will not have a domain with
- * SD_ASYM_CPUCAPACITY. These should follow the usual symmetric
- * capacity path.
- */
- if (sd) {
- i = select_idle_capacity(p, sd, target);
- return ((unsigned)i < nr_cpumask_bits) ? i : target;
- }
- }
- sd = rcu_dereference(per_cpu(sd_llc, target));
- if (!sd)
- return target;
- if (sched_smt_active()) {
- has_idle_core = test_idle_cores(target);
- if (!has_idle_core && cpus_share_cache(prev, target)) {
- i = select_idle_smt(p, prev);
- if ((unsigned int)i < nr_cpumask_bits)
- return i;
- }
- }
- i = select_idle_cpu(p, sd, has_idle_core, target);
- if ((unsigned)i < nr_cpumask_bits)
- return i;
- return target;
- }
- /*
- * Predicts what cpu_util(@cpu) would return if @p was removed from @cpu
- * (@dst_cpu = -1) or migrated to @dst_cpu.
- */
- static unsigned long cpu_util_next(int cpu, struct task_struct *p, int dst_cpu)
- {
- struct cfs_rq *cfs_rq = &cpu_rq(cpu)->cfs;
- unsigned long util = READ_ONCE(cfs_rq->avg.util_avg);
- /*
- * If @dst_cpu is -1 or @p migrates from @cpu to @dst_cpu remove its
- * contribution. If @p migrates from another CPU to @cpu add its
- * contribution. In all the other cases @cpu is not impacted by the
- * migration so its util_avg is already correct.
- */
- if (task_cpu(p) == cpu && dst_cpu != cpu)
- lsub_positive(&util, task_util(p));
- else if (task_cpu(p) != cpu && dst_cpu == cpu)
- util += task_util(p);
- if (sched_feat(UTIL_EST)) {
- unsigned long util_est;
- util_est = READ_ONCE(cfs_rq->avg.util_est.enqueued);
- /*
- * During wake-up @p isn't enqueued yet and doesn't contribute
- * to any cpu_rq(cpu)->cfs.avg.util_est.enqueued.
- * If @dst_cpu == @cpu add it to "simulate" cpu_util after @p
- * has been enqueued.
- *
- * During exec (@dst_cpu = -1) @p is enqueued and does
- * contribute to cpu_rq(cpu)->cfs.util_est.enqueued.
- * Remove it to "simulate" cpu_util without @p's contribution.
- *
- * Despite the task_on_rq_queued(@p) check there is still a
- * small window for a possible race when an exec
- * select_task_rq_fair() races with LB's detach_task().
- *
- * detach_task()
- * deactivate_task()
- * p->on_rq = TASK_ON_RQ_MIGRATING;
- * -------------------------------- A
- * dequeue_task() \
- * dequeue_task_fair() + Race Time
- * util_est_dequeue() /
- * -------------------------------- B
- *
- * The additional check "current == p" is required to further
- * reduce the race window.
- */
- if (dst_cpu == cpu)
- util_est += _task_util_est(p);
- else if (unlikely(task_on_rq_queued(p) || current == p))
- lsub_positive(&util_est, _task_util_est(p));
- util = max(util, util_est);
- }
- return min(util, capacity_orig_of(cpu));
- }
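- /*
-  * Worked example (illustrative, UTIL_EST left aside): a task with
-  * task_util(p) = 150 currently counted in CPU 2's util_avg of 600.
-  * cpu_util_next(2, p, -1) removes the contribution and reports 450, i.e.
-  * CPU 2 as if p had left; cpu_util_next(3, p, 3) for a CPU 3 with
-  * util_avg 300 adds it and reports 450 as well, simulating the migration;
-  * cpu_util_next(2, p, 2) matches neither condition and reports 600.
-  */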
- /*
- * cpu_util_without: compute cpu utilization without any contributions from *p
- * @cpu: the CPU which utilization is requested
- * @p: the task which utilization should be discounted
- *
- * The utilization of a CPU is defined by the utilization of tasks currently
- * enqueued on that CPU as well as tasks which are currently sleeping after an
- * execution on that CPU.
- *
- * This method returns the utilization of the specified CPU by discounting the
- * utilization of the specified task, whenever the task is currently
- * contributing to the CPU utilization.
- */
- static unsigned long cpu_util_without(int cpu, struct task_struct *p)
- {
- /* Task has no contribution or is new */
- if (cpu != task_cpu(p) || !READ_ONCE(p->se.avg.last_update_time))
- return cpu_util_cfs(cpu);
- return cpu_util_next(cpu, p, -1);
- }
- /*
- * energy_env - Utilization landscape for energy estimation.
- * @task_busy_time: Utilization contribution by the task for which we test the
- * placement. Given by eenv_task_busy_time().
- * @pd_busy_time: Utilization of the whole perf domain without the task
- * contribution. Given by eenv_pd_busy_time().
- * @cpu_cap: Maximum CPU capacity for the perf domain.
- * @pd_cap: Entire perf domain capacity. (pd->nr_cpus * cpu_cap).
- */
- struct energy_env {
- unsigned long task_busy_time;
- unsigned long pd_busy_time;
- unsigned long cpu_cap;
- unsigned long pd_cap;
- };
- /*
- * Compute the task busy time for compute_energy(). This time cannot be
- * injected directly into effective_cpu_util() because of the IRQ scaling.
- * The latter only makes sense with the most recent CPUs where the task has
- * run.
- */
- static inline void eenv_task_busy_time(struct energy_env *eenv,
- struct task_struct *p, int prev_cpu)
- {
- unsigned long busy_time, max_cap = arch_scale_cpu_capacity(prev_cpu);
- unsigned long irq = cpu_util_irq(cpu_rq(prev_cpu));
- if (unlikely(irq >= max_cap))
- busy_time = max_cap;
- else
- busy_time = scale_irq_capacity(task_util_est(p), irq, max_cap);
- eenv->task_busy_time = busy_time;
- }
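- /*
-  * Worked example (illustrative values; scale_irq_capacity() is assumed to
-  * scale by (max_cap - irq) / max_cap): with task_util_est(p) = 200, an IRQ
-  * utilization of 256 and max_cap = 1024 on prev_cpu, the task busy time
-  * becomes 200 * (1024 - 256) / 1024 = 150, i.e. the task's utilization is
-  * shrunk by the fraction of the CPU already consumed by IRQ handling.
-  */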
- /*
- * Compute the perf_domain (PD) busy time for compute_energy(). Based on the
- * utilization for each @pd_cpus, it however doesn't take into account
- * clamping since the ratio (utilization / cpu_capacity) is already enough to
- * scale the EM reported power consumption at the (eventually clamped)
- * cpu_capacity.
- *
- * The contribution of the task @p for which we want to estimate the
- * energy cost is removed (by cpu_util_next()) and must be calculated
- * separately (see eenv_task_busy_time). This ensures:
- *
- * - A stable PD utilization, no matter which CPU of that PD we want to place
- * the task on.
- *
- * - A fair comparison between CPUs as the task contribution (task_util())
- * will always be the same no matter which CPU utilization we rely on
- * (util_avg or util_est).
- *
- * Set @eenv busy time for the PD that spans @pd_cpus. This busy time can't
- * exceed @eenv->pd_cap.
- */
- static inline void eenv_pd_busy_time(struct energy_env *eenv,
- struct cpumask *pd_cpus,
- struct task_struct *p)
- {
- unsigned long busy_time = 0;
- int cpu;
- for_each_cpu(cpu, pd_cpus) {
- unsigned long util = cpu_util_next(cpu, p, -1);
- busy_time += effective_cpu_util(cpu, util, ENERGY_UTIL, NULL);
- }
- eenv->pd_busy_time = min(eenv->pd_cap, busy_time);
- }
- /*
- * Compute the maximum utilization for compute_energy() when the task @p
- * is placed on the cpu @dst_cpu.
- *
- * Returns the maximum utilization among @eenv->cpus. This utilization can't
- * exceed @eenv->cpu_cap.
- */
- static inline unsigned long
- eenv_pd_max_util(struct energy_env *eenv, struct cpumask *pd_cpus,
- struct task_struct *p, int dst_cpu)
- {
- unsigned long max_util = 0;
- int cpu;
- for_each_cpu(cpu, pd_cpus) {
- struct task_struct *tsk = (cpu == dst_cpu) ? p : NULL;
- unsigned long util = cpu_util_next(cpu, p, dst_cpu);
- unsigned long cpu_util;
- /*
- * Performance domain frequency: utilization clamping
- * must be considered since it affects the selection
- * of the performance domain frequency.
- * NOTE: in case RT tasks are running, by default the
- * FREQUENCY_UTIL's utilization can be max OPP.
- */
- cpu_util = effective_cpu_util(cpu, util, FREQUENCY_UTIL, tsk);
- max_util = max(max_util, cpu_util);
- }
- return min(max_util, eenv->cpu_cap);
- }
- /*
- * compute_energy(): Use the Energy Model to estimate the energy that @pd would
- * consume for a given utilization landscape @eenv. When @dst_cpu < 0, the task
- * contribution is ignored.
- */
- static inline unsigned long
- compute_energy(struct energy_env *eenv, struct perf_domain *pd,
- struct cpumask *pd_cpus, struct task_struct *p, int dst_cpu)
- {
- unsigned long max_util = eenv_pd_max_util(eenv, pd_cpus, p, dst_cpu);
- unsigned long busy_time = eenv->pd_busy_time;
- if (dst_cpu >= 0)
- busy_time = min(eenv->pd_cap, busy_time + eenv->task_busy_time);
- return em_cpu_energy(pd->em_pd, max_util, busy_time, eenv->cpu_cap);
- }
- /*
- * find_energy_efficient_cpu(): Find most energy-efficient target CPU for the
- * waking task. find_energy_efficient_cpu() looks for the CPU with maximum
- * spare capacity in each performance domain and uses it as a potential
- * candidate to execute the task. Then, it uses the Energy Model to figure
- * out which of the CPU candidates is the most energy-efficient.
- *
- * The rationale for this heuristic is as follows. In a performance domain,
- * all the most energy efficient CPU candidates (according to the Energy
- * Model) are those for which we'll request a low frequency. When there are
- * several CPUs for which the frequency request will be the same, we don't
- * have enough data to break the tie between them, because the Energy Model
- * only includes active power costs. With this model, if we assume that
- * frequency requests follow utilization (e.g. using schedutil), the CPU with
- * the maximum spare capacity in a performance domain is guaranteed to be among
- * the best candidates of the performance domain.
- *
- * In practice, it could be preferable from an energy standpoint to pack
- * small tasks on a CPU in order to let other CPUs go in deeper idle states,
- * but that could also hurt our chances to go cluster idle, and we have no
- * ways to tell with the current Energy Model if this is actually a good
- * idea or not. So, find_energy_efficient_cpu() basically favors
- * cluster-packing, and spreading inside a cluster. That should at least be
- * a good thing for latency, and this is consistent with the idea that most
- * of the energy savings of EAS come from the asymmetry of the system, and
- * not so much from breaking the tie between identical CPUs. That's also the
- * reason why EAS is enabled in the topology code only for systems where
- * SD_ASYM_CPUCAPACITY is set.
- *
- * NOTE: Forkees are not accepted in the energy-aware wake-up path because
- * they don't have any useful utilization data yet and it's not possible to
- * forecast their impact on energy consumption. Consequently, they will be
- * placed by find_idlest_cpu() on the least loaded CPU, which might turn out
- * to be energy-inefficient in some use-cases. The alternative would be to
- * bias new tasks towards specific types of CPUs first, or to try to infer
- * their util_avg from the parent task, but those heuristics could hurt
- * other use-cases too. So, until someone finds a better way to solve this,
- * let's keep things simple by re-using the existing slow path.
- */
- static int find_energy_efficient_cpu(struct task_struct *p, int prev_cpu, int sync)
- {
- struct cpumask *cpus = this_cpu_cpumask_var_ptr(select_rq_mask);
- unsigned long prev_delta = ULONG_MAX, best_delta = ULONG_MAX;
- unsigned long p_util_min = uclamp_is_used() ? uclamp_eff_value(p, UCLAMP_MIN) : 0;
- unsigned long p_util_max = uclamp_is_used() ? uclamp_eff_value(p, UCLAMP_MAX) : 1024;
- struct root_domain *rd = this_rq()->rd;
- int cpu, best_energy_cpu, target = -1;
- int prev_fits = -1, best_fits = -1;
- unsigned long best_thermal_cap = 0;
- unsigned long prev_thermal_cap = 0;
- struct sched_domain *sd;
- struct perf_domain *pd;
- struct energy_env eenv;
- int new_cpu = INT_MAX;
- trace_android_rvh_find_energy_efficient_cpu(p, prev_cpu, sync, &new_cpu);
- if (new_cpu != INT_MAX)
- return new_cpu;
- sync_entity_load_avg(&p->se);
- rcu_read_lock();
- pd = rcu_dereference(rd->pd);
- if (!pd || READ_ONCE(rd->overutilized))
- goto unlock;
- cpu = smp_processor_id();
- if (sync && cpu_rq(cpu)->nr_running == 1 &&
- cpumask_test_cpu(cpu, p->cpus_ptr) &&
- task_fits_cpu(p, cpu)) {
- rcu_read_unlock();
- return cpu;
- }
- /*
- * Energy-aware wake-up happens on the lowest sched_domain starting
- * from sd_asym_cpucapacity spanning over this_cpu and prev_cpu.
- */
- sd = rcu_dereference(*this_cpu_ptr(&sd_asym_cpucapacity));
- while (sd && !cpumask_test_cpu(prev_cpu, sched_domain_span(sd)))
- sd = sd->parent;
- if (!sd)
- goto unlock;
- target = prev_cpu;
- if (!task_util_est(p) && p_util_min == 0)
- goto unlock;
- eenv_task_busy_time(&eenv, p, prev_cpu);
- for (; pd; pd = pd->next) {
- unsigned long util_min = p_util_min, util_max = p_util_max;
- unsigned long cpu_cap, cpu_thermal_cap, util;
- long prev_spare_cap = -1, max_spare_cap = -1;
- unsigned long rq_util_min, rq_util_max;
- unsigned long cur_delta, base_energy;
- int max_spare_cap_cpu = -1;
- int fits, max_fits = -1;
- cpumask_and(cpus, perf_domain_span(pd), cpu_online_mask);
- if (cpumask_empty(cpus))
- continue;
- /* Account thermal pressure for the energy estimation */
- cpu = cpumask_first(cpus);
- cpu_thermal_cap = arch_scale_cpu_capacity(cpu);
- cpu_thermal_cap -= arch_scale_thermal_pressure(cpu);
- eenv.cpu_cap = cpu_thermal_cap;
- eenv.pd_cap = 0;
- for_each_cpu(cpu, cpus) {
- struct rq *rq = cpu_rq(cpu);
- eenv.pd_cap += cpu_thermal_cap;
- if (!cpumask_test_cpu(cpu, sched_domain_span(sd)))
- continue;
- if (!cpumask_test_cpu(cpu, p->cpus_ptr))
- continue;
- util = cpu_util_next(cpu, p, cpu);
- cpu_cap = capacity_of(cpu);
- /*
- * Skip CPUs that cannot satisfy the capacity request.
- * IOW, placing the task there would make the CPU
- * overutilized. Take uclamp into account to see how
- * much capacity we can get out of the CPU; this is
- * aligned with sched_cpu_util().
- */
- if (uclamp_is_used() && !uclamp_rq_is_idle(rq)) {
- /*
- * Open code uclamp_rq_util_with() except for
- * the clamp() part, i.e. apply max aggregation
- * only. The util_fits_cpu() logic needs to
- * operate on non-clamped util but must use the
- * max-aggregated uclamp_{min, max}.
- */
- rq_util_min = uclamp_rq_get(rq, UCLAMP_MIN);
- rq_util_max = uclamp_rq_get(rq, UCLAMP_MAX);
- util_min = max(rq_util_min, p_util_min);
- util_max = max(rq_util_max, p_util_max);
- }
- fits = util_fits_cpu(util, util_min, util_max, cpu);
- if (!fits)
- continue;
- lsub_positive(&cpu_cap, util);
- if (cpu == prev_cpu) {
- /* Always use prev_cpu as a candidate. */
- prev_spare_cap = cpu_cap;
- prev_fits = fits;
- } else if ((fits > max_fits) ||
- ((fits == max_fits) && ((long)cpu_cap > max_spare_cap))) {
- /*
- * Find the CPU with the maximum spare capacity
- * among the remaining CPUs in the performance
- * domain.
- */
- max_spare_cap = cpu_cap;
- max_spare_cap_cpu = cpu;
- max_fits = fits;
- }
- }
- if (max_spare_cap_cpu < 0 && prev_spare_cap < 0)
- continue;
- eenv_pd_busy_time(&eenv, cpus, p);
- /* Compute the 'base' energy of the pd, without @p */
- base_energy = compute_energy(&eenv, pd, cpus, p, -1);
- /* Evaluate the energy impact of using prev_cpu. */
- if (prev_spare_cap > -1) {
- prev_delta = compute_energy(&eenv, pd, cpus, p,
- prev_cpu);
- /* CPU utilization has changed */
- if (prev_delta < base_energy)
- goto unlock;
- prev_delta -= base_energy;
- prev_thermal_cap = cpu_thermal_cap;
- best_delta = min(best_delta, prev_delta);
- }
- /* Evaluate the energy impact of using max_spare_cap_cpu. */
- if (max_spare_cap_cpu >= 0 && max_spare_cap > prev_spare_cap) {
- /* Current best energy cpu fits better */
- if (max_fits < best_fits)
- continue;
- /*
- * Both don't fit performance hint (i.e. uclamp_min)
- * but best energy cpu has better capacity.
- */
- if ((max_fits < 0) &&
- (cpu_thermal_cap <= best_thermal_cap))
- continue;
- cur_delta = compute_energy(&eenv, pd, cpus, p,
- max_spare_cap_cpu);
- /* CPU utilization has changed */
- if (cur_delta < base_energy)
- goto unlock;
- cur_delta -= base_energy;
- /*
- * Both fit for the task but best energy cpu has lower
- * energy impact.
- */
- if ((max_fits > 0) && (best_fits > 0) &&
- (cur_delta >= best_delta))
- continue;
- best_delta = cur_delta;
- best_energy_cpu = max_spare_cap_cpu;
- best_fits = max_fits;
- best_thermal_cap = cpu_thermal_cap;
- }
- }
- rcu_read_unlock();
- if ((best_fits > prev_fits) ||
- ((best_fits > 0) && (best_delta < prev_delta)) ||
- ((best_fits < 0) && (best_thermal_cap > prev_thermal_cap)))
- target = best_energy_cpu;
- return target;
- unlock:
- rcu_read_unlock();
- return target;
- }
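- /*
-  * Illustrative flow of the loop above (made-up energy units): for one pd,
-  * base_energy = compute_energy(&eenv, pd, cpus, p, -1) might be 1000; with p
-  * placed on prev_cpu it becomes 1150 and on the max-spare-capacity candidate
-  * 1100, giving prev_delta = 150 and cur_delta = 100. Provided the candidate
-  * also satisfies the util_fits_cpu() checks, best_delta ends up at 100 and
-  * the candidate is returned instead of prev_cpu.
-  */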
- /*
- * select_task_rq_fair: Select target runqueue for the waking task in domains
- * that have the relevant SD flag set. In practice, this is SD_BALANCE_WAKE,
- * SD_BALANCE_FORK, or SD_BALANCE_EXEC.
- *
- * Balances load by selecting the idlest CPU in the idlest group, or under
- * certain conditions an idle sibling CPU if the domain has SD_WAKE_AFFINE set.
- *
- * Returns the target CPU number.
- */
- static int
- select_task_rq_fair(struct task_struct *p, int prev_cpu, int wake_flags)
- {
- int sync = (wake_flags & WF_SYNC) && !(current->flags & PF_EXITING);
- struct sched_domain *tmp, *sd = NULL;
- int cpu = smp_processor_id();
- int new_cpu = prev_cpu;
- int want_affine = 0;
- int target_cpu = -1;
- /* SD_flags and WF_flags share the first nibble */
- int sd_flag = wake_flags & 0xF;
- if (trace_android_rvh_select_task_rq_fair_enabled() &&
- !(sd_flag & SD_BALANCE_FORK))
- sync_entity_load_avg(&p->se);
- trace_android_rvh_select_task_rq_fair(p, prev_cpu, sd_flag,
- wake_flags, &target_cpu);
- if (target_cpu >= 0)
- return target_cpu;
- /*
- * required for stable ->cpus_allowed
- */
- lockdep_assert_held(&p->pi_lock);
- if (wake_flags & WF_TTWU) {
- record_wakee(p);
- if (sched_energy_enabled()) {
- new_cpu = find_energy_efficient_cpu(p, prev_cpu, sync);
- if (new_cpu >= 0)
- return new_cpu;
- new_cpu = prev_cpu;
- }
- want_affine = !wake_wide(p) && cpumask_test_cpu(cpu, p->cpus_ptr);
- }
- rcu_read_lock();
- for_each_domain(cpu, tmp) {
- /*
- * If both 'cpu' and 'prev_cpu' are part of this domain,
- * cpu is a valid SD_WAKE_AFFINE target.
- */
- if (want_affine && (tmp->flags & SD_WAKE_AFFINE) &&
- cpumask_test_cpu(prev_cpu, sched_domain_span(tmp))) {
- if (cpu != prev_cpu)
- new_cpu = wake_affine(tmp, p, cpu, prev_cpu, sync);
- sd = NULL; /* Prefer wake_affine over balance flags */
- break;
- }
- /*
- * Usually only true for WF_EXEC and WF_FORK, as sched_domains
- * usually do not have SD_BALANCE_WAKE set. That means wakeup
- * will usually go to the fast path.
- */
- if (tmp->flags & sd_flag)
- sd = tmp;
- else if (!want_affine)
- break;
- }
- if (unlikely(sd)) {
- /* Slow path */
- new_cpu = find_idlest_cpu(sd, p, cpu, prev_cpu, sd_flag);
- } else if (wake_flags & WF_TTWU) { /* XXX always ? */
- /* Fast path */
- new_cpu = select_idle_sibling(p, prev_cpu, new_cpu);
- }
- rcu_read_unlock();
- return new_cpu;
- }
- /*
- * Called immediately before a task is migrated to a new CPU; task_cpu(p) and
- * cfs_rq_of(p) references at time of call are still valid and identify the
- * previous CPU. The caller guarantees p->pi_lock or task_rq(p)->lock is held.
- */
- static void migrate_task_rq_fair(struct task_struct *p, int new_cpu)
- {
- struct sched_entity *se = &p->se;
- /*
- * As blocked tasks retain absolute vruntime the migration needs to
- * deal with this by subtracting the old and adding the new
- * min_vruntime -- the latter is done by enqueue_entity() when placing
- * the task on the new runqueue.
- */
- if (READ_ONCE(p->__state) == TASK_WAKING) {
- struct cfs_rq *cfs_rq = cfs_rq_of(se);
- se->vruntime -= u64_u32_load(cfs_rq->min_vruntime);
- }
- if (!task_on_rq_migrating(p)) {
- remove_entity_load_avg(se);
- /*
- * Here, the task's PELT values have been updated according to
- * the current rq's clock. But if that clock hasn't been
- * updated in a while, a substantial idle time will be missed,
- * leading to an inflation after wake-up on the new rq.
- *
- * Estimate the missing time from the cfs_rq last_update_time
- * and update sched_avg to improve the PELT continuity after
- * migration.
- */
- migrate_se_pelt_lag(se);
- }
- /* Tell new CPU we are migrated */
- se->avg.last_update_time = 0;
- update_scan_period(p, new_cpu);
- }
- static void task_dead_fair(struct task_struct *p)
- {
- remove_entity_load_avg(&p->se);
- }
- static int
- balance_fair(struct rq *rq, struct task_struct *prev, struct rq_flags *rf)
- {
- if (rq->nr_running)
- return 1;
- return newidle_balance(rq, rf) != 0;
- }
- #endif /* CONFIG_SMP */
- static unsigned long wakeup_gran(struct sched_entity *se)
- {
- unsigned long gran = sysctl_sched_wakeup_granularity;
- /*
- * Since it is curr that is running now, convert the gran from real-time
- * to virtual-time in its units.
- *
- * By using 'se' instead of 'curr' we penalize light tasks, so
- * they get preempted easier. That is, if 'se' < 'curr' then
- * the resulting gran will be larger, therefore penalizing the
- * lighter, if otoh 'se' > 'curr' then the resulting gran will
- * be smaller, again penalizing the lighter task.
- *
- * This is especially important for buddies when the leftmost
- * task is higher priority than the buddy.
- */
- return calc_delta_fair(gran, se);
- }
- /*
- * Should 'se' preempt 'curr'.
- *
- * With curr's vruntime at c and a granularity window g extending behind it:
- * a wakee s1 whose vruntime is not behind curr's does not preempt, a wakee
- * s2 that lags curr by less than g does not preempt either, and a wakee s3
- * that lags curr by more than g does:
- *
- * w(c, s1) = -1
- * w(c, s2) = 0
- * w(c, s3) = 1
- *
- */
- static int
- wakeup_preempt_entity(struct sched_entity *curr, struct sched_entity *se)
- {
- s64 gran, vdiff = curr->vruntime - se->vruntime;
- if (vdiff <= 0)
- return -1;
- gran = wakeup_gran(se);
- if (vdiff > gran)
- return 1;
- return 0;
- }
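- /*
-  * Worked example (assuming default sched_prio_to_weight[] values and a
-  * wakeup granularity of 1 ms purely for illustration): with vdiff = 600 us,
-  * a nice-0 wakee (weight 1024) gets gran = 1 ms and does not preempt (0),
-  * a nice -5 wakee (weight 3121) gets gran ~= 328 us and preempts (1), while
-  * a nice 5 wakee (weight 335) gets gran ~= 3.1 ms and does not (0), matching
-  * the "penalize light tasks" note in wakeup_gran().
-  */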
- static void set_last_buddy(struct sched_entity *se)
- {
- for_each_sched_entity(se) {
- if (SCHED_WARN_ON(!se->on_rq))
- return;
- if (se_is_idle(se))
- return;
- cfs_rq_of(se)->last = se;
- }
- }
- static void set_next_buddy(struct sched_entity *se)
- {
- for_each_sched_entity(se) {
- if (SCHED_WARN_ON(!se->on_rq))
- return;
- if (se_is_idle(se))
- return;
- cfs_rq_of(se)->next = se;
- }
- }
- static void set_skip_buddy(struct sched_entity *se)
- {
- for_each_sched_entity(se)
- cfs_rq_of(se)->skip = se;
- }
- /*
- * Preempt the current task with a newly woken task if needed:
- */
- static void check_preempt_wakeup(struct rq *rq, struct task_struct *p, int wake_flags)
- {
- struct task_struct *curr = rq->curr;
- struct sched_entity *se = &curr->se, *pse = &p->se;
- struct cfs_rq *cfs_rq = task_cfs_rq(curr);
- int scale = cfs_rq->nr_running >= sched_nr_latency;
- int next_buddy_marked = 0;
- int cse_is_idle, pse_is_idle;
- bool ignore = false;
- bool preempt = false;
- if (unlikely(se == pse))
- return;
- trace_android_rvh_check_preempt_wakeup_ignore(curr, &ignore);
- if (ignore)
- return;
- /*
- * This is possible from callers such as attach_tasks(), in which we
- * unconditionally check_preempt_curr() after an enqueue (which may have
- * led to a throttle). This both saves work and prevents false
- * next-buddy nomination below.
- */
- if (unlikely(throttled_hierarchy(cfs_rq_of(pse))))
- return;
- if (sched_feat(NEXT_BUDDY) && scale && !(wake_flags & WF_FORK)) {
- set_next_buddy(pse);
- next_buddy_marked = 1;
- }
- /*
- * We can come here with TIF_NEED_RESCHED already set from new task
- * wake up path.
- *
- * Note: this also catches the edge-case of curr being in a throttled
- * group (e.g. via set_curr_task), since update_curr() (in the
- * enqueue of curr) will have resulted in resched being set. This
- * prevents us from potentially nominating it as a false LAST_BUDDY
- * below.
- */
- if (test_tsk_need_resched(curr))
- return;
- /* Idle tasks are by definition preempted by non-idle tasks. */
- if (unlikely(task_has_idle_policy(curr)) &&
- likely(!task_has_idle_policy(p)))
- goto preempt;
- /*
- * Batch and idle tasks do not preempt non-idle tasks (their preemption
- * is driven by the tick):
- */
- if (unlikely(p->policy != SCHED_NORMAL) || !sched_feat(WAKEUP_PREEMPTION))
- return;
- find_matching_se(&se, &pse);
- WARN_ON_ONCE(!pse);
- cse_is_idle = se_is_idle(se);
- pse_is_idle = se_is_idle(pse);
- /*
- * Preempt an idle group in favor of a non-idle group (and don't preempt
- * in the inverse case).
- */
- if (cse_is_idle && !pse_is_idle)
- goto preempt;
- if (cse_is_idle != pse_is_idle)
- return;
- update_curr(cfs_rq_of(se));
- trace_android_rvh_check_preempt_wakeup(rq, p, &preempt, &ignore,
- wake_flags, se, pse, next_buddy_marked, sysctl_sched_wakeup_granularity);
- if (preempt)
- goto preempt;
- if (ignore)
- return;
- if (wakeup_preempt_entity(se, pse) == 1) {
- /*
- * Bias pick_next to pick the sched entity that is
- * triggering this preemption.
- */
- if (!next_buddy_marked)
- set_next_buddy(pse);
- goto preempt;
- }
- return;
- preempt:
- resched_curr(rq);
- /*
- * Only set the backward buddy when the current task is still
- * on the rq. This can happen when a wakeup gets interleaved
- * with schedule on the ->pre_schedule() or idle_balance()
- * point, either of which can drop the rq lock.
- *
- * Also, during early boot the idle thread is in the fair class,
- * for obvious reasons it's a bad idea to schedule back to it.
- */
- if (unlikely(!se->on_rq || curr == rq->idle))
- return;
- if (sched_feat(LAST_BUDDY) && scale && entity_is_task(se))
- set_last_buddy(se);
- }
- #ifdef CONFIG_SMP
- static struct task_struct *pick_task_fair(struct rq *rq)
- {
- struct sched_entity *se;
- struct cfs_rq *cfs_rq;
- again:
- cfs_rq = &rq->cfs;
- if (!cfs_rq->nr_running)
- return NULL;
- do {
- struct sched_entity *curr = cfs_rq->curr;
- /* When we pick for a remote RQ, we'll not have done put_prev_entity() */
- if (curr) {
- if (curr->on_rq)
- update_curr(cfs_rq);
- else
- curr = NULL;
- if (unlikely(check_cfs_rq_runtime(cfs_rq)))
- goto again;
- }
- se = pick_next_entity(cfs_rq, curr);
- if (unlikely(!se))
- goto again;
- cfs_rq = group_cfs_rq(se);
- } while (cfs_rq);
- return task_of(se);
- }
- #endif
- struct task_struct *
- pick_next_task_fair(struct rq *rq, struct task_struct *prev, struct rq_flags *rf)
- {
- struct cfs_rq *cfs_rq = &rq->cfs;
- struct sched_entity *se = NULL;
- struct task_struct *p = NULL;
- int new_tasks;
- bool repick = false;
- again:
- if (!sched_fair_runnable(rq))
- goto idle;
- #ifdef CONFIG_FAIR_GROUP_SCHED
- if (!prev || prev->sched_class != &fair_sched_class)
- goto simple;
- /*
- * Because of the set_next_buddy() in dequeue_task_fair() it is rather
- * likely that a next task is from the same cgroup as the current.
- *
- * Therefore attempt to avoid putting and setting the entire cgroup
- * hierarchy, only change the part that actually changes.
- */
- do {
- struct sched_entity *curr = cfs_rq->curr;
- /*
- * Since we got here without doing put_prev_entity() we also
- * have to consider cfs_rq->curr. If it is still a runnable
- * entity, update_curr() will update its vruntime, otherwise
- * forget we've ever seen it.
- */
- if (curr) {
- if (curr->on_rq)
- update_curr(cfs_rq);
- else
- curr = NULL;
- /*
- * This call to check_cfs_rq_runtime() will do the
- * throttle and dequeue its entity in the parent(s).
- * Therefore the nr_running test will indeed
- * be correct.
- */
- if (unlikely(check_cfs_rq_runtime(cfs_rq))) {
- cfs_rq = &rq->cfs;
- if (!cfs_rq->nr_running)
- goto idle;
- goto simple;
- }
- }
- se = pick_next_entity(cfs_rq, curr);
- if (unlikely(!se)) {
- cfs_rq = &rq->cfs;
- goto again;
- }
- cfs_rq = group_cfs_rq(se);
- } while (cfs_rq);
- p = task_of(se);
- trace_android_rvh_replace_next_task_fair(rq, &p, &se, &repick, false, prev);
- /*
- * Since we haven't yet done put_prev_entity and if the selected task
- * is a different task than we started out with, try and touch the
- * least amount of cfs_rqs.
- */
- if (prev != p) {
- struct sched_entity *pse = &prev->se;
- while (!(cfs_rq = is_same_group(se, pse))) {
- int se_depth = se->depth;
- int pse_depth = pse->depth;
- if (se_depth <= pse_depth) {
- put_prev_entity(cfs_rq_of(pse), pse);
- pse = parent_entity(pse);
- }
- if (se_depth >= pse_depth) {
- set_next_entity(cfs_rq_of(se), se);
- se = parent_entity(se);
- }
- }
- put_prev_entity(cfs_rq, pse);
- set_next_entity(cfs_rq, se);
- }
- goto done;
- simple:
- #endif
- if (prev)
- put_prev_task(rq, prev);
- trace_android_rvh_replace_next_task_fair(rq, &p, &se, &repick, true, prev);
- if (repick)
- goto done;
- do {
- se = pick_next_entity(cfs_rq, NULL);
- if (unlikely(!se)) {
- cfs_rq = &rq->cfs;
- goto again;
- }
- set_next_entity(cfs_rq, se);
- cfs_rq = group_cfs_rq(se);
- } while (cfs_rq);
- p = task_of(se);
- done: __maybe_unused;
- #ifdef CONFIG_SMP
- /*
- * Move the next running task to the front of
- * the list, so our cfs_tasks list becomes an MRU one.
- */
- list_move(&p->se.group_node, &rq->cfs_tasks);
- #endif
- if (hrtick_enabled_fair(rq))
- hrtick_start_fair(rq, p);
- update_misfit_status(p, rq);
- return p;
- idle:
- if (!rf)
- return NULL;
- new_tasks = newidle_balance(rq, rf);
- /*
- * Because newidle_balance() releases (and re-acquires) rq->lock, it is
- * possible for any higher priority task to appear. In that case we
- * must re-start the pick_next_entity() loop.
- */
- if (new_tasks < 0)
- return RETRY_TASK;
- if (new_tasks > 0)
- goto again;
- /*
- * rq is about to be idle, check if we need to update the
- * lost_idle_time of clock_pelt
- */
- update_idle_rq_clock_pelt(rq);
- return NULL;
- }
- static struct task_struct *__pick_next_task_fair(struct rq *rq)
- {
- return pick_next_task_fair(rq, NULL, NULL);
- }
- /*
- * Account for a descheduled task:
- */
- static void put_prev_task_fair(struct rq *rq, struct task_struct *prev)
- {
- struct sched_entity *se = &prev->se;
- struct cfs_rq *cfs_rq;
- for_each_sched_entity(se) {
- cfs_rq = cfs_rq_of(se);
- put_prev_entity(cfs_rq, se);
- }
- }
- /*
- * sched_yield() is very simple
- *
- * The magic of dealing with the ->skip buddy is in pick_next_entity.
- */
- static void yield_task_fair(struct rq *rq)
- {
- struct task_struct *curr = rq->curr;
- struct cfs_rq *cfs_rq = task_cfs_rq(curr);
- struct sched_entity *se = &curr->se;
- /*
- * Are we the only task in the tree?
- */
- if (unlikely(rq->nr_running == 1))
- return;
- clear_buddies(cfs_rq, se);
- if (curr->policy != SCHED_BATCH) {
- update_rq_clock(rq);
- /*
- * Update run-time statistics of the 'current'.
- */
- update_curr(cfs_rq);
- /*
- * Tell update_rq_clock() that we've just updated,
- * so we don't do microscopic update in schedule()
- * and double the fastpath cost.
- */
- rq_clock_skip_update(rq);
- }
- set_skip_buddy(se);
- }
- static bool yield_to_task_fair(struct rq *rq, struct task_struct *p)
- {
- struct sched_entity *se = &p->se;
- /* throttled hierarchies are not runnable */
- if (!se->on_rq || throttled_hierarchy(cfs_rq_of(se)))
- return false;
- /* Tell the scheduler that we'd really like pse to run next. */
- set_next_buddy(se);
- yield_task_fair(rq);
- return true;
- }
- #ifdef CONFIG_SMP
- /**************************************************
- * Fair scheduling class load-balancing methods.
- *
- * BASICS
- *
- * The purpose of load-balancing is to achieve the same basic fairness the
- * per-CPU scheduler provides, namely provide a proportional amount of compute
- * time to each task. This is expressed in the following equation:
- *
- * W_i,n/P_i == W_j,n/P_j for all i,j (1)
- *
- * Where W_i,n is the n-th weight average for CPU i. The instantaneous weight
- * W_i,0 is defined as:
- *
- * W_i,0 = \Sum_j w_i,j (2)
- *
- * Where w_i,j is the weight of the j-th runnable task on CPU i. This weight
- * is derived from the nice value as per sched_prio_to_weight[].
- *
- * The weight average is an exponential decay average of the instantaneous
- * weight:
- *
- * W'_i,n = (2^n - 1) / 2^n * W_i,n + 1 / 2^n * W_i,0 (3)
- *
- * C_i is the compute capacity of CPU i, typically it is the
- * fraction of 'recent' time available for SCHED_OTHER task execution. But it
- * can also include other factors [XXX].
- *
- * To achieve this balance we define a measure of imbalance which follows
- * directly from (1):
- *
- * imb_i,j = max{ avg(W/C), W_i/C_i } - min{ avg(W/C), W_j/C_j } (4)
- *
- * We then move tasks around to minimize the imbalance. In the continuous
- * function space it is obvious this converges, in the discrete case we get
- * a few fun cases generally called infeasible weight scenarios.
- *
- * [XXX expand on:
- * - infeasible weights;
- * - local vs global optima in the discrete case. ]
- *
- *
- * SCHED DOMAINS
- *
- * In order to solve the imbalance equation (4), and avoid the obvious O(n^2)
- * for all i,j solution, we create a tree of CPUs that follows the hardware
- * topology where each level pairs two lower groups (or better). This results
- * in O(log n) layers. Furthermore we reduce the number of CPUs going up the
- * tree to only the first of the previous level and we decrease the frequency
- * of load-balance at each level inv. proportional to the number of CPUs in
- * the groups.
- *
- * This yields:
- *
- * \Sum_{i = 0}^{log_2 n} (1/2^i) * (n/2^i) * 2^i = O(n) (5)
- *
- * where, at level i, 1/2^i is the load-balance frequency, n/2^i is the
- * number of CPUs doing load-balance and 2^i is the size of each group.
- *
- * Coupled with a limit on how many tasks we can migrate every balance pass,
- * this makes (5) the runtime complexity of the balancer.
- *
- * An important property here is that each CPU is still (indirectly) connected
- * to every other CPU in at most O(log n) steps:
- *
- * The adjacency matrix of the resulting graph is given by:
- *
- * A_i,j = \Union_{k = 0}^{log_2 n} (i % 2^k == 0) && i / 2^(k+1) == j / 2^(k+1) (6)
- *
- * And you'll find that:
- *
- * A^(log_2 n)_i,j != 0 for all i,j (7)
- *
- * Showing there's indeed a path between every CPU in at most O(log n) steps.
- * The task movement gives a factor of O(m), giving a convergence complexity
- * of:
- *
- * O(nm log n), n := nr_cpus, m := nr_tasks (8)
- *
- *
- * WORK CONSERVING
- *
- * In order to avoid CPUs going idle while there's still work to do, new idle
- * balancing is more aggressive and has the newly idle CPU iterate up the domain
- * tree itself instead of relying on other CPUs to bring it work.
- *
- * This adds some complexity to both (5) and (8) but it reduces the total idle
- * time.
- *
- * [XXX more?]
- *
- *
- * CGROUPS
- *
- * Cgroups make a horror show out of (2), instead of a simple sum we get:
- *
- * W_i,0 = \Sum_j \Prod_k w_k * (s_k,i / S_k) (9)
- *
- * Where
- *
- * s_k,i = \Sum_j w_i,j,k and S_k = \Sum_i s_k,i (10)
- *
- * w_i,j,k is the weight of the j-th runnable task in the k-th cgroup on CPU i.
- *
- * The big problem is S_k: it's a global sum needed to compute a local (W_i)
- * property.
- *
- * [XXX write more on how we solve this.. _after_ merging pjt's patches that
- * rewrite all of this once again.]
- */
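- /*
-  * Worked example of imbalance equation (4) (illustrative numbers): two CPUs
-  * of equal capacity C = 1024 with W_1 = 3072 (three nice-0 tasks) and
-  * W_2 = 1024 give W_1/C_1 = 3, W_2/C_2 = 1 and avg(W/C) = 2, hence
-  * imb_1,2 = max{2, 3} - min{2, 1} = 2. Migrating one task equalizes the
-  * ratios at 2 and drives the imbalance to 0.
-  */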
- unsigned long __read_mostly max_load_balance_interval = HZ/10;
- EXPORT_SYMBOL_GPL(max_load_balance_interval);
- enum fbq_type { regular, remote, all };
- /*
- * 'group_type' describes the group of CPUs at the moment of load balancing.
- *
- * The enum is ordered by pulling priority, with the group with lowest priority
- * first so the group_type can simply be compared when selecting the busiest
- * group. See update_sd_pick_busiest().
- */
- enum group_type {
- /* The group has spare capacity that can be used to run more tasks. */
- group_has_spare = 0,
- /*
- * The group is fully used and the tasks don't compete for more CPU
- * cycles. Nevertheless, some tasks might wait before running.
- */
- group_fully_busy,
- /*
- * One task doesn't fit with CPU's capacity and must be migrated to a
- * more powerful CPU.
- */
- group_misfit_task,
- /*
- * SD_ASYM_PACKING only: One local CPU with higher capacity is available,
- * and the task should be migrated to it instead of running on the
- * current CPU.
- */
- group_asym_packing,
- /*
- * The tasks' affinity constraints previously prevented the scheduler
- * from balancing the load across the system.
- */
- group_imbalanced,
- /*
- * The CPU is overloaded and can't provide expected CPU cycles to all
- * tasks.
- */
- group_overloaded
- };
- enum migration_type {
- migrate_load = 0,
- migrate_util,
- migrate_task,
- migrate_misfit
- };
- #define LBF_ALL_PINNED 0x01
- #define LBF_NEED_BREAK 0x02
- #define LBF_DST_PINNED 0x04
- #define LBF_SOME_PINNED 0x08
- #define LBF_ACTIVE_LB 0x10
- struct lb_env {
- struct sched_domain *sd;
- struct rq *src_rq;
- int src_cpu;
- int dst_cpu;
- struct rq *dst_rq;
- struct cpumask *dst_grpmask;
- int new_dst_cpu;
- enum cpu_idle_type idle;
- long imbalance;
- /* The set of CPUs under consideration for load-balancing */
- struct cpumask *cpus;
- unsigned int flags;
- unsigned int loop;
- unsigned int loop_break;
- unsigned int loop_max;
- enum fbq_type fbq_type;
- enum migration_type migration_type;
- struct list_head tasks;
- struct rq_flags *src_rq_rf;
- };
- /*
- * Is this task likely cache-hot:
- */
- static int task_hot(struct task_struct *p, struct lb_env *env)
- {
- s64 delta;
- lockdep_assert_rq_held(env->src_rq);
- if (p->sched_class != &fair_sched_class)
- return 0;
- if (unlikely(task_has_idle_policy(p)))
- return 0;
- /* SMT siblings share cache */
- if (env->sd->flags & SD_SHARE_CPUCAPACITY)
- return 0;
- /*
- * Buddy candidates are cache hot:
- */
- if (sched_feat(CACHE_HOT_BUDDY) && env->dst_rq->nr_running &&
- (&p->se == cfs_rq_of(&p->se)->next ||
- &p->se == cfs_rq_of(&p->se)->last))
- return 1;
- if (sysctl_sched_migration_cost == -1)
- return 1;
- /*
- * Don't migrate task if the task's cookie does not match
- * with the destination CPU's core cookie.
- */
- if (!sched_core_cookie_match(cpu_rq(env->dst_cpu), p))
- return 1;
- if (sysctl_sched_migration_cost == 0)
- return 0;
- delta = rq_clock_task(env->src_rq) - p->se.exec_start;
- return delta < (s64)sysctl_sched_migration_cost;
- }
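- /*
-  * Worked example (assuming the usual sysctl_sched_migration_cost default of
-  * 500000 ns): a fair task whose exec_start is 200 us in the past has
-  * delta = 200000 < 500000 and is treated as cache hot, so it is skipped
-  * unless the aggressive-migration conditions in can_migrate_task() apply.
-  */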
- #ifdef CONFIG_NUMA_BALANCING
- /*
- * Returns 1, if task migration degrades locality
- * Returns 0, if task migration improves locality, i.e. migration is preferred.
- * Returns -1, if task migration is not affected by locality.
- */
- static int migrate_degrades_locality(struct task_struct *p, struct lb_env *env)
- {
- struct numa_group *numa_group = rcu_dereference(p->numa_group);
- unsigned long src_weight, dst_weight;
- int src_nid, dst_nid, dist;
- if (!static_branch_likely(&sched_numa_balancing))
- return -1;
- if (!p->numa_faults || !(env->sd->flags & SD_NUMA))
- return -1;
- src_nid = cpu_to_node(env->src_cpu);
- dst_nid = cpu_to_node(env->dst_cpu);
- if (src_nid == dst_nid)
- return -1;
- /* Migrating away from the preferred node is always bad. */
- if (src_nid == p->numa_preferred_nid) {
- if (env->src_rq->nr_running > env->src_rq->nr_preferred_running)
- return 1;
- else
- return -1;
- }
- /* Encourage migration to the preferred node. */
- if (dst_nid == p->numa_preferred_nid)
- return 0;
- /* Leaving a core idle is often worse than degrading locality. */
- if (env->idle == CPU_IDLE)
- return -1;
- dist = node_distance(src_nid, dst_nid);
- if (numa_group) {
- src_weight = group_weight(p, src_nid, dist);
- dst_weight = group_weight(p, dst_nid, dist);
- } else {
- src_weight = task_weight(p, src_nid, dist);
- dst_weight = task_weight(p, dst_nid, dist);
- }
- return dst_weight < src_weight;
- }
- #else
- static inline int migrate_degrades_locality(struct task_struct *p,
- struct lb_env *env)
- {
- return -1;
- }
- #endif
- /*
- * can_migrate_task - may task p from runqueue rq be migrated to this_cpu?
- */
- static
- int can_migrate_task(struct task_struct *p, struct lb_env *env)
- {
- int tsk_cache_hot;
- int can_migrate = 1;
- lockdep_assert_rq_held(env->src_rq);
- trace_android_rvh_can_migrate_task(p, env->dst_cpu, &can_migrate);
- if (!can_migrate)
- return 0;
- /*
- * We do not migrate tasks that are:
- * 1) throttled_lb_pair, or
- * 2) cannot be migrated to this CPU due to cpus_ptr, or
- * 3) running (obviously), or
- * 4) are cache-hot on their current CPU.
- */
- if (throttled_lb_pair(task_group(p), env->src_cpu, env->dst_cpu))
- return 0;
- /* Disregard pcpu kthreads; they are where they need to be. */
- if (kthread_is_per_cpu(p))
- return 0;
- if (!cpumask_test_cpu(env->dst_cpu, p->cpus_ptr)) {
- int cpu;
- schedstat_inc(p->stats.nr_failed_migrations_affine);
- env->flags |= LBF_SOME_PINNED;
- /*
- * Remember if this task can be migrated to any other CPU in
- * our sched_group. We may want to revisit it if we couldn't
- * meet load balance goals by pulling other tasks on src_cpu.
- *
- * Avoid computing new_dst_cpu
- * - for NEWLY_IDLE
- * - if we have already computed one in current iteration
- * - if it's an active balance
- */
- if (env->idle == CPU_NEWLY_IDLE ||
- env->flags & (LBF_DST_PINNED | LBF_ACTIVE_LB))
- return 0;
- /* Prevent re-selecting dst_cpu via env's CPUs: */
- for_each_cpu_and(cpu, env->dst_grpmask, env->cpus) {
- if (cpumask_test_cpu(cpu, p->cpus_ptr)) {
- env->flags |= LBF_DST_PINNED;
- env->new_dst_cpu = cpu;
- break;
- }
- }
- return 0;
- }
- /* Record that we found at least one task that could run on dst_cpu */
- env->flags &= ~LBF_ALL_PINNED;
- if (task_on_cpu(env->src_rq, p)) {
- schedstat_inc(p->stats.nr_failed_migrations_running);
- return 0;
- }
- /*
- * Aggressive migration if:
- * 1) active balance
- * 2) destination numa is preferred
- * 3) task is cache cold, or
- * 4) too many balance attempts have failed.
- */
- if (env->flags & LBF_ACTIVE_LB)
- return 1;
- tsk_cache_hot = migrate_degrades_locality(p, env);
- if (tsk_cache_hot == -1)
- tsk_cache_hot = task_hot(p, env);
- if (tsk_cache_hot <= 0 ||
- env->sd->nr_balance_failed > env->sd->cache_nice_tries) {
- if (tsk_cache_hot == 1) {
- schedstat_inc(env->sd->lb_hot_gained[env->idle]);
- schedstat_inc(p->stats.nr_forced_migrations);
- }
- return 1;
- }
- schedstat_inc(p->stats.nr_failed_migrations_hot);
- return 0;
- }
- /*
- * detach_task() -- detach the task for the migration specified in env
- */
- static void detach_task(struct task_struct *p, struct lb_env *env)
- {
- int detached = 0;
- lockdep_assert_rq_held(env->src_rq);
- /*
- * The vendor hook may drop the lock temporarily, so
- * pass the rq flags to unpin lock. We expect the
- * rq lock to be held after return.
- */
- trace_android_rvh_migrate_queued_task(env->src_rq, env->src_rq_rf, p,
- env->dst_cpu, &detached);
- if (detached)
- return;
- deactivate_task(env->src_rq, p, DEQUEUE_NOCLOCK);
- set_task_cpu(p, env->dst_cpu);
- }
- /*
- * detach_one_task() -- tries to dequeue exactly one task from env->src_rq, as
- * part of active balancing operations within "domain".
- *
- * Returns a task if successful and NULL otherwise.
- */
- static struct task_struct *detach_one_task(struct lb_env *env)
- {
- struct task_struct *p;
- lockdep_assert_rq_held(env->src_rq);
- list_for_each_entry_reverse(p,
- &env->src_rq->cfs_tasks, se.group_node) {
- if (!can_migrate_task(p, env))
- continue;
- detach_task(p, env);
- /*
- * Right now, this is only the second place where
- * lb_gained[env->idle] is updated (other is detach_tasks)
- * so we can safely collect stats here rather than
- * inside detach_tasks().
- */
- schedstat_inc(env->sd->lb_gained[env->idle]);
- return p;
- }
- return NULL;
- }
- /*
- * detach_tasks() -- tries to detach up to imbalance load/util/tasks from
- * busiest_rq, as part of a balancing operation within domain "sd".
- *
- * Returns number of detached tasks if successful and 0 otherwise.
- */
- static int detach_tasks(struct lb_env *env)
- {
- struct list_head *tasks = &env->src_rq->cfs_tasks;
- unsigned long util, load;
- struct task_struct *p;
- int detached = 0;
- lockdep_assert_rq_held(env->src_rq);
- /*
- * Source run queue has been emptied by another CPU, clear
- * LBF_ALL_PINNED flag as we will not test any task.
- */
- if (env->src_rq->nr_running <= 1) {
- env->flags &= ~LBF_ALL_PINNED;
- return 0;
- }
- if (env->imbalance <= 0)
- return 0;
- while (!list_empty(tasks)) {
- /*
- * We don't want to steal all, otherwise we may be treated likewise,
- * which could at worst lead to a livelock crash.
- */
- if (env->idle != CPU_NOT_IDLE && env->src_rq->nr_running <= 1)
- break;
- env->loop++;
- /*
- * We've more or less seen every task there is, call it quits
- * unless we haven't found any movable task yet.
- */
- if (env->loop > env->loop_max &&
- !(env->flags & LBF_ALL_PINNED))
- break;
- /* take a breather every nr_migrate tasks */
- if (env->loop > env->loop_break) {
- env->loop_break += SCHED_NR_MIGRATE_BREAK;
- env->flags |= LBF_NEED_BREAK;
- break;
- }
- p = list_last_entry(tasks, struct task_struct, se.group_node);
- if (!can_migrate_task(p, env))
- goto next;
- switch (env->migration_type) {
- case migrate_load:
- /*
- * Depending on the number of CPUs and tasks and the
- * cgroup hierarchy, task_h_load() can return a null
- * value. Make sure that env->imbalance decreases
- * otherwise detach_tasks() will stop only after
- * detaching up to loop_max tasks.
- */
- load = max_t(unsigned long, task_h_load(p), 1);
- if (sched_feat(LB_MIN) &&
- load < 16 && !env->sd->nr_balance_failed)
- goto next;
- /*
- * Make sure that we don't migrate too much load.
- * Nevertheless, let's relax the constraint if the
- * scheduler fails to find a good waiting task to
- * migrate.
- */
- if (shr_bound(load, env->sd->nr_balance_failed) > env->imbalance)
- goto next;
- env->imbalance -= load;
- break;
- case migrate_util:
- util = task_util_est(p);
- if (util > env->imbalance)
- goto next;
- env->imbalance -= util;
- break;
- case migrate_task:
- env->imbalance--;
- break;
- case migrate_misfit:
- /* This is not a misfit task */
- if (task_fits_cpu(p, env->src_cpu))
- goto next;
- env->imbalance = 0;
- break;
- }
- detach_task(p, env);
- list_add(&p->se.group_node, &env->tasks);
- detached++;
- #ifdef CONFIG_PREEMPTION
- /*
- * NEWIDLE balancing is a source of latency, so preemptible
- * kernels will stop after the first task is detached to minimize
- * the critical section.
- */
- if (env->idle == CPU_NEWLY_IDLE)
- break;
- #endif
- /*
- * We only want to steal up to the prescribed amount of
- * load/util/tasks.
- */
- if (env->imbalance <= 0)
- break;
- continue;
- next:
- list_move(&p->se.group_node, tasks);
- }
- /*
- * Right now, this is one of only two places we collect this stat
- * so we can safely collect detach_one_task() stats here rather
- * than inside detach_one_task().
- */
- schedstat_add(env->sd->lb_gained[env->idle], detached);
- return detached;
- }
- /*
- * attach_task() -- attach the task detached by detach_task() to its new rq.
- */
- static void attach_task(struct rq *rq, struct task_struct *p)
- {
- lockdep_assert_rq_held(rq);
- WARN_ON_ONCE(task_rq(p) != rq);
- activate_task(rq, p, ENQUEUE_NOCLOCK);
- check_preempt_curr(rq, p, 0);
- }
- /*
- * attach_one_task() -- attaches the task returned from detach_one_task() to
- * its new rq.
- */
- static void attach_one_task(struct rq *rq, struct task_struct *p)
- {
- struct rq_flags rf;
- rq_lock(rq, &rf);
- update_rq_clock(rq);
- attach_task(rq, p);
- rq_unlock(rq, &rf);
- }
- /*
- * attach_tasks() -- attaches all tasks detached by detach_tasks() to their
- * new rq.
- */
- static void attach_tasks(struct lb_env *env)
- {
- struct list_head *tasks = &env->tasks;
- struct task_struct *p;
- struct rq_flags rf;
- rq_lock(env->dst_rq, &rf);
- update_rq_clock(env->dst_rq);
- while (!list_empty(tasks)) {
- p = list_first_entry(tasks, struct task_struct, se.group_node);
- list_del_init(&p->se.group_node);
- attach_task(env->dst_rq, p);
- }
- rq_unlock(env->dst_rq, &rf);
- }
- #ifdef CONFIG_NO_HZ_COMMON
- static inline bool cfs_rq_has_blocked(struct cfs_rq *cfs_rq)
- {
- if (cfs_rq->avg.load_avg)
- return true;
- if (cfs_rq->avg.util_avg)
- return true;
- return false;
- }
- static inline bool others_have_blocked(struct rq *rq)
- {
- if (READ_ONCE(rq->avg_rt.util_avg))
- return true;
- if (READ_ONCE(rq->avg_dl.util_avg))
- return true;
- if (thermal_load_avg(rq))
- return true;
- #ifdef CONFIG_HAVE_SCHED_AVG_IRQ
- if (READ_ONCE(rq->avg_irq.util_avg))
- return true;
- #endif
- return false;
- }
- static inline void update_blocked_load_tick(struct rq *rq)
- {
- WRITE_ONCE(rq->last_blocked_load_update_tick, jiffies);
- }
- static inline void update_blocked_load_status(struct rq *rq, bool has_blocked)
- {
- if (!has_blocked)
- rq->has_blocked_load = 0;
- }
- #else
- static inline bool cfs_rq_has_blocked(struct cfs_rq *cfs_rq) { return false; }
- static inline bool others_have_blocked(struct rq *rq) { return false; }
- static inline void update_blocked_load_tick(struct rq *rq) {}
- static inline void update_blocked_load_status(struct rq *rq, bool has_blocked) {}
- #endif
- static bool __update_blocked_others(struct rq *rq, bool *done)
- {
- const struct sched_class *curr_class;
- u64 now = rq_clock_pelt(rq);
- unsigned long thermal_pressure;
- bool decayed;
- /*
- * update_load_avg() can call cpufreq_update_util(). Make sure that RT,
- * DL and IRQ signals have been updated before updating CFS.
- */
- curr_class = rq->curr->sched_class;
- thermal_pressure = arch_scale_thermal_pressure(cpu_of(rq));
- decayed = update_rt_rq_load_avg(now, rq, curr_class == &rt_sched_class) |
- update_dl_rq_load_avg(now, rq, curr_class == &dl_sched_class) |
- update_thermal_load_avg(rq_clock_thermal(rq), rq, thermal_pressure) |
- update_irq_load_avg(rq, 0);
- if (others_have_blocked(rq))
- *done = false;
- return decayed;
- }
- #ifdef CONFIG_FAIR_GROUP_SCHED
- static bool __update_blocked_fair(struct rq *rq, bool *done)
- {
- struct cfs_rq *cfs_rq, *pos;
- bool decayed = false;
- int cpu = cpu_of(rq);
- trace_android_rvh_update_blocked_fair(rq);
- /*
- * Iterates the task_group tree in a bottom up fashion, see
- * list_add_leaf_cfs_rq() for details.
- */
- for_each_leaf_cfs_rq_safe(rq, cfs_rq, pos) {
- struct sched_entity *se;
- if (update_cfs_rq_load_avg(cfs_rq_clock_pelt(cfs_rq), cfs_rq)) {
- update_tg_load_avg(cfs_rq);
- if (cfs_rq->nr_running == 0)
- update_idle_cfs_rq_clock_pelt(cfs_rq);
- if (cfs_rq == &rq->cfs)
- decayed = true;
- }
- /* Propagate pending load changes to the parent, if any: */
- se = cfs_rq->tg->se[cpu];
- if (se && !skip_blocked_update(se))
- update_load_avg(cfs_rq_of(se), se, UPDATE_TG);
- /*
- * There can be a lot of idle CPU cgroups. Don't let fully
- * decayed cfs_rqs linger on the list.
- */
- if (cfs_rq_is_decayed(cfs_rq))
- list_del_leaf_cfs_rq(cfs_rq);
- /* Don't need periodic decay once load/util_avg are null */
- if (cfs_rq_has_blocked(cfs_rq))
- *done = false;
- }
- return decayed;
- }
- /*
- * Compute the hierarchical load factor for cfs_rq and all its ancestors.
- * This needs to be done in a top-down fashion because the load of a child
- * group is a fraction of its parents load.
- */
- static void update_cfs_rq_h_load(struct cfs_rq *cfs_rq)
- {
- struct rq *rq = rq_of(cfs_rq);
- struct sched_entity *se = cfs_rq->tg->se[cpu_of(rq)];
- unsigned long now = jiffies;
- unsigned long load;
- if (cfs_rq->last_h_load_update == now)
- return;
- WRITE_ONCE(cfs_rq->h_load_next, NULL);
- for_each_sched_entity(se) {
- cfs_rq = cfs_rq_of(se);
- WRITE_ONCE(cfs_rq->h_load_next, se);
- if (cfs_rq->last_h_load_update == now)
- break;
- }
- if (!se) {
- cfs_rq->h_load = cfs_rq_load_avg(cfs_rq);
- cfs_rq->last_h_load_update = now;
- }
- while ((se = READ_ONCE(cfs_rq->h_load_next)) != NULL) {
- load = cfs_rq->h_load;
- load = div64_ul(load * se->avg.load_avg,
- cfs_rq_load_avg(cfs_rq) + 1);
- cfs_rq = group_cfs_rq(se);
- cfs_rq->h_load = load;
- cfs_rq->last_h_load_update = now;
- }
- }
- static unsigned long task_h_load(struct task_struct *p)
- {
- struct cfs_rq *cfs_rq = task_cfs_rq(p);
- update_cfs_rq_h_load(cfs_rq);
- return div64_ul(p->se.avg.load_avg * cfs_rq->h_load,
- cfs_rq_load_avg(cfs_rq) + 1);
- }
- #else
- static bool __update_blocked_fair(struct rq *rq, bool *done)
- {
- struct cfs_rq *cfs_rq = &rq->cfs;
- bool decayed;
- decayed = update_cfs_rq_load_avg(cfs_rq_clock_pelt(cfs_rq), cfs_rq);
- if (cfs_rq_has_blocked(cfs_rq))
- *done = false;
- return decayed;
- }
- static unsigned long task_h_load(struct task_struct *p)
- {
- return p->se.avg.load_avg;
- }
- #endif
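As a concrete illustration of the scaling done by the CONFIG_FAIR_GROUP_SCHED version of task_h_load() above, with assumed numbers: a task with se.avg.load_avg of 300, sitting on a cfs_rq whose h_load is 600 and whose cfs_rq_load_avg() is 900, is charged 300 * 600 / 901, roughly 199. A tiny standalone sketch of that last step:

#include <stdio.h>

/* task_h_load()'s final scaling step, with example values. */
static unsigned long h_load_share(unsigned long task_load_avg,
				  unsigned long cfs_rq_h_load,
				  unsigned long cfs_rq_load_avg)
{
	return task_load_avg * cfs_rq_h_load / (cfs_rq_load_avg + 1);
}

int main(void)
{
	printf("%lu\n", h_load_share(300, 600, 900));	/* ~199 */
	return 0;
}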
- static void update_blocked_averages(int cpu)
- {
- bool decayed = false, done = true;
- struct rq *rq = cpu_rq(cpu);
- struct rq_flags rf;
- rq_lock_irqsave(rq, &rf);
- update_blocked_load_tick(rq);
- update_rq_clock(rq);
- decayed |= __update_blocked_others(rq, &done);
- decayed |= __update_blocked_fair(rq, &done);
- update_blocked_load_status(rq, !done);
- if (decayed)
- cpufreq_update_util(rq, 0);
- rq_unlock_irqrestore(rq, &rf);
- }
- /********** Helpers for find_busiest_group ************************/
- /*
- * sg_lb_stats - stats of a sched_group required for load_balancing
- */
- struct sg_lb_stats {
- unsigned long avg_load; /* Avg load across the CPUs of the group */
- unsigned long group_load; /* Total load over the CPUs of the group */
- unsigned long group_capacity;
- unsigned long group_util; /* Total utilization over the CPUs of the group */
- unsigned long group_runnable; /* Total runnable time over the CPUs of the group */
- unsigned int sum_nr_running; /* Nr of tasks running in the group */
- unsigned int sum_h_nr_running; /* Nr of CFS tasks running in the group */
- unsigned int idle_cpus;
- unsigned int group_weight;
- enum group_type group_type;
- unsigned int group_asym_packing; /* Tasks should be moved to preferred CPU */
- unsigned long group_misfit_task_load; /* A CPU has a task too big for its capacity */
- #ifdef CONFIG_NUMA_BALANCING
- unsigned int nr_numa_running;
- unsigned int nr_preferred_running;
- #endif
- };
- /*
- * sd_lb_stats - Structure to store the statistics of a sched_domain
- * during load balancing.
- */
- struct sd_lb_stats {
- struct sched_group *busiest; /* Busiest group in this sd */
- struct sched_group *local; /* Local group in this sd */
- unsigned long total_load; /* Total load of all groups in sd */
- unsigned long total_capacity; /* Total capacity of all groups in sd */
- unsigned long avg_load; /* Average load across all groups in sd */
- unsigned int prefer_sibling; /* tasks should go to sibling first */
- struct sg_lb_stats busiest_stat;/* Statistics of the busiest group */
- struct sg_lb_stats local_stat; /* Statistics of the local group */
- };
- static inline void init_sd_lb_stats(struct sd_lb_stats *sds)
- {
- /*
- * Skimp on the clearing to avoid duplicate work. We can avoid clearing
- * local_stat because update_sg_lb_stats() does a full clear/assignment.
- * We must however set busiest_stat::group_type and
- * busiest_stat::idle_cpus to the worst busiest group because
- * update_sd_pick_busiest() reads these before assignment.
- */
- *sds = (struct sd_lb_stats){
- .busiest = NULL,
- .local = NULL,
- .total_load = 0UL,
- .total_capacity = 0UL,
- .busiest_stat = {
- .idle_cpus = UINT_MAX,
- .group_type = group_has_spare,
- },
- };
- }
- static unsigned long scale_rt_capacity(int cpu)
- {
- struct rq *rq = cpu_rq(cpu);
- unsigned long max = arch_scale_cpu_capacity(cpu);
- unsigned long used, free;
- unsigned long irq;
- irq = cpu_util_irq(rq);
- if (unlikely(irq >= max))
- return 1;
- /*
- * avg_rt.util_avg and avg_dl.util_avg track binary signals
- * (running and not running) with weights 0 and 1024 respectively.
- * avg_thermal.load_avg tracks thermal pressure and the weighted
- * average uses the actual delta max capacity(load).
- */
- used = READ_ONCE(rq->avg_rt.util_avg);
- used += READ_ONCE(rq->avg_dl.util_avg);
- used += thermal_load_avg(rq);
- if (unlikely(used >= max))
- return 1;
- free = max - used;
- return scale_irq_capacity(free, irq, max);
- }
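A standalone sketch of scale_rt_capacity()'s arithmetic with assumed numbers (this is not kernel code; the IRQ step is modelled as free * (max - irq) / max, which is how the kernel's scale_irq_capacity() helper is commonly defined):

#include <stdio.h>

/* Capacity left for CFS after RT, DL, thermal and IRQ pressure. */
static unsigned long cfs_capacity(unsigned long max, unsigned long rt,
				  unsigned long dl, unsigned long thermal,
				  unsigned long irq)
{
	unsigned long used = rt + dl + thermal;
	unsigned long free;

	if (irq >= max || used >= max)
		return 1;

	free = max - used;
	return free * (max - irq) / max;	/* modelled scale_irq_capacity() */
}

int main(void)
{
	/* 1024 capacity, 100 RT + 50 DL + 24 thermal, 64 IRQ: 850 * 960 / 1024 = 796 */
	printf("%lu\n", cfs_capacity(1024, 100, 50, 24, 64));
	return 0;
}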
- static void update_cpu_capacity(struct sched_domain *sd, int cpu)
- {
- unsigned long capacity = scale_rt_capacity(cpu);
- struct sched_group *sdg = sd->groups;
- cpu_rq(cpu)->cpu_capacity_orig = arch_scale_cpu_capacity(cpu);
- if (!capacity)
- capacity = 1;
- trace_android_rvh_update_cpu_capacity(cpu, &capacity);
- cpu_rq(cpu)->cpu_capacity = capacity;
- trace_sched_cpu_capacity_tp(cpu_rq(cpu));
- sdg->sgc->capacity = capacity;
- sdg->sgc->min_capacity = capacity;
- sdg->sgc->max_capacity = capacity;
- }
- void update_group_capacity(struct sched_domain *sd, int cpu)
- {
- struct sched_domain *child = sd->child;
- struct sched_group *group, *sdg = sd->groups;
- unsigned long capacity, min_capacity, max_capacity;
- unsigned long interval;
- interval = msecs_to_jiffies(sd->balance_interval);
- interval = clamp(interval, 1UL, max_load_balance_interval);
- sdg->sgc->next_update = jiffies + interval;
- if (!child) {
- update_cpu_capacity(sd, cpu);
- return;
- }
- capacity = 0;
- min_capacity = ULONG_MAX;
- max_capacity = 0;
- if (child->flags & SD_OVERLAP) {
- /*
- * SD_OVERLAP domains cannot assume that child groups
- * span the current group.
- */
- for_each_cpu(cpu, sched_group_span(sdg)) {
- unsigned long cpu_cap = capacity_of(cpu);
- capacity += cpu_cap;
- min_capacity = min(cpu_cap, min_capacity);
- max_capacity = max(cpu_cap, max_capacity);
- }
- } else {
- /*
- * !SD_OVERLAP domains can assume that child groups
- * span the current group.
- */
- group = child->groups;
- do {
- struct sched_group_capacity *sgc = group->sgc;
- capacity += sgc->capacity;
- min_capacity = min(sgc->min_capacity, min_capacity);
- max_capacity = max(sgc->max_capacity, max_capacity);
- group = group->next;
- } while (group != child->groups);
- }
- sdg->sgc->capacity = capacity;
- sdg->sgc->min_capacity = min_capacity;
- sdg->sgc->max_capacity = max_capacity;
- }
- /*
- * Check whether the capacity of the rq has been noticeably reduced by side
- * activity. The imbalance_pct is used for the threshold.
- * Return true if the capacity is reduced.
- */
- static inline int
- check_cpu_capacity(struct rq *rq, struct sched_domain *sd)
- {
- return ((rq->cpu_capacity * sd->imbalance_pct) <
- (rq->cpu_capacity_orig * 100));
- }
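For example, with an imbalance_pct of 117 (the LLC value cited in the update_idle_cpu_scan() comment further down) and cpu_capacity_orig of 1024, the check fires once cpu_capacity drops below roughly 875 (1024 * 100 / 117). A minimal userspace sketch of the same comparison:

#include <stdio.h>

/* Same comparison as check_cpu_capacity(), with example numbers. */
static int capacity_noticeably_reduced(unsigned long cpu_capacity,
				       unsigned long cpu_capacity_orig,
				       unsigned int imbalance_pct)
{
	return cpu_capacity * imbalance_pct < cpu_capacity_orig * 100;
}

int main(void)
{
	/* With imbalance_pct = 117 and orig = 1024, the threshold is ~875. */
	printf("%d\n", capacity_noticeably_reduced(900, 1024, 117));	/* 0 */
	printf("%d\n", capacity_noticeably_reduced(800, 1024, 117));	/* 1 */
	return 0;
}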
- /*
- * Check whether a rq has a misfit task and if it looks like we can actually
- * help that task: we can migrate the task to a CPU of higher capacity, or
- * the task's current CPU is heavily pressured.
- */
- static inline int check_misfit_status(struct rq *rq, struct sched_domain *sd)
- {
- return rq->misfit_task_load &&
- (rq->cpu_capacity_orig < rq->rd->max_cpu_capacity ||
- check_cpu_capacity(rq, sd));
- }
- /*
- * Group imbalance indicates (and tries to solve) the problem where balancing
- * groups is inadequate due to ->cpus_ptr constraints.
- *
- * Imagine a situation of two groups of 4 CPUs each and 4 tasks each with a
- * cpumask covering 1 CPU of the first group and 3 CPUs of the second group.
- * Something like:
- *
- * { 0 1 2 3 } { 4 5 6 7 }
- * * * * *
- *
- * If we were to balance group-wise we'd place two tasks in the first group and
- * two tasks in the second group. Clearly this is undesired as it will overload
- * cpu 3 and leave one of the CPUs in the second group unused.
- *
- * The current solution to this issue is detecting the skew in the first group
- * by noticing the lower domain failed to reach balance and had difficulty
- * moving tasks due to affinity constraints.
- *
- * When this is so detected, this group becomes a candidate for busiest; see
- * update_sd_pick_busiest(). And calculate_imbalance() and
- * find_busiest_group() avoid some of the usual balance conditions to allow it
- * to create an effective group imbalance.
- *
- * This is a somewhat tricky proposition since the next run might not find the
- * group imbalance and decide the groups need to be balanced again. A most
- * subtle and fragile situation.
- */
- static inline int sg_imbalanced(struct sched_group *group)
- {
- return group->sgc->imbalance;
- }
- /*
- * group_has_capacity returns true if the group has spare capacity that could
- * be used by some tasks.
- * We consider that a group has spare capacity if the number of tasks is
- * smaller than the number of CPUs or if the utilization is lower than the
- * available capacity for CFS tasks.
- * For the latter, we use a threshold to stabilize the state, to take into
- * account the variance of the tasks' load and to return true if the available
- * capacity is meaningful for the load balancer.
- * As an example, an available capacity of 1% can appear but it doesn't bring
- * any benefit to load balancing.
- */
- static inline bool
- group_has_capacity(unsigned int imbalance_pct, struct sg_lb_stats *sgs)
- {
- if (sgs->sum_nr_running < sgs->group_weight)
- return true;
- if ((sgs->group_capacity * imbalance_pct) <
- (sgs->group_runnable * 100))
- return false;
- if ((sgs->group_capacity * 100) >
- (sgs->group_util * imbalance_pct))
- return true;
- return false;
- }
- /*
- * group_is_overloaded returns true if the group has more tasks than it can
- * handle.
- * group_is_overloaded is not equal to !group_has_capacity because a group
- * with exactly the right number of tasks has no more spare capacity but is not
- * overloaded, so both group_has_capacity and group_is_overloaded return
- * false.
- */
- static inline bool
- group_is_overloaded(unsigned int imbalance_pct, struct sg_lb_stats *sgs)
- {
- if (sgs->sum_nr_running <= sgs->group_weight)
- return false;
- if ((sgs->group_capacity * 100) <
- (sgs->group_util * imbalance_pct))
- return true;
- if ((sgs->group_capacity * imbalance_pct) <
- (sgs->group_runnable * 100))
- return true;
- return false;
- }
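To make the two thresholds concrete with assumed numbers: a 2-CPU group with 2048 of capacity and imbalance_pct 117 stops "having capacity" and becomes overloaded once it runs 3 tasks whose total utilization exceeds about 1750 (2048 * 100 / 117). A small standalone sketch mirroring both predicates on plain fields:

#include <stdbool.h>
#include <stdio.h>

struct stats {
	unsigned long capacity, util, runnable;
	unsigned int nr_running, weight;
};

/* Mirrors the group_has_capacity() checks above. */
static bool has_capacity(unsigned int pct, const struct stats *s)
{
	if (s->nr_running < s->weight)
		return true;
	if (s->capacity * pct < s->runnable * 100)
		return false;
	return s->capacity * 100 > s->util * pct;
}

/* Mirrors the group_is_overloaded() checks above. */
static bool is_overloaded(unsigned int pct, const struct stats *s)
{
	if (s->nr_running <= s->weight)
		return false;
	if (s->capacity * 100 < s->util * pct)
		return true;
	return s->capacity * pct < s->runnable * 100;
}

int main(void)
{
	/* 2 CPUs, 2048 capacity, 3 runnable tasks, util 1800 > ~1750. */
	struct stats s = { .capacity = 2048, .util = 1800, .runnable = 1800,
			   .nr_running = 3, .weight = 2 };

	printf("has_capacity=%d overloaded=%d\n",
	       has_capacity(117, &s), is_overloaded(117, &s));
	return 0;
}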
- static inline enum
- group_type group_classify(unsigned int imbalance_pct,
- struct sched_group *group,
- struct sg_lb_stats *sgs)
- {
- if (group_is_overloaded(imbalance_pct, sgs))
- return group_overloaded;
- if (sg_imbalanced(group))
- return group_imbalanced;
- if (sgs->group_asym_packing)
- return group_asym_packing;
- if (sgs->group_misfit_task_load)
- return group_misfit_task;
- if (!group_has_capacity(imbalance_pct, sgs))
- return group_fully_busy;
- return group_has_spare;
- }
- /**
- * asym_smt_can_pull_tasks - Check whether the load balancing CPU can pull tasks
- * @dst_cpu: Destination CPU of the load balancing
- * @sds: Load-balancing data with statistics of the local group
- * @sgs: Load-balancing statistics of the candidate busiest group
- * @sg: The candidate busiest group
- *
- * Check the state of the SMT siblings of both @sds::local and @sg and decide
- * if @dst_cpu can pull tasks.
- *
- * If @dst_cpu does not have SMT siblings, it can pull tasks if two or more of
- * the SMT siblings of @sg are busy. If only one CPU in @sg is busy, pull tasks
- * only if @dst_cpu has higher priority.
- *
- * If both @dst_cpu and @sg have SMT siblings, and @sg has exactly one more
- * busy CPU than @sds::local, let @dst_cpu pull tasks if it has higher priority.
- * Bigger imbalances in the number of busy CPUs will be dealt with in
- * update_sd_pick_busiest().
- *
- * If @sg does not have SMT siblings, only pull tasks if all of the SMT siblings
- * of @dst_cpu are idle and @sg has lower priority.
- *
- * Return: true if @dst_cpu can pull tasks, false otherwise.
- */
- static bool asym_smt_can_pull_tasks(int dst_cpu, struct sd_lb_stats *sds,
- struct sg_lb_stats *sgs,
- struct sched_group *sg)
- {
- #ifdef CONFIG_SCHED_SMT
- bool local_is_smt, sg_is_smt;
- int sg_busy_cpus;
- local_is_smt = sds->local->flags & SD_SHARE_CPUCAPACITY;
- sg_is_smt = sg->flags & SD_SHARE_CPUCAPACITY;
- sg_busy_cpus = sgs->group_weight - sgs->idle_cpus;
- if (!local_is_smt) {
- /*
- * If we are here, @dst_cpu is idle and does not have SMT
- * siblings. Pull tasks if candidate group has two or more
- * busy CPUs.
- */
- if (sg_busy_cpus >= 2) /* implies sg_is_smt */
- return true;
- /*
- * @dst_cpu does not have SMT siblings. @sg may have SMT
- * siblings and only one is busy. In such case, @dst_cpu
- * can help if it has higher priority and is idle (i.e.,
- * it has no running tasks).
- */
- return sched_asym_prefer(dst_cpu, sg->asym_prefer_cpu);
- }
- /* @dst_cpu has SMT siblings. */
- if (sg_is_smt) {
- int local_busy_cpus = sds->local->group_weight -
- sds->local_stat.idle_cpus;
- int busy_cpus_delta = sg_busy_cpus - local_busy_cpus;
- if (busy_cpus_delta == 1)
- return sched_asym_prefer(dst_cpu, sg->asym_prefer_cpu);
- return false;
- }
- /*
- * @sg does not have SMT siblings. Ensure that @sds::local does not end
- * up with more than one busy SMT sibling and only pull tasks if there
- * are no busy CPUs (i.e., no CPU has running tasks).
- */
- if (!sds->local_stat.sum_nr_running)
- return sched_asym_prefer(dst_cpu, sg->asym_prefer_cpu);
- return false;
- #else
- /* Always return false so that callers deal with non-SMT cases. */
- return false;
- #endif
- }
- static inline bool
- sched_asym(struct lb_env *env, struct sd_lb_stats *sds, struct sg_lb_stats *sgs,
- struct sched_group *group)
- {
- /* Only do SMT checks if either local or candidate have SMT siblings */
- if ((sds->local->flags & SD_SHARE_CPUCAPACITY) ||
- (group->flags & SD_SHARE_CPUCAPACITY))
- return asym_smt_can_pull_tasks(env->dst_cpu, sds, sgs, group);
- return sched_asym_prefer(env->dst_cpu, group->asym_prefer_cpu);
- }
- static inline bool
- sched_reduced_capacity(struct rq *rq, struct sched_domain *sd)
- {
- /*
- * When there is more than 1 task, the group_overloaded case already
- * takes care of cpu with reduced capacity
- */
- if (rq->cfs.h_nr_running != 1)
- return false;
- return check_cpu_capacity(rq, sd);
- }
- /**
- * update_sg_lb_stats - Update sched_group's statistics for load balancing.
- * @env: The load balancing environment.
- * @sds: Load-balancing data with statistics of the local group.
- * @group: sched_group whose statistics are to be updated.
- * @sgs: variable to hold the statistics for this group.
- * @sg_status: Holds flag indicating the status of the sched_group
- */
- static inline void update_sg_lb_stats(struct lb_env *env,
- struct sd_lb_stats *sds,
- struct sched_group *group,
- struct sg_lb_stats *sgs,
- int *sg_status)
- {
- int i, nr_running, local_group;
- memset(sgs, 0, sizeof(*sgs));
- local_group = group == sds->local;
- for_each_cpu_and(i, sched_group_span(group), env->cpus) {
- struct rq *rq = cpu_rq(i);
- unsigned long load = cpu_load(rq);
- sgs->group_load += load;
- sgs->group_util += cpu_util_cfs(i);
- sgs->group_runnable += cpu_runnable(rq);
- sgs->sum_h_nr_running += rq->cfs.h_nr_running;
- nr_running = rq->nr_running;
- sgs->sum_nr_running += nr_running;
- if (nr_running > 1)
- *sg_status |= SG_OVERLOAD;
- if (cpu_overutilized(i))
- *sg_status |= SG_OVERUTILIZED;
- #ifdef CONFIG_NUMA_BALANCING
- sgs->nr_numa_running += rq->nr_numa_running;
- sgs->nr_preferred_running += rq->nr_preferred_running;
- #endif
- /*
- * No need to call idle_cpu() if nr_running is not 0
- */
- if (!nr_running && idle_cpu(i)) {
- sgs->idle_cpus++;
- /* Idle cpu can't have misfit task */
- continue;
- }
- if (local_group)
- continue;
- if (env->sd->flags & SD_ASYM_CPUCAPACITY) {
- /* Check for a misfit task on the cpu */
- if (sgs->group_misfit_task_load < rq->misfit_task_load) {
- sgs->group_misfit_task_load = rq->misfit_task_load;
- *sg_status |= SG_OVERLOAD;
- }
- } else if ((env->idle != CPU_NOT_IDLE) &&
- sched_reduced_capacity(rq, env->sd)) {
- /* Check for a task running on a CPU with reduced capacity */
- if (sgs->group_misfit_task_load < load)
- sgs->group_misfit_task_load = load;
- }
- }
- sgs->group_capacity = group->sgc->capacity;
- sgs->group_weight = group->group_weight;
- /* Check if dst CPU is idle and preferred to this group */
- if (!local_group && env->sd->flags & SD_ASYM_PACKING &&
- env->idle != CPU_NOT_IDLE && sgs->sum_h_nr_running &&
- sched_asym(env, sds, sgs, group)) {
- sgs->group_asym_packing = 1;
- }
- sgs->group_type = group_classify(env->sd->imbalance_pct, group, sgs);
- /* Computing avg_load makes sense only when group is overloaded */
- if (sgs->group_type == group_overloaded)
- sgs->avg_load = (sgs->group_load * SCHED_CAPACITY_SCALE) /
- sgs->group_capacity;
- }
- /**
- * update_sd_pick_busiest - return 1 on busiest group
- * @env: The load balancing environment.
- * @sds: sched_domain statistics
- * @sg: sched_group candidate to be checked for being the busiest
- * @sgs: sched_group statistics
- *
- * Determine if @sg is a busier group than the previously selected
- * busiest group.
- *
- * Return: %true if @sg is a busier group than the previously selected
- * busiest group. %false otherwise.
- */
- static bool update_sd_pick_busiest(struct lb_env *env,
- struct sd_lb_stats *sds,
- struct sched_group *sg,
- struct sg_lb_stats *sgs)
- {
- struct sg_lb_stats *busiest = &sds->busiest_stat;
- /* Make sure that there is at least one task to pull */
- if (!sgs->sum_h_nr_running)
- return false;
- /*
- * Don't try to pull misfit tasks we can't help.
- * We can use max_capacity here as reduction in capacity on some
- * CPUs in the group should either be possible to resolve
- * internally or be covered by avg_load imbalance (eventually).
- */
- if ((env->sd->flags & SD_ASYM_CPUCAPACITY) &&
- (sgs->group_type == group_misfit_task) &&
- (!capacity_greater(capacity_of(env->dst_cpu), sg->sgc->max_capacity) ||
- sds->local_stat.group_type != group_has_spare))
- return false;
- if (sgs->group_type > busiest->group_type)
- return true;
- if (sgs->group_type < busiest->group_type)
- return false;
- /*
- * The candidate and the current busiest group are the same type of
- * group. Let's check which one is the busiest according to the type.
- */
- switch (sgs->group_type) {
- case group_overloaded:
- /* Select the overloaded group with highest avg_load. */
- if (sgs->avg_load <= busiest->avg_load)
- return false;
- break;
- case group_imbalanced:
- /*
- * Select the 1st imbalanced group as we don't have any way to
- * choose one more than another.
- */
- return false;
- case group_asym_packing:
- /* Prefer to move work from the lowest-priority CPU */
- if (sched_asym_prefer(sg->asym_prefer_cpu, sds->busiest->asym_prefer_cpu))
- return false;
- break;
- case group_misfit_task:
- /*
- * If we have more than one misfit sg go with the biggest
- * misfit.
- */
- if (sgs->group_misfit_task_load < busiest->group_misfit_task_load)
- return false;
- break;
- case group_fully_busy:
- /*
- * Select the fully busy group with highest avg_load. In
- * theory, there is no need to pull tasks from such a
- * group because tasks have all the compute capacity they need,
- * but we can still improve the overall throughput by reducing
- * contention when accessing shared HW resources.
- *
- * XXX for now avg_load is not computed and always 0 so we
- * select the 1st one.
- */
- if (sgs->avg_load <= busiest->avg_load)
- return false;
- break;
- case group_has_spare:
- /*
- * Select not overloaded group with lowest number of idle cpus
- * and highest number of running tasks. We could also compare
- * the spare capacity, which is more stable, but a group can end
- * up with less spare capacity yet more idle CPUs, which means
- * fewer opportunities to pull tasks.
- */
- if (sgs->idle_cpus > busiest->idle_cpus)
- return false;
- else if ((sgs->idle_cpus == busiest->idle_cpus) &&
- (sgs->sum_nr_running <= busiest->sum_nr_running))
- return false;
- break;
- }
- /*
- * Candidate sg has no more than one task per CPU and has higher
- * per-CPU capacity. Migrating tasks to less capable CPUs may harm
- * throughput. Maximize throughput, power/energy consequences are not
- * considered.
- */
- if ((env->sd->flags & SD_ASYM_CPUCAPACITY) &&
- (sgs->group_type <= group_fully_busy) &&
- (capacity_greater(sg->sgc->min_capacity, capacity_of(env->dst_cpu))))
- return false;
- return true;
- }
- #ifdef CONFIG_NUMA_BALANCING
- static inline enum fbq_type fbq_classify_group(struct sg_lb_stats *sgs)
- {
- if (sgs->sum_h_nr_running > sgs->nr_numa_running)
- return regular;
- if (sgs->sum_h_nr_running > sgs->nr_preferred_running)
- return remote;
- return all;
- }
- static inline enum fbq_type fbq_classify_rq(struct rq *rq)
- {
- if (rq->nr_running > rq->nr_numa_running)
- return regular;
- if (rq->nr_running > rq->nr_preferred_running)
- return remote;
- return all;
- }
- #else
- static inline enum fbq_type fbq_classify_group(struct sg_lb_stats *sgs)
- {
- return all;
- }
- static inline enum fbq_type fbq_classify_rq(struct rq *rq)
- {
- return regular;
- }
- #endif /* CONFIG_NUMA_BALANCING */
- struct sg_lb_stats;
- /*
- * task_running_on_cpu - return 1 if @p is running on @cpu.
- */
- static unsigned int task_running_on_cpu(int cpu, struct task_struct *p)
- {
- /* Task has no contribution or is new */
- if (cpu != task_cpu(p) || !READ_ONCE(p->se.avg.last_update_time))
- return 0;
- if (task_on_rq_queued(p))
- return 1;
- return 0;
- }
- /**
- * idle_cpu_without - would a given CPU be idle without p ?
- * @cpu: the processor on which idleness is tested.
- * @p: task which should be ignored.
- *
- * Return: 1 if the CPU would be idle. 0 otherwise.
- */
- static int idle_cpu_without(int cpu, struct task_struct *p)
- {
- struct rq *rq = cpu_rq(cpu);
- if (rq->curr != rq->idle && rq->curr != p)
- return 0;
- /*
- * rq->nr_running can't be used but an updated version without the
- * impact of p on cpu must be used instead. The updated nr_running must
- * be computed and tested before calling idle_cpu_without().
- */
- #ifdef CONFIG_SMP
- if (rq->ttwu_pending)
- return 0;
- #endif
- return 1;
- }
- /*
- * update_sg_wakeup_stats - Update sched_group's statistics for wakeup.
- * @sd: The sched_domain level to look for idlest group.
- * @group: sched_group whose statistics are to be updated.
- * @sgs: variable to hold the statistics for this group.
- * @p: The task for which we look for the idlest group/CPU.
- */
- static inline void update_sg_wakeup_stats(struct sched_domain *sd,
- struct sched_group *group,
- struct sg_lb_stats *sgs,
- struct task_struct *p)
- {
- int i, nr_running;
- memset(sgs, 0, sizeof(*sgs));
- /* Assume that task can't fit any CPU of the group */
- if (sd->flags & SD_ASYM_CPUCAPACITY)
- sgs->group_misfit_task_load = 1;
- for_each_cpu(i, sched_group_span(group)) {
- struct rq *rq = cpu_rq(i);
- unsigned int local;
- sgs->group_load += cpu_load_without(rq, p);
- sgs->group_util += cpu_util_without(i, p);
- sgs->group_runnable += cpu_runnable_without(rq, p);
- local = task_running_on_cpu(i, p);
- sgs->sum_h_nr_running += rq->cfs.h_nr_running - local;
- nr_running = rq->nr_running - local;
- sgs->sum_nr_running += nr_running;
- /*
- * No need to call idle_cpu_without() if nr_running is not 0
- */
- if (!nr_running && idle_cpu_without(i, p))
- sgs->idle_cpus++;
- /* Check if task fits in the CPU */
- if (sd->flags & SD_ASYM_CPUCAPACITY &&
- sgs->group_misfit_task_load &&
- task_fits_cpu(p, i))
- sgs->group_misfit_task_load = 0;
- }
- sgs->group_capacity = group->sgc->capacity;
- sgs->group_weight = group->group_weight;
- sgs->group_type = group_classify(sd->imbalance_pct, group, sgs);
- /*
- * Computing avg_load makes sense only when group is fully busy or
- * overloaded
- */
- if (sgs->group_type == group_fully_busy ||
- sgs->group_type == group_overloaded)
- sgs->avg_load = (sgs->group_load * SCHED_CAPACITY_SCALE) /
- sgs->group_capacity;
- }
- static bool update_pick_idlest(struct sched_group *idlest,
- struct sg_lb_stats *idlest_sgs,
- struct sched_group *group,
- struct sg_lb_stats *sgs)
- {
- if (sgs->group_type < idlest_sgs->group_type)
- return true;
- if (sgs->group_type > idlest_sgs->group_type)
- return false;
- /*
- * The candidate and the current idlest group are the same type of
- * group. Let's check which one is the idlest according to the type.
- */
- switch (sgs->group_type) {
- case group_overloaded:
- case group_fully_busy:
- /* Select the group with lowest avg_load. */
- if (idlest_sgs->avg_load <= sgs->avg_load)
- return false;
- break;
- case group_imbalanced:
- case group_asym_packing:
- /* Those types are not used in the slow wakeup path */
- return false;
- case group_misfit_task:
- /* Select group with the highest max capacity */
- if (idlest->sgc->max_capacity >= group->sgc->max_capacity)
- return false;
- break;
- case group_has_spare:
- /* Select group with most idle CPUs */
- if (idlest_sgs->idle_cpus > sgs->idle_cpus)
- return false;
- /* Select group with lowest group_util */
- if (idlest_sgs->idle_cpus == sgs->idle_cpus &&
- idlest_sgs->group_util <= sgs->group_util)
- return false;
- break;
- }
- return true;
- }
- /*
- * find_idlest_group() finds and returns the least busy CPU group within the
- * domain.
- *
- * Assumes p is allowed on at least one CPU in sd.
- */
- static struct sched_group *
- find_idlest_group(struct sched_domain *sd, struct task_struct *p, int this_cpu)
- {
- struct sched_group *idlest = NULL, *local = NULL, *group = sd->groups;
- struct sg_lb_stats local_sgs, tmp_sgs;
- struct sg_lb_stats *sgs;
- unsigned long imbalance;
- struct sg_lb_stats idlest_sgs = {
- .avg_load = UINT_MAX,
- .group_type = group_overloaded,
- };
- do {
- int local_group;
- /* Skip over this group if it has no CPUs allowed */
- if (!cpumask_intersects(sched_group_span(group),
- p->cpus_ptr))
- continue;
- /* Skip over this group if no cookie matched */
- if (!sched_group_cookie_match(cpu_rq(this_cpu), p, group))
- continue;
- local_group = cpumask_test_cpu(this_cpu,
- sched_group_span(group));
- if (local_group) {
- sgs = &local_sgs;
- local = group;
- } else {
- sgs = &tmp_sgs;
- }
- update_sg_wakeup_stats(sd, group, sgs, p);
- if (!local_group && update_pick_idlest(idlest, &idlest_sgs, group, sgs)) {
- idlest = group;
- idlest_sgs = *sgs;
- }
- } while (group = group->next, group != sd->groups);
- /* There is no idlest group to push tasks to */
- if (!idlest)
- return NULL;
- /* The local group has been skipped because of CPU affinity */
- if (!local)
- return idlest;
- /*
- * If the local group is idler than the selected idlest group
- * don't try and push the task.
- */
- if (local_sgs.group_type < idlest_sgs.group_type)
- return NULL;
- /*
- * If the local group is busier than the selected idlest group
- * try and push the task.
- */
- if (local_sgs.group_type > idlest_sgs.group_type)
- return idlest;
- switch (local_sgs.group_type) {
- case group_overloaded:
- case group_fully_busy:
- /* Calculate allowed imbalance based on load */
- imbalance = scale_load_down(NICE_0_LOAD) *
- (sd->imbalance_pct-100) / 100;
- /*
- * When comparing groups across NUMA domains, it's possible for
- * the local domain to be very lightly loaded relative to the
- * remote domains but "imbalance" skews the comparison making
- * remote CPUs look much more favourable. When considering
- * cross-domain, add imbalance to the load on the remote node
- * and consider staying local.
- */
- if ((sd->flags & SD_NUMA) &&
- ((idlest_sgs.avg_load + imbalance) >= local_sgs.avg_load))
- return NULL;
- /*
- * If the local group is less loaded than the selected
- * idlest group don't try and push any tasks.
- */
- if (idlest_sgs.avg_load >= (local_sgs.avg_load + imbalance))
- return NULL;
- if (100 * local_sgs.avg_load <= sd->imbalance_pct * idlest_sgs.avg_load)
- return NULL;
- break;
- case group_imbalanced:
- case group_asym_packing:
- /* Those types are not used in the slow wakeup path */
- return NULL;
- case group_misfit_task:
- /* Select group with the highest max capacity */
- if (local->sgc->max_capacity >= idlest->sgc->max_capacity)
- return NULL;
- break;
- case group_has_spare:
- #ifdef CONFIG_NUMA
- if (sd->flags & SD_NUMA) {
- int imb_numa_nr = sd->imb_numa_nr;
- #ifdef CONFIG_NUMA_BALANCING
- int idlest_cpu;
- /*
- * If there is spare capacity at NUMA, try to select
- * the preferred node
- */
- if (cpu_to_node(this_cpu) == p->numa_preferred_nid)
- return NULL;
- idlest_cpu = cpumask_first(sched_group_span(idlest));
- if (cpu_to_node(idlest_cpu) == p->numa_preferred_nid)
- return idlest;
- #endif /* CONFIG_NUMA_BALANCING */
- /*
- * Otherwise, keep the task close to the wakeup source
- * and improve locality if the number of running tasks
- * would remain below threshold where an imbalance is
- * allowed while accounting for the possibility the
- * task is pinned to a subset of CPUs. If there is a
- * real need of migration, periodic load balance will
- * take care of it.
- */
- if (p->nr_cpus_allowed != NR_CPUS) {
- struct cpumask *cpus = this_cpu_cpumask_var_ptr(select_rq_mask);
- cpumask_and(cpus, sched_group_span(local), p->cpus_ptr);
- imb_numa_nr = min(cpumask_weight(cpus), sd->imb_numa_nr);
- }
- imbalance = abs(local_sgs.idle_cpus - idlest_sgs.idle_cpus);
- if (!adjust_numa_imbalance(imbalance,
- local_sgs.sum_nr_running + 1,
- imb_numa_nr)) {
- return NULL;
- }
- }
- #endif /* CONFIG_NUMA */
- /*
- * Select group with highest number of idle CPUs. We could also
- * compare the utilization, which is more stable, but a group can end
- * up with less spare capacity yet more idle CPUs, which means
- * more opportunity to run the task.
- */
- if (local_sgs.idle_cpus >= idlest_sgs.idle_cpus)
- return NULL;
- break;
- }
- return idlest;
- }
- static void update_idle_cpu_scan(struct lb_env *env,
- unsigned long sum_util)
- {
- struct sched_domain_shared *sd_share;
- int llc_weight, pct;
- u64 x, y, tmp;
- /*
- * Update the number of CPUs to scan in LLC domain, which could
- * be used as a hint in select_idle_cpu(). The update of sd_share
- * could be expensive because it is within a shared cache line.
- * So the write of this hint only occurs during periodic load
- * balancing, rather than CPU_NEWLY_IDLE, because the latter
- * can fire way more frequently than the former.
- */
- if (!sched_feat(SIS_UTIL) || env->idle == CPU_NEWLY_IDLE)
- return;
- llc_weight = per_cpu(sd_llc_size, env->dst_cpu);
- if (env->sd->span_weight != llc_weight)
- return;
- sd_share = rcu_dereference(per_cpu(sd_llc_shared, env->dst_cpu));
- if (!sd_share)
- return;
- /*
- * The number of CPUs to search drops as sum_util increases; when
- * sum_util hits 85% or above, the scan stops.
- * 85% is chosen as the threshold because it is roughly 100/117, i.e.
- * the imbalance_pct (117) at which an LLC sched group is overloaded.
- *
- * let y = SCHED_CAPACITY_SCALE - p * x^2 [1]
- * and y'= y / SCHED_CAPACITY_SCALE
- *
- * x is the ratio of sum_util compared to the CPU capacity:
- * x = sum_util / (llc_weight * SCHED_CAPACITY_SCALE)
- * y' is the ratio of CPUs to be scanned in the LLC domain,
- * and the number of CPUs to scan is calculated by:
- *
- * nr_scan = llc_weight * y' [2]
- *
- * When x hits the threshold of overloaded, AKA, when
- * x = 100 / pct, y drops to 0. According to [1],
- * p should be SCHED_CAPACITY_SCALE * pct^2 / 10000
- *
- * Scale x by SCHED_CAPACITY_SCALE:
- * x' = sum_util / llc_weight; [3]
- *
- * and finally [1] becomes:
- * y = SCHED_CAPACITY_SCALE -
- * x'^2 * pct^2 / (10000 * SCHED_CAPACITY_SCALE) [4]
- *
- */
- /* equation [3] */
- x = sum_util;
- do_div(x, llc_weight);
- /* equation [4] */
- pct = env->sd->imbalance_pct;
- tmp = x * x * pct * pct;
- do_div(tmp, 10000 * SCHED_CAPACITY_SCALE);
- tmp = min_t(long, tmp, SCHED_CAPACITY_SCALE);
- y = SCHED_CAPACITY_SCALE - tmp;
- /* equation [2] */
- y *= llc_weight;
- do_div(y, SCHED_CAPACITY_SCALE);
- if ((int)y != sd_share->nr_idle_scan)
- WRITE_ONCE(sd_share->nr_idle_scan, (int)y);
- }
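Plugging assumed numbers into equations [2]-[4] above: with llc_weight = 16, pct = 117 and sum_util = 8192 (so x' = 512, an LLC about half busy), tmp comes to about 350, y to about 674 and nr_scan to 10 CPUs; once x' reaches roughly 875 (85% busy) the scan drops to 0. A standalone sketch of the same integer math, using 1024 in place of SCHED_CAPACITY_SCALE:

#include <stdio.h>

/* Integer math of equations [2]-[4], reproduced outside the kernel. */
static int nr_idle_scan(unsigned long sum_util, int llc_weight, int pct)
{
	unsigned long long x = sum_util / llc_weight;			/* [3] */
	unsigned long long tmp = x * x * pct * pct / (10000ULL * 1024);
	unsigned long long y;

	if (tmp > 1024)
		tmp = 1024;
	y = 1024 - tmp;							/* [4] */
	return (int)(y * llc_weight / 1024);				/* [2] */
}

int main(void)
{
	printf("%d\n", nr_idle_scan(8192, 16, 117));	/* ~10 of 16 CPUs */
	printf("%d\n", nr_idle_scan(14000, 16, 117));	/* 0: LLC ~85%+ busy */
	return 0;
}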
- /**
- * update_sd_lb_stats - Update sched_domain's statistics for load balancing.
- * @env: The load balancing environment.
- * @sds: variable to hold the statistics for this sched_domain.
- */
- static inline void update_sd_lb_stats(struct lb_env *env, struct sd_lb_stats *sds)
- {
- struct sched_domain *child = env->sd->child;
- struct sched_group *sg = env->sd->groups;
- struct sg_lb_stats *local = &sds->local_stat;
- struct sg_lb_stats tmp_sgs;
- unsigned long sum_util = 0;
- int sg_status = 0;
- do {
- struct sg_lb_stats *sgs = &tmp_sgs;
- int local_group;
- local_group = cpumask_test_cpu(env->dst_cpu, sched_group_span(sg));
- if (local_group) {
- sds->local = sg;
- sgs = local;
- if (env->idle != CPU_NEWLY_IDLE ||
- time_after_eq(jiffies, sg->sgc->next_update))
- update_group_capacity(env->sd, env->dst_cpu);
- }
- update_sg_lb_stats(env, sds, sg, sgs, &sg_status);
- if (local_group)
- goto next_group;
- if (update_sd_pick_busiest(env, sds, sg, sgs)) {
- sds->busiest = sg;
- sds->busiest_stat = *sgs;
- }
- next_group:
- /* Now, start updating sd_lb_stats */
- sds->total_load += sgs->group_load;
- sds->total_capacity += sgs->group_capacity;
- sum_util += sgs->group_util;
- sg = sg->next;
- } while (sg != env->sd->groups);
- /* Tag domain that child domain prefers tasks go to siblings first */
- sds->prefer_sibling = child && child->flags & SD_PREFER_SIBLING;
- if (env->sd->flags & SD_NUMA)
- env->fbq_type = fbq_classify_group(&sds->busiest_stat);
- if (!env->sd->parent) {
- struct root_domain *rd = env->dst_rq->rd;
- /* update overload indicator if we are at root domain */
- WRITE_ONCE(rd->overload, sg_status & SG_OVERLOAD);
- /* Update over-utilization (tipping point, U >= 0) indicator */
- WRITE_ONCE(rd->overutilized, sg_status & SG_OVERUTILIZED);
- trace_sched_overutilized_tp(rd, sg_status & SG_OVERUTILIZED);
- } else if (sg_status & SG_OVERUTILIZED) {
- struct root_domain *rd = env->dst_rq->rd;
- WRITE_ONCE(rd->overutilized, SG_OVERUTILIZED);
- trace_sched_overutilized_tp(rd, SG_OVERUTILIZED);
- }
- update_idle_cpu_scan(env, sum_util);
- }
- /**
- * calculate_imbalance - Calculate the amount of imbalance present within the
- * groups of a given sched_domain during load balance.
- * @env: load balance environment
- * @sds: statistics of the sched_domain whose imbalance is to be calculated.
- */
- static inline void calculate_imbalance(struct lb_env *env, struct sd_lb_stats *sds)
- {
- struct sg_lb_stats *local, *busiest;
- local = &sds->local_stat;
- busiest = &sds->busiest_stat;
- if (busiest->group_type == group_misfit_task) {
- if (env->sd->flags & SD_ASYM_CPUCAPACITY) {
- /* Set imbalance to allow misfit tasks to be balanced. */
- env->migration_type = migrate_misfit;
- env->imbalance = 1;
- } else {
- /*
- * Set load imbalance to allow moving task from cpu
- * with reduced capacity.
- */
- env->migration_type = migrate_load;
- env->imbalance = busiest->group_misfit_task_load;
- }
- return;
- }
- if (busiest->group_type == group_asym_packing) {
- /*
- * In case of asym capacity, we will try to migrate all load to
- * the preferred CPU.
- */
- env->migration_type = migrate_task;
- env->imbalance = busiest->sum_h_nr_running;
- return;
- }
- if (busiest->group_type == group_imbalanced) {
- /*
- * In the group_imb case we cannot rely on group-wide averages
- * to ensure CPU-load equilibrium, try to move any task to fix
- * the imbalance. The next load balance will take care of
- * balancing back the system.
- */
- env->migration_type = migrate_task;
- env->imbalance = 1;
- return;
- }
- /*
- * Try to use spare capacity of local group without overloading it or
- * emptying busiest.
- */
- if (local->group_type == group_has_spare) {
- if ((busiest->group_type > group_fully_busy) &&
- !(env->sd->flags & SD_SHARE_PKG_RESOURCES)) {
- /*
- * If busiest is overloaded, try to fill spare
- * capacity. This might end up creating spare capacity
- * in busiest or busiest still being overloaded but
- * there is no simple way to directly compute the
- * amount of load to migrate in order to balance the
- * system.
- */
- env->migration_type = migrate_util;
- env->imbalance = max(local->group_capacity, local->group_util) -
- local->group_util;
- /*
- * In some cases, the group's utilization is max or even
- * higher than capacity because of migrations but the
- * local CPU is (newly) idle. There is at least one
- * waiting task in this overloaded busiest group. Let's
- * try to pull it.
- */
- if (env->idle != CPU_NOT_IDLE && env->imbalance == 0) {
- env->migration_type = migrate_task;
- env->imbalance = 1;
- }
- return;
- }
- if (busiest->group_weight == 1 || sds->prefer_sibling) {
- unsigned int nr_diff = busiest->sum_nr_running;
- /*
- * When prefer sibling, evenly spread running tasks on
- * groups.
- */
- env->migration_type = migrate_task;
- lsub_positive(&nr_diff, local->sum_nr_running);
- env->imbalance = nr_diff;
- } else {
- /*
- * If there is no overload, we just want to even the number of
- * idle cpus.
- */
- env->migration_type = migrate_task;
- env->imbalance = max_t(long, 0,
- (local->idle_cpus - busiest->idle_cpus));
- }
- #ifdef CONFIG_NUMA
- /* Consider allowing a small imbalance between NUMA groups */
- if (env->sd->flags & SD_NUMA) {
- env->imbalance = adjust_numa_imbalance(env->imbalance,
- local->sum_nr_running + 1,
- env->sd->imb_numa_nr);
- }
- #endif
- /* Number of tasks to move to restore balance */
- env->imbalance >>= 1;
- return;
- }
- /*
- * Local is fully busy but has to take more load to relieve the
- * busiest group
- */
- if (local->group_type < group_overloaded) {
- /*
- * Local will become overloaded so the avg_load metrics are
- * finally needed.
- */
- local->avg_load = (local->group_load * SCHED_CAPACITY_SCALE) /
- local->group_capacity;
- /*
- * If the local group is more loaded than the selected
- * busiest group don't try to pull any tasks.
- */
- if (local->avg_load >= busiest->avg_load) {
- env->imbalance = 0;
- return;
- }
- sds->avg_load = (sds->total_load * SCHED_CAPACITY_SCALE) /
- sds->total_capacity;
- /*
- * If the local group is more loaded than the average system
- * load, don't try to pull any tasks.
- */
- if (local->avg_load >= sds->avg_load) {
- env->imbalance = 0;
- return;
- }
- }
- /*
- * Both groups are or will become overloaded and we're trying to get all
- * the CPUs to the average_load, so we don't want to push ourselves
- * above the average load, nor do we wish to reduce the max loaded CPU
- * below the average load. At the same time, we also don't want to
- * reduce the group load below the group capacity. Thus we look for
- * the minimum possible imbalance.
- */
- env->migration_type = migrate_load;
- env->imbalance = min(
- (busiest->avg_load - sds->avg_load) * busiest->group_capacity,
- (sds->avg_load - local->avg_load) * local->group_capacity
- ) / SCHED_CAPACITY_SCALE;
- }
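A worked example of the final min() with assumed numbers: busiest avg_load 1200, local avg_load 700, domain average 900 and both group capacities 1024; the imbalance is capped at min(300, 200) * 1024 / 1024 = 200 load units, so we pull only up to the domain average instead of overshooting it. A tiny standalone sketch:

#include <stdio.h>

/* The two bounds from calculate_imbalance()'s final step, example values. */
int main(void)
{
	unsigned long scale = 1024;			/* stands in for SCHED_CAPACITY_SCALE */
	unsigned long busiest_avg = 1200, local_avg = 700, sds_avg = 900;
	unsigned long busiest_cap = 1024, local_cap = 1024;

	unsigned long pull_from_busiest = (busiest_avg - sds_avg) * busiest_cap;
	unsigned long room_on_local = (sds_avg - local_avg) * local_cap;
	unsigned long imbalance = (pull_from_busiest < room_on_local ?
				   pull_from_busiest : room_on_local) / scale;

	printf("imbalance=%lu\n", imbalance);		/* 200 */
	return 0;
}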
- /******* find_busiest_group() helpers end here *********************/
- /*
- * Decision matrix according to the local and busiest group type:
- *
- * busiest \ local has_spare fully_busy misfit asym imbalanced overloaded
- * has_spare nr_idle balanced N/A N/A balanced balanced
- * fully_busy nr_idle nr_idle N/A N/A balanced balanced
- * misfit_task force N/A N/A N/A N/A N/A
- * asym_packing force force N/A N/A force force
- * imbalanced force force N/A N/A force force
- * overloaded force force N/A N/A force avg_load
- *
- * N/A : Not Applicable because already filtered while updating
- * statistics.
- * balanced : The system is balanced for these 2 groups.
- * force : Calculate the imbalance as load migration is probably needed.
- * avg_load : Only if imbalance is significant enough.
- * nr_idle : dst_cpu is not busy and the number of idle CPUs is quite
- * different in groups.
- */
- /**
- * find_busiest_group - Returns the busiest group within the sched_domain
- * if there is an imbalance.
- * @env: The load balancing environment.
- *
- * Also calculates the amount of runnable load which should be moved
- * to restore balance.
- *
- * Return: - The busiest group if an imbalance exists.
- */
- static struct sched_group *find_busiest_group(struct lb_env *env)
- {
- struct sg_lb_stats *local, *busiest;
- struct sd_lb_stats sds;
- init_sd_lb_stats(&sds);
- /*
- * Compute the various statistics relevant for load balancing at
- * this level.
- */
- update_sd_lb_stats(env, &sds);
- /* There is no busy sibling group to pull tasks from */
- if (!sds.busiest)
- goto out_balanced;
- busiest = &sds.busiest_stat;
- /* Misfit tasks should be dealt with regardless of the avg load */
- if (busiest->group_type == group_misfit_task)
- goto force_balance;
- if (sched_energy_enabled()) {
- struct root_domain *rd = env->dst_rq->rd;
- int out_balance = 1;
- trace_android_rvh_find_busiest_group(sds.busiest, env->dst_rq,
- &out_balance);
- if (rcu_dereference(rd->pd) && !READ_ONCE(rd->overutilized)
- && out_balance)
- goto out_balanced;
- }
- /* ASYM feature bypasses nice load balance check */
- if (busiest->group_type == group_asym_packing)
- goto force_balance;
- /*
- * If the busiest group is imbalanced the below checks don't
- * work because they assume all things are equal, which typically
- * isn't true due to cpus_ptr constraints and the like.
- */
- if (busiest->group_type == group_imbalanced)
- goto force_balance;
- local = &sds.local_stat;
- /*
- * If the local group is busier than the selected busiest group
- * don't try and pull any tasks.
- */
- if (local->group_type > busiest->group_type)
- goto out_balanced;
- /*
- * When groups are overloaded, use the avg_load to ensure fairness
- * between tasks.
- */
- if (local->group_type == group_overloaded) {
- /*
- * If the local group is more loaded than the selected
- * busiest group don't try to pull any tasks.
- */
- if (local->avg_load >= busiest->avg_load)
- goto out_balanced;
- /* XXX broken for overlapping NUMA groups */
- sds.avg_load = (sds.total_load * SCHED_CAPACITY_SCALE) /
- sds.total_capacity;
- /*
- * Don't pull any tasks if this group is already above the
- * domain average load.
- */
- if (local->avg_load >= sds.avg_load)
- goto out_balanced;
- /*
- * If the busiest group is more loaded, use imbalance_pct to be
- * conservative.
- */
- if (100 * busiest->avg_load <=
- env->sd->imbalance_pct * local->avg_load)
- goto out_balanced;
- }
- /* Try to move all excess tasks to child's sibling domain */
- if (sds.prefer_sibling && local->group_type == group_has_spare &&
- busiest->sum_nr_running > local->sum_nr_running + 1)
- goto force_balance;
- if (busiest->group_type != group_overloaded) {
- if (env->idle == CPU_NOT_IDLE)
- /*
- * If the busiest group is not overloaded (and as a
- * result the local one too) but this CPU is already
- * busy, let another idle CPU try to pull task.
- */
- goto out_balanced;
- if (busiest->group_weight > 1 &&
- local->idle_cpus <= (busiest->idle_cpus + 1))
- /*
- * If the busiest group is not overloaded
- * and there is no imbalance between this and busiest
- * group wrt idle CPUs, it is balanced. The imbalance
- * becomes significant if the diff is greater than 1;
- * otherwise we might end up just moving the imbalance
- * to another group. Of course this applies only if
- * there is more than 1 CPU per group.
- */
- goto out_balanced;
- if (busiest->sum_h_nr_running == 1)
- /*
- * busiest doesn't have any tasks waiting to run
- */
- goto out_balanced;
- }
- force_balance:
- /* Looks like there is an imbalance. Compute it */
- calculate_imbalance(env, &sds);
- return env->imbalance ? sds.busiest : NULL;
- out_balanced:
- env->imbalance = 0;
- return NULL;
- }
- /*
- * find_busiest_queue - find the busiest runqueue among the CPUs in the group.
- */
- static struct rq *find_busiest_queue(struct lb_env *env,
- struct sched_group *group)
- {
- struct rq *busiest = NULL, *rq;
- unsigned long busiest_util = 0, busiest_load = 0, busiest_capacity = 1;
- unsigned int busiest_nr = 0;
- int i, done = 0;
- trace_android_rvh_find_busiest_queue(env->dst_cpu, group, env->cpus,
- &busiest, &done);
- if (done)
- return busiest;
- for_each_cpu_and(i, sched_group_span(group), env->cpus) {
- unsigned long capacity, load, util;
- unsigned int nr_running;
- enum fbq_type rt;
- rq = cpu_rq(i);
- rt = fbq_classify_rq(rq);
- /*
- * We classify groups/runqueues into three groups:
- * - regular: there are !numa tasks
- * - remote: there are numa tasks that run on the 'wrong' node
- * - all: there is no distinction
- *
- * In order to avoid migrating ideally placed numa tasks,
- * ignore those when there are better options.
- *
- * If we ignore the actual busiest queue to migrate another
- * task, the next balance pass can still reduce the busiest
- * queue by moving tasks around inside the node.
- *
- * If we cannot move enough load due to this classification
- * the next pass will adjust the group classification and
- * allow migration of more tasks.
- *
- * Both cases only affect the total convergence complexity.
- */
- if (rt > env->fbq_type)
- continue;
- nr_running = rq->cfs.h_nr_running;
- if (!nr_running)
- continue;
- capacity = capacity_of(i);
- /*
- * For ASYM_CPUCAPACITY domains, don't pick a CPU that could
- * eventually lead to active_balancing high->low capacity.
- * Higher per-CPU capacity is considered better than balancing
- * average load.
- */
- if (env->sd->flags & SD_ASYM_CPUCAPACITY &&
- !capacity_greater(capacity_of(env->dst_cpu), capacity) &&
- nr_running == 1)
- continue;
- /* Make sure we only pull tasks from a CPU of lower priority */
- if ((env->sd->flags & SD_ASYM_PACKING) &&
- sched_asym_prefer(i, env->dst_cpu) &&
- nr_running == 1)
- continue;
- switch (env->migration_type) {
- case migrate_load:
- /*
- * When comparing with load imbalance, use cpu_load()
- * which is not scaled with the CPU capacity.
- */
- load = cpu_load(rq);
- if (nr_running == 1 && load > env->imbalance &&
- !check_cpu_capacity(rq, env->sd))
- break;
- /*
- * For the load comparisons with the other CPUs,
- * consider the cpu_load() scaled with the CPU
- * capacity, so that the load can be moved away
- * from the CPU that is potentially running at a
- * lower capacity.
- *
- * Thus we're looking for max(load_i / capacity_i),
- * crosswise multiplication to rid ourselves of the
- * division works out to:
- * load_i * capacity_j > load_j * capacity_i;
- * where j is our previous maximum.
- */
- if (load * busiest_capacity > busiest_load * capacity) {
- busiest_load = load;
- busiest_capacity = capacity;
- busiest = rq;
- }
- break;
- case migrate_util:
- util = cpu_util_cfs(i);
- /*
- * Don't try to pull utilization from a CPU with one
- * running task. Whatever its utilization, we will fail to
- * detach the task.
- */
- if (nr_running <= 1)
- continue;
- if (busiest_util < util) {
- busiest_util = util;
- busiest = rq;
- }
- break;
- case migrate_task:
- if (busiest_nr < nr_running) {
- busiest_nr = nr_running;
- busiest = rq;
- }
- break;
- case migrate_misfit:
- /*
- * For ASYM_CPUCAPACITY domains with misfit tasks we
- * simply seek the "biggest" misfit task.
- */
- if (rq->misfit_task_load > busiest_load) {
- busiest_load = rq->misfit_task_load;
- busiest = rq;
- }
- break;
- }
- }
- return busiest;
- }
- /*
- * Max backoff if we encounter pinned tasks. The value is fairly arbitrary,
- * but it does not matter so long as it is large enough.
- */
- #define MAX_PINNED_INTERVAL 512
- static inline bool
- asym_active_balance(struct lb_env *env)
- {
- /*
- * ASYM_PACKING needs to force migrate tasks from busy but
- * lower priority CPUs in order to pack all tasks in the
- * highest priority CPUs.
- */
- return env->idle != CPU_NOT_IDLE && (env->sd->flags & SD_ASYM_PACKING) &&
- sched_asym_prefer(env->dst_cpu, env->src_cpu);
- }
- static inline bool
- imbalanced_active_balance(struct lb_env *env)
- {
- struct sched_domain *sd = env->sd;
- /*
- * The imbalanced case covers pinned tasks preventing a fair distribution of
- * the load across the system, but also the even spreading of threads on a
- * system with spare capacity.
- */
- if ((env->migration_type == migrate_task) &&
- (sd->nr_balance_failed > sd->cache_nice_tries+2))
- return 1;
- return 0;
- }
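- /*
- * need_active_balance - decide whether to fall back to active balancing,
- * i.e. pushing away the task currently running on the source CPU, e.g. for
- * ASYM_PACKING, repeated balance failures, a source CPU whose capacity is
- * reduced by other classes/IRQs, or a misfit task.
- */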
- static int need_active_balance(struct lb_env *env)
- {
- struct sched_domain *sd = env->sd;
- if (asym_active_balance(env))
- return 1;
- if (imbalanced_active_balance(env))
- return 1;
- /*
- * The dst_cpu is idle and the src_cpu CPU has only 1 CFS task.
- * It's worth migrating the task if the src_cpu's capacity is reduced
- * because of other sched_class or IRQs if more capacity stays
- * available on dst_cpu.
- */
- if ((env->idle != CPU_NOT_IDLE) &&
- (env->src_rq->cfs.h_nr_running == 1)) {
- if ((check_cpu_capacity(env->src_rq, sd)) &&
- (capacity_of(env->src_cpu)*sd->imbalance_pct < capacity_of(env->dst_cpu)*100))
- return 1;
- }
- if (env->migration_type == migrate_misfit)
- return 1;
- return 0;
- }
- static int active_load_balance_cpu_stop(void *data);
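- /*
- * should_we_balance - only one CPU runs the periodic balance for this
- * domain: the first idle CPU in the group balance mask, or the designated
- * group balance CPU if none is idle. A newly idle CPU may always balance,
- * unless it already has work queued or a wakeup pending.
- */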
- static int should_we_balance(struct lb_env *env)
- {
- struct sched_group *sg = env->sd->groups;
- int cpu;
- /*
- * Ensure the balancing environment is consistent; can happen
- * when the softirq triggers 'during' hotplug.
- */
- if (!cpumask_test_cpu(env->dst_cpu, env->cpus))
- return 0;
- /*
- * In the newly idle case, we will allow all the CPUs
- * to do the newly idle load balance.
- *
- * However, we bail out if we already have tasks or a wakeup pending,
- * to optimize wakeup latency.
- */
- if (env->idle == CPU_NEWLY_IDLE) {
- if (env->dst_rq->nr_running > 0 || env->dst_rq->ttwu_pending)
- return 0;
- return 1;
- }
- /* Try to find first idle CPU */
- for_each_cpu_and(cpu, group_balance_mask(sg), env->cpus) {
- if (!idle_cpu(cpu))
- continue;
- /* Are we the first idle CPU? */
- return cpu == env->dst_cpu;
- }
- /* Are we the first CPU of this group ? */
- return group_balance_cpu(sg) == env->dst_cpu;
- }
- /*
- * Check this_cpu to ensure it is balanced within domain. Attempt to move
- * tasks if there is an imbalance.
- */
- static int load_balance(int this_cpu, struct rq *this_rq,
- struct sched_domain *sd, enum cpu_idle_type idle,
- int *continue_balancing)
- {
- int ld_moved, cur_ld_moved, active_balance = 0;
- struct sched_domain *sd_parent = sd->parent;
- struct sched_group *group;
- struct rq *busiest;
- struct rq_flags rf;
- struct cpumask *cpus = this_cpu_cpumask_var_ptr(load_balance_mask);
- struct lb_env env = {
- .sd = sd,
- .dst_cpu = this_cpu,
- .dst_rq = this_rq,
- .dst_grpmask = group_balance_mask(sd->groups),
- .idle = idle,
- .loop_break = SCHED_NR_MIGRATE_BREAK,
- .cpus = cpus,
- .fbq_type = all,
- .tasks = LIST_HEAD_INIT(env.tasks),
- };
- cpumask_and(cpus, sched_domain_span(sd), cpu_active_mask);
- schedstat_inc(sd->lb_count[idle]);
- redo:
- if (!should_we_balance(&env)) {
- *continue_balancing = 0;
- goto out_balanced;
- }
- group = find_busiest_group(&env);
- if (!group) {
- schedstat_inc(sd->lb_nobusyg[idle]);
- goto out_balanced;
- }
- busiest = find_busiest_queue(&env, group);
- if (!busiest) {
- schedstat_inc(sd->lb_nobusyq[idle]);
- goto out_balanced;
- }
- WARN_ON_ONCE(busiest == env.dst_rq);
- schedstat_add(sd->lb_imbalance[idle], env.imbalance);
- env.src_cpu = busiest->cpu;
- env.src_rq = busiest;
- ld_moved = 0;
- /* Clear this flag as soon as we find a pullable task */
- env.flags |= LBF_ALL_PINNED;
- if (busiest->nr_running > 1) {
- /*
- * Attempt to move tasks. If find_busiest_group has found
- * an imbalance but busiest->nr_running <= 1, the group is
- * still unbalanced. ld_moved simply stays zero, so it is
- * correctly treated as an imbalance.
- */
- env.loop_max = min(sysctl_sched_nr_migrate, busiest->nr_running);
- more_balance:
- rq_lock_irqsave(busiest, &rf);
- env.src_rq_rf = &rf;
- update_rq_clock(busiest);
- /*
- * cur_ld_moved - load moved in current iteration
- * ld_moved - cumulative load moved across iterations
- */
- cur_ld_moved = detach_tasks(&env);
- /*
- * We've detached some tasks from busiest_rq. Every
- * task is marked "TASK_ON_RQ_MIGRATING", so we can safely
- * unlock busiest->lock, and we are able to be sure
- * that nobody can manipulate the tasks in parallel.
- * See task_rq_lock() family for the details.
- */
- rq_unlock(busiest, &rf);
- if (cur_ld_moved) {
- attach_tasks(&env);
- ld_moved += cur_ld_moved;
- }
- local_irq_restore(rf.flags);
- if (env.flags & LBF_NEED_BREAK) {
- env.flags &= ~LBF_NEED_BREAK;
- /* Stop if we tried all running tasks */
- if (env.loop < busiest->nr_running)
- goto more_balance;
- }
- /*
- * Revisit (affine) tasks on src_cpu that couldn't be moved to
- * us and move them to an alternate dst_cpu in our sched_group
- * where they can run. The upper limit on how many times we
- * iterate on same src_cpu is dependent on number of CPUs in our
- * sched_group.
- *
- * This changes load balance semantics a bit on who can move
- * load to a given_cpu. In addition to the given_cpu itself
- * (or an ilb_cpu acting on its behalf where given_cpu is
- * nohz-idle), we now have balance_cpu in a position to move
- * load to given_cpu. In rare situations, this may cause
- * conflicts (balance_cpu and given_cpu/ilb_cpu deciding
- * _independently_ and at _same_ time to move some load to
- * given_cpu) causing excess load to be moved to given_cpu.
- * This however should not happen so much in practice and
- * moreover subsequent load balance cycles should correct the
- * excess load moved.
- */
- if ((env.flags & LBF_DST_PINNED) && env.imbalance > 0) {
- /* Prevent re-selecting dst_cpu via env's CPUs */
- __cpumask_clear_cpu(env.dst_cpu, env.cpus);
- env.dst_rq = cpu_rq(env.new_dst_cpu);
- env.dst_cpu = env.new_dst_cpu;
- env.flags &= ~LBF_DST_PINNED;
- env.loop = 0;
- env.loop_break = SCHED_NR_MIGRATE_BREAK;
- /*
- * Go back to "more_balance" rather than "redo" since we
- * need to continue with same src_cpu.
- */
- goto more_balance;
- }
- /*
- * We failed to reach balance because of affinity.
- */
- if (sd_parent) {
- int *group_imbalance = &sd_parent->groups->sgc->imbalance;
- if ((env.flags & LBF_SOME_PINNED) && env.imbalance > 0)
- *group_imbalance = 1;
- }
- /* All tasks on this runqueue were pinned by CPU affinity */
- if (unlikely(env.flags & LBF_ALL_PINNED)) {
- __cpumask_clear_cpu(cpu_of(busiest), cpus);
- /*
- * Attempting to continue load balancing at the current
- * sched_domain level only makes sense if there are
- * active CPUs remaining as possible busiest CPUs to
- * pull load from which are not contained within the
- * destination group that is receiving any migrated
- * load.
- */
- if (!cpumask_subset(cpus, env.dst_grpmask)) {
- env.loop = 0;
- env.loop_break = SCHED_NR_MIGRATE_BREAK;
- goto redo;
- }
- goto out_all_pinned;
- }
- }
- if (!ld_moved) {
- schedstat_inc(sd->lb_failed[idle]);
- /*
- * Increment the failure counter only on periodic balance.
- * We do not want newidle balance, which can be very
- * frequent, to pollute the failure counter, causing
- * excessive cache_hot migrations and active balances.
- */
- if (idle != CPU_NEWLY_IDLE)
- sd->nr_balance_failed++;
- if (need_active_balance(&env)) {
- unsigned long flags;
- raw_spin_rq_lock_irqsave(busiest, flags);
- /*
- * Don't kick the active_load_balance_cpu_stop,
- * if the curr task on busiest CPU can't be
- * moved to this_cpu:
- */
- if (!cpumask_test_cpu(this_cpu, busiest->curr->cpus_ptr)) {
- raw_spin_rq_unlock_irqrestore(busiest, flags);
- goto out_one_pinned;
- }
- /* Record that we found at least one task that could run on this_cpu */
- env.flags &= ~LBF_ALL_PINNED;
- /*
- * ->active_balance synchronizes accesses to
- * ->active_balance_work. Once set, it's cleared
- * only after active load balance is finished.
- */
- if (!busiest->active_balance) {
- busiest->active_balance = 1;
- busiest->push_cpu = this_cpu;
- active_balance = 1;
- }
- preempt_disable();
- raw_spin_rq_unlock_irqrestore(busiest, flags);
- if (active_balance) {
- stop_one_cpu_nowait(cpu_of(busiest),
- active_load_balance_cpu_stop, busiest,
- &busiest->active_balance_work);
- }
- preempt_enable();
- }
- } else {
- sd->nr_balance_failed = 0;
- }
- if (likely(!active_balance) || need_active_balance(&env)) {
- /* We were unbalanced, so reset the balancing interval */
- sd->balance_interval = sd->min_interval;
- }
- goto out;
- out_balanced:
- /*
- * We reach balance although we may have faced some affinity
- * constraints. Clear the imbalance flag only if other tasks got
- * a chance to move and fix the imbalance.
- */
- if (sd_parent && !(env.flags & LBF_ALL_PINNED)) {
- int *group_imbalance = &sd_parent->groups->sgc->imbalance;
- if (*group_imbalance)
- *group_imbalance = 0;
- }
- out_all_pinned:
- /*
- * We reach balance because all tasks are pinned at this level so
- * we can't migrate them. Leave the imbalance flag set so the parent level
- * can try to migrate them.
- */
- schedstat_inc(sd->lb_balanced[idle]);
- sd->nr_balance_failed = 0;
- out_one_pinned:
- ld_moved = 0;
- /*
- * newidle_balance() disregards balance intervals, so we could
- * repeatedly reach this code, which would lead to balance_interval
- * skyrocketing in a short amount of time. Skip the balance_interval
- * increase logic to avoid that.
- */
- if (env.idle == CPU_NEWLY_IDLE)
- goto out;
- /* tune up the balancing interval */
- if ((env.flags & LBF_ALL_PINNED &&
- sd->balance_interval < MAX_PINNED_INTERVAL) ||
- sd->balance_interval < sd->max_interval)
- sd->balance_interval *= 2;
- out:
- return ld_moved;
- }
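- /*
- * get_sd_balance_interval - compute the effective balance interval of a
- * domain in jiffies: the base balance_interval is stretched by busy_factor
- * when the CPU is busy, shortened by one jiffy so the busy intervals of
- * nested domains do not stay exact multiples of each other, and finally
- * clamped to [1, max_load_balance_interval].
- *
- * Illustrative example (the numbers are assumptions, not defaults taken
- * from this file): with balance_interval = 8 ms and busy_factor = 16, a
- * busy CPU gets 128 ms, converted to jiffies and reduced by 1 before the
- * clamp is applied.
- */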
- static inline unsigned long
- get_sd_balance_interval(struct sched_domain *sd, int cpu_busy)
- {
- unsigned long interval = sd->balance_interval;
- if (cpu_busy)
- interval *= sd->busy_factor;
- /* scale ms to jiffies */
- interval = msecs_to_jiffies(interval);
- /*
- * Reduce likelihood of busy balancing at higher domains racing with
- * balancing at lower domains by preventing their balancing periods
- * from being multiples of each other.
- */
- if (cpu_busy)
- interval -= 1;
- interval = clamp(interval, 1UL, max_load_balance_interval);
- return interval;
- }
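- /*
- * update_next_balance - pull *next_balance earlier if this domain is due
- * to be balanced before the currently recorded time.
- */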
- static inline void
- update_next_balance(struct sched_domain *sd, unsigned long *next_balance)
- {
- unsigned long interval, next;
- /* used by idle balance, so cpu_busy = 0 */
- interval = get_sd_balance_interval(sd, 0);
- next = sd->last_balance + interval;
- if (time_after(*next_balance, next))
- *next_balance = next;
- }
- /*
- * active_load_balance_cpu_stop is run by the CPU stopper. It pushes
- * running tasks off the busiest CPU onto idle CPUs. It requires at
- * least 1 task to be running on each physical CPU where possible, and
- * avoids physical / logical imbalances.
- */
- static int active_load_balance_cpu_stop(void *data)
- {
- struct rq *busiest_rq = data;
- int busiest_cpu = cpu_of(busiest_rq);
- int target_cpu = busiest_rq->push_cpu;
- struct rq *target_rq = cpu_rq(target_cpu);
- struct sched_domain *sd;
- struct task_struct *p = NULL;
- struct rq_flags rf;
- rq_lock_irq(busiest_rq, &rf);
- /*
- * Between queueing the stop-work and running it is a hole in which
- * CPUs can become inactive. We should not move tasks from or to
- * inactive CPUs.
- */
- if (!cpu_active(busiest_cpu) || !cpu_active(target_cpu))
- goto out_unlock;
- /* Make sure the requested CPU hasn't gone down in the meantime: */
- if (unlikely(busiest_cpu != smp_processor_id() ||
- !busiest_rq->active_balance))
- goto out_unlock;
- /* Is there any task to move? */
- if (busiest_rq->nr_running <= 1)
- goto out_unlock;
- /*
- * This condition is "impossible", if it occurs
- * we need to fix it. Originally reported by
- * Bjorn Helgaas on a 128-CPU setup.
- */
- WARN_ON_ONCE(busiest_rq == target_rq);
- /* Search for an sd spanning us and the target CPU. */
- rcu_read_lock();
- for_each_domain(target_cpu, sd) {
- if (cpumask_test_cpu(busiest_cpu, sched_domain_span(sd)))
- break;
- }
- if (likely(sd)) {
- struct lb_env env = {
- .sd = sd,
- .dst_cpu = target_cpu,
- .dst_rq = target_rq,
- .src_cpu = busiest_rq->cpu,
- .src_rq = busiest_rq,
- .idle = CPU_IDLE,
- .flags = LBF_ACTIVE_LB,
- .src_rq_rf = &rf,
- };
- schedstat_inc(sd->alb_count);
- update_rq_clock(busiest_rq);
- p = detach_one_task(&env);
- if (p) {
- schedstat_inc(sd->alb_pushed);
- /* Active balancing done, reset the failure counter. */
- sd->nr_balance_failed = 0;
- } else {
- schedstat_inc(sd->alb_failed);
- }
- }
- rcu_read_unlock();
- out_unlock:
- busiest_rq->active_balance = 0;
- rq_unlock(busiest_rq, &rf);
- if (p)
- attach_one_task(target_rq, p);
- local_irq_enable();
- return 0;
- }
- static DEFINE_SPINLOCK(balancing);
- /*
- * Scale the max load_balance interval with the number of CPUs in the system.
- * This trades load-balance latency on larger machines for less cross talk.
- */
- void update_max_interval(void)
- {
- max_load_balance_interval = HZ*num_online_cpus()/10;
- }
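- /*
- * update_newidle_cost - track the maximum cost of a newidle balance for
- * this domain and decay it over time; returns true when a decay was
- * applied on this call.
- */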
- static inline bool update_newidle_cost(struct sched_domain *sd, u64 cost)
- {
- if (cost > sd->max_newidle_lb_cost) {
- /*
- * Track max cost of a domain to make sure to not delay the
- * next wakeup on the CPU.
- */
- sd->max_newidle_lb_cost = cost;
- sd->last_decay_max_lb_cost = jiffies;
- } else if (time_after(jiffies, sd->last_decay_max_lb_cost + HZ)) {
- /*
- * Decay the newidle max times by ~1% per second to ensure that
- * it is not outdated and the current max cost is actually
- * shorter.
- */
- sd->max_newidle_lb_cost = (sd->max_newidle_lb_cost * 253) / 256;
- sd->last_decay_max_lb_cost = jiffies;
- return true;
- }
- return false;
- }
- /*
- * It checks each scheduling domain to see if it is due to be balanced,
- * and initiates a balancing operation if so.
- *
- * Balancing parameters are set up in init_sched_domains.
- */
- static void rebalance_domains(struct rq *rq, enum cpu_idle_type idle)
- {
- int continue_balancing = 1;
- int cpu = rq->cpu;
- int busy = idle != CPU_IDLE && !sched_idle_cpu(cpu);
- unsigned long interval;
- struct sched_domain *sd;
- /* Earliest time when we have to do rebalance again */
- unsigned long next_balance = jiffies + 60*HZ;
- int update_next_balance = 0;
- int need_serialize, need_decay = 0;
- u64 max_cost = 0;
- trace_android_rvh_sched_rebalance_domains(rq, &continue_balancing);
- if (!continue_balancing)
- return;
- rcu_read_lock();
- for_each_domain(cpu, sd) {
- /*
- * Decay the newidle max times here because this is a regular
- * visit to all the domains.
- */
- need_decay = update_newidle_cost(sd, 0);
- max_cost += sd->max_newidle_lb_cost;
- /*
- * Stop the load balance at this level. There is another
- * CPU in our sched group which is doing load balancing more
- * actively.
- */
- if (!continue_balancing) {
- if (need_decay)
- continue;
- break;
- }
- interval = get_sd_balance_interval(sd, busy);
- need_serialize = sd->flags & SD_SERIALIZE;
- if (need_serialize) {
- if (!spin_trylock(&balancing))
- goto out;
- }
- if (time_after_eq(jiffies, sd->last_balance + interval)) {
- if (load_balance(cpu, rq, sd, idle, &continue_balancing)) {
- /*
- * The LBF_DST_PINNED logic could have changed
- * env->dst_cpu, so we can't know our idle
- * state even if we migrated tasks. Update it.
- */
- idle = idle_cpu(cpu) ? CPU_IDLE : CPU_NOT_IDLE;
- busy = idle != CPU_IDLE && !sched_idle_cpu(cpu);
- }
- sd->last_balance = jiffies;
- interval = get_sd_balance_interval(sd, busy);
- }
- if (need_serialize)
- spin_unlock(&balancing);
- out:
- if (time_after(next_balance, sd->last_balance + interval)) {
- next_balance = sd->last_balance + interval;
- update_next_balance = 1;
- }
- }
- if (need_decay) {
- /*
- * Ensure the rq-wide value also decays but keep it at a
- * reasonable floor to avoid funnies with rq->avg_idle.
- */
- rq->max_idle_balance_cost =
- max((u64)sysctl_sched_migration_cost, max_cost);
- }
- rcu_read_unlock();
- /*
- * next_balance will be updated only when there is a need.
- * When the CPU is attached to a null domain, for example, it will not be
- * updated.
- */
- if (likely(update_next_balance))
- rq->next_balance = next_balance;
- }
- static inline int on_null_domain(struct rq *rq)
- {
- return unlikely(!rcu_dereference_sched(rq->sd));
- }
- #ifdef CONFIG_NO_HZ_COMMON
- /*
- * idle load balancing details
- * - When one of the busy CPUs notices that idle rebalancing may be
- * needed, it kicks the idle load balancer, which then does idle
- * load balancing on behalf of all the idle CPUs.
- * - HK_TYPE_MISC CPUs are used for this task, because HK_TYPE_SCHED is not
- * set anywhere yet.
- */
- static inline int find_new_ilb(void)
- {
- int ilb = -1;
- const struct cpumask *hk_mask;
- trace_android_rvh_find_new_ilb(nohz.idle_cpus_mask, &ilb);
- if (ilb >= 0)
- return ilb;
- hk_mask = housekeeping_cpumask(HK_TYPE_MISC);
- for_each_cpu_and(ilb, nohz.idle_cpus_mask, hk_mask) {
- if (ilb == smp_processor_id())
- continue;
- if (idle_cpu(ilb))
- return ilb;
- }
- return nr_cpu_ids;
- }
- /*
- * Kick a CPU to do the nohz balancing, if it is time for it. We pick any
- * idle CPU in the HK_TYPE_MISC housekeeping set (if there is one).
- */
- static void kick_ilb(unsigned int flags)
- {
- int ilb_cpu;
- /*
- * Increase nohz.next_balance only when a full ilb is triggered, but
- * not when we only update stats.
- */
- if (flags & NOHZ_BALANCE_KICK)
- nohz.next_balance = jiffies+1;
- ilb_cpu = find_new_ilb();
- if (ilb_cpu >= nr_cpu_ids)
- return;
- /*
- * Access to rq::nohz_csd is serialized by NOHZ_KICK_MASK; he who sets
- * the first flag owns it; cleared by nohz_csd_func().
- */
- flags = atomic_fetch_or(flags, nohz_flags(ilb_cpu));
- if (flags & NOHZ_KICK_MASK)
- return;
- /*
- * This way we generate an IPI on the target CPU which
- * is idle. And the softirq performing nohz idle load balance
- * will be run before returning from the IPI.
- */
- smp_call_function_single_async(ilb_cpu, &cpu_rq(ilb_cpu)->nohz_csd);
- }
- /*
- * Current decision point for kicking the idle load balancer in the presence
- * of idle CPUs in the system.
- */
- static void nohz_balancer_kick(struct rq *rq)
- {
- unsigned long now = jiffies;
- struct sched_domain_shared *sds;
- struct sched_domain *sd;
- int nr_busy, i, cpu = rq->cpu;
- unsigned int flags = 0;
- int done = 0;
- if (unlikely(rq->idle_balance))
- return;
- /*
- * We may recently have been in ticked or tickless idle mode. At the first
- * busy tick after returning from idle, we will update the busy stats.
- */
- nohz_balance_exit_idle(rq);
- /*
- * None are in tickless mode and hence no need for NOHZ idle load
- * balancing.
- */
- if (likely(!atomic_read(&nohz.nr_cpus)))
- return;
- if (READ_ONCE(nohz.has_blocked) &&
- time_after(now, READ_ONCE(nohz.next_blocked)))
- flags = NOHZ_STATS_KICK;
- if (time_before(now, nohz.next_balance))
- goto out;
- trace_android_rvh_sched_nohz_balancer_kick(rq, &flags, &done);
- if (done)
- goto out;
- if (rq->nr_running >= 2) {
- flags = NOHZ_STATS_KICK | NOHZ_BALANCE_KICK;
- goto out;
- }
- rcu_read_lock();
- sd = rcu_dereference(rq->sd);
- if (sd) {
- /*
- * If there's a CFS task and the current CPU has reduced
- * capacity; kick the ILB to see if there's a better CPU to run
- * on.
- */
- if (rq->cfs.h_nr_running >= 1 && check_cpu_capacity(rq, sd)) {
- flags = NOHZ_STATS_KICK | NOHZ_BALANCE_KICK;
- goto unlock;
- }
- }
- sd = rcu_dereference(per_cpu(sd_asym_packing, cpu));
- if (sd) {
- /*
- * When ASYM_PACKING; see if there's a more preferred CPU
- * currently idle; in which case, kick the ILB to move tasks
- * around.
- */
- for_each_cpu_and(i, sched_domain_span(sd), nohz.idle_cpus_mask) {
- if (sched_asym_prefer(i, cpu)) {
- flags = NOHZ_STATS_KICK | NOHZ_BALANCE_KICK;
- goto unlock;
- }
- }
- }
- sd = rcu_dereference(per_cpu(sd_asym_cpucapacity, cpu));
- if (sd) {
- /*
- * When ASYM_CPUCAPACITY; see if there's a higher capacity CPU
- * to run the misfit task on.
- */
- if (check_misfit_status(rq, sd)) {
- flags = NOHZ_STATS_KICK | NOHZ_BALANCE_KICK;
- goto unlock;
- }
- /*
- * For asymmetric systems, we do not want to nicely balance
- * cache use, instead we want to embrace asymmetry and only
- * ensure tasks have enough CPU capacity.
- *
- * Skip the LLC logic because it's not relevant in that case.
- */
- goto unlock;
- }
- sds = rcu_dereference(per_cpu(sd_llc_shared, cpu));
- if (sds) {
- /*
- * If there is an imbalance between LLC domains (IOW we could
- * increase the overall cache use), we need some less-loaded LLC
- * domain to pull some load. Likewise, we may need to spread
- * load within the current LLC domain (e.g. packed SMT cores but
- * other CPUs are idle). We can't really know from here how busy
- * the others are - so just get a nohz balance going if it looks
- * like this LLC domain has tasks we could move.
- */
- nr_busy = atomic_read(&sds->nr_busy_cpus);
- if (nr_busy > 1) {
- flags = NOHZ_STATS_KICK | NOHZ_BALANCE_KICK;
- goto unlock;
- }
- }
- unlock:
- rcu_read_unlock();
- out:
- if (READ_ONCE(nohz.needs_update))
- flags |= NOHZ_NEXT_KICK;
- if (flags)
- kick_ilb(flags);
- }
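- /*
- * set_cpu_sd_state_busy - account this CPU as busy in its LLC shared state
- * (nr_busy_cpus); sd->nohz_idle guards against counting the same
- * transition twice.
- */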
- static void set_cpu_sd_state_busy(int cpu)
- {
- struct sched_domain *sd;
- rcu_read_lock();
- sd = rcu_dereference(per_cpu(sd_llc, cpu));
- if (!sd || !sd->nohz_idle)
- goto unlock;
- sd->nohz_idle = 0;
- atomic_inc(&sd->shared->nr_busy_cpus);
- unlock:
- rcu_read_unlock();
- }
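- /*
- * nohz_balance_exit_idle - the tick is running again on this CPU: remove
- * it from nohz.idle_cpus_mask and mark its LLC busy.
- */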
- void nohz_balance_exit_idle(struct rq *rq)
- {
- SCHED_WARN_ON(rq != this_rq());
- if (likely(!rq->nohz_tick_stopped))
- return;
- rq->nohz_tick_stopped = 0;
- cpumask_clear_cpu(rq->cpu, nohz.idle_cpus_mask);
- atomic_dec(&nohz.nr_cpus);
- set_cpu_sd_state_busy(rq->cpu);
- }
- static void set_cpu_sd_state_idle(int cpu)
- {
- struct sched_domain *sd;
- rcu_read_lock();
- sd = rcu_dereference(per_cpu(sd_llc, cpu));
- if (!sd || sd->nohz_idle)
- goto unlock;
- sd->nohz_idle = 1;
- atomic_dec(&sd->shared->nr_busy_cpus);
- unlock:
- rcu_read_unlock();
- }
- /*
- * This routine will record that the CPU is going idle with tick stopped.
- * This info will be used in performing idle load balancing in the future.
- */
- void nohz_balance_enter_idle(int cpu)
- {
- struct rq *rq = cpu_rq(cpu);
- SCHED_WARN_ON(cpu != smp_processor_id());
- /* If this CPU is going down, then nothing needs to be done: */
- if (!cpu_active(cpu))
- return;
- /* Spare idle load balancing on CPUs that don't want to be disturbed: */
- if (!housekeeping_cpu(cpu, HK_TYPE_SCHED))
- return;
- /*
- * Can be set safely without rq->lock held.
- * If a clear happens, it will have observed the last additions, because
- * rq->lock is held during both the check and the clear.
- */
- rq->has_blocked_load = 1;
- /*
- * The tick is still stopped but load could have been added in the
- * meantime. We set the nohz.has_blocked flag to trigger a check of the
- * *_avg. The CPU is already part of nohz.idle_cpus_mask so the clear
- * of nohz.has_blocked can only happen after checking the new load.
- */
- if (rq->nohz_tick_stopped)
- goto out;
- /* If we're a completely isolated CPU, we don't play: */
- if (on_null_domain(rq))
- return;
- rq->nohz_tick_stopped = 1;
- cpumask_set_cpu(cpu, nohz.idle_cpus_mask);
- atomic_inc(&nohz.nr_cpus);
- /*
- * Ensures that if nohz_idle_balance() fails to observe our
- * @idle_cpus_mask store, it must observe the @has_blocked
- * and @needs_update stores.
- */
- smp_mb__after_atomic();
- set_cpu_sd_state_idle(cpu);
- WRITE_ONCE(nohz.needs_update, 1);
- out:
- /*
- * Each time a CPU enters idle, we assume that it has blocked load and
- * enable the periodic update of the load of idle CPUs.
- */
- WRITE_ONCE(nohz.has_blocked, 1);
- }
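- /*
- * update_nohz_stats - refresh the blocked load averages of a nohz-idle CPU
- * if they are stale; returns true while the CPU is believed to still carry
- * blocked load.
- */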
- static bool update_nohz_stats(struct rq *rq)
- {
- unsigned int cpu = rq->cpu;
- if (!rq->has_blocked_load)
- return false;
- if (!cpumask_test_cpu(cpu, nohz.idle_cpus_mask))
- return false;
- if (!time_after(jiffies, READ_ONCE(rq->last_blocked_load_update_tick)))
- return true;
- update_blocked_averages(cpu);
- return rq->has_blocked_load;
- }
- /*
- * Internal function that runs load balance for all idle cpus. The load balance
- * can be a simple update of blocked load or a complete load balance with
- * tasks movement depending of flags.
- */
- static void _nohz_idle_balance(struct rq *this_rq, unsigned int flags)
- {
- /* Earliest time when we have to do rebalance again */
- unsigned long now = jiffies;
- unsigned long next_balance = now + 60*HZ;
- bool has_blocked_load = false;
- int update_next_balance = 0;
- int this_cpu = this_rq->cpu;
- int balance_cpu;
- struct rq *rq;
- SCHED_WARN_ON((flags & NOHZ_KICK_MASK) == NOHZ_BALANCE_KICK);
- /*
- * We assume there will be no idle load after this update and clear
- * the has_blocked flag. If a CPU enters idle in the meantime, it will
- * set the has_blocked flag and trigger another update of idle load.
- * Because a CPU that becomes idle is added to idle_cpus_mask before
- * setting the flag, we are sure not to clear the state and not to
- * check the load of an idle CPU.
- *
- * Same applies to idle_cpus_mask vs needs_update.
- */
- if (flags & NOHZ_STATS_KICK)
- WRITE_ONCE(nohz.has_blocked, 0);
- if (flags & NOHZ_NEXT_KICK)
- WRITE_ONCE(nohz.needs_update, 0);
- /*
- * Ensures that if we miss the CPU, we must see the has_blocked
- * store from nohz_balance_enter_idle().
- */
- smp_mb();
- /*
- * Start with the next CPU after this_cpu so we will end with this_cpu and give
- * other idle CPUs a chance to pull load.
- */
- for_each_cpu_wrap(balance_cpu, nohz.idle_cpus_mask, this_cpu+1) {
- if (!idle_cpu(balance_cpu))
- continue;
- /*
- * If this CPU gets work to do, stop the load balancing
- * work being done for other CPUs. Next load
- * balancing owner will pick it up.
- */
- if (need_resched()) {
- if (flags & NOHZ_STATS_KICK)
- has_blocked_load = true;
- if (flags & NOHZ_NEXT_KICK)
- WRITE_ONCE(nohz.needs_update, 1);
- goto abort;
- }
- rq = cpu_rq(balance_cpu);
- if (flags & NOHZ_STATS_KICK)
- has_blocked_load |= update_nohz_stats(rq);
- /*
- * If time for next balance is due,
- * do the balance.
- */
- if (time_after_eq(jiffies, rq->next_balance)) {
- struct rq_flags rf;
- rq_lock_irqsave(rq, &rf);
- update_rq_clock(rq);
- rq_unlock_irqrestore(rq, &rf);
- if (flags & NOHZ_BALANCE_KICK)
- rebalance_domains(rq, CPU_IDLE);
- }
- if (time_after(next_balance, rq->next_balance)) {
- next_balance = rq->next_balance;
- update_next_balance = 1;
- }
- }
- /*
- * next_balance will be updated only when there is a need.
- * When the CPU is attached to a null domain, for example, it will not be
- * updated.
- */
- if (likely(update_next_balance))
- nohz.next_balance = next_balance;
- if (flags & NOHZ_STATS_KICK)
- WRITE_ONCE(nohz.next_blocked,
- now + msecs_to_jiffies(LOAD_AVG_PERIOD));
- abort:
- /* There is still blocked load, enable periodic update */
- if (has_blocked_load)
- WRITE_ONCE(nohz.has_blocked, 1);
- }
- /*
- * In CONFIG_NO_HZ_COMMON case, the idle balance kickee will do the
- * rebalancing for all the CPUs whose scheduler ticks are stopped.
- */
- static bool nohz_idle_balance(struct rq *this_rq, enum cpu_idle_type idle)
- {
- unsigned int flags = this_rq->nohz_idle_balance;
- if (!flags)
- return false;
- this_rq->nohz_idle_balance = 0;
- if (idle != CPU_IDLE)
- return false;
- _nohz_idle_balance(this_rq, flags);
- return true;
- }
- /*
- * Check if we need to run the ILB for updating blocked load before entering
- * idle state.
- */
- void nohz_run_idle_balance(int cpu)
- {
- unsigned int flags;
- flags = atomic_fetch_andnot(NOHZ_NEWILB_KICK, nohz_flags(cpu));
- /*
- * Update the blocked load only if no SCHED_SOFTIRQ is about to happen
- * (i.e. NOHZ_STATS_KICK set) that would do the same update.
- */
- if ((flags == NOHZ_NEWILB_KICK) && !need_resched())
- _nohz_idle_balance(cpu_rq(cpu), NOHZ_STATS_KICK);
- }
- static void nohz_newidle_balance(struct rq *this_rq)
- {
- int this_cpu = this_rq->cpu;
- /*
- * This CPU doesn't want to be disturbed by scheduler
- * housekeeping
- */
- if (!housekeeping_cpu(this_cpu, HK_TYPE_SCHED))
- return;
- /* Will wake up very soon. No time for doing anything else */
- if (this_rq->avg_idle < sysctl_sched_migration_cost)
- return;
- /* Don't need to update blocked load of idle CPUs */
- if (!READ_ONCE(nohz.has_blocked) ||
- time_before(jiffies, READ_ONCE(nohz.next_blocked)))
- return;
- /*
- * Set the need to trigger ILB in order to update blocked load
- * before entering idle state.
- */
- atomic_or(NOHZ_NEWILB_KICK, nohz_flags(this_cpu));
- }
- #else /* !CONFIG_NO_HZ_COMMON */
- static inline void nohz_balancer_kick(struct rq *rq) { }
- static inline bool nohz_idle_balance(struct rq *this_rq, enum cpu_idle_type idle)
- {
- return false;
- }
- static inline void nohz_newidle_balance(struct rq *this_rq) { }
- #endif /* CONFIG_NO_HZ_COMMON */
- /*
- * newidle_balance is called by schedule() if this_cpu is about to become
- * idle. Attempts to pull tasks from other CPUs.
- *
- * Returns:
- * < 0 - we released the lock and there are !fair tasks present
- * 0 - failed, no new tasks
- * > 0 - success, new (fair) tasks present
- */
- static int newidle_balance(struct rq *this_rq, struct rq_flags *rf)
- {
- unsigned long next_balance = jiffies + HZ;
- int this_cpu = this_rq->cpu;
- u64 t0, t1, curr_cost = 0;
- struct sched_domain *sd;
- int pulled_task = 0;
- int done = 0;
- trace_android_rvh_sched_newidle_balance(this_rq, rf, &pulled_task, &done);
- if (done)
- return pulled_task;
- update_misfit_status(NULL, this_rq);
- /*
- * There is a task waiting to run. No need to search for one.
- * Return 0; the task will be enqueued when switching to idle.
- */
- if (this_rq->ttwu_pending)
- return 0;
- /*
- * We must set idle_stamp _before_ calling idle_balance(), such that we
- * measure the duration of idle_balance() as idle time.
- */
- this_rq->idle_stamp = rq_clock(this_rq);
- /*
- * Do not pull tasks towards !active CPUs...
- */
- if (!cpu_active(this_cpu))
- return 0;
- /*
- * This is OK, because current is on_cpu, which avoids it being picked
- * for load-balance and preemption/IRQs are still disabled avoiding
- * further scheduler activity on it and we're being very careful to
- * re-start the picking loop.
- */
- rq_unpin_lock(this_rq, rf);
- rcu_read_lock();
- sd = rcu_dereference_check_sched_domain(this_rq->sd);
- if (!READ_ONCE(this_rq->rd->overload) ||
- (sd && this_rq->avg_idle < sd->max_newidle_lb_cost)) {
- if (sd)
- update_next_balance(sd, &next_balance);
- rcu_read_unlock();
- goto out;
- }
- rcu_read_unlock();
- raw_spin_rq_unlock(this_rq);
- t0 = sched_clock_cpu(this_cpu);
- update_blocked_averages(this_cpu);
- rcu_read_lock();
- for_each_domain(this_cpu, sd) {
- int continue_balancing = 1;
- u64 domain_cost;
- update_next_balance(sd, &next_balance);
- if (this_rq->avg_idle < curr_cost + sd->max_newidle_lb_cost)
- break;
- if (sd->flags & SD_BALANCE_NEWIDLE) {
- pulled_task = load_balance(this_cpu, this_rq,
- sd, CPU_NEWLY_IDLE,
- &continue_balancing);
- t1 = sched_clock_cpu(this_cpu);
- domain_cost = t1 - t0;
- update_newidle_cost(sd, domain_cost);
- curr_cost += domain_cost;
- t0 = t1;
- }
- /*
- * Stop searching for tasks to pull if there are
- * now runnable tasks on this rq.
- */
- if (pulled_task || this_rq->nr_running > 0 ||
- this_rq->ttwu_pending)
- break;
- }
- rcu_read_unlock();
- raw_spin_rq_lock(this_rq);
- if (curr_cost > this_rq->max_idle_balance_cost)
- this_rq->max_idle_balance_cost = curr_cost;
- /*
- * While browsing the domains we released the rq lock; a task could
- * have been enqueued in the meantime. Since we're not going idle,
- * pretend we pulled a task.
- */
- if (this_rq->cfs.h_nr_running && !pulled_task)
- pulled_task = 1;
- /* Is there a task of a high priority class? */
- if (this_rq->nr_running != this_rq->cfs.h_nr_running)
- pulled_task = -1;
- out:
- /* Move the next balance forward */
- if (time_after(this_rq->next_balance, next_balance))
- this_rq->next_balance = next_balance;
- if (pulled_task)
- this_rq->idle_stamp = 0;
- else
- nohz_newidle_balance(this_rq);
- rq_repin_lock(this_rq, rf);
- return pulled_task;
- }
- /*
- * run_rebalance_domains is triggered when needed from the scheduler tick.
- * Also triggered for nohz idle balancing (with nohz_balancing_kick set).
- */
- static __latent_entropy void run_rebalance_domains(struct softirq_action *h)
- {
- struct rq *this_rq = this_rq();
- enum cpu_idle_type idle = this_rq->idle_balance ?
- CPU_IDLE : CPU_NOT_IDLE;
- /*
- * If this CPU has a pending nohz_balance_kick, then do the
- * balancing on behalf of the other idle CPUs whose ticks are
- * stopped. Do nohz_idle_balance *before* rebalance_domains to
- * give the idle CPUs a chance to load balance. Else we may
- * load balance only within the local sched_domain hierarchy
- * and abort nohz_idle_balance altogether if we pull some load.
- */
- if (nohz_idle_balance(this_rq, idle))
- return;
- /* normal load balance */
- update_blocked_averages(this_rq->cpu);
- rebalance_domains(this_rq, idle);
- }
- /*
- * Trigger the SCHED_SOFTIRQ if it is time to do periodic load balancing.
- */
- void trigger_load_balance(struct rq *rq)
- {
- /*
- * Don't need to rebalance while attached to NULL domain or
- * runqueue CPU is not active
- */
- if (unlikely(on_null_domain(rq) || !cpu_active(cpu_of(rq))))
- return;
- if (time_after_eq(jiffies, rq->next_balance))
- raise_softirq(SCHED_SOFTIRQ);
- nohz_balancer_kick(rq);
- }
- static void rq_online_fair(struct rq *rq)
- {
- update_sysctl();
- update_runtime_enabled(rq);
- }
- static void rq_offline_fair(struct rq *rq)
- {
- update_sysctl();
- /* Ensure any throttled groups are reachable by pick_next_task */
- unthrottle_offline_cfs_rqs(rq);
- }
- #endif /* CONFIG_SMP */
- #ifdef CONFIG_SCHED_CORE
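- /*
- * __entity_slice_used - true if @se has consumed more than 1/@min_nr_tasks
- * of its scheduling slice since it was last put on the CPU.
- */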
- static inline bool
- __entity_slice_used(struct sched_entity *se, int min_nr_tasks)
- {
- u64 slice = sched_slice(cfs_rq_of(se), se);
- u64 rtime = se->sum_exec_runtime - se->prev_sum_exec_runtime;
- return (rtime * min_nr_tasks > slice);
- }
- #define MIN_NR_TASKS_DURING_FORCEIDLE 2
- static inline void task_tick_core(struct rq *rq, struct task_struct *curr)
- {
- if (!sched_core_enabled(rq))
- return;
- /*
- * If runqueue has only one task which used up its slice and
- * if the sibling is forced idle, then trigger schedule to
- * give forced idle task a chance.
- *
- * sched_slice() considers only this active rq and it gets the
- * whole slice. But during force idle, we have siblings acting
- * like a single runqueue and hence we need to consider runnable
- * tasks on this CPU and the forced idle CPU. Ideally, we should
- * go through the forced idle rq, but that would be a perf hit.
- * We can assume that the forced idle CPU has at least
- * MIN_NR_TASKS_DURING_FORCEIDLE - 1 tasks and use that to check
- * if we need to give up the CPU.
- */
- if (rq->core->core_forceidle_count && rq->cfs.nr_running == 1 &&
- __entity_slice_used(&curr->se, MIN_NR_TASKS_DURING_FORCEIDLE))
- resched_curr(rq);
- }
- /*
- * se_fi_update - Update the cfs_rq->min_vruntime_fi in a CFS hierarchy if needed.
- */
- static void se_fi_update(struct sched_entity *se, unsigned int fi_seq, bool forceidle)
- {
- for_each_sched_entity(se) {
- struct cfs_rq *cfs_rq = cfs_rq_of(se);
- if (forceidle) {
- if (cfs_rq->forceidle_seq == fi_seq)
- break;
- cfs_rq->forceidle_seq = fi_seq;
- }
- cfs_rq->min_vruntime_fi = cfs_rq->min_vruntime;
- }
- }
- void task_vruntime_update(struct rq *rq, struct task_struct *p, bool in_fi)
- {
- struct sched_entity *se = &p->se;
- if (p->sched_class != &fair_sched_class)
- return;
- se_fi_update(se, rq->core->core_forceidle_seq, in_fi);
- }
- bool cfs_prio_less(struct task_struct *a, struct task_struct *b, bool in_fi)
- {
- struct rq *rq = task_rq(a);
- struct sched_entity *sea = &a->se;
- struct sched_entity *seb = &b->se;
- struct cfs_rq *cfs_rqa;
- struct cfs_rq *cfs_rqb;
- s64 delta;
- SCHED_WARN_ON(task_rq(b)->core != rq->core);
- #ifdef CONFIG_FAIR_GROUP_SCHED
- /*
- * Find an se in the hierarchy for tasks a and b, such that the se's
- * are immediate siblings.
- */
- while (sea->cfs_rq->tg != seb->cfs_rq->tg) {
- int sea_depth = sea->depth;
- int seb_depth = seb->depth;
- if (sea_depth >= seb_depth)
- sea = parent_entity(sea);
- if (sea_depth <= seb_depth)
- seb = parent_entity(seb);
- }
- se_fi_update(sea, rq->core->core_forceidle_seq, in_fi);
- se_fi_update(seb, rq->core->core_forceidle_seq, in_fi);
- cfs_rqa = sea->cfs_rq;
- cfs_rqb = seb->cfs_rq;
- #else
- cfs_rqa = &task_rq(a)->cfs;
- cfs_rqb = &task_rq(b)->cfs;
- #endif
- /*
- * Find delta after normalizing se's vruntime with its cfs_rq's
- * min_vruntime_fi, which would have been updated in prior calls
- * to se_fi_update().
- */
- delta = (s64)(sea->vruntime - seb->vruntime) +
- (s64)(cfs_rqb->min_vruntime_fi - cfs_rqa->min_vruntime_fi);
- return delta > 0;
- }
- #else
- static inline void task_tick_core(struct rq *rq, struct task_struct *curr) {}
- #endif
- /*
- * scheduler tick hitting a task of our scheduling class.
- *
- * NOTE: This function can be called remotely by the tick offload that
- * goes along full dynticks. Therefore no local assumption can be made
- * and everything must be accessed through the @rq and @curr passed in
- * parameters.
- */
- static void task_tick_fair(struct rq *rq, struct task_struct *curr, int queued)
- {
- struct cfs_rq *cfs_rq;
- struct sched_entity *se = &curr->se;
- for_each_sched_entity(se) {
- cfs_rq = cfs_rq_of(se);
- entity_tick(cfs_rq, se, queued);
- }
- if (static_branch_unlikely(&sched_numa_balancing))
- task_tick_numa(rq, curr);
- update_misfit_status(curr, rq);
- update_overutilized_status(task_rq(curr));
- task_tick_core(rq, curr);
- }
- /*
- * called on fork with the child task as argument from the parent's context
- * - child not yet on the tasklist
- * - preemption disabled
- */
- static void task_fork_fair(struct task_struct *p)
- {
- struct cfs_rq *cfs_rq;
- struct sched_entity *se = &p->se, *curr;
- struct rq *rq = this_rq();
- struct rq_flags rf;
- rq_lock(rq, &rf);
- update_rq_clock(rq);
- cfs_rq = task_cfs_rq(current);
- curr = cfs_rq->curr;
- if (curr) {
- update_curr(cfs_rq);
- se->vruntime = curr->vruntime;
- }
- place_entity(cfs_rq, se, 1);
- if (sysctl_sched_child_runs_first && curr && entity_before(curr, se)) {
- /*
- * Upon rescheduling, sched_class::put_prev_task() will place
- * 'current' within the tree based on its new key value.
- */
- swap(curr->vruntime, se->vruntime);
- resched_curr(rq);
- }
- se->vruntime -= cfs_rq->min_vruntime;
- rq_unlock(rq, &rf);
- }
- /*
- * Priority of the task has changed. Check to see if we preempt
- * the current task.
- */
- static void
- prio_changed_fair(struct rq *rq, struct task_struct *p, int oldprio)
- {
- if (!task_on_rq_queued(p))
- return;
- if (rq->cfs.nr_running == 1)
- return;
- /*
- * Reschedule if we are currently running on this runqueue and
- * our priority decreased, or if we are not currently running on
- * this runqueue and our priority is higher than the current's
- */
- if (task_current(rq, p)) {
- if (p->prio > oldprio)
- resched_curr(rq);
- } else
- check_preempt_curr(rq, p, 0);
- }
- static inline bool vruntime_normalized(struct task_struct *p)
- {
- struct sched_entity *se = &p->se;
- /*
- * In both the TASK_ON_RQ_QUEUED and TASK_ON_RQ_MIGRATING cases,
- * the dequeue_entity(.flags=0) will already have normalized the
- * vruntime.
- */
- if (p->on_rq)
- return true;
- /*
- * When !on_rq, vruntime of the task has usually NOT been normalized.
- * But there are some cases where it has already been normalized:
- *
- * - A forked child which is waiting for being woken up by
- * wake_up_new_task().
- * - A task which has been woken up by try_to_wake_up() and
- * waiting for actually being woken up by sched_ttwu_pending().
- */
- if (!se->sum_exec_runtime ||
- (READ_ONCE(p->__state) == TASK_WAKING && p->sched_remote_wakeup))
- return true;
- return false;
- }
- #ifdef CONFIG_FAIR_GROUP_SCHED
- /*
- * Propagate the changes of the sched_entity across the tg tree to make it
- * visible to the root
- */
- static void propagate_entity_cfs_rq(struct sched_entity *se)
- {
- struct cfs_rq *cfs_rq = cfs_rq_of(se);
- if (cfs_rq_throttled(cfs_rq))
- return;
- if (!throttled_hierarchy(cfs_rq))
- list_add_leaf_cfs_rq(cfs_rq);
- /* Start to propagate at parent */
- se = se->parent;
- for_each_sched_entity(se) {
- cfs_rq = cfs_rq_of(se);
- update_load_avg(cfs_rq, se, UPDATE_TG);
- if (cfs_rq_throttled(cfs_rq))
- break;
- if (!throttled_hierarchy(cfs_rq))
- list_add_leaf_cfs_rq(cfs_rq);
- }
- }
- #else
- static void propagate_entity_cfs_rq(struct sched_entity *se) { }
- #endif
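- /*
- * detach_entity_cfs_rq - remove the entity's load contribution from its
- * cfs_rq (and from the task group averages) and propagate the change up
- * the hierarchy, e.g. when the task leaves the fair class or changes group.
- */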
- static void detach_entity_cfs_rq(struct sched_entity *se)
- {
- struct cfs_rq *cfs_rq = cfs_rq_of(se);
- #ifdef CONFIG_SMP
- /*
- * In case the task sched_avg hasn't been attached:
- * - A forked task which hasn't been woken up by wake_up_new_task().
- * - A task which has been woken up by try_to_wake_up() but is
- * waiting for actually being woken up by sched_ttwu_pending().
- */
- if (!se->avg.last_update_time)
- return;
- #endif
- /* Catch up with the cfs_rq and remove our load when we leave */
- update_load_avg(cfs_rq, se, 0);
- detach_entity_load_avg(cfs_rq, se);
- update_tg_load_avg(cfs_rq);
- propagate_entity_cfs_rq(se);
- }
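- /*
- * attach_entity_cfs_rq - the counterpart of detach_entity_cfs_rq(): add the
- * entity's load contribution to its new cfs_rq and propagate it upwards.
- */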
- static void attach_entity_cfs_rq(struct sched_entity *se)
- {
- struct cfs_rq *cfs_rq = cfs_rq_of(se);
- /* Synchronize entity with its cfs_rq */
- update_load_avg(cfs_rq, se, sched_feat(ATTACH_AGE_LOAD) ? 0 : SKIP_AGE_LOAD);
- attach_entity_load_avg(cfs_rq, se);
- update_tg_load_avg(cfs_rq);
- propagate_entity_cfs_rq(se);
- }
- static void detach_task_cfs_rq(struct task_struct *p)
- {
- struct sched_entity *se = &p->se;
- struct cfs_rq *cfs_rq = cfs_rq_of(se);
- if (!vruntime_normalized(p)) {
- /*
- * Fix up our vruntime so that the current sleep doesn't
- * cause 'unlimited' sleep bonus.
- */
- place_entity(cfs_rq, se, 0);
- se->vruntime -= cfs_rq->min_vruntime;
- }
- detach_entity_cfs_rq(se);
- }
- static void attach_task_cfs_rq(struct task_struct *p)
- {
- struct sched_entity *se = &p->se;
- struct cfs_rq *cfs_rq = cfs_rq_of(se);
- attach_entity_cfs_rq(se);
- if (!vruntime_normalized(p))
- se->vruntime += cfs_rq->min_vruntime;
- }
- static void switched_from_fair(struct rq *rq, struct task_struct *p)
- {
- detach_task_cfs_rq(p);
- }
- static void switched_to_fair(struct rq *rq, struct task_struct *p)
- {
- attach_task_cfs_rq(p);
- if (task_on_rq_queued(p)) {
- /*
- * We were most likely switched from sched_rt, so
- * kick off the schedule if running, otherwise just see
- * if we can still preempt the current task.
- */
- if (task_current(rq, p))
- resched_curr(rq);
- else
- check_preempt_curr(rq, p, 0);
- }
- }
- /* Account for a task changing its policy or group.
- *
- * This routine is mostly called to set cfs_rq->curr field when a task
- * migrates between groups/classes.
- */
- static void set_next_task_fair(struct rq *rq, struct task_struct *p, bool first)
- {
- struct sched_entity *se = &p->se;
- #ifdef CONFIG_SMP
- if (task_on_rq_queued(p)) {
- /*
- * Move the next running task to the front of the list, so our
- * cfs_tasks list becomes an MRU list.
- */
- list_move(&se->group_node, &rq->cfs_tasks);
- }
- #endif
- for_each_sched_entity(se) {
- struct cfs_rq *cfs_rq = cfs_rq_of(se);
- set_next_entity(cfs_rq, se);
- /* ensure bandwidth has been allocated on our new cfs_rq */
- account_cfs_rq_runtime(cfs_rq, 0);
- }
- }
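- /*
- * init_cfs_rq - set up an empty cfs_rq: an empty tasks timeline, the
- * min_vruntime starting value, and the lock protecting removed-load
- * accounting.
- */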
- void init_cfs_rq(struct cfs_rq *cfs_rq)
- {
- cfs_rq->tasks_timeline = RB_ROOT_CACHED;
- u64_u32_store(cfs_rq->min_vruntime, (u64)(-(1LL << 20)));
- #ifdef CONFIG_SMP
- raw_spin_lock_init(&cfs_rq->removed.lock);
- #endif
- }
- #ifdef CONFIG_FAIR_GROUP_SCHED
- static void task_change_group_fair(struct task_struct *p)
- {
- /*
- * We cannot detach or attach a forked task that
- * hasn't yet been woken up by wake_up_new_task().
- */
- if (READ_ONCE(p->__state) == TASK_NEW)
- return;
- detach_task_cfs_rq(p);
- #ifdef CONFIG_SMP
- /* Tell se's cfs_rq has been changed -- migrated */
- p->se.avg.last_update_time = 0;
- #endif
- set_task_rq(p, task_cpu(p));
- attach_task_cfs_rq(p);
- }
- void free_fair_sched_group(struct task_group *tg)
- {
- int i;
- for_each_possible_cpu(i) {
- if (tg->cfs_rq)
- kfree(tg->cfs_rq[i]);
- if (tg->se)
- kfree(tg->se[i]);
- }
- kfree(tg->cfs_rq);
- kfree(tg->se);
- }
- int alloc_fair_sched_group(struct task_group *tg, struct task_group *parent)
- {
- struct sched_entity *se;
- struct cfs_rq *cfs_rq;
- int i;
- tg->cfs_rq = kcalloc(nr_cpu_ids, sizeof(cfs_rq), GFP_KERNEL);
- if (!tg->cfs_rq)
- goto err;
- tg->se = kcalloc(nr_cpu_ids, sizeof(se), GFP_KERNEL);
- if (!tg->se)
- goto err;
- tg->shares = NICE_0_LOAD;
- init_cfs_bandwidth(tg_cfs_bandwidth(tg));
- for_each_possible_cpu(i) {
- cfs_rq = kzalloc_node(sizeof(struct cfs_rq),
- GFP_KERNEL, cpu_to_node(i));
- if (!cfs_rq)
- goto err;
- se = kzalloc_node(sizeof(struct sched_entity_stats),
- GFP_KERNEL, cpu_to_node(i));
- if (!se)
- goto err_free_rq;
- init_cfs_rq(cfs_rq);
- init_tg_cfs_entry(tg, cfs_rq, se, i, parent->se[i]);
- init_entity_runnable_average(se);
- }
- return 1;
- err_free_rq:
- kfree(cfs_rq);
- err:
- return 0;
- }
- void online_fair_sched_group(struct task_group *tg)
- {
- struct sched_entity *se;
- struct rq_flags rf;
- struct rq *rq;
- int i;
- for_each_possible_cpu(i) {
- rq = cpu_rq(i);
- se = tg->se[i];
- rq_lock_irq(rq, &rf);
- update_rq_clock(rq);
- attach_entity_cfs_rq(se);
- sync_throttle(tg, i);
- rq_unlock_irq(rq, &rf);
- }
- }
- void unregister_fair_sched_group(struct task_group *tg)
- {
- unsigned long flags;
- struct rq *rq;
- int cpu;
- destroy_cfs_bandwidth(tg_cfs_bandwidth(tg));
- for_each_possible_cpu(cpu) {
- if (tg->se[cpu])
- remove_entity_load_avg(tg->se[cpu]);
- /*
- * Only empty task groups can be destroyed; so we can speculatively
- * check on_list without danger of it being re-added.
- */
- if (!tg->cfs_rq[cpu]->on_list)
- continue;
- rq = cpu_rq(cpu);
- raw_spin_rq_lock_irqsave(rq, flags);
- list_del_leaf_cfs_rq(tg->cfs_rq[cpu]);
- raw_spin_rq_unlock_irqrestore(rq, flags);
- }
- }
- void init_tg_cfs_entry(struct task_group *tg, struct cfs_rq *cfs_rq,
- struct sched_entity *se, int cpu,
- struct sched_entity *parent)
- {
- struct rq *rq = cpu_rq(cpu);
- cfs_rq->tg = tg;
- cfs_rq->rq = rq;
- init_cfs_rq_runtime(cfs_rq);
- tg->cfs_rq[cpu] = cfs_rq;
- tg->se[cpu] = se;
- /* se could be NULL for root_task_group */
- if (!se)
- return;
- if (!parent) {
- se->cfs_rq = &rq->cfs;
- se->depth = 0;
- } else {
- se->cfs_rq = parent->my_q;
- se->depth = parent->depth + 1;
- }
- se->my_q = cfs_rq;
- /* guarantee group entities always have weight */
- update_load_set(&se->load, NICE_0_LOAD);
- se->parent = parent;
- }
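- /* Serializes updates of a task group's shares and idle status. */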
- static DEFINE_MUTEX(shares_mutex);
- static int __sched_group_set_shares(struct task_group *tg, unsigned long shares)
- {
- int i;
- lockdep_assert_held(&shares_mutex);
- /*
- * We can't change the weight of the root cgroup.
- */
- if (!tg->se[0])
- return -EINVAL;
- shares = clamp(shares, scale_load(MIN_SHARES), scale_load(MAX_SHARES));
- if (tg->shares == shares)
- return 0;
- tg->shares = shares;
- for_each_possible_cpu(i) {
- struct rq *rq = cpu_rq(i);
- struct sched_entity *se = tg->se[i];
- struct rq_flags rf;
- /* Propagate contribution to hierarchy */
- rq_lock_irqsave(rq, &rf);
- update_rq_clock(rq);
- for_each_sched_entity(se) {
- update_load_avg(cfs_rq_of(se), se, UPDATE_TG);
- update_cfs_group(se);
- }
- rq_unlock_irqrestore(rq, &rf);
- }
- return 0;
- }
- int sched_group_set_shares(struct task_group *tg, unsigned long shares)
- {
- int ret;
- mutex_lock(&shares_mutex);
- if (tg_is_idle(tg))
- ret = -EINVAL;
- else
- ret = __sched_group_set_shares(tg, shares);
- mutex_unlock(&shares_mutex);
- return ret;
- }
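- /*
- * sched_group_set_idle - mark a task group as (non-)idle: update the
- * idle_nr_running / idle_h_nr_running accounting up the hierarchy and
- * switch the group's weight between the idle weight and NICE_0_LOAD.
- */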
- int sched_group_set_idle(struct task_group *tg, long idle)
- {
- int i;
- if (tg == &root_task_group)
- return -EINVAL;
- if (idle < 0 || idle > 1)
- return -EINVAL;
- mutex_lock(&shares_mutex);
- if (tg->idle == idle) {
- mutex_unlock(&shares_mutex);
- return 0;
- }
- tg->idle = idle;
- for_each_possible_cpu(i) {
- struct rq *rq = cpu_rq(i);
- struct sched_entity *se = tg->se[i];
- struct cfs_rq *parent_cfs_rq, *grp_cfs_rq = tg->cfs_rq[i];
- bool was_idle = cfs_rq_is_idle(grp_cfs_rq);
- long idle_task_delta;
- struct rq_flags rf;
- rq_lock_irqsave(rq, &rf);
- grp_cfs_rq->idle = idle;
- if (WARN_ON_ONCE(was_idle == cfs_rq_is_idle(grp_cfs_rq)))
- goto next_cpu;
- if (se->on_rq) {
- parent_cfs_rq = cfs_rq_of(se);
- if (cfs_rq_is_idle(grp_cfs_rq))
- parent_cfs_rq->idle_nr_running++;
- else
- parent_cfs_rq->idle_nr_running--;
- }
- idle_task_delta = grp_cfs_rq->h_nr_running -
- grp_cfs_rq->idle_h_nr_running;
- if (!cfs_rq_is_idle(grp_cfs_rq))
- idle_task_delta *= -1;
- for_each_sched_entity(se) {
- struct cfs_rq *cfs_rq = cfs_rq_of(se);
- if (!se->on_rq)
- break;
- cfs_rq->idle_h_nr_running += idle_task_delta;
- /* Already accounted at parent level and above. */
- if (cfs_rq_is_idle(cfs_rq))
- break;
- }
- next_cpu:
- rq_unlock_irqrestore(rq, &rf);
- }
- /* Idle groups have minimum weight. */
- if (tg_is_idle(tg))
- __sched_group_set_shares(tg, scale_load(WEIGHT_IDLEPRIO));
- else
- __sched_group_set_shares(tg, NICE_0_LOAD);
- mutex_unlock(&shares_mutex);
- return 0;
- }
- #else /* CONFIG_FAIR_GROUP_SCHED */
- void free_fair_sched_group(struct task_group *tg) { }
- int alloc_fair_sched_group(struct task_group *tg, struct task_group *parent)
- {
- return 1;
- }
- void online_fair_sched_group(struct task_group *tg) { }
- void unregister_fair_sched_group(struct task_group *tg) { }
- #endif /* CONFIG_FAIR_GROUP_SCHED */
- static unsigned int get_rr_interval_fair(struct rq *rq, struct task_struct *task)
- {
- struct sched_entity *se = &task->se;
- unsigned int rr_interval = 0;
- /*
- * Time slice is 0 for SCHED_OTHER tasks that are on an otherwise
- * idle runqueue:
- */
- if (rq->cfs.load.weight)
- rr_interval = NS_TO_JIFFIES(sched_slice(cfs_rq_of(se), se));
- return rr_interval;
- }
- /*
- * All the scheduling class methods:
- */
- DEFINE_SCHED_CLASS(fair) = {
- .enqueue_task = enqueue_task_fair,
- .dequeue_task = dequeue_task_fair,
- .yield_task = yield_task_fair,
- .yield_to_task = yield_to_task_fair,
- .check_preempt_curr = check_preempt_wakeup,
- .pick_next_task = __pick_next_task_fair,
- .put_prev_task = put_prev_task_fair,
- .set_next_task = set_next_task_fair,
- #ifdef CONFIG_SMP
- .balance = balance_fair,
- .pick_task = pick_task_fair,
- .select_task_rq = select_task_rq_fair,
- .migrate_task_rq = migrate_task_rq_fair,
- .rq_online = rq_online_fair,
- .rq_offline = rq_offline_fair,
- .task_dead = task_dead_fair,
- .set_cpus_allowed = set_cpus_allowed_common,
- #endif
- .task_tick = task_tick_fair,
- .task_fork = task_fork_fair,
- .prio_changed = prio_changed_fair,
- .switched_from = switched_from_fair,
- .switched_to = switched_to_fair,
- .get_rr_interval = get_rr_interval_fair,
- .update_curr = update_curr_fair,
- #ifdef CONFIG_FAIR_GROUP_SCHED
- .task_change_group = task_change_group_fair,
- #endif
- #ifdef CONFIG_UCLAMP_TASK
- .uclamp_enabled = 1,
- #endif
- };
- #ifdef CONFIG_SCHED_DEBUG
- void print_cfs_stats(struct seq_file *m, int cpu)
- {
- struct cfs_rq *cfs_rq, *pos;
- rcu_read_lock();
- for_each_leaf_cfs_rq_safe(cpu_rq(cpu), cfs_rq, pos)
- print_cfs_rq(m, cpu, cfs_rq);
- rcu_read_unlock();
- }
- #ifdef CONFIG_NUMA_BALANCING
- void show_numa_stats(struct task_struct *p, struct seq_file *m)
- {
- int node;
- unsigned long tsf = 0, tpf = 0, gsf = 0, gpf = 0;
- struct numa_group *ng;
- rcu_read_lock();
- ng = rcu_dereference(p->numa_group);
- for_each_online_node(node) {
- if (p->numa_faults) {
- tsf = p->numa_faults[task_faults_idx(NUMA_MEM, node, 0)];
- tpf = p->numa_faults[task_faults_idx(NUMA_MEM, node, 1)];
- }
- if (ng) {
- gsf = ng->faults[task_faults_idx(NUMA_MEM, node, 0)],
- gpf = ng->faults[task_faults_idx(NUMA_MEM, node, 1)];
- }
- print_numa_stats(m, node, tsf, tpf, gsf, gpf);
- }
- rcu_read_unlock();
- }
- #endif /* CONFIG_NUMA_BALANCING */
- #endif /* CONFIG_SCHED_DEBUG */
- __init void init_sched_fair_class(void)
- {
- #ifdef CONFIG_SMP
- int i;
- for_each_possible_cpu(i) {
- zalloc_cpumask_var_node(&per_cpu(load_balance_mask, i), GFP_KERNEL, cpu_to_node(i));
- zalloc_cpumask_var_node(&per_cpu(select_rq_mask, i), GFP_KERNEL, cpu_to_node(i));
- }
- open_softirq(SCHED_SOFTIRQ, run_rebalance_domains);
- #ifdef CONFIG_NO_HZ_COMMON
- nohz.next_balance = jiffies;
- nohz.next_blocked = jiffies;
- zalloc_cpumask_var(&nohz.idle_cpus_mask, GFP_NOWAIT);
- #endif
- #endif /* SMP */
- }
|