bfq-iosched.c

// SPDX-License-Identifier: GPL-2.0-or-later
/*
 * Budget Fair Queueing (BFQ) I/O scheduler.
 *
 * Based on ideas and code from CFQ:
 * Copyright (C) 2003 Jens Axboe <[email protected]>
 *
 * Copyright (C) 2008 Fabio Checconi <[email protected]>
 *                    Paolo Valente <[email protected]>
 *
 * Copyright (C) 2010 Paolo Valente <[email protected]>
 *                    Arianna Avanzini <[email protected]>
 *
 * Copyright (C) 2017 Paolo Valente <[email protected]>
 *
 * BFQ is a proportional-share I/O scheduler, with some extra
 * low-latency capabilities. BFQ also supports full hierarchical
 * scheduling through cgroups. The next paragraphs provide an
 * introduction to BFQ's inner workings. Details on BFQ benefits,
 * usage and limitations can be found in
 * Documentation/block/bfq-iosched.rst.
 *
 * BFQ is a proportional-share storage-I/O scheduling algorithm based
 * on the slice-by-slice service scheme of CFQ. But BFQ assigns
 * budgets, measured in number of sectors, to processes instead of
 * time slices. The device is not granted to the in-service process
 * for a given time slice, but until it has exhausted its assigned
 * budget. This change from the time to the service domain enables BFQ
 * to distribute the device throughput among processes as desired,
 * without any distortion due to throughput fluctuations, or to device
 * internal queueing. BFQ uses an ad hoc internal scheduler, called
 * B-WF2Q+, to schedule processes according to their budgets. More
 * precisely, BFQ schedules queues associated with processes. Each
 * process/queue is assigned a user-configurable weight, and B-WF2Q+
 * guarantees that each queue receives a fraction of the throughput
 * proportional to its weight. Thanks to the accurate policy of
 * B-WF2Q+, BFQ can afford to assign high budgets to I/O-bound
 * processes issuing sequential requests (to boost the throughput),
 * and yet guarantee a low latency to interactive and soft real-time
 * applications.
 *
 * In particular, to provide these low-latency guarantees, BFQ
 * explicitly privileges the I/O of two classes of time-sensitive
 * applications: interactive and soft real-time. In more detail, BFQ
 * behaves this way if the low_latency parameter is set (default
 * configuration). This feature enables BFQ to provide applications in
 * these classes with a very low latency.
 *
 * To implement this feature, BFQ constantly tries to detect whether
 * the I/O requests in a bfq_queue come from an interactive or a soft
 * real-time application. For brevity, in these cases, the queue is
 * said to be interactive or soft real-time. In both cases, BFQ
 * privileges the service of the queue, over that of non-interactive
 * and non-soft-real-time queues. This privileging is performed,
 * mainly, by raising the weight of the queue. So, for brevity, we
 * simply call weight-raising periods the time periods during which a
 * queue is privileged, because deemed interactive or soft real-time.
 *
 * The detection of soft real-time queues/applications is described in
 * detail in the comments on the function
 * bfq_bfqq_softrt_next_start. On the other hand, the detection of an
 * interactive queue works as follows: a queue is deemed interactive
 * if it is constantly non-empty only for a limited time interval,
 * after which it does become empty. The queue may be deemed
 * interactive again (for a limited time), if it restarts being
 * constantly non-empty, provided that this happens only after the
 * queue has remained empty for a given minimum idle time.
 *
 * By default, BFQ automatically computes the above maximum time
 * interval, i.e., the time interval after which a constantly
 * non-empty queue stops being deemed interactive. Since a queue is
 * weight-raised while it is deemed interactive, this maximum time
 * interval happens to coincide with the (maximum) duration of the
 * weight-raising for interactive queues.
 *
 * Finally, BFQ also features additional heuristics for
 * preserving both a low latency and a high throughput on NCQ-capable,
 * rotational or flash-based devices, and to get the job done quickly
 * for applications consisting of many I/O-bound processes.
 *
 * NOTE: if the main or only goal, with a given device, is to achieve
 * the maximum-possible throughput at all times, then do switch off
 * all low-latency heuristics for that device, by setting low_latency
 * to 0.
 *
 * BFQ is described in [1], where a reference to the initial, more
 * theoretical paper on BFQ can also be found. The interested reader
 * can find in the latter paper full details on the main algorithm, as
 * well as formulas of the guarantees and formal proofs of all the
 * properties. With respect to the version of BFQ presented in these
 * papers, this implementation adds a few more heuristics, such as the
 * ones that guarantee a low latency to interactive and soft real-time
 * applications, and a hierarchical extension based on H-WF2Q+.
 *
 * B-WF2Q+ is based on WF2Q+, which is described in [2], together with
 * H-WF2Q+, while the augmented tree used here to implement B-WF2Q+
 * with O(log N) complexity derives from the one introduced with EEVDF
 * in [3].
 *
 * [1] P. Valente, A. Avanzini, "Evolution of the BFQ Storage I/O
 *     Scheduler", Proceedings of the First Workshop on Mobile System
 *     Technologies (MST-2015), May 2015.
 *     http://algogroup.unimore.it/people/paolo/disk_sched/mst-2015.pdf
 *
 * [2] Jon C.R. Bennett and H. Zhang, "Hierarchical Packet Fair Queueing
 *     Algorithms", IEEE/ACM Transactions on Networking, 5(5):675-689,
 *     Oct 1997.
 *
 *     http://www.cs.cmu.edu/~hzhang/papers/TON-97-Oct.ps.gz
 *
 * [3] I. Stoica and H. Abdel-Wahab, "Earliest Eligible Virtual Deadline
 *     First: A Flexible and Accurate Mechanism for Proportional Share
 *     Resource Allocation", technical report.
 *
 *     http://www.cs.berkeley.edu/~istoica/papers/eevdf-tr-95.pdf
 */

#include <linux/module.h>
#include <linux/slab.h>
#include <linux/blkdev.h>
#include <linux/cgroup.h>
#include <linux/ktime.h>
#include <linux/rbtree.h>
#include <linux/ioprio.h>
#include <linux/sbitmap.h>
#include <linux/delay.h>
#include <linux/backing-dev.h>

#include <trace/events/block.h>

#include "elevator.h"
#include "blk.h"
#include "blk-mq.h"
#include "blk-mq-tag.h"
#include "blk-mq-sched.h"
#include "bfq-iosched.h"
#include "blk-wbt.h"

#define BFQ_BFQQ_FNS(name) \
void bfq_mark_bfqq_##name(struct bfq_queue *bfqq) \
{ \
	__set_bit(BFQQF_##name, &(bfqq)->flags); \
} \
void bfq_clear_bfqq_##name(struct bfq_queue *bfqq) \
{ \
	__clear_bit(BFQQF_##name, &(bfqq)->flags); \
} \
int bfq_bfqq_##name(const struct bfq_queue *bfqq) \
{ \
	return test_bit(BFQQF_##name, &(bfqq)->flags); \
}
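
/*
 * Descriptive note: each BFQ_BFQQ_FNS(name) invocation below generates
 * three helpers for the corresponding BFQQF_##name flag. For example,
 * BFQ_BFQQ_FNS(busy) defines bfq_mark_bfqq_busy(),
 * bfq_clear_bfqq_busy() and bfq_bfqq_busy().
 */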
BFQ_BFQQ_FNS(just_created);
BFQ_BFQQ_FNS(busy);
BFQ_BFQQ_FNS(wait_request);
BFQ_BFQQ_FNS(non_blocking_wait_rq);
BFQ_BFQQ_FNS(fifo_expire);
BFQ_BFQQ_FNS(has_short_ttime);
BFQ_BFQQ_FNS(sync);
BFQ_BFQQ_FNS(IO_bound);
BFQ_BFQQ_FNS(in_large_burst);
BFQ_BFQQ_FNS(coop);
BFQ_BFQQ_FNS(split_coop);
BFQ_BFQQ_FNS(softrt_update);
#undef BFQ_BFQQ_FNS

/* Expiration time of async (0) and sync (1) requests, in ns. */
static const u64 bfq_fifo_expire[2] = { NSEC_PER_SEC / 4, NSEC_PER_SEC / 8 };

/* Maximum backwards seek (magic number lifted from CFQ), in KiB. */
static const int bfq_back_max = 16 * 1024;

/* Penalty of a backwards seek, in number of sectors. */
static const int bfq_back_penalty = 2;

/* Idling period duration, in ns. */
static u64 bfq_slice_idle = NSEC_PER_SEC / 125;

/* Minimum number of assigned budgets for which stats are safe to compute. */
static const int bfq_stats_min_budgets = 194;

/* Default maximum budget values, in sectors and number of requests. */
static const int bfq_default_max_budget = 16 * 1024;

/*
 * When a sync request is dispatched, the queue that contains that
 * request, and all the ancestor entities of that queue, are charged
 * with the number of sectors of the request. In contrast, if the
 * request is async, then the queue and its ancestor entities are
 * charged with the number of sectors of the request, multiplied by
 * the factor below. This throttles the bandwidth for async I/O,
 * w.r.t. sync I/O, and it is done to counter the tendency of async
 * writes to steal I/O throughput from reads.
 *
 * The current value of this parameter is the result of a tuning with
 * several hardware and software configurations. We tried to find the
 * lowest value for which writes do not cause noticeable problems to
 * reads. In fact, the lower this parameter, the stabler the I/O
 * control, in the following respect. The lower this parameter is,
 * the less the bandwidth enjoyed by a group decreases
 * - when the group does writes, w.r.t. when it does reads;
 * - when other groups do reads, w.r.t. when they do writes.
 */
static const int bfq_async_charge_factor = 3;

/* Default timeout values, in jiffies, approximating CFQ defaults. */
const int bfq_timeout = HZ / 8;

/*
 * Time limit for merging (see comments in bfq_setup_cooperator). Set
 * to the slowest value that, in our tests, proved to be effective in
 * removing false positives, while not causing true positives to miss
 * queue merging.
 *
 * As can be deduced from the low time limit below, queue merging, if
 * successful, happens at the very beginning of the I/O of the involved
 * cooperating processes, as a consequence of the arrival of the very
 * first requests from each cooperator. After that, there is very
 * little chance to find cooperators.
 */
static const unsigned long bfq_merge_time_limit = HZ/10;

static struct kmem_cache *bfq_pool;

/* Below this threshold (in ns), we consider thinktime immediate. */
#define BFQ_MIN_TT		(2 * NSEC_PER_MSEC)

/* hw_tag detection: parallel requests threshold and min samples needed. */
#define BFQ_HW_QUEUE_THRESHOLD	3
#define BFQ_HW_QUEUE_SAMPLES	32

#define BFQQ_SEEK_THR		(sector_t)(8 * 100)
#define BFQQ_SECT_THR_NONROT	(sector_t)(2 * 32)
#define BFQ_RQ_SEEKY(bfqd, last_pos, rq) \
	(get_sdist(last_pos, rq) > BFQQ_SEEK_THR && \
	 (!blk_queue_nonrot(bfqd->queue) || \
	  blk_rq_sectors(rq) < BFQQ_SECT_THR_NONROT))
#define BFQQ_CLOSE_THR		(sector_t)(8 * 1024)
#define BFQQ_SEEKY(bfqq)	(hweight32(bfqq->seek_history) > 19)

/*
 * Sync random I/O is likely to be confused with soft real-time I/O,
 * because it is characterized by limited throughput and apparently
 * isochronous arrival pattern. To avoid false positives, queues
 * containing only random (seeky) I/O are prevented from being tagged
 * as soft real-time.
 */
#define BFQQ_TOTALLY_SEEKY(bfqq)	(bfqq->seek_history == -1)
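
/*
 * Illustrative note on the two macros above: seek_history is a 32-bit
 * shift register, updated elsewhere in this file on each request
 * arrival, in which a set bit means that the corresponding recent
 * request was seeky according to BFQ_RQ_SEEKY(). BFQQ_SEEKY() thus
 * flags a queue once more than 19 of its last 32 requests were seeky,
 * while BFQQ_TOTALLY_SEEKY() requires all 32 bits to be set (the
 * register compares equal to -1).
 */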

/* Min number of samples required to perform peak-rate update */
#define BFQ_RATE_MIN_SAMPLES	32

/* Min observation time interval required to perform a peak-rate update (ns) */
#define BFQ_RATE_MIN_INTERVAL	(300*NSEC_PER_MSEC)

/* Target observation time interval for a peak-rate update (ns) */
#define BFQ_RATE_REF_INTERVAL	NSEC_PER_SEC

/*
 * Shift used for peak-rate fixed precision calculations.
 * With
 * - the current shift: 16 positions
 * - the current type used to store rate: u32
 * - the current unit of measure for rate: [sectors/usec], or, more precisely,
 *   [(sectors/usec) / 2^BFQ_RATE_SHIFT] to take into account the shift,
 * the range of rates that can be stored is
 * [1 / 2^BFQ_RATE_SHIFT, 2^(32 - BFQ_RATE_SHIFT)] sectors/usec =
 * [1 / 2^16, 2^16] sectors/usec = [15e-6, 65536] sectors/usec =
 * [15, 65G] sectors/sec
 * Which, assuming a sector size of 512B, corresponds to a range of
 * [7.5K, 33T] B/sec
 */
#define BFQ_RATE_SHIFT		16
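
/*
 * Worked example of the fixed-point representation above, added for
 * illustration: with 512B sectors, a device sustaining about 256 MB/s
 * moves roughly 500000 sectors/sec = 0.5 sectors/usec, which is stored
 * as 0.5 * 2^BFQ_RATE_SHIFT ~= 32768. This is in the same ballpark as
 * the non-rotational reference rate ref_rate[1] defined below.
 */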

/*
 * When configured for computing the duration of the weight-raising
 * for interactive queues automatically (see the comments at the
 * beginning of this file), BFQ does it using the following formula:
 * duration = (ref_rate / r) * ref_wr_duration,
 * where r is the peak rate of the device, and ref_rate and
 * ref_wr_duration are two reference parameters. In particular,
 * ref_rate is the peak rate of the reference storage device (see
 * below), and ref_wr_duration is about the maximum time needed, with
 * BFQ and while reading two files in parallel, to load typical large
 * applications on the reference device (see the comments on
 * max_service_from_wr below, for more details on how ref_wr_duration
 * is obtained). In practice, the slower/faster the device at hand
 * is, the more/less it takes to load applications with respect to the
 * reference device. Accordingly, the longer/shorter BFQ grants
 * weight raising to interactive applications.
 *
 * BFQ uses two different reference pairs (ref_rate, ref_wr_duration),
 * depending on whether the device is rotational or non-rotational.
 *
 * In the following definitions, ref_rate[0] and ref_wr_duration[0]
 * are the reference values for a rotational device, whereas
 * ref_rate[1] and ref_wr_duration[1] are the reference values for a
 * non-rotational device. The reference rates are not the actual peak
 * rates of the devices used as a reference, but slightly lower
 * values. The reason for using slightly lower values is that the
 * peak-rate estimator tends to yield slightly lower values than the
 * actual peak rate (it can yield the actual peak rate only if there
 * is only one process doing I/O, and the process does sequential
 * I/O).
 *
 * The reference peak rates are measured in sectors/usec, left-shifted
 * by BFQ_RATE_SHIFT.
 */
static int ref_rate[2] = {14000, 33000};

/*
 * To improve readability, a conversion function is used to initialize
 * the following array, which entails that the array can be
 * initialized only in a function.
 */
static int ref_wr_duration[2];
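
/*
 * For illustration of the duration formula above: a device whose
 * estimated peak rate r is half of ref_rate gets interactive
 * weight-raising periods twice as long as ref_wr_duration, while a
 * device twice as fast as the reference one gets periods half as long.
 */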

/*
 * BFQ uses the above-detailed, time-based weight-raising mechanism to
 * privilege interactive tasks. This mechanism is vulnerable to the
 * following false positives: I/O-bound applications that will go on
 * doing I/O for much longer than the duration of weight
 * raising. These applications have basically no benefit from being
 * weight-raised at the beginning of their I/O. On the opposite end,
 * while being weight-raised, these applications
 * a) unjustly steal throughput from applications that may actually need
 * low latency;
 * b) make BFQ uselessly perform device idling; device idling results
 * in loss of device throughput with most flash-based storage, and may
 * increase latencies when used purposelessly.
 *
 * BFQ tries to reduce these problems, by adopting the following
 * countermeasure. To introduce this countermeasure, we need first to
 * finish explaining how the duration of weight-raising for
 * interactive tasks is computed.
 *
 * For a bfq_queue deemed as interactive, the duration of weight
 * raising is dynamically adjusted, as a function of the estimated
 * peak rate of the device, so as to be equal to the time needed to
 * execute the 'largest' interactive task we benchmarked so far. By
 * largest task, we mean the task for which each involved process has
 * to do more I/O than for any of the other tasks we benchmarked. This
 * reference interactive task is the start-up of LibreOffice Writer,
 * and in this task each process/bfq_queue needs to have at most ~110K
 * sectors transferred.
 *
 * This last piece of information enables BFQ to reduce the actual
 * duration of weight-raising for at least one class of I/O-bound
 * applications: those doing sequential or quasi-sequential I/O. An
 * example is file copy. In fact, once started, the main I/O-bound
 * processes of these applications usually consume the above 110K
 * sectors in much less time than the processes of an application that
 * is starting, because these I/O-bound processes will greedily devote
 * almost all their CPU cycles only to their target,
 * throughput-friendly I/O operations. This is even more true if BFQ
 * happens to be underestimating the device peak rate, and thus
 * overestimating the duration of weight raising. But, according to
 * our measurements, once transferred 110K sectors, these processes
 * have no right to be weight-raised any longer.
 *
 * Based on the last consideration, BFQ ends weight-raising for a
 * bfq_queue if the latter happens to have received an amount of
 * service at least equal to the following constant. The constant is
 * set to slightly more than 110K, to have a minimum safety margin.
 *
 * This early ending of weight-raising reduces the amount of time
 * during which interactive false positives cause the two problems
 * described at the beginning of these comments.
 */
static const unsigned long max_service_from_wr = 120000;

/*
 * Maximum time between the creation of two queues, for stable merge
 * to be activated (in ms)
 */
static const unsigned long bfq_activation_stable_merging = 600;

/*
 * Minimum time to be waited before evaluating delayed stable merge (in ms)
 */
static const unsigned long bfq_late_stable_merging = 600;

#define RQ_BIC(rq)		((struct bfq_io_cq *)((rq)->elv.priv[0]))
#define RQ_BFQQ(rq)		((rq)->elv.priv[1])

struct bfq_queue *bic_to_bfqq(struct bfq_io_cq *bic, bool is_sync)
{
	return bic->bfqq[is_sync];
}

static void bfq_put_stable_ref(struct bfq_queue *bfqq);

void bic_set_bfqq(struct bfq_io_cq *bic, struct bfq_queue *bfqq, bool is_sync)
{
	struct bfq_queue *old_bfqq = bic->bfqq[is_sync];

	/* Clear bic pointer if bfqq is detached from this bic */
	if (old_bfqq && old_bfqq->bic == bic)
		old_bfqq->bic = NULL;

	/*
	 * If bfqq != NULL, then a non-stable queue merge between
	 * bic->bfqq and bfqq is happening here. This causes troubles
	 * in the following case: bic->bfqq has also been scheduled
	 * for a possible stable merge with bic->stable_merge_bfqq,
	 * and bic->stable_merge_bfqq == bfqq happens to
	 * hold. Troubles occur because bfqq may then undergo a split,
	 * thereby becoming eligible for a stable merge. Yet, if
	 * bic->stable_merge_bfqq points exactly to bfqq, then bfqq
	 * would be stably merged with itself. To avoid this anomaly,
	 * we cancel the stable merge if
	 * bic->stable_merge_bfqq == bfqq.
	 */
	bic->bfqq[is_sync] = bfqq;

	if (bfqq && bic->stable_merge_bfqq == bfqq) {
		/*
		 * Actually, these same instructions are executed also
		 * in bfq_setup_cooperator, in case of abort or actual
		 * execution of a stable merge. We could avoid
		 * repeating these instructions there too, but if we
		 * did so, we would nest even more complexity in this
		 * function.
		 */
		bfq_put_stable_ref(bic->stable_merge_bfqq);

		bic->stable_merge_bfqq = NULL;
	}
}

struct bfq_data *bic_to_bfqd(struct bfq_io_cq *bic)
{
	return bic->icq.q->elevator->elevator_data;
}

/**
 * icq_to_bic - convert iocontext queue structure to bfq_io_cq.
 * @icq: the iocontext queue.
 */
static struct bfq_io_cq *icq_to_bic(struct io_cq *icq)
{
	/* bic->icq is the first member, %NULL will convert to %NULL */
	return container_of(icq, struct bfq_io_cq, icq);
}

/**
 * bfq_bic_lookup - search the bic associated with @q for the current task.
 * @q: the request queue.
 */
static struct bfq_io_cq *bfq_bic_lookup(struct request_queue *q)
{
	struct bfq_io_cq *icq;
	unsigned long flags;

	if (!current->io_context)
		return NULL;

	spin_lock_irqsave(&q->queue_lock, flags);
	icq = icq_to_bic(ioc_lookup_icq(q));
	spin_unlock_irqrestore(&q->queue_lock, flags);

	return icq;
}

/*
 * Scheduler run of queue, if there are requests pending and no one in the
 * driver that will restart queueing.
 */
void bfq_schedule_dispatch(struct bfq_data *bfqd)
{
	lockdep_assert_held(&bfqd->lock);

	if (bfqd->queued != 0) {
		bfq_log(bfqd, "schedule dispatch");
		blk_mq_run_hw_queues(bfqd->queue, true);
	}
}

#define bfq_class_idle(bfqq)	((bfqq)->ioprio_class == IOPRIO_CLASS_IDLE)

#define bfq_sample_valid(samples)	((samples) > 80)

/*
 * Lifted from AS - choose which of rq1 and rq2 is best served now.
 * We choose the request that is closer to the head right now. Distance
 * behind the head is penalized and only allowed to a certain extent.
 */
static struct request *bfq_choose_req(struct bfq_data *bfqd,
				      struct request *rq1,
				      struct request *rq2,
				      sector_t last)
{
	sector_t s1, s2, d1 = 0, d2 = 0;
	unsigned long back_max;
#define BFQ_RQ1_WRAP	0x01 /* request 1 wraps */
#define BFQ_RQ2_WRAP	0x02 /* request 2 wraps */
	unsigned int wrap = 0; /* bit mask: requests behind the disk head? */

	if (!rq1 || rq1 == rq2)
		return rq2;
	if (!rq2)
		return rq1;

	if (rq_is_sync(rq1) && !rq_is_sync(rq2))
		return rq1;
	else if (rq_is_sync(rq2) && !rq_is_sync(rq1))
		return rq2;
	if ((rq1->cmd_flags & REQ_META) && !(rq2->cmd_flags & REQ_META))
		return rq1;
	else if ((rq2->cmd_flags & REQ_META) && !(rq1->cmd_flags & REQ_META))
		return rq2;

	s1 = blk_rq_pos(rq1);
	s2 = blk_rq_pos(rq2);

	/*
	 * By definition, 1KiB is 2 sectors.
	 */
	back_max = bfqd->bfq_back_max * 2;

	/*
	 * Strict one way elevator _except_ in the case where we allow
	 * short backward seeks which are biased as twice the cost of a
	 * similar forward seek.
	 */
	if (s1 >= last)
		d1 = s1 - last;
	else if (s1 + back_max >= last)
		d1 = (last - s1) * bfqd->bfq_back_penalty;
	else
		wrap |= BFQ_RQ1_WRAP;

	if (s2 >= last)
		d2 = s2 - last;
	else if (s2 + back_max >= last)
		d2 = (last - s2) * bfqd->bfq_back_penalty;
	else
		wrap |= BFQ_RQ2_WRAP;

	/* Found required data */

	/*
	 * By doing switch() on the bit mask "wrap" we avoid having to
	 * check two variables for all permutations: --> faster!
	 */
	switch (wrap) {
	case 0: /* common case for CFQ: rq1 and rq2 not wrapped */
		if (d1 < d2)
			return rq1;
		else if (d2 < d1)
			return rq2;

		if (s1 >= s2)
			return rq1;
		else
			return rq2;

	case BFQ_RQ2_WRAP:
		return rq1;
	case BFQ_RQ1_WRAP:
		return rq2;
	case BFQ_RQ1_WRAP|BFQ_RQ2_WRAP: /* both rqs wrapped */
	default:
		/*
		 * Since both rqs are wrapped,
		 * start with the one that's further behind head
		 * (--> only *one* back seek required),
		 * since back seek takes more time than forward.
		 */
		if (s1 <= s2)
			return rq1;
		else
			return rq2;
	}
}
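
/*
 * For illustration of the distance computation above, with the default
 * bfq_back_penalty of 2 and both requests within back_max of the head
 * position 'last': a request 1000 sectors ahead of the head has
 * distance 1000, while a request 400 sectors behind it has penalized
 * distance 800, so bfq_choose_req() would pick the latter.
 */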

#define BFQ_LIMIT_INLINE_DEPTH 16

#ifdef CONFIG_BFQ_GROUP_IOSCHED
static bool bfqq_request_over_limit(struct bfq_queue *bfqq, int limit)
{
	struct bfq_data *bfqd = bfqq->bfqd;
	struct bfq_entity *entity = &bfqq->entity;
	struct bfq_entity *inline_entities[BFQ_LIMIT_INLINE_DEPTH];
	struct bfq_entity **entities = inline_entities;
	int depth, level, alloc_depth = BFQ_LIMIT_INLINE_DEPTH;
	int class_idx = bfqq->ioprio_class - 1;
	struct bfq_sched_data *sched_data;
	unsigned long wsum;
	bool ret = false;

	if (!entity->on_st_or_in_serv)
		return false;

retry:
	spin_lock_irq(&bfqd->lock);
	/* +1 for bfqq entity, root cgroup not included */
	depth = bfqg_to_blkg(bfqq_group(bfqq))->blkcg->css.cgroup->level + 1;
	if (depth > alloc_depth) {
		spin_unlock_irq(&bfqd->lock);
		if (entities != inline_entities)
			kfree(entities);
		entities = kmalloc_array(depth, sizeof(*entities), GFP_NOIO);
		if (!entities)
			return false;
		alloc_depth = depth;
		goto retry;
	}

	sched_data = entity->sched_data;
	/* Gather our ancestors as we need to traverse them in reverse order */
	level = 0;
	for_each_entity(entity) {
		/*
		 * If at some level entity is not even active, allow request
		 * queueing so that BFQ knows there's work to do and activate
		 * entities.
		 */
		if (!entity->on_st_or_in_serv)
			goto out;
		/* Uh, more parents than cgroup subsystem thinks? */
		if (WARN_ON_ONCE(level >= depth))
			break;
		entities[level++] = entity;
	}
	WARN_ON_ONCE(level != depth);
	for (level--; level >= 0; level--) {
		entity = entities[level];
		if (level > 0) {
			wsum = bfq_entity_service_tree(entity)->wsum;
		} else {
			int i;
			/*
			 * For bfqq itself we take into account service trees
			 * of all higher priority classes and multiply their
			 * weights so that low prio queue from higher class
			 * gets more requests than high prio queue from lower
			 * class.
			 */
			wsum = 0;
			for (i = 0; i <= class_idx; i++) {
				wsum = wsum * IOPRIO_BE_NR +
					sched_data->service_tree[i].wsum;
			}
		}
		if (!wsum)
			continue;
		limit = DIV_ROUND_CLOSEST(limit * entity->weight, wsum);
		if (entity->allocated >= limit) {
			bfq_log_bfqq(bfqq->bfqd, bfqq,
				"too many requests: allocated %d limit %d level %d",
				entity->allocated, limit, level);
			ret = true;
			break;
		}
	}
out:
	spin_unlock_irq(&bfqd->lock);
	if (entities != inline_entities)
		kfree(entities);
	return ret;
}
#else
static bool bfqq_request_over_limit(struct bfq_queue *bfqq, int limit)
{
	return false;
}
#endif
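
/*
 * For illustration of the per-level scaling in
 * bfqq_request_over_limit(): if the remaining limit at some level is
 * 256 requests and the entity holds a weight of 100 out of a total
 * wsum of 300 on its service tree, its share becomes
 * DIV_ROUND_CLOSEST(256 * 100, 300) = 85 requests; a queue that
 * already has at least that many requests allocated is reported as
 * over its limit.
 */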
  607. /*
  608. * Async I/O can easily starve sync I/O (both sync reads and sync
  609. * writes), by consuming all tags. Similarly, storms of sync writes,
  610. * such as those that sync(2) may trigger, can starve sync reads.
  611. * Limit depths of async I/O and sync writes so as to counter both
  612. * problems.
  613. *
614. * Also, if a bfq queue or its parent cgroup consumes more tags than would be
  615. * appropriate for their weight, we trim the available tag depth to 1. This
  616. * avoids a situation where one cgroup can starve another cgroup from tags and
  617. * thus block service differentiation among cgroups. Note that because the
  618. * queue / cgroup already has many requests allocated and queued, this does not
  619. * significantly affect service guarantees coming from the BFQ scheduling
  620. * algorithm.
  621. */
  622. static void bfq_limit_depth(blk_opf_t opf, struct blk_mq_alloc_data *data)
  623. {
  624. struct bfq_data *bfqd = data->q->elevator->elevator_data;
  625. struct bfq_io_cq *bic = bfq_bic_lookup(data->q);
  626. struct bfq_queue *bfqq = bic ? bic_to_bfqq(bic, op_is_sync(opf)) : NULL;
  627. int depth;
  628. unsigned limit = data->q->nr_requests;
  629. /* Sync reads have full depth available */
  630. if (op_is_sync(opf) && !op_is_write(opf)) {
  631. depth = 0;
  632. } else {
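/*
* word_depths[] holds pre-computed fractions of the full tag depth
* (see bfq_update_depths()), indexed by whether some queue is
* currently weight-raised and by the sync flag of the operation;
* apply the same fraction to the per-queue request limit.
*/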
  633. depth = bfqd->word_depths[!!bfqd->wr_busy_queues][op_is_sync(opf)];
  634. limit = (limit * depth) >> bfqd->full_depth_shift;
  635. }
  636. /*
637. * Does the queue (or any parent entity) exceed the number of requests that
  638. * should be available to it? Heavily limit depth so that it cannot
  639. * consume more available requests and thus starve other entities.
  640. */
  641. if (bfqq && bfqq_request_over_limit(bfqq, limit))
  642. depth = 1;
  643. bfq_log(bfqd, "[%s] wr_busy %d sync %d depth %u",
  644. __func__, bfqd->wr_busy_queues, op_is_sync(opf), depth);
  645. if (depth)
  646. data->shallow_depth = depth;
  647. }
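/*
* Look up, in the position tree rooted at @root, the queue whose
* next request starts exactly at @sector. On a miss, return NULL
* and set *ret_parent and *rb_link to the point where the caller
* can link a new node.
*/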
  648. static struct bfq_queue *
  649. bfq_rq_pos_tree_lookup(struct bfq_data *bfqd, struct rb_root *root,
  650. sector_t sector, struct rb_node **ret_parent,
  651. struct rb_node ***rb_link)
  652. {
  653. struct rb_node **p, *parent;
  654. struct bfq_queue *bfqq = NULL;
  655. parent = NULL;
  656. p = &root->rb_node;
  657. while (*p) {
  658. struct rb_node **n;
  659. parent = *p;
  660. bfqq = rb_entry(parent, struct bfq_queue, pos_node);
  661. /*
  662. * Sort strictly based on sector. Smallest to the left,
  663. * largest to the right.
  664. */
  665. if (sector > blk_rq_pos(bfqq->next_rq))
  666. n = &(*p)->rb_right;
  667. else if (sector < blk_rq_pos(bfqq->next_rq))
  668. n = &(*p)->rb_left;
  669. else
  670. break;
  671. p = n;
  672. bfqq = NULL;
  673. }
  674. *ret_parent = parent;
  675. if (rb_link)
  676. *rb_link = p;
  677. bfq_log(bfqd, "rq_pos_tree_lookup %llu: returning %d",
  678. (unsigned long long)sector,
  679. bfqq ? bfqq->pid : 0);
  680. return bfqq;
  681. }
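/*
* Return true if bfqq has already received some service and its
* first I/O dates back more than bfq_merge_time_limit: past that
* point, merging bfqq with another queue is not expected to pay
* off any more (see the comments in bfq_setup_cooperator()).
*/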
  682. static bool bfq_too_late_for_merging(struct bfq_queue *bfqq)
  683. {
  684. return bfqq->service_from_backlogged > 0 &&
  685. time_is_before_jiffies(bfqq->first_IO_time +
  686. bfq_merge_time_limit);
  687. }
  688. /*
689. * The following function is not marked as __cold because it is
690. * actually cold; rather, it is marked __cold for the same performance
691. * goal described in the comments on the likely() at the beginning of
692. * bfq_setup_cooperator(). Unexpectedly, to reach an even lower
  693. * execution time for the case where this function is not invoked, we
  694. * had to add an unlikely() in each involved if().
  695. */
  696. void __cold
  697. bfq_pos_tree_add_move(struct bfq_data *bfqd, struct bfq_queue *bfqq)
  698. {
  699. struct rb_node **p, *parent;
  700. struct bfq_queue *__bfqq;
  701. if (bfqq->pos_root) {
  702. rb_erase(&bfqq->pos_node, bfqq->pos_root);
  703. bfqq->pos_root = NULL;
  704. }
  705. /* oom_bfqq does not participate in queue merging */
  706. if (bfqq == &bfqd->oom_bfqq)
  707. return;
  708. /*
  709. * bfqq cannot be merged any longer (see comments in
  710. * bfq_setup_cooperator): no point in adding bfqq into the
  711. * position tree.
  712. */
  713. if (bfq_too_late_for_merging(bfqq))
  714. return;
  715. if (bfq_class_idle(bfqq))
  716. return;
  717. if (!bfqq->next_rq)
  718. return;
  719. bfqq->pos_root = &bfqq_group(bfqq)->rq_pos_tree;
  720. __bfqq = bfq_rq_pos_tree_lookup(bfqd, bfqq->pos_root,
  721. blk_rq_pos(bfqq->next_rq), &parent, &p);
  722. if (!__bfqq) {
  723. rb_link_node(&bfqq->pos_node, parent, p);
  724. rb_insert_color(&bfqq->pos_node, bfqq->pos_root);
  725. } else
  726. bfqq->pos_root = NULL;
  727. }
  728. /*
  729. * The following function returns false either if every active queue
  730. * must receive the same share of the throughput (symmetric scenario),
  731. * or, as a special case, if bfqq must receive a share of the
  732. * throughput lower than or equal to the share that every other active
  733. * queue must receive. If bfqq does sync I/O, then these are the only
  734. * two cases where bfqq happens to be guaranteed its share of the
  735. * throughput even if I/O dispatching is not plugged when bfqq remains
  736. * temporarily empty (for more details, see the comments in the
  737. * function bfq_better_to_idle()). For this reason, the return value
  738. * of this function is used to check whether I/O-dispatch plugging can
  739. * be avoided.
  740. *
  741. * The above first case (symmetric scenario) occurs when:
  742. * 1) all active queues have the same weight,
  743. * 2) all active queues belong to the same I/O-priority class,
  744. * 3) all active groups at the same level in the groups tree have the same
  745. * weight,
  746. * 4) all active groups at the same level in the groups tree have the same
  747. * number of children.
  748. *
  749. * Unfortunately, keeping the necessary state for evaluating exactly
  750. * the last two symmetry sub-conditions above would be quite complex
  751. * and time consuming. Therefore this function evaluates, instead,
  752. * only the following stronger three sub-conditions, for which it is
  753. * much easier to maintain the needed state:
  754. * 1) all active queues have the same weight,
  755. * 2) all active queues belong to the same I/O-priority class,
  756. * 3) there are no active groups.
  757. * In particular, the last condition is always true if hierarchical
  758. * support or the cgroups interface are not enabled, thus no state
  759. * needs to be maintained in this case.
  760. */
  761. static bool bfq_asymmetric_scenario(struct bfq_data *bfqd,
  762. struct bfq_queue *bfqq)
  763. {
  764. bool smallest_weight = bfqq &&
  765. bfqq->weight_counter &&
  766. bfqq->weight_counter ==
  767. container_of(
  768. rb_first_cached(&bfqd->queue_weights_tree),
  769. struct bfq_weight_counter,
  770. weights_node);
  771. /*
  772. * For queue weights to differ, queue_weights_tree must contain
  773. * at least two nodes.
  774. */
  775. bool varied_queue_weights = !smallest_weight &&
  776. !RB_EMPTY_ROOT(&bfqd->queue_weights_tree.rb_root) &&
  777. (bfqd->queue_weights_tree.rb_root.rb_node->rb_left ||
  778. bfqd->queue_weights_tree.rb_root.rb_node->rb_right);
  779. bool multiple_classes_busy =
  780. (bfqd->busy_queues[0] && bfqd->busy_queues[1]) ||
  781. (bfqd->busy_queues[0] && bfqd->busy_queues[2]) ||
  782. (bfqd->busy_queues[1] && bfqd->busy_queues[2]);
  783. return varied_queue_weights || multiple_classes_busy
  784. #ifdef CONFIG_BFQ_GROUP_IOSCHED
  785. || bfqd->num_groups_with_pending_reqs > 0
  786. #endif
  787. ;
  788. }
  789. /*
  790. * If the weight-counter tree passed as input contains no counter for
  791. * the weight of the input queue, then add that counter; otherwise just
  792. * increment the existing counter.
  793. *
  794. * Note that weight-counter trees contain few nodes in mostly symmetric
  795. * scenarios. For example, if all queues have the same weight, then the
  796. * weight-counter tree for the queues may contain at most one node.
  797. * This holds even if low_latency is on, because weight-raised queues
  798. * are not inserted in the tree.
  799. * In most scenarios, the rate at which nodes are created/destroyed
  800. * should be low too.
  801. */
  802. void bfq_weights_tree_add(struct bfq_data *bfqd, struct bfq_queue *bfqq,
  803. struct rb_root_cached *root)
  804. {
  805. struct bfq_entity *entity = &bfqq->entity;
  806. struct rb_node **new = &(root->rb_root.rb_node), *parent = NULL;
  807. bool leftmost = true;
  808. /*
  809. * Do not insert if the queue is already associated with a
  810. * counter, which happens if:
  811. * 1) a request arrival has caused the queue to become both
  812. * non-weight-raised, and hence change its weight, and
  813. * backlogged; in this respect, each of the two events
  814. * causes an invocation of this function,
  815. * 2) this is the invocation of this function caused by the
  816. * second event. This second invocation is actually useless,
  817. * and we handle this fact by exiting immediately. More
  818. * efficient or clearer solutions might possibly be adopted.
  819. */
  820. if (bfqq->weight_counter)
  821. return;
  822. while (*new) {
  823. struct bfq_weight_counter *__counter = container_of(*new,
  824. struct bfq_weight_counter,
  825. weights_node);
  826. parent = *new;
  827. if (entity->weight == __counter->weight) {
  828. bfqq->weight_counter = __counter;
  829. goto inc_counter;
  830. }
  831. if (entity->weight < __counter->weight)
  832. new = &((*new)->rb_left);
  833. else {
  834. new = &((*new)->rb_right);
  835. leftmost = false;
  836. }
  837. }
  838. bfqq->weight_counter = kzalloc(sizeof(struct bfq_weight_counter),
  839. GFP_ATOMIC);
  840. /*
  841. * In the unlucky event of an allocation failure, we just
842. * exit. This will cause the weight of the queue to not be
843. * considered in bfq_asymmetric_scenario, which, in its turn,
844. * causes the scenario to be deemed wrongly symmetric in case
845. * bfqq's weight would have been the only weight making the
846. * scenario asymmetric. On the bright side, no imbalance will,
847. * however, occur when bfqq becomes inactive again (the
848. * invocation of this function is triggered by an activation
849. * of the queue). In fact, bfq_weights_tree_remove does nothing
  850. * if !bfqq->weight_counter.
  851. */
  852. if (unlikely(!bfqq->weight_counter))
  853. return;
  854. bfqq->weight_counter->weight = entity->weight;
  855. rb_link_node(&bfqq->weight_counter->weights_node, parent, new);
  856. rb_insert_color_cached(&bfqq->weight_counter->weights_node, root,
  857. leftmost);
  858. inc_counter:
  859. bfqq->weight_counter->num_active++;
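/*
* Pin bfqq while it is accounted in the weights tree; the
* reference is released by bfq_put_queue() in
* __bfq_weights_tree_remove().
*/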
  860. bfqq->ref++;
  861. }
  862. /*
  863. * Decrement the weight counter associated with the queue, and, if the
  864. * counter reaches 0, remove the counter from the tree.
  865. * See the comments to the function bfq_weights_tree_add() for considerations
  866. * about overhead.
  867. */
  868. void __bfq_weights_tree_remove(struct bfq_data *bfqd,
  869. struct bfq_queue *bfqq,
  870. struct rb_root_cached *root)
  871. {
  872. if (!bfqq->weight_counter)
  873. return;
  874. bfqq->weight_counter->num_active--;
  875. if (bfqq->weight_counter->num_active > 0)
  876. goto reset_entity_pointer;
  877. rb_erase_cached(&bfqq->weight_counter->weights_node, root);
  878. kfree(bfqq->weight_counter);
  879. reset_entity_pointer:
  880. bfqq->weight_counter = NULL;
  881. bfq_put_queue(bfqq);
  882. }
  883. /*
  884. * Invoke __bfq_weights_tree_remove on bfqq and decrement the number
  885. * of active groups for each queue's inactive parent entity.
  886. */
  887. void bfq_weights_tree_remove(struct bfq_data *bfqd,
  888. struct bfq_queue *bfqq)
  889. {
  890. struct bfq_entity *entity = bfqq->entity.parent;
  891. for_each_entity(entity) {
  892. struct bfq_sched_data *sd = entity->my_sched_data;
  893. if (sd->next_in_service || sd->in_service_entity) {
  894. /*
  895. * entity is still active, because either
  896. * next_in_service or in_service_entity is not
  897. * NULL (see the comments on the definition of
  898. * next_in_service for details on why
  899. * in_service_entity must be checked too).
  900. *
  901. * As a consequence, its parent entities are
  902. * active as well, and thus this loop must
  903. * stop here.
  904. */
  905. break;
  906. }
  907. /*
  908. * The decrement of num_groups_with_pending_reqs is
  909. * not performed immediately upon the deactivation of
  910. * entity, but it is delayed to when it also happens
  911. * that the first leaf descendant bfqq of entity gets
  912. * all its pending requests completed. The following
  913. * instructions perform this delayed decrement, if
  914. * needed. See the comments on
  915. * num_groups_with_pending_reqs for details.
  916. */
  917. if (entity->in_groups_with_pending_reqs) {
  918. entity->in_groups_with_pending_reqs = false;
  919. bfqd->num_groups_with_pending_reqs--;
  920. }
  921. }
  922. /*
923. * The next function is invoked last, because it causes bfqq to be
  924. * freed if the following holds: bfqq is not in service and
  925. * has no dispatched request. DO NOT use bfqq after the next
  926. * function invocation.
  927. */
  928. __bfq_weights_tree_remove(bfqd, bfqq,
  929. &bfqd->queue_weights_tree);
  930. }
  931. /*
  932. * Return expired entry, or NULL to just start from scratch in rbtree.
  933. */
  934. static struct request *bfq_check_fifo(struct bfq_queue *bfqq,
  935. struct request *last)
  936. {
  937. struct request *rq;
  938. if (bfq_bfqq_fifo_expire(bfqq))
  939. return NULL;
  940. bfq_mark_bfqq_fifo_expire(bfqq);
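/*
* The flag set above makes the FIFO be checked at most once per
* service slot: it is cleared only when bfqq is set in service
* again.
*/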
  941. rq = rq_entry_fifo(bfqq->fifo.next);
  942. if (rq == last || ktime_get_ns() < rq->fifo_time)
  943. return NULL;
  944. bfq_log_bfqq(bfqq->bfqd, bfqq, "check_fifo: returned %p", rq);
  945. return rq;
  946. }
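/*
* Choose the request to serve after @last for @bfqq: give
* precedence to an expired FIFO request, otherwise pick between
* the requests immediately preceding and following @last in the
* sector-sorted tree (wrapping to the first request of the tree if
* @last is the last one), delegating the final choice to
* bfq_choose_req().
*/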
  947. static struct request *bfq_find_next_rq(struct bfq_data *bfqd,
  948. struct bfq_queue *bfqq,
  949. struct request *last)
  950. {
  951. struct rb_node *rbnext = rb_next(&last->rb_node);
  952. struct rb_node *rbprev = rb_prev(&last->rb_node);
  953. struct request *next, *prev = NULL;
  954. /* Follow expired path, else get first next available. */
  955. next = bfq_check_fifo(bfqq, last);
  956. if (next)
  957. return next;
  958. if (rbprev)
  959. prev = rb_entry_rq(rbprev);
  960. if (rbnext)
  961. next = rb_entry_rq(rbnext);
  962. else {
  963. rbnext = rb_first(&bfqq->sort_list);
  964. if (rbnext && rbnext != &last->rb_node)
  965. next = rb_entry_rq(rbnext);
  966. }
  967. return bfq_choose_req(bfqd, next, prev, blk_rq_pos(last));
  968. }
  969. /* see the definition of bfq_async_charge_factor for details */
  970. static unsigned long bfq_serv_to_charge(struct request *rq,
  971. struct bfq_queue *bfqq)
  972. {
  973. if (bfq_bfqq_sync(bfqq) || bfqq->wr_coeff > 1 ||
  974. bfq_asymmetric_scenario(bfqq->bfqd, bfqq))
  975. return blk_rq_sectors(rq);
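/*
* Otherwise, charge the request for more than its actual size, so
* that async queues consume their budgets faster and are thereby
* throttled with respect to sync queues (see the definition of
* bfq_async_charge_factor).
*/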
  976. return blk_rq_sectors(rq) * bfq_async_charge_factor;
  977. }
  978. /**
  979. * bfq_updated_next_req - update the queue after a new next_rq selection.
  980. * @bfqd: the device data the queue belongs to.
  981. * @bfqq: the queue to update.
  982. *
  983. * If the first request of a queue changes we make sure that the queue
  984. * has enough budget to serve at least its first request (if the
  985. * request has grown). We do this because if the queue has not enough
  986. * budget for its first request, it has to go through two dispatch
  987. * rounds to actually get it dispatched.
  988. */
  989. static void bfq_updated_next_req(struct bfq_data *bfqd,
  990. struct bfq_queue *bfqq)
  991. {
  992. struct bfq_entity *entity = &bfqq->entity;
  993. struct request *next_rq = bfqq->next_rq;
  994. unsigned long new_budget;
  995. if (!next_rq)
  996. return;
  997. if (bfqq == bfqd->in_service_queue)
  998. /*
  999. * In order not to break guarantees, budgets cannot be
  1000. * changed after an entity has been selected.
  1001. */
  1002. return;
  1003. new_budget = max_t(unsigned long,
  1004. max_t(unsigned long, bfqq->max_budget,
  1005. bfq_serv_to_charge(next_rq, bfqq)),
  1006. entity->service);
  1007. if (entity->budget != new_budget) {
  1008. entity->budget = new_budget;
  1009. bfq_log_bfqq(bfqd, bfqq, "updated next rq: new budget %lu",
  1010. new_budget);
  1011. bfq_requeue_bfqq(bfqd, bfqq, false);
  1012. }
  1013. }
  1014. static unsigned int bfq_wr_duration(struct bfq_data *bfqd)
  1015. {
  1016. u64 dur;
  1017. if (bfqd->bfq_wr_max_time > 0)
  1018. return bfqd->bfq_wr_max_time;
  1019. dur = bfqd->rate_dur_prod;
  1020. do_div(dur, bfqd->peak_rate);
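/*
* rate_dur_prod is the product of a reference rate and a reference
* weight-raising duration, so dur scales inversely with the
* measured peak rate: the slower the device, the longer queues
* remain weight-raised.
*/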
  1021. /*
  1022. * Limit duration between 3 and 25 seconds. The upper limit
  1023. * has been conservatively set after the following worst case:
  1024. * on a QEMU/KVM virtual machine
  1025. * - running in a slow PC
  1026. * - with a virtual disk stacked on a slow low-end 5400rpm HDD
  1027. * - serving a heavy I/O workload, such as the sequential reading
  1028. * of several files
  1029. * mplayer took 23 seconds to start, if constantly weight-raised.
  1030. *
1031. * As for values higher than the one accommodating the above bad
1032. * scenario, tests show that such higher values would often yield
  1033. * the opposite of the desired result, i.e., would worsen
  1034. * responsiveness by allowing non-interactive applications to
  1035. * preserve weight raising for too long.
  1036. *
  1037. * On the other end, lower values than 3 seconds make it
  1038. * difficult for most interactive tasks to complete their jobs
  1039. * before weight-raising finishes.
  1040. */
  1041. return clamp_val(dur, msecs_to_jiffies(3000), msecs_to_jiffies(25000));
  1042. }
  1043. /* switch back from soft real-time to interactive weight raising */
  1044. static void switch_back_to_interactive_wr(struct bfq_queue *bfqq,
  1045. struct bfq_data *bfqd)
  1046. {
  1047. bfqq->wr_coeff = bfqd->bfq_wr_coeff;
  1048. bfqq->wr_cur_max_time = bfq_wr_duration(bfqd);
  1049. bfqq->last_wr_start_finish = bfqq->wr_start_at_switch_to_srt;
  1050. }
  1051. static void
  1052. bfq_bfqq_resume_state(struct bfq_queue *bfqq, struct bfq_data *bfqd,
  1053. struct bfq_io_cq *bic, bool bfq_already_existing)
  1054. {
  1055. unsigned int old_wr_coeff = 1;
  1056. bool busy = bfq_already_existing && bfq_bfqq_busy(bfqq);
  1057. if (bic->saved_has_short_ttime)
  1058. bfq_mark_bfqq_has_short_ttime(bfqq);
  1059. else
  1060. bfq_clear_bfqq_has_short_ttime(bfqq);
  1061. if (bic->saved_IO_bound)
  1062. bfq_mark_bfqq_IO_bound(bfqq);
  1063. else
  1064. bfq_clear_bfqq_IO_bound(bfqq);
  1065. bfqq->last_serv_time_ns = bic->saved_last_serv_time_ns;
  1066. bfqq->inject_limit = bic->saved_inject_limit;
  1067. bfqq->decrease_time_jif = bic->saved_decrease_time_jif;
  1068. bfqq->entity.new_weight = bic->saved_weight;
  1069. bfqq->ttime = bic->saved_ttime;
  1070. bfqq->io_start_time = bic->saved_io_start_time;
  1071. bfqq->tot_idle_time = bic->saved_tot_idle_time;
  1072. /*
  1073. * Restore weight coefficient only if low_latency is on
  1074. */
  1075. if (bfqd->low_latency) {
  1076. old_wr_coeff = bfqq->wr_coeff;
  1077. bfqq->wr_coeff = bic->saved_wr_coeff;
  1078. }
  1079. bfqq->service_from_wr = bic->saved_service_from_wr;
  1080. bfqq->wr_start_at_switch_to_srt = bic->saved_wr_start_at_switch_to_srt;
  1081. bfqq->last_wr_start_finish = bic->saved_last_wr_start_finish;
  1082. bfqq->wr_cur_max_time = bic->saved_wr_cur_max_time;
  1083. if (bfqq->wr_coeff > 1 && (bfq_bfqq_in_large_burst(bfqq) ||
  1084. time_is_before_jiffies(bfqq->last_wr_start_finish +
  1085. bfqq->wr_cur_max_time))) {
  1086. if (bfqq->wr_cur_max_time == bfqd->bfq_wr_rt_max_time &&
  1087. !bfq_bfqq_in_large_burst(bfqq) &&
  1088. time_is_after_eq_jiffies(bfqq->wr_start_at_switch_to_srt +
  1089. bfq_wr_duration(bfqd))) {
  1090. switch_back_to_interactive_wr(bfqq, bfqd);
  1091. } else {
  1092. bfqq->wr_coeff = 1;
  1093. bfq_log_bfqq(bfqq->bfqd, bfqq,
  1094. "resume state: switching off wr");
  1095. }
  1096. }
  1097. /* make sure weight will be updated, however we got here */
  1098. bfqq->entity.prio_changed = 1;
  1099. if (likely(!busy))
  1100. return;
  1101. if (old_wr_coeff == 1 && bfqq->wr_coeff > 1)
  1102. bfqd->wr_busy_queues++;
  1103. else if (old_wr_coeff > 1 && bfqq->wr_coeff == 1)
  1104. bfqd->wr_busy_queues--;
  1105. }
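/*
* Number of "process references" to bfqq: the total refcount minus
* the references held for allocated requests, for scheduler
* bookkeeping (entity on a service tree or in service, weight
* counter) and for stable merging.
*/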
  1106. static int bfqq_process_refs(struct bfq_queue *bfqq)
  1107. {
  1108. return bfqq->ref - bfqq->entity.allocated -
  1109. bfqq->entity.on_st_or_in_serv -
  1110. (bfqq->weight_counter != NULL) - bfqq->stable_ref;
  1111. }
  1112. /* Empty burst list and add just bfqq (see comments on bfq_handle_burst) */
  1113. static void bfq_reset_burst_list(struct bfq_data *bfqd, struct bfq_queue *bfqq)
  1114. {
  1115. struct bfq_queue *item;
  1116. struct hlist_node *n;
  1117. hlist_for_each_entry_safe(item, n, &bfqd->burst_list, burst_list_node)
  1118. hlist_del_init(&item->burst_list_node);
  1119. /*
  1120. * Start the creation of a new burst list only if there is no
  1121. * active queue. See comments on the conditional invocation of
  1122. * bfq_handle_burst().
  1123. */
  1124. if (bfq_tot_busy_queues(bfqd) == 0) {
  1125. hlist_add_head(&bfqq->burst_list_node, &bfqd->burst_list);
  1126. bfqd->burst_size = 1;
  1127. } else
  1128. bfqd->burst_size = 0;
  1129. bfqd->burst_parent_entity = bfqq->entity.parent;
  1130. }
  1131. /* Add bfqq to the list of queues in current burst (see bfq_handle_burst) */
  1132. static void bfq_add_to_burst(struct bfq_data *bfqd, struct bfq_queue *bfqq)
  1133. {
  1134. /* Increment burst size to take into account also bfqq */
  1135. bfqd->burst_size++;
  1136. if (bfqd->burst_size == bfqd->bfq_large_burst_thresh) {
  1137. struct bfq_queue *pos, *bfqq_item;
  1138. struct hlist_node *n;
  1139. /*
  1140. * Enough queues have been activated shortly after each
  1141. * other to consider this burst as large.
  1142. */
  1143. bfqd->large_burst = true;
  1144. /*
  1145. * We can now mark all queues in the burst list as
  1146. * belonging to a large burst.
  1147. */
  1148. hlist_for_each_entry(bfqq_item, &bfqd->burst_list,
  1149. burst_list_node)
  1150. bfq_mark_bfqq_in_large_burst(bfqq_item);
  1151. bfq_mark_bfqq_in_large_burst(bfqq);
  1152. /*
  1153. * From now on, and until the current burst finishes, any
  1154. * new queue being activated shortly after the last queue
  1155. * was inserted in the burst can be immediately marked as
  1156. * belonging to a large burst. So the burst list is not
  1157. * needed any more. Remove it.
  1158. */
  1159. hlist_for_each_entry_safe(pos, n, &bfqd->burst_list,
  1160. burst_list_node)
  1161. hlist_del_init(&pos->burst_list_node);
  1162. } else /*
  1163. * Burst not yet large: add bfqq to the burst list. Do
  1164. * not increment the ref counter for bfqq, because bfqq
  1165. * is removed from the burst list before freeing bfqq
  1166. * in put_queue.
  1167. */
  1168. hlist_add_head(&bfqq->burst_list_node, &bfqd->burst_list);
  1169. }
  1170. /*
  1171. * If many queues belonging to the same group happen to be created
  1172. * shortly after each other, then the processes associated with these
  1173. * queues have typically a common goal. In particular, bursts of queue
  1174. * creations are usually caused by services or applications that spawn
  1175. * many parallel threads/processes. Examples are systemd during boot,
  1176. * or git grep. To help these processes get their job done as soon as
  1177. * possible, it is usually better to not grant either weight-raising
  1178. * or device idling to their queues, unless these queues must be
  1179. * protected from the I/O flowing through other active queues.
  1180. *
  1181. * In this comment we describe, firstly, the reasons why this fact
  1182. * holds, and, secondly, the next function, which implements the main
  1183. * steps needed to properly mark these queues so that they can then be
  1184. * treated in a different way.
  1185. *
  1186. * The above services or applications benefit mostly from a high
  1187. * throughput: the quicker the requests of the activated queues are
  1188. * cumulatively served, the sooner the target job of these queues gets
  1189. * completed. As a consequence, weight-raising any of these queues,
  1190. * which also implies idling the device for it, is almost always
  1191. * counterproductive, unless there are other active queues to isolate
1192. * these new queues from. If there are no other active queues, then
  1193. * weight-raising these new queues just lowers throughput in most
  1194. * cases.
  1195. *
  1196. * On the other hand, a burst of queue creations may be caused also by
  1197. * the start of an application that does not consist of a lot of
  1198. * parallel I/O-bound threads. In fact, with a complex application,
1199. * several short processes may need to be executed to start up the
  1200. * application. In this respect, to start an application as quickly as
  1201. * possible, the best thing to do is in any case to privilege the I/O
  1202. * related to the application with respect to all other
  1203. * I/O. Therefore, the best strategy to start as quickly as possible
  1204. * an application that causes a burst of queue creations is to
  1205. * weight-raise all the queues created during the burst. This is the
  1206. * exact opposite of the best strategy for the other type of bursts.
  1207. *
  1208. * In the end, to take the best action for each of the two cases, the
  1209. * two types of bursts need to be distinguished. Fortunately, this
  1210. * seems relatively easy, by looking at the sizes of the bursts. In
  1211. * particular, we found a threshold such that only bursts with a
  1212. * larger size than that threshold are apparently caused by
  1213. * services or commands such as systemd or git grep. For brevity,
  1214. * hereafter we call just 'large' these bursts. BFQ *does not*
  1215. * weight-raise queues whose creation occurs in a large burst. In
  1216. * addition, for each of these queues BFQ performs or does not perform
  1217. * idling depending on which choice boosts the throughput more. The
  1218. * exact choice depends on the device and request pattern at
  1219. * hand.
  1220. *
  1221. * Unfortunately, false positives may occur while an interactive task
  1222. * is starting (e.g., an application is being started). The
  1223. * consequence is that the queues associated with the task do not
  1224. * enjoy weight raising as expected. Fortunately these false positives
  1225. * are very rare. They typically occur if some service happens to
  1226. * start doing I/O exactly when the interactive task starts.
  1227. *
  1228. * Turning back to the next function, it is invoked only if there are
  1229. * no active queues (apart from active queues that would belong to the
1230. * same possible burst as bfqq), and it implements all
  1231. * the steps needed to detect the occurrence of a large burst and to
  1232. * properly mark all the queues belonging to it (so that they can then
  1233. * be treated in a different way). This goal is achieved by
  1234. * maintaining a "burst list" that holds, temporarily, the queues that
  1235. * belong to the burst in progress. The list is then used to mark
  1236. * these queues as belonging to a large burst if the burst does become
  1237. * large. The main steps are the following.
  1238. *
  1239. * . when the very first queue is created, the queue is inserted into the
  1240. * list (as it could be the first queue in a possible burst)
  1241. *
  1242. * . if the current burst has not yet become large, and a queue Q that does
  1243. * not yet belong to the burst is activated shortly after the last time
  1244. * at which a new queue entered the burst list, then the function appends
  1245. * Q to the burst list
  1246. *
  1247. * . if, as a consequence of the previous step, the burst size reaches
  1248. * the large-burst threshold, then
  1249. *
  1250. * . all the queues in the burst list are marked as belonging to a
  1251. * large burst
  1252. *
  1253. * . the burst list is deleted; in fact, the burst list already served
  1254. * its purpose (keeping temporarily track of the queues in a burst,
  1255. * so as to be able to mark them as belonging to a large burst in the
  1256. * previous sub-step), and now is not needed any more
  1257. *
  1258. * . the device enters a large-burst mode
  1259. *
  1260. * . if a queue Q that does not belong to the burst is created while
  1261. * the device is in large-burst mode and shortly after the last time
  1262. * at which a queue either entered the burst list or was marked as
  1263. * belonging to the current large burst, then Q is immediately marked
  1264. * as belonging to a large burst.
  1265. *
  1266. * . if a queue Q that does not belong to the burst is created a while
1267. * later (i.e., not shortly after) than the last time at which a queue
  1268. * either entered the burst list or was marked as belonging to the
  1269. * current large burst, then the current burst is deemed as finished and:
  1270. *
  1271. * . the large-burst mode is reset if set
  1272. *
  1273. * . the burst list is emptied
  1274. *
  1275. * . Q is inserted in the burst list, as Q may be the first queue
  1276. * in a possible new burst (then the burst list contains just Q
  1277. * after this step).
  1278. */
  1279. static void bfq_handle_burst(struct bfq_data *bfqd, struct bfq_queue *bfqq)
  1280. {
  1281. /*
  1282. * If bfqq is already in the burst list or is part of a large
  1283. * burst, or finally has just been split, then there is
  1284. * nothing else to do.
  1285. */
  1286. if (!hlist_unhashed(&bfqq->burst_list_node) ||
  1287. bfq_bfqq_in_large_burst(bfqq) ||
  1288. time_is_after_eq_jiffies(bfqq->split_time +
  1289. msecs_to_jiffies(10)))
  1290. return;
  1291. /*
  1292. * If bfqq's creation happens late enough, or bfqq belongs to
  1293. * a different group than the burst group, then the current
  1294. * burst is finished, and related data structures must be
  1295. * reset.
  1296. *
  1297. * In this respect, consider the special case where bfqq is
  1298. * the very first queue created after BFQ is selected for this
  1299. * device. In this case, last_ins_in_burst and
  1300. * burst_parent_entity are not yet significant when we get
  1301. * here. But it is easy to verify that, whether or not the
  1302. * following condition is true, bfqq will end up being
  1303. * inserted into the burst list. In particular the list will
  1304. * happen to contain only bfqq. And this is exactly what has
  1305. * to happen, as bfqq may be the first queue of the first
  1306. * burst.
  1307. */
  1308. if (time_is_before_jiffies(bfqd->last_ins_in_burst +
  1309. bfqd->bfq_burst_interval) ||
  1310. bfqq->entity.parent != bfqd->burst_parent_entity) {
  1311. bfqd->large_burst = false;
  1312. bfq_reset_burst_list(bfqd, bfqq);
  1313. goto end;
  1314. }
  1315. /*
  1316. * If we get here, then bfqq is being activated shortly after the
  1317. * last queue. So, if the current burst is also large, we can mark
  1318. * bfqq as belonging to this large burst immediately.
  1319. */
  1320. if (bfqd->large_burst) {
  1321. bfq_mark_bfqq_in_large_burst(bfqq);
  1322. goto end;
  1323. }
  1324. /*
  1325. * If we get here, then a large-burst state has not yet been
  1326. * reached, but bfqq is being activated shortly after the last
  1327. * queue. Then we add bfqq to the burst.
  1328. */
  1329. bfq_add_to_burst(bfqd, bfqq);
  1330. end:
  1331. /*
  1332. * At this point, bfqq either has been added to the current
  1333. * burst or has caused the current burst to terminate and a
  1334. * possible new burst to start. In particular, in the second
  1335. * case, bfqq has become the first queue in the possible new
  1336. * burst. In both cases last_ins_in_burst needs to be moved
  1337. * forward.
  1338. */
  1339. bfqd->last_ins_in_burst = jiffies;
  1340. }
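/*
* Budget still available to bfqq in its current service slot: the
* assigned budget minus the service already received.
*/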
  1341. static int bfq_bfqq_budget_left(struct bfq_queue *bfqq)
  1342. {
  1343. struct bfq_entity *entity = &bfqq->entity;
  1344. return entity->budget - entity->service;
  1345. }
  1346. /*
  1347. * If enough samples have been computed, return the current max budget
  1348. * stored in bfqd, which is dynamically updated according to the
  1349. * estimated disk peak rate; otherwise return the default max budget
  1350. */
  1351. static int bfq_max_budget(struct bfq_data *bfqd)
  1352. {
  1353. if (bfqd->budgets_assigned < bfq_stats_min_budgets)
  1354. return bfq_default_max_budget;
  1355. else
  1356. return bfqd->bfq_max_budget;
  1357. }
  1358. /*
  1359. * Return min budget, which is a fraction of the current or default
  1360. * max budget (trying with 1/32)
  1361. */
  1362. static int bfq_min_budget(struct bfq_data *bfqd)
  1363. {
  1364. if (bfqd->budgets_assigned < bfq_stats_min_budgets)
  1365. return bfq_default_max_budget / 32;
  1366. else
  1367. return bfqd->bfq_max_budget / 32;
  1368. }
  1369. /*
  1370. * The next function, invoked after the input queue bfqq switches from
  1371. * idle to busy, updates the budget of bfqq. The function also tells
  1372. * whether the in-service queue should be expired, by returning
  1373. * true. The purpose of expiring the in-service queue is to give bfqq
  1374. * the chance to possibly preempt the in-service queue, and the reason
  1375. * for preempting the in-service queue is to achieve one of the two
  1376. * goals below.
  1377. *
  1378. * 1. Guarantee to bfqq its reserved bandwidth even if bfqq has
  1379. * expired because it has remained idle. In particular, bfqq may have
  1380. * expired for one of the following two reasons:
  1381. *
  1382. * - BFQQE_NO_MORE_REQUESTS bfqq did not enjoy any device idling
  1383. * and did not make it to issue a new request before its last
  1384. * request was served;
  1385. *
  1386. * - BFQQE_TOO_IDLE bfqq did enjoy device idling, but did not issue
  1387. * a new request before the expiration of the idling-time.
  1388. *
  1389. * Even if bfqq has expired for one of the above reasons, the process
  1390. * associated with the queue may be however issuing requests greedily,
  1391. * and thus be sensitive to the bandwidth it receives (bfqq may have
  1392. * remained idle for other reasons: CPU high load, bfqq not enjoying
  1393. * idling, I/O throttling somewhere in the path from the process to
  1394. * the I/O scheduler, ...). But if, after every expiration for one of
  1395. * the above two reasons, bfqq has to wait for the service of at least
  1396. * one full budget of another queue before being served again, then
  1397. * bfqq is likely to get a much lower bandwidth or resource time than
  1398. * its reserved ones. To address this issue, two countermeasures need
  1399. * to be taken.
  1400. *
  1401. * First, the budget and the timestamps of bfqq need to be updated in
  1402. * a special way on bfqq reactivation: they need to be updated as if
  1403. * bfqq did not remain idle and did not expire. In fact, if they are
  1404. * computed as if bfqq expired and remained idle until reactivation,
  1405. * then the process associated with bfqq is treated as if, instead of
  1406. * being greedy, it stopped issuing requests when bfqq remained idle,
  1407. * and restarts issuing requests only on this reactivation. In other
  1408. * words, the scheduler does not help the process recover the "service
  1409. * hole" between bfqq expiration and reactivation. As a consequence,
  1410. * the process receives a lower bandwidth than its reserved one. In
  1411. * contrast, to recover this hole, the budget must be updated as if
  1412. * bfqq was not expired at all before this reactivation, i.e., it must
  1413. * be set to the value of the remaining budget when bfqq was
  1414. * expired. Along the same line, timestamps need to be assigned the
  1415. * value they had the last time bfqq was selected for service, i.e.,
  1416. * before last expiration. Thus timestamps need to be back-shifted
  1417. * with respect to their normal computation (see [1] for more details
  1418. * on this tricky aspect).
  1419. *
  1420. * Secondly, to allow the process to recover the hole, the in-service
  1421. * queue must be expired too, to give bfqq the chance to preempt it
  1422. * immediately. In fact, if bfqq has to wait for a full budget of the
  1423. * in-service queue to be completed, then it may become impossible to
  1424. * let the process recover the hole, even if the back-shifted
  1425. * timestamps of bfqq are lower than those of the in-service queue. If
  1426. * this happens for most or all of the holes, then the process may not
  1427. * receive its reserved bandwidth. In this respect, it is worth noting
1428. * that, since the service of outstanding requests is not preemptible, a
1429. * small fraction of the holes may, however, be unrecoverable, thereby
  1430. * causing a little loss of bandwidth.
  1431. *
  1432. * The last important point is detecting whether bfqq does need this
  1433. * bandwidth recovery. In this respect, the next function deems the
  1434. * process associated with bfqq greedy, and thus allows it to recover
  1435. * the hole, if: 1) the process is waiting for the arrival of a new
  1436. * request (which implies that bfqq expired for one of the above two
  1437. * reasons), and 2) such a request has arrived soon. The first
  1438. * condition is controlled through the flag non_blocking_wait_rq,
  1439. * while the second through the flag arrived_in_time. If both
  1440. * conditions hold, then the function computes the budget in the
  1441. * above-described special way, and signals that the in-service queue
  1442. * should be expired. Timestamp back-shifting is done later in
  1443. * __bfq_activate_entity.
  1444. *
  1445. * 2. Reduce latency. Even if timestamps are not backshifted to let
  1446. * the process associated with bfqq recover a service hole, bfqq may
  1447. * however happen to have, after being (re)activated, a lower finish
  1448. * timestamp than the in-service queue. That is, the next budget of
  1449. * bfqq may have to be completed before the one of the in-service
  1450. * queue. If this is the case, then preempting the in-service queue
  1451. * allows this goal to be achieved, apart from the unpreemptible,
  1452. * outstanding requests mentioned above.
  1453. *
  1454. * Unfortunately, regardless of which of the above two goals one wants
  1455. * to achieve, service trees need first to be updated to know whether
  1456. * the in-service queue must be preempted. To have service trees
  1457. * correctly updated, the in-service queue must be expired and
  1458. * rescheduled, and bfqq must be scheduled too. This is one of the
  1459. * most costly operations (in future versions, the scheduling
  1460. * mechanism may be re-designed in such a way to make it possible to
  1461. * know whether preemption is needed without needing to update service
  1462. * trees). In addition, queue preemptions almost always cause random
  1463. * I/O, which may in turn cause loss of throughput. Finally, there may
  1464. * even be no in-service queue when the next function is invoked (so,
  1465. * no queue to compare timestamps with). Because of these facts, the
  1466. * next function adopts the following simple scheme to avoid costly
  1467. * operations, too frequent preemptions and too many dependencies on
  1468. * the state of the scheduler: it requests the expiration of the
  1469. * in-service queue (unconditionally) only for queues that need to
  1470. * recover a hole. Then it delegates to other parts of the code the
  1471. * responsibility of handling the above case 2.
  1472. */
  1473. static bool bfq_bfqq_update_budg_for_activation(struct bfq_data *bfqd,
  1474. struct bfq_queue *bfqq,
  1475. bool arrived_in_time)
  1476. {
  1477. struct bfq_entity *entity = &bfqq->entity;
  1478. /*
  1479. * In the next compound condition, we check also whether there
  1480. * is some budget left, because otherwise there is no point in
  1481. * trying to go on serving bfqq with this same budget: bfqq
  1482. * would be expired immediately after being selected for
  1483. * service. This would only cause useless overhead.
  1484. */
  1485. if (bfq_bfqq_non_blocking_wait_rq(bfqq) && arrived_in_time &&
  1486. bfq_bfqq_budget_left(bfqq) > 0) {
  1487. /*
  1488. * We do not clear the flag non_blocking_wait_rq here, as
  1489. * the latter is used in bfq_activate_bfqq to signal
  1490. * that timestamps need to be back-shifted (and is
  1491. * cleared right after).
  1492. */
  1493. /*
  1494. * In next assignment we rely on that either
  1495. * entity->service or entity->budget are not updated
  1496. * on expiration if bfqq is empty (see
  1497. * __bfq_bfqq_recalc_budget). Thus both quantities
  1498. * remain unchanged after such an expiration, and the
  1499. * following statement therefore assigns to
  1500. * entity->budget the remaining budget on such an
  1501. * expiration.
  1502. */
  1503. entity->budget = min_t(unsigned long,
  1504. bfq_bfqq_budget_left(bfqq),
  1505. bfqq->max_budget);
  1506. /*
  1507. * At this point, we have used entity->service to get
  1508. * the budget left (needed for updating
  1509. * entity->budget). Thus we finally can, and have to,
  1510. * reset entity->service. The latter must be reset
  1511. * because bfqq would otherwise be charged again for
  1512. * the service it has received during its previous
  1513. * service slot(s).
  1514. */
  1515. entity->service = 0;
  1516. return true;
  1517. }
  1518. /*
  1519. * We can finally complete expiration, by setting service to 0.
  1520. */
  1521. entity->service = 0;
  1522. entity->budget = max_t(unsigned long, bfqq->max_budget,
  1523. bfq_serv_to_charge(bfqq->next_rq, bfqq));
  1524. bfq_clear_bfqq_non_blocking_wait_rq(bfqq);
  1525. return false;
  1526. }
  1527. /*
  1528. * Return the farthest past time instant according to jiffies
  1529. * macros.
  1530. */
  1531. static unsigned long bfq_smallest_from_now(void)
  1532. {
  1533. return jiffies - MAX_JIFFY_OFFSET;
  1534. }
  1535. static void bfq_update_bfqq_wr_on_rq_arrival(struct bfq_data *bfqd,
  1536. struct bfq_queue *bfqq,
  1537. unsigned int old_wr_coeff,
  1538. bool wr_or_deserves_wr,
  1539. bool interactive,
  1540. bool in_burst,
  1541. bool soft_rt)
  1542. {
  1543. if (old_wr_coeff == 1 && wr_or_deserves_wr) {
  1544. /* start a weight-raising period */
  1545. if (interactive) {
  1546. bfqq->service_from_wr = 0;
  1547. bfqq->wr_coeff = bfqd->bfq_wr_coeff;
  1548. bfqq->wr_cur_max_time = bfq_wr_duration(bfqd);
  1549. } else {
  1550. /*
  1551. * No interactive weight raising in progress
  1552. * here: assign minus infinity to
  1553. * wr_start_at_switch_to_srt, to make sure
  1554. * that, at the end of the soft-real-time
  1555. * weight raising periods that is starting
  1556. * now, no interactive weight-raising period
  1557. * may be wrongly considered as still in
  1558. * progress (and thus actually started by
  1559. * mistake).
  1560. */
  1561. bfqq->wr_start_at_switch_to_srt =
  1562. bfq_smallest_from_now();
  1563. bfqq->wr_coeff = bfqd->bfq_wr_coeff *
  1564. BFQ_SOFTRT_WEIGHT_FACTOR;
  1565. bfqq->wr_cur_max_time =
  1566. bfqd->bfq_wr_rt_max_time;
  1567. }
  1568. /*
  1569. * If needed, further reduce budget to make sure it is
  1570. * close to bfqq's backlog, so as to reduce the
  1571. * scheduling-error component due to a too large
  1572. * budget. Do not care about throughput consequences,
  1573. * but only about latency. Finally, do not assign a
  1574. * too small budget either, to avoid increasing
  1575. * latency by causing too frequent expirations.
  1576. */
  1577. bfqq->entity.budget = min_t(unsigned long,
  1578. bfqq->entity.budget,
  1579. 2 * bfq_min_budget(bfqd));
  1580. } else if (old_wr_coeff > 1) {
  1581. if (interactive) { /* update wr coeff and duration */
  1582. bfqq->wr_coeff = bfqd->bfq_wr_coeff;
  1583. bfqq->wr_cur_max_time = bfq_wr_duration(bfqd);
  1584. } else if (in_burst)
  1585. bfqq->wr_coeff = 1;
  1586. else if (soft_rt) {
  1587. /*
  1588. * The application is now or still meeting the
  1589. * requirements for being deemed soft rt. We
  1590. * can then correctly and safely (re)charge
  1591. * the weight-raising duration for the
  1592. * application with the weight-raising
  1593. * duration for soft rt applications.
  1594. *
  1595. * In particular, doing this recharge now, i.e.,
  1596. * before the weight-raising period for the
  1597. * application finishes, reduces the probability
  1598. * of the following negative scenario:
  1599. * 1) the weight of a soft rt application is
  1600. * raised at startup (as for any newly
  1601. * created application),
  1602. * 2) since the application is not interactive,
  1603. * at a certain time weight-raising is
  1604. * stopped for the application,
  1605. * 3) at that time the application happens to
  1606. * still have pending requests, and hence
  1607. * is destined to not have a chance to be
  1608. * deemed soft rt before these requests are
  1609. * completed (see the comments to the
  1610. * function bfq_bfqq_softrt_next_start()
  1611. * for details on soft rt detection),
  1612. * 4) these pending requests experience a high
  1613. * latency because the application is not
  1614. * weight-raised while they are pending.
  1615. */
  1616. if (bfqq->wr_cur_max_time !=
  1617. bfqd->bfq_wr_rt_max_time) {
  1618. bfqq->wr_start_at_switch_to_srt =
  1619. bfqq->last_wr_start_finish;
  1620. bfqq->wr_cur_max_time =
  1621. bfqd->bfq_wr_rt_max_time;
  1622. bfqq->wr_coeff = bfqd->bfq_wr_coeff *
  1623. BFQ_SOFTRT_WEIGHT_FACTOR;
  1624. }
  1625. bfqq->last_wr_start_finish = jiffies;
  1626. }
  1627. }
  1628. }
  1629. static bool bfq_bfqq_idle_for_long_time(struct bfq_data *bfqd,
  1630. struct bfq_queue *bfqq)
  1631. {
  1632. return bfqq->dispatched == 0 &&
  1633. time_is_before_jiffies(
  1634. bfqq->budget_timeout +
  1635. bfqd->bfq_wr_min_idle_time);
  1636. }
  1637. /*
  1638. * Return true if bfqq is in a higher priority class, or has a higher
  1639. * weight than the in-service queue.
  1640. */
  1641. static bool bfq_bfqq_higher_class_or_weight(struct bfq_queue *bfqq,
  1642. struct bfq_queue *in_serv_bfqq)
  1643. {
  1644. int bfqq_weight, in_serv_weight;
  1645. if (bfqq->ioprio_class < in_serv_bfqq->ioprio_class)
  1646. return true;
  1647. if (in_serv_bfqq->entity.parent == bfqq->entity.parent) {
  1648. bfqq_weight = bfqq->entity.weight;
  1649. in_serv_weight = in_serv_bfqq->entity.weight;
  1650. } else {
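/*
* The two queues belong to different groups: their own weights are
* not directly comparable, so compare the weights of the
* respective group entities instead (falling back to the queue
* weight when there is no parent entity).
*/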
  1651. if (bfqq->entity.parent)
  1652. bfqq_weight = bfqq->entity.parent->weight;
  1653. else
  1654. bfqq_weight = bfqq->entity.weight;
  1655. if (in_serv_bfqq->entity.parent)
  1656. in_serv_weight = in_serv_bfqq->entity.parent->weight;
  1657. else
  1658. in_serv_weight = in_serv_bfqq->entity.weight;
  1659. }
  1660. return bfqq_weight > in_serv_weight;
  1661. }
  1662. static bool bfq_better_to_idle(struct bfq_queue *bfqq);
  1663. static void bfq_bfqq_handle_idle_busy_switch(struct bfq_data *bfqd,
  1664. struct bfq_queue *bfqq,
  1665. int old_wr_coeff,
  1666. struct request *rq,
  1667. bool *interactive)
  1668. {
  1669. bool soft_rt, in_burst, wr_or_deserves_wr,
  1670. bfqq_wants_to_preempt,
  1671. idle_for_long_time = bfq_bfqq_idle_for_long_time(bfqd, bfqq),
  1672. /*
  1673. * See the comments on
  1674. * bfq_bfqq_update_budg_for_activation for
  1675. * details on the usage of the next variable.
  1676. */
  1677. arrived_in_time = ktime_get_ns() <=
  1678. bfqq->ttime.last_end_request +
  1679. bfqd->bfq_slice_idle * 3;
  1680. /*
  1681. * bfqq deserves to be weight-raised if:
  1682. * - it is sync,
  1683. * - it does not belong to a large burst,
  1684. * - it has been idle for enough time or is soft real-time,
  1685. * - is linked to a bfq_io_cq (it is not shared in any sense),
  1686. * - has a default weight (otherwise we assume the user wanted
  1687. * to control its weight explicitly)
  1688. */
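/*
* (In the checks below, a new_weight equal to 40 corresponds to
* the default weight, i.e., the weight derived from the default
* ioprio in the best-effort class.)
*/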
  1689. in_burst = bfq_bfqq_in_large_burst(bfqq);
  1690. soft_rt = bfqd->bfq_wr_max_softrt_rate > 0 &&
  1691. !BFQQ_TOTALLY_SEEKY(bfqq) &&
  1692. !in_burst &&
  1693. time_is_before_jiffies(bfqq->soft_rt_next_start) &&
  1694. bfqq->dispatched == 0 &&
  1695. bfqq->entity.new_weight == 40;
  1696. *interactive = !in_burst && idle_for_long_time &&
  1697. bfqq->entity.new_weight == 40;
  1698. /*
  1699. * Merged bfq_queues are kept out of weight-raising
  1700. * (low-latency) mechanisms. The reason is that these queues
  1701. * are usually created for non-interactive and
  1702. * non-soft-real-time tasks. Yet this is not the case for
  1703. * stably-merged queues. These queues are merged just because
  1704. * they are created shortly after each other. So they may
  1705. * easily serve the I/O of an interactive or soft-real time
  1706. * application, if the application happens to spawn multiple
1707. * processes. So let stably-merged queues also enjoy weight
  1708. * raising.
  1709. */
  1710. wr_or_deserves_wr = bfqd->low_latency &&
  1711. (bfqq->wr_coeff > 1 ||
  1712. (bfq_bfqq_sync(bfqq) &&
  1713. (bfqq->bic || RQ_BIC(rq)->stably_merged) &&
  1714. (*interactive || soft_rt)));
  1715. /*
  1716. * Using the last flag, update budget and check whether bfqq
  1717. * may want to preempt the in-service queue.
  1718. */
  1719. bfqq_wants_to_preempt =
  1720. bfq_bfqq_update_budg_for_activation(bfqd, bfqq,
  1721. arrived_in_time);
  1722. /*
  1723. * If bfqq happened to be activated in a burst, but has been
  1724. * idle for much more than an interactive queue, then we
  1725. * assume that, in the overall I/O initiated in the burst, the
  1726. * I/O associated with bfqq is finished. So bfqq does not need
  1727. * to be treated as a queue belonging to a burst
  1728. * anymore. Accordingly, we reset bfqq's in_large_burst flag
  1729. * if set, and remove bfqq from the burst list if it's
  1730. * there. We do not decrement burst_size, because the fact
  1731. * that bfqq does not need to belong to the burst list any
  1732. * more does not invalidate the fact that bfqq was created in
  1733. * a burst.
  1734. */
  1735. if (likely(!bfq_bfqq_just_created(bfqq)) &&
  1736. idle_for_long_time &&
  1737. time_is_before_jiffies(
  1738. bfqq->budget_timeout +
  1739. msecs_to_jiffies(10000))) {
  1740. hlist_del_init(&bfqq->burst_list_node);
  1741. bfq_clear_bfqq_in_large_burst(bfqq);
  1742. }
  1743. bfq_clear_bfqq_just_created(bfqq);
  1744. if (bfqd->low_latency) {
  1745. if (unlikely(time_is_after_jiffies(bfqq->split_time)))
  1746. /* wraparound */
  1747. bfqq->split_time =
  1748. jiffies - bfqd->bfq_wr_min_idle_time - 1;
  1749. if (time_is_before_jiffies(bfqq->split_time +
  1750. bfqd->bfq_wr_min_idle_time)) {
  1751. bfq_update_bfqq_wr_on_rq_arrival(bfqd, bfqq,
  1752. old_wr_coeff,
  1753. wr_or_deserves_wr,
  1754. *interactive,
  1755. in_burst,
  1756. soft_rt);
  1757. if (old_wr_coeff != bfqq->wr_coeff)
  1758. bfqq->entity.prio_changed = 1;
  1759. }
  1760. }
  1761. bfqq->last_idle_bklogged = jiffies;
  1762. bfqq->service_from_backlogged = 0;
  1763. bfq_clear_bfqq_softrt_update(bfqq);
  1764. bfq_add_bfqq_busy(bfqq);
  1765. /*
  1766. * Expire in-service queue if preemption may be needed for
  1767. * guarantees or throughput. As for guarantees, we care
  1768. * explicitly about two cases. The first is that bfqq has to
  1769. * recover a service hole, as explained in the comments on
  1770. * bfq_bfqq_update_budg_for_activation(), i.e., that
  1771. * bfqq_wants_to_preempt is true. However, if bfqq does not
  1772. * carry time-critical I/O, then bfqq's bandwidth is less
  1773. * important than that of queues that carry time-critical I/O.
  1774. * So, as a further constraint, we consider this case only if
  1775. * bfqq is at least as weight-raised, i.e., at least as time
  1776. * critical, as the in-service queue.
  1777. *
  1778. * The second case is that bfqq is in a higher priority class,
  1779. * or has a higher weight than the in-service queue. If this
  1780. * condition does not hold, we don't care because, even if
  1781. * bfqq does not start to be served immediately, the resulting
  1782. * delay for bfqq's I/O is however lower or much lower than
  1783. * the ideal completion time to be guaranteed to bfqq's I/O.
  1784. *
  1785. * In both cases, preemption is needed only if, according to
  1786. * the timestamps of both bfqq and of the in-service queue,
  1787. * bfqq actually is the next queue to serve. So, to reduce
  1788. * useless preemptions, the return value of
  1789. * next_queue_may_preempt() is considered in the next compound
  1790. * condition too. Yet next_queue_may_preempt() just checks a
  1791. * simple, necessary condition for bfqq to be the next queue
  1792. * to serve. In fact, to evaluate a sufficient condition, the
  1793. * timestamps of the in-service queue would need to be
  1794. * updated, and this operation is quite costly (see the
  1795. * comments on bfq_bfqq_update_budg_for_activation()).
  1796. *
  1797. * As for throughput, we ask bfq_better_to_idle() whether we
  1798. * still need to plug I/O dispatching. If bfq_better_to_idle()
  1799. * says no, then plugging is not needed any longer, either to
1800. * boost throughput or to preserve service guarantees. Then
  1801. * the best option is to stop plugging I/O, as not doing so
  1802. * would certainly lower throughput. We may end up in this
  1803. * case if: (1) upon a dispatch attempt, we detected that it
  1804. * was better to plug I/O dispatch, and to wait for a new
  1805. * request to arrive for the currently in-service queue, but
  1806. * (2) this switch of bfqq to busy changes the scenario.
  1807. */
  1808. if (bfqd->in_service_queue &&
  1809. ((bfqq_wants_to_preempt &&
  1810. bfqq->wr_coeff >= bfqd->in_service_queue->wr_coeff) ||
  1811. bfq_bfqq_higher_class_or_weight(bfqq, bfqd->in_service_queue) ||
  1812. !bfq_better_to_idle(bfqd->in_service_queue)) &&
  1813. next_queue_may_preempt(bfqd))
  1814. bfq_bfqq_expire(bfqd, bfqd->in_service_queue,
  1815. false, BFQQE_PREEMPTED);
  1816. }
  1817. static void bfq_reset_inject_limit(struct bfq_data *bfqd,
  1818. struct bfq_queue *bfqq)
  1819. {
  1820. /* invalidate baseline total service time */
  1821. bfqq->last_serv_time_ns = 0;
  1822. /*
  1823. * Reset pointer in case we are waiting for
  1824. * some request completion.
  1825. */
  1826. bfqd->waited_rq = NULL;
  1827. /*
  1828. * If bfqq has a short think time, then start by setting the
  1829. * inject limit to 0 prudentially, because the service time of
  1830. * an injected I/O request may be higher than the think time
  1831. * of bfqq, and therefore, if one request was injected when
  1832. * bfqq remains empty, this injected request might delay the
  1833. * service of the next I/O request for bfqq significantly. In
  1834. * case bfqq can actually tolerate some injection, then the
  1835. * adaptive update will however raise the limit soon. This
  1836. * lucky circumstance holds exactly because bfqq has a short
  1837. * think time, and thus, after remaining empty, is likely to
  1838. * get new I/O enqueued---and then completed---before being
  1839. * expired. This is the very pattern that gives the
  1840. * limit-update algorithm the chance to measure the effect of
  1841. * injection on request service times, and then to update the
  1842. * limit accordingly.
  1843. *
  1844. * However, in the following special case, the inject limit is
  1845. * left to 1 even if the think time is short: bfqq's I/O is
  1846. * synchronized with that of some other queue, i.e., bfqq may
  1847. * receive new I/O only after the I/O of the other queue is
  1848. * completed. Keeping the inject limit to 1 allows the
  1849. * blocking I/O to be served while bfqq is in service. And
  1850. * this is very convenient both for bfqq and for overall
  1851. * throughput, as explained in detail in the comments in
  1852. * bfq_update_has_short_ttime().
  1853. *
  1854. * On the opposite end, if bfqq has a long think time, then
1855. * start directly at 1, because:
  1856. * a) on the bright side, keeping at most one request in
  1857. * service in the drive is unlikely to cause any harm to the
  1858. * latency of bfqq's requests, as the service time of a single
  1859. * request is likely to be lower than the think time of bfqq;
  1860. * b) on the downside, after becoming empty, bfqq is likely to
  1861. * expire before getting its next request. With this request
  1862. * arrival pattern, it is very hard to sample total service
  1863. * times and update the inject limit accordingly (see comments
  1864. * on bfq_update_inject_limit()). So the limit is likely to be
  1865. * never, or at least seldom, updated. As a consequence, by
  1866. * setting the limit to 1, we avoid that no injection ever
  1867. * occurs with bfqq. On the downside, this proactive step
  1868. * further reduces chances to actually compute the baseline
  1869. * total service time. Thus it reduces chances to execute the
  1870. * limit-update algorithm and possibly raise the limit to more
  1871. * than 1.
  1872. */
  1873. if (bfq_bfqq_has_short_ttime(bfqq))
  1874. bfqq->inject_limit = 0;
  1875. else
  1876. bfqq->inject_limit = 1;
  1877. bfqq->decrease_time_jif = jiffies;
  1878. }
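/*
 * Minimal sketch of the initial-limit policy described above, with
 * hypothetical names and without the struct bfq_queue plumbing: a
 * short think time starts at 0 (prudential), a long one at 1. Kept
 * out of the build, as it is only illustrative.
 */
#if 0
static unsigned int initial_inject_limit(bool has_short_ttime)
{
	/* 0: rely on the adaptive update to raise the limit soon;
	 * 1: make sure some injection can ever be measured. */
	return has_short_ttime ? 0 : 1;
}
#endif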
  1879. static void bfq_update_io_intensity(struct bfq_queue *bfqq, u64 now_ns)
  1880. {
  1881. u64 tot_io_time = now_ns - bfqq->io_start_time;
  1882. if (RB_EMPTY_ROOT(&bfqq->sort_list) && bfqq->dispatched == 0)
  1883. bfqq->tot_idle_time +=
  1884. now_ns - bfqq->ttime.last_end_request;
  1885. if (unlikely(bfq_bfqq_just_created(bfqq)))
  1886. return;
  1887. /*
  1888. * Must be busy for at least about 80% of the time to be
  1889. * considered I/O bound.
  1890. */
  1891. if (bfqq->tot_idle_time * 5 > tot_io_time)
  1892. bfq_clear_bfqq_IO_bound(bfqq);
  1893. else
  1894. bfq_mark_bfqq_IO_bound(bfqq);
  1895. /*
  1896. * Keep an observation window of at most 200 ms in the past
  1897. * from now.
  1898. */
  1899. if (tot_io_time > 200 * NSEC_PER_MSEC) {
  1900. bfqq->io_start_time = now_ns - (tot_io_time>>1);
  1901. bfqq->tot_idle_time >>= 1;
  1902. }
  1903. }
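/*
 * Minimal sketch of the two checks above, with hypothetical names:
 * the 5x comparison encodes the ~80% busy threshold (idle * 5 > total
 * means more than 20% idle time), and halving both counters keeps the
 * observation window at roughly 200 ms while preserving the
 * idle/total ratio. Kept out of the build, as it is only illustrative.
 */
#if 0
static bool io_bound(u64 idle_ns, u64 tot_ns)
{
	return idle_ns * 5 <= tot_ns;	/* busy for >= ~80% of the window */
}

static void clip_window(u64 *io_start_ns, u64 *idle_ns, u64 now_ns)
{
	u64 tot_ns = now_ns - *io_start_ns;

	if (tot_ns > 200ULL * NSEC_PER_MSEC) {
		*io_start_ns = now_ns - (tot_ns >> 1);
		*idle_ns >>= 1;
	}
}
#endif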
  1904. /*
  1905. * Detect whether bfqq's I/O seems synchronized with that of some
  1906. * other queue, i.e., whether bfqq, after remaining empty, happens to
  1907. * receive new I/O only right after some I/O request of the other
  1908. * queue has been completed. We call waker queue the other queue, and
  1909. * we assume, for simplicity, that bfqq may have at most one waker
  1910. * queue.
  1911. *
  1912. * A remarkable throughput boost can be reached by unconditionally
  1913. * injecting the I/O of the waker queue, every time a new
  1914. * bfq_dispatch_request happens to be invoked while I/O is being
  1915. * plugged for bfqq. In addition to boosting throughput, this
  1916. * unblocks bfqq's I/O, thereby improving bandwidth and latency for
  1917. * bfqq. Note that these same results may be achieved with the general
  1918. * injection mechanism, but less effectively. For details on this
  1919. * aspect, see the comments on the choice of the queue for injection
  1920. * in bfq_select_queue().
  1921. *
  1922. * Turning back to the detection of a waker queue, a queue Q is deemed as a
  1923. * waker queue for bfqq if, for three consecutive times, bfqq happens to become
  1924. * non empty right after a request of Q has been completed within a given
  1925. * timeout. In this respect, even if bfqq is empty, we do not check for a waker
  1926. * if it still has some in-flight I/O. In fact, in this case bfqq is actually
  1927. * still being served by the drive, and may receive new I/O on the completion
  1928. * of some of the in-flight requests. In particular, on the first time, Q is
  1929. * tentatively set as a candidate waker queue, while on the third consecutive
  1930. * time that Q is detected, the field waker_bfqq is set to Q, to confirm that Q
  1931. * is a waker queue for bfqq. These detection steps are performed only if bfqq
  1932. * has a long think time, so as to make it more likely that bfqq's I/O is
  1933. * actually being blocked by a synchronization. This last filter, plus the
  1934. * above three-times requirement and time limit for detection, make false
  1935. * positives less likely.
  1936. *
  1937. * NOTE
  1938. *
  1939. * The sooner a waker queue is detected, the sooner throughput can be
  1940. * boosted by injecting I/O from the waker queue. Fortunately,
  1941. * detection is likely to be actually fast, for the following
  1942. * reasons. While blocked by synchronization, bfqq has a long think
  1943. * time. This implies that bfqq's inject limit is at least equal to 1
  1944. * (see the comments in bfq_update_inject_limit()). So, thanks to
  1945. * injection, the waker queue is likely to be served during the very
  1946. * first I/O-plugging time interval for bfqq. This triggers the first
  1947. * step of the detection mechanism. Thanks again to injection, the
  1948. * candidate waker queue is then likely to be confirmed no later than
  1949. * during the next I/O-plugging interval for bfqq.
  1950. *
  1951. * ISSUE
  1952. *
  1953. * On queue merging all waker information is lost.
  1954. */
  1955. static void bfq_check_waker(struct bfq_data *bfqd, struct bfq_queue *bfqq,
  1956. u64 now_ns)
  1957. {
  1958. char waker_name[MAX_BFQQ_NAME_LENGTH];
  1959. if (!bfqd->last_completed_rq_bfqq ||
  1960. bfqd->last_completed_rq_bfqq == bfqq ||
  1961. bfq_bfqq_has_short_ttime(bfqq) ||
  1962. now_ns - bfqd->last_completion >= 4 * NSEC_PER_MSEC)
  1963. return;
  1964. /*
  1965. * We reset waker detection logic also if too much time has passed
  1966. * since the first detection. If wakeups are rare, pointless idling
  1967. * doesn't hurt throughput that much. The condition below makes sure
  1968. * we do not uselessly idle waiting for a blocking waker in more than 1/64 of the cases.
  1969. */
  1970. if (bfqd->last_completed_rq_bfqq !=
  1971. bfqq->tentative_waker_bfqq ||
  1972. now_ns > bfqq->waker_detection_started +
  1973. 128 * (u64)bfqd->bfq_slice_idle) {
  1974. /*
  1975. * First synchronization detected with a
  1976. * candidate waker queue, or with a different
  1977. * candidate waker queue from the current one.
  1978. */
  1979. bfqq->tentative_waker_bfqq =
  1980. bfqd->last_completed_rq_bfqq;
  1981. bfqq->num_waker_detections = 1;
  1982. bfqq->waker_detection_started = now_ns;
  1983. bfq_bfqq_name(bfqq->tentative_waker_bfqq, waker_name,
  1984. MAX_BFQQ_NAME_LENGTH);
  1985. bfq_log_bfqq(bfqd, bfqq, "set tentative waker %s", waker_name);
  1986. } else /* Same tentative waker queue detected again */
  1987. bfqq->num_waker_detections++;
  1988. if (bfqq->num_waker_detections == 3) {
  1989. bfqq->waker_bfqq = bfqd->last_completed_rq_bfqq;
  1990. bfqq->tentative_waker_bfqq = NULL;
  1991. bfq_bfqq_name(bfqq->waker_bfqq, waker_name,
  1992. MAX_BFQQ_NAME_LENGTH);
  1993. bfq_log_bfqq(bfqd, bfqq, "set waker %s", waker_name);
  1994. /*
  1995. * If the waker queue disappears, then
  1996. * bfqq->waker_bfqq must be reset. To
  1997. * this goal, we maintain in each
  1998. * waker queue a list, woken_list, of
  1999. * all the queues that reference the
  2000. * waker queue through their
  2001. * waker_bfqq pointer. When the waker
  2002. * queue exits, the waker_bfqq pointer
  2003. * of all the queues in the woken_list
  2004. * is reset.
  2005. *
  2006. * In addition, if bfqq is already in
  2007. * the woken_list of a waker queue,
  2008. * then, before being inserted into
  2009. * the woken_list of a new waker
  2010. * queue, bfqq must be removed from
  2011. * the woken_list of the old waker
  2012. * queue.
  2013. */
  2014. if (!hlist_unhashed(&bfqq->woken_list_node))
  2015. hlist_del_init(&bfqq->woken_list_node);
  2016. hlist_add_head(&bfqq->woken_list_node,
  2017. &bfqd->last_completed_rq_bfqq->woken_list);
  2018. }
  2019. }
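/*
 * Minimal standalone sketch of the detection state machine above,
 * with hypothetical names: a candidate must be seen on three
 * consecutive occasions, all within the given window, before being
 * confirmed. A different candidate, or an expired window, restarts
 * the count. Kept out of the build, as it is only illustrative.
 */
#if 0
struct waker_state {
	void *tentative;	/* current candidate waker */
	int hits;		/* consecutive detections so far */
	u64 started_ns;		/* when the first detection happened */
};

/* Returns the confirmed waker, or NULL if detection is still ongoing. */
static void *waker_update(struct waker_state *ws, void *candidate,
			  u64 now_ns, u64 window_ns)
{
	if (candidate != ws->tentative ||
	    now_ns > ws->started_ns + window_ns) {
		ws->tentative = candidate;	/* (re)start detection */
		ws->hits = 1;
		ws->started_ns = now_ns;
		return NULL;
	}

	if (++ws->hits < 3)
		return NULL;

	ws->tentative = NULL;			/* third consecutive hit */
	return candidate;
}
#endif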
  2020. static void bfq_add_request(struct request *rq)
  2021. {
  2022. struct bfq_queue *bfqq = RQ_BFQQ(rq);
  2023. struct bfq_data *bfqd = bfqq->bfqd;
  2024. struct request *next_rq, *prev;
  2025. unsigned int old_wr_coeff = bfqq->wr_coeff;
  2026. bool interactive = false;
  2027. u64 now_ns = ktime_get_ns();
  2028. bfq_log_bfqq(bfqd, bfqq, "add_request %d", rq_is_sync(rq));
  2029. bfqq->queued[rq_is_sync(rq)]++;
  2030. /*
  2031. * Updating of 'bfqd->queued' is protected by 'bfqd->lock', however, it
  2032. * may be read without holding the lock in bfq_has_work().
  2033. */
  2034. WRITE_ONCE(bfqd->queued, bfqd->queued + 1);
  2035. if (bfq_bfqq_sync(bfqq) && RQ_BIC(rq)->requests <= 1) {
  2036. bfq_check_waker(bfqd, bfqq, now_ns);
  2037. /*
  2038. * Periodically reset inject limit, to make sure that
  2039. * the latter eventually drops in case workload
  2040. * changes, see step (3) in the comments on
  2041. * bfq_update_inject_limit().
  2042. */
  2043. if (time_is_before_eq_jiffies(bfqq->decrease_time_jif +
  2044. msecs_to_jiffies(1000)))
  2045. bfq_reset_inject_limit(bfqd, bfqq);
  2046. /*
  2047. * The following conditions must hold to set up a new
  2048. * sampling of total service time, and then a new
  2049. * update of the inject limit:
  2050. * - bfqq is in service, because the total service
  2051. * time is evaluated only for the I/O requests of
  2052. * the queues in service;
  2053. * - this is the right occasion to compute or to
  2054. * lower the baseline total service time, because
  2055. * there are actually no requests in the drive,
  2056. * or
  2057. * the baseline total service time is available, and
  2058. * this is the right occasion to compute the other
  2059. * quantity needed to update the inject limit, i.e.,
  2060. * the total service time caused by the amount of
  2061. * injection allowed by the current value of the
  2062. * limit. It is the right occasion because injection
  2063. * has actually been performed during the service
  2064. * hole, and there are still in-flight requests,
  2065. * which are very likely to be exactly the injected
  2066. * requests, or part of them;
  2067. * - the minimum interval for sampling the total
  2068. * service time and updating the inject limit has
  2069. * elapsed.
  2070. */
  2071. if (bfqq == bfqd->in_service_queue &&
  2072. (bfqd->rq_in_driver == 0 ||
  2073. (bfqq->last_serv_time_ns > 0 &&
  2074. bfqd->rqs_injected && bfqd->rq_in_driver > 0)) &&
  2075. time_is_before_eq_jiffies(bfqq->decrease_time_jif +
  2076. msecs_to_jiffies(10))) {
  2077. bfqd->last_empty_occupied_ns = ktime_get_ns();
  2078. /*
  2079. * Start the state machine for measuring the
  2080. * total service time of rq: setting
  2081. * wait_dispatch will cause bfqd->waited_rq to
  2082. * be set when rq will be dispatched.
  2083. */
  2084. bfqd->wait_dispatch = true;
  2085. /*
  2086. * If there is no I/O in service in the drive,
  2087. * then possible injection occurred before the
  2088. * arrival of rq will not affect the total
  2089. * service time of rq. So the injection limit
  2090. * must not be updated as a function of such
  2091. * total service time, unless new injection
  2092. * occurs before rq is completed. To have the
  2093. * injection limit updated only in the latter
  2094. * case, reset rqs_injected here (rqs_injected
  2095. * will be set in case injection is performed
  2096. * on bfqq before rq is completed).
  2097. */
  2098. if (bfqd->rq_in_driver == 0)
  2099. bfqd->rqs_injected = false;
  2100. }
  2101. }
  2102. if (bfq_bfqq_sync(bfqq))
  2103. bfq_update_io_intensity(bfqq, now_ns);
  2104. elv_rb_add(&bfqq->sort_list, rq);
  2105. /*
  2106. * Check if this request is a better next-serve candidate.
  2107. */
  2108. prev = bfqq->next_rq;
  2109. next_rq = bfq_choose_req(bfqd, bfqq->next_rq, rq, bfqd->last_position);
  2110. bfqq->next_rq = next_rq;
  2111. /*
  2112. * Adjust priority tree position, if next_rq changes.
  2113. * See comments on bfq_pos_tree_add_move() for the unlikely().
  2114. */
  2115. if (unlikely(!bfqd->nonrot_with_queueing && prev != bfqq->next_rq))
  2116. bfq_pos_tree_add_move(bfqd, bfqq);
  2117. if (!bfq_bfqq_busy(bfqq)) /* switching to busy ... */
  2118. bfq_bfqq_handle_idle_busy_switch(bfqd, bfqq, old_wr_coeff,
  2119. rq, &interactive);
  2120. else {
  2121. if (bfqd->low_latency && old_wr_coeff == 1 && !rq_is_sync(rq) &&
  2122. time_is_before_jiffies(
  2123. bfqq->last_wr_start_finish +
  2124. bfqd->bfq_wr_min_inter_arr_async)) {
  2125. bfqq->wr_coeff = bfqd->bfq_wr_coeff;
  2126. bfqq->wr_cur_max_time = bfq_wr_duration(bfqd);
  2127. bfqd->wr_busy_queues++;
  2128. bfqq->entity.prio_changed = 1;
  2129. }
  2130. if (prev != bfqq->next_rq)
  2131. bfq_updated_next_req(bfqd, bfqq);
  2132. }
  2133. /*
  2134. * Assign jiffies to last_wr_start_finish in the following
  2135. * cases:
  2136. *
  2137. * . if bfqq is not going to be weight-raised, because, for
  2138. * non weight-raised queues, last_wr_start_finish stores the
  2139. * arrival time of the last request; as of now, this piece
  2140. * of information is used only for deciding whether to
  2141. * weight-raise async queues
  2142. *
  2143. * . if bfqq is not weight-raised, because, if bfqq is now
  2144. * switching to weight-raised, then last_wr_start_finish
  2145. * stores the time when weight-raising starts
  2146. *
  2147. * . if bfqq is interactive, because, regardless of whether
  2148. * bfqq is currently weight-raised, the weight-raising
  2149. * period must start or restart (this case is considered
  2150. * separately because it is not detected by the above
  2151. * conditions, if bfqq is already weight-raised)
  2152. *
  2153. * last_wr_start_finish has to be updated also if bfqq is soft
  2154. * real-time, because the weight-raising period is constantly
  2155. * restarted on idle-to-busy transitions for these queues, but
  2156. * this is already done in bfq_bfqq_handle_idle_busy_switch if
  2157. * needed.
  2158. */
  2159. if (bfqd->low_latency &&
  2160. (old_wr_coeff == 1 || bfqq->wr_coeff == 1 || interactive))
  2161. bfqq->last_wr_start_finish = jiffies;
  2162. }
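/*
 * Minimal sketch of the sampling trigger evaluated above, with
 * hypothetical names: a new total-service-time sample is started only
 * for the in-service queue, only if either the drive is empty or a
 * baseline is available and injection has occurred, and only if at
 * least 10 ms have elapsed since the last limit update. Kept out of
 * the build, as it is only illustrative.
 */
#if 0
static bool start_serv_time_sample(bool in_service, int rq_in_driver,
				   u64 baseline_ns, bool rqs_injected,
				   unsigned long last_update_jif)
{
	return in_service &&
		(rq_in_driver == 0 ||
		 (baseline_ns > 0 && rqs_injected && rq_in_driver > 0)) &&
		time_is_before_eq_jiffies(last_update_jif +
					  msecs_to_jiffies(10));
}
#endif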
  2163. static struct request *bfq_find_rq_fmerge(struct bfq_data *bfqd,
  2164. struct bio *bio,
  2165. struct request_queue *q)
  2166. {
  2167. struct bfq_queue *bfqq = bfqd->bio_bfqq;
  2168. if (bfqq)
  2169. return elv_rb_find(&bfqq->sort_list, bio_end_sector(bio));
  2170. return NULL;
  2171. }
  2172. static sector_t get_sdist(sector_t last_pos, struct request *rq)
  2173. {
  2174. if (last_pos)
  2175. return abs(blk_rq_pos(rq) - last_pos);
  2176. return 0;
  2177. }
  2178. #if 0 /* Still not clear if we can do without next two functions */
  2179. static void bfq_activate_request(struct request_queue *q, struct request *rq)
  2180. {
  2181. struct bfq_data *bfqd = q->elevator->elevator_data;
  2182. bfqd->rq_in_driver++;
  2183. }
  2184. static void bfq_deactivate_request(struct request_queue *q, struct request *rq)
  2185. {
  2186. struct bfq_data *bfqd = q->elevator->elevator_data;
  2187. bfqd->rq_in_driver--;
  2188. }
  2189. #endif
  2190. static void bfq_remove_request(struct request_queue *q,
  2191. struct request *rq)
  2192. {
  2193. struct bfq_queue *bfqq = RQ_BFQQ(rq);
  2194. struct bfq_data *bfqd = bfqq->bfqd;
  2195. const int sync = rq_is_sync(rq);
  2196. if (bfqq->next_rq == rq) {
  2197. bfqq->next_rq = bfq_find_next_rq(bfqd, bfqq, rq);
  2198. bfq_updated_next_req(bfqd, bfqq);
  2199. }
  2200. if (rq->queuelist.prev != &rq->queuelist)
  2201. list_del_init(&rq->queuelist);
  2202. bfqq->queued[sync]--;
  2203. /*
  2204. * Updating of 'bfqd->queued' is protected by 'bfqd->lock', however, it
  2205. * may be read without holding the lock in bfq_has_work().
  2206. */
  2207. WRITE_ONCE(bfqd->queued, bfqd->queued - 1);
  2208. elv_rb_del(&bfqq->sort_list, rq);
  2209. elv_rqhash_del(q, rq);
  2210. if (q->last_merge == rq)
  2211. q->last_merge = NULL;
  2212. if (RB_EMPTY_ROOT(&bfqq->sort_list)) {
  2213. bfqq->next_rq = NULL;
  2214. if (bfq_bfqq_busy(bfqq) && bfqq != bfqd->in_service_queue) {
  2215. bfq_del_bfqq_busy(bfqq, false);
  2216. /*
  2217. * bfqq emptied. In normal operation, when
  2218. * bfqq is empty, bfqq->entity.service and
  2219. * bfqq->entity.budget must contain,
  2220. * respectively, the service received and the
  2221. * budget used last time bfqq emptied. These
  2222. * facts do not hold in this case, as at least
  2223. * this last removal occurred while bfqq is
  2224. * not in service. To avoid inconsistencies,
  2225. * reset both bfqq->entity.service and
  2226. * bfqq->entity.budget, if bfqq has still a
  2227. * process that may issue I/O requests to it.
  2228. */
  2229. bfqq->entity.budget = bfqq->entity.service = 0;
  2230. }
  2231. /*
  2232. * Remove queue from request-position tree as it is empty.
  2233. */
  2234. if (bfqq->pos_root) {
  2235. rb_erase(&bfqq->pos_node, bfqq->pos_root);
  2236. bfqq->pos_root = NULL;
  2237. }
  2238. } else {
  2239. /* see comments on bfq_pos_tree_add_move() for the unlikely() */
  2240. if (unlikely(!bfqd->nonrot_with_queueing))
  2241. bfq_pos_tree_add_move(bfqd, bfqq);
  2242. }
  2243. if (rq->cmd_flags & REQ_META)
  2244. bfqq->meta_pending--;
  2245. }
  2246. static bool bfq_bio_merge(struct request_queue *q, struct bio *bio,
  2247. unsigned int nr_segs)
  2248. {
  2249. struct bfq_data *bfqd = q->elevator->elevator_data;
  2250. struct request *free = NULL;
  2251. /*
  2252. * bfq_bic_lookup grabs the queue_lock: invoke it now and
  2253. * store its return value for later use, to avoid nesting
  2254. * queue_lock inside the bfqd->lock. We assume that the bic
  2255. * returned by bfq_bic_lookup does not go away before
  2256. * bfqd->lock is taken.
  2257. */
  2258. struct bfq_io_cq *bic = bfq_bic_lookup(q);
  2259. bool ret;
  2260. spin_lock_irq(&bfqd->lock);
  2261. if (bic) {
  2262. /*
  2263. * Make sure cgroup info is uptodate for current process before
  2264. * considering the merge.
  2265. */
  2266. bfq_bic_update_cgroup(bic, bio);
  2267. bfqd->bio_bfqq = bic_to_bfqq(bic, op_is_sync(bio->bi_opf));
  2268. } else {
  2269. bfqd->bio_bfqq = NULL;
  2270. }
  2271. bfqd->bio_bic = bic;
  2272. ret = blk_mq_sched_try_merge(q, bio, nr_segs, &free);
  2273. spin_unlock_irq(&bfqd->lock);
  2274. if (free)
  2275. blk_mq_free_request(free);
  2276. return ret;
  2277. }
  2278. static int bfq_request_merge(struct request_queue *q, struct request **req,
  2279. struct bio *bio)
  2280. {
  2281. struct bfq_data *bfqd = q->elevator->elevator_data;
  2282. struct request *__rq;
  2283. __rq = bfq_find_rq_fmerge(bfqd, bio, q);
  2284. if (__rq && elv_bio_merge_ok(__rq, bio)) {
  2285. *req = __rq;
  2286. if (blk_discard_mergable(__rq))
  2287. return ELEVATOR_DISCARD_MERGE;
  2288. return ELEVATOR_FRONT_MERGE;
  2289. }
  2290. return ELEVATOR_NO_MERGE;
  2291. }
  2292. static void bfq_request_merged(struct request_queue *q, struct request *req,
  2293. enum elv_merge type)
  2294. {
  2295. if (type == ELEVATOR_FRONT_MERGE &&
  2296. rb_prev(&req->rb_node) &&
  2297. blk_rq_pos(req) <
  2298. blk_rq_pos(container_of(rb_prev(&req->rb_node),
  2299. struct request, rb_node))) {
  2300. struct bfq_queue *bfqq = RQ_BFQQ(req);
  2301. struct bfq_data *bfqd;
  2302. struct request *prev, *next_rq;
  2303. if (!bfqq)
  2304. return;
  2305. bfqd = bfqq->bfqd;
  2306. /* Reposition request in its sort_list */
  2307. elv_rb_del(&bfqq->sort_list, req);
  2308. elv_rb_add(&bfqq->sort_list, req);
  2309. /* Choose next request to be served for bfqq */
  2310. prev = bfqq->next_rq;
  2311. next_rq = bfq_choose_req(bfqd, bfqq->next_rq, req,
  2312. bfqd->last_position);
  2313. bfqq->next_rq = next_rq;
  2314. /*
  2315. * If next_rq changes, update both the queue's budget to
  2316. * fit the new request and the queue's position in its
  2317. * rq_pos_tree.
  2318. */
  2319. if (prev != bfqq->next_rq) {
  2320. bfq_updated_next_req(bfqd, bfqq);
  2321. /*
  2322. * See comments on bfq_pos_tree_add_move() for
  2323. * the unlikely().
  2324. */
  2325. if (unlikely(!bfqd->nonrot_with_queueing))
  2326. bfq_pos_tree_add_move(bfqd, bfqq);
  2327. }
  2328. }
  2329. }
  2330. /*
  2331. * This function is called to notify the scheduler that the requests
  2332. * rq and 'next' have been merged, with 'next' going away. BFQ
  2333. * exploits this hook to address the following issue: if 'next' has a
  2334. * fifo_time lower than that of rq, then the fifo_time of rq must be set to
  2335. * the value of 'next', to not forget the greater age of 'next'.
  2336. *
  2337. * NOTE: in this function we assume that rq is in a bfq_queue, based on
  2338. * the fact that rq is picked from the hash table q->elevator->hash, which,
  2339. * in its turn, is filled only with I/O requests present in
  2340. * bfq_queues, while BFQ is in use for the request queue q. In fact,
  2341. * the function that fills this hash table (elv_rqhash_add) is called
  2342. * only by bfq_insert_request.
  2343. */
  2344. static void bfq_requests_merged(struct request_queue *q, struct request *rq,
  2345. struct request *next)
  2346. {
  2347. struct bfq_queue *bfqq = RQ_BFQQ(rq),
  2348. *next_bfqq = RQ_BFQQ(next);
  2349. if (!bfqq)
  2350. goto remove;
  2351. /*
  2352. * If next and rq belong to the same bfq_queue and next is older
  2353. * than rq, then reposition rq in the fifo (by substituting next
  2354. * with rq). Otherwise, if next and rq belong to different
  2355. * bfq_queues, never reposition rq: in fact, we would have to
  2356. * reposition it with respect to next's position in its own fifo,
  2357. * which would most certainly be too expensive with respect to
  2358. * the benefits.
  2359. */
  2360. if (bfqq == next_bfqq &&
  2361. !list_empty(&rq->queuelist) && !list_empty(&next->queuelist) &&
  2362. next->fifo_time < rq->fifo_time) {
  2363. list_del_init(&rq->queuelist);
  2364. list_replace_init(&next->queuelist, &rq->queuelist);
  2365. rq->fifo_time = next->fifo_time;
  2366. }
  2367. if (bfqq->next_rq == next)
  2368. bfqq->next_rq = rq;
  2369. bfqg_stats_update_io_merged(bfqq_group(bfqq), next->cmd_flags);
  2370. remove:
  2371. /* Merged request may be in the IO scheduler. Remove it. */
  2372. if (!RB_EMPTY_NODE(&next->rb_node)) {
  2373. bfq_remove_request(next->q, next);
  2374. if (next_bfqq)
  2375. bfqg_stats_update_io_remove(bfqq_group(next_bfqq),
  2376. next->cmd_flags);
  2377. }
  2378. }
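/*
 * The fifo handling above boils down to inheriting the older age: a
 * minimal sketch with hypothetical names, keeping the smaller (i.e.,
 * earlier) fifo_time on the surviving request. Kept out of the build,
 * as it is only illustrative.
 */
#if 0
static void inherit_fifo_time(u64 *surviving_fifo_time, u64 next_fifo_time)
{
	if (next_fifo_time < *surviving_fifo_time)
		*surviving_fifo_time = next_fifo_time;
}
#endif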
  2379. /* Must be called with bfqq != NULL */
  2380. static void bfq_bfqq_end_wr(struct bfq_queue *bfqq)
  2381. {
  2382. /*
  2383. * If bfqq has been enjoying interactive weight-raising, then
  2384. * reset soft_rt_next_start. We do it for the following
  2385. * reason. bfqq may have been conveying the I/O needed to load
  2386. * a soft real-time application. Such an application actually
  2387. * exhibits a soft real-time I/O pattern after it finishes
  2388. * loading, and finally starts doing its job. But, if bfqq has
  2389. * been receiving a lot of bandwidth so far (likely to happen
  2390. * on a fast device), then soft_rt_next_start now contains a
  2391. * high value. So, without this reset, bfqq would be
  2392. * prevented from being possibly considered as soft_rt for a
  2393. * very long time.
  2394. */
  2395. if (bfqq->wr_cur_max_time !=
  2396. bfqq->bfqd->bfq_wr_rt_max_time)
  2397. bfqq->soft_rt_next_start = jiffies;
  2398. if (bfq_bfqq_busy(bfqq))
  2399. bfqq->bfqd->wr_busy_queues--;
  2400. bfqq->wr_coeff = 1;
  2401. bfqq->wr_cur_max_time = 0;
  2402. bfqq->last_wr_start_finish = jiffies;
  2403. /*
  2404. * Trigger a weight change on the next invocation of
  2405. * __bfq_entity_update_weight_prio.
  2406. */
  2407. bfqq->entity.prio_changed = 1;
  2408. }
  2409. void bfq_end_wr_async_queues(struct bfq_data *bfqd,
  2410. struct bfq_group *bfqg)
  2411. {
  2412. int i, j;
  2413. for (i = 0; i < 2; i++)
  2414. for (j = 0; j < IOPRIO_NR_LEVELS; j++)
  2415. if (bfqg->async_bfqq[i][j])
  2416. bfq_bfqq_end_wr(bfqg->async_bfqq[i][j]);
  2417. if (bfqg->async_idle_bfqq)
  2418. bfq_bfqq_end_wr(bfqg->async_idle_bfqq);
  2419. }
  2420. static void bfq_end_wr(struct bfq_data *bfqd)
  2421. {
  2422. struct bfq_queue *bfqq;
  2423. spin_lock_irq(&bfqd->lock);
  2424. list_for_each_entry(bfqq, &bfqd->active_list, bfqq_list)
  2425. bfq_bfqq_end_wr(bfqq);
  2426. list_for_each_entry(bfqq, &bfqd->idle_list, bfqq_list)
  2427. bfq_bfqq_end_wr(bfqq);
  2428. bfq_end_wr_async(bfqd);
  2429. spin_unlock_irq(&bfqd->lock);
  2430. }
  2431. static sector_t bfq_io_struct_pos(void *io_struct, bool request)
  2432. {
  2433. if (request)
  2434. return blk_rq_pos(io_struct);
  2435. else
  2436. return ((struct bio *)io_struct)->bi_iter.bi_sector;
  2437. }
  2438. static int bfq_rq_close_to_sector(void *io_struct, bool request,
  2439. sector_t sector)
  2440. {
  2441. return abs(bfq_io_struct_pos(io_struct, request) - sector) <=
  2442. BFQQ_CLOSE_THR;
  2443. }
  2444. static struct bfq_queue *bfqq_find_close(struct bfq_data *bfqd,
  2445. struct bfq_queue *bfqq,
  2446. sector_t sector)
  2447. {
  2448. struct rb_root *root = &bfqq_group(bfqq)->rq_pos_tree;
  2449. struct rb_node *parent, *node;
  2450. struct bfq_queue *__bfqq;
  2451. if (RB_EMPTY_ROOT(root))
  2452. return NULL;
  2453. /*
  2454. * First, if we find a request starting at the end of the last
  2455. * request, choose it.
  2456. */
  2457. __bfqq = bfq_rq_pos_tree_lookup(bfqd, root, sector, &parent, NULL);
  2458. if (__bfqq)
  2459. return __bfqq;
  2460. /*
  2461. * If the exact sector wasn't found, the parent of the NULL leaf
  2462. * will contain the closest sector (rq_pos_tree sorted by
  2463. * next_request position).
  2464. */
  2465. __bfqq = rb_entry(parent, struct bfq_queue, pos_node);
  2466. if (bfq_rq_close_to_sector(__bfqq->next_rq, true, sector))
  2467. return __bfqq;
  2468. if (blk_rq_pos(__bfqq->next_rq) < sector)
  2469. node = rb_next(&__bfqq->pos_node);
  2470. else
  2471. node = rb_prev(&__bfqq->pos_node);
  2472. if (!node)
  2473. return NULL;
  2474. __bfqq = rb_entry(node, struct bfq_queue, pos_node);
  2475. if (bfq_rq_close_to_sector(__bfqq->next_rq, true, sector))
  2476. return __bfqq;
  2477. return NULL;
  2478. }
  2479. static struct bfq_queue *bfq_find_close_cooperator(struct bfq_data *bfqd,
  2480. struct bfq_queue *cur_bfqq,
  2481. sector_t sector)
  2482. {
  2483. struct bfq_queue *bfqq;
  2484. /*
  2485. * We shall notice if some of the queues are cooperating,
  2486. * e.g., working closely on the same area of the device. In
  2487. * that case, we can group them together and: 1) don't waste
  2488. * time idling, and 2) serve the union of their requests in
  2489. * the best possible order for throughput.
  2490. */
  2491. bfqq = bfqq_find_close(bfqd, cur_bfqq, sector);
  2492. if (!bfqq || bfqq == cur_bfqq)
  2493. return NULL;
  2494. return bfqq;
  2495. }
  2496. static struct bfq_queue *
  2497. bfq_setup_merge(struct bfq_queue *bfqq, struct bfq_queue *new_bfqq)
  2498. {
  2499. int process_refs, new_process_refs;
  2500. struct bfq_queue *__bfqq;
  2501. /*
  2502. * If there are no process references on the new_bfqq, then it is
  2503. * unsafe to follow the ->new_bfqq chain as other bfqq's in the chain
  2504. * may have dropped their last reference (not just their last process
  2505. * reference).
  2506. */
  2507. if (!bfqq_process_refs(new_bfqq))
  2508. return NULL;
  2509. /* Avoid a circular list and skip interim queue merges. */
  2510. while ((__bfqq = new_bfqq->new_bfqq)) {
  2511. if (__bfqq == bfqq)
  2512. return NULL;
  2513. new_bfqq = __bfqq;
  2514. }
  2515. process_refs = bfqq_process_refs(bfqq);
  2516. new_process_refs = bfqq_process_refs(new_bfqq);
  2517. /*
  2518. * If the process for the bfqq has gone away, there is no
  2519. * sense in merging the queues.
  2520. */
  2521. if (process_refs == 0 || new_process_refs == 0)
  2522. return NULL;
  2523. /*
  2524. * Make sure merged queues belong to the same parent. Parents could
  2525. * have changed since the time we decided the two queues are suitable
  2526. * for merging.
  2527. */
  2528. if (new_bfqq->entity.parent != bfqq->entity.parent)
  2529. return NULL;
  2530. bfq_log_bfqq(bfqq->bfqd, bfqq, "scheduling merge with queue %d",
  2531. new_bfqq->pid);
  2532. /*
  2533. * Merging is just a redirection: the requests of the process
  2534. * owning one of the two queues are redirected to the other queue.
  2535. * The latter queue, in its turn, is set as shared if this is the
  2536. * first time that the requests of some process are redirected to
  2537. * it.
  2538. *
  2539. * We redirect bfqq to new_bfqq and not the opposite, because
  2540. * we are in the context of the process owning bfqq, thus we
  2541. * have the io_cq of this process. So we can immediately
  2542. * configure this io_cq to redirect the requests of the
  2543. * process to new_bfqq. In contrast, the io_cq of new_bfqq is
  2544. * not available any more (new_bfqq->bic == NULL).
  2545. *
  2546. * Anyway, even in case new_bfqq coincides with the in-service
  2547. * queue, redirecting requests to the in-service queue is the
  2548. * best option, as we feed the in-service queue with new
  2549. * requests close to the last request served and, by doing so,
  2550. * are likely to increase the throughput.
  2551. */
  2552. bfqq->new_bfqq = new_bfqq;
  2553. /*
  2554. * The above assignment schedules the following redirections:
  2555. * each time some I/O for bfqq arrives, the process that
  2556. * generated that I/O is disassociated from bfqq and
  2557. * associated with new_bfqq. Here we increase new_bfqq->ref
  2558. * in advance, adding the number of processes that are
  2559. * expected to be associated with new_bfqq as they happen to
  2560. * issue I/O.
  2561. */
  2562. new_bfqq->ref += process_refs;
  2563. return new_bfqq;
  2564. }
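/*
 * Minimal sketch of the chain walk performed above, with hypothetical
 * names: follow the merge pointers to the final merge target, bailing
 * out if the chain loops back to the starting queue. Kept out of the
 * build, as it is only illustrative.
 */
#if 0
struct q {
	struct q *next;		/* stands for bfqq->new_bfqq */
};

static struct q *final_merge_target(struct q *start, struct q *target)
{
	struct q *hop;

	while ((hop = target->next)) {
		if (hop == start)
			return NULL;	/* circular chain: give up */
		target = hop;
	}
	return target;
}
#endif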
  2565. static bool bfq_may_be_close_cooperator(struct bfq_queue *bfqq,
  2566. struct bfq_queue *new_bfqq)
  2567. {
  2568. if (bfq_too_late_for_merging(new_bfqq))
  2569. return false;
  2570. if (bfq_class_idle(bfqq) || bfq_class_idle(new_bfqq) ||
  2571. (bfqq->ioprio_class != new_bfqq->ioprio_class))
  2572. return false;
  2573. /*
  2574. * If either of the queues has already been detected as seeky,
  2575. * then merging it with the other queue is unlikely to lead to
  2576. * sequential I/O.
  2577. */
  2578. if (BFQQ_SEEKY(bfqq) || BFQQ_SEEKY(new_bfqq))
  2579. return false;
  2580. /*
  2581. * Interleaved I/O is known to be done by (some) applications
  2582. * only for reads, so it does not make sense to merge async
  2583. * queues.
  2584. */
  2585. if (!bfq_bfqq_sync(bfqq) || !bfq_bfqq_sync(new_bfqq))
  2586. return false;
  2587. return true;
  2588. }
  2589. static bool idling_boosts_thr_without_issues(struct bfq_data *bfqd,
  2590. struct bfq_queue *bfqq);
  2591. /*
  2592. * Attempt to schedule a merge of bfqq with the currently in-service
  2593. * queue or with a close queue among the scheduled queues. Return
  2594. * NULL if no merge was scheduled, a pointer to the shared bfq_queue
  2595. * structure otherwise.
  2596. *
  2597. * The OOM queue is not allowed to participate in cooperation: in fact, since
  2598. * the requests temporarily redirected to the OOM queue could be redirected
  2599. * again to dedicated queues at any time, the state needed to correctly
  2600. * handle merging with the OOM queue would be quite complex and expensive
  2601. * to maintain. Besides, in such a critical condition as an out of memory,
  2602. * the benefits of queue merging may be little relevant, or even negligible.
  2603. *
  2604. * WARNING: queue merging may impair fairness among non-weight raised
  2605. * queues, for at least two reasons: 1) the original weight of a
  2606. * merged queue may change during the merged state, 2) even being the
  2607. * weight the same, a merged queue may be bloated with many more
  2608. * requests than the ones produced by its originally-associated
  2609. * process.
  2610. */
  2611. static struct bfq_queue *
  2612. bfq_setup_cooperator(struct bfq_data *bfqd, struct bfq_queue *bfqq,
  2613. void *io_struct, bool request, struct bfq_io_cq *bic)
  2614. {
  2615. struct bfq_queue *in_service_bfqq, *new_bfqq;
  2616. /* if a merge has already been setup, then proceed with that first */
  2617. if (bfqq->new_bfqq)
  2618. return bfqq->new_bfqq;
  2619. /*
  2620. * Check delayed stable merge for rotational or non-queueing
  2621. * devs. For this branch to be executed, bfqq must not be
  2622. * currently merged with some other queue (i.e., bfqq->bic
  2623. * must be non null). If we considered also merged queues,
  2624. * then we should also check whether bfqq has already been
  2625. * merged with bic->stable_merge_bfqq. But this would be
  2626. * costly and complicated.
  2627. */
  2628. if (unlikely(!bfqd->nonrot_with_queueing)) {
  2629. /*
  2630. * Make sure also that bfqq is sync, because
  2631. * bic->stable_merge_bfqq may point to some queue (for
  2632. * stable merging) also if bic is associated with a
  2633. * sync queue, but this bfqq is async
  2634. */
  2635. if (bfq_bfqq_sync(bfqq) && bic->stable_merge_bfqq &&
  2636. !bfq_bfqq_just_created(bfqq) &&
  2637. time_is_before_jiffies(bfqq->split_time +
  2638. msecs_to_jiffies(bfq_late_stable_merging)) &&
  2639. time_is_before_jiffies(bfqq->creation_time +
  2640. msecs_to_jiffies(bfq_late_stable_merging))) {
  2641. struct bfq_queue *stable_merge_bfqq =
  2642. bic->stable_merge_bfqq;
  2643. int proc_ref = min(bfqq_process_refs(bfqq),
  2644. bfqq_process_refs(stable_merge_bfqq));
  2645. /* deschedule stable merge, because done or aborted here */
  2646. bfq_put_stable_ref(stable_merge_bfqq);
  2647. bic->stable_merge_bfqq = NULL;
  2648. if (!idling_boosts_thr_without_issues(bfqd, bfqq) &&
  2649. proc_ref > 0) {
  2650. /* next function will take at least one ref */
  2651. struct bfq_queue *new_bfqq =
  2652. bfq_setup_merge(bfqq, stable_merge_bfqq);
  2653. if (new_bfqq) {
  2654. bic->stably_merged = true;
  2655. if (new_bfqq->bic)
  2656. new_bfqq->bic->stably_merged =
  2657. true;
  2658. }
  2659. return new_bfqq;
  2660. } else
  2661. return NULL;
  2662. }
  2663. }
  2664. /*
  2665. * Do not perform queue merging if the device is non
  2666. * rotational and performs internal queueing. In fact, such a
  2667. * device reaches a high speed through internal parallelism
  2668. * and pipelining. This means that, to reach a high
  2669. * throughput, it must have many requests enqueued at the same
  2670. * time. But, in this configuration, the internal scheduling
  2671. * algorithm of the device does exactly the job of queue
  2672. * merging: it reorders requests so as to obtain as much as
  2673. * possible a sequential I/O pattern. As a consequence, with
  2674. * the workload generated by processes doing interleaved I/O,
  2675. * the throughput reached by the device is likely to be the
  2676. * same, with and without queue merging.
  2677. *
  2678. * Disabling merging also provides a remarkable benefit in
  2679. * terms of throughput. Merging tends to make many workloads
  2680. * artificially more uneven, because of shared queues
  2681. * remaining non empty for incomparably more time than
  2682. * non-merged queues. This may accentuate workload
  2683. * asymmetries. For example, if one of the queues in a set of
  2684. * merged queues has a higher weight than a normal queue, then
  2685. * the shared queue may inherit such a high weight and, by
  2686. * staying almost always active, may force BFQ to perform I/O
  2687. * plugging most of the time. This evidently makes it harder
  2688. * for BFQ to let the device reach a high throughput.
  2689. *
  2690. * Finally, the likely() macro below is not used because one
  2691. * of the two branches is more likely than the other, but to
  2692. * have the code path after the following if() executed as
  2693. * fast as possible for the case of a non rotational device
  2694. * with queueing. We want it because this is the fastest kind
  2695. * of device. On the opposite end, the likely() may lengthen
  2696. * the execution time of BFQ for the case of slower devices
  2697. * (rotational or at least without queueing). But in this case
  2698. * the execution time of BFQ matters very little, if not at
  2699. * all.
  2700. */
  2701. if (likely(bfqd->nonrot_with_queueing))
  2702. return NULL;
  2703. /*
  2704. * Prevent bfqq from being merged if it has been created too
  2705. * long ago. The idea is that true cooperating processes, and
  2706. * thus their associated bfq_queues, are supposed to be
  2707. * created shortly after each other. This is the case, e.g.,
  2708. * for KVM/QEMU and dump I/O threads. Basing on this
  2709. * assumption, the following filtering greatly reduces the
  2710. * probability that two non-cooperating processes, which just
  2711. * happen to do close I/O for some short time interval, have
  2712. * their queues merged by mistake.
  2713. */
  2714. if (bfq_too_late_for_merging(bfqq))
  2715. return NULL;
  2716. if (!io_struct || unlikely(bfqq == &bfqd->oom_bfqq))
  2717. return NULL;
  2718. /* If there is only one backlogged queue, don't search. */
  2719. if (bfq_tot_busy_queues(bfqd) == 1)
  2720. return NULL;
  2721. in_service_bfqq = bfqd->in_service_queue;
  2722. if (in_service_bfqq && in_service_bfqq != bfqq &&
  2723. likely(in_service_bfqq != &bfqd->oom_bfqq) &&
  2724. bfq_rq_close_to_sector(io_struct, request,
  2725. bfqd->in_serv_last_pos) &&
  2726. bfqq->entity.parent == in_service_bfqq->entity.parent &&
  2727. bfq_may_be_close_cooperator(bfqq, in_service_bfqq)) {
  2728. new_bfqq = bfq_setup_merge(bfqq, in_service_bfqq);
  2729. if (new_bfqq)
  2730. return new_bfqq;
  2731. }
  2732. /*
  2733. * Check whether there is a cooperator among currently scheduled
  2734. * queues. The only thing we need is that the bio/request is not
  2735. * NULL, as we need it to establish whether a cooperator exists.
  2736. */
  2737. new_bfqq = bfq_find_close_cooperator(bfqd, bfqq,
  2738. bfq_io_struct_pos(io_struct, request));
  2739. if (new_bfqq && likely(new_bfqq != &bfqd->oom_bfqq) &&
  2740. bfq_may_be_close_cooperator(bfqq, new_bfqq))
  2741. return bfq_setup_merge(bfqq, new_bfqq);
  2742. return NULL;
  2743. }
  2744. static void bfq_bfqq_save_state(struct bfq_queue *bfqq)
  2745. {
  2746. struct bfq_io_cq *bic = bfqq->bic;
  2747. /*
  2748. * If !bfqq->bic, the queue is already shared or its requests
  2749. * have already been redirected to a shared queue; both idle window
  2750. * and weight raising state have already been saved. Do nothing.
  2751. */
  2752. if (!bic)
  2753. return;
  2754. bic->saved_last_serv_time_ns = bfqq->last_serv_time_ns;
  2755. bic->saved_inject_limit = bfqq->inject_limit;
  2756. bic->saved_decrease_time_jif = bfqq->decrease_time_jif;
  2757. bic->saved_weight = bfqq->entity.orig_weight;
  2758. bic->saved_ttime = bfqq->ttime;
  2759. bic->saved_has_short_ttime = bfq_bfqq_has_short_ttime(bfqq);
  2760. bic->saved_IO_bound = bfq_bfqq_IO_bound(bfqq);
  2761. bic->saved_io_start_time = bfqq->io_start_time;
  2762. bic->saved_tot_idle_time = bfqq->tot_idle_time;
  2763. bic->saved_in_large_burst = bfq_bfqq_in_large_burst(bfqq);
  2764. bic->was_in_burst_list = !hlist_unhashed(&bfqq->burst_list_node);
  2765. if (unlikely(bfq_bfqq_just_created(bfqq) &&
  2766. !bfq_bfqq_in_large_burst(bfqq) &&
  2767. bfqq->bfqd->low_latency)) {
  2768. /*
  2769. * bfqq being merged right after being created: bfqq
  2770. * would have deserved interactive weight raising, but
  2771. * did not make it to be set in a weight-raised state,
  2772. * because of this early merge. Store directly the
  2773. * weight-raising state that would have been assigned
  2774. * to bfqq, so as to avoid that bfqq unjustly fails
  2775. * to enjoy weight raising if split soon.
  2776. */
  2777. bic->saved_wr_coeff = bfqq->bfqd->bfq_wr_coeff;
  2778. bic->saved_wr_start_at_switch_to_srt = bfq_smallest_from_now();
  2779. bic->saved_wr_cur_max_time = bfq_wr_duration(bfqq->bfqd);
  2780. bic->saved_last_wr_start_finish = jiffies;
  2781. } else {
  2782. bic->saved_wr_coeff = bfqq->wr_coeff;
  2783. bic->saved_wr_start_at_switch_to_srt =
  2784. bfqq->wr_start_at_switch_to_srt;
  2785. bic->saved_service_from_wr = bfqq->service_from_wr;
  2786. bic->saved_last_wr_start_finish = bfqq->last_wr_start_finish;
  2787. bic->saved_wr_cur_max_time = bfqq->wr_cur_max_time;
  2788. }
  2789. }
  2790. static void
  2791. bfq_reassign_last_bfqq(struct bfq_queue *cur_bfqq, struct bfq_queue *new_bfqq)
  2792. {
  2793. if (cur_bfqq->entity.parent &&
  2794. cur_bfqq->entity.parent->last_bfqq_created == cur_bfqq)
  2795. cur_bfqq->entity.parent->last_bfqq_created = new_bfqq;
  2796. else if (cur_bfqq->bfqd && cur_bfqq->bfqd->last_bfqq_created == cur_bfqq)
  2797. cur_bfqq->bfqd->last_bfqq_created = new_bfqq;
  2798. }
  2799. void bfq_release_process_ref(struct bfq_data *bfqd, struct bfq_queue *bfqq)
  2800. {
  2801. /*
  2802. * To prevent bfqq's service guarantees from being violated,
  2803. * bfqq may be left busy, i.e., queued for service, even if
  2804. * empty (see comments in __bfq_bfqq_expire() for
  2805. * details). But, if no process will send requests to bfqq any
  2806. * longer, then there is no point in keeping bfqq queued for
  2807. * service. In addition, keeping bfqq queued for service, but
  2808. * with no process ref any longer, may have caused bfqq to be
  2809. * freed when dequeued from service. But this is assumed to
  2810. * never happen.
  2811. */
  2812. if (bfq_bfqq_busy(bfqq) && RB_EMPTY_ROOT(&bfqq->sort_list) &&
  2813. bfqq != bfqd->in_service_queue)
  2814. bfq_del_bfqq_busy(bfqq, false);
  2815. bfq_reassign_last_bfqq(bfqq, NULL);
  2816. bfq_put_queue(bfqq);
  2817. }
  2818. static void
  2819. bfq_merge_bfqqs(struct bfq_data *bfqd, struct bfq_io_cq *bic,
  2820. struct bfq_queue *bfqq, struct bfq_queue *new_bfqq)
  2821. {
  2822. bfq_log_bfqq(bfqd, bfqq, "merging with queue %lu",
  2823. (unsigned long)new_bfqq->pid);
  2824. /* Save weight raising and idle window of the merged queues */
  2825. bfq_bfqq_save_state(bfqq);
  2826. bfq_bfqq_save_state(new_bfqq);
  2827. if (bfq_bfqq_IO_bound(bfqq))
  2828. bfq_mark_bfqq_IO_bound(new_bfqq);
  2829. bfq_clear_bfqq_IO_bound(bfqq);
  2830. /*
  2831. * The processes associated with bfqq are cooperators of the
  2832. * processes associated with new_bfqq. So, if bfqq has a
  2833. * waker, then assume that all these processes will be happy
  2834. * to let bfqq's waker freely inject I/O when they have no
  2835. * I/O.
  2836. */
  2837. if (bfqq->waker_bfqq && !new_bfqq->waker_bfqq &&
  2838. bfqq->waker_bfqq != new_bfqq) {
  2839. new_bfqq->waker_bfqq = bfqq->waker_bfqq;
  2840. new_bfqq->tentative_waker_bfqq = NULL;
  2841. /*
  2842. * If the waker queue disappears, then
  2843. * new_bfqq->waker_bfqq must be reset. So insert
  2844. * new_bfqq into the woken_list of the waker. See
  2845. * bfq_check_waker for details.
  2846. */
  2847. hlist_add_head(&new_bfqq->woken_list_node,
  2848. &new_bfqq->waker_bfqq->woken_list);
  2849. }
  2850. /*
  2851. * If bfqq is weight-raised, then let new_bfqq inherit
  2852. * weight-raising. To reduce false positives, neglect the case
  2853. * where bfqq has just been created, but has not yet made it
  2854. * to be weight-raised (which may happen because EQM may merge
  2855. * bfqq even before bfq_add_request is executed for the first
  2856. * time for bfqq). Handling this case would however be very
  2857. * easy, thanks to the flag just_created.
  2858. */
  2859. if (new_bfqq->wr_coeff == 1 && bfqq->wr_coeff > 1) {
  2860. new_bfqq->wr_coeff = bfqq->wr_coeff;
  2861. new_bfqq->wr_cur_max_time = bfqq->wr_cur_max_time;
  2862. new_bfqq->last_wr_start_finish = bfqq->last_wr_start_finish;
  2863. new_bfqq->wr_start_at_switch_to_srt =
  2864. bfqq->wr_start_at_switch_to_srt;
  2865. if (bfq_bfqq_busy(new_bfqq))
  2866. bfqd->wr_busy_queues++;
  2867. new_bfqq->entity.prio_changed = 1;
  2868. }
  2869. if (bfqq->wr_coeff > 1) { /* bfqq has given its wr to new_bfqq */
  2870. bfqq->wr_coeff = 1;
  2871. bfqq->entity.prio_changed = 1;
  2872. if (bfq_bfqq_busy(bfqq))
  2873. bfqd->wr_busy_queues--;
  2874. }
  2875. bfq_log_bfqq(bfqd, new_bfqq, "merge_bfqqs: wr_busy %d",
  2876. bfqd->wr_busy_queues);
  2877. /*
  2878. * Merge queues (that is, let bic redirect its requests to new_bfqq)
  2879. */
  2880. bic_set_bfqq(bic, new_bfqq, true);
  2881. bfq_mark_bfqq_coop(new_bfqq);
  2882. /*
  2883. * new_bfqq now belongs to at least two bics (it is a shared queue):
  2884. * set new_bfqq->bic to NULL. bfqq either:
  2885. * - does not belong to any bic any more, and hence bfqq->bic must
  2886. * be set to NULL, or
  2887. * - is a queue whose owning bics have already been redirected to a
  2888. * different queue, hence the queue is destined to not belong to
  2889. * any bic soon and bfqq->bic is already NULL (therefore the next
  2890. * assignment causes no harm).
  2891. */
  2892. new_bfqq->bic = NULL;
  2893. /*
  2894. * If the queue is shared, the pid is the pid of one of the associated
  2895. * processes. Which pid depends on the exact sequence of merge events
  2896. * the queue underwent. So printing such a pid is useless and confusing
  2897. * because it reports a random pid between those of the associated
  2898. * processes.
  2899. * We mark such a queue with a pid -1, and then print SHARED instead of
  2900. * a pid in logging messages.
  2901. */
  2902. new_bfqq->pid = -1;
  2903. bfqq->bic = NULL;
  2904. bfq_reassign_last_bfqq(bfqq, new_bfqq);
  2905. bfq_release_process_ref(bfqd, bfqq);
  2906. }
  2907. static bool bfq_allow_bio_merge(struct request_queue *q, struct request *rq,
  2908. struct bio *bio)
  2909. {
  2910. struct bfq_data *bfqd = q->elevator->elevator_data;
  2911. bool is_sync = op_is_sync(bio->bi_opf);
  2912. struct bfq_queue *bfqq = bfqd->bio_bfqq, *new_bfqq;
  2913. /*
  2914. * Disallow merge of a sync bio into an async request.
  2915. */
  2916. if (is_sync && !rq_is_sync(rq))
  2917. return false;
  2918. /*
  2919. * Lookup the bfqq that this bio will be queued with. Allow
  2920. * merge only if rq is queued there.
  2921. */
  2922. if (!bfqq)
  2923. return false;
  2924. /*
  2925. * We take advantage of this function to perform an early merge
  2926. * of the queues of possible cooperating processes.
  2927. */
  2928. new_bfqq = bfq_setup_cooperator(bfqd, bfqq, bio, false, bfqd->bio_bic);
  2929. if (new_bfqq) {
  2930. /*
  2931. * bic still points to bfqq, then it has not yet been
  2932. * redirected to some other bfq_queue, and a queue
  2933. * merge between bfqq and new_bfqq can be safely
  2934. * fulfilled, i.e., bic can be redirected to new_bfqq
  2935. * and bfqq can be put.
  2936. */
  2937. bfq_merge_bfqqs(bfqd, bfqd->bio_bic, bfqq,
  2938. new_bfqq);
  2939. /*
  2940. * If we get here, bio will be queued into new_bfqq,
  2941. * so use new_bfqq to decide whether bio and rq can be
  2942. * merged.
  2943. */
  2944. bfqq = new_bfqq;
  2945. /*
  2946. * Change also bqfd->bio_bfqq, as
  2947. * bfqd->bio_bic now points to new_bfqq, and
  2948. * this function may be invoked again (and then may
  2949. * use again bqfd->bio_bfqq).
  2950. */
  2951. bfqd->bio_bfqq = bfqq;
  2952. }
  2953. return bfqq == RQ_BFQQ(rq);
  2954. }
  2955. /*
  2956. * Set the maximum time for the in-service queue to consume its
  2957. * budget. This prevents seeky processes from lowering the throughput.
  2958. * In practice, a time-slice service scheme is used with seeky
  2959. * processes.
  2960. */
  2961. static void bfq_set_budget_timeout(struct bfq_data *bfqd,
  2962. struct bfq_queue *bfqq)
  2963. {
  2964. unsigned int timeout_coeff;
  2965. if (bfqq->wr_cur_max_time == bfqd->bfq_wr_rt_max_time)
  2966. timeout_coeff = 1;
  2967. else
  2968. timeout_coeff = bfqq->entity.weight / bfqq->entity.orig_weight;
  2969. bfqd->last_budget_start = ktime_get();
  2970. bfqq->budget_timeout = jiffies +
  2971. bfqd->bfq_timeout * timeout_coeff;
  2972. }
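/*
 * Minimal sketch of the timeout scaling above, with hypothetical
 * names: soft real-time queues keep the plain timeout, while for the
 * other queues the timeout grows with the ratio between current and
 * original weight, so that weight-raised queues get proportionally
 * more time to consume their budget. Kept out of the build, as it is
 * only illustrative.
 */
#if 0
static unsigned long budget_timeout_jif(unsigned long base_timeout_jif,
					bool soft_rt, int weight,
					int orig_weight)
{
	unsigned int coeff = soft_rt ? 1 : weight / orig_weight;

	return jiffies + base_timeout_jif * coeff;
}
#endif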
  2973. static void __bfq_set_in_service_queue(struct bfq_data *bfqd,
  2974. struct bfq_queue *bfqq)
  2975. {
  2976. if (bfqq) {
  2977. bfq_clear_bfqq_fifo_expire(bfqq);
  2978. bfqd->budgets_assigned = (bfqd->budgets_assigned * 7 + 256) / 8;
  2979. if (time_is_before_jiffies(bfqq->last_wr_start_finish) &&
  2980. bfqq->wr_coeff > 1 &&
  2981. bfqq->wr_cur_max_time == bfqd->bfq_wr_rt_max_time &&
  2982. time_is_before_jiffies(bfqq->budget_timeout)) {
  2983. /*
  2984. * For soft real-time queues, move the start
  2985. * of the weight-raising period forward by the
  2986. * time the queue has not received any
  2987. * service. Otherwise, a relatively long
  2988. * service delay is likely to cause the
  2989. * weight-raising period of the queue to end,
  2990. * because of the short duration of the
  2991. * weight-raising period of a soft real-time
  2992. * queue. It is worth noting that this move
  2993. * is not so dangerous for the other queues,
  2994. * because soft real-time queues are not
  2995. * greedy.
  2996. *
  2997. * To not add a further variable, we use the
  2998. * overloaded field budget_timeout to
  2999. * determine for how long the queue has not
  3000. * received service, i.e., how much time has
  3001. * elapsed since the queue expired. However,
  3002. * this is a little imprecise, because
  3003. * budget_timeout is set to jiffies if bfqq
  3004. * not only expires, but also remains with no
  3005. * request.
  3006. */
  3007. if (time_after(bfqq->budget_timeout,
  3008. bfqq->last_wr_start_finish))
  3009. bfqq->last_wr_start_finish +=
  3010. jiffies - bfqq->budget_timeout;
  3011. else
  3012. bfqq->last_wr_start_finish = jiffies;
  3013. }
  3014. bfq_set_budget_timeout(bfqd, bfqq);
  3015. bfq_log_bfqq(bfqd, bfqq,
  3016. "set_in_service_queue, cur-budget = %d",
  3017. bfqq->entity.budget);
  3018. }
  3019. bfqd->in_service_queue = bfqq;
  3020. bfqd->in_serv_last_pos = 0;
  3021. }
  3022. /*
  3023. * Get and set a new queue for service.
  3024. */
  3025. static struct bfq_queue *bfq_set_in_service_queue(struct bfq_data *bfqd)
  3026. {
  3027. struct bfq_queue *bfqq = bfq_get_next_queue(bfqd);
  3028. __bfq_set_in_service_queue(bfqd, bfqq);
  3029. return bfqq;
  3030. }
  3031. static void bfq_arm_slice_timer(struct bfq_data *bfqd)
  3032. {
  3033. struct bfq_queue *bfqq = bfqd->in_service_queue;
  3034. u32 sl;
  3035. bfq_mark_bfqq_wait_request(bfqq);
  3036. /*
  3037. * We don't want to idle for seeks, but we do want to allow
  3038. * fair distribution of slice time for a process doing back-to-back
  3039. * seeks. So allow a little bit of time for it to submit a new rq.
  3040. */
  3041. sl = bfqd->bfq_slice_idle;
  3042. /*
  3043. * Unless the queue is being weight-raised or the scenario is
  3044. * asymmetric, grant only minimum idle time if the queue
  3045. * is seeky. A long idling is preserved for a weight-raised
  3046. * queue, or, more in general, in an asymmetric scenario,
  3047. * because a long idling is needed for guaranteeing to a queue
  3048. * its reserved share of the throughput (in particular, it is
  3049. * needed if the queue has a higher weight than some other
  3050. * queue).
  3051. */
  3052. if (BFQQ_SEEKY(bfqq) && bfqq->wr_coeff == 1 &&
  3053. !bfq_asymmetric_scenario(bfqd, bfqq))
  3054. sl = min_t(u64, sl, BFQ_MIN_TT);
  3055. else if (bfqq->wr_coeff > 1)
  3056. sl = max_t(u32, sl, 20ULL * NSEC_PER_MSEC);
  3057. bfqd->last_idling_start = ktime_get();
  3058. bfqd->last_idling_start_jiffies = jiffies;
  3059. hrtimer_start(&bfqd->idle_slice_timer, ns_to_ktime(sl),
  3060. HRTIMER_MODE_REL);
  3061. bfqg_stats_set_start_idle_time(bfqq_group(bfqq));
  3062. }
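/*
 * Minimal sketch of the slice-length policy above, with hypothetical
 * names: a seeky, non-weight-raised queue in a symmetric scenario
 * gets only a minimal idle slice, while a weight-raised queue gets at
 * least 20 ms, so that its reserved share is not eroded by too-short
 * idling. Kept out of the build, as it is only illustrative.
 */
#if 0
static u64 idle_slice_ns(u64 slice_idle_ns, bool seeky, bool weight_raised,
			 bool asymmetric, u64 min_tt_ns)
{
	if (seeky && !weight_raised && !asymmetric)
		return min(slice_idle_ns, min_tt_ns);
	if (weight_raised)
		return max(slice_idle_ns, 20ULL * NSEC_PER_MSEC);
	return slice_idle_ns;
}
#endif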
  3063. /*
  3064. * In autotuning mode, max_budget is dynamically recomputed as the
  3065. * amount of sectors transferred in timeout at the estimated peak
  3066. * rate. This enables BFQ to utilize a full timeslice with a full
  3067. * budget, even if the in-service queue is served at peak rate. And
  3068. * this maximises throughput with sequential workloads.
  3069. */
  3070. static unsigned long bfq_calc_max_budget(struct bfq_data *bfqd)
  3071. {
  3072. return (u64)bfqd->peak_rate * USEC_PER_MSEC *
  3073. jiffies_to_msecs(bfqd->bfq_timeout)>>BFQ_RATE_SHIFT;
  3074. }
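/*
 * A worked example of the formula above, under the assumption that
 * BFQ_RATE_SHIFT is 16 and bfq_timeout is the default 125 ms: for a
 * drive doing about 0.8 sectors/us (~400 MB/s with 512 B sectors),
 * peak_rate ~= 0.8 << 16 ~= 52429, and max_budget ~=
 * (52429 * 1000 * 125) >> 16 ~= 100000 sectors, i.e., the sectors the
 * drive can transfer in one timeout at peak rate. The helper below is
 * a hypothetical sketch, kept out of the build.
 */
#if 0
static u64 max_budget_sectors(u64 peak_rate_shifted, unsigned int timeout_ms,
			      unsigned int rate_shift)
{
	/* peak_rate_shifted is in (sectors/us) << rate_shift */
	return (peak_rate_shifted * USEC_PER_MSEC * timeout_ms) >> rate_shift;
}
#endif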
  3075. /*
  3076. * Update parameters related to throughput and responsiveness, as a
  3077. * function of the estimated peak rate. See comments on
  3078. * bfq_calc_max_budget(), and on the ref_wr_duration array.
  3079. */
  3080. static void update_thr_responsiveness_params(struct bfq_data *bfqd)
  3081. {
  3082. if (bfqd->bfq_user_max_budget == 0) {
  3083. bfqd->bfq_max_budget =
  3084. bfq_calc_max_budget(bfqd);
  3085. bfq_log(bfqd, "new max_budget = %d", bfqd->bfq_max_budget);
  3086. }
  3087. }
  3088. static void bfq_reset_rate_computation(struct bfq_data *bfqd,
  3089. struct request *rq)
  3090. {
  3091. if (rq != NULL) { /* new rq dispatch now, reset accordingly */
  3092. bfqd->last_dispatch = bfqd->first_dispatch = ktime_get_ns();
  3093. bfqd->peak_rate_samples = 1;
  3094. bfqd->sequential_samples = 0;
  3095. bfqd->tot_sectors_dispatched = bfqd->last_rq_max_size =
  3096. blk_rq_sectors(rq);
  3097. } else /* no new rq dispatched, just reset the number of samples */
  3098. bfqd->peak_rate_samples = 0; /* full re-init on next disp. */
  3099. bfq_log(bfqd,
  3100. "reset_rate_computation at end, sample %u/%u tot_sects %llu",
  3101. bfqd->peak_rate_samples, bfqd->sequential_samples,
  3102. bfqd->tot_sectors_dispatched);
  3103. }
  3104. static void bfq_update_rate_reset(struct bfq_data *bfqd, struct request *rq)
  3105. {
  3106. u32 rate, weight, divisor;
  3107. /*
  3108. * For the convergence property to hold (see comments on
  3109. * bfq_update_peak_rate()) and for the assessment to be
  3110. * reliable, a minimum number of samples must be present, and
  3111. * a minimum amount of time must have elapsed. If not so, do
  3112. * not compute new rate. Just reset parameters, to get ready
  3113. * for a new evaluation attempt.
  3114. */
  3115. if (bfqd->peak_rate_samples < BFQ_RATE_MIN_SAMPLES ||
  3116. bfqd->delta_from_first < BFQ_RATE_MIN_INTERVAL)
  3117. goto reset_computation;
  3118. /*
  3119. * If a new request completion has occurred after last
  3120. * dispatch, then, to approximate the rate at which requests
  3121. * have been served by the device, it is more precise to
  3122. * extend the observation interval to the last completion.
  3123. */
  3124. bfqd->delta_from_first =
  3125. max_t(u64, bfqd->delta_from_first,
  3126. bfqd->last_completion - bfqd->first_dispatch);
  3127. /*
  3128. * Rate computed in sects/usec, and not sects/nsec, for
  3129. * precision issues.
  3130. */
  3131. rate = div64_ul(bfqd->tot_sectors_dispatched<<BFQ_RATE_SHIFT,
  3132. div_u64(bfqd->delta_from_first, NSEC_PER_USEC));
  3133. /*
  3134. * Peak rate not updated if:
  3135. * - the percentage of sequential dispatches is below 3/4 of the
  3136. * total, and rate is below the current estimated peak rate
  3137. * - rate is unreasonably high (> 20M sectors/sec)
  3138. */
  3139. if ((bfqd->sequential_samples < (3 * bfqd->peak_rate_samples)>>2 &&
  3140. rate <= bfqd->peak_rate) ||
  3141. rate > 20<<BFQ_RATE_SHIFT)
  3142. goto reset_computation;
  3143. /*
  3144. * We have to update the peak rate, at last! To this purpose,
  3145. * we use a low-pass filter. We compute the smoothing constant
  3146. * of the filter as a function of the 'weight' of the new
  3147. * measured rate.
  3148. *
  3149. * As can be seen in next formulas, we define this weight as a
  3150. * quantity proportional to how sequential the workload is,
  3151. * and to how long the observation time interval is.
  3152. *
  3153. * The weight runs from 0 to 8. The maximum value of the
  3154. * weight, 8, yields the minimum value for the smoothing
  3155. * constant. At this minimum value for the smoothing constant,
  3156. * the measured rate contributes for half of the next value of
  3157. * the estimated peak rate.
  3158. *
  3159. * So, the first step is to compute the weight as a function
  3160. * of how sequential the workload is. Note that the weight
  3161. * cannot reach 9, because bfqd->sequential_samples cannot
  3162. * become equal to bfqd->peak_rate_samples, which, in its
  3163. * turn, holds true because bfqd->sequential_samples is not
  3164. * incremented for the first sample.
  3165. */
  3166. weight = (9 * bfqd->sequential_samples) / bfqd->peak_rate_samples;
  3167. /*
  3168. * Second step: further refine the weight as a function of the
  3169. * duration of the observation interval.
  3170. */
  3171. weight = min_t(u32, 8,
  3172. div_u64(weight * bfqd->delta_from_first,
  3173. BFQ_RATE_REF_INTERVAL));
  3174. /*
  3175. * Divisor ranging from 10, for minimum weight, to 2, for
  3176. * maximum weight.
  3177. */
  3178. divisor = 10 - weight;
  3179. /*
  3180. * Finally, update peak rate:
  3181. *
  3182. * peak_rate = peak_rate * (divisor-1) / divisor + rate / divisor
  3183. */
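	/*
	 * For instance, with illustrative numbers: weight 8 yields
	 * divisor 2, i.e., alpha = 1/2, so the new sample and the old
	 * estimate contribute one half each; weight 0 yields divisor
	 * 10, i.e., alpha = 1/10, so the new sample contributes only
	 * one tenth of the next estimate.
	 */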
	bfqd->peak_rate *= divisor-1;
	bfqd->peak_rate /= divisor;
	rate /= divisor; /* smoothing constant alpha = 1/divisor */
	bfqd->peak_rate += rate;
  3188. /*
  3189. * For a very slow device, bfqd->peak_rate can reach 0 (see
  3190. * the minimum representable values reported in the comments
  3191. * on BFQ_RATE_SHIFT). Push to 1 if this happens, to avoid
  3192. * divisions by zero where bfqd->peak_rate is used as a
  3193. * divisor.
  3194. */
  3195. bfqd->peak_rate = max_t(u32, 1, bfqd->peak_rate);
  3196. update_thr_responsiveness_params(bfqd);
  3197. reset_computation:
  3198. bfq_reset_rate_computation(bfqd, rq);
  3199. }
  3200. /*
  3201. * Update the read/write peak rate (the main quantity used for
  3202. * auto-tuning, see update_thr_responsiveness_params()).
  3203. *
  3204. * It is not trivial to estimate the peak rate (correctly): because of
  3205. * the presence of sw and hw queues between the scheduler and the
  3206. * device components that finally serve I/O requests, it is hard to
  3207. * say exactly when a given dispatched request is served inside the
  3208. * device, and for how long. As a consequence, it is hard to know
  3209. * precisely at what rate a given set of requests is actually served
  3210. * by the device.
  3211. *
  3212. * On the opposite end, the dispatch time of any request is trivially
  3213. * available, and, from this piece of information, the "dispatch rate"
  3214. * of requests can be immediately computed. So, the idea in the next
  3215. * function is to use what is known, namely request dispatch times
  3216. * (plus, when useful, request completion times), to estimate what is
  3217. * unknown, namely in-device request service rate.
  3218. *
  3219. * The main issue is that, because of the above facts, the rate at
  3220. * which a certain set of requests is dispatched over a certain time
  3221. * interval can vary greatly with respect to the rate at which the
  3222. * same requests are then served. But, since the size of any
  3223. * intermediate queue is limited, and the service scheme is lossless
  3224. * (no request is silently dropped), the following obvious convergence
  3225. * property holds: the number of requests dispatched MUST become
  3226. * closer and closer to the number of requests completed as the
  3227. * observation interval grows. This is the key property used in
  3228. * the next function to estimate the peak service rate as a function
 * of the observed dispatch rate. This function is assumed to be
 * invoked on every request dispatch.
  3231. */
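/*
 * In short, the rate sample fed to the low-pass filter in
 * bfq_update_rate_reset() is
 *
 *   rate ~= tot_sectors_dispatched / delta_from_first
 *
 * (in fixed point, sectors per usec), computed only over observation
 * intervals containing at least BFQ_RATE_MIN_SAMPLES dispatches and
 * lasting at least BFQ_RATE_MIN_INTERVAL.
 */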
  3232. static void bfq_update_peak_rate(struct bfq_data *bfqd, struct request *rq)
  3233. {
  3234. u64 now_ns = ktime_get_ns();
  3235. if (bfqd->peak_rate_samples == 0) { /* first dispatch */
  3236. bfq_log(bfqd, "update_peak_rate: goto reset, samples %d",
  3237. bfqd->peak_rate_samples);
  3238. bfq_reset_rate_computation(bfqd, rq);
  3239. goto update_last_values; /* will add one sample */
  3240. }
  3241. /*
  3242. * Device idle for very long: the observation interval lasting
  3243. * up to this dispatch cannot be a valid observation interval
  3244. * for computing a new peak rate (similarly to the late-
  3245. * completion event in bfq_completed_request()). Go to
  3246. * update_rate_and_reset to have the following three steps
  3247. * taken:
  3248. * - close the observation interval at the last (previous)
  3249. * request dispatch or completion
  3250. * - compute rate, if possible, for that observation interval
  3251. * - start a new observation interval with this dispatch
  3252. */
  3253. if (now_ns - bfqd->last_dispatch > 100*NSEC_PER_MSEC &&
  3254. bfqd->rq_in_driver == 0)
  3255. goto update_rate_and_reset;
  3256. /* Update sampling information */
  3257. bfqd->peak_rate_samples++;
  3258. if ((bfqd->rq_in_driver > 0 ||
  3259. now_ns - bfqd->last_completion < BFQ_MIN_TT)
  3260. && !BFQ_RQ_SEEKY(bfqd, bfqd->last_position, rq))
  3261. bfqd->sequential_samples++;
  3262. bfqd->tot_sectors_dispatched += blk_rq_sectors(rq);
  3263. /* Reset max observed rq size every 32 dispatches */
  3264. if (likely(bfqd->peak_rate_samples % 32))
  3265. bfqd->last_rq_max_size =
  3266. max_t(u32, blk_rq_sectors(rq), bfqd->last_rq_max_size);
  3267. else
  3268. bfqd->last_rq_max_size = blk_rq_sectors(rq);
  3269. bfqd->delta_from_first = now_ns - bfqd->first_dispatch;
  3270. /* Target observation interval not yet reached, go on sampling */
  3271. if (bfqd->delta_from_first < BFQ_RATE_REF_INTERVAL)
  3272. goto update_last_values;
  3273. update_rate_and_reset:
  3274. bfq_update_rate_reset(bfqd, rq);
  3275. update_last_values:
  3276. bfqd->last_position = blk_rq_pos(rq) + blk_rq_sectors(rq);
  3277. if (RQ_BFQQ(rq) == bfqd->in_service_queue)
  3278. bfqd->in_serv_last_pos = bfqd->last_position;
  3279. bfqd->last_dispatch = now_ns;
  3280. }
  3281. /*
  3282. * Remove request from internal lists.
  3283. */
  3284. static void bfq_dispatch_remove(struct request_queue *q, struct request *rq)
  3285. {
  3286. struct bfq_queue *bfqq = RQ_BFQQ(rq);
  3287. /*
  3288. * For consistency, the next instruction should have been
  3289. * executed after removing the request from the queue and
  3290. * dispatching it. We execute instead this instruction before
  3291. * bfq_remove_request() (and hence introduce a temporary
  3292. * inconsistency), for efficiency. In fact, should this
  3293. * dispatch occur for a non in-service bfqq, this anticipated
  3294. * increment prevents two counters related to bfqq->dispatched
  3295. * from risking to be, first, uselessly decremented, and then
  3296. * incremented again when the (new) value of bfqq->dispatched
  3297. * happens to be taken into account.
  3298. */
  3299. bfqq->dispatched++;
  3300. bfq_update_peak_rate(q->elevator->elevator_data, rq);
  3301. bfq_remove_request(q, rq);
  3302. }
  3303. /*
  3304. * There is a case where idling does not have to be performed for
  3305. * throughput concerns, but to preserve the throughput share of
  3306. * the process associated with bfqq.
  3307. *
  3308. * To introduce this case, we can note that allowing the drive
  3309. * to enqueue more than one request at a time, and hence
  3310. * delegating de facto final scheduling decisions to the
  3311. * drive's internal scheduler, entails loss of control on the
  3312. * actual request service order. In particular, the critical
  3313. * situation is when requests from different processes happen
  3314. * to be present, at the same time, in the internal queue(s)
  3315. * of the drive. In such a situation, the drive, by deciding
  3316. * the service order of the internally-queued requests, does
  3317. * determine also the actual throughput distribution among
  3318. * these processes. But the drive typically has no notion or
  3319. * concern about per-process throughput distribution, and
  3320. * makes its decisions only on a per-request basis. Therefore,
  3321. * the service distribution enforced by the drive's internal
  3322. * scheduler is likely to coincide with the desired throughput
  3323. * distribution only in a completely symmetric, or favorably
  3324. * skewed scenario where:
  3325. * (i-a) each of these processes must get the same throughput as
  3326. * the others,
  3327. * (i-b) in case (i-a) does not hold, it holds that the process
  3328. * associated with bfqq must receive a lower or equal
  3329. * throughput than any of the other processes;
  3330. * (ii) the I/O of each process has the same properties, in
  3331. * terms of locality (sequential or random), direction
  3332. * (reads or writes), request sizes, greediness
  3333. * (from I/O-bound to sporadic), and so on;
  3334. * In fact, in such a scenario, the drive tends to treat the requests
  3335. * of each process in about the same way as the requests of the
  3336. * others, and thus to provide each of these processes with about the
  3337. * same throughput. This is exactly the desired throughput
  3338. * distribution if (i-a) holds, or, if (i-b) holds instead, this is an
  3339. * even more convenient distribution for (the process associated with)
  3340. * bfqq.
  3341. *
  3342. * In contrast, in any asymmetric or unfavorable scenario, device
  3343. * idling (I/O-dispatch plugging) is certainly needed to guarantee
  3344. * that bfqq receives its assigned fraction of the device throughput
  3345. * (see [1] for details).
  3346. *
  3347. * The problem is that idling may significantly reduce throughput with
  3348. * certain combinations of types of I/O and devices. An important
  3349. * example is sync random I/O on flash storage with command
  3350. * queueing. So, unless bfqq falls in cases where idling also boosts
 * throughput, it is important to check conditions (i-a), (i-b) and
  3352. * (ii) accurately, so as to avoid idling when not strictly needed for
  3353. * service guarantees.
  3354. *
  3355. * Unfortunately, it is extremely difficult to thoroughly check
  3356. * condition (ii). And, in case there are active groups, it becomes
  3357. * very difficult to check conditions (i-a) and (i-b) too. In fact,
  3358. * if there are active groups, then, for conditions (i-a) or (i-b) to
  3359. * become false 'indirectly', it is enough that an active group
  3360. * contains more active processes or sub-groups than some other active
  3361. * group. More precisely, for conditions (i-a) or (i-b) to become
  3362. * false because of such a group, it is not even necessary that the
  3363. * group is (still) active: it is sufficient that, even if the group
  3364. * has become inactive, some of its descendant processes still have
  3365. * some request already dispatched but still waiting for
  3366. * completion. In fact, requests have still to be guaranteed their
  3367. * share of the throughput even after being dispatched. In this
  3368. * respect, it is easy to show that, if a group frequently becomes
  3369. * inactive while still having in-flight requests, and if, when this
  3370. * happens, the group is not considered in the calculation of whether
  3371. * the scenario is asymmetric, then the group may fail to be
  3372. * guaranteed its fair share of the throughput (basically because
  3373. * idling may not be performed for the descendant processes of the
  3374. * group, but it had to be). We address this issue with the following
  3375. * bi-modal behavior, implemented in the function
  3376. * bfq_asymmetric_scenario().
  3377. *
  3378. * If there are groups with requests waiting for completion
  3379. * (as commented above, some of these groups may even be
  3380. * already inactive), then the scenario is tagged as
  3381. * asymmetric, conservatively, without checking any of the
  3382. * conditions (i-a), (i-b) or (ii). So the device is idled for bfqq.
  3383. * This behavior matches also the fact that groups are created
  3384. * exactly if controlling I/O is a primary concern (to
  3385. * preserve bandwidth and latency guarantees).
  3386. *
  3387. * On the opposite end, if there are no groups with requests waiting
  3388. * for completion, then only conditions (i-a) and (i-b) are actually
 * controlled, i.e., provided that condition (i-a) or (i-b) holds,
  3390. * idling is not performed, regardless of whether condition (ii)
  3391. * holds. In other words, only if conditions (i-a) and (i-b) do not
  3392. * hold, then idling is allowed, and the device tends to be prevented
  3393. * from queueing many requests, possibly of several processes. Since
  3394. * there are no groups with requests waiting for completion, then, to
  3395. * control conditions (i-a) and (i-b) it is enough to check just
  3396. * whether all the queues with requests waiting for completion also
  3397. * have the same weight.
  3398. *
  3399. * Not checking condition (ii) evidently exposes bfqq to the
  3400. * risk of getting less throughput than its fair share.
  3401. * However, for queues with the same weight, a further
  3402. * mechanism, preemption, mitigates or even eliminates this
  3403. * problem. And it does so without consequences on overall
  3404. * throughput. This mechanism and its benefits are explained
  3405. * in the next three paragraphs.
  3406. *
  3407. * Even if a queue, say Q, is expired when it remains idle, Q
  3408. * can still preempt the new in-service queue if the next
  3409. * request of Q arrives soon (see the comments on
  3410. * bfq_bfqq_update_budg_for_activation). If all queues and
  3411. * groups have the same weight, this form of preemption,
  3412. * combined with the hole-recovery heuristic described in the
  3413. * comments on function bfq_bfqq_update_budg_for_activation,
 * is enough to preserve a correct bandwidth distribution in
  3415. * the mid term, even without idling. In fact, even if not
  3416. * idling allows the internal queues of the device to contain
  3417. * many requests, and thus to reorder requests, we can rather
  3418. * safely assume that the internal scheduler still preserves a
  3419. * minimum of mid-term fairness.
  3420. *
  3421. * More precisely, this preemption-based, idleless approach
  3422. * provides fairness in terms of IOPS, and not sectors per
  3423. * second. This can be seen with a simple example. Suppose
  3424. * that there are two queues with the same weight, but that
  3425. * the first queue receives requests of 8 sectors, while the
  3426. * second queue receives requests of 1024 sectors. In
  3427. * addition, suppose that each of the two queues contains at
  3428. * most one request at a time, which implies that each queue
  3429. * always remains idle after it is served. Finally, after
  3430. * remaining idle, each queue receives very quickly a new
  3431. * request. It follows that the two queues are served
  3432. * alternatively, preempting each other if needed. This
  3433. * implies that, although both queues have the same weight,
  3434. * the queue with large requests receives a service that is
  3435. * 1024/8 times as high as the service received by the other
  3436. * queue.
  3437. *
  3438. * The motivation for using preemption instead of idling (for
  3439. * queues with the same weight) is that, by not idling,
  3440. * service guarantees are preserved (completely or at least in
  3441. * part) without minimally sacrificing throughput. And, if
  3442. * there is no active group, then the primary expectation for
  3443. * this device is probably a high throughput.
  3444. *
  3445. * We are now left only with explaining the two sub-conditions in the
  3446. * additional compound condition that is checked below for deciding
  3447. * whether the scenario is asymmetric. To explain the first
  3448. * sub-condition, we need to add that the function
  3449. * bfq_asymmetric_scenario checks the weights of only
  3450. * non-weight-raised queues, for efficiency reasons (see comments on
  3451. * bfq_weights_tree_add()). Then the fact that bfqq is weight-raised
  3452. * is checked explicitly here. More precisely, the compound condition
  3453. * below takes into account also the fact that, even if bfqq is being
  3454. * weight-raised, the scenario is still symmetric if all queues with
  3455. * requests waiting for completion happen to be
  3456. * weight-raised. Actually, we should be even more precise here, and
  3457. * differentiate between interactive weight raising and soft real-time
  3458. * weight raising.
  3459. *
  3460. * The second sub-condition checked in the compound condition is
  3461. * whether there is a fair amount of already in-flight I/O not
  3462. * belonging to bfqq. If so, I/O dispatching is to be plugged, for the
  3463. * following reason. The drive may decide to serve in-flight
  3464. * non-bfqq's I/O requests before bfqq's ones, thereby delaying the
  3465. * arrival of new I/O requests for bfqq (recall that bfqq is sync). If
  3466. * I/O-dispatching is not plugged, then, while bfqq remains empty, a
  3467. * basically uncontrolled amount of I/O from other queues may be
  3468. * dispatched too, possibly causing the service of bfqq's I/O to be
  3469. * delayed even longer in the drive. This problem gets more and more
  3470. * serious as the speed and the queue depth of the drive grow,
  3471. * because, as these two quantities grow, the probability to find no
  3472. * queue busy but many requests in flight grows too. By contrast,
  3473. * plugging I/O dispatching minimizes the delay induced by already
  3474. * in-flight I/O, and enables bfqq to recover the bandwidth it may
  3475. * lose because of this delay.
  3476. *
  3477. * As a side note, it is worth considering that the above
  3478. * device-idling countermeasures may however fail in the following
  3479. * unlucky scenario: if I/O-dispatch plugging is (correctly) disabled
  3480. * in a time period during which all symmetry sub-conditions hold, and
  3481. * therefore the device is allowed to enqueue many requests, but at
  3482. * some later point in time some sub-condition stops to hold, then it
  3483. * may become impossible to make requests be served in the desired
  3484. * order until all the requests already queued in the device have been
  3485. * served. The last sub-condition commented above somewhat mitigates
  3486. * this problem for weight-raised queues.
  3487. *
  3488. * However, as an additional mitigation for this problem, we preserve
  3489. * plugging for a special symmetric case that may suddenly turn into
  3490. * asymmetric: the case where only bfqq is busy. In this case, not
  3491. * expiring bfqq does not cause any harm to any other queues in terms
  3492. * of service guarantees. In contrast, it avoids the following unlucky
  3493. * sequence of events: (1) bfqq is expired, (2) a new queue with a
  3494. * lower weight than bfqq becomes busy (or more queues), (3) the new
  3495. * queue is served until a new request arrives for bfqq, (4) when bfqq
  3496. * is finally served, there are so many requests of the new queue in
  3497. * the drive that the pending requests for bfqq take a lot of time to
 * be served. In particular, event (2) may cause even already
 * dispatched requests of bfqq to be delayed, inside the drive. So, to
 * avoid this series of events, the scenario is preventively declared
 * as asymmetric also if bfqq is the only busy queue.
  3502. */
  3503. static bool idling_needed_for_service_guarantees(struct bfq_data *bfqd,
  3504. struct bfq_queue *bfqq)
  3505. {
  3506. int tot_busy_queues = bfq_tot_busy_queues(bfqd);
  3507. /* No point in idling for bfqq if it won't get requests any longer */
  3508. if (unlikely(!bfqq_process_refs(bfqq)))
  3509. return false;
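	/*
	 * Summing up the comments above: plug I/O dispatching if bfqq
	 * is weight-raised while either some busy queue is not
	 * weight-raised or a fair amount of non-bfqq I/O is already in
	 * flight, or if the scenario is asymmetric, or if bfqq is the
	 * only busy queue.
	 */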
	return (bfqq->wr_coeff > 1 &&
		(bfqd->wr_busy_queues < tot_busy_queues ||
		 bfqd->rq_in_driver >= bfqq->dispatched + 4)) ||
	       bfq_asymmetric_scenario(bfqd, bfqq) ||
	       tot_busy_queues == 1;
  3517. }
  3518. static bool __bfq_bfqq_expire(struct bfq_data *bfqd, struct bfq_queue *bfqq,
  3519. enum bfqq_expiration reason)
  3520. {
  3521. /*
  3522. * If this bfqq is shared between multiple processes, check
  3523. * to make sure that those processes are still issuing I/Os
  3524. * within the mean seek distance. If not, it may be time to
  3525. * break the queues apart again.
  3526. */
  3527. if (bfq_bfqq_coop(bfqq) && BFQQ_SEEKY(bfqq))
  3528. bfq_mark_bfqq_split_coop(bfqq);
  3529. /*
  3530. * Consider queues with a higher finish virtual time than
  3531. * bfqq. If idling_needed_for_service_guarantees(bfqq) returns
  3532. * true, then bfqq's bandwidth would be violated if an
  3533. * uncontrolled amount of I/O from these queues were
  3534. * dispatched while bfqq is waiting for its new I/O to
  3535. * arrive. This is exactly what may happen if this is a forced
  3536. * expiration caused by a preemption attempt, and if bfqq is
  3537. * not re-scheduled. To prevent this from happening, re-queue
  3538. * bfqq if it needs I/O-dispatch plugging, even if it is
 * empty. By doing so, bfqq is guaranteed to be served before the
  3540. * above queues (provided that bfqq is of course eligible).
  3541. */
  3542. if (RB_EMPTY_ROOT(&bfqq->sort_list) &&
  3543. !(reason == BFQQE_PREEMPTED &&
  3544. idling_needed_for_service_guarantees(bfqd, bfqq))) {
  3545. if (bfqq->dispatched == 0)
  3546. /*
  3547. * Overloading budget_timeout field to store
  3548. * the time at which the queue remains with no
  3549. * backlog and no outstanding request; used by
  3550. * the weight-raising mechanism.
  3551. */
  3552. bfqq->budget_timeout = jiffies;
  3553. bfq_del_bfqq_busy(bfqq, true);
  3554. } else {
  3555. bfq_requeue_bfqq(bfqd, bfqq, true);
  3556. /*
  3557. * Resort priority tree of potential close cooperators.
  3558. * See comments on bfq_pos_tree_add_move() for the unlikely().
  3559. */
  3560. if (unlikely(!bfqd->nonrot_with_queueing &&
  3561. !RB_EMPTY_ROOT(&bfqq->sort_list)))
  3562. bfq_pos_tree_add_move(bfqd, bfqq);
  3563. }
  3564. /*
  3565. * All in-service entities must have been properly deactivated
  3566. * or requeued before executing the next function, which
  3567. * resets all in-service entities as no more in service. This
  3568. * may cause bfqq to be freed. If this happens, the next
  3569. * function returns true.
  3570. */
  3571. return __bfq_bfqd_reset_in_service(bfqd);
  3572. }
  3573. /**
  3574. * __bfq_bfqq_recalc_budget - try to adapt the budget to the @bfqq behavior.
  3575. * @bfqd: device data.
  3576. * @bfqq: queue to update.
  3577. * @reason: reason for expiration.
  3578. *
  3579. * Handle the feedback on @bfqq budget at queue expiration.
  3580. * See the body for detailed comments.
  3581. */
  3582. static void __bfq_bfqq_recalc_budget(struct bfq_data *bfqd,
  3583. struct bfq_queue *bfqq,
  3584. enum bfqq_expiration reason)
  3585. {
  3586. struct request *next_rq;
  3587. int budget, min_budget;
  3588. min_budget = bfq_min_budget(bfqd);
  3589. if (bfqq->wr_coeff == 1)
  3590. budget = bfqq->max_budget;
  3591. else /*
  3592. * Use a constant, low budget for weight-raised queues,
  3593. * to help achieve a low latency. Keep it slightly higher
  3594. * than the minimum possible budget, to cause a little
  3595. * bit fewer expirations.
  3596. */
  3597. budget = 2 * min_budget;
  3598. bfq_log_bfqq(bfqd, bfqq, "recalc_budg: last budg %d, budg left %d",
  3599. bfqq->entity.budget, bfq_bfqq_budget_left(bfqq));
  3600. bfq_log_bfqq(bfqd, bfqq, "recalc_budg: last max_budg %d, min budg %d",
  3601. budget, bfq_min_budget(bfqd));
  3602. bfq_log_bfqq(bfqd, bfqq, "recalc_budg: sync %d, seeky %d",
  3603. bfq_bfqq_sync(bfqq), BFQQ_SEEKY(bfqd->in_service_queue));
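	/*
	 * In short, the feedback below for sync, non-weight-raised
	 * queues is: shrink the budget if the queue expired idle with
	 * no request in flight; double it on budget timeout, or if the
	 * queue expired idle with requests still in flight; quadruple
	 * it on budget exhaustion; set it to the service actually
	 * received (but not below the minimum budget) if the queue
	 * simply ran out of requests.
	 */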
  3604. if (bfq_bfqq_sync(bfqq) && bfqq->wr_coeff == 1) {
  3605. switch (reason) {
  3606. /*
  3607. * Caveat: in all the following cases we trade latency
  3608. * for throughput.
  3609. */
  3610. case BFQQE_TOO_IDLE:
  3611. /*
  3612. * This is the only case where we may reduce
  3613. * the budget: if there is no request of the
  3614. * process still waiting for completion, then
  3615. * we assume (tentatively) that the timer has
  3616. * expired because the batch of requests of
  3617. * the process could have been served with a
  3618. * smaller budget. Hence, betting that
 * the process will behave in the same way when it
  3620. * becomes backlogged again, we reduce its
  3621. * next budget. As long as we guess right,
  3622. * this budget cut reduces the latency
  3623. * experienced by the process.
  3624. *
  3625. * However, if there are still outstanding
  3626. * requests, then the process may have not yet
  3627. * issued its next request just because it is
  3628. * still waiting for the completion of some of
  3629. * the still outstanding ones. So in this
  3630. * subcase we do not reduce its budget, on the
  3631. * contrary we increase it to possibly boost
  3632. * the throughput, as discussed in the
  3633. * comments to the BUDGET_TIMEOUT case.
  3634. */
  3635. if (bfqq->dispatched > 0) /* still outstanding reqs */
  3636. budget = min(budget * 2, bfqd->bfq_max_budget);
  3637. else {
  3638. if (budget > 5 * min_budget)
  3639. budget -= 4 * min_budget;
  3640. else
  3641. budget = min_budget;
  3642. }
  3643. break;
  3644. case BFQQE_BUDGET_TIMEOUT:
  3645. /*
  3646. * We double the budget here because it gives
  3647. * the chance to boost the throughput if this
  3648. * is not a seeky process (and has bumped into
  3649. * this timeout because of, e.g., ZBR).
  3650. */
  3651. budget = min(budget * 2, bfqd->bfq_max_budget);
  3652. break;
  3653. case BFQQE_BUDGET_EXHAUSTED:
  3654. /*
  3655. * The process still has backlog, and did not
  3656. * let either the budget timeout or the disk
  3657. * idling timeout expire. Hence it is not
  3658. * seeky, has a short thinktime and may be
  3659. * happy with a higher budget too. So
  3660. * definitely increase the budget of this good
  3661. * candidate to boost the disk throughput.
  3662. */
  3663. budget = min(budget * 4, bfqd->bfq_max_budget);
  3664. break;
  3665. case BFQQE_NO_MORE_REQUESTS:
  3666. /*
  3667. * For queues that expire for this reason, it
  3668. * is particularly important to keep the
  3669. * budget close to the actual service they
  3670. * need. Doing so reduces the timestamp
  3671. * misalignment problem described in the
  3672. * comments in the body of
  3673. * __bfq_activate_entity. In fact, suppose
  3674. * that a queue systematically expires for
  3675. * BFQQE_NO_MORE_REQUESTS and presents a
  3676. * new request in time to enjoy timestamp
  3677. * back-shifting. The larger the budget of the
  3678. * queue is with respect to the service the
  3679. * queue actually requests in each service
  3680. * slot, the more times the queue can be
  3681. * reactivated with the same virtual finish
  3682. * time. It follows that, even if this finish
  3683. * time is pushed to the system virtual time
  3684. * to reduce the consequent timestamp
  3685. * misalignment, the queue unjustly enjoys for
  3686. * many re-activations a lower finish time
  3687. * than all newly activated queues.
  3688. *
  3689. * The service needed by bfqq is measured
  3690. * quite precisely by bfqq->entity.service.
  3691. * Since bfqq does not enjoy device idling,
  3692. * bfqq->entity.service is equal to the number
  3693. * of sectors that the process associated with
  3694. * bfqq requested to read/write before waiting
  3695. * for request completions, or blocking for
  3696. * other reasons.
  3697. */
  3698. budget = max_t(int, bfqq->entity.service, min_budget);
  3699. break;
  3700. default:
  3701. return;
  3702. }
  3703. } else if (!bfq_bfqq_sync(bfqq)) {
  3704. /*
  3705. * Async queues get always the maximum possible
  3706. * budget, as for them we do not care about latency
  3707. * (in addition, their ability to dispatch is limited
  3708. * by the charging factor).
  3709. */
  3710. budget = bfqd->bfq_max_budget;
  3711. }
  3712. bfqq->max_budget = budget;
  3713. if (bfqd->budgets_assigned >= bfq_stats_min_budgets &&
  3714. !bfqd->bfq_user_max_budget)
  3715. bfqq->max_budget = min(bfqq->max_budget, bfqd->bfq_max_budget);
  3716. /*
  3717. * If there is still backlog, then assign a new budget, making
  3718. * sure that it is large enough for the next request. Since
  3719. * the finish time of bfqq must be kept in sync with the
  3720. * budget, be sure to call __bfq_bfqq_expire() *after* this
  3721. * update.
  3722. *
  3723. * If there is no backlog, then no need to update the budget;
  3724. * it will be updated on the arrival of a new request.
  3725. */
  3726. next_rq = bfqq->next_rq;
  3727. if (next_rq)
  3728. bfqq->entity.budget = max_t(unsigned long, bfqq->max_budget,
  3729. bfq_serv_to_charge(next_rq, bfqq));
  3730. bfq_log_bfqq(bfqd, bfqq, "head sect: %u, new budget %d",
  3731. next_rq ? blk_rq_sectors(next_rq) : 0,
  3732. bfqq->entity.budget);
  3733. }
  3734. /*
  3735. * Return true if the process associated with bfqq is "slow". The slow
  3736. * flag is used, in addition to the budget timeout, to reduce the
  3737. * amount of service provided to seeky processes, and thus reduce
  3738. * their chances to lower the throughput. More details in the comments
  3739. * on the function bfq_bfqq_expire().
  3740. *
  3741. * An important observation is in order: as discussed in the comments
  3742. * on the function bfq_update_peak_rate(), with devices with internal
 * queues, it is hard, if possible at all, to know when and for how long
  3744. * an I/O request is processed by the device (apart from the trivial
  3745. * I/O pattern where a new request is dispatched only after the
  3746. * previous one has been completed). This makes it hard to evaluate
  3747. * the real rate at which the I/O requests of each bfq_queue are
  3748. * served. In fact, for an I/O scheduler like BFQ, serving a
  3749. * bfq_queue means just dispatching its requests during its service
  3750. * slot (i.e., until the budget of the queue is exhausted, or the
  3751. * queue remains idle, or, finally, a timeout fires). But, during the
  3752. * service slot of a bfq_queue, around 100 ms at most, the device may
  3753. * be even still processing requests of bfq_queues served in previous
  3754. * service slots. On the opposite end, the requests of the in-service
  3755. * bfq_queue may be completed after the service slot of the queue
  3756. * finishes.
  3757. *
  3758. * Anyway, unless more sophisticated solutions are used
  3759. * (where possible), the sum of the sizes of the requests dispatched
  3760. * during the service slot of a bfq_queue is probably the only
  3761. * approximation available for the service received by the bfq_queue
  3762. * during its service slot. And this sum is the quantity used in this
  3763. * function to evaluate the I/O speed of a process.
  3764. */
  3765. static bool bfq_bfqq_is_slow(struct bfq_data *bfqd, struct bfq_queue *bfqq,
  3766. bool compensate, enum bfqq_expiration reason,
  3767. unsigned long *delta_ms)
  3768. {
  3769. ktime_t delta_ktime;
  3770. u32 delta_usecs;
	bool slow = BFQQ_SEEKY(bfqq); /* if delta too short, use seekiness */
  3772. if (!bfq_bfqq_sync(bfqq))
  3773. return false;
  3774. if (compensate)
  3775. delta_ktime = bfqd->last_idling_start;
  3776. else
  3777. delta_ktime = ktime_get();
  3778. delta_ktime = ktime_sub(delta_ktime, bfqd->last_budget_start);
  3779. delta_usecs = ktime_to_us(delta_ktime);
  3780. /* don't use too short time intervals */
  3781. if (delta_usecs < 1000) {
  3782. if (blk_queue_nonrot(bfqd->queue))
  3783. /*
  3784. * give same worst-case guarantees as idling
  3785. * for seeky
  3786. */
  3787. *delta_ms = BFQ_MIN_TT / NSEC_PER_MSEC;
  3788. else /* charge at least one seek */
  3789. *delta_ms = bfq_slice_idle / NSEC_PER_MSEC;
  3790. return slow;
  3791. }
  3792. *delta_ms = delta_usecs / USEC_PER_MSEC;
  3793. /*
  3794. * Use only long (> 20ms) intervals to filter out excessive
  3795. * spikes in service rate estimation.
  3796. */
  3797. if (delta_usecs > 20000) {
  3798. /*
  3799. * Caveat for rotational devices: processes doing I/O
  3800. * in the slower disk zones tend to be slow(er) even
  3801. * if not seeky. In this respect, the estimated peak
  3802. * rate is likely to be an average over the disk
  3803. * surface. Accordingly, to not be too harsh with
  3804. * unlucky processes, a process is deemed slow only if
  3805. * its rate has been lower than half of the estimated
  3806. * peak rate.
  3807. */
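		/*
		 * For instance, with illustrative numbers: if
		 * bfq_max_budget is 16384 sectors and bfqq consumed
		 * only 4096 sectors in this (>20ms) interval, then
		 * 4096 < 16384/2 and the queue is deemed slow.
		 */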
  3808. slow = bfqq->entity.service < bfqd->bfq_max_budget / 2;
  3809. }
  3810. bfq_log_bfqq(bfqd, bfqq, "bfq_bfqq_is_slow: slow %d", slow);
  3811. return slow;
  3812. }
  3813. /*
  3814. * To be deemed as soft real-time, an application must meet two
  3815. * requirements. First, the application must not require an average
 * bandwidth higher than the approximate bandwidth required to play back or
  3817. * record a compressed high-definition video.
  3818. * The next function is invoked on the completion of the last request of a
  3819. * batch, to compute the next-start time instant, soft_rt_next_start, such
  3820. * that, if the next request of the application does not arrive before
  3821. * soft_rt_next_start, then the above requirement on the bandwidth is met.
  3822. *
  3823. * The second requirement is that the request pattern of the application is
  3824. * isochronous, i.e., that, after issuing a request or a batch of requests,
  3825. * the application stops issuing new requests until all its pending requests
  3826. * have been completed. After that, the application may issue a new batch,
  3827. * and so on.
  3828. * For this reason the next function is invoked to compute
  3829. * soft_rt_next_start only for applications that meet this requirement,
  3830. * whereas soft_rt_next_start is set to infinity for applications that do
  3831. * not.
  3832. *
  3833. * Unfortunately, even a greedy (i.e., I/O-bound) application may
  3834. * happen to meet, occasionally or systematically, both the above
  3835. * bandwidth and isochrony requirements. This may happen at least in
  3836. * the following circumstances. First, if the CPU load is high. The
  3837. * application may stop issuing requests while the CPUs are busy
  3838. * serving other processes, then restart, then stop again for a while,
  3839. * and so on. The other circumstances are related to the storage
  3840. * device: the storage device is highly loaded or reaches a low-enough
  3841. * throughput with the I/O of the application (e.g., because the I/O
  3842. * is random and/or the device is slow). In all these cases, the
  3843. * I/O of the application may be simply slowed down enough to meet
  3844. * the bandwidth and isochrony requirements. To reduce the probability
  3845. * that greedy applications are deemed as soft real-time in these
  3846. * corner cases, a further rule is used in the computation of
  3847. * soft_rt_next_start: the return value of this function is forced to
  3848. * be higher than the maximum between the following two quantities.
  3849. *
  3850. * (a) Current time plus: (1) the maximum time for which the arrival
  3851. * of a request is waited for when a sync queue becomes idle,
  3852. * namely bfqd->bfq_slice_idle, and (2) a few extra jiffies. We
  3853. * postpone for a moment the reason for adding a few extra
  3854. * jiffies; we get back to it after next item (b). Lower-bounding
  3855. * the return value of this function with the current time plus
  3856. * bfqd->bfq_slice_idle tends to filter out greedy applications,
  3857. * because the latter issue their next request as soon as possible
  3858. * after the last one has been completed. In contrast, a soft
  3859. * real-time application spends some time processing data, after a
  3860. * batch of its requests has been completed.
  3861. *
  3862. * (b) Current value of bfqq->soft_rt_next_start. As pointed out
  3863. * above, greedy applications may happen to meet both the
  3864. * bandwidth and isochrony requirements under heavy CPU or
  3865. * storage-device load. In more detail, in these scenarios, these
  3866. * applications happen, only for limited time periods, to do I/O
  3867. * slowly enough to meet all the requirements described so far,
  3868. * including the filtering in above item (a). These slow-speed
  3869. * time intervals are usually interspersed between other time
  3870. * intervals during which these applications do I/O at a very high
  3871. * speed. Fortunately, exactly because of the high speed of the
  3872. * I/O in the high-speed intervals, the values returned by this
  3873. * function happen to be so high, near the end of any such
  3874. * high-speed interval, to be likely to fall *after* the end of
  3875. * the low-speed time interval that follows. These high values are
  3876. * stored in bfqq->soft_rt_next_start after each invocation of
  3877. * this function. As a consequence, if the last value of
  3878. * bfqq->soft_rt_next_start is constantly used to lower-bound the
  3879. * next value that this function may return, then, from the very
  3880. * beginning of a low-speed interval, bfqq->soft_rt_next_start is
  3881. * likely to be constantly kept so high that any I/O request
  3882. * issued during the low-speed interval is considered as arriving
 * too soon for the application to be deemed as soft
  3884. * real-time. Then, in the high-speed interval that follows, the
  3885. * application will not be deemed as soft real-time, just because
  3886. * it will do I/O at a high speed. And so on.
  3887. *
  3888. * Getting back to the filtering in item (a), in the following two
  3889. * cases this filtering might be easily passed by a greedy
  3890. * application, if the reference quantity was just
  3891. * bfqd->bfq_slice_idle:
  3892. * 1) HZ is so low that the duration of a jiffy is comparable to or
  3893. * higher than bfqd->bfq_slice_idle. This happens, e.g., on slow
  3894. * devices with HZ=100. The time granularity may be so coarse
  3895. * that the approximation, in jiffies, of bfqd->bfq_slice_idle
  3896. * is rather lower than the exact value.
  3897. * 2) jiffies, instead of increasing at a constant rate, may stop increasing
  3898. * for a while, then suddenly 'jump' by several units to recover the lost
  3899. * increments. This seems to happen, e.g., inside virtual machines.
  3900. * To address this issue, in the filtering in (a) we do not use as a
  3901. * reference time interval just bfqd->bfq_slice_idle, but
  3902. * bfqd->bfq_slice_idle plus a few jiffies. In particular, we add the
  3903. * minimum number of jiffies for which the filter seems to be quite
  3904. * precise also in embedded systems and KVM/QEMU virtual machines.
  3905. */
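/*
 * For instance, with illustrative numbers: if HZ is 1000,
 * bfq_wr_max_softrt_rate is 7000 sectors/sec and bfqq has received
 * 1400 sectors of service since the instant stored in
 * last_idle_bklogged, then the middle term below evaluates to
 * last_idle_bklogged + 1000*1400/7000 = last_idle_bklogged + 200
 * jiffies, i.e., 200 ms after that instant.
 */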
  3906. static unsigned long bfq_bfqq_softrt_next_start(struct bfq_data *bfqd,
  3907. struct bfq_queue *bfqq)
  3908. {
  3909. return max3(bfqq->soft_rt_next_start,
  3910. bfqq->last_idle_bklogged +
  3911. HZ * bfqq->service_from_backlogged /
  3912. bfqd->bfq_wr_max_softrt_rate,
  3913. jiffies + nsecs_to_jiffies(bfqq->bfqd->bfq_slice_idle) + 4);
  3914. }
  3915. /**
  3916. * bfq_bfqq_expire - expire a queue.
  3917. * @bfqd: device owning the queue.
  3918. * @bfqq: the queue to expire.
  3919. * @compensate: if true, compensate for the time spent idling.
  3920. * @reason: the reason causing the expiration.
  3921. *
  3922. * If the process associated with bfqq does slow I/O (e.g., because it
  3923. * issues random requests), we charge bfqq with the time it has been
  3924. * in service instead of the service it has received (see
  3925. * bfq_bfqq_charge_time for details on how this goal is achieved). As
  3926. * a consequence, bfqq will typically get higher timestamps upon
  3927. * reactivation, and hence it will be rescheduled as if it had
  3928. * received more service than what it has actually received. In the
  3929. * end, bfqq receives less service in proportion to how slowly its
  3930. * associated process consumes its budgets (and hence how seriously it
  3931. * tends to lower the throughput). In addition, this time-charging
  3932. * strategy guarantees time fairness among slow processes. In
  3933. * contrast, if the process associated with bfqq is not slow, we
  3934. * charge bfqq exactly with the service it has received.
  3935. *
  3936. * Charging time to the first type of queues and the exact service to
  3937. * the other has the effect of using the WF2Q+ policy to schedule the
  3938. * former on a timeslice basis, without violating service domain
  3939. * guarantees among the latter.
  3940. */
  3941. void bfq_bfqq_expire(struct bfq_data *bfqd,
  3942. struct bfq_queue *bfqq,
  3943. bool compensate,
  3944. enum bfqq_expiration reason)
  3945. {
  3946. bool slow;
  3947. unsigned long delta = 0;
  3948. struct bfq_entity *entity = &bfqq->entity;
  3949. /*
  3950. * Check whether the process is slow (see bfq_bfqq_is_slow).
  3951. */
  3952. slow = bfq_bfqq_is_slow(bfqd, bfqq, compensate, reason, &delta);
  3953. /*
  3954. * As above explained, charge slow (typically seeky) and
  3955. * timed-out queues with the time and not the service
  3956. * received, to favor sequential workloads.
  3957. *
  3958. * Processes doing I/O in the slower disk zones will tend to
  3959. * be slow(er) even if not seeky. Therefore, since the
  3960. * estimated peak rate is actually an average over the disk
  3961. * surface, these processes may timeout just for bad luck. To
  3962. * avoid punishing them, do not charge time to processes that
  3963. * succeeded in consuming at least 2/3 of their budget. This
  3964. * allows BFQ to preserve enough elasticity to still perform
 * bandwidth, and not time, distribution with slightly unlucky
  3966. * or quasi-sequential processes.
  3967. */
  3968. if (bfqq->wr_coeff == 1 &&
  3969. (slow ||
  3970. (reason == BFQQE_BUDGET_TIMEOUT &&
  3971. bfq_bfqq_budget_left(bfqq) >= entity->budget / 3)))
  3972. bfq_bfqq_charge_time(bfqd, bfqq, delta);
  3973. if (bfqd->low_latency && bfqq->wr_coeff == 1)
  3974. bfqq->last_wr_start_finish = jiffies;
  3975. if (bfqd->low_latency && bfqd->bfq_wr_max_softrt_rate > 0 &&
  3976. RB_EMPTY_ROOT(&bfqq->sort_list)) {
  3977. /*
  3978. * If we get here, and there are no outstanding
  3979. * requests, then the request pattern is isochronous
  3980. * (see the comments on the function
  3981. * bfq_bfqq_softrt_next_start()). Therefore we can
  3982. * compute soft_rt_next_start.
  3983. *
  3984. * If, instead, the queue still has outstanding
  3985. * requests, then we have to wait for the completion
  3986. * of all the outstanding requests to discover whether
  3987. * the request pattern is actually isochronous.
  3988. */
  3989. if (bfqq->dispatched == 0)
  3990. bfqq->soft_rt_next_start =
  3991. bfq_bfqq_softrt_next_start(bfqd, bfqq);
  3992. else if (bfqq->dispatched > 0) {
  3993. /*
  3994. * Schedule an update of soft_rt_next_start to when
  3995. * the task may be discovered to be isochronous.
  3996. */
  3997. bfq_mark_bfqq_softrt_update(bfqq);
  3998. }
  3999. }
  4000. bfq_log_bfqq(bfqd, bfqq,
  4001. "expire (%d, slow %d, num_disp %d, short_ttime %d)", reason,
  4002. slow, bfqq->dispatched, bfq_bfqq_has_short_ttime(bfqq));
  4003. /*
  4004. * bfqq expired, so no total service time needs to be computed
  4005. * any longer: reset state machine for measuring total service
  4006. * times.
  4007. */
  4008. bfqd->rqs_injected = bfqd->wait_dispatch = false;
  4009. bfqd->waited_rq = NULL;
  4010. /*
  4011. * Increase, decrease or leave budget unchanged according to
  4012. * reason.
  4013. */
  4014. __bfq_bfqq_recalc_budget(bfqd, bfqq, reason);
  4015. if (__bfq_bfqq_expire(bfqd, bfqq, reason))
  4016. /* bfqq is gone, no more actions on it */
  4017. return;
  4018. /* mark bfqq as waiting a request only if a bic still points to it */
  4019. if (!bfq_bfqq_busy(bfqq) &&
  4020. reason != BFQQE_BUDGET_TIMEOUT &&
  4021. reason != BFQQE_BUDGET_EXHAUSTED) {
  4022. bfq_mark_bfqq_non_blocking_wait_rq(bfqq);
  4023. /*
  4024. * Not setting service to 0, because, if the next rq
  4025. * arrives in time, the queue will go on receiving
  4026. * service with this same budget (as if it never expired)
  4027. */
  4028. } else
  4029. entity->service = 0;
  4030. /*
  4031. * Reset the received-service counter for every parent entity.
  4032. * Differently from what happens with bfqq->entity.service,
  4033. * the resetting of this counter never needs to be postponed
  4034. * for parent entities. In fact, in case bfqq may have a
  4035. * chance to go on being served using the last, partially
  4036. * consumed budget, bfqq->entity.service needs to be kept,
  4037. * because if bfqq then actually goes on being served using
  4038. * the same budget, the last value of bfqq->entity.service is
  4039. * needed to properly decrement bfqq->entity.budget by the
  4040. * portion already consumed. In contrast, it is not necessary
  4041. * to keep entity->service for parent entities too, because
  4042. * the bubble up of the new value of bfqq->entity.budget will
  4043. * make sure that the budgets of parent entities are correct,
  4044. * even in case bfqq and thus parent entities go on receiving
  4045. * service with the same budget.
  4046. */
  4047. entity = entity->parent;
  4048. for_each_entity(entity)
  4049. entity->service = 0;
  4050. }
  4051. /*
  4052. * Budget timeout is not implemented through a dedicated timer, but
  4053. * just checked on request arrivals and completions, as well as on
  4054. * idle timer expirations.
  4055. */
  4056. static bool bfq_bfqq_budget_timeout(struct bfq_queue *bfqq)
  4057. {
  4058. return time_is_before_eq_jiffies(bfqq->budget_timeout);
  4059. }
  4060. /*
  4061. * If we expire a queue that is actively waiting (i.e., with the
  4062. * device idled) for the arrival of a new request, then we may incur
  4063. * the timestamp misalignment problem described in the body of the
  4064. * function __bfq_activate_entity. Hence we return true only if this
  4065. * condition does not hold, or if the queue is slow enough to deserve
  4066. * only to be kicked off for preserving a high throughput.
  4067. */
  4068. static bool bfq_may_expire_for_budg_timeout(struct bfq_queue *bfqq)
  4069. {
  4070. bfq_log_bfqq(bfqq->bfqd, bfqq,
  4071. "may_budget_timeout: wait_request %d left %d timeout %d",
  4072. bfq_bfqq_wait_request(bfqq),
  4073. bfq_bfqq_budget_left(bfqq) >= bfqq->entity.budget / 3,
  4074. bfq_bfqq_budget_timeout(bfqq));
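	/*
	 * Expire on budget timeout only if bfqq is not idling waiting
	 * for a new request, or if it still has at least a third of
	 * its budget left (i.e., it has been consuming its budget
	 * slowly enough that expiring it helps throughput, as
	 * explained above).
	 */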
	return (!bfq_bfqq_wait_request(bfqq) ||
		bfq_bfqq_budget_left(bfqq) >= bfqq->entity.budget / 3)
		&&
		bfq_bfqq_budget_timeout(bfqq);
  4079. }
  4080. static bool idling_boosts_thr_without_issues(struct bfq_data *bfqd,
  4081. struct bfq_queue *bfqq)
  4082. {
  4083. bool rot_without_queueing =
  4084. !blk_queue_nonrot(bfqd->queue) && !bfqd->hw_tag,
  4085. bfqq_sequential_and_IO_bound,
  4086. idling_boosts_thr;
  4087. /* No point in idling for bfqq if it won't get requests any longer */
  4088. if (unlikely(!bfqq_process_refs(bfqq)))
  4089. return false;
  4090. bfqq_sequential_and_IO_bound = !BFQQ_SEEKY(bfqq) &&
  4091. bfq_bfqq_IO_bound(bfqq) && bfq_bfqq_has_short_ttime(bfqq);
  4092. /*
  4093. * The next variable takes into account the cases where idling
  4094. * boosts the throughput.
  4095. *
  4096. * The value of the variable is computed considering, first, that
  4097. * idling is virtually always beneficial for the throughput if:
  4098. * (a) the device is not NCQ-capable and rotational, or
  4099. * (b) regardless of the presence of NCQ, the device is rotational and
  4100. * the request pattern for bfqq is I/O-bound and sequential, or
  4101. * (c) regardless of whether it is rotational, the device is
  4102. * not NCQ-capable and the request pattern for bfqq is
  4103. * I/O-bound and sequential.
  4104. *
  4105. * Secondly, and in contrast to the above item (b), idling an
  4106. * NCQ-capable flash-based device would not boost the
  4107. * throughput even with sequential I/O; rather it would lower
  4108. * the throughput in proportion to how fast the device
  4109. * is. Accordingly, the next variable is true if any of the
  4110. * above conditions (a), (b) or (c) is true, and, in
  4111. * particular, happens to be false if bfqd is an NCQ-capable
  4112. * flash-based device.
  4113. */
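	/*
	 * In boolean terms: idling boosts the throughput if the device
	 * is rotational and not NCQ-capable (case (a)), or if it is
	 * either rotational or not NCQ-capable and bfqq's I/O is
	 * sequential and I/O-bound (cases (b) and (c)).
	 */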
	idling_boosts_thr = rot_without_queueing ||
		((!blk_queue_nonrot(bfqd->queue) || !bfqd->hw_tag) &&
		 bfqq_sequential_and_IO_bound);
  4117. /*
  4118. * The return value of this function is equal to that of
  4119. * idling_boosts_thr, unless a special case holds. In this
  4120. * special case, described below, idling may cause problems to
  4121. * weight-raised queues.
  4122. *
  4123. * When the request pool is saturated (e.g., in the presence
  4124. * of write hogs), if the processes associated with
  4125. * non-weight-raised queues ask for requests at a lower rate,
  4126. * then processes associated with weight-raised queues have a
  4127. * higher probability to get a request from the pool
  4128. * immediately (or at least soon) when they need one. Thus
  4129. * they have a higher probability to actually get a fraction
  4130. * of the device throughput proportional to their high
  4131. * weight. This is especially true with NCQ-capable drives,
  4132. * which enqueue several requests in advance, and further
  4133. * reorder internally-queued requests.
  4134. *
  4135. * For this reason, we force to false the return value if
  4136. * there are weight-raised busy queues. In this case, and if
  4137. * bfqq is not weight-raised, this guarantees that the device
  4138. * is not idled for bfqq (if, instead, bfqq is weight-raised,
  4139. * then idling will be guaranteed by another variable, see
  4140. * below). Combined with the timestamping rules of BFQ (see
  4141. * [1] for details), this behavior causes bfqq, and hence any
  4142. * sync non-weight-raised queue, to get a lower number of
  4143. * requests served, and thus to ask for a lower number of
  4144. * requests from the request pool, before the busy
  4145. * weight-raised queues get served again. This often mitigates
  4146. * starvation problems in the presence of heavy write
  4147. * workloads and NCQ, thereby guaranteeing a higher
  4148. * application and system responsiveness in these hostile
  4149. * scenarios.
  4150. */
  4151. return idling_boosts_thr &&
  4152. bfqd->wr_busy_queues == 0;
  4153. }
  4154. /*
  4155. * For a queue that becomes empty, device idling is allowed only if
  4156. * this function returns true for that queue. As a consequence, since
  4157. * device idling plays a critical role for both throughput boosting
  4158. * and service guarantees, the return value of this function plays a
  4159. * critical role as well.
  4160. *
  4161. * In a nutshell, this function returns true only if idling is
  4162. * beneficial for throughput or, even if detrimental for throughput,
  4163. * idling is however necessary to preserve service guarantees (low
  4164. * latency, desired throughput distribution, ...). In particular, on
  4165. * NCQ-capable devices, this function tries to return false, so as to
  4166. * help keep the drives' internal queues full, whenever this helps the
  4167. * device boost the throughput without causing any service-guarantee
  4168. * issue.
  4169. *
  4170. * Most of the issues taken into account to get the return value of
  4171. * this function are not trivial. We discuss these issues in the two
  4172. * functions providing the main pieces of information needed by this
  4173. * function.
  4174. */
  4175. static bool bfq_better_to_idle(struct bfq_queue *bfqq)
  4176. {
  4177. struct bfq_data *bfqd = bfqq->bfqd;
  4178. bool idling_boosts_thr_with_no_issue, idling_needed_for_service_guar;
  4179. /* No point in idling for bfqq if it won't get requests any longer */
  4180. if (unlikely(!bfqq_process_refs(bfqq)))
  4181. return false;
  4182. if (unlikely(bfqd->strict_guarantees))
  4183. return true;
  4184. /*
  4185. * Idling is performed only if slice_idle > 0. In addition, we
  4186. * do not idle if
  4187. * (a) bfqq is async
  4188. * (b) bfqq is in the idle io prio class: in this case we do
  4189. * not idle because we want to minimize the bandwidth that
 * queues in this class can steal from higher-priority queues
  4191. */
  4192. if (bfqd->bfq_slice_idle == 0 || !bfq_bfqq_sync(bfqq) ||
  4193. bfq_class_idle(bfqq))
  4194. return false;
  4195. idling_boosts_thr_with_no_issue =
  4196. idling_boosts_thr_without_issues(bfqd, bfqq);
  4197. idling_needed_for_service_guar =
  4198. idling_needed_for_service_guarantees(bfqd, bfqq);
  4199. /*
  4200. * We have now the two components we need to compute the
  4201. * return value of the function, which is true only if idling
  4202. * either boosts the throughput (without issues), or is
  4203. * necessary to preserve service guarantees.
  4204. */
  4205. return idling_boosts_thr_with_no_issue ||
  4206. idling_needed_for_service_guar;
  4207. }
  4208. /*
  4209. * If the in-service queue is empty but the function bfq_better_to_idle
  4210. * returns true, then:
  4211. * 1) the queue must remain in service and cannot be expired, and
  4212. * 2) the device must be idled to wait for the possible arrival of a new
  4213. * request for the queue.
  4214. * See the comments on the function bfq_better_to_idle for the reasons
  4215. * why performing device idling is the best choice to boost the throughput
  4216. * and preserve service guarantees when bfq_better_to_idle itself
  4217. * returns true.
  4218. */
  4219. static bool bfq_bfqq_must_idle(struct bfq_queue *bfqq)
  4220. {
  4221. return RB_EMPTY_ROOT(&bfqq->sort_list) && bfq_better_to_idle(bfqq);
  4222. }
  4223. /*
  4224. * This function chooses the queue from which to pick the next extra
  4225. * I/O request to inject, if it finds a compatible queue. See the
  4226. * comments on bfq_update_inject_limit() for details on the injection
  4227. * mechanism, and for the definitions of the quantities mentioned
  4228. * below.
  4229. */
  4230. static struct bfq_queue *
  4231. bfq_choose_bfqq_for_injection(struct bfq_data *bfqd)
  4232. {
  4233. struct bfq_queue *bfqq, *in_serv_bfqq = bfqd->in_service_queue;
  4234. unsigned int limit = in_serv_bfqq->inject_limit;
  4235. /*
  4236. * If
  4237. * - bfqq is not weight-raised and therefore does not carry
  4238. * time-critical I/O,
  4239. * or
  4240. * - regardless of whether bfqq is weight-raised, bfqq has
  4241. * however a long think time, during which it can absorb the
  4242. * effect of an appropriate number of extra I/O requests
  4243. * from other queues (see bfq_update_inject_limit for
  4244. * details on the computation of this number);
  4245. * then injection can be performed without restrictions.
  4246. */
  4247. bool in_serv_always_inject = in_serv_bfqq->wr_coeff == 1 ||
  4248. !bfq_bfqq_has_short_ttime(in_serv_bfqq);
  4249. /*
  4250. * If
  4251. * - the baseline total service time could not be sampled yet,
  4252. * so the inject limit happens to be still 0, and
  4253. * - a lot of time has elapsed since the plugging of I/O
  4254. * dispatching started, so drive speed is being wasted
  4255. * significantly;
  4256. * then temporarily raise inject limit to one request.
  4257. */
  4258. if (limit == 0 && in_serv_bfqq->last_serv_time_ns == 0 &&
  4259. bfq_bfqq_wait_request(in_serv_bfqq) &&
  4260. time_is_before_eq_jiffies(bfqd->last_idling_start_jiffies +
  4261. bfqd->bfq_slice_idle)
  4262. )
  4263. limit = 1;
  4264. if (bfqd->rq_in_driver >= limit)
  4265. return NULL;
  4266. /*
  4267. * Linear search of the source queue for injection; but, with
  4268. * a high probability, very few steps are needed to find a
  4269. * candidate queue, i.e., a queue with enough budget left for
  4270. * its next request. In fact:
  4271. * - BFQ dynamically updates the budget of every queue so as
  4272. * to accommodate the expected backlog of the queue;
  4273. * - if a queue gets all its requests dispatched as injected
  4274. * service, then the queue is removed from the active list
  4275. * (and re-added only if it gets new requests, but then it
  4276. * is assigned again enough budget for its new backlog).
  4277. */
  4278. list_for_each_entry(bfqq, &bfqd->active_list, bfqq_list)
  4279. if (!RB_EMPTY_ROOT(&bfqq->sort_list) &&
  4280. (in_serv_always_inject || bfqq->wr_coeff > 1) &&
  4281. bfq_serv_to_charge(bfqq->next_rq, bfqq) <=
  4282. bfq_bfqq_budget_left(bfqq)) {
  4283. /*
  4284. * Allow for only one large in-flight request
  4285. * on non-rotational devices, for the
  4286. * following reason. On non-rotationl drives,
  4287. * large requests take much longer than
  4288. * smaller requests to be served. In addition,
  4289. * the drive prefers to serve large requests
4290. * over small ones, if it can choose. So,
4291. * having more than one large request queued
4292. * in the drive may easily make the next
4293. * request of the in-service queue wait so
4294. * long as to break bfqq's service guarantees. On
  4295. * the bright side, large requests let the
  4296. * drive reach a very high throughput, even if
  4297. * there is only one in-flight large request
  4298. * at a time.
  4299. */
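/*
 * min_t() below only caps the limit: if limit is currently 0 it
 * stays 0; otherwise at most one such large request is allowed
 * in the drive at a time.
 */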
  4300. if (blk_queue_nonrot(bfqd->queue) &&
  4301. blk_rq_sectors(bfqq->next_rq) >=
  4302. BFQQ_SECT_THR_NONROT)
  4303. limit = min_t(unsigned int, 1, limit);
  4304. else
  4305. limit = in_serv_bfqq->inject_limit;
  4306. if (bfqd->rq_in_driver < limit) {
  4307. bfqd->rqs_injected = true;
  4308. return bfqq;
  4309. }
  4310. }
  4311. return NULL;
  4312. }
  4313. /*
  4314. * Select a queue for service. If we have a current queue in service,
  4315. * check whether to continue servicing it, or retrieve and set a new one.
  4316. */
  4317. static struct bfq_queue *bfq_select_queue(struct bfq_data *bfqd)
  4318. {
  4319. struct bfq_queue *bfqq;
  4320. struct request *next_rq;
  4321. enum bfqq_expiration reason = BFQQE_BUDGET_TIMEOUT;
  4322. bfqq = bfqd->in_service_queue;
  4323. if (!bfqq)
  4324. goto new_queue;
  4325. bfq_log_bfqq(bfqd, bfqq, "select_queue: already in-service queue");
  4326. /*
  4327. * Do not expire bfqq for budget timeout if bfqq may be about
  4328. * to enjoy device idling. The reason why, in this case, we
  4329. * prevent bfqq from expiring is the same as in the comments
  4330. * on the case where bfq_bfqq_must_idle() returns true, in
  4331. * bfq_completed_request().
  4332. */
  4333. if (bfq_may_expire_for_budg_timeout(bfqq) &&
  4334. !bfq_bfqq_must_idle(bfqq))
  4335. goto expire;
  4336. check_queue:
  4337. /*
  4338. * This loop is rarely executed more than once. Even when it
  4339. * happens, it is much more convenient to re-execute this loop
  4340. * than to return NULL and trigger a new dispatch to get a
  4341. * request served.
  4342. */
  4343. next_rq = bfqq->next_rq;
  4344. /*
  4345. * If bfqq has requests queued and it has enough budget left to
  4346. * serve them, keep the queue, otherwise expire it.
  4347. */
  4348. if (next_rq) {
  4349. if (bfq_serv_to_charge(next_rq, bfqq) >
  4350. bfq_bfqq_budget_left(bfqq)) {
  4351. /*
  4352. * Expire the queue for budget exhaustion,
  4353. * which makes sure that the next budget is
  4354. * enough to serve the next request, even if
  4355. * it comes from the fifo expired path.
  4356. */
  4357. reason = BFQQE_BUDGET_EXHAUSTED;
  4358. goto expire;
  4359. } else {
  4360. /*
  4361. * The idle timer may be pending because we may
  4362. * not disable disk idling even when a new request
  4363. * arrives.
  4364. */
  4365. if (bfq_bfqq_wait_request(bfqq)) {
  4366. /*
  4367. * If we get here: 1) at least a new request
  4368. * has arrived but we have not disabled the
  4369. * timer because the request was too small,
  4370. * 2) then the block layer has unplugged
  4371. * the device, causing the dispatch to be
  4372. * invoked.
  4373. *
  4374. * Since the device is unplugged, now the
  4375. * requests are probably large enough to
  4376. * provide a reasonable throughput.
  4377. * So we disable idling.
  4378. */
  4379. bfq_clear_bfqq_wait_request(bfqq);
  4380. hrtimer_try_to_cancel(&bfqd->idle_slice_timer);
  4381. }
  4382. goto keep_queue;
  4383. }
  4384. }
  4385. /*
  4386. * No requests pending. However, if the in-service queue is idling
  4387. * for a new request, or has requests waiting for a completion and
  4388. * may idle after their completion, then keep it anyway.
  4389. *
  4390. * Yet, inject service from other queues if it boosts
  4391. * throughput and is possible.
  4392. */
  4393. if (bfq_bfqq_wait_request(bfqq) ||
  4394. (bfqq->dispatched != 0 && bfq_better_to_idle(bfqq))) {
  4395. struct bfq_queue *async_bfqq =
  4396. bfqq->bic && bfqq->bic->bfqq[0] &&
  4397. bfq_bfqq_busy(bfqq->bic->bfqq[0]) &&
  4398. bfqq->bic->bfqq[0]->next_rq ?
  4399. bfqq->bic->bfqq[0] : NULL;
  4400. struct bfq_queue *blocked_bfqq =
  4401. !hlist_empty(&bfqq->woken_list) ?
  4402. container_of(bfqq->woken_list.first,
  4403. struct bfq_queue,
  4404. woken_list_node)
  4405. : NULL;
  4406. /*
  4407. * The next four mutually-exclusive ifs decide
  4408. * whether to try injection, and choose the queue to
  4409. * pick an I/O request from.
  4410. *
  4411. * The first if checks whether the process associated
  4412. * with bfqq has also async I/O pending. If so, it
  4413. * injects such I/O unconditionally. Injecting async
  4414. * I/O from the same process can cause no harm to the
  4415. * process. On the contrary, it can only increase
  4416. * bandwidth and reduce latency for the process.
  4417. *
  4418. * The second if checks whether there happens to be a
  4419. * non-empty waker queue for bfqq, i.e., a queue whose
  4420. * I/O needs to be completed for bfqq to receive new
  4421. * I/O. This happens, e.g., if bfqq is associated with
  4422. * a process that does some sync. A sync generates
  4423. * extra blocking I/O, which must be completed before
  4424. * the process associated with bfqq can go on with its
  4425. * I/O. If the I/O of the waker queue is not served,
  4426. * then bfqq remains empty, and no I/O is dispatched,
  4427. * until the idle timeout fires for bfqq. This is
  4428. * likely to result in lower bandwidth and higher
  4429. * latencies for bfqq, and in a severe loss of total
  4430. * throughput. The best action to take is therefore to
  4431. * serve the waker queue as soon as possible. So do it
  4432. * (without relying on the third alternative below for
  4433. * eventually serving waker_bfqq's I/O; see the last
  4434. * paragraph for further details). This systematic
  4435. * injection of I/O from the waker queue does not
  4436. * cause any delay to bfqq's I/O. On the contrary,
4437. * bfqq's next I/O is brought forward dramatically,
4438. * because it is no longer blocked for milliseconds.
  4439. *
  4440. * The third if checks whether there is a queue woken
  4441. * by bfqq, and currently with pending I/O. Such a
  4442. * woken queue does not steal bandwidth from bfqq,
  4443. * because it remains soon without I/O if bfqq is not
  4444. * served. So there is virtually no risk of loss of
  4445. * bandwidth for bfqq if this woken queue has I/O
  4446. * dispatched while bfqq is waiting for new I/O.
  4447. *
  4448. * The fourth if checks whether bfqq is a queue for
  4449. * which it is better to avoid injection. It is so if
  4450. * bfqq delivers more throughput when served without
  4451. * any further I/O from other queues in the middle, or
  4452. * if the service times of bfqq's I/O requests both
4453. * matter more than overall throughput and may be
4454. * easily increased by injection (which happens if bfqq
  4455. * has a short think time). If none of these
  4456. * conditions holds, then a candidate queue for
  4457. * injection is looked for through
  4458. * bfq_choose_bfqq_for_injection(). Note that the
  4459. * latter may return NULL (for example if the inject
  4460. * limit for bfqq is currently 0).
  4461. *
  4462. * NOTE: motivation for the second alternative
  4463. *
  4464. * Thanks to the way the inject limit is updated in
  4465. * bfq_update_has_short_ttime(), it is rather likely
  4466. * that, if I/O is being plugged for bfqq and the
  4467. * waker queue has pending I/O requests that are
  4468. * blocking bfqq's I/O, then the fourth alternative
  4469. * above lets the waker queue get served before the
  4470. * I/O-plugging timeout fires. So one may deem the
  4471. * second alternative superfluous. It is not, because
  4472. * the fourth alternative may be way less effective in
  4473. * case of a synchronization. For two main
  4474. * reasons. First, throughput may be low because the
  4475. * inject limit may be too low to guarantee the same
  4476. * amount of injected I/O, from the waker queue or
  4477. * other queues, that the second alternative
  4478. * guarantees (the second alternative unconditionally
  4479. * injects a pending I/O request of the waker queue
  4480. * for each bfq_dispatch_request()). Second, with the
  4481. * fourth alternative, the duration of the plugging,
  4482. * i.e., the time before bfqq finally receives new I/O,
  4483. * may not be minimized, because the waker queue may
  4484. * happen to be served only after other queues.
  4485. */
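/*
 * In short, the checks below try, in order: 1) async I/O of the
 * very process associated with bfqq, 2) bfqq's waker queue, 3) a
 * queue woken by bfqq, 4) a generic candidate returned by
 * bfq_choose_bfqq_for_injection(), subject to the throughput and
 * think-time conditions discussed above. If no alternative
 * applies, bfqq is set to NULL and no injection occurs.
 */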
  4486. if (async_bfqq &&
  4487. icq_to_bic(async_bfqq->next_rq->elv.icq) == bfqq->bic &&
  4488. bfq_serv_to_charge(async_bfqq->next_rq, async_bfqq) <=
  4489. bfq_bfqq_budget_left(async_bfqq))
  4490. bfqq = bfqq->bic->bfqq[0];
  4491. else if (bfqq->waker_bfqq &&
  4492. bfq_bfqq_busy(bfqq->waker_bfqq) &&
  4493. bfqq->waker_bfqq->next_rq &&
  4494. bfq_serv_to_charge(bfqq->waker_bfqq->next_rq,
  4495. bfqq->waker_bfqq) <=
  4496. bfq_bfqq_budget_left(bfqq->waker_bfqq)
  4497. )
  4498. bfqq = bfqq->waker_bfqq;
  4499. else if (blocked_bfqq &&
  4500. bfq_bfqq_busy(blocked_bfqq) &&
  4501. blocked_bfqq->next_rq &&
  4502. bfq_serv_to_charge(blocked_bfqq->next_rq,
  4503. blocked_bfqq) <=
  4504. bfq_bfqq_budget_left(blocked_bfqq)
  4505. )
  4506. bfqq = blocked_bfqq;
  4507. else if (!idling_boosts_thr_without_issues(bfqd, bfqq) &&
  4508. (bfqq->wr_coeff == 1 || bfqd->wr_busy_queues > 1 ||
  4509. !bfq_bfqq_has_short_ttime(bfqq)))
  4510. bfqq = bfq_choose_bfqq_for_injection(bfqd);
  4511. else
  4512. bfqq = NULL;
  4513. goto keep_queue;
  4514. }
  4515. reason = BFQQE_NO_MORE_REQUESTS;
  4516. expire:
  4517. bfq_bfqq_expire(bfqd, bfqq, false, reason);
  4518. new_queue:
  4519. bfqq = bfq_set_in_service_queue(bfqd);
  4520. if (bfqq) {
  4521. bfq_log_bfqq(bfqd, bfqq, "select_queue: checking new queue");
  4522. goto check_queue;
  4523. }
  4524. keep_queue:
  4525. if (bfqq)
  4526. bfq_log_bfqq(bfqd, bfqq, "select_queue: returned this queue");
  4527. else
  4528. bfq_log(bfqd, "select_queue: no queue returned");
  4529. return bfqq;
  4530. }
  4531. static void bfq_update_wr_data(struct bfq_data *bfqd, struct bfq_queue *bfqq)
  4532. {
  4533. struct bfq_entity *entity = &bfqq->entity;
  4534. if (bfqq->wr_coeff > 1) { /* queue is being weight-raised */
  4535. bfq_log_bfqq(bfqd, bfqq,
  4536. "raising period dur %u/%u msec, old coeff %u, w %d(%d)",
  4537. jiffies_to_msecs(jiffies - bfqq->last_wr_start_finish),
  4538. jiffies_to_msecs(bfqq->wr_cur_max_time),
  4539. bfqq->wr_coeff,
  4540. bfqq->entity.weight, bfqq->entity.orig_weight);
  4541. if (entity->prio_changed)
  4542. bfq_log_bfqq(bfqd, bfqq, "WARN: pending prio change");
  4543. /*
  4544. * If the queue was activated in a burst, or too much
  4545. * time has elapsed from the beginning of this
  4546. * weight-raising period, then end weight raising.
  4547. */
  4548. if (bfq_bfqq_in_large_burst(bfqq))
  4549. bfq_bfqq_end_wr(bfqq);
  4550. else if (time_is_before_jiffies(bfqq->last_wr_start_finish +
  4551. bfqq->wr_cur_max_time)) {
  4552. if (bfqq->wr_cur_max_time != bfqd->bfq_wr_rt_max_time ||
  4553. time_is_before_jiffies(bfqq->wr_start_at_switch_to_srt +
  4554. bfq_wr_duration(bfqd))) {
  4555. /*
  4556. * Either in interactive weight
  4557. * raising, or in soft_rt weight
  4558. * raising with the
  4559. * interactive-weight-raising period
  4560. * elapsed (so no switch back to
  4561. * interactive weight raising).
  4562. */
  4563. bfq_bfqq_end_wr(bfqq);
  4564. } else { /*
  4565. * soft_rt finishing while still in
  4566. * interactive period, switch back to
  4567. * interactive weight raising
  4568. */
  4569. switch_back_to_interactive_wr(bfqq, bfqd);
  4570. bfqq->entity.prio_changed = 1;
  4571. }
  4572. }
  4573. if (bfqq->wr_coeff > 1 &&
  4574. bfqq->wr_cur_max_time != bfqd->bfq_wr_rt_max_time &&
  4575. bfqq->service_from_wr > max_service_from_wr) {
  4576. /* see comments on max_service_from_wr */
  4577. bfq_bfqq_end_wr(bfqq);
  4578. }
  4579. }
  4580. /*
  4581. * To improve latency (for this or other queues), immediately
  4582. * update weight both if it must be raised and if it must be
4583. * lowered. Since entity may be on some active tree here, and
4584. * might have a pending change of its ioprio class, invoke
4585. * the next function with the last parameter unset (see the
  4586. * comments on the function).
  4587. */
  4588. if ((entity->weight > entity->orig_weight) != (bfqq->wr_coeff > 1))
  4589. __bfq_entity_update_weight_prio(bfq_entity_service_tree(entity),
  4590. entity, false);
  4591. }
  4592. /*
  4593. * Dispatch next request from bfqq.
  4594. */
  4595. static struct request *bfq_dispatch_rq_from_bfqq(struct bfq_data *bfqd,
  4596. struct bfq_queue *bfqq)
  4597. {
  4598. struct request *rq = bfqq->next_rq;
  4599. unsigned long service_to_charge;
  4600. service_to_charge = bfq_serv_to_charge(rq, bfqq);
  4601. bfq_bfqq_served(bfqq, service_to_charge);
  4602. if (bfqq == bfqd->in_service_queue && bfqd->wait_dispatch) {
  4603. bfqd->wait_dispatch = false;
  4604. bfqd->waited_rq = rq;
  4605. }
  4606. bfq_dispatch_remove(bfqd->queue, rq);
  4607. if (bfqq != bfqd->in_service_queue)
  4608. goto return_rq;
  4609. /*
  4610. * If weight raising has to terminate for bfqq, then next
  4611. * function causes an immediate update of bfqq's weight,
  4612. * without waiting for next activation. As a consequence, on
4613. * expiration, bfqq will be timestamped as if it had never been
  4614. * weight-raised during this service slot, even if it has
  4615. * received part or even most of the service as a
  4616. * weight-raised queue. This inflates bfqq's timestamps, which
  4617. * is beneficial, as bfqq is then more willing to leave the
  4618. * device immediately to possible other weight-raised queues.
  4619. */
  4620. bfq_update_wr_data(bfqd, bfqq);
  4621. /*
  4622. * Expire bfqq, pretending that its budget expired, if bfqq
  4623. * belongs to CLASS_IDLE and other queues are waiting for
  4624. * service.
  4625. */
  4626. if (!(bfq_tot_busy_queues(bfqd) > 1 && bfq_class_idle(bfqq)))
  4627. goto return_rq;
  4628. bfq_bfqq_expire(bfqd, bfqq, false, BFQQE_BUDGET_EXHAUSTED);
  4629. return_rq:
  4630. return rq;
  4631. }
  4632. static bool bfq_has_work(struct blk_mq_hw_ctx *hctx)
  4633. {
  4634. struct bfq_data *bfqd = hctx->queue->elevator->elevator_data;
  4635. /*
  4636. * Avoiding lock: a race on bfqd->queued should cause at
  4637. * most a call to dispatch for nothing
  4638. */
  4639. return !list_empty_careful(&bfqd->dispatch) ||
  4640. READ_ONCE(bfqd->queued);
  4641. }
  4642. static struct request *__bfq_dispatch_request(struct blk_mq_hw_ctx *hctx)
  4643. {
  4644. struct bfq_data *bfqd = hctx->queue->elevator->elevator_data;
  4645. struct request *rq = NULL;
  4646. struct bfq_queue *bfqq = NULL;
  4647. if (!list_empty(&bfqd->dispatch)) {
  4648. rq = list_first_entry(&bfqd->dispatch, struct request,
  4649. queuelist);
  4650. list_del_init(&rq->queuelist);
  4651. bfqq = RQ_BFQQ(rq);
  4652. if (bfqq) {
  4653. /*
  4654. * Increment counters here, because this
  4655. * dispatch does not follow the standard
  4656. * dispatch flow (where counters are
  4657. * incremented)
  4658. */
  4659. bfqq->dispatched++;
  4660. goto inc_in_driver_start_rq;
  4661. }
  4662. /*
  4663. * We exploit the bfq_finish_requeue_request hook to
  4664. * decrement rq_in_driver, but
  4665. * bfq_finish_requeue_request will not be invoked on
  4666. * this request. So, to avoid unbalance, just start
  4667. * this request, without incrementing rq_in_driver. As
  4668. * a negative consequence, rq_in_driver is deceptively
  4669. * lower than it should be while this request is in
  4670. * service. This may cause bfq_schedule_dispatch to be
  4671. * invoked uselessly.
  4672. *
  4673. * As for implementing an exact solution, the
  4674. * bfq_finish_requeue_request hook, if defined, is
  4675. * probably invoked also on this request. So, by
  4676. * exploiting this hook, we could 1) increment
  4677. * rq_in_driver here, and 2) decrement it in
  4678. * bfq_finish_requeue_request. Such a solution would
  4679. * let the value of the counter be always accurate,
  4680. * but it would entail using an extra interface
  4681. * function. This cost seems higher than the benefit,
4682. * since the frequency of non-elevator-private
4683. * requests is very low.
  4684. */
  4685. goto start_rq;
  4686. }
  4687. bfq_log(bfqd, "dispatch requests: %d busy queues",
  4688. bfq_tot_busy_queues(bfqd));
  4689. if (bfq_tot_busy_queues(bfqd) == 0)
  4690. goto exit;
  4691. /*
  4692. * Force device to serve one request at a time if
  4693. * strict_guarantees is true. Forcing this service scheme is
  4694. * currently the ONLY way to guarantee that the request
  4695. * service order enforced by the scheduler is respected by a
  4696. * queueing device. Otherwise the device is free even to make
  4697. * some unlucky request wait for as long as the device
  4698. * wishes.
  4699. *
  4700. * Of course, serving one request at a time may cause loss of
  4701. * throughput.
  4702. */
  4703. if (bfqd->strict_guarantees && bfqd->rq_in_driver > 0)
  4704. goto exit;
  4705. bfqq = bfq_select_queue(bfqd);
  4706. if (!bfqq)
  4707. goto exit;
  4708. rq = bfq_dispatch_rq_from_bfqq(bfqd, bfqq);
  4709. if (rq) {
  4710. inc_in_driver_start_rq:
  4711. bfqd->rq_in_driver++;
  4712. start_rq:
  4713. rq->rq_flags |= RQF_STARTED;
  4714. }
  4715. exit:
  4716. return rq;
  4717. }
  4718. #ifdef CONFIG_BFQ_CGROUP_DEBUG
  4719. static void bfq_update_dispatch_stats(struct request_queue *q,
  4720. struct request *rq,
  4721. struct bfq_queue *in_serv_queue,
  4722. bool idle_timer_disabled)
  4723. {
  4724. struct bfq_queue *bfqq = rq ? RQ_BFQQ(rq) : NULL;
  4725. if (!idle_timer_disabled && !bfqq)
  4726. return;
  4727. /*
  4728. * rq and bfqq are guaranteed to exist until this function
  4729. * ends, for the following reasons. First, rq can be
  4730. * dispatched to the device, and then can be completed and
  4731. * freed, only after this function ends. Second, rq cannot be
  4732. * merged (and thus freed because of a merge) any longer,
  4733. * because it has already started. Thus rq cannot be freed
  4734. * before this function ends, and, since rq has a reference to
  4735. * bfqq, the same guarantee holds for bfqq too.
  4736. *
  4737. * In addition, the following queue lock guarantees that
  4738. * bfqq_group(bfqq) exists as well.
  4739. */
  4740. spin_lock_irq(&q->queue_lock);
  4741. if (idle_timer_disabled)
  4742. /*
  4743. * Since the idle timer has been disabled,
  4744. * in_serv_queue contained some request when
  4745. * __bfq_dispatch_request was invoked above, which
  4746. * implies that rq was picked exactly from
  4747. * in_serv_queue. Thus in_serv_queue == bfqq, and is
  4748. * therefore guaranteed to exist because of the above
  4749. * arguments.
  4750. */
  4751. bfqg_stats_update_idle_time(bfqq_group(in_serv_queue));
  4752. if (bfqq) {
  4753. struct bfq_group *bfqg = bfqq_group(bfqq);
  4754. bfqg_stats_update_avg_queue_size(bfqg);
  4755. bfqg_stats_set_start_empty_time(bfqg);
  4756. bfqg_stats_update_io_remove(bfqg, rq->cmd_flags);
  4757. }
  4758. spin_unlock_irq(&q->queue_lock);
  4759. }
  4760. #else
  4761. static inline void bfq_update_dispatch_stats(struct request_queue *q,
  4762. struct request *rq,
  4763. struct bfq_queue *in_serv_queue,
  4764. bool idle_timer_disabled) {}
  4765. #endif /* CONFIG_BFQ_CGROUP_DEBUG */
  4766. static struct request *bfq_dispatch_request(struct blk_mq_hw_ctx *hctx)
  4767. {
  4768. struct bfq_data *bfqd = hctx->queue->elevator->elevator_data;
  4769. struct request *rq;
  4770. struct bfq_queue *in_serv_queue;
  4771. bool waiting_rq, idle_timer_disabled = false;
  4772. spin_lock_irq(&bfqd->lock);
  4773. in_serv_queue = bfqd->in_service_queue;
  4774. waiting_rq = in_serv_queue && bfq_bfqq_wait_request(in_serv_queue);
  4775. rq = __bfq_dispatch_request(hctx);
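/*
 * If the in-service queue is still the same after the dispatch
 * attempt, detect whether the dispatch cleared its wait_request
 * flag: if so, the idle timer has been disabled by this dispatch,
 * and bfq_update_dispatch_stats() below must be informed.
 */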
  4776. if (in_serv_queue == bfqd->in_service_queue) {
  4777. idle_timer_disabled =
  4778. waiting_rq && !bfq_bfqq_wait_request(in_serv_queue);
  4779. }
  4780. spin_unlock_irq(&bfqd->lock);
  4781. bfq_update_dispatch_stats(hctx->queue, rq,
  4782. idle_timer_disabled ? in_serv_queue : NULL,
  4783. idle_timer_disabled);
  4784. return rq;
  4785. }
  4786. /*
  4787. * Task holds one reference to the queue, dropped when task exits. Each rq
  4788. * in-flight on this queue also holds a reference, dropped when rq is freed.
  4789. *
  4790. * Scheduler lock must be held here. Recall not to use bfqq after calling
  4791. * this function on it.
  4792. */
  4793. void bfq_put_queue(struct bfq_queue *bfqq)
  4794. {
  4795. struct bfq_queue *item;
  4796. struct hlist_node *n;
  4797. struct bfq_group *bfqg = bfqq_group(bfqq);
  4798. bfq_log_bfqq(bfqq->bfqd, bfqq, "put_queue: %p %d", bfqq, bfqq->ref);
  4799. bfqq->ref--;
  4800. if (bfqq->ref)
  4801. return;
  4802. if (!hlist_unhashed(&bfqq->burst_list_node)) {
  4803. hlist_del_init(&bfqq->burst_list_node);
  4804. /*
  4805. * Decrement also burst size after the removal, if the
  4806. * process associated with bfqq is exiting, and thus
  4807. * does not contribute to the burst any longer. This
  4808. * decrement helps filter out false positives of large
  4809. * bursts, when some short-lived process (often due to
  4810. * the execution of commands by some service) happens
  4811. * to start and exit while a complex application is
  4812. * starting, and thus spawning several processes that
  4813. * do I/O (and that *must not* be treated as a large
  4814. * burst, see comments on bfq_handle_burst).
  4815. *
  4816. * In particular, the decrement is performed only if:
  4817. * 1) bfqq is not a merged queue, because, if it is,
  4818. * then this free of bfqq is not triggered by the exit
  4819. * of the process bfqq is associated with, but exactly
  4820. * by the fact that bfqq has just been merged.
  4821. * 2) burst_size is greater than 0, to handle
  4822. * unbalanced decrements. Unbalanced decrements may
4823. * happen in the following case: bfqq is inserted into
4824. * the current burst list--without incrementing
4825. * burst_size--because of a split, but the current
  4826. * burst list is not the burst list bfqq belonged to
  4827. * (see comments on the case of a split in
  4828. * bfq_set_request).
  4829. */
  4830. if (bfqq->bic && bfqq->bfqd->burst_size > 0)
  4831. bfqq->bfqd->burst_size--;
  4832. }
  4833. /*
  4834. * bfqq does not exist any longer, so it cannot be woken by
  4835. * any other queue, and cannot wake any other queue. Then bfqq
  4836. * must be removed from the woken list of its possible waker
  4837. * queue, and all queues in the woken list of bfqq must stop
  4838. * having a waker queue. Strictly speaking, these updates
  4839. * should be performed when bfqq remains with no I/O source
  4840. * attached to it, which happens before bfqq gets freed. In
  4841. * particular, this happens when the last process associated
  4842. * with bfqq exits or gets associated with a different
  4843. * queue. However, both events lead to bfqq being freed soon,
  4844. * and dangling references would come out only after bfqq gets
  4845. * freed. So these updates are done here, as a simple and safe
  4846. * way to handle all cases.
  4847. */
  4848. /* remove bfqq from woken list */
  4849. if (!hlist_unhashed(&bfqq->woken_list_node))
  4850. hlist_del_init(&bfqq->woken_list_node);
  4851. /* reset waker for all queues in woken list */
  4852. hlist_for_each_entry_safe(item, n, &bfqq->woken_list,
  4853. woken_list_node) {
  4854. item->waker_bfqq = NULL;
  4855. hlist_del_init(&item->woken_list_node);
  4856. }
  4857. if (bfqq->bfqd->last_completed_rq_bfqq == bfqq)
  4858. bfqq->bfqd->last_completed_rq_bfqq = NULL;
  4859. kmem_cache_free(bfq_pool, bfqq);
  4860. bfqg_and_blkg_put(bfqg);
  4861. }
  4862. static void bfq_put_stable_ref(struct bfq_queue *bfqq)
  4863. {
  4864. bfqq->stable_ref--;
  4865. bfq_put_queue(bfqq);
  4866. }
  4867. void bfq_put_cooperator(struct bfq_queue *bfqq)
  4868. {
  4869. struct bfq_queue *__bfqq, *next;
  4870. /*
  4871. * If this queue was scheduled to merge with another queue, be
  4872. * sure to drop the reference taken on that queue (and others in
  4873. * the merge chain). See bfq_setup_merge and bfq_merge_bfqqs.
  4874. */
  4875. __bfqq = bfqq->new_bfqq;
  4876. while (__bfqq) {
  4877. if (__bfqq == bfqq)
  4878. break;
  4879. next = __bfqq->new_bfqq;
  4880. bfq_put_queue(__bfqq);
  4881. __bfqq = next;
  4882. }
  4883. }
  4884. static void bfq_exit_bfqq(struct bfq_data *bfqd, struct bfq_queue *bfqq)
  4885. {
  4886. if (bfqq == bfqd->in_service_queue) {
  4887. __bfq_bfqq_expire(bfqd, bfqq, BFQQE_BUDGET_TIMEOUT);
  4888. bfq_schedule_dispatch(bfqd);
  4889. }
  4890. bfq_log_bfqq(bfqd, bfqq, "exit_bfqq: %p, %d", bfqq, bfqq->ref);
  4891. bfq_put_cooperator(bfqq);
  4892. bfq_release_process_ref(bfqd, bfqq);
  4893. }
  4894. static void bfq_exit_icq_bfqq(struct bfq_io_cq *bic, bool is_sync)
  4895. {
  4896. struct bfq_queue *bfqq = bic_to_bfqq(bic, is_sync);
  4897. struct bfq_data *bfqd;
  4898. if (bfqq)
  4899. bfqd = bfqq->bfqd; /* NULL if scheduler already exited */
  4900. if (bfqq && bfqd) {
  4901. unsigned long flags;
  4902. spin_lock_irqsave(&bfqd->lock, flags);
  4903. bic_set_bfqq(bic, NULL, is_sync);
  4904. bfq_exit_bfqq(bfqd, bfqq);
  4905. spin_unlock_irqrestore(&bfqd->lock, flags);
  4906. }
  4907. }
  4908. static void bfq_exit_icq(struct io_cq *icq)
  4909. {
  4910. struct bfq_io_cq *bic = icq_to_bic(icq);
  4911. if (bic->stable_merge_bfqq) {
  4912. struct bfq_data *bfqd = bic->stable_merge_bfqq->bfqd;
  4913. /*
  4914. * bfqd is NULL if scheduler already exited, and in
  4915. * that case this is the last time bfqq is accessed.
  4916. */
  4917. if (bfqd) {
  4918. unsigned long flags;
  4919. spin_lock_irqsave(&bfqd->lock, flags);
  4920. bfq_put_stable_ref(bic->stable_merge_bfqq);
  4921. spin_unlock_irqrestore(&bfqd->lock, flags);
  4922. } else {
  4923. bfq_put_stable_ref(bic->stable_merge_bfqq);
  4924. }
  4925. }
  4926. bfq_exit_icq_bfqq(bic, true);
  4927. bfq_exit_icq_bfqq(bic, false);
  4928. }
  4929. /*
  4930. * Update the entity prio values; note that the new values will not
  4931. * be used until the next (re)activation.
  4932. */
  4933. static void
  4934. bfq_set_next_ioprio_data(struct bfq_queue *bfqq, struct bfq_io_cq *bic)
  4935. {
  4936. struct task_struct *tsk = current;
  4937. int ioprio_class;
  4938. struct bfq_data *bfqd = bfqq->bfqd;
  4939. if (!bfqd)
  4940. return;
  4941. ioprio_class = IOPRIO_PRIO_CLASS(bic->ioprio);
  4942. switch (ioprio_class) {
  4943. default:
  4944. pr_err("bdi %s: bfq: bad prio class %d\n",
  4945. bdi_dev_name(bfqq->bfqd->queue->disk->bdi),
  4946. ioprio_class);
  4947. fallthrough;
  4948. case IOPRIO_CLASS_NONE:
  4949. /*
  4950. * No prio set, inherit CPU scheduling settings.
  4951. */
  4952. bfqq->new_ioprio = task_nice_ioprio(tsk);
  4953. bfqq->new_ioprio_class = task_nice_ioclass(tsk);
  4954. break;
  4955. case IOPRIO_CLASS_RT:
  4956. bfqq->new_ioprio = IOPRIO_PRIO_DATA(bic->ioprio);
  4957. bfqq->new_ioprio_class = IOPRIO_CLASS_RT;
  4958. break;
  4959. case IOPRIO_CLASS_BE:
  4960. bfqq->new_ioprio = IOPRIO_PRIO_DATA(bic->ioprio);
  4961. bfqq->new_ioprio_class = IOPRIO_CLASS_BE;
  4962. break;
  4963. case IOPRIO_CLASS_IDLE:
  4964. bfqq->new_ioprio_class = IOPRIO_CLASS_IDLE;
  4965. bfqq->new_ioprio = 7;
  4966. break;
  4967. }
  4968. if (bfqq->new_ioprio >= IOPRIO_NR_LEVELS) {
  4969. pr_crit("bfq_set_next_ioprio_data: new_ioprio %d\n",
  4970. bfqq->new_ioprio);
  4971. bfqq->new_ioprio = IOPRIO_NR_LEVELS - 1;
  4972. }
  4973. bfqq->entity.new_weight = bfq_ioprio_to_weight(bfqq->new_ioprio);
  4974. bfq_log_bfqq(bfqd, bfqq, "new_ioprio %d new_weight %d",
  4975. bfqq->new_ioprio, bfqq->entity.new_weight);
  4976. bfqq->entity.prio_changed = 1;
  4977. }
  4978. static struct bfq_queue *bfq_get_queue(struct bfq_data *bfqd,
  4979. struct bio *bio, bool is_sync,
  4980. struct bfq_io_cq *bic,
  4981. bool respawn);
  4982. static void bfq_check_ioprio_change(struct bfq_io_cq *bic, struct bio *bio)
  4983. {
  4984. struct bfq_data *bfqd = bic_to_bfqd(bic);
  4985. struct bfq_queue *bfqq;
  4986. int ioprio = bic->icq.ioc->ioprio;
  4987. /*
  4988. * This condition may trigger on a newly created bic, be sure to
  4989. * drop the lock before returning.
  4990. */
  4991. if (unlikely(!bfqd) || likely(bic->ioprio == ioprio))
  4992. return;
  4993. bic->ioprio = ioprio;
  4994. bfqq = bic_to_bfqq(bic, false);
  4995. if (bfqq) {
  4996. struct bfq_queue *old_bfqq = bfqq;
  4997. bfqq = bfq_get_queue(bfqd, bio, false, bic, true);
  4998. bic_set_bfqq(bic, bfqq, false);
  4999. bfq_release_process_ref(bfqd, old_bfqq);
  5000. }
  5001. bfqq = bic_to_bfqq(bic, true);
  5002. if (bfqq)
  5003. bfq_set_next_ioprio_data(bfqq, bic);
  5004. }
  5005. static void bfq_init_bfqq(struct bfq_data *bfqd, struct bfq_queue *bfqq,
  5006. struct bfq_io_cq *bic, pid_t pid, int is_sync)
  5007. {
  5008. u64 now_ns = ktime_get_ns();
  5009. RB_CLEAR_NODE(&bfqq->entity.rb_node);
  5010. INIT_LIST_HEAD(&bfqq->fifo);
  5011. INIT_HLIST_NODE(&bfqq->burst_list_node);
  5012. INIT_HLIST_NODE(&bfqq->woken_list_node);
  5013. INIT_HLIST_HEAD(&bfqq->woken_list);
  5014. bfqq->ref = 0;
  5015. bfqq->bfqd = bfqd;
  5016. if (bic)
  5017. bfq_set_next_ioprio_data(bfqq, bic);
  5018. if (is_sync) {
  5019. /*
  5020. * No need to mark as has_short_ttime if in
  5021. * idle_class, because no device idling is performed
  5022. * for queues in idle class
  5023. */
  5024. if (!bfq_class_idle(bfqq))
  5025. /* tentatively mark as has_short_ttime */
  5026. bfq_mark_bfqq_has_short_ttime(bfqq);
  5027. bfq_mark_bfqq_sync(bfqq);
  5028. bfq_mark_bfqq_just_created(bfqq);
  5029. } else
  5030. bfq_clear_bfqq_sync(bfqq);
  5031. /* set end request to minus infinity from now */
  5032. bfqq->ttime.last_end_request = now_ns + 1;
  5033. bfqq->creation_time = jiffies;
  5034. bfqq->io_start_time = now_ns;
  5035. bfq_mark_bfqq_IO_bound(bfqq);
  5036. bfqq->pid = pid;
  5037. /* Tentative initial value to trade off between thr and lat */
  5038. bfqq->max_budget = (2 * bfq_max_budget(bfqd)) / 3;
  5039. bfqq->budget_timeout = bfq_smallest_from_now();
  5040. bfqq->wr_coeff = 1;
  5041. bfqq->last_wr_start_finish = jiffies;
  5042. bfqq->wr_start_at_switch_to_srt = bfq_smallest_from_now();
  5043. bfqq->split_time = bfq_smallest_from_now();
  5044. /*
  5045. * To not forget the possibly high bandwidth consumed by a
  5046. * process/queue in the recent past,
  5047. * bfq_bfqq_softrt_next_start() returns a value at least equal
  5048. * to the current value of bfqq->soft_rt_next_start (see
  5049. * comments on bfq_bfqq_softrt_next_start). Set
  5050. * soft_rt_next_start to now, to mean that bfqq has consumed
  5051. * no bandwidth so far.
  5052. */
  5053. bfqq->soft_rt_next_start = jiffies;
  5054. /* first request is almost certainly seeky */
  5055. bfqq->seek_history = 1;
  5056. }
  5057. static struct bfq_queue **bfq_async_queue_prio(struct bfq_data *bfqd,
  5058. struct bfq_group *bfqg,
  5059. int ioprio_class, int ioprio)
  5060. {
  5061. switch (ioprio_class) {
  5062. case IOPRIO_CLASS_RT:
  5063. return &bfqg->async_bfqq[0][ioprio];
  5064. case IOPRIO_CLASS_NONE:
  5065. ioprio = IOPRIO_BE_NORM;
  5066. fallthrough;
  5067. case IOPRIO_CLASS_BE:
  5068. return &bfqg->async_bfqq[1][ioprio];
  5069. case IOPRIO_CLASS_IDLE:
  5070. return &bfqg->async_idle_bfqq;
  5071. default:
  5072. return NULL;
  5073. }
  5074. }
  5075. static struct bfq_queue *
  5076. bfq_do_early_stable_merge(struct bfq_data *bfqd, struct bfq_queue *bfqq,
  5077. struct bfq_io_cq *bic,
  5078. struct bfq_queue *last_bfqq_created)
  5079. {
  5080. struct bfq_queue *new_bfqq =
  5081. bfq_setup_merge(bfqq, last_bfqq_created);
  5082. if (!new_bfqq)
  5083. return bfqq;
  5084. if (new_bfqq->bic)
  5085. new_bfqq->bic->stably_merged = true;
  5086. bic->stably_merged = true;
  5087. /*
  5088. * Reusing merge functions. This implies that
  5089. * bfqq->bic must be set too, for
  5090. * bfq_merge_bfqqs to correctly save bfqq's
  5091. * state before killing it.
  5092. */
  5093. bfqq->bic = bic;
  5094. bfq_merge_bfqqs(bfqd, bic, bfqq, new_bfqq);
  5095. return new_bfqq;
  5096. }
  5097. /*
  5098. * Many throughput-sensitive workloads are made of several parallel
  5099. * I/O flows, with all flows generated by the same application, or
  5100. * more generically by the same task (e.g., system boot). The most
  5101. * counterproductive action with these workloads is plugging I/O
  5102. * dispatch when one of the bfq_queues associated with these flows
  5103. * remains temporarily empty.
  5104. *
  5105. * To avoid this plugging, BFQ has been using a burst-handling
  5106. * mechanism for years now. This mechanism has proven effective for
  5107. * throughput, and not detrimental for service guarantees. The
  5108. * following function pushes this mechanism a little bit further,
5109. * building on the following two facts.
  5110. *
5111. * First, all the I/O flows of the same application or task
  5112. * contribute to the execution/completion of that common application
  5113. * or task. So the performance figures that matter are total
  5114. * throughput of the flows and task-wide I/O latency. In particular,
  5115. * these flows do not need to be protected from each other, in terms
  5116. * of individual bandwidth or latency.
  5117. *
  5118. * Second, the above fact holds regardless of the number of flows.
  5119. *
5120. * Putting these two facts together, this function stably merges the
5121. * bfq_queues associated with these I/O flows, i.e., with the
5122. * processes that generate these I/O flows, regardless of how many
5123. * processes are involved.
  5124. *
  5125. * To decide whether a set of bfq_queues is actually associated with
  5126. * the I/O flows of a common application or task, and to merge these
  5127. * queues stably, this function operates as follows: given a bfq_queue,
  5128. * say Q2, currently being created, and the last bfq_queue, say Q1,
  5129. * created before Q2, Q2 is merged stably with Q1 if
  5130. * - very little time has elapsed since when Q1 was created
  5131. * - Q2 has the same ioprio as Q1
  5132. * - Q2 belongs to the same group as Q1
  5133. *
  5134. * Merging bfq_queues also reduces scheduling overhead. A fio test
  5135. * with ten random readers on /dev/nullb shows a throughput boost of
5136. * 40% on a quad-core system. Since BFQ's execution time amounts to ~50% of
  5137. * the total per-request processing time, the above throughput boost
  5138. * implies that BFQ's overhead is reduced by more than 50%.
  5139. *
  5140. * This new mechanism most certainly obsoletes the current
  5141. * burst-handling heuristics. We keep those heuristics for the moment.
  5142. */
  5143. static struct bfq_queue *bfq_do_or_sched_stable_merge(struct bfq_data *bfqd,
  5144. struct bfq_queue *bfqq,
  5145. struct bfq_io_cq *bic)
  5146. {
  5147. struct bfq_queue **source_bfqq = bfqq->entity.parent ?
  5148. &bfqq->entity.parent->last_bfqq_created :
  5149. &bfqd->last_bfqq_created;
  5150. struct bfq_queue *last_bfqq_created = *source_bfqq;
  5151. /*
  5152. * If last_bfqq_created has not been set yet, then init it. If
  5153. * it has been set already, but too long ago, then move it
  5154. * forward to bfqq. Finally, move also if bfqq belongs to a
  5155. * different group than last_bfqq_created, or if bfqq has a
  5156. * different ioprio or ioprio_class. If none of these
  5157. * conditions holds true, then try an early stable merge or
  5158. * schedule a delayed stable merge.
  5159. *
  5160. * A delayed merge is scheduled (instead of performing an
  5161. * early merge), in case bfqq might soon prove to be more
  5162. * throughput-beneficial if not merged. Currently this is
  5163. * possible only if bfqd is rotational with no queueing. For
  5164. * such a drive, not merging bfqq is better for throughput if
  5165. * bfqq happens to contain sequential I/O. So, we wait a
  5166. * little bit for enough I/O to flow through bfqq. After that,
  5167. * if such an I/O is sequential, then the merge is
  5168. * canceled. Otherwise the merge is finally performed.
  5169. */
  5170. if (!last_bfqq_created ||
  5171. time_before(last_bfqq_created->creation_time +
  5172. msecs_to_jiffies(bfq_activation_stable_merging),
  5173. bfqq->creation_time) ||
  5174. bfqq->entity.parent != last_bfqq_created->entity.parent ||
  5175. bfqq->ioprio != last_bfqq_created->ioprio ||
  5176. bfqq->ioprio_class != last_bfqq_created->ioprio_class)
  5177. *source_bfqq = bfqq;
  5178. else if (time_after_eq(last_bfqq_created->creation_time +
  5179. bfqd->bfq_burst_interval,
  5180. bfqq->creation_time)) {
  5181. if (likely(bfqd->nonrot_with_queueing))
  5182. /*
  5183. * With this type of drive, leaving
  5184. * bfqq alone may provide no
  5185. * throughput benefits compared with
  5186. * merging bfqq. So merge bfqq now.
  5187. */
  5188. bfqq = bfq_do_early_stable_merge(bfqd, bfqq,
  5189. bic,
  5190. last_bfqq_created);
  5191. else { /* schedule tentative stable merge */
  5192. /*
  5193. * get reference on last_bfqq_created,
  5194. * to prevent it from being freed,
  5195. * until we decide whether to merge
  5196. */
  5197. last_bfqq_created->ref++;
  5198. /*
  5199. * need to keep track of stable refs, to
  5200. * compute process refs correctly
  5201. */
  5202. last_bfqq_created->stable_ref++;
  5203. /*
  5204. * Record the bfqq to merge to.
  5205. */
  5206. bic->stable_merge_bfqq = last_bfqq_created;
  5207. }
  5208. }
  5209. return bfqq;
  5210. }
  5211. static struct bfq_queue *bfq_get_queue(struct bfq_data *bfqd,
  5212. struct bio *bio, bool is_sync,
  5213. struct bfq_io_cq *bic,
  5214. bool respawn)
  5215. {
  5216. const int ioprio = IOPRIO_PRIO_DATA(bic->ioprio);
  5217. const int ioprio_class = IOPRIO_PRIO_CLASS(bic->ioprio);
  5218. struct bfq_queue **async_bfqq = NULL;
  5219. struct bfq_queue *bfqq;
  5220. struct bfq_group *bfqg;
  5221. bfqg = bfq_bio_bfqg(bfqd, bio);
  5222. if (!is_sync) {
  5223. async_bfqq = bfq_async_queue_prio(bfqd, bfqg, ioprio_class,
  5224. ioprio);
  5225. bfqq = *async_bfqq;
  5226. if (bfqq)
  5227. goto out;
  5228. }
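/*
 * Atomic, non-sleeping allocation (GFP_NOWAIT): if it fails, fall
 * back to the pre-allocated bfqd->oom_bfqq below, so that a queue
 * can always be returned.
 */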
  5229. bfqq = kmem_cache_alloc_node(bfq_pool,
  5230. GFP_NOWAIT | __GFP_ZERO | __GFP_NOWARN,
  5231. bfqd->queue->node);
  5232. if (bfqq) {
  5233. bfq_init_bfqq(bfqd, bfqq, bic, current->pid,
  5234. is_sync);
  5235. bfq_init_entity(&bfqq->entity, bfqg);
  5236. bfq_log_bfqq(bfqd, bfqq, "allocated");
  5237. } else {
  5238. bfqq = &bfqd->oom_bfqq;
  5239. bfq_log_bfqq(bfqd, bfqq, "using oom bfqq");
  5240. goto out;
  5241. }
  5242. /*
  5243. * Pin the queue now that it's allocated, scheduler exit will
  5244. * prune it.
  5245. */
  5246. if (async_bfqq) {
  5247. bfqq->ref++; /*
  5248. * Extra group reference, w.r.t. sync
  5249. * queue. This extra reference is removed
  5250. * only if bfqq->bfqg disappears, to
  5251. * guarantee that this queue is not freed
  5252. * until its group goes away.
  5253. */
  5254. bfq_log_bfqq(bfqd, bfqq, "get_queue, bfqq not in async: %p, %d",
  5255. bfqq, bfqq->ref);
  5256. *async_bfqq = bfqq;
  5257. }
  5258. out:
  5259. bfqq->ref++; /* get a process reference to this queue */
  5260. if (bfqq != &bfqd->oom_bfqq && is_sync && !respawn)
  5261. bfqq = bfq_do_or_sched_stable_merge(bfqd, bfqq, bic);
  5262. return bfqq;
  5263. }
  5264. static void bfq_update_io_thinktime(struct bfq_data *bfqd,
  5265. struct bfq_queue *bfqq)
  5266. {
  5267. struct bfq_ttime *ttime = &bfqq->ttime;
  5268. u64 elapsed;
  5269. /*
  5270. * We are really interested in how long it takes for the queue to
  5271. * become busy when there is no outstanding IO for this queue. So
  5272. * ignore cases when the bfq queue has already IO queued.
  5273. */
  5274. if (bfqq->dispatched || bfq_bfqq_busy(bfqq))
  5275. return;
  5276. elapsed = ktime_get_ns() - bfqq->ttime.last_end_request;
  5277. elapsed = min_t(u64, elapsed, 2ULL * bfqd->bfq_slice_idle);
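/*
 * The three updates below implement an exponentially weighted
 * moving average with decay factor 7/8. Samples and totals carry a
 * fixed-point scale factor of 256, and the +128 in the final
 * division provides rounding. elapsed has just been clamped to
 * twice bfq_slice_idle, so a single huge gap cannot dominate the
 * mean.
 */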
  5278. ttime->ttime_samples = (7*ttime->ttime_samples + 256) / 8;
  5279. ttime->ttime_total = div_u64(7*ttime->ttime_total + 256*elapsed, 8);
  5280. ttime->ttime_mean = div64_ul(ttime->ttime_total + 128,
  5281. ttime->ttime_samples);
  5282. }
  5283. static void
  5284. bfq_update_io_seektime(struct bfq_data *bfqd, struct bfq_queue *bfqq,
  5285. struct request *rq)
  5286. {
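/*
 * seek_history is a sliding window of per-request bits: each new
 * request shifts in one bit, set iff the request is seeky with
 * respect to the previous one (BFQ_RQ_SEEKY). Macros such as
 * BFQQ_TOTALLY_SEEKY, used below, classify the queue by looking at
 * this window.
 */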
  5287. bfqq->seek_history <<= 1;
  5288. bfqq->seek_history |= BFQ_RQ_SEEKY(bfqd, bfqq->last_request_pos, rq);
  5289. if (bfqq->wr_coeff > 1 &&
  5290. bfqq->wr_cur_max_time == bfqd->bfq_wr_rt_max_time &&
  5291. BFQQ_TOTALLY_SEEKY(bfqq)) {
  5292. if (time_is_before_jiffies(bfqq->wr_start_at_switch_to_srt +
  5293. bfq_wr_duration(bfqd))) {
  5294. /*
  5295. * In soft_rt weight raising with the
  5296. * interactive-weight-raising period
  5297. * elapsed (so no switch back to
  5298. * interactive weight raising).
  5299. */
  5300. bfq_bfqq_end_wr(bfqq);
  5301. } else { /*
  5302. * stopping soft_rt weight raising
  5303. * while still in interactive period,
  5304. * switch back to interactive weight
  5305. * raising
  5306. */
  5307. switch_back_to_interactive_wr(bfqq, bfqd);
  5308. bfqq->entity.prio_changed = 1;
  5309. }
  5310. }
  5311. }
  5312. static void bfq_update_has_short_ttime(struct bfq_data *bfqd,
  5313. struct bfq_queue *bfqq,
  5314. struct bfq_io_cq *bic)
  5315. {
  5316. bool has_short_ttime = true, state_changed;
  5317. /*
  5318. * No need to update has_short_ttime if bfqq is async or in
  5319. * idle io prio class, or if bfq_slice_idle is zero, because
  5320. * no device idling is performed for bfqq in this case.
  5321. */
  5322. if (!bfq_bfqq_sync(bfqq) || bfq_class_idle(bfqq) ||
  5323. bfqd->bfq_slice_idle == 0)
  5324. return;
  5325. /* Idle window just restored, statistics are meaningless. */
  5326. if (time_is_after_eq_jiffies(bfqq->split_time +
  5327. bfqd->bfq_wr_min_idle_time))
  5328. return;
  5329. /* Think time is infinite if no process is linked to
  5330. * bfqq. Otherwise check average think time to decide whether
  5331. * to mark as has_short_ttime. To this goal, compare average
  5332. * think time with half the I/O-plugging timeout.
  5333. */
  5334. if (atomic_read(&bic->icq.ioc->active_ref) == 0 ||
  5335. (bfq_sample_valid(bfqq->ttime.ttime_samples) &&
  5336. bfqq->ttime.ttime_mean > bfqd->bfq_slice_idle>>1))
  5337. has_short_ttime = false;
  5338. state_changed = has_short_ttime != bfq_bfqq_has_short_ttime(bfqq);
  5339. if (has_short_ttime)
  5340. bfq_mark_bfqq_has_short_ttime(bfqq);
  5341. else
  5342. bfq_clear_bfqq_has_short_ttime(bfqq);
  5343. /*
  5344. * Until the base value for the total service time gets
  5345. * finally computed for bfqq, the inject limit does depend on
  5346. * the think-time state (short|long). In particular, the limit
  5347. * is 0 or 1 if the think time is deemed, respectively, as
  5348. * short or long (details in the comments in
  5349. * bfq_update_inject_limit()). Accordingly, the next
  5350. * instructions reset the inject limit if the think-time state
  5351. * has changed and the above base value is still to be
  5352. * computed.
  5353. *
  5354. * However, the reset is performed only if more than 100 ms
  5355. * have elapsed since the last update of the inject limit, or
  5356. * (inclusive) if the change is from short to long think
  5357. * time. The reason for this waiting is as follows.
  5358. *
  5359. * bfqq may have a long think time because of a
  5360. * synchronization with some other queue, i.e., because the
  5361. * I/O of some other queue may need to be completed for bfqq
  5362. * to receive new I/O. Details in the comments on the choice
  5363. * of the queue for injection in bfq_select_queue().
  5364. *
  5365. * As stressed in those comments, if such a synchronization is
  5366. * actually in place, then, without injection on bfqq, the
5367. * blocking I/O cannot happen to be served while bfqq is in
  5368. * service. As a consequence, if bfqq is granted
  5369. * I/O-dispatch-plugging, then bfqq remains empty, and no I/O
  5370. * is dispatched, until the idle timeout fires. This is likely
  5371. * to result in lower bandwidth and higher latencies for bfqq,
  5372. * and in a severe loss of total throughput.
  5373. *
  5374. * On the opposite end, a non-zero inject limit may allow the
  5375. * I/O that blocks bfqq to be executed soon, and therefore
  5376. * bfqq to receive new I/O soon.
  5377. *
  5378. * But, if the blocking gets actually eliminated, then the
  5379. * next think-time sample for bfqq may be very low. This in
  5380. * turn may cause bfqq's think time to be deemed
  5381. * short. Without the 100 ms barrier, this new state change
  5382. * would cause the body of the next if to be executed
5383. * immediately. But this would set the inject limit
5384. * to 0. Without injection, the blocking I/O would cause the
  5385. * think time of bfqq to become long again, and therefore the
  5386. * inject limit to be raised again, and so on. The only effect
  5387. * of such a steady oscillation between the two think-time
  5388. * states would be to prevent effective injection on bfqq.
  5389. *
  5390. * In contrast, if the inject limit is not reset during such a
  5391. * long time interval as 100 ms, then the number of short
  5392. * think time samples can grow significantly before the reset
  5393. * is performed. As a consequence, the think time state can
  5394. * become stable before the reset. Therefore there will be no
  5395. * state change when the 100 ms elapse, and no reset of the
  5396. * inject limit. The inject limit remains steadily equal to 1
  5397. * both during and after the 100 ms. So injection can be
  5398. * performed at all times, and throughput gets boosted.
  5399. *
  5400. * An inject limit equal to 1 is however in conflict, in
  5401. * general, with the fact that the think time of bfqq is
  5402. * short, because injection may be likely to delay bfqq's I/O
  5403. * (as explained in the comments in
  5404. * bfq_update_inject_limit()). But this does not happen in
  5405. * this special case, because bfqq's low think time is due to
  5406. * an effective handling of a synchronization, through
  5407. * injection. In this special case, bfqq's I/O does not get
  5408. * delayed by injection; on the contrary, bfqq's I/O is
  5409. * brought forward, because it is not blocked for
  5410. * milliseconds.
  5411. *
  5412. * In addition, serving the blocking I/O much sooner, and much
  5413. * more frequently than once per I/O-plugging timeout, makes
  5414. * it much quicker to detect a waker queue (the concept of
  5415. * waker queue is defined in the comments in
  5416. * bfq_add_request()). This makes it possible to start sooner
  5417. * to boost throughput more effectively, by injecting the I/O
  5418. * of the waker queue unconditionally on every
  5419. * bfq_dispatch_request().
  5420. *
  5421. * One last, important benefit of not resetting the inject
  5422. * limit before 100 ms is that, during this time interval, the
  5423. * base value for the total service time is likely to get
  5424. * finally computed for bfqq, freeing the inject limit from
  5425. * its relation with the think time.
  5426. */
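/*
 * In short: reset the inject limit only on a think-time state
 * change, while the base total service time is still unknown
 * (last_serv_time_ns == 0), and only if at least 100 ms have
 * passed since the last limit update or the new state is "long
 * think time".
 */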
  5427. if (state_changed && bfqq->last_serv_time_ns == 0 &&
  5428. (time_is_before_eq_jiffies(bfqq->decrease_time_jif +
  5429. msecs_to_jiffies(100)) ||
  5430. !has_short_ttime))
  5431. bfq_reset_inject_limit(bfqd, bfqq);
  5432. }
  5433. /*
  5434. * Called when a new fs request (rq) is added to bfqq. Check if there's
  5435. * something we should do about it.
  5436. */
  5437. static void bfq_rq_enqueued(struct bfq_data *bfqd, struct bfq_queue *bfqq,
  5438. struct request *rq)
  5439. {
  5440. if (rq->cmd_flags & REQ_META)
  5441. bfqq->meta_pending++;
  5442. bfqq->last_request_pos = blk_rq_pos(rq) + blk_rq_sectors(rq);
  5443. if (bfqq == bfqd->in_service_queue && bfq_bfqq_wait_request(bfqq)) {
  5444. bool small_req = bfqq->queued[rq_is_sync(rq)] == 1 &&
  5445. blk_rq_sectors(rq) < 32;
  5446. bool budget_timeout = bfq_bfqq_budget_timeout(bfqq);
  5447. /*
  5448. * There is just this request queued: if
  5449. * - the request is small, and
  5450. * - we are idling to boost throughput, and
  5451. * - the queue is not to be expired,
  5452. * then just exit.
  5453. *
  5454. * In this way, if the device is being idled to wait
  5455. * for a new request from the in-service queue, we
  5456. * avoid unplugging the device and committing the
  5457. * device to serve just a small request. In contrast
  5458. * we wait for the block layer to decide when to
  5459. * unplug the device: hopefully, new requests will be
  5460. * merged to this one quickly, then the device will be
  5461. * unplugged and larger requests will be dispatched.
  5462. */
  5463. if (small_req && idling_boosts_thr_without_issues(bfqd, bfqq) &&
  5464. !budget_timeout)
  5465. return;
  5466. /*
  5467. * A large enough request arrived, or idling is being
  5468. * performed to preserve service guarantees, or
  5469. * finally the queue is to be expired: in all these
  5470. * cases disk idling is to be stopped, so clear
  5471. * wait_request flag and reset timer.
  5472. */
  5473. bfq_clear_bfqq_wait_request(bfqq);
  5474. hrtimer_try_to_cancel(&bfqd->idle_slice_timer);
  5475. /*
  5476. * The queue is not empty, because a new request just
  5477. * arrived. Hence we can safely expire the queue, in
  5478. * case of budget timeout, without risking that the
  5479. * timestamps of the queue are not updated correctly.
  5480. * See [1] for more details.
  5481. */
  5482. if (budget_timeout)
  5483. bfq_bfqq_expire(bfqd, bfqq, false,
  5484. BFQQE_BUDGET_TIMEOUT);
  5485. }
  5486. }
  5487. static void bfqq_request_allocated(struct bfq_queue *bfqq)
  5488. {
  5489. struct bfq_entity *entity = &bfqq->entity;
  5490. for_each_entity(entity)
  5491. entity->allocated++;
  5492. }
  5493. static void bfqq_request_freed(struct bfq_queue *bfqq)
  5494. {
  5495. struct bfq_entity *entity = &bfqq->entity;
  5496. for_each_entity(entity)
  5497. entity->allocated--;
  5498. }
  5499. /* returns true if it causes the idle timer to be disabled */
  5500. static bool __bfq_insert_request(struct bfq_data *bfqd, struct request *rq)
  5501. {
  5502. struct bfq_queue *bfqq = RQ_BFQQ(rq),
  5503. *new_bfqq = bfq_setup_cooperator(bfqd, bfqq, rq, true,
  5504. RQ_BIC(rq));
  5505. bool waiting, idle_timer_disabled = false;
  5506. if (new_bfqq) {
  5507. /*
  5508. * Release the request's reference to the old bfqq
  5509. * and make sure one is taken to the shared queue.
  5510. */
  5511. bfqq_request_allocated(new_bfqq);
  5512. bfqq_request_freed(bfqq);
  5513. new_bfqq->ref++;
  5514. /*
  5515. * If the bic associated with the process
  5516. * issuing this request still points to bfqq
  5517. * (and thus has not been already redirected
  5518. * to new_bfqq or even some other bfq_queue),
  5519. * then complete the merge and redirect it to
  5520. * new_bfqq.
  5521. */
  5522. if (bic_to_bfqq(RQ_BIC(rq), 1) == bfqq)
  5523. bfq_merge_bfqqs(bfqd, RQ_BIC(rq),
  5524. bfqq, new_bfqq);
  5525. bfq_clear_bfqq_just_created(bfqq);
  5526. /*
  5527. * rq is about to be enqueued into new_bfqq,
  5528. * release rq reference on bfqq
  5529. */
  5530. bfq_put_queue(bfqq);
  5531. rq->elv.priv[1] = new_bfqq;
  5532. bfqq = new_bfqq;
  5533. }
  5534. bfq_update_io_thinktime(bfqd, bfqq);
  5535. bfq_update_has_short_ttime(bfqd, bfqq, RQ_BIC(rq));
  5536. bfq_update_io_seektime(bfqd, bfqq, rq);
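/*
 * Detect whether adding rq wakes the queue up from idling: if
 * bfqq had the wait_request flag set before bfq_add_request() and
 * does not have it afterwards, then this insertion has effectively
 * disabled the idle timer. The return value reports this to the
 * caller (see bfq_insert_request()).
 */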
  5537. waiting = bfqq && bfq_bfqq_wait_request(bfqq);
  5538. bfq_add_request(rq);
  5539. idle_timer_disabled = waiting && !bfq_bfqq_wait_request(bfqq);
  5540. rq->fifo_time = ktime_get_ns() + bfqd->bfq_fifo_expire[rq_is_sync(rq)];
  5541. list_add_tail(&rq->queuelist, &bfqq->fifo);
  5542. bfq_rq_enqueued(bfqd, bfqq, rq);
  5543. return idle_timer_disabled;
  5544. }
  5545. #ifdef CONFIG_BFQ_CGROUP_DEBUG
  5546. static void bfq_update_insert_stats(struct request_queue *q,
  5547. struct bfq_queue *bfqq,
  5548. bool idle_timer_disabled,
  5549. blk_opf_t cmd_flags)
  5550. {
  5551. if (!bfqq)
  5552. return;
  5553. /*
  5554. * bfqq still exists, because it can disappear only after
  5555. * either it is merged with another queue, or the process it
  5556. * is associated with exits. But both actions must be taken by
  5557. * the same process currently executing this flow of
  5558. * instructions.
  5559. *
  5560. * In addition, the following queue lock guarantees that
  5561. * bfqq_group(bfqq) exists as well.
  5562. */
  5563. spin_lock_irq(&q->queue_lock);
  5564. bfqg_stats_update_io_add(bfqq_group(bfqq), bfqq, cmd_flags);
  5565. if (idle_timer_disabled)
  5566. bfqg_stats_update_idle_time(bfqq_group(bfqq));
  5567. spin_unlock_irq(&q->queue_lock);
  5568. }
  5569. #else
  5570. static inline void bfq_update_insert_stats(struct request_queue *q,
  5571. struct bfq_queue *bfqq,
  5572. bool idle_timer_disabled,
  5573. blk_opf_t cmd_flags) {}
  5574. #endif /* CONFIG_BFQ_CGROUP_DEBUG */
  5575. static struct bfq_queue *bfq_init_rq(struct request *rq);
  5576. static void bfq_insert_request(struct blk_mq_hw_ctx *hctx, struct request *rq,
  5577. bool at_head)
  5578. {
  5579. struct request_queue *q = hctx->queue;
  5580. struct bfq_data *bfqd = q->elevator->elevator_data;
  5581. struct bfq_queue *bfqq;
  5582. bool idle_timer_disabled = false;
  5583. blk_opf_t cmd_flags;
  5584. LIST_HEAD(free);
  5585. #ifdef CONFIG_BFQ_GROUP_IOSCHED
  5586. if (!cgroup_subsys_on_dfl(io_cgrp_subsys) && rq->bio)
  5587. bfqg_stats_update_legacy_io(q, rq);
  5588. #endif
  5589. spin_lock_irq(&bfqd->lock);
  5590. bfqq = bfq_init_rq(rq);
  5591. if (blk_mq_sched_try_insert_merge(q, rq, &free)) {
  5592. spin_unlock_irq(&bfqd->lock);
  5593. blk_mq_free_requests(&free);
  5594. return;
  5595. }
  5596. trace_block_rq_insert(rq);
  5597. if (!bfqq || at_head) {
  5598. if (at_head)
  5599. list_add(&rq->queuelist, &bfqd->dispatch);
  5600. else
  5601. list_add_tail(&rq->queuelist, &bfqd->dispatch);
  5602. } else {
  5603. idle_timer_disabled = __bfq_insert_request(bfqd, rq);
  5604. /*
  5605. * Update bfqq, because, if a queue merge has occurred
  5606. * in __bfq_insert_request, then rq has been
  5607. * redirected into a new queue.
  5608. */
  5609. bfqq = RQ_BFQQ(rq);
  5610. if (rq_mergeable(rq)) {
  5611. elv_rqhash_add(q, rq);
  5612. if (!q->last_merge)
  5613. q->last_merge = rq;
  5614. }
  5615. }
  5616. /*
  5617. * Cache cmd_flags before releasing scheduler lock, because rq
  5618. * may disappear afterwards (for example, because of a request
  5619. * merge).
  5620. */
  5621. cmd_flags = rq->cmd_flags;
  5622. spin_unlock_irq(&bfqd->lock);
  5623. bfq_update_insert_stats(q, bfqq, idle_timer_disabled,
  5624. cmd_flags);
  5625. }
  5626. static void bfq_insert_requests(struct blk_mq_hw_ctx *hctx,
  5627. struct list_head *list, bool at_head)
  5628. {
  5629. while (!list_empty(list)) {
  5630. struct request *rq;
  5631. rq = list_first_entry(list, struct request, queuelist);
  5632. list_del_init(&rq->queuelist);
  5633. bfq_insert_request(hctx, rq, at_head);
  5634. }
  5635. }
  5636. static void bfq_update_hw_tag(struct bfq_data *bfqd)
  5637. {
  5638. struct bfq_queue *bfqq = bfqd->in_service_queue;
  5639. bfqd->max_rq_in_driver = max_t(int, bfqd->max_rq_in_driver,
  5640. bfqd->rq_in_driver);
  5641. if (bfqd->hw_tag == 1)
  5642. return;
  5643. /*
  5644. * This sample is valid if the number of outstanding requests
  5645. * is large enough to allow a queueing behavior. Note that the
  5646. * sum is not exact, as it's not taking into account deactivated
  5647. * requests.
  5648. */
  5649. if (bfqd->rq_in_driver + bfqd->queued <= BFQ_HW_QUEUE_THRESHOLD)
  5650. return;
  5651. /*
5652. * If the active queue doesn't have enough requests and can idle, bfq might not
  5653. * dispatch sufficient requests to hardware. Don't zero hw_tag in this
  5654. * case
  5655. */
  5656. if (bfqq && bfq_bfqq_has_short_ttime(bfqq) &&
  5657. bfqq->dispatched + bfqq->queued[0] + bfqq->queued[1] <
  5658. BFQ_HW_QUEUE_THRESHOLD &&
  5659. bfqd->rq_in_driver < BFQ_HW_QUEUE_THRESHOLD)
  5660. return;
  5661. if (bfqd->hw_tag_samples++ < BFQ_HW_QUEUE_SAMPLES)
  5662. return;
  5663. bfqd->hw_tag = bfqd->max_rq_in_driver > BFQ_HW_QUEUE_THRESHOLD;
  5664. bfqd->max_rq_in_driver = 0;
  5665. bfqd->hw_tag_samples = 0;
  5666. bfqd->nonrot_with_queueing =
  5667. blk_queue_nonrot(bfqd->queue) && bfqd->hw_tag;
  5668. }
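/*
 * Account for the completion of a request of bfqq: update in-flight
 * counters, think-time and peak-rate state, waker detection and
 * soft real-time bookkeeping, and decide whether the in-service queue
 * must idle or be expired.
 */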
  5669. static void bfq_completed_request(struct bfq_queue *bfqq, struct bfq_data *bfqd)
  5670. {
  5671. u64 now_ns;
  5672. u32 delta_us;
  5673. bfq_update_hw_tag(bfqd);
  5674. bfqd->rq_in_driver--;
  5675. bfqq->dispatched--;
  5676. if (!bfqq->dispatched && !bfq_bfqq_busy(bfqq)) {
  5677. /*
  5678. * Set budget_timeout (which we overload to store the
  5679. * time at which the queue remains with no backlog and
  5680. * no outstanding request; used by the weight-raising
  5681. * mechanism).
  5682. */
  5683. bfqq->budget_timeout = jiffies;
  5684. bfq_weights_tree_remove(bfqd, bfqq);
  5685. }
  5686. now_ns = ktime_get_ns();
  5687. bfqq->ttime.last_end_request = now_ns;
  5688. /*
5689. * Using us instead of ns, to get a reasonable precision when
5690. * computing the rate in the next check.
  5691. */
  5692. delta_us = div_u64(now_ns - bfqd->last_completion, NSEC_PER_USEC);
  5693. /*
  5694. * If the request took rather long to complete, and, according
  5695. * to the maximum request size recorded, this completion latency
  5696. * implies that the request was certainly served at a very low
  5697. * rate (less than 1M sectors/sec), then the whole observation
  5698. * interval that lasts up to this time instant cannot be a
  5699. * valid time interval for computing a new peak rate. Invoke
  5700. * bfq_update_rate_reset to have the following three steps
  5701. * taken:
  5702. * - close the observation interval at the last (previous)
  5703. * request dispatch or completion
  5704. * - compute rate, if possible, for that observation interval
  5705. * - reset to zero samples, which will trigger a proper
  5706. * re-initialization of the observation interval on next
  5707. * dispatch
  5708. */
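/*
 * Concretely, the check below fires when the completion latency is
 * non-negligible and the implied rate, last_rq_max_size/delta_us, is
 * below 2^-10 sectors/usec, i.e., just under 1M sectors/sec (roughly
 * 500 MB/s with 512-byte sectors).
 */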
  5709. if (delta_us > BFQ_MIN_TT/NSEC_PER_USEC &&
  5710. (bfqd->last_rq_max_size<<BFQ_RATE_SHIFT)/delta_us <
  5711. 1UL<<(BFQ_RATE_SHIFT - 10))
  5712. bfq_update_rate_reset(bfqd, NULL);
  5713. bfqd->last_completion = now_ns;
  5714. /*
  5715. * Shared queues are likely to receive I/O at a high
  5716. * rate. This may deceptively let them be considered as wakers
5717. * of other queues. But a false waker will unjustly steal
5718. * bandwidth from its supposedly woken queue. So taking shared
5719. * queues into account in the waking mechanism may cause more
5720. * control trouble than throughput benefit. Hence, reset
  5721. * last_completed_rq_bfqq if bfqq is a shared queue.
  5722. */
  5723. if (!bfq_bfqq_coop(bfqq))
  5724. bfqd->last_completed_rq_bfqq = bfqq;
  5725. else
  5726. bfqd->last_completed_rq_bfqq = NULL;
  5727. /*
  5728. * If we are waiting to discover whether the request pattern
  5729. * of the task associated with the queue is actually
  5730. * isochronous, and both requisites for this condition to hold
  5731. * are now satisfied, then compute soft_rt_next_start (see the
  5732. * comments on the function bfq_bfqq_softrt_next_start()). We
  5733. * do not compute soft_rt_next_start if bfqq is in interactive
  5734. * weight raising (see the comments in bfq_bfqq_expire() for
  5735. * an explanation). We schedule this delayed update when bfqq
  5736. * expires, if it still has in-flight requests.
  5737. */
  5738. if (bfq_bfqq_softrt_update(bfqq) && bfqq->dispatched == 0 &&
  5739. RB_EMPTY_ROOT(&bfqq->sort_list) &&
  5740. bfqq->wr_coeff != bfqd->bfq_wr_coeff)
  5741. bfqq->soft_rt_next_start =
  5742. bfq_bfqq_softrt_next_start(bfqd, bfqq);
  5743. /*
  5744. * If this is the in-service queue, check if it needs to be expired,
  5745. * or if we want to idle in case it has no pending requests.
  5746. */
  5747. if (bfqd->in_service_queue == bfqq) {
  5748. if (bfq_bfqq_must_idle(bfqq)) {
  5749. if (bfqq->dispatched == 0)
  5750. bfq_arm_slice_timer(bfqd);
  5751. /*
  5752. * If we get here, we do not expire bfqq, even
  5753. * if bfqq was in budget timeout or had no
  5754. * more requests (as controlled in the next
  5755. * conditional instructions). The reason for
  5756. * not expiring bfqq is as follows.
  5757. *
  5758. * Here bfqq->dispatched > 0 holds, but
  5759. * bfq_bfqq_must_idle() returned true. This
  5760. * implies that, even if no request arrives
  5761. * for bfqq before bfqq->dispatched reaches 0,
  5762. * bfqq will, however, not be expired on the
5763. * completion event that causes bfqq->dispatched
5764. * to reach zero. In contrast, on this event,
  5765. * bfqq will start enjoying device idling
  5766. * (I/O-dispatch plugging).
  5767. *
  5768. * But, if we expired bfqq here, bfqq would
  5769. * not have the chance to enjoy device idling
  5770. * when bfqq->dispatched finally reaches
  5771. * zero. This would expose bfqq to violation
  5772. * of its reserved service guarantees.
  5773. */
  5774. return;
  5775. } else if (bfq_may_expire_for_budg_timeout(bfqq))
  5776. bfq_bfqq_expire(bfqd, bfqq, false,
  5777. BFQQE_BUDGET_TIMEOUT);
  5778. else if (RB_EMPTY_ROOT(&bfqq->sort_list) &&
  5779. (bfqq->dispatched == 0 ||
  5780. !bfq_better_to_idle(bfqq)))
  5781. bfq_bfqq_expire(bfqd, bfqq, false,
  5782. BFQQE_NO_MORE_REQUESTS);
  5783. }
  5784. if (!bfqd->rq_in_driver)
  5785. bfq_schedule_dispatch(bfqd);
  5786. }
  5787. /*
  5788. * The processes associated with bfqq may happen to generate their
  5789. * cumulative I/O at a lower rate than the rate at which the device
  5790. * could serve the same I/O. This is rather probable, e.g., if only
  5791. * one process is associated with bfqq and the device is an SSD. It
  5792. * results in bfqq becoming often empty while in service. In this
  5793. * respect, if BFQ is allowed to switch to another queue when bfqq
  5794. * remains empty, then the device goes on being fed with I/O requests,
  5795. * and the throughput is not affected. In contrast, if BFQ is not
  5796. * allowed to switch to another queue---because bfqq is sync and
  5797. * I/O-dispatch needs to be plugged while bfqq is temporarily
  5798. * empty---then, during the service of bfqq, there will be frequent
  5799. * "service holes", i.e., time intervals during which bfqq gets empty
  5800. * and the device can only consume the I/O already queued in its
5801. * hardware queues. During service holes, the device may even end up
5802. * idle. In the end, during the service of bfqq, the device
  5803. * is driven at a lower speed than the one it can reach with the kind
  5804. * of I/O flowing through bfqq.
  5805. *
  5806. * To counter this loss of throughput, BFQ implements a "request
  5807. * injection mechanism", which tries to fill the above service holes
  5808. * with I/O requests taken from other queues. The hard part in this
  5809. * mechanism is finding the right amount of I/O to inject, so as to
  5810. * both boost throughput and not break bfqq's bandwidth and latency
  5811. * guarantees. In this respect, the mechanism maintains a per-queue
  5812. * inject limit, computed as below. While bfqq is empty, the injection
  5813. * mechanism dispatches extra I/O requests only until the total number
  5814. * of I/O requests in flight---i.e., already dispatched but not yet
  5815. * completed---remains lower than this limit.
  5816. *
  5817. * A first definition comes in handy to introduce the algorithm by
  5818. * which the inject limit is computed. We define as first request for
  5819. * bfqq, an I/O request for bfqq that arrives while bfqq is in
  5820. * service, and causes bfqq to switch from empty to non-empty. The
  5821. * algorithm updates the limit as a function of the effect of
  5822. * injection on the service times of only the first requests of
  5823. * bfqq. The reason for this restriction is that these are the
  5824. * requests whose service time is affected most, because they are the
  5825. * first to arrive after injection possibly occurred.
  5826. *
  5827. * To evaluate the effect of injection, the algorithm measures the
  5828. * "total service time" of first requests. We define as total service
  5829. * time of an I/O request, the time that elapses since when the
  5830. * request is enqueued into bfqq, to when it is completed. This
  5831. * quantity allows the whole effect of injection to be measured. It is
  5832. * easy to see why. Suppose that some requests of other queues are
  5833. * actually injected while bfqq is empty, and that a new request R
  5834. * then arrives for bfqq. If the device does start to serve all or
  5835. * part of the injected requests during the service hole, then,
  5836. * because of this extra service, it may delay the next invocation of
  5837. * the dispatch hook of BFQ. Then, even after R gets eventually
  5838. * dispatched, the device may delay the actual service of R if it is
  5839. * still busy serving the extra requests, or if it decides to serve,
  5840. * before R, some extra request still present in its queues. As a
  5841. * conclusion, the cumulative extra delay caused by injection can be
  5842. * easily evaluated by just comparing the total service time of first
  5843. * requests with and without injection.
  5844. *
  5845. * The limit-update algorithm works as follows. On the arrival of a
  5846. * first request of bfqq, the algorithm measures the total time of the
  5847. * request only if one of the three cases below holds, and, for each
  5848. * case, it updates the limit as described below:
  5849. *
  5850. * (1) If there is no in-flight request. This gives a baseline for the
  5851. * total service time of the requests of bfqq. If the baseline has
  5852. * not been computed yet, then, after computing it, the limit is
  5853. * set to 1, to start boosting throughput, and to prepare the
  5854. * ground for the next case. If the baseline has already been
5855. * computed, then it is updated, in case the new value turns out
5856. * to be lower than the previous one.
  5857. *
  5858. * (2) If the limit is higher than 0 and there are in-flight
  5859. * requests. By comparing the total service time in this case with
5860. * the above baseline, it is possible to know to what extent the
  5861. * current value of the limit is inflating the total service
  5862. * time. If the inflation is below a certain threshold, then bfqq
  5863. * is assumed to be suffering from no perceivable loss of its
  5864. * service guarantees, and the limit is even tentatively
  5865. * increased. If the inflation is above the threshold, then the
  5866. * limit is decreased. Due to the lack of any hysteresis, this
  5867. * logic makes the limit oscillate even in steady workload
  5868. * conditions. Yet we opted for it, because it is fast in reaching
  5869. * the best value for the limit, as a function of the current I/O
  5870. * workload. To reduce oscillations, this step is disabled for a
  5871. * short time interval after the limit happens to be decreased.
  5872. *
  5873. * (3) Periodically, after resetting the limit, to make sure that the
  5874. * limit eventually drops in case the workload changes. This is
  5875. * needed because, after the limit has gone safely up for a
  5876. * certain workload, it is impossible to guess whether the
  5877. * baseline total service time may have changed, without measuring
  5878. * it again without injection. A more effective version of this
  5879. * step might be to just sample the baseline, by interrupting
  5880. * injection only once, and then to reset/lower the limit only if
  5881. * the total service time with the current limit does happen to be
  5882. * too large.
  5883. *
  5884. * More details on each step are provided in the comments on the
  5885. * pieces of code that implement these steps: the branch handling the
5886. * transition from empty to non-empty in bfq_add_request(), the branch
  5887. * handling injection in bfq_select_queue(), and the function
  5888. * bfq_choose_bfqq_for_injection(). These comments also explain some
  5889. * exceptions, made by the injection mechanism in some special cases.
  5890. */
  5891. static void bfq_update_inject_limit(struct bfq_data *bfqd,
  5892. struct bfq_queue *bfqq)
  5893. {
  5894. u64 tot_time_ns = ktime_get_ns() - bfqd->last_empty_occupied_ns;
  5895. unsigned int old_limit = bfqq->inject_limit;
  5896. if (bfqq->last_serv_time_ns > 0 && bfqd->rqs_injected) {
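/*
 * Tolerate up to 50% inflation of the baseline: the threshold is
 * 1.5 times the last measured base total service time.
 */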
  5897. u64 threshold = (bfqq->last_serv_time_ns * 3)>>1;
  5898. if (tot_time_ns >= threshold && old_limit > 0) {
  5899. bfqq->inject_limit--;
  5900. bfqq->decrease_time_jif = jiffies;
  5901. } else if (tot_time_ns < threshold &&
  5902. old_limit <= bfqd->max_rq_in_driver)
  5903. bfqq->inject_limit++;
  5904. }
  5905. /*
  5906. * Either we still have to compute the base value for the
  5907. * total service time, and there seem to be the right
  5908. * conditions to do it, or we can lower the last base value
  5909. * computed.
  5910. *
  5911. * NOTE: (bfqd->rq_in_driver == 1) means that there is no I/O
  5912. * request in flight, because this function is in the code
  5913. * path that handles the completion of a request of bfqq, and,
  5914. * in particular, this function is executed before
  5915. * bfqd->rq_in_driver is decremented in such a code path.
  5916. */
  5917. if ((bfqq->last_serv_time_ns == 0 && bfqd->rq_in_driver == 1) ||
  5918. tot_time_ns < bfqq->last_serv_time_ns) {
  5919. if (bfqq->last_serv_time_ns == 0) {
  5920. /*
  5921. * Now we certainly have a base value: make sure we
  5922. * start trying injection.
  5923. */
  5924. bfqq->inject_limit = max_t(unsigned int, 1, old_limit);
  5925. }
  5926. bfqq->last_serv_time_ns = tot_time_ns;
  5927. } else if (!bfqd->rqs_injected && bfqd->rq_in_driver == 1)
  5928. /*
  5929. * No I/O injected and no request still in service in
  5930. * the drive: these are the exact conditions for
  5931. * computing the base value of the total service time
  5932. * for bfqq. So let's update this value, because it is
  5933. * rather variable. For example, it varies if the size
  5934. * or the spatial locality of the I/O requests in bfqq
  5935. * change.
  5936. */
  5937. bfqq->last_serv_time_ns = tot_time_ns;
  5938. /* update complete, not waiting for any request completion any longer */
  5939. bfqd->waited_rq = NULL;
  5940. bfqd->rqs_injected = false;
  5941. }
  5942. /*
  5943. * Handle either a requeue or a finish for rq. The things to do are
  5944. * the same in both cases: all references to rq are to be dropped. In
  5945. * particular, rq is considered completed from the point of view of
  5946. * the scheduler.
  5947. */
  5948. static void bfq_finish_requeue_request(struct request *rq)
  5949. {
  5950. struct bfq_queue *bfqq = RQ_BFQQ(rq);
  5951. struct bfq_data *bfqd;
  5952. unsigned long flags;
  5953. /*
  5954. * rq either is not associated with any icq, or is an already
  5955. * requeued request that has not (yet) been re-inserted into
  5956. * a bfq_queue.
  5957. */
  5958. if (!rq->elv.icq || !bfqq)
  5959. return;
  5960. bfqd = bfqq->bfqd;
  5961. if (rq->rq_flags & RQF_STARTED)
  5962. bfqg_stats_update_completion(bfqq_group(bfqq),
  5963. rq->start_time_ns,
  5964. rq->io_start_time_ns,
  5965. rq->cmd_flags);
  5966. spin_lock_irqsave(&bfqd->lock, flags);
  5967. if (likely(rq->rq_flags & RQF_STARTED)) {
  5968. if (rq == bfqd->waited_rq)
  5969. bfq_update_inject_limit(bfqd, bfqq);
  5970. bfq_completed_request(bfqq, bfqd);
  5971. }
  5972. bfqq_request_freed(bfqq);
  5973. bfq_put_queue(bfqq);
  5974. RQ_BIC(rq)->requests--;
  5975. spin_unlock_irqrestore(&bfqd->lock, flags);
  5976. /*
  5977. * Reset private fields. In case of a requeue, this allows
  5978. * this function to correctly do nothing if it is spuriously
  5979. * invoked again on this same request (see the check at the
  5980. * beginning of the function). Probably, a better general
  5981. * design would be to prevent blk-mq from invoking the requeue
  5982. * or finish hooks of an elevator, for a request that is not
  5983. * referred by that elevator.
  5984. *
  5985. * Resetting the following fields would break the
  5986. * request-insertion logic if rq is re-inserted into a bfq
  5987. * internal queue, without a re-preparation. Here we assume
  5988. * that re-insertions of requeued requests, without
  5989. * re-preparation, can happen only for pass_through or at_head
  5990. * requests (which are not re-inserted into bfq internal
  5991. * queues).
  5992. */
  5993. rq->elv.priv[0] = NULL;
  5994. rq->elv.priv[1] = NULL;
  5995. }
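/*
 * Request-completion hook: drop the scheduler's references to rq and
 * release the I/O-context reference taken in bfq_prepare_request().
 */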
  5996. static void bfq_finish_request(struct request *rq)
  5997. {
  5998. bfq_finish_requeue_request(rq);
  5999. if (rq->elv.icq) {
  6000. put_io_context(rq->elv.icq->ioc);
  6001. rq->elv.icq = NULL;
  6002. }
  6003. }
  6004. /*
  6005. * Removes the association between the current task and bfqq, assuming
  6006. * that bic points to the bfq iocontext of the task.
  6007. * Returns NULL if a new bfqq should be allocated, or the old bfqq if this
  6008. * was the last process referring to that bfqq.
  6009. */
  6010. static struct bfq_queue *
  6011. bfq_split_bfqq(struct bfq_io_cq *bic, struct bfq_queue *bfqq)
  6012. {
  6013. bfq_log_bfqq(bfqq->bfqd, bfqq, "splitting queue");
  6014. if (bfqq_process_refs(bfqq) == 1) {
  6015. bfqq->pid = current->pid;
  6016. bfq_clear_bfqq_coop(bfqq);
  6017. bfq_clear_bfqq_split_coop(bfqq);
  6018. return bfqq;
  6019. }
  6020. bic_set_bfqq(bic, NULL, true);
  6021. bfq_put_cooperator(bfqq);
  6022. bfq_release_process_ref(bfqq->bfqd, bfqq);
  6023. return NULL;
  6024. }
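/*
 * Return the bfqq that bic points to for the given direction; if none
 * is attached, or only the fallback oom_bfqq is, allocate a new queue
 * and, on a post-split allocation, restore its large-burst state.
 */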
  6025. static struct bfq_queue *bfq_get_bfqq_handle_split(struct bfq_data *bfqd,
  6026. struct bfq_io_cq *bic,
  6027. struct bio *bio,
  6028. bool split, bool is_sync,
  6029. bool *new_queue)
  6030. {
  6031. struct bfq_queue *bfqq = bic_to_bfqq(bic, is_sync);
  6032. if (likely(bfqq && bfqq != &bfqd->oom_bfqq))
  6033. return bfqq;
  6034. if (new_queue)
  6035. *new_queue = true;
  6036. if (bfqq)
  6037. bfq_put_queue(bfqq);
  6038. bfqq = bfq_get_queue(bfqd, bio, is_sync, bic, split);
  6039. bic_set_bfqq(bic, bfqq, is_sync);
  6040. if (split && is_sync) {
  6041. if ((bic->was_in_burst_list && bfqd->large_burst) ||
  6042. bic->saved_in_large_burst)
  6043. bfq_mark_bfqq_in_large_burst(bfqq);
  6044. else {
  6045. bfq_clear_bfqq_in_large_burst(bfqq);
  6046. if (bic->was_in_burst_list)
  6047. /*
  6048. * If bfqq was in the current
  6049. * burst list before being
  6050. * merged, then we have to add
  6051. * it back. And we do not need
  6052. * to increase burst_size, as
  6053. * we did not decrement
  6054. * burst_size when we removed
  6055. * bfqq from the burst list as
  6056. * a consequence of a merge
  6057. * (see comments in
  6058. * bfq_put_queue). In this
  6059. * respect, it would be rather
  6060. * costly to know whether the
  6061. * current burst list is still
  6062. * the same burst list from
  6063. * which bfqq was removed on
  6064. * the merge. To avoid this
  6065. * cost, if bfqq was in a
  6066. * burst list, then we add
  6067. * bfqq to the current burst
  6068. * list without any further
  6069. * check. This can cause
  6070. * inappropriate insertions,
  6071. * but rarely enough to not
  6072. * harm the detection of large
  6073. * bursts significantly.
  6074. */
  6075. hlist_add_head(&bfqq->burst_list_node,
  6076. &bfqd->burst_list);
  6077. }
  6078. bfqq->split_time = jiffies;
  6079. }
  6080. return bfqq;
  6081. }
  6082. /*
  6083. * Only reset private fields. The actual request preparation will be
  6084. * performed by bfq_init_rq, when rq is either inserted or merged. See
  6085. * comments on bfq_init_rq for the reason behind this delayed
  6086. * preparation.
  6087. */
  6088. static void bfq_prepare_request(struct request *rq)
  6089. {
  6090. rq->elv.icq = ioc_find_get_icq(rq->q);
  6091. /*
  6092. * Regardless of whether we have an icq attached, we have to
  6093. * clear the scheduler pointers, as they might point to
  6094. * previously allocated bic/bfqq structs.
  6095. */
  6096. rq->elv.priv[0] = rq->elv.priv[1] = NULL;
  6097. }
  6098. /*
  6099. * If needed, init rq, allocate bfq data structures associated with
  6100. * rq, and increment reference counters in the destination bfq_queue
6101. * for rq. Return the destination bfq_queue for rq, or NULL if rq is
  6102. * not associated with any bfq_queue.
  6103. *
  6104. * This function is invoked by the functions that perform rq insertion
  6105. * or merging. One may have expected the above preparation operations
  6106. * to be performed in bfq_prepare_request, and not delayed to when rq
  6107. * is inserted or merged. The rationale behind this delayed
  6108. * preparation is that, after the prepare_request hook is invoked for
  6109. * rq, rq may still be transformed into a request with no icq, i.e., a
  6110. * request not associated with any queue. No bfq hook is invoked to
  6111. * signal this transformation. As a consequence, should these
  6112. * preparation operations be performed when the prepare_request hook
  6113. * is invoked, and should rq be transformed one moment later, bfq
  6114. * would end up in an inconsistent state, because it would have
  6115. * incremented some queue counters for an rq destined to
  6116. * transformation, without any chance to correctly lower these
  6117. * counters back. In contrast, no transformation can still happen for
  6118. * rq after rq has been inserted or merged. So, it is safe to execute
  6119. * these preparation operations when rq is finally inserted or merged.
  6120. */
  6121. static struct bfq_queue *bfq_init_rq(struct request *rq)
  6122. {
  6123. struct request_queue *q = rq->q;
  6124. struct bio *bio = rq->bio;
  6125. struct bfq_data *bfqd = q->elevator->elevator_data;
  6126. struct bfq_io_cq *bic;
  6127. const int is_sync = rq_is_sync(rq);
  6128. struct bfq_queue *bfqq;
  6129. bool new_queue = false;
  6130. bool bfqq_already_existing = false, split = false;
  6131. if (unlikely(!rq->elv.icq))
  6132. return NULL;
  6133. /*
  6134. * Assuming that elv.priv[1] is set only if everything is set
  6135. * for this rq. This holds true, because this function is
  6136. * invoked only for insertion or merging, and, after such
  6137. * events, a request cannot be manipulated any longer before
  6138. * being removed from bfq.
  6139. */
  6140. if (rq->elv.priv[1])
  6141. return rq->elv.priv[1];
  6142. bic = icq_to_bic(rq->elv.icq);
  6143. bfq_check_ioprio_change(bic, bio);
  6144. bfq_bic_update_cgroup(bic, bio);
  6145. bfqq = bfq_get_bfqq_handle_split(bfqd, bic, bio, false, is_sync,
  6146. &new_queue);
  6147. if (likely(!new_queue)) {
  6148. /* If the queue was seeky for too long, break it apart. */
  6149. if (bfq_bfqq_coop(bfqq) && bfq_bfqq_split_coop(bfqq) &&
  6150. !bic->stably_merged) {
  6151. struct bfq_queue *old_bfqq = bfqq;
  6152. /* Update bic before losing reference to bfqq */
  6153. if (bfq_bfqq_in_large_burst(bfqq))
  6154. bic->saved_in_large_burst = true;
  6155. bfqq = bfq_split_bfqq(bic, bfqq);
  6156. split = true;
  6157. if (!bfqq) {
  6158. bfqq = bfq_get_bfqq_handle_split(bfqd, bic, bio,
  6159. true, is_sync,
  6160. NULL);
  6161. if (unlikely(bfqq == &bfqd->oom_bfqq))
  6162. bfqq_already_existing = true;
  6163. } else
  6164. bfqq_already_existing = true;
  6165. if (!bfqq_already_existing) {
  6166. bfqq->waker_bfqq = old_bfqq->waker_bfqq;
  6167. bfqq->tentative_waker_bfqq = NULL;
  6168. /*
  6169. * If the waker queue disappears, then
  6170. * new_bfqq->waker_bfqq must be
  6171. * reset. So insert new_bfqq into the
  6172. * woken_list of the waker. See
  6173. * bfq_check_waker for details.
  6174. */
  6175. if (bfqq->waker_bfqq)
  6176. hlist_add_head(&bfqq->woken_list_node,
  6177. &bfqq->waker_bfqq->woken_list);
  6178. }
  6179. }
  6180. }
  6181. bfqq_request_allocated(bfqq);
  6182. bfqq->ref++;
  6183. bic->requests++;
  6184. bfq_log_bfqq(bfqd, bfqq, "get_request %p: bfqq %p, %d",
  6185. rq, bfqq, bfqq->ref);
  6186. rq->elv.priv[0] = bic;
  6187. rq->elv.priv[1] = bfqq;
  6188. /*
  6189. * If a bfq_queue has only one process reference, it is owned
6190. * by only this bic: we can then set bfqq->bic = bic. In
6191. * addition, if the queue has also just been split, we have to
  6192. * resume its state.
  6193. */
  6194. if (likely(bfqq != &bfqd->oom_bfqq) && bfqq_process_refs(bfqq) == 1) {
  6195. bfqq->bic = bic;
  6196. if (split) {
  6197. /*
  6198. * The queue has just been split from a shared
  6199. * queue: restore the idle window and the
  6200. * possible weight raising period.
  6201. */
  6202. bfq_bfqq_resume_state(bfqq, bfqd, bic,
  6203. bfqq_already_existing);
  6204. }
  6205. }
  6206. /*
  6207. * Consider bfqq as possibly belonging to a burst of newly
  6208. * created queues only if:
  6209. * 1) A burst is actually happening (bfqd->burst_size > 0)
  6210. * or
  6211. * 2) There is no other active queue. In fact, if, in
  6212. * contrast, there are active queues not belonging to the
  6213. * possible burst bfqq may belong to, then there is no gain
  6214. * in considering bfqq as belonging to a burst, and
  6215. * therefore in not weight-raising bfqq. See comments on
  6216. * bfq_handle_burst().
  6217. *
  6218. * This filtering also helps eliminating false positives,
  6219. * occurring when bfqq does not belong to an actual large
  6220. * burst, but some background task (e.g., a service) happens
  6221. * to trigger the creation of new queues very close to when
  6222. * bfqq and its possible companion queues are created. See
  6223. * comments on bfq_handle_burst() for further details also on
  6224. * this issue.
  6225. */
  6226. if (unlikely(bfq_bfqq_just_created(bfqq) &&
  6227. (bfqd->burst_size > 0 ||
  6228. bfq_tot_busy_queues(bfqd) == 0)))
  6229. bfq_handle_burst(bfqd, bfqq);
  6230. return bfqq;
  6231. }
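/*
 * Body of the idle-slice timer: if bfqq is still the in-service queue,
 * stop waiting for new requests and expire it if it has incurred a
 * budget timeout or has become too idle, then schedule a new dispatch
 * round.
 */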
  6232. static void
  6233. bfq_idle_slice_timer_body(struct bfq_data *bfqd, struct bfq_queue *bfqq)
  6234. {
  6235. enum bfqq_expiration reason;
  6236. unsigned long flags;
  6237. spin_lock_irqsave(&bfqd->lock, flags);
  6238. /*
6239. * Considering that bfqq may be subject to a race, we should first
6240. * check whether bfqq is still in service before doing anything to
6241. * it. If the racing bfqq is not in service, it has already been
6242. * expired through __bfq_bfqq_expire(), and its wait_request flag
6243. * has been cleared in __bfq_bfqd_reset_in_service().
  6244. */
  6245. if (bfqq != bfqd->in_service_queue) {
  6246. spin_unlock_irqrestore(&bfqd->lock, flags);
  6247. return;
  6248. }
  6249. bfq_clear_bfqq_wait_request(bfqq);
  6250. if (bfq_bfqq_budget_timeout(bfqq))
  6251. /*
  6252. * Also here the queue can be safely expired
  6253. * for budget timeout without wasting
  6254. * guarantees
  6255. */
  6256. reason = BFQQE_BUDGET_TIMEOUT;
  6257. else if (bfqq->queued[0] == 0 && bfqq->queued[1] == 0)
  6258. /*
  6259. * The queue may not be empty upon timer expiration,
  6260. * because we may not disable the timer when the
  6261. * first request of the in-service queue arrives
  6262. * during disk idling.
  6263. */
  6264. reason = BFQQE_TOO_IDLE;
  6265. else
  6266. goto schedule_dispatch;
  6267. bfq_bfqq_expire(bfqd, bfqq, true, reason);
  6268. schedule_dispatch:
  6269. bfq_schedule_dispatch(bfqd);
  6270. spin_unlock_irqrestore(&bfqd->lock, flags);
  6271. }
  6272. /*
  6273. * Handler of the expiration of the timer running if the in-service queue
  6274. * is idling inside its time slice.
  6275. */
  6276. static enum hrtimer_restart bfq_idle_slice_timer(struct hrtimer *timer)
  6277. {
  6278. struct bfq_data *bfqd = container_of(timer, struct bfq_data,
  6279. idle_slice_timer);
  6280. struct bfq_queue *bfqq = bfqd->in_service_queue;
  6281. /*
  6282. * Theoretical race here: the in-service queue can be NULL or
  6283. * different from the queue that was idling if a new request
  6284. * arrives for the current queue and there is a full dispatch
  6285. * cycle that changes the in-service queue. This can hardly
  6286. * happen, but in the worst case we just expire a queue too
  6287. * early.
  6288. */
  6289. if (bfqq)
  6290. bfq_idle_slice_timer_body(bfqd, bfqq);
  6291. return HRTIMER_NORESTART;
  6292. }
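/*
 * Move one async queue to the root group and drop the reference that
 * the group was holding on it, clearing the owner's pointer.
 */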
  6293. static void __bfq_put_async_bfqq(struct bfq_data *bfqd,
  6294. struct bfq_queue **bfqq_ptr)
  6295. {
  6296. struct bfq_queue *bfqq = *bfqq_ptr;
  6297. bfq_log(bfqd, "put_async_bfqq: %p", bfqq);
  6298. if (bfqq) {
  6299. bfq_bfqq_move(bfqd, bfqq, bfqd->root_group);
  6300. bfq_log_bfqq(bfqd, bfqq, "put_async_bfqq: putting %p, %d",
  6301. bfqq, bfqq->ref);
  6302. bfq_put_queue(bfqq);
  6303. *bfqq_ptr = NULL;
  6304. }
  6305. }
  6306. /*
  6307. * Release all the bfqg references to its async queues. If we are
  6308. * deallocating the group these queues may still contain requests, so
  6309. * we reparent them to the root cgroup (i.e., the only one that will
  6310. * exist for sure until all the requests on a device are gone).
  6311. */
  6312. void bfq_put_async_queues(struct bfq_data *bfqd, struct bfq_group *bfqg)
  6313. {
  6314. int i, j;
  6315. for (i = 0; i < 2; i++)
  6316. for (j = 0; j < IOPRIO_NR_LEVELS; j++)
  6317. __bfq_put_async_bfqq(bfqd, &bfqg->async_bfqq[i][j]);
  6318. __bfq_put_async_bfqq(bfqd, &bfqg->async_idle_bfqq);
  6319. }
  6320. /*
  6321. * See the comments on bfq_limit_depth for the purpose of
6322. * the depths set in this function.
  6323. */
  6324. static void bfq_update_depths(struct bfq_data *bfqd, struct sbitmap_queue *bt)
  6325. {
  6326. unsigned int depth = 1U << bt->sb.shift;
  6327. bfqd->full_depth_shift = bt->sb.shift;
  6328. /*
  6329. * In-word depths if no bfq_queue is being weight-raised:
  6330. * leaving 25% of tags only for sync reads.
  6331. *
  6332. * In next formulas, right-shift the value
  6333. * (1U<<bt->sb.shift), instead of computing directly
  6334. * (1U<<(bt->sb.shift - something)), to be robust against
  6335. * any possible value of bt->sb.shift, without having to
  6336. * limit 'something'.
  6337. */
  6338. /* no more than 50% of tags for async I/O */
  6339. bfqd->word_depths[0][0] = max(depth >> 1, 1U);
  6340. /*
  6341. * no more than 75% of tags for sync writes (25% extra tags
  6342. * w.r.t. async I/O, to prevent async I/O from starving sync
  6343. * writes)
  6344. */
  6345. bfqd->word_depths[0][1] = max((depth * 3) >> 2, 1U);
  6346. /*
  6347. * In-word depths in case some bfq_queue is being weight-
  6348. * raised: leaving ~63% of tags for sync reads. This is the
  6349. * highest percentage for which, in our tests, application
  6350. * start-up times didn't suffer from any regression due to tag
  6351. * shortage.
  6352. */
  6353. /* no more than ~18% of tags for async I/O */
  6354. bfqd->word_depths[1][0] = max((depth * 3) >> 4, 1U);
  6355. /* no more than ~37% of tags for sync writes (~20% extra tags) */
  6356. bfqd->word_depths[1][1] = max((depth * 6) >> 4, 1U);
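/*
 * For example, with bt->sb.shift == 6 (64 tags per word), the limits
 * above become 32 tags for async I/O and 48 for sync writes without
 * weight-raising, and 12 and 24 respectively with weight-raising.
 */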
  6357. }
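/*
 * Propagate a change in the number of scheduler tags to the per-word
 * depth limits used by bfq_limit_depth().
 */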
  6358. static void bfq_depth_updated(struct blk_mq_hw_ctx *hctx)
  6359. {
  6360. struct bfq_data *bfqd = hctx->queue->elevator->elevator_data;
  6361. struct blk_mq_tags *tags = hctx->sched_tags;
  6362. bfq_update_depths(bfqd, &tags->bitmap_tags);
  6363. sbitmap_queue_min_shallow_depth(&tags->bitmap_tags, 1);
  6364. }
  6365. static int bfq_init_hctx(struct blk_mq_hw_ctx *hctx, unsigned int index)
  6366. {
  6367. bfq_depth_updated(hctx);
  6368. return 0;
  6369. }
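/*
 * Tear down the scheduler instance: deactivate any still-idle queues,
 * release the group and async-queue references, restore the default
 * writeback throttling, and free the bfq_data.
 */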
  6370. static void bfq_exit_queue(struct elevator_queue *e)
  6371. {
  6372. struct bfq_data *bfqd = e->elevator_data;
  6373. struct bfq_queue *bfqq, *n;
  6374. hrtimer_cancel(&bfqd->idle_slice_timer);
  6375. spin_lock_irq(&bfqd->lock);
  6376. list_for_each_entry_safe(bfqq, n, &bfqd->idle_list, bfqq_list)
  6377. bfq_deactivate_bfqq(bfqd, bfqq, false, false);
  6378. spin_unlock_irq(&bfqd->lock);
  6379. hrtimer_cancel(&bfqd->idle_slice_timer);
  6380. /* release oom-queue reference to root group */
  6381. bfqg_and_blkg_put(bfqd->root_group);
  6382. #ifdef CONFIG_BFQ_GROUP_IOSCHED
  6383. blkcg_deactivate_policy(bfqd->queue, &blkcg_policy_bfq);
  6384. #else
  6385. spin_lock_irq(&bfqd->lock);
  6386. bfq_put_async_queues(bfqd, bfqd->root_group);
  6387. kfree(bfqd->root_group);
  6388. spin_unlock_irq(&bfqd->lock);
  6389. #endif
  6390. blk_stat_disable_accounting(bfqd->queue);
  6391. wbt_enable_default(bfqd->queue);
  6392. kfree(bfqd);
  6393. }
  6394. static void bfq_init_root_group(struct bfq_group *root_group,
  6395. struct bfq_data *bfqd)
  6396. {
  6397. int i;
  6398. #ifdef CONFIG_BFQ_GROUP_IOSCHED
  6399. root_group->entity.parent = NULL;
  6400. root_group->my_entity = NULL;
  6401. root_group->bfqd = bfqd;
  6402. #endif
  6403. root_group->rq_pos_tree = RB_ROOT;
  6404. for (i = 0; i < BFQ_IOPRIO_CLASSES; i++)
  6405. root_group->sched_data.service_tree[i] = BFQ_SERVICE_TREE_INIT;
  6406. root_group->sched_data.bfq_class_idle_last_service = jiffies;
  6407. }
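/*
 * Allocate and initialize the scheduler instance for @q: set up the
 * fallback oom_bfqq, the default tunables, and the root group
 * hierarchy, and disable the default writeback throttling in favour
 * of BFQ.
 */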
  6408. static int bfq_init_queue(struct request_queue *q, struct elevator_type *e)
  6409. {
  6410. struct bfq_data *bfqd;
  6411. struct elevator_queue *eq;
  6412. eq = elevator_alloc(q, e);
  6413. if (!eq)
  6414. return -ENOMEM;
  6415. bfqd = kzalloc_node(sizeof(*bfqd), GFP_KERNEL, q->node);
  6416. if (!bfqd) {
  6417. kobject_put(&eq->kobj);
  6418. return -ENOMEM;
  6419. }
  6420. eq->elevator_data = bfqd;
  6421. spin_lock_irq(&q->queue_lock);
  6422. q->elevator = eq;
  6423. spin_unlock_irq(&q->queue_lock);
  6424. /*
6425. * Our fallback bfqq if bfq_get_queue() runs into OOM issues.
  6426. * Grab a permanent reference to it, so that the normal code flow
  6427. * will not attempt to free it.
  6428. */
  6429. bfq_init_bfqq(bfqd, &bfqd->oom_bfqq, NULL, 1, 0);
  6430. bfqd->oom_bfqq.ref++;
  6431. bfqd->oom_bfqq.new_ioprio = BFQ_DEFAULT_QUEUE_IOPRIO;
  6432. bfqd->oom_bfqq.new_ioprio_class = IOPRIO_CLASS_BE;
  6433. bfqd->oom_bfqq.entity.new_weight =
  6434. bfq_ioprio_to_weight(bfqd->oom_bfqq.new_ioprio);
6435. /* oom_bfqq does not participate in bursts */
  6436. bfq_clear_bfqq_just_created(&bfqd->oom_bfqq);
  6437. /*
  6438. * Trigger weight initialization, according to ioprio, at the
  6439. * oom_bfqq's first activation. The oom_bfqq's ioprio and ioprio
  6440. * class won't be changed any more.
  6441. */
  6442. bfqd->oom_bfqq.entity.prio_changed = 1;
  6443. bfqd->queue = q;
  6444. INIT_LIST_HEAD(&bfqd->dispatch);
  6445. hrtimer_init(&bfqd->idle_slice_timer, CLOCK_MONOTONIC,
  6446. HRTIMER_MODE_REL);
  6447. bfqd->idle_slice_timer.function = bfq_idle_slice_timer;
  6448. bfqd->queue_weights_tree = RB_ROOT_CACHED;
  6449. bfqd->num_groups_with_pending_reqs = 0;
  6450. INIT_LIST_HEAD(&bfqd->active_list);
  6451. INIT_LIST_HEAD(&bfqd->idle_list);
  6452. INIT_HLIST_HEAD(&bfqd->burst_list);
  6453. bfqd->hw_tag = -1;
  6454. bfqd->nonrot_with_queueing = blk_queue_nonrot(bfqd->queue);
  6455. bfqd->bfq_max_budget = bfq_default_max_budget;
  6456. bfqd->bfq_fifo_expire[0] = bfq_fifo_expire[0];
  6457. bfqd->bfq_fifo_expire[1] = bfq_fifo_expire[1];
  6458. bfqd->bfq_back_max = bfq_back_max;
  6459. bfqd->bfq_back_penalty = bfq_back_penalty;
  6460. bfqd->bfq_slice_idle = bfq_slice_idle;
  6461. bfqd->bfq_timeout = bfq_timeout;
  6462. bfqd->bfq_large_burst_thresh = 8;
  6463. bfqd->bfq_burst_interval = msecs_to_jiffies(180);
  6464. bfqd->low_latency = true;
  6465. /*
  6466. * Trade-off between responsiveness and fairness.
  6467. */
  6468. bfqd->bfq_wr_coeff = 30;
  6469. bfqd->bfq_wr_rt_max_time = msecs_to_jiffies(300);
  6470. bfqd->bfq_wr_max_time = 0;
  6471. bfqd->bfq_wr_min_idle_time = msecs_to_jiffies(2000);
  6472. bfqd->bfq_wr_min_inter_arr_async = msecs_to_jiffies(500);
  6473. bfqd->bfq_wr_max_softrt_rate = 7000; /*
  6474. * Approximate rate required
  6475. * to playback or record a
  6476. * high-definition compressed
  6477. * video.
  6478. */
  6479. bfqd->wr_busy_queues = 0;
  6480. /*
  6481. * Begin by assuming, optimistically, that the device peak
  6482. * rate is equal to 2/3 of the highest reference rate.
  6483. */
  6484. bfqd->rate_dur_prod = ref_rate[blk_queue_nonrot(bfqd->queue)] *
  6485. ref_wr_duration[blk_queue_nonrot(bfqd->queue)];
  6486. bfqd->peak_rate = ref_rate[blk_queue_nonrot(bfqd->queue)] * 2 / 3;
  6487. spin_lock_init(&bfqd->lock);
  6488. /*
  6489. * The invocation of the next bfq_create_group_hierarchy
  6490. * function is the head of a chain of function calls
  6491. * (bfq_create_group_hierarchy->blkcg_activate_policy->
  6492. * blk_mq_freeze_queue) that may lead to the invocation of the
  6493. * has_work hook function. For this reason,
  6494. * bfq_create_group_hierarchy is invoked only after all
  6495. * scheduler data has been initialized, apart from the fields
  6496. * that can be initialized only after invoking
  6497. * bfq_create_group_hierarchy. This, in particular, enables
  6498. * has_work to correctly return false. Of course, to avoid
  6499. * other inconsistencies, the blk-mq stack must then refrain
  6500. * from invoking further scheduler hooks before this init
  6501. * function is finished.
  6502. */
  6503. bfqd->root_group = bfq_create_group_hierarchy(bfqd, q->node);
  6504. if (!bfqd->root_group)
  6505. goto out_free;
  6506. bfq_init_root_group(bfqd->root_group, bfqd);
  6507. bfq_init_entity(&bfqd->oom_bfqq.entity, bfqd->root_group);
  6508. /* We dispatch from request queue wide instead of hw queue */
  6509. blk_queue_flag_set(QUEUE_FLAG_SQ_SCHED, q);
  6510. wbt_disable_default(q);
  6511. blk_stat_enable_accounting(q);
  6512. return 0;
  6513. out_free:
  6514. kfree(bfqd);
  6515. kobject_put(&eq->kobj);
  6516. return -ENOMEM;
  6517. }
  6518. static void bfq_slab_kill(void)
  6519. {
  6520. kmem_cache_destroy(bfq_pool);
  6521. }
  6522. static int __init bfq_slab_setup(void)
  6523. {
  6524. bfq_pool = KMEM_CACHE(bfq_queue, 0);
  6525. if (!bfq_pool)
  6526. return -ENOMEM;
  6527. return 0;
  6528. }
  6529. static ssize_t bfq_var_show(unsigned int var, char *page)
  6530. {
  6531. return sprintf(page, "%u\n", var);
  6532. }
  6533. static int bfq_var_store(unsigned long *var, const char *page)
  6534. {
  6535. unsigned long new_val;
  6536. int ret = kstrtoul(page, 10, &new_val);
  6537. if (ret)
  6538. return ret;
  6539. *var = new_val;
  6540. return 0;
  6541. }
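/*
 * In the show/store macros below, __CONV selects the unit conversion
 * applied to the value: 0 = none, 1 = jiffies <-> milliseconds,
 * 2 = nanoseconds <-> milliseconds.
 */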
  6542. #define SHOW_FUNCTION(__FUNC, __VAR, __CONV) \
  6543. static ssize_t __FUNC(struct elevator_queue *e, char *page) \
  6544. { \
  6545. struct bfq_data *bfqd = e->elevator_data; \
  6546. u64 __data = __VAR; \
  6547. if (__CONV == 1) \
  6548. __data = jiffies_to_msecs(__data); \
  6549. else if (__CONV == 2) \
  6550. __data = div_u64(__data, NSEC_PER_MSEC); \
  6551. return bfq_var_show(__data, (page)); \
  6552. }
  6553. SHOW_FUNCTION(bfq_fifo_expire_sync_show, bfqd->bfq_fifo_expire[1], 2);
  6554. SHOW_FUNCTION(bfq_fifo_expire_async_show, bfqd->bfq_fifo_expire[0], 2);
  6555. SHOW_FUNCTION(bfq_back_seek_max_show, bfqd->bfq_back_max, 0);
  6556. SHOW_FUNCTION(bfq_back_seek_penalty_show, bfqd->bfq_back_penalty, 0);
  6557. SHOW_FUNCTION(bfq_slice_idle_show, bfqd->bfq_slice_idle, 2);
  6558. SHOW_FUNCTION(bfq_max_budget_show, bfqd->bfq_user_max_budget, 0);
  6559. SHOW_FUNCTION(bfq_timeout_sync_show, bfqd->bfq_timeout, 1);
  6560. SHOW_FUNCTION(bfq_strict_guarantees_show, bfqd->strict_guarantees, 0);
  6561. SHOW_FUNCTION(bfq_low_latency_show, bfqd->low_latency, 0);
  6562. #undef SHOW_FUNCTION
  6563. #define USEC_SHOW_FUNCTION(__FUNC, __VAR) \
  6564. static ssize_t __FUNC(struct elevator_queue *e, char *page) \
  6565. { \
  6566. struct bfq_data *bfqd = e->elevator_data; \
  6567. u64 __data = __VAR; \
  6568. __data = div_u64(__data, NSEC_PER_USEC); \
  6569. return bfq_var_show(__data, (page)); \
  6570. }
  6571. USEC_SHOW_FUNCTION(bfq_slice_idle_us_show, bfqd->bfq_slice_idle);
  6572. #undef USEC_SHOW_FUNCTION
  6573. #define STORE_FUNCTION(__FUNC, __PTR, MIN, MAX, __CONV) \
  6574. static ssize_t \
  6575. __FUNC(struct elevator_queue *e, const char *page, size_t count) \
  6576. { \
  6577. struct bfq_data *bfqd = e->elevator_data; \
  6578. unsigned long __data, __min = (MIN), __max = (MAX); \
  6579. int ret; \
  6580. \
  6581. ret = bfq_var_store(&__data, (page)); \
  6582. if (ret) \
  6583. return ret; \
  6584. if (__data < __min) \
  6585. __data = __min; \
  6586. else if (__data > __max) \
  6587. __data = __max; \
  6588. if (__CONV == 1) \
  6589. *(__PTR) = msecs_to_jiffies(__data); \
  6590. else if (__CONV == 2) \
  6591. *(__PTR) = (u64)__data * NSEC_PER_MSEC; \
  6592. else \
  6593. *(__PTR) = __data; \
  6594. return count; \
  6595. }
  6596. STORE_FUNCTION(bfq_fifo_expire_sync_store, &bfqd->bfq_fifo_expire[1], 1,
  6597. INT_MAX, 2);
  6598. STORE_FUNCTION(bfq_fifo_expire_async_store, &bfqd->bfq_fifo_expire[0], 1,
  6599. INT_MAX, 2);
  6600. STORE_FUNCTION(bfq_back_seek_max_store, &bfqd->bfq_back_max, 0, INT_MAX, 0);
  6601. STORE_FUNCTION(bfq_back_seek_penalty_store, &bfqd->bfq_back_penalty, 1,
  6602. INT_MAX, 0);
  6603. STORE_FUNCTION(bfq_slice_idle_store, &bfqd->bfq_slice_idle, 0, INT_MAX, 2);
  6604. #undef STORE_FUNCTION
  6605. #define USEC_STORE_FUNCTION(__FUNC, __PTR, MIN, MAX) \
  6606. static ssize_t __FUNC(struct elevator_queue *e, const char *page, size_t count)\
  6607. { \
  6608. struct bfq_data *bfqd = e->elevator_data; \
  6609. unsigned long __data, __min = (MIN), __max = (MAX); \
  6610. int ret; \
  6611. \
  6612. ret = bfq_var_store(&__data, (page)); \
  6613. if (ret) \
  6614. return ret; \
  6615. if (__data < __min) \
  6616. __data = __min; \
  6617. else if (__data > __max) \
  6618. __data = __max; \
  6619. *(__PTR) = (u64)__data * NSEC_PER_USEC; \
  6620. return count; \
  6621. }
  6622. USEC_STORE_FUNCTION(bfq_slice_idle_us_store, &bfqd->bfq_slice_idle, 0,
  6623. UINT_MAX);
  6624. #undef USEC_STORE_FUNCTION
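/*
 * Writing 0 to max_budget makes BFQ recompute the budget automatically
 * from the estimated peak rate; any other value (capped to INT_MAX) is
 * used as-is.
 */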
  6625. static ssize_t bfq_max_budget_store(struct elevator_queue *e,
  6626. const char *page, size_t count)
  6627. {
  6628. struct bfq_data *bfqd = e->elevator_data;
  6629. unsigned long __data;
  6630. int ret;
  6631. ret = bfq_var_store(&__data, (page));
  6632. if (ret)
  6633. return ret;
  6634. if (__data == 0)
  6635. bfqd->bfq_max_budget = bfq_calc_max_budget(bfqd);
  6636. else {
  6637. if (__data > INT_MAX)
  6638. __data = INT_MAX;
  6639. bfqd->bfq_max_budget = __data;
  6640. }
  6641. bfqd->bfq_user_max_budget = __data;
  6642. return count;
  6643. }
  6644. /*
6645. * The name is kept for compatibility with cfq parameters, but this
6646. * timeout is used for both sync and async requests.
  6647. */
  6648. static ssize_t bfq_timeout_sync_store(struct elevator_queue *e,
  6649. const char *page, size_t count)
  6650. {
  6651. struct bfq_data *bfqd = e->elevator_data;
  6652. unsigned long __data;
  6653. int ret;
  6654. ret = bfq_var_store(&__data, (page));
  6655. if (ret)
  6656. return ret;
  6657. if (__data < 1)
  6658. __data = 1;
  6659. else if (__data > INT_MAX)
  6660. __data = INT_MAX;
  6661. bfqd->bfq_timeout = msecs_to_jiffies(__data);
  6662. if (bfqd->bfq_user_max_budget == 0)
  6663. bfqd->bfq_max_budget = bfq_calc_max_budget(bfqd);
  6664. return count;
  6665. }
  6666. static ssize_t bfq_strict_guarantees_store(struct elevator_queue *e,
  6667. const char *page, size_t count)
  6668. {
  6669. struct bfq_data *bfqd = e->elevator_data;
  6670. unsigned long __data;
  6671. int ret;
  6672. ret = bfq_var_store(&__data, (page));
  6673. if (ret)
  6674. return ret;
  6675. if (__data > 1)
  6676. __data = 1;
  6677. if (!bfqd->strict_guarantees && __data == 1
  6678. && bfqd->bfq_slice_idle < 8 * NSEC_PER_MSEC)
  6679. bfqd->bfq_slice_idle = 8 * NSEC_PER_MSEC;
  6680. bfqd->strict_guarantees = __data;
  6681. return count;
  6682. }
  6683. static ssize_t bfq_low_latency_store(struct elevator_queue *e,
  6684. const char *page, size_t count)
  6685. {
  6686. struct bfq_data *bfqd = e->elevator_data;
  6687. unsigned long __data;
  6688. int ret;
  6689. ret = bfq_var_store(&__data, (page));
  6690. if (ret)
  6691. return ret;
  6692. if (__data > 1)
  6693. __data = 1;
  6694. if (__data == 0 && bfqd->low_latency != 0)
  6695. bfq_end_wr(bfqd);
  6696. bfqd->low_latency = __data;
  6697. return count;
  6698. }
  6699. #define BFQ_ATTR(name) \
  6700. __ATTR(name, 0644, bfq_##name##_show, bfq_##name##_store)
  6701. static struct elv_fs_entry bfq_attrs[] = {
  6702. BFQ_ATTR(fifo_expire_sync),
  6703. BFQ_ATTR(fifo_expire_async),
  6704. BFQ_ATTR(back_seek_max),
  6705. BFQ_ATTR(back_seek_penalty),
  6706. BFQ_ATTR(slice_idle),
  6707. BFQ_ATTR(slice_idle_us),
  6708. BFQ_ATTR(max_budget),
  6709. BFQ_ATTR(timeout_sync),
  6710. BFQ_ATTR(strict_guarantees),
  6711. BFQ_ATTR(low_latency),
  6712. __ATTR_NULL
  6713. };
  6714. static struct elevator_type iosched_bfq_mq = {
  6715. .ops = {
  6716. .limit_depth = bfq_limit_depth,
  6717. .prepare_request = bfq_prepare_request,
  6718. .requeue_request = bfq_finish_requeue_request,
  6719. .finish_request = bfq_finish_request,
  6720. .exit_icq = bfq_exit_icq,
  6721. .insert_requests = bfq_insert_requests,
  6722. .dispatch_request = bfq_dispatch_request,
  6723. .next_request = elv_rb_latter_request,
  6724. .former_request = elv_rb_former_request,
  6725. .allow_merge = bfq_allow_bio_merge,
  6726. .bio_merge = bfq_bio_merge,
  6727. .request_merge = bfq_request_merge,
  6728. .requests_merged = bfq_requests_merged,
  6729. .request_merged = bfq_request_merged,
  6730. .has_work = bfq_has_work,
  6731. .depth_updated = bfq_depth_updated,
  6732. .init_hctx = bfq_init_hctx,
  6733. .init_sched = bfq_init_queue,
  6734. .exit_sched = bfq_exit_queue,
  6735. },
  6736. .icq_size = sizeof(struct bfq_io_cq),
  6737. .icq_align = __alignof__(struct bfq_io_cq),
  6738. .elevator_attrs = bfq_attrs,
  6739. .elevator_name = "bfq",
  6740. .elevator_owner = THIS_MODULE,
  6741. };
  6742. MODULE_ALIAS("bfq-iosched");
  6743. static int __init bfq_init(void)
  6744. {
  6745. int ret;
  6746. #ifdef CONFIG_BFQ_GROUP_IOSCHED
  6747. ret = blkcg_policy_register(&blkcg_policy_bfq);
  6748. if (ret)
  6749. return ret;
  6750. #endif
  6751. ret = -ENOMEM;
  6752. if (bfq_slab_setup())
  6753. goto err_pol_unreg;
  6754. /*
  6755. * Times to load large popular applications for the typical
  6756. * systems installed on the reference devices (see the
  6757. * comments before the definition of the next
  6758. * array). Actually, we use slightly lower values, as the
  6759. * estimated peak rate tends to be smaller than the actual
  6760. * peak rate. The reason for this last fact is that estimates
  6761. * are computed over much shorter time intervals than the long
  6762. * intervals typically used for benchmarking. Why? First, to
  6763. * adapt more quickly to variations. Second, because an I/O
  6764. * scheduler cannot rely on a peak-rate-evaluation workload to
  6765. * be run for a long time.
  6766. */
  6767. ref_wr_duration[0] = msecs_to_jiffies(7000); /* actually 8 sec */
  6768. ref_wr_duration[1] = msecs_to_jiffies(2500); /* actually 3 sec */
  6769. ret = elv_register(&iosched_bfq_mq);
  6770. if (ret)
  6771. goto slab_kill;
  6772. return 0;
  6773. slab_kill:
  6774. bfq_slab_kill();
  6775. err_pol_unreg:
  6776. #ifdef CONFIG_BFQ_GROUP_IOSCHED
  6777. blkcg_policy_unregister(&blkcg_policy_bfq);
  6778. #endif
  6779. return ret;
  6780. }
  6781. static void __exit bfq_exit(void)
  6782. {
  6783. elv_unregister(&iosched_bfq_mq);
  6784. #ifdef CONFIG_BFQ_GROUP_IOSCHED
  6785. blkcg_policy_unregister(&blkcg_policy_bfq);
  6786. #endif
  6787. bfq_slab_kill();
  6788. }
  6789. module_init(bfq_init);
  6790. module_exit(bfq_exit);
  6791. MODULE_AUTHOR("Paolo Valente");
  6792. MODULE_LICENSE("GPL");
  6793. MODULE_DESCRIPTION("MQ Budget Fair Queueing I/O Scheduler");