nand_base.c 168 KB

12345678910111213141516171819202122232425262728293031323334353637383940414243444546474849505152535455565758596061626364656667686970717273747576777879808182838485868788899091929394959697989910010110210310410510610710810911011111211311411511611711811912012112212312412512612712812913013113213313413513613713813914014114214314414514614714814915015115215315415515615715815916016116216316416516616716816917017117217317417517617717817918018118218318418518618718818919019119219319419519619719819920020120220320420520620720820921021121221321421521621721821922022122222322422522622722822923023123223323423523623723823924024124224324424524624724824925025125225325425525625725825926026126226326426526626726826927027127227327427527627727827928028128228328428528628728828929029129229329429529629729829930030130230330430530630730830931031131231331431531631731831932032132232332432532632732832933033133233333433533633733833934034134234334434534634734834935035135235335435535635735835936036136236336436536636736836937037137237337437537637737837938038138238338438538638738838939039139239339439539639739839940040140240340440540640740840941041141241341441541641741841942042142242342442542642742842943043143243343443543643743843944044144244344444544644744844945045145245345445545645745845946046146246346446546646746846947047147247347447547647747847948048148248348448548648748848949049149249349449549649749849950050150250350450550650750850951051151251351451551651751851952052152252352452552652752852953053153253353453553653753853954054154254354454554654754854955055155255355455555655755855956056156256356456556656756856957057157257357457557657757857958058158258358458558658758858959059159259359459559659759859960060160260360460560660760860961061161261361461561661761861962062162262362462562662762862963063163263363463563663763863964064164264364464564664764864965065165265365465565665765865966066166266366466566666766866967067167267367467567667767867968068168268368468568668768868969069169269369469569669769869970070170270
37047057067077087097107117127137147157167177187197207217227237247257267277287297307317327337347357367377387397407417427437447457467477487497507517527537547557567577587597607617627637647657667677687697707717727737747757767777787797807817827837847857867877887897907917927937947957967977987998008018028038048058068078088098108118128138148158168178188198208218228238248258268278288298308318328338348358368378388398408418428438448458468478488498508518528538548558568578588598608618628638648658668678688698708718728738748758768778788798808818828838848858868878888898908918928938948958968978988999009019029039049059069079089099109119129139149159169179189199209219229239249259269279289299309319329339349359369379389399409419429439449459469479489499509519529539549559569579589599609619629639649659669679689699709719729739749759769779789799809819829839849859869879889899909919929939949959969979989991000100110021003100410051006100710081009101010111012101310141015101610171018101910201021102210231024102510261027102810291030103110321033103410351036103710381039104010411042104310441045104610471048104910501051105210531054105510561057105810591060106110621063106410651066106710681069107010711072107310741075107610771078107910801081108210831084108510861087108810891090109110921093109410951096109710981099110011011102110311041105110611071108110911101111111211131114111511161117111811191120112111221123112411251126112711281129113011311132113311341135113611371138113911401141114211431144114511461147114811491150115111521153115411551156115711581159116011611162116311641165116611671168116911701171117211731174117511761177117811791180118111821183118411851186118711881189119011911192119311941195119611971198119912001201120212031204120512061207120812091210121112121213121412151216121712181219122012211222122312241225122612271228122912301231123212331234123512361237123812391240124112421243124412451246124712481249125012511252125312541255125612571258125912601261126212631264126512661267126812691270127112721273127412751276127
71278127912801281128212831284128512861287128812891290129112921293129412951296129712981299130013011302130313041305130613071308130913101311131213131314131513161317131813191320132113221323132413251326132713281329133013311332133313341335133613371338133913401341134213431344134513461347134813491350135113521353135413551356135713581359136013611362136313641365136613671368136913701371137213731374137513761377137813791380138113821383138413851386138713881389139013911392139313941395139613971398139914001401140214031404140514061407140814091410141114121413141414151416141714181419142014211422142314241425142614271428142914301431143214331434143514361437143814391440144114421443144414451446144714481449145014511452145314541455145614571458145914601461146214631464146514661467146814691470147114721473147414751476147714781479148014811482148314841485148614871488148914901491149214931494149514961497149814991500150115021503150415051506150715081509151015111512151315141515151615171518151915201521152215231524152515261527152815291530153115321533153415351536153715381539154015411542154315441545154615471548154915501551155215531554155515561557155815591560156115621563156415651566156715681569157015711572157315741575157615771578157915801581158215831584158515861587158815891590159115921593159415951596159715981599160016011602160316041605160616071608160916101611161216131614161516161617161816191620162116221623162416251626162716281629163016311632163316341635163616371638163916401641164216431644164516461647164816491650165116521653165416551656165716581659166016611662166316641665166616671668166916701671167216731674167516761677167816791680168116821683168416851686168716881689169016911692169316941695169616971698169917001701170217031704170517061707170817091710171117121713171417151716171717181719172017211722172317241725172617271728172917301731173217331734173517361737173817391740174117421743174417451746174717481749175017511752175317541755175617571758175917601761176217631764176517661767176817691770177117721773177417751776177
71778177917801781178217831784178517861787178817891790179117921793179417951796179717981799180018011802180318041805180618071808180918101811181218131814181518161817181818191820182118221823182418251826182718281829183018311832183318341835183618371838183918401841184218431844184518461847184818491850185118521853185418551856185718581859186018611862186318641865186618671868186918701871187218731874187518761877187818791880188118821883188418851886188718881889189018911892189318941895189618971898189919001901190219031904190519061907190819091910191119121913191419151916191719181919192019211922192319241925192619271928192919301931193219331934193519361937193819391940194119421943194419451946194719481949195019511952195319541955195619571958195919601961196219631964196519661967196819691970197119721973197419751976197719781979198019811982198319841985198619871988198919901991199219931994199519961997199819992000200120022003200420052006200720082009201020112012201320142015201620172018201920202021202220232024202520262027202820292030203120322033203420352036203720382039204020412042204320442045204620472048204920502051205220532054205520562057205820592060206120622063206420652066206720682069207020712072207320742075207620772078207920802081208220832084208520862087208820892090209120922093209420952096209720982099210021012102210321042105210621072108210921102111211221132114211521162117211821192120212121222123212421252126212721282129213021312132213321342135213621372138213921402141214221432144214521462147214821492150215121522153215421552156215721582159216021612162216321642165216621672168216921702171217221732174217521762177217821792180218121822183218421852186218721882189219021912192219321942195219621972198219922002201220222032204220522062207220822092210221122122213221422152216221722182219222022212222222322242225222622272228222922302231223222332234223522362237223822392240224122422243224422452246224722482249225022512252225322542255225622572258225922602261226222632264226522662267226822692270227122722273227422752276227
72278227922802281228222832284228522862287228822892290229122922293229422952296229722982299230023012302230323042305230623072308230923102311231223132314231523162317231823192320232123222323232423252326232723282329233023312332233323342335233623372338233923402341234223432344234523462347234823492350235123522353235423552356235723582359236023612362236323642365236623672368236923702371237223732374237523762377237823792380238123822383238423852386238723882389239023912392239323942395239623972398239924002401240224032404240524062407240824092410241124122413241424152416241724182419242024212422242324242425242624272428242924302431243224332434243524362437243824392440244124422443244424452446244724482449245024512452245324542455245624572458245924602461246224632464246524662467246824692470247124722473247424752476247724782479248024812482248324842485248624872488248924902491249224932494249524962497249824992500250125022503250425052506250725082509251025112512251325142515251625172518251925202521252225232524252525262527252825292530253125322533253425352536253725382539254025412542254325442545254625472548254925502551255225532554255525562557255825592560256125622563256425652566256725682569257025712572257325742575257625772578257925802581258225832584258525862587258825892590259125922593259425952596259725982599260026012602260326042605260626072608260926102611261226132614261526162617261826192620262126222623262426252626262726282629263026312632263326342635263626372638263926402641264226432644264526462647264826492650265126522653265426552656265726582659266026612662266326642665266626672668266926702671267226732674267526762677267826792680268126822683268426852686268726882689269026912692269326942695269626972698269927002701270227032704270527062707270827092710271127122713271427152716271727182719272027212722272327242725272627272728272927302731273227332734273527362737273827392740274127422743274427452746274727482749275027512752275327542755275627572758275927602761276227632764276527662767276827692770277127722773277427752776277
72778277927802781278227832784278527862787278827892790279127922793279427952796279727982799280028012802280328042805280628072808280928102811281228132814281528162817281828192820282128222823282428252826282728282829283028312832283328342835283628372838283928402841284228432844284528462847284828492850285128522853285428552856285728582859286028612862286328642865286628672868286928702871287228732874287528762877287828792880288128822883288428852886288728882889289028912892289328942895289628972898289929002901290229032904290529062907290829092910291129122913291429152916291729182919292029212922292329242925292629272928292929302931293229332934293529362937293829392940294129422943294429452946294729482949295029512952295329542955295629572958295929602961296229632964296529662967296829692970297129722973297429752976297729782979298029812982298329842985298629872988298929902991299229932994299529962997299829993000300130023003300430053006300730083009301030113012301330143015301630173018301930203021302230233024302530263027302830293030303130323033303430353036303730383039304030413042304330443045304630473048304930503051305230533054305530563057305830593060306130623063306430653066306730683069307030713072307330743075307630773078307930803081308230833084308530863087308830893090309130923093309430953096309730983099310031013102310331043105310631073108310931103111311231133114311531163117311831193120312131223123312431253126312731283129313031313132313331343135313631373138313931403141314231433144314531463147314831493150315131523153315431553156315731583159316031613162316331643165316631673168316931703171317231733174317531763177317831793180318131823183318431853186318731883189319031913192319331943195319631973198319932003201320232033204320532063207320832093210321132123213321432153216321732183219322032213222322332243225322632273228322932303231323232333234323532363237323832393240324132423243324432453246324732483249325032513252325332543255325632573258325932603261326232633264326532663267326832693270327132723273327432753276327
73278327932803281328232833284328532863287328832893290329132923293329432953296329732983299330033013302330333043305330633073308330933103311331233133314331533163317331833193320332133223323332433253326332733283329333033313332333333343335333633373338333933403341334233433344334533463347334833493350335133523353335433553356335733583359336033613362336333643365336633673368336933703371337233733374337533763377337833793380338133823383338433853386338733883389339033913392339333943395339633973398339934003401340234033404340534063407340834093410341134123413341434153416341734183419342034213422342334243425342634273428342934303431343234333434343534363437343834393440344134423443344434453446344734483449345034513452345334543455345634573458345934603461346234633464346534663467346834693470347134723473347434753476347734783479348034813482348334843485348634873488348934903491349234933494349534963497349834993500350135023503350435053506350735083509351035113512351335143515351635173518351935203521352235233524352535263527352835293530353135323533353435353536353735383539354035413542354335443545354635473548354935503551355235533554355535563557355835593560356135623563356435653566356735683569357035713572357335743575357635773578357935803581358235833584358535863587358835893590359135923593359435953596359735983599360036013602360336043605360636073608360936103611361236133614361536163617361836193620362136223623362436253626362736283629363036313632363336343635363636373638363936403641364236433644364536463647364836493650365136523653365436553656365736583659366036613662366336643665366636673668366936703671367236733674367536763677367836793680368136823683368436853686368736883689369036913692369336943695369636973698369937003701370237033704370537063707370837093710371137123713371437153716371737183719372037213722372337243725372637273728372937303731373237333734373537363737373837393740374137423743374437453746374737483749375037513752375337543755375637573758375937603761376237633764376537663767376837693770377137723773377437753776377
73778377937803781378237833784378537863787378837893790379137923793379437953796379737983799380038013802380338043805380638073808380938103811381238133814381538163817381838193820382138223823382438253826382738283829383038313832383338343835383638373838383938403841384238433844384538463847384838493850385138523853385438553856385738583859386038613862386338643865386638673868386938703871387238733874387538763877387838793880388138823883388438853886388738883889389038913892389338943895389638973898389939003901390239033904390539063907390839093910391139123913391439153916391739183919392039213922392339243925392639273928392939303931393239333934393539363937393839393940394139423943394439453946394739483949395039513952395339543955395639573958395939603961396239633964396539663967396839693970397139723973397439753976397739783979398039813982398339843985398639873988398939903991399239933994399539963997399839994000400140024003400440054006400740084009401040114012401340144015401640174018401940204021402240234024402540264027402840294030403140324033403440354036403740384039404040414042404340444045404640474048404940504051405240534054405540564057405840594060406140624063406440654066406740684069407040714072407340744075407640774078407940804081408240834084408540864087408840894090409140924093409440954096409740984099410041014102410341044105410641074108410941104111411241134114411541164117411841194120412141224123412441254126412741284129413041314132413341344135413641374138413941404141414241434144414541464147414841494150415141524153415441554156415741584159416041614162416341644165416641674168416941704171417241734174417541764177417841794180418141824183418441854186418741884189419041914192419341944195419641974198419942004201420242034204420542064207420842094210421142124213421442154216421742184219422042214222422342244225422642274228422942304231423242334234423542364237423842394240424142424243424442454246424742484249425042514252425342544255425642574258425942604261426242634264426542664267426842694270427142724273427442754276427
74278427942804281428242834284428542864287428842894290429142924293429442954296429742984299430043014302430343044305430643074308430943104311431243134314431543164317431843194320432143224323432443254326432743284329433043314332433343344335433643374338433943404341434243434344434543464347434843494350435143524353435443554356435743584359436043614362436343644365436643674368436943704371437243734374437543764377437843794380438143824383438443854386438743884389439043914392439343944395439643974398439944004401440244034404440544064407440844094410441144124413441444154416441744184419442044214422442344244425442644274428442944304431443244334434443544364437443844394440444144424443444444454446444744484449445044514452445344544455445644574458445944604461446244634464446544664467446844694470447144724473447444754476447744784479448044814482448344844485448644874488448944904491449244934494449544964497449844994500450145024503450445054506450745084509451045114512451345144515451645174518451945204521452245234524452545264527452845294530453145324533453445354536453745384539454045414542454345444545454645474548454945504551455245534554455545564557455845594560456145624563456445654566456745684569457045714572457345744575457645774578457945804581458245834584458545864587458845894590459145924593459445954596459745984599460046014602460346044605460646074608460946104611461246134614461546164617461846194620462146224623462446254626462746284629463046314632463346344635463646374638463946404641464246434644464546464647464846494650465146524653465446554656465746584659466046614662466346644665466646674668466946704671467246734674467546764677467846794680468146824683468446854686468746884689469046914692469346944695469646974698469947004701470247034704470547064707470847094710471147124713471447154716471747184719472047214722472347244725472647274728472947304731473247334734473547364737473847394740474147424743474447454746474747484749475047514752475347544755475647574758475947604761476247634764476547664767476847694770477147724773477447754776477
74778477947804781478247834784478547864787478847894790479147924793479447954796479747984799480048014802480348044805480648074808480948104811481248134814481548164817481848194820482148224823482448254826482748284829483048314832483348344835483648374838483948404841484248434844484548464847484848494850485148524853485448554856485748584859486048614862486348644865486648674868486948704871487248734874487548764877487848794880488148824883488448854886488748884889489048914892489348944895489648974898489949004901490249034904490549064907490849094910491149124913491449154916491749184919492049214922492349244925492649274928492949304931493249334934493549364937493849394940494149424943494449454946494749484949495049514952495349544955495649574958495949604961496249634964496549664967496849694970497149724973497449754976497749784979498049814982498349844985498649874988498949904991499249934994499549964997499849995000500150025003500450055006500750085009501050115012501350145015501650175018501950205021502250235024502550265027502850295030503150325033503450355036503750385039504050415042504350445045504650475048504950505051505250535054505550565057505850595060506150625063506450655066506750685069507050715072507350745075507650775078507950805081508250835084508550865087508850895090509150925093509450955096509750985099510051015102510351045105510651075108510951105111511251135114511551165117511851195120512151225123512451255126512751285129513051315132513351345135513651375138513951405141514251435144514551465147514851495150515151525153515451555156515751585159516051615162516351645165516651675168516951705171517251735174517551765177517851795180518151825183518451855186518751885189519051915192519351945195519651975198519952005201520252035204520552065207520852095210521152125213521452155216521752185219522052215222522352245225522652275228522952305231523252335234523552365237523852395240524152425243524452455246524752485249525052515252525352545255525652575258525952605261526252635264526552665267526852695270527152725273527452755276527
75278527952805281528252835284528552865287528852895290529152925293529452955296529752985299530053015302530353045305530653075308530953105311531253135314531553165317531853195320532153225323532453255326532753285329533053315332533353345335533653375338533953405341534253435344534553465347534853495350535153525353535453555356535753585359536053615362536353645365536653675368536953705371537253735374537553765377537853795380538153825383538453855386538753885389539053915392539353945395539653975398539954005401540254035404540554065407540854095410541154125413541454155416541754185419542054215422542354245425542654275428542954305431543254335434543554365437543854395440544154425443544454455446544754485449545054515452545354545455545654575458545954605461546254635464546554665467546854695470547154725473547454755476547754785479548054815482548354845485548654875488548954905491549254935494549554965497549854995500550155025503550455055506550755085509551055115512551355145515551655175518551955205521552255235524552555265527552855295530553155325533553455355536553755385539554055415542554355445545554655475548554955505551555255535554555555565557555855595560556155625563556455655566556755685569557055715572557355745575557655775578557955805581558255835584558555865587558855895590559155925593559455955596559755985599560056015602560356045605560656075608560956105611561256135614561556165617561856195620562156225623562456255626562756285629563056315632563356345635563656375638563956405641564256435644564556465647564856495650565156525653565456555656565756585659566056615662566356645665566656675668566956705671567256735674567556765677567856795680568156825683568456855686568756885689569056915692569356945695569656975698569957005701570257035704570557065707570857095710571157125713571457155716571757185719572057215722572357245725572657275728572957305731573257335734573557365737573857395740574157425743574457455746574757485749575057515752575357545755575657575758575957605761576257635764576557665767576857695770577157725773577457755776577
75778577957805781578257835784578557865787578857895790579157925793579457955796579757985799580058015802580358045805580658075808580958105811581258135814581558165817581858195820582158225823582458255826582758285829583058315832583358345835583658375838583958405841584258435844584558465847584858495850585158525853585458555856585758585859586058615862586358645865586658675868586958705871587258735874587558765877587858795880588158825883588458855886588758885889589058915892589358945895589658975898589959005901590259035904590559065907590859095910591159125913591459155916591759185919592059215922592359245925592659275928592959305931593259335934593559365937593859395940594159425943594459455946594759485949595059515952595359545955595659575958595959605961596259635964596559665967596859695970597159725973597459755976597759785979598059815982598359845985598659875988598959905991599259935994599559965997599859996000600160026003600460056006600760086009601060116012601360146015601660176018601960206021602260236024602560266027602860296030603160326033603460356036603760386039604060416042604360446045604660476048604960506051605260536054605560566057605860596060606160626063606460656066606760686069607060716072607360746075607660776078607960806081608260836084608560866087608860896090609160926093609460956096609760986099610061016102610361046105610661076108610961106111611261136114611561166117611861196120612161226123612461256126612761286129613061316132613361346135613661376138613961406141614261436144614561466147614861496150615161526153615461556156615761586159616061616162616361646165616661676168616961706171617261736174617561766177617861796180618161826183618461856186618761886189619061916192619361946195619661976198619962006201620262036204620562066207620862096210621162126213621462156216621762186219622062216222622362246225622662276228622962306231623262336234623562366237623862396240624162426243624462456246624762486249625062516252625362546255625662576258625962606261626262636264626562666267626862696270627162726273627462756276627
762786279628062816282628362846285628662876288628962906291629262936294629562966297629862996300630163026303630463056306630763086309631063116312631363146315631663176318631963206321632263236324632563266327632863296330633163326333633463356336633763386339634063416342634363446345634663476348634963506351635263536354635563566357635863596360636163626363636463656366636763686369637063716372637363746375637663776378637963806381638263836384638563866387638863896390639163926393639463956396639763986399640064016402640364046405640664076408640964106411641264136414641564166417641864196420642164226423642464256426642764286429643064316432643364346435643664376438643964406441644264436444644564466447644864496450645164526453645464556456645764586459646064616462646364646465646664676468646964706471
  1. // SPDX-License-Identifier: GPL-2.0-only
  2. /*
  3. * Overview:
  4. * This is the generic MTD driver for NAND flash devices. It should be
  5. * capable of working with almost all NAND chips currently available.
  6. *
  7. * Additional technical information is available on
  8. * http://www.linux-mtd.infradead.org/doc/nand.html
  9. *
  10. * Copyright (C) 2000 Steven J. Hill ([email protected])
  11. * 2002-2006 Thomas Gleixner ([email protected])
  12. *
  13. * Credits:
  14. * David Woodhouse for adding multichip support
  15. *
  16. * Aleph One Ltd. and Toby Churchill Ltd. for supporting the
  17. * rework for 2K page size chips
  18. *
  19. * TODO:
  20. * Enable cached programming for 2k page size chips
  21. * Check, if mtd->ecctype should be set to MTD_ECC_HW
  22. * if we have HW ECC support.
  23. * BBT table is not serialized, has to be fixed
  24. */
  25. #define pr_fmt(fmt) KBUILD_MODNAME ": " fmt
  26. #include <linux/module.h>
  27. #include <linux/delay.h>
  28. #include <linux/errno.h>
  29. #include <linux/err.h>
  30. #include <linux/sched.h>
  31. #include <linux/slab.h>
  32. #include <linux/mm.h>
  33. #include <linux/types.h>
  34. #include <linux/mtd/mtd.h>
  35. #include <linux/mtd/nand.h>
  36. #include <linux/mtd/nand-ecc-sw-hamming.h>
  37. #include <linux/mtd/nand-ecc-sw-bch.h>
  38. #include <linux/interrupt.h>
  39. #include <linux/bitops.h>
  40. #include <linux/io.h>
  41. #include <linux/mtd/partitions.h>
  42. #include <linux/of.h>
  43. #include <linux/of_gpio.h>
  44. #include <linux/gpio/consumer.h>
  45. #include "internals.h"
  46. static int nand_pairing_dist3_get_info(struct mtd_info *mtd, int page,
  47. struct mtd_pairing_info *info)
  48. {
  49. int lastpage = (mtd->erasesize / mtd->writesize) - 1;
  50. int dist = 3;
  51. if (page == lastpage)
  52. dist = 2;
  53. if (!page || (page & 1)) {
  54. info->group = 0;
  55. info->pair = (page + 1) / 2;
  56. } else {
  57. info->group = 1;
  58. info->pair = (page + 1 - dist) / 2;
  59. }
  60. return 0;
  61. }
  62. static int nand_pairing_dist3_get_wunit(struct mtd_info *mtd,
  63. const struct mtd_pairing_info *info)
  64. {
  65. int lastpair = ((mtd->erasesize / mtd->writesize) - 1) / 2;
  66. int page = info->pair * 2;
  67. int dist = 3;
  68. if (!info->group && !info->pair)
  69. return 0;
  70. if (info->pair == lastpair && info->group)
  71. dist = 2;
  72. if (!info->group)
  73. page--;
  74. else if (info->pair)
  75. page += dist - 1;
  76. if (page >= mtd->erasesize / mtd->writesize)
  77. return -EINVAL;
  78. return page;
  79. }
/*
 * Distance-3 pairing scheme: two groups per pair, with the page<->pair
 * mapping implemented by the dist3 helpers above.
 */
const struct mtd_pairing_scheme dist3_pairing_scheme = {
	.ngroups = 2,
	.get_info = nand_pairing_dist3_get_info,
	.get_wunit = nand_pairing_dist3_get_wunit,
};
  85. static int check_offs_len(struct nand_chip *chip, loff_t ofs, uint64_t len)
  86. {
  87. int ret = 0;
  88. /* Start address must align on block boundary */
  89. if (ofs & ((1ULL << chip->phys_erase_shift) - 1)) {
  90. pr_debug("%s: unaligned address\n", __func__);
  91. ret = -EINVAL;
  92. }
  93. /* Length must align on block boundary */
  94. if (len & ((1ULL << chip->phys_erase_shift) - 1)) {
  95. pr_debug("%s: length not block aligned\n", __func__);
  96. ret = -EINVAL;
  97. }
  98. return ret;
  99. }
  100. /**
  101. * nand_extract_bits - Copy unaligned bits from one buffer to another one
  102. * @dst: destination buffer
  103. * @dst_off: bit offset at which the writing starts
  104. * @src: source buffer
  105. * @src_off: bit offset at which the reading starts
  106. * @nbits: number of bits to copy from @src to @dst
  107. *
  108. * Copy bits from one memory region to another (overlap authorized).
  109. */
  110. void nand_extract_bits(u8 *dst, unsigned int dst_off, const u8 *src,
  111. unsigned int src_off, unsigned int nbits)
  112. {
  113. unsigned int tmp, n;
  114. dst += dst_off / 8;
  115. dst_off %= 8;
  116. src += src_off / 8;
  117. src_off %= 8;
  118. while (nbits) {
  119. n = min3(8 - dst_off, 8 - src_off, nbits);
  120. tmp = (*src >> src_off) & GENMASK(n - 1, 0);
  121. *dst &= ~GENMASK(n - 1 + dst_off, dst_off);
  122. *dst |= tmp << dst_off;
  123. dst_off += n;
  124. if (dst_off >= 8) {
  125. dst++;
  126. dst_off -= 8;
  127. }
  128. src_off += n;
  129. if (src_off >= 8) {
  130. src++;
  131. src_off -= 8;
  132. }
  133. nbits -= n;
  134. }
  135. }
  136. EXPORT_SYMBOL_GPL(nand_extract_bits);
/**
 * nand_select_target() - Select a NAND target (A.K.A. die)
 * @chip: NAND chip object
 * @cs: the CS line to select. Note that this CS id is always from the chip
 * PoV, not the controller one
 *
 * Select a NAND target so that further operations executed on @chip go to the
 * selected NAND target.
 */
void nand_select_target(struct nand_chip *chip, unsigned int cs)
{
	/*
	 * cs should always lie between 0 and nanddev_ntargets(), when that's
	 * not the case it's a bug and the caller should be fixed.
	 *
	 * NOTE(review): the guard uses '>' so cs == nanddev_ntargets() slips
	 * through even though valid ids look like 0..ntargets-1 — possible
	 * off-by-one; confirm against callers before tightening to '>='.
	 */
	if (WARN_ON(cs > nanddev_ntargets(&chip->base)))
		return;

	chip->cur_cs = cs;

	/* Legacy drivers assert the CS line through their own hook. */
	if (chip->legacy.select_chip)
		chip->legacy.select_chip(chip, cs);
}
EXPORT_SYMBOL_GPL(nand_select_target);
/**
 * nand_deselect_target() - Deselect the currently selected target
 * @chip: NAND chip object
 *
 * Deselect the currently selected NAND target. The result of operations
 * executed on @chip after the target has been deselected is undefined.
 */
void nand_deselect_target(struct nand_chip *chip)
{
	/* Let legacy drivers release the CS line (-1 means none)... */
	if (chip->legacy.select_chip)
		chip->legacy.select_chip(chip, -1);

	/* ...then record that no die is selected anymore. */
	chip->cur_cs = -1;
}
EXPORT_SYMBOL_GPL(nand_deselect_target);
/**
 * nand_release_device - [GENERIC] release chip
 * @chip: NAND chip object
 *
 * Release chip lock and wake up anyone waiting on the device.
 */
static void nand_release_device(struct nand_chip *chip)
{
	/*
	 * Release the controller and the chip. Controller lock is dropped
	 * first — presumably the reverse of the acquisition order in the
	 * matching get/lock helper; confirm against that helper.
	 */
	mutex_unlock(&chip->controller->lock);
	mutex_unlock(&chip->lock);
}
  185. /**
  186. * nand_bbm_get_next_page - Get the next page for bad block markers
  187. * @chip: NAND chip object
  188. * @page: First page to start checking for bad block marker usage
  189. *
  190. * Returns an integer that corresponds to the page offset within a block, for
  191. * a page that is used to store bad block markers. If no more pages are
  192. * available, -EINVAL is returned.
  193. */
  194. int nand_bbm_get_next_page(struct nand_chip *chip, int page)
  195. {
  196. struct mtd_info *mtd = nand_to_mtd(chip);
  197. int last_page = ((mtd->erasesize - mtd->writesize) >>
  198. chip->page_shift) & chip->pagemask;
  199. unsigned int bbm_flags = NAND_BBM_FIRSTPAGE | NAND_BBM_SECONDPAGE
  200. | NAND_BBM_LASTPAGE;
  201. if (page == 0 && !(chip->options & bbm_flags))
  202. return 0;
  203. if (page == 0 && chip->options & NAND_BBM_FIRSTPAGE)
  204. return 0;
  205. if (page <= 1 && chip->options & NAND_BBM_SECONDPAGE)
  206. return 1;
  207. if (page <= last_page && chip->options & NAND_BBM_LASTPAGE)
  208. return last_page;
  209. return -EINVAL;
  210. }
  211. /**
  212. * nand_block_bad - [DEFAULT] Read bad block marker from the chip
  213. * @chip: NAND chip object
  214. * @ofs: offset from device start
  215. *
  216. * Check, if the block is bad.
  217. */
  218. static int nand_block_bad(struct nand_chip *chip, loff_t ofs)
  219. {
  220. int first_page, page_offset;
  221. int res;
  222. u8 bad;
  223. first_page = (int)(ofs >> chip->page_shift) & chip->pagemask;
  224. page_offset = nand_bbm_get_next_page(chip, 0);
  225. while (page_offset >= 0) {
  226. res = chip->ecc.read_oob(chip, first_page + page_offset);
  227. if (res < 0)
  228. return res;
  229. bad = chip->oob_poi[chip->badblockpos];
  230. if (likely(chip->badblockbits == 8))
  231. res = bad != 0xFF;
  232. else
  233. res = hweight8(bad) < chip->badblockbits;
  234. if (res)
  235. return res;
  236. page_offset = nand_bbm_get_next_page(chip, page_offset + 1);
  237. }
  238. return 0;
  239. }
  240. /**
  241. * nand_region_is_secured() - Check if the region is secured
  242. * @chip: NAND chip object
  243. * @offset: Offset of the region to check
  244. * @size: Size of the region to check
  245. *
  246. * Checks if the region is secured by comparing the offset and size with the
  247. * list of secure regions obtained from DT. Returns true if the region is
  248. * secured else false.
  249. */
  250. static bool nand_region_is_secured(struct nand_chip *chip, loff_t offset, u64 size)
  251. {
  252. int i;
  253. /* Skip touching the secure regions if present */
  254. for (i = 0; i < chip->nr_secure_regions; i++) {
  255. const struct nand_secure_region *region = &chip->secure_regions[i];
  256. if (offset + size <= region->offset ||
  257. offset >= region->offset + region->size)
  258. continue;
  259. pr_debug("%s: Region 0x%llx - 0x%llx is secured!",
  260. __func__, offset, offset + size);
  261. return true;
  262. }
  263. return false;
  264. }
  265. static int nand_isbad_bbm(struct nand_chip *chip, loff_t ofs)
  266. {
  267. struct mtd_info *mtd = nand_to_mtd(chip);
  268. if (chip->options & NAND_NO_BBM_QUIRK)
  269. return 0;
  270. /* Check if the region is secured */
  271. if (nand_region_is_secured(chip, ofs, mtd->erasesize))
  272. return -EIO;
  273. if (mtd_check_expert_analysis_mode())
  274. return 0;
  275. if (chip->legacy.block_bad)
  276. return chip->legacy.block_bad(chip, ofs);
  277. return nand_block_bad(chip, ofs);
  278. }
/**
 * nand_get_device - [GENERIC] Get chip for selected access
 * @chip: NAND chip structure
 *
 * Lock the device and its controller for exclusive access
 */
static void nand_get_device(struct nand_chip *chip)
{
	/* Wait until the device is resumed. */
	while (1) {
		mutex_lock(&chip->lock);
		if (!chip->suspended) {
			/*
			 * Take the controller lock while still holding the
			 * chip lock; both are held on return and released by
			 * nand_release_device().
			 */
			mutex_lock(&chip->controller->lock);
			return;
		}
		/*
		 * Chip is suspended: drop the lock so the resume path can
		 * take it, sleep until resumed, then re-check from the top.
		 */
		mutex_unlock(&chip->lock);
		wait_event(chip->resume_wq, !chip->suspended);
	}
}
  298. /**
  299. * nand_check_wp - [GENERIC] check if the chip is write protected
  300. * @chip: NAND chip object
  301. *
  302. * Check, if the device is write protected. The function expects, that the
  303. * device is already selected.
  304. */
  305. static int nand_check_wp(struct nand_chip *chip)
  306. {
  307. u8 status;
  308. int ret;
  309. /* Broken xD cards report WP despite being writable */
  310. if (chip->options & NAND_BROKEN_XD)
  311. return 0;
  312. /* Check the WP bit */
  313. ret = nand_status_op(chip, &status);
  314. if (ret)
  315. return ret;
  316. return status & NAND_STATUS_WP ? 0 : 1;
  317. }
  318. /**
  319. * nand_fill_oob - [INTERN] Transfer client buffer to oob
  320. * @chip: NAND chip object
  321. * @oob: oob data buffer
  322. * @len: oob data write length
  323. * @ops: oob ops structure
  324. */
  325. static uint8_t *nand_fill_oob(struct nand_chip *chip, uint8_t *oob, size_t len,
  326. struct mtd_oob_ops *ops)
  327. {
  328. struct mtd_info *mtd = nand_to_mtd(chip);
  329. int ret;
  330. /*
  331. * Initialise to all 0xFF, to avoid the possibility of left over OOB
  332. * data from a previous OOB read.
  333. */
  334. memset(chip->oob_poi, 0xff, mtd->oobsize);
  335. switch (ops->mode) {
  336. case MTD_OPS_PLACE_OOB:
  337. case MTD_OPS_RAW:
  338. memcpy(chip->oob_poi + ops->ooboffs, oob, len);
  339. return oob + len;
  340. case MTD_OPS_AUTO_OOB:
  341. ret = mtd_ooblayout_set_databytes(mtd, oob, chip->oob_poi,
  342. ops->ooboffs, len);
  343. BUG_ON(ret);
  344. return oob + len;
  345. default:
  346. BUG();
  347. }
  348. return NULL;
  349. }
/**
 * nand_do_write_oob - [MTD Interface] NAND write out-of-band
 * @chip: NAND chip object
 * @to: offset to write to
 * @ops: oob operation description structure
 *
 * NAND write out-of-band.
 *
 * Returns 0 on success, a negative error code otherwise.
 */
static int nand_do_write_oob(struct nand_chip *chip, loff_t to,
			     struct mtd_oob_ops *ops)
{
	struct mtd_info *mtd = nand_to_mtd(chip);
	int chipnr, page, status, len, ret;

	pr_debug("%s: to = 0x%08x, len = %i\n",
		 __func__, (unsigned int)to, (int)ops->ooblen);

	len = mtd_oobavail(mtd, ops);

	/* Do not allow write past end of page */
	if ((ops->ooboffs + ops->ooblen) > len) {
		pr_debug("%s: attempt to write past end of page\n",
			 __func__);
		return -EINVAL;
	}

	/* Check if the region is secured */
	if (nand_region_is_secured(chip, to, ops->ooblen))
		return -EIO;

	chipnr = (int)(to >> chip->chip_shift);

	/*
	 * Reset the chip. Some chips (like the Toshiba TC5832DC found in one
	 * of my DiskOnChip 2000 test units) will clear the whole data page too
	 * if we don't do this. I have no clue why, but I seem to have 'fixed'
	 * it in the doc2000 driver in August 1999. dwmw2.
	 */
	ret = nand_reset(chip, chipnr);
	if (ret)
		return ret;

	nand_select_target(chip, chipnr);

	/* Shift to get page */
	page = (int)(to >> chip->page_shift);

	/* Check, if it is write protected */
	if (nand_check_wp(chip)) {
		nand_deselect_target(chip);
		return -EROFS;
	}

	/* Invalidate the page cache, if we write to the cached page */
	if (page == chip->pagecache.page)
		chip->pagecache.page = -1;

	/* Prepare chip->oob_poi from the client buffer */
	nand_fill_oob(chip, ops->oobbuf, ops->ooblen, ops);

	if (ops->mode == MTD_OPS_RAW)
		status = chip->ecc.write_oob_raw(chip, page & chip->pagemask);
	else
		status = chip->ecc.write_oob(chip, page & chip->pagemask);

	nand_deselect_target(chip);

	if (status)
		return status;

	ops->oobretlen = ops->ooblen;

	return 0;
}
  407. /**
  408. * nand_default_block_markbad - [DEFAULT] mark a block bad via bad block marker
  409. * @chip: NAND chip object
  410. * @ofs: offset from device start
  411. *
  412. * This is the default implementation, which can be overridden by a hardware
  413. * specific driver. It provides the details for writing a bad block marker to a
  414. * block.
  415. */
  416. static int nand_default_block_markbad(struct nand_chip *chip, loff_t ofs)
  417. {
  418. struct mtd_info *mtd = nand_to_mtd(chip);
  419. struct mtd_oob_ops ops;
  420. uint8_t buf[2] = { 0, 0 };
  421. int ret = 0, res, page_offset;
  422. memset(&ops, 0, sizeof(ops));
  423. ops.oobbuf = buf;
  424. ops.ooboffs = chip->badblockpos;
  425. if (chip->options & NAND_BUSWIDTH_16) {
  426. ops.ooboffs &= ~0x01;
  427. ops.len = ops.ooblen = 2;
  428. } else {
  429. ops.len = ops.ooblen = 1;
  430. }
  431. ops.mode = MTD_OPS_PLACE_OOB;
  432. page_offset = nand_bbm_get_next_page(chip, 0);
  433. while (page_offset >= 0) {
  434. res = nand_do_write_oob(chip,
  435. ofs + (page_offset * mtd->writesize),
  436. &ops);
  437. if (!ret)
  438. ret = res;
  439. page_offset = nand_bbm_get_next_page(chip, page_offset + 1);
  440. }
  441. return ret;
  442. }
  443. /**
  444. * nand_markbad_bbm - mark a block by updating the BBM
  445. * @chip: NAND chip object
  446. * @ofs: offset of the block to mark bad
  447. */
  448. int nand_markbad_bbm(struct nand_chip *chip, loff_t ofs)
  449. {
  450. if (chip->legacy.block_markbad)
  451. return chip->legacy.block_markbad(chip, ofs);
  452. return nand_default_block_markbad(chip, ofs);
  453. }
/**
 * nand_block_markbad_lowlevel - mark a block bad
 * @chip: NAND chip object
 * @ofs: offset from device start
 *
 * This function performs the generic NAND bad block marking steps (i.e., bad
 * block table(s) and/or marker(s)). We only allow the hardware driver to
 * specify how to write bad block markers to OOB (chip->legacy.block_markbad).
 *
 * We try operations in the following order:
 *
 * (1) erase the affected block, to allow OOB marker to be written cleanly
 * (2) write bad block marker to OOB area of affected block (unless flag
 * NAND_BBT_NO_OOB_BBM is present)
 * (3) update the BBT
 *
 * Note that we retain the first error encountered in (2) or (3), finish the
 * procedures, and dump the error in the end.
 */
static int nand_block_markbad_lowlevel(struct nand_chip *chip, loff_t ofs)
{
	struct mtd_info *mtd = nand_to_mtd(chip);
	int res, ret = 0;

	if (!(chip->bbt_options & NAND_BBT_NO_OOB_BBM)) {
		struct erase_info einfo;

		/* Attempt erase before marking OOB */
		memset(&einfo, 0, sizeof(einfo));
		einfo.addr = ofs;
		einfo.len = 1ULL << chip->phys_erase_shift;
		/* Erase result is deliberately ignored: marking proceeds anyway */
		nand_erase_nand(chip, &einfo, 0);

		/* Write bad block marker to OOB */
		nand_get_device(chip);
		ret = nand_markbad_bbm(chip, ofs);
		nand_release_device(chip);
	}

	/* Mark block bad in BBT */
	if (chip->bbt) {
		res = nand_markbad_bbt(chip, ofs);
		/* Keep the first error (from the OOB write), if any */
		if (!ret)
			ret = res;
	}

	if (!ret)
		mtd->ecc_stats.badblocks++;

	return ret;
}
  499. /**
  500. * nand_block_isreserved - [GENERIC] Check if a block is marked reserved.
  501. * @mtd: MTD device structure
  502. * @ofs: offset from device start
  503. *
  504. * Check if the block is marked as reserved.
  505. */
  506. static int nand_block_isreserved(struct mtd_info *mtd, loff_t ofs)
  507. {
  508. struct nand_chip *chip = mtd_to_nand(mtd);
  509. if (!chip->bbt)
  510. return 0;
  511. /* Return info from the table */
  512. return nand_isreserved_bbt(chip, ofs);
  513. }
  514. /**
  515. * nand_block_checkbad - [GENERIC] Check if a block is marked bad
  516. * @chip: NAND chip object
  517. * @ofs: offset from device start
  518. * @allowbbt: 1, if its allowed to access the bbt area
  519. *
  520. * Check, if the block is bad. Either by reading the bad block table or
  521. * calling of the scan function.
  522. */
  523. static int nand_block_checkbad(struct nand_chip *chip, loff_t ofs, int allowbbt)
  524. {
  525. /* Return info from the table */
  526. if (chip->bbt)
  527. return nand_isbad_bbt(chip, ofs, allowbbt);
  528. return nand_isbad_bbm(chip, ofs);
  529. }
  530. /**
  531. * nand_soft_waitrdy - Poll STATUS reg until RDY bit is set to 1
  532. * @chip: NAND chip structure
  533. * @timeout_ms: Timeout in ms
  534. *
  535. * Poll the STATUS register using ->exec_op() until the RDY bit becomes 1.
  536. * If that does not happen whitin the specified timeout, -ETIMEDOUT is
  537. * returned.
  538. *
  539. * This helper is intended to be used when the controller does not have access
  540. * to the NAND R/B pin.
  541. *
  542. * Be aware that calling this helper from an ->exec_op() implementation means
  543. * ->exec_op() must be re-entrant.
  544. *
  545. * Return 0 if the NAND chip is ready, a negative error otherwise.
  546. */
  547. int nand_soft_waitrdy(struct nand_chip *chip, unsigned long timeout_ms)
  548. {
  549. const struct nand_interface_config *conf;
  550. u8 status = 0;
  551. int ret;
  552. if (!nand_has_exec_op(chip))
  553. return -ENOTSUPP;
  554. /* Wait tWB before polling the STATUS reg. */
  555. conf = nand_get_interface_config(chip);
  556. ndelay(NAND_COMMON_TIMING_NS(conf, tWB_max));
  557. ret = nand_status_op(chip, NULL);
  558. if (ret)
  559. return ret;
  560. /*
  561. * +1 below is necessary because if we are now in the last fraction
  562. * of jiffy and msecs_to_jiffies is 1 then we will wait only that
  563. * small jiffy fraction - possibly leading to false timeout
  564. */
  565. timeout_ms = jiffies + msecs_to_jiffies(timeout_ms) + 1;
  566. do {
  567. ret = nand_read_data_op(chip, &status, sizeof(status), true,
  568. false);
  569. if (ret)
  570. break;
  571. if (status & NAND_STATUS_READY)
  572. break;
  573. /*
  574. * Typical lowest execution time for a tR on most NANDs is 10us,
  575. * use this as polling delay before doing something smarter (ie.
  576. * deriving a delay from the timeout value, timeout_ms/ratio).
  577. */
  578. udelay(10);
  579. } while (time_before(jiffies, timeout_ms));
  580. /*
  581. * We have to exit READ_STATUS mode in order to read real data on the
  582. * bus in case the WAITRDY instruction is preceding a DATA_IN
  583. * instruction.
  584. */
  585. nand_exit_status_op(chip);
  586. if (ret)
  587. return ret;
  588. return status & NAND_STATUS_READY ? 0 : -ETIMEDOUT;
  589. };
  590. EXPORT_SYMBOL_GPL(nand_soft_waitrdy);
  591. /**
  592. * nand_gpio_waitrdy - Poll R/B GPIO pin until ready
  593. * @chip: NAND chip structure
  594. * @gpiod: GPIO descriptor of R/B pin
  595. * @timeout_ms: Timeout in ms
  596. *
  597. * Poll the R/B GPIO pin until it becomes ready. If that does not happen
  598. * whitin the specified timeout, -ETIMEDOUT is returned.
  599. *
  600. * This helper is intended to be used when the controller has access to the
  601. * NAND R/B pin over GPIO.
  602. *
  603. * Return 0 if the R/B pin indicates chip is ready, a negative error otherwise.
  604. */
  605. int nand_gpio_waitrdy(struct nand_chip *chip, struct gpio_desc *gpiod,
  606. unsigned long timeout_ms)
  607. {
  608. /*
  609. * Wait until R/B pin indicates chip is ready or timeout occurs.
  610. * +1 below is necessary because if we are now in the last fraction
  611. * of jiffy and msecs_to_jiffies is 1 then we will wait only that
  612. * small jiffy fraction - possibly leading to false timeout.
  613. */
  614. timeout_ms = jiffies + msecs_to_jiffies(timeout_ms) + 1;
  615. do {
  616. if (gpiod_get_value_cansleep(gpiod))
  617. return 0;
  618. cond_resched();
  619. } while (time_before(jiffies, timeout_ms));
  620. return gpiod_get_value_cansleep(gpiod) ? 0 : -ETIMEDOUT;
  621. };
  622. EXPORT_SYMBOL_GPL(nand_gpio_waitrdy);
  623. /**
  624. * panic_nand_wait - [GENERIC] wait until the command is done
  625. * @chip: NAND chip structure
  626. * @timeo: timeout
  627. *
  628. * Wait for command done. This is a helper function for nand_wait used when
  629. * we are in interrupt context. May happen when in panic and trying to write
  630. * an oops through mtdoops.
  631. */
  632. void panic_nand_wait(struct nand_chip *chip, unsigned long timeo)
  633. {
  634. int i;
  635. for (i = 0; i < timeo; i++) {
  636. if (chip->legacy.dev_ready) {
  637. if (chip->legacy.dev_ready(chip))
  638. break;
  639. } else {
  640. int ret;
  641. u8 status;
  642. ret = nand_read_data_op(chip, &status, sizeof(status),
  643. true, false);
  644. if (ret)
  645. return;
  646. if (status & NAND_STATUS_READY)
  647. break;
  648. }
  649. mdelay(1);
  650. }
  651. }
  652. static bool nand_supports_get_features(struct nand_chip *chip, int addr)
  653. {
  654. return (chip->parameters.supports_set_get_features &&
  655. test_bit(addr, chip->parameters.get_feature_list));
  656. }
  657. static bool nand_supports_set_features(struct nand_chip *chip, int addr)
  658. {
  659. return (chip->parameters.supports_set_get_features &&
  660. test_bit(addr, chip->parameters.set_feature_list));
  661. }
/**
 * nand_reset_interface - Reset data interface and timings
 * @chip: The NAND chip
 * @chipnr: Internal die id
 *
 * Reset the Data interface and timings to ONFI mode 0.
 *
 * Returns 0 for success or negative error code otherwise.
 */
static int nand_reset_interface(struct nand_chip *chip, int chipnr)
{
	const struct nand_controller_ops *ops = chip->controller->ops;
	int ret;

	/* Nothing to do if the controller cannot reconfigure the interface */
	if (!nand_controller_can_setup_interface(chip))
		return 0;

	/*
	 * The ONFI specification says:
	 * "
	 * To transition from NV-DDR or NV-DDR2 to the SDR data
	 * interface, the host shall use the Reset (FFh) command
	 * using SDR timing mode 0. A device in any timing mode is
	 * required to recognize Reset (FFh) command issued in SDR
	 * timing mode 0.
	 * "
	 *
	 * Configure the data interface in SDR mode and set the
	 * timings to timing mode 0.
	 */
	chip->current_interface_config = nand_get_reset_interface_config();
	ret = ops->setup_interface(chip, chipnr,
				   chip->current_interface_config);
	if (ret)
		pr_err("Failed to configure data interface to SDR timing mode 0\n");

	return ret;
}
/**
 * nand_setup_interface - Setup the best data interface and timings
 * @chip: The NAND chip
 * @chipnr: Internal die id
 *
 * Configure what has been reported to be the best data interface and NAND
 * timings supported by the chip and the driver.
 *
 * Returns 0 for success or negative error code otherwise.
 */
static int nand_setup_interface(struct nand_chip *chip, int chipnr)
{
	const struct nand_controller_ops *ops = chip->controller->ops;
	u8 tmode_param[ONFI_SUBFEATURE_PARAM_LEN] = { }, request;
	int ret;

	if (!nand_controller_can_setup_interface(chip))
		return 0;

	/*
	 * A nand_reset_interface() put both the NAND chip and the NAND
	 * controller in timings mode 0. If the default mode for this chip is
	 * also 0, no need to proceed to the change again. Plus, at probe time,
	 * nand_setup_interface() uses ->set/get_features() which would
	 * fail anyway as the parameter page is not available yet.
	 */
	if (!chip->best_interface_config)
		return 0;

	/* Encode interface type (SDR/NV-DDR) and mode into one feature byte */
	request = chip->best_interface_config->timings.mode;
	if (nand_interface_is_sdr(chip->best_interface_config))
		request |= ONFI_DATA_INTERFACE_SDR;
	else
		request |= ONFI_DATA_INTERFACE_NVDDR;
	tmode_param[0] = request;

	/* Change the mode on the chip side (if supported by the NAND chip) */
	if (nand_supports_set_features(chip, ONFI_FEATURE_ADDR_TIMING_MODE)) {
		nand_select_target(chip, chipnr);
		ret = nand_set_features(chip, ONFI_FEATURE_ADDR_TIMING_MODE,
					tmode_param);
		nand_deselect_target(chip);
		if (ret)
			return ret;
	}

	/* Change the mode on the controller side */
	ret = ops->setup_interface(chip, chipnr, chip->best_interface_config);
	if (ret)
		return ret;

	/* Check the mode has been accepted by the chip, if supported */
	if (!nand_supports_get_features(chip, ONFI_FEATURE_ADDR_TIMING_MODE))
		goto update_interface_config;

	/* Read back the timing mode feature to verify the chip applied it */
	memset(tmode_param, 0, ONFI_SUBFEATURE_PARAM_LEN);
	nand_select_target(chip, chipnr);
	ret = nand_get_features(chip, ONFI_FEATURE_ADDR_TIMING_MODE,
				tmode_param);
	nand_deselect_target(chip);
	if (ret)
		goto err_reset_chip;

	if (request != tmode_param[0]) {
		pr_warn("%s timing mode %d not acknowledged by the NAND chip\n",
			nand_interface_is_nvddr(chip->best_interface_config) ? "NV-DDR" : "SDR",
			chip->best_interface_config->timings.mode);
		pr_debug("NAND chip would work in %s timing mode %d\n",
			 tmode_param[0] & ONFI_DATA_INTERFACE_NVDDR ? "NV-DDR" : "SDR",
			 (unsigned int)ONFI_TIMING_MODE_PARAM(tmode_param[0]));
		goto err_reset_chip;
	}

update_interface_config:
	chip->current_interface_config = chip->best_interface_config;

	return 0;

err_reset_chip:
	/*
	 * Fallback to mode 0 if the chip explicitly did not ack the chosen
	 * timing mode.
	 */
	nand_reset_interface(chip, chipnr);
	nand_select_target(chip, chipnr);
	nand_reset_op(chip);
	nand_deselect_target(chip);

	return ret;
}
/**
 * nand_choose_best_sdr_timings - Pick up the best SDR timings that both the
 * NAND controller and the NAND chip support
 * @chip: the NAND chip
 * @iface: the interface configuration (can eventually be updated)
 * @spec_timings: specific timings, when not fitting the ONFI specification
 *
 * If specific timings are provided, use them. Otherwise, retrieve supported
 * timing modes from ONFI information.
 */
int nand_choose_best_sdr_timings(struct nand_chip *chip,
				 struct nand_interface_config *iface,
				 struct nand_sdr_timings *spec_timings)
{
	const struct nand_controller_ops *ops = chip->controller->ops;
	int best_mode = 0, mode, ret = -EOPNOTSUPP;

	iface->type = NAND_SDR_IFACE;

	if (spec_timings) {
		iface->timings.sdr = *spec_timings;
		iface->timings.mode = onfi_find_closest_sdr_mode(spec_timings);

		/* Verify the controller supports the requested interface */
		ret = ops->setup_interface(chip, NAND_DATA_IFACE_CHECK_ONLY,
					   iface);
		if (!ret) {
			/* On success, @iface is kept as the best config */
			chip->best_interface_config = iface;
			return ret;
		}

		/* Fallback to slower modes */
		best_mode = iface->timings.mode;
	} else if (chip->parameters.onfi) {
		/* Highest mode bit advertised by the chip's ONFI data */
		best_mode = fls(chip->parameters.onfi->sdr_timing_modes) - 1;
	}

	/* Try modes from fastest to slowest until the controller accepts one */
	for (mode = best_mode; mode >= 0; mode--) {
		onfi_fill_interface_config(chip, iface, NAND_SDR_IFACE, mode);

		ret = ops->setup_interface(chip, NAND_DATA_IFACE_CHECK_ONLY,
					   iface);
		if (!ret) {
			chip->best_interface_config = iface;
			break;
		}
	}

	return ret;
}
/**
 * nand_choose_best_nvddr_timings - Pick up the best NVDDR timings that both the
 * NAND controller and the NAND chip support
 * @chip: the NAND chip
 * @iface: the interface configuration (can eventually be updated)
 * @spec_timings: specific timings, when not fitting the ONFI specification
 *
 * If specific timings are provided, use them. Otherwise, retrieve supported
 * timing modes from ONFI information.
 */
int nand_choose_best_nvddr_timings(struct nand_chip *chip,
				   struct nand_interface_config *iface,
				   struct nand_nvddr_timings *spec_timings)
{
	const struct nand_controller_ops *ops = chip->controller->ops;
	int best_mode = 0, mode, ret = -EOPNOTSUPP;

	iface->type = NAND_NVDDR_IFACE;

	if (spec_timings) {
		iface->timings.nvddr = *spec_timings;
		iface->timings.mode = onfi_find_closest_nvddr_mode(spec_timings);

		/* Verify the controller supports the requested interface */
		ret = ops->setup_interface(chip, NAND_DATA_IFACE_CHECK_ONLY,
					   iface);
		if (!ret) {
			/* On success, @iface is kept as the best config */
			chip->best_interface_config = iface;
			return ret;
		}

		/* Fallback to slower modes */
		best_mode = iface->timings.mode;
	} else if (chip->parameters.onfi) {
		/* Highest mode bit advertised by the chip's ONFI data */
		best_mode = fls(chip->parameters.onfi->nvddr_timing_modes) - 1;
	}

	/* Try modes from fastest to slowest until the controller accepts one */
	for (mode = best_mode; mode >= 0; mode--) {
		onfi_fill_interface_config(chip, iface, NAND_NVDDR_IFACE, mode);

		ret = ops->setup_interface(chip, NAND_DATA_IFACE_CHECK_ONLY,
					   iface);
		if (!ret) {
			chip->best_interface_config = iface;
			break;
		}
	}

	return ret;
}
  861. /**
  862. * nand_choose_best_timings - Pick up the best NVDDR or SDR timings that both
  863. * NAND controller and the NAND chip support
  864. * @chip: the NAND chip
  865. * @iface: the interface configuration (can eventually be updated)
  866. *
  867. * If specific timings are provided, use them. Otherwise, retrieve supported
  868. * timing modes from ONFI information.
  869. */
  870. static int nand_choose_best_timings(struct nand_chip *chip,
  871. struct nand_interface_config *iface)
  872. {
  873. int ret;
  874. /* Try the fastest timings: NV-DDR */
  875. ret = nand_choose_best_nvddr_timings(chip, iface, NULL);
  876. if (!ret)
  877. return 0;
  878. /* Fallback to SDR timings otherwise */
  879. return nand_choose_best_sdr_timings(chip, iface, NULL);
  880. }
  881. /**
  882. * nand_choose_interface_config - find the best data interface and timings
  883. * @chip: The NAND chip
  884. *
  885. * Find the best data interface and NAND timings supported by the chip
  886. * and the driver. Eventually let the NAND manufacturer driver propose his own
  887. * set of timings.
  888. *
  889. * After this function nand_chip->interface_config is initialized with the best
  890. * timing mode available.
  891. *
  892. * Returns 0 for success or negative error code otherwise.
  893. */
  894. static int nand_choose_interface_config(struct nand_chip *chip)
  895. {
  896. struct nand_interface_config *iface;
  897. int ret;
  898. if (!nand_controller_can_setup_interface(chip))
  899. return 0;
  900. iface = kzalloc(sizeof(*iface), GFP_KERNEL);
  901. if (!iface)
  902. return -ENOMEM;
  903. if (chip->ops.choose_interface_config)
  904. ret = chip->ops.choose_interface_config(chip, iface);
  905. else
  906. ret = nand_choose_best_timings(chip, iface);
  907. if (ret)
  908. kfree(iface);
  909. return ret;
  910. }
/**
 * nand_fill_column_cycles - fill the column cycles of an address
 * @chip: The NAND chip
 * @addrs: Array of address cycles to fill
 * @offset_in_page: The offset in the page
 *
 * Fills the first or the first two bytes of the @addrs field depending
 * on the NAND bus width and the page size.
 *
 * Returns the number of cycles needed to encode the column, or a negative
 * error code in case one of the arguments is invalid.
 */
static int nand_fill_column_cycles(struct nand_chip *chip, u8 *addrs,
				   unsigned int offset_in_page)
{
	struct mtd_info *mtd = nand_to_mtd(chip);

	/* Make sure the offset is less than the actual page size. */
	if (offset_in_page > mtd->writesize + mtd->oobsize)
		return -EINVAL;

	/*
	 * On small page NANDs, there's a dedicated command to access the OOB
	 * area, and the column address is relative to the start of the OOB
	 * area, not the start of the page. Adjust the address accordingly.
	 */
	if (mtd->writesize <= 512 && offset_in_page >= mtd->writesize)
		offset_in_page -= mtd->writesize;

	/*
	 * The offset in page is expressed in bytes, if the NAND bus is 16-bit
	 * wide, then it must be divided by 2.
	 */
	if (chip->options & NAND_BUSWIDTH_16) {
		/* Odd byte offsets cannot be addressed on a 16-bit bus */
		if (WARN_ON(offset_in_page % 2))
			return -EINVAL;

		offset_in_page /= 2;
	}

	addrs[0] = offset_in_page;

	/*
	 * Small page NANDs use 1 cycle for the columns, while large page NANDs
	 * need 2
	 */
	if (mtd->writesize <= 512)
		return 1;

	addrs[1] = offset_in_page >> 8;

	return 2;
}
/*
 * Issue a READ PAGE operation on a small-page NAND through ->exec_op().
 * The page/column are encoded in up to 4 address cycles; the opcode is
 * adjusted when the read starts in the OOB area or in the second half
 * of the page.
 */
static int nand_sp_exec_read_page_op(struct nand_chip *chip, unsigned int page,
				     unsigned int offset_in_page, void *buf,
				     unsigned int len)
{
	const struct nand_interface_config *conf =
		nand_get_interface_config(chip);
	struct mtd_info *mtd = nand_to_mtd(chip);
	u8 addrs[4];
	struct nand_op_instr instrs[] = {
		NAND_OP_CMD(NAND_CMD_READ0, 0),
		NAND_OP_ADDR(3, addrs, NAND_COMMON_TIMING_NS(conf, tWB_max)),
		NAND_OP_WAIT_RDY(NAND_COMMON_TIMING_MS(conf, tR_max),
				 NAND_COMMON_TIMING_NS(conf, tRR_min)),
		NAND_OP_DATA_IN(len, buf, 0),
	};
	struct nand_operation op = NAND_OPERATION(chip->cur_cs, instrs);
	int ret;

	/* Drop the DATA_IN instruction if len is set to 0. */
	if (!len)
		op.ninstrs--;

	/* Select the opcode matching the area the read starts in */
	if (offset_in_page >= mtd->writesize)
		instrs[0].ctx.cmd.opcode = NAND_CMD_READOOB;
	else if (offset_in_page >= 256 &&
		 !(chip->options & NAND_BUSWIDTH_16))
		instrs[0].ctx.cmd.opcode = NAND_CMD_READ1;

	ret = nand_fill_column_cycles(chip, addrs, offset_in_page);
	if (ret < 0)
		return ret;

	/* Row address: 2 cycles, plus a 3rd one on large chips */
	addrs[1] = page;
	addrs[2] = page >> 8;

	if (chip->options & NAND_ROW_ADDR_3) {
		addrs[3] = page >> 16;
		instrs[1].ctx.addr.naddrs++;
	}

	return nand_exec_op(chip, &op);
}
/*
 * Issue a READ PAGE operation on a large-page NAND through ->exec_op():
 * READ0 + addresses + READSTART, wait for tR, then read the data out.
 */
static int nand_lp_exec_read_page_op(struct nand_chip *chip, unsigned int page,
				     unsigned int offset_in_page, void *buf,
				     unsigned int len)
{
	const struct nand_interface_config *conf =
		nand_get_interface_config(chip);
	u8 addrs[5];
	struct nand_op_instr instrs[] = {
		NAND_OP_CMD(NAND_CMD_READ0, 0),
		NAND_OP_ADDR(4, addrs, 0),
		NAND_OP_CMD(NAND_CMD_READSTART, NAND_COMMON_TIMING_NS(conf, tWB_max)),
		NAND_OP_WAIT_RDY(NAND_COMMON_TIMING_MS(conf, tR_max),
				 NAND_COMMON_TIMING_NS(conf, tRR_min)),
		NAND_OP_DATA_IN(len, buf, 0),
	};
	struct nand_operation op = NAND_OPERATION(chip->cur_cs, instrs);
	int ret;

	/* Drop the DATA_IN instruction if len is set to 0. */
	if (!len)
		op.ninstrs--;

	/* Column address occupies the first 2 cycles on large pages */
	ret = nand_fill_column_cycles(chip, addrs, offset_in_page);
	if (ret < 0)
		return ret;

	/* Row address: 2 cycles, plus a 3rd one on large chips */
	addrs[2] = page;
	addrs[3] = page >> 8;

	if (chip->options & NAND_ROW_ADDR_3) {
		addrs[4] = page >> 16;
		instrs[1].ctx.addr.naddrs++;
	}

	return nand_exec_op(chip, &op);
}
  1023. /**
  1024. * nand_read_page_op - Do a READ PAGE operation
  1025. * @chip: The NAND chip
  1026. * @page: page to read
  1027. * @offset_in_page: offset within the page
  1028. * @buf: buffer used to store the data
  1029. * @len: length of the buffer
  1030. *
  1031. * This function issues a READ PAGE operation.
  1032. * This function does not select/unselect the CS line.
  1033. *
  1034. * Returns 0 on success, a negative error code otherwise.
  1035. */
  1036. int nand_read_page_op(struct nand_chip *chip, unsigned int page,
  1037. unsigned int offset_in_page, void *buf, unsigned int len)
  1038. {
  1039. struct mtd_info *mtd = nand_to_mtd(chip);
  1040. if (len && !buf)
  1041. return -EINVAL;
  1042. if (offset_in_page + len > mtd->writesize + mtd->oobsize)
  1043. return -EINVAL;
  1044. if (nand_has_exec_op(chip)) {
  1045. if (mtd->writesize > 512)
  1046. return nand_lp_exec_read_page_op(chip, page,
  1047. offset_in_page, buf,
  1048. len);
  1049. return nand_sp_exec_read_page_op(chip, page, offset_in_page,
  1050. buf, len);
  1051. }
  1052. chip->legacy.cmdfunc(chip, NAND_CMD_READ0, offset_in_page, page);
  1053. if (len)
  1054. chip->legacy.read_buf(chip, buf, len);
  1055. return 0;
  1056. }
  1057. EXPORT_SYMBOL_GPL(nand_read_page_op);
/**
 * nand_read_param_page_op - Do a READ PARAMETER PAGE operation
 * @chip: The NAND chip
 * @page: parameter page to read
 * @buf: buffer used to store the data
 * @len: length of the buffer
 *
 * This function issues a READ PARAMETER PAGE operation.
 * This function does not select/unselect the CS line.
 *
 * Returns 0 on success, a negative error code otherwise.
 */
int nand_read_param_page_op(struct nand_chip *chip, u8 page, void *buf,
			    unsigned int len)
{
	unsigned int i;
	u8 *p = buf;

	if (len && !buf)
		return -EINVAL;

	if (nand_has_exec_op(chip)) {
		const struct nand_interface_config *conf =
			nand_get_interface_config(chip);
		struct nand_op_instr instrs[] = {
			NAND_OP_CMD(NAND_CMD_PARAM, 0),
			NAND_OP_ADDR(1, &page,
				     NAND_COMMON_TIMING_NS(conf, tWB_max)),
			NAND_OP_WAIT_RDY(NAND_COMMON_TIMING_MS(conf, tR_max),
					 NAND_COMMON_TIMING_NS(conf, tRR_min)),
			/* Parameter pages are always read 8 bits at a time */
			NAND_OP_8BIT_DATA_IN(len, buf, 0),
		};
		struct nand_operation op = NAND_OPERATION(chip->cur_cs, instrs);

		/* Drop the DATA_IN instruction if len is set to 0. */
		if (!len)
			op.ninstrs--;

		return nand_exec_op(chip, &op);
	}

	/* Legacy path: issue the command, then read the data byte by byte */
	chip->legacy.cmdfunc(chip, NAND_CMD_PARAM, page, -1);
	for (i = 0; i < len; i++)
		p[i] = chip->legacy.read_byte(chip);

	return 0;
}
/**
 * nand_change_read_column_op - Do a CHANGE READ COLUMN operation
 * @chip: The NAND chip
 * @offset_in_page: offset within the page
 * @buf: buffer used to store the data
 * @len: length of the buffer
 * @force_8bit: force 8-bit bus access
 *
 * This function issues a CHANGE READ COLUMN operation.
 * This function does not select/unselect the CS line.
 *
 * Returns 0 on success, a negative error code otherwise.
 */
int nand_change_read_column_op(struct nand_chip *chip,
			       unsigned int offset_in_page, void *buf,
			       unsigned int len, bool force_8bit)
{
	struct mtd_info *mtd = nand_to_mtd(chip);

	/* A non-zero length needs a destination buffer. */
	if (len && !buf)
		return -EINVAL;

	/* The requested window must fit within data + OOB. */
	if (offset_in_page + len > mtd->writesize + mtd->oobsize)
		return -EINVAL;

	/* Small page NANDs do not support column change. */
	if (mtd->writesize <= 512)
		return -ENOTSUPP;

	if (nand_has_exec_op(chip)) {
		const struct nand_interface_config *conf =
			nand_get_interface_config(chip);
		u8 addrs[2] = {};
		/*
		 * RNDOUT + two column address cycles + RNDOUTSTART, honoring
		 * the change-column setup time (tCCS) before the data phase.
		 */
		struct nand_op_instr instrs[] = {
			NAND_OP_CMD(NAND_CMD_RNDOUT, 0),
			NAND_OP_ADDR(2, addrs, 0),
			NAND_OP_CMD(NAND_CMD_RNDOUTSTART,
				    NAND_COMMON_TIMING_NS(conf, tCCS_min)),
			NAND_OP_DATA_IN(len, buf, 0),
		};
		struct nand_operation op = NAND_OPERATION(chip->cur_cs, instrs);
		int ret;

		ret = nand_fill_column_cycles(chip, addrs, offset_in_page);
		if (ret < 0)
			return ret;

		/* Drop the DATA_IN instruction if len is set to 0. */
		if (!len)
			op.ninstrs--;

		/* Harmless when the DATA_IN instruction was dropped above. */
		instrs[3].ctx.data.force_8bit = force_8bit;

		return nand_exec_op(chip, &op);
	}

	/* Legacy path. */
	chip->legacy.cmdfunc(chip, NAND_CMD_RNDOUT, offset_in_page, -1);
	if (len)
		chip->legacy.read_buf(chip, buf, len);

	return 0;
}
EXPORT_SYMBOL_GPL(nand_change_read_column_op);
  1152. /**
  1153. * nand_read_oob_op - Do a READ OOB operation
  1154. * @chip: The NAND chip
  1155. * @page: page to read
  1156. * @offset_in_oob: offset within the OOB area
  1157. * @buf: buffer used to store the data
  1158. * @len: length of the buffer
  1159. *
  1160. * This function issues a READ OOB operation.
  1161. * This function does not select/unselect the CS line.
  1162. *
  1163. * Returns 0 on success, a negative error code otherwise.
  1164. */
  1165. int nand_read_oob_op(struct nand_chip *chip, unsigned int page,
  1166. unsigned int offset_in_oob, void *buf, unsigned int len)
  1167. {
  1168. struct mtd_info *mtd = nand_to_mtd(chip);
  1169. if (len && !buf)
  1170. return -EINVAL;
  1171. if (offset_in_oob + len > mtd->oobsize)
  1172. return -EINVAL;
  1173. if (nand_has_exec_op(chip))
  1174. return nand_read_page_op(chip, page,
  1175. mtd->writesize + offset_in_oob,
  1176. buf, len);
  1177. chip->legacy.cmdfunc(chip, NAND_CMD_READOOB, offset_in_oob, page);
  1178. if (len)
  1179. chip->legacy.read_buf(chip, buf, len);
  1180. return 0;
  1181. }
  1182. EXPORT_SYMBOL_GPL(nand_read_oob_op);
/*
 * nand_exec_prog_page_op - ->exec_op() helper issuing (part of) a PROG PAGE
 * @chip: The NAND chip
 * @page: page to write
 * @offset_in_page: offset within the page
 * @buf: data to send to the NAND, may be NULL when @len is 0
 * @len: number of bytes to write
 * @prog: true to conclude with PAGEPROG + wait; false to leave the page open
 *        so more data can be appended before nand_prog_page_end_op()
 *
 * Returns 0 on success, a negative error code otherwise.
 */
static int nand_exec_prog_page_op(struct nand_chip *chip, unsigned int page,
				  unsigned int offset_in_page, const void *buf,
				  unsigned int len, bool prog)
{
	const struct nand_interface_config *conf =
		nand_get_interface_config(chip);
	struct mtd_info *mtd = nand_to_mtd(chip);
	u8 addrs[5] = {};
	struct nand_op_instr instrs[] = {
		/*
		 * The first instruction will be dropped if we're dealing
		 * with a large page NAND and adjusted if we're dealing
		 * with a small page NAND and the page offset is > 255.
		 */
		NAND_OP_CMD(NAND_CMD_READ0, 0),
		NAND_OP_CMD(NAND_CMD_SEQIN, 0),
		NAND_OP_ADDR(0, addrs, NAND_COMMON_TIMING_NS(conf, tADL_min)),
		NAND_OP_DATA_OUT(len, buf, 0),
		NAND_OP_CMD(NAND_CMD_PAGEPROG,
			    NAND_COMMON_TIMING_NS(conf, tWB_max)),
		NAND_OP_WAIT_RDY(NAND_COMMON_TIMING_MS(conf, tPROG_max), 0),
	};
	struct nand_operation op = NAND_OPERATION(chip->cur_cs, instrs);
	/* Column cycles first; returns how many bytes of addrs were used. */
	int naddrs = nand_fill_column_cycles(chip, addrs, offset_in_page);

	if (naddrs < 0)
		return naddrs;

	/* Append the row (page) address cycles after the column cycles. */
	addrs[naddrs++] = page;
	addrs[naddrs++] = page >> 8;
	if (chip->options & NAND_ROW_ADDR_3)
		addrs[naddrs++] = page >> 16;

	instrs[2].ctx.addr.naddrs = naddrs;

	/* Drop the last two instructions if we're not programming the page. */
	if (!prog) {
		op.ninstrs -= 2;
		/* Also drop the DATA_OUT instruction if empty. */
		if (!len)
			op.ninstrs--;
	}

	if (mtd->writesize <= 512) {
		/*
		 * Small pages need some more tweaking: we have to adjust the
		 * first instruction depending on the page offset we're trying
		 * to access.
		 */
		if (offset_in_page >= mtd->writesize)
			instrs[0].ctx.cmd.opcode = NAND_CMD_READOOB;
		else if (offset_in_page >= 256 &&
			 !(chip->options & NAND_BUSWIDTH_16))
			instrs[0].ctx.cmd.opcode = NAND_CMD_READ1;
	} else {
		/*
		 * Drop the first command if we're dealing with a large page
		 * NAND.
		 */
		op.instrs++;
		op.ninstrs--;
	}

	return nand_exec_op(chip, &op);
}
  1242. /**
  1243. * nand_prog_page_begin_op - starts a PROG PAGE operation
  1244. * @chip: The NAND chip
  1245. * @page: page to write
  1246. * @offset_in_page: offset within the page
  1247. * @buf: buffer containing the data to write to the page
  1248. * @len: length of the buffer
  1249. *
  1250. * This function issues the first half of a PROG PAGE operation.
  1251. * This function does not select/unselect the CS line.
  1252. *
  1253. * Returns 0 on success, a negative error code otherwise.
  1254. */
  1255. int nand_prog_page_begin_op(struct nand_chip *chip, unsigned int page,
  1256. unsigned int offset_in_page, const void *buf,
  1257. unsigned int len)
  1258. {
  1259. struct mtd_info *mtd = nand_to_mtd(chip);
  1260. if (len && !buf)
  1261. return -EINVAL;
  1262. if (offset_in_page + len > mtd->writesize + mtd->oobsize)
  1263. return -EINVAL;
  1264. if (nand_has_exec_op(chip))
  1265. return nand_exec_prog_page_op(chip, page, offset_in_page, buf,
  1266. len, false);
  1267. chip->legacy.cmdfunc(chip, NAND_CMD_SEQIN, offset_in_page, page);
  1268. if (buf)
  1269. chip->legacy.write_buf(chip, buf, len);
  1270. return 0;
  1271. }
  1272. EXPORT_SYMBOL_GPL(nand_prog_page_begin_op);
/**
 * nand_prog_page_end_op - ends a PROG PAGE operation
 * @chip: The NAND chip
 *
 * This function issues the second half of a PROG PAGE operation.
 * This function does not select/unselect the CS line.
 *
 * Returns 0 on success, a negative error code otherwise.
 */
int nand_prog_page_end_op(struct nand_chip *chip)
{
	int ret;
	u8 status;

	if (nand_has_exec_op(chip)) {
		const struct nand_interface_config *conf =
			nand_get_interface_config(chip);
		/* PAGEPROG, then wait up to tPROG_max for completion. */
		struct nand_op_instr instrs[] = {
			NAND_OP_CMD(NAND_CMD_PAGEPROG,
				    NAND_COMMON_TIMING_NS(conf, tWB_max)),
			NAND_OP_WAIT_RDY(NAND_COMMON_TIMING_MS(conf, tPROG_max),
					 0),
		};
		struct nand_operation op = NAND_OPERATION(chip->cur_cs, instrs);

		ret = nand_exec_op(chip, &op);
		if (ret)
			return ret;

		/* Fetch the status register to know if programming worked. */
		ret = nand_status_op(chip, &status);
		if (ret)
			return ret;
	} else {
		chip->legacy.cmdfunc(chip, NAND_CMD_PAGEPROG, -1, -1);
		/* waitfunc() returns the status byte, or a negative error. */
		ret = chip->legacy.waitfunc(chip);
		if (ret < 0)
			return ret;

		status = ret;
	}

	/* Program failures are reported through the FAIL status bit. */
	if (status & NAND_STATUS_FAIL)
		return -EIO;

	return 0;
}
EXPORT_SYMBOL_GPL(nand_prog_page_end_op);
/**
 * nand_prog_page_op - Do a full PROG PAGE operation
 * @chip: The NAND chip
 * @page: page to write
 * @offset_in_page: offset within the page
 * @buf: buffer containing the data to write to the page
 * @len: length of the buffer
 *
 * This function issues a full PROG PAGE operation.
 * This function does not select/unselect the CS line.
 *
 * Returns 0 on success, a negative error code otherwise.
 */
int nand_prog_page_op(struct nand_chip *chip, unsigned int page,
		      unsigned int offset_in_page, const void *buf,
		      unsigned int len)
{
	struct mtd_info *mtd = nand_to_mtd(chip);
	u8 status;
	int ret;

	/* Unlike the begin/end split, a full program requires actual data. */
	if (!len || !buf)
		return -EINVAL;

	/* The requested window must fit within data + OOB. */
	if (offset_in_page + len > mtd->writesize + mtd->oobsize)
		return -EINVAL;

	if (nand_has_exec_op(chip)) {
		/* prog=true: the helper appends PAGEPROG + wait itself. */
		ret = nand_exec_prog_page_op(chip, page, offset_in_page, buf,
					     len, true);
		if (ret)
			return ret;

		ret = nand_status_op(chip, &status);
		if (ret)
			return ret;
	} else {
		chip->legacy.cmdfunc(chip, NAND_CMD_SEQIN, offset_in_page,
				     page);
		chip->legacy.write_buf(chip, buf, len);
		chip->legacy.cmdfunc(chip, NAND_CMD_PAGEPROG, -1, -1);
		/* waitfunc() returns the status byte, or a negative error. */
		ret = chip->legacy.waitfunc(chip);
		if (ret < 0)
			return ret;

		status = ret;
	}

	/* Program failures are reported through the FAIL status bit. */
	if (status & NAND_STATUS_FAIL)
		return -EIO;

	return 0;
}
EXPORT_SYMBOL_GPL(nand_prog_page_op);
  1361. /**
  1362. * nand_change_write_column_op - Do a CHANGE WRITE COLUMN operation
  1363. * @chip: The NAND chip
  1364. * @offset_in_page: offset within the page
  1365. * @buf: buffer containing the data to send to the NAND
  1366. * @len: length of the buffer
  1367. * @force_8bit: force 8-bit bus access
  1368. *
  1369. * This function issues a CHANGE WRITE COLUMN operation.
  1370. * This function does not select/unselect the CS line.
  1371. *
  1372. * Returns 0 on success, a negative error code otherwise.
  1373. */
  1374. int nand_change_write_column_op(struct nand_chip *chip,
  1375. unsigned int offset_in_page,
  1376. const void *buf, unsigned int len,
  1377. bool force_8bit)
  1378. {
  1379. struct mtd_info *mtd = nand_to_mtd(chip);
  1380. if (len && !buf)
  1381. return -EINVAL;
  1382. if (offset_in_page + len > mtd->writesize + mtd->oobsize)
  1383. return -EINVAL;
  1384. /* Small page NANDs do not support column change. */
  1385. if (mtd->writesize <= 512)
  1386. return -ENOTSUPP;
  1387. if (nand_has_exec_op(chip)) {
  1388. const struct nand_interface_config *conf =
  1389. nand_get_interface_config(chip);
  1390. u8 addrs[2];
  1391. struct nand_op_instr instrs[] = {
  1392. NAND_OP_CMD(NAND_CMD_RNDIN, 0),
  1393. NAND_OP_ADDR(2, addrs, NAND_COMMON_TIMING_NS(conf, tCCS_min)),
  1394. NAND_OP_DATA_OUT(len, buf, 0),
  1395. };
  1396. struct nand_operation op = NAND_OPERATION(chip->cur_cs, instrs);
  1397. int ret;
  1398. ret = nand_fill_column_cycles(chip, addrs, offset_in_page);
  1399. if (ret < 0)
  1400. return ret;
  1401. instrs[2].ctx.data.force_8bit = force_8bit;
  1402. /* Drop the DATA_OUT instruction if len is set to 0. */
  1403. if (!len)
  1404. op.ninstrs--;
  1405. return nand_exec_op(chip, &op);
  1406. }
  1407. chip->legacy.cmdfunc(chip, NAND_CMD_RNDIN, offset_in_page, -1);
  1408. if (len)
  1409. chip->legacy.write_buf(chip, buf, len);
  1410. return 0;
  1411. }
  1412. EXPORT_SYMBOL_GPL(nand_change_write_column_op);
/**
 * nand_readid_op - Do a READID operation
 * @chip: The NAND chip
 * @addr: address cycle to pass after the READID command
 * @buf: buffer used to store the ID
 * @len: length of the buffer
 *
 * This function sends a READID command and reads back the ID returned by the
 * NAND.
 * This function does not select/unselect the CS line.
 *
 * Returns 0 on success, a negative error code otherwise.
 */
int nand_readid_op(struct nand_chip *chip, u8 addr, void *buf,
		   unsigned int len)
{
	unsigned int i;
	u8 *id = buf, *ddrbuf = NULL;

	/* A non-zero length needs a destination buffer. */
	if (len && !buf)
		return -EINVAL;

	if (nand_has_exec_op(chip)) {
		const struct nand_interface_config *conf =
			nand_get_interface_config(chip);
		struct nand_op_instr instrs[] = {
			NAND_OP_CMD(NAND_CMD_READID, 0),
			NAND_OP_ADDR(1, &addr,
				     NAND_COMMON_TIMING_NS(conf, tADL_min)),
			NAND_OP_8BIT_DATA_IN(len, buf, 0),
		};
		struct nand_operation op = NAND_OPERATION(chip->cur_cs, instrs);
		int ret;

		/* READ_ID data bytes are received twice in NV-DDR mode */
		if (len && nand_interface_is_nvddr(conf)) {
			/* Double-sized bounce buffer for duplicated bytes. */
			ddrbuf = kzalloc(len * 2, GFP_KERNEL);
			if (!ddrbuf)
				return -ENOMEM;

			instrs[2].ctx.data.len *= 2;
			instrs[2].ctx.data.buf.in = ddrbuf;
		}

		/* Drop the DATA_IN instruction if len is set to 0. */
		if (!len)
			op.ninstrs--;

		ret = nand_exec_op(chip, &op);
		if (!ret && len && nand_interface_is_nvddr(conf)) {
			/* Keep every other byte of the duplicated stream. */
			for (i = 0; i < len; i++)
				id[i] = ddrbuf[i * 2];
		}

		/* kfree(NULL) is a no-op for the SDR case. */
		kfree(ddrbuf);

		return ret;
	}

	/* Legacy path. */
	chip->legacy.cmdfunc(chip, NAND_CMD_READID, addr, -1);
	for (i = 0; i < len; i++)
		id[i] = chip->legacy.read_byte(chip);

	return 0;
}
EXPORT_SYMBOL_GPL(nand_readid_op);
/**
 * nand_status_op - Do a STATUS operation
 * @chip: The NAND chip
 * @status: out variable to store the NAND status, may be NULL if the caller
 *          only wants to put the chip in status-output mode
 *
 * This function sends a STATUS command and reads back the status returned by
 * the NAND.
 * This function does not select/unselect the CS line.
 *
 * Returns 0 on success, a negative error code otherwise.
 */
int nand_status_op(struct nand_chip *chip, u8 *status)
{
	if (nand_has_exec_op(chip)) {
		const struct nand_interface_config *conf =
			nand_get_interface_config(chip);
		u8 ddrstatus[2];
		struct nand_op_instr instrs[] = {
			NAND_OP_CMD(NAND_CMD_STATUS,
				    NAND_COMMON_TIMING_NS(conf, tADL_min)),
			NAND_OP_8BIT_DATA_IN(1, status, 0),
		};
		struct nand_operation op = NAND_OPERATION(chip->cur_cs, instrs);
		int ret;

		/* The status data byte will be received twice in NV-DDR mode */
		if (status && nand_interface_is_nvddr(conf)) {
			/* Redirect into a 2-byte bounce buffer on the stack. */
			instrs[1].ctx.data.len *= 2;
			instrs[1].ctx.data.buf.in = ddrstatus;
		}

		/* Drop the DATA_IN instruction if the caller ignores status. */
		if (!status)
			op.ninstrs--;

		ret = nand_exec_op(chip, &op);
		if (!ret && status && nand_interface_is_nvddr(conf))
			*status = ddrstatus[0];

		return ret;
	}

	/* Legacy path. */
	chip->legacy.cmdfunc(chip, NAND_CMD_STATUS, -1, -1);
	if (status)
		*status = chip->legacy.read_byte(chip);

	return 0;
}
EXPORT_SYMBOL_GPL(nand_status_op);
  1511. /**
  1512. * nand_exit_status_op - Exit a STATUS operation
  1513. * @chip: The NAND chip
  1514. *
  1515. * This function sends a READ0 command to cancel the effect of the STATUS
  1516. * command to avoid reading only the status until a new read command is sent.
  1517. *
  1518. * This function does not select/unselect the CS line.
  1519. *
  1520. * Returns 0 on success, a negative error code otherwise.
  1521. */
  1522. int nand_exit_status_op(struct nand_chip *chip)
  1523. {
  1524. if (nand_has_exec_op(chip)) {
  1525. struct nand_op_instr instrs[] = {
  1526. NAND_OP_CMD(NAND_CMD_READ0, 0),
  1527. };
  1528. struct nand_operation op = NAND_OPERATION(chip->cur_cs, instrs);
  1529. return nand_exec_op(chip, &op);
  1530. }
  1531. chip->legacy.cmdfunc(chip, NAND_CMD_READ0, -1, -1);
  1532. return 0;
  1533. }
/**
 * nand_erase_op - Do an erase operation
 * @chip: The NAND chip
 * @eraseblock: block to erase
 *
 * This function sends an ERASE command and waits for the NAND to be ready
 * before returning.
 * This function does not select/unselect the CS line.
 *
 * Returns 0 on success, a negative error code otherwise.
 */
int nand_erase_op(struct nand_chip *chip, unsigned int eraseblock)
{
	/* Convert the eraseblock index into the first page of that block. */
	unsigned int page = eraseblock <<
			    (chip->phys_erase_shift - chip->page_shift);
	int ret;
	u8 status;

	if (nand_has_exec_op(chip)) {
		const struct nand_interface_config *conf =
			nand_get_interface_config(chip);
		/* Erase only takes row address cycles, no column cycles. */
		u8 addrs[3] = { page, page >> 8, page >> 16 };
		struct nand_op_instr instrs[] = {
			NAND_OP_CMD(NAND_CMD_ERASE1, 0),
			NAND_OP_ADDR(2, addrs, 0),
			NAND_OP_CMD(NAND_CMD_ERASE2,
				    NAND_COMMON_TIMING_NS(conf, tWB_max)),
			NAND_OP_WAIT_RDY(NAND_COMMON_TIMING_MS(conf, tBERS_max),
					 0),
		};
		struct nand_operation op = NAND_OPERATION(chip->cur_cs, instrs);

		/* Chips needing a third row cycle send addrs[2] as well. */
		if (chip->options & NAND_ROW_ADDR_3)
			instrs[1].ctx.addr.naddrs++;

		ret = nand_exec_op(chip, &op);
		if (ret)
			return ret;

		/* Fetch the status register to know if the erase worked. */
		ret = nand_status_op(chip, &status);
		if (ret)
			return ret;
	} else {
		chip->legacy.cmdfunc(chip, NAND_CMD_ERASE1, -1, page);
		chip->legacy.cmdfunc(chip, NAND_CMD_ERASE2, -1, -1);
		/* waitfunc() returns the status byte, or a negative error. */
		ret = chip->legacy.waitfunc(chip);
		if (ret < 0)
			return ret;

		status = ret;
	}

	/* Erase failures are reported through the FAIL status bit. */
	if (status & NAND_STATUS_FAIL)
		return -EIO;

	return 0;
}
EXPORT_SYMBOL_GPL(nand_erase_op);
/**
 * nand_set_features_op - Do a SET FEATURES operation
 * @chip: The NAND chip
 * @feature: feature id
 * @data: 4 bytes of data
 *
 * This function sends a SET FEATURES command and waits for the NAND to be
 * ready before returning.
 * This function does not select/unselect the CS line.
 *
 * Returns 0 on success, a negative error code otherwise.
 */
static int nand_set_features_op(struct nand_chip *chip, u8 feature,
				const void *data)
{
	const u8 *params = data;
	int i, ret;

	if (nand_has_exec_op(chip)) {
		const struct nand_interface_config *conf =
			nand_get_interface_config(chip);
		/*
		 * SET_FEATURES, one address cycle selecting the feature,
		 * the parameter payload (always 8-bit transfers), then wait
		 * up to tFEAT_max for the chip to apply it.
		 */
		struct nand_op_instr instrs[] = {
			NAND_OP_CMD(NAND_CMD_SET_FEATURES, 0),
			NAND_OP_ADDR(1, &feature, NAND_COMMON_TIMING_NS(conf,
									tADL_min)),
			NAND_OP_8BIT_DATA_OUT(ONFI_SUBFEATURE_PARAM_LEN, data,
					      NAND_COMMON_TIMING_NS(conf,
								    tWB_max)),
			NAND_OP_WAIT_RDY(NAND_COMMON_TIMING_MS(conf, tFEAT_max),
					 0),
		};
		struct nand_operation op = NAND_OPERATION(chip->cur_cs, instrs);

		return nand_exec_op(chip, &op);
	}

	/* Legacy path: push the parameter bytes one at a time. */
	chip->legacy.cmdfunc(chip, NAND_CMD_SET_FEATURES, feature, -1);
	for (i = 0; i < ONFI_SUBFEATURE_PARAM_LEN; ++i)
		chip->legacy.write_byte(chip, params[i]);

	/* waitfunc() returns the status byte, or a negative error. */
	ret = chip->legacy.waitfunc(chip);
	if (ret < 0)
		return ret;

	if (ret & NAND_STATUS_FAIL)
		return -EIO;

	return 0;
}
  1628. /**
  1629. * nand_get_features_op - Do a GET FEATURES operation
  1630. * @chip: The NAND chip
  1631. * @feature: feature id
  1632. * @data: 4 bytes of data
  1633. *
  1634. * This function sends a GET FEATURES command and waits for the NAND to be
  1635. * ready before returning.
  1636. * This function does not select/unselect the CS line.
  1637. *
  1638. * Returns 0 on success, a negative error code otherwise.
  1639. */
  1640. static int nand_get_features_op(struct nand_chip *chip, u8 feature,
  1641. void *data)
  1642. {
  1643. u8 *params = data, ddrbuf[ONFI_SUBFEATURE_PARAM_LEN * 2];
  1644. int i;
  1645. if (nand_has_exec_op(chip)) {
  1646. const struct nand_interface_config *conf =
  1647. nand_get_interface_config(chip);
  1648. struct nand_op_instr instrs[] = {
  1649. NAND_OP_CMD(NAND_CMD_GET_FEATURES, 0),
  1650. NAND_OP_ADDR(1, &feature,
  1651. NAND_COMMON_TIMING_NS(conf, tWB_max)),
  1652. NAND_OP_WAIT_RDY(NAND_COMMON_TIMING_MS(conf, tFEAT_max),
  1653. NAND_COMMON_TIMING_NS(conf, tRR_min)),
  1654. NAND_OP_8BIT_DATA_IN(ONFI_SUBFEATURE_PARAM_LEN,
  1655. data, 0),
  1656. };
  1657. struct nand_operation op = NAND_OPERATION(chip->cur_cs, instrs);
  1658. int ret;
  1659. /* GET_FEATURE data bytes are received twice in NV-DDR mode */
  1660. if (nand_interface_is_nvddr(conf)) {
  1661. instrs[3].ctx.data.len *= 2;
  1662. instrs[3].ctx.data.buf.in = ddrbuf;
  1663. }
  1664. ret = nand_exec_op(chip, &op);
  1665. if (nand_interface_is_nvddr(conf)) {
  1666. for (i = 0; i < ONFI_SUBFEATURE_PARAM_LEN; i++)
  1667. params[i] = ddrbuf[i * 2];
  1668. }
  1669. return ret;
  1670. }
  1671. chip->legacy.cmdfunc(chip, NAND_CMD_GET_FEATURES, feature, -1);
  1672. for (i = 0; i < ONFI_SUBFEATURE_PARAM_LEN; ++i)
  1673. params[i] = chip->legacy.read_byte(chip);
  1674. return 0;
  1675. }
/*
 * nand_wait_rdy_op - Wait for the chip to become ready again
 * @chip: The NAND chip
 * @timeout_ms: timeout before giving up
 * @delay_ns: delay to observe before sampling ready/busy
 *
 * Returns 0 on success, a negative error code otherwise.
 */
static int nand_wait_rdy_op(struct nand_chip *chip, unsigned int timeout_ms,
			    unsigned int delay_ns)
{
	if (nand_has_exec_op(chip)) {
		/*
		 * NOTE(review): the parameter names say the values are
		 * already in ms/ns, yet they are run through PSEC_TO_MSEC()/
		 * PSEC_TO_NSEC(), which convert from picoseconds — confirm
		 * the units callers pass, as a double conversion would
		 * truncate the timeout toward 0.
		 */
		struct nand_op_instr instrs[] = {
			NAND_OP_WAIT_RDY(PSEC_TO_MSEC(timeout_ms),
					 PSEC_TO_NSEC(delay_ns)),
		};
		struct nand_operation op = NAND_OPERATION(chip->cur_cs, instrs);

		return nand_exec_op(chip, &op);
	}

	/* Apply delay or wait for ready/busy pin */
	if (!chip->legacy.dev_ready)
		udelay(chip->legacy.chip_delay);
	else
		nand_wait_ready(chip);

	return 0;
}
  1694. /**
  1695. * nand_reset_op - Do a reset operation
  1696. * @chip: The NAND chip
  1697. *
  1698. * This function sends a RESET command and waits for the NAND to be ready
  1699. * before returning.
  1700. * This function does not select/unselect the CS line.
  1701. *
  1702. * Returns 0 on success, a negative error code otherwise.
  1703. */
  1704. int nand_reset_op(struct nand_chip *chip)
  1705. {
  1706. if (nand_has_exec_op(chip)) {
  1707. const struct nand_interface_config *conf =
  1708. nand_get_interface_config(chip);
  1709. struct nand_op_instr instrs[] = {
  1710. NAND_OP_CMD(NAND_CMD_RESET,
  1711. NAND_COMMON_TIMING_NS(conf, tWB_max)),
  1712. NAND_OP_WAIT_RDY(NAND_COMMON_TIMING_MS(conf, tRST_max),
  1713. 0),
  1714. };
  1715. struct nand_operation op = NAND_OPERATION(chip->cur_cs, instrs);
  1716. return nand_exec_op(chip, &op);
  1717. }
  1718. chip->legacy.cmdfunc(chip, NAND_CMD_RESET, -1, -1);
  1719. return 0;
  1720. }
  1721. EXPORT_SYMBOL_GPL(nand_reset_op);
/**
 * nand_read_data_op - Read data from the NAND
 * @chip: The NAND chip
 * @buf: buffer used to store the data
 * @len: length of the buffer
 * @force_8bit: force 8-bit bus access
 * @check_only: do not actually run the command, only checks if the
 *              controller driver supports it
 *
 * This function does a raw data read on the bus. Usually used after launching
 * another NAND operation like nand_read_page_op().
 * This function does not select/unselect the CS line.
 *
 * Returns 0 on success, a negative error code otherwise.
 */
int nand_read_data_op(struct nand_chip *chip, void *buf, unsigned int len,
		      bool force_8bit, bool check_only)
{
	if (!len || !buf)
		return -EINVAL;

	if (nand_has_exec_op(chip)) {
		const struct nand_interface_config *conf =
			nand_get_interface_config(chip);
		struct nand_op_instr instrs[] = {
			NAND_OP_DATA_IN(len, buf, 0),
		};
		struct nand_operation op = NAND_OPERATION(chip->cur_cs, instrs);
		u8 *ddrbuf = NULL;
		int ret, i;

		instrs[0].ctx.data.force_8bit = force_8bit;

		/*
		 * Parameter payloads (ID, status, features, etc) do not go
		 * through the same pipeline as regular data, hence the
		 * force_8bit flag must be set and this also indicates that in
		 * case NV-DDR timings are being used the data will be received
		 * twice.
		 */
		if (force_8bit && nand_interface_is_nvddr(conf)) {
			/* Double-sized bounce buffer for duplicated bytes. */
			ddrbuf = kzalloc(len * 2, GFP_KERNEL);
			if (!ddrbuf)
				return -ENOMEM;

			instrs[0].ctx.data.len *= 2;
			instrs[0].ctx.data.buf.in = ddrbuf;
		}

		if (check_only) {
			/* Only probe controller support, do not execute. */
			ret = nand_check_op(chip, &op);
			kfree(ddrbuf);
			return ret;
		}

		ret = nand_exec_op(chip, &op);
		if (!ret && force_8bit && nand_interface_is_nvddr(conf)) {
			u8 *dst = buf;

			/* Keep every other byte of the duplicated stream. */
			for (i = 0; i < len; i++)
				dst[i] = ddrbuf[i * 2];
		}

		/* kfree(NULL) is a no-op for the SDR case. */
		kfree(ddrbuf);

		return ret;
	}

	/* Legacy path: raw bus reads are always supported. */
	if (check_only)
		return 0;

	if (force_8bit) {
		u8 *p = buf;
		unsigned int i;

		for (i = 0; i < len; i++)
			p[i] = chip->legacy.read_byte(chip);
	} else {
		chip->legacy.read_buf(chip, buf, len);
	}

	return 0;
}
EXPORT_SYMBOL_GPL(nand_read_data_op);
  1793. /**
  1794. * nand_write_data_op - Write data from the NAND
  1795. * @chip: The NAND chip
  1796. * @buf: buffer containing the data to send on the bus
  1797. * @len: length of the buffer
  1798. * @force_8bit: force 8-bit bus access
  1799. *
  1800. * This function does a raw data write on the bus. Usually used after launching
  1801. * another NAND operation like nand_write_page_begin_op().
  1802. * This function does not select/unselect the CS line.
  1803. *
  1804. * Returns 0 on success, a negative error code otherwise.
  1805. */
  1806. int nand_write_data_op(struct nand_chip *chip, const void *buf,
  1807. unsigned int len, bool force_8bit)
  1808. {
  1809. if (!len || !buf)
  1810. return -EINVAL;
  1811. if (nand_has_exec_op(chip)) {
  1812. struct nand_op_instr instrs[] = {
  1813. NAND_OP_DATA_OUT(len, buf, 0),
  1814. };
  1815. struct nand_operation op = NAND_OPERATION(chip->cur_cs, instrs);
  1816. instrs[0].ctx.data.force_8bit = force_8bit;
  1817. return nand_exec_op(chip, &op);
  1818. }
  1819. if (force_8bit) {
  1820. const u8 *p = buf;
  1821. unsigned int i;
  1822. for (i = 0; i < len; i++)
  1823. chip->legacy.write_byte(chip, p[i]);
  1824. } else {
  1825. chip->legacy.write_buf(chip, buf, len);
  1826. }
  1827. return 0;
  1828. }
  1829. EXPORT_SYMBOL_GPL(nand_write_data_op);
/**
 * struct nand_op_parser_ctx - Context used by the parser
 * @instrs: array of all the instructions that must be addressed
 * @ninstrs: length of the @instrs array
 * @subop: Sub-operation to be passed to the NAND controller
 *
 * This structure is used by the core to split NAND operations into
 * sub-operations that can be handled by the NAND controller.
 */
struct nand_op_parser_ctx {
	const struct nand_op_instr *instrs;
	unsigned int ninstrs;
	/* Window over @instrs describing the chunk currently being matched. */
	struct nand_subop subop;
};
  1844. /**
  1845. * nand_op_parser_must_split_instr - Checks if an instruction must be split
  1846. * @pat: the parser pattern element that matches @instr
  1847. * @instr: pointer to the instruction to check
  1848. * @start_offset: this is an in/out parameter. If @instr has already been
  1849. * split, then @start_offset is the offset from which to start
  1850. * (either an address cycle or an offset in the data buffer).
  1851. * Conversely, if the function returns true (ie. instr must be
  1852. * split), this parameter is updated to point to the first
  1853. * data/address cycle that has not been taken care of.
  1854. *
  1855. * Some NAND controllers are limited and cannot send X address cycles with a
  1856. * unique operation, or cannot read/write more than Y bytes at the same time.
  1857. * In this case, split the instruction that does not fit in a single
  1858. * controller-operation into two or more chunks.
  1859. *
  1860. * Returns true if the instruction must be split, false otherwise.
  1861. * The @start_offset parameter is also updated to the offset at which the next
  1862. * bundle of instruction must start (if an address or a data instruction).
  1863. */
  1864. static bool
  1865. nand_op_parser_must_split_instr(const struct nand_op_parser_pattern_elem *pat,
  1866. const struct nand_op_instr *instr,
  1867. unsigned int *start_offset)
  1868. {
  1869. switch (pat->type) {
  1870. case NAND_OP_ADDR_INSTR:
  1871. if (!pat->ctx.addr.maxcycles)
  1872. break;
  1873. if (instr->ctx.addr.naddrs - *start_offset >
  1874. pat->ctx.addr.maxcycles) {
  1875. *start_offset += pat->ctx.addr.maxcycles;
  1876. return true;
  1877. }
  1878. break;
  1879. case NAND_OP_DATA_IN_INSTR:
  1880. case NAND_OP_DATA_OUT_INSTR:
  1881. if (!pat->ctx.data.maxlen)
  1882. break;
  1883. if (instr->ctx.data.len - *start_offset >
  1884. pat->ctx.data.maxlen) {
  1885. *start_offset += pat->ctx.data.maxlen;
  1886. return true;
  1887. }
  1888. break;
  1889. default:
  1890. break;
  1891. }
  1892. return false;
  1893. }
/**
 * nand_op_parser_match_pat - Checks if a pattern matches the instructions
 *			      remaining in the parser context
 * @pat: the pattern to test
 * @ctx: the parser context structure to match with the pattern @pat
 *
 * Check if @pat matches the set or a sub-set of instructions remaining in @ctx.
 * Returns true if this is the case, false ortherwise. When true is returned,
 * @ctx->subop is updated with the set of instructions to be passed to the
 * controller driver.
 */
static bool
nand_op_parser_match_pat(const struct nand_op_parser_pattern *pat,
			 struct nand_op_parser_ctx *ctx)
{
	/* Resume point inside the first instruction if it was split before. */
	unsigned int instr_offset = ctx->subop.first_instr_start_off;
	const struct nand_op_instr *end = ctx->instrs + ctx->ninstrs;
	const struct nand_op_instr *instr = ctx->subop.instrs;
	unsigned int i, ninstrs;

	for (i = 0, ninstrs = 0; i < pat->nelems && instr < end; i++) {
		/*
		 * The pattern instruction does not match the operation
		 * instruction. If the instruction is marked optional in the
		 * pattern definition, we skip the pattern element and continue
		 * to the next one. If the element is mandatory, there's no
		 * match and we can return false directly.
		 */
		if (instr->type != pat->elems[i].type) {
			if (!pat->elems[i].optional)
				return false;

			continue;
		}

		/*
		 * Now check the pattern element constraints. If the pattern is
		 * not able to handle the whole instruction in a single step,
		 * we have to split it.
		 * The last_instr_end_off value comes back updated to point to
		 * the position where we have to split the instruction (the
		 * start of the next subop chunk).
		 */
		if (nand_op_parser_must_split_instr(&pat->elems[i], instr,
						    &instr_offset)) {
			/* The split instruction still belongs to this subop. */
			ninstrs++;
			i++;
			break;
		}

		instr++;
		ninstrs++;
		instr_offset = 0;
	}

	/*
	 * This can happen if all instructions of a pattern are optional.
	 * Still, if there's not at least one instruction handled by this
	 * pattern, this is not a match, and we should try the next one (if
	 * any).
	 */
	if (!ninstrs)
		return false;

	/*
	 * We had a match on the pattern head, but the pattern may be longer
	 * than the instructions we're asked to execute. We need to make sure
	 * there's no mandatory elements in the pattern tail.
	 */
	for (; i < pat->nelems; i++) {
		if (!pat->elems[i].optional)
			return false;
	}

	/*
	 * We have a match: update the subop structure accordingly and return
	 * true.
	 */
	ctx->subop.ninstrs = ninstrs;
	ctx->subop.last_instr_end_off = instr_offset;

	return true;
}
#if IS_ENABLED(CONFIG_DYNAMIC_DEBUG) || defined(DEBUG)
/*
 * Dump every instruction of the full operation, prefixing the ones that
 * belong to the sub-operation currently described by @ctx->subop with an
 * arrow marker so the selected chunk is visible in the trace.
 */
static void nand_op_parser_trace(const struct nand_op_parser_ctx *ctx)
{
	const struct nand_op_instr *instr;
	char *prefix = " ";
	unsigned int i;

	pr_debug("executing subop (CS%d):\n", ctx->subop.cs);

	for (i = 0; i < ctx->ninstrs; i++) {
		instr = &ctx->instrs[i];

		/* Switch to the marker prefix at the first subop instruction. */
		if (instr == &ctx->subop.instrs[0])
			prefix = " ->";

		nand_op_trace(prefix, instr);

		/* Drop the marker after the last subop instruction. */
		if (instr == &ctx->subop.instrs[ctx->subop.ninstrs - 1])
			prefix = " ";
	}
}
#else
/* Tracing compiled out when neither DYNAMIC_DEBUG nor DEBUG is enabled. */
static void nand_op_parser_trace(const struct nand_op_parser_ctx *ctx)
{
	/* NOP */
}
#endif
  1991. static int nand_op_parser_cmp_ctx(const struct nand_op_parser_ctx *a,
  1992. const struct nand_op_parser_ctx *b)
  1993. {
  1994. if (a->subop.ninstrs < b->subop.ninstrs)
  1995. return -1;
  1996. else if (a->subop.ninstrs > b->subop.ninstrs)
  1997. return 1;
  1998. if (a->subop.last_instr_end_off < b->subop.last_instr_end_off)
  1999. return -1;
  2000. else if (a->subop.last_instr_end_off > b->subop.last_instr_end_off)
  2001. return 1;
  2002. return 0;
  2003. }
  2004. /**
  2005. * nand_op_parser_exec_op - exec_op parser
  2006. * @chip: the NAND chip
  2007. * @parser: patterns description provided by the controller driver
  2008. * @op: the NAND operation to address
  2009. * @check_only: when true, the function only checks if @op can be handled but
  2010. * does not execute the operation
  2011. *
  2012. * Helper function designed to ease integration of NAND controller drivers that
  2013. * only support a limited set of instruction sequences. The supported sequences
  2014. * are described in @parser, and the framework takes care of splitting @op into
  2015. * multiple sub-operations (if required) and pass them back to the ->exec()
  2016. * callback of the matching pattern if @check_only is set to false.
  2017. *
  2018. * NAND controller drivers should call this function from their own ->exec_op()
  2019. * implementation.
  2020. *
  2021. * Returns 0 on success, a negative error code otherwise. A failure can be
  2022. * caused by an unsupported operation (none of the supported patterns is able
  2023. * to handle the requested operation), or an error returned by one of the
  2024. * matching pattern->exec() hook.
  2025. */
int nand_op_parser_exec_op(struct nand_chip *chip,
			   const struct nand_op_parser *parser,
			   const struct nand_operation *op, bool check_only)
{
	struct nand_op_parser_ctx ctx = {
		.subop.cs = op->cs,
		.subop.instrs = op->instrs,
		.instrs = op->instrs,
		.ninstrs = op->ninstrs,
	};
	unsigned int i;

	/* Consume the operation one sub-operation at a time. */
	while (ctx.subop.instrs < op->instrs + op->ninstrs) {
		const struct nand_op_parser_pattern *pattern;
		struct nand_op_parser_ctx best_ctx;
		int ret, best_pattern = -1;

		/*
		 * Try every pattern on the remaining instructions and keep
		 * the best match, i.e. the one consuming the most
		 * instructions/cycles (see nand_op_parser_cmp_ctx()).
		 */
		for (i = 0; i < parser->npatterns; i++) {
			struct nand_op_parser_ctx test_ctx = ctx;

			pattern = &parser->patterns[i];
			if (!nand_op_parser_match_pat(pattern, &test_ctx))
				continue;

			if (best_pattern >= 0 &&
			    nand_op_parser_cmp_ctx(&test_ctx, &best_ctx) <= 0)
				continue;

			best_pattern = i;
			best_ctx = test_ctx;
		}

		/* No pattern can handle the next chunk: unsupported op. */
		if (best_pattern < 0) {
			pr_debug("->exec_op() parser: pattern not found!\n");
			return -ENOTSUPP;
		}

		ctx = best_ctx;
		nand_op_parser_trace(&ctx);

		if (!check_only) {
			pattern = &parser->patterns[best_pattern];
			ret = pattern->exec(chip, &ctx.subop);
			if (ret)
				return ret;
		}

		/*
		 * Update the context structure by pointing to the start of the
		 * next subop.
		 */
		ctx.subop.instrs = ctx.subop.instrs + ctx.subop.ninstrs;
		/*
		 * A non-zero last_instr_end_off means the last instruction
		 * was split: replay it in the next subop, starting from the
		 * offset where this chunk stopped.
		 */
		if (ctx.subop.last_instr_end_off)
			ctx.subop.instrs -= 1;

		ctx.subop.first_instr_start_off = ctx.subop.last_instr_end_off;
	}

	return 0;
}
  2075. EXPORT_SYMBOL_GPL(nand_op_parser_exec_op);
  2076. static bool nand_instr_is_data(const struct nand_op_instr *instr)
  2077. {
  2078. return instr && (instr->type == NAND_OP_DATA_IN_INSTR ||
  2079. instr->type == NAND_OP_DATA_OUT_INSTR);
  2080. }
  2081. static bool nand_subop_instr_is_valid(const struct nand_subop *subop,
  2082. unsigned int instr_idx)
  2083. {
  2084. return subop && instr_idx < subop->ninstrs;
  2085. }
  2086. static unsigned int nand_subop_get_start_off(const struct nand_subop *subop,
  2087. unsigned int instr_idx)
  2088. {
  2089. if (instr_idx)
  2090. return 0;
  2091. return subop->first_instr_start_off;
  2092. }
  2093. /**
  2094. * nand_subop_get_addr_start_off - Get the start offset in an address array
  2095. * @subop: The entire sub-operation
  2096. * @instr_idx: Index of the instruction inside the sub-operation
  2097. *
  2098. * During driver development, one could be tempted to directly use the
  2099. * ->addr.addrs field of address instructions. This is wrong as address
  2100. * instructions might be split.
  2101. *
  2102. * Given an address instruction, returns the offset of the first cycle to issue.
  2103. */
  2104. unsigned int nand_subop_get_addr_start_off(const struct nand_subop *subop,
  2105. unsigned int instr_idx)
  2106. {
  2107. if (WARN_ON(!nand_subop_instr_is_valid(subop, instr_idx) ||
  2108. subop->instrs[instr_idx].type != NAND_OP_ADDR_INSTR))
  2109. return 0;
  2110. return nand_subop_get_start_off(subop, instr_idx);
  2111. }
  2112. EXPORT_SYMBOL_GPL(nand_subop_get_addr_start_off);
  2113. /**
  2114. * nand_subop_get_num_addr_cyc - Get the remaining address cycles to assert
  2115. * @subop: The entire sub-operation
  2116. * @instr_idx: Index of the instruction inside the sub-operation
  2117. *
  2118. * During driver development, one could be tempted to directly use the
  2119. * ->addr->naddrs field of a data instruction. This is wrong as instructions
  2120. * might be split.
  2121. *
  2122. * Given an address instruction, returns the number of address cycle to issue.
  2123. */
  2124. unsigned int nand_subop_get_num_addr_cyc(const struct nand_subop *subop,
  2125. unsigned int instr_idx)
  2126. {
  2127. int start_off, end_off;
  2128. if (WARN_ON(!nand_subop_instr_is_valid(subop, instr_idx) ||
  2129. subop->instrs[instr_idx].type != NAND_OP_ADDR_INSTR))
  2130. return 0;
  2131. start_off = nand_subop_get_addr_start_off(subop, instr_idx);
  2132. if (instr_idx == subop->ninstrs - 1 &&
  2133. subop->last_instr_end_off)
  2134. end_off = subop->last_instr_end_off;
  2135. else
  2136. end_off = subop->instrs[instr_idx].ctx.addr.naddrs;
  2137. return end_off - start_off;
  2138. }
  2139. EXPORT_SYMBOL_GPL(nand_subop_get_num_addr_cyc);
  2140. /**
  2141. * nand_subop_get_data_start_off - Get the start offset in a data array
  2142. * @subop: The entire sub-operation
  2143. * @instr_idx: Index of the instruction inside the sub-operation
  2144. *
  2145. * During driver development, one could be tempted to directly use the
  2146. * ->data->buf.{in,out} field of data instructions. This is wrong as data
  2147. * instructions might be split.
  2148. *
  2149. * Given a data instruction, returns the offset to start from.
  2150. */
  2151. unsigned int nand_subop_get_data_start_off(const struct nand_subop *subop,
  2152. unsigned int instr_idx)
  2153. {
  2154. if (WARN_ON(!nand_subop_instr_is_valid(subop, instr_idx) ||
  2155. !nand_instr_is_data(&subop->instrs[instr_idx])))
  2156. return 0;
  2157. return nand_subop_get_start_off(subop, instr_idx);
  2158. }
  2159. EXPORT_SYMBOL_GPL(nand_subop_get_data_start_off);
  2160. /**
  2161. * nand_subop_get_data_len - Get the number of bytes to retrieve
  2162. * @subop: The entire sub-operation
  2163. * @instr_idx: Index of the instruction inside the sub-operation
  2164. *
  2165. * During driver development, one could be tempted to directly use the
  2166. * ->data->len field of a data instruction. This is wrong as data instructions
  2167. * might be split.
  2168. *
  2169. * Returns the length of the chunk of data to send/receive.
  2170. */
  2171. unsigned int nand_subop_get_data_len(const struct nand_subop *subop,
  2172. unsigned int instr_idx)
  2173. {
  2174. int start_off = 0, end_off;
  2175. if (WARN_ON(!nand_subop_instr_is_valid(subop, instr_idx) ||
  2176. !nand_instr_is_data(&subop->instrs[instr_idx])))
  2177. return 0;
  2178. start_off = nand_subop_get_data_start_off(subop, instr_idx);
  2179. if (instr_idx == subop->ninstrs - 1 &&
  2180. subop->last_instr_end_off)
  2181. end_off = subop->last_instr_end_off;
  2182. else
  2183. end_off = subop->instrs[instr_idx].ctx.data.len;
  2184. return end_off - start_off;
  2185. }
  2186. EXPORT_SYMBOL_GPL(nand_subop_get_data_len);
  2187. /**
  2188. * nand_reset - Reset and initialize a NAND device
  2189. * @chip: The NAND chip
  2190. * @chipnr: Internal die id
  2191. *
  2192. * Save the timings data structure, then apply SDR timings mode 0 (see
  2193. * nand_reset_interface for details), do the reset operation, and apply
  2194. * back the previous timings.
  2195. *
  2196. * Returns 0 on success, a negative error code otherwise.
  2197. */
  2198. int nand_reset(struct nand_chip *chip, int chipnr)
  2199. {
  2200. int ret;
  2201. ret = nand_reset_interface(chip, chipnr);
  2202. if (ret)
  2203. return ret;
  2204. /*
  2205. * The CS line has to be released before we can apply the new NAND
  2206. * interface settings, hence this weird nand_select_target()
  2207. * nand_deselect_target() dance.
  2208. */
  2209. nand_select_target(chip, chipnr);
  2210. ret = nand_reset_op(chip);
  2211. nand_deselect_target(chip);
  2212. if (ret)
  2213. return ret;
  2214. ret = nand_setup_interface(chip, chipnr);
  2215. if (ret)
  2216. return ret;
  2217. return 0;
  2218. }
  2219. EXPORT_SYMBOL_GPL(nand_reset);
  2220. /**
  2221. * nand_get_features - wrapper to perform a GET_FEATURE
  2222. * @chip: NAND chip info structure
  2223. * @addr: feature address
  2224. * @subfeature_param: the subfeature parameters, a four bytes array
  2225. *
  2226. * Returns 0 for success, a negative error otherwise. Returns -ENOTSUPP if the
  2227. * operation cannot be handled.
  2228. */
  2229. int nand_get_features(struct nand_chip *chip, int addr,
  2230. u8 *subfeature_param)
  2231. {
  2232. if (!nand_supports_get_features(chip, addr))
  2233. return -ENOTSUPP;
  2234. if (chip->legacy.get_features)
  2235. return chip->legacy.get_features(chip, addr, subfeature_param);
  2236. return nand_get_features_op(chip, addr, subfeature_param);
  2237. }
  2238. /**
  2239. * nand_set_features - wrapper to perform a SET_FEATURE
  2240. * @chip: NAND chip info structure
  2241. * @addr: feature address
  2242. * @subfeature_param: the subfeature parameters, a four bytes array
  2243. *
  2244. * Returns 0 for success, a negative error otherwise. Returns -ENOTSUPP if the
  2245. * operation cannot be handled.
  2246. */
  2247. int nand_set_features(struct nand_chip *chip, int addr,
  2248. u8 *subfeature_param)
  2249. {
  2250. if (!nand_supports_set_features(chip, addr))
  2251. return -ENOTSUPP;
  2252. if (chip->legacy.set_features)
  2253. return chip->legacy.set_features(chip, addr, subfeature_param);
  2254. return nand_set_features_op(chip, addr, subfeature_param);
  2255. }
  2256. /**
  2257. * nand_check_erased_buf - check if a buffer contains (almost) only 0xff data
  2258. * @buf: buffer to test
  2259. * @len: buffer length
  2260. * @bitflips_threshold: maximum number of bitflips
  2261. *
  2262. * Check if a buffer contains only 0xff, which means the underlying region
  2263. * has been erased and is ready to be programmed.
  2264. * The bitflips_threshold specify the maximum number of bitflips before
  2265. * considering the region is not erased.
  2266. * Note: The logic of this function has been extracted from the memweight
  2267. * implementation, except that nand_check_erased_buf function exit before
  2268. * testing the whole buffer if the number of bitflips exceed the
  2269. * bitflips_threshold value.
  2270. *
  2271. * Returns a positive number of bitflips less than or equal to
  2272. * bitflips_threshold, or -ERROR_CODE for bitflips in excess of the
  2273. * threshold.
  2274. */
  2275. static int nand_check_erased_buf(void *buf, int len, int bitflips_threshold)
  2276. {
  2277. const unsigned char *bitmap = buf;
  2278. int bitflips = 0;
  2279. int weight;
  2280. for (; len && ((uintptr_t)bitmap) % sizeof(long);
  2281. len--, bitmap++) {
  2282. weight = hweight8(*bitmap);
  2283. bitflips += BITS_PER_BYTE - weight;
  2284. if (unlikely(bitflips > bitflips_threshold))
  2285. return -EBADMSG;
  2286. }
  2287. for (; len >= sizeof(long);
  2288. len -= sizeof(long), bitmap += sizeof(long)) {
  2289. unsigned long d = *((unsigned long *)bitmap);
  2290. if (d == ~0UL)
  2291. continue;
  2292. weight = hweight_long(d);
  2293. bitflips += BITS_PER_LONG - weight;
  2294. if (unlikely(bitflips > bitflips_threshold))
  2295. return -EBADMSG;
  2296. }
  2297. for (; len > 0; len--, bitmap++) {
  2298. weight = hweight8(*bitmap);
  2299. bitflips += BITS_PER_BYTE - weight;
  2300. if (unlikely(bitflips > bitflips_threshold))
  2301. return -EBADMSG;
  2302. }
  2303. return bitflips;
  2304. }
  2305. /**
  2306. * nand_check_erased_ecc_chunk - check if an ECC chunk contains (almost) only
  2307. * 0xff data
  2308. * @data: data buffer to test
  2309. * @datalen: data length
  2310. * @ecc: ECC buffer
  2311. * @ecclen: ECC length
  2312. * @extraoob: extra OOB buffer
  2313. * @extraooblen: extra OOB length
  2314. * @bitflips_threshold: maximum number of bitflips
  2315. *
  2316. * Check if a data buffer and its associated ECC and OOB data contains only
  2317. * 0xff pattern, which means the underlying region has been erased and is
  2318. * ready to be programmed.
  2319. * The bitflips_threshold specify the maximum number of bitflips before
  2320. * considering the region as not erased.
  2321. *
  2322. * Note:
  2323. * 1/ ECC algorithms are working on pre-defined block sizes which are usually
  2324. * different from the NAND page size. When fixing bitflips, ECC engines will
  2325. * report the number of errors per chunk, and the NAND core infrastructure
  2326. * expect you to return the maximum number of bitflips for the whole page.
  2327. * This is why you should always use this function on a single chunk and
  2328. * not on the whole page. After checking each chunk you should update your
  2329. * max_bitflips value accordingly.
  2330. * 2/ When checking for bitflips in erased pages you should not only check
  2331. * the payload data but also their associated ECC data, because a user might
  2332. * have programmed almost all bits to 1 but a few. In this case, we
  2333. * shouldn't consider the chunk as erased, and checking ECC bytes prevent
  2334. * this case.
  2335. * 3/ The extraoob argument is optional, and should be used if some of your OOB
  2336. * data are protected by the ECC engine.
  2337. * It could also be used if you support subpages and want to attach some
  2338. * extra OOB data to an ECC chunk.
  2339. *
  2340. * Returns a positive number of bitflips less than or equal to
  2341. * bitflips_threshold, or -ERROR_CODE for bitflips in excess of the
  2342. * threshold. In case of success, the passed buffers are filled with 0xff.
  2343. */
  2344. int nand_check_erased_ecc_chunk(void *data, int datalen,
  2345. void *ecc, int ecclen,
  2346. void *extraoob, int extraooblen,
  2347. int bitflips_threshold)
  2348. {
  2349. int data_bitflips = 0, ecc_bitflips = 0, extraoob_bitflips = 0;
  2350. data_bitflips = nand_check_erased_buf(data, datalen,
  2351. bitflips_threshold);
  2352. if (data_bitflips < 0)
  2353. return data_bitflips;
  2354. bitflips_threshold -= data_bitflips;
  2355. ecc_bitflips = nand_check_erased_buf(ecc, ecclen, bitflips_threshold);
  2356. if (ecc_bitflips < 0)
  2357. return ecc_bitflips;
  2358. bitflips_threshold -= ecc_bitflips;
  2359. extraoob_bitflips = nand_check_erased_buf(extraoob, extraooblen,
  2360. bitflips_threshold);
  2361. if (extraoob_bitflips < 0)
  2362. return extraoob_bitflips;
  2363. if (data_bitflips)
  2364. memset(data, 0xff, datalen);
  2365. if (ecc_bitflips)
  2366. memset(ecc, 0xff, ecclen);
  2367. if (extraoob_bitflips)
  2368. memset(extraoob, 0xff, extraooblen);
  2369. return data_bitflips + ecc_bitflips + extraoob_bitflips;
  2370. }
  2371. EXPORT_SYMBOL(nand_check_erased_ecc_chunk);
  2372. /**
  2373. * nand_read_page_raw_notsupp - dummy read raw page function
  2374. * @chip: nand chip info structure
  2375. * @buf: buffer to store read data
  2376. * @oob_required: caller requires OOB data read to chip->oob_poi
  2377. * @page: page number to read
  2378. *
  2379. * Returns -ENOTSUPP unconditionally.
  2380. */
int nand_read_page_raw_notsupp(struct nand_chip *chip, u8 *buf,
			       int oob_required, int page)
{
	/* Placeholder for controllers that cannot do raw page reads. */
	return -ENOTSUPP;
}
  2386. /**
  2387. * nand_read_page_raw - [INTERN] read raw page data without ecc
  2388. * @chip: nand chip info structure
  2389. * @buf: buffer to store read data
  2390. * @oob_required: caller requires OOB data read to chip->oob_poi
  2391. * @page: page number to read
  2392. *
  2393. * Not for syndrome calculating ECC controllers, which use a special oob layout.
  2394. */
  2395. int nand_read_page_raw(struct nand_chip *chip, uint8_t *buf, int oob_required,
  2396. int page)
  2397. {
  2398. struct mtd_info *mtd = nand_to_mtd(chip);
  2399. int ret;
  2400. ret = nand_read_page_op(chip, page, 0, buf, mtd->writesize);
  2401. if (ret)
  2402. return ret;
  2403. if (oob_required) {
  2404. ret = nand_read_data_op(chip, chip->oob_poi, mtd->oobsize,
  2405. false, false);
  2406. if (ret)
  2407. return ret;
  2408. }
  2409. return 0;
  2410. }
  2411. EXPORT_SYMBOL(nand_read_page_raw);
  2412. /**
  2413. * nand_monolithic_read_page_raw - Monolithic page read in raw mode
  2414. * @chip: NAND chip info structure
  2415. * @buf: buffer to store read data
  2416. * @oob_required: caller requires OOB data read to chip->oob_poi
  2417. * @page: page number to read
  2418. *
  2419. * This is a raw page read, ie. without any error detection/correction.
  2420. * Monolithic means we are requesting all the relevant data (main plus
  2421. * eventually OOB) to be loaded in the NAND cache and sent over the
  2422. * bus (from the NAND chip to the NAND controller) in a single
  2423. * operation. This is an alternative to nand_read_page_raw(), which
  2424. * first reads the main data, and if the OOB data is requested too,
  2425. * then reads more data on the bus.
  2426. */
  2427. int nand_monolithic_read_page_raw(struct nand_chip *chip, u8 *buf,
  2428. int oob_required, int page)
  2429. {
  2430. struct mtd_info *mtd = nand_to_mtd(chip);
  2431. unsigned int size = mtd->writesize;
  2432. u8 *read_buf = buf;
  2433. int ret;
  2434. if (oob_required) {
  2435. size += mtd->oobsize;
  2436. if (buf != chip->data_buf)
  2437. read_buf = nand_get_data_buf(chip);
  2438. }
  2439. ret = nand_read_page_op(chip, page, 0, read_buf, size);
  2440. if (ret)
  2441. return ret;
  2442. if (buf != chip->data_buf)
  2443. memcpy(buf, read_buf, mtd->writesize);
  2444. return 0;
  2445. }
  2446. EXPORT_SYMBOL(nand_monolithic_read_page_raw);
  2447. /**
  2448. * nand_read_page_raw_syndrome - [INTERN] read raw page data without ecc
  2449. * @chip: nand chip info structure
  2450. * @buf: buffer to store read data
  2451. * @oob_required: caller requires OOB data read to chip->oob_poi
  2452. * @page: page number to read
  2453. *
  2454. * We need a special oob layout and handling even when OOB isn't used.
  2455. */
static int nand_read_page_raw_syndrome(struct nand_chip *chip, uint8_t *buf,
				       int oob_required, int page)
{
	struct mtd_info *mtd = nand_to_mtd(chip);
	int eccsize = chip->ecc.size;
	int eccbytes = chip->ecc.bytes;
	uint8_t *oob = chip->oob_poi;
	int steps, size, ret;

	/* Start the page read without transferring any data yet. */
	ret = nand_read_page_op(chip, page, 0, NULL, 0);
	if (ret)
		return ret;

	/*
	 * Syndrome layout interleaves data and OOB on the flash:
	 * per ECC step read data chunk, optional prepad, ECC bytes,
	 * optional postpad — advancing both cursors as we go.
	 */
	for (steps = chip->ecc.steps; steps > 0; steps--) {
		ret = nand_read_data_op(chip, buf, eccsize, false, false);
		if (ret)
			return ret;

		buf += eccsize;

		if (chip->ecc.prepad) {
			ret = nand_read_data_op(chip, oob, chip->ecc.prepad,
						false, false);
			if (ret)
				return ret;

			oob += chip->ecc.prepad;
		}

		ret = nand_read_data_op(chip, oob, eccbytes, false, false);
		if (ret)
			return ret;

		oob += eccbytes;

		if (chip->ecc.postpad) {
			ret = nand_read_data_op(chip, oob, chip->ecc.postpad,
						false, false);
			if (ret)
				return ret;

			oob += chip->ecc.postpad;
		}
	}

	/* Read whatever part of the OOB area is left after the last step. */
	size = mtd->oobsize - (oob - chip->oob_poi);
	if (size) {
		ret = nand_read_data_op(chip, oob, size, false, false);
		if (ret)
			return ret;
	}

	return 0;
}
  2499. /**
  2500. * nand_read_page_swecc - [REPLACEABLE] software ECC based page read function
  2501. * @chip: nand chip info structure
  2502. * @buf: buffer to store read data
  2503. * @oob_required: caller requires OOB data read to chip->oob_poi
  2504. * @page: page number to read
  2505. */
  2506. static int nand_read_page_swecc(struct nand_chip *chip, uint8_t *buf,
  2507. int oob_required, int page)
  2508. {
  2509. struct mtd_info *mtd = nand_to_mtd(chip);
  2510. int i, eccsize = chip->ecc.size, ret;
  2511. int eccbytes = chip->ecc.bytes;
  2512. int eccsteps = chip->ecc.steps;
  2513. uint8_t *p = buf;
  2514. uint8_t *ecc_calc = chip->ecc.calc_buf;
  2515. uint8_t *ecc_code = chip->ecc.code_buf;
  2516. unsigned int max_bitflips = 0;
  2517. chip->ecc.read_page_raw(chip, buf, 1, page);
  2518. for (i = 0; eccsteps; eccsteps--, i += eccbytes, p += eccsize)
  2519. chip->ecc.calculate(chip, p, &ecc_calc[i]);
  2520. ret = mtd_ooblayout_get_eccbytes(mtd, ecc_code, chip->oob_poi, 0,
  2521. chip->ecc.total);
  2522. if (ret)
  2523. return ret;
  2524. eccsteps = chip->ecc.steps;
  2525. p = buf;
  2526. for (i = 0 ; eccsteps; eccsteps--, i += eccbytes, p += eccsize) {
  2527. int stat;
  2528. stat = chip->ecc.correct(chip, p, &ecc_code[i], &ecc_calc[i]);
  2529. if (stat < 0) {
  2530. mtd->ecc_stats.failed++;
  2531. } else {
  2532. mtd->ecc_stats.corrected += stat;
  2533. max_bitflips = max_t(unsigned int, max_bitflips, stat);
  2534. }
  2535. }
  2536. return max_bitflips;
  2537. }
  2538. /**
  2539. * nand_read_subpage - [REPLACEABLE] ECC based sub-page read function
  2540. * @chip: nand chip info structure
  2541. * @data_offs: offset of requested data within the page
  2542. * @readlen: data length
  2543. * @bufpoi: buffer to store read data
  2544. * @page: page number to read
  2545. */
static int nand_read_subpage(struct nand_chip *chip, uint32_t data_offs,
			     uint32_t readlen, uint8_t *bufpoi, int page)
{
	struct mtd_info *mtd = nand_to_mtd(chip);
	int start_step, end_step, num_steps, ret;
	uint8_t *p;
	int data_col_addr, i, gaps = 0;
	int datafrag_len, eccfrag_len, aligned_len, aligned_pos;
	int busw = (chip->options & NAND_BUSWIDTH_16) ? 2 : 1;
	int index, section = 0;
	unsigned int max_bitflips = 0;
	struct mtd_oob_region oobregion = { };

	/* Column address within the page aligned to ECC size (256bytes) */
	start_step = data_offs / chip->ecc.size;
	end_step = (data_offs + readlen - 1) / chip->ecc.size;
	num_steps = end_step - start_step + 1;
	/* Byte offset of the first relevant ECC code in the OOB layout. */
	index = start_step * chip->ecc.bytes;

	/* Data size aligned to ECC ecc.size */
	datafrag_len = num_steps * chip->ecc.size;
	eccfrag_len = num_steps * chip->ecc.bytes;

	data_col_addr = start_step * chip->ecc.size;
	/* If we read not a page aligned data */
	p = bufpoi + data_col_addr;
	ret = nand_read_page_op(chip, page, data_col_addr, p, datafrag_len);
	if (ret)
		return ret;

	/* Calculate ECC */
	for (i = 0; i < eccfrag_len ; i += chip->ecc.bytes, p += chip->ecc.size)
		chip->ecc.calculate(chip, p, &chip->ecc.calc_buf[i]);

	/*
	 * The performance is faster if we position offsets according to
	 * ecc.pos. Let's make sure that there are no gaps in ECC positions.
	 */
	ret = mtd_ooblayout_find_eccregion(mtd, index, &section, &oobregion);
	if (ret)
		return ret;

	if (oobregion.length < eccfrag_len)
		gaps = 1;

	if (gaps) {
		/* ECC bytes are scattered: read the whole OOB area. */
		ret = nand_change_read_column_op(chip, mtd->writesize,
						 chip->oob_poi, mtd->oobsize,
						 false);
		if (ret)
			return ret;
	} else {
		/*
		 * Send the command to read the particular ECC bytes take care
		 * about buswidth alignment in read_buf.
		 */
		aligned_pos = oobregion.offset & ~(busw - 1);
		aligned_len = eccfrag_len;
		/* Widen the window by one byte on each unaligned edge. */
		if (oobregion.offset & (busw - 1))
			aligned_len++;
		if ((oobregion.offset + (num_steps * chip->ecc.bytes)) &
		    (busw - 1))
			aligned_len++;

		ret = nand_change_read_column_op(chip,
						 mtd->writesize + aligned_pos,
						 &chip->oob_poi[aligned_pos],
						 aligned_len, false);
		if (ret)
			return ret;
	}

	/* Gather the stored ECC bytes for the requested steps. */
	ret = mtd_ooblayout_get_eccbytes(mtd, chip->ecc.code_buf,
					 chip->oob_poi, index, eccfrag_len);
	if (ret)
		return ret;

	/* Correct each chunk, comparing stored and computed ECC. */
	p = bufpoi + data_col_addr;
	for (i = 0; i < eccfrag_len ; i += chip->ecc.bytes, p += chip->ecc.size) {
		int stat;

		stat = chip->ecc.correct(chip, p, &chip->ecc.code_buf[i],
					 &chip->ecc.calc_buf[i]);
		if (stat == -EBADMSG &&
		    (chip->ecc.options & NAND_ECC_GENERIC_ERASED_CHECK)) {
			/* check for empty pages with bitflips */
			stat = nand_check_erased_ecc_chunk(p, chip->ecc.size,
							   &chip->ecc.code_buf[i],
							   chip->ecc.bytes,
							   NULL, 0,
							   chip->ecc.strength);
		}

		if (stat < 0) {
			mtd->ecc_stats.failed++;
		} else {
			mtd->ecc_stats.corrected += stat;
			max_bitflips = max_t(unsigned int, max_bitflips, stat);
		}
	}

	return max_bitflips;
}
  2636. /**
  2637. * nand_read_page_hwecc - [REPLACEABLE] hardware ECC based page read function
  2638. * @chip: nand chip info structure
  2639. * @buf: buffer to store read data
  2640. * @oob_required: caller requires OOB data read to chip->oob_poi
  2641. * @page: page number to read
  2642. *
  2643. * Not for syndrome calculating ECC controllers which need a special oob layout.
  2644. */
static int nand_read_page_hwecc(struct nand_chip *chip, uint8_t *buf,
				int oob_required, int page)
{
	struct mtd_info *mtd = nand_to_mtd(chip);
	int i, eccsize = chip->ecc.size, ret;
	int eccbytes = chip->ecc.bytes;
	int eccsteps = chip->ecc.steps;
	uint8_t *p = buf;
	uint8_t *ecc_calc = chip->ecc.calc_buf;
	uint8_t *ecc_code = chip->ecc.code_buf;
	unsigned int max_bitflips = 0;

	/* Start the page read without transferring any data yet. */
	ret = nand_read_page_op(chip, page, 0, NULL, 0);
	if (ret)
		return ret;

	/*
	 * Read each data chunk with the hardware ECC engine armed, and let
	 * it compute the ECC of the chunk just transferred.
	 */
	for (i = 0; eccsteps; eccsteps--, i += eccbytes, p += eccsize) {
		chip->ecc.hwctl(chip, NAND_ECC_READ);

		ret = nand_read_data_op(chip, p, eccsize, false, false);
		if (ret)
			return ret;

		chip->ecc.calculate(chip, p, &ecc_calc[i]);
	}

	/* Read the OOB area, which holds the ECC bytes written with the page. */
	ret = nand_read_data_op(chip, chip->oob_poi, mtd->oobsize, false,
				false);
	if (ret)
		return ret;

	ret = mtd_ooblayout_get_eccbytes(mtd, ecc_code, chip->oob_poi, 0,
					 chip->ecc.total);
	if (ret)
		return ret;

	/* Compare stored vs computed ECC and correct chunk by chunk. */
	eccsteps = chip->ecc.steps;
	p = buf;

	for (i = 0 ; eccsteps; eccsteps--, i += eccbytes, p += eccsize) {
		int stat;

		stat = chip->ecc.correct(chip, p, &ecc_code[i], &ecc_calc[i]);
		if (stat == -EBADMSG &&
		    (chip->ecc.options & NAND_ECC_GENERIC_ERASED_CHECK)) {
			/* check for empty pages with bitflips */
			stat = nand_check_erased_ecc_chunk(p, eccsize,
							   &ecc_code[i], eccbytes,
							   NULL, 0,
							   chip->ecc.strength);
		}

		if (stat < 0) {
			mtd->ecc_stats.failed++;
		} else {
			mtd->ecc_stats.corrected += stat;
			max_bitflips = max_t(unsigned int, max_bitflips, stat);
		}
	}

	return max_bitflips;
}
  2696. /**
  2697. * nand_read_page_hwecc_oob_first - Hardware ECC page read with ECC
  2698. * data read from OOB area
  2699. * @chip: nand chip info structure
  2700. * @buf: buffer to store read data
  2701. * @oob_required: caller requires OOB data read to chip->oob_poi
  2702. * @page: page number to read
  2703. *
  2704. * Hardware ECC for large page chips, which requires the ECC data to be
  2705. * extracted from the OOB before the actual data is read.
  2706. */
int nand_read_page_hwecc_oob_first(struct nand_chip *chip, uint8_t *buf,
				   int oob_required, int page)
{
	struct mtd_info *mtd = nand_to_mtd(chip);
	int i, eccsize = chip->ecc.size, ret;
	int eccbytes = chip->ecc.bytes;
	int eccsteps = chip->ecc.steps;
	uint8_t *p = buf;
	uint8_t *ecc_code = chip->ecc.code_buf;
	unsigned int max_bitflips = 0;

	/* Read the OOB area first */
	ret = nand_read_oob_op(chip, page, 0, chip->oob_poi, mtd->oobsize);
	if (ret)
		return ret;

	/* Move read cursor to start of page */
	ret = nand_change_read_column_op(chip, 0, NULL, 0, false);
	if (ret)
		return ret;

	/* Pull the stored ECC bytes out of the OOB we just read. */
	ret = mtd_ooblayout_get_eccbytes(mtd, ecc_code, chip->oob_poi, 0,
					 chip->ecc.total);
	if (ret)
		return ret;

	/*
	 * Read and correct one chunk at a time; the hardware engine computes
	 * the syndrome while the data goes by, so ->correct() is called with
	 * a NULL calc_ecc argument.
	 */
	for (i = 0; eccsteps; eccsteps--, i += eccbytes, p += eccsize) {
		int stat;

		chip->ecc.hwctl(chip, NAND_ECC_READ);

		ret = nand_read_data_op(chip, p, eccsize, false, false);
		if (ret)
			return ret;

		stat = chip->ecc.correct(chip, p, &ecc_code[i], NULL);
		if (stat == -EBADMSG &&
		    (chip->ecc.options & NAND_ECC_GENERIC_ERASED_CHECK)) {
			/* check for empty pages with bitflips */
			stat = nand_check_erased_ecc_chunk(p, eccsize,
							   &ecc_code[i],
							   eccbytes, NULL, 0,
							   chip->ecc.strength);
		}

		if (stat < 0) {
			mtd->ecc_stats.failed++;
		} else {
			mtd->ecc_stats.corrected += stat;
			max_bitflips = max_t(unsigned int, max_bitflips, stat);
		}
	}

	return max_bitflips;
}
  2753. EXPORT_SYMBOL_GPL(nand_read_page_hwecc_oob_first);
/**
 * nand_read_page_syndrome - [REPLACEABLE] hardware ECC syndrome based page read
 * @chip: nand chip info structure
 * @buf: buffer to store read data
 * @oob_required: caller requires OOB data read to chip->oob_poi
 * @page: page number to read
 *
 * The hw generator calculates the error syndrome automatically. Therefore we
 * need a special oob layout and handling.
 *
 * On-flash layout per step is data-[prepad]-ecc-[postpad]; the OOB bytes are
 * streamed into chip->oob_poi as they are read.
 *
 * Return: max number of bitflips corrected in any single ECC step, or a
 * negative error code on bus failure. Uncorrectable steps only bump
 * mtd->ecc_stats.failed.
 */
static int nand_read_page_syndrome(struct nand_chip *chip, uint8_t *buf,
				   int oob_required, int page)
{
	struct mtd_info *mtd = nand_to_mtd(chip);
	int ret, i, eccsize = chip->ecc.size;
	int eccbytes = chip->ecc.bytes;
	int eccsteps = chip->ecc.steps;
	int eccpadbytes = eccbytes + chip->ecc.prepad + chip->ecc.postpad;
	uint8_t *p = buf;
	uint8_t *oob = chip->oob_poi;
	unsigned int max_bitflips = 0;

	ret = nand_read_page_op(chip, page, 0, NULL, 0);
	if (ret)
		return ret;

	for (i = 0; eccsteps; eccsteps--, i += eccbytes, p += eccsize) {
		int stat;

		/* Arm the ECC engine, then read this step's data chunk */
		chip->ecc.hwctl(chip, NAND_ECC_READ);

		ret = nand_read_data_op(chip, p, eccsize, false, false);
		if (ret)
			return ret;

		if (chip->ecc.prepad) {
			ret = nand_read_data_op(chip, oob, chip->ecc.prepad,
						false, false);
			if (ret)
				return ret;

			oob += chip->ecc.prepad;
		}

		/* Switch the engine to syndrome read mode for the ECC bytes */
		chip->ecc.hwctl(chip, NAND_ECC_READSYN);

		ret = nand_read_data_op(chip, oob, eccbytes, false, false);
		if (ret)
			return ret;

		stat = chip->ecc.correct(chip, p, oob, NULL);

		oob += eccbytes;

		if (chip->ecc.postpad) {
			ret = nand_read_data_op(chip, oob, chip->ecc.postpad,
						false, false);
			if (ret)
				return ret;

			oob += chip->ecc.postpad;
		}

		if (stat == -EBADMSG &&
		    (chip->ecc.options & NAND_ECC_GENERIC_ERASED_CHECK)) {
			/* check for empty pages with bitflips */
			stat = nand_check_erased_ecc_chunk(p, chip->ecc.size,
							   oob - eccpadbytes,
							   eccpadbytes,
							   NULL, 0,
							   chip->ecc.strength);
		}

		if (stat < 0) {
			mtd->ecc_stats.failed++;
		} else {
			mtd->ecc_stats.corrected += stat;
			max_bitflips = max_t(unsigned int, max_bitflips, stat);
		}
	}

	/* Calculate remaining oob bytes */
	i = mtd->oobsize - (oob - chip->oob_poi);
	if (i) {
		ret = nand_read_data_op(chip, oob, i, false, false);
		if (ret)
			return ret;
	}

	return max_bitflips;
}
  2829. /**
  2830. * nand_transfer_oob - [INTERN] Transfer oob to client buffer
  2831. * @chip: NAND chip object
  2832. * @oob: oob destination address
  2833. * @ops: oob ops structure
  2834. * @len: size of oob to transfer
  2835. */
  2836. static uint8_t *nand_transfer_oob(struct nand_chip *chip, uint8_t *oob,
  2837. struct mtd_oob_ops *ops, size_t len)
  2838. {
  2839. struct mtd_info *mtd = nand_to_mtd(chip);
  2840. int ret;
  2841. switch (ops->mode) {
  2842. case MTD_OPS_PLACE_OOB:
  2843. case MTD_OPS_RAW:
  2844. memcpy(oob, chip->oob_poi + ops->ooboffs, len);
  2845. return oob + len;
  2846. case MTD_OPS_AUTO_OOB:
  2847. ret = mtd_ooblayout_get_databytes(mtd, oob, chip->oob_poi,
  2848. ops->ooboffs, len);
  2849. BUG_ON(ret);
  2850. return oob + len;
  2851. default:
  2852. BUG();
  2853. }
  2854. return NULL;
  2855. }
  2856. /**
  2857. * nand_setup_read_retry - [INTERN] Set the READ RETRY mode
  2858. * @chip: NAND chip object
  2859. * @retry_mode: the retry mode to use
  2860. *
  2861. * Some vendors supply a special command to shift the Vt threshold, to be used
  2862. * when there are too many bitflips in a page (i.e., ECC error). After setting
  2863. * a new threshold, the host should retry reading the page.
  2864. */
  2865. static int nand_setup_read_retry(struct nand_chip *chip, int retry_mode)
  2866. {
  2867. pr_debug("setting READ RETRY mode %d\n", retry_mode);
  2868. if (retry_mode >= chip->read_retries)
  2869. return -EINVAL;
  2870. if (!chip->ops.setup_read_retry)
  2871. return -EOPNOTSUPP;
  2872. return chip->ops.setup_read_retry(chip, retry_mode);
  2873. }
  2874. static void nand_wait_readrdy(struct nand_chip *chip)
  2875. {
  2876. const struct nand_interface_config *conf;
  2877. if (!(chip->options & NAND_NEED_READRDY))
  2878. return;
  2879. conf = nand_get_interface_config(chip);
  2880. WARN_ON(nand_wait_rdy_op(chip, NAND_COMMON_TIMING_MS(conf, tR_max), 0));
  2881. }
/**
 * nand_do_read_ops - [INTERN] Read data with ECC
 * @chip: NAND chip object
 * @from: offset to read from
 * @ops: oob ops structure
 *
 * Internal function. Called with chip held.
 *
 * Reads ops->len data bytes (and optionally OOB) starting at @from,
 * page by page, using the configured ECC read path. Retries a page with
 * vendor READ RETRY modes when ECC fails and the chip supports it, and
 * serves repeat reads of the last page from the page cache when possible.
 *
 * Return: max bitflips seen in any ECC step, -EBADMSG if any page stayed
 * uncorrectable after all retries, or a negative error code from the bus.
 */
static int nand_do_read_ops(struct nand_chip *chip, loff_t from,
			    struct mtd_oob_ops *ops)
{
	int chipnr, page, realpage, col, bytes, aligned, oob_required;
	struct mtd_info *mtd = nand_to_mtd(chip);
	int ret = 0;
	uint32_t readlen = ops->len;
	uint32_t oobreadlen = ops->ooblen;
	uint32_t max_oobsize = mtd_oobavail(mtd, ops);
	uint8_t *bufpoi, *oob, *buf;
	int use_bounce_buf;
	unsigned int max_bitflips = 0;
	int retry_mode = 0;
	bool ecc_fail = false;

	/* Check if the region is secured */
	if (nand_region_is_secured(chip, from, readlen))
		return -EIO;

	chipnr = (int)(from >> chip->chip_shift);
	nand_select_target(chip, chipnr);

	realpage = (int)(from >> chip->page_shift);
	page = realpage & chip->pagemask;

	/* Byte offset within the first page */
	col = (int)(from & (mtd->writesize - 1));

	buf = ops->datbuf;
	oob = ops->oobbuf;
	oob_required = oob ? 1 : 0;

	while (1) {
		/* Snapshot stats so per-page ECC failures can be detected */
		struct mtd_ecc_stats ecc_stats = mtd->ecc_stats;

		bytes = min(mtd->writesize - col, readlen);
		aligned = (bytes == mtd->writesize);

		/*
		 * Partial-page reads always go through the bounce buffer;
		 * full-page reads only do when DMA cannot address or align
		 * to the caller's buffer.
		 */
		if (!aligned)
			use_bounce_buf = 1;
		else if (chip->options & NAND_USES_DMA)
			use_bounce_buf = !virt_addr_valid(buf) ||
					 !IS_ALIGNED((unsigned long)buf,
						     chip->buf_align);
		else
			use_bounce_buf = 0;

		/* Is the current page in the buffer? */
		if (realpage != chip->pagecache.page || oob) {
			bufpoi = use_bounce_buf ? chip->data_buf : buf;

			if (use_bounce_buf && aligned)
				pr_debug("%s: using read bounce buffer for buf@%p\n",
					 __func__, buf);

read_retry:
			/*
			 * Now read the page into the buffer. Absent an error,
			 * the read methods return max bitflips per ecc step.
			 */
			if (unlikely(ops->mode == MTD_OPS_RAW))
				ret = chip->ecc.read_page_raw(chip, bufpoi,
							      oob_required,
							      page);
			else if (!aligned && NAND_HAS_SUBPAGE_READ(chip) &&
				 !oob)
				ret = chip->ecc.read_subpage(chip, col, bytes,
							     bufpoi, page);
			else
				ret = chip->ecc.read_page(chip, bufpoi,
							  oob_required, page);
			if (ret < 0) {
				if (use_bounce_buf)
					/* Invalidate page cache */
					chip->pagecache.page = -1;
				break;
			}

			/*
			 * Copy back the data in the initial buffer when reading
			 * partial pages or when a bounce buffer is required.
			 */
			if (use_bounce_buf) {
				if (!NAND_HAS_SUBPAGE_READ(chip) && !oob &&
				    !(mtd->ecc_stats.failed - ecc_stats.failed) &&
				    (ops->mode != MTD_OPS_RAW)) {
					/*
					 * Cache only clean, full, ECC-checked
					 * pages read without OOB.
					 */
					chip->pagecache.page = realpage;
					chip->pagecache.bitflips = ret;
				} else {
					/* Invalidate page cache */
					chip->pagecache.page = -1;
				}
				memcpy(buf, bufpoi + col, bytes);
			}

			if (unlikely(oob)) {
				int toread = min(oobreadlen, max_oobsize);

				if (toread) {
					oob = nand_transfer_oob(chip, oob, ops,
								toread);
					oobreadlen -= toread;
				}
			}

			nand_wait_readrdy(chip);

			/* ECC failed on this page: retry or give up */
			if (mtd->ecc_stats.failed - ecc_stats.failed) {
				if (retry_mode + 1 < chip->read_retries) {
					retry_mode++;
					ret = nand_setup_read_retry(chip,
								    retry_mode);
					if (ret < 0)
						break;

					/* Reset ecc_stats; retry */
					mtd->ecc_stats = ecc_stats;
					goto read_retry;
				} else {
					/* No more retry modes; real failure */
					ecc_fail = true;
				}
			}

			buf += bytes;
			max_bitflips = max_t(unsigned int, max_bitflips, ret);
		} else {
			/* Serve this page from the page cache */
			memcpy(buf, chip->data_buf + col, bytes);
			buf += bytes;
			max_bitflips = max_t(unsigned int, max_bitflips,
					     chip->pagecache.bitflips);
		}

		readlen -= bytes;

		/* Reset to retry mode 0 */
		if (retry_mode) {
			ret = nand_setup_read_retry(chip, 0);
			if (ret < 0)
				break;
			retry_mode = 0;
		}

		if (!readlen)
			break;

		/* For subsequent reads align to page boundary */
		col = 0;
		/* Increment page address */
		realpage++;

		page = realpage & chip->pagemask;
		/* Check, if we cross a chip boundary */
		if (!page) {
			chipnr++;
			nand_deselect_target(chip);
			nand_select_target(chip, chipnr);
		}
	}
	nand_deselect_target(chip);

	ops->retlen = ops->len - (size_t) readlen;
	if (oob)
		ops->oobretlen = ops->ooblen - oobreadlen;

	if (ret < 0)
		return ret;

	if (ecc_fail)
		return -EBADMSG;

	return max_bitflips;
}
  3035. /**
  3036. * nand_read_oob_std - [REPLACEABLE] the most common OOB data read function
  3037. * @chip: nand chip info structure
  3038. * @page: page number to read
  3039. */
  3040. int nand_read_oob_std(struct nand_chip *chip, int page)
  3041. {
  3042. struct mtd_info *mtd = nand_to_mtd(chip);
  3043. return nand_read_oob_op(chip, page, 0, chip->oob_poi, mtd->oobsize);
  3044. }
  3045. EXPORT_SYMBOL(nand_read_oob_std);
  3046. /**
  3047. * nand_read_oob_syndrome - [REPLACEABLE] OOB data read function for HW ECC
  3048. * with syndromes
  3049. * @chip: nand chip info structure
  3050. * @page: page number to read
  3051. */
  3052. static int nand_read_oob_syndrome(struct nand_chip *chip, int page)
  3053. {
  3054. struct mtd_info *mtd = nand_to_mtd(chip);
  3055. int length = mtd->oobsize;
  3056. int chunk = chip->ecc.bytes + chip->ecc.prepad + chip->ecc.postpad;
  3057. int eccsize = chip->ecc.size;
  3058. uint8_t *bufpoi = chip->oob_poi;
  3059. int i, toread, sndrnd = 0, pos, ret;
  3060. ret = nand_read_page_op(chip, page, chip->ecc.size, NULL, 0);
  3061. if (ret)
  3062. return ret;
  3063. for (i = 0; i < chip->ecc.steps; i++) {
  3064. if (sndrnd) {
  3065. int ret;
  3066. pos = eccsize + i * (eccsize + chunk);
  3067. if (mtd->writesize > 512)
  3068. ret = nand_change_read_column_op(chip, pos,
  3069. NULL, 0,
  3070. false);
  3071. else
  3072. ret = nand_read_page_op(chip, page, pos, NULL,
  3073. 0);
  3074. if (ret)
  3075. return ret;
  3076. } else
  3077. sndrnd = 1;
  3078. toread = min_t(int, length, chunk);
  3079. ret = nand_read_data_op(chip, bufpoi, toread, false, false);
  3080. if (ret)
  3081. return ret;
  3082. bufpoi += toread;
  3083. length -= toread;
  3084. }
  3085. if (length > 0) {
  3086. ret = nand_read_data_op(chip, bufpoi, length, false, false);
  3087. if (ret)
  3088. return ret;
  3089. }
  3090. return 0;
  3091. }
  3092. /**
  3093. * nand_write_oob_std - [REPLACEABLE] the most common OOB data write function
  3094. * @chip: nand chip info structure
  3095. * @page: page number to write
  3096. */
  3097. int nand_write_oob_std(struct nand_chip *chip, int page)
  3098. {
  3099. struct mtd_info *mtd = nand_to_mtd(chip);
  3100. return nand_prog_page_op(chip, page, mtd->writesize, chip->oob_poi,
  3101. mtd->oobsize);
  3102. }
  3103. EXPORT_SYMBOL(nand_write_oob_std);
/**
 * nand_write_oob_syndrome - [REPLACEABLE] OOB data write function for HW ECC
 *			     with syndrome - only for large page flash
 * @chip: nand chip info structure
 * @page: page number to write
 *
 * Return: 0 on success, or a negative error code from the bus operations.
 */
static int nand_write_oob_syndrome(struct nand_chip *chip, int page)
{
	struct mtd_info *mtd = nand_to_mtd(chip);
	int chunk = chip->ecc.bytes + chip->ecc.prepad + chip->ecc.postpad;
	int eccsize = chip->ecc.size, length = mtd->oobsize;
	int ret, i, len, pos, sndcmd = 0, steps = chip->ecc.steps;
	const uint8_t *bufpoi = chip->oob_poi;

	/*
	 * data-ecc-data-ecc ... ecc-oob
	 * or
	 * data-pad-ecc-pad-data-pad .... ecc-pad-oob
	 */
	if (!chip->ecc.prepad && !chip->ecc.postpad) {
		/* No padding: all OOB chunks sit together at the page end */
		pos = steps * (eccsize + chunk);
		steps = 0;
	} else
		pos = eccsize;

	ret = nand_prog_page_begin_op(chip, page, pos, NULL, 0);
	if (ret)
		return ret;

	for (i = 0; i < steps; i++) {
		if (sndcmd) {
			if (mtd->writesize <= 512) {
				/*
				 * Small page chips cannot change the write
				 * column: pad the skipped data area with
				 * 0xFF, four bytes at a time.
				 */
				uint32_t fill = 0xFFFFFFFF;

				len = eccsize;
				while (len > 0) {
					int num = min_t(int, len, 4);

					ret = nand_write_data_op(chip, &fill,
								 num, false);
					if (ret)
						return ret;

					len -= num;
				}
			} else {
				/* Large page: jump to this step's OOB chunk */
				pos = eccsize + i * (eccsize + chunk);
				ret = nand_change_write_column_op(chip, pos,
								  NULL, 0,
								  false);
				if (ret)
					return ret;
			}
		} else
			sndcmd = 1;

		len = min_t(int, length, chunk);

		ret = nand_write_data_op(chip, bufpoi, len, false);
		if (ret)
			return ret;

		bufpoi += len;
		length -= len;
	}

	/* Write any trailing OOB bytes left after the last chunk */
	if (length > 0) {
		ret = nand_write_data_op(chip, bufpoi, length, false);
		if (ret)
			return ret;
	}

	return nand_prog_page_end_op(chip);
}
/**
 * nand_do_read_oob - [INTERN] NAND read out-of-band
 * @chip: NAND chip object
 * @from: offset to read from
 * @ops: oob operations description structure
 *
 * NAND read out-of-band data from the spare area.
 *
 * Return: max bitflips reported by the per-page OOB read, -EBADMSG if
 * ECC failures accumulated during the operation, -EIO for secured
 * regions, or a negative error code from the bus.
 */
static int nand_do_read_oob(struct nand_chip *chip, loff_t from,
			    struct mtd_oob_ops *ops)
{
	struct mtd_info *mtd = nand_to_mtd(chip);
	unsigned int max_bitflips = 0;
	int page, realpage, chipnr;
	struct mtd_ecc_stats stats;
	int readlen = ops->ooblen;
	int len;
	uint8_t *buf = ops->oobbuf;
	int ret = 0;

	pr_debug("%s: from = 0x%08Lx, len = %i\n",
			__func__, (unsigned long long)from, readlen);

	/* Check if the region is secured */
	if (nand_region_is_secured(chip, from, readlen))
		return -EIO;

	/* Snapshot stats to detect ECC failures across the whole read */
	stats = mtd->ecc_stats;

	len = mtd_oobavail(mtd, ops);

	chipnr = (int)(from >> chip->chip_shift);
	nand_select_target(chip, chipnr);

	/* Shift to get page */
	realpage = (int)(from >> chip->page_shift);
	page = realpage & chip->pagemask;

	while (1) {
		if (ops->mode == MTD_OPS_RAW)
			ret = chip->ecc.read_oob_raw(chip, page);
		else
			ret = chip->ecc.read_oob(chip, page);

		if (ret < 0)
			break;

		len = min(len, readlen);
		buf = nand_transfer_oob(chip, buf, ops, len);

		nand_wait_readrdy(chip);

		max_bitflips = max_t(unsigned int, max_bitflips, ret);

		readlen -= len;
		if (!readlen)
			break;

		/* Increment page address */
		realpage++;

		page = realpage & chip->pagemask;
		/* Check, if we cross a chip boundary */
		if (!page) {
			chipnr++;
			nand_deselect_target(chip);
			nand_select_target(chip, chipnr);
		}
	}
	nand_deselect_target(chip);

	ops->oobretlen = ops->ooblen - readlen;

	if (ret < 0)
		return ret;

	if (mtd->ecc_stats.failed - stats.failed)
		return -EBADMSG;

	return max_bitflips;
}
  3230. /**
  3231. * nand_read_oob - [MTD Interface] NAND read data and/or out-of-band
  3232. * @mtd: MTD device structure
  3233. * @from: offset to read from
  3234. * @ops: oob operation description structure
  3235. *
  3236. * NAND read data and/or out-of-band data.
  3237. */
  3238. static int nand_read_oob(struct mtd_info *mtd, loff_t from,
  3239. struct mtd_oob_ops *ops)
  3240. {
  3241. struct nand_chip *chip = mtd_to_nand(mtd);
  3242. struct mtd_ecc_stats old_stats;
  3243. int ret;
  3244. ops->retlen = 0;
  3245. if (ops->mode != MTD_OPS_PLACE_OOB &&
  3246. ops->mode != MTD_OPS_AUTO_OOB &&
  3247. ops->mode != MTD_OPS_RAW)
  3248. return -ENOTSUPP;
  3249. nand_get_device(chip);
  3250. old_stats = mtd->ecc_stats;
  3251. if (!ops->datbuf)
  3252. ret = nand_do_read_oob(chip, from, ops);
  3253. else
  3254. ret = nand_do_read_ops(chip, from, ops);
  3255. if (ops->stats) {
  3256. ops->stats->uncorrectable_errors +=
  3257. mtd->ecc_stats.failed - old_stats.failed;
  3258. ops->stats->corrected_bitflips +=
  3259. mtd->ecc_stats.corrected - old_stats.corrected;
  3260. }
  3261. nand_release_device(chip);
  3262. return ret;
  3263. }
/**
 * nand_write_page_raw_notsupp - dummy raw page write function
 * @chip: nand chip info structure
 * @buf: data buffer
 * @oob_required: must write chip->oob_poi to OOB
 * @page: page number to write
 *
 * Stub for controllers that cannot do raw page writes.
 *
 * Returns -ENOTSUPP unconditionally.
 */
int nand_write_page_raw_notsupp(struct nand_chip *chip, const u8 *buf,
				int oob_required, int page)
{
	return -ENOTSUPP;
}
  3278. /**
  3279. * nand_write_page_raw - [INTERN] raw page write function
  3280. * @chip: nand chip info structure
  3281. * @buf: data buffer
  3282. * @oob_required: must write chip->oob_poi to OOB
  3283. * @page: page number to write
  3284. *
  3285. * Not for syndrome calculating ECC controllers, which use a special oob layout.
  3286. */
  3287. int nand_write_page_raw(struct nand_chip *chip, const uint8_t *buf,
  3288. int oob_required, int page)
  3289. {
  3290. struct mtd_info *mtd = nand_to_mtd(chip);
  3291. int ret;
  3292. ret = nand_prog_page_begin_op(chip, page, 0, buf, mtd->writesize);
  3293. if (ret)
  3294. return ret;
  3295. if (oob_required) {
  3296. ret = nand_write_data_op(chip, chip->oob_poi, mtd->oobsize,
  3297. false);
  3298. if (ret)
  3299. return ret;
  3300. }
  3301. return nand_prog_page_end_op(chip);
  3302. }
  3303. EXPORT_SYMBOL(nand_write_page_raw);
  3304. /**
  3305. * nand_monolithic_write_page_raw - Monolithic page write in raw mode
  3306. * @chip: NAND chip info structure
  3307. * @buf: data buffer to write
  3308. * @oob_required: must write chip->oob_poi to OOB
  3309. * @page: page number to write
  3310. *
  3311. * This is a raw page write, ie. without any error detection/correction.
  3312. * Monolithic means we are requesting all the relevant data (main plus
  3313. * eventually OOB) to be sent over the bus and effectively programmed
  3314. * into the NAND chip arrays in a single operation. This is an
  3315. * alternative to nand_write_page_raw(), which first sends the main
  3316. * data, then eventually send the OOB data by latching more data
  3317. * cycles on the NAND bus, and finally sends the program command to
  3318. * synchronyze the NAND chip cache.
  3319. */
  3320. int nand_monolithic_write_page_raw(struct nand_chip *chip, const u8 *buf,
  3321. int oob_required, int page)
  3322. {
  3323. struct mtd_info *mtd = nand_to_mtd(chip);
  3324. unsigned int size = mtd->writesize;
  3325. u8 *write_buf = (u8 *)buf;
  3326. if (oob_required) {
  3327. size += mtd->oobsize;
  3328. if (buf != chip->data_buf) {
  3329. write_buf = nand_get_data_buf(chip);
  3330. memcpy(write_buf, buf, mtd->writesize);
  3331. }
  3332. }
  3333. return nand_prog_page_op(chip, page, 0, write_buf, size);
  3334. }
  3335. EXPORT_SYMBOL(nand_monolithic_write_page_raw);
/**
 * nand_write_page_raw_syndrome - [INTERN] raw page write function
 * @chip: nand chip info structure
 * @buf: data buffer
 * @oob_required: must write chip->oob_poi to OOB
 * @page: page number to write
 *
 * We need a special oob layout and handling even when ECC isn't checked.
 *
 * Writes the page in the interleaved data-[prepad]-ecc-[postpad] on-flash
 * layout, sourcing the OOB bytes from chip->oob_poi.
 *
 * Return: 0 on success, or a negative error code from the bus operations.
 */
static int nand_write_page_raw_syndrome(struct nand_chip *chip,
					const uint8_t *buf, int oob_required,
					int page)
{
	struct mtd_info *mtd = nand_to_mtd(chip);
	int eccsize = chip->ecc.size;
	int eccbytes = chip->ecc.bytes;
	uint8_t *oob = chip->oob_poi;
	int steps, size, ret;

	ret = nand_prog_page_begin_op(chip, page, 0, NULL, 0);
	if (ret)
		return ret;

	for (steps = chip->ecc.steps; steps > 0; steps--) {
		/* One data chunk ... */
		ret = nand_write_data_op(chip, buf, eccsize, false);
		if (ret)
			return ret;

		buf += eccsize;

		/* ... then its prepad, ECC bytes and postpad from the OOB */
		if (chip->ecc.prepad) {
			ret = nand_write_data_op(chip, oob, chip->ecc.prepad,
						 false);
			if (ret)
				return ret;

			oob += chip->ecc.prepad;
		}

		ret = nand_write_data_op(chip, oob, eccbytes, false);
		if (ret)
			return ret;

		oob += eccbytes;

		if (chip->ecc.postpad) {
			ret = nand_write_data_op(chip, oob, chip->ecc.postpad,
						 false);
			if (ret)
				return ret;

			oob += chip->ecc.postpad;
		}
	}

	/* Flush whatever OOB bytes remain after the last step */
	size = mtd->oobsize - (oob - chip->oob_poi);
	if (size) {
		ret = nand_write_data_op(chip, oob, size, false);
		if (ret)
			return ret;
	}

	return nand_prog_page_end_op(chip);
}
  3389. /**
  3390. * nand_write_page_swecc - [REPLACEABLE] software ECC based page write function
  3391. * @chip: nand chip info structure
  3392. * @buf: data buffer
  3393. * @oob_required: must write chip->oob_poi to OOB
  3394. * @page: page number to write
  3395. */
  3396. static int nand_write_page_swecc(struct nand_chip *chip, const uint8_t *buf,
  3397. int oob_required, int page)
  3398. {
  3399. struct mtd_info *mtd = nand_to_mtd(chip);
  3400. int i, eccsize = chip->ecc.size, ret;
  3401. int eccbytes = chip->ecc.bytes;
  3402. int eccsteps = chip->ecc.steps;
  3403. uint8_t *ecc_calc = chip->ecc.calc_buf;
  3404. const uint8_t *p = buf;
  3405. /* Software ECC calculation */
  3406. for (i = 0; eccsteps; eccsteps--, i += eccbytes, p += eccsize)
  3407. chip->ecc.calculate(chip, p, &ecc_calc[i]);
  3408. ret = mtd_ooblayout_set_eccbytes(mtd, ecc_calc, chip->oob_poi, 0,
  3409. chip->ecc.total);
  3410. if (ret)
  3411. return ret;
  3412. return chip->ecc.write_page_raw(chip, buf, 1, page);
  3413. }
/**
 * nand_write_page_hwecc - [REPLACEABLE] hardware ECC based page write function
 * @chip: nand chip info structure
 * @buf: data buffer
 * @oob_required: must write chip->oob_poi to OOB
 * @page: page number to write
 *
 * Streams the page out one ECC chunk at a time, letting the controller's
 * ECC engine accumulate over each chunk, collects the computed codes into
 * the OOB layout and finally programs the OOB area.
 *
 * Return: 0 on success, or a negative error code from the bus operations.
 */
static int nand_write_page_hwecc(struct nand_chip *chip, const uint8_t *buf,
				 int oob_required, int page)
{
	struct mtd_info *mtd = nand_to_mtd(chip);
	int i, eccsize = chip->ecc.size, ret;
	int eccbytes = chip->ecc.bytes;
	int eccsteps = chip->ecc.steps;
	uint8_t *ecc_calc = chip->ecc.calc_buf;
	const uint8_t *p = buf;

	ret = nand_prog_page_begin_op(chip, page, 0, NULL, 0);
	if (ret)
		return ret;

	for (i = 0; eccsteps; eccsteps--, i += eccbytes, p += eccsize) {
		/* Arm the ECC engine for write before sending the chunk */
		chip->ecc.hwctl(chip, NAND_ECC_WRITE);

		ret = nand_write_data_op(chip, p, eccsize, false);
		if (ret)
			return ret;

		/* Harvest the code computed over the chunk just written */
		chip->ecc.calculate(chip, p, &ecc_calc[i]);
	}

	ret = mtd_ooblayout_set_eccbytes(mtd, ecc_calc, chip->oob_poi, 0,
					 chip->ecc.total);
	if (ret)
		return ret;

	ret = nand_write_data_op(chip, chip->oob_poi, mtd->oobsize, false);
	if (ret)
		return ret;

	return nand_prog_page_end_op(chip);
}
/**
 * nand_write_subpage_hwecc - [REPLACEABLE] hardware ECC based subpage write
 * @chip: nand chip info structure
 * @offset: column address of subpage within the page
 * @data_len: data length
 * @buf: data buffer
 * @oob_required: must write chip->oob_poi to OOB
 * @page: page number to write
 *
 * The whole page is always sent; ECC and OOB bytes of the subpages not
 * covered by [@offset, @offset + @data_len) are masked with 0xFF so that
 * they remain unprogrammed on flash.
 *
 * Return: 0 on success, or a negative error code from the bus operations.
 */
static int nand_write_subpage_hwecc(struct nand_chip *chip, uint32_t offset,
				    uint32_t data_len, const uint8_t *buf,
				    int oob_required, int page)
{
	struct mtd_info *mtd = nand_to_mtd(chip);
	uint8_t *oob_buf = chip->oob_poi;
	uint8_t *ecc_calc = chip->ecc.calc_buf;
	int ecc_size = chip->ecc.size;
	int ecc_bytes = chip->ecc.bytes;
	int ecc_steps = chip->ecc.steps;
	/* First and last ECC steps touched by the subpage write */
	uint32_t start_step = offset / ecc_size;
	uint32_t end_step = (offset + data_len - 1) / ecc_size;
	int oob_bytes = mtd->oobsize / ecc_steps;
	int step, ret;

	ret = nand_prog_page_begin_op(chip, page, 0, NULL, 0);
	if (ret)
		return ret;

	for (step = 0; step < ecc_steps; step++) {
		/* configure controller for WRITE access */
		chip->ecc.hwctl(chip, NAND_ECC_WRITE);

		/* write data (untouched subpages already masked by 0xFF) */
		ret = nand_write_data_op(chip, buf, ecc_size, false);
		if (ret)
			return ret;

		/* mask ECC of un-touched subpages by padding 0xFF */
		if ((step < start_step) || (step > end_step))
			memset(ecc_calc, 0xff, ecc_bytes);
		else
			chip->ecc.calculate(chip, buf, ecc_calc);

		/* mask OOB of un-touched subpages by padding 0xFF */
		/* if oob_required, preserve OOB metadata of written subpage */
		if (!oob_required || (step < start_step) || (step > end_step))
			memset(oob_buf, 0xff, oob_bytes);

		buf += ecc_size;
		ecc_calc += ecc_bytes;
		oob_buf += oob_bytes;
	}

	/* copy calculated ECC for whole page to chip->buffer->oob */
	/* this include masked-value(0xFF) for unwritten subpages */
	ecc_calc = chip->ecc.calc_buf;
	ret = mtd_ooblayout_set_eccbytes(mtd, ecc_calc, chip->oob_poi, 0,
					 chip->ecc.total);
	if (ret)
		return ret;

	/* write OOB buffer to NAND device */
	ret = nand_write_data_op(chip, chip->oob_poi, mtd->oobsize, false);
	if (ret)
		return ret;

	return nand_prog_page_end_op(chip);
}
/**
 * nand_write_page_syndrome - [REPLACEABLE] hardware ECC syndrome based page write
 * @chip: nand chip info structure
 * @buf: data buffer
 * @oob_required: must write chip->oob_poi to OOB
 * @page: page number to write
 *
 * The hw generator calculates the error syndrome automatically. Therefore we
 * need a special oob layout and handling.
 *
 * On-flash layout per step is data-[prepad]-ecc-[postpad]; the ECC code is
 * computed into the OOB buffer in place before being written.
 *
 * Return: 0 on success, or a negative error code from the bus operations.
 */
static int nand_write_page_syndrome(struct nand_chip *chip, const uint8_t *buf,
				    int oob_required, int page)
{
	struct mtd_info *mtd = nand_to_mtd(chip);
	int i, eccsize = chip->ecc.size;
	int eccbytes = chip->ecc.bytes;
	int eccsteps = chip->ecc.steps;
	const uint8_t *p = buf;
	uint8_t *oob = chip->oob_poi;
	int ret;

	ret = nand_prog_page_begin_op(chip, page, 0, NULL, 0);
	if (ret)
		return ret;

	for (i = 0; eccsteps; eccsteps--, i += eccbytes, p += eccsize) {
		/* Arm the ECC engine, then send this step's data chunk */
		chip->ecc.hwctl(chip, NAND_ECC_WRITE);

		ret = nand_write_data_op(chip, p, eccsize, false);
		if (ret)
			return ret;

		if (chip->ecc.prepad) {
			ret = nand_write_data_op(chip, oob, chip->ecc.prepad,
						 false);
			if (ret)
				return ret;

			oob += chip->ecc.prepad;
		}

		/* Compute the ECC for the chunk directly into the OOB buffer */
		chip->ecc.calculate(chip, p, oob);

		ret = nand_write_data_op(chip, oob, eccbytes, false);
		if (ret)
			return ret;

		oob += eccbytes;

		if (chip->ecc.postpad) {
			ret = nand_write_data_op(chip, oob, chip->ecc.postpad,
						 false);
			if (ret)
				return ret;

			oob += chip->ecc.postpad;
		}
	}

	/* Calculate remaining oob bytes */
	i = mtd->oobsize - (oob - chip->oob_poi);
	if (i) {
		ret = nand_write_data_op(chip, oob, i, false);
		if (ret)
			return ret;
	}

	return nand_prog_page_end_op(chip);
}
  3565. /**
  3566. * nand_write_page - write one page
  3567. * @chip: NAND chip descriptor
  3568. * @offset: address offset within the page
  3569. * @data_len: length of actual data to be written
  3570. * @buf: the data to write
  3571. * @oob_required: must write chip->oob_poi to OOB
  3572. * @page: page number to write
  3573. * @raw: use _raw version of write_page
  3574. */
  3575. static int nand_write_page(struct nand_chip *chip, uint32_t offset,
  3576. int data_len, const uint8_t *buf, int oob_required,
  3577. int page, int raw)
  3578. {
  3579. struct mtd_info *mtd = nand_to_mtd(chip);
  3580. int status, subpage;
  3581. if (!(chip->options & NAND_NO_SUBPAGE_WRITE) &&
  3582. chip->ecc.write_subpage)
  3583. subpage = offset || (data_len < mtd->writesize);
  3584. else
  3585. subpage = 0;
  3586. if (unlikely(raw))
  3587. status = chip->ecc.write_page_raw(chip, buf, oob_required,
  3588. page);
  3589. else if (subpage)
  3590. status = chip->ecc.write_subpage(chip, offset, data_len, buf,
  3591. oob_required, page);
  3592. else
  3593. status = chip->ecc.write_page(chip, buf, oob_required, page);
  3594. if (status < 0)
  3595. return status;
  3596. return 0;
  3597. }
/*
 * True when @x is not aligned to the chip's subpage size. Relies on a
 * local variable named 'chip' being in scope at the expansion site, and
 * on chip->subpagesize being a power of two so the mask test is valid.
 */
#define NOTALIGNED(x) ((x & (chip->subpagesize - 1)) != 0)
/**
 * nand_do_write_ops - [INTERN] NAND write with ECC
 * @chip: NAND chip object
 * @to: offset to write to
 * @ops: oob operations description structure
 *
 * NAND write with ECC.
 *
 * Return: 0 on success, a negative error code otherwise. On success,
 * ops->retlen (and ops->oobretlen when OOB data was supplied) are updated.
 */
static int nand_do_write_ops(struct nand_chip *chip, loff_t to,
			     struct mtd_oob_ops *ops)
{
	struct mtd_info *mtd = nand_to_mtd(chip);
	int chipnr, realpage, page, column;
	uint32_t writelen = ops->len;

	uint32_t oobwritelen = ops->ooblen;
	uint32_t oobmaxlen = mtd_oobavail(mtd, ops);

	uint8_t *oob = ops->oobbuf;
	uint8_t *buf = ops->datbuf;
	int ret;
	int oob_required = oob ? 1 : 0;

	ops->retlen = 0;
	if (!writelen)
		return 0;

	/* Reject writes, which are not page aligned */
	if (NOTALIGNED(to) || NOTALIGNED(ops->len)) {
		pr_notice("%s: attempt to write non page aligned data\n",
			  __func__);
		return -EINVAL;
	}

	/* Check if the region is secured */
	if (nand_region_is_secured(chip, to, writelen))
		return -EIO;

	/* Byte offset of @to inside its page */
	column = to & (mtd->writesize - 1);

	chipnr = (int)(to >> chip->chip_shift);
	nand_select_target(chip, chipnr);

	/* Check, if it is write protected */
	if (nand_check_wp(chip)) {
		ret = -EIO;
		goto err_out;
	}

	realpage = (int)(to >> chip->page_shift);
	page = realpage & chip->pagemask;

	/* Invalidate the page cache, when we write to the cached page */
	if (to <= ((loff_t)chip->pagecache.page << chip->page_shift) &&
	    ((loff_t)chip->pagecache.page << chip->page_shift) < (to + ops->len))
		chip->pagecache.page = -1;

	/* Don't allow multipage oob writes with offset */
	if (oob && ops->ooboffs && (ops->ooboffs + ops->ooblen > oobmaxlen)) {
		ret = -EINVAL;
		goto err_out;
	}

	/* Write page by page until @writelen bytes have been consumed */
	while (1) {
		int bytes = mtd->writesize;
		uint8_t *wbuf = buf;
		int use_bounce_buf;
		int part_pagewr = (column || writelen < mtd->writesize);

		if (part_pagewr)
			use_bounce_buf = 1;
		else if (chip->options & NAND_USES_DMA)
			use_bounce_buf = !virt_addr_valid(buf) ||
					 !IS_ALIGNED((unsigned long)buf,
						     chip->buf_align);
		else
			use_bounce_buf = 0;

		/*
		 * Copy the data from the initial buffer when doing partial page
		 * writes or when a bounce buffer is required.
		 */
		if (use_bounce_buf) {
			pr_debug("%s: using write bounce buffer for buf@%p\n",
				 __func__, buf);
			if (part_pagewr)
				bytes = min_t(int, bytes - column, writelen);
			/* Pad the untouched part of the page with 0xff */
			wbuf = nand_get_data_buf(chip);
			memset(wbuf, 0xff, mtd->writesize);
			memcpy(&wbuf[column], buf, bytes);
		}

		if (unlikely(oob)) {
			size_t len = min(oobwritelen, oobmaxlen);

			oob = nand_fill_oob(chip, oob, len, ops);
			oobwritelen -= len;
		} else {
			/* We still need to erase leftover OOB data */
			memset(chip->oob_poi, 0xff, mtd->oobsize);
		}

		ret = nand_write_page(chip, column, bytes, wbuf,
				      oob_required, page,
				      (ops->mode == MTD_OPS_RAW));
		if (ret)
			break;

		writelen -= bytes;
		if (!writelen)
			break;

		/* Subsequent pages are written from their start */
		column = 0;
		buf += bytes;
		realpage++;

		page = realpage & chip->pagemask;
		/* Check, if we cross a chip boundary */
		if (!page) {
			chipnr++;
			nand_deselect_target(chip);
			nand_select_target(chip, chipnr);
		}
	}

	ops->retlen = ops->len - writelen;
	if (unlikely(oob))
		ops->oobretlen = ops->ooblen;

err_out:
	nand_deselect_target(chip);
	return ret;
}
  3710. /**
  3711. * panic_nand_write - [MTD Interface] NAND write with ECC
  3712. * @mtd: MTD device structure
  3713. * @to: offset to write to
  3714. * @len: number of bytes to write
  3715. * @retlen: pointer to variable to store the number of written bytes
  3716. * @buf: the data to write
  3717. *
  3718. * NAND write with ECC. Used when performing writes in interrupt context, this
  3719. * may for example be called by mtdoops when writing an oops while in panic.
  3720. */
  3721. static int panic_nand_write(struct mtd_info *mtd, loff_t to, size_t len,
  3722. size_t *retlen, const uint8_t *buf)
  3723. {
  3724. struct nand_chip *chip = mtd_to_nand(mtd);
  3725. int chipnr = (int)(to >> chip->chip_shift);
  3726. struct mtd_oob_ops ops;
  3727. int ret;
  3728. nand_select_target(chip, chipnr);
  3729. /* Wait for the device to get ready */
  3730. panic_nand_wait(chip, 400);
  3731. memset(&ops, 0, sizeof(ops));
  3732. ops.len = len;
  3733. ops.datbuf = (uint8_t *)buf;
  3734. ops.mode = MTD_OPS_PLACE_OOB;
  3735. ret = nand_do_write_ops(chip, to, &ops);
  3736. *retlen = ops.retlen;
  3737. return ret;
  3738. }
  3739. /**
  3740. * nand_write_oob - [MTD Interface] NAND write data and/or out-of-band
  3741. * @mtd: MTD device structure
  3742. * @to: offset to write to
  3743. * @ops: oob operation description structure
  3744. */
  3745. static int nand_write_oob(struct mtd_info *mtd, loff_t to,
  3746. struct mtd_oob_ops *ops)
  3747. {
  3748. struct nand_chip *chip = mtd_to_nand(mtd);
  3749. int ret = 0;
  3750. ops->retlen = 0;
  3751. nand_get_device(chip);
  3752. switch (ops->mode) {
  3753. case MTD_OPS_PLACE_OOB:
  3754. case MTD_OPS_AUTO_OOB:
  3755. case MTD_OPS_RAW:
  3756. break;
  3757. default:
  3758. goto out;
  3759. }
  3760. if (!ops->datbuf)
  3761. ret = nand_do_write_oob(chip, to, ops);
  3762. else
  3763. ret = nand_do_write_ops(chip, to, ops);
  3764. out:
  3765. nand_release_device(chip);
  3766. return ret;
  3767. }
  3768. /**
  3769. * nand_erase - [MTD Interface] erase block(s)
  3770. * @mtd: MTD device structure
  3771. * @instr: erase instruction
  3772. *
  3773. * Erase one ore more blocks.
  3774. */
  3775. static int nand_erase(struct mtd_info *mtd, struct erase_info *instr)
  3776. {
  3777. return nand_erase_nand(mtd_to_nand(mtd), instr, 0);
  3778. }
/**
 * nand_erase_nand - [INTERN] erase block(s)
 * @chip: NAND chip object
 * @instr: erase instruction
 * @allowbbt: allow erasing the bbt area
 *
 * Erase one ore more blocks.
 *
 * Return: 0 on success, a negative error code otherwise. On failure,
 * instr->fail_addr points at the block that failed to erase.
 */
int nand_erase_nand(struct nand_chip *chip, struct erase_info *instr,
		    int allowbbt)
{
	int page, pages_per_block, ret, chipnr;
	loff_t len;

	pr_debug("%s: start = 0x%012llx, len = %llu\n",
		 __func__, (unsigned long long)instr->addr,
		 (unsigned long long)instr->len);

	if (check_offs_len(chip, instr->addr, instr->len))
		return -EINVAL;

	/* Check if the region is secured */
	if (nand_region_is_secured(chip, instr->addr, instr->len))
		return -EIO;

	/* Grab the lock and see if the device is available */
	nand_get_device(chip);

	/* Shift to get first page */
	page = (int)(instr->addr >> chip->page_shift);
	chipnr = (int)(instr->addr >> chip->chip_shift);

	/* Calculate pages in each block */
	pages_per_block = 1 << (chip->phys_erase_shift - chip->page_shift);

	/* Select the NAND device */
	nand_select_target(chip, chipnr);

	/* Check, if it is write protected */
	if (nand_check_wp(chip)) {
		pr_debug("%s: device is write protected!\n",
			 __func__);
		ret = -EIO;
		goto erase_exit;
	}

	/* Loop through the pages */
	len = instr->len;

	while (len) {
		loff_t ofs = (loff_t)page << chip->page_shift;

		/* Check if we have a bad block, we do not erase bad blocks! */
		if (nand_block_checkbad(chip, ((loff_t) page) <<
					chip->page_shift, allowbbt)) {
			pr_warn("%s: attempt to erase a bad block at 0x%08llx\n",
				__func__, (unsigned long long)ofs);
			ret = -EIO;
			goto erase_exit;
		}

		/*
		 * Invalidate the page cache, if we erase the block which
		 * contains the current cached page.
		 */
		if (page <= chip->pagecache.page && chip->pagecache.page <
		    (page + pages_per_block))
			chip->pagecache.page = -1;

		/* nand_erase_op() takes a block index local to this target */
		ret = nand_erase_op(chip, (page & chip->pagemask) >>
				    (chip->phys_erase_shift - chip->page_shift));
		if (ret) {
			pr_debug("%s: failed erase, page 0x%08x\n",
				 __func__, page);
			instr->fail_addr = ofs;
			goto erase_exit;
		}

		/* Increment page address and decrement length */
		len -= (1ULL << chip->phys_erase_shift);
		page += pages_per_block;

		/* Check, if we cross a chip boundary */
		if (len && !(page & chip->pagemask)) {
			chipnr++;
			nand_deselect_target(chip);
			nand_select_target(chip, chipnr);
		}
	}

	ret = 0;

erase_exit:

	/* Deselect and wake up anyone waiting on the device */
	nand_deselect_target(chip);
	nand_release_device(chip);

	/* Return more or less happy */
	return ret;
}
  3861. /**
  3862. * nand_sync - [MTD Interface] sync
  3863. * @mtd: MTD device structure
  3864. *
  3865. * Sync is actually a wait for chip ready function.
  3866. */
  3867. static void nand_sync(struct mtd_info *mtd)
  3868. {
  3869. struct nand_chip *chip = mtd_to_nand(mtd);
  3870. pr_debug("%s: called\n", __func__);
  3871. /* Grab the lock and see if the device is available */
  3872. nand_get_device(chip);
  3873. /* Release it and go back */
  3874. nand_release_device(chip);
  3875. }
  3876. /**
  3877. * nand_block_isbad - [MTD Interface] Check if block at offset is bad
  3878. * @mtd: MTD device structure
  3879. * @offs: offset relative to mtd start
  3880. */
  3881. static int nand_block_isbad(struct mtd_info *mtd, loff_t offs)
  3882. {
  3883. struct nand_chip *chip = mtd_to_nand(mtd);
  3884. int chipnr = (int)(offs >> chip->chip_shift);
  3885. int ret;
  3886. /* Select the NAND device */
  3887. nand_get_device(chip);
  3888. nand_select_target(chip, chipnr);
  3889. ret = nand_block_checkbad(chip, offs, 0);
  3890. nand_deselect_target(chip);
  3891. nand_release_device(chip);
  3892. return ret;
  3893. }
  3894. /**
  3895. * nand_block_markbad - [MTD Interface] Mark block at the given offset as bad
  3896. * @mtd: MTD device structure
  3897. * @ofs: offset relative to mtd start
  3898. */
  3899. static int nand_block_markbad(struct mtd_info *mtd, loff_t ofs)
  3900. {
  3901. int ret;
  3902. ret = nand_block_isbad(mtd, ofs);
  3903. if (ret) {
  3904. /* If it was bad already, return success and do nothing */
  3905. if (ret > 0)
  3906. return 0;
  3907. return ret;
  3908. }
  3909. return nand_block_markbad_lowlevel(mtd_to_nand(mtd), ofs);
  3910. }
  3911. /**
  3912. * nand_suspend - [MTD Interface] Suspend the NAND flash
  3913. * @mtd: MTD device structure
  3914. *
  3915. * Returns 0 for success or negative error code otherwise.
  3916. */
  3917. static int nand_suspend(struct mtd_info *mtd)
  3918. {
  3919. struct nand_chip *chip = mtd_to_nand(mtd);
  3920. int ret = 0;
  3921. mutex_lock(&chip->lock);
  3922. if (chip->ops.suspend)
  3923. ret = chip->ops.suspend(chip);
  3924. if (!ret)
  3925. chip->suspended = 1;
  3926. mutex_unlock(&chip->lock);
  3927. return ret;
  3928. }
  3929. /**
  3930. * nand_resume - [MTD Interface] Resume the NAND flash
  3931. * @mtd: MTD device structure
  3932. */
  3933. static void nand_resume(struct mtd_info *mtd)
  3934. {
  3935. struct nand_chip *chip = mtd_to_nand(mtd);
  3936. mutex_lock(&chip->lock);
  3937. if (chip->suspended) {
  3938. if (chip->ops.resume)
  3939. chip->ops.resume(chip);
  3940. chip->suspended = 0;
  3941. } else {
  3942. pr_err("%s called for a chip which is not in suspended state\n",
  3943. __func__);
  3944. }
  3945. mutex_unlock(&chip->lock);
  3946. wake_up_all(&chip->resume_wq);
  3947. }
/**
 * nand_shutdown - [MTD Interface] Finish the current NAND operation and
 * prevent further operations
 * @mtd: MTD device structure
 *
 * Suspending the chip leaves it marked suspended, so any later access
 * is refused; the suspend return value is deliberately ignored here.
 */
static void nand_shutdown(struct mtd_info *mtd)
{
	nand_suspend(mtd);
}
  3957. /**
  3958. * nand_lock - [MTD Interface] Lock the NAND flash
  3959. * @mtd: MTD device structure
  3960. * @ofs: offset byte address
  3961. * @len: number of bytes to lock (must be a multiple of block/page size)
  3962. */
  3963. static int nand_lock(struct mtd_info *mtd, loff_t ofs, uint64_t len)
  3964. {
  3965. struct nand_chip *chip = mtd_to_nand(mtd);
  3966. if (!chip->ops.lock_area)
  3967. return -ENOTSUPP;
  3968. return chip->ops.lock_area(chip, ofs, len);
  3969. }
  3970. /**
  3971. * nand_unlock - [MTD Interface] Unlock the NAND flash
  3972. * @mtd: MTD device structure
  3973. * @ofs: offset byte address
  3974. * @len: number of bytes to unlock (must be a multiple of block/page size)
  3975. */
  3976. static int nand_unlock(struct mtd_info *mtd, loff_t ofs, uint64_t len)
  3977. {
  3978. struct nand_chip *chip = mtd_to_nand(mtd);
  3979. if (!chip->ops.unlock_area)
  3980. return -ENOTSUPP;
  3981. return chip->ops.unlock_area(chip, ofs, len);
  3982. }
  3983. /* Set default functions */
  3984. static void nand_set_defaults(struct nand_chip *chip)
  3985. {
  3986. /* If no controller is provided, use the dummy, legacy one. */
  3987. if (!chip->controller) {
  3988. chip->controller = &chip->legacy.dummy_controller;
  3989. nand_controller_init(chip->controller);
  3990. }
  3991. nand_legacy_set_defaults(chip);
  3992. if (!chip->buf_align)
  3993. chip->buf_align = 1;
  3994. }
  3995. /* Sanitize ONFI strings so we can safely print them */
  3996. void sanitize_string(uint8_t *s, size_t len)
  3997. {
  3998. ssize_t i;
  3999. /* Null terminate */
  4000. s[len - 1] = 0;
  4001. /* Remove non printable chars */
  4002. for (i = 0; i < len - 1; i++) {
  4003. if (s[i] < ' ' || s[i] > 127)
  4004. s[i] = '?';
  4005. }
  4006. /* Remove trailing spaces */
  4007. strim(s);
  4008. }
  4009. /*
  4010. * nand_id_has_period - Check if an ID string has a given wraparound period
  4011. * @id_data: the ID string
  4012. * @arrlen: the length of the @id_data array
  4013. * @period: the period of repitition
  4014. *
  4015. * Check if an ID string is repeated within a given sequence of bytes at
  4016. * specific repetition interval period (e.g., {0x20,0x01,0x7F,0x20} has a
  4017. * period of 3). This is a helper function for nand_id_len(). Returns non-zero
  4018. * if the repetition has a period of @period; otherwise, returns zero.
  4019. */
  4020. static int nand_id_has_period(u8 *id_data, int arrlen, int period)
  4021. {
  4022. int i, j;
  4023. for (i = 0; i < period; i++)
  4024. for (j = i + period; j < arrlen; j += period)
  4025. if (id_data[i] != id_data[j])
  4026. return 0;
  4027. return 1;
  4028. }
  4029. /*
  4030. * nand_id_len - Get the length of an ID string returned by CMD_READID
  4031. * @id_data: the ID string
  4032. * @arrlen: the length of the @id_data array
  4033. * Returns the length of the ID string, according to known wraparound/trailing
  4034. * zero patterns. If no pattern exists, returns the length of the array.
  4035. */
  4036. static int nand_id_len(u8 *id_data, int arrlen)
  4037. {
  4038. int last_nonzero, period;
  4039. /* Find last non-zero byte */
  4040. for (last_nonzero = arrlen - 1; last_nonzero >= 0; last_nonzero--)
  4041. if (id_data[last_nonzero])
  4042. break;
  4043. /* All zeros */
  4044. if (last_nonzero < 0)
  4045. return 0;
  4046. /* Calculate wraparound period */
  4047. for (period = 1; period < arrlen; period++)
  4048. if (nand_id_has_period(id_data, arrlen, period))
  4049. break;
  4050. /* There's a repeated pattern */
  4051. if (period < arrlen)
  4052. return period;
  4053. /* There are trailing zeros */
  4054. if (last_nonzero < arrlen - 1)
  4055. return last_nonzero + 1;
  4056. /* No pattern detected */
  4057. return arrlen;
  4058. }
  4059. /* Extract the bits of per cell from the 3rd byte of the extended ID */
  4060. static int nand_get_bits_per_cell(u8 cellinfo)
  4061. {
  4062. int bits;
  4063. bits = cellinfo & NAND_CI_CELLTYPE_MSK;
  4064. bits >>= NAND_CI_CELLTYPE_SHIFT;
  4065. return bits + 1;
  4066. }
/*
 * Many new NAND share similar device ID codes, which represent the size of the
 * chip. The rest of the parameters must be decoded according to generic or
 * manufacturer-specific "extended ID" decoding patterns.
 *
 * Note: the field extraction below is order-sensitive; each parameter is
 * read from the low bits of @extid before it is shifted right.
 */
void nand_decode_ext_id(struct nand_chip *chip)
{
	struct nand_memory_organization *memorg;
	struct mtd_info *mtd = nand_to_mtd(chip);
	int extid;
	u8 *id_data = chip->id.data;

	memorg = nanddev_get_memorg(&chip->base);

	/* The 3rd id byte holds MLC / multichip data */
	memorg->bits_per_cell = nand_get_bits_per_cell(id_data[2]);
	/* The 4th id byte is the important one */
	extid = id_data[3];

	/* Calc pagesize: 1KiB << (2-bit field) */
	memorg->pagesize = 1024 << (extid & 0x03);
	mtd->writesize = memorg->pagesize;
	extid >>= 2;
	/* Calc oobsize: 8 or 16 bytes per 512 bytes of page data */
	memorg->oobsize = (8 << (extid & 0x01)) * (mtd->writesize >> 9);
	mtd->oobsize = memorg->oobsize;
	extid >>= 2;
	/* Calc blocksize. Blocksize is multiples of 64KiB */
	memorg->pages_per_eraseblock = ((64 * 1024) << (extid & 0x03)) /
				       memorg->pagesize;
	mtd->erasesize = (64 * 1024) << (extid & 0x03);
	extid >>= 2;
	/* Get buswidth information */
	if (extid & 0x1)
		chip->options |= NAND_BUSWIDTH_16;
}
EXPORT_SYMBOL_GPL(nand_decode_ext_id);
  4101. /*
  4102. * Old devices have chip data hardcoded in the device ID table. nand_decode_id
  4103. * decodes a matching ID table entry and assigns the MTD size parameters for
  4104. * the chip.
  4105. */
  4106. static void nand_decode_id(struct nand_chip *chip, struct nand_flash_dev *type)
  4107. {
  4108. struct mtd_info *mtd = nand_to_mtd(chip);
  4109. struct nand_memory_organization *memorg;
  4110. memorg = nanddev_get_memorg(&chip->base);
  4111. memorg->pages_per_eraseblock = type->erasesize / type->pagesize;
  4112. mtd->erasesize = type->erasesize;
  4113. memorg->pagesize = type->pagesize;
  4114. mtd->writesize = memorg->pagesize;
  4115. memorg->oobsize = memorg->pagesize / 32;
  4116. mtd->oobsize = memorg->oobsize;
  4117. /* All legacy ID NAND are small-page, SLC */
  4118. memorg->bits_per_cell = 1;
  4119. }
  4120. /*
  4121. * Set the bad block marker/indicator (BBM/BBI) patterns according to some
  4122. * heuristic patterns using various detected parameters (e.g., manufacturer,
  4123. * page size, cell-type information).
  4124. */
  4125. static void nand_decode_bbm_options(struct nand_chip *chip)
  4126. {
  4127. struct mtd_info *mtd = nand_to_mtd(chip);
  4128. /* Set the bad block position */
  4129. if (mtd->writesize > 512 || (chip->options & NAND_BUSWIDTH_16))
  4130. chip->badblockpos = NAND_BBM_POS_LARGE;
  4131. else
  4132. chip->badblockpos = NAND_BBM_POS_SMALL;
  4133. }
  4134. static inline bool is_full_id_nand(struct nand_flash_dev *type)
  4135. {
  4136. return type->id_len;
  4137. }
  4138. static bool find_full_id_nand(struct nand_chip *chip,
  4139. struct nand_flash_dev *type)
  4140. {
  4141. struct nand_device *base = &chip->base;
  4142. struct nand_ecc_props requirements;
  4143. struct mtd_info *mtd = nand_to_mtd(chip);
  4144. struct nand_memory_organization *memorg;
  4145. u8 *id_data = chip->id.data;
  4146. memorg = nanddev_get_memorg(&chip->base);
  4147. if (!strncmp(type->id, id_data, type->id_len)) {
  4148. memorg->pagesize = type->pagesize;
  4149. mtd->writesize = memorg->pagesize;
  4150. memorg->pages_per_eraseblock = type->erasesize /
  4151. type->pagesize;
  4152. mtd->erasesize = type->erasesize;
  4153. memorg->oobsize = type->oobsize;
  4154. mtd->oobsize = memorg->oobsize;
  4155. memorg->bits_per_cell = nand_get_bits_per_cell(id_data[2]);
  4156. memorg->eraseblocks_per_lun =
  4157. DIV_ROUND_DOWN_ULL((u64)type->chipsize << 20,
  4158. memorg->pagesize *
  4159. memorg->pages_per_eraseblock);
  4160. chip->options |= type->options;
  4161. requirements.strength = NAND_ECC_STRENGTH(type);
  4162. requirements.step_size = NAND_ECC_STEP(type);
  4163. nanddev_set_ecc_requirements(base, &requirements);
  4164. chip->parameters.model = kstrdup(type->name, GFP_KERNEL);
  4165. if (!chip->parameters.model)
  4166. return false;
  4167. return true;
  4168. }
  4169. return false;
  4170. }
  4171. /*
  4172. * Manufacturer detection. Only used when the NAND is not ONFI or JEDEC
  4173. * compliant and does not have a full-id or legacy-id entry in the nand_ids
  4174. * table.
  4175. */
  4176. static void nand_manufacturer_detect(struct nand_chip *chip)
  4177. {
  4178. /*
  4179. * Try manufacturer detection if available and use
  4180. * nand_decode_ext_id() otherwise.
  4181. */
  4182. if (chip->manufacturer.desc && chip->manufacturer.desc->ops &&
  4183. chip->manufacturer.desc->ops->detect) {
  4184. struct nand_memory_organization *memorg;
  4185. memorg = nanddev_get_memorg(&chip->base);
  4186. /* The 3rd id byte holds MLC / multichip data */
  4187. memorg->bits_per_cell = nand_get_bits_per_cell(chip->id.data[2]);
  4188. chip->manufacturer.desc->ops->detect(chip);
  4189. } else {
  4190. nand_decode_ext_id(chip);
  4191. }
  4192. }
  4193. /*
  4194. * Manufacturer initialization. This function is called for all NANDs including
  4195. * ONFI and JEDEC compliant ones.
  4196. * Manufacturer drivers should put all their specific initialization code in
  4197. * their ->init() hook.
  4198. */
  4199. static int nand_manufacturer_init(struct nand_chip *chip)
  4200. {
  4201. if (!chip->manufacturer.desc || !chip->manufacturer.desc->ops ||
  4202. !chip->manufacturer.desc->ops->init)
  4203. return 0;
  4204. return chip->manufacturer.desc->ops->init(chip);
  4205. }
  4206. /*
  4207. * Manufacturer cleanup. This function is called for all NANDs including
  4208. * ONFI and JEDEC compliant ones.
  4209. * Manufacturer drivers should put all their specific cleanup code in their
  4210. * ->cleanup() hook.
  4211. */
  4212. static void nand_manufacturer_cleanup(struct nand_chip *chip)
  4213. {
  4214. /* Release manufacturer private data */
  4215. if (chip->manufacturer.desc && chip->manufacturer.desc->ops &&
  4216. chip->manufacturer.desc->ops->cleanup)
  4217. chip->manufacturer.desc->ops->cleanup(chip);
  4218. }
  4219. static const char *
  4220. nand_manufacturer_name(const struct nand_manufacturer_desc *manufacturer_desc)
  4221. {
  4222. return manufacturer_desc ? manufacturer_desc->name : "Unknown";
  4223. }
/*
 * Get the flash and manufacturer id and lookup if the type is supported.
 *
 * Identification order: full-id table entries, then legacy dev_id table
 * entries, then ONFI, then JEDEC, then manufacturer-specific decoding.
 * Returns 0 on success, -ENODEV when no credible chip is found, or another
 * negative error code.
 */
static int nand_detect(struct nand_chip *chip, struct nand_flash_dev *type)
{
	const struct nand_manufacturer_desc *manufacturer_desc;
	struct mtd_info *mtd = nand_to_mtd(chip);
	struct nand_memory_organization *memorg;
	int busw, ret;
	u8 *id_data = chip->id.data;
	u8 maf_id, dev_id;
	u64 targetsize;

	/*
	 * Let's start by initializing memorg fields that might be left
	 * unassigned by the ID-based detection logic.
	 */
	memorg = nanddev_get_memorg(&chip->base);
	memorg->planes_per_lun = 1;
	memorg->luns_per_target = 1;

	/*
	 * Reset the chip, required by some chips (e.g. Micron MT29FxGxxxxx)
	 * after power-up.
	 */
	ret = nand_reset(chip, 0);
	if (ret)
		return ret;

	/* Select the device */
	nand_select_target(chip, 0);

	/* Send the command for reading device ID */
	ret = nand_readid_op(chip, 0, id_data, 2);
	if (ret)
		return ret;

	/* Read manufacturer and device IDs */
	maf_id = id_data[0];
	dev_id = id_data[1];

	/*
	 * Try again to make sure, as some systems the bus-hold or other
	 * interface concerns can cause random data which looks like a
	 * possibly credible NAND flash to appear. If the two results do
	 * not match, ignore the device completely.
	 */

	/* Read entire ID string */
	ret = nand_readid_op(chip, 0, id_data, sizeof(chip->id.data));
	if (ret)
		return ret;

	if (id_data[0] != maf_id || id_data[1] != dev_id) {
		pr_info("second ID read did not match %02x,%02x against %02x,%02x\n",
			maf_id, dev_id, id_data[0], id_data[1]);
		return -ENODEV;
	}

	chip->id.len = nand_id_len(id_data, ARRAY_SIZE(chip->id.data));

	/* Try to identify manufacturer */
	manufacturer_desc = nand_get_manufacturer_desc(maf_id);
	chip->manufacturer.desc = manufacturer_desc;

	if (!type)
		type = nand_flash_ids;

	/*
	 * Save the NAND_BUSWIDTH_16 flag before letting auto-detection logic
	 * override it.
	 * This is required to make sure initial NAND bus width set by the
	 * NAND controller driver is coherent with the real NAND bus width
	 * (extracted by auto-detection code).
	 */
	busw = chip->options & NAND_BUSWIDTH_16;

	/*
	 * The flag is only set (never cleared), reset it to its default value
	 * before starting auto-detection.
	 */
	chip->options &= ~NAND_BUSWIDTH_16;

	/* Walk full-id entries first; break out on a legacy dev_id match */
	for (; type->name != NULL; type++) {
		if (is_full_id_nand(type)) {
			if (find_full_id_nand(chip, type))
				goto ident_done;
		} else if (dev_id == type->dev_id) {
			break;
		}
	}

	if (!type->name || !type->pagesize) {
		/* Check if the chip is ONFI compliant */
		ret = nand_onfi_detect(chip);
		if (ret < 0)
			return ret;
		else if (ret)
			goto ident_done;

		/* Check if the chip is JEDEC compliant */
		ret = nand_jedec_detect(chip);
		if (ret < 0)
			return ret;
		else if (ret)
			goto ident_done;
	}

	if (!type->name)
		return -ENODEV;

	chip->parameters.model = kstrdup(type->name, GFP_KERNEL);
	if (!chip->parameters.model)
		return -ENOMEM;

	/* Legacy entries without a pagesize need further decoding */
	if (!type->pagesize)
		nand_manufacturer_detect(chip);
	else
		nand_decode_id(chip, type);

	/* Get chip options */
	chip->options |= type->options;

	memorg->eraseblocks_per_lun =
			DIV_ROUND_DOWN_ULL((u64)type->chipsize << 20,
					   memorg->pagesize *
					   memorg->pages_per_eraseblock);

ident_done:
	if (!mtd->name)
		mtd->name = chip->parameters.model;

	if (chip->options & NAND_BUSWIDTH_AUTO) {
		WARN_ON(busw & NAND_BUSWIDTH_16);
		nand_set_defaults(chip);
	} else if (busw != (chip->options & NAND_BUSWIDTH_16)) {
		/*
		 * Check, if buswidth is correct. Hardware drivers should set
		 * chip correct!
		 */
		pr_info("device found, Manufacturer ID: 0x%02x, Chip ID: 0x%02x\n",
			maf_id, dev_id);
		pr_info("%s %s\n", nand_manufacturer_name(manufacturer_desc),
			mtd->name);
		pr_warn("bus width %d instead of %d bits\n", busw ? 16 : 8,
			(chip->options & NAND_BUSWIDTH_16) ? 16 : 8);
		ret = -EINVAL;

		goto free_detect_allocation;
	}

	nand_decode_bbm_options(chip);

	/* Calculate the address shift from the page size */
	chip->page_shift = ffs(mtd->writesize) - 1;
	/* Convert chipsize to number of pages per chip -1 */
	targetsize = nanddev_target_size(&chip->base);
	chip->pagemask = (targetsize >> chip->page_shift) - 1;

	chip->bbt_erase_shift = chip->phys_erase_shift =
		ffs(mtd->erasesize) - 1;
	/* ffs() works on 32 bits only, handle targets larger than 4GiB */
	if (targetsize & 0xffffffff)
		chip->chip_shift = ffs((unsigned)targetsize) - 1;
	else {
		chip->chip_shift = ffs((unsigned)(targetsize >> 32));
		chip->chip_shift += 32 - 1;
	}

	if (chip->chip_shift - chip->page_shift > 16)
		chip->options |= NAND_ROW_ADDR_3;

	chip->badblockbits = 8;

	nand_legacy_adjust_cmdfunc(chip);

	pr_info("device found, Manufacturer ID: 0x%02x, Chip ID: 0x%02x\n",
		maf_id, dev_id);
	pr_info("%s %s\n", nand_manufacturer_name(manufacturer_desc),
		chip->parameters.model);
	pr_info("%d MiB, %s, erase size: %d KiB, page size: %d, OOB size: %d\n",
		(int)(targetsize >> 20), nand_is_slc(chip) ? "SLC" : "MLC",
		mtd->erasesize >> 10, mtd->writesize, mtd->oobsize);
	return 0;

free_detect_allocation:
	kfree(chip->parameters.model);

	return ret;
}
/*
 * Map the deprecated "nand-ecc-mode" DT property onto the generic ECC
 * engine types. Returns NAND_ECC_ENGINE_TYPE_INVALID when the property
 * is absent or its value is not one of the known legacy strings.
 */
static enum nand_ecc_engine_type
of_get_rawnand_ecc_engine_type_legacy(struct device_node *np)
{
	enum nand_ecc_legacy_mode {
		NAND_ECC_INVALID,
		NAND_ECC_NONE,
		NAND_ECC_SOFT,
		NAND_ECC_SOFT_BCH,
		NAND_ECC_HW,
		NAND_ECC_HW_SYNDROME,
		NAND_ECC_ON_DIE,
	};
	/* Indexed by the enum above; NAND_ECC_INVALID has no string */
	const char * const nand_ecc_legacy_modes[] = {
		[NAND_ECC_NONE]		= "none",
		[NAND_ECC_SOFT]		= "soft",
		[NAND_ECC_SOFT_BCH]	= "soft_bch",
		[NAND_ECC_HW]		= "hw",
		[NAND_ECC_HW_SYNDROME]	= "hw_syndrome",
		[NAND_ECC_ON_DIE]	= "on-die",
	};
	enum nand_ecc_legacy_mode eng_type;
	const char *pm;
	int err;

	err = of_property_read_string(np, "nand-ecc-mode", &pm);
	if (err)
		return NAND_ECC_ENGINE_TYPE_INVALID;

	/* Start at NAND_ECC_NONE to skip the string-less INVALID slot */
	for (eng_type = NAND_ECC_NONE;
	     eng_type < ARRAY_SIZE(nand_ecc_legacy_modes); eng_type++) {
		if (!strcasecmp(pm, nand_ecc_legacy_modes[eng_type])) {
			switch (eng_type) {
			case NAND_ECC_NONE:
				return NAND_ECC_ENGINE_TYPE_NONE;
			case NAND_ECC_SOFT:
			case NAND_ECC_SOFT_BCH:
				return NAND_ECC_ENGINE_TYPE_SOFT;
			case NAND_ECC_HW:
			case NAND_ECC_HW_SYNDROME:
				return NAND_ECC_ENGINE_TYPE_ON_HOST;
			case NAND_ECC_ON_DIE:
				return NAND_ECC_ENGINE_TYPE_ON_DIE;
			default:
				break;
			}
		}
	}

	return NAND_ECC_ENGINE_TYPE_INVALID;
}
  4427. static enum nand_ecc_placement
  4428. of_get_rawnand_ecc_placement_legacy(struct device_node *np)
  4429. {
  4430. const char *pm;
  4431. int err;
  4432. err = of_property_read_string(np, "nand-ecc-mode", &pm);
  4433. if (!err) {
  4434. if (!strcasecmp(pm, "hw_syndrome"))
  4435. return NAND_ECC_PLACEMENT_INTERLEAVED;
  4436. }
  4437. return NAND_ECC_PLACEMENT_UNKNOWN;
  4438. }
  4439. static enum nand_ecc_algo of_get_rawnand_ecc_algo_legacy(struct device_node *np)
  4440. {
  4441. const char *pm;
  4442. int err;
  4443. err = of_property_read_string(np, "nand-ecc-mode", &pm);
  4444. if (!err) {
  4445. if (!strcasecmp(pm, "soft"))
  4446. return NAND_ECC_ALGO_HAMMING;
  4447. else if (!strcasecmp(pm, "soft_bch"))
  4448. return NAND_ECC_ALGO_BCH;
  4449. }
  4450. return NAND_ECC_ALGO_UNKNOWN;
  4451. }
  4452. static void of_get_nand_ecc_legacy_user_config(struct nand_chip *chip)
  4453. {
  4454. struct device_node *dn = nand_get_flash_node(chip);
  4455. struct nand_ecc_props *user_conf = &chip->base.ecc.user_conf;
  4456. if (user_conf->engine_type == NAND_ECC_ENGINE_TYPE_INVALID)
  4457. user_conf->engine_type = of_get_rawnand_ecc_engine_type_legacy(dn);
  4458. if (user_conf->algo == NAND_ECC_ALGO_UNKNOWN)
  4459. user_conf->algo = of_get_rawnand_ecc_algo_legacy(dn);
  4460. if (user_conf->placement == NAND_ECC_PLACEMENT_UNKNOWN)
  4461. user_conf->placement = of_get_rawnand_ecc_placement_legacy(dn);
  4462. }
  4463. static int of_get_nand_bus_width(struct nand_chip *chip)
  4464. {
  4465. struct device_node *dn = nand_get_flash_node(chip);
  4466. u32 val;
  4467. int ret;
  4468. ret = of_property_read_u32(dn, "nand-bus-width", &val);
  4469. if (ret == -EINVAL)
  4470. /* Buswidth defaults to 8 if the property does not exist .*/
  4471. return 0;
  4472. else if (ret)
  4473. return ret;
  4474. if (val == 16)
  4475. chip->options |= NAND_BUSWIDTH_16;
  4476. else if (val != 8)
  4477. return -EINVAL;
  4478. return 0;
  4479. }
/*
 * Parse the "secure-regions" DT property into chip->secure_regions.
 * Each region is an (offset, size) pair of u64 cells.
 *
 * NOTE(review): an odd cell count silently drops the trailing cell
 * (nr_elem / 2), and the of_property_read_u64_index() return values are
 * ignored — presumably safe because the count was validated above, but
 * worth confirming.
 */
static int of_get_nand_secure_regions(struct nand_chip *chip)
{
	struct device_node *dn = nand_get_flash_node(chip);
	struct property *prop;
	int nr_elem, i, j;

	/* Only proceed if the "secure-regions" property is present in DT */
	prop = of_find_property(dn, "secure-regions", NULL);
	if (!prop)
		return 0;

	nr_elem = of_property_count_elems_of_size(dn, "secure-regions", sizeof(u64));
	if (nr_elem <= 0)
		return nr_elem;

	/* Two u64 cells (offset, size) per region */
	chip->nr_secure_regions = nr_elem / 2;
	chip->secure_regions = kcalloc(chip->nr_secure_regions, sizeof(*chip->secure_regions),
				       GFP_KERNEL);
	if (!chip->secure_regions)
		return -ENOMEM;

	for (i = 0, j = 0; i < chip->nr_secure_regions; i++, j += 2) {
		of_property_read_u64_index(dn, "secure-regions", j,
					   &chip->secure_regions[i].offset);
		of_property_read_u64_index(dn, "secure-regions", j + 1,
					   &chip->secure_regions[i].size);
	}

	return 0;
}
/**
 * rawnand_dt_parse_gpio_cs - Parse the gpio-cs property of a controller
 * @dev: Device that will be parsed. Also used for managed allocations.
 * @cs_array: Array of GPIO desc pointers allocated on success
 * @ncs_array: Number of entries in @cs_array updated on success.
 * @return 0 on success, an error otherwise.
 */
int rawnand_dt_parse_gpio_cs(struct device *dev, struct gpio_desc ***cs_array,
			     unsigned int *ncs_array)
{
	struct gpio_desc **descs;
	int ndescs, i;

	/*
	 * A missing/invalid cs-gpios property is not an error: the
	 * controller may be using native chip-selects only.
	 */
	ndescs = gpiod_count(dev, "cs");
	if (ndescs < 0) {
		dev_dbg(dev, "No valid cs-gpios property\n");
		return 0;
	}

	/* The descriptor array itself is device-managed */
	descs = devm_kcalloc(dev, ndescs, sizeof(*descs), GFP_KERNEL);
	if (!descs)
		return -ENOMEM;

	for (i = 0; i < ndescs; i++) {
		/* Missing entries yield NULL descriptors (optional variant) */
		descs[i] = gpiod_get_index_optional(dev, "cs", i,
						    GPIOD_OUT_HIGH);
		/*
		 * NOTE(review): on failure, descriptors acquired in earlier
		 * iterations are not released here (only the array is devm
		 * managed, not the gpiods) — confirm callers clean these up.
		 */
		if (IS_ERR(descs[i]))
			return PTR_ERR(descs[i]);
	}

	*ncs_array = ndescs;
	*cs_array = descs;

	return 0;
}
EXPORT_SYMBOL(rawnand_dt_parse_gpio_cs);
/* Parse the generic and raw-NAND specific DT properties of a flash node. */
static int rawnand_dt_init(struct nand_chip *chip)
{
	struct nand_device *nand = mtd_to_nanddev(nand_to_mtd(chip));
	struct device_node *dn = nand_get_flash_node(chip);
	int ret;

	/* Nothing to parse without a flash DT node */
	if (!dn)
		return 0;

	ret = of_get_nand_bus_width(chip);
	if (ret)
		return ret;

	if (of_property_read_bool(dn, "nand-is-boot-medium"))
		chip->options |= NAND_IS_BOOT_MEDIUM;

	if (of_property_read_bool(dn, "nand-on-flash-bbt"))
		chip->bbt_options |= NAND_BBT_USE_FLASH;

	/* Generic ECC properties first, legacy raw NAND ones as fallback */
	of_get_nand_ecc_user_config(nand);
	of_get_nand_ecc_legacy_user_config(chip);

	/*
	 * If neither the user nor the NAND controller have requested a specific
	 * ECC engine type, we will default to NAND_ECC_ENGINE_TYPE_ON_HOST.
	 */
	nand->ecc.defaults.engine_type = NAND_ECC_ENGINE_TYPE_ON_HOST;

	/*
	 * Use the user requested engine type, unless there is none, in this
	 * case default to the NAND controller choice, otherwise fallback to
	 * the raw NAND default one.
	 */
	if (nand->ecc.user_conf.engine_type != NAND_ECC_ENGINE_TYPE_INVALID)
		chip->ecc.engine_type = nand->ecc.user_conf.engine_type;
	if (chip->ecc.engine_type == NAND_ECC_ENGINE_TYPE_INVALID)
		chip->ecc.engine_type = nand->ecc.defaults.engine_type;

	/* Propagate the remaining user ECC settings to the raw NAND layer */
	chip->ecc.placement = nand->ecc.user_conf.placement;
	chip->ecc.algo = nand->ecc.user_conf.algo;
	chip->ecc.strength = nand->ecc.user_conf.strength;
	chip->ecc.size = nand->ecc.user_conf.step_size;

	return 0;
}
/**
 * nand_scan_ident - Scan for the NAND device
 * @chip: NAND chip object
 * @maxchips: number of chips to scan for
 * @table: alternative NAND ID table
 *
 * This is the first phase of the normal nand_scan() function. It reads the
 * flash ID and sets up MTD fields accordingly.
 *
 * This helper used to be called directly from controller drivers that needed
 * to tweak some ECC-related parameters before nand_scan_tail(). This separation
 * prevented dynamic allocations during this phase which was inconvenient and
 * has been banned for the benefit of the ->init_ecc()/cleanup_ecc() hooks.
 */
static int nand_scan_ident(struct nand_chip *chip, unsigned int maxchips,
			   struct nand_flash_dev *table)
{
	struct mtd_info *mtd = nand_to_mtd(chip);
	struct nand_memory_organization *memorg;
	int nand_maf_id, nand_dev_id;
	unsigned int i;
	int ret;

	memorg = nanddev_get_memorg(&chip->base);

	/* Assume all dies are deselected when we enter nand_scan_ident(). */
	chip->cur_cs = -1;

	mutex_init(&chip->lock);
	init_waitqueue_head(&chip->resume_wq);

	/* Enforce the right timings for reset/detection */
	chip->current_interface_config = nand_get_reset_interface_config();

	ret = rawnand_dt_init(chip);
	if (ret)
		return ret;

	/* Fall back to the parent device name if no MTD name was given */
	if (!mtd->name && mtd->dev.parent)
		mtd->name = dev_name(mtd->dev.parent);

	/* Set the default functions */
	nand_set_defaults(chip);

	ret = nand_legacy_check_hooks(chip);
	if (ret)
		return ret;

	memorg->ntargets = maxchips;

	/* Read the flash type */
	ret = nand_detect(chip, table);
	if (ret) {
		if (!(chip->options & NAND_SCAN_SILENT_NODEV))
			pr_warn("No NAND device found\n");
		nand_deselect_target(chip);
		return ret;
	}

	nand_maf_id = chip->id.data[0];
	nand_dev_id = chip->id.data[1];

	nand_deselect_target(chip);

	/* Check for a chip array */
	for (i = 1; i < maxchips; i++) {
		u8 id[2];

		/* See comment in nand_get_flash_type for reset */
		ret = nand_reset(chip, i);
		if (ret)
			break;

		nand_select_target(chip, i);
		/* Send the command for reading device ID */
		ret = nand_readid_op(chip, 0, id, sizeof(id));
		if (ret)
			break;
		/* Read manufacturer and device IDs */
		if (nand_maf_id != id[0] || nand_dev_id != id[1]) {
			nand_deselect_target(chip);
			break;
		}
		nand_deselect_target(chip);
	}
	if (i > 1)
		pr_info("%d chips detected\n", i);

	/* Store the number of chips and calc total size for mtd */
	memorg->ntargets = i;
	mtd->size = i * nanddev_target_size(&chip->base);

	return 0;
}
  4649. static void nand_scan_ident_cleanup(struct nand_chip *chip)
  4650. {
  4651. kfree(chip->parameters.model);
  4652. kfree(chip->parameters.onfi);
  4653. }
  4654. int rawnand_sw_hamming_init(struct nand_chip *chip)
  4655. {
  4656. struct nand_ecc_sw_hamming_conf *engine_conf;
  4657. struct nand_device *base = &chip->base;
  4658. int ret;
  4659. base->ecc.user_conf.engine_type = NAND_ECC_ENGINE_TYPE_SOFT;
  4660. base->ecc.user_conf.algo = NAND_ECC_ALGO_HAMMING;
  4661. base->ecc.user_conf.strength = chip->ecc.strength;
  4662. base->ecc.user_conf.step_size = chip->ecc.size;
  4663. ret = nand_ecc_sw_hamming_init_ctx(base);
  4664. if (ret)
  4665. return ret;
  4666. engine_conf = base->ecc.ctx.priv;
  4667. if (chip->ecc.options & NAND_ECC_SOFT_HAMMING_SM_ORDER)
  4668. engine_conf->sm_order = true;
  4669. chip->ecc.size = base->ecc.ctx.conf.step_size;
  4670. chip->ecc.strength = base->ecc.ctx.conf.strength;
  4671. chip->ecc.total = base->ecc.ctx.total;
  4672. chip->ecc.steps = nanddev_get_ecc_nsteps(base);
  4673. chip->ecc.bytes = base->ecc.ctx.total / nanddev_get_ecc_nsteps(base);
  4674. return 0;
  4675. }
  4676. EXPORT_SYMBOL(rawnand_sw_hamming_init);
  4677. int rawnand_sw_hamming_calculate(struct nand_chip *chip,
  4678. const unsigned char *buf,
  4679. unsigned char *code)
  4680. {
  4681. struct nand_device *base = &chip->base;
  4682. return nand_ecc_sw_hamming_calculate(base, buf, code);
  4683. }
  4684. EXPORT_SYMBOL(rawnand_sw_hamming_calculate);
  4685. int rawnand_sw_hamming_correct(struct nand_chip *chip,
  4686. unsigned char *buf,
  4687. unsigned char *read_ecc,
  4688. unsigned char *calc_ecc)
  4689. {
  4690. struct nand_device *base = &chip->base;
  4691. return nand_ecc_sw_hamming_correct(base, buf, read_ecc, calc_ecc);
  4692. }
  4693. EXPORT_SYMBOL(rawnand_sw_hamming_correct);
  4694. void rawnand_sw_hamming_cleanup(struct nand_chip *chip)
  4695. {
  4696. struct nand_device *base = &chip->base;
  4697. nand_ecc_sw_hamming_cleanup_ctx(base);
  4698. }
  4699. EXPORT_SYMBOL(rawnand_sw_hamming_cleanup);
  4700. int rawnand_sw_bch_init(struct nand_chip *chip)
  4701. {
  4702. struct nand_device *base = &chip->base;
  4703. const struct nand_ecc_props *ecc_conf = nanddev_get_ecc_conf(base);
  4704. int ret;
  4705. base->ecc.user_conf.engine_type = NAND_ECC_ENGINE_TYPE_SOFT;
  4706. base->ecc.user_conf.algo = NAND_ECC_ALGO_BCH;
  4707. base->ecc.user_conf.step_size = chip->ecc.size;
  4708. base->ecc.user_conf.strength = chip->ecc.strength;
  4709. ret = nand_ecc_sw_bch_init_ctx(base);
  4710. if (ret)
  4711. return ret;
  4712. chip->ecc.size = ecc_conf->step_size;
  4713. chip->ecc.strength = ecc_conf->strength;
  4714. chip->ecc.total = base->ecc.ctx.total;
  4715. chip->ecc.steps = nanddev_get_ecc_nsteps(base);
  4716. chip->ecc.bytes = base->ecc.ctx.total / nanddev_get_ecc_nsteps(base);
  4717. return 0;
  4718. }
  4719. EXPORT_SYMBOL(rawnand_sw_bch_init);
  4720. static int rawnand_sw_bch_calculate(struct nand_chip *chip,
  4721. const unsigned char *buf,
  4722. unsigned char *code)
  4723. {
  4724. struct nand_device *base = &chip->base;
  4725. return nand_ecc_sw_bch_calculate(base, buf, code);
  4726. }
  4727. int rawnand_sw_bch_correct(struct nand_chip *chip, unsigned char *buf,
  4728. unsigned char *read_ecc, unsigned char *calc_ecc)
  4729. {
  4730. struct nand_device *base = &chip->base;
  4731. return nand_ecc_sw_bch_correct(base, buf, read_ecc, calc_ecc);
  4732. }
  4733. EXPORT_SYMBOL(rawnand_sw_bch_correct);
  4734. void rawnand_sw_bch_cleanup(struct nand_chip *chip)
  4735. {
  4736. struct nand_device *base = &chip->base;
  4737. nand_ecc_sw_bch_cleanup_ctx(base);
  4738. }
  4739. EXPORT_SYMBOL(rawnand_sw_bch_cleanup);
/*
 * Fill in the default page/OOB accessors for on-host (controller side) ECC,
 * depending on where the ECC bytes are placed. Returns 0 or -EINVAL when the
 * driver supplied an inconsistent set of ECC hooks.
 */
static int nand_set_ecc_on_host_ops(struct nand_chip *chip)
{
	struct nand_ecc_ctrl *ecc = &chip->ecc;

	switch (ecc->placement) {
	case NAND_ECC_PLACEMENT_UNKNOWN:
	case NAND_ECC_PLACEMENT_OOB:
		/* Use standard hwecc read page function? */
		if (!ecc->read_page)
			ecc->read_page = nand_read_page_hwecc;
		if (!ecc->write_page)
			ecc->write_page = nand_write_page_hwecc;
		if (!ecc->read_page_raw)
			ecc->read_page_raw = nand_read_page_raw;
		if (!ecc->write_page_raw)
			ecc->write_page_raw = nand_write_page_raw;
		if (!ecc->read_oob)
			ecc->read_oob = nand_read_oob_std;
		if (!ecc->write_oob)
			ecc->write_oob = nand_write_oob_std;
		if (!ecc->read_subpage)
			ecc->read_subpage = nand_read_subpage;
		/* Subpage writes need the hwctl/calculate pair to work */
		if (!ecc->write_subpage && ecc->hwctl && ecc->calculate)
			ecc->write_subpage = nand_write_subpage_hwecc;

		fallthrough;
	case NAND_ECC_PLACEMENT_INTERLEAVED:
		/*
		 * The standard hwecc helpers require hwctl/calculate/correct;
		 * without them the driver must supply its own page accessors.
		 */
		if ((!ecc->calculate || !ecc->correct || !ecc->hwctl) &&
		    (!ecc->read_page ||
		     ecc->read_page == nand_read_page_hwecc ||
		     !ecc->write_page ||
		     ecc->write_page == nand_write_page_hwecc)) {
			WARN(1, "No ECC functions supplied; hardware ECC not possible\n");
			return -EINVAL;
		}
		/* Use standard syndrome read/write page function? */
		if (!ecc->read_page)
			ecc->read_page = nand_read_page_syndrome;
		if (!ecc->write_page)
			ecc->write_page = nand_write_page_syndrome;
		if (!ecc->read_page_raw)
			ecc->read_page_raw = nand_read_page_raw_syndrome;
		if (!ecc->write_page_raw)
			ecc->write_page_raw = nand_write_page_raw_syndrome;
		if (!ecc->read_oob)
			ecc->read_oob = nand_read_oob_syndrome;
		if (!ecc->write_oob)
			ecc->write_oob = nand_write_oob_syndrome;
		break;

	default:
		pr_warn("Invalid NAND_ECC_PLACEMENT %d\n",
			ecc->placement);
		return -EINVAL;
	}

	return 0;
}
/*
 * Configure software ECC (Hamming or BCH): install the generic accessors and
 * initialize the matching software engine context.
 */
static int nand_set_ecc_soft_ops(struct nand_chip *chip)
{
	struct mtd_info *mtd = nand_to_mtd(chip);
	struct nand_device *nanddev = mtd_to_nanddev(mtd);
	struct nand_ecc_ctrl *ecc = &chip->ecc;
	int ret;

	if (WARN_ON(ecc->engine_type != NAND_ECC_ENGINE_TYPE_SOFT))
		return -EINVAL;

	switch (ecc->algo) {
	case NAND_ECC_ALGO_HAMMING:
		ecc->calculate = rawnand_sw_hamming_calculate;
		ecc->correct = rawnand_sw_hamming_correct;
		ecc->read_page = nand_read_page_swecc;
		ecc->read_subpage = nand_read_subpage;
		ecc->write_page = nand_write_page_swecc;
		if (!ecc->read_page_raw)
			ecc->read_page_raw = nand_read_page_raw;
		if (!ecc->write_page_raw)
			ecc->write_page_raw = nand_write_page_raw;
		ecc->read_oob = nand_read_oob_std;
		ecc->write_oob = nand_write_oob_std;
		/* Classic SW Hamming: 3 ECC bytes per 256-byte step */
		if (!ecc->size)
			ecc->size = 256;
		ecc->bytes = 3;
		ecc->strength = 1;

		if (IS_ENABLED(CONFIG_MTD_NAND_ECC_SW_HAMMING_SMC))
			ecc->options |= NAND_ECC_SOFT_HAMMING_SM_ORDER;

		ret = rawnand_sw_hamming_init(chip);
		if (ret) {
			WARN(1, "Hamming ECC initialization failed!\n");
			return ret;
		}

		return 0;
	case NAND_ECC_ALGO_BCH:
		if (!IS_ENABLED(CONFIG_MTD_NAND_ECC_SW_BCH)) {
			WARN(1, "CONFIG_MTD_NAND_ECC_SW_BCH not enabled\n");
			return -EINVAL;
		}
		ecc->calculate = rawnand_sw_bch_calculate;
		ecc->correct = rawnand_sw_bch_correct;
		ecc->read_page = nand_read_page_swecc;
		ecc->read_subpage = nand_read_subpage;
		ecc->write_page = nand_write_page_swecc;
		if (!ecc->read_page_raw)
			ecc->read_page_raw = nand_read_page_raw;
		if (!ecc->write_page_raw)
			ecc->write_page_raw = nand_write_page_raw;
		ecc->read_oob = nand_read_oob_std;
		ecc->write_oob = nand_write_oob_std;

		/*
		 * We can only maximize ECC config when the default layout is
		 * used, otherwise we don't know how many bytes can really be
		 * used.
		 */
		if (nanddev->ecc.user_conf.flags & NAND_ECC_MAXIMIZE_STRENGTH &&
		    mtd->ooblayout != nand_get_large_page_ooblayout())
			nanddev->ecc.user_conf.flags &= ~NAND_ECC_MAXIMIZE_STRENGTH;

		ret = rawnand_sw_bch_init(chip);
		if (ret) {
			WARN(1, "BCH ECC initialization failed!\n");
			return ret;
		}

		return 0;
	default:
		WARN(1, "Unsupported ECC algorithm!\n");
		return -EINVAL;
	}
}
  4862. /**
  4863. * nand_check_ecc_caps - check the sanity of preset ECC settings
  4864. * @chip: nand chip info structure
  4865. * @caps: ECC caps info structure
  4866. * @oobavail: OOB size that the ECC engine can use
  4867. *
  4868. * When ECC step size and strength are already set, check if they are supported
  4869. * by the controller and the calculated ECC bytes fit within the chip's OOB.
  4870. * On success, the calculated ECC bytes is set.
  4871. */
  4872. static int
  4873. nand_check_ecc_caps(struct nand_chip *chip,
  4874. const struct nand_ecc_caps *caps, int oobavail)
  4875. {
  4876. struct mtd_info *mtd = nand_to_mtd(chip);
  4877. const struct nand_ecc_step_info *stepinfo;
  4878. int preset_step = chip->ecc.size;
  4879. int preset_strength = chip->ecc.strength;
  4880. int ecc_bytes, nsteps = mtd->writesize / preset_step;
  4881. int i, j;
  4882. for (i = 0; i < caps->nstepinfos; i++) {
  4883. stepinfo = &caps->stepinfos[i];
  4884. if (stepinfo->stepsize != preset_step)
  4885. continue;
  4886. for (j = 0; j < stepinfo->nstrengths; j++) {
  4887. if (stepinfo->strengths[j] != preset_strength)
  4888. continue;
  4889. ecc_bytes = caps->calc_ecc_bytes(preset_step,
  4890. preset_strength);
  4891. if (WARN_ON_ONCE(ecc_bytes < 0))
  4892. return ecc_bytes;
  4893. if (ecc_bytes * nsteps > oobavail) {
  4894. pr_err("ECC (step, strength) = (%d, %d) does not fit in OOB",
  4895. preset_step, preset_strength);
  4896. return -ENOSPC;
  4897. }
  4898. chip->ecc.bytes = ecc_bytes;
  4899. return 0;
  4900. }
  4901. }
  4902. pr_err("ECC (step, strength) = (%d, %d) not supported on this controller",
  4903. preset_step, preset_strength);
  4904. return -ENOTSUPP;
  4905. }
/**
 * nand_match_ecc_req - meet the chip's requirement with least ECC bytes
 * @chip: nand chip info structure
 * @caps: ECC engine caps info structure
 * @oobavail: OOB size that the ECC engine can use
 *
 * If a chip's ECC requirement is provided, try to meet it with the least
 * number of ECC bytes (i.e. with the largest number of OOB-free bytes).
 * On success, the chosen ECC settings are set.
 */
static int
nand_match_ecc_req(struct nand_chip *chip,
		   const struct nand_ecc_caps *caps, int oobavail)
{
	const struct nand_ecc_props *requirements =
		nanddev_get_ecc_requirements(&chip->base);
	struct mtd_info *mtd = nand_to_mtd(chip);
	const struct nand_ecc_step_info *stepinfo;
	int req_step = requirements->step_size;
	int req_strength = requirements->strength;
	int req_corr, step_size, strength, nsteps, ecc_bytes, ecc_bytes_total;
	int best_step = 0, best_strength = 0, best_ecc_bytes = 0;
	int best_ecc_bytes_total = INT_MAX;
	int i, j;

	/* No information provided by the NAND chip */
	if (!req_step || !req_strength)
		return -ENOTSUPP;

	/* number of correctable bits the chip requires in a page */
	req_corr = mtd->writesize / req_step * req_strength;

	/* Scan every (step, strength) pair the controller supports */
	for (i = 0; i < caps->nstepinfos; i++) {
		stepinfo = &caps->stepinfos[i];
		step_size = stepinfo->stepsize;

		for (j = 0; j < stepinfo->nstrengths; j++) {
			strength = stepinfo->strengths[j];

			/*
			 * If both step size and strength are smaller than the
			 * chip's requirement, it is not easy to compare the
			 * resulted reliability.
			 */
			if (step_size < req_step && strength < req_strength)
				continue;

			/* The page must divide evenly into ECC steps */
			if (mtd->writesize % step_size)
				continue;

			nsteps = mtd->writesize / step_size;

			ecc_bytes = caps->calc_ecc_bytes(step_size, strength);
			if (WARN_ON_ONCE(ecc_bytes < 0))
				continue;
			ecc_bytes_total = ecc_bytes * nsteps;

			/* Must fit in OOB and meet the required correctability */
			if (ecc_bytes_total > oobavail ||
			    strength * nsteps < req_corr)
				continue;

			/*
			 * We assume the best is to meet the chip's requirement
			 * with the least number of ECC bytes.
			 */
			if (ecc_bytes_total < best_ecc_bytes_total) {
				best_ecc_bytes_total = ecc_bytes_total;
				best_step = step_size;
				best_strength = strength;
				best_ecc_bytes = ecc_bytes;
			}
		}
	}

	/* Untouched sentinel means no candidate satisfied the requirement */
	if (best_ecc_bytes_total == INT_MAX)
		return -ENOTSUPP;

	chip->ecc.size = best_step;
	chip->ecc.strength = best_strength;
	chip->ecc.bytes = best_ecc_bytes;

	return 0;
}
/**
 * nand_maximize_ecc - choose the max ECC strength available
 * @chip: nand chip info structure
 * @caps: ECC engine caps info structure
 * @oobavail: OOB size that the ECC engine can use
 *
 * Choose the max ECC strength that is supported on the controller, and can fit
 * within the chip's OOB. On success, the chosen ECC settings are set.
 */
static int
nand_maximize_ecc(struct nand_chip *chip,
		  const struct nand_ecc_caps *caps, int oobavail)
{
	struct mtd_info *mtd = nand_to_mtd(chip);
	const struct nand_ecc_step_info *stepinfo;
	int step_size, strength, nsteps, ecc_bytes, corr;
	int best_corr = 0;
	int best_step = 0;
	int best_strength = 0, best_ecc_bytes = 0;
	int i, j;

	for (i = 0; i < caps->nstepinfos; i++) {
		stepinfo = &caps->stepinfos[i];
		step_size = stepinfo->stepsize;

		/* If chip->ecc.size is already set, respect it */
		if (chip->ecc.size && step_size != chip->ecc.size)
			continue;

		for (j = 0; j < stepinfo->nstrengths; j++) {
			strength = stepinfo->strengths[j];

			/* The page must divide evenly into ECC steps */
			if (mtd->writesize % step_size)
				continue;

			nsteps = mtd->writesize / step_size;

			ecc_bytes = caps->calc_ecc_bytes(step_size, strength);
			if (WARN_ON_ONCE(ecc_bytes < 0))
				continue;

			/* Skip configurations that do not fit in OOB */
			if (ecc_bytes * nsteps > oobavail)
				continue;

			/* Total correctable bits per page */
			corr = strength * nsteps;

			/*
			 * If the number of correctable bits is the same,
			 * bigger step_size has more reliability.
			 */
			if (corr > best_corr ||
			    (corr == best_corr && step_size > best_step)) {
				best_corr = corr;
				best_step = step_size;
				best_strength = strength;
				best_ecc_bytes = ecc_bytes;
			}
		}
	}

	/* No candidate fit at all */
	if (!best_corr)
		return -ENOTSUPP;

	chip->ecc.size = best_step;
	chip->ecc.strength = best_strength;
	chip->ecc.bytes = best_ecc_bytes;

	return 0;
}
  5033. /**
  5034. * nand_ecc_choose_conf - Set the ECC strength and ECC step size
  5035. * @chip: nand chip info structure
  5036. * @caps: ECC engine caps info structure
  5037. * @oobavail: OOB size that the ECC engine can use
  5038. *
  5039. * Choose the ECC configuration according to following logic.
  5040. *
  5041. * 1. If both ECC step size and ECC strength are already set (usually by DT)
  5042. * then check if it is supported by this controller.
  5043. * 2. If the user provided the nand-ecc-maximize property, then select maximum
  5044. * ECC strength.
  5045. * 3. Otherwise, try to match the ECC step size and ECC strength closest
  5046. * to the chip's requirement. If available OOB size can't fit the chip
  5047. * requirement then fallback to the maximum ECC step size and ECC strength.
  5048. *
  5049. * On success, the chosen ECC settings are set.
  5050. */
  5051. int nand_ecc_choose_conf(struct nand_chip *chip,
  5052. const struct nand_ecc_caps *caps, int oobavail)
  5053. {
  5054. struct mtd_info *mtd = nand_to_mtd(chip);
  5055. struct nand_device *nanddev = mtd_to_nanddev(mtd);
  5056. if (WARN_ON(oobavail < 0 || oobavail > mtd->oobsize))
  5057. return -EINVAL;
  5058. if (chip->ecc.size && chip->ecc.strength)
  5059. return nand_check_ecc_caps(chip, caps, oobavail);
  5060. if (nanddev->ecc.user_conf.flags & NAND_ECC_MAXIMIZE_STRENGTH)
  5061. return nand_maximize_ecc(chip, caps, oobavail);
  5062. if (!nand_match_ecc_req(chip, caps, oobavail))
  5063. return 0;
  5064. return nand_maximize_ecc(chip, caps, oobavail);
  5065. }
  5066. EXPORT_SYMBOL_GPL(nand_ecc_choose_conf);
  5067. static int rawnand_erase(struct nand_device *nand, const struct nand_pos *pos)
  5068. {
  5069. struct nand_chip *chip = container_of(nand, struct nand_chip,
  5070. base);
  5071. unsigned int eb = nanddev_pos_to_row(nand, pos);
  5072. int ret;
  5073. eb >>= nand->rowconv.eraseblock_addr_shift;
  5074. nand_select_target(chip, pos->target);
  5075. ret = nand_erase_op(chip, eb);
  5076. nand_deselect_target(chip);
  5077. return ret;
  5078. }
  5079. static int rawnand_markbad(struct nand_device *nand,
  5080. const struct nand_pos *pos)
  5081. {
  5082. struct nand_chip *chip = container_of(nand, struct nand_chip,
  5083. base);
  5084. return nand_markbad_bbm(chip, nanddev_pos_to_offs(nand, pos));
  5085. }
  5086. static bool rawnand_isbad(struct nand_device *nand, const struct nand_pos *pos)
  5087. {
  5088. struct nand_chip *chip = container_of(nand, struct nand_chip,
  5089. base);
  5090. int ret;
  5091. nand_select_target(chip, pos->target);
  5092. ret = nand_isbad_bbm(chip, nanddev_pos_to_offs(nand, pos));
  5093. nand_deselect_target(chip);
  5094. return ret;
  5095. }
  5096. static const struct nand_ops rawnand_ops = {
  5097. .erase = rawnand_erase,
  5098. .markbad = rawnand_markbad,
  5099. .isbad = rawnand_isbad,
  5100. };
  5101. /**
  5102. * nand_scan_tail - Scan for the NAND device
  5103. * @chip: NAND chip object
  5104. *
  5105. * This is the second phase of the normal nand_scan() function. It fills out
  5106. * all the uninitialized function pointers with the defaults and scans for a
  5107. * bad block table if appropriate.
  5108. */
  5109. static int nand_scan_tail(struct nand_chip *chip)
  5110. {
  5111. struct mtd_info *mtd = nand_to_mtd(chip);
  5112. struct nand_ecc_ctrl *ecc = &chip->ecc;
  5113. int ret, i;
  5114. /* New bad blocks should be marked in OOB, flash-based BBT, or both */
  5115. if (WARN_ON((chip->bbt_options & NAND_BBT_NO_OOB_BBM) &&
  5116. !(chip->bbt_options & NAND_BBT_USE_FLASH))) {
  5117. return -EINVAL;
  5118. }
  5119. chip->data_buf = kmalloc(mtd->writesize + mtd->oobsize, GFP_KERNEL);
  5120. if (!chip->data_buf)
  5121. return -ENOMEM;
  5122. /*
  5123. * FIXME: some NAND manufacturer drivers expect the first die to be
  5124. * selected when manufacturer->init() is called. They should be fixed
  5125. * to explictly select the relevant die when interacting with the NAND
  5126. * chip.
  5127. */
  5128. nand_select_target(chip, 0);
  5129. ret = nand_manufacturer_init(chip);
  5130. nand_deselect_target(chip);
  5131. if (ret)
  5132. goto err_free_buf;
  5133. /* Set the internal oob buffer location, just after the page data */
  5134. chip->oob_poi = chip->data_buf + mtd->writesize;
  5135. /*
  5136. * If no default placement scheme is given, select an appropriate one.
  5137. */
  5138. if (!mtd->ooblayout &&
  5139. !(ecc->engine_type == NAND_ECC_ENGINE_TYPE_SOFT &&
  5140. ecc->algo == NAND_ECC_ALGO_BCH) &&
  5141. !(ecc->engine_type == NAND_ECC_ENGINE_TYPE_SOFT &&
  5142. ecc->algo == NAND_ECC_ALGO_HAMMING)) {
  5143. switch (mtd->oobsize) {
  5144. case 8:
  5145. case 16:
  5146. mtd_set_ooblayout(mtd, nand_get_small_page_ooblayout());
  5147. break;
  5148. case 64:
  5149. case 128:
  5150. mtd_set_ooblayout(mtd,
  5151. nand_get_large_page_hamming_ooblayout());
  5152. break;
  5153. default:
  5154. /*
  5155. * Expose the whole OOB area to users if ECC_NONE
  5156. * is passed. We could do that for all kind of
  5157. * ->oobsize, but we must keep the old large/small
  5158. * page with ECC layout when ->oobsize <= 128 for
  5159. * compatibility reasons.
  5160. */
  5161. if (ecc->engine_type == NAND_ECC_ENGINE_TYPE_NONE) {
  5162. mtd_set_ooblayout(mtd,
  5163. nand_get_large_page_ooblayout());
  5164. break;
  5165. }
  5166. WARN(1, "No oob scheme defined for oobsize %d\n",
  5167. mtd->oobsize);
  5168. ret = -EINVAL;
  5169. goto err_nand_manuf_cleanup;
  5170. }
  5171. }
  5172. /*
  5173. * Check ECC mode, default to software if 3byte/512byte hardware ECC is
  5174. * selected and we have 256 byte pagesize fallback to software ECC
  5175. */
  5176. switch (ecc->engine_type) {
  5177. case NAND_ECC_ENGINE_TYPE_ON_HOST:
  5178. ret = nand_set_ecc_on_host_ops(chip);
  5179. if (ret)
  5180. goto err_nand_manuf_cleanup;
  5181. if (mtd->writesize >= ecc->size) {
  5182. if (!ecc->strength) {
  5183. WARN(1, "Driver must set ecc.strength when using hardware ECC\n");
  5184. ret = -EINVAL;
  5185. goto err_nand_manuf_cleanup;
  5186. }
  5187. break;
  5188. }
  5189. pr_warn("%d byte HW ECC not possible on %d byte page size, fallback to SW ECC\n",
  5190. ecc->size, mtd->writesize);
  5191. ecc->engine_type = NAND_ECC_ENGINE_TYPE_SOFT;
  5192. ecc->algo = NAND_ECC_ALGO_HAMMING;
  5193. fallthrough;
  5194. case NAND_ECC_ENGINE_TYPE_SOFT:
  5195. ret = nand_set_ecc_soft_ops(chip);
  5196. if (ret)
  5197. goto err_nand_manuf_cleanup;
  5198. break;
  5199. case NAND_ECC_ENGINE_TYPE_ON_DIE:
  5200. if (!ecc->read_page || !ecc->write_page) {
  5201. WARN(1, "No ECC functions supplied; on-die ECC not possible\n");
  5202. ret = -EINVAL;
  5203. goto err_nand_manuf_cleanup;
  5204. }
  5205. if (!ecc->read_oob)
  5206. ecc->read_oob = nand_read_oob_std;
  5207. if (!ecc->write_oob)
  5208. ecc->write_oob = nand_write_oob_std;
  5209. break;
  5210. case NAND_ECC_ENGINE_TYPE_NONE:
  5211. pr_warn("NAND_ECC_ENGINE_TYPE_NONE selected by board driver. This is not recommended!\n");
  5212. ecc->read_page = nand_read_page_raw;
  5213. ecc->write_page = nand_write_page_raw;
  5214. ecc->read_oob = nand_read_oob_std;
  5215. ecc->read_page_raw = nand_read_page_raw;
  5216. ecc->write_page_raw = nand_write_page_raw;
  5217. ecc->write_oob = nand_write_oob_std;
  5218. ecc->size = mtd->writesize;
  5219. ecc->bytes = 0;
  5220. ecc->strength = 0;
  5221. break;
  5222. default:
  5223. WARN(1, "Invalid NAND_ECC_MODE %d\n", ecc->engine_type);
  5224. ret = -EINVAL;
  5225. goto err_nand_manuf_cleanup;
  5226. }
  5227. if (ecc->correct || ecc->calculate) {
  5228. ecc->calc_buf = kmalloc(mtd->oobsize, GFP_KERNEL);
  5229. ecc->code_buf = kmalloc(mtd->oobsize, GFP_KERNEL);
  5230. if (!ecc->calc_buf || !ecc->code_buf) {
  5231. ret = -ENOMEM;
  5232. goto err_nand_manuf_cleanup;
  5233. }
  5234. }
  5235. /* For many systems, the standard OOB write also works for raw */
  5236. if (!ecc->read_oob_raw)
  5237. ecc->read_oob_raw = ecc->read_oob;
  5238. if (!ecc->write_oob_raw)
  5239. ecc->write_oob_raw = ecc->write_oob;
  5240. /* propagate ecc info to mtd_info */
  5241. mtd->ecc_strength = ecc->strength;
  5242. mtd->ecc_step_size = ecc->size;
  5243. /*
  5244. * Set the number of read / write steps for one page depending on ECC
  5245. * mode.
  5246. */
  5247. if (!ecc->steps)
  5248. ecc->steps = mtd->writesize / ecc->size;
  5249. if (ecc->steps * ecc->size != mtd->writesize) {
  5250. WARN(1, "Invalid ECC parameters\n");
  5251. ret = -EINVAL;
  5252. goto err_nand_manuf_cleanup;
  5253. }
  5254. if (!ecc->total) {
  5255. ecc->total = ecc->steps * ecc->bytes;
  5256. chip->base.ecc.ctx.total = ecc->total;
  5257. }
  5258. if (ecc->total > mtd->oobsize) {
  5259. WARN(1, "Total number of ECC bytes exceeded oobsize\n");
  5260. ret = -EINVAL;
  5261. goto err_nand_manuf_cleanup;
  5262. }
  5263. /*
  5264. * The number of bytes available for a client to place data into
  5265. * the out of band area.
  5266. */
  5267. ret = mtd_ooblayout_count_freebytes(mtd);
  5268. if (ret < 0)
  5269. ret = 0;
  5270. mtd->oobavail = ret;
  5271. /* ECC sanity check: warn if it's too weak */
  5272. if (!nand_ecc_is_strong_enough(&chip->base))
  5273. pr_warn("WARNING: %s: the ECC used on your system (%db/%dB) is too weak compared to the one required by the NAND chip (%db/%dB)\n",
  5274. mtd->name, chip->ecc.strength, chip->ecc.size,
  5275. nanddev_get_ecc_requirements(&chip->base)->strength,
  5276. nanddev_get_ecc_requirements(&chip->base)->step_size);
  5277. /* Allow subpage writes up to ecc.steps. Not possible for MLC flash */
  5278. if (!(chip->options & NAND_NO_SUBPAGE_WRITE) && nand_is_slc(chip)) {
  5279. switch (ecc->steps) {
  5280. case 2:
  5281. mtd->subpage_sft = 1;
  5282. break;
  5283. case 4:
  5284. case 8:
  5285. case 16:
  5286. mtd->subpage_sft = 2;
  5287. break;
  5288. }
  5289. }
  5290. chip->subpagesize = mtd->writesize >> mtd->subpage_sft;
  5291. /* Invalidate the pagebuffer reference */
  5292. chip->pagecache.page = -1;
  5293. /* Large page NAND with SOFT_ECC should support subpage reads */
  5294. switch (ecc->engine_type) {
  5295. case NAND_ECC_ENGINE_TYPE_SOFT:
  5296. if (chip->page_shift > 9)
  5297. chip->options |= NAND_SUBPAGE_READ;
  5298. break;
  5299. default:
  5300. break;
  5301. }
  5302. ret = nanddev_init(&chip->base, &rawnand_ops, mtd->owner);
  5303. if (ret)
  5304. goto err_nand_manuf_cleanup;
  5305. /* Adjust the MTD_CAP_ flags when NAND_ROM is set. */
  5306. if (chip->options & NAND_ROM)
  5307. mtd->flags = MTD_CAP_ROM;
  5308. /* Fill in remaining MTD driver data */
  5309. mtd->_erase = nand_erase;
  5310. mtd->_point = NULL;
  5311. mtd->_unpoint = NULL;
  5312. mtd->_panic_write = panic_nand_write;
  5313. mtd->_read_oob = nand_read_oob;
  5314. mtd->_write_oob = nand_write_oob;
  5315. mtd->_sync = nand_sync;
  5316. mtd->_lock = nand_lock;
  5317. mtd->_unlock = nand_unlock;
  5318. mtd->_suspend = nand_suspend;
  5319. mtd->_resume = nand_resume;
  5320. mtd->_reboot = nand_shutdown;
  5321. mtd->_block_isreserved = nand_block_isreserved;
  5322. mtd->_block_isbad = nand_block_isbad;
  5323. mtd->_block_markbad = nand_block_markbad;
  5324. mtd->_max_bad_blocks = nanddev_mtd_max_bad_blocks;
  5325. /*
  5326. * Initialize bitflip_threshold to its default prior scan_bbt() call.
  5327. * scan_bbt() might invoke mtd_read(), thus bitflip_threshold must be
  5328. * properly set.
  5329. */
  5330. if (!mtd->bitflip_threshold)
  5331. mtd->bitflip_threshold = DIV_ROUND_UP(mtd->ecc_strength * 3, 4);
  5332. /* Find the fastest data interface for this chip */
  5333. ret = nand_choose_interface_config(chip);
  5334. if (ret)
  5335. goto err_nanddev_cleanup;
  5336. /* Enter fastest possible mode on all dies. */
  5337. for (i = 0; i < nanddev_ntargets(&chip->base); i++) {
  5338. ret = nand_setup_interface(chip, i);
  5339. if (ret)
  5340. goto err_free_interface_config;
  5341. }
  5342. /*
  5343. * Look for secure regions in the NAND chip. These regions are supposed
  5344. * to be protected by a secure element like Trustzone. So the read/write
  5345. * accesses to these regions will be blocked in the runtime by this
  5346. * driver.
  5347. */
  5348. ret = of_get_nand_secure_regions(chip);
  5349. if (ret)
  5350. goto err_free_interface_config;
  5351. /* Check, if we should skip the bad block table scan */
  5352. if (chip->options & NAND_SKIP_BBTSCAN)
  5353. return 0;
  5354. /* Build bad block table */
  5355. ret = nand_create_bbt(chip);
  5356. if (ret)
  5357. goto err_free_secure_regions;
  5358. return 0;
  5359. err_free_secure_regions:
  5360. kfree(chip->secure_regions);
  5361. err_free_interface_config:
  5362. kfree(chip->best_interface_config);
  5363. err_nanddev_cleanup:
  5364. nanddev_cleanup(&chip->base);
  5365. err_nand_manuf_cleanup:
  5366. nand_manufacturer_cleanup(chip);
  5367. err_free_buf:
  5368. kfree(chip->data_buf);
  5369. kfree(ecc->code_buf);
  5370. kfree(ecc->calc_buf);
  5371. return ret;
  5372. }
  5373. static int nand_attach(struct nand_chip *chip)
  5374. {
  5375. if (chip->controller->ops && chip->controller->ops->attach_chip)
  5376. return chip->controller->ops->attach_chip(chip);
  5377. return 0;
  5378. }
  5379. static void nand_detach(struct nand_chip *chip)
  5380. {
  5381. if (chip->controller->ops && chip->controller->ops->detach_chip)
  5382. chip->controller->ops->detach_chip(chip);
  5383. }
  5384. /**
  5385. * nand_scan_with_ids - [NAND Interface] Scan for the NAND device
  5386. * @chip: NAND chip object
  5387. * @maxchips: number of chips to scan for.
  5388. * @ids: optional flash IDs table
  5389. *
  5390. * This fills out all the uninitialized function pointers with the defaults.
  5391. * The flash ID is read and the mtd/chip structures are filled with the
  5392. * appropriate values.
  5393. */
  5394. int nand_scan_with_ids(struct nand_chip *chip, unsigned int maxchips,
  5395. struct nand_flash_dev *ids)
  5396. {
  5397. int ret;
  5398. if (!maxchips)
  5399. return -EINVAL;
  5400. ret = nand_scan_ident(chip, maxchips, ids);
  5401. if (ret)
  5402. return ret;
  5403. ret = nand_attach(chip);
  5404. if (ret)
  5405. goto cleanup_ident;
  5406. ret = nand_scan_tail(chip);
  5407. if (ret)
  5408. goto detach_chip;
  5409. return 0;
  5410. detach_chip:
  5411. nand_detach(chip);
  5412. cleanup_ident:
  5413. nand_scan_ident_cleanup(chip);
  5414. return ret;
  5415. }
  5416. EXPORT_SYMBOL(nand_scan_with_ids);
/**
 * nand_cleanup - [NAND Interface] Free resources held by the NAND device
 * @chip: NAND chip object
 *
 * Releases everything set up by the scan/attach sequence. The teardown
 * order is deliberate and must be preserved: software-ECC engine state
 * first, then generic NAND-device state and per-chip buffers, then the
 * controller's per-chip resources (nand_detach()), and finally the
 * identification-phase allocations.
 */
void nand_cleanup(struct nand_chip *chip)
{
	/* Release the software ECC engine state, if one was instantiated */
	if (chip->ecc.engine_type == NAND_ECC_ENGINE_TYPE_SOFT) {
		if (chip->ecc.algo == NAND_ECC_ALGO_HAMMING)
			rawnand_sw_hamming_cleanup(chip);
		else if (chip->ecc.algo == NAND_ECC_ALGO_BCH)
			rawnand_sw_bch_cleanup(chip);
	}

	/* Tear down the generic NAND device layer state */
	nanddev_cleanup(&chip->base);

	/* Free secure regions data */
	kfree(chip->secure_regions);

	/* Free bad block table memory */
	kfree(chip->bbt);

	/* Free the page cache and ECC scratch buffers */
	kfree(chip->data_buf);
	kfree(chip->ecc.code_buf);
	kfree(chip->ecc.calc_buf);

	/*
	 * Free bad block descriptor memory — only when it was dynamically
	 * allocated (NAND_BBT_DYNAMICSTRUCT); otherwise it points at a
	 * static pattern descriptor that must not be freed.
	 */
	if (chip->badblock_pattern && chip->badblock_pattern->options
			& NAND_BBT_DYNAMICSTRUCT)
		kfree(chip->badblock_pattern);

	/* Free the data interface */
	kfree(chip->best_interface_config);

	/* Free manufacturer priv data. */
	nand_manufacturer_cleanup(chip);

	/* Free controller specific allocations after chip identification */
	nand_detach(chip);

	/* Free identification phase allocations */
	nand_scan_ident_cleanup(chip);
}
EXPORT_SYMBOL_GPL(nand_cleanup);
  5451. MODULE_LICENSE("GPL");
  5452. MODULE_AUTHOR("Steven J. Hill <[email protected]>");
  5453. MODULE_AUTHOR("Thomas Gleixner <[email protected]>");
  5454. MODULE_DESCRIPTION("Generic NAND flash driver code");