/* SPDX-License-Identifier: GPL-2.0 */
/*
 * fs/f2fs/f2fs.h
 *
 * Copyright (c) 2012 Samsung Electronics Co., Ltd.
 *             http://www.samsung.com/
 */
#ifndef _LINUX_F2FS_H
#define _LINUX_F2FS_H

#include <linux/uio.h>
#include <linux/types.h>
#include <linux/page-flags.h>
#include <linux/buffer_head.h>
#include <linux/slab.h>
#include <linux/crc32.h>
#include <linux/magic.h>
#include <linux/kobject.h>
#include <linux/sched.h>
#include <linux/cred.h>
#include <linux/sched/mm.h>
#include <linux/vmalloc.h>
#include <linux/bio.h>
#include <linux/blkdev.h>
#include <linux/quotaops.h>
#include <linux/part_stat.h>
#include <crypto/hash.h>

#include <linux/fscrypt.h>
#include <linux/fsverity.h>

struct pagevec;
#ifdef CONFIG_F2FS_CHECK_FS
#define f2fs_bug_on(sbi, condition)	BUG_ON(condition)
#else
#define f2fs_bug_on(sbi, condition)					\
	do {								\
		if (WARN_ON(condition))					\
			set_sbi_flag(sbi, SBI_NEED_FSCK);		\
	} while (0)
#endif
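/*
 * Usage sketch (editor's illustration, not part of the original header):
 * callers assert internal invariants with f2fs_bug_on().  With
 * CONFIG_F2FS_CHECK_FS set, the condition panics via BUG_ON(); otherwise
 * it only warns and tags the superblock so fsck runs on the next mount:
 *
 *	f2fs_bug_on(sbi, blkaddr == NULL_ADDR);
 */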
enum {
	FAULT_KMALLOC,
	FAULT_KVMALLOC,
	FAULT_PAGE_ALLOC,
	FAULT_PAGE_GET,
	FAULT_ALLOC_BIO,	/* obsolete: bio_alloc() will never fail */
	FAULT_ALLOC_NID,
	FAULT_ORPHAN,
	FAULT_BLOCK,
	FAULT_DIR_DEPTH,
	FAULT_EVICT_INODE,
	FAULT_TRUNCATE,
	FAULT_READ_IO,
	FAULT_CHECKPOINT,
	FAULT_DISCARD,
	FAULT_WRITE_IO,
	FAULT_SLAB_ALLOC,
	FAULT_DQUOT_INIT,
	FAULT_LOCK_OP,
	FAULT_BLKADDR_VALIDITY,
	FAULT_BLKADDR_CONSISTENCE,
	FAULT_MAX,
};
#ifdef CONFIG_F2FS_FAULT_INJECTION
#define F2FS_ALL_FAULT_TYPE	(GENMASK(FAULT_MAX - 1, 0))

struct f2fs_fault_info {
	atomic_t inject_ops;
	unsigned int inject_rate;
	unsigned int inject_type;
};

extern const char *f2fs_fault_name[FAULT_MAX];
#define IS_FAULT_SET(fi, type)	((fi)->inject_type & BIT(type))
#endif
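/*
 * Usage sketch (editor's illustration): fault sites pair IS_FAULT_SET()
 * with a countdown of inject_ops, roughly in the spirit of the
 * time_to_inject() helper defined later in this header:
 *
 *	struct f2fs_fault_info *ffi = &F2FS_OPTION(sbi).fault_info;
 *
 *	if (IS_FAULT_SET(ffi, FAULT_KMALLOC) &&
 *	    atomic_dec_and_test(&ffi->inject_ops)) {
 *		atomic_set(&ffi->inject_ops, ffi->inject_rate);
 *		return NULL;	// pretend the allocation failed
 *	}
 */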
/*
 * For mount options
 */
#define F2FS_MOUNT_DISABLE_ROLL_FORWARD	0x00000002
#define F2FS_MOUNT_DISCARD		0x00000004
#define F2FS_MOUNT_NOHEAP		0x00000008
#define F2FS_MOUNT_XATTR_USER		0x00000010
#define F2FS_MOUNT_POSIX_ACL		0x00000020
#define F2FS_MOUNT_DISABLE_EXT_IDENTIFY	0x00000040
#define F2FS_MOUNT_INLINE_XATTR		0x00000080
#define F2FS_MOUNT_INLINE_DATA		0x00000100
#define F2FS_MOUNT_INLINE_DENTRY	0x00000200
#define F2FS_MOUNT_FLUSH_MERGE		0x00000400
#define F2FS_MOUNT_NOBARRIER		0x00000800
#define F2FS_MOUNT_FASTBOOT		0x00001000
#define F2FS_MOUNT_READ_EXTENT_CACHE	0x00002000
#define F2FS_MOUNT_DATA_FLUSH		0x00008000
#define F2FS_MOUNT_FAULT_INJECTION	0x00010000
#define F2FS_MOUNT_USRQUOTA		0x00080000
#define F2FS_MOUNT_GRPQUOTA		0x00100000
#define F2FS_MOUNT_PRJQUOTA		0x00200000
#define F2FS_MOUNT_QUOTA		0x00400000
#define F2FS_MOUNT_INLINE_XATTR_SIZE	0x00800000
#define F2FS_MOUNT_RESERVE_ROOT		0x01000000
#define F2FS_MOUNT_DISABLE_CHECKPOINT	0x02000000
#define F2FS_MOUNT_NORECOVERY		0x04000000
#define F2FS_MOUNT_ATGC			0x08000000
#define F2FS_MOUNT_MERGE_CHECKPOINT	0x10000000
#define F2FS_MOUNT_GC_MERGE		0x20000000
#define F2FS_MOUNT_COMPRESS_CACHE	0x40000000
#define F2FS_MOUNT_AGE_EXTENT_CACHE	0x80000000

#define F2FS_OPTION(sbi)	((sbi)->mount_opt)
#define clear_opt(sbi, option)	(F2FS_OPTION(sbi).opt &= ~F2FS_MOUNT_##option)
#define set_opt(sbi, option)	(F2FS_OPTION(sbi).opt |= F2FS_MOUNT_##option)
#define test_opt(sbi, option)	(F2FS_OPTION(sbi).opt & F2FS_MOUNT_##option)
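/*
 * Usage sketch (editor's illustration): the option helpers paste the
 * F2FS_MOUNT_ prefix onto the bare option name, so mount-time code reads:
 *
 *	set_opt(sbi, DISCARD);			// sets F2FS_MOUNT_DISCARD
 *	if (test_opt(sbi, INLINE_DATA))
 *		clear_opt(sbi, INLINE_DATA);
 */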
#define ver_after(a, b)	(typecheck(unsigned long long, a) &&		\
		typecheck(unsigned long long, b) &&			\
		((long long)((a) - (b)) > 0))
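/*
 * Editor's note: the signed subtraction makes the comparison safe across
 * counter wraparound, in the same spirit as the kernel's time_after().
 * For example, with a == 0 and b == ULLONG_MAX, (long long)(a - b) == 1,
 * so ver_after(0, ULLONG_MAX) is still true.
 */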
typedef u32 block_t;	/*
			 * should not change u32, since it is the on-disk block
			 * address format, __le32.
			 */
typedef u32 nid_t;

#define COMPRESS_EXT_NUM		16
/*
 * An implementation of an rwsem that is explicitly unfair to readers. This
 * prevents priority inversion when a low-priority reader acquires the read
 * lock while sleeping on the write lock but the write lock is needed by
 * higher-priority clients.
 */
struct f2fs_rwsem {
	struct rw_semaphore internal_rwsem;
#ifdef CONFIG_F2FS_UNFAIR_RWSEM
	wait_queue_head_t read_waiters;
#endif
};
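/*
 * Editor's sketch of the idea (the actual lock helpers appear later in
 * this header): readers park on read_waiters and only ever take the lock
 * with a trylock, so a queued writer is never blocked behind sleeping
 * readers; the write-unlock path then wakes the waiting readers:
 *
 *	wait_event(sem->read_waiters,
 *		   down_read_trylock(&sem->internal_rwsem));
 */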
struct f2fs_mount_info {
	unsigned int opt;
	int write_io_size_bits;		/* Write IO size bits */
	block_t root_reserved_blocks;	/* root reserved blocks */
	kuid_t s_resuid;		/* reserved blocks for uid */
	kgid_t s_resgid;		/* reserved blocks for gid */
	int active_logs;		/* # of active logs */
	int inline_xattr_size;		/* inline xattr size */
#ifdef CONFIG_F2FS_FAULT_INJECTION
	struct f2fs_fault_info fault_info;	/* For fault injection */
#endif
#ifdef CONFIG_QUOTA
	/* Names of quota files with journalled quota */
	char *s_qf_names[MAXQUOTAS];
	int s_jquota_fmt;		/* Format of quota to use */
#endif
	/* For which write hints are passed down to block layer */
	int alloc_mode;			/* segment allocation policy */
	int fsync_mode;			/* fsync policy */
	int fs_mode;			/* fs mode: LFS or ADAPTIVE */
	int bggc_mode;			/* bggc mode: off, on or sync */
	int memory_mode;		/* memory mode */
	int discard_unit;		/*
					 * discard command's offset/size should
					 * be aligned to this unit: block,
					 * segment or section
					 */
	struct fscrypt_dummy_policy dummy_enc_policy; /* test dummy encryption */
	block_t unusable_cap_perc;	/* percentage for cap */
	block_t unusable_cap;		/*
					 * Amount of space allowed to be
					 * unusable when disabling checkpoint
					 */

	/* For compression */
	unsigned char compress_algorithm;	/* algorithm type */
	unsigned char compress_log_size;	/* cluster log size */
	unsigned char compress_level;		/* compress level */
	bool compress_chksum;			/* compressed data chksum */
	unsigned char compress_ext_cnt;		/* extension count */
	unsigned char nocompress_ext_cnt;	/* nocompress extension count */
	int compress_mode;			/* compression mode */
	unsigned char extensions[COMPRESS_EXT_NUM][F2FS_EXTENSION_LEN]; /* extensions */
	unsigned char noextensions[COMPRESS_EXT_NUM][F2FS_EXTENSION_LEN]; /* extensions */
};
#define F2FS_FEATURE_ENCRYPT		0x0001
#define F2FS_FEATURE_BLKZONED		0x0002
#define F2FS_FEATURE_ATOMIC_WRITE	0x0004
#define F2FS_FEATURE_EXTRA_ATTR		0x0008
#define F2FS_FEATURE_PRJQUOTA		0x0010
#define F2FS_FEATURE_INODE_CHKSUM	0x0020
#define F2FS_FEATURE_FLEXIBLE_INLINE_XATTR	0x0040
#define F2FS_FEATURE_QUOTA_INO		0x0080
#define F2FS_FEATURE_INODE_CRTIME	0x0100
#define F2FS_FEATURE_LOST_FOUND		0x0200
#define F2FS_FEATURE_VERITY		0x0400
#define F2FS_FEATURE_SB_CHKSUM		0x0800
#define F2FS_FEATURE_CASEFOLD		0x1000
#define F2FS_FEATURE_COMPRESSION	0x2000
#define F2FS_FEATURE_RO			0x4000

#define __F2FS_HAS_FEATURE(raw_super, mask)				\
	((raw_super->feature & cpu_to_le32(mask)) != 0)
#define F2FS_HAS_FEATURE(sbi, mask)	__F2FS_HAS_FEATURE(sbi->raw_super, mask)

/*
 * Default values for user and/or group using reserved blocks
 */
#define F2FS_DEF_RESUID		0
#define F2FS_DEF_RESGID		0
/*
 * For checkpoint manager
 */
enum {
	NAT_BITMAP,
	SIT_BITMAP
};

#define CP_UMOUNT	0x00000001
#define CP_FASTBOOT	0x00000002
#define CP_SYNC		0x00000004
#define CP_RECOVERY	0x00000008
#define CP_DISCARD	0x00000010
#define CP_TRIMMED	0x00000020
#define CP_PAUSE	0x00000040
#define CP_RESIZE	0x00000080

#define DEF_MAX_DISCARD_REQUEST		8	/* issue 8 discards per round */
#define DEF_MIN_DISCARD_ISSUE_TIME	50	/* 50 ms, if exists */
#define DEF_MID_DISCARD_ISSUE_TIME	500	/* 500 ms, if device busy */
#define DEF_MAX_DISCARD_ISSUE_TIME	60000	/* 60 s, if no candidates */
#define DEF_DISCARD_URGENT_UTIL		80	/* do more discard over 80% */
#define DEF_CP_INTERVAL			60	/* 60 secs */
#define DEF_IDLE_INTERVAL		5	/* 5 secs */
#define DEF_DISABLE_INTERVAL		5	/* 5 secs */
#define DEF_DISABLE_QUICK_INTERVAL	1	/* 1 sec */
#define DEF_UMOUNT_DISCARD_TIMEOUT	5	/* 5 secs */

struct cp_control {
	int reason;
	__u64 trim_start;
	__u64 trim_end;
	__u64 trim_minlen;
};
/*
 * indicate meta/data type
 */
enum {
	META_CP,
	META_NAT,
	META_SIT,
	META_SSA,
	META_MAX,
	META_POR,
	DATA_GENERIC,		/* check range only */
	DATA_GENERIC_ENHANCE,	/* strong check on range and segment bitmap */
	DATA_GENERIC_ENHANCE_READ,	/*
					 * strong check on range and segment
					 * bitmap but no warning due to race
					 * condition of read on truncated area
					 * by extent_cache
					 */
	DATA_GENERIC_ENHANCE_UPDATE,	/*
					 * strong check on range and segment
					 * bitmap for update case
					 */
	META_GENERIC,
};
/* for the list of ino */
enum {
	ORPHAN_INO,		/* for orphan ino list */
	APPEND_INO,		/* for append ino list */
	UPDATE_INO,		/* for update ino list */
	TRANS_DIR_INO,		/* for transactions dir ino list */
	FLUSH_INO,		/* for multiple device flushing */
	MAX_INO_ENTRY,		/* max. list */
};

struct ino_entry {
	struct list_head list;		/* list head */
	nid_t ino;			/* inode number */
	unsigned int dirty_device;	/* dirty device bitmap */
};

/* for the list of inodes to be GCed */
struct inode_entry {
	struct list_head list;	/* list head */
	struct inode *inode;	/* vfs inode pointer */
};

struct fsync_node_entry {
	struct list_head list;	/* list head */
	struct page *page;	/* warm node page pointer */
	unsigned int seq_id;	/* sequence id */
};

struct ckpt_req {
	struct completion wait;		/* completion for checkpoint done */
	struct llist_node llnode;	/* llist_node to be linked in wait queue */
	int ret;			/* return code of checkpoint */
	ktime_t queue_time;		/* request queued time */
};

struct ckpt_req_control {
	struct task_struct *f2fs_issue_ckpt;	/* checkpoint task */
	int ckpt_thread_ioprio;			/* checkpoint merge thread ioprio */
	wait_queue_head_t ckpt_wait_queue;	/* waiting queue for wake-up */
	atomic_t issued_ckpt;		/* # of actually issued ckpts */
	atomic_t total_ckpt;		/* # of total ckpts */
	atomic_t queued_ckpt;		/* # of queued ckpts */
	struct llist_head issue_list;	/* list for command issue */
	spinlock_t stat_lock;		/* lock for below checkpoint time stats */
	unsigned int cur_time;		/* cur wait time in msec for currently issued checkpoint */
	unsigned int peak_time;		/* peak wait time in msec until now */
};

/* for the bitmap indicate blocks to be discarded */
struct discard_entry {
	struct list_head list;	/* list head */
	block_t start_blkaddr;	/* start blockaddr of current segment */
	unsigned char discard_map[SIT_VBLOCK_MAP_SIZE];	/* segment discard bitmap */
};
/* minimum discard granularity, unit: block count */
#define MIN_DISCARD_GRANULARITY		1
/* default discard granularity of inner discard thread, unit: block count */
#define DEFAULT_DISCARD_GRANULARITY	16
/* default maximum discard granularity of ordered discard, unit: block count */
#define DEFAULT_MAX_ORDERED_DISCARD_GRANULARITY	16

/* max discard pend list number */
#define MAX_PLIST_NUM		512
#define plist_idx(blk_num)	((blk_num) >= MAX_PLIST_NUM ?		\
					(MAX_PLIST_NUM - 1) : ((blk_num) - 1))
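/*
 * Editor's worked example: plist_idx() maps a discard length in blocks to
 * a pending-list slot.  A 1-block discard lands in list 0, a 16-block
 * discard in list 15, and anything of MAX_PLIST_NUM (512) blocks or more
 * is clamped into the last list:
 *
 *	plist_idx(1)	== 0
 *	plist_idx(16)	== 15
 *	plist_idx(512)	== 511
 *	plist_idx(4096)	== 511
 */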
enum {
	D_PREP,			/* initial */
	D_PARTIAL,		/* partially submitted */
	D_SUBMIT,		/* all submitted */
	D_DONE,			/* finished */
};

struct discard_info {
	block_t lstart;			/* logical start address */
	block_t len;			/* length */
	block_t start;			/* actual start address in dev */
};
struct discard_cmd {
	struct rb_node rb_node;		/* rb node located in rb-tree */
	struct discard_info di;		/* discard info */
	struct list_head list;		/* command list */
	struct completion wait;		/* completion */
	struct block_device *bdev;	/* bdev */
	unsigned short ref;		/* reference count */
	unsigned char state;		/* state */
	unsigned char queued;		/* queued discard */
	int error;			/* bio error */
	spinlock_t lock;		/* for state/bio_ref updating */
	unsigned short bio_ref;		/* bio reference count */
};
enum {
	DPOLICY_BG,
	DPOLICY_FORCE,
	DPOLICY_FSTRIM,
	DPOLICY_UMOUNT,
	MAX_DPOLICY,
};
struct discard_policy {
	int type;			/* type of discard */
	unsigned int min_interval;	/* used for candidates exist */
	unsigned int mid_interval;	/* used for device busy */
	unsigned int max_interval;	/* used for candidates not exist */
	unsigned int max_requests;	/* # of discards issued per round */
	unsigned int io_aware_gran;	/* minimum granularity at which discard ignores I/O awareness */
	bool io_aware;			/* issue discard in idle time */
	bool sync;			/* submit discard with REQ_SYNC flag */
	bool ordered;			/* issue discard by lba order */
	bool timeout;			/* discard timeout for put_super */
	unsigned int granularity;	/* discard granularity */
};
struct discard_cmd_control {
	struct task_struct *f2fs_issue_discard;	/* discard thread */
	struct list_head entry_list;		/* 4KB discard entry list */
	struct list_head pend_list[MAX_PLIST_NUM];/* store pending entries */
	struct list_head wait_list;		/* store on-flushing entries */
	struct list_head fstrim_list;		/* in-flight discard from fstrim */
	wait_queue_head_t discard_wait_queue;	/* waiting queue for wake-up */
	struct mutex cmd_lock;
	unsigned int nr_discards;	/* # of discards in the list */
	unsigned int max_discards;	/* max. discards to be issued */
	unsigned int max_discard_request;	/* max. discard request per round */
	unsigned int min_discard_issue_time;	/* min. interval between discard issue */
	unsigned int mid_discard_issue_time;	/* mid. interval between discard issue */
	unsigned int max_discard_issue_time;	/* max. interval between discard issue */
	unsigned int discard_io_aware_gran;	/* minimum granularity at which discard ignores I/O awareness */
	unsigned int discard_urgent_util;	/* utilization over which discard is issued proactively */
	unsigned int discard_granularity;	/* discard granularity */
	unsigned int max_ordered_discard;	/* maximum discard granularity issued by lba order */
	unsigned int undiscard_blks;	/* # of undiscard blocks */
	unsigned int next_pos;		/* next discard position */
	atomic_t issued_discard;	/* # of issued discard */
	atomic_t queued_discard;	/* # of queued discard */
	atomic_t discard_cmd_cnt;	/* # of cached cmd count */
	struct rb_root_cached root;	/* root of discard rb-tree */
	bool rbtree_check;		/* config for consistency check */
	bool discard_wake;		/* to wake up discard thread */
};
/* for the list of fsync inodes, used only during recovery */
struct fsync_inode_entry {
	struct list_head list;	/* list head */
	struct inode *inode;	/* vfs inode pointer */
	block_t blkaddr;	/* block address locating the last fsync */
	block_t last_dentry;	/* block address locating the last dentry */
};

#define nats_in_cursum(jnl)		(le16_to_cpu((jnl)->n_nats))
#define sits_in_cursum(jnl)		(le16_to_cpu((jnl)->n_sits))

#define nat_in_journal(jnl, i)		((jnl)->nat_j.entries[i].ne)
#define nid_in_journal(jnl, i)		((jnl)->nat_j.entries[i].nid)
#define sit_in_journal(jnl, i)		((jnl)->sit_j.entries[i].se)
#define segno_in_journal(jnl, i)	((jnl)->sit_j.entries[i].segno)

#define MAX_NAT_JENTRIES(jnl)	(NAT_JOURNAL_ENTRIES - nats_in_cursum(jnl))
#define MAX_SIT_JENTRIES(jnl)	(SIT_JOURNAL_ENTRIES - sits_in_cursum(jnl))
static inline int update_nats_in_cursum(struct f2fs_journal *journal, int i)
{
	int before = nats_in_cursum(journal);

	journal->n_nats = cpu_to_le16(before + i);
	return before;
}

static inline int update_sits_in_cursum(struct f2fs_journal *journal, int i)
{
	int before = sits_in_cursum(journal);

	journal->n_sits = cpu_to_le16(before + i);
	return before;
}

static inline bool __has_cursum_space(struct f2fs_journal *journal,
							int size, int type)
{
	if (type == NAT_JOURNAL)
		return size <= MAX_NAT_JENTRIES(journal);
	return size <= MAX_SIT_JENTRIES(journal);
}
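/*
 * Usage sketch (editor's illustration): before stuffing a dirty NAT entry
 * into the current summary journal instead of writing a whole NAT block,
 * a caller would first reserve space, e.g.:
 *
 *	if (__has_cursum_space(journal, 1, NAT_JOURNAL)) {
 *		int idx = update_nats_in_cursum(journal, 1);
 *
 *		nid_in_journal(journal, idx) = cpu_to_le32(nid);
 *		// then fill nat_in_journal(journal, idx) ...
 *	}
 */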
/* for inline stuff */
#define DEF_INLINE_RESERVED_SIZE	1
static inline int get_extra_isize(struct inode *inode);
static inline int get_inline_xattr_addrs(struct inode *inode);
#define MAX_INLINE_DATA(inode)	(sizeof(__le32) *			\
				(CUR_ADDRS_PER_INODE(inode) -		\
				get_inline_xattr_addrs(inode) -		\
				DEF_INLINE_RESERVED_SIZE))

/* for inline dir */
#define NR_INLINE_DENTRY(inode)	(MAX_INLINE_DATA(inode) * BITS_PER_BYTE / \
				((SIZE_OF_DIR_ENTRY + F2FS_SLOT_LEN) *	\
				BITS_PER_BYTE + 1))
#define INLINE_DENTRY_BITMAP_SIZE(inode) \
	DIV_ROUND_UP(NR_INLINE_DENTRY(inode), BITS_PER_BYTE)
#define INLINE_RESERVED_SIZE(inode)	(MAX_INLINE_DATA(inode) -	\
				((SIZE_OF_DIR_ENTRY + F2FS_SLOT_LEN) *	\
				NR_INLINE_DENTRY(inode) +		\
				INLINE_DENTRY_BITMAP_SIZE(inode)))
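/*
 * Editor's worked example (assuming a legacy inode with no extra
 * attributes, so CUR_ADDRS_PER_INODE == 923, and the default 50-word
 * inline xattr area; SIZE_OF_DIR_ENTRY == 11, F2FS_SLOT_LEN == 8):
 *
 *	MAX_INLINE_DATA           = 4 * (923 - 50 - 1)      = 3488 bytes
 *	NR_INLINE_DENTRY          = 3488 * 8 / (19 * 8 + 1) = 182 entries
 *	INLINE_DENTRY_BITMAP_SIZE = DIV_ROUND_UP(182, 8)    = 23 bytes
 *	INLINE_RESERVED_SIZE      = 3488 - (19 * 182 + 23)  = 7 bytes
 *
 * i.e. each dentry costs 19 bytes of slots plus one bitmap bit, and the
 * few bytes that cannot hold a whole dentry are left reserved.
 */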
/*
 * For INODE and NODE manager
 */
/* for directory operations */
struct f2fs_filename {
	/*
	 * The filename the user specified.  This is NULL for some
	 * filesystem-internal operations, e.g. converting an inline directory
	 * to a non-inline one, or roll-forward recovering an encrypted dentry.
	 */
	const struct qstr *usr_fname;

	/*
	 * The on-disk filename.  For encrypted directories, this is encrypted.
	 * This may be NULL for lookups in an encrypted dir without the key.
	 */
	struct fscrypt_str disk_name;

	/* The dirhash of this filename */
	f2fs_hash_t hash;

#ifdef CONFIG_FS_ENCRYPTION
	/*
	 * For lookups in encrypted directories: either the buffer backing
	 * disk_name, or a buffer that holds the decoded no-key name.
	 */
	struct fscrypt_str crypto_buf;
#endif
#if IS_ENABLED(CONFIG_UNICODE)
	/*
	 * For casefolded directories: the casefolded name, but it's left NULL
	 * if the original name is not valid Unicode, if the original name is
	 * "." or "..", if the directory is both casefolded and encrypted and
	 * its encryption key is unavailable, or if the filesystem is doing an
	 * internal operation where usr_fname is also NULL.  In all these cases
	 * we fall back to treating the name as an opaque byte sequence.
	 */
	struct fscrypt_str cf_name;
#endif
};
struct f2fs_dentry_ptr {
	struct inode *inode;
	void *bitmap;
	struct f2fs_dir_entry *dentry;
	__u8 (*filename)[F2FS_SLOT_LEN];
	int max;
	int nr_bitmap;
};

static inline void make_dentry_ptr_block(struct inode *inode,
		struct f2fs_dentry_ptr *d, struct f2fs_dentry_block *t)
{
	d->inode = inode;
	d->max = NR_DENTRY_IN_BLOCK;
	d->nr_bitmap = SIZE_OF_DENTRY_BITMAP;
	d->bitmap = t->dentry_bitmap;
	d->dentry = t->dentry;
	d->filename = t->filename;
}

static inline void make_dentry_ptr_inline(struct inode *inode,
		struct f2fs_dentry_ptr *d, void *t)
{
	int entry_cnt = NR_INLINE_DENTRY(inode);
	int bitmap_size = INLINE_DENTRY_BITMAP_SIZE(inode);
	int reserved_size = INLINE_RESERVED_SIZE(inode);

	d->inode = inode;
	d->max = entry_cnt;
	d->nr_bitmap = bitmap_size;
	d->bitmap = t;
	d->dentry = t + bitmap_size + reserved_size;
	d->filename = t + bitmap_size + reserved_size +
					SIZE_OF_DIR_ENTRY * entry_cnt;
}
/*
 * XATTR_NODE_OFFSET stores xattrs to one node block per file keeping -1
 * as its node offset to distinguish from index node blocks.
 * But some bits are used to mark the node block.
 */
#define XATTR_NODE_OFFSET	((((unsigned int)-1) << OFFSET_BIT_SHIFT)	\
				>> OFFSET_BIT_SHIFT)
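/*
 * Editor's note: the shift pair simply masks off the top OFFSET_BIT_SHIFT
 * flag bits of an all-ones word, leaving the largest representable node
 * offset.  With a hypothetical OFFSET_BIT_SHIFT of 3 on a 32-bit word,
 * XATTR_NODE_OFFSET would evaluate to 0x1fffffff.
 */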
enum {
	ALLOC_NODE,			/* allocate a new node page if needed */
	LOOKUP_NODE,			/* look up a node without readahead */
	LOOKUP_NODE_RA,			/*
					 * look up a node with readahead called
					 * by get_data_block.
					 */
};

#define DEFAULT_RETRY_IO_COUNT	8	/* maximum retry read IO or flush count */

/* congestion wait timeout value, default: 20ms */
#define DEFAULT_IO_TIMEOUT	(msecs_to_jiffies(20))

/* maximum retry quota flush count */
#define DEFAULT_RETRY_QUOTA_FLUSH_COUNT		8

/* maximum retry of EIO'ed page */
#define MAX_RETRY_PAGE_EIO			100

#define F2FS_LINK_MAX	0xffffffff	/* maximum link count per file */

#define MAX_DIR_RA_PAGES	4	/* maximum ra pages of dir */

/* dirty segments threshold for triggering CP */
#define DEFAULT_DIRTY_THRESHOLD		4

#define RECOVERY_MAX_RA_BLOCKS		BIO_MAX_VECS
#define RECOVERY_MIN_RA_BLOCKS		1

#define F2FS_ONSTACK_PAGES	16	/* nr of onstack pages */
/* for in-memory extent cache entry */
#define F2FS_MIN_EXTENT_LEN	64	/* minimum extent length */

/* number of extent info in extent cache we try to shrink */
#define READ_EXTENT_CACHE_SHRINK_NUMBER	128

/* number of age extent info in extent cache we try to shrink */
#define AGE_EXTENT_CACHE_SHRINK_NUMBER	128
#define LAST_AGE_WEIGHT			30
#define SAME_AGE_REGION			1024

/*
 * Define data block with age less than 1GB as hot data
 * define data block with age less than 10GB but more than 1GB as warm data
 */
#define DEF_HOT_DATA_AGE_THRESHOLD	262144
#define DEF_WARM_DATA_AGE_THRESHOLD	2621440

/* extent cache type */
enum extent_type {
	EX_READ,
	EX_BLOCK_AGE,
	NR_EXTENT_CACHES,
};
struct extent_info {
	unsigned int fofs;		/* start offset in a file */
	unsigned int len;		/* length of the extent */
	union {
		/* read extent_cache */
		struct {
			/* start block address of the extent */
			block_t blk;
#ifdef CONFIG_F2FS_FS_COMPRESSION
			/* physical extent length of compressed blocks */
			unsigned int c_len;
#endif
		};
		/* block age extent_cache */
		struct {
			/* block age of the extent */
			unsigned long long age;
			/* last total blocks allocated */
			unsigned long long last_blocks;
		};
	};
};

struct extent_node {
	struct rb_node rb_node;		/* rb node located in rb-tree */
	struct extent_info ei;		/* extent info */
	struct list_head list;		/* node in global extent list of sbi */
	struct extent_tree *et;		/* extent tree pointer */
};

struct extent_tree {
	nid_t ino;			/* inode number */
	enum extent_type type;		/* keep the extent tree type */
	struct rb_root_cached root;	/* root of extent info rb-tree */
	struct extent_node *cached_en;	/* recently accessed extent node */
	struct list_head list;		/* to be used by sbi->zombie_list */
	rwlock_t lock;			/* protect extent info rb-tree */
	atomic_t node_cnt;		/* # of extent node in rb-tree */
	bool largest_updated;		/* largest extent updated */
	struct extent_info largest;	/* largest cached extent for EX_READ */
};

struct extent_tree_info {
	struct radix_tree_root extent_tree_root;/* cache extent cache entries */
	struct mutex extent_tree_lock;	/* locking extent radix tree */
	struct list_head extent_list;	/* lru list for shrinker */
	spinlock_t extent_lock;		/* locking extent lru list */
	atomic_t total_ext_tree;	/* extent tree count */
	struct list_head zombie_list;	/* extent zombie tree list */
	atomic_t total_zombie_tree;	/* extent zombie tree count */
	atomic_t total_ext_node;	/* extent info count */
};
/*
 * State of block returned by f2fs_map_blocks.
 */
#define F2FS_MAP_NEW		(1U << 0)
#define F2FS_MAP_MAPPED		(1U << 1)
#define F2FS_MAP_DELALLOC	(1U << 2)
#define F2FS_MAP_FLAGS		(F2FS_MAP_NEW | F2FS_MAP_MAPPED |	\
				F2FS_MAP_DELALLOC)

struct f2fs_map_blocks {
	struct block_device *m_bdev;	/* for multi-device dio */
	block_t m_pblk;
	block_t m_lblk;
	unsigned int m_len;
	unsigned int m_flags;
	pgoff_t *m_next_pgofs;		/* point next possible non-hole pgofs */
	pgoff_t *m_next_extent;		/* point to next possible extent */
	int m_seg_type;
	bool m_may_create;		/* indicate it is from write path */
	bool m_multidev_dio;		/* indicate it allows multi-device dio */
};
/* for flag in get_data_block */
enum {
	F2FS_GET_BLOCK_DEFAULT,
	F2FS_GET_BLOCK_FIEMAP,
	F2FS_GET_BLOCK_BMAP,
	F2FS_GET_BLOCK_DIO,
	F2FS_GET_BLOCK_PRE_DIO,
	F2FS_GET_BLOCK_PRE_AIO,
	F2FS_GET_BLOCK_PRECACHE,
};
/*
 * i_advise uses FADVISE_XXX_BIT. We can add additional hints later.
 */
#define FADVISE_COLD_BIT	0x01
#define FADVISE_LOST_PINO_BIT	0x02
#define FADVISE_ENCRYPT_BIT	0x04
#define FADVISE_ENC_NAME_BIT	0x08
#define FADVISE_KEEP_SIZE_BIT	0x10
#define FADVISE_HOT_BIT		0x20
#define FADVISE_VERITY_BIT	0x40
#define FADVISE_TRUNC_BIT	0x80

#define FADVISE_MODIFIABLE_BITS	(FADVISE_COLD_BIT | FADVISE_HOT_BIT)

#define file_is_cold(inode)	is_file(inode, FADVISE_COLD_BIT)
#define file_set_cold(inode)	set_file(inode, FADVISE_COLD_BIT)
#define file_clear_cold(inode)	clear_file(inode, FADVISE_COLD_BIT)

#define file_wrong_pino(inode)	is_file(inode, FADVISE_LOST_PINO_BIT)
#define file_lost_pino(inode)	set_file(inode, FADVISE_LOST_PINO_BIT)
#define file_got_pino(inode)	clear_file(inode, FADVISE_LOST_PINO_BIT)

#define file_is_encrypt(inode)	is_file(inode, FADVISE_ENCRYPT_BIT)
#define file_set_encrypt(inode)	set_file(inode, FADVISE_ENCRYPT_BIT)

#define file_enc_name(inode)	is_file(inode, FADVISE_ENC_NAME_BIT)
#define file_set_enc_name(inode) set_file(inode, FADVISE_ENC_NAME_BIT)

#define file_keep_isize(inode)	is_file(inode, FADVISE_KEEP_SIZE_BIT)
#define file_set_keep_isize(inode) set_file(inode, FADVISE_KEEP_SIZE_BIT)

#define file_is_hot(inode)	is_file(inode, FADVISE_HOT_BIT)
#define file_set_hot(inode)	set_file(inode, FADVISE_HOT_BIT)
#define file_clear_hot(inode)	clear_file(inode, FADVISE_HOT_BIT)

#define file_is_verity(inode)	is_file(inode, FADVISE_VERITY_BIT)
#define file_set_verity(inode)	set_file(inode, FADVISE_VERITY_BIT)

#define file_should_truncate(inode)	is_file(inode, FADVISE_TRUNC_BIT)
#define file_need_truncate(inode)	set_file(inode, FADVISE_TRUNC_BIT)
#define file_dont_truncate(inode)	clear_file(inode, FADVISE_TRUNC_BIT)
#define DEF_DIR_LEVEL		0

enum {
	GC_FAILURE_PIN,
	MAX_GC_FAILURE
};
/* used for f2fs_inode_info->flags */
enum {
	FI_NEW_INODE,		/* indicate newly allocated inode */
	FI_DIRTY_INODE,		/* indicate inode is dirty or not */
	FI_AUTO_RECOVER,	/* indicate inode is recoverable */
	FI_DIRTY_DIR,		/* indicate directory has dirty pages */
	FI_INC_LINK,		/* need to increment i_nlink */
	FI_ACL_MODE,		/* indicate acl mode */
	FI_NO_ALLOC,		/* should not allocate any blocks */
	FI_FREE_NID,		/* free allocated nid */
	FI_NO_EXTENT,		/* not to use the extent cache */
	FI_INLINE_XATTR,	/* used for inline xattr */
	FI_INLINE_DATA,		/* used for inline data */
	FI_INLINE_DENTRY,	/* used for inline dentry */
	FI_APPEND_WRITE,	/* inode has appended data */
	FI_UPDATE_WRITE,	/* inode has in-place-update data */
	FI_NEED_IPU,		/* used for ipu per file */
	FI_ATOMIC_FILE,		/* indicate atomic file */
	FI_FIRST_BLOCK_WRITTEN,	/* indicate #0 data block was written */
	FI_DROP_CACHE,		/* drop dirty page cache */
	FI_DATA_EXIST,		/* indicate data exists */
	FI_INLINE_DOTS,		/* indicate inline dot dentries */
	FI_SKIP_WRITES,		/* should skip data page writeback */
	FI_OPU_WRITE,		/* used for opu per file */
	FI_DIRTY_FILE,		/* indicate regular/symlink has dirty pages */
	FI_PREALLOCATED_ALL,	/* all blocks for write were preallocated */
	FI_HOT_DATA,		/* indicate file is hot */
	FI_EXTRA_ATTR,		/* indicate file has extra attribute */
	FI_PROJ_INHERIT,	/* indicate file inherits projectid */
	FI_PIN_FILE,		/* indicate file should not be gced */
	FI_VERITY_IN_PROGRESS,	/* building fs-verity Merkle tree */
	FI_COMPRESSED_FILE,	/* indicate file's data can be compressed */
	FI_COMPRESS_CORRUPT,	/* indicate compressed cluster is corrupted */
	FI_MMAP_FILE,		/* indicate file was mmapped */
	FI_ENABLE_COMPRESS,	/* enable compression in "user" compression mode */
	FI_COMPRESS_RELEASED,	/* compressed blocks were released */
	FI_ALIGNED_WRITE,	/* enable aligned write */
	FI_COW_FILE,		/* indicate COW file */
	FI_ATOMIC_COMMITTED,	/* indicate atomic commit completed except disk sync */
	FI_ATOMIC_REPLACE,	/* indicate atomic replace */
	FI_MAX,			/* max flag, never be used */
};
struct f2fs_inode_info {
	struct inode vfs_inode;		/* serve a vfs inode */
	unsigned long i_flags;		/* keep an inode flags for ioctl */
	unsigned char i_advise;		/* use to give file attribute hints */
	unsigned char i_dir_level;	/* use for dentry level for large dir */
	unsigned int i_current_depth;	/* only for directory depth */
	/* for gc failure statistic */
	unsigned int i_gc_failures[MAX_GC_FAILURE];
	unsigned int i_pino;		/* parent inode number */
	umode_t i_acl_mode;		/* keep file acl mode temporarily */

	/* Use below internally in f2fs */
	unsigned long flags[BITS_TO_LONGS(FI_MAX)];	/* use to pass per-file flags */
	struct f2fs_rwsem i_sem;	/* protect fi info */
	atomic_t dirty_pages;		/* # of dirty pages */
	f2fs_hash_t chash;		/* hash value of given file name */
	unsigned int clevel;		/* maximum level of given file name */
	struct task_struct *task;	/* lookup and create consistency */
	struct task_struct *cp_task;	/* separate cp/wb IO stats */
	struct task_struct *wb_task;	/* indicate inode is in context of writeback */
	nid_t i_xattr_nid;		/* node id that contains xattrs */
	loff_t last_disk_size;		/* lastly written file size */
	spinlock_t i_size_lock;		/* protect last_disk_size */

#ifdef CONFIG_QUOTA
	struct dquot *i_dquot[MAXQUOTAS];

	/* quota space reservation, managed internally by quota code */
	qsize_t i_reserved_quota;
#endif
	struct list_head dirty_list;	/* dirty list for dirs and files */
	struct list_head gdirty_list;	/* linked in global dirty list */
	struct task_struct *atomic_write_task;	/* store atomic write task */
	struct extent_tree *extent_tree[NR_EXTENT_CACHES];
					/* cached extent_tree entry */
	struct inode *cow_inode;	/* copy-on-write inode for atomic write */

	/* avoid racing between foreground op and gc */
	struct f2fs_rwsem i_gc_rwsem[2];
	struct f2fs_rwsem i_xattr_sem;	/* avoid racing between reading and changing EAs */

	int i_extra_isize;		/* size of extra space located in i_addr */
	kprojid_t i_projid;		/* id for project quota */
	int i_inline_xattr_size;	/* inline xattr size */
	struct timespec64 i_crtime;	/* inode creation time */
	struct timespec64 i_disk_time[3];/* inode disk times */

	/* for file compress */
	atomic_t i_compr_blocks;	/* # of compressed blocks */
	unsigned char i_compress_algorithm;	/* algorithm type */
	unsigned char i_log_cluster_size;	/* log of cluster size */
	unsigned char i_compress_level;		/* compress level (lz4hc,zstd) */
	unsigned char i_compress_flag;		/* compress flag */
	unsigned int i_cluster_size;		/* cluster size */

	unsigned int atomic_write_cnt;
	loff_t original_i_size;		/* original i_size before atomic write */
};
static inline void get_read_extent_info(struct extent_info *ext,
					struct f2fs_extent *i_ext)
{
	ext->fofs = le32_to_cpu(i_ext->fofs);
	ext->blk = le32_to_cpu(i_ext->blk);
	ext->len = le32_to_cpu(i_ext->len);
}

static inline void set_raw_read_extent(struct extent_info *ext,
					struct f2fs_extent *i_ext)
{
	i_ext->fofs = cpu_to_le32(ext->fofs);
	i_ext->blk = cpu_to_le32(ext->blk);
	i_ext->len = cpu_to_le32(ext->len);
}

static inline bool __is_discard_mergeable(struct discard_info *back,
			struct discard_info *front, unsigned int max_len)
{
	return (back->lstart + back->len == front->lstart) &&
		(back->len + front->len <= max_len);
}

static inline bool __is_discard_back_mergeable(struct discard_info *cur,
			struct discard_info *back, unsigned int max_len)
{
	return __is_discard_mergeable(back, cur, max_len);
}

static inline bool __is_discard_front_mergeable(struct discard_info *cur,
			struct discard_info *front, unsigned int max_len)
{
	return __is_discard_mergeable(cur, front, max_len);
}
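/*
 * Editor's worked example: two discards merge only when they are
 * logically contiguous and the merged size stays within max_len.  With
 * back = {.lstart = 100, .len = 20} and front = {.lstart = 120, .len = 30},
 * __is_discard_mergeable() returns true as long as max_len >= 50.
 */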
/*
 * For free nid management
 */
enum nid_state {
	FREE_NID,		/* newly added to free nid list */
	PREALLOC_NID,		/* it is preallocated */
	MAX_NID_STATE,
};

enum nat_state {
	TOTAL_NAT,
	DIRTY_NAT,
	RECLAIMABLE_NAT,
	MAX_NAT_STATE,
};

struct f2fs_nm_info {
	block_t nat_blkaddr;		/* base disk address of NAT */
	nid_t max_nid;			/* maximum possible node ids */
	nid_t available_nids;		/* # of available node ids */
	nid_t next_scan_nid;		/* the next nid to be scanned */
	nid_t max_rf_node_blocks;	/* max # of nodes for recovery */
	unsigned int ram_thresh;	/* control the memory footprint */
	unsigned int ra_nid_pages;	/* # of nid pages to be readaheaded */
	unsigned int dirty_nats_ratio;	/* control dirty nats ratio threshold */

	/* NAT cache management */
	struct radix_tree_root nat_root;/* root of the nat entry cache */
	struct radix_tree_root nat_set_root;/* root of the nat set cache */
	struct f2fs_rwsem nat_tree_lock;	/* protect nat entry tree */
	struct list_head nat_entries;	/* cached nat entry list (clean) */
	spinlock_t nat_list_lock;	/* protect clean nat entry list */
	unsigned int nat_cnt[MAX_NAT_STATE]; /* the # of cached nat entries */
	unsigned int nat_blocks;	/* # of nat blocks */

	/* free node ids management */
	struct radix_tree_root free_nid_root;/* root of the free_nid cache */
	struct list_head free_nid_list;	/* list for free nids excluding preallocated nids */
	unsigned int nid_cnt[MAX_NID_STATE];	/* the number of free node id */
	spinlock_t nid_list_lock;	/* protect nid lists ops */
	struct mutex build_lock;	/* lock for build free nids */
	unsigned char **free_nid_bitmap;
	unsigned char *nat_block_bitmap;
	unsigned short *free_nid_count;	/* free nid count of NAT block */

	/* for checkpoint */
	char *nat_bitmap;		/* NAT bitmap pointer */

	unsigned int nat_bits_blocks;	/* # of nat bits blocks */
	unsigned char *nat_bits;	/* NAT bits blocks */
	unsigned char *full_nat_bits;	/* full NAT pages */
	unsigned char *empty_nat_bits;	/* empty NAT pages */
#ifdef CONFIG_F2FS_CHECK_FS
	char *nat_bitmap_mir;		/* NAT bitmap mirror */
#endif
	int bitmap_size;		/* bitmap size */
};
/*
 * this structure is used as one of function parameters.
 * all the information is dedicated to a given direct node block determined
 * by the data offset in a file.
 */
struct dnode_of_data {
	struct inode *inode;		/* vfs inode pointer */
	struct page *inode_page;	/* its inode page, NULL is possible */
	struct page *node_page;		/* cached direct node page */
	nid_t nid;			/* node id of the direct node block */
	unsigned int ofs_in_node;	/* data offset in the node page */
	bool inode_page_locked;		/* inode page is locked or not */
	bool node_changed;		/* is node block changed */
	char cur_level;			/* level of hole node page */
	char max_level;			/* level of current page located */
	block_t data_blkaddr;		/* block address of the node block */
};
static inline void set_new_dnode(struct dnode_of_data *dn, struct inode *inode,
		struct page *ipage, struct page *npage, nid_t nid)
{
	memset(dn, 0, sizeof(*dn));
	dn->inode = inode;
	dn->inode_page = ipage;
	dn->node_page = npage;
	dn->nid = nid;
}
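/*
 * Usage sketch (editor's illustration): most callers start from only the
 * inode and let the node-manager code find or allocate the pages, e.g.:
 *
 *	struct dnode_of_data dn;
 *
 *	set_new_dnode(&dn, inode, NULL, NULL, 0);
 *	err = f2fs_get_dnode_of_data(&dn, page->index, LOOKUP_NODE);
 */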
/*
 * For SIT manager
 *
 * By default, there are 6 active log areas across the whole main area.
 * When considering hot and cold data separation to reduce cleaning overhead,
 * we split 3 for data logs and 3 for node logs as hot, warm, and cold types,
 * respectively.
 * In the current design, you should not change the numbers intentionally.
 * Instead, as a mount option such as active_logs=x, you can use 2, 4, and 6
 * logs individually according to the underlying devices. (default: 6)
 * Just in case, on-disk layout covers maximum 16 logs that consist of 8 for
 * data and 8 for node logs.
 */
#define NR_CURSEG_DATA_TYPE	(3)
#define NR_CURSEG_NODE_TYPE	(3)
#define NR_CURSEG_INMEM_TYPE	(2)
#define NR_CURSEG_RO_TYPE	(2)
#define NR_CURSEG_PERSIST_TYPE	(NR_CURSEG_DATA_TYPE + NR_CURSEG_NODE_TYPE)
#define NR_CURSEG_TYPE		(NR_CURSEG_INMEM_TYPE + NR_CURSEG_PERSIST_TYPE)
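/*
 * Editor's arithmetic note: with the defines above, the six persistent
 * logs (3 data + 3 node) plus the two in-memory logs (pinned cold data
 * and ATGC) give NR_CURSEG_TYPE == 8, while only NR_CURSEG_PERSIST_TYPE
 * == 6 of them are persisted at checkpoint time.
 */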
  883. enum {
  884. CURSEG_HOT_DATA = 0, /* directory entry blocks */
  885. CURSEG_WARM_DATA, /* data blocks */
  886. CURSEG_COLD_DATA, /* multimedia or GCed data blocks */
  887. CURSEG_HOT_NODE, /* direct node blocks of directory files */
  888. CURSEG_WARM_NODE, /* direct node blocks of normal files */
  889. CURSEG_COLD_NODE, /* indirect node blocks */
890. NR_PERSISTENT_LOG, /* number of persistent logs */
891. CURSEG_COLD_DATA_PINNED = NR_PERSISTENT_LOG,
892. /* pinned file that needs consecutive block address */
893. CURSEG_ALL_DATA_ATGC, /* SSR allocator in hot/warm/cold data area */
894. NO_CHECK_TYPE, /* number of persistent & inmem logs */
  895. };
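/*
 * Illustrative sketch (an assumption -- the real mapping lives in the
 * segment-management code): the enum above is laid out so that a
 * (data/node, temperature) pair selects a log by simple arithmetic,
 * e.g. (node, cold) -> CURSEG_COLD_NODE.
 */
static inline int example_curseg_type(bool is_node, int temp) /* 0=hot, 1=warm, 2=cold */
{
	return (is_node ? CURSEG_HOT_NODE : CURSEG_HOT_DATA) + temp;
}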
  896. struct flush_cmd {
  897. struct completion wait;
  898. struct llist_node llnode;
  899. nid_t ino;
  900. int ret;
  901. };
  902. struct flush_cmd_control {
  903. struct task_struct *f2fs_issue_flush; /* flush thread */
  904. wait_queue_head_t flush_wait_queue; /* waiting queue for wake-up */
  905. atomic_t issued_flush; /* # of issued flushes */
  906. atomic_t queued_flush; /* # of queued flushes */
  907. struct llist_head issue_list; /* list for command issue */
  908. struct llist_node *dispatch_list; /* list for command dispatch */
  909. };
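/*
 * Illustrative sketch (an assumption, not the actual issue path): producers
 * queue a flush_cmd on the lock-free issue_list and sleep; the flush thread
 * drains issue_list into dispatch_list, submits one flush, and completes
 * every queued command. Error handling and command merging are omitted.
 */
static inline int example_issue_flush(struct flush_cmd_control *fcc, nid_t ino)
{
	struct flush_cmd cmd;

	cmd.ino = ino;
	init_completion(&cmd.wait);
	llist_add(&cmd.llnode, &fcc->issue_list);	/* lock-free enqueue */
	wake_up(&fcc->flush_wait_queue);		/* kick the flush thread */
	wait_for_completion(&cmd.wait);			/* flush thread sets cmd.ret */
	return cmd.ret;
}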
  910. struct f2fs_sm_info {
  911. struct sit_info *sit_info; /* whole segment information */
  912. struct free_segmap_info *free_info; /* free segment information */
  913. struct dirty_seglist_info *dirty_info; /* dirty segment information */
  914. struct curseg_info *curseg_array; /* active segment information */
  915. struct f2fs_rwsem curseg_lock; /* for preventing curseg change */
  916. block_t seg0_blkaddr; /* block address of 0'th segment */
  917. block_t main_blkaddr; /* start block address of main area */
  918. block_t ssa_blkaddr; /* start block address of SSA area */
  919. unsigned int segment_count; /* total # of segments */
  920. unsigned int main_segments; /* # of segments in main area */
  921. unsigned int reserved_segments; /* # of reserved segments */
  922. unsigned int additional_reserved_segments;/* reserved segs for IO align feature */
  923. unsigned int ovp_segments; /* # of overprovision segments */
  924. /* a threshold to reclaim prefree segments */
  925. unsigned int rec_prefree_segments;
  926. struct list_head sit_entry_set; /* sit entry set list */
  927. unsigned int ipu_policy; /* in-place-update policy */
  928. unsigned int min_ipu_util; /* in-place-update threshold */
  929. unsigned int min_fsync_blocks; /* threshold for fsync */
  930. unsigned int min_seq_blocks; /* threshold for sequential blocks */
  931. unsigned int min_hot_blocks; /* threshold for hot block allocation */
  932. unsigned int min_ssr_sections; /* threshold to trigger SSR allocation */
  933. /* for flush command control */
  934. struct flush_cmd_control *fcc_info;
  935. /* for discard command control */
  936. struct discard_cmd_control *dcc_info;
  937. };
  938. /*
  939. * For superblock
  940. */
  941. /*
  942. * COUNT_TYPE for monitoring
  943. *
  944. * f2fs monitors the number of several block types such as on-writeback,
  945. * dirty dentry blocks, dirty node blocks, and dirty meta blocks.
  946. */
  947. #define WB_DATA_TYPE(p, f) \
  948. (f || f2fs_is_cp_guaranteed(p) ? F2FS_WB_CP_DATA : F2FS_WB_DATA)
  949. enum count_type {
  950. F2FS_DIRTY_DENTS,
  951. F2FS_DIRTY_DATA,
  952. F2FS_DIRTY_QDATA,
  953. F2FS_DIRTY_NODES,
  954. F2FS_DIRTY_META,
  955. F2FS_DIRTY_IMETA,
  956. F2FS_WB_CP_DATA,
  957. F2FS_WB_DATA,
  958. F2FS_RD_DATA,
  959. F2FS_RD_NODE,
  960. F2FS_RD_META,
  961. F2FS_DIO_WRITE,
  962. F2FS_DIO_READ,
  963. NR_COUNT_TYPE,
  964. };
  965. /*
966. * Below are the page types of bios used in submit_bio().
967. * The available types are:
968. * DATA User data pages. Written in async mode.
969. * NODE Node pages. Written in async mode.
970. * META FS metadata pages such as SIT, NAT, CP.
971. * NR_PAGE_TYPE The number of page types.
972. * META_FLUSH Makes sure the previous pages are written,
973. * waiting for the bio's completion.
974. * ... Can only be used with META.
  975. */
  976. #define PAGE_TYPE_OF_BIO(type) ((type) > META ? META : (type))
  977. enum page_type {
  978. DATA = 0,
  979. NODE = 1, /* should not change this */
  980. META,
  981. NR_PAGE_TYPE,
  982. META_FLUSH,
  983. IPU, /* the below types are used by tracepoints only. */
  984. OPU,
  985. };
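/*
 * Worked example (illustrative): META_FLUSH is only a submission hint, so
 * code that indexes per-type arrays first folds it back to META:
 *
 *	PAGE_TYPE_OF_BIO(DATA)       == DATA
 *	PAGE_TYPE_OF_BIO(META_FLUSH) == META
 */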
  986. enum temp_type {
  987. HOT = 0, /* must be zero for meta bio */
  988. WARM,
  989. COLD,
  990. NR_TEMP_TYPE,
  991. };
  992. enum need_lock_type {
  993. LOCK_REQ = 0,
  994. LOCK_DONE,
  995. LOCK_RETRY,
  996. };
  997. enum cp_reason_type {
  998. CP_NO_NEEDED,
  999. CP_NON_REGULAR,
  1000. CP_COMPRESSED,
  1001. CP_HARDLINK,
  1002. CP_SB_NEED_CP,
  1003. CP_WRONG_PINO,
  1004. CP_NO_SPC_ROLL,
  1005. CP_NODE_NEED_CP,
  1006. CP_FASTBOOT_MODE,
  1007. CP_SPEC_LOG_NUM,
  1008. CP_RECOVER_DIR,
  1009. };
  1010. enum iostat_type {
  1011. /* WRITE IO */
  1012. APP_DIRECT_IO, /* app direct write IOs */
  1013. APP_BUFFERED_IO, /* app buffered write IOs */
  1014. APP_WRITE_IO, /* app write IOs */
  1015. APP_MAPPED_IO, /* app mapped IOs */
  1016. APP_BUFFERED_CDATA_IO, /* app buffered write IOs on compressed file */
  1017. APP_MAPPED_CDATA_IO, /* app mapped write IOs on compressed file */
  1018. FS_DATA_IO, /* data IOs from kworker/fsync/reclaimer */
  1019. FS_CDATA_IO, /* data IOs from kworker/fsync/reclaimer on compressed file */
  1020. FS_NODE_IO, /* node IOs from kworker/fsync/reclaimer */
  1021. FS_META_IO, /* meta IOs from kworker/reclaimer */
1022. FS_GC_DATA_IO, /* data IOs from foreground gc */
1023. FS_GC_NODE_IO, /* node IOs from foreground gc */
  1024. FS_CP_DATA_IO, /* data IOs from checkpoint */
  1025. FS_CP_NODE_IO, /* node IOs from checkpoint */
  1026. FS_CP_META_IO, /* meta IOs from checkpoint */
  1027. /* READ IO */
  1028. APP_DIRECT_READ_IO, /* app direct read IOs */
  1029. APP_BUFFERED_READ_IO, /* app buffered read IOs */
  1030. APP_READ_IO, /* app read IOs */
  1031. APP_MAPPED_READ_IO, /* app mapped read IOs */
  1032. APP_BUFFERED_CDATA_READ_IO, /* app buffered read IOs on compressed file */
  1033. APP_MAPPED_CDATA_READ_IO, /* app mapped read IOs on compressed file */
  1034. FS_DATA_READ_IO, /* data read IOs */
  1035. FS_GDATA_READ_IO, /* data read IOs from background gc */
  1036. FS_CDATA_READ_IO, /* compressed data read IOs */
  1037. FS_NODE_READ_IO, /* node read IOs */
  1038. FS_META_READ_IO, /* meta read IOs */
  1039. /* other */
  1040. FS_DISCARD_IO, /* discard */
  1041. FS_FLUSH_IO, /* flush */
  1042. NR_IO_TYPE,
  1043. };
  1044. struct f2fs_io_info {
  1045. struct f2fs_sb_info *sbi; /* f2fs_sb_info pointer */
  1046. nid_t ino; /* inode number */
  1047. enum page_type type; /* contains DATA/NODE/META/META_FLUSH */
  1048. enum temp_type temp; /* contains HOT/WARM/COLD */
  1049. enum req_op op; /* contains REQ_OP_ */
  1050. blk_opf_t op_flags; /* req_flag_bits */
  1051. block_t new_blkaddr; /* new block address to be written */
1052. block_t old_blkaddr; /* old block address before COW */
  1053. struct page *page; /* page to be written */
  1054. struct page *encrypted_page; /* encrypted page */
  1055. struct page *compressed_page; /* compressed page */
  1056. struct list_head list; /* serialize IOs */
  1057. unsigned int compr_blocks; /* # of compressed block addresses */
  1058. unsigned int need_lock:8; /* indicate we need to lock cp_rwsem */
  1059. unsigned int version:8; /* version of the node */
  1060. unsigned int submitted:1; /* indicate IO submission */
  1061. unsigned int in_list:1; /* indicate fio is in io_list */
  1062. unsigned int is_por:1; /* indicate IO is from recovery or not */
  1063. unsigned int retry:1; /* need to reallocate block address */
  1064. unsigned int encrypted:1; /* indicate file is encrypted */
  1065. unsigned int post_read:1; /* require post read */
  1066. enum iostat_type io_type; /* io type */
  1067. struct writeback_control *io_wbc; /* writeback control */
  1068. struct bio **bio; /* bio for ipu */
  1069. sector_t *last_block; /* last block number in bio */
  1070. };
  1071. struct bio_entry {
  1072. struct bio *bio;
  1073. struct list_head list;
  1074. };
  1075. #define is_read_io(rw) ((rw) == READ)
  1076. struct f2fs_bio_info {
  1077. struct f2fs_sb_info *sbi; /* f2fs superblock */
  1078. struct bio *bio; /* bios to merge */
  1079. sector_t last_block_in_bio; /* last block number */
  1080. struct f2fs_io_info fio; /* store buffered io info. */
  1081. #ifdef CONFIG_BLK_DEV_ZONED
  1082. struct completion zone_wait; /* condition value for the previous open zone to close */
  1083. struct bio *zone_pending_bio; /* pending bio for the previous zone */
  1084. void *bi_private; /* previous bi_private for pending bio */
  1085. #endif
  1086. struct f2fs_rwsem io_rwsem; /* blocking op for bio */
  1087. spinlock_t io_lock; /* serialize DATA/NODE IOs */
  1088. struct list_head io_list; /* track fios */
  1089. struct list_head bio_list; /* bio entry list head */
  1090. struct f2fs_rwsem bio_list_lock; /* lock to protect bio entry list */
  1091. };
  1092. #define FDEV(i) (sbi->devs[i])
  1093. #define RDEV(i) (raw_super->devs[i])
  1094. struct f2fs_dev_info {
  1095. struct block_device *bdev;
  1096. char path[MAX_PATH_LEN];
  1097. unsigned int total_segments;
  1098. block_t start_blk;
  1099. block_t end_blk;
  1100. #ifdef CONFIG_BLK_DEV_ZONED
  1101. unsigned int nr_blkz; /* Total number of zones */
  1102. unsigned long *blkz_seq; /* Bitmap indicating sequential zones */
  1103. #endif
  1104. };
  1105. enum inode_type {
  1106. DIR_INODE, /* for dirty dir inode */
  1107. FILE_INODE, /* for dirty regular/symlink inode */
  1108. DIRTY_META, /* for all dirtied inode metadata */
  1109. NR_INODE_TYPE,
  1110. };
  1111. /* for inner inode cache management */
  1112. struct inode_management {
  1113. struct radix_tree_root ino_root; /* ino entry array */
  1114. spinlock_t ino_lock; /* for ino entry lock */
  1115. struct list_head ino_list; /* inode list head */
  1116. unsigned long ino_num; /* number of entries */
  1117. };
  1118. /* for GC_AT */
  1119. struct atgc_management {
  1120. bool atgc_enabled; /* ATGC is enabled or not */
  1121. struct rb_root_cached root; /* root of victim rb-tree */
  1122. struct list_head victim_list; /* linked with all victim entries */
  1123. unsigned int victim_count; /* victim count in rb-tree */
  1124. unsigned int candidate_ratio; /* candidate ratio */
  1125. unsigned int max_candidate_count; /* max candidate count */
  1126. unsigned int age_weight; /* age weight, vblock_weight = 100 - age_weight */
  1127. unsigned long long age_threshold; /* age threshold */
  1128. };
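/*
 * Worked example (illustrative): with age_weight = 60, vblock_weight is
 * 100 - 60 = 40, so an ATGC victim's cost weighs its age at 60% and its
 * valid-block ratio at 40%. How the two terms are normalized is up to the
 * GC code, not this struct.
 */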
  1129. struct f2fs_gc_control {
  1130. unsigned int victim_segno; /* target victim segment number */
  1131. int init_gc_type; /* FG_GC or BG_GC */
  1132. bool no_bg_gc; /* check the space and stop bg_gc */
  1133. bool should_migrate_blocks; /* should migrate blocks */
  1134. bool err_gc_skipped; /* return EAGAIN if GC skipped */
  1135. unsigned int nr_free_secs; /* # of free sections to do GC */
  1136. };
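/*
 * Usage sketch (assuming f2fs_gc() takes this control block, as its
 * in-tree callers do; NULL_SEGNO and FG_GC are assumed from related f2fs
 * headers): a foreground GC request that tries to free one section and
 * lets GC pick the victim.
 */
static inline int example_run_foreground_gc(struct f2fs_sb_info *sbi)
{
	struct f2fs_gc_control gc_control = {
		.victim_segno = NULL_SEGNO,	/* let GC choose the victim */
		.init_gc_type = FG_GC,
		.no_bg_gc = false,
		.should_migrate_blocks = false,
		.err_gc_skipped = true,		/* report an error if GC skipped */
		.nr_free_secs = 1,
	};

	return f2fs_gc(sbi, &gc_control);
}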
  1137. /*
  1138. * For s_flag in struct f2fs_sb_info
  1139. * Modification on enum should be synchronized with s_flag array
  1140. */
  1141. enum {
  1142. SBI_IS_DIRTY, /* dirty flag for checkpoint */
  1143. SBI_IS_CLOSE, /* specify unmounting */
  1144. SBI_NEED_FSCK, /* need fsck.f2fs to fix */
  1145. SBI_POR_DOING, /* recovery is doing or not */
  1146. SBI_NEED_SB_WRITE, /* need to recover superblock */
  1147. SBI_NEED_CP, /* need to checkpoint */
  1148. SBI_IS_SHUTDOWN, /* shutdown by ioctl */
  1149. SBI_IS_RECOVERED, /* recovered orphan/data */
  1150. SBI_CP_DISABLED, /* CP was disabled last mount */
  1151. SBI_CP_DISABLED_QUICK, /* CP was disabled quickly */
  1152. SBI_QUOTA_NEED_FLUSH, /* need to flush quota info in CP */
  1153. SBI_QUOTA_SKIP_FLUSH, /* skip flushing quota in current CP */
  1154. SBI_QUOTA_NEED_REPAIR, /* quota file may be corrupted */
  1155. SBI_IS_RESIZEFS, /* resizefs is in process */
  1156. SBI_IS_FREEZING, /* freezefs is in process */
  1157. SBI_IS_WRITABLE, /* remove ro mountoption transiently */
  1158. MAX_SBI_FLAG,
  1159. };
  1160. enum {
  1161. CP_TIME,
  1162. REQ_TIME,
  1163. DISCARD_TIME,
  1164. GC_TIME,
  1165. DISABLE_TIME,
  1166. UMOUNT_DISCARD_TIMEOUT,
  1167. MAX_TIME,
  1168. };
  1169. /* Note that you need to keep synchronization with this gc_mode_names array */
  1170. enum {
  1171. GC_NORMAL,
  1172. GC_IDLE_CB,
  1173. GC_IDLE_GREEDY,
  1174. GC_IDLE_AT,
  1175. GC_URGENT_HIGH,
  1176. GC_URGENT_LOW,
  1177. GC_URGENT_MID,
  1178. MAX_GC_MODE,
  1179. };
  1180. enum {
  1181. BGGC_MODE_ON, /* background gc is on */
  1182. BGGC_MODE_OFF, /* background gc is off */
  1183. BGGC_MODE_SYNC, /*
  1184. * background gc is on, migrating blocks
  1185. * like foreground gc
  1186. */
  1187. };
  1188. enum {
  1189. FS_MODE_ADAPTIVE, /* use both lfs/ssr allocation */
  1190. FS_MODE_LFS, /* use lfs allocation only */
  1191. FS_MODE_FRAGMENT_SEG, /* segment fragmentation mode */
  1192. FS_MODE_FRAGMENT_BLK, /* block fragmentation mode */
  1193. };
  1194. enum {
  1195. ALLOC_MODE_DEFAULT, /* stay default */
  1196. ALLOC_MODE_REUSE, /* reuse segments as much as possible */
  1197. };
  1198. enum fsync_mode {
  1199. FSYNC_MODE_POSIX, /* fsync follows posix semantics */
  1200. FSYNC_MODE_STRICT, /* fsync behaves in line with ext4 */
  1201. FSYNC_MODE_NOBARRIER, /* fsync behaves nobarrier based on posix */
  1202. };
  1203. enum {
  1204. COMPR_MODE_FS, /*
1205. * automatically compress
1206. * compression-enabled files
1207. */
1208. COMPR_MODE_USER, /*
1209. * automatic compression is disabled;
1210. * the user controls file compression
1211. * via ioctls
1212. */
  1213. };
  1214. enum {
  1215. DISCARD_UNIT_BLOCK, /* basic discard unit is block */
  1216. DISCARD_UNIT_SEGMENT, /* basic discard unit is segment */
  1217. DISCARD_UNIT_SECTION, /* basic discard unit is section */
  1218. };
  1219. enum {
  1220. MEMORY_MODE_NORMAL, /* memory mode for normal devices */
1221. MEMORY_MODE_LOW, /* memory mode for low memory devices */
  1222. };
  1223. static inline int f2fs_test_bit(unsigned int nr, char *addr);
  1224. static inline void f2fs_set_bit(unsigned int nr, char *addr);
  1225. static inline void f2fs_clear_bit(unsigned int nr, char *addr);
  1226. /*
  1227. * Layout of f2fs page.private:
  1228. *
  1229. * Layout A: lowest bit should be 1
  1230. * | bit0 = 1 | bit1 | bit2 | ... | bit MAX | private data .... |
  1231. * bit 0 PAGE_PRIVATE_NOT_POINTER
  1232. * bit 1 PAGE_PRIVATE_DUMMY_WRITE
  1233. * bit 2 PAGE_PRIVATE_ONGOING_MIGRATION
  1234. * bit 3 PAGE_PRIVATE_INLINE_INODE
  1235. * bit 4 PAGE_PRIVATE_REF_RESOURCE
  1236. * bit 5- f2fs private data
  1237. *
  1238. * Layout B: lowest bit should be 0
  1239. * page.private is a wrapped pointer.
  1240. */
  1241. enum {
  1242. PAGE_PRIVATE_NOT_POINTER, /* private contains non-pointer data */
  1243. PAGE_PRIVATE_DUMMY_WRITE, /* data page for padding aligned IO */
  1244. PAGE_PRIVATE_ONGOING_MIGRATION, /* data page which is on-going migrating */
  1245. PAGE_PRIVATE_INLINE_INODE, /* inode page contains inline data */
  1246. PAGE_PRIVATE_REF_RESOURCE, /* dirty page has referenced resources */
  1247. PAGE_PRIVATE_MAX
  1248. };
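/*
 * Worked example (illustrative): under Layout A, an inode page carrying
 * inline data plus private payload 0x3 stores
 *
 *	BIT(PAGE_PRIVATE_NOT_POINTER) | BIT(PAGE_PRIVATE_INLINE_INODE) |
 *	(0x3UL << PAGE_PRIVATE_MAX)
 *
 * i.e. bit 0 marks the value as non-pointer data, bits 1..4 hold flags,
 * and the payload starts at bit PAGE_PRIVATE_MAX.
 */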
  1249. /* For compression */
  1250. enum compress_algorithm_type {
  1251. COMPRESS_LZO,
  1252. COMPRESS_LZ4,
  1253. COMPRESS_ZSTD,
  1254. COMPRESS_LZORLE,
  1255. COMPRESS_MAX,
  1256. };
  1257. enum compress_flag {
  1258. COMPRESS_CHKSUM,
  1259. COMPRESS_MAX_FLAG,
  1260. };
  1261. #define COMPRESS_WATERMARK 20
  1262. #define COMPRESS_PERCENT 20
  1263. #define COMPRESS_DATA_RESERVED_SIZE 4
  1264. struct compress_data {
  1265. __le32 clen; /* compressed data size */
  1266. __le32 chksum; /* compressed data chksum */
  1267. __le32 reserved[COMPRESS_DATA_RESERVED_SIZE]; /* reserved */
  1268. u8 cdata[]; /* compressed data */
  1269. };
  1270. #define COMPRESS_HEADER_SIZE (sizeof(struct compress_data))
  1271. #define F2FS_COMPRESSED_PAGE_MAGIC 0xF5F2C000
  1272. #define COMPRESS_LEVEL_OFFSET 8
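/*
 * Hypothetical sketch (an assumption about the packing implied by
 * COMPRESS_LEVEL_OFFSET; the authoritative encoding lives where the
 * inode's compress flag is read and written): the algorithm sits in the
 * low byte and the compression level in the high byte of a 16-bit value.
 */
static inline u16 example_pack_compress(u8 algorithm, u8 level)
{
	return (u16)algorithm | ((u16)level << COMPRESS_LEVEL_OFFSET);
}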
  1273. /* compress context */
  1274. struct compress_ctx {
1275. struct inode *inode; /* inode the context belongs to */
  1276. pgoff_t cluster_idx; /* cluster index number */
  1277. unsigned int cluster_size; /* page count in cluster */
  1278. unsigned int log_cluster_size; /* log of cluster size */
  1279. struct page **rpages; /* pages store raw data in cluster */
  1280. unsigned int nr_rpages; /* total page number in rpages */
  1281. struct page **cpages; /* pages store compressed data in cluster */
  1282. unsigned int nr_cpages; /* total page number in cpages */
  1283. unsigned int valid_nr_cpages; /* valid page number in cpages */
  1284. void *rbuf; /* virtual mapped address on rpages */
  1285. struct compress_data *cbuf; /* virtual mapped address on cpages */
  1286. size_t rlen; /* valid data length in rbuf */
  1287. size_t clen; /* valid data length in cbuf */
  1288. void *private; /* payload buffer for specified compression algorithm */
  1289. void *private2; /* extra payload buffer */
  1290. };
  1291. /* compress context for write IO path */
  1292. struct compress_io_ctx {
  1293. u32 magic; /* magic number to indicate page is compressed */
1294. struct inode *inode; /* inode the context belongs to */
  1295. struct page **rpages; /* pages store raw data in cluster */
  1296. unsigned int nr_rpages; /* total page number in rpages */
  1297. atomic_t pending_pages; /* in-flight compressed page count */
  1298. };
  1299. /* Context for decompressing one cluster on the read IO path */
  1300. struct decompress_io_ctx {
  1301. u32 magic; /* magic number to indicate page is compressed */
1302. struct inode *inode; /* inode the context belongs to */
  1303. pgoff_t cluster_idx; /* cluster index number */
  1304. unsigned int cluster_size; /* page count in cluster */
  1305. unsigned int log_cluster_size; /* log of cluster size */
  1306. struct page **rpages; /* pages store raw data in cluster */
  1307. unsigned int nr_rpages; /* total page number in rpages */
  1308. struct page **cpages; /* pages store compressed data in cluster */
  1309. unsigned int nr_cpages; /* total page number in cpages */
  1310. struct page **tpages; /* temp pages to pad holes in cluster */
  1311. void *rbuf; /* virtual mapped address on rpages */
  1312. struct compress_data *cbuf; /* virtual mapped address on cpages */
  1313. size_t rlen; /* valid data length in rbuf */
  1314. size_t clen; /* valid data length in cbuf */
  1315. /*
  1316. * The number of compressed pages remaining to be read in this cluster.
  1317. * This is initially nr_cpages. It is decremented by 1 each time a page
  1318. * has been read (or failed to be read). When it reaches 0, the cluster
  1319. * is decompressed (or an error is reported).
  1320. *
  1321. * If an error occurs before all the pages have been submitted for I/O,
  1322. * then this will never reach 0. In this case the I/O submitter is
  1323. * responsible for calling f2fs_decompress_end_io() instead.
  1324. */
  1325. atomic_t remaining_pages;
  1326. /*
  1327. * Number of references to this decompress_io_ctx.
  1328. *
  1329. * One reference is held for I/O completion. This reference is dropped
  1330. * after the pagecache pages are updated and unlocked -- either after
  1331. * decompression (and verity if enabled), or after an error.
  1332. *
  1333. * In addition, each compressed page holds a reference while it is in a
1334. * bio. These references are necessary to prevent compressed pages from
1335. * being freed while they are still in a bio.
  1336. */
  1337. refcount_t refcnt;
  1338. bool failed; /* IO error occurred before decompression? */
  1339. bool need_verity; /* need fs-verity verification after decompression? */
  1340. void *private; /* payload buffer for specified decompression algorithm */
  1341. void *private2; /* extra payload buffer */
  1342. struct work_struct verity_work; /* work to verify the decompressed pages */
1343. struct work_struct free_work; /* work to free this structure later */
  1344. };
  1345. #define NULL_CLUSTER ((unsigned int)(~0))
  1346. #define MIN_COMPRESS_LOG_SIZE 2
  1347. #define MAX_COMPRESS_LOG_SIZE 8
  1348. #define MAX_COMPRESS_WINDOW_SIZE(log_size) ((PAGE_SIZE) << (log_size))
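/*
 * Worked example (illustrative): with 4KB pages, the compress cluster
 * window spans MAX_COMPRESS_WINDOW_SIZE(MIN_COMPRESS_LOG_SIZE) = 16KB up
 * to MAX_COMPRESS_WINDOW_SIZE(MAX_COMPRESS_LOG_SIZE) = 1MB.
 */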
  1349. struct f2fs_sb_info {
  1350. struct super_block *sb; /* pointer to VFS super block */
  1351. struct proc_dir_entry *s_proc; /* proc entry */
  1352. struct f2fs_super_block *raw_super; /* raw super block pointer */
  1353. struct f2fs_rwsem sb_lock; /* lock for raw super block */
  1354. int valid_super_block; /* valid super block no */
  1355. unsigned long s_flag; /* flags for sbi */
  1356. struct mutex writepages; /* mutex for writepages() */
  1357. #ifdef CONFIG_BLK_DEV_ZONED
  1358. unsigned int blocks_per_blkz; /* F2FS blocks per zone */
  1359. #endif
  1360. /* for node-related operations */
  1361. struct f2fs_nm_info *nm_info; /* node manager */
  1362. struct inode *node_inode; /* cache node blocks */
  1363. /* for segment-related operations */
  1364. struct f2fs_sm_info *sm_info; /* segment manager */
  1365. /* for bio operations */
  1366. struct f2fs_bio_info *write_io[NR_PAGE_TYPE]; /* for write bios */
  1367. /* keep migration IO order for LFS mode */
  1368. struct f2fs_rwsem io_order_lock;
  1369. mempool_t *write_io_dummy; /* Dummy pages */
  1370. pgoff_t page_eio_ofs[NR_PAGE_TYPE]; /* EIO page offset */
  1371. int page_eio_cnt[NR_PAGE_TYPE]; /* EIO count */
  1372. /* for checkpoint */
  1373. struct f2fs_checkpoint *ckpt; /* raw checkpoint pointer */
1374. int cur_cp_pack; /* current cp pack (1 or 2) */
  1375. spinlock_t cp_lock; /* for flag in ckpt */
  1376. struct inode *meta_inode; /* cache meta blocks */
  1377. struct f2fs_rwsem cp_global_sem; /* checkpoint procedure lock */
  1378. struct f2fs_rwsem cp_rwsem; /* blocking FS operations */
  1379. struct f2fs_rwsem node_write; /* locking node writes */
  1380. struct f2fs_rwsem node_change; /* locking node change */
  1381. wait_queue_head_t cp_wait;
  1382. unsigned long last_time[MAX_TIME]; /* to store time in jiffies */
  1383. long interval_time[MAX_TIME]; /* to store thresholds */
  1384. struct ckpt_req_control cprc_info; /* for checkpoint request control */
  1385. struct inode_management im[MAX_INO_ENTRY]; /* manage inode cache */
  1386. spinlock_t fsync_node_lock; /* for node entry lock */
  1387. struct list_head fsync_node_list; /* node list head */
  1388. unsigned int fsync_seg_id; /* sequence id */
  1389. unsigned int fsync_node_num; /* number of node entries */
  1390. /* for orphan inode, use 0'th array */
  1391. unsigned int max_orphans; /* max orphan inodes */
  1392. /* for inode management */
  1393. struct list_head inode_list[NR_INODE_TYPE]; /* dirty inode list */
  1394. spinlock_t inode_lock[NR_INODE_TYPE]; /* for dirty inode list lock */
  1395. struct mutex flush_lock; /* for flush exclusion */
  1396. /* for extent tree cache */
  1397. struct extent_tree_info extent_tree[NR_EXTENT_CACHES];
  1398. atomic64_t allocated_data_blocks; /* for block age extent_cache */
1399. /* Thresholds used for hot and warm data separation */
  1400. unsigned int hot_data_age_threshold;
  1401. unsigned int warm_data_age_threshold;
  1402. unsigned int last_age_weight;
  1403. /* basic filesystem units */
  1404. unsigned int log_sectors_per_block; /* log2 sectors per block */
  1405. unsigned int log_blocksize; /* log2 block size */
  1406. unsigned int blocksize; /* block size */
  1407. unsigned int root_ino_num; /* root inode number*/
  1408. unsigned int node_ino_num; /* node inode number*/
  1409. unsigned int meta_ino_num; /* meta inode number*/
  1410. unsigned int log_blocks_per_seg; /* log2 blocks per segment */
  1411. unsigned int blocks_per_seg; /* blocks per segment */
  1412. unsigned int unusable_blocks_per_sec; /* unusable blocks per section */
  1413. unsigned int segs_per_sec; /* segments per section */
  1414. unsigned int secs_per_zone; /* sections per zone */
  1415. unsigned int total_sections; /* total section count */
  1416. unsigned int total_node_count; /* total node block count */
  1417. unsigned int total_valid_node_count; /* valid node block count */
  1418. int dir_level; /* directory level */
  1419. bool readdir_ra; /* readahead inode in readdir */
  1420. u64 max_io_bytes; /* max io bytes to merge IOs */
  1421. block_t user_block_count; /* # of user blocks */
  1422. block_t total_valid_block_count; /* # of valid blocks */
1423. block_t discard_blks; /* discard command candidates */
  1424. block_t last_valid_block_count; /* for recovery */
  1425. block_t reserved_blocks; /* configurable reserved blocks */
  1426. block_t current_reserved_blocks; /* current reserved blocks */
  1427. /* Additional tracking for no checkpoint mode */
  1428. block_t unusable_block_count; /* # of blocks saved by last cp */
  1429. unsigned int nquota_files; /* # of quota sysfile */
  1430. struct f2fs_rwsem quota_sem; /* blocking cp for flags */
  1431. /* # of pages, see count_type */
  1432. atomic_t nr_pages[NR_COUNT_TYPE];
  1433. /* # of allocated blocks */
  1434. struct percpu_counter alloc_valid_block_count;
  1435. /* # of node block writes as roll forward recovery */
  1436. struct percpu_counter rf_node_block_count;
  1437. /* writeback control */
  1438. atomic_t wb_sync_req[META]; /* count # of WB_SYNC threads */
  1439. /* valid inode count */
  1440. struct percpu_counter total_valid_inode_count;
  1441. struct f2fs_mount_info mount_opt; /* mount options */
  1442. /* for cleaning operations */
1443. struct f2fs_rwsem gc_lock; /*
1444. * semaphore for GC; avoids races between
1445. * concurrent GCs, and between GC and CP
1446. */
  1447. struct f2fs_gc_kthread *gc_thread; /* GC thread */
  1448. struct atgc_management am; /* atgc management */
  1449. unsigned int cur_victim_sec; /* current victim section num */
  1450. unsigned int gc_mode; /* current GC state */
  1451. unsigned int next_victim_seg[2]; /* next segment in victim section */
  1452. spinlock_t gc_remaining_trials_lock;
  1453. /* remaining trial count for GC_URGENT_* and GC_IDLE_* */
  1454. unsigned int gc_remaining_trials;
  1455. /* for skip statistic */
  1456. unsigned long long skipped_gc_rwsem; /* FG_GC only */
  1457. /* threshold for gc trials on pinned files */
  1458. u64 gc_pin_file_threshold;
  1459. struct f2fs_rwsem pin_sem;
  1460. /* maximum # of trials to find a victim segment for SSR and GC */
  1461. unsigned int max_victim_search;
  1462. /* migration granularity of garbage collection, unit: segment */
  1463. unsigned int migration_granularity;
  1464. /*
  1465. * for stat information.
  1466. * one is for the LFS mode, and the other is for the SSR mode.
  1467. */
  1468. #ifdef CONFIG_F2FS_STAT_FS
  1469. struct f2fs_stat_info *stat_info; /* FS status information */
  1470. atomic_t meta_count[META_MAX]; /* # of meta blocks */
  1471. unsigned int segment_count[2]; /* # of allocated segments */
  1472. unsigned int block_count[2]; /* # of allocated blocks */
  1473. atomic_t inplace_count; /* # of inplace update */
  1474. /* # of lookup extent cache */
  1475. atomic64_t total_hit_ext[NR_EXTENT_CACHES];
  1476. /* # of hit rbtree extent node */
  1477. atomic64_t read_hit_rbtree[NR_EXTENT_CACHES];
  1478. /* # of hit cached extent node */
  1479. atomic64_t read_hit_cached[NR_EXTENT_CACHES];
  1480. /* # of hit largest extent node in read extent cache */
  1481. atomic64_t read_hit_largest;
  1482. atomic_t inline_xattr; /* # of inline_xattr inodes */
  1483. atomic_t inline_inode; /* # of inline_data inodes */
  1484. atomic_t inline_dir; /* # of inline_dentry inodes */
  1485. atomic_t compr_inode; /* # of compressed inodes */
  1486. atomic64_t compr_blocks; /* # of compressed blocks */
  1487. atomic_t swapfile_inode; /* # of swapfile inodes */
  1488. atomic_t atomic_files; /* # of opened atomic file */
  1489. atomic_t max_aw_cnt; /* max # of atomic writes */
  1490. unsigned int io_skip_bggc; /* skip background gc for in-flight IO */
  1491. unsigned int other_skip_bggc; /* skip background gc for other reasons */
  1492. unsigned int ndirty_inode[NR_INODE_TYPE]; /* # of dirty inodes */
  1493. #endif
  1494. spinlock_t stat_lock; /* lock for stat operations */
  1495. /* to attach REQ_META|REQ_FUA flags */
  1496. unsigned int data_io_flag;
  1497. unsigned int node_io_flag;
  1498. /* For sysfs support */
  1499. struct kobject s_kobj; /* /sys/fs/f2fs/<devname> */
  1500. struct completion s_kobj_unregister;
  1501. struct kobject s_stat_kobj; /* /sys/fs/f2fs/<devname>/stat */
  1502. struct completion s_stat_kobj_unregister;
  1503. struct kobject s_feature_list_kobj; /* /sys/fs/f2fs/<devname>/feature_list */
  1504. struct completion s_feature_list_kobj_unregister;
  1505. /* For shrinker support */
  1506. struct list_head s_list;
  1507. struct mutex umount_mutex;
  1508. unsigned int shrinker_run_no;
  1509. /* For multi devices */
  1510. int s_ndevs; /* number of devices */
  1511. struct f2fs_dev_info *devs; /* for device list */
  1512. unsigned int dirty_device; /* for checkpoint data flush */
  1513. spinlock_t dev_lock; /* protect dirty_device */
1514. bool aligned_blksize; /* all devices have the same logical blksize */
  1515. /* For write statistics */
  1516. u64 sectors_written_start;
  1517. u64 kbytes_written;
  1518. /* Reference to checksum algorithm driver via cryptoapi */
  1519. struct crypto_shash *s_chksum_driver;
  1520. /* Precomputed FS UUID checksum for seeding other checksums */
  1521. __u32 s_chksum_seed;
  1522. struct workqueue_struct *post_read_wq; /* post read workqueue */
  1523. unsigned char errors[MAX_F2FS_ERRORS]; /* error flags */
  1524. spinlock_t error_lock; /* protect errors array */
1525. bool error_dirty; /* sb error flags are dirty */
  1526. struct kmem_cache *inline_xattr_slab; /* inline xattr entry */
  1527. unsigned int inline_xattr_slab_size; /* default inline xattr slab size */
  1528. /* For reclaimed segs statistics per each GC mode */
  1529. unsigned int gc_segment_mode; /* GC state for reclaimed segments */
  1530. unsigned int gc_reclaimed_segs[MAX_GC_MODE]; /* Reclaimed segs for each mode */
  1531. unsigned long seq_file_ra_mul; /* multiplier for ra_pages of seq. files in fadvise */
  1532. int max_fragment_chunk; /* max chunk size for block fragmentation mode */
  1533. int max_fragment_hole; /* max hole size for block fragmentation mode */
  1534. /* For atomic write statistics */
  1535. atomic64_t current_atomic_write;
  1536. s64 peak_atomic_write;
  1537. u64 committed_atomic_block;
  1538. u64 revoked_atomic_block;
  1539. #ifdef CONFIG_F2FS_FS_COMPRESSION
  1540. struct kmem_cache *page_array_slab; /* page array entry */
  1541. unsigned int page_array_slab_size; /* default page array slab size */
  1542. /* For runtime compression statistics */
  1543. u64 compr_written_block;
  1544. u64 compr_saved_block;
  1545. u32 compr_new_inode;
  1546. /* For compressed block cache */
  1547. struct inode *compress_inode; /* cache compressed blocks */
  1548. unsigned int compress_percent; /* cache page percentage */
  1549. unsigned int compress_watermark; /* cache page watermark */
  1550. atomic_t compress_page_hit; /* cache hit count */
  1551. #endif
  1552. #ifdef CONFIG_F2FS_IOSTAT
  1553. /* For app/fs IO statistics */
  1554. spinlock_t iostat_lock;
  1555. unsigned long long iostat_count[NR_IO_TYPE];
  1556. unsigned long long iostat_bytes[NR_IO_TYPE];
  1557. unsigned long long prev_iostat_bytes[NR_IO_TYPE];
  1558. bool iostat_enable;
  1559. unsigned long iostat_next_period;
  1560. unsigned int iostat_period_ms;
  1561. /* For io latency related statistics info in one iostat period */
  1562. spinlock_t iostat_lat_lock;
  1563. struct iostat_lat_info *iostat_io_lat;
  1564. #endif
  1565. };
  1566. #ifdef CONFIG_F2FS_FAULT_INJECTION
  1567. #define time_to_inject(sbi, type) __time_to_inject(sbi, type, __func__, \
  1568. __builtin_return_address(0))
  1569. static inline bool __time_to_inject(struct f2fs_sb_info *sbi, int type,
  1570. const char *func, const char *parent_func)
  1571. {
  1572. struct f2fs_fault_info *ffi = &F2FS_OPTION(sbi).fault_info;
  1573. if (!ffi->inject_rate)
  1574. return false;
  1575. if (!IS_FAULT_SET(ffi, type))
  1576. return false;
  1577. atomic_inc(&ffi->inject_ops);
  1578. if (atomic_read(&ffi->inject_ops) >= ffi->inject_rate) {
  1579. atomic_set(&ffi->inject_ops, 0);
  1580. printk_ratelimited("%sF2FS-fs (%s) : inject %s in %s of %pS\n",
  1581. KERN_INFO, sbi->sb->s_id, f2fs_fault_name[type],
  1582. func, parent_func);
  1583. return true;
  1584. }
  1585. return false;
  1586. }
  1587. #else
  1588. static inline bool time_to_inject(struct f2fs_sb_info *sbi, int type)
  1589. {
  1590. return false;
  1591. }
  1592. #endif
  1593. /*
  1594. * Test if the mounted volume is a multi-device volume.
  1595. * - For a single regular disk volume, sbi->s_ndevs is 0.
  1596. * - For a single zoned disk volume, sbi->s_ndevs is 1.
  1597. * - For a multi-device volume, sbi->s_ndevs is always 2 or more.
  1598. */
  1599. static inline bool f2fs_is_multi_device(struct f2fs_sb_info *sbi)
  1600. {
  1601. return sbi->s_ndevs > 1;
  1602. }
  1603. static inline void f2fs_update_time(struct f2fs_sb_info *sbi, int type)
  1604. {
  1605. unsigned long now = jiffies;
  1606. sbi->last_time[type] = now;
  1607. /* DISCARD_TIME and GC_TIME are based on REQ_TIME */
  1608. if (type == REQ_TIME) {
  1609. sbi->last_time[DISCARD_TIME] = now;
  1610. sbi->last_time[GC_TIME] = now;
  1611. }
  1612. }
  1613. static inline bool f2fs_time_over(struct f2fs_sb_info *sbi, int type)
  1614. {
  1615. unsigned long interval = sbi->interval_time[type] * HZ;
  1616. return time_after(jiffies, sbi->last_time[type] + interval);
  1617. }
  1618. static inline unsigned int f2fs_time_to_wait(struct f2fs_sb_info *sbi,
  1619. int type)
  1620. {
  1621. unsigned long interval = sbi->interval_time[type] * HZ;
  1622. unsigned int wait_ms = 0;
  1623. long delta;
  1624. delta = (sbi->last_time[type] + interval) - jiffies;
  1625. if (delta > 0)
  1626. wait_ms = jiffies_to_msecs(delta);
  1627. return wait_ms;
  1628. }
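/*
 * Usage sketch (illustrative): a background worker typically pairs these
 * helpers -- test the clock before doing work, stamp it afterwards:
 *
 *	if (f2fs_time_over(sbi, GC_TIME)) {
 *		do_background_work(sbi);	// hypothetical helper
 *		f2fs_update_time(sbi, GC_TIME);
 *	} else {
 *		msleep(f2fs_time_to_wait(sbi, GC_TIME));
 *	}
 */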
  1629. /*
  1630. * Inline functions
  1631. */
  1632. static inline u32 __f2fs_crc32(struct f2fs_sb_info *sbi, u32 crc,
  1633. const void *address, unsigned int length)
  1634. {
  1635. struct {
  1636. struct shash_desc shash;
  1637. char ctx[4];
  1638. } desc;
  1639. int err;
  1640. BUG_ON(crypto_shash_descsize(sbi->s_chksum_driver) != sizeof(desc.ctx));
  1641. desc.shash.tfm = sbi->s_chksum_driver;
  1642. *(u32 *)desc.ctx = crc;
  1643. err = crypto_shash_update(&desc.shash, address, length);
  1644. BUG_ON(err);
  1645. return *(u32 *)desc.ctx;
  1646. }
  1647. static inline u32 f2fs_crc32(struct f2fs_sb_info *sbi, const void *address,
  1648. unsigned int length)
  1649. {
  1650. return __f2fs_crc32(sbi, F2FS_SUPER_MAGIC, address, length);
  1651. }
  1652. static inline bool f2fs_crc_valid(struct f2fs_sb_info *sbi, __u32 blk_crc,
  1653. void *buf, size_t buf_size)
  1654. {
  1655. return f2fs_crc32(sbi, buf, buf_size) == blk_crc;
  1656. }
  1657. static inline u32 f2fs_chksum(struct f2fs_sb_info *sbi, u32 crc,
  1658. const void *address, unsigned int length)
  1659. {
  1660. return __f2fs_crc32(sbi, crc, address, length);
  1661. }
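/*
 * Usage sketch (illustrative): verifying a buffer against a stored
 * checksum, assuming -- purely for this example -- that the CRC occupies
 * the last four bytes of the block. This is not the on-disk format.
 */
static inline bool example_blk_crc_ok(struct f2fs_sb_info *sbi, void *blk,
					size_t blksize)
{
	__u32 stored = le32_to_cpu(*(__le32 *)((char *)blk + blksize - 4));

	return f2fs_crc_valid(sbi, stored, blk, blksize - 4);
}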
  1662. static inline struct f2fs_inode_info *F2FS_I(struct inode *inode)
  1663. {
  1664. return container_of(inode, struct f2fs_inode_info, vfs_inode);
  1665. }
  1666. static inline struct f2fs_sb_info *F2FS_SB(struct super_block *sb)
  1667. {
  1668. return sb->s_fs_info;
  1669. }
  1670. static inline struct f2fs_sb_info *F2FS_I_SB(struct inode *inode)
  1671. {
  1672. return F2FS_SB(inode->i_sb);
  1673. }
  1674. static inline struct f2fs_sb_info *F2FS_M_SB(struct address_space *mapping)
  1675. {
  1676. return F2FS_I_SB(mapping->host);
  1677. }
  1678. static inline struct f2fs_sb_info *F2FS_P_SB(struct page *page)
  1679. {
  1680. return F2FS_M_SB(page_file_mapping(page));
  1681. }
  1682. static inline struct f2fs_super_block *F2FS_RAW_SUPER(struct f2fs_sb_info *sbi)
  1683. {
  1684. return (struct f2fs_super_block *)(sbi->raw_super);
  1685. }
  1686. static inline struct f2fs_checkpoint *F2FS_CKPT(struct f2fs_sb_info *sbi)
  1687. {
  1688. return (struct f2fs_checkpoint *)(sbi->ckpt);
  1689. }
  1690. static inline struct f2fs_node *F2FS_NODE(struct page *page)
  1691. {
  1692. return (struct f2fs_node *)page_address(page);
  1693. }
  1694. static inline struct f2fs_inode *F2FS_INODE(struct page *page)
  1695. {
  1696. return &((struct f2fs_node *)page_address(page))->i;
  1697. }
  1698. static inline struct f2fs_nm_info *NM_I(struct f2fs_sb_info *sbi)
  1699. {
  1700. return (struct f2fs_nm_info *)(sbi->nm_info);
  1701. }
  1702. static inline struct f2fs_sm_info *SM_I(struct f2fs_sb_info *sbi)
  1703. {
  1704. return (struct f2fs_sm_info *)(sbi->sm_info);
  1705. }
  1706. static inline struct sit_info *SIT_I(struct f2fs_sb_info *sbi)
  1707. {
  1708. return (struct sit_info *)(SM_I(sbi)->sit_info);
  1709. }
  1710. static inline struct free_segmap_info *FREE_I(struct f2fs_sb_info *sbi)
  1711. {
  1712. return (struct free_segmap_info *)(SM_I(sbi)->free_info);
  1713. }
  1714. static inline struct dirty_seglist_info *DIRTY_I(struct f2fs_sb_info *sbi)
  1715. {
  1716. return (struct dirty_seglist_info *)(SM_I(sbi)->dirty_info);
  1717. }
  1718. static inline struct address_space *META_MAPPING(struct f2fs_sb_info *sbi)
  1719. {
  1720. return sbi->meta_inode->i_mapping;
  1721. }
  1722. static inline struct address_space *NODE_MAPPING(struct f2fs_sb_info *sbi)
  1723. {
  1724. return sbi->node_inode->i_mapping;
  1725. }
  1726. static inline bool is_sbi_flag_set(struct f2fs_sb_info *sbi, unsigned int type)
  1727. {
  1728. return test_bit(type, &sbi->s_flag);
  1729. }
  1730. static inline void set_sbi_flag(struct f2fs_sb_info *sbi, unsigned int type)
  1731. {
  1732. set_bit(type, &sbi->s_flag);
  1733. }
  1734. static inline void clear_sbi_flag(struct f2fs_sb_info *sbi, unsigned int type)
  1735. {
  1736. clear_bit(type, &sbi->s_flag);
  1737. }
  1738. static inline unsigned long long cur_cp_version(struct f2fs_checkpoint *cp)
  1739. {
  1740. return le64_to_cpu(cp->checkpoint_ver);
  1741. }
  1742. static inline unsigned long f2fs_qf_ino(struct super_block *sb, int type)
  1743. {
  1744. if (type < F2FS_MAX_QUOTAS)
  1745. return le32_to_cpu(F2FS_SB(sb)->raw_super->qf_ino[type]);
  1746. return 0;
  1747. }
  1748. static inline __u64 cur_cp_crc(struct f2fs_checkpoint *cp)
  1749. {
  1750. size_t crc_offset = le32_to_cpu(cp->checksum_offset);
  1751. return le32_to_cpu(*((__le32 *)((unsigned char *)cp + crc_offset)));
  1752. }
  1753. static inline bool __is_set_ckpt_flags(struct f2fs_checkpoint *cp, unsigned int f)
  1754. {
  1755. unsigned int ckpt_flags = le32_to_cpu(cp->ckpt_flags);
  1756. return ckpt_flags & f;
  1757. }
  1758. static inline bool is_set_ckpt_flags(struct f2fs_sb_info *sbi, unsigned int f)
  1759. {
  1760. return __is_set_ckpt_flags(F2FS_CKPT(sbi), f);
  1761. }
  1762. static inline void __set_ckpt_flags(struct f2fs_checkpoint *cp, unsigned int f)
  1763. {
  1764. unsigned int ckpt_flags;
  1765. ckpt_flags = le32_to_cpu(cp->ckpt_flags);
  1766. ckpt_flags |= f;
  1767. cp->ckpt_flags = cpu_to_le32(ckpt_flags);
  1768. }
  1769. static inline void set_ckpt_flags(struct f2fs_sb_info *sbi, unsigned int f)
  1770. {
  1771. unsigned long flags;
  1772. spin_lock_irqsave(&sbi->cp_lock, flags);
  1773. __set_ckpt_flags(F2FS_CKPT(sbi), f);
  1774. spin_unlock_irqrestore(&sbi->cp_lock, flags);
  1775. }
  1776. static inline void __clear_ckpt_flags(struct f2fs_checkpoint *cp, unsigned int f)
  1777. {
  1778. unsigned int ckpt_flags;
  1779. ckpt_flags = le32_to_cpu(cp->ckpt_flags);
  1780. ckpt_flags &= (~f);
  1781. cp->ckpt_flags = cpu_to_le32(ckpt_flags);
  1782. }
  1783. static inline void clear_ckpt_flags(struct f2fs_sb_info *sbi, unsigned int f)
  1784. {
  1785. unsigned long flags;
  1786. spin_lock_irqsave(&sbi->cp_lock, flags);
  1787. __clear_ckpt_flags(F2FS_CKPT(sbi), f);
  1788. spin_unlock_irqrestore(&sbi->cp_lock, flags);
  1789. }
  1790. #define init_f2fs_rwsem(sem) \
  1791. do { \
  1792. static struct lock_class_key __key; \
  1793. \
  1794. __init_f2fs_rwsem((sem), #sem, &__key); \
  1795. } while (0)
  1796. static inline void __init_f2fs_rwsem(struct f2fs_rwsem *sem,
  1797. const char *sem_name, struct lock_class_key *key)
  1798. {
  1799. __init_rwsem(&sem->internal_rwsem, sem_name, key);
  1800. #ifdef CONFIG_F2FS_UNFAIR_RWSEM
  1801. init_waitqueue_head(&sem->read_waiters);
  1802. #endif
  1803. }
  1804. static inline int f2fs_rwsem_is_locked(struct f2fs_rwsem *sem)
  1805. {
  1806. return rwsem_is_locked(&sem->internal_rwsem);
  1807. }
  1808. static inline int f2fs_rwsem_is_contended(struct f2fs_rwsem *sem)
  1809. {
  1810. return rwsem_is_contended(&sem->internal_rwsem);
  1811. }
  1812. static inline void f2fs_down_read(struct f2fs_rwsem *sem)
  1813. {
  1814. #ifdef CONFIG_F2FS_UNFAIR_RWSEM
  1815. wait_event(sem->read_waiters, down_read_trylock(&sem->internal_rwsem));
  1816. #else
  1817. down_read(&sem->internal_rwsem);
  1818. #endif
  1819. }
  1820. static inline int f2fs_down_read_trylock(struct f2fs_rwsem *sem)
  1821. {
  1822. return down_read_trylock(&sem->internal_rwsem);
  1823. }
  1824. static inline void f2fs_up_read(struct f2fs_rwsem *sem)
  1825. {
  1826. up_read(&sem->internal_rwsem);
  1827. }
  1828. static inline void f2fs_down_write(struct f2fs_rwsem *sem)
  1829. {
  1830. down_write(&sem->internal_rwsem);
  1831. }
  1832. #ifdef CONFIG_DEBUG_LOCK_ALLOC
  1833. static inline void f2fs_down_read_nested(struct f2fs_rwsem *sem, int subclass)
  1834. {
  1835. down_read_nested(&sem->internal_rwsem, subclass);
  1836. }
  1837. static inline void f2fs_down_write_nested(struct f2fs_rwsem *sem, int subclass)
  1838. {
  1839. down_write_nested(&sem->internal_rwsem, subclass);
  1840. }
  1841. #else
  1842. #define f2fs_down_read_nested(sem, subclass) f2fs_down_read(sem)
  1843. #define f2fs_down_write_nested(sem, subclass) f2fs_down_write(sem)
  1844. #endif
  1845. static inline int f2fs_down_write_trylock(struct f2fs_rwsem *sem)
  1846. {
  1847. return down_write_trylock(&sem->internal_rwsem);
  1848. }
  1849. static inline void f2fs_up_write(struct f2fs_rwsem *sem)
  1850. {
  1851. up_write(&sem->internal_rwsem);
  1852. #ifdef CONFIG_F2FS_UNFAIR_RWSEM
  1853. wake_up_all(&sem->read_waiters);
  1854. #endif
  1855. }
  1856. static inline void f2fs_lock_op(struct f2fs_sb_info *sbi)
  1857. {
  1858. f2fs_down_read(&sbi->cp_rwsem);
  1859. }
  1860. static inline int f2fs_trylock_op(struct f2fs_sb_info *sbi)
  1861. {
  1862. if (time_to_inject(sbi, FAULT_LOCK_OP))
  1863. return 0;
  1864. return f2fs_down_read_trylock(&sbi->cp_rwsem);
  1865. }
  1866. static inline void f2fs_unlock_op(struct f2fs_sb_info *sbi)
  1867. {
  1868. f2fs_up_read(&sbi->cp_rwsem);
  1869. }
  1870. static inline void f2fs_lock_all(struct f2fs_sb_info *sbi)
  1871. {
  1872. f2fs_down_write(&sbi->cp_rwsem);
  1873. }
  1874. static inline void f2fs_unlock_all(struct f2fs_sb_info *sbi)
  1875. {
  1876. f2fs_up_write(&sbi->cp_rwsem);
  1877. }
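/*
 * Usage sketch (illustrative): regular filesystem operations take
 * cp_rwsem as readers around block/node updates, while checkpoint takes
 * it as a writer via f2fs_lock_all(), so a checkpoint never observes a
 * half-done operation.
 */
static inline void example_locked_update(struct f2fs_sb_info *sbi)
{
	f2fs_lock_op(sbi);
	/* ... allocate blocks and dirty node pages here ... */
	f2fs_unlock_op(sbi);
}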
  1878. static inline int __get_cp_reason(struct f2fs_sb_info *sbi)
  1879. {
  1880. int reason = CP_SYNC;
  1881. if (test_opt(sbi, FASTBOOT))
  1882. reason = CP_FASTBOOT;
  1883. if (is_sbi_flag_set(sbi, SBI_IS_CLOSE))
  1884. reason = CP_UMOUNT;
  1885. return reason;
  1886. }
  1887. static inline bool __remain_node_summaries(int reason)
  1888. {
  1889. return (reason & (CP_UMOUNT | CP_FASTBOOT));
  1890. }
  1891. static inline bool __exist_node_summaries(struct f2fs_sb_info *sbi)
  1892. {
  1893. return (is_set_ckpt_flags(sbi, CP_UMOUNT_FLAG) ||
  1894. is_set_ckpt_flags(sbi, CP_FASTBOOT_FLAG));
  1895. }
  1896. /*
  1897. * Check whether the inode has blocks or not
  1898. */
  1899. static inline int F2FS_HAS_BLOCKS(struct inode *inode)
  1900. {
  1901. block_t xattr_block = F2FS_I(inode)->i_xattr_nid ? 1 : 0;
  1902. return (inode->i_blocks >> F2FS_LOG_SECTORS_PER_BLOCK) > xattr_block;
  1903. }
  1904. static inline bool f2fs_has_xattr_block(unsigned int ofs)
  1905. {
  1906. return ofs == XATTR_NODE_OFFSET;
  1907. }
  1908. static inline bool __allow_reserved_blocks(struct f2fs_sb_info *sbi,
  1909. struct inode *inode, bool cap)
  1910. {
  1911. if (!inode)
  1912. return true;
  1913. if (!test_opt(sbi, RESERVE_ROOT))
  1914. return false;
  1915. if (IS_NOQUOTA(inode))
  1916. return true;
  1917. if (uid_eq(F2FS_OPTION(sbi).s_resuid, current_fsuid()))
  1918. return true;
  1919. if (!gid_eq(F2FS_OPTION(sbi).s_resgid, GLOBAL_ROOT_GID) &&
  1920. in_group_p(F2FS_OPTION(sbi).s_resgid))
  1921. return true;
  1922. if (cap && capable(CAP_SYS_RESOURCE))
  1923. return true;
  1924. return false;
  1925. }
  1926. static inline void f2fs_i_blocks_write(struct inode *, block_t, bool, bool);
  1927. static inline int inc_valid_block_count(struct f2fs_sb_info *sbi,
  1928. struct inode *inode, blkcnt_t *count, bool partial)
  1929. {
  1930. blkcnt_t diff = 0, release = 0;
  1931. block_t avail_user_block_count;
  1932. int ret;
  1933. ret = dquot_reserve_block(inode, *count);
  1934. if (ret)
  1935. return ret;
  1936. if (time_to_inject(sbi, FAULT_BLOCK)) {
  1937. release = *count;
  1938. goto release_quota;
  1939. }
  1940. /*
1941. * let's increase this prior to the actual block count change in order
  1942. * for f2fs_sync_file to avoid data races when deciding checkpoint.
  1943. */
  1944. percpu_counter_add(&sbi->alloc_valid_block_count, (*count));
  1945. spin_lock(&sbi->stat_lock);
  1946. sbi->total_valid_block_count += (block_t)(*count);
  1947. avail_user_block_count = sbi->user_block_count -
  1948. sbi->current_reserved_blocks;
  1949. if (!__allow_reserved_blocks(sbi, inode, true))
  1950. avail_user_block_count -= F2FS_OPTION(sbi).root_reserved_blocks;
  1951. if (F2FS_IO_ALIGNED(sbi))
  1952. avail_user_block_count -= sbi->blocks_per_seg *
  1953. SM_I(sbi)->additional_reserved_segments;
  1954. if (unlikely(is_sbi_flag_set(sbi, SBI_CP_DISABLED))) {
  1955. if (avail_user_block_count > sbi->unusable_block_count)
  1956. avail_user_block_count -= sbi->unusable_block_count;
  1957. else
  1958. avail_user_block_count = 0;
  1959. }
  1960. if (unlikely(sbi->total_valid_block_count > avail_user_block_count)) {
  1961. if (!partial) {
  1962. spin_unlock(&sbi->stat_lock);
  1963. goto enospc;
  1964. }
  1965. diff = sbi->total_valid_block_count - avail_user_block_count;
  1966. if (diff > *count)
  1967. diff = *count;
  1968. *count -= diff;
  1969. release = diff;
  1970. sbi->total_valid_block_count -= diff;
  1971. if (!*count) {
  1972. spin_unlock(&sbi->stat_lock);
  1973. goto enospc;
  1974. }
  1975. }
  1976. spin_unlock(&sbi->stat_lock);
  1977. if (unlikely(release)) {
  1978. percpu_counter_sub(&sbi->alloc_valid_block_count, release);
  1979. dquot_release_reservation_block(inode, release);
  1980. }
  1981. f2fs_i_blocks_write(inode, *count, true, true);
  1982. return 0;
  1983. enospc:
  1984. percpu_counter_sub(&sbi->alloc_valid_block_count, release);
  1985. release_quota:
  1986. dquot_release_reservation_block(inode, release);
  1987. return -ENOSPC;
  1988. }
  1989. __printf(2, 3)
  1990. void f2fs_printk(struct f2fs_sb_info *sbi, const char *fmt, ...);
  1991. #define f2fs_err(sbi, fmt, ...) \
  1992. f2fs_printk(sbi, KERN_ERR fmt, ##__VA_ARGS__)
  1993. #define f2fs_warn(sbi, fmt, ...) \
  1994. f2fs_printk(sbi, KERN_WARNING fmt, ##__VA_ARGS__)
  1995. #define f2fs_notice(sbi, fmt, ...) \
  1996. f2fs_printk(sbi, KERN_NOTICE fmt, ##__VA_ARGS__)
  1997. #define f2fs_info(sbi, fmt, ...) \
  1998. f2fs_printk(sbi, KERN_INFO fmt, ##__VA_ARGS__)
  1999. #define f2fs_debug(sbi, fmt, ...) \
  2000. f2fs_printk(sbi, KERN_DEBUG fmt, ##__VA_ARGS__)
  2001. #define PAGE_PRIVATE_GET_FUNC(name, flagname) \
  2002. static inline bool page_private_##name(struct page *page) \
  2003. { \
  2004. return PagePrivate(page) && \
  2005. test_bit(PAGE_PRIVATE_NOT_POINTER, &page_private(page)) && \
  2006. test_bit(PAGE_PRIVATE_##flagname, &page_private(page)); \
  2007. }
  2008. #define PAGE_PRIVATE_SET_FUNC(name, flagname) \
  2009. static inline void set_page_private_##name(struct page *page) \
  2010. { \
  2011. if (!PagePrivate(page)) \
  2012. attach_page_private(page, (void *)0); \
  2013. set_bit(PAGE_PRIVATE_NOT_POINTER, &page_private(page)); \
  2014. set_bit(PAGE_PRIVATE_##flagname, &page_private(page)); \
  2015. }
  2016. #define PAGE_PRIVATE_CLEAR_FUNC(name, flagname) \
  2017. static inline void clear_page_private_##name(struct page *page) \
  2018. { \
  2019. clear_bit(PAGE_PRIVATE_##flagname, &page_private(page)); \
  2020. if (page_private(page) == BIT(PAGE_PRIVATE_NOT_POINTER)) \
  2021. detach_page_private(page); \
  2022. }
  2023. PAGE_PRIVATE_GET_FUNC(nonpointer, NOT_POINTER);
  2024. PAGE_PRIVATE_GET_FUNC(inline, INLINE_INODE);
  2025. PAGE_PRIVATE_GET_FUNC(gcing, ONGOING_MIGRATION);
  2026. PAGE_PRIVATE_GET_FUNC(dummy, DUMMY_WRITE);
  2027. PAGE_PRIVATE_SET_FUNC(reference, REF_RESOURCE);
  2028. PAGE_PRIVATE_SET_FUNC(inline, INLINE_INODE);
  2029. PAGE_PRIVATE_SET_FUNC(gcing, ONGOING_MIGRATION);
  2030. PAGE_PRIVATE_SET_FUNC(dummy, DUMMY_WRITE);
  2031. PAGE_PRIVATE_CLEAR_FUNC(reference, REF_RESOURCE);
  2032. PAGE_PRIVATE_CLEAR_FUNC(inline, INLINE_INODE);
  2033. PAGE_PRIVATE_CLEAR_FUNC(gcing, ONGOING_MIGRATION);
  2034. PAGE_PRIVATE_CLEAR_FUNC(dummy, DUMMY_WRITE);
  2035. static inline unsigned long get_page_private_data(struct page *page)
  2036. {
  2037. unsigned long data = page_private(page);
  2038. if (!test_bit(PAGE_PRIVATE_NOT_POINTER, &data))
  2039. return 0;
  2040. return data >> PAGE_PRIVATE_MAX;
  2041. }
  2042. static inline void set_page_private_data(struct page *page, unsigned long data)
  2043. {
  2044. if (!PagePrivate(page))
  2045. attach_page_private(page, (void *)0);
  2046. set_bit(PAGE_PRIVATE_NOT_POINTER, &page_private(page));
  2047. page_private(page) |= data << PAGE_PRIVATE_MAX;
  2048. }
  2049. static inline void clear_page_private_data(struct page *page)
  2050. {
  2051. page_private(page) &= GENMASK(PAGE_PRIVATE_MAX - 1, 0);
  2052. if (page_private(page) == BIT(PAGE_PRIVATE_NOT_POINTER))
  2053. detach_page_private(page);
  2054. }
  2055. static inline void clear_page_private_all(struct page *page)
  2056. {
  2057. clear_page_private_data(page);
  2058. clear_page_private_reference(page);
  2059. clear_page_private_gcing(page);
  2060. clear_page_private_inline(page);
  2061. f2fs_bug_on(F2FS_P_SB(page), page_private(page));
  2062. }
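/*
 * Worked example (illustrative): a round-trip through the accessors above.
 *
 *	set_page_private_data(page, 7);	// private = 1 | (7 << PAGE_PRIVATE_MAX)
 *	get_page_private_data(page);	// == 7
 *	clear_page_private_data(page);	// detaches private if no flags remain
 */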
  2063. static inline void dec_valid_block_count(struct f2fs_sb_info *sbi,
  2064. struct inode *inode,
  2065. block_t count)
  2066. {
  2067. blkcnt_t sectors = count << F2FS_LOG_SECTORS_PER_BLOCK;
  2068. spin_lock(&sbi->stat_lock);
  2069. f2fs_bug_on(sbi, sbi->total_valid_block_count < (block_t) count);
  2070. sbi->total_valid_block_count -= (block_t)count;
  2071. if (sbi->reserved_blocks &&
  2072. sbi->current_reserved_blocks < sbi->reserved_blocks)
  2073. sbi->current_reserved_blocks = min(sbi->reserved_blocks,
  2074. sbi->current_reserved_blocks + count);
  2075. spin_unlock(&sbi->stat_lock);
  2076. if (unlikely(inode->i_blocks < sectors)) {
  2077. f2fs_warn(sbi, "Inconsistent i_blocks, ino:%lu, iblocks:%llu, sectors:%llu",
  2078. inode->i_ino,
  2079. (unsigned long long)inode->i_blocks,
  2080. (unsigned long long)sectors);
  2081. set_sbi_flag(sbi, SBI_NEED_FSCK);
  2082. return;
  2083. }
  2084. f2fs_i_blocks_write(inode, count, false, true);
  2085. }
  2086. static inline void inc_page_count(struct f2fs_sb_info *sbi, int count_type)
  2087. {
  2088. atomic_inc(&sbi->nr_pages[count_type]);
  2089. if (count_type == F2FS_DIRTY_DENTS ||
  2090. count_type == F2FS_DIRTY_NODES ||
  2091. count_type == F2FS_DIRTY_META ||
  2092. count_type == F2FS_DIRTY_QDATA ||
  2093. count_type == F2FS_DIRTY_IMETA)
  2094. set_sbi_flag(sbi, SBI_IS_DIRTY);
  2095. }
  2096. static inline void inode_inc_dirty_pages(struct inode *inode)
  2097. {
  2098. atomic_inc(&F2FS_I(inode)->dirty_pages);
  2099. inc_page_count(F2FS_I_SB(inode), S_ISDIR(inode->i_mode) ?
  2100. F2FS_DIRTY_DENTS : F2FS_DIRTY_DATA);
  2101. if (IS_NOQUOTA(inode))
  2102. inc_page_count(F2FS_I_SB(inode), F2FS_DIRTY_QDATA);
  2103. }
  2104. static inline void dec_page_count(struct f2fs_sb_info *sbi, int count_type)
  2105. {
  2106. atomic_dec(&sbi->nr_pages[count_type]);
  2107. }
  2108. static inline void inode_dec_dirty_pages(struct inode *inode)
  2109. {
  2110. if (!S_ISDIR(inode->i_mode) && !S_ISREG(inode->i_mode) &&
  2111. !S_ISLNK(inode->i_mode))
  2112. return;
  2113. atomic_dec(&F2FS_I(inode)->dirty_pages);
  2114. dec_page_count(F2FS_I_SB(inode), S_ISDIR(inode->i_mode) ?
  2115. F2FS_DIRTY_DENTS : F2FS_DIRTY_DATA);
  2116. if (IS_NOQUOTA(inode))
  2117. dec_page_count(F2FS_I_SB(inode), F2FS_DIRTY_QDATA);
  2118. }
  2119. static inline void inc_atomic_write_cnt(struct inode *inode)
  2120. {
  2121. struct f2fs_sb_info *sbi = F2FS_I_SB(inode);
  2122. struct f2fs_inode_info *fi = F2FS_I(inode);
  2123. u64 current_write;
  2124. fi->atomic_write_cnt++;
  2125. atomic64_inc(&sbi->current_atomic_write);
  2126. current_write = atomic64_read(&sbi->current_atomic_write);
  2127. if (current_write > sbi->peak_atomic_write)
  2128. sbi->peak_atomic_write = current_write;
  2129. }
  2130. static inline void release_atomic_write_cnt(struct inode *inode)
  2131. {
  2132. struct f2fs_sb_info *sbi = F2FS_I_SB(inode);
  2133. struct f2fs_inode_info *fi = F2FS_I(inode);
  2134. atomic64_sub(fi->atomic_write_cnt, &sbi->current_atomic_write);
  2135. fi->atomic_write_cnt = 0;
  2136. }
  2137. static inline s64 get_pages(struct f2fs_sb_info *sbi, int count_type)
  2138. {
  2139. return atomic_read(&sbi->nr_pages[count_type]);
  2140. }
  2141. static inline int get_dirty_pages(struct inode *inode)
  2142. {
  2143. return atomic_read(&F2FS_I(inode)->dirty_pages);
  2144. }
  2145. static inline int get_blocktype_secs(struct f2fs_sb_info *sbi, int block_type)
  2146. {
  2147. unsigned int pages_per_sec = sbi->segs_per_sec * sbi->blocks_per_seg;
  2148. unsigned int segs = (get_pages(sbi, block_type) + pages_per_sec - 1) >>
  2149. sbi->log_blocks_per_seg;
  2150. return segs / sbi->segs_per_sec;
  2151. }
  2152. static inline block_t valid_user_blocks(struct f2fs_sb_info *sbi)
  2153. {
  2154. return sbi->total_valid_block_count;
  2155. }
  2156. static inline block_t discard_blocks(struct f2fs_sb_info *sbi)
  2157. {
  2158. return sbi->discard_blks;
  2159. }
  2160. static inline unsigned long __bitmap_size(struct f2fs_sb_info *sbi, int flag)
  2161. {
  2162. struct f2fs_checkpoint *ckpt = F2FS_CKPT(sbi);
  2163. /* return NAT or SIT bitmap */
  2164. if (flag == NAT_BITMAP)
  2165. return le32_to_cpu(ckpt->nat_ver_bitmap_bytesize);
  2166. else if (flag == SIT_BITMAP)
  2167. return le32_to_cpu(ckpt->sit_ver_bitmap_bytesize);
  2168. return 0;
  2169. }
  2170. static inline block_t __cp_payload(struct f2fs_sb_info *sbi)
  2171. {
  2172. return le32_to_cpu(F2FS_RAW_SUPER(sbi)->cp_payload);
  2173. }
  2174. static inline void *__bitmap_ptr(struct f2fs_sb_info *sbi, int flag)
  2175. {
  2176. struct f2fs_checkpoint *ckpt = F2FS_CKPT(sbi);
  2177. void *tmp_ptr = &ckpt->sit_nat_version_bitmap;
  2178. int offset;
  2179. if (is_set_ckpt_flags(sbi, CP_LARGE_NAT_BITMAP_FLAG)) {
  2180. offset = (flag == SIT_BITMAP) ?
  2181. le32_to_cpu(ckpt->nat_ver_bitmap_bytesize) : 0;
  2182. /*
  2183. * if large_nat_bitmap feature is enabled, leave checksum
  2184. * protection for all nat/sit bitmaps.
  2185. */
  2186. return tmp_ptr + offset + sizeof(__le32);
  2187. }
  2188. if (__cp_payload(sbi) > 0) {
  2189. if (flag == NAT_BITMAP)
  2190. return tmp_ptr;
  2191. else
  2192. return (unsigned char *)ckpt + F2FS_BLKSIZE;
  2193. } else {
  2194. offset = (flag == NAT_BITMAP) ?
  2195. le32_to_cpu(ckpt->sit_ver_bitmap_bytesize) : 0;
  2196. return tmp_ptr + offset;
  2197. }
  2198. }
static inline block_t __start_cp_addr(struct f2fs_sb_info *sbi)
{
	block_t start_addr = le32_to_cpu(F2FS_RAW_SUPER(sbi)->cp_blkaddr);

	if (sbi->cur_cp_pack == 2)
		start_addr += sbi->blocks_per_seg;
	return start_addr;
}

static inline block_t __start_cp_next_addr(struct f2fs_sb_info *sbi)
{
	block_t start_addr = le32_to_cpu(F2FS_RAW_SUPER(sbi)->cp_blkaddr);

	if (sbi->cur_cp_pack == 1)
		start_addr += sbi->blocks_per_seg;
	return start_addr;
}

static inline void __set_cp_next_pack(struct f2fs_sb_info *sbi)
{
	sbi->cur_cp_pack = (sbi->cur_cp_pack == 1) ? 2 : 1;
}
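/*
 * Example, for illustration: the two checkpoint packs alternate.  If pack
 * #1 (at cp_blkaddr) holds the live checkpoint, __start_cp_next_addr()
 * points at pack #2 (cp_blkaddr + blocks_per_seg); once that pack commits,
 * __set_cp_next_pack() flips cur_cp_pack and the two helpers swap roles.
 */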
static inline block_t __start_sum_addr(struct f2fs_sb_info *sbi)
{
	return le32_to_cpu(F2FS_CKPT(sbi)->cp_pack_start_sum);
}

extern void f2fs_mark_inode_dirty_sync(struct inode *inode, bool sync);
static inline int inc_valid_node_count(struct f2fs_sb_info *sbi,
					struct inode *inode, bool is_inode)
{
	block_t valid_block_count;
	unsigned int valid_node_count, user_block_count;
	int err;

	if (is_inode) {
		if (inode) {
			err = dquot_alloc_inode(inode);
			if (err)
				return err;
		}
	} else {
		err = dquot_reserve_block(inode, 1);
		if (err)
			return err;
	}

	if (time_to_inject(sbi, FAULT_BLOCK))
		goto enospc;

	spin_lock(&sbi->stat_lock);

	valid_block_count = sbi->total_valid_block_count +
					sbi->current_reserved_blocks + 1;

	if (!__allow_reserved_blocks(sbi, inode, false))
		valid_block_count += F2FS_OPTION(sbi).root_reserved_blocks;

	if (F2FS_IO_ALIGNED(sbi))
		valid_block_count += sbi->blocks_per_seg *
				SM_I(sbi)->additional_reserved_segments;

	user_block_count = sbi->user_block_count;
	if (unlikely(is_sbi_flag_set(sbi, SBI_CP_DISABLED)))
		user_block_count -= sbi->unusable_block_count;

	if (unlikely(valid_block_count > user_block_count)) {
		spin_unlock(&sbi->stat_lock);
		goto enospc;
	}

	valid_node_count = sbi->total_valid_node_count + 1;
	if (unlikely(valid_node_count > sbi->total_node_count)) {
		spin_unlock(&sbi->stat_lock);
		goto enospc;
	}

	sbi->total_valid_node_count++;
	sbi->total_valid_block_count++;
	spin_unlock(&sbi->stat_lock);

	if (inode) {
		if (is_inode)
			f2fs_mark_inode_dirty_sync(inode, true);
		else
			f2fs_i_blocks_write(inode, 1, true, true);
	}

	percpu_counter_inc(&sbi->alloc_valid_block_count);
	return 0;

enospc:
	if (is_inode) {
		if (inode)
			dquot_free_inode(inode);
	} else {
		dquot_release_reservation_block(inode, 1);
	}
	return -ENOSPC;
}
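/*
 * Worked example with assumed numbers: for a non-privileged inode with
 * user_block_count = 1000, total_valid_block_count = 990,
 * current_reserved_blocks = 5 and root_reserved_blocks = 10, the candidate
 * count is 990 + 5 + 1 + 10 = 1006 > 1000, so the call fails with -ENOSPC
 * even though blocks nominally remain free; the quota charge taken above
 * is rolled back on the enospc path.
 */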
static inline void dec_valid_node_count(struct f2fs_sb_info *sbi,
					struct inode *inode, bool is_inode)
{
	spin_lock(&sbi->stat_lock);

	if (unlikely(!sbi->total_valid_block_count ||
			!sbi->total_valid_node_count)) {
		f2fs_warn(sbi, "dec_valid_node_count: inconsistent block counts, total_valid_block:%u, total_valid_node:%u",
			  sbi->total_valid_block_count,
			  sbi->total_valid_node_count);
		set_sbi_flag(sbi, SBI_NEED_FSCK);
	} else {
		sbi->total_valid_block_count--;
		sbi->total_valid_node_count--;
	}

	if (sbi->reserved_blocks &&
		sbi->current_reserved_blocks < sbi->reserved_blocks)
		sbi->current_reserved_blocks++;

	spin_unlock(&sbi->stat_lock);

	if (is_inode) {
		dquot_free_inode(inode);
	} else {
		if (unlikely(inode->i_blocks == 0)) {
			f2fs_warn(sbi, "dec_valid_node_count: inconsistent i_blocks, ino:%lu, iblocks:%llu",
				  inode->i_ino,
				  (unsigned long long)inode->i_blocks);
			set_sbi_flag(sbi, SBI_NEED_FSCK);
			return;
		}
		f2fs_i_blocks_write(inode, 1, false, true);
	}
}

static inline unsigned int valid_node_count(struct f2fs_sb_info *sbi)
{
	return sbi->total_valid_node_count;
}

static inline void inc_valid_inode_count(struct f2fs_sb_info *sbi)
{
	percpu_counter_inc(&sbi->total_valid_inode_count);
}

static inline void dec_valid_inode_count(struct f2fs_sb_info *sbi)
{
	percpu_counter_dec(&sbi->total_valid_inode_count);
}

static inline s64 valid_inode_count(struct f2fs_sb_info *sbi)
{
	return percpu_counter_sum_positive(&sbi->total_valid_inode_count);
}

static inline struct page *f2fs_grab_cache_page(struct address_space *mapping,
						pgoff_t index, bool for_write)
{
	struct page *page;
	unsigned int flags;

	if (IS_ENABLED(CONFIG_F2FS_FAULT_INJECTION)) {
		if (!for_write)
			page = find_get_page_flags(mapping, index,
							FGP_LOCK | FGP_ACCESSED);
		else
			page = find_lock_page(mapping, index);
		if (page)
			return page;

		if (time_to_inject(F2FS_M_SB(mapping), FAULT_PAGE_ALLOC))
			return NULL;
	}

	if (!for_write)
		return grab_cache_page(mapping, index);

	flags = memalloc_nofs_save();
	page = grab_cache_page_write_begin(mapping, index);
	memalloc_nofs_restore(flags);

	return page;
}

static inline struct page *f2fs_pagecache_get_page(
				struct address_space *mapping, pgoff_t index,
				int fgp_flags, gfp_t gfp_mask)
{
	if (time_to_inject(F2FS_M_SB(mapping), FAULT_PAGE_GET))
		return NULL;

	return pagecache_get_page(mapping, index, fgp_flags, gfp_mask);
}

static inline void f2fs_put_page(struct page *page, int unlock)
{
	if (!page)
		return;

	if (unlock) {
		f2fs_bug_on(F2FS_P_SB(page), !PageLocked(page));
		unlock_page(page);
	}
	put_page(page);
}

static inline void f2fs_put_dnode(struct dnode_of_data *dn)
{
	if (dn->node_page)
		f2fs_put_page(dn->node_page, 1);
	if (dn->inode_page && dn->node_page != dn->inode_page)
		f2fs_put_page(dn->inode_page, 0);
	dn->node_page = NULL;
	dn->inode_page = NULL;
}
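/*
 * Typical usage, sketched under the usual get/put pairing (see
 * f2fs_get_dnode_of_data() in node.c):
 *
 *	struct dnode_of_data dn;
 *	int err;
 *
 *	set_new_dnode(&dn, inode, NULL, NULL, 0);
 *	err = f2fs_get_dnode_of_data(&dn, index, LOOKUP_NODE);
 *	if (err)
 *		return err;
 *	... use dn.data_blkaddr / dn.ofs_in_node ...
 *	f2fs_put_dnode(&dn);
 *
 * When the dnode is the inode itself, node_page == inode_page, which is
 * why only one of the two pages is put above.
 */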
static inline struct kmem_cache *f2fs_kmem_cache_create(const char *name,
					size_t size)
{
	return kmem_cache_create(name, size, 0, SLAB_RECLAIM_ACCOUNT, NULL);
}

static inline void *f2fs_kmem_cache_alloc_nofail(struct kmem_cache *cachep,
						gfp_t flags)
{
	void *entry;

	entry = kmem_cache_alloc(cachep, flags);
	if (!entry)
		entry = kmem_cache_alloc(cachep, flags | __GFP_NOFAIL);
	return entry;
}

static inline void *f2fs_kmem_cache_alloc(struct kmem_cache *cachep,
			gfp_t flags, bool nofail, struct f2fs_sb_info *sbi)
{
	if (nofail)
		return f2fs_kmem_cache_alloc_nofail(cachep, flags);

	if (time_to_inject(sbi, FAULT_SLAB_ALLOC))
		return NULL;

	return kmem_cache_alloc(cachep, flags);
}

static inline bool is_inflight_io(struct f2fs_sb_info *sbi, int type)
{
	if (get_pages(sbi, F2FS_RD_DATA) || get_pages(sbi, F2FS_RD_NODE) ||
		get_pages(sbi, F2FS_RD_META) || get_pages(sbi, F2FS_WB_DATA) ||
		get_pages(sbi, F2FS_WB_CP_DATA) ||
		get_pages(sbi, F2FS_DIO_READ) ||
		get_pages(sbi, F2FS_DIO_WRITE))
		return true;

	if (type != DISCARD_TIME && SM_I(sbi) && SM_I(sbi)->dcc_info &&
			atomic_read(&SM_I(sbi)->dcc_info->queued_discard))
		return true;

	if (SM_I(sbi) && SM_I(sbi)->fcc_info &&
			atomic_read(&SM_I(sbi)->fcc_info->queued_flush))
		return true;
	return false;
}

static inline bool is_idle(struct f2fs_sb_info *sbi, int type)
{
	if (sbi->gc_mode == GC_URGENT_HIGH)
		return true;

	if (is_inflight_io(sbi, type))
		return false;

	if (sbi->gc_mode == GC_URGENT_MID)
		return true;

	if (sbi->gc_mode == GC_URGENT_LOW &&
			(type == DISCARD_TIME || type == GC_TIME))
		return true;

	return f2fs_time_over(sbi, type);
}

static inline void f2fs_radix_tree_insert(struct radix_tree_root *root,
				unsigned long index, void *item)
{
	while (radix_tree_insert(root, index, item))
		cond_resched();
}

#define RAW_IS_INODE(p)	((p)->footer.nid == (p)->footer.ino)

static inline bool IS_INODE(struct page *page)
{
	struct f2fs_node *p = F2FS_NODE(page);

	return RAW_IS_INODE(p);
}

static inline int offset_in_addr(struct f2fs_inode *i)
{
	return (i->i_inline & F2FS_EXTRA_ATTR) ?
			(le16_to_cpu(i->i_extra_isize) / sizeof(__le32)) : 0;
}

static inline __le32 *blkaddr_in_node(struct f2fs_node *node)
{
	return RAW_IS_INODE(node) ? node->i.i_addr : node->dn.addr;
}

static inline int f2fs_has_extra_attr(struct inode *inode);
static inline block_t data_blkaddr(struct inode *inode,
			struct page *node_page, unsigned int offset)
{
	struct f2fs_node *raw_node;
	__le32 *addr_array;
	int base = 0;
	bool is_inode = IS_INODE(node_page);

	raw_node = F2FS_NODE(node_page);

	if (is_inode) {
		if (!inode)
			/* from GC path only */
			base = offset_in_addr(&raw_node->i);
		else if (f2fs_has_extra_attr(inode))
			base = get_extra_isize(inode);
	}

	addr_array = blkaddr_in_node(raw_node);
	return le32_to_cpu(addr_array[base + offset]);
}
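/*
 * Example, for illustration: with the extra-attribute area enabled, the
 * first data pointer does not live at i_addr[0]; "base" skips
 * i_extra_isize bytes expressed in __le32 slots.  E.g. an i_extra_isize of
 * 36 bytes gives base = 36 / 4 = 9, so logical block 0 is found at
 * i_addr[9].
 */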
static inline block_t f2fs_data_blkaddr(struct dnode_of_data *dn)
{
	return data_blkaddr(dn->inode, dn->node_page, dn->ofs_in_node);
}

static inline int f2fs_test_bit(unsigned int nr, char *addr)
{
	int mask;

	addr += (nr >> 3);
	mask = BIT(7 - (nr & 0x07));
	return mask & *addr;
}

static inline void f2fs_set_bit(unsigned int nr, char *addr)
{
	int mask;

	addr += (nr >> 3);
	mask = BIT(7 - (nr & 0x07));
	*addr |= mask;
}

static inline void f2fs_clear_bit(unsigned int nr, char *addr)
{
	int mask;

	addr += (nr >> 3);
	mask = BIT(7 - (nr & 0x07));
	*addr &= ~mask;
}

static inline int f2fs_test_and_set_bit(unsigned int nr, char *addr)
{
	int mask;
	int ret;

	addr += (nr >> 3);
	mask = BIT(7 - (nr & 0x07));
	ret = mask & *addr;
	*addr |= mask;
	return ret;
}

static inline int f2fs_test_and_clear_bit(unsigned int nr, char *addr)
{
	int mask;
	int ret;

	addr += (nr >> 3);
	mask = BIT(7 - (nr & 0x07));
	ret = mask & *addr;
	*addr &= ~mask;
	return ret;
}

static inline void f2fs_change_bit(unsigned int nr, char *addr)
{
	int mask;

	addr += (nr >> 3);
	mask = BIT(7 - (nr & 0x07));
	*addr ^= mask;
}
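/*
 * Example, for illustration: unlike the generic set_bit() family, these
 * helpers number bits from the *most* significant bit of each byte
 * (mask = BIT(7 - (nr & 7))), matching the on-disk bitmap layout:
 *
 *	char map[2] = { 0, 0 };
 *
 *	f2fs_set_bit(0, map);			// map[0] == 0x80, not 0x01
 *	f2fs_set_bit(8, map);			// map[1] == 0x80
 *	f2fs_test_and_clear_bit(0, map);	// non-zero, then clears it
 */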
/*
 * On-disk inode flags (f2fs_inode::i_flags)
 */
#define F2FS_COMPR_FL			0x00000004 /* Compress file */
#define F2FS_SYNC_FL			0x00000008 /* Synchronous updates */
#define F2FS_IMMUTABLE_FL		0x00000010 /* Immutable file */
#define F2FS_APPEND_FL			0x00000020 /* writes to file may only append */
#define F2FS_NODUMP_FL			0x00000040 /* do not dump file */
#define F2FS_NOATIME_FL			0x00000080 /* do not update atime */
#define F2FS_NOCOMP_FL			0x00000400 /* Don't compress */
#define F2FS_INDEX_FL			0x00001000 /* hash-indexed directory */
#define F2FS_DIRSYNC_FL			0x00010000 /* dirsync behaviour (directories only) */
#define F2FS_PROJINHERIT_FL		0x20000000 /* Create with parents projid */
#define F2FS_CASEFOLD_FL		0x40000000 /* Casefolded file */

/* Flags that should be inherited by new inodes from their parent. */
#define F2FS_FL_INHERITED (F2FS_SYNC_FL | F2FS_NODUMP_FL | F2FS_NOATIME_FL | \
			   F2FS_DIRSYNC_FL | F2FS_PROJINHERIT_FL | \
			   F2FS_CASEFOLD_FL)

/* Flags that are appropriate for regular files (all but dir-specific ones). */
#define F2FS_REG_FLMASK		(~(F2FS_DIRSYNC_FL | F2FS_PROJINHERIT_FL | \
				F2FS_CASEFOLD_FL))

/* Flags that are appropriate for non-directories/regular files. */
#define F2FS_OTHER_FLMASK	(F2FS_NODUMP_FL | F2FS_NOATIME_FL)

static inline __u32 f2fs_mask_flags(umode_t mode, __u32 flags)
{
	if (S_ISDIR(mode))
		return flags;
	else if (S_ISREG(mode))
		return flags & F2FS_REG_FLMASK;
	else
		return flags & F2FS_OTHER_FLMASK;
}
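/*
 * Example, for illustration: f2fs_mask_flags(S_IFREG, F2FS_DIRSYNC_FL)
 * returns 0, since F2FS_REG_FLMASK strips the directory-only flags; for a
 * symlink or device node only F2FS_NODUMP_FL and F2FS_NOATIME_FL survive
 * F2FS_OTHER_FLMASK.
 */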
static inline void __mark_inode_dirty_flag(struct inode *inode,
						int flag, bool set)
{
	switch (flag) {
	case FI_INLINE_XATTR:
	case FI_INLINE_DATA:
	case FI_INLINE_DENTRY:
	case FI_NEW_INODE:
		if (set)
			return;
		fallthrough;
	case FI_DATA_EXIST:
	case FI_INLINE_DOTS:
	case FI_PIN_FILE:
	case FI_COMPRESS_RELEASED:
		f2fs_mark_inode_dirty_sync(inode, true);
	}
}

static inline void set_inode_flag(struct inode *inode, int flag)
{
	set_bit(flag, F2FS_I(inode)->flags);
	__mark_inode_dirty_flag(inode, flag, true);
}

static inline int is_inode_flag_set(struct inode *inode, int flag)
{
	return test_bit(flag, F2FS_I(inode)->flags);
}

static inline void clear_inode_flag(struct inode *inode, int flag)
{
	clear_bit(flag, F2FS_I(inode)->flags);
	__mark_inode_dirty_flag(inode, flag, false);
}

static inline bool f2fs_verity_in_progress(struct inode *inode)
{
	return IS_ENABLED(CONFIG_FS_VERITY) &&
	       is_inode_flag_set(inode, FI_VERITY_IN_PROGRESS);
}

static inline void set_acl_inode(struct inode *inode, umode_t mode)
{
	F2FS_I(inode)->i_acl_mode = mode;
	set_inode_flag(inode, FI_ACL_MODE);
	f2fs_mark_inode_dirty_sync(inode, false);
}

static inline void f2fs_i_links_write(struct inode *inode, bool inc)
{
	if (inc)
		inc_nlink(inode);
	else
		drop_nlink(inode);
	f2fs_mark_inode_dirty_sync(inode, true);
}

static inline void f2fs_i_blocks_write(struct inode *inode,
					block_t diff, bool add, bool claim)
{
	bool clean = !is_inode_flag_set(inode, FI_DIRTY_INODE);
	bool recover = is_inode_flag_set(inode, FI_AUTO_RECOVER);

	/* add = 1, claim = 1 should be dquot_reserve_block in pair */
	if (add) {
		if (claim)
			dquot_claim_block(inode, diff);
		else
			dquot_alloc_block_nofail(inode, diff);
	} else {
		dquot_free_block(inode, diff);
	}

	f2fs_mark_inode_dirty_sync(inode, true);
	if (clean || recover)
		set_inode_flag(inode, FI_AUTO_RECOVER);
}

static inline bool f2fs_is_atomic_file(struct inode *inode);

static inline void f2fs_i_size_write(struct inode *inode, loff_t i_size)
{
	bool clean = !is_inode_flag_set(inode, FI_DIRTY_INODE);
	bool recover = is_inode_flag_set(inode, FI_AUTO_RECOVER);

	if (i_size_read(inode) == i_size)
		return;

	i_size_write(inode, i_size);

	if (f2fs_is_atomic_file(inode))
		return;

	f2fs_mark_inode_dirty_sync(inode, true);
	if (clean || recover)
		set_inode_flag(inode, FI_AUTO_RECOVER);
}

static inline void f2fs_i_depth_write(struct inode *inode, unsigned int depth)
{
	F2FS_I(inode)->i_current_depth = depth;
	f2fs_mark_inode_dirty_sync(inode, true);
}

static inline void f2fs_i_gc_failures_write(struct inode *inode,
					unsigned int count)
{
	F2FS_I(inode)->i_gc_failures[GC_FAILURE_PIN] = count;
	f2fs_mark_inode_dirty_sync(inode, true);
}

static inline void f2fs_i_xnid_write(struct inode *inode, nid_t xnid)
{
	F2FS_I(inode)->i_xattr_nid = xnid;
	f2fs_mark_inode_dirty_sync(inode, true);
}

static inline void f2fs_i_pino_write(struct inode *inode, nid_t pino)
{
	F2FS_I(inode)->i_pino = pino;
	f2fs_mark_inode_dirty_sync(inode, true);
}

static inline void get_inline_info(struct inode *inode, struct f2fs_inode *ri)
{
	struct f2fs_inode_info *fi = F2FS_I(inode);

	if (ri->i_inline & F2FS_INLINE_XATTR)
		set_bit(FI_INLINE_XATTR, fi->flags);
	if (ri->i_inline & F2FS_INLINE_DATA)
		set_bit(FI_INLINE_DATA, fi->flags);
	if (ri->i_inline & F2FS_INLINE_DENTRY)
		set_bit(FI_INLINE_DENTRY, fi->flags);
	if (ri->i_inline & F2FS_DATA_EXIST)
		set_bit(FI_DATA_EXIST, fi->flags);
	if (ri->i_inline & F2FS_INLINE_DOTS)
		set_bit(FI_INLINE_DOTS, fi->flags);
	if (ri->i_inline & F2FS_EXTRA_ATTR)
		set_bit(FI_EXTRA_ATTR, fi->flags);
	if (ri->i_inline & F2FS_PIN_FILE)
		set_bit(FI_PIN_FILE, fi->flags);
	if (ri->i_inline & F2FS_COMPRESS_RELEASED)
		set_bit(FI_COMPRESS_RELEASED, fi->flags);
}

static inline void set_raw_inline(struct inode *inode, struct f2fs_inode *ri)
{
	ri->i_inline = 0;

	if (is_inode_flag_set(inode, FI_INLINE_XATTR))
		ri->i_inline |= F2FS_INLINE_XATTR;
	if (is_inode_flag_set(inode, FI_INLINE_DATA))
		ri->i_inline |= F2FS_INLINE_DATA;
	if (is_inode_flag_set(inode, FI_INLINE_DENTRY))
		ri->i_inline |= F2FS_INLINE_DENTRY;
	if (is_inode_flag_set(inode, FI_DATA_EXIST))
		ri->i_inline |= F2FS_DATA_EXIST;
	if (is_inode_flag_set(inode, FI_INLINE_DOTS))
		ri->i_inline |= F2FS_INLINE_DOTS;
	if (is_inode_flag_set(inode, FI_EXTRA_ATTR))
		ri->i_inline |= F2FS_EXTRA_ATTR;
	if (is_inode_flag_set(inode, FI_PIN_FILE))
		ri->i_inline |= F2FS_PIN_FILE;
	if (is_inode_flag_set(inode, FI_COMPRESS_RELEASED))
		ri->i_inline |= F2FS_COMPRESS_RELEASED;
}

static inline int f2fs_has_extra_attr(struct inode *inode)
{
	return is_inode_flag_set(inode, FI_EXTRA_ATTR);
}

static inline int f2fs_has_inline_xattr(struct inode *inode)
{
	return is_inode_flag_set(inode, FI_INLINE_XATTR);
}

static inline int f2fs_compressed_file(struct inode *inode)
{
	return S_ISREG(inode->i_mode) &&
		is_inode_flag_set(inode, FI_COMPRESSED_FILE);
}

static inline bool f2fs_need_compress_data(struct inode *inode)
{
	int compress_mode = F2FS_OPTION(F2FS_I_SB(inode)).compress_mode;

	if (!f2fs_compressed_file(inode))
		return false;
	if (compress_mode == COMPR_MODE_FS)
		return true;
	else if (compress_mode == COMPR_MODE_USER &&
			is_inode_flag_set(inode, FI_ENABLE_COMPRESS))
		return true;

	return false;
}

static inline unsigned int addrs_per_inode(struct inode *inode)
{
	unsigned int addrs = CUR_ADDRS_PER_INODE(inode) -
				get_inline_xattr_addrs(inode);

	if (!f2fs_compressed_file(inode))
		return addrs;
	return ALIGN_DOWN(addrs, F2FS_I(inode)->i_cluster_size);
}

static inline unsigned int addrs_per_block(struct inode *inode)
{
	if (!f2fs_compressed_file(inode))
		return DEF_ADDRS_PER_BLOCK;
	return ALIGN_DOWN(DEF_ADDRS_PER_BLOCK, F2FS_I(inode)->i_cluster_size);
}
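/*
 * Worked example, for illustration: with DEF_ADDRS_PER_BLOCK = 1018 and a
 * compression cluster size of 16 blocks, addrs_per_block() returns
 * ALIGN_DOWN(1018, 16) = 1008, deliberately leaving 10 address slots of a
 * direct node unused so that clusters never straddle node blocks.
 */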
static inline void *inline_xattr_addr(struct inode *inode, struct page *page)
{
	struct f2fs_inode *ri = F2FS_INODE(page);

	return (void *)&(ri->i_addr[DEF_ADDRS_PER_INODE -
					get_inline_xattr_addrs(inode)]);
}

static inline int inline_xattr_size(struct inode *inode)
{
	if (f2fs_has_inline_xattr(inode))
		return get_inline_xattr_addrs(inode) * sizeof(__le32);
	return 0;
}
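/*
 * Sizing example, assuming the common default of 50 inline-xattr address
 * slots: inline_xattr_size() comes to 50 * sizeof(__le32) = 200 bytes,
 * carved out of the tail of the inode's i_addr[] array (see
 * inline_xattr_addr() above).
 */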
/*
 * Notice: checking the inline_data flag without holding the inode page lock
 * is unsafe; it can be changed at any time by f2fs_convert_inline_page().
 */
static inline int f2fs_has_inline_data(struct inode *inode)
{
	return is_inode_flag_set(inode, FI_INLINE_DATA);
}

static inline int f2fs_exist_data(struct inode *inode)
{
	return is_inode_flag_set(inode, FI_DATA_EXIST);
}

static inline int f2fs_has_inline_dots(struct inode *inode)
{
	return is_inode_flag_set(inode, FI_INLINE_DOTS);
}

static inline int f2fs_is_mmap_file(struct inode *inode)
{
	return is_inode_flag_set(inode, FI_MMAP_FILE);
}

static inline bool f2fs_is_pinned_file(struct inode *inode)
{
	return is_inode_flag_set(inode, FI_PIN_FILE);
}

static inline bool f2fs_is_atomic_file(struct inode *inode)
{
	return is_inode_flag_set(inode, FI_ATOMIC_FILE);
}

static inline bool f2fs_is_cow_file(struct inode *inode)
{
	return is_inode_flag_set(inode, FI_COW_FILE);
}

static inline bool f2fs_is_first_block_written(struct inode *inode)
{
	return is_inode_flag_set(inode, FI_FIRST_BLOCK_WRITTEN);
}

static inline bool f2fs_is_drop_cache(struct inode *inode)
{
	return is_inode_flag_set(inode, FI_DROP_CACHE);
}

static inline void *inline_data_addr(struct inode *inode, struct page *page)
{
	struct f2fs_inode *ri = F2FS_INODE(page);
	int extra_size = get_extra_isize(inode);

	return (void *)&(ri->i_addr[extra_size + DEF_INLINE_RESERVED_SIZE]);
}

static inline int f2fs_has_inline_dentry(struct inode *inode)
{
	return is_inode_flag_set(inode, FI_INLINE_DENTRY);
}

static inline int is_file(struct inode *inode, int type)
{
	return F2FS_I(inode)->i_advise & type;
}

static inline void set_file(struct inode *inode, int type)
{
	if (is_file(inode, type))
		return;
	F2FS_I(inode)->i_advise |= type;
	f2fs_mark_inode_dirty_sync(inode, true);
}

static inline void clear_file(struct inode *inode, int type)
{
	if (!is_file(inode, type))
		return;
	F2FS_I(inode)->i_advise &= ~type;
	f2fs_mark_inode_dirty_sync(inode, true);
}

static inline bool f2fs_is_time_consistent(struct inode *inode)
{
	if (!timespec64_equal(F2FS_I(inode)->i_disk_time, &inode->i_atime))
		return false;
	if (!timespec64_equal(F2FS_I(inode)->i_disk_time + 1, &inode->i_ctime))
		return false;
	if (!timespec64_equal(F2FS_I(inode)->i_disk_time + 2, &inode->i_mtime))
		return false;
	return true;
}

static inline bool f2fs_skip_inode_update(struct inode *inode, int dsync)
{
	bool ret;

	if (dsync) {
		struct f2fs_sb_info *sbi = F2FS_I_SB(inode);

		spin_lock(&sbi->inode_lock[DIRTY_META]);
		ret = list_empty(&F2FS_I(inode)->gdirty_list);
		spin_unlock(&sbi->inode_lock[DIRTY_META]);
		return ret;
	}

	if (!is_inode_flag_set(inode, FI_AUTO_RECOVER) ||
			file_keep_isize(inode) ||
			i_size_read(inode) & ~PAGE_MASK)
		return false;

	if (!f2fs_is_time_consistent(inode))
		return false;

	spin_lock(&F2FS_I(inode)->i_size_lock);
	ret = F2FS_I(inode)->last_disk_size == i_size_read(inode);
	spin_unlock(&F2FS_I(inode)->i_size_lock);

	return ret;
}

static inline bool f2fs_readonly(struct super_block *sb)
{
	return sb_rdonly(sb);
}

static inline bool f2fs_cp_error(struct f2fs_sb_info *sbi)
{
	return is_set_ckpt_flags(sbi, CP_ERROR_FLAG);
}

static inline bool is_dot_dotdot(const u8 *name, size_t len)
{
	if (len == 1 && name[0] == '.')
		return true;

	if (len == 2 && name[0] == '.' && name[1] == '.')
		return true;

	return false;
}

static inline void *f2fs_kmalloc(struct f2fs_sb_info *sbi,
					size_t size, gfp_t flags)
{
	if (time_to_inject(sbi, FAULT_KMALLOC))
		return NULL;

	return kmalloc(size, flags);
}

static inline void *f2fs_getname(struct f2fs_sb_info *sbi)
{
	if (time_to_inject(sbi, FAULT_KMALLOC))
		return NULL;

	return __getname();
}

static inline void f2fs_putname(char *buf)
{
	__putname(buf);
}

static inline void *f2fs_kzalloc(struct f2fs_sb_info *sbi,
					size_t size, gfp_t flags)
{
	return f2fs_kmalloc(sbi, size, flags | __GFP_ZERO);
}

static inline void *f2fs_kvmalloc(struct f2fs_sb_info *sbi,
					size_t size, gfp_t flags)
{
	if (time_to_inject(sbi, FAULT_KVMALLOC))
		return NULL;

	return kvmalloc(size, flags);
}

static inline void *f2fs_kvzalloc(struct f2fs_sb_info *sbi,
					size_t size, gfp_t flags)
{
	return f2fs_kvmalloc(sbi, size, flags | __GFP_ZERO);
}

static inline int get_extra_isize(struct inode *inode)
{
	return F2FS_I(inode)->i_extra_isize / sizeof(__le32);
}

static inline int get_inline_xattr_addrs(struct inode *inode)
{
	return F2FS_I(inode)->i_inline_xattr_size;
}

#define f2fs_get_inode_mode(i) \
	((is_inode_flag_set(i, FI_ACL_MODE)) ? \
	 (F2FS_I(i)->i_acl_mode) : ((i)->i_mode))

#define F2FS_TOTAL_EXTRA_ATTR_SIZE			\
	(offsetof(struct f2fs_inode, i_extra_end) -	\
	offsetof(struct f2fs_inode, i_extra_isize))

#define F2FS_OLD_ATTRIBUTE_SIZE	(offsetof(struct f2fs_inode, i_addr))
#define F2FS_FITS_IN_INODE(f2fs_inode, extra_isize, field)	\
		((offsetof(typeof(*(f2fs_inode)), field) +	\
		sizeof((f2fs_inode)->field))			\
		<= (F2FS_OLD_ATTRIBUTE_SIZE + (extra_isize)))
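/*
 * Usage sketch, for illustration: the macro guards fields that only exist
 * when extra_isize is large enough, e.g.
 *
 *	if (F2FS_FITS_IN_INODE(ri, fi->i_extra_isize, i_projid))
 *		projid = le32_to_cpu(ri->i_projid);
 *
 * i.e. the field must end inside the old attribute area plus the recorded
 * extra size.
 */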
#define __is_large_section(sbi)		((sbi)->segs_per_sec > 1)

#define __is_meta_io(fio) (PAGE_TYPE_OF_BIO((fio)->type) == META)

bool f2fs_is_valid_blkaddr(struct f2fs_sb_info *sbi,
					block_t blkaddr, int type);
static inline void verify_blkaddr(struct f2fs_sb_info *sbi,
					block_t blkaddr, int type)
{
	if (!f2fs_is_valid_blkaddr(sbi, blkaddr, type))
		f2fs_err(sbi, "invalid blkaddr: %u, type: %d, run fsck to fix.",
			 blkaddr, type);
}

static inline bool __is_valid_data_blkaddr(block_t blkaddr)
{
	if (blkaddr == NEW_ADDR || blkaddr == NULL_ADDR ||
			blkaddr == COMPRESS_ADDR)
		return false;
	return true;
}

/*
 * file.c
 */
int f2fs_sync_file(struct file *file, loff_t start, loff_t end, int datasync);
void f2fs_truncate_data_blocks(struct dnode_of_data *dn);
int f2fs_do_truncate_blocks(struct inode *inode, u64 from, bool lock);
int f2fs_truncate_blocks(struct inode *inode, u64 from, bool lock);
int f2fs_truncate(struct inode *inode);
int f2fs_getattr(struct user_namespace *mnt_userns, const struct path *path,
			struct kstat *stat, u32 request_mask, unsigned int flags);
int f2fs_setattr(struct user_namespace *mnt_userns, struct dentry *dentry,
		 struct iattr *attr);
int f2fs_truncate_hole(struct inode *inode, pgoff_t pg_start, pgoff_t pg_end);
void f2fs_truncate_data_blocks_range(struct dnode_of_data *dn, int count);
int f2fs_precache_extents(struct inode *inode);
int f2fs_fileattr_get(struct dentry *dentry, struct fileattr *fa);
int f2fs_fileattr_set(struct user_namespace *mnt_userns,
		      struct dentry *dentry, struct fileattr *fa);
long f2fs_ioctl(struct file *filp, unsigned int cmd, unsigned long arg);
long f2fs_compat_ioctl(struct file *file, unsigned int cmd, unsigned long arg);
int f2fs_transfer_project_quota(struct inode *inode, kprojid_t kprojid);
int f2fs_pin_file_control(struct inode *inode, bool inc);

/*
 * inode.c
 */
void f2fs_set_inode_flags(struct inode *inode);
bool f2fs_inode_chksum_verify(struct f2fs_sb_info *sbi, struct page *page);
void f2fs_inode_chksum_set(struct f2fs_sb_info *sbi, struct page *page);
struct inode *f2fs_iget(struct super_block *sb, unsigned long ino);
struct inode *f2fs_iget_retry(struct super_block *sb, unsigned long ino);
int f2fs_try_to_free_nats(struct f2fs_sb_info *sbi, int nr_shrink);
void f2fs_update_inode(struct inode *inode, struct page *node_page);
void f2fs_update_inode_page(struct inode *inode);
int f2fs_write_inode(struct inode *inode, struct writeback_control *wbc);
void f2fs_evict_inode(struct inode *inode);
void f2fs_handle_failed_inode(struct inode *inode);

/*
 * namei.c
 */
int f2fs_update_extension_list(struct f2fs_sb_info *sbi, const char *name,
					bool hot, bool set);
struct dentry *f2fs_get_parent(struct dentry *child);
int f2fs_get_tmpfile(struct user_namespace *mnt_userns, struct inode *dir,
		     struct inode **new_inode);

/*
 * dir.c
 */
int f2fs_init_casefolded_name(const struct inode *dir,
			      struct f2fs_filename *fname);
int f2fs_setup_filename(struct inode *dir, const struct qstr *iname,
			int lookup, struct f2fs_filename *fname);
int f2fs_prepare_lookup(struct inode *dir, struct dentry *dentry,
			struct f2fs_filename *fname);
void f2fs_free_filename(struct f2fs_filename *fname);
struct f2fs_dir_entry *f2fs_find_target_dentry(const struct f2fs_dentry_ptr *d,
			const struct f2fs_filename *fname, int *max_slots);
int f2fs_fill_dentries(struct dir_context *ctx, struct f2fs_dentry_ptr *d,
			unsigned int start_pos, struct fscrypt_str *fstr);
void f2fs_do_make_empty_dir(struct inode *inode, struct inode *parent,
			struct f2fs_dentry_ptr *d);
struct page *f2fs_init_inode_metadata(struct inode *inode, struct inode *dir,
			const struct f2fs_filename *fname, struct page *dpage);
void f2fs_update_parent_metadata(struct inode *dir, struct inode *inode,
			unsigned int current_depth);
int f2fs_room_for_filename(const void *bitmap, int slots, int max_slots);
void f2fs_drop_nlink(struct inode *dir, struct inode *inode);
struct f2fs_dir_entry *__f2fs_find_entry(struct inode *dir,
					 const struct f2fs_filename *fname,
					 struct page **res_page);
struct f2fs_dir_entry *f2fs_find_entry(struct inode *dir,
			const struct qstr *child, struct page **res_page);
struct f2fs_dir_entry *f2fs_parent_dir(struct inode *dir, struct page **p);
ino_t f2fs_inode_by_name(struct inode *dir, const struct qstr *qstr,
			struct page **page);
void f2fs_set_link(struct inode *dir, struct f2fs_dir_entry *de,
			struct page *page, struct inode *inode);
bool f2fs_has_enough_room(struct inode *dir, struct page *ipage,
			  const struct f2fs_filename *fname);
void f2fs_update_dentry(nid_t ino, umode_t mode, struct f2fs_dentry_ptr *d,
			const struct fscrypt_str *name, f2fs_hash_t name_hash,
			unsigned int bit_pos);
int f2fs_add_regular_entry(struct inode *dir, const struct f2fs_filename *fname,
			struct inode *inode, nid_t ino, umode_t mode);
int f2fs_add_dentry(struct inode *dir, const struct f2fs_filename *fname,
			struct inode *inode, nid_t ino, umode_t mode);
int f2fs_do_add_link(struct inode *dir, const struct qstr *name,
			struct inode *inode, nid_t ino, umode_t mode);
void f2fs_delete_entry(struct f2fs_dir_entry *dentry, struct page *page,
			struct inode *dir, struct inode *inode);
int f2fs_do_tmpfile(struct inode *inode, struct inode *dir);
bool f2fs_empty_dir(struct inode *dir);

static inline int f2fs_add_link(struct dentry *dentry, struct inode *inode)
{
	if (fscrypt_is_nokey_name(dentry))
		return -ENOKEY;
	return f2fs_do_add_link(d_inode(dentry->d_parent), &dentry->d_name,
				inode, inode->i_ino, inode->i_mode);
}
/*
 * super.c
 */
int f2fs_inode_dirtied(struct inode *inode, bool sync);
void f2fs_inode_synced(struct inode *inode);
int f2fs_dquot_initialize(struct inode *inode);
int f2fs_enable_quota_files(struct f2fs_sb_info *sbi, bool rdonly);
int f2fs_quota_sync(struct super_block *sb, int type);
loff_t max_file_blocks(struct inode *inode);
void f2fs_quota_off_umount(struct super_block *sb);
void f2fs_handle_stop(struct f2fs_sb_info *sbi, unsigned char reason);
void f2fs_save_errors(struct f2fs_sb_info *sbi, unsigned char flag);
void f2fs_handle_error(struct f2fs_sb_info *sbi, unsigned char error);
int f2fs_commit_super(struct f2fs_sb_info *sbi, bool recover);
int f2fs_sync_fs(struct super_block *sb, int sync);
int f2fs_sanity_check_ckpt(struct f2fs_sb_info *sbi);

/*
 * hash.c
 */
void f2fs_hash_filename(const struct inode *dir, struct f2fs_filename *fname);

/*
 * node.c
 */
struct node_info;

int f2fs_check_nid_range(struct f2fs_sb_info *sbi, nid_t nid);
bool f2fs_available_free_memory(struct f2fs_sb_info *sbi, int type);
bool f2fs_in_warm_node_list(struct f2fs_sb_info *sbi, struct page *page);
void f2fs_init_fsync_node_info(struct f2fs_sb_info *sbi);
void f2fs_del_fsync_node_entry(struct f2fs_sb_info *sbi, struct page *page);
void f2fs_reset_fsync_node_info(struct f2fs_sb_info *sbi);
int f2fs_need_dentry_mark(struct f2fs_sb_info *sbi, nid_t nid);
bool f2fs_is_checkpointed_node(struct f2fs_sb_info *sbi, nid_t nid);
bool f2fs_need_inode_block_update(struct f2fs_sb_info *sbi, nid_t ino);
int f2fs_get_node_info(struct f2fs_sb_info *sbi, nid_t nid,
				struct node_info *ni, bool checkpoint_context);
pgoff_t f2fs_get_next_page_offset(struct dnode_of_data *dn, pgoff_t pgofs);
int f2fs_get_dnode_of_data(struct dnode_of_data *dn, pgoff_t index, int mode);
int f2fs_truncate_inode_blocks(struct inode *inode, pgoff_t from);
int f2fs_truncate_xattr_node(struct inode *inode);
int f2fs_wait_on_node_pages_writeback(struct f2fs_sb_info *sbi,
					unsigned int seq_id);
bool f2fs_nat_bitmap_enabled(struct f2fs_sb_info *sbi);
int f2fs_remove_inode_page(struct inode *inode);
struct page *f2fs_new_inode_page(struct inode *inode);
struct page *f2fs_new_node_page(struct dnode_of_data *dn, unsigned int ofs);
void f2fs_ra_node_page(struct f2fs_sb_info *sbi, nid_t nid);
struct page *f2fs_get_node_page(struct f2fs_sb_info *sbi, pgoff_t nid);
struct page *f2fs_get_node_page_ra(struct page *parent, int start);
int f2fs_move_node_page(struct page *node_page, int gc_type);
void f2fs_flush_inline_data(struct f2fs_sb_info *sbi);
int f2fs_fsync_node_pages(struct f2fs_sb_info *sbi, struct inode *inode,
			struct writeback_control *wbc, bool atomic,
			unsigned int *seq_id);
int f2fs_sync_node_pages(struct f2fs_sb_info *sbi,
			struct writeback_control *wbc,
			bool do_balance, enum iostat_type io_type);
int f2fs_build_free_nids(struct f2fs_sb_info *sbi, bool sync, bool mount);
bool f2fs_alloc_nid(struct f2fs_sb_info *sbi, nid_t *nid);
void f2fs_alloc_nid_done(struct f2fs_sb_info *sbi, nid_t nid);
void f2fs_alloc_nid_failed(struct f2fs_sb_info *sbi, nid_t nid);
int f2fs_try_to_free_nids(struct f2fs_sb_info *sbi, int nr_shrink);
int f2fs_recover_inline_xattr(struct inode *inode, struct page *page);
int f2fs_recover_xattr_data(struct inode *inode, struct page *page);
int f2fs_recover_inode_page(struct f2fs_sb_info *sbi, struct page *page);
int f2fs_restore_node_summary(struct f2fs_sb_info *sbi,
			unsigned int segno, struct f2fs_summary_block *sum);
void f2fs_enable_nat_bits(struct f2fs_sb_info *sbi);
int f2fs_flush_nat_entries(struct f2fs_sb_info *sbi, struct cp_control *cpc);
int f2fs_build_node_manager(struct f2fs_sb_info *sbi);
void f2fs_destroy_node_manager(struct f2fs_sb_info *sbi);
int __init f2fs_create_node_manager_caches(void);
void f2fs_destroy_node_manager_caches(void);
/*
 * segment.c
 */
bool f2fs_need_SSR(struct f2fs_sb_info *sbi);
int f2fs_commit_atomic_write(struct inode *inode);
void f2fs_abort_atomic_write(struct inode *inode, bool clean);
void f2fs_balance_fs(struct f2fs_sb_info *sbi, bool need);
void f2fs_balance_fs_bg(struct f2fs_sb_info *sbi, bool from_bg);
int f2fs_issue_flush(struct f2fs_sb_info *sbi, nid_t ino);
int f2fs_create_flush_cmd_control(struct f2fs_sb_info *sbi);
int f2fs_flush_device_cache(struct f2fs_sb_info *sbi);
void f2fs_destroy_flush_cmd_control(struct f2fs_sb_info *sbi, bool free);
void f2fs_invalidate_blocks(struct f2fs_sb_info *sbi, block_t addr);
bool f2fs_is_checkpointed_data(struct f2fs_sb_info *sbi, block_t blkaddr);
int f2fs_start_discard_thread(struct f2fs_sb_info *sbi);
void f2fs_drop_discard_cmd(struct f2fs_sb_info *sbi);
void f2fs_stop_discard_thread(struct f2fs_sb_info *sbi);
bool f2fs_issue_discard_timeout(struct f2fs_sb_info *sbi);
void f2fs_clear_prefree_segments(struct f2fs_sb_info *sbi,
					struct cp_control *cpc);
void f2fs_dirty_to_prefree(struct f2fs_sb_info *sbi);
block_t f2fs_get_unusable_blocks(struct f2fs_sb_info *sbi);
int f2fs_disable_cp_again(struct f2fs_sb_info *sbi, block_t unusable);
void f2fs_release_discard_addrs(struct f2fs_sb_info *sbi);
int f2fs_npages_for_summary_flush(struct f2fs_sb_info *sbi, bool for_ra);
bool f2fs_segment_has_free_slot(struct f2fs_sb_info *sbi, int segno);
void f2fs_init_inmem_curseg(struct f2fs_sb_info *sbi);
void f2fs_save_inmem_curseg(struct f2fs_sb_info *sbi);
void f2fs_restore_inmem_curseg(struct f2fs_sb_info *sbi);
void f2fs_get_new_segment(struct f2fs_sb_info *sbi,
			unsigned int *newseg, bool new_sec, int dir);
void f2fs_allocate_segment_for_resize(struct f2fs_sb_info *sbi, int type,
					unsigned int start, unsigned int end);
void f2fs_allocate_new_section(struct f2fs_sb_info *sbi, int type, bool force);
void f2fs_allocate_new_segments(struct f2fs_sb_info *sbi);
int f2fs_trim_fs(struct f2fs_sb_info *sbi, struct fstrim_range *range);
bool f2fs_exist_trim_candidates(struct f2fs_sb_info *sbi,
					struct cp_control *cpc);
struct page *f2fs_get_sum_page(struct f2fs_sb_info *sbi, unsigned int segno);
void f2fs_update_meta_page(struct f2fs_sb_info *sbi, void *src,
					block_t blk_addr);
void f2fs_do_write_meta_page(struct f2fs_sb_info *sbi, struct page *page,
						enum iostat_type io_type);
void f2fs_do_write_node_page(unsigned int nid, struct f2fs_io_info *fio);
void f2fs_outplace_write_data(struct dnode_of_data *dn,
			struct f2fs_io_info *fio);
int f2fs_inplace_write_data(struct f2fs_io_info *fio);
void f2fs_do_replace_block(struct f2fs_sb_info *sbi, struct f2fs_summary *sum,
			block_t old_blkaddr, block_t new_blkaddr,
			bool recover_curseg, bool recover_newaddr,
			bool from_gc);
void f2fs_replace_block(struct f2fs_sb_info *sbi, struct dnode_of_data *dn,
			block_t old_addr, block_t new_addr,
			unsigned char version, bool recover_curseg,
			bool recover_newaddr);
void f2fs_allocate_data_block(struct f2fs_sb_info *sbi, struct page *page,
			block_t old_blkaddr, block_t *new_blkaddr,
			struct f2fs_summary *sum, int type,
			struct f2fs_io_info *fio);
void f2fs_update_device_state(struct f2fs_sb_info *sbi, nid_t ino,
					block_t blkaddr, unsigned int blkcnt);
void f2fs_wait_on_page_writeback(struct page *page,
			enum page_type type, bool ordered, bool locked);
void f2fs_wait_on_block_writeback(struct inode *inode, block_t blkaddr);
void f2fs_wait_on_block_writeback_range(struct inode *inode, block_t blkaddr,
								block_t len);
void f2fs_write_data_summaries(struct f2fs_sb_info *sbi, block_t start_blk);
void f2fs_write_node_summaries(struct f2fs_sb_info *sbi, block_t start_blk);
int f2fs_lookup_journal_in_cursum(struct f2fs_journal *journal, int type,
			unsigned int val, int alloc);
void f2fs_flush_sit_entries(struct f2fs_sb_info *sbi, struct cp_control *cpc);
int f2fs_fix_curseg_write_pointer(struct f2fs_sb_info *sbi);
int f2fs_check_write_pointer(struct f2fs_sb_info *sbi);
int f2fs_build_segment_manager(struct f2fs_sb_info *sbi);
void f2fs_destroy_segment_manager(struct f2fs_sb_info *sbi);
int __init f2fs_create_segment_manager_caches(void);
void f2fs_destroy_segment_manager_caches(void);
int f2fs_rw_hint_to_seg_type(enum rw_hint hint);
unsigned int f2fs_usable_segs_in_sec(struct f2fs_sb_info *sbi,
			unsigned int segno);
unsigned int f2fs_usable_blks_in_seg(struct f2fs_sb_info *sbi,
			unsigned int segno);

#define DEF_FRAGMENT_SIZE	4
#define MIN_FRAGMENT_SIZE	1
#define MAX_FRAGMENT_SIZE	512

static inline bool f2fs_need_rand_seg(struct f2fs_sb_info *sbi)
{
	return F2FS_OPTION(sbi).fs_mode == FS_MODE_FRAGMENT_SEG ||
		F2FS_OPTION(sbi).fs_mode == FS_MODE_FRAGMENT_BLK;
}
/*
 * checkpoint.c
 */
void f2fs_stop_checkpoint(struct f2fs_sb_info *sbi, bool end_io,
							unsigned char reason);
void f2fs_flush_ckpt_thread(struct f2fs_sb_info *sbi);
struct page *f2fs_grab_meta_page(struct f2fs_sb_info *sbi, pgoff_t index);
struct page *f2fs_get_meta_page(struct f2fs_sb_info *sbi, pgoff_t index);
struct page *f2fs_get_meta_page_retry(struct f2fs_sb_info *sbi, pgoff_t index);
struct page *f2fs_get_tmp_page(struct f2fs_sb_info *sbi, pgoff_t index);
bool f2fs_is_valid_blkaddr(struct f2fs_sb_info *sbi,
					block_t blkaddr, int type);
bool f2fs_is_valid_blkaddr_raw(struct f2fs_sb_info *sbi,
					block_t blkaddr, int type);
int f2fs_ra_meta_pages(struct f2fs_sb_info *sbi, block_t start, int nrpages,
			int type, bool sync);
void f2fs_ra_meta_pages_cond(struct f2fs_sb_info *sbi, pgoff_t index,
							unsigned int ra_blocks);
long f2fs_sync_meta_pages(struct f2fs_sb_info *sbi, enum page_type type,
			long nr_to_write, enum iostat_type io_type);
void f2fs_add_ino_entry(struct f2fs_sb_info *sbi, nid_t ino, int type);
void f2fs_remove_ino_entry(struct f2fs_sb_info *sbi, nid_t ino, int type);
void f2fs_release_ino_entry(struct f2fs_sb_info *sbi, bool all);
bool f2fs_exist_written_data(struct f2fs_sb_info *sbi, nid_t ino, int mode);
void f2fs_set_dirty_device(struct f2fs_sb_info *sbi, nid_t ino,
					unsigned int devidx, int type);
bool f2fs_is_dirty_device(struct f2fs_sb_info *sbi, nid_t ino,
					unsigned int devidx, int type);
int f2fs_acquire_orphan_inode(struct f2fs_sb_info *sbi);
void f2fs_release_orphan_inode(struct f2fs_sb_info *sbi);
void f2fs_add_orphan_inode(struct inode *inode);
void f2fs_remove_orphan_inode(struct f2fs_sb_info *sbi, nid_t ino);
int f2fs_recover_orphan_inodes(struct f2fs_sb_info *sbi);
int f2fs_get_valid_checkpoint(struct f2fs_sb_info *sbi);
void f2fs_update_dirty_folio(struct inode *inode, struct folio *folio);
void f2fs_remove_dirty_inode(struct inode *inode);
int f2fs_sync_dirty_inodes(struct f2fs_sb_info *sbi, enum inode_type type,
								bool from_cp);
void f2fs_wait_on_all_pages(struct f2fs_sb_info *sbi, int type);
u64 f2fs_get_sectors_written(struct f2fs_sb_info *sbi);
int f2fs_write_checkpoint(struct f2fs_sb_info *sbi, struct cp_control *cpc);
void f2fs_init_ino_entry_info(struct f2fs_sb_info *sbi);
int __init f2fs_create_checkpoint_caches(void);
void f2fs_destroy_checkpoint_caches(void);
int f2fs_issue_checkpoint(struct f2fs_sb_info *sbi);
int f2fs_start_ckpt_thread(struct f2fs_sb_info *sbi);
void f2fs_stop_ckpt_thread(struct f2fs_sb_info *sbi);
void f2fs_init_ckpt_req_control(struct f2fs_sb_info *sbi);
/*
 * data.c
 */
int __init f2fs_init_bioset(void);
void f2fs_destroy_bioset(void);
bool f2fs_is_cp_guaranteed(struct page *page);
int f2fs_init_bio_entry_cache(void);
void f2fs_destroy_bio_entry_cache(void);
void f2fs_submit_read_bio(struct f2fs_sb_info *sbi, struct bio *bio,
			  enum page_type type);
int f2fs_init_write_merge_io(struct f2fs_sb_info *sbi);
void f2fs_submit_merged_write(struct f2fs_sb_info *sbi, enum page_type type);
void f2fs_submit_merged_write_cond(struct f2fs_sb_info *sbi,
				struct inode *inode, struct page *page,
				nid_t ino, enum page_type type);
void f2fs_submit_merged_ipu_write(struct f2fs_sb_info *sbi,
					struct bio **bio, struct page *page);
void f2fs_flush_merged_writes(struct f2fs_sb_info *sbi);
int f2fs_submit_page_bio(struct f2fs_io_info *fio);
int f2fs_merge_page_bio(struct f2fs_io_info *fio);
void f2fs_submit_page_write(struct f2fs_io_info *fio);
struct block_device *f2fs_target_device(struct f2fs_sb_info *sbi,
		block_t blk_addr, sector_t *sector);
int f2fs_target_device_index(struct f2fs_sb_info *sbi, block_t blkaddr);
void f2fs_set_data_blkaddr(struct dnode_of_data *dn);
void f2fs_update_data_blkaddr(struct dnode_of_data *dn, block_t blkaddr);
int f2fs_reserve_new_blocks(struct dnode_of_data *dn, blkcnt_t count);
int f2fs_reserve_new_block(struct dnode_of_data *dn);
int f2fs_get_block_locked(struct dnode_of_data *dn, pgoff_t index);
int f2fs_reserve_block(struct dnode_of_data *dn, pgoff_t index);
struct page *f2fs_get_read_data_page(struct inode *inode, pgoff_t index,
			blk_opf_t op_flags, bool for_write, pgoff_t *next_pgofs);
struct page *f2fs_find_data_page(struct inode *inode, pgoff_t index,
					pgoff_t *next_pgofs);
struct page *f2fs_get_lock_data_page(struct inode *inode, pgoff_t index,
			bool for_write);
struct page *f2fs_get_new_data_page(struct inode *inode,
			struct page *ipage, pgoff_t index, bool new_i_size);
int f2fs_do_write_data_page(struct f2fs_io_info *fio);
int f2fs_map_blocks(struct inode *inode, struct f2fs_map_blocks *map, int flag);
int f2fs_fiemap(struct inode *inode, struct fiemap_extent_info *fieinfo,
			u64 start, u64 len);
int f2fs_encrypt_one_page(struct f2fs_io_info *fio);
bool f2fs_should_update_inplace(struct inode *inode, struct f2fs_io_info *fio);
bool f2fs_should_update_outplace(struct inode *inode, struct f2fs_io_info *fio);
int f2fs_write_single_data_page(struct page *page, int *submitted,
				struct bio **bio, sector_t *last_block,
				struct writeback_control *wbc,
				enum iostat_type io_type,
				int compr_blocks, bool allow_balance);
void f2fs_write_failed(struct inode *inode, loff_t to);
void f2fs_invalidate_folio(struct folio *folio, size_t offset, size_t length);
bool f2fs_release_folio(struct folio *folio, gfp_t wait);
bool f2fs_overwrite_io(struct inode *inode, loff_t pos, size_t len);
void f2fs_clear_page_cache_dirty_tag(struct page *page);
int f2fs_init_post_read_processing(void);
void f2fs_destroy_post_read_processing(void);
int f2fs_init_post_read_wq(struct f2fs_sb_info *sbi);
void f2fs_destroy_post_read_wq(struct f2fs_sb_info *sbi);
extern const struct iomap_ops f2fs_iomap_ops;

/*
 * gc.c
 */
int f2fs_start_gc_thread(struct f2fs_sb_info *sbi);
void f2fs_stop_gc_thread(struct f2fs_sb_info *sbi);
block_t f2fs_start_bidx_of_node(unsigned int node_ofs, struct inode *inode);
int f2fs_gc(struct f2fs_sb_info *sbi, struct f2fs_gc_control *gc_control);
void f2fs_build_gc_manager(struct f2fs_sb_info *sbi);
int f2fs_resize_fs(struct file *filp, __u64 block_count);
int __init f2fs_create_garbage_collection_cache(void);
void f2fs_destroy_garbage_collection_cache(void);
/* victim selection function for cleaning and SSR */
int f2fs_get_victim(struct f2fs_sb_info *sbi, unsigned int *result,
			int gc_type, int type, char alloc_mode,
			unsigned long long age);

/*
 * recovery.c
 */
int f2fs_recover_fsync_data(struct f2fs_sb_info *sbi, bool check_only);
bool f2fs_space_for_roll_forward(struct f2fs_sb_info *sbi);
int __init f2fs_create_recovery_cache(void);
void f2fs_destroy_recovery_cache(void);
/*
 * debug.c
 */
#ifdef CONFIG_F2FS_STAT_FS
struct f2fs_stat_info {
	struct list_head stat_list;
	struct f2fs_sb_info *sbi;
	int all_area_segs, sit_area_segs, nat_area_segs, ssa_area_segs;
	int main_area_segs, main_area_sections, main_area_zones;
	unsigned long long hit_cached[NR_EXTENT_CACHES];
	unsigned long long hit_rbtree[NR_EXTENT_CACHES];
	unsigned long long total_ext[NR_EXTENT_CACHES];
	unsigned long long hit_total[NR_EXTENT_CACHES];
	int ext_tree[NR_EXTENT_CACHES];
	int zombie_tree[NR_EXTENT_CACHES];
	int ext_node[NR_EXTENT_CACHES];
	/* to count memory footprint */
	unsigned long long ext_mem[NR_EXTENT_CACHES];
	/* for read extent cache */
	unsigned long long hit_largest;
	/* for block age extent cache */
	unsigned long long allocated_data_blocks;
	int ndirty_node, ndirty_dent, ndirty_meta, ndirty_imeta;
	int ndirty_data, ndirty_qdata;
	unsigned int ndirty_dirs, ndirty_files, nquota_files, ndirty_all;
	int nats, dirty_nats, sits, dirty_sits;
	int free_nids, avail_nids, alloc_nids;
	int total_count, utilization;
	int bg_gc, nr_wb_cp_data, nr_wb_data;
	int nr_rd_data, nr_rd_node, nr_rd_meta;
	int nr_dio_read, nr_dio_write;
	unsigned int io_skip_bggc, other_skip_bggc;
	int nr_flushing, nr_flushed, flush_list_empty;
	int nr_discarding, nr_discarded;
	int nr_discard_cmd;
	unsigned int undiscard_blks;
	int nr_issued_ckpt, nr_total_ckpt, nr_queued_ckpt;
	unsigned int cur_ckpt_time, peak_ckpt_time;
	int inline_xattr, inline_inode, inline_dir, append, update, orphans;
	int compr_inode, swapfile_inode;
	unsigned long long compr_blocks;
	int aw_cnt, max_aw_cnt;
	unsigned int valid_count, valid_node_count, valid_inode_count, discard_blks;
	unsigned int bimodal, avg_vblocks;
	int util_free, util_valid, util_invalid;
	int rsvd_segs, overp_segs;
	int dirty_count, node_pages, meta_pages, compress_pages;
	int compress_page_hit;
	int prefree_count, call_count, cp_count, bg_cp_count;
	int tot_segs, node_segs, data_segs, free_segs, free_secs;
	int bg_node_segs, bg_data_segs;
	int tot_blks, data_blks, node_blks;
	int bg_data_blks, bg_node_blks;
	int curseg[NR_CURSEG_TYPE];
	int cursec[NR_CURSEG_TYPE];
	int curzone[NR_CURSEG_TYPE];
	unsigned int dirty_seg[NR_CURSEG_TYPE];
	unsigned int full_seg[NR_CURSEG_TYPE];
	unsigned int valid_blks[NR_CURSEG_TYPE];
	unsigned int meta_count[META_MAX];
	unsigned int segment_count[2];
	unsigned int block_count[2];
	unsigned int inplace_count;
	unsigned long long base_mem, cache_mem, page_mem;
};

static inline struct f2fs_stat_info *F2FS_STAT(struct f2fs_sb_info *sbi)
{
	return (struct f2fs_stat_info *)sbi->stat_info;
}
  3394. #define stat_inc_cp_count(si) ((si)->cp_count++)
  3395. #define stat_inc_bg_cp_count(si) ((si)->bg_cp_count++)
  3396. #define stat_inc_call_count(si) ((si)->call_count++)
  3397. #define stat_inc_bggc_count(si) ((si)->bg_gc++)
  3398. #define stat_io_skip_bggc_count(sbi) ((sbi)->io_skip_bggc++)
  3399. #define stat_other_skip_bggc_count(sbi) ((sbi)->other_skip_bggc++)
  3400. #define stat_inc_dirty_inode(sbi, type) ((sbi)->ndirty_inode[type]++)
  3401. #define stat_dec_dirty_inode(sbi, type) ((sbi)->ndirty_inode[type]--)
  3402. #define stat_inc_total_hit(sbi, type) (atomic64_inc(&(sbi)->total_hit_ext[type]))
  3403. #define stat_inc_rbtree_node_hit(sbi, type) (atomic64_inc(&(sbi)->read_hit_rbtree[type]))
  3404. #define stat_inc_largest_node_hit(sbi) (atomic64_inc(&(sbi)->read_hit_largest))
  3405. #define stat_inc_cached_node_hit(sbi, type) (atomic64_inc(&(sbi)->read_hit_cached[type]))
  3406. #define stat_inc_inline_xattr(inode) \
  3407. do { \
  3408. if (f2fs_has_inline_xattr(inode)) \
  3409. (atomic_inc(&F2FS_I_SB(inode)->inline_xattr)); \
  3410. } while (0)
  3411. #define stat_dec_inline_xattr(inode) \
  3412. do { \
  3413. if (f2fs_has_inline_xattr(inode)) \
  3414. (atomic_dec(&F2FS_I_SB(inode)->inline_xattr)); \
  3415. } while (0)
  3416. #define stat_inc_inline_inode(inode) \
  3417. do { \
  3418. if (f2fs_has_inline_data(inode)) \
  3419. (atomic_inc(&F2FS_I_SB(inode)->inline_inode)); \
  3420. } while (0)
  3421. #define stat_dec_inline_inode(inode) \
  3422. do { \
  3423. if (f2fs_has_inline_data(inode)) \
  3424. (atomic_dec(&F2FS_I_SB(inode)->inline_inode)); \
  3425. } while (0)
  3426. #define stat_inc_inline_dir(inode) \
  3427. do { \
  3428. if (f2fs_has_inline_dentry(inode)) \
  3429. (atomic_inc(&F2FS_I_SB(inode)->inline_dir)); \
  3430. } while (0)
  3431. #define stat_dec_inline_dir(inode) \
  3432. do { \
  3433. if (f2fs_has_inline_dentry(inode)) \
  3434. (atomic_dec(&F2FS_I_SB(inode)->inline_dir)); \
  3435. } while (0)
  3436. #define stat_inc_compr_inode(inode) \
  3437. do { \
  3438. if (f2fs_compressed_file(inode)) \
  3439. (atomic_inc(&F2FS_I_SB(inode)->compr_inode)); \
  3440. } while (0)
  3441. #define stat_dec_compr_inode(inode) \
  3442. do { \
  3443. if (f2fs_compressed_file(inode)) \
  3444. (atomic_dec(&F2FS_I_SB(inode)->compr_inode)); \
  3445. } while (0)
  3446. #define stat_add_compr_blocks(inode, blocks) \
  3447. (atomic64_add(blocks, &F2FS_I_SB(inode)->compr_blocks))
  3448. #define stat_sub_compr_blocks(inode, blocks) \
  3449. (atomic64_sub(blocks, &F2FS_I_SB(inode)->compr_blocks))
  3450. #define stat_inc_swapfile_inode(inode) \
  3451. (atomic_inc(&F2FS_I_SB(inode)->swapfile_inode))
  3452. #define stat_dec_swapfile_inode(inode) \
  3453. (atomic_dec(&F2FS_I_SB(inode)->swapfile_inode))
  3454. #define stat_inc_atomic_inode(inode) \
  3455. (atomic_inc(&F2FS_I_SB(inode)->atomic_files))
  3456. #define stat_dec_atomic_inode(inode) \
  3457. (atomic_dec(&F2FS_I_SB(inode)->atomic_files))
  3458. #define stat_inc_meta_count(sbi, blkaddr) \
  3459. do { \
  3460. if (blkaddr < SIT_I(sbi)->sit_base_addr) \
  3461. atomic_inc(&(sbi)->meta_count[META_CP]); \
  3462. else if (blkaddr < NM_I(sbi)->nat_blkaddr) \
  3463. atomic_inc(&(sbi)->meta_count[META_SIT]); \
  3464. else if (blkaddr < SM_I(sbi)->ssa_blkaddr) \
  3465. atomic_inc(&(sbi)->meta_count[META_NAT]); \
  3466. else if (blkaddr < SM_I(sbi)->main_blkaddr) \
  3467. atomic_inc(&(sbi)->meta_count[META_SSA]); \
  3468. } while (0)
  3469. #define stat_inc_seg_type(sbi, curseg) \
  3470. ((sbi)->segment_count[(curseg)->alloc_type]++)
  3471. #define stat_inc_block_count(sbi, curseg) \
  3472. ((sbi)->block_count[(curseg)->alloc_type]++)
  3473. #define stat_inc_inplace_blocks(sbi) \
  3474. (atomic_inc(&(sbi)->inplace_count))
#define stat_update_max_atomic_write(inode) \
        do { \
                int cur = atomic_read(&F2FS_I_SB(inode)->atomic_files); \
                int max = atomic_read(&F2FS_I_SB(inode)->max_aw_cnt); \
                if (cur > max) \
                        atomic_set(&F2FS_I_SB(inode)->max_aw_cnt, cur); \
        } while (0)
#define stat_inc_seg_count(sbi, type, gc_type) \
        do { \
                struct f2fs_stat_info *si = F2FS_STAT(sbi); \
                si->tot_segs++; \
                if ((type) == SUM_TYPE_DATA) { \
                        si->data_segs++; \
                        si->bg_data_segs += (gc_type == BG_GC) ? 1 : 0; \
                } else { \
                        si->node_segs++; \
                        si->bg_node_segs += (gc_type == BG_GC) ? 1 : 0; \
                } \
        } while (0)
#define stat_inc_tot_blk_count(si, blks) \
        ((si)->tot_blks += (blks))
#define stat_inc_data_blk_count(sbi, blks, gc_type) \
        do { \
                struct f2fs_stat_info *si = F2FS_STAT(sbi); \
                stat_inc_tot_blk_count(si, blks); \
                si->data_blks += (blks); \
                si->bg_data_blks += ((gc_type) == BG_GC) ? (blks) : 0; \
        } while (0)
#define stat_inc_node_blk_count(sbi, blks, gc_type) \
        do { \
                struct f2fs_stat_info *si = F2FS_STAT(sbi); \
                stat_inc_tot_blk_count(si, blks); \
                si->node_blks += (blks); \
                si->bg_node_blks += ((gc_type) == BG_GC) ? (blks) : 0; \
        } while (0)
int f2fs_build_stats(struct f2fs_sb_info *sbi);
void f2fs_destroy_stats(struct f2fs_sb_info *sbi);
void __init f2fs_create_root_stats(void);
void f2fs_destroy_root_stats(void);
void f2fs_update_sit_info(struct f2fs_sb_info *sbi);
#else
#define stat_inc_cp_count(si) do { } while (0)
#define stat_inc_bg_cp_count(si) do { } while (0)
#define stat_inc_call_count(si) do { } while (0)
#define stat_inc_bggc_count(si) do { } while (0)
#define stat_io_skip_bggc_count(sbi) do { } while (0)
#define stat_other_skip_bggc_count(sbi) do { } while (0)
#define stat_inc_dirty_inode(sbi, type) do { } while (0)
#define stat_dec_dirty_inode(sbi, type) do { } while (0)
#define stat_inc_total_hit(sbi, type) do { } while (0)
#define stat_inc_rbtree_node_hit(sbi, type) do { } while (0)
#define stat_inc_largest_node_hit(sbi) do { } while (0)
#define stat_inc_cached_node_hit(sbi, type) do { } while (0)
#define stat_inc_inline_xattr(inode) do { } while (0)
#define stat_dec_inline_xattr(inode) do { } while (0)
#define stat_inc_inline_inode(inode) do { } while (0)
#define stat_dec_inline_inode(inode) do { } while (0)
#define stat_inc_inline_dir(inode) do { } while (0)
#define stat_dec_inline_dir(inode) do { } while (0)
#define stat_inc_compr_inode(inode) do { } while (0)
#define stat_dec_compr_inode(inode) do { } while (0)
#define stat_add_compr_blocks(inode, blocks) do { } while (0)
#define stat_sub_compr_blocks(inode, blocks) do { } while (0)
#define stat_inc_swapfile_inode(inode) do { } while (0)
#define stat_dec_swapfile_inode(inode) do { } while (0)
#define stat_inc_atomic_inode(inode) do { } while (0)
#define stat_dec_atomic_inode(inode) do { } while (0)
#define stat_update_max_atomic_write(inode) do { } while (0)
#define stat_inc_meta_count(sbi, blkaddr) do { } while (0)
#define stat_inc_seg_type(sbi, curseg) do { } while (0)
#define stat_inc_block_count(sbi, curseg) do { } while (0)
#define stat_inc_inplace_blocks(sbi) do { } while (0)
#define stat_inc_seg_count(sbi, type, gc_type) do { } while (0)
#define stat_inc_tot_blk_count(si, blks) do { } while (0)
#define stat_inc_data_blk_count(sbi, blks, gc_type) do { } while (0)
#define stat_inc_node_blk_count(sbi, blks, gc_type) do { } while (0)

static inline int f2fs_build_stats(struct f2fs_sb_info *sbi) { return 0; }
static inline void f2fs_destroy_stats(struct f2fs_sb_info *sbi) { }
static inline void __init f2fs_create_root_stats(void) { }
static inline void f2fs_destroy_root_stats(void) { }
static inline void f2fs_update_sit_info(struct f2fs_sb_info *sbi) { }
#endif
extern const struct file_operations f2fs_dir_operations;
extern const struct file_operations f2fs_file_operations;
extern const struct inode_operations f2fs_file_inode_operations;
extern const struct address_space_operations f2fs_dblock_aops;
extern const struct address_space_operations f2fs_node_aops;
extern const struct address_space_operations f2fs_meta_aops;
extern const struct inode_operations f2fs_dir_inode_operations;
extern const struct inode_operations f2fs_symlink_inode_operations;
extern const struct inode_operations f2fs_encrypted_symlink_inode_operations;
extern const struct inode_operations f2fs_special_inode_operations;
extern struct kmem_cache *f2fs_inode_entry_slab;
/*
 * inline.c
 */
bool f2fs_may_inline_data(struct inode *inode);
bool f2fs_sanity_check_inline_data(struct inode *inode);
bool f2fs_may_inline_dentry(struct inode *inode);
void f2fs_do_read_inline_data(struct page *page, struct page *ipage);
void f2fs_truncate_inline_inode(struct inode *inode,
                struct page *ipage, u64 from);
int f2fs_read_inline_data(struct inode *inode, struct page *page);
int f2fs_convert_inline_page(struct dnode_of_data *dn, struct page *page);
int f2fs_convert_inline_inode(struct inode *inode);
int f2fs_try_convert_inline_dir(struct inode *dir, struct dentry *dentry);
int f2fs_write_inline_data(struct inode *inode, struct page *page);
int f2fs_recover_inline_data(struct inode *inode, struct page *npage);
struct f2fs_dir_entry *f2fs_find_in_inline_dir(struct inode *dir,
                const struct f2fs_filename *fname,
                struct page **res_page);
int f2fs_make_empty_inline_dir(struct inode *inode, struct inode *parent,
                struct page *ipage);
int f2fs_add_inline_entry(struct inode *dir, const struct f2fs_filename *fname,
                struct inode *inode, nid_t ino, umode_t mode);
void f2fs_delete_inline_entry(struct f2fs_dir_entry *dentry,
                struct page *page, struct inode *dir,
                struct inode *inode);
bool f2fs_empty_inline_dir(struct inode *dir);
int f2fs_read_inline_dir(struct file *file, struct dir_context *ctx,
                struct fscrypt_str *fstr);
int f2fs_inline_data_fiemap(struct inode *inode,
                struct fiemap_extent_info *fieinfo,
                __u64 start, __u64 len);
/*
 * shrinker.c
 */
unsigned long f2fs_shrink_count(struct shrinker *shrink,
                struct shrink_control *sc);
unsigned long f2fs_shrink_scan(struct shrinker *shrink,
                struct shrink_control *sc);
void f2fs_join_shrinker(struct f2fs_sb_info *sbi);
void f2fs_leave_shrinker(struct f2fs_sb_info *sbi);
/*
 * extent_cache.c
 */
bool sanity_check_extent_cache(struct inode *inode);
void f2fs_init_extent_tree(struct inode *inode);
void f2fs_drop_extent_tree(struct inode *inode);
void f2fs_destroy_extent_node(struct inode *inode);
void f2fs_destroy_extent_tree(struct inode *inode);
void f2fs_init_extent_cache_info(struct f2fs_sb_info *sbi);
int __init f2fs_create_extent_cache(void);
void f2fs_destroy_extent_cache(void);

/* read extent cache ops */
void f2fs_init_read_extent_tree(struct inode *inode, struct page *ipage);
bool f2fs_lookup_read_extent_cache(struct inode *inode, pgoff_t pgofs,
                struct extent_info *ei);
bool f2fs_lookup_read_extent_cache_block(struct inode *inode, pgoff_t index,
                block_t *blkaddr);
void f2fs_update_read_extent_cache(struct dnode_of_data *dn);
void f2fs_update_read_extent_cache_range(struct dnode_of_data *dn,
                pgoff_t fofs, block_t blkaddr, unsigned int len);
unsigned int f2fs_shrink_read_extent_tree(struct f2fs_sb_info *sbi,
                int nr_shrink);

/* block age extent cache ops */
void f2fs_init_age_extent_tree(struct inode *inode);
bool f2fs_lookup_age_extent_cache(struct inode *inode, pgoff_t pgofs,
                struct extent_info *ei);
void f2fs_update_age_extent_cache(struct dnode_of_data *dn);
void f2fs_update_age_extent_cache_range(struct dnode_of_data *dn,
                pgoff_t fofs, unsigned int len);
unsigned int f2fs_shrink_age_extent_tree(struct f2fs_sb_info *sbi,
                int nr_shrink);
/*
 * sysfs.c
 */
#define MIN_RA_MUL      2
#define MAX_RA_MUL      256

int __init f2fs_init_sysfs(void);
void f2fs_exit_sysfs(void);
int f2fs_register_sysfs(struct f2fs_sb_info *sbi);
void f2fs_unregister_sysfs(struct f2fs_sb_info *sbi);

/* verity.c */
extern const struct fsverity_operations f2fs_verityops;
/*
 * crypto support
 */
static inline bool f2fs_encrypted_file(struct inode *inode)
{
        return IS_ENCRYPTED(inode) && S_ISREG(inode->i_mode);
}

static inline void f2fs_set_encrypted_inode(struct inode *inode)
{
#ifdef CONFIG_FS_ENCRYPTION
        file_set_encrypt(inode);
        f2fs_set_inode_flags(inode);
#endif
}

/*
 * Returns true if the reads of the inode's data need to undergo some
 * postprocessing step, like decryption or authenticity verification.
 */
static inline bool f2fs_post_read_required(struct inode *inode)
{
        return f2fs_encrypted_file(inode) || fsverity_active(inode) ||
                f2fs_compressed_file(inode);
}
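/*
 * Illustrative use (a sketch, not a call site from this header): the
 * read path can branch on this to decide whether page contents are
 * usable as soon as the bio completes:
 *
 *	if (f2fs_post_read_required(inode))
 *		; // queue bio for post-read work (decrypt/verify/decompress)
 *	else
 *		; // data is plain; pages can be marked up-to-date directly
 */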
/*
 * compress.c
 */
#ifdef CONFIG_F2FS_FS_COMPRESSION
bool f2fs_is_compressed_page(struct page *page);
struct page *f2fs_compress_control_page(struct page *page);
int f2fs_prepare_compress_overwrite(struct inode *inode,
                struct page **pagep, pgoff_t index, void **fsdata);
bool f2fs_compress_write_end(struct inode *inode, void *fsdata,
                pgoff_t index, unsigned copied);
int f2fs_truncate_partial_cluster(struct inode *inode, u64 from, bool lock);
void f2fs_compress_write_end_io(struct bio *bio, struct page *page);
bool f2fs_is_compress_backend_ready(struct inode *inode);
int __init f2fs_init_compress_mempool(void);
void f2fs_destroy_compress_mempool(void);
void f2fs_decompress_cluster(struct decompress_io_ctx *dic, bool in_task);
void f2fs_end_read_compressed_page(struct page *page, bool failed,
                block_t blkaddr, bool in_task);
bool f2fs_cluster_is_empty(struct compress_ctx *cc);
bool f2fs_cluster_can_merge_page(struct compress_ctx *cc, pgoff_t index);
bool f2fs_all_cluster_page_ready(struct compress_ctx *cc, struct page **pages,
                int index, int nr_pages, bool uptodate);
bool f2fs_sanity_check_cluster(struct dnode_of_data *dn);
void f2fs_compress_ctx_add_page(struct compress_ctx *cc, struct page *page);
int f2fs_write_multi_pages(struct compress_ctx *cc,
                int *submitted,
                struct writeback_control *wbc,
                enum iostat_type io_type);
int f2fs_is_compressed_cluster(struct inode *inode, pgoff_t index);
void f2fs_update_read_extent_tree_range_compressed(struct inode *inode,
                pgoff_t fofs, block_t blkaddr,
                unsigned int llen, unsigned int c_len);
int f2fs_read_multi_pages(struct compress_ctx *cc, struct bio **bio_ret,
                unsigned nr_pages, sector_t *last_block_in_bio,
                bool is_readahead, bool for_write);
struct decompress_io_ctx *f2fs_alloc_dic(struct compress_ctx *cc);
void f2fs_decompress_end_io(struct decompress_io_ctx *dic, bool failed,
                bool in_task);
void f2fs_put_page_dic(struct page *page, bool in_task);
unsigned int f2fs_cluster_blocks_are_contiguous(struct dnode_of_data *dn);
int f2fs_init_compress_ctx(struct compress_ctx *cc);
void f2fs_destroy_compress_ctx(struct compress_ctx *cc, bool reuse);
void f2fs_init_compress_info(struct f2fs_sb_info *sbi);
int f2fs_init_compress_inode(struct f2fs_sb_info *sbi);
void f2fs_destroy_compress_inode(struct f2fs_sb_info *sbi);
int f2fs_init_page_array_cache(struct f2fs_sb_info *sbi);
void f2fs_destroy_page_array_cache(struct f2fs_sb_info *sbi);
int __init f2fs_init_compress_cache(void);
void f2fs_destroy_compress_cache(void);
struct address_space *COMPRESS_MAPPING(struct f2fs_sb_info *sbi);
void f2fs_invalidate_compress_page(struct f2fs_sb_info *sbi, block_t blkaddr);
void f2fs_cache_compressed_page(struct f2fs_sb_info *sbi, struct page *page,
                nid_t ino, block_t blkaddr);
bool f2fs_load_compressed_page(struct f2fs_sb_info *sbi, struct page *page,
                block_t blkaddr);
void f2fs_invalidate_compress_pages(struct f2fs_sb_info *sbi, nid_t ino);
#define inc_compr_inode_stat(inode) \
        do { \
                struct f2fs_sb_info *sbi = F2FS_I_SB(inode); \
                sbi->compr_new_inode++; \
        } while (0)
#define add_compr_block_stat(inode, blocks) \
        do { \
                struct f2fs_sb_info *sbi = F2FS_I_SB(inode); \
                int diff = F2FS_I(inode)->i_cluster_size - blocks; \
                sbi->compr_written_block += blocks; \
                sbi->compr_saved_block += diff; \
        } while (0)
#else
static inline bool f2fs_is_compressed_page(struct page *page) { return false; }
static inline bool f2fs_is_compress_backend_ready(struct inode *inode)
{
        if (!f2fs_compressed_file(inode))
                return true;
        /* compression support is not built in */
        return false;
}
static inline struct page *f2fs_compress_control_page(struct page *page)
{
        WARN_ON_ONCE(1);
        return ERR_PTR(-EINVAL);
}
static inline int __init f2fs_init_compress_mempool(void) { return 0; }
static inline void f2fs_destroy_compress_mempool(void) { }
static inline void f2fs_decompress_cluster(struct decompress_io_ctx *dic,
                bool in_task) { }
static inline void f2fs_end_read_compressed_page(struct page *page,
                bool failed, block_t blkaddr, bool in_task)
{
        WARN_ON_ONCE(1);
}
static inline void f2fs_put_page_dic(struct page *page, bool in_task)
{
        WARN_ON_ONCE(1);
}
static inline unsigned int f2fs_cluster_blocks_are_contiguous(struct dnode_of_data *dn) { return 0; }
static inline bool f2fs_sanity_check_cluster(struct dnode_of_data *dn) { return false; }
static inline int f2fs_init_compress_inode(struct f2fs_sb_info *sbi) { return 0; }
static inline void f2fs_destroy_compress_inode(struct f2fs_sb_info *sbi) { }
static inline int f2fs_init_page_array_cache(struct f2fs_sb_info *sbi) { return 0; }
static inline void f2fs_destroy_page_array_cache(struct f2fs_sb_info *sbi) { }
static inline int __init f2fs_init_compress_cache(void) { return 0; }
static inline void f2fs_destroy_compress_cache(void) { }
static inline void f2fs_invalidate_compress_page(struct f2fs_sb_info *sbi,
                block_t blkaddr) { }
static inline void f2fs_cache_compressed_page(struct f2fs_sb_info *sbi,
                struct page *page, nid_t ino, block_t blkaddr) { }
static inline bool f2fs_load_compressed_page(struct f2fs_sb_info *sbi,
                struct page *page, block_t blkaddr) { return false; }
static inline void f2fs_invalidate_compress_pages(struct f2fs_sb_info *sbi,
                nid_t ino) { }
#define inc_compr_inode_stat(inode) do { } while (0)
static inline void f2fs_update_read_extent_tree_range_compressed(
                struct inode *inode,
                pgoff_t fofs, block_t blkaddr,
                unsigned int llen, unsigned int c_len) { }
#endif
static inline int set_compress_context(struct inode *inode)
{
#ifdef CONFIG_F2FS_FS_COMPRESSION
        struct f2fs_sb_info *sbi = F2FS_I_SB(inode);

        F2FS_I(inode)->i_compress_algorithm =
                        F2FS_OPTION(sbi).compress_algorithm;
        F2FS_I(inode)->i_log_cluster_size =
                        F2FS_OPTION(sbi).compress_log_size;
        F2FS_I(inode)->i_compress_flag =
                        F2FS_OPTION(sbi).compress_chksum ?
                                BIT(COMPRESS_CHKSUM) : 0;
        F2FS_I(inode)->i_cluster_size =
                        BIT(F2FS_I(inode)->i_log_cluster_size);
        if ((F2FS_I(inode)->i_compress_algorithm == COMPRESS_LZ4 ||
                F2FS_I(inode)->i_compress_algorithm == COMPRESS_ZSTD) &&
                        F2FS_OPTION(sbi).compress_level)
                F2FS_I(inode)->i_compress_level =
                                F2FS_OPTION(sbi).compress_level;
        F2FS_I(inode)->i_flags |= F2FS_COMPR_FL;
        set_inode_flag(inode, FI_COMPRESSED_FILE);
        stat_inc_compr_inode(inode);
        inc_compr_inode_stat(inode);
        f2fs_mark_inode_dirty_sync(inode, true);
        return 0;
#else
        return -EOPNOTSUPP;
#endif
}
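/*
 * Usage sketch (an assumed call pattern, not a verbatim f2fs call site):
 * a caller enabling compression on an inode would typically gate on
 * f2fs_may_compress(), defined further below, e.g.:
 *
 *	if (f2fs_may_compress(inode))
 *		err = set_compress_context(inode);
 *
 * Without CONFIG_F2FS_FS_COMPRESSION this always fails with -EOPNOTSUPP,
 * so callers must tolerate the error on such builds.
 */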
static inline bool f2fs_disable_compressed_file(struct inode *inode)
{
        struct f2fs_inode_info *fi = F2FS_I(inode);

        if (!f2fs_compressed_file(inode))
                return true;
        if (S_ISREG(inode->i_mode) && F2FS_HAS_BLOCKS(inode))
                return false;

        fi->i_flags &= ~F2FS_COMPR_FL;
        stat_dec_compr_inode(inode);
        clear_inode_flag(inode, FI_COMPRESSED_FILE);
        f2fs_mark_inode_dirty_sync(inode, true);
        return true;
}
#define F2FS_FEATURE_FUNCS(name, flagname) \
static inline bool f2fs_sb_has_##name(struct f2fs_sb_info *sbi) \
{ \
        return F2FS_HAS_FEATURE(sbi, F2FS_FEATURE_##flagname); \
}
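/*
 * For instance, the first instantiation below expands to:
 *
 *	static inline bool f2fs_sb_has_encrypt(struct f2fs_sb_info *sbi)
 *	{
 *		return F2FS_HAS_FEATURE(sbi, F2FS_FEATURE_ENCRYPT);
 *	}
 */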
F2FS_FEATURE_FUNCS(encrypt, ENCRYPT);
F2FS_FEATURE_FUNCS(blkzoned, BLKZONED);
F2FS_FEATURE_FUNCS(extra_attr, EXTRA_ATTR);
F2FS_FEATURE_FUNCS(project_quota, PRJQUOTA);
F2FS_FEATURE_FUNCS(inode_chksum, INODE_CHKSUM);
F2FS_FEATURE_FUNCS(flexible_inline_xattr, FLEXIBLE_INLINE_XATTR);
F2FS_FEATURE_FUNCS(quota_ino, QUOTA_INO);
F2FS_FEATURE_FUNCS(inode_crtime, INODE_CRTIME);
F2FS_FEATURE_FUNCS(lost_found, LOST_FOUND);
F2FS_FEATURE_FUNCS(verity, VERITY);
F2FS_FEATURE_FUNCS(sb_chksum, SB_CHKSUM);
F2FS_FEATURE_FUNCS(casefold, CASEFOLD);
F2FS_FEATURE_FUNCS(compression, COMPRESSION);
F2FS_FEATURE_FUNCS(readonly, RO);
#ifdef CONFIG_BLK_DEV_ZONED
static inline bool f2fs_blkz_is_seq(struct f2fs_sb_info *sbi, int devi,
                block_t blkaddr)
{
        unsigned int zno = blkaddr / sbi->blocks_per_blkz;

        return test_bit(zno, FDEV(devi).blkz_seq);
}
#endif
static inline bool f2fs_hw_should_discard(struct f2fs_sb_info *sbi)
{
        return f2fs_sb_has_blkzoned(sbi);
}

static inline bool f2fs_bdev_support_discard(struct block_device *bdev)
{
        return bdev_max_discard_sectors(bdev) || bdev_is_zoned(bdev);
}

static inline bool f2fs_hw_support_discard(struct f2fs_sb_info *sbi)
{
        int i;

        if (!f2fs_is_multi_device(sbi))
                return f2fs_bdev_support_discard(sbi->sb->s_bdev);

        for (i = 0; i < sbi->s_ndevs; i++)
                if (f2fs_bdev_support_discard(FDEV(i).bdev))
                        return true;
        return false;
}

static inline bool f2fs_realtime_discard_enable(struct f2fs_sb_info *sbi)
{
        return (test_opt(sbi, DISCARD) && f2fs_hw_support_discard(sbi)) ||
                f2fs_hw_should_discard(sbi);
}
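/*
 * Put differently: runtime discards are issued either because the user
 * asked for them (the "discard" mount option) on hardware that supports
 * them, or unconditionally on zoned devices, where zone management makes
 * discarding a requirement rather than an optimization.
 */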
static inline bool f2fs_hw_is_readonly(struct f2fs_sb_info *sbi)
{
        int i;

        if (!f2fs_is_multi_device(sbi))
                return bdev_read_only(sbi->sb->s_bdev);

        for (i = 0; i < sbi->s_ndevs; i++)
                if (bdev_read_only(FDEV(i).bdev))
                        return true;
        return false;
}

static inline bool f2fs_dev_is_readonly(struct f2fs_sb_info *sbi)
{
        return f2fs_sb_has_readonly(sbi) || f2fs_hw_is_readonly(sbi);
}

static inline bool f2fs_lfs_mode(struct f2fs_sb_info *sbi)
{
        return F2FS_OPTION(sbi).fs_mode == FS_MODE_LFS;
}

static inline bool f2fs_low_mem_mode(struct f2fs_sb_info *sbi)
{
        return F2FS_OPTION(sbi).memory_mode == MEMORY_MODE_LOW;
}

static inline bool f2fs_may_compress(struct inode *inode)
{
        if (IS_SWAPFILE(inode) || f2fs_is_pinned_file(inode) ||
                f2fs_is_atomic_file(inode) || f2fs_has_inline_data(inode) ||
                f2fs_is_mmap_file(inode))
                return false;
        return S_ISREG(inode->i_mode) || S_ISDIR(inode->i_mode);
}
static inline void f2fs_i_compr_blocks_update(struct inode *inode,
                u64 blocks, bool add)
{
        struct f2fs_inode_info *fi = F2FS_I(inode);
        int diff = fi->i_cluster_size - blocks;

        /* don't update i_compr_blocks if saved blocks were released */
        if (!add && !atomic_read(&fi->i_compr_blocks))
                return;

        if (add) {
                atomic_add(diff, &fi->i_compr_blocks);
                stat_add_compr_blocks(inode, diff);
        } else {
                atomic_sub(diff, &fi->i_compr_blocks);
                stat_sub_compr_blocks(inode, diff);
        }
        f2fs_mark_inode_dirty_sync(inode, true);
}
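/*
 * Worked example: with i_cluster_size == 4, a cluster written as one
 * compressed block gives diff == 3; i_compr_blocks thus accumulates the
 * number of blocks saved by compression (consistent with the "saved
 * blocks were released" check above), not the count of blocks written.
 */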
static inline bool f2fs_allow_multi_device_dio(struct f2fs_sb_info *sbi,
                int flag)
{
        if (!f2fs_is_multi_device(sbi))
                return false;
        if (flag != F2FS_GET_BLOCK_DIO)
                return false;
        return sbi->aligned_blksize;
}

static inline bool f2fs_need_verity(const struct inode *inode, pgoff_t idx)
{
        return fsverity_active(inode) &&
                idx < DIV_ROUND_UP(inode->i_size, PAGE_SIZE);
}

#ifdef CONFIG_F2FS_FAULT_INJECTION
extern void f2fs_build_fault_attr(struct f2fs_sb_info *sbi, unsigned int rate,
                unsigned int type);
#else
#define f2fs_build_fault_attr(sbi, rate, type) do { } while (0)
#endif
static inline bool is_journalled_quota(struct f2fs_sb_info *sbi)
{
#ifdef CONFIG_QUOTA
        if (f2fs_sb_has_quota_ino(sbi))
                return true;
        if (F2FS_OPTION(sbi).s_qf_names[USRQUOTA] ||
                F2FS_OPTION(sbi).s_qf_names[GRPQUOTA] ||
                F2FS_OPTION(sbi).s_qf_names[PRJQUOTA])
                return true;
#endif
        return false;
}

static inline bool f2fs_block_unit_discard(struct f2fs_sb_info *sbi)
{
        return F2FS_OPTION(sbi).discard_unit == DISCARD_UNIT_BLOCK;
}

static inline void f2fs_io_schedule_timeout(long timeout)
{
        set_current_state(TASK_UNINTERRUPTIBLE);
        io_schedule_timeout(timeout);
}
static inline void f2fs_handle_page_eio(struct f2fs_sb_info *sbi, pgoff_t ofs,
                enum page_type type)
{
        if (unlikely(f2fs_cp_error(sbi)))
                return;

        if (ofs == sbi->page_eio_ofs[type]) {
                if (sbi->page_eio_cnt[type]++ == MAX_RETRY_PAGE_EIO)
                        set_ckpt_flags(sbi, CP_ERROR_FLAG);
        } else {
                sbi->page_eio_ofs[type] = ofs;
                sbi->page_eio_cnt[type] = 0;
        }
}
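/*
 * As implemented above: repeated EIO at the same page offset increments
 * a per-type counter, and once it passes MAX_RETRY_PAGE_EIO the
 * filesystem stops retrying and raises CP_ERROR_FLAG; an error at a
 * different offset restarts tracking for the new offset.
 */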
static inline bool f2fs_is_readonly(struct f2fs_sb_info *sbi)
{
        return f2fs_sb_has_readonly(sbi) || f2fs_readonly(sbi->sb);
}

#define EFSBADCRC       EBADMSG         /* Bad CRC detected */
#define EFSCORRUPTED    EUCLEAN         /* Filesystem is corrupted */

#endif /* _LINUX_F2FS_H */