- /*
- * Copyright (C) 2012 Red Hat. All rights reserved.
- *
- * This file is released under the GPL.
- */
- #include "dm.h"
- #include "dm-bio-prison-v2.h"
- #include "dm-bio-record.h"
- #include "dm-cache-metadata.h"
- #include "dm-io-tracker.h"
- #include <linux/dm-io.h>
- #include <linux/dm-kcopyd.h>
- #include <linux/jiffies.h>
- #include <linux/init.h>
- #include <linux/mempool.h>
- #include <linux/module.h>
- #include <linux/rwsem.h>
- #include <linux/slab.h>
- #include <linux/vmalloc.h>
- #define DM_MSG_PREFIX "cache"
- DECLARE_DM_KCOPYD_THROTTLE_WITH_MODULE_PARM(cache_copy_throttle,
- "A percentage of time allocated for copying to and/or from cache");
- /*----------------------------------------------------------------*/
- /*
- * Glossary:
- *
- * oblock: index of an origin block
- * cblock: index of a cache block
- * promotion: movement of a block from origin to cache
- * demotion: movement of a block from cache to origin
- * migration: movement of a block between the origin and cache device,
- * either direction
- */
- /*----------------------------------------------------------------*/
- /*
- * Represents a chunk of future work. 'input' allows continuations to pass
- * values between themselves, typically error values.
- */
- struct continuation {
- struct work_struct ws;
- blk_status_t input;
- };
- static inline void init_continuation(struct continuation *k,
- void (*fn)(struct work_struct *))
- {
- INIT_WORK(&k->ws, fn);
- k->input = 0;
- }
- static inline void queue_continuation(struct workqueue_struct *wq,
- struct continuation *k)
- {
- queue_work(wq, &k->ws);
- }
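- /*
-  * Illustrative usage, a sketch of the pattern used throughout this
-  * file (next_step_fn is a hypothetical name): point the continuation
-  * at whatever should run next, then queue it when the current step
-  * completes:
-  *
-  *     init_continuation(&mg->k, next_step_fn);
-  *     queue_continuation(cache->wq, &mg->k);
-  *
-  * next_step_fn() recovers its context with container_of() and reads
-  * k->input for the status of the preceding step.
-  */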
- /*----------------------------------------------------------------*/
- /*
- * The batcher collects together pieces of work that need a particular
- * operation to occur before they can proceed (typically a commit).
- */
- struct batcher {
- /*
- * The operation that everyone is waiting for.
- */
- blk_status_t (*commit_op)(void *context);
- void *commit_context;
- /*
- * This is how bios should be issued once the commit op is complete
- * (accounted_request).
- */
- void (*issue_op)(struct bio *bio, void *context);
- void *issue_context;
- /*
- * Queued work gets put on here after commit.
- */
- struct workqueue_struct *wq;
- spinlock_t lock;
- struct list_head work_items;
- struct bio_list bios;
- struct work_struct commit_work;
- bool commit_scheduled;
- };
- static void __commit(struct work_struct *_ws)
- {
- struct batcher *b = container_of(_ws, struct batcher, commit_work);
- blk_status_t r;
- struct list_head work_items;
- struct work_struct *ws, *tmp;
- struct continuation *k;
- struct bio *bio;
- struct bio_list bios;
- INIT_LIST_HEAD(&work_items);
- bio_list_init(&bios);
- /*
- * We have to grab these before the commit_op to avoid a race
- * condition.
- */
- spin_lock_irq(&b->lock);
- list_splice_init(&b->work_items, &work_items);
- bio_list_merge(&bios, &b->bios);
- bio_list_init(&b->bios);
- b->commit_scheduled = false;
- spin_unlock_irq(&b->lock);
- r = b->commit_op(b->commit_context);
- list_for_each_entry_safe(ws, tmp, &work_items, entry) {
- k = container_of(ws, struct continuation, ws);
- k->input = r;
- INIT_LIST_HEAD(&ws->entry); /* to avoid a WARN_ON */
- queue_work(b->wq, ws);
- }
- while ((bio = bio_list_pop(&bios))) {
- if (r) {
- bio->bi_status = r;
- bio_endio(bio);
- } else
- b->issue_op(bio, b->issue_context);
- }
- }
- static void batcher_init(struct batcher *b,
- blk_status_t (*commit_op)(void *),
- void *commit_context,
- void (*issue_op)(struct bio *bio, void *),
- void *issue_context,
- struct workqueue_struct *wq)
- {
- b->commit_op = commit_op;
- b->commit_context = commit_context;
- b->issue_op = issue_op;
- b->issue_context = issue_context;
- b->wq = wq;
- spin_lock_init(&b->lock);
- INIT_LIST_HEAD(&b->work_items);
- bio_list_init(&b->bios);
- INIT_WORK(&b->commit_work, __commit);
- b->commit_scheduled = false;
- }
- static void async_commit(struct batcher *b)
- {
- queue_work(b->wq, &b->commit_work);
- }
- static void continue_after_commit(struct batcher *b, struct continuation *k)
- {
- bool commit_scheduled;
- spin_lock_irq(&b->lock);
- commit_scheduled = b->commit_scheduled;
- list_add_tail(&k->ws.entry, &b->work_items);
- spin_unlock_irq(&b->lock);
- if (commit_scheduled)
- async_commit(b);
- }
- /*
- * Bios are errored if the commit fails.
- */
- static void issue_after_commit(struct batcher *b, struct bio *bio)
- {
- bool commit_scheduled;
- spin_lock_irq(&b->lock);
- commit_scheduled = b->commit_scheduled;
- bio_list_add(&b->bios, bio);
- spin_unlock_irq(&b->lock);
- if (commit_scheduled)
- async_commit(b);
- }
- /*
- * Call this if some urgent work is waiting for the commit to complete.
- */
- static void schedule_commit(struct batcher *b)
- {
- bool immediate;
- spin_lock_irq(&b->lock);
- immediate = !list_empty(&b->work_items) || !bio_list_empty(&b->bios);
- b->commit_scheduled = true;
- spin_unlock_irq(&b->lock);
- if (immediate)
- async_commit(b);
- }
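- /*
-  * Sketch of how a batcher is wired up. The real call site is in the
-  * constructor, outside this excerpt, so treat the exact arguments as
-  * illustrative; commit_op() and issue_op() are defined later in this
-  * file:
-  *
-  *     batcher_init(&cache->committer, commit_op, cache,
-  *                  issue_op, cache, cache->wq);
-  *
-  * Work then funnels in via continue_after_commit() and
-  * issue_after_commit(), and schedule_commit() arms the next __commit().
-  */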
- /*
- * There are a couple of places where we let a bio run, but want to do some
- * work before calling its endio function. We do this by temporarily
- * changing the endio fn.
- */
- struct dm_hook_info {
- bio_end_io_t *bi_end_io;
- };
- static void dm_hook_bio(struct dm_hook_info *h, struct bio *bio,
- bio_end_io_t *bi_end_io, void *bi_private)
- {
- h->bi_end_io = bio->bi_end_io;
- bio->bi_end_io = bi_end_io;
- bio->bi_private = bi_private;
- }
- static void dm_unhook_bio(struct dm_hook_info *h, struct bio *bio)
- {
- bio->bi_end_io = h->bi_end_io;
- }
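- /*
-  * Typical pattern (see overwrite() and overwrite_endio() below): hook
-  * the bio before submitting it, then unhook in the substituted endio:
-  *
-  *     dm_hook_bio(&pb->hook_info, bio, overwrite_endio, mg);
-  *     ...submit the bio...
-  *
-  * and in overwrite_endio():
-  *
-  *     dm_unhook_bio(&pb->hook_info, bio);
-  */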
- /*----------------------------------------------------------------*/
- #define MIGRATION_POOL_SIZE 128
- #define COMMIT_PERIOD HZ
- #define MIGRATION_COUNT_WINDOW 10
- /*
- * The block size of the device holding cache data must be
- * between 32KB and 1GB.
- */
- #define DATA_DEV_BLOCK_SIZE_MIN_SECTORS (32 * 1024 >> SECTOR_SHIFT)
- #define DATA_DEV_BLOCK_SIZE_MAX_SECTORS (1024 * 1024 * 1024 >> SECTOR_SHIFT)
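- /*
-  * With 512 byte sectors (SECTOR_SHIFT == 9) these work out to
-  * 64 sectors (32 * 1024 >> 9) and 2097152 sectors (1 GB >> 9).
-  */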
- enum cache_metadata_mode {
- CM_WRITE, /* metadata may be changed */
- CM_READ_ONLY, /* metadata may not be changed */
- CM_FAIL
- };
- enum cache_io_mode {
- /*
- * Data is written to cached blocks only. These blocks are marked
- * dirty. If you lose the cache device you will lose data.
- * Potential performance increase for both reads and writes.
- */
- CM_IO_WRITEBACK,
- /*
- * Data is written to both cache and origin. Blocks are never
- * dirty. Potential performance benefit for reads only.
- */
- CM_IO_WRITETHROUGH,
- /*
- * A degraded mode useful for various cache coherency situations
- * (eg, rolling back snapshots). Reads and writes always go to the
- * origin. If a write goes to a cached oblock, then the cache
- * block is invalidated.
- */
- CM_IO_PASSTHROUGH
- };
- struct cache_features {
- enum cache_metadata_mode mode;
- enum cache_io_mode io_mode;
- unsigned int metadata_version;
- bool discard_passdown:1;
- };
- struct cache_stats {
- atomic_t read_hit;
- atomic_t read_miss;
- atomic_t write_hit;
- atomic_t write_miss;
- atomic_t demotion;
- atomic_t promotion;
- atomic_t writeback;
- atomic_t copies_avoided;
- atomic_t cache_cell_clash;
- atomic_t commit_count;
- atomic_t discard_count;
- };
- struct cache {
- struct dm_target *ti;
- spinlock_t lock;
- /*
- * Fields for converting from sectors to blocks.
- */
- int sectors_per_block_shift;
- sector_t sectors_per_block;
- struct dm_cache_metadata *cmd;
- /*
- * Metadata is written to this device.
- */
- struct dm_dev *metadata_dev;
- /*
- * The slower of the two data devices. Typically a spindle.
- */
- struct dm_dev *origin_dev;
- /*
- * The faster of the two data devices. Typically an SSD.
- */
- struct dm_dev *cache_dev;
- /*
- * Size of the origin device in _complete_ blocks and native sectors.
- */
- dm_oblock_t origin_blocks;
- sector_t origin_sectors;
- /*
- * Size of the cache device in blocks.
- */
- dm_cblock_t cache_size;
- /*
- * Invalidation fields.
- */
- spinlock_t invalidation_lock;
- struct list_head invalidation_requests;
- sector_t migration_threshold;
- wait_queue_head_t migration_wait;
- atomic_t nr_allocated_migrations;
- /*
- * The number of in-flight migrations that are performing
- * background io. eg, promotion, writeback.
- */
- atomic_t nr_io_migrations;
- struct bio_list deferred_bios;
- struct rw_semaphore quiesce_lock;
- /*
- * origin_blocks entries, discarded if set.
- */
- dm_dblock_t discard_nr_blocks;
- unsigned long *discard_bitset;
- uint32_t discard_block_size; /* a power of 2 times sectors per block */
- /*
- * Rather than reconstructing the table line for the status we just
- * save it and regurgitate.
- */
- unsigned int nr_ctr_args;
- const char **ctr_args;
- struct dm_kcopyd_client *copier;
- struct work_struct deferred_bio_worker;
- struct work_struct migration_worker;
- struct workqueue_struct *wq;
- struct delayed_work waker;
- struct dm_bio_prison_v2 *prison;
- /*
- * cache_size entries, dirty if set
- */
- unsigned long *dirty_bitset;
- atomic_t nr_dirty;
- unsigned int policy_nr_args;
- struct dm_cache_policy *policy;
- /*
- * Cache features such as write-through.
- */
- struct cache_features features;
- struct cache_stats stats;
- bool need_tick_bio:1;
- bool sized:1;
- bool invalidate:1;
- bool commit_requested:1;
- bool loaded_mappings:1;
- bool loaded_discards:1;
- struct rw_semaphore background_work_lock;
- struct batcher committer;
- struct work_struct commit_ws;
- struct dm_io_tracker tracker;
- mempool_t migration_pool;
- struct bio_set bs;
- };
- struct per_bio_data {
- bool tick:1;
- unsigned int req_nr:2;
- struct dm_bio_prison_cell_v2 *cell;
- struct dm_hook_info hook_info;
- sector_t len;
- };
- struct dm_cache_migration {
- struct continuation k;
- struct cache *cache;
- struct policy_work *op;
- struct bio *overwrite_bio;
- struct dm_bio_prison_cell_v2 *cell;
- dm_cblock_t invalidate_cblock;
- dm_oblock_t invalidate_oblock;
- };
- /*----------------------------------------------------------------*/
- static bool writethrough_mode(struct cache *cache)
- {
- return cache->features.io_mode == CM_IO_WRITETHROUGH;
- }
- static bool writeback_mode(struct cache *cache)
- {
- return cache->features.io_mode == CM_IO_WRITEBACK;
- }
- static inline bool passthrough_mode(struct cache *cache)
- {
- return unlikely(cache->features.io_mode == CM_IO_PASSTHROUGH);
- }
- /*----------------------------------------------------------------*/
- static void wake_deferred_bio_worker(struct cache *cache)
- {
- queue_work(cache->wq, &cache->deferred_bio_worker);
- }
- static void wake_migration_worker(struct cache *cache)
- {
- if (passthrough_mode(cache))
- return;
- queue_work(cache->wq, &cache->migration_worker);
- }
- /*----------------------------------------------------------------*/
- static struct dm_bio_prison_cell_v2 *alloc_prison_cell(struct cache *cache)
- {
- return dm_bio_prison_alloc_cell_v2(cache->prison, GFP_NOIO);
- }
- static void free_prison_cell(struct cache *cache, struct dm_bio_prison_cell_v2 *cell)
- {
- dm_bio_prison_free_cell_v2(cache->prison, cell);
- }
- static struct dm_cache_migration *alloc_migration(struct cache *cache)
- {
- struct dm_cache_migration *mg;
- mg = mempool_alloc(&cache->migration_pool, GFP_NOIO);
- memset(mg, 0, sizeof(*mg));
- mg->cache = cache;
- atomic_inc(&cache->nr_allocated_migrations);
- return mg;
- }
- static void free_migration(struct dm_cache_migration *mg)
- {
- struct cache *cache = mg->cache;
- if (atomic_dec_and_test(&cache->nr_allocated_migrations))
- wake_up(&cache->migration_wait);
- mempool_free(mg, &cache->migration_pool);
- }
- /*----------------------------------------------------------------*/
- static inline dm_oblock_t oblock_succ(dm_oblock_t b)
- {
- return to_oblock(from_oblock(b) + 1ull);
- }
- static void build_key(dm_oblock_t begin, dm_oblock_t end, struct dm_cell_key_v2 *key)
- {
- key->virtual = 0;
- key->dev = 0;
- key->block_begin = from_oblock(begin);
- key->block_end = from_oblock(end);
- }
- /*
- * We have two lock levels: level 0, which is used to prevent WRITEs, and
- * level 1, which prevents *both* READs and WRITEs.
- */
- #define WRITE_LOCK_LEVEL 0
- #define READ_WRITE_LOCK_LEVEL 1
- static unsigned int lock_level(struct bio *bio)
- {
- return bio_data_dir(bio) == WRITE ?
- WRITE_LOCK_LEVEL :
- READ_WRITE_LOCK_LEVEL;
- }
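- /*
-  * So a WRITE bio takes its shared lock at level 0 and a READ bio at
-  * level 1. Assuming the bio prison's convention that an exclusive
-  * lock at level N holds back shared getters at levels <= N, an
-  * exclusive WRITE_LOCK_LEVEL lock stalls writes while reads still
-  * pass, and a READ_WRITE_LOCK_LEVEL lock stalls both.
-  */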
- /*----------------------------------------------------------------
- * Per bio data
- *--------------------------------------------------------------*/
- static struct per_bio_data *get_per_bio_data(struct bio *bio)
- {
- struct per_bio_data *pb = dm_per_bio_data(bio, sizeof(struct per_bio_data));
- BUG_ON(!pb);
- return pb;
- }
- static struct per_bio_data *init_per_bio_data(struct bio *bio)
- {
- struct per_bio_data *pb = get_per_bio_data(bio);
- pb->tick = false;
- pb->req_nr = dm_bio_get_target_bio_nr(bio);
- pb->cell = NULL;
- pb->len = 0;
- return pb;
- }
- /*----------------------------------------------------------------*/
- static void defer_bio(struct cache *cache, struct bio *bio)
- {
- spin_lock_irq(&cache->lock);
- bio_list_add(&cache->deferred_bios, bio);
- spin_unlock_irq(&cache->lock);
- wake_deferred_bio_worker(cache);
- }
- static void defer_bios(struct cache *cache, struct bio_list *bios)
- {
- spin_lock_irq(&cache->lock);
- bio_list_merge(&cache->deferred_bios, bios);
- bio_list_init(bios);
- spin_unlock_irq(&cache->lock);
- wake_deferred_bio_worker(cache);
- }
- /*----------------------------------------------------------------*/
- static bool bio_detain_shared(struct cache *cache, dm_oblock_t oblock, struct bio *bio)
- {
- bool r;
- struct per_bio_data *pb;
- struct dm_cell_key_v2 key;
- dm_oblock_t end = to_oblock(from_oblock(oblock) + 1ULL);
- struct dm_bio_prison_cell_v2 *cell_prealloc, *cell;
- cell_prealloc = alloc_prison_cell(cache); /* FIXME: allow wait if calling from worker */
- build_key(oblock, end, &key);
- r = dm_cell_get_v2(cache->prison, &key, lock_level(bio), bio, cell_prealloc, &cell);
- if (!r) {
- /*
- * Failed to get the lock.
- */
- free_prison_cell(cache, cell_prealloc);
- return r;
- }
- if (cell != cell_prealloc)
- free_prison_cell(cache, cell_prealloc);
- pb = get_per_bio_data(bio);
- pb->cell = cell;
- return r;
- }
- /*----------------------------------------------------------------*/
- static bool is_dirty(struct cache *cache, dm_cblock_t b)
- {
- return test_bit(from_cblock(b), cache->dirty_bitset);
- }
- static void set_dirty(struct cache *cache, dm_cblock_t cblock)
- {
- if (!test_and_set_bit(from_cblock(cblock), cache->dirty_bitset)) {
- atomic_inc(&cache->nr_dirty);
- policy_set_dirty(cache->policy, cblock);
- }
- }
- /*
- * These two are called after migrations, to force the policy and the
- * dirty bitset back into sync.
- */
- static void force_set_dirty(struct cache *cache, dm_cblock_t cblock)
- {
- if (!test_and_set_bit(from_cblock(cblock), cache->dirty_bitset))
- atomic_inc(&cache->nr_dirty);
- policy_set_dirty(cache->policy, cblock);
- }
- static void force_clear_dirty(struct cache *cache, dm_cblock_t cblock)
- {
- if (test_and_clear_bit(from_cblock(cblock), cache->dirty_bitset)) {
- if (atomic_dec_return(&cache->nr_dirty) == 0)
- dm_table_event(cache->ti->table);
- }
- policy_clear_dirty(cache->policy, cblock);
- }
- /*----------------------------------------------------------------*/
- static bool block_size_is_power_of_two(struct cache *cache)
- {
- return cache->sectors_per_block_shift >= 0;
- }
- static dm_block_t block_div(dm_block_t b, uint32_t n)
- {
- do_div(b, n);
- return b;
- }
- static dm_block_t oblocks_per_dblock(struct cache *cache)
- {
- dm_block_t oblocks = cache->discard_block_size;
- if (block_size_is_power_of_two(cache))
- oblocks >>= cache->sectors_per_block_shift;
- else
- oblocks = block_div(oblocks, cache->sectors_per_block);
- return oblocks;
- }
- static dm_dblock_t oblock_to_dblock(struct cache *cache, dm_oblock_t oblock)
- {
- return to_dblock(block_div(from_oblock(oblock),
- oblocks_per_dblock(cache)));
- }
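- /*
-  * Worked example with illustrative numbers: if discard_block_size is
-  * 1024 sectors and sectors_per_block is 64, oblocks_per_dblock() is
-  * 16, so oblock 100 maps to dblock 100 / 16 == 6.
-  */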
- static void set_discard(struct cache *cache, dm_dblock_t b)
- {
- BUG_ON(from_dblock(b) >= from_dblock(cache->discard_nr_blocks));
- atomic_inc(&cache->stats.discard_count);
- spin_lock_irq(&cache->lock);
- set_bit(from_dblock(b), cache->discard_bitset);
- spin_unlock_irq(&cache->lock);
- }
- static void clear_discard(struct cache *cache, dm_dblock_t b)
- {
- spin_lock_irq(&cache->lock);
- clear_bit(from_dblock(b), cache->discard_bitset);
- spin_unlock_irq(&cache->lock);
- }
- static bool is_discarded(struct cache *cache, dm_dblock_t b)
- {
- int r;
- spin_lock_irq(&cache->lock);
- r = test_bit(from_dblock(b), cache->discard_bitset);
- spin_unlock_irq(&cache->lock);
- return r;
- }
- static bool is_discarded_oblock(struct cache *cache, dm_oblock_t b)
- {
- int r;
- spin_lock_irq(&cache->lock);
- r = test_bit(from_dblock(oblock_to_dblock(cache, b)),
- cache->discard_bitset);
- spin_unlock_irq(&cache->lock);
- return r;
- }
- /*----------------------------------------------------------------
- * Remapping
- *--------------------------------------------------------------*/
- static void remap_to_origin(struct cache *cache, struct bio *bio)
- {
- bio_set_dev(bio, cache->origin_dev->bdev);
- }
- static void remap_to_cache(struct cache *cache, struct bio *bio,
- dm_cblock_t cblock)
- {
- sector_t bi_sector = bio->bi_iter.bi_sector;
- sector_t block = from_cblock(cblock);
- bio_set_dev(bio, cache->cache_dev->bdev);
- if (!block_size_is_power_of_two(cache))
- bio->bi_iter.bi_sector =
- (block * cache->sectors_per_block) +
- sector_div(bi_sector, cache->sectors_per_block);
- else
- bio->bi_iter.bi_sector =
- (block << cache->sectors_per_block_shift) |
- (bi_sector & (cache->sectors_per_block - 1));
- }
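- /*
-  * Worked example with illustrative numbers: with sectors_per_block ==
-  * 64 (shift 6), a bio at sector 200 keeps its intra-block offset
-  * 200 & 63 == 8, so remapping to cblock 5 gives (5 << 6) | 8 == 328.
-  */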
- static void check_if_tick_bio_needed(struct cache *cache, struct bio *bio)
- {
- struct per_bio_data *pb;
- spin_lock_irq(&cache->lock);
- if (cache->need_tick_bio && !op_is_flush(bio->bi_opf) &&
- bio_op(bio) != REQ_OP_DISCARD) {
- pb = get_per_bio_data(bio);
- pb->tick = true;
- cache->need_tick_bio = false;
- }
- spin_unlock_irq(&cache->lock);
- }
- static void remap_to_origin_clear_discard(struct cache *cache, struct bio *bio,
- dm_oblock_t oblock)
- {
- // FIXME: check_if_tick_bio_needed() is called way too much through this interface
- check_if_tick_bio_needed(cache, bio);
- remap_to_origin(cache, bio);
- if (bio_data_dir(bio) == WRITE)
- clear_discard(cache, oblock_to_dblock(cache, oblock));
- }
- static void remap_to_cache_dirty(struct cache *cache, struct bio *bio,
- dm_oblock_t oblock, dm_cblock_t cblock)
- {
- check_if_tick_bio_needed(cache, bio);
- remap_to_cache(cache, bio, cblock);
- if (bio_data_dir(bio) == WRITE) {
- set_dirty(cache, cblock);
- clear_discard(cache, oblock_to_dblock(cache, oblock));
- }
- }
- static dm_oblock_t get_bio_block(struct cache *cache, struct bio *bio)
- {
- sector_t block_nr = bio->bi_iter.bi_sector;
- if (!block_size_is_power_of_two(cache))
- (void) sector_div(block_nr, cache->sectors_per_block);
- else
- block_nr >>= cache->sectors_per_block_shift;
- return to_oblock(block_nr);
- }
- static bool accountable_bio(struct cache *cache, struct bio *bio)
- {
- return bio_op(bio) != REQ_OP_DISCARD;
- }
- static void accounted_begin(struct cache *cache, struct bio *bio)
- {
- struct per_bio_data *pb;
- if (accountable_bio(cache, bio)) {
- pb = get_per_bio_data(bio);
- pb->len = bio_sectors(bio);
- dm_iot_io_begin(&cache->tracker, pb->len);
- }
- }
- static void accounted_complete(struct cache *cache, struct bio *bio)
- {
- struct per_bio_data *pb = get_per_bio_data(bio);
- dm_iot_io_end(&cache->tracker, pb->len);
- }
- static void accounted_request(struct cache *cache, struct bio *bio)
- {
- accounted_begin(cache, bio);
- dm_submit_bio_remap(bio, NULL);
- }
- static void issue_op(struct bio *bio, void *context)
- {
- struct cache *cache = context;
- accounted_request(cache, bio);
- }
- /*
- * When running in writethrough mode we need to send writes to clean blocks
- * to both the cache and origin devices. Clone the bio and send both in parallel.
- */
- static void remap_to_origin_and_cache(struct cache *cache, struct bio *bio,
- dm_oblock_t oblock, dm_cblock_t cblock)
- {
- struct bio *origin_bio = bio_alloc_clone(cache->origin_dev->bdev, bio,
- GFP_NOIO, &cache->bs);
- BUG_ON(!origin_bio);
- bio_chain(origin_bio, bio);
- if (bio_data_dir(origin_bio) == WRITE)
- clear_discard(cache, oblock_to_dblock(cache, oblock));
- submit_bio(origin_bio);
- remap_to_cache(cache, bio, cblock);
- }
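- /*
-  * Note that bio_chain(origin_bio, bio) makes the original bio's
-  * completion wait on origin_bio, so its endio only runs once both the
-  * origin write and the remapped cache write have finished.
-  */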
- /*----------------------------------------------------------------
- * Failure modes
- *--------------------------------------------------------------*/
- static enum cache_metadata_mode get_cache_mode(struct cache *cache)
- {
- return cache->features.mode;
- }
- static const char *cache_device_name(struct cache *cache)
- {
- return dm_table_device_name(cache->ti->table);
- }
- static void notify_mode_switch(struct cache *cache, enum cache_metadata_mode mode)
- {
- const char *descs[] = {
- "write",
- "read-only",
- "fail"
- };
- dm_table_event(cache->ti->table);
- DMINFO("%s: switching cache to %s mode",
- cache_device_name(cache), descs[(int)mode]);
- }
- static void set_cache_mode(struct cache *cache, enum cache_metadata_mode new_mode)
- {
- bool needs_check;
- enum cache_metadata_mode old_mode = get_cache_mode(cache);
- if (dm_cache_metadata_needs_check(cache->cmd, &needs_check)) {
- DMERR("%s: unable to read needs_check flag, setting failure mode.",
- cache_device_name(cache));
- new_mode = CM_FAIL;
- }
- if (new_mode == CM_WRITE && needs_check) {
- DMERR("%s: unable to switch cache to write mode until repaired.",
- cache_device_name(cache));
- if (old_mode != new_mode)
- new_mode = old_mode;
- else
- new_mode = CM_READ_ONLY;
- }
- /* Never move out of fail mode */
- if (old_mode == CM_FAIL)
- new_mode = CM_FAIL;
- switch (new_mode) {
- case CM_FAIL:
- case CM_READ_ONLY:
- dm_cache_metadata_set_read_only(cache->cmd);
- break;
- case CM_WRITE:
- dm_cache_metadata_set_read_write(cache->cmd);
- break;
- }
- cache->features.mode = new_mode;
- if (new_mode != old_mode)
- notify_mode_switch(cache, new_mode);
- }
- static void abort_transaction(struct cache *cache)
- {
- const char *dev_name = cache_device_name(cache);
- if (get_cache_mode(cache) >= CM_READ_ONLY)
- return;
- DMERR_LIMIT("%s: aborting current metadata transaction", dev_name);
- if (dm_cache_metadata_abort(cache->cmd)) {
- DMERR("%s: failed to abort metadata transaction", dev_name);
- set_cache_mode(cache, CM_FAIL);
- }
- if (dm_cache_metadata_set_needs_check(cache->cmd)) {
- DMERR("%s: failed to set 'needs_check' flag in metadata", dev_name);
- set_cache_mode(cache, CM_FAIL);
- }
- }
- static void metadata_operation_failed(struct cache *cache, const char *op, int r)
- {
- DMERR_LIMIT("%s: metadata operation '%s' failed: error = %d",
- cache_device_name(cache), op, r);
- abort_transaction(cache);
- set_cache_mode(cache, CM_READ_ONLY);
- }
- /*----------------------------------------------------------------*/
- static void load_stats(struct cache *cache)
- {
- struct dm_cache_statistics stats;
- dm_cache_metadata_get_stats(cache->cmd, &stats);
- atomic_set(&cache->stats.read_hit, stats.read_hits);
- atomic_set(&cache->stats.read_miss, stats.read_misses);
- atomic_set(&cache->stats.write_hit, stats.write_hits);
- atomic_set(&cache->stats.write_miss, stats.write_misses);
- }
- static void save_stats(struct cache *cache)
- {
- struct dm_cache_statistics stats;
- if (get_cache_mode(cache) >= CM_READ_ONLY)
- return;
- stats.read_hits = atomic_read(&cache->stats.read_hit);
- stats.read_misses = atomic_read(&cache->stats.read_miss);
- stats.write_hits = atomic_read(&cache->stats.write_hit);
- stats.write_misses = atomic_read(&cache->stats.write_miss);
- dm_cache_metadata_set_stats(cache->cmd, &stats);
- }
- static void update_stats(struct cache_stats *stats, enum policy_operation op)
- {
- switch (op) {
- case POLICY_PROMOTE:
- atomic_inc(&stats->promotion);
- break;
- case POLICY_DEMOTE:
- atomic_inc(&stats->demotion);
- break;
- case POLICY_WRITEBACK:
- atomic_inc(&stats->writeback);
- break;
- }
- }
- /*----------------------------------------------------------------
- * Migration processing
- *
- * Migration covers moving data from the origin device to the cache, or
- * vice versa.
- *--------------------------------------------------------------*/
- static void inc_io_migrations(struct cache *cache)
- {
- atomic_inc(&cache->nr_io_migrations);
- }
- static void dec_io_migrations(struct cache *cache)
- {
- atomic_dec(&cache->nr_io_migrations);
- }
- static bool discard_or_flush(struct bio *bio)
- {
- return bio_op(bio) == REQ_OP_DISCARD || op_is_flush(bio->bi_opf);
- }
- static void calc_discard_block_range(struct cache *cache, struct bio *bio,
- dm_dblock_t *b, dm_dblock_t *e)
- {
- sector_t sb = bio->bi_iter.bi_sector;
- sector_t se = bio_end_sector(bio);
- *b = to_dblock(dm_sector_div_up(sb, cache->discard_block_size));
- if (se - sb < cache->discard_block_size)
- *e = *b;
- else
- *e = to_dblock(block_div(se, cache->discard_block_size));
- }
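- /*
-  * Worked example with illustrative numbers: with discard_block_size ==
-  * 128, a discard of sectors [100, 900) yields b == 1 (rounded up) and
-  * e == 7 (rounded down), so only the discard blocks fully covered by
-  * the bio, [1, 7), get marked.
-  */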
- /*----------------------------------------------------------------*/
- static void prevent_background_work(struct cache *cache)
- {
- lockdep_off();
- down_write(&cache->background_work_lock);
- lockdep_on();
- }
- static void allow_background_work(struct cache *cache)
- {
- lockdep_off();
- up_write(&cache->background_work_lock);
- lockdep_on();
- }
- static bool background_work_begin(struct cache *cache)
- {
- bool r;
- lockdep_off();
- r = down_read_trylock(&cache->background_work_lock);
- lockdep_on();
- return r;
- }
- static void background_work_end(struct cache *cache)
- {
- lockdep_off();
- up_read(&cache->background_work_lock);
- lockdep_on();
- }
- /*----------------------------------------------------------------*/
- static bool bio_writes_complete_block(struct cache *cache, struct bio *bio)
- {
- return (bio_data_dir(bio) == WRITE) &&
- (bio->bi_iter.bi_size == (cache->sectors_per_block << SECTOR_SHIFT));
- }
- static bool optimisable_bio(struct cache *cache, struct bio *bio, dm_oblock_t block)
- {
- return writeback_mode(cache) &&
- (is_discarded_oblock(cache, block) || bio_writes_complete_block(cache, bio));
- }
- static void quiesce(struct dm_cache_migration *mg,
- void (*continuation)(struct work_struct *))
- {
- init_continuation(&mg->k, continuation);
- dm_cell_quiesce_v2(mg->cache->prison, mg->cell, &mg->k.ws);
- }
- static struct dm_cache_migration *ws_to_mg(struct work_struct *ws)
- {
- struct continuation *k = container_of(ws, struct continuation, ws);
- return container_of(k, struct dm_cache_migration, k);
- }
- static void copy_complete(int read_err, unsigned long write_err, void *context)
- {
- struct dm_cache_migration *mg = container_of(context, struct dm_cache_migration, k);
- if (read_err || write_err)
- mg->k.input = BLK_STS_IOERR;
- queue_continuation(mg->cache->wq, &mg->k);
- }
- static void copy(struct dm_cache_migration *mg, bool promote)
- {
- struct dm_io_region o_region, c_region;
- struct cache *cache = mg->cache;
- o_region.bdev = cache->origin_dev->bdev;
- o_region.sector = from_oblock(mg->op->oblock) * cache->sectors_per_block;
- o_region.count = cache->sectors_per_block;
- c_region.bdev = cache->cache_dev->bdev;
- c_region.sector = from_cblock(mg->op->cblock) * cache->sectors_per_block;
- c_region.count = cache->sectors_per_block;
- if (promote)
- dm_kcopyd_copy(cache->copier, &o_region, 1, &c_region, 0, copy_complete, &mg->k);
- else
- dm_kcopyd_copy(cache->copier, &c_region, 1, &o_region, 0, copy_complete, &mg->k);
- }
- static void bio_drop_shared_lock(struct cache *cache, struct bio *bio)
- {
- struct per_bio_data *pb = get_per_bio_data(bio);
- if (pb->cell && dm_cell_put_v2(cache->prison, pb->cell))
- free_prison_cell(cache, pb->cell);
- pb->cell = NULL;
- }
- static void overwrite_endio(struct bio *bio)
- {
- struct dm_cache_migration *mg = bio->bi_private;
- struct cache *cache = mg->cache;
- struct per_bio_data *pb = get_per_bio_data(bio);
- dm_unhook_bio(&pb->hook_info, bio);
- if (bio->bi_status)
- mg->k.input = bio->bi_status;
- queue_continuation(cache->wq, &mg->k);
- }
- static void overwrite(struct dm_cache_migration *mg,
- void (*continuation)(struct work_struct *))
- {
- struct bio *bio = mg->overwrite_bio;
- struct per_bio_data *pb = get_per_bio_data(bio);
- dm_hook_bio(&pb->hook_info, bio, overwrite_endio, mg);
- /*
- * The overwrite bio is part of the copy operation; as such it does
- * not set/clear discard or dirty flags.
- */
- if (mg->op->op == POLICY_PROMOTE)
- remap_to_cache(mg->cache, bio, mg->op->cblock);
- else
- remap_to_origin(mg->cache, bio);
- init_continuation(&mg->k, continuation);
- accounted_request(mg->cache, bio);
- }
- /*
- * Migration steps:
- *
- * 1) exclusive lock preventing WRITEs
- * 2) quiesce
- * 3) copy or issue overwrite bio
- * 4) upgrade to exclusive lock preventing READs and WRITEs
- * 5) quiesce
- * 6) update metadata and commit
- * 7) unlock
- */
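- /*
-  * Roughly, those steps map onto the functions below: 1-2)
-  * mg_lock_writes() + quiesce(), 3) mg_full_copy() or overwrite(),
-  * 4-5) mg_upgrade_lock(), 6) mg_update_metadata(), 7) mg_complete().
-  */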
- static void mg_complete(struct dm_cache_migration *mg, bool success)
- {
- struct bio_list bios;
- struct cache *cache = mg->cache;
- struct policy_work *op = mg->op;
- dm_cblock_t cblock = op->cblock;
- if (success)
- update_stats(&cache->stats, op->op);
- switch (op->op) {
- case POLICY_PROMOTE:
- clear_discard(cache, oblock_to_dblock(cache, op->oblock));
- policy_complete_background_work(cache->policy, op, success);
- if (mg->overwrite_bio) {
- if (success)
- force_set_dirty(cache, cblock);
- else if (mg->k.input)
- mg->overwrite_bio->bi_status = mg->k.input;
- else
- mg->overwrite_bio->bi_status = BLK_STS_IOERR;
- bio_endio(mg->overwrite_bio);
- } else {
- if (success)
- force_clear_dirty(cache, cblock);
- dec_io_migrations(cache);
- }
- break;
- case POLICY_DEMOTE:
- /*
- * We clear dirty here to update the nr_dirty counter.
- */
- if (success)
- force_clear_dirty(cache, cblock);
- policy_complete_background_work(cache->policy, op, success);
- dec_io_migrations(cache);
- break;
- case POLICY_WRITEBACK:
- if (success)
- force_clear_dirty(cache, cblock);
- policy_complete_background_work(cache->policy, op, success);
- dec_io_migrations(cache);
- break;
- }
- bio_list_init(&bios);
- if (mg->cell) {
- if (dm_cell_unlock_v2(cache->prison, mg->cell, &bios))
- free_prison_cell(cache, mg->cell);
- }
- free_migration(mg);
- defer_bios(cache, &bios);
- wake_migration_worker(cache);
- background_work_end(cache);
- }
- static void mg_success(struct work_struct *ws)
- {
- struct dm_cache_migration *mg = ws_to_mg(ws);
- mg_complete(mg, mg->k.input == 0);
- }
- static void mg_update_metadata(struct work_struct *ws)
- {
- int r;
- struct dm_cache_migration *mg = ws_to_mg(ws);
- struct cache *cache = mg->cache;
- struct policy_work *op = mg->op;
- switch (op->op) {
- case POLICY_PROMOTE:
- r = dm_cache_insert_mapping(cache->cmd, op->cblock, op->oblock);
- if (r) {
- DMERR_LIMIT("%s: migration failed; couldn't insert mapping",
- cache_device_name(cache));
- metadata_operation_failed(cache, "dm_cache_insert_mapping", r);
- mg_complete(mg, false);
- return;
- }
- mg_complete(mg, true);
- break;
- case POLICY_DEMOTE:
- r = dm_cache_remove_mapping(cache->cmd, op->cblock);
- if (r) {
- DMERR_LIMIT("%s: migration failed; couldn't update on disk metadata",
- cache_device_name(cache));
- metadata_operation_failed(cache, "dm_cache_remove_mapping", r);
- mg_complete(mg, false);
- return;
- }
- /*
- * It would be nice if we only had to commit when a REQ_FLUSH
- * comes through. But there's one scenario that we have to
- * look out for:
- *
- * - oblock x in a cache block
- * - demotion occurs
- * - cache block gets reallocated and overwritten
- * - crash
- *
- * When we recover, because there was no commit the cache will
- * roll back to having the data for oblock x in the cache block.
- * But the cache block has since been overwritten, so it'll end
- * up pointing to data that was never in 'x' during the history
- * of the device.
- *
- * To avoid this issue we require a commit as part of the
- * demotion operation.
- */
- init_continuation(&mg->k, mg_success);
- continue_after_commit(&cache->committer, &mg->k);
- schedule_commit(&cache->committer);
- break;
- case POLICY_WRITEBACK:
- mg_complete(mg, true);
- break;
- }
- }
- static void mg_update_metadata_after_copy(struct work_struct *ws)
- {
- struct dm_cache_migration *mg = ws_to_mg(ws);
- /*
- * Did the copy succeed?
- */
- if (mg->k.input)
- mg_complete(mg, false);
- else
- mg_update_metadata(ws);
- }
- static void mg_upgrade_lock(struct work_struct *ws)
- {
- int r;
- struct dm_cache_migration *mg = ws_to_mg(ws);
- /*
- * Did the copy succeed?
- */
- if (mg->k.input)
- mg_complete(mg, false);
- else {
- /*
- * Now we want the lock to prevent both reads and writes.
- */
- r = dm_cell_lock_promote_v2(mg->cache->prison, mg->cell,
- READ_WRITE_LOCK_LEVEL);
- if (r < 0)
- mg_complete(mg, false);
- else if (r)
- quiesce(mg, mg_update_metadata);
- else
- mg_update_metadata(ws);
- }
- }
- static void mg_full_copy(struct work_struct *ws)
- {
- struct dm_cache_migration *mg = ws_to_mg(ws);
- struct cache *cache = mg->cache;
- struct policy_work *op = mg->op;
- bool is_policy_promote = (op->op == POLICY_PROMOTE);
- if ((!is_policy_promote && !is_dirty(cache, op->cblock)) ||
- is_discarded_oblock(cache, op->oblock)) {
- mg_upgrade_lock(ws);
- return;
- }
- init_continuation(&mg->k, mg_upgrade_lock);
- copy(mg, is_policy_promote);
- }
- static void mg_copy(struct work_struct *ws)
- {
- struct dm_cache_migration *mg = ws_to_mg(ws);
- if (mg->overwrite_bio) {
- /*
- * No exclusive lock was held when we last checked if the bio
- * was optimisable. So we have to check again in case things
- * have changed (eg, the block may no longer be discarded).
- */
- if (!optimisable_bio(mg->cache, mg->overwrite_bio, mg->op->oblock)) {
- /*
- * Fall back to a real full copy after doing some tidying up.
- */
- bool rb = bio_detain_shared(mg->cache, mg->op->oblock, mg->overwrite_bio);
- BUG_ON(rb); /* An exclusive lock must _not_ be held for this block */
- mg->overwrite_bio = NULL;
- inc_io_migrations(mg->cache);
- mg_full_copy(ws);
- return;
- }
- /*
- * It's safe to do this here, even though it's new data
- * because all IO has been locked out of the block.
- *
- * mg_lock_writes() already took READ_WRITE_LOCK_LEVEL
- * so we're _not_ using mg_upgrade_lock() as the continuation.
- */
- overwrite(mg, mg_update_metadata_after_copy);
- } else
- mg_full_copy(ws);
- }
- static int mg_lock_writes(struct dm_cache_migration *mg)
- {
- int r;
- struct dm_cell_key_v2 key;
- struct cache *cache = mg->cache;
- struct dm_bio_prison_cell_v2 *prealloc;
- prealloc = alloc_prison_cell(cache);
- /*
- * Prevent writes to the block, but allow reads to continue.
- * Unless we're using an overwrite bio, in which case we lock
- * everything.
- */
- build_key(mg->op->oblock, oblock_succ(mg->op->oblock), &key);
- r = dm_cell_lock_v2(cache->prison, &key,
- mg->overwrite_bio ? READ_WRITE_LOCK_LEVEL : WRITE_LOCK_LEVEL,
- prealloc, &mg->cell);
- if (r < 0) {
- free_prison_cell(cache, prealloc);
- mg_complete(mg, false);
- return r;
- }
- if (mg->cell != prealloc)
- free_prison_cell(cache, prealloc);
- if (r == 0)
- mg_copy(&mg->k.ws);
- else
- quiesce(mg, mg_copy);
- return 0;
- }
- static int mg_start(struct cache *cache, struct policy_work *op, struct bio *bio)
- {
- struct dm_cache_migration *mg;
- if (!background_work_begin(cache)) {
- policy_complete_background_work(cache->policy, op, false);
- return -EPERM;
- }
- mg = alloc_migration(cache);
- mg->op = op;
- mg->overwrite_bio = bio;
- if (!bio)
- inc_io_migrations(cache);
- return mg_lock_writes(mg);
- }
- /*----------------------------------------------------------------
- * invalidation processing
- *--------------------------------------------------------------*/
- static void invalidate_complete(struct dm_cache_migration *mg, bool success)
- {
- struct bio_list bios;
- struct cache *cache = mg->cache;
- bio_list_init(&bios);
- if (dm_cell_unlock_v2(cache->prison, mg->cell, &bios))
- free_prison_cell(cache, mg->cell);
- if (!success && mg->overwrite_bio)
- bio_io_error(mg->overwrite_bio);
- free_migration(mg);
- defer_bios(cache, &bios);
- background_work_end(cache);
- }
- static void invalidate_completed(struct work_struct *ws)
- {
- struct dm_cache_migration *mg = ws_to_mg(ws);
- invalidate_complete(mg, !mg->k.input);
- }
- static int invalidate_cblock(struct cache *cache, dm_cblock_t cblock)
- {
- int r = policy_invalidate_mapping(cache->policy, cblock);
- if (!r) {
- r = dm_cache_remove_mapping(cache->cmd, cblock);
- if (r) {
- DMERR_LIMIT("%s: invalidation failed; couldn't update on disk metadata",
- cache_device_name(cache));
- metadata_operation_failed(cache, "dm_cache_remove_mapping", r);
- }
- } else if (r == -ENODATA) {
- /*
- * Harmless, already unmapped.
- */
- r = 0;
- } else
- DMERR("%s: policy_invalidate_mapping failed", cache_device_name(cache));
- return r;
- }
- static void invalidate_remove(struct work_struct *ws)
- {
- int r;
- struct dm_cache_migration *mg = ws_to_mg(ws);
- struct cache *cache = mg->cache;
- r = invalidate_cblock(cache, mg->invalidate_cblock);
- if (r) {
- invalidate_complete(mg, false);
- return;
- }
- init_continuation(&mg->k, invalidate_completed);
- continue_after_commit(&cache->committer, &mg->k);
- remap_to_origin_clear_discard(cache, mg->overwrite_bio, mg->invalidate_oblock);
- mg->overwrite_bio = NULL;
- schedule_commit(&cache->committer);
- }
- static int invalidate_lock(struct dm_cache_migration *mg)
- {
- int r;
- struct dm_cell_key_v2 key;
- struct cache *cache = mg->cache;
- struct dm_bio_prison_cell_v2 *prealloc;
- prealloc = alloc_prison_cell(cache);
- build_key(mg->invalidate_oblock, oblock_succ(mg->invalidate_oblock), &key);
- r = dm_cell_lock_v2(cache->prison, &key,
- READ_WRITE_LOCK_LEVEL, prealloc, &mg->cell);
- if (r < 0) {
- free_prison_cell(cache, prealloc);
- invalidate_complete(mg, false);
- return r;
- }
- if (mg->cell != prealloc)
- free_prison_cell(cache, prealloc);
- if (r)
- quiesce(mg, invalidate_remove);
- else {
- /*
- * We can't call invalidate_remove() directly here because we
- * might still be in request context.
- */
- init_continuation(&mg->k, invalidate_remove);
- queue_work(cache->wq, &mg->k.ws);
- }
- return 0;
- }
- static int invalidate_start(struct cache *cache, dm_cblock_t cblock,
- dm_oblock_t oblock, struct bio *bio)
- {
- struct dm_cache_migration *mg;
- if (!background_work_begin(cache))
- return -EPERM;
- mg = alloc_migration(cache);
- mg->overwrite_bio = bio;
- mg->invalidate_cblock = cblock;
- mg->invalidate_oblock = oblock;
- return invalidate_lock(mg);
- }
- /*----------------------------------------------------------------
- * bio processing
- *--------------------------------------------------------------*/
- enum busy {
- IDLE,
- BUSY
- };
- static enum busy spare_migration_bandwidth(struct cache *cache)
- {
- bool idle = dm_iot_idle_for(&cache->tracker, HZ);
- sector_t current_volume = (atomic_read(&cache->nr_io_migrations) + 1) *
- cache->sectors_per_block;
- if (idle && current_volume <= cache->migration_threshold)
- return IDLE;
- else
- return BUSY;
- }
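- /*
-  * Worked example with illustrative numbers: with sectors_per_block ==
-  * 64 and migration_threshold == 2048 sectors, an idle cache stays
-  * IDLE with up to 31 io migrations in flight ((31 + 1) * 64 == 2048).
-  */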
- static void inc_hit_counter(struct cache *cache, struct bio *bio)
- {
- atomic_inc(bio_data_dir(bio) == READ ?
- &cache->stats.read_hit : &cache->stats.write_hit);
- }
- static void inc_miss_counter(struct cache *cache, struct bio *bio)
- {
- atomic_inc(bio_data_dir(bio) == READ ?
- &cache->stats.read_miss : &cache->stats.write_miss);
- }
- /*----------------------------------------------------------------*/
- static int map_bio(struct cache *cache, struct bio *bio, dm_oblock_t block,
- bool *commit_needed)
- {
- int r, data_dir;
- bool rb, background_queued;
- dm_cblock_t cblock;
- *commit_needed = false;
- rb = bio_detain_shared(cache, block, bio);
- if (!rb) {
- /*
- * An exclusive lock is held for this block, so we have to
- * wait. We set the commit_needed flag so the current
- * transaction will be committed asap, allowing this lock
- * to be dropped.
- */
- *commit_needed = true;
- return DM_MAPIO_SUBMITTED;
- }
- data_dir = bio_data_dir(bio);
- if (optimisable_bio(cache, bio, block)) {
- struct policy_work *op = NULL;
- r = policy_lookup_with_work(cache->policy, block, &cblock, data_dir, true, &op);
- if (unlikely(r && r != -ENOENT)) {
- DMERR_LIMIT("%s: policy_lookup_with_work() failed with r = %d",
- cache_device_name(cache), r);
- bio_io_error(bio);
- return DM_MAPIO_SUBMITTED;
- }
- if (r == -ENOENT && op) {
- bio_drop_shared_lock(cache, bio);
- BUG_ON(op->op != POLICY_PROMOTE);
- mg_start(cache, op, bio);
- return DM_MAPIO_SUBMITTED;
- }
- } else {
- r = policy_lookup(cache->policy, block, &cblock, data_dir, false, &background_queued);
- if (unlikely(r && r != -ENOENT)) {
- DMERR_LIMIT("%s: policy_lookup() failed with r = %d",
- cache_device_name(cache), r);
- bio_io_error(bio);
- return DM_MAPIO_SUBMITTED;
- }
- if (background_queued)
- wake_migration_worker(cache);
- }
- if (r == -ENOENT) {
- struct per_bio_data *pb = get_per_bio_data(bio);
- /*
- * Miss.
- */
- inc_miss_counter(cache, bio);
- if (pb->req_nr == 0) {
- accounted_begin(cache, bio);
- remap_to_origin_clear_discard(cache, bio, block);
- } else {
- /*
- * This is a duplicate writethrough io that is no
- * longer needed because the block has been demoted.
- */
- bio_endio(bio);
- return DM_MAPIO_SUBMITTED;
- }
- } else {
- /*
- * Hit.
- */
- inc_hit_counter(cache, bio);
- /*
- * Passthrough always maps to the origin, invalidating any
- * cache blocks that are written to.
- */
- if (passthrough_mode(cache)) {
- if (bio_data_dir(bio) == WRITE) {
- bio_drop_shared_lock(cache, bio);
- atomic_inc(&cache->stats.demotion);
- invalidate_start(cache, cblock, block, bio);
- } else
- remap_to_origin_clear_discard(cache, bio, block);
- } else {
- if (bio_data_dir(bio) == WRITE && writethrough_mode(cache) &&
- !is_dirty(cache, cblock)) {
- remap_to_origin_and_cache(cache, bio, block, cblock);
- accounted_begin(cache, bio);
- } else
- remap_to_cache_dirty(cache, bio, block, cblock);
- }
- }
- /*
- * dm core turns FUA requests into a separate payload and FLUSH req.
- */
- if (bio->bi_opf & REQ_FUA) {
- /*
- * issue_after_commit will call accounted_begin a second time. So
- * we call accounted_complete() to avoid double accounting.
- */
- accounted_complete(cache, bio);
- issue_after_commit(&cache->committer, bio);
- *commit_needed = true;
- return DM_MAPIO_SUBMITTED;
- }
- return DM_MAPIO_REMAPPED;
- }
- static bool process_bio(struct cache *cache, struct bio *bio)
- {
- bool commit_needed;
- if (map_bio(cache, bio, get_bio_block(cache, bio), &commit_needed) == DM_MAPIO_REMAPPED)
- dm_submit_bio_remap(bio, NULL);
- return commit_needed;
- }
- /*
- * A non-zero return indicates read_only or fail_io mode.
- */
- static int commit(struct cache *cache, bool clean_shutdown)
- {
- int r;
- if (get_cache_mode(cache) >= CM_READ_ONLY)
- return -EINVAL;
- atomic_inc(&cache->stats.commit_count);
- r = dm_cache_commit(cache->cmd, clean_shutdown);
- if (r)
- metadata_operation_failed(cache, "dm_cache_commit", r);
- return r;
- }
- /*
- * Used by the batcher.
- */
- static blk_status_t commit_op(void *context)
- {
- struct cache *cache = context;
- if (dm_cache_changed_this_transaction(cache->cmd))
- return errno_to_blk_status(commit(cache, false));
- return 0;
- }
- /*----------------------------------------------------------------*/
- static bool process_flush_bio(struct cache *cache, struct bio *bio)
- {
- struct per_bio_data *pb = get_per_bio_data(bio);
- if (!pb->req_nr)
- remap_to_origin(cache, bio);
- else
- remap_to_cache(cache, bio, 0);
- issue_after_commit(&cache->committer, bio);
- return true;
- }
- static bool process_discard_bio(struct cache *cache, struct bio *bio)
- {
- dm_dblock_t b, e;
- // FIXME: do we need to lock the region? Or can we just assume the
- // user won't be so foolish as to issue discard concurrently with
- // other IO?
- calc_discard_block_range(cache, bio, &b, &e);
- while (b != e) {
- set_discard(cache, b);
- b = to_dblock(from_dblock(b) + 1);
- }
- if (cache->features.discard_passdown) {
- remap_to_origin(cache, bio);
- dm_submit_bio_remap(bio, NULL);
- } else
- bio_endio(bio);
- return false;
- }
- static void process_deferred_bios(struct work_struct *ws)
- {
- struct cache *cache = container_of(ws, struct cache, deferred_bio_worker);
- bool commit_needed = false;
- struct bio_list bios;
- struct bio *bio;
- bio_list_init(&bios);
- spin_lock_irq(&cache->lock);
- bio_list_merge(&bios, &cache->deferred_bios);
- bio_list_init(&cache->deferred_bios);
- spin_unlock_irq(&cache->lock);
- while ((bio = bio_list_pop(&bios))) {
- if (bio->bi_opf & REQ_PREFLUSH)
- commit_needed = process_flush_bio(cache, bio) || commit_needed;
- else if (bio_op(bio) == REQ_OP_DISCARD)
- commit_needed = process_discard_bio(cache, bio) || commit_needed;
- else
- commit_needed = process_bio(cache, bio) || commit_needed;
- cond_resched();
- }
- if (commit_needed)
- schedule_commit(&cache->committer);
- }
- /*----------------------------------------------------------------
- * Main worker loop
- *--------------------------------------------------------------*/
- static void requeue_deferred_bios(struct cache *cache)
- {
- struct bio *bio;
- struct bio_list bios;
- bio_list_init(&bios);
- bio_list_merge(&bios, &cache->deferred_bios);
- bio_list_init(&cache->deferred_bios);
- while ((bio = bio_list_pop(&bios))) {
- bio->bi_status = BLK_STS_DM_REQUEUE;
- bio_endio(bio);
- cond_resched();
- }
- }
- /*
- * We want to commit periodically so that not too much
- * unwritten metadata builds up.
- */
- static void do_waker(struct work_struct *ws)
- {
- struct cache *cache = container_of(to_delayed_work(ws), struct cache, waker);
- policy_tick(cache->policy, true);
- wake_migration_worker(cache);
- schedule_commit(&cache->committer);
- queue_delayed_work(cache->wq, &cache->waker, COMMIT_PERIOD);
- }
- static void check_migrations(struct work_struct *ws)
- {
- int r;
- struct policy_work *op;
- struct cache *cache = container_of(ws, struct cache, migration_worker);
- enum busy b;
- for (;;) {
- b = spare_migration_bandwidth(cache);
- r = policy_get_background_work(cache->policy, b == IDLE, &op);
- if (r == -ENODATA)
- break;
- if (r) {
- DMERR_LIMIT("%s: policy_background_work failed",
- cache_device_name(cache));
- break;
- }
- r = mg_start(cache, op, NULL);
- if (r)
- break;
- cond_resched();
- }
- }
- /*----------------------------------------------------------------
- * Target methods
- *--------------------------------------------------------------*/
- /*
- * This function gets called on the error paths of the constructor, so we
- * have to cope with a partially initialised struct.
- */
- static void destroy(struct cache *cache)
- {
- unsigned int i;
- mempool_exit(&cache->migration_pool);
- if (cache->prison)
- dm_bio_prison_destroy_v2(cache->prison);
- cancel_delayed_work_sync(&cache->waker);
- if (cache->wq)
- destroy_workqueue(cache->wq);
- if (cache->dirty_bitset)
- free_bitset(cache->dirty_bitset);
- if (cache->discard_bitset)
- free_bitset(cache->discard_bitset);
- if (cache->copier)
- dm_kcopyd_client_destroy(cache->copier);
- if (cache->cmd)
- dm_cache_metadata_close(cache->cmd);
- if (cache->metadata_dev)
- dm_put_device(cache->ti, cache->metadata_dev);
- if (cache->origin_dev)
- dm_put_device(cache->ti, cache->origin_dev);
- if (cache->cache_dev)
- dm_put_device(cache->ti, cache->cache_dev);
- if (cache->policy)
- dm_cache_policy_destroy(cache->policy);
- for (i = 0; i < cache->nr_ctr_args; i++)
- kfree(cache->ctr_args[i]);
- kfree(cache->ctr_args);
- bioset_exit(&cache->bs);
- kfree(cache);
- }
- static void cache_dtr(struct dm_target *ti)
- {
- struct cache *cache = ti->private;
- destroy(cache);
- }
- static sector_t get_dev_size(struct dm_dev *dev)
- {
- return bdev_nr_sectors(dev->bdev);
- }
- /*----------------------------------------------------------------*/
- /*
- * Construct a cache device mapping.
- *
- * cache <metadata dev> <cache dev> <origin dev> <block size>
- * <#feature args> [<feature arg>]*
- * <policy> <#policy args> [<policy arg>]*
- *
- * metadata dev : fast device holding the persistent metadata
- * cache dev : fast device holding cached data blocks
- * origin dev : slow device holding original data blocks
- * block size : cache unit size in sectors
- *
- * #feature args : number of feature arguments passed
- * feature args : writethrough. (The default is writeback.)
- *
- * policy : the replacement policy to use
- * #policy args : an even number of policy arguments corresponding
- * to key/value pairs passed to the policy
- * policy args : key/value pairs passed to the policy
- * E.g. 'sequential_threshold 1024'
- * See cache-policies.txt for details.
- *
- * Optional feature arguments are:
- * writethrough : write through caching that prohibits cache block
- * content from being different from origin block content.
- * Without this argument, the default behaviour is to write
- * back cache block contents later for performance reasons,
- * so they may differ from the corresponding origin blocks.
- */
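- /*
-  * For example, a writethrough cache with 512-sector (256 KiB) blocks
-  * and the default policy might be created with a table such as the
-  * following sketch (device paths and sizes are illustrative only):
-  *
-  *   dmsetup create cached --table \
-  *     '0 41943040 cache /dev/fast/meta /dev/fast/blocks /dev/slow \
-  *      512 1 writethrough default 0'
-  */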
- struct cache_args {
- struct dm_target *ti;
- struct dm_dev *metadata_dev;
- struct dm_dev *cache_dev;
- sector_t cache_sectors;
- struct dm_dev *origin_dev;
- sector_t origin_sectors;
- uint32_t block_size;
- const char *policy_name;
- int policy_argc;
- const char **policy_argv;
- struct cache_features features;
- };
- static void destroy_cache_args(struct cache_args *ca)
- {
- if (ca->metadata_dev)
- dm_put_device(ca->ti, ca->metadata_dev);
- if (ca->cache_dev)
- dm_put_device(ca->ti, ca->cache_dev);
- if (ca->origin_dev)
- dm_put_device(ca->ti, ca->origin_dev);
- kfree(ca);
- }
- static bool at_least_one_arg(struct dm_arg_set *as, char **error)
- {
- if (!as->argc) {
- *error = "Insufficient args";
- return false;
- }
- return true;
- }
- static int parse_metadata_dev(struct cache_args *ca, struct dm_arg_set *as,
- char **error)
- {
- int r;
- sector_t metadata_dev_size;
- if (!at_least_one_arg(as, error))
- return -EINVAL;
- r = dm_get_device(ca->ti, dm_shift_arg(as), FMODE_READ | FMODE_WRITE,
- &ca->metadata_dev);
- if (r) {
- *error = "Error opening metadata device";
- return r;
- }
- metadata_dev_size = get_dev_size(ca->metadata_dev);
- if (metadata_dev_size > DM_CACHE_METADATA_MAX_SECTORS_WARNING)
- DMWARN("Metadata device %pg is larger than %u sectors: excess space will not be used.",
- ca->metadata_dev->bdev, THIN_METADATA_MAX_SECTORS);
- return 0;
- }
- static int parse_cache_dev(struct cache_args *ca, struct dm_arg_set *as,
- char **error)
- {
- int r;
- if (!at_least_one_arg(as, error))
- return -EINVAL;
- r = dm_get_device(ca->ti, dm_shift_arg(as), FMODE_READ | FMODE_WRITE,
- &ca->cache_dev);
- if (r) {
- *error = "Error opening cache device";
- return r;
- }
- ca->cache_sectors = get_dev_size(ca->cache_dev);
- return 0;
- }
- static int parse_origin_dev(struct cache_args *ca, struct dm_arg_set *as,
- char **error)
- {
- int r;
- if (!at_least_one_arg(as, error))
- return -EINVAL;
- r = dm_get_device(ca->ti, dm_shift_arg(as), FMODE_READ | FMODE_WRITE,
- &ca->origin_dev);
- if (r) {
- *error = "Error opening origin device";
- return r;
- }
- ca->origin_sectors = get_dev_size(ca->origin_dev);
- if (ca->ti->len > ca->origin_sectors) {
- *error = "Device size larger than cached device";
- return -EINVAL;
- }
- return 0;
- }
- static int parse_block_size(struct cache_args *ca, struct dm_arg_set *as,
- char **error)
- {
- unsigned long block_size;
- if (!at_least_one_arg(as, error))
- return -EINVAL;
- if (kstrtoul(dm_shift_arg(as), 10, &block_size) || !block_size ||
- block_size < DATA_DEV_BLOCK_SIZE_MIN_SECTORS ||
- block_size > DATA_DEV_BLOCK_SIZE_MAX_SECTORS ||
- block_size & (DATA_DEV_BLOCK_SIZE_MIN_SECTORS - 1)) {
- *error = "Invalid data block size";
- return -EINVAL;
- }
- if (block_size > ca->cache_sectors) {
- *error = "Data block size is larger than the cache device";
- return -EINVAL;
- }
- ca->block_size = block_size;
- return 0;
- }
- static void init_features(struct cache_features *cf)
- {
- cf->mode = CM_WRITE;
- cf->io_mode = CM_IO_WRITEBACK;
- cf->metadata_version = 1;
- cf->discard_passdown = true;
- }
- static int parse_features(struct cache_args *ca, struct dm_arg_set *as,
- char **error)
- {
- static const struct dm_arg _args[] = {
- {0, 3, "Invalid number of cache feature arguments"},
- };
- int r, mode_ctr = 0;
- unsigned int argc;
- const char *arg;
- struct cache_features *cf = &ca->features;
- init_features(cf);
- r = dm_read_arg_group(_args, as, &argc, error);
- if (r)
- return -EINVAL;
- while (argc--) {
- arg = dm_shift_arg(as);
- if (!strcasecmp(arg, "writeback")) {
- cf->io_mode = CM_IO_WRITEBACK;
- mode_ctr++;
- }
- else if (!strcasecmp(arg, "writethrough")) {
- cf->io_mode = CM_IO_WRITETHROUGH;
- mode_ctr++;
- }
- else if (!strcasecmp(arg, "passthrough")) {
- cf->io_mode = CM_IO_PASSTHROUGH;
- mode_ctr++;
- }
- else if (!strcasecmp(arg, "metadata2"))
- cf->metadata_version = 2;
- else if (!strcasecmp(arg, "no_discard_passdown"))
- cf->discard_passdown = false;
- else {
- *error = "Unrecognised cache feature requested";
- return -EINVAL;
- }
- }
- if (mode_ctr > 1) {
- *error = "Duplicate cache io_mode features requested";
- return -EINVAL;
- }
- return 0;
- }
- static int parse_policy(struct cache_args *ca, struct dm_arg_set *as,
- char **error)
- {
- static const struct dm_arg _args[] = {
- {0, 1024, "Invalid number of policy arguments"},
- };
- int r;
- if (!at_least_one_arg(as, error))
- return -EINVAL;
- ca->policy_name = dm_shift_arg(as);
- r = dm_read_arg_group(_args, as, &ca->policy_argc, error);
- if (r)
- return -EINVAL;
- ca->policy_argv = (const char **)as->argv;
- dm_consume_args(as, ca->policy_argc);
- return 0;
- }
- static int parse_cache_args(struct cache_args *ca, int argc, char **argv,
- char **error)
- {
- int r;
- struct dm_arg_set as;
- as.argc = argc;
- as.argv = argv;
- r = parse_metadata_dev(ca, &as, error);
- if (r)
- return r;
- r = parse_cache_dev(ca, &as, error);
- if (r)
- return r;
- r = parse_origin_dev(ca, &as, error);
- if (r)
- return r;
- r = parse_block_size(ca, &as, error);
- if (r)
- return r;
- r = parse_features(ca, &as, error);
- if (r)
- return r;
- r = parse_policy(ca, &as, error);
- if (r)
- return r;
- return 0;
- }
- /*----------------------------------------------------------------*/
- static struct kmem_cache *migration_cache;
- #define NOT_CORE_OPTION 1
- static int process_config_option(struct cache *cache, const char *key, const char *value)
- {
- unsigned long tmp;
- if (!strcasecmp(key, "migration_threshold")) {
- if (kstrtoul(value, 10, &tmp))
- return -EINVAL;
- cache->migration_threshold = tmp;
- return 0;
- }
- return NOT_CORE_OPTION;
- }
- static int set_config_value(struct cache *cache, const char *key, const char *value)
- {
- int r = process_config_option(cache, key, value);
- if (r == NOT_CORE_OPTION)
- r = policy_set_config_value(cache->policy, key, value);
- if (r)
- DMWARN("bad config value for %s: %s", key, value);
- return r;
- }
- static int set_config_values(struct cache *cache, int argc, const char **argv)
- {
- int r = 0;
- if (argc & 1) {
- DMWARN("Odd number of policy arguments given but they should be <key> <value> pairs.");
- return -EINVAL;
- }
- while (argc) {
- r = set_config_value(cache, argv[0], argv[1]);
- if (r)
- break;
- argc -= 2;
- argv += 2;
- }
- return r;
- }
- static int create_cache_policy(struct cache *cache, struct cache_args *ca,
- char **error)
- {
- struct dm_cache_policy *p = dm_cache_policy_create(ca->policy_name,
- cache->cache_size,
- cache->origin_sectors,
- cache->sectors_per_block);
- if (IS_ERR(p)) {
- *error = "Error creating cache's policy";
- return PTR_ERR(p);
- }
- cache->policy = p;
- BUG_ON(!cache->policy);
- return 0;
- }
- /*
- * We want the discard block size to be at least the size of the cache
- * block size and have no more than 2^14 discard blocks across the origin.
- */
- #define MAX_DISCARD_BLOCKS (1 << 14)
- static bool too_many_discard_blocks(sector_t discard_block_size,
- sector_t origin_size)
- {
- (void) sector_div(origin_size, discard_block_size);
- return origin_size > MAX_DISCARD_BLOCKS;
- }
- static sector_t calculate_discard_block_size(sector_t cache_block_size,
- sector_t origin_size)
- {
- sector_t discard_block_size = cache_block_size;
- if (origin_size)
- while (too_many_discard_blocks(discard_block_size, origin_size))
- discard_block_size *= 2;
- return discard_block_size;
- }
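- /*
-  * Worked example (illustrative numbers): with a 512-sector cache block
-  * and a 1 TiB (2^31 sector) origin, 512-sector discard blocks would
-  * give 2^22 of them, so the size doubles until
-  * 2^31 / discard_block_size <= 2^14, i.e. 2^17 sectors (64 MiB).
-  */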
- static void set_cache_size(struct cache *cache, dm_cblock_t size)
- {
- dm_block_t nr_blocks = from_cblock(size);
- if (nr_blocks > (1 << 20) && cache->cache_size != size)
- DMWARN_LIMIT("You have created a cache device with a lot of individual cache blocks (%llu)\n"
- "All these mappings can consume a lot of kernel memory, and take some time to read/write.\n"
- "Please consider increasing the cache block size to reduce the overall cache block count.",
- (unsigned long long) nr_blocks);
- cache->cache_size = size;
- }
- #define DEFAULT_MIGRATION_THRESHOLD 2048
- static int cache_create(struct cache_args *ca, struct cache **result)
- {
- int r = 0;
- char **error = &ca->ti->error;
- struct cache *cache;
- struct dm_target *ti = ca->ti;
- dm_block_t origin_blocks;
- struct dm_cache_metadata *cmd;
- bool may_format = ca->features.mode == CM_WRITE;
- cache = kzalloc(sizeof(*cache), GFP_KERNEL);
- if (!cache)
- return -ENOMEM;
- cache->ti = ca->ti;
- ti->private = cache;
- ti->accounts_remapped_io = true;
- ti->num_flush_bios = 2;
- ti->flush_supported = true;
- ti->num_discard_bios = 1;
- ti->discards_supported = true;
- ti->per_io_data_size = sizeof(struct per_bio_data);
- cache->features = ca->features;
- if (writethrough_mode(cache)) {
- /* Create bioset for writethrough bios issued to origin */
- r = bioset_init(&cache->bs, BIO_POOL_SIZE, 0, 0);
- if (r)
- goto bad;
- }
- cache->metadata_dev = ca->metadata_dev;
- cache->origin_dev = ca->origin_dev;
- cache->cache_dev = ca->cache_dev;
- ca->metadata_dev = ca->origin_dev = ca->cache_dev = NULL;
- origin_blocks = cache->origin_sectors = ca->origin_sectors;
- origin_blocks = block_div(origin_blocks, ca->block_size);
- cache->origin_blocks = to_oblock(origin_blocks);
- cache->sectors_per_block = ca->block_size;
- if (dm_set_target_max_io_len(ti, cache->sectors_per_block)) {
- r = -EINVAL;
- goto bad;
- }
- if (ca->block_size & (ca->block_size - 1)) {
- dm_block_t cache_size = ca->cache_sectors;
- cache->sectors_per_block_shift = -1;
- cache_size = block_div(cache_size, ca->block_size);
- set_cache_size(cache, to_cblock(cache_size));
- } else {
- cache->sectors_per_block_shift = __ffs(ca->block_size);
- set_cache_size(cache, to_cblock(ca->cache_sectors >> cache->sectors_per_block_shift));
- }
- r = create_cache_policy(cache, ca, error);
- if (r)
- goto bad;
- cache->policy_nr_args = ca->policy_argc;
- cache->migration_threshold = DEFAULT_MIGRATION_THRESHOLD;
- r = set_config_values(cache, ca->policy_argc, ca->policy_argv);
- if (r) {
- *error = "Error setting cache policy's config values";
- goto bad;
- }
- cmd = dm_cache_metadata_open(cache->metadata_dev->bdev,
- ca->block_size, may_format,
- dm_cache_policy_get_hint_size(cache->policy),
- ca->features.metadata_version);
- if (IS_ERR(cmd)) {
- *error = "Error creating metadata object";
- r = PTR_ERR(cmd);
- goto bad;
- }
- cache->cmd = cmd;
- set_cache_mode(cache, CM_WRITE);
- if (get_cache_mode(cache) != CM_WRITE) {
- *error = "Unable to get write access to metadata, please check/repair metadata.";
- r = -EINVAL;
- goto bad;
- }
- if (passthrough_mode(cache)) {
- bool all_clean;
- r = dm_cache_metadata_all_clean(cache->cmd, &all_clean);
- if (r) {
- *error = "dm_cache_metadata_all_clean() failed";
- goto bad;
- }
- if (!all_clean) {
- *error = "Cannot enter passthrough mode unless all blocks are clean";
- r = -EINVAL;
- goto bad;
- }
- policy_allow_migrations(cache->policy, false);
- }
- spin_lock_init(&cache->lock);
- bio_list_init(&cache->deferred_bios);
- atomic_set(&cache->nr_allocated_migrations, 0);
- atomic_set(&cache->nr_io_migrations, 0);
- init_waitqueue_head(&cache->migration_wait);
- r = -ENOMEM;
- atomic_set(&cache->nr_dirty, 0);
- cache->dirty_bitset = alloc_bitset(from_cblock(cache->cache_size));
- if (!cache->dirty_bitset) {
- *error = "could not allocate dirty bitset";
- goto bad;
- }
- clear_bitset(cache->dirty_bitset, from_cblock(cache->cache_size));
- cache->discard_block_size =
- calculate_discard_block_size(cache->sectors_per_block,
- cache->origin_sectors);
- cache->discard_nr_blocks = to_dblock(dm_sector_div_up(cache->origin_sectors,
- cache->discard_block_size));
- cache->discard_bitset = alloc_bitset(from_dblock(cache->discard_nr_blocks));
- if (!cache->discard_bitset) {
- *error = "could not allocate discard bitset";
- goto bad;
- }
- clear_bitset(cache->discard_bitset, from_dblock(cache->discard_nr_blocks));
- cache->copier = dm_kcopyd_client_create(&dm_kcopyd_throttle);
- if (IS_ERR(cache->copier)) {
- *error = "could not create kcopyd client";
- r = PTR_ERR(cache->copier);
- goto bad;
- }
- cache->wq = alloc_workqueue("dm-" DM_MSG_PREFIX, WQ_MEM_RECLAIM, 0);
- if (!cache->wq) {
- *error = "could not create workqueue for metadata object";
- goto bad;
- }
- INIT_WORK(&cache->deferred_bio_worker, process_deferred_bios);
- INIT_WORK(&cache->migration_worker, check_migrations);
- INIT_DELAYED_WORK(&cache->waker, do_waker);
- cache->prison = dm_bio_prison_create_v2(cache->wq);
- if (!cache->prison) {
- *error = "could not create bio prison";
- goto bad;
- }
- r = mempool_init_slab_pool(&cache->migration_pool, MIGRATION_POOL_SIZE,
- migration_cache);
- if (r) {
- *error = "Error creating cache's migration mempool";
- goto bad;
- }
- cache->need_tick_bio = true;
- cache->sized = false;
- cache->invalidate = false;
- cache->commit_requested = false;
- cache->loaded_mappings = false;
- cache->loaded_discards = false;
- load_stats(cache);
- atomic_set(&cache->stats.demotion, 0);
- atomic_set(&cache->stats.promotion, 0);
- atomic_set(&cache->stats.copies_avoided, 0);
- atomic_set(&cache->stats.cache_cell_clash, 0);
- atomic_set(&cache->stats.commit_count, 0);
- atomic_set(&cache->stats.discard_count, 0);
- spin_lock_init(&cache->invalidation_lock);
- INIT_LIST_HEAD(&cache->invalidation_requests);
- batcher_init(&cache->committer, commit_op, cache,
- issue_op, cache, cache->wq);
- dm_iot_init(&cache->tracker);
- init_rwsem(&cache->background_work_lock);
- prevent_background_work(cache);
- *result = cache;
- return 0;
- bad:
- destroy(cache);
- return r;
- }
- static int copy_ctr_args(struct cache *cache, int argc, const char **argv)
- {
- unsigned int i;
- const char **copy;
- copy = kcalloc(argc, sizeof(*copy), GFP_KERNEL);
- if (!copy)
- return -ENOMEM;
- for (i = 0; i < argc; i++) {
- copy[i] = kstrdup(argv[i], GFP_KERNEL);
- if (!copy[i]) {
- while (i--)
- kfree(copy[i]);
- kfree(copy);
- return -ENOMEM;
- }
- }
- cache->nr_ctr_args = argc;
- cache->ctr_args = copy;
- return 0;
- }
- static int cache_ctr(struct dm_target *ti, unsigned int argc, char **argv)
- {
- int r = -EINVAL;
- struct cache_args *ca;
- struct cache *cache = NULL;
- ca = kzalloc(sizeof(*ca), GFP_KERNEL);
- if (!ca) {
- ti->error = "Error allocating memory for cache";
- return -ENOMEM;
- }
- ca->ti = ti;
- r = parse_cache_args(ca, argc, argv, &ti->error);
- if (r)
- goto out;
- r = cache_create(ca, &cache);
- if (r)
- goto out;
- r = copy_ctr_args(cache, argc - 3, (const char **)argv + 3);
- if (r) {
- destroy(cache);
- goto out;
- }
- ti->private = cache;
- out:
- destroy_cache_args(ca);
- return r;
- }
- /*----------------------------------------------------------------*/
- static int cache_map(struct dm_target *ti, struct bio *bio)
- {
- struct cache *cache = ti->private;
- int r;
- bool commit_needed;
- dm_oblock_t block = get_bio_block(cache, bio);
- init_per_bio_data(bio);
- if (unlikely(from_oblock(block) >= from_oblock(cache->origin_blocks))) {
- /*
- * This can only occur if the io goes to a partial block at
- * the end of the origin device. We don't cache these.
- * Just remap to the origin and carry on.
- */
- remap_to_origin(cache, bio);
- accounted_begin(cache, bio);
- return DM_MAPIO_REMAPPED;
- }
- if (discard_or_flush(bio)) {
- defer_bio(cache, bio);
- return DM_MAPIO_SUBMITTED;
- }
- r = map_bio(cache, bio, block, &commit_needed);
- if (commit_needed)
- schedule_commit(&cache->committer);
- return r;
- }
- static int cache_end_io(struct dm_target *ti, struct bio *bio, blk_status_t *error)
- {
- struct cache *cache = ti->private;
- unsigned long flags;
- struct per_bio_data *pb = get_per_bio_data(bio);
- if (pb->tick) {
- policy_tick(cache->policy, false);
- spin_lock_irqsave(&cache->lock, flags);
- cache->need_tick_bio = true;
- spin_unlock_irqrestore(&cache->lock, flags);
- }
- bio_drop_shared_lock(cache, bio);
- accounted_complete(cache, bio);
- return DM_ENDIO_DONE;
- }
- static int write_dirty_bitset(struct cache *cache)
- {
- int r;
- if (get_cache_mode(cache) >= CM_READ_ONLY)
- return -EINVAL;
- r = dm_cache_set_dirty_bits(cache->cmd, from_cblock(cache->cache_size), cache->dirty_bitset);
- if (r)
- metadata_operation_failed(cache, "dm_cache_set_dirty_bits", r);
- return r;
- }
- static int write_discard_bitset(struct cache *cache)
- {
- int r;
- unsigned int i;
- if (get_cache_mode(cache) >= CM_READ_ONLY)
- return -EINVAL;
- r = dm_cache_discard_bitset_resize(cache->cmd, cache->discard_block_size,
- cache->discard_nr_blocks);
- if (r) {
- DMERR("%s: could not resize on-disk discard bitset", cache_device_name(cache));
- metadata_operation_failed(cache, "dm_cache_discard_bitset_resize", r);
- return r;
- }
- for (i = 0; i < from_dblock(cache->discard_nr_blocks); i++) {
- r = dm_cache_set_discard(cache->cmd, to_dblock(i),
- is_discarded(cache, to_dblock(i)));
- if (r) {
- metadata_operation_failed(cache, "dm_cache_set_discard", r);
- return r;
- }
- }
- return 0;
- }
- static int write_hints(struct cache *cache)
- {
- int r;
- if (get_cache_mode(cache) >= CM_READ_ONLY)
- return -EINVAL;
- r = dm_cache_write_hints(cache->cmd, cache->policy);
- if (r) {
- metadata_operation_failed(cache, "dm_cache_write_hints", r);
- return r;
- }
- return 0;
- }
- /*
- * returns true on success
- */
- static bool sync_metadata(struct cache *cache)
- {
- int r1, r2, r3, r4;
- r1 = write_dirty_bitset(cache);
- if (r1)
- DMERR("%s: could not write dirty bitset", cache_device_name(cache));
- r2 = write_discard_bitset(cache);
- if (r2)
- DMERR("%s: could not write discard bitset", cache_device_name(cache));
- save_stats(cache);
- r3 = write_hints(cache);
- if (r3)
- DMERR("%s: could not write hints", cache_device_name(cache));
- /*
- * If writing the above metadata failed, we still commit, but don't
- * set the clean shutdown flag. This will effectively force every
- * dirty bit to be set on reload.
- */
- r4 = commit(cache, !r1 && !r2 && !r3);
- if (r4)
- DMERR("%s: could not write cache metadata", cache_device_name(cache));
- return !r1 && !r2 && !r3 && !r4;
- }
- static void cache_postsuspend(struct dm_target *ti)
- {
- struct cache *cache = ti->private;
- prevent_background_work(cache);
- BUG_ON(atomic_read(&cache->nr_io_migrations));
- cancel_delayed_work_sync(&cache->waker);
- drain_workqueue(cache->wq);
- WARN_ON(cache->tracker.in_flight);
- /*
- * If it's a flush suspend there won't be any deferred bios, so this
- * call is harmless.
- */
- requeue_deferred_bios(cache);
- if (get_cache_mode(cache) == CM_WRITE)
- (void) sync_metadata(cache);
- }
- static int load_mapping(void *context, dm_oblock_t oblock, dm_cblock_t cblock,
- bool dirty, uint32_t hint, bool hint_valid)
- {
- struct cache *cache = context;
- if (dirty) {
- set_bit(from_cblock(cblock), cache->dirty_bitset);
- atomic_inc(&cache->nr_dirty);
- } else
- clear_bit(from_cblock(cblock), cache->dirty_bitset);
- return policy_load_mapping(cache->policy, oblock, cblock, dirty, hint, hint_valid);
- }
- /*
- * The discard block size in the on-disk metadata is not
- * necessarily the same as the one we're currently using. So we have to
- * be careful to only set the discarded attribute if we know it
- * covers a complete block of the new size.
- */
- struct discard_load_info {
- struct cache *cache;
- /*
- * These blocks are sized using the on-disk dblock size, rather
- * than the current one.
- */
- dm_block_t block_size;
- dm_block_t discard_begin, discard_end;
- };
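- /*
-  * For instance (illustrative sizes): if the on-disk dblock size was
-  * 1024 sectors and the current one is 4096, an on-disk discard run over
-  * dblocks [8, 12) spans sectors [8192, 12288), which rounds inwards to
-  * current dblocks [2, 3); only the fully covered block gets set.
-  */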
- static void discard_load_info_init(struct cache *cache,
- struct discard_load_info *li)
- {
- li->cache = cache;
- li->discard_begin = li->discard_end = 0;
- }
- static void set_discard_range(struct discard_load_info *li)
- {
- sector_t b, e;
- if (li->discard_begin == li->discard_end)
- return;
- /*
- * Convert to sectors.
- */
- b = li->discard_begin * li->block_size;
- e = li->discard_end * li->block_size;
- /*
- * Then convert back to the current dblock size.
- */
- b = dm_sector_div_up(b, li->cache->discard_block_size);
- sector_div(e, li->cache->discard_block_size);
- /*
- * The origin may have shrunk, so we need to check we're still in
- * bounds.
- */
- if (e > from_dblock(li->cache->discard_nr_blocks))
- e = from_dblock(li->cache->discard_nr_blocks);
- for (; b < e; b++)
- set_discard(li->cache, to_dblock(b));
- }
- static int load_discard(void *context, sector_t discard_block_size,
- dm_dblock_t dblock, bool discard)
- {
- struct discard_load_info *li = context;
- li->block_size = discard_block_size;
- if (discard) {
- if (from_dblock(dblock) == li->discard_end)
- /*
- * We're already in a discard range, just extend it.
- */
- li->discard_end = li->discard_end + 1ULL;
- else {
- /*
- * Emit the old range and start a new one.
- */
- set_discard_range(li);
- li->discard_begin = from_dblock(dblock);
- li->discard_end = li->discard_begin + 1ULL;
- }
- } else {
- set_discard_range(li);
- li->discard_begin = li->discard_end = 0;
- }
- return 0;
- }
- static dm_cblock_t get_cache_dev_size(struct cache *cache)
- {
- sector_t size = get_dev_size(cache->cache_dev);
- (void) sector_div(size, cache->sectors_per_block);
- return to_cblock(size);
- }
- static bool can_resize(struct cache *cache, dm_cblock_t new_size)
- {
- if (from_cblock(new_size) > from_cblock(cache->cache_size)) {
- if (cache->sized) {
- DMERR("%s: unable to extend cache due to missing cache table reload",
- cache_device_name(cache));
- return false;
- }
- }
- /*
- * We can't drop a dirty block when shrinking the cache.
- */
- while (from_cblock(new_size) < from_cblock(cache->cache_size)) {
- new_size = to_cblock(from_cblock(new_size) + 1);
- if (is_dirty(cache, new_size)) {
- DMERR("%s: unable to shrink cache; cache block %llu is dirty",
- cache_device_name(cache),
- (unsigned long long) from_cblock(new_size));
- return false;
- }
- }
- return true;
- }
- static int resize_cache_dev(struct cache *cache, dm_cblock_t new_size)
- {
- int r;
- r = dm_cache_resize(cache->cmd, new_size);
- if (r) {
- DMERR("%s: could not resize cache metadata", cache_device_name(cache));
- metadata_operation_failed(cache, "dm_cache_resize", r);
- return r;
- }
- set_cache_size(cache, new_size);
- return 0;
- }
- static int cache_preresume(struct dm_target *ti)
- {
- int r = 0;
- struct cache *cache = ti->private;
- dm_cblock_t csize = get_cache_dev_size(cache);
- /*
- * Check to see if the cache has resized.
- */
- if (!cache->sized) {
- r = resize_cache_dev(cache, csize);
- if (r)
- return r;
- cache->sized = true;
- } else if (csize != cache->cache_size) {
- if (!can_resize(cache, csize))
- return -EINVAL;
- r = resize_cache_dev(cache, csize);
- if (r)
- return r;
- }
- if (!cache->loaded_mappings) {
- r = dm_cache_load_mappings(cache->cmd, cache->policy,
- load_mapping, cache);
- if (r) {
- DMERR("%s: could not load cache mappings", cache_device_name(cache));
- metadata_operation_failed(cache, "dm_cache_load_mappings", r);
- return r;
- }
- cache->loaded_mappings = true;
- }
- if (!cache->loaded_discards) {
- struct discard_load_info li;
- /*
- * The discard bitset could have been resized, or the
- * discard block size changed. To be safe we start by
- * setting every dblock to not discarded.
- */
- clear_bitset(cache->discard_bitset, from_dblock(cache->discard_nr_blocks));
- discard_load_info_init(cache, &li);
- r = dm_cache_load_discards(cache->cmd, load_discard, &li);
- if (r) {
- DMERR("%s: could not load origin discards", cache_device_name(cache));
- metadata_operation_failed(cache, "dm_cache_load_discards", r);
- return r;
- }
- set_discard_range(&li);
- cache->loaded_discards = true;
- }
- return r;
- }
- static void cache_resume(struct dm_target *ti)
- {
- struct cache *cache = ti->private;
- cache->need_tick_bio = true;
- allow_background_work(cache);
- do_waker(&cache->waker.work);
- }
- static void emit_flags(struct cache *cache, char *result,
- unsigned int maxlen, ssize_t *sz_ptr)
- {
- ssize_t sz = *sz_ptr;
- struct cache_features *cf = &cache->features;
- unsigned int count = (cf->metadata_version == 2) + !cf->discard_passdown + 1;
- DMEMIT("%u ", count);
- if (cf->metadata_version == 2)
- DMEMIT("metadata2 ");
- if (writethrough_mode(cache))
- DMEMIT("writethrough ");
- else if (passthrough_mode(cache))
- DMEMIT("passthrough ");
- else if (writeback_mode(cache))
- DMEMIT("writeback ");
- else {
- DMEMIT("unknown ");
- DMERR("%s: internal error: unknown io mode: %d",
- cache_device_name(cache), (int) cf->io_mode);
- }
- if (!cf->discard_passdown)
- DMEMIT("no_discard_passdown ");
- *sz_ptr = sz;
- }
- /*
- * Status format:
- *
- * <metadata block size> <#used metadata blocks>/<#total metadata blocks>
- * <cache block size> <#used cache blocks>/<#total cache blocks>
- * <#read hits> <#read misses> <#write hits> <#write misses>
- * <#demotions> <#promotions> <#dirty>
- * <#features> <features>*
- * <#core args> <core args>
- * <policy name> <#policy args> <policy args>* <cache metadata mode> <needs_check>
- */
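- /*
-  * An illustrative STATUSTYPE_INFO line (all values made up) for a
-  * healthy writeback device using the smq policy with no extra policy
-  * arguments:
-  *
-  *   8 72/65536 128 3000/262144 180 24 30 4 0 3000 0 \
-  *   1 writeback 2 migration_threshold 2048 smq 0 rw -
-  */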
- static void cache_status(struct dm_target *ti, status_type_t type,
- unsigned int status_flags, char *result, unsigned int maxlen)
- {
- int r = 0;
- unsigned int i;
- ssize_t sz = 0;
- dm_block_t nr_free_blocks_metadata = 0;
- dm_block_t nr_blocks_metadata = 0;
- char buf[BDEVNAME_SIZE];
- struct cache *cache = ti->private;
- dm_cblock_t residency;
- bool needs_check;
- switch (type) {
- case STATUSTYPE_INFO:
- if (get_cache_mode(cache) == CM_FAIL) {
- DMEMIT("Fail");
- break;
- }
- /* Commit to ensure statistics aren't out-of-date */
- if (!(status_flags & DM_STATUS_NOFLUSH_FLAG) && !dm_suspended(ti))
- (void) commit(cache, false);
- r = dm_cache_get_free_metadata_block_count(cache->cmd, &nr_free_blocks_metadata);
- if (r) {
- DMERR("%s: dm_cache_get_free_metadata_block_count returned %d",
- cache_device_name(cache), r);
- goto err;
- }
- r = dm_cache_get_metadata_dev_size(cache->cmd, &nr_blocks_metadata);
- if (r) {
- DMERR("%s: dm_cache_get_metadata_dev_size returned %d",
- cache_device_name(cache), r);
- goto err;
- }
- residency = policy_residency(cache->policy);
- DMEMIT("%u %llu/%llu %llu %llu/%llu %u %u %u %u %u %u %lu ",
- (unsigned int)DM_CACHE_METADATA_BLOCK_SIZE,
- (unsigned long long)(nr_blocks_metadata - nr_free_blocks_metadata),
- (unsigned long long)nr_blocks_metadata,
- (unsigned long long)cache->sectors_per_block,
- (unsigned long long) from_cblock(residency),
- (unsigned long long) from_cblock(cache->cache_size),
- (unsigned int) atomic_read(&cache->stats.read_hit),
- (unsigned int) atomic_read(&cache->stats.read_miss),
- (unsigned int) atomic_read(&cache->stats.write_hit),
- (unsigned int) atomic_read(&cache->stats.write_miss),
- (unsigned int) atomic_read(&cache->stats.demotion),
- (unsigned int) atomic_read(&cache->stats.promotion),
- (unsigned long) atomic_read(&cache->nr_dirty));
- emit_flags(cache, result, maxlen, &sz);
- DMEMIT("2 migration_threshold %llu ", (unsigned long long) cache->migration_threshold);
- DMEMIT("%s ", dm_cache_policy_get_name(cache->policy));
- if (sz < maxlen) {
- r = policy_emit_config_values(cache->policy, result, maxlen, &sz);
- if (r)
- DMERR("%s: policy_emit_config_values returned %d",
- cache_device_name(cache), r);
- }
- if (get_cache_mode(cache) == CM_READ_ONLY)
- DMEMIT("ro ");
- else
- DMEMIT("rw ");
- r = dm_cache_metadata_needs_check(cache->cmd, &needs_check);
- if (r || needs_check)
- DMEMIT("needs_check ");
- else
- DMEMIT("- ");
- break;
- case STATUSTYPE_TABLE:
- format_dev_t(buf, cache->metadata_dev->bdev->bd_dev);
- DMEMIT("%s ", buf);
- format_dev_t(buf, cache->cache_dev->bdev->bd_dev);
- DMEMIT("%s ", buf);
- format_dev_t(buf, cache->origin_dev->bdev->bd_dev);
- DMEMIT("%s", buf);
- for (i = 0; i < cache->nr_ctr_args - 1; i++)
- DMEMIT(" %s", cache->ctr_args[i]);
- if (cache->nr_ctr_args)
- DMEMIT(" %s", cache->ctr_args[cache->nr_ctr_args - 1]);
- break;
- case STATUSTYPE_IMA:
- DMEMIT_TARGET_NAME_VERSION(ti->type);
- if (get_cache_mode(cache) == CM_FAIL)
- DMEMIT(",metadata_mode=fail");
- else if (get_cache_mode(cache) == CM_READ_ONLY)
- DMEMIT(",metadata_mode=ro");
- else
- DMEMIT(",metadata_mode=rw");
- format_dev_t(buf, cache->metadata_dev->bdev->bd_dev);
- DMEMIT(",cache_metadata_device=%s", buf);
- format_dev_t(buf, cache->cache_dev->bdev->bd_dev);
- DMEMIT(",cache_device=%s", buf);
- format_dev_t(buf, cache->origin_dev->bdev->bd_dev);
- DMEMIT(",cache_origin_device=%s", buf);
- DMEMIT(",writethrough=%c", writethrough_mode(cache) ? 'y' : 'n');
- DMEMIT(",writeback=%c", writeback_mode(cache) ? 'y' : 'n');
- DMEMIT(",passthrough=%c", passthrough_mode(cache) ? 'y' : 'n');
- DMEMIT(",metadata2=%c", cache->features.metadata_version == 2 ? 'y' : 'n');
- DMEMIT(",no_discard_passdown=%c", cache->features.discard_passdown ? 'n' : 'y');
- DMEMIT(";");
- break;
- }
- return;
- err:
- DMEMIT("Error");
- }
- /*
- * Defines a range of cblocks, begin to (end - 1) are in the range. end is
- * the one-past-the-end value.
- */
- struct cblock_range {
- dm_cblock_t begin;
- dm_cblock_t end;
- };
- /*
- * A cache block range can take two forms:
- *
- * i) A single cblock, eg. '3456'
- * ii) A begin and end cblock with a dash between, eg. 123-234
- */
- static int parse_cblock_range(struct cache *cache, const char *str,
- struct cblock_range *result)
- {
- char dummy;
- uint64_t b, e;
- int r;
- /*
- * Try and parse form (ii) first.
- */
- r = sscanf(str, "%llu-%llu%c", &b, &e, &dummy);
- if (r < 0)
- return r;
- if (r == 2) {
- result->begin = to_cblock(b);
- result->end = to_cblock(e);
- return 0;
- }
- /*
- * That didn't work, try form (i).
- */
- r = sscanf(str, "%llu%c", &b, &dummy);
- if (r < 0)
- return r;
- if (r == 1) {
- result->begin = to_cblock(b);
- result->end = to_cblock(from_cblock(result->begin) + 1u);
- return 0;
- }
- DMERR("%s: invalid cblock range '%s'", cache_device_name(cache), str);
- return -EINVAL;
- }
- static int validate_cblock_range(struct cache *cache, struct cblock_range *range)
- {
- uint64_t b = from_cblock(range->begin);
- uint64_t e = from_cblock(range->end);
- uint64_t n = from_cblock(cache->cache_size);
- if (b >= n) {
- DMERR("%s: begin cblock out of range: %llu >= %llu",
- cache_device_name(cache), b, n);
- return -EINVAL;
- }
- if (e > n) {
- DMERR("%s: end cblock out of range: %llu > %llu",
- cache_device_name(cache), e, n);
- return -EINVAL;
- }
- if (b >= e) {
- DMERR("%s: invalid cblock range: %llu >= %llu",
- cache_device_name(cache), b, e);
- return -EINVAL;
- }
- return 0;
- }
- static inline dm_cblock_t cblock_succ(dm_cblock_t b)
- {
- return to_cblock(from_cblock(b) + 1);
- }
- static int request_invalidation(struct cache *cache, struct cblock_range *range)
- {
- int r = 0;
- /*
- * We don't need to do any locking here because we know we're in
- * passthrough mode. There is potential for a race between an
- * invalidation triggered by an io and an invalidation message. This
- * is harmless; we needn't worry if the policy call fails.
- */
- while (range->begin != range->end) {
- r = invalidate_cblock(cache, range->begin);
- if (r)
- return r;
- range->begin = cblock_succ(range->begin);
- }
- cache->commit_requested = true;
- return r;
- }
- static int process_invalidate_cblocks_message(struct cache *cache, unsigned int count,
- const char **cblock_ranges)
- {
- int r = 0;
- unsigned int i;
- struct cblock_range range;
- if (!passthrough_mode(cache)) {
- DMERR("%s: cache has to be in passthrough mode for invalidation",
- cache_device_name(cache));
- return -EPERM;
- }
- for (i = 0; i < count; i++) {
- r = parse_cblock_range(cache, cblock_ranges[i], &range);
- if (r)
- break;
- r = validate_cblock_range(cache, &range);
- if (r)
- break;
- /*
- * Pass begin and end origin blocks to the worker and wake it.
- */
- r = request_invalidation(cache, &range);
- if (r)
- break;
- }
- return r;
- }
- /*
- * Supports
- * "<key> <value>"
- * and
- * "invalidate_cblocks [(<begin>)|(<begin>-<end>)]*
- *
- * The key migration_threshold is supported by the cache target core.
- */
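- /*
-  * For example (device name hypothetical; invalidation additionally
-  * requires the cache to be in passthrough mode):
-  *
-  *   dmsetup message my-cache 0 migration_threshold 204800
-  *   dmsetup message my-cache 0 invalidate_cblocks 2345 3456-4567
-  */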
- static int cache_message(struct dm_target *ti, unsigned int argc, char **argv,
- char *result, unsigned int maxlen)
- {
- struct cache *cache = ti->private;
- if (!argc)
- return -EINVAL;
- if (get_cache_mode(cache) >= CM_READ_ONLY) {
- DMERR("%s: unable to service cache target messages in READ_ONLY or FAIL mode",
- cache_device_name(cache));
- return -EOPNOTSUPP;
- }
- if (!strcasecmp(argv[0], "invalidate_cblocks"))
- return process_invalidate_cblocks_message(cache, argc - 1, (const char **) argv + 1);
- if (argc != 2)
- return -EINVAL;
- return set_config_value(cache, argv[0], argv[1]);
- }
- static int cache_iterate_devices(struct dm_target *ti,
- iterate_devices_callout_fn fn, void *data)
- {
- int r = 0;
- struct cache *cache = ti->private;
- r = fn(ti, cache->cache_dev, 0, get_dev_size(cache->cache_dev), data);
- if (!r)
- r = fn(ti, cache->origin_dev, 0, ti->len, data);
- return r;
- }
- /*
- * If discard_passdown was enabled, verify that the origin device
- * supports discards; disable discard_passdown if not.
- */
- static void disable_passdown_if_not_supported(struct cache *cache)
- {
- struct block_device *origin_bdev = cache->origin_dev->bdev;
- struct queue_limits *origin_limits = &bdev_get_queue(origin_bdev)->limits;
- const char *reason = NULL;
- if (!cache->features.discard_passdown)
- return;
- if (!bdev_max_discard_sectors(origin_bdev))
- reason = "discard unsupported";
- else if (origin_limits->max_discard_sectors < cache->sectors_per_block)
- reason = "max discard sectors smaller than a block";
- if (reason) {
- DMWARN("Origin device (%pg) %s: Disabling discard passdown.",
- origin_bdev, reason);
- cache->features.discard_passdown = false;
- }
- }
- static void set_discard_limits(struct cache *cache, struct queue_limits *limits)
- {
- struct block_device *origin_bdev = cache->origin_dev->bdev;
- struct queue_limits *origin_limits = &bdev_get_queue(origin_bdev)->limits;
- if (!cache->features.discard_passdown) {
- /* No passdown is done so setting own virtual limits */
- limits->max_discard_sectors = min_t(sector_t, cache->discard_block_size * 1024,
- cache->origin_sectors);
- limits->discard_granularity = cache->discard_block_size << SECTOR_SHIFT;
- return;
- }
- /*
- * cache_iterate_devices() stacks both the origin and fast device limits,
- * but discards aren't passed to the fast device, so inherit the origin's limits.
- */
- limits->max_discard_sectors = origin_limits->max_discard_sectors;
- limits->max_hw_discard_sectors = origin_limits->max_hw_discard_sectors;
- limits->discard_granularity = origin_limits->discard_granularity;
- limits->discard_alignment = origin_limits->discard_alignment;
- limits->discard_misaligned = origin_limits->discard_misaligned;
- }
- static void cache_io_hints(struct dm_target *ti, struct queue_limits *limits)
- {
- struct cache *cache = ti->private;
- uint64_t io_opt_sectors = limits->io_opt >> SECTOR_SHIFT;
- /*
- * If the system-determined stacked limits are compatible with the
- * cache's blocksize (io_opt is a factor) do not override them.
- */
- if (io_opt_sectors < cache->sectors_per_block ||
- do_div(io_opt_sectors, cache->sectors_per_block)) {
- blk_limits_io_min(limits, cache->sectors_per_block << SECTOR_SHIFT);
- blk_limits_io_opt(limits, cache->sectors_per_block << SECTOR_SHIFT);
- }
- disable_passdown_if_not_supported(cache);
- set_discard_limits(cache, limits);
- }
- /*----------------------------------------------------------------*/
- static struct target_type cache_target = {
- .name = "cache",
- .version = {2, 2, 0},
- .module = THIS_MODULE,
- .ctr = cache_ctr,
- .dtr = cache_dtr,
- .map = cache_map,
- .end_io = cache_end_io,
- .postsuspend = cache_postsuspend,
- .preresume = cache_preresume,
- .resume = cache_resume,
- .status = cache_status,
- .message = cache_message,
- .iterate_devices = cache_iterate_devices,
- .io_hints = cache_io_hints,
- };
- static int __init dm_cache_init(void)
- {
- int r;
- migration_cache = KMEM_CACHE(dm_cache_migration, 0);
- if (!migration_cache)
- return -ENOMEM;
- r = dm_register_target(&cache_target);
- if (r) {
- DMERR("cache target registration failed: %d", r);
- kmem_cache_destroy(migration_cache);
- return r;
- }
- return 0;
- }
- static void __exit dm_cache_exit(void)
- {
- dm_unregister_target(&cache_target);
- kmem_cache_destroy(migration_cache);
- }
- module_init(dm_cache_init);
- module_exit(dm_cache_exit);
- MODULE_DESCRIPTION(DM_NAME " cache target");
- MODULE_AUTHOR("Joe Thornber <[email protected]>");
- MODULE_LICENSE("GPL");