- // SPDX-License-Identifier: GPL-2.0+
- /*
- * Sleepable Read-Copy Update mechanism for mutual exclusion.
- *
- * Copyright (C) IBM Corporation, 2006
- * Copyright (C) Fujitsu, 2012
- *
- * Authors: Paul McKenney <[email protected]>
- * Lai Jiangshan <[email protected]>
- *
- * For detailed explanation of Read-Copy Update mechanism see -
- * Documentation/RCU/ *.txt
- *
- */
- #define pr_fmt(fmt) "rcu: " fmt
- #include <linux/export.h>
- #include <linux/mutex.h>
- #include <linux/percpu.h>
- #include <linux/preempt.h>
- #include <linux/rcupdate_wait.h>
- #include <linux/sched.h>
- #include <linux/smp.h>
- #include <linux/delay.h>
- #include <linux/module.h>
- #include <linux/slab.h>
- #include <linux/srcu.h>
- #include "rcu.h"
- #include "rcu_segcblist.h"
- /* Holdoff in nanoseconds for auto-expediting. */
- #define DEFAULT_SRCU_EXP_HOLDOFF (25 * 1000)
- static ulong exp_holdoff = DEFAULT_SRCU_EXP_HOLDOFF;
- module_param(exp_holdoff, ulong, 0444);
- /* Overflow-check frequency. N bits roughly says every 2**N grace periods. */
- static ulong counter_wrap_check = (ULONG_MAX >> 2);
- module_param(counter_wrap_check, ulong, 0444);
- /*
- * Control conversion to SRCU_SIZE_BIG:
- * 0: Don't convert at all.
- * 1: Convert at init_srcu_struct() time.
- * 2: Convert when rcutorture invokes srcu_torture_stats_print().
- * 3: Decide at boot time based on system shape (default).
- * 0x1x: Convert when excessive contention encountered.
- */
- #define SRCU_SIZING_NONE 0
- #define SRCU_SIZING_INIT 1
- #define SRCU_SIZING_TORTURE 2
- #define SRCU_SIZING_AUTO 3
- #define SRCU_SIZING_CONTEND 0x10
- #define SRCU_SIZING_IS(x) ((convert_to_big & ~SRCU_SIZING_CONTEND) == x)
- #define SRCU_SIZING_IS_NONE() (SRCU_SIZING_IS(SRCU_SIZING_NONE))
- #define SRCU_SIZING_IS_INIT() (SRCU_SIZING_IS(SRCU_SIZING_INIT))
- #define SRCU_SIZING_IS_TORTURE() (SRCU_SIZING_IS(SRCU_SIZING_TORTURE))
- #define SRCU_SIZING_IS_CONTEND() (convert_to_big & SRCU_SIZING_CONTEND)
- static int convert_to_big = SRCU_SIZING_AUTO;
- module_param(convert_to_big, int, 0444);
- /* Number of CPUs to trigger init_srcu_struct()-time transition to big. */
- static int big_cpu_lim __read_mostly = 128;
- module_param(big_cpu_lim, int, 0444);
- /* Contention events per jiffy to initiate transition to big. */
- static int small_contention_lim __read_mostly = 100;
- module_param(small_contention_lim, int, 0444);
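- /*
- * These parameters are normally set on the kernel boot command line.
- * A purely illustrative example, assuming the usual "srcutree." prefix
- * that parameters defined in this built-in file receive:
- *
- *	srcutree.convert_to_big=0x13 srcutree.small_contention_lim=50
- *
- * This keeps the default boot-time sizing decision but also converts
- * to SRCU_SIZE_BIG on contention, with a lowered contention threshold.
- */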
- /* Early-boot callback-management, so early that no lock is required! */
- static LIST_HEAD(srcu_boot_list);
- static bool __read_mostly srcu_init_done;
- static void srcu_invoke_callbacks(struct work_struct *work);
- static void srcu_reschedule(struct srcu_struct *ssp, unsigned long delay);
- static void process_srcu(struct work_struct *work);
- static void srcu_delay_timer(struct timer_list *t);
- /* Wrappers for lock acquisition and release, see raw_spin_lock_rcu_node(). */
- #define spin_lock_rcu_node(p) \
- do { \
- spin_lock(&ACCESS_PRIVATE(p, lock)); \
- smp_mb__after_unlock_lock(); \
- } while (0)
- #define spin_unlock_rcu_node(p) spin_unlock(&ACCESS_PRIVATE(p, lock))
- #define spin_lock_irq_rcu_node(p) \
- do { \
- spin_lock_irq(&ACCESS_PRIVATE(p, lock)); \
- smp_mb__after_unlock_lock(); \
- } while (0)
- #define spin_unlock_irq_rcu_node(p) \
- spin_unlock_irq(&ACCESS_PRIVATE(p, lock))
- #define spin_lock_irqsave_rcu_node(p, flags) \
- do { \
- spin_lock_irqsave(&ACCESS_PRIVATE(p, lock), flags); \
- smp_mb__after_unlock_lock(); \
- } while (0)
- #define spin_trylock_irqsave_rcu_node(p, flags) \
- ({ \
- bool ___locked = spin_trylock_irqsave(&ACCESS_PRIVATE(p, lock), flags); \
- \
- if (___locked) \
- smp_mb__after_unlock_lock(); \
- ___locked; \
- })
- #define spin_unlock_irqrestore_rcu_node(p, flags) \
- spin_unlock_irqrestore(&ACCESS_PRIVATE(p, lock), flags)
- /*
- * Initialize SRCU per-CPU data. Note that statically allocated
- * srcu_struct structures might already have srcu_read_lock() and
- * srcu_read_unlock() running against them. So if the is_static parameter
- * is set, don't initialize ->srcu_lock_count[] and ->srcu_unlock_count[].
- */
- static void init_srcu_struct_data(struct srcu_struct *ssp)
- {
- int cpu;
- struct srcu_data *sdp;
- /*
- * Initialize the per-CPU srcu_data array, which feeds into the
- * leaves of the srcu_node tree.
- */
- WARN_ON_ONCE(ARRAY_SIZE(sdp->srcu_lock_count) !=
- ARRAY_SIZE(sdp->srcu_unlock_count));
- for_each_possible_cpu(cpu) {
- sdp = per_cpu_ptr(ssp->sda, cpu);
- spin_lock_init(&ACCESS_PRIVATE(sdp, lock));
- rcu_segcblist_init(&sdp->srcu_cblist);
- sdp->srcu_cblist_invoking = false;
- sdp->srcu_gp_seq_needed = ssp->srcu_gp_seq;
- sdp->srcu_gp_seq_needed_exp = ssp->srcu_gp_seq;
- sdp->mynode = NULL;
- sdp->cpu = cpu;
- INIT_WORK(&sdp->work, srcu_invoke_callbacks);
- timer_setup(&sdp->delay_work, srcu_delay_timer, 0);
- sdp->ssp = ssp;
- }
- }
- /* Invalid seq state, used during snp node initialization */
- #define SRCU_SNP_INIT_SEQ 0x2
- /*
- * Check whether the sequence number corresponding to an snp node
- * is invalid.
- */
- static inline bool srcu_invl_snp_seq(unsigned long s)
- {
- return rcu_seq_state(s) == SRCU_SNP_INIT_SEQ;
- }
- /*
- * Allocate and initialize the SRCU combining tree. Returns @true if
- * allocation succeeded and @false otherwise.
- */
- static bool init_srcu_struct_nodes(struct srcu_struct *ssp, gfp_t gfp_flags)
- {
- int cpu;
- int i;
- int level = 0;
- int levelspread[RCU_NUM_LVLS];
- struct srcu_data *sdp;
- struct srcu_node *snp;
- struct srcu_node *snp_first;
- /* Initialize geometry if it has not already been initialized. */
- rcu_init_geometry();
- ssp->node = kcalloc(rcu_num_nodes, sizeof(*ssp->node), gfp_flags);
- if (!ssp->node)
- return false;
- /* Work out the overall tree geometry. */
- ssp->level[0] = &ssp->node[0];
- for (i = 1; i < rcu_num_lvls; i++)
- ssp->level[i] = ssp->level[i - 1] + num_rcu_lvl[i - 1];
- rcu_init_levelspread(levelspread, num_rcu_lvl);
- /* Each pass through this loop initializes one srcu_node structure. */
- srcu_for_each_node_breadth_first(ssp, snp) {
- spin_lock_init(&ACCESS_PRIVATE(snp, lock));
- WARN_ON_ONCE(ARRAY_SIZE(snp->srcu_have_cbs) !=
- ARRAY_SIZE(snp->srcu_data_have_cbs));
- for (i = 0; i < ARRAY_SIZE(snp->srcu_have_cbs); i++) {
- snp->srcu_have_cbs[i] = SRCU_SNP_INIT_SEQ;
- snp->srcu_data_have_cbs[i] = 0;
- }
- snp->srcu_gp_seq_needed_exp = SRCU_SNP_INIT_SEQ;
- snp->grplo = -1;
- snp->grphi = -1;
- if (snp == &ssp->node[0]) {
- /* Root node, special case. */
- snp->srcu_parent = NULL;
- continue;
- }
- /* Non-root node. */
- if (snp == ssp->level[level + 1])
- level++;
- snp->srcu_parent = ssp->level[level - 1] +
- (snp - ssp->level[level]) /
- levelspread[level - 1];
- }
- /*
- * Initialize the per-CPU srcu_data array, which feeds into the
- * leaves of the srcu_node tree.
- */
- level = rcu_num_lvls - 1;
- snp_first = ssp->level[level];
- for_each_possible_cpu(cpu) {
- sdp = per_cpu_ptr(ssp->sda, cpu);
- sdp->mynode = &snp_first[cpu / levelspread[level]];
- for (snp = sdp->mynode; snp != NULL; snp = snp->srcu_parent) {
- if (snp->grplo < 0)
- snp->grplo = cpu;
- snp->grphi = cpu;
- }
- sdp->grpmask = 1UL << (cpu - sdp->mynode->grplo);
- }
- smp_store_release(&ssp->srcu_size_state, SRCU_SIZE_WAIT_BARRIER);
- return true;
- }
- /*
- * Initialize non-compile-time initialized fields, including the
- * associated srcu_node and srcu_data structures. The is_static parameter
- * tells us that ->sda has already been wired up to srcu_data.
- */
- static int init_srcu_struct_fields(struct srcu_struct *ssp, bool is_static)
- {
- ssp->srcu_size_state = SRCU_SIZE_SMALL;
- ssp->node = NULL;
- mutex_init(&ssp->srcu_cb_mutex);
- mutex_init(&ssp->srcu_gp_mutex);
- ssp->srcu_idx = 0;
- ssp->srcu_gp_seq = 0;
- ssp->srcu_barrier_seq = 0;
- mutex_init(&ssp->srcu_barrier_mutex);
- atomic_set(&ssp->srcu_barrier_cpu_cnt, 0);
- INIT_DELAYED_WORK(&ssp->work, process_srcu);
- ssp->sda_is_static = is_static;
- if (!is_static)
- ssp->sda = alloc_percpu(struct srcu_data);
- if (!ssp->sda)
- return -ENOMEM;
- init_srcu_struct_data(ssp);
- ssp->srcu_gp_seq_needed_exp = 0;
- ssp->srcu_last_gp_end = ktime_get_mono_fast_ns();
- if (READ_ONCE(ssp->srcu_size_state) == SRCU_SIZE_SMALL && SRCU_SIZING_IS_INIT()) {
- if (!init_srcu_struct_nodes(ssp, GFP_ATOMIC)) {
- if (!ssp->sda_is_static) {
- free_percpu(ssp->sda);
- ssp->sda = NULL;
- return -ENOMEM;
- }
- } else {
- WRITE_ONCE(ssp->srcu_size_state, SRCU_SIZE_BIG);
- }
- }
- smp_store_release(&ssp->srcu_gp_seq_needed, 0); /* Init done. */
- return 0;
- }
- #ifdef CONFIG_DEBUG_LOCK_ALLOC
- int __init_srcu_struct(struct srcu_struct *ssp, const char *name,
- struct lock_class_key *key)
- {
- /* Don't re-initialize a lock while it is held. */
- debug_check_no_locks_freed((void *)ssp, sizeof(*ssp));
- lockdep_init_map(&ssp->dep_map, name, key, 0);
- spin_lock_init(&ACCESS_PRIVATE(ssp, lock));
- return init_srcu_struct_fields(ssp, false);
- }
- EXPORT_SYMBOL_GPL(__init_srcu_struct);
- #else /* #ifdef CONFIG_DEBUG_LOCK_ALLOC */
- /**
- * init_srcu_struct - initialize a sleep-RCU structure
- * @ssp: structure to initialize.
- *
- * Must invoke this on a given srcu_struct before passing that srcu_struct
- * to any other function. Each srcu_struct represents a separate domain
- * of SRCU protection.
- */
- int init_srcu_struct(struct srcu_struct *ssp)
- {
- spin_lock_init(&ACCESS_PRIVATE(ssp, lock));
- return init_srcu_struct_fields(ssp, false);
- }
- EXPORT_SYMBOL_GPL(init_srcu_struct);
- #endif /* #else #ifdef CONFIG_DEBUG_LOCK_ALLOC */
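- /*
- * Usage sketch (illustration only, not part of this file's machinery;
- * the "my_driver" names are hypothetical). A user can either define its
- * SRCU domain statically, relying on the first-use initialization done
- * by check_init_srcu_struct(), or initialize and clean it up explicitly:
- *
- *	DEFINE_STATIC_SRCU(my_driver_srcu);	// static definition
- *
- *	// ... or, with a dynamically initialized srcu_struct:
- *	static struct srcu_struct my_driver_srcu;
- *
- *	static int __init my_driver_init(void)
- *	{
- *		return init_srcu_struct(&my_driver_srcu);
- *	}
- *
- *	static void __exit my_driver_exit(void)
- *	{
- *		srcu_barrier(&my_driver_srcu);	// wait for pending callbacks
- *		cleanup_srcu_struct(&my_driver_srcu);
- *	}
- */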
- /*
- * Initiate a transition to SRCU_SIZE_BIG with lock held.
- */
- static void __srcu_transition_to_big(struct srcu_struct *ssp)
- {
- lockdep_assert_held(&ACCESS_PRIVATE(ssp, lock));
- smp_store_release(&ssp->srcu_size_state, SRCU_SIZE_ALLOC);
- }
- /*
- * Initiate an idempotent transition to SRCU_SIZE_BIG.
- */
- static void srcu_transition_to_big(struct srcu_struct *ssp)
- {
- unsigned long flags;
- /* Double-checked locking on ->srcu_size_state. */
- if (smp_load_acquire(&ssp->srcu_size_state) != SRCU_SIZE_SMALL)
- return;
- spin_lock_irqsave_rcu_node(ssp, flags);
- if (smp_load_acquire(&ssp->srcu_size_state) != SRCU_SIZE_SMALL) {
- spin_unlock_irqrestore_rcu_node(ssp, flags);
- return;
- }
- __srcu_transition_to_big(ssp);
- spin_unlock_irqrestore_rcu_node(ssp, flags);
- }
- /*
- * Check to see if the just-encountered contention event justifies
- * a transition to SRCU_SIZE_BIG.
- */
- static void spin_lock_irqsave_check_contention(struct srcu_struct *ssp)
- {
- unsigned long j;
- if (!SRCU_SIZING_IS_CONTEND() || ssp->srcu_size_state)
- return;
- j = jiffies;
- if (ssp->srcu_size_jiffies != j) {
- ssp->srcu_size_jiffies = j;
- ssp->srcu_n_lock_retries = 0;
- }
- if (++ssp->srcu_n_lock_retries <= small_contention_lim)
- return;
- __srcu_transition_to_big(ssp);
- }
- /*
- * Acquire the specified srcu_data structure's ->lock, but check for
- * excessive contention, which results in initiation of a transition
- * to SRCU_SIZE_BIG. But only if the srcutree.convert_to_big module
- * parameter permits this.
- */
- static void spin_lock_irqsave_sdp_contention(struct srcu_data *sdp, unsigned long *flags)
- {
- struct srcu_struct *ssp = sdp->ssp;
- if (spin_trylock_irqsave_rcu_node(sdp, *flags))
- return;
- spin_lock_irqsave_rcu_node(ssp, *flags);
- spin_lock_irqsave_check_contention(ssp);
- spin_unlock_irqrestore_rcu_node(ssp, *flags);
- spin_lock_irqsave_rcu_node(sdp, *flags);
- }
- /*
- * Acquire the specified srcu_struct structure's ->lock, but check for
- * excessive contention, which results in initiation of a transition
- * to SRCU_SIZE_BIG. But only if the srcutree.convert_to_big module
- * parameter permits this.
- */
- static void spin_lock_irqsave_ssp_contention(struct srcu_struct *ssp, unsigned long *flags)
- {
- if (spin_trylock_irqsave_rcu_node(ssp, *flags))
- return;
- spin_lock_irqsave_rcu_node(ssp, *flags);
- spin_lock_irqsave_check_contention(ssp);
- }
- /*
- * First-use initialization of statically allocated srcu_struct
- * structure. Wiring up the combining tree is more than can be
- * done with compile-time initialization, so this check is added
- * to each update-side SRCU primitive. Use ssp->lock, which -is-
- * compile-time initialized, to resolve races involving multiple
- * CPUs trying to garner first-use privileges.
- */
- static void check_init_srcu_struct(struct srcu_struct *ssp)
- {
- unsigned long flags;
- /* The smp_load_acquire() pairs with the smp_store_release(). */
- if (!rcu_seq_state(smp_load_acquire(&ssp->srcu_gp_seq_needed))) /*^^^*/
- return; /* Already initialized. */
- spin_lock_irqsave_rcu_node(ssp, flags);
- if (!rcu_seq_state(ssp->srcu_gp_seq_needed)) {
- spin_unlock_irqrestore_rcu_node(ssp, flags);
- return;
- }
- init_srcu_struct_fields(ssp, true);
- spin_unlock_irqrestore_rcu_node(ssp, flags);
- }
- /*
- * Returns approximate total of the readers' ->srcu_lock_count[] values
- * for the rank of per-CPU counters specified by idx.
- */
- static unsigned long srcu_readers_lock_idx(struct srcu_struct *ssp, int idx)
- {
- int cpu;
- unsigned long sum = 0;
- for_each_possible_cpu(cpu) {
- struct srcu_data *cpuc = per_cpu_ptr(ssp->sda, cpu);
- sum += READ_ONCE(cpuc->srcu_lock_count[idx]);
- }
- return sum;
- }
- /*
- * Returns approximate total of the readers' ->srcu_unlock_count[] values
- * for the rank of per-CPU counters specified by idx.
- */
- static unsigned long srcu_readers_unlock_idx(struct srcu_struct *ssp, int idx)
- {
- int cpu;
- unsigned long sum = 0;
- for_each_possible_cpu(cpu) {
- struct srcu_data *cpuc = per_cpu_ptr(ssp->sda, cpu);
- sum += READ_ONCE(cpuc->srcu_unlock_count[idx]);
- }
- return sum;
- }
- /*
- * Return true if the number of pre-existing readers is determined to
- * be zero.
- */
- static bool srcu_readers_active_idx_check(struct srcu_struct *ssp, int idx)
- {
- unsigned long unlocks;
- unlocks = srcu_readers_unlock_idx(ssp, idx);
- /*
- * Make sure that a lock is always counted if the corresponding
- * unlock is counted. Needs to be a smp_mb() as the read side may
- * contain a read from a variable that is written to before the
- * synchronize_srcu() in the write side. In this case smp_mb()s
- * A and B act like the store buffering pattern.
- *
- * This smp_mb() also pairs with smp_mb() C to prevent accesses
- * after the synchronize_srcu() from being executed before the
- * grace period ends.
- */
- smp_mb(); /* A */
- /*
- * If the locks are the same as the unlocks, then there must have
- * been no readers on this index at some time in between. This does
- * not mean that there are no more readers, as one could have read
- * the current index but not have incremented the lock counter yet.
- *
- * So suppose that the updater is preempted here for so long
- * that more than ULONG_MAX non-nested readers come and go in
- * the meantime. It turns out that this cannot result in overflow
- * because if a reader modifies its unlock count after we read it
- * above, then that reader's next load of ->srcu_idx is guaranteed
- * to get the new value, which will cause it to operate on the
- * other bank of counters, where it cannot contribute to the
- * overflow of these counters. This means that there is a maximum
- * of 2*NR_CPUS increments, which cannot overflow given current
- * systems, especially not on 64-bit systems.
- *
- * OK, how about nesting? This does impose a limit on nesting
- * of floor(ULONG_MAX/NR_CPUS/2), which should be sufficient,
- * especially on 64-bit systems.
- */
- return srcu_readers_lock_idx(ssp, idx) == unlocks;
- }
- /**
- * srcu_readers_active - returns true if there are readers, and false
- * otherwise
- * @ssp: which srcu_struct to count active readers (holding srcu_read_lock).
- *
- * Note that this is not an atomic primitive, and can therefore suffer
- * severe errors when invoked on an active srcu_struct. That said, it
- * can be useful as an error check at cleanup time.
- */
- static bool srcu_readers_active(struct srcu_struct *ssp)
- {
- int cpu;
- unsigned long sum = 0;
- for_each_possible_cpu(cpu) {
- struct srcu_data *cpuc = per_cpu_ptr(ssp->sda, cpu);
- sum += READ_ONCE(cpuc->srcu_lock_count[0]);
- sum += READ_ONCE(cpuc->srcu_lock_count[1]);
- sum -= READ_ONCE(cpuc->srcu_unlock_count[0]);
- sum -= READ_ONCE(cpuc->srcu_unlock_count[1]);
- }
- return sum;
- }
- /*
- * We use an adaptive strategy for synchronize_srcu() and especially for
- * synchronize_srcu_expedited(). We spin for a fixed time period
- * (defined below, boot time configurable) to allow SRCU readers to exit
- * their read-side critical sections. If there are still some readers
- * after one jiffy, we repeatedly block for one-jiffy time periods.
- * The blocking time is increased as the grace-period age increases,
- * with max blocking time capped at 10 jiffies.
- */
- #define SRCU_DEFAULT_RETRY_CHECK_DELAY 5
- static ulong srcu_retry_check_delay = SRCU_DEFAULT_RETRY_CHECK_DELAY;
- module_param(srcu_retry_check_delay, ulong, 0444);
- #define SRCU_INTERVAL 1 // Base delay if no expedited GPs pending.
- #define SRCU_MAX_INTERVAL 10 // Maximum incremental delay from slow readers.
- #define SRCU_DEFAULT_MAX_NODELAY_PHASE_LO 3UL // Lowmark on default per-GP-phase
- // no-delay instances.
- #define SRCU_DEFAULT_MAX_NODELAY_PHASE_HI 1000UL // Highmark on default per-GP-phase
- // no-delay instances.
- #define SRCU_UL_CLAMP_LO(val, low) ((val) > (low) ? (val) : (low))
- #define SRCU_UL_CLAMP_HI(val, high) ((val) < (high) ? (val) : (high))
- #define SRCU_UL_CLAMP(val, low, high) SRCU_UL_CLAMP_HI(SRCU_UL_CLAMP_LO((val), (low)), (high))
- // Per-GP-phase no-delay instances adjusted to allow a non-sleeping poll of up
- // to one jiffy's duration. The multiplication by 2 factors in the srcu_get_delay()
- // call made from process_srcu().
- #define SRCU_DEFAULT_MAX_NODELAY_PHASE_ADJUSTED \
- (2UL * USEC_PER_SEC / HZ / SRCU_DEFAULT_RETRY_CHECK_DELAY)
- // Maximum per-GP-phase consecutive no-delay instances.
- #define SRCU_DEFAULT_MAX_NODELAY_PHASE \
- SRCU_UL_CLAMP(SRCU_DEFAULT_MAX_NODELAY_PHASE_ADJUSTED, \
- SRCU_DEFAULT_MAX_NODELAY_PHASE_LO, \
- SRCU_DEFAULT_MAX_NODELAY_PHASE_HI)
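- /*
- * Worked example: with HZ=1000, SRCU_DEFAULT_MAX_NODELAY_PHASE_ADJUSTED
- * evaluates to 2 * 1000000 / 1000 / 5 = 400, which lies within the
- * [3, 1000] clamp and so becomes SRCU_DEFAULT_MAX_NODELAY_PHASE.
- * With HZ=100, the adjusted value is 4000, which the clamp reduces to 1000.
- */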
- static ulong srcu_max_nodelay_phase = SRCU_DEFAULT_MAX_NODELAY_PHASE;
- module_param(srcu_max_nodelay_phase, ulong, 0444);
- // Maximum consecutive no-delay instances.
- #define SRCU_DEFAULT_MAX_NODELAY (SRCU_DEFAULT_MAX_NODELAY_PHASE > 100 ? \
- SRCU_DEFAULT_MAX_NODELAY_PHASE : 100)
- static ulong srcu_max_nodelay = SRCU_DEFAULT_MAX_NODELAY;
- module_param(srcu_max_nodelay, ulong, 0444);
- /*
- * Return grace-period delay, zero if there are expedited grace
- * periods pending, SRCU_INTERVAL otherwise.
- */
- static unsigned long srcu_get_delay(struct srcu_struct *ssp)
- {
- unsigned long gpstart;
- unsigned long j;
- unsigned long jbase = SRCU_INTERVAL;
- if (ULONG_CMP_LT(READ_ONCE(ssp->srcu_gp_seq), READ_ONCE(ssp->srcu_gp_seq_needed_exp)))
- jbase = 0;
- if (rcu_seq_state(READ_ONCE(ssp->srcu_gp_seq))) {
- j = jiffies - 1;
- gpstart = READ_ONCE(ssp->srcu_gp_start);
- if (time_after(j, gpstart))
- jbase += j - gpstart;
- if (!jbase) {
- WRITE_ONCE(ssp->srcu_n_exp_nodelay, READ_ONCE(ssp->srcu_n_exp_nodelay) + 1);
- if (READ_ONCE(ssp->srcu_n_exp_nodelay) > srcu_max_nodelay_phase)
- jbase = 1;
- }
- }
- return jbase > SRCU_MAX_INTERVAL ? SRCU_MAX_INTERVAL : jbase;
- }
- /**
- * cleanup_srcu_struct - deconstruct a sleep-RCU structure
- * @ssp: structure to clean up.
- *
- * Must invoke this after you are finished using a given srcu_struct that
- * was initialized via init_srcu_struct(), else you leak memory.
- */
- void cleanup_srcu_struct(struct srcu_struct *ssp)
- {
- int cpu;
- if (WARN_ON(!srcu_get_delay(ssp)))
- return; /* Just leak it! */
- if (WARN_ON(srcu_readers_active(ssp)))
- return; /* Just leak it! */
- flush_delayed_work(&ssp->work);
- for_each_possible_cpu(cpu) {
- struct srcu_data *sdp = per_cpu_ptr(ssp->sda, cpu);
- del_timer_sync(&sdp->delay_work);
- flush_work(&sdp->work);
- if (WARN_ON(rcu_segcblist_n_cbs(&sdp->srcu_cblist)))
- return; /* Forgot srcu_barrier(), so just leak it! */
- }
- if (WARN_ON(rcu_seq_state(READ_ONCE(ssp->srcu_gp_seq)) != SRCU_STATE_IDLE) ||
- WARN_ON(rcu_seq_current(&ssp->srcu_gp_seq) != ssp->srcu_gp_seq_needed) ||
- WARN_ON(srcu_readers_active(ssp))) {
- pr_info("%s: Active srcu_struct %p read state: %d gp state: %lu/%lu\n",
- __func__, ssp, rcu_seq_state(READ_ONCE(ssp->srcu_gp_seq)),
- rcu_seq_current(&ssp->srcu_gp_seq), ssp->srcu_gp_seq_needed);
- return; /* Caller forgot to stop doing call_srcu()? */
- }
- if (!ssp->sda_is_static) {
- free_percpu(ssp->sda);
- ssp->sda = NULL;
- }
- kfree(ssp->node);
- ssp->node = NULL;
- ssp->srcu_size_state = SRCU_SIZE_SMALL;
- }
- EXPORT_SYMBOL_GPL(cleanup_srcu_struct);
- /*
- * Counts the new reader in the appropriate per-CPU element of the
- * srcu_struct.
- * Returns an index that must be passed to the matching srcu_read_unlock().
- */
- int __srcu_read_lock(struct srcu_struct *ssp)
- {
- int idx;
- idx = READ_ONCE(ssp->srcu_idx) & 0x1;
- this_cpu_inc(ssp->sda->srcu_lock_count[idx]);
- smp_mb(); /* B */ /* Avoid leaking the critical section. */
- return idx;
- }
- EXPORT_SYMBOL_GPL(__srcu_read_lock);
- /*
- * Removes the count for the old reader from the appropriate per-CPU
- * element of the srcu_struct. Note that this may well be a different
- * CPU than that which was incremented by the corresponding srcu_read_lock().
- */
- void __srcu_read_unlock(struct srcu_struct *ssp, int idx)
- {
- smp_mb(); /* C */ /* Avoid leaking the critical section. */
- this_cpu_inc(ssp->sda->srcu_unlock_count[idx]);
- }
- EXPORT_SYMBOL_GPL(__srcu_read_unlock);
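- /*
- * Reader-side usage sketch (illustration only; "my_srcu", "my_data",
- * and the foo type are hypothetical). Readers normally go through
- * srcu_read_lock() and srcu_read_unlock(), which wrap the functions
- * above, and fetch protected pointers with srcu_dereference():
- *
- *	int idx;
- *	struct foo *p;
- *
- *	idx = srcu_read_lock(&my_srcu);
- *	p = srcu_dereference(my_data, &my_srcu);
- *	if (p)
- *		do_something_with(p);
- *	srcu_read_unlock(&my_srcu, idx);
- */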
- /*
- * Start an SRCU grace period.
- */
- static void srcu_gp_start(struct srcu_struct *ssp)
- {
- struct srcu_data *sdp;
- int state;
- if (smp_load_acquire(&ssp->srcu_size_state) < SRCU_SIZE_WAIT_BARRIER)
- sdp = per_cpu_ptr(ssp->sda, get_boot_cpu_id());
- else
- sdp = this_cpu_ptr(ssp->sda);
- lockdep_assert_held(&ACCESS_PRIVATE(ssp, lock));
- WARN_ON_ONCE(ULONG_CMP_GE(ssp->srcu_gp_seq, ssp->srcu_gp_seq_needed));
- spin_lock_rcu_node(sdp); /* Interrupts already disabled. */
- rcu_segcblist_advance(&sdp->srcu_cblist,
- rcu_seq_current(&ssp->srcu_gp_seq));
- (void)rcu_segcblist_accelerate(&sdp->srcu_cblist,
- rcu_seq_snap(&ssp->srcu_gp_seq));
- spin_unlock_rcu_node(sdp); /* Interrupts remain disabled. */
- WRITE_ONCE(ssp->srcu_gp_start, jiffies);
- WRITE_ONCE(ssp->srcu_n_exp_nodelay, 0);
- smp_mb(); /* Order prior store to ->srcu_gp_seq_needed vs. GP start. */
- rcu_seq_start(&ssp->srcu_gp_seq);
- state = rcu_seq_state(ssp->srcu_gp_seq);
- WARN_ON_ONCE(state != SRCU_STATE_SCAN1);
- }
- static void srcu_delay_timer(struct timer_list *t)
- {
- struct srcu_data *sdp = container_of(t, struct srcu_data, delay_work);
- queue_work_on(sdp->cpu, rcu_gp_wq, &sdp->work);
- }
- static void srcu_queue_delayed_work_on(struct srcu_data *sdp,
- unsigned long delay)
- {
- if (!delay) {
- queue_work_on(sdp->cpu, rcu_gp_wq, &sdp->work);
- return;
- }
- timer_reduce(&sdp->delay_work, jiffies + delay);
- }
- /*
- * Schedule callback invocation for the specified srcu_data structure,
- * if possible, on the corresponding CPU.
- */
- static void srcu_schedule_cbs_sdp(struct srcu_data *sdp, unsigned long delay)
- {
- srcu_queue_delayed_work_on(sdp, delay);
- }
- /*
- * Schedule callback invocation for all srcu_data structures associated
- * with the specified srcu_node structure that have callbacks for the
- * just-completed grace period, the one corresponding to idx. If possible,
- * schedule this invocation on the corresponding CPUs.
- */
- static void srcu_schedule_cbs_snp(struct srcu_struct *ssp, struct srcu_node *snp,
- unsigned long mask, unsigned long delay)
- {
- int cpu;
- for (cpu = snp->grplo; cpu <= snp->grphi; cpu++) {
- if (!(mask & (1UL << (cpu - snp->grplo))))
- continue;
- srcu_schedule_cbs_sdp(per_cpu_ptr(ssp->sda, cpu), delay);
- }
- }
- /*
- * Note the end of an SRCU grace period. Initiates callback invocation
- * and starts a new grace period if needed.
- *
- * The ->srcu_cb_mutex acquisition does not protect any data, but
- * instead prevents more than one grace period from starting while we
- * are initiating callback invocation. This allows the ->srcu_have_cbs[]
- * array to have a finite number of elements.
- */
- static void srcu_gp_end(struct srcu_struct *ssp)
- {
- unsigned long cbdelay = 1;
- bool cbs;
- bool last_lvl;
- int cpu;
- unsigned long flags;
- unsigned long gpseq;
- int idx;
- unsigned long mask;
- struct srcu_data *sdp;
- unsigned long sgsne;
- struct srcu_node *snp;
- int ss_state;
- /* Prevent more than one additional grace period. */
- mutex_lock(&ssp->srcu_cb_mutex);
- /* End the current grace period. */
- spin_lock_irq_rcu_node(ssp);
- idx = rcu_seq_state(ssp->srcu_gp_seq);
- WARN_ON_ONCE(idx != SRCU_STATE_SCAN2);
- if (ULONG_CMP_LT(READ_ONCE(ssp->srcu_gp_seq), READ_ONCE(ssp->srcu_gp_seq_needed_exp)))
- cbdelay = 0;
- WRITE_ONCE(ssp->srcu_last_gp_end, ktime_get_mono_fast_ns());
- rcu_seq_end(&ssp->srcu_gp_seq);
- gpseq = rcu_seq_current(&ssp->srcu_gp_seq);
- if (ULONG_CMP_LT(ssp->srcu_gp_seq_needed_exp, gpseq))
- WRITE_ONCE(ssp->srcu_gp_seq_needed_exp, gpseq);
- spin_unlock_irq_rcu_node(ssp);
- mutex_unlock(&ssp->srcu_gp_mutex);
- /* A new grace period can start at this point. But only one. */
- /* Initiate callback invocation as needed. */
- ss_state = smp_load_acquire(&ssp->srcu_size_state);
- if (ss_state < SRCU_SIZE_WAIT_BARRIER) {
- srcu_schedule_cbs_sdp(per_cpu_ptr(ssp->sda, get_boot_cpu_id()),
- cbdelay);
- } else {
- idx = rcu_seq_ctr(gpseq) % ARRAY_SIZE(snp->srcu_have_cbs);
- srcu_for_each_node_breadth_first(ssp, snp) {
- spin_lock_irq_rcu_node(snp);
- cbs = false;
- last_lvl = snp >= ssp->level[rcu_num_lvls - 1];
- if (last_lvl)
- cbs = ss_state < SRCU_SIZE_BIG || snp->srcu_have_cbs[idx] == gpseq;
- snp->srcu_have_cbs[idx] = gpseq;
- rcu_seq_set_state(&snp->srcu_have_cbs[idx], 1);
- sgsne = snp->srcu_gp_seq_needed_exp;
- if (srcu_invl_snp_seq(sgsne) || ULONG_CMP_LT(sgsne, gpseq))
- WRITE_ONCE(snp->srcu_gp_seq_needed_exp, gpseq);
- if (ss_state < SRCU_SIZE_BIG)
- mask = ~0;
- else
- mask = snp->srcu_data_have_cbs[idx];
- snp->srcu_data_have_cbs[idx] = 0;
- spin_unlock_irq_rcu_node(snp);
- if (cbs)
- srcu_schedule_cbs_snp(ssp, snp, mask, cbdelay);
- }
- }
- /* Occasionally prevent srcu_data counter wrap. */
- if (!(gpseq & counter_wrap_check))
- for_each_possible_cpu(cpu) {
- sdp = per_cpu_ptr(ssp->sda, cpu);
- spin_lock_irqsave_rcu_node(sdp, flags);
- if (ULONG_CMP_GE(gpseq, sdp->srcu_gp_seq_needed + 100))
- sdp->srcu_gp_seq_needed = gpseq;
- if (ULONG_CMP_GE(gpseq, sdp->srcu_gp_seq_needed_exp + 100))
- sdp->srcu_gp_seq_needed_exp = gpseq;
- spin_unlock_irqrestore_rcu_node(sdp, flags);
- }
- /* Callback initiation done, allow grace periods after next. */
- mutex_unlock(&ssp->srcu_cb_mutex);
- /* Start a new grace period if needed. */
- spin_lock_irq_rcu_node(ssp);
- gpseq = rcu_seq_current(&ssp->srcu_gp_seq);
- if (!rcu_seq_state(gpseq) &&
- ULONG_CMP_LT(gpseq, ssp->srcu_gp_seq_needed)) {
- srcu_gp_start(ssp);
- spin_unlock_irq_rcu_node(ssp);
- srcu_reschedule(ssp, 0);
- } else {
- spin_unlock_irq_rcu_node(ssp);
- }
- /* Transition to big if needed. */
- if (ss_state != SRCU_SIZE_SMALL && ss_state != SRCU_SIZE_BIG) {
- if (ss_state == SRCU_SIZE_ALLOC)
- init_srcu_struct_nodes(ssp, GFP_KERNEL);
- else
- smp_store_release(&ssp->srcu_size_state, ss_state + 1);
- }
- }
- /*
- * Funnel-locking scheme to scalably mediate many concurrent expedited
- * grace-period requests. This function is invoked for the first known
- * expedited request for a grace period that has already been requested,
- * but without expediting. To start a completely new grace period,
- * whether expedited or not, use srcu_funnel_gp_start() instead.
- */
- static void srcu_funnel_exp_start(struct srcu_struct *ssp, struct srcu_node *snp,
- unsigned long s)
- {
- unsigned long flags;
- unsigned long sgsne;
- if (snp)
- for (; snp != NULL; snp = snp->srcu_parent) {
- sgsne = READ_ONCE(snp->srcu_gp_seq_needed_exp);
- if (rcu_seq_done(&ssp->srcu_gp_seq, s) ||
- (!srcu_invl_snp_seq(sgsne) && ULONG_CMP_GE(sgsne, s)))
- return;
- spin_lock_irqsave_rcu_node(snp, flags);
- sgsne = snp->srcu_gp_seq_needed_exp;
- if (!srcu_invl_snp_seq(sgsne) && ULONG_CMP_GE(sgsne, s)) {
- spin_unlock_irqrestore_rcu_node(snp, flags);
- return;
- }
- WRITE_ONCE(snp->srcu_gp_seq_needed_exp, s);
- spin_unlock_irqrestore_rcu_node(snp, flags);
- }
- spin_lock_irqsave_ssp_contention(ssp, &flags);
- if (ULONG_CMP_LT(ssp->srcu_gp_seq_needed_exp, s))
- WRITE_ONCE(ssp->srcu_gp_seq_needed_exp, s);
- spin_unlock_irqrestore_rcu_node(ssp, flags);
- }
- /*
- * Funnel-locking scheme to scalably mediate many concurrent grace-period
- * requests. The winner has to do the work of actually starting grace
- * period s. Losers must either ensure that their desired grace-period
- * number is recorded on at least their leaf srcu_node structure, or they
- * must take steps to invoke their own callbacks.
- *
- * Note that this function also does the work of srcu_funnel_exp_start(),
- * in some cases by directly invoking it.
- */
- static void srcu_funnel_gp_start(struct srcu_struct *ssp, struct srcu_data *sdp,
- unsigned long s, bool do_norm)
- {
- unsigned long flags;
- int idx = rcu_seq_ctr(s) % ARRAY_SIZE(sdp->mynode->srcu_have_cbs);
- unsigned long sgsne;
- struct srcu_node *snp;
- struct srcu_node *snp_leaf;
- unsigned long snp_seq;
- /* Ensure that snp node tree is fully initialized before traversing it */
- if (smp_load_acquire(&ssp->srcu_size_state) < SRCU_SIZE_WAIT_BARRIER)
- snp_leaf = NULL;
- else
- snp_leaf = sdp->mynode;
- if (snp_leaf)
- /* Each pass through the loop does one level of the srcu_node tree. */
- for (snp = snp_leaf; snp != NULL; snp = snp->srcu_parent) {
- if (rcu_seq_done(&ssp->srcu_gp_seq, s) && snp != snp_leaf)
- return; /* GP already done and CBs recorded. */
- spin_lock_irqsave_rcu_node(snp, flags);
- snp_seq = snp->srcu_have_cbs[idx];
- if (!srcu_invl_snp_seq(snp_seq) && ULONG_CMP_GE(snp_seq, s)) {
- if (snp == snp_leaf && snp_seq == s)
- snp->srcu_data_have_cbs[idx] |= sdp->grpmask;
- spin_unlock_irqrestore_rcu_node(snp, flags);
- if (snp == snp_leaf && snp_seq != s) {
- srcu_schedule_cbs_sdp(sdp, do_norm ? SRCU_INTERVAL : 0);
- return;
- }
- if (!do_norm)
- srcu_funnel_exp_start(ssp, snp, s);
- return;
- }
- snp->srcu_have_cbs[idx] = s;
- if (snp == snp_leaf)
- snp->srcu_data_have_cbs[idx] |= sdp->grpmask;
- sgsne = snp->srcu_gp_seq_needed_exp;
- if (!do_norm && (srcu_invl_snp_seq(sgsne) || ULONG_CMP_LT(sgsne, s)))
- WRITE_ONCE(snp->srcu_gp_seq_needed_exp, s);
- spin_unlock_irqrestore_rcu_node(snp, flags);
- }
- /* Top of tree, must ensure the grace period will be started. */
- spin_lock_irqsave_ssp_contention(ssp, &flags);
- if (ULONG_CMP_LT(ssp->srcu_gp_seq_needed, s)) {
- /*
- * Record need for grace period s. Pair with load
- * acquire setting up for initialization.
- */
- smp_store_release(&ssp->srcu_gp_seq_needed, s); /*^^^*/
- }
- if (!do_norm && ULONG_CMP_LT(ssp->srcu_gp_seq_needed_exp, s))
- WRITE_ONCE(ssp->srcu_gp_seq_needed_exp, s);
- /* If grace period not already done and none in progress, start it. */
- if (!rcu_seq_done(&ssp->srcu_gp_seq, s) &&
- rcu_seq_state(ssp->srcu_gp_seq) == SRCU_STATE_IDLE) {
- WARN_ON_ONCE(ULONG_CMP_GE(ssp->srcu_gp_seq, ssp->srcu_gp_seq_needed));
- srcu_gp_start(ssp);
- // And how can that list_add() in the "else" clause
- // possibly be safe for concurrent execution? Well,
- // it isn't. And it does not have to be. After all, it
- // can only be executed during early boot when there is only
- // the one boot CPU running with interrupts still disabled.
- if (likely(srcu_init_done))
- queue_delayed_work(rcu_gp_wq, &ssp->work,
- !!srcu_get_delay(ssp));
- else if (list_empty(&ssp->work.work.entry))
- list_add(&ssp->work.work.entry, &srcu_boot_list);
- }
- spin_unlock_irqrestore_rcu_node(ssp, flags);
- }
- /*
- * Wait until all readers counted by array index idx complete, but
- * loop an additional time if there is an expedited grace period pending.
- * The caller must ensure that ->srcu_idx is not changed while checking.
- */
- static bool try_check_zero(struct srcu_struct *ssp, int idx, int trycount)
- {
- unsigned long curdelay;
- curdelay = !srcu_get_delay(ssp);
- for (;;) {
- if (srcu_readers_active_idx_check(ssp, idx))
- return true;
- if ((--trycount + curdelay) <= 0)
- return false;
- udelay(srcu_retry_check_delay);
- }
- }
- /*
- * Increment the ->srcu_idx counter so that future SRCU readers will
- * use the other rank of the ->srcu_(un)lock_count[] arrays. This allows
- * us to wait for pre-existing readers in a starvation-free manner.
- */
- static void srcu_flip(struct srcu_struct *ssp)
- {
- /*
- * Ensure that if this updater saw a given reader's increment
- * from __srcu_read_lock(), that reader was using an old value
- * of ->srcu_idx. Also ensure that if a given reader sees the
- * new value of ->srcu_idx, this updater's earlier scans cannot
- * have seen that reader's increments (which is OK, because this
- * grace period need not wait on that reader).
- */
- smp_mb(); /* E */ /* Pairs with B and C. */
- WRITE_ONCE(ssp->srcu_idx, ssp->srcu_idx + 1);
- /*
- * Ensure that if the updater misses an __srcu_read_unlock()
- * increment, that task's next __srcu_read_lock() will see the
- * above counter update. Note that both this memory barrier
- * and the one in srcu_readers_active_idx_check() provide the
- * guarantee for __srcu_read_lock().
- */
- smp_mb(); /* D */ /* Pairs with C. */
- }
- /*
- * If SRCU is likely idle, return true, otherwise return false.
- *
- * Note that it is OK for several current from-idle requests for a new
- * grace period to specify expediting, because they will all end
- * up requesting the same grace period anyhow. So no loss.
- *
- * Note also that if any CPU (including the current one) is still invoking
- * callbacks, this function will nevertheless say "idle". This is not
- * ideal, but the overhead of checking all CPUs' callback lists is even
- * less ideal, especially on large systems. Furthermore, the wakeup
- * can happen before the callback is fully removed, so we have no choice
- * but to accept this type of error.
- *
- * This function is also subject to counter-wrap errors, but let's face
- * it, if this function was preempted for enough time for the counters
- * to wrap, it really doesn't matter whether or not we expedite the grace
- * period. The extra overhead of a needlessly expedited grace period is
- * negligible when amortized over that time period, and the extra latency
- * of a needlessly non-expedited grace period is similarly negligible.
- */
- static bool srcu_might_be_idle(struct srcu_struct *ssp)
- {
- unsigned long curseq;
- unsigned long flags;
- struct srcu_data *sdp;
- unsigned long t;
- unsigned long tlast;
- check_init_srcu_struct(ssp);
- /* If the local srcu_data structure has callbacks, not idle. */
- sdp = raw_cpu_ptr(ssp->sda);
- spin_lock_irqsave_rcu_node(sdp, flags);
- if (rcu_segcblist_pend_cbs(&sdp->srcu_cblist)) {
- spin_unlock_irqrestore_rcu_node(sdp, flags);
- return false; /* Callbacks already present, so not idle. */
- }
- spin_unlock_irqrestore_rcu_node(sdp, flags);
- /*
- * No local callbacks, so probabilistically probe global state.
- * Exact information would require acquiring locks, which would
- * kill scalability, hence the probabilistic nature of the probe.
- */
- /* First, see if enough time has passed since the last GP. */
- t = ktime_get_mono_fast_ns();
- tlast = READ_ONCE(ssp->srcu_last_gp_end);
- if (exp_holdoff == 0 ||
- time_in_range_open(t, tlast, tlast + exp_holdoff))
- return false; /* Too soon after last GP. */
- /* Next, check for probable idleness. */
- curseq = rcu_seq_current(&ssp->srcu_gp_seq);
- smp_mb(); /* Order ->srcu_gp_seq with ->srcu_gp_seq_needed. */
- if (ULONG_CMP_LT(curseq, READ_ONCE(ssp->srcu_gp_seq_needed)))
- return false; /* Grace period in progress, so not idle. */
- smp_mb(); /* Order ->srcu_gp_seq with prior access. */
- if (curseq != rcu_seq_current(&ssp->srcu_gp_seq))
- return false; /* GP # changed, so not idle. */
- return true; /* With reasonable probability, idle! */
- }
- /*
- * SRCU callback function to leak a callback.
- */
- static void srcu_leak_callback(struct rcu_head *rhp)
- {
- }
- /*
- * Start an SRCU grace period, and also queue the callback if non-NULL.
- */
- static unsigned long srcu_gp_start_if_needed(struct srcu_struct *ssp,
- struct rcu_head *rhp, bool do_norm)
- {
- unsigned long flags;
- int idx;
- bool needexp = false;
- bool needgp = false;
- unsigned long s;
- struct srcu_data *sdp;
- struct srcu_node *sdp_mynode;
- int ss_state;
- check_init_srcu_struct(ssp);
- idx = srcu_read_lock(ssp);
- ss_state = smp_load_acquire(&ssp->srcu_size_state);
- if (ss_state < SRCU_SIZE_WAIT_CALL)
- sdp = per_cpu_ptr(ssp->sda, get_boot_cpu_id());
- else
- sdp = raw_cpu_ptr(ssp->sda);
- spin_lock_irqsave_sdp_contention(sdp, &flags);
- if (rhp)
- rcu_segcblist_enqueue(&sdp->srcu_cblist, rhp);
- rcu_segcblist_advance(&sdp->srcu_cblist,
- rcu_seq_current(&ssp->srcu_gp_seq));
- s = rcu_seq_snap(&ssp->srcu_gp_seq);
- (void)rcu_segcblist_accelerate(&sdp->srcu_cblist, s);
- if (ULONG_CMP_LT(sdp->srcu_gp_seq_needed, s)) {
- sdp->srcu_gp_seq_needed = s;
- needgp = true;
- }
- if (!do_norm && ULONG_CMP_LT(sdp->srcu_gp_seq_needed_exp, s)) {
- sdp->srcu_gp_seq_needed_exp = s;
- needexp = true;
- }
- spin_unlock_irqrestore_rcu_node(sdp, flags);
- /* Ensure that snp node tree is fully initialized before traversing it */
- if (ss_state < SRCU_SIZE_WAIT_BARRIER)
- sdp_mynode = NULL;
- else
- sdp_mynode = sdp->mynode;
- if (needgp)
- srcu_funnel_gp_start(ssp, sdp, s, do_norm);
- else if (needexp)
- srcu_funnel_exp_start(ssp, sdp_mynode, s);
- srcu_read_unlock(ssp, idx);
- return s;
- }
- /*
- * Enqueue an SRCU callback on the srcu_data structure associated with
- * the current CPU and the specified srcu_struct structure, initiating
- * grace-period processing if it is not already running.
- *
- * Note that all CPUs must agree that the grace period extended beyond
- * all pre-existing SRCU read-side critical sections. On systems with
- * more than one CPU, this means that when "func()" is invoked, each CPU
- * is guaranteed to have executed a full memory barrier since the end of
- * its last corresponding SRCU read-side critical section whose beginning
- * preceded the call to call_srcu(). It also means that each CPU executing
- * an SRCU read-side critical section that continues beyond the start of
- * "func()" must have executed a memory barrier after the call_srcu()
- * but before the beginning of that SRCU read-side critical section.
- * Note that these guarantees include CPUs that are offline, idle, or
- * executing in user mode, as well as CPUs that are executing in the kernel.
- *
- * Furthermore, if CPU A invoked call_srcu() and CPU B invoked the
- * resulting SRCU callback function "func()", then both CPU A and CPU
- * B are guaranteed to execute a full memory barrier during the time
- * interval between the call to call_srcu() and the invocation of "func()".
- * This guarantee applies even if CPU A and CPU B are the same CPU (but
- * again only if the system has more than one CPU).
- *
- * Of course, these guarantees apply only for invocations of call_srcu(),
- * srcu_read_lock(), and srcu_read_unlock() that are all passed the same
- * srcu_struct structure.
- */
- static void __call_srcu(struct srcu_struct *ssp, struct rcu_head *rhp,
- rcu_callback_t func, bool do_norm)
- {
- if (debug_rcu_head_queue(rhp)) {
- /* Probable double call_srcu(), so leak the callback. */
- WRITE_ONCE(rhp->func, srcu_leak_callback);
- WARN_ONCE(1, "call_srcu(): Leaked duplicate callback\n");
- return;
- }
- rhp->func = func;
- (void)srcu_gp_start_if_needed(ssp, rhp, do_norm);
- }
- /**
- * call_srcu() - Queue a callback for invocation after an SRCU grace period
- * @ssp: srcu_struct in which to queue the callback
- * @rhp: structure to be used for queueing the SRCU callback.
- * @func: function to be invoked after the SRCU grace period
- *
- * The callback function will be invoked some time after a full SRCU
- * grace period elapses, in other words after all pre-existing SRCU
- * read-side critical sections have completed. However, the callback
- * function might well execute concurrently with other SRCU read-side
- * critical sections that started after call_srcu() was invoked. SRCU
- * read-side critical sections are delimited by srcu_read_lock() and
- * srcu_read_unlock(), and may be nested.
- *
- * The callback will be invoked from process context, but must nevertheless
- * be fast and must not block.
- */
- void call_srcu(struct srcu_struct *ssp, struct rcu_head *rhp,
- rcu_callback_t func)
- {
- __call_srcu(ssp, rhp, func, true);
- }
- EXPORT_SYMBOL_GPL(call_srcu);
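- /*
- * Usage sketch for call_srcu() (illustration only; the foo type and the
- * "my_srcu" domain are hypothetical). The rcu_head is embedded in the
- * protected structure and recovered with container_of() in the callback:
- *
- *	struct foo {
- *		struct rcu_head rh;
- *		int data;
- *	};
- *
- *	static void free_foo_cb(struct rcu_head *rhp)
- *	{
- *		kfree(container_of(rhp, struct foo, rh));
- *	}
- *
- *	// After removing old_p from all readers' view:
- *	call_srcu(&my_srcu, &old_p->rh, free_foo_cb);
- */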
- /*
- * Helper function for synchronize_srcu() and synchronize_srcu_expedited().
- */
- static void __synchronize_srcu(struct srcu_struct *ssp, bool do_norm)
- {
- struct rcu_synchronize rcu;
- RCU_LOCKDEP_WARN(lockdep_is_held(ssp) ||
- lock_is_held(&rcu_bh_lock_map) ||
- lock_is_held(&rcu_lock_map) ||
- lock_is_held(&rcu_sched_lock_map),
- "Illegal synchronize_srcu() in same-type SRCU (or in RCU) read-side critical section");
- if (rcu_scheduler_active == RCU_SCHEDULER_INACTIVE)
- return;
- might_sleep();
- check_init_srcu_struct(ssp);
- init_completion(&rcu.completion);
- init_rcu_head_on_stack(&rcu.head);
- __call_srcu(ssp, &rcu.head, wakeme_after_rcu, do_norm);
- wait_for_completion(&rcu.completion);
- destroy_rcu_head_on_stack(&rcu.head);
- /*
- * Make sure that later code is ordered after the SRCU grace
- * period. This pairs with the spin_lock_irq_rcu_node()
- * in srcu_invoke_callbacks(). Unlike Tree RCU, this is needed
- * because the current CPU might have been totally uninvolved with
- * (and thus unordered against) that grace period.
- */
- smp_mb();
- }
- /**
- * synchronize_srcu_expedited - Brute-force SRCU grace period
- * @ssp: srcu_struct with which to synchronize.
- *
- * Wait for an SRCU grace period to elapse, but be more aggressive about
- * spinning rather than blocking when waiting.
- *
- * Note that synchronize_srcu_expedited() has the same deadlock and
- * memory-ordering properties as does synchronize_srcu().
- */
- void synchronize_srcu_expedited(struct srcu_struct *ssp)
- {
- __synchronize_srcu(ssp, rcu_gp_is_normal());
- }
- EXPORT_SYMBOL_GPL(synchronize_srcu_expedited);
- /**
- * synchronize_srcu - wait for prior SRCU read-side critical-section completion
- * @ssp: srcu_struct with which to synchronize.
- *
- * Wait for the counts of both indexes to drain to zero. To avoid
- * possible starvation of synchronize_srcu(), it first waits for the count of
- * the index=((->srcu_idx & 1) ^ 1) to drain to zero,
- * and then flips ->srcu_idx and waits for the count of the other index.
- *
- * Can block; must be called from process context.
- *
- * Note that it is illegal to call synchronize_srcu() from the corresponding
- * SRCU read-side critical section; doing so will result in deadlock.
- * However, it is perfectly legal to call synchronize_srcu() on one
- * srcu_struct from some other srcu_struct's read-side critical section,
- * as long as the resulting graph of srcu_structs is acyclic.
- *
- * There are memory-ordering constraints implied by synchronize_srcu().
- * On systems with more than one CPU, when synchronize_srcu() returns,
- * each CPU is guaranteed to have executed a full memory barrier since
- * the end of its last corresponding SRCU read-side critical section
- * whose beginning preceded the call to synchronize_srcu(). In addition,
- * each CPU having an SRCU read-side critical section that extends beyond
- * the return from synchronize_srcu() is guaranteed to have executed a
- * full memory barrier after the beginning of synchronize_srcu() and before
- * the beginning of that SRCU read-side critical section. Note that these
- * guarantees include CPUs that are offline, idle, or executing in user mode,
- * as well as CPUs that are executing in the kernel.
- *
- * Furthermore, if CPU A invoked synchronize_srcu(), which returned
- * to its caller on CPU B, then both CPU A and CPU B are guaranteed
- * to have executed a full memory barrier during the execution of
- * synchronize_srcu(). This guarantee applies even if CPU A and CPU B
- * are the same CPU, but again only if the system has more than one CPU.
- *
- * Of course, these memory-ordering guarantees apply only when
- * synchronize_srcu(), srcu_read_lock(), and srcu_read_unlock() are
- * passed the same srcu_struct structure.
- *
- * Implementation of these memory-ordering guarantees is similar to
- * that of synchronize_rcu().
- *
- * If SRCU is likely idle, expedite the first request. This semantic
- * was provided by Classic SRCU, and is relied upon by its users, so TREE
- * SRCU must also provide it. Note that detecting idleness is heuristic
- * and subject to both false positives and negatives.
- */
- void synchronize_srcu(struct srcu_struct *ssp)
- {
- if (srcu_might_be_idle(ssp) || rcu_gp_is_expedited())
- synchronize_srcu_expedited(ssp);
- else
- __synchronize_srcu(ssp, true);
- }
- EXPORT_SYMBOL_GPL(synchronize_srcu);
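- /*
- * Updater-side usage sketch (illustration only; "my_data", "my_lock",
- * and "my_srcu" are hypothetical). The updater publishes the new
- * version, waits for pre-existing readers, and only then frees the old
- * version:
- *
- *	old_p = rcu_dereference_protected(my_data,
- *					  lockdep_is_held(&my_lock));
- *	rcu_assign_pointer(my_data, new_p);
- *	synchronize_srcu(&my_srcu);
- *	kfree(old_p);
- */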
- /**
- * get_state_synchronize_srcu - Provide an end-of-grace-period cookie
- * @ssp: srcu_struct to provide cookie for.
- *
- * This function returns a cookie that can be passed to
- * poll_state_synchronize_srcu(), which will return true if a full grace
- * period has elapsed in the meantime. It is the caller's responsibility
- * to make sure that the grace period happens, for example, by invoking
- * call_srcu() after return from get_state_synchronize_srcu().
- */
- unsigned long get_state_synchronize_srcu(struct srcu_struct *ssp)
- {
- // Any prior manipulation of SRCU-protected data must happen
- // before the load from ->srcu_gp_seq.
- smp_mb();
- return rcu_seq_snap(&ssp->srcu_gp_seq);
- }
- EXPORT_SYMBOL_GPL(get_state_synchronize_srcu);
- /**
- * start_poll_synchronize_srcu - Provide cookie and start grace period
- * @ssp: srcu_struct to provide cookie for.
- *
- * This function returns a cookie that can be passed to
- * poll_state_synchronize_srcu(), which will return true if a full grace
- * period has elapsed in the meantime. Unlike get_state_synchronize_srcu(),
- * this function also ensures that any needed SRCU grace period will be
- * started. This convenience does come at a cost in terms of CPU overhead.
- */
- unsigned long start_poll_synchronize_srcu(struct srcu_struct *ssp)
- {
- return srcu_gp_start_if_needed(ssp, NULL, true);
- }
- EXPORT_SYMBOL_GPL(start_poll_synchronize_srcu);
- /**
- * poll_state_synchronize_srcu - Has cookie's grace period ended?
- * @ssp: srcu_struct to provide cookie for.
- * @cookie: Return value from get_state_synchronize_srcu() or start_poll_synchronize_srcu().
- *
- * This function takes the cookie that was returned from either
- * get_state_synchronize_srcu() or start_poll_synchronize_srcu(), and
- * returns @true if an SRCU grace period elapsed since the time that the
- * cookie was created.
- *
- * Because cookies are finite in size, wrapping/overflow is possible.
- * This is more pronounced on 32-bit systems, where cookies are 32 bits and
- * wrapping could in theory happen in about 14 hours assuming
- * 25-microsecond expedited SRCU grace periods. However, a more likely
- * overflow lower bound is on the order of 24 days in the case of
- * one-millisecond SRCU grace periods. Of course, wrapping in a 64-bit
- * system requires geologic timespans, as in more than seven million years
- * even for expedited SRCU grace periods.
- *
- * Wrapping/overflow is much more of an issue for CONFIG_SMP=n systems
- * that also have CONFIG_PREEMPTION=n, which selects Tiny SRCU. This uses
- * a 16-bit cookie, which rcutorture routinely wraps in a matter of a
- * few minutes. If this proves to be a problem, this counter will be
- * expanded to the same size as for Tree SRCU.
- */
- bool poll_state_synchronize_srcu(struct srcu_struct *ssp, unsigned long cookie)
- {
- if (!rcu_seq_done(&ssp->srcu_gp_seq, cookie))
- return false;
- // Ensure that the end of the SRCU grace period happens before
- // any subsequent code that the caller might execute.
- smp_mb(); // ^^^
- return true;
- }
- EXPORT_SYMBOL_GPL(poll_state_synchronize_srcu);
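- /*
- * Polling usage sketch (illustration only; "my_srcu" is hypothetical).
- * A caller that cannot block can record a cookie, do other work, and
- * later check whether the corresponding grace period has elapsed:
- *
- *	unsigned long cookie;
- *
- *	cookie = start_poll_synchronize_srcu(&my_srcu);
- *	// ... do other work ...
- *	if (poll_state_synchronize_srcu(&my_srcu, cookie))
- *		pr_info("Full SRCU grace period has elapsed\n");
- */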
- /*
- * Callback function for srcu_barrier() use.
- */
- static void srcu_barrier_cb(struct rcu_head *rhp)
- {
- struct srcu_data *sdp;
- struct srcu_struct *ssp;
- sdp = container_of(rhp, struct srcu_data, srcu_barrier_head);
- ssp = sdp->ssp;
- if (atomic_dec_and_test(&ssp->srcu_barrier_cpu_cnt))
- complete(&ssp->srcu_barrier_completion);
- }
- /*
- * Enqueue an srcu_barrier() callback on the specified srcu_data
- * structure's ->cblist, but only if that ->cblist already has at least one
- * callback enqueued. Note that if a CPU already has callbacks enqueued,
- * it must have already registered the need for a future grace period,
- * so all we need do is enqueue a callback that will use the same grace
- * period as the last callback already in the queue.
- */
- static void srcu_barrier_one_cpu(struct srcu_struct *ssp, struct srcu_data *sdp)
- {
- spin_lock_irq_rcu_node(sdp);
- atomic_inc(&ssp->srcu_barrier_cpu_cnt);
- sdp->srcu_barrier_head.func = srcu_barrier_cb;
- debug_rcu_head_queue(&sdp->srcu_barrier_head);
- if (!rcu_segcblist_entrain(&sdp->srcu_cblist,
- &sdp->srcu_barrier_head)) {
- debug_rcu_head_unqueue(&sdp->srcu_barrier_head);
- atomic_dec(&ssp->srcu_barrier_cpu_cnt);
- }
- spin_unlock_irq_rcu_node(sdp);
- }
- /**
- * srcu_barrier - Wait until all in-flight call_srcu() callbacks complete.
- * @ssp: srcu_struct on which to wait for in-flight callbacks.
- */
- void srcu_barrier(struct srcu_struct *ssp)
- {
- int cpu;
- int idx;
- unsigned long s = rcu_seq_snap(&ssp->srcu_barrier_seq);
- check_init_srcu_struct(ssp);
- mutex_lock(&ssp->srcu_barrier_mutex);
- if (rcu_seq_done(&ssp->srcu_barrier_seq, s)) {
- smp_mb(); /* Force ordering following return. */
- mutex_unlock(&ssp->srcu_barrier_mutex);
- return; /* Someone else did our work for us. */
- }
- rcu_seq_start(&ssp->srcu_barrier_seq);
- init_completion(&ssp->srcu_barrier_completion);
- /* Initial count prevents reaching zero until all CBs are posted. */
- atomic_set(&ssp->srcu_barrier_cpu_cnt, 1);
- idx = srcu_read_lock(ssp);
- if (smp_load_acquire(&ssp->srcu_size_state) < SRCU_SIZE_WAIT_BARRIER)
- srcu_barrier_one_cpu(ssp, per_cpu_ptr(ssp->sda, get_boot_cpu_id()));
- else
- for_each_possible_cpu(cpu)
- srcu_barrier_one_cpu(ssp, per_cpu_ptr(ssp->sda, cpu));
- srcu_read_unlock(ssp, idx);
- /* Remove the initial count, at which point reaching zero can happen. */
- if (atomic_dec_and_test(&ssp->srcu_barrier_cpu_cnt))
- complete(&ssp->srcu_barrier_completion);
- wait_for_completion(&ssp->srcu_barrier_completion);
- rcu_seq_end(&ssp->srcu_barrier_seq);
- mutex_unlock(&ssp->srcu_barrier_mutex);
- }
- EXPORT_SYMBOL_GPL(srcu_barrier);
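- /*
- * Illustrative teardown sketch (editorial addition, not part of this file):
- * code that has used call_srcu() must wait for its callbacks to be invoked
- * before tearing down the srcu_struct. The "my_" name is hypothetical.
- *
- *	// First prevent further call_srcu() invocations (subsystem-specific),
- *	// then wait for all previously queued callbacks to be invoked:
- *	srcu_barrier(&my_srcu);
- *	// Only now is it safe to clean up a dynamically allocated srcu_struct:
- *	cleanup_srcu_struct(&my_srcu);
- */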
- /**
- * srcu_batches_completed - return batches completed.
- * @ssp: srcu_struct on which to report batch completion.
- *
- * Report the number of batches, correlated with, but not necessarily
- * precisely the same as, the number of grace periods that have elapsed.
- */
- unsigned long srcu_batches_completed(struct srcu_struct *ssp)
- {
- return READ_ONCE(ssp->srcu_idx);
- }
- EXPORT_SYMBOL_GPL(srcu_batches_completed);
- /*
- * Core SRCU state machine. Push state bits of ->srcu_gp_seq
- * to SRCU_STATE_SCAN2, and invoke srcu_gp_end() when scan has
- * completed in that state.
- */
- static void srcu_advance_state(struct srcu_struct *ssp)
- {
- int idx;
- mutex_lock(&ssp->srcu_gp_mutex);
- /*
- * Because readers might be delayed for an extended period after
- * fetching ->srcu_idx for their index, at any point in time there
- * might well be readers using both idx=0 and idx=1. We therefore
- * need to wait for readers to clear from both index values before
- * invoking a callback.
- *
- * The load-acquire ensures that we see the accesses performed
- * by the prior grace period.
- */
- idx = rcu_seq_state(smp_load_acquire(&ssp->srcu_gp_seq)); /* ^^^ */
- if (idx == SRCU_STATE_IDLE) {
- spin_lock_irq_rcu_node(ssp);
- if (ULONG_CMP_GE(ssp->srcu_gp_seq, ssp->srcu_gp_seq_needed)) {
- WARN_ON_ONCE(rcu_seq_state(ssp->srcu_gp_seq));
- spin_unlock_irq_rcu_node(ssp);
- mutex_unlock(&ssp->srcu_gp_mutex);
- return;
- }
- idx = rcu_seq_state(READ_ONCE(ssp->srcu_gp_seq));
- if (idx == SRCU_STATE_IDLE)
- srcu_gp_start(ssp);
- spin_unlock_irq_rcu_node(ssp);
- if (idx != SRCU_STATE_IDLE) {
- mutex_unlock(&ssp->srcu_gp_mutex);
- return; /* Someone else started the grace period. */
- }
- }
- if (rcu_seq_state(READ_ONCE(ssp->srcu_gp_seq)) == SRCU_STATE_SCAN1) {
- idx = 1 ^ (ssp->srcu_idx & 1);
- if (!try_check_zero(ssp, idx, 1)) {
- mutex_unlock(&ssp->srcu_gp_mutex);
- return; /* readers present, retry later. */
- }
- srcu_flip(ssp);
- spin_lock_irq_rcu_node(ssp);
- rcu_seq_set_state(&ssp->srcu_gp_seq, SRCU_STATE_SCAN2);
- ssp->srcu_n_exp_nodelay = 0;
- spin_unlock_irq_rcu_node(ssp);
- }
- if (rcu_seq_state(READ_ONCE(ssp->srcu_gp_seq)) == SRCU_STATE_SCAN2) {
- /*
- * SRCU read-side critical sections are normally short,
- * so check at least twice in quick succession after a flip.
- */
- idx = 1 ^ (ssp->srcu_idx & 1);
- if (!try_check_zero(ssp, idx, 2)) {
- mutex_unlock(&ssp->srcu_gp_mutex);
- return; /* readers present, retry later. */
- }
- ssp->srcu_n_exp_nodelay = 0;
- srcu_gp_end(ssp); /* Releases ->srcu_gp_mutex. */
- }
- }
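- /*
- * Illustrative reader-side sketch (editorial addition, not part of this
- * file), showing why the state machine above must drain both index values:
- * the index a reader samples at srcu_read_lock() time remains in use until
- * the matching srcu_read_unlock(), even if srcu_flip() runs in between.
- * The "my_" names are hypothetical.
- *
- *	int idx;
- *
- *	idx = srcu_read_lock(&my_srcu);		// Samples current ->srcu_idx.
- *	p = srcu_dereference(my_ptr, &my_srcu);	// Read-side critical section.
- *	// ... possibly long-running work ...
- *	srcu_read_unlock(&my_srcu, idx);	// Must pass back the same idx.
- */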
- /*
- * Invoke a limited number of SRCU callbacks that have passed through
- * their grace period. If there are more to do, SRCU will reschedule
- * the workqueue. Note that needed memory barriers have been executed
- * in this task's context by srcu_readers_active_idx_check().
- */
- static void srcu_invoke_callbacks(struct work_struct *work)
- {
- long len;
- bool more;
- struct rcu_cblist ready_cbs;
- struct rcu_head *rhp;
- struct srcu_data *sdp;
- struct srcu_struct *ssp;
- sdp = container_of(work, struct srcu_data, work);
- ssp = sdp->ssp;
- rcu_cblist_init(&ready_cbs);
- spin_lock_irq_rcu_node(sdp);
- rcu_segcblist_advance(&sdp->srcu_cblist,
- rcu_seq_current(&ssp->srcu_gp_seq));
- if (sdp->srcu_cblist_invoking ||
- !rcu_segcblist_ready_cbs(&sdp->srcu_cblist)) {
- spin_unlock_irq_rcu_node(sdp);
- return; /* Someone else on the job or nothing to do. */
- }
- /* We are on the job! Extract and invoke ready callbacks. */
- sdp->srcu_cblist_invoking = true;
- rcu_segcblist_extract_done_cbs(&sdp->srcu_cblist, &ready_cbs);
- len = ready_cbs.len;
- spin_unlock_irq_rcu_node(sdp);
- rhp = rcu_cblist_dequeue(&ready_cbs);
- for (; rhp != NULL; rhp = rcu_cblist_dequeue(&ready_cbs)) {
- debug_rcu_head_unqueue(rhp);
- local_bh_disable();
- rhp->func(rhp);
- local_bh_enable();
- }
- WARN_ON_ONCE(ready_cbs.len);
- /*
- * Update counts, accelerate new callbacks, and if needed,
- * schedule another round of callback invocation.
- */
- spin_lock_irq_rcu_node(sdp);
- rcu_segcblist_add_len(&sdp->srcu_cblist, -len);
- (void)rcu_segcblist_accelerate(&sdp->srcu_cblist,
- rcu_seq_snap(&ssp->srcu_gp_seq));
- sdp->srcu_cblist_invoking = false;
- more = rcu_segcblist_ready_cbs(&sdp->srcu_cblist);
- spin_unlock_irq_rcu_node(sdp);
- if (more)
- srcu_schedule_cbs_sdp(sdp, 0);
- }
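- /*
- * Illustrative sketch (editorial addition, not part of this file): the
- * rhp->func(rhp) invocation above runs callbacks queued via call_srcu().
- * A typical callback embeds the rcu_head in the protected object and uses
- * container_of(), much as srcu_barrier_cb() does. The "my_" names are
- * hypothetical.
- *
- *	struct my_obj {
- *		struct rcu_head rh;
- *		// ... payload ...
- *	};
- *
- *	static void my_free_cb(struct rcu_head *rhp)
- *	{
- *		kfree(container_of(rhp, struct my_obj, rh));
- *	}
- *
- *	// After removing obj from reader-visible structures:
- *	call_srcu(&my_srcu, &obj->rh, my_free_cb);
- */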
- /*
- * Finished one round of SRCU grace period. Start another if there are
- * more SRCU callbacks queued, otherwise put SRCU into not-running state.
- */
- static void srcu_reschedule(struct srcu_struct *ssp, unsigned long delay)
- {
- bool pushgp = true;
- spin_lock_irq_rcu_node(ssp);
- if (ULONG_CMP_GE(ssp->srcu_gp_seq, ssp->srcu_gp_seq_needed)) {
- if (!WARN_ON_ONCE(rcu_seq_state(ssp->srcu_gp_seq))) {
- /* All requests fulfilled, time to go idle. */
- pushgp = false;
- }
- } else if (!rcu_seq_state(ssp->srcu_gp_seq)) {
- /* Outstanding request and no GP. Start one. */
- srcu_gp_start(ssp);
- }
- spin_unlock_irq_rcu_node(ssp);
- if (pushgp)
- queue_delayed_work(rcu_gp_wq, &ssp->work, delay);
- }
- /*
- * This is the work-queue function that handles SRCU grace periods.
- */
- static void process_srcu(struct work_struct *work)
- {
- unsigned long curdelay;
- unsigned long j;
- struct srcu_struct *ssp;
- ssp = container_of(work, struct srcu_struct, work.work);
- srcu_advance_state(ssp);
- curdelay = srcu_get_delay(ssp);
- if (curdelay) {
- WRITE_ONCE(ssp->reschedule_count, 0);
- } else {
- j = jiffies;
- if (READ_ONCE(ssp->reschedule_jiffies) == j) {
- WRITE_ONCE(ssp->reschedule_count, READ_ONCE(ssp->reschedule_count) + 1);
- if (READ_ONCE(ssp->reschedule_count) > srcu_max_nodelay)
- curdelay = 1;
- } else {
- WRITE_ONCE(ssp->reschedule_count, 1);
- WRITE_ONCE(ssp->reschedule_jiffies, j);
- }
- }
- srcu_reschedule(ssp, curdelay);
- }
- void srcutorture_get_gp_data(enum rcutorture_type test_type,
- struct srcu_struct *ssp, int *flags,
- unsigned long *gp_seq)
- {
- if (test_type != SRCU_FLAVOR)
- return;
- *flags = 0;
- *gp_seq = rcu_seq_current(&ssp->srcu_gp_seq);
- }
- EXPORT_SYMBOL_GPL(srcutorture_get_gp_data);
- static const char * const srcu_size_state_name[] = {
- "SRCU_SIZE_SMALL",
- "SRCU_SIZE_ALLOC",
- "SRCU_SIZE_WAIT_BARRIER",
- "SRCU_SIZE_WAIT_CALL",
- "SRCU_SIZE_WAIT_CBS1",
- "SRCU_SIZE_WAIT_CBS2",
- "SRCU_SIZE_WAIT_CBS3",
- "SRCU_SIZE_WAIT_CBS4",
- "SRCU_SIZE_BIG",
- "SRCU_SIZE_???",
- };
- void srcu_torture_stats_print(struct srcu_struct *ssp, char *tt, char *tf)
- {
- int cpu;
- int idx;
- unsigned long s0 = 0, s1 = 0;
- int ss_state = READ_ONCE(ssp->srcu_size_state);
- int ss_state_idx = ss_state;
- idx = ssp->srcu_idx & 0x1;
- if (ss_state < 0 || ss_state >= ARRAY_SIZE(srcu_size_state_name))
- ss_state_idx = ARRAY_SIZE(srcu_size_state_name) - 1;
- pr_alert("%s%s Tree SRCU g%ld state %d (%s)",
- tt, tf, rcu_seq_current(&ssp->srcu_gp_seq), ss_state,
- srcu_size_state_name[ss_state_idx]);
- if (!ssp->sda) {
- // Called after cleanup_srcu_struct(), perhaps.
- pr_cont(" No per-CPU srcu_data structures (->sda == NULL).\n");
- } else {
- pr_cont(" per-CPU(idx=%d):", idx);
- for_each_possible_cpu(cpu) {
- unsigned long l0, l1;
- unsigned long u0, u1;
- long c0, c1;
- struct srcu_data *sdp;
- sdp = per_cpu_ptr(ssp->sda, cpu);
- u0 = data_race(sdp->srcu_unlock_count[!idx]);
- u1 = data_race(sdp->srcu_unlock_count[idx]);
- /*
- * Make sure that a lock is always counted if the corresponding
- * unlock is counted.
- */
- smp_rmb();
- l0 = data_race(sdp->srcu_lock_count[!idx]);
- l1 = data_race(sdp->srcu_lock_count[idx]);
- c0 = l0 - u0;
- c1 = l1 - u1;
- pr_cont(" %d(%ld,%ld %c)",
- cpu, c0, c1,
- "C."[rcu_segcblist_empty(&sdp->srcu_cblist)]);
- s0 += c0;
- s1 += c1;
- }
- pr_cont(" T(%ld,%ld)\n", s0, s1);
- }
- if (SRCU_SIZING_IS_TORTURE())
- srcu_transition_to_big(ssp);
- }
- EXPORT_SYMBOL_GPL(srcu_torture_stats_print);
- static int __init srcu_bootup_announce(void)
- {
- pr_info("Hierarchical SRCU implementation.\n");
- if (exp_holdoff != DEFAULT_SRCU_EXP_HOLDOFF)
- pr_info("\tNon-default auto-expedite holdoff of %lu ns.\n", exp_holdoff);
- if (srcu_retry_check_delay != SRCU_DEFAULT_RETRY_CHECK_DELAY)
- pr_info("\tNon-default retry check delay of %lu us.\n", srcu_retry_check_delay);
- if (srcu_max_nodelay != SRCU_DEFAULT_MAX_NODELAY)
- pr_info("\tNon-default max no-delay of %lu.\n", srcu_max_nodelay);
- pr_info("\tMax phase no-delay instances is %lu.\n", srcu_max_nodelay_phase);
- return 0;
- }
- early_initcall(srcu_bootup_announce);
- void __init srcu_init(void)
- {
- struct srcu_struct *ssp;
- /* Decide on srcu_struct-size strategy. */
- if (SRCU_SIZING_IS(SRCU_SIZING_AUTO)) {
- if (nr_cpu_ids >= big_cpu_lim) {
- convert_to_big = SRCU_SIZING_INIT; // Don't bother waiting for contention.
- pr_info("%s: Setting srcu_struct sizes to big.\n", __func__);
- } else {
- convert_to_big = SRCU_SIZING_NONE | SRCU_SIZING_CONTEND;
- pr_info("%s: Setting srcu_struct sizes based on contention.\n", __func__);
- }
- }
- /*
- * Once srcu_init_done is set, call_srcu() can follow the normal path and
- * queue delayed work. This must follow the creation of the RCU workqueues
- * and the initialization of timers.
- */
- srcu_init_done = true;
- while (!list_empty(&srcu_boot_list)) {
- ssp = list_first_entry(&srcu_boot_list, struct srcu_struct,
- work.work.entry);
- list_del_init(&ssp->work.work.entry);
- if (SRCU_SIZING_IS(SRCU_SIZING_INIT) && ssp->srcu_size_state == SRCU_SIZE_SMALL)
- ssp->srcu_size_state = SRCU_SIZE_ALLOC;
- queue_work(rcu_gp_wq, &ssp->work.work);
- }
- }
- #ifdef CONFIG_MODULES
- /* Initialize any global-scope srcu_struct structures used by this module. */
- static int srcu_module_coming(struct module *mod)
- {
- int i;
- struct srcu_struct **sspp = mod->srcu_struct_ptrs;
- int ret;
- for (i = 0; i < mod->num_srcu_structs; i++) {
- ret = init_srcu_struct(*(sspp++));
- if (WARN_ON_ONCE(ret))
- return ret;
- }
- return 0;
- }
- /* Clean up any global-scope srcu_struct structures used by this module. */
- static void srcu_module_going(struct module *mod)
- {
- int i;
- struct srcu_struct **sspp = mod->srcu_struct_ptrs;
- for (i = 0; i < mod->num_srcu_structs; i++)
- cleanup_srcu_struct(*(sspp++));
- }
- /* Handle one module, either coming or going. */
- static int srcu_module_notify(struct notifier_block *self,
- unsigned long val, void *data)
- {
- struct module *mod = data;
- int ret = 0;
- switch (val) {
- case MODULE_STATE_COMING:
- ret = srcu_module_coming(mod);
- break;
- case MODULE_STATE_GOING:
- srcu_module_going(mod);
- break;
- default:
- break;
- }
- return ret;
- }
- static struct notifier_block srcu_module_nb = {
- .notifier_call = srcu_module_notify,
- .priority = 0,
- };
- static __init int init_srcu_module_notifier(void)
- {
- int ret;
- ret = register_module_notifier(&srcu_module_nb);
- if (ret)
- pr_warn("Failed to register srcu module notifier\n");
- return ret;
- }
- late_initcall(init_srcu_module_notifier);
- #endif /* #ifdef CONFIG_MODULES */
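- /*
- * Illustrative sketch (editorial addition, not part of this file): the
- * module notifier above is what lets a loadable module simply write
- *
- *	DEFINE_STATIC_SRCU(my_module_srcu);
- *
- * at file scope. The module loader records such srcu_structs in the
- * module's ->srcu_struct_ptrs[], srcu_module_coming() initializes them at
- * MODULE_STATE_COMING, and srcu_module_going() cleans them up at
- * MODULE_STATE_GOING, so the module itself needs no explicit
- * init_srcu_struct() or cleanup_srcu_struct() calls for them.
- */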