Merge tag 'modules-next-for-linus' of git://git.kernel.org/pub/scm/linux/kernel/git/rusty/linux
Pull module updates from Rusty Russell:
 "Main excitement here is Peter Zijlstra's lockless rbtree optimization
  to speed module address lookup.  He found some abusers of the module
  lock doing that too.

  A little bit of parameter work here too; including Dan Streetman's
  breaking up the big param mutex so writing a parameter can load
  another module (yeah, really).  Unfortunately that broke the usual
  suspects, !CONFIG_MODULES and !CONFIG_SYSFS, so those fixes were
  appended too"

* tag 'modules-next-for-linus' of git://git.kernel.org/pub/scm/linux/kernel/git/rusty/linux: (26 commits)
  modules: only use mod->param_lock if CONFIG_MODULES
  param: fix module param locks when !CONFIG_SYSFS.
  rcu: merge fix for Convert ACCESS_ONCE() to READ_ONCE() and WRITE_ONCE()
  module: add per-module param_lock
  module: make perm const
  params: suppress unused variable error, warn once just in case code changes.
  modules: clarify CONFIG_MODULE_COMPRESS help, suggest 'N'.
  kernel/module.c: avoid ifdefs for sig_enforce declaration
  kernel/workqueue.c: remove ifdefs over wq_power_efficient
  kernel/params.c: export param_ops_bool_enable_only
  kernel/params.c: generalize bool_enable_only
  kernel/module.c: use generic module param operaters for sig_enforce
  kernel/params: constify struct kernel_param_ops uses
  sysfs: tightened sysfs permission checks
  module: Rework module_addr_{min,max}
  module: Use __module_address() for module_address_lookup()
  module: Make the mod_tree stuff conditional on PERF_EVENTS || TRACING
  module: Optimize __module_address() using a latched RB-tree
  rbtree: Implement generic latch_tree
  seqlock: Introduce raw_read_seqcount_latch()
  ...
@@ -319,32 +319,7 @@ static inline s64 timekeeping_get_ns(struct tk_read_base *tkr)
  * We want to use this from any context including NMI and tracing /
  * instrumenting the timekeeping code itself.
  *
- * So we handle this differently than the other timekeeping accessor
- * functions which retry when the sequence count has changed. The
- * update side does:
- *
- * smp_wmb(); <- Ensure that the last base[1] update is visible
- * tkf->seq++;
- * smp_wmb(); <- Ensure that the seqcount update is visible
- * update(tkf->base[0], tkr);
- * smp_wmb(); <- Ensure that the base[0] update is visible
- * tkf->seq++;
- * smp_wmb(); <- Ensure that the seqcount update is visible
- * update(tkf->base[1], tkr);
- *
- * The reader side does:
- *
- * do {
- *	seq = tkf->seq;
- *	smp_rmb();
- *	idx = seq & 0x01;
- *	now = now(tkf->base[idx]);
- *	smp_rmb();
- * } while (seq != tkf->seq)
- *
- * As long as we update base[0] readers are forced off to
- * base[1]. Once base[0] is updated readers are redirected to base[0]
- * and the base[1] update takes place.
+ * Employ the latch technique; see @raw_write_seqcount_latch.
  *
  * So if a NMI hits the update of base[0] then it will use base[1]
  * which is still consistent. In the worst case this can result is a
@@ -407,7 +382,7 @@ static __always_inline u64 __ktime_get_fast_ns(struct tk_fast *tkf)
 	u64 now;
 
 	do {
-		seq = raw_read_seqcount(&tkf->seq);
+		seq = raw_read_seqcount_latch(&tkf->seq);
 		tkr = tkf->base + (seq & 0x01);
 		now = ktime_to_ns(tkr->base) + timekeeping_get_ns(tkr);
 	} while (read_seqcount_retry(&tkf->seq, seq));
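The comment removed in the first hunk documents the latch protocol that raw_write_seqcount_latch() and raw_read_seqcount_latch() now encapsulate: the writer bumps the sequence count, updates copy 0, bumps it again, then updates copy 1, so readers selected by seq & 1 always land on a copy that is not currently being rewritten. Below is a minimal userspace sketch of that pattern, assuming C11 atomics; the seq_cst fences stand in for the kernel's smp_wmb()/smp_rmb(), and the names here (struct latch, struct sample, latch_update(), latch_read()) are illustrative only, not kernel APIs.

#include <inttypes.h>
#include <stdatomic.h>
#include <stdint.h>
#include <stdio.h>

struct sample {
	uint64_t base;
	uint64_t mult;
};

struct latch {
	atomic_uint seq;	/* starts even; odd means data[0] is in flux */
	struct sample data[2];
};

/*
 * Update side, mirroring the removed comment: bump seq, write data[0],
 * bump seq, write data[1].  While data[0] is being rewritten seq is odd,
 * so readers are steered to data[1], and vice versa.
 */
static void latch_update(struct latch *l, const struct sample *val)
{
	atomic_thread_fence(memory_order_seq_cst); /* ~smp_wmb(): last data[1] write visible */
	l->seq++;                                  /* odd: readers now use data[1] */
	atomic_thread_fence(memory_order_seq_cst); /* ~smp_wmb(): seq update visible */
	l->data[0] = *val;
	atomic_thread_fence(memory_order_seq_cst); /* ~smp_wmb(): data[0] write visible */
	l->seq++;                                  /* even: readers now use data[0] */
	atomic_thread_fence(memory_order_seq_cst); /* ~smp_wmb(): seq update visible */
	l->data[1] = *val;
}

/*
 * Read side: pick the copy selected by seq & 1 and retry if seq moved,
 * like the removed "do { ... } while (seq != tkf->seq)" loop.
 */
static struct sample latch_read(struct latch *l)
{
	struct sample s;
	unsigned int seq;

	do {
		seq = l->seq;
		atomic_thread_fence(memory_order_seq_cst); /* ~smp_rmb() */
		s = l->data[seq & 1];
		atomic_thread_fence(memory_order_seq_cst); /* ~smp_rmb() */
	} while (seq != l->seq);

	return s;
}

int main(void)
{
	struct latch l = { .seq = 0 };
	struct sample v = { .base = 42, .mult = 7 };
	struct sample r;

	latch_update(&l, &v);
	r = latch_read(&l);
	printf("base=%" PRIu64 " mult=%" PRIu64 "\n", r.base, r.mult);
	return 0;
}

__ktime_get_fast_ns() relies on exactly this property: an NMI that interrupts the update of base[0] still reads a consistent base[1], at worst returning a slightly stale timestamp, which is what the retained part of the comment describes.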