Merge branch 'locking-core-for-linus' of git://git.kernel.org/pub/scm/linux/kernel/git/tip/tip
Pull locking/atomics update from Thomas Gleixner:
 "The locking, atomics and memory model brains delivered:

  - A larger update to the atomics code which reworks the ordering
    barriers, consolidates the atomic primitives, provides the new
    atomic64_fetch_add_unless() primitive and cleans up the include hell.

  - Simplify cmpxchg() instrumentation and add instrumentation for xchg()
    and cmpxchg_double().

  - Updates to the memory model and documentation"

* 'locking-core-for-linus' of git://git.kernel.org/pub/scm/linux/kernel/git/tip/tip: (48 commits)
  locking/atomics: Rework ordering barriers
  locking/atomics: Instrument cmpxchg_double*()
  locking/atomics: Instrument xchg()
  locking/atomics: Simplify cmpxchg() instrumentation
  locking/atomics/x86: Reduce arch_cmpxchg64*() instrumentation
  tools/memory-model: Rename litmus tests to comply to norm7
  tools/memory-model/Documentation: Fix typo, smb->smp
  sched/Documentation: Update wake_up() & co. memory-barrier guarantees
  locking/spinlock, sched/core: Clarify requirements for smp_mb__after_spinlock()
  sched/core: Use smp_mb() in wake_woken_function()
  tools/memory-model: Add informal LKMM documentation to MAINTAINERS
  locking/atomics/Documentation: Describe atomic_set() as a write operation
  tools/memory-model: Make scripts executable
  tools/memory-model: Remove ACCESS_ONCE() from model
  tools/memory-model: Remove ACCESS_ONCE() from recipes
  locking/memory-barriers.txt/kokr: Update Korean translation to fix broken DMA vs. MMIO ordering example
  MAINTAINERS: Add Daniel Lustig as an LKMM reviewer
  tools/memory-model: Fix ISA2+pooncelock+pooncelock+pombonce name
  tools/memory-model: Add litmus test for full multicopy atomicity
  locking/refcount: Always allow checked forms
  ...
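For readers unfamiliar with the renamed primitive used in the diff below: atomic_fetch_add_unless(v, a, u) adds a to *v unless *v equals u, and returns the value *v held beforehand, which is the same contract the old __atomic_add_unless() had. Below is a minimal, illustrative userspace sketch of that contract using C11 atomics; it is not the kernel implementation, just a way to see the return-value semantics:

/*
 * Illustrative userspace sketch of the atomic_fetch_add_unless() contract
 * using C11 atomics - NOT the kernel implementation. The contract: add @a
 * to @v unless @v equals @u, and return whatever value @v held beforehand.
 */
#include <stdatomic.h>
#include <stdio.h>

static int fetch_add_unless(atomic_int *v, int a, int u)
{
	int old = atomic_load(v);

	do {
		if (old == u)
			break;		/* hit the forbidden value: do not add */
	} while (!atomic_compare_exchange_weak(v, &old, old + a));

	return old;			/* the old value tells the caller what happened */
}

int main(void)
{
	atomic_int refcnt = 1;

	/* refcnt is 1 (object alive): add succeeds, returns 1, refcnt becomes 2 */
	printf("%d\n", fetch_add_unless(&refcnt, 1, 0));

	atomic_store(&refcnt, 0);

	/* refcnt is 0 (object dying): add is refused, returns 0 */
	printf("%d\n", fetch_add_unless(&refcnt, 1, 0));

	return 0;
}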
@@ -575,7 +575,7 @@ static struct bpf_map *bpf_map_inc_not_zero(struct bpf_map *map,
 {
 	int refold;
 
-	refold = __atomic_add_unless(&map->refcnt, 1, 0);
+	refold = atomic_fetch_add_unless(&map->refcnt, 1, 0);
 
 	if (refold >= BPF_MAX_REFCNT) {
 		__bpf_map_put(map, false);
@@ -1144,7 +1144,7 @@ struct bpf_prog *bpf_prog_inc_not_zero(struct bpf_prog *prog)
 {
 	int refold;
 
-	refold = __atomic_add_unless(&prog->aux->refcnt, 1, 0);
+	refold = atomic_fetch_add_unless(&prog->aux->refcnt, 1, 0);
 
 	if (refold >= BPF_MAX_REFCNT) {
 		__bpf_prog_put(prog, false);
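Both hunks rely on the returned old value: 0 means the refcount had already dropped to zero and the object must not be resurrected, while a value at or above BPF_MAX_REFCNT means the freshly taken reference has to be dropped again. A simplified sketch of that acquire-if-alive pattern follows; the function name, the error codes and the !refold handling are assumptions about the surrounding code that the hunks truncate, not a verbatim copy of the BPF helpers:

/*
 * Simplified sketch only: the name, the ERR_PTR() error codes and the
 * !refold branch are assumptions; the real BPF helpers differ in detail.
 */
static struct bpf_map *map_get_not_zero(struct bpf_map *map)
{
	int refold = atomic_fetch_add_unless(&map->refcnt, 1, 0);

	if (refold >= BPF_MAX_REFCNT) {
		/* Too many references: drop the one we just took and bail out. */
		__bpf_map_put(map, false);
		return ERR_PTR(-EBUSY);
	}
	if (!refold) {
		/* Count was already zero: the map is being freed, do not revive it. */
		return ERR_PTR(-ENOENT);
	}
	return map;
}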