Merge branch 'locking-core-for-linus' of git://git.kernel.org/pub/scm/linux/kernel/git/tip/tip into next
Pull core locking updates from Ingo Molnar:
 "The main changes in this cycle were:

   - reduced/streamlined smp_mb__*() interface that allows more usecases
     and makes the existing ones less buggy, especially in rarer
     architectures

   - add rwsem implementation comments

   - bump up lockdep limits"

* 'locking-core-for-linus' of git://git.kernel.org/pub/scm/linux/kernel/git/tip/tip: (33 commits)
  rwsem: Add comments to explain the meaning of the rwsem's count field
  lockdep: Increase static allocations
  arch: Mass conversion of smp_mb__*()
  arch,doc: Convert smp_mb__*()
  arch,xtensa: Convert smp_mb__*()
  arch,x86: Convert smp_mb__*()
  arch,tile: Convert smp_mb__*()
  arch,sparc: Convert smp_mb__*()
  arch,sh: Convert smp_mb__*()
  arch,score: Convert smp_mb__*()
  arch,s390: Convert smp_mb__*()
  arch,powerpc: Convert smp_mb__*()
  arch,parisc: Convert smp_mb__*()
  arch,openrisc: Convert smp_mb__*()
  arch,mn10300: Convert smp_mb__*()
  arch,mips: Convert smp_mb__*()
  arch,metag: Convert smp_mb__*()
  arch,m68k: Convert smp_mb__*()
  arch,m32r: Convert smp_mb__*()
  arch,ia64: Convert smp_mb__*()
  ...
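As a rough illustration of the streamlined interface (a minimal sketch with hypothetical names, not code taken from this merge): the per-operation barriers such as smp_mb__before_atomic_dec(), smp_mb__after_atomic_inc(), smp_mb__before_clear_bit() and smp_mb__after_clear_bit() collapse into the two generic helpers smp_mb__before_atomic() and smp_mb__after_atomic(), which order memory accesses around any non-value-returning atomic op or bitop:

	/*
	 * Minimal sketch of the conversion pattern (kernel build context
	 * assumed; the function name and bit index are made up here).
	 */
	#include <linux/bitops.h>
	#include <linux/wait.h>

	static void example_clear_and_wake(unsigned long *flags,
					   wait_queue_head_t *wq)
	{
		/* was: smp_mb__before_clear_bit(); */
		smp_mb__before_atomic();	/* order earlier stores before the bitop */
		clear_bit(0, flags);
		/* was: smp_mb__after_clear_bit(); */
		smp_mb__after_atomic();		/* order the bitop before the wakeup */
		wake_up(wq);			/* waiters now observe bit 0 cleared */
	}

The same two helpers replace the _inc/_dec variants around atomic_inc()/atomic_dec(), which is exactly what the drivers/md hunks below do.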
@@ -828,7 +828,7 @@ static inline bool cached_dev_get(struct cached_dev *dc)
 		return false;
 
 	/* Paired with the mb in cached_dev_attach */
-	smp_mb__after_atomic_inc();
+	smp_mb__after_atomic();
 	return true;
 }
 
@@ -243,7 +243,7 @@ static inline void set_closure_fn(struct closure *cl, closure_fn *fn,
 	cl->fn = fn;
 	cl->wq = wq;
 	/* between atomic_dec() in closure_put() */
-	smp_mb__before_atomic_dec();
+	smp_mb__before_atomic();
 }
 
 static inline void closure_queue(struct closure *cl)
@@ -607,9 +607,9 @@ static void write_endio(struct bio *bio, int error)
 
 	BUG_ON(!test_bit(B_WRITING, &b->state));
 
-	smp_mb__before_clear_bit();
+	smp_mb__before_atomic();
 	clear_bit(B_WRITING, &b->state);
-	smp_mb__after_clear_bit();
+	smp_mb__after_atomic();
 
 	wake_up_bit(&b->state, B_WRITING);
 }
@@ -997,9 +997,9 @@ static void read_endio(struct bio *bio, int error)
 
 	BUG_ON(!test_bit(B_READING, &b->state));
 
-	smp_mb__before_clear_bit();
+	smp_mb__before_atomic();
 	clear_bit(B_READING, &b->state);
-	smp_mb__after_clear_bit();
+	smp_mb__after_atomic();
 
 	wake_up_bit(&b->state, B_READING);
 }
@@ -642,7 +642,7 @@ static void free_pending_exception(struct dm_snap_pending_exception *pe)
 	struct dm_snapshot *s = pe->snap;
 
 	mempool_free(pe, s->pending_pool);
-	smp_mb__before_atomic_dec();
+	smp_mb__before_atomic();
 	atomic_dec(&s->pending_exceptions_count);
 }
 
@@ -783,7 +783,7 @@ static int init_hash_tables(struct dm_snapshot *s)
 static void merge_shutdown(struct dm_snapshot *s)
 {
 	clear_bit_unlock(RUNNING_MERGE, &s->state_bits);
-	smp_mb__after_clear_bit();
+	smp_mb__after_atomic();
 	wake_up_bit(&s->state_bits, RUNNING_MERGE);
 }
 
@@ -2446,7 +2446,7 @@ static void dm_wq_work(struct work_struct *work)
 static void dm_queue_flush(struct mapped_device *md)
 {
 	clear_bit(DMF_BLOCK_IO_FOR_SUSPEND, &md->flags);
-	smp_mb__after_clear_bit();
+	smp_mb__after_atomic();
 	queue_work(md->wq, &md->work);
 }
 
@@ -4400,7 +4400,7 @@ static void raid5_unplug(struct blk_plug_cb *blk_cb, bool from_schedule)
 			 * STRIPE_ON_UNPLUG_LIST clear but the stripe
 			 * is still in our list
 			 */
-			smp_mb__before_clear_bit();
+			smp_mb__before_atomic();
 			clear_bit(STRIPE_ON_UNPLUG_LIST, &sh->state);
 			/*
 			 * STRIPE_ON_RELEASE_LIST could be set here. In that