Merge branch 'for-mingo' of git://git.kernel.org/pub/scm/linux/kernel/git/paulmck/linux-rcu into core/rcu
Pull RCU changes from Paul E. McKenney:

- Convert RCU's BUG_ON() and similar calls to WARN_ON() and similar.

- Replace calls of RCU-bh and RCU-sched update-side functions
  to their vanilla RCU counterparts.  This series is a step
  towards complete removal of the RCU-bh and RCU-sched update-side
  functions.

  ( Note that some of these conversions are going upstream via their
    respective maintainers. )

- Documentation updates, including a number of flavor-consolidation
  updates from Joel Fernandes.

- Miscellaneous fixes.

- Automate generation of the initrd filesystem used for
  rcutorture testing.

- Convert spin_is_locked() assertions to instead use lockdep.

  ( Note that some of these conversions are going upstream via their
    respective maintainers. )

- SRCU updates, especially including a fix from Dennis Krein
  for a bag-on-head-class bug.

- RCU torture-test updates.

Signed-off-by: Ingo Molnar <mingo@kernel.org>
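Two mechanical patterns account for most of the hunks below: spin_is_locked()
assertions become lockdep_assert_held(), and RCU-sched update-side calls become
their vanilla RCU counterparts. A minimal sketch of both, not taken from this
merge; example_lock, example_head, example_cb, and example_update are all
hypothetical names:

	/*
	 * Hedged sketch of the two conversion patterns; every identifier
	 * here is hypothetical, not from the kernel tree.
	 */
	#include <linux/lockdep.h>
	#include <linux/rcupdate.h>
	#include <linux/spinlock.h>

	static DEFINE_SPINLOCK(example_lock);	/* hypothetical lock */
	static struct rcu_head example_head;	/* hypothetical rcu_head */

	static void example_cb(struct rcu_head *head)
	{
		/* reclaim the object that embeds example_head */
	}

	static void example_update(void)
	{
		/*
		 * Before: VM_BUG_ON(NR_CPUS != 1 && !spin_is_locked(&example_lock));
		 * spin_is_locked() only proves that *somebody* holds the lock;
		 * lockdep_assert_held() checks that *this* context holds it,
		 * and compiles to nothing when lockdep is disabled.
		 */
		lockdep_assert_held(&example_lock);

		/*
		 * Before: call_rcu_sched(&example_head, example_cb);
		 * After flavor consolidation, a vanilla RCU grace period also
		 * waits for preempt- and irq-disabled regions, so the _sched
		 * variant is no longer needed.
		 */
		call_rcu(&example_head, example_cb);
	}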
--- a/mm/khugepaged.c
+++ b/mm/khugepaged.c
@@ -1225,7 +1225,7 @@ static void collect_mm_slot(struct mm_slot *mm_slot)
 {
 	struct mm_struct *mm = mm_slot->mm;
 
-	VM_BUG_ON(NR_CPUS != 1 && !spin_is_locked(&khugepaged_mm_lock));
+	lockdep_assert_held(&khugepaged_mm_lock);
 
 	if (khugepaged_test_exit(mm)) {
 		/* free mm_slot */
@@ -1653,7 +1653,7 @@ static unsigned int khugepaged_scan_mm_slot(unsigned int pages,
 	int progress = 0;
 
 	VM_BUG_ON(!pages);
-	VM_BUG_ON(NR_CPUS != 1 && !spin_is_locked(&khugepaged_mm_lock));
+	lockdep_assert_held(&khugepaged_mm_lock);
 
 	if (khugepaged_scan.mm_slot)
 		mm_slot = khugepaged_scan.mm_slot;
--- a/mm/memory.c
+++ b/mm/memory.c
@@ -199,7 +199,7 @@ void tlb_table_flush(struct mmu_gather *tlb)
 
 	if (*batch) {
 		tlb_table_invalidate(tlb);
-		call_rcu_sched(&(*batch)->rcu, tlb_remove_table_rcu);
+		call_rcu(&(*batch)->rcu, tlb_remove_table_rcu);
 		*batch = NULL;
 	}
 }
--- a/mm/slab.c
+++ b/mm/slab.c
@@ -962,10 +962,10 @@ static int setup_kmem_cache_node(struct kmem_cache *cachep,
 	 * To protect lockless access to n->shared during irq disabled context.
 	 * If n->shared isn't NULL in irq disabled context, accessing to it is
 	 * guaranteed to be valid until irq is re-enabled, because it will be
-	 * freed after synchronize_sched().
+	 * freed after synchronize_rcu().
 	 */
 	if (old_shared && force_change)
-		synchronize_sched();
+		synchronize_rcu();
 
 fail:
 	kfree(old_shared);
--- a/mm/slab_common.c
+++ b/mm/slab_common.c
@@ -724,7 +724,7 @@ void slab_deactivate_memcg_cache_rcu_sched(struct kmem_cache *s,
 	css_get(&s->memcg_params.memcg->css);
 
 	s->memcg_params.deact_fn = deact_fn;
-	call_rcu_sched(&s->memcg_params.deact_rcu_head, kmemcg_deactivate_rcufn);
+	call_rcu(&s->memcg_params.deact_rcu_head, kmemcg_deactivate_rcufn);
 }
 
 void memcg_deactivate_kmem_caches(struct mem_cgroup *memcg)
@@ -839,11 +839,11 @@ static void flush_memcg_workqueue(struct kmem_cache *s)
 	mutex_unlock(&slab_mutex);
 
 	/*
-	 * SLUB deactivates the kmem_caches through call_rcu_sched. Make
+	 * SLUB deactivates the kmem_caches through call_rcu. Make
 	 * sure all registered rcu callbacks have been invoked.
 	 */
 	if (IS_ENABLED(CONFIG_SLUB))
-		rcu_barrier_sched();
+		rcu_barrier();
 
 	/*
 	 * SLAB and SLUB create memcg kmem_caches through workqueue and SLUB
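One distinction worth keeping in mind for the rcu_barrier() conversion above:
a grace-period wait and a callback barrier are different guarantees. A short
hedged sketch; example_cache_destroy is a hypothetical teardown path:

	#include <linux/rcupdate.h>

	static void example_cache_destroy(void)	/* hypothetical */
	{
		/*
		 * synchronize_rcu() waits for pre-existing readers, but NOT
		 * for callbacks already queued with call_rcu().  rcu_barrier()
		 * waits until all previously queued callbacks have been
		 * invoked, which a teardown path needs before freeing the
		 * structures those callbacks touch.  After flavor
		 * consolidation it also covers callbacks queued via the old
		 * call_rcu_sched().
		 */
		rcu_barrier();
		/* now safe to destroy caches used by the callbacks */
	}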
--- a/mm/swap.c
+++ b/mm/swap.c
@@ -823,8 +823,7 @@ void lru_add_page_tail(struct page *page, struct page *page_tail,
 	VM_BUG_ON_PAGE(!PageHead(page), page);
 	VM_BUG_ON_PAGE(PageCompound(page_tail), page);
 	VM_BUG_ON_PAGE(PageLRU(page_tail), page);
-	VM_BUG_ON(NR_CPUS != 1 &&
-		  !spin_is_locked(&lruvec_pgdat(lruvec)->lru_lock));
+	lockdep_assert_held(&lruvec_pgdat(lruvec)->lru_lock);
 
 	if (!list)
 		SetPageLRU(page_tail);