diff --git a/include/linux/mm_types.h b/include/linux/mm_types.h
index e7bab1c7a452..a769ef105e6a 100644
--- a/include/linux/mm_types.h
+++ b/include/linux/mm_types.h
@@ -404,7 +404,7 @@ struct core_state {
 };
 
 struct kioctx_table;
-struct percpu_rw_semaphore;
+struct percpu_rw_semaphore_atomic;
 struct mm_struct {
 	struct {
 		struct vm_area_struct *mmap;		/* list of VMAs */
@@ -563,7 +563,7 @@ struct mm_struct {
 #ifdef CONFIG_MMU_NOTIFIER
 		struct mmu_notifier_subscriptions *notifier_subscriptions;
 #ifdef CONFIG_SPECULATIVE_PAGE_FAULT
-		struct percpu_rw_semaphore *mmu_notifier_lock;
+		struct percpu_rw_semaphore_atomic *mmu_notifier_lock;
 #endif
 #endif
 #if defined(CONFIG_TRANSPARENT_HUGEPAGE) && !USE_SPLIT_PMD_PTLOCKS
diff --git a/include/linux/mmu_notifier.h b/include/linux/mmu_notifier.h
index de5e37f95d99..986ed43eeafc 100644
--- a/include/linux/mmu_notifier.h
+++ b/include/linux/mmu_notifier.h
@@ -508,11 +508,12 @@ static inline void mmu_notifier_invalidate_range(struct mm_struct *mm,
 
 static inline bool mmu_notifier_subscriptions_init(struct mm_struct *mm)
 {
-	mm->mmu_notifier_lock = kzalloc(sizeof(struct percpu_rw_semaphore), GFP_KERNEL);
+	mm->mmu_notifier_lock = kzalloc(
+		sizeof(struct percpu_rw_semaphore_atomic), GFP_KERNEL);
 	if (!mm->mmu_notifier_lock)
 		return false;
 
-	percpu_init_rwsem(mm->mmu_notifier_lock);
+	percpu_init_rwsem(&mm->mmu_notifier_lock->rw_sem);
 
 	mm->notifier_subscriptions = NULL;
 	return true;
@@ -526,7 +527,7 @@ static inline void mmu_notifier_subscriptions_destroy(struct mm_struct *mm)
 	if (in_atomic()) {
 		percpu_rwsem_async_destroy(mm->mmu_notifier_lock);
 	} else {
-		percpu_free_rwsem(mm->mmu_notifier_lock);
+		percpu_free_rwsem(&mm->mmu_notifier_lock->rw_sem);
 		kfree(mm->mmu_notifier_lock);
 	}
 	mm->mmu_notifier_lock = NULL;
@@ -534,12 +535,12 @@ static inline void mmu_notifier_subscriptions_destroy(struct mm_struct *mm)
 
 static inline bool mmu_notifier_trylock(struct mm_struct *mm)
 {
-	return percpu_down_read_trylock(mm->mmu_notifier_lock);
+	return percpu_down_read_trylock(&mm->mmu_notifier_lock->rw_sem);
 }
 
 static inline void mmu_notifier_unlock(struct mm_struct *mm)
 {
-	percpu_up_read(mm->mmu_notifier_lock);
+	percpu_up_read(&mm->mmu_notifier_lock->rw_sem);
 }
 
 #else /* CONFIG_SPECULATIVE_PAGE_FAULT */
diff --git a/include/linux/percpu-rwsem.h b/include/linux/percpu-rwsem.h
index bf1668fc9c5e..536976636c58 100644
--- a/include/linux/percpu-rwsem.h
+++ b/include/linux/percpu-rwsem.h
@@ -13,20 +13,18 @@ struct percpu_rw_semaphore {
 	struct rcu_sync		rss;
 	unsigned int __percpu	*read_count;
 	struct rcuwait		writer;
-	/*
-	 * destroy_list_entry is used during object destruction when waiters
-	 * can't be used, therefore reusing the same space.
-	 */
-	union {
-		wait_queue_head_t	waiters;
-		struct list_head	destroy_list_entry;
-	};
+	wait_queue_head_t	waiters;
 	atomic_t		block;
 #ifdef CONFIG_DEBUG_LOCK_ALLOC
 	struct lockdep_map	dep_map;
 #endif
 };
 
+struct percpu_rw_semaphore_atomic {
+	struct percpu_rw_semaphore rw_sem;
+	struct list_head destroy_list_entry;
+};
+
 #ifdef CONFIG_DEBUG_LOCK_ALLOC
 #define __PERCPU_RWSEM_DEP_MAP_INIT(lockname)	.dep_map = { .name = #lockname },
 #else
@@ -138,7 +136,7 @@ extern int __percpu_init_rwsem(struct percpu_rw_semaphore *,
 extern void percpu_free_rwsem(struct percpu_rw_semaphore *);
 /* Invokes percpu_free_rwsem and frees the semaphore from a worker thread.
  */
-extern void percpu_rwsem_async_destroy(struct percpu_rw_semaphore *sem);
+extern void percpu_rwsem_async_destroy(struct percpu_rw_semaphore_atomic *sem);
 
 #define percpu_init_rwsem(sem)					\
 ({								\
diff --git a/kernel/locking/percpu-rwsem.c b/kernel/locking/percpu-rwsem.c
index a3d37bf83c60..b88eda4eb624 100644
--- a/kernel/locking/percpu-rwsem.c
+++ b/kernel/locking/percpu-rwsem.c
@@ -275,7 +275,7 @@ static DEFINE_SPINLOCK(destroy_list_lock);
 
 static void destroy_list_workfn(struct work_struct *work)
 {
-	struct percpu_rw_semaphore *sem, *sem2;
+	struct percpu_rw_semaphore_atomic *sem, *sem2;
 	LIST_HEAD(to_destroy);
 
 	spin_lock(&destroy_list_lock);
@@ -286,14 +286,14 @@ static void destroy_list_workfn(struct work_struct *work)
 		return;
 
 	list_for_each_entry_safe(sem, sem2, &to_destroy, destroy_list_entry) {
-		percpu_free_rwsem(sem);
+		percpu_free_rwsem(&sem->rw_sem);
 		kfree(sem);
 	}
 }
 
 static DECLARE_WORK(destroy_list_work, destroy_list_workfn);
 
-void percpu_rwsem_async_destroy(struct percpu_rw_semaphore *sem)
+void percpu_rwsem_async_destroy(struct percpu_rw_semaphore_atomic *sem)
 {
 	spin_lock(&destroy_list_lock);
 	list_add_tail(&sem->destroy_list_entry, &destroy_list);
diff --git a/mm/mmu_notifier.c b/mm/mmu_notifier.c
index ce161cd0c8a1..7f01fa75d351 100644
--- a/mm/mmu_notifier.c
+++ b/mm/mmu_notifier.c
@@ -625,12 +625,12 @@ void __mmu_notifier_invalidate_range(struct mm_struct *mm,
 
 static inline void mmu_notifier_write_lock(struct mm_struct *mm)
 {
-	percpu_down_write(mm->mmu_notifier_lock);
+	percpu_down_write(&mm->mmu_notifier_lock->rw_sem);
 }
 
 static inline void mmu_notifier_write_unlock(struct mm_struct *mm)
 {
-	percpu_up_write(mm->mmu_notifier_lock);
+	percpu_up_write(&mm->mmu_notifier_lock->rw_sem);
 }
 
 #else /* CONFIG_SPECULATIVE_PAGE_FAULT */