ANDROID: fix ABI breakage caused by percpu_rw_semaphore changes
The percpu_rw_semaphore changes that allow calling percpu_free_rwsem() in atomic context break the ABI, because they change the layout of struct percpu_rw_semaphore. Introduce a percpu_rw_semaphore_atomic wrapper and change percpu_rwsem_async_destroy() to use it, keeping struct percpu_rw_semaphore intact and fixing the ABI breakage.

Bug: 161210518
Signed-off-by: Suren Baghdasaryan <surenb@google.com>
Change-Id: I198a6381fb48059f2aaa2ec38b8c1e5e5e936bb0
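Before the file-by-file diff, a minimal sketch of the lifecycle the wrapper enables. This is illustrative only, not part of the patch: it uses the names introduced below, trims error handling, and the percpu_down_read()/percpu_up_read() pair stands in for any reader.

	struct percpu_rw_semaphore_atomic *lock;

	lock = kzalloc(sizeof(*lock), GFP_KERNEL);
	if (!lock || percpu_init_rwsem(&lock->rw_sem))
		return;				/* allocation or init failed */

	percpu_down_read(&lock->rw_sem);	/* readers use the embedded rwsem */
	percpu_up_read(&lock->rw_sem);

	if (in_atomic())
		percpu_rwsem_async_destroy(lock);	/* deferred to a worker */
	else {
		percpu_free_rwsem(&lock->rw_sem);
		kfree(lock);
	}

The point of the wrapper is that the list linkage needed for deferred destruction lives outside the ABI-frozen struct percpu_rw_semaphore.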
include/linux/mm_types.h

@@ -404,7 +404,7 @@ struct core_state {
 };
 
 struct kioctx_table;
-struct percpu_rw_semaphore;
+struct percpu_rw_semaphore_atomic;
 struct mm_struct {
 	struct {
 		struct vm_area_struct *mmap;		/* list of VMAs */
@@ -563,7 +563,7 @@ struct mm_struct {
 #ifdef CONFIG_MMU_NOTIFIER
 	struct mmu_notifier_subscriptions *notifier_subscriptions;
 #ifdef CONFIG_SPECULATIVE_PAGE_FAULT
-	struct percpu_rw_semaphore *mmu_notifier_lock;
+	struct percpu_rw_semaphore_atomic *mmu_notifier_lock;
 #endif
 #endif
 #if defined(CONFIG_TRANSPARENT_HUGEPAGE) && !USE_SPLIT_PMD_PTLOCKS
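Note that mmu_notifier_lock is a pointer, so swapping its pointee type changes neither the size nor the offsets of struct mm_struct; the mm_struct ABI is untouched. A hedged compile-time illustration of that point (not in the patch):

	/* Illustrative only: both are pointers, so mm_struct layout is
	 * unaffected by the type swap. */
	_Static_assert(sizeof(struct percpu_rw_semaphore *) ==
		       sizeof(struct percpu_rw_semaphore_atomic *),
		       "pointer swap must not resize mm_struct");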
include/linux/mmu_notifier.h

@@ -508,11 +508,12 @@ static inline void mmu_notifier_invalidate_range(struct mm_struct *mm,
 
 static inline bool mmu_notifier_subscriptions_init(struct mm_struct *mm)
 {
-	mm->mmu_notifier_lock = kzalloc(sizeof(struct percpu_rw_semaphore), GFP_KERNEL);
+	mm->mmu_notifier_lock = kzalloc(
+		sizeof(struct percpu_rw_semaphore_atomic), GFP_KERNEL);
 	if (!mm->mmu_notifier_lock)
 		return false;
 
-	percpu_init_rwsem(mm->mmu_notifier_lock);
+	percpu_init_rwsem(&mm->mmu_notifier_lock->rw_sem);
 	mm->notifier_subscriptions = NULL;
 
 	return true;
@@ -526,7 +527,7 @@ static inline void mmu_notifier_subscriptions_destroy(struct mm_struct *mm)
 	if (in_atomic()) {
 		percpu_rwsem_async_destroy(mm->mmu_notifier_lock);
 	} else {
-		percpu_free_rwsem(mm->mmu_notifier_lock);
+		percpu_free_rwsem(&mm->mmu_notifier_lock->rw_sem);
 		kfree(mm->mmu_notifier_lock);
 	}
 	mm->mmu_notifier_lock = NULL;
@@ -534,12 +535,12 @@ static inline void mmu_notifier_subscriptions_destroy(struct mm_struct *mm)
 
 static inline bool mmu_notifier_trylock(struct mm_struct *mm)
 {
-	return percpu_down_read_trylock(mm->mmu_notifier_lock);
+	return percpu_down_read_trylock(&mm->mmu_notifier_lock->rw_sem);
 }
 
 static inline void mmu_notifier_unlock(struct mm_struct *mm)
 {
-	percpu_up_read(mm->mmu_notifier_lock);
+	percpu_up_read(&mm->mmu_notifier_lock->rw_sem);
 }
 
 #else /* CONFIG_SPECULATIVE_PAGE_FAULT */
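The trylock/unlock pair above is what a speculative page-fault path would use, since it cannot sleep on the write side. A hedged sketch of a caller (the function below is hypothetical, not from this patch):

	static bool do_speculative_work(struct mm_struct *mm)
	{
		if (!mmu_notifier_trylock(mm))
			return false;	/* fall back to the blocking path */
		/* ... work that must exclude notifier registration ... */
		mmu_notifier_unlock(mm);
		return true;
	}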
include/linux/percpu-rwsem.h

@@ -13,20 +13,18 @@ struct percpu_rw_semaphore {
 	struct rcu_sync		rss;
 	unsigned int __percpu	*read_count;
 	struct rcuwait		writer;
-	/*
-	 * destroy_list_entry is used during object destruction when waiters
-	 * can't be used, therefore reusing the same space.
-	 */
-	union {
-		wait_queue_head_t	waiters;
-		struct list_head	destroy_list_entry;
-	};
+	wait_queue_head_t	waiters;
 	atomic_t		block;
 #ifdef CONFIG_DEBUG_LOCK_ALLOC
 	struct lockdep_map	dep_map;
 #endif
 };
 
+struct percpu_rw_semaphore_atomic {
+	struct percpu_rw_semaphore rw_sem;
+	struct list_head destroy_list_entry;
+};
+
 #ifdef CONFIG_DEBUG_LOCK_ALLOC
 #define __PERCPU_RWSEM_DEP_MAP_INIT(lockname)	.dep_map = { .name = #lockname },
 #else
@@ -138,7 +136,7 @@ extern int __percpu_init_rwsem(struct percpu_rw_semaphore *,
 extern void percpu_free_rwsem(struct percpu_rw_semaphore *);
 
 /* Invokes percpu_free_rwsem and frees the semaphore from a worker thread. */
-extern void percpu_rwsem_async_destroy(struct percpu_rw_semaphore *sem);
+extern void percpu_rwsem_async_destroy(struct percpu_rw_semaphore_atomic *sem);
 
 #define percpu_init_rwsem(sem)					\
 ({								\
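A note on the wrapper layout: the previous approach put destroy_list_entry in a union inside percpu_rw_semaphore itself, which changed the struct's layout; this patch reverts that and keeps the linkage in the wrapper, where list_entry() (container_of()) can recover the whole object from the list node. A hedged illustration of the mechanism that list_for_each_entry_safe() hides in the worker below:

	/* Illustrative only: recovering the wrapper from its list linkage. */
	struct list_head *pos = destroy_list.next;	/* assumed non-empty */
	struct percpu_rw_semaphore_atomic *sem =
		list_entry(pos, struct percpu_rw_semaphore_atomic,
			   destroy_list_entry);
	percpu_free_rwsem(&sem->rw_sem);
	kfree(sem);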
kernel/locking/percpu-rwsem.c

@@ -275,7 +275,7 @@ static DEFINE_SPINLOCK(destroy_list_lock);
 
 static void destroy_list_workfn(struct work_struct *work)
 {
-	struct percpu_rw_semaphore *sem, *sem2;
+	struct percpu_rw_semaphore_atomic *sem, *sem2;
 	LIST_HEAD(to_destroy);
 
 	spin_lock(&destroy_list_lock);
@@ -286,14 +286,14 @@ static void destroy_list_workfn(struct work_struct *work)
 		return;
 
 	list_for_each_entry_safe(sem, sem2, &to_destroy, destroy_list_entry) {
-		percpu_free_rwsem(sem);
+		percpu_free_rwsem(&sem->rw_sem);
 		kfree(sem);
 	}
 }
 
 static DECLARE_WORK(destroy_list_work, destroy_list_workfn);
 
-void percpu_rwsem_async_destroy(struct percpu_rw_semaphore *sem)
+void percpu_rwsem_async_destroy(struct percpu_rw_semaphore_atomic *sem)
 {
 	spin_lock(&destroy_list_lock);
 	list_add_tail(&sem->destroy_list_entry, &destroy_list);
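The hunk cuts off inside percpu_rwsem_async_destroy(); the rest of the function is unchanged and outside the diff context. A hedged sketch of the presumable complete flow, where the unlock and schedule_work() lines are assumptions rather than lines shown by the hunk:

	void percpu_rwsem_async_destroy(struct percpu_rw_semaphore_atomic *sem)
	{
		spin_lock(&destroy_list_lock);
		list_add_tail(&sem->destroy_list_entry, &destroy_list);
		spin_unlock(&destroy_list_lock);	/* assumed: unchanged tail */
		schedule_work(&destroy_list_work);	/* assumed: kicks the worker */
	}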
mm/mmu_notifier.c

@@ -625,12 +625,12 @@ void __mmu_notifier_invalidate_range(struct mm_struct *mm,
 
 static inline void mmu_notifier_write_lock(struct mm_struct *mm)
 {
-	percpu_down_write(mm->mmu_notifier_lock);
+	percpu_down_write(&mm->mmu_notifier_lock->rw_sem);
 }
 
 static inline void mmu_notifier_write_unlock(struct mm_struct *mm)
 {
-	percpu_up_write(mm->mmu_notifier_lock);
+	percpu_up_write(&mm->mmu_notifier_lock->rw_sem);
 }
 
 #else /* CONFIG_SPECULATIVE_PAGE_FAULT */
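These write-side helpers mirror the read side: taking the rwsem for write holds off speculative readers while notifier state changes. A hedged sketch of a caller (hypothetical function, not from this patch; real code would likely also use release semantics for the store):

	static void install_subscriptions(struct mm_struct *mm,
					  struct mmu_notifier_subscriptions *subs)
	{
		mmu_notifier_write_lock(mm);	/* holds off speculative readers */
		mm->notifier_subscriptions = subs;
		mmu_notifier_write_unlock(mm);
	}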