BACKPORT: ANDROID: dma-buf: heaps: replace mutex lock with spinlock

We should use a spinlock to protect the page pool's critical section:
1. The critical section is short, so a spinlock is more efficient.
2. A spinlock prevents priority inversion. With the mutex, a
   low-priority thread (dmabuf-deferred) could hold the lock and get
   scheduled out under heavy load; higher-priority threads then had
   to wait for dmabuf-deferred to release the lock, causing long
   allocation latency and possible UI jank. A spinlock holder is not
   preempted, so this cannot happen.

Also, move the NR_KERNEL_MISC_RECLAIMABLE stat update out of the
critical section to shorten it further; mod_node_page_state() already
handles concurrent access safely.
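
To make the pattern concrete, here is a minimal, illustrative sketch
(the demo_pool type and demo_pool_add() are hypothetical, not the
driver's code): the list update stays under the spinlock, and the
vmstat update runs after the unlock.

#include <linux/list.h>
#include <linux/mm.h>
#include <linux/spinlock.h>

/* Hypothetical pool, for illustration only. */
struct demo_pool {
	spinlock_t lock;
	struct list_head items;
	int count;
	unsigned int order;
};

static void demo_pool_add(struct demo_pool *p, struct page *page)
{
	/* Short critical section: only the list and counter update. */
	spin_lock(&p->lock);
	list_add_tail(&page->lru, &p->items);
	p->count++;
	spin_unlock(&p->lock);

	/*
	 * Stat update outside the lock; mod_node_page_state() is safe
	 * against concurrent callers.
	 */
	mod_node_page_state(page_pgdat(page), NR_KERNEL_MISC_RECLAIMABLE,
			    1 << p->order);
}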

Conflicts:
	drivers/dma-buf/heaps/page_pool.h
	drivers/dma-buf/heaps/page_pool.c

1. The android12-5.10 KMI is frozen, and the original patch's change
   to struct dmabuf_page_pool would break the KMI. Instead, we wrap
   each dmabuf_page_pool allocation in a struct
   dmabuf_page_pool_with_spinlock, which carries the spinlock that
   replaces dmabuf_page_pool's mutex.

   No callers should attempt to acquire dmabuf_page_pool's mutex on this
   branch, so it is locked immediately after initialization and never
   unlocked.
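
   A minimal sketch of the wrapper technique, using the container_of()
   pattern from the diff below (the pool_spinlock() helper is
   hypothetical, added here only to illustrate the recovery step):

   #include <linux/kernel.h>
   #include <linux/spinlock.h>

   struct dmabuf_page_pool_with_spinlock {
   	struct dmabuf_page_pool pool; /* frozen KMI layout, unchanged */
   	struct spinlock spinlock;     /* private; replaces pool.mutex */
   };

   /*
    * Hypothetical helper: recover the private spinlock from any
    * dmabuf_page_pool pointer handed back to page_pool.c.
    */
   static spinlock_t *pool_spinlock(struct dmabuf_page_pool *pool)
   {
   	struct dmabuf_page_pool_with_spinlock *container_pool =
   		container_of(pool, struct dmabuf_page_pool_with_spinlock, pool);

   	return &container_pool->spinlock;
   }

   Since callers only ever see a struct dmabuf_page_pool *, the extra
   member is invisible to the KMI; locking the now-unused mutex right
   after init makes any stale user deadlock immediately instead of
   racing silently.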

(cherry picked from commit 060e38dce1d69b81fe633f31751a610e3dd8e983)
Bug: 245454030
Change-Id: I15f349f9e893621f71ca79f1de037de184c33edf
Signed-off-by: T.J. Mercier <tjmercier@google.com>

drivers/dma-buf/heaps/page_pool.c

@@ -11,10 +11,16 @@
 #include <linux/freezer.h>
 #include <linux/list.h>
 #include <linux/slab.h>
+#include <linux/spinlock.h>
 #include <linux/swap.h>
 #include <linux/sched/signal.h>
 #include "page_pool.h"
 
+struct dmabuf_page_pool_with_spinlock {
+	struct dmabuf_page_pool pool;
+	struct spinlock spinlock;
+};
+
 static LIST_HEAD(pool_list);
 static DEFINE_MUTEX(pool_list_lock);
@@ -35,34 +41,41 @@ static inline void dmabuf_page_pool_free_pages(struct dmabuf_page_pool *pool,
 static void dmabuf_page_pool_add(struct dmabuf_page_pool *pool, struct page *page)
 {
 	int index;
+	struct dmabuf_page_pool_with_spinlock *container_pool =
+		container_of(pool, struct dmabuf_page_pool_with_spinlock, pool);
 
 	if (PageHighMem(page))
 		index = POOL_HIGHPAGE;
 	else
 		index = POOL_LOWPAGE;
 
-	mutex_lock(&pool->mutex);
+	spin_lock(&container_pool->spinlock);
 	list_add_tail(&page->lru, &pool->items[index]);
 	pool->count[index]++;
+	spin_unlock(&container_pool->spinlock);
 	mod_node_page_state(page_pgdat(page), NR_KERNEL_MISC_RECLAIMABLE,
 			    1 << pool->order);
-	mutex_unlock(&pool->mutex);
 }
 
 static struct page *dmabuf_page_pool_remove(struct dmabuf_page_pool *pool, int index)
 {
 	struct page *page;
+	struct dmabuf_page_pool_with_spinlock *container_pool =
+		container_of(pool, struct dmabuf_page_pool_with_spinlock, pool);
 
-	mutex_lock(&pool->mutex);
+	spin_lock(&container_pool->spinlock);
 	page = list_first_entry_or_null(&pool->items[index], struct page, lru);
 	if (page) {
 		pool->count[index]--;
 		list_del(&page->lru);
+		spin_unlock(&container_pool->spinlock);
 		mod_node_page_state(page_pgdat(page), NR_KERNEL_MISC_RECLAIMABLE,
 				    -(1 << pool->order));
+		goto out;
 	}
-	mutex_unlock(&pool->mutex);
+	spin_unlock(&container_pool->spinlock);
 
+out:
 	return page;
 }
@@ -113,19 +126,25 @@ static int dmabuf_page_pool_total(struct dmabuf_page_pool *pool, bool high)
 struct dmabuf_page_pool *dmabuf_page_pool_create(gfp_t gfp_mask, unsigned int order)
 {
-	struct dmabuf_page_pool *pool = kmalloc(sizeof(*pool), GFP_KERNEL);
+	struct dmabuf_page_pool *pool;
+	struct dmabuf_page_pool_with_spinlock *container_pool =
+		kmalloc(sizeof(*container_pool), GFP_KERNEL);
 	int i;
 
-	if (!pool)
+	if (!container_pool)
 		return NULL;
 
+	spin_lock_init(&container_pool->spinlock);
+	pool = &container_pool->pool;
+
 	for (i = 0; i < POOL_TYPE_SIZE; i++) {
 		pool->count[i] = 0;
 		INIT_LIST_HEAD(&pool->items[i]);
 	}
 	pool->gfp_mask = gfp_mask | __GFP_COMP;
 	pool->order = order;
-	mutex_init(&pool->mutex);
+	mutex_init(&pool->mutex); /* No longer used! */
+	mutex_lock(&pool->mutex); /* Make sure anyone who attempts to acquire this hangs */
 
 	mutex_lock(&pool_list_lock);
 	list_add(&pool->list, &pool_list);
@@ -138,6 +157,7 @@ EXPORT_SYMBOL_GPL(dmabuf_page_pool_create);
 void dmabuf_page_pool_destroy(struct dmabuf_page_pool *pool)
 {
 	struct page *page;
+	struct dmabuf_page_pool_with_spinlock *container_pool;
 	int i;
 
 	/* Remove us from the pool list */
@@ -151,7 +171,8 @@ void dmabuf_page_pool_destroy(struct dmabuf_page_pool *pool)
 		dmabuf_page_pool_free_pages(pool, page);
 	}
 
-	kfree(pool);
+	container_pool = container_of(pool, struct dmabuf_page_pool_with_spinlock, pool);
+	kfree(container_pool);
 }
 EXPORT_SYMBOL_GPL(dmabuf_page_pool_destroy);
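
For reference, a hedged usage sketch showing that callers are
unaffected, assuming the dmabuf_page_pool_alloc()/dmabuf_page_pool_free()
API declared in page_pool.h on this branch (demo_caller() is
hypothetical):

static int demo_caller(void)
{
	struct dmabuf_page_pool *pool;
	struct page *page;

	/* Callers only ever hold a struct dmabuf_page_pool *, so the
	 * containing struct and its spinlock remain an implementation
	 * detail of page_pool.c. */
	pool = dmabuf_page_pool_create(GFP_KERNEL, 0);
	if (!pool)
		return -ENOMEM;

	page = dmabuf_page_pool_alloc(pool);
	if (page)
		dmabuf_page_pool_free(pool, page);

	dmabuf_page_pool_destroy(pool);
	return 0;
}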

drivers/dma-buf/heaps/page_pool.h

@@ -40,7 +40,7 @@ enum {
 struct dmabuf_page_pool {
 	int count[POOL_TYPE_SIZE];
 	struct list_head items[POOL_TYPE_SIZE];
-	struct mutex mutex;
+	struct mutex mutex; /* No longer used! */
 	gfp_t gfp_mask;
 	unsigned int order;
 	struct list_head list;