xfs: embed mru_elem into parent structure

There is no need to do a separate allocation for each mru element: just
embed the structure into the parent structure in the caller.  Besides
saving a memory allocation and the infrastructure required for it, this
also simplifies the API.

While we are doing major surgery on xfs_mru_cache.c, also de-typedef it
and make struct xfs_mru_cache private to the implementation file.
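
To illustrate the new calling convention, here is a sketch of what a
caller looks like after this change (the structure and function names
below are hypothetical, modelled on an xfs_mru_cache user such as the
filestreams code): the caller embeds the element in its own structure
and recovers the parent with container_of() in the free callback:

	/* hypothetical caller-side structure embedding the mru element */
	struct xfs_fstrm_item {
		struct xfs_mru_cache_elem	mru;	/* embedded element */
		struct xfs_inode		*ip;	/* caller payload */
	};

	/* the free callback now takes the element, not a key/value pair */
	STATIC void
	xfs_fstrm_free_func(
		struct xfs_mru_cache_elem *mru)
	{
		struct xfs_fstrm_item	*item =
			container_of(mru, struct xfs_fstrm_item, mru);

		/* drop the caller's references and free the parent */
		kmem_free(item);
	}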

Signed-off-by: Christoph Hellwig <hch@lst.de>
Reviewed-by: Dave Chinner <dchinner@redhat.com>
Signed-off-by: Dave Chinner <david@fromorbit.com>
Christoph Hellwig
2014-04-23 07:11:51 +10:00
Committed by: Dave Chinner
Parent: ce695c6551
Commit: 22328d712d
3 changed files with 107 additions and 146 deletions

fs/xfs/xfs_mru_cache.c

@@ -100,14 +100,20 @@
  * likely result in a loop in one of the lists. That's a sure-fire recipe for
  * an infinite loop in the code.
  */
-typedef struct xfs_mru_cache_elem
-{
-	struct list_head list_node;
-	unsigned long	key;
-	void		*value;
-} xfs_mru_cache_elem_t;
+struct xfs_mru_cache {
+	struct radix_tree_root	store;     /* Core storage data structure.  */
+	struct list_head	*lists;    /* Array of lists, one per grp.  */
+	struct list_head	reap_list; /* Elements overdue for reaping. */
+	spinlock_t		lock;      /* Lock to protect this struct.  */
+	unsigned int		grp_count; /* Number of discrete groups.    */
+	unsigned int		grp_time;  /* Time period spanned by grps.  */
+	unsigned int		lru_grp;   /* Group containing time zero.   */
+	unsigned long		time_zero; /* Time first element was added. */
+	xfs_mru_cache_free_func_t free_func; /* Function pointer for freeing. */
+	struct delayed_work	work;      /* Workqueue data for reaping.   */
+	unsigned int		queued;	   /* work has been queued */
+};
 
-static kmem_zone_t		*xfs_mru_elem_zone;
 static struct workqueue_struct	*xfs_mru_reap_wq;
 
 /*
@@ -129,12 +135,12 @@ static struct workqueue_struct *xfs_mru_reap_wq;
  */
 STATIC unsigned long
 _xfs_mru_cache_migrate(
-	xfs_mru_cache_t	*mru,
-	unsigned long	now)
+	struct xfs_mru_cache	*mru,
+	unsigned long		now)
 {
-	unsigned int	grp;
-	unsigned int	migrated = 0;
-	struct list_head *lru_list;
+	unsigned int		grp;
+	unsigned int		migrated = 0;
+	struct list_head	*lru_list;
 
 	/* Nothing to do if the data store is empty. */
 	if (!mru->time_zero)
@@ -193,11 +199,11 @@ _xfs_mru_cache_migrate(
  */
 STATIC void
 _xfs_mru_cache_list_insert(
-	xfs_mru_cache_t		*mru,
-	xfs_mru_cache_elem_t	*elem)
+	struct xfs_mru_cache	*mru,
+	struct xfs_mru_cache_elem *elem)
 {
-	unsigned int	grp = 0;
-	unsigned long	now = jiffies;
+	unsigned int		grp = 0;
+	unsigned long		now = jiffies;
 
 	/*
 	 * If the data store is empty, initialise time zero, leave grp set to
@@ -231,10 +237,10 @@ _xfs_mru_cache_list_insert(
  */
 STATIC void
 _xfs_mru_cache_clear_reap_list(
-	xfs_mru_cache_t		*mru) __releases(mru->lock) __acquires(mru->lock)
+	struct xfs_mru_cache	*mru)
+		__releases(mru->lock) __acquires(mru->lock)
 {
-	xfs_mru_cache_elem_t	*elem, *next;
+	struct xfs_mru_cache_elem *elem, *next;
 	struct list_head	tmp;
 
 	INIT_LIST_HEAD(&tmp);
@@ -252,15 +258,8 @@ _xfs_mru_cache_clear_reap_list(
 	spin_unlock(&mru->lock);
 
 	list_for_each_entry_safe(elem, next, &tmp, list_node) {
-
-		/* Remove the element from the reap list. */
 		list_del_init(&elem->list_node);
-
-		/* Call the client's free function with the key and value pointer. */
-		mru->free_func(elem->key, elem->value);
-
-		/* Free the element structure. */
-		kmem_zone_free(xfs_mru_elem_zone, elem);
+		mru->free_func(elem);
 	}
 
 	spin_lock(&mru->lock);
@@ -277,7 +276,8 @@ STATIC void
 _xfs_mru_cache_reap(
 	struct work_struct	*work)
 {
-	xfs_mru_cache_t		*mru = container_of(work, xfs_mru_cache_t, work.work);
+	struct xfs_mru_cache	*mru =
+		container_of(work, struct xfs_mru_cache, work.work);
 	unsigned long		now, next;
 
 	ASSERT(mru && mru->lists);
@@ -304,28 +304,16 @@
 int
 xfs_mru_cache_init(void)
 {
-	xfs_mru_elem_zone = kmem_zone_init(sizeof(xfs_mru_cache_elem_t),
-					 "xfs_mru_cache_elem");
-	if (!xfs_mru_elem_zone)
-		goto out;
-
 	xfs_mru_reap_wq = alloc_workqueue("xfs_mru_cache", WQ_MEM_RECLAIM, 1);
 	if (!xfs_mru_reap_wq)
-		goto out_destroy_mru_elem_zone;
-
+		return -ENOMEM;
 	return 0;
-
- out_destroy_mru_elem_zone:
-	kmem_zone_destroy(xfs_mru_elem_zone);
- out:
-	return -ENOMEM;
 }
 
 void
 xfs_mru_cache_uninit(void)
 {
 	destroy_workqueue(xfs_mru_reap_wq);
-	kmem_zone_destroy(xfs_mru_elem_zone);
 }
 
 /*
@@ -336,14 +324,14 @@ xfs_mru_cache_uninit(void)
  */
 int
 xfs_mru_cache_create(
-	xfs_mru_cache_t		**mrup,
+	struct xfs_mru_cache	**mrup,
 	unsigned int		lifetime_ms,
 	unsigned int		grp_count,
 	xfs_mru_cache_free_func_t free_func)
 {
-	xfs_mru_cache_t	*mru = NULL;
-	int		err = 0, grp;
-	unsigned int	grp_time;
+	struct xfs_mru_cache	*mru = NULL;
+	int			err = 0, grp;
+	unsigned int		grp_time;
 
 	if (mrup)
 		*mrup = NULL;
@@ -400,7 +388,7 @@ exit:
  */
 static void
 xfs_mru_cache_flush(
-	xfs_mru_cache_t		*mru)
+	struct xfs_mru_cache	*mru)
 {
 	if (!mru || !mru->lists)
 		return;
@@ -420,7 +408,7 @@ xfs_mru_cache_flush(
 void
 xfs_mru_cache_destroy(
-	xfs_mru_cache_t		*mru)
+	struct xfs_mru_cache	*mru)
 {
 	if (!mru || !mru->lists)
 		return;
@@ -438,45 +426,29 @@ xfs_mru_cache_destroy(
  */
 int
 xfs_mru_cache_insert(
-	xfs_mru_cache_t	*mru,
-	unsigned long	key,
-	void		*value)
+	struct xfs_mru_cache	*mru,
+	unsigned long		key,
+	struct xfs_mru_cache_elem *elem)
 {
-	xfs_mru_cache_elem_t *elem;
-	int		error;
+	int			error;
 
 	ASSERT(mru && mru->lists);
 	if (!mru || !mru->lists)
 		return EINVAL;
 
-	elem = kmem_zone_zalloc(xfs_mru_elem_zone, KM_SLEEP);
-	if (!elem)
+	if (radix_tree_preload(GFP_KERNEL))
 		return ENOMEM;
 
-	if (radix_tree_preload(GFP_KERNEL)) {
-		error = ENOMEM;
-		goto out_free_item;
-	}
-
 	INIT_LIST_HEAD(&elem->list_node);
 	elem->key = key;
-	elem->value = value;
 
 	spin_lock(&mru->lock);
 	error = -radix_tree_insert(&mru->store, key, elem);
 	radix_tree_preload_end();
-	if (error) {
-		spin_unlock(&mru->lock);
-		goto out_free_item;
-	}
-	_xfs_mru_cache_list_insert(mru, elem);
+	if (!error)
+		_xfs_mru_cache_list_insert(mru, elem);
 	spin_unlock(&mru->lock);
 
-	return 0;
-
-out_free_item:
-	kmem_zone_free(xfs_mru_elem_zone, elem);
 	return error;
 }
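
A note on the reworked insert path above: radix_tree_preload() may
sleep to fill a per-CPU node pool and therefore has to run before
mru->lock is taken; on success it returns with preemption disabled, and
radix_tree_preload_end() re-enables preemption once the insert has
consumed from the pool. The negation of the radix_tree_insert() return
value converts the kernel's negative errno into the positive error
convention XFS used at the time. A condensed sketch of the pattern
(illustrative only, not part of the patch; the function name is made
up):

	STATIC int
	example_preload_insert(
		struct xfs_mru_cache	*mru,
		unsigned long		key,
		struct xfs_mru_cache_elem *elem)
	{
		int			error;

		/* may sleep filling the per-CPU pool; must run unlocked */
		if (radix_tree_preload(GFP_KERNEL))
			return ENOMEM;

		spin_lock(&mru->lock);
		/* cannot fail with ENOMEM now: nodes come from the pool */
		error = -radix_tree_insert(&mru->store, key, elem);
		radix_tree_preload_end();	/* re-enables preemption */
		spin_unlock(&mru->lock);

		return error;
	}
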
@@ -486,13 +458,12 @@ out_free_item:
  * the client data pointer for the removed element is returned, otherwise this
  * function will return a NULL pointer.
  */
-void *
+struct xfs_mru_cache_elem *
 xfs_mru_cache_remove(
-	xfs_mru_cache_t	*mru,
-	unsigned long	key)
+	struct xfs_mru_cache	*mru,
+	unsigned long		key)
 {
-	xfs_mru_cache_elem_t *elem;
-	void		*value = NULL;
+	struct xfs_mru_cache_elem *elem;
 
 	ASSERT(mru && mru->lists);
 	if (!mru || !mru->lists)
@@ -500,17 +471,11 @@ xfs_mru_cache_remove(
 	spin_lock(&mru->lock);
 	elem = radix_tree_delete(&mru->store, key);
-	if (elem) {
-		value = elem->value;
+	if (elem)
 		list_del(&elem->list_node);
-	}
-
 	spin_unlock(&mru->lock);
 
-	if (elem)
-		kmem_zone_free(xfs_mru_elem_zone, elem);
-
-	return value;
+	return elem;
 }
 
 /*
@@ -519,13 +484,14 @@ xfs_mru_cache_remove(
  */
 void
 xfs_mru_cache_delete(
-	xfs_mru_cache_t	*mru,
-	unsigned long	key)
+	struct xfs_mru_cache	*mru,
+	unsigned long		key)
 {
-	void		*value = xfs_mru_cache_remove(mru, key);
+	struct xfs_mru_cache_elem *elem;
 
-	if (value)
-		mru->free_func(key, value);
+	elem = xfs_mru_cache_remove(mru, key);
+	if (elem)
+		mru->free_func(elem);
 }
 
 /*
@@ -548,12 +514,12 @@ xfs_mru_cache_delete(
  * status, we need to help it get it right by annotating the path that does
  * not release the lock.
  */
-void *
+struct xfs_mru_cache_elem *
 xfs_mru_cache_lookup(
-	xfs_mru_cache_t	*mru,
-	unsigned long	key)
+	struct xfs_mru_cache	*mru,
+	unsigned long		key)
 {
-	xfs_mru_cache_elem_t *elem;
+	struct xfs_mru_cache_elem *elem;
 
 	ASSERT(mru && mru->lists);
 	if (!mru || !mru->lists)
@@ -568,7 +534,7 @@ xfs_mru_cache_lookup(
 	} else
 		spin_unlock(&mru->lock);
 
-	return elem ? elem->value : NULL;
+	return elem;
 }
 
 /*
@@ -578,7 +544,8 @@ xfs_mru_cache_lookup(
  */
 void
 xfs_mru_cache_done(
-	xfs_mru_cache_t	*mru) __releases(mru->lock)
+	struct xfs_mru_cache	*mru)
+		__releases(mru->lock)
 {
 	spin_unlock(&mru->lock);
 }
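
Taken together, a user of the reworked API now allocates its own
containing structure and hands the embedded element to the cache.
A minimal caller-side sketch, assuming the hypothetical xfs_fstrm_item
from the commit message above and the kmem_alloc()/KM_MAYFAIL helpers
XFS had at the time (function name made up; errors follow the positive
convention of this era):

	STATIC int
	example_use(
		struct xfs_mru_cache	*mru,
		unsigned long		key)
	{
		struct xfs_mru_cache_elem *mru_elem;
		struct xfs_fstrm_item	*item;
		int			error;

		item = kmem_alloc(sizeof(*item), KM_MAYFAIL);
		if (!item)
			return ENOMEM;

		/* the cache no longer allocates; it just links elem in */
		error = xfs_mru_cache_insert(mru, key, &item->mru);
		if (error) {
			kmem_free(item);
			return error;
		}

		/* lookup returns the element with mru->lock still held */
		mru_elem = xfs_mru_cache_lookup(mru, key);
		if (mru_elem) {
			item = container_of(mru_elem,
					struct xfs_fstrm_item, mru);
			/* ... use item here ... */
			xfs_mru_cache_done(mru);	/* drops mru->lock */
		}
		return 0;
	}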