radix-tree: delete radix_tree_range_tag_if_tagged()
This is an exceptionally complicated function with just one caller
(tag_pages_for_writeback).  We devote a large portion of the runtime of
the test suite to testing this one function which has one caller.  By
introducing the new function radix_tree_iter_tag_set(), we can eliminate
all of the complexity while keeping the performance.  The caller can now
use a fairly standard radix_tree_for_each() loop, and it doesn't need to
worry about tricksy things like 'start' wrapping.

The test suite continues to spend a large amount of time investigating
this function, but now it's testing the underlying primitives such as
radix_tree_iter_resume() and the radix_tree_for_each_tagged() iterator
which are also used by other parts of the kernel.

Link: http://lkml.kernel.org/r/1480369871-5271-57-git-send-email-mawilcox@linuxonhyperv.com
Signed-off-by: Matthew Wilcox <willy@infradead.org>
Tested-by: Kirill A. Shutemov <kirill.shutemov@linux.intel.com>
Cc: Konstantin Khlebnikov <koct9i@gmail.com>
Cc: Ross Zwisler <ross.zwisler@linux.intel.com>
Cc: Matthew Wilcox <mawilcox@microsoft.com>
Signed-off-by: Andrew Morton <akpm@linux-foundation.org>
Signed-off-by: Linus Torvalds <torvalds@linux-foundation.org>
committed by Linus Torvalds
parent 478922e2b0
commit 268f42de71
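
The caller-side win the message describes is that tag_pages_for_writeback()
can now be an ordinary tagged-iteration loop.  A minimal sketch of that
shape follows; it assumes the contemporary page-cache tags, the
WRITEBACK_TAG_BATCH constant and mapping->tree_lock, and the function name
retag_for_writeback() is made up here -- this is illustrative, not the
literal mm/page-writeback.c body:

/*
 * Sketch only: walk DIRTY-tagged slots, retag them TOWRITE, and use
 * radix_tree_iter_resume() so the lock can be dropped every
 * WRITEBACK_TAG_BATCH entries without restarting the walk.
 */
static void retag_for_writeback(struct address_space *mapping,
				pgoff_t start, pgoff_t end)
{
	struct radix_tree_iter iter;
	void **slot;
	unsigned int tagged = 0;

	spin_lock_irq(&mapping->tree_lock);
	radix_tree_for_each_tagged(slot, &mapping->page_tree, &iter, start,
				   PAGECACHE_TAG_DIRTY) {
		if (iter.index > end)
			break;
		radix_tree_iter_tag_set(&mapping->page_tree, &iter,
					PAGECACHE_TAG_TOWRITE);
		if ((++tagged % WRITEBACK_TAG_BATCH) != 0)
			continue;
		/* Drop the lock periodically; resume keeps our place. */
		slot = radix_tree_iter_resume(slot, &iter);
		spin_unlock_irq(&mapping->tree_lock);
		cond_resched();
		spin_lock_irq(&mapping->tree_lock);
	}
	spin_unlock_irq(&mapping->tree_lock);
}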
@@ -151,6 +151,40 @@ void item_full_scan(struct radix_tree_root *root, unsigned long start,
 	assert(nfound == 0);
 }
+
+/* Use the same pattern as tag_pages_for_writeback() in mm/page-writeback.c */
+int tag_tagged_items(struct radix_tree_root *root, pthread_mutex_t *lock,
+			unsigned long start, unsigned long end, unsigned batch,
+			unsigned iftag, unsigned thentag)
+{
+	unsigned long tagged = 0;
+	struct radix_tree_iter iter;
+	void **slot;
+
+	if (batch == 0)
+		batch = 1;
+
+	if (lock)
+		pthread_mutex_lock(lock);
+	radix_tree_for_each_tagged(slot, root, &iter, start, iftag) {
+		if (iter.index > end)
+			break;
+		radix_tree_iter_tag_set(root, &iter, thentag);
+		tagged++;
+		if ((tagged % batch) != 0)
+			continue;
+		slot = radix_tree_iter_resume(slot, &iter);
+		if (lock) {
+			pthread_mutex_unlock(lock);
+			rcu_barrier();
+			pthread_mutex_lock(lock);
+		}
+	}
+	if (lock)
+		pthread_mutex_unlock(lock);
+
+	return tagged;
+}
 
 /* Use the same pattern as find_swap_entry() in mm/shmem.c */
 unsigned long find_item(struct radix_tree_root *root, void *item)
 {
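
For reference, a hypothetical test-suite call of the tag_tagged_items()
helper added above: tag every inserted index with tag 0, then copy that
tag to tag 1 in batches of 10.  The example function name is made up and
only core radix-tree calls are used; this is a sketch of how the helper
might be exercised, not part of the commit.

static void example_copy_tags(void)
{
	RADIX_TREE(tree, GFP_KERNEL);
	static int dummy;
	unsigned long i, count;

	for (i = 0; i < 100; i++) {
		radix_tree_insert(&tree, i * 4, &dummy);
		radix_tree_tag_set(&tree, i * 4, 0);
	}

	/* Single-threaded test, so no lock is passed. */
	count = tag_tagged_items(&tree, NULL, 0, ~0UL, 10, 0, 1);
	assert(count == 100);
	assert(radix_tree_tag_get(&tree, 0, 1));
	assert(radix_tree_tag_get(&tree, 396, 1));
}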