Merge branch 'akpm' (patches from Andrew)
Merge more updates from Andrew Morton:

 - procfs updates
 - various misc bits
 - lib/ updates
 - epoll updates
 - autofs
 - fatfs
 - a few more MM bits

* emailed patches from Andrew Morton <akpm@linux-foundation.org>: (58 commits)
  mm/page_io.c: fix polled swap page in
  checkpatch: add Co-developed-by to signature tags
  docs: fix Co-Developed-by docs
  drivers/base/platform.c: kmemleak ignore a known leak
  fs: don't open code lru_to_page()
  fs/: remove caller signal_pending branch predictions
  mm/: remove caller signal_pending branch predictions
  arch/arc/mm/fault.c: remove caller signal_pending_branch predictions
  kernel/sched/: remove caller signal_pending branch predictions
  kernel/locking/mutex.c: remove caller signal_pending branch predictions
  mm: select HAVE_MOVE_PMD on x86 for faster mremap
  mm: speed up mremap by 20x on large regions
  mm: treewide: remove unused address argument from pte_alloc functions
  initramfs: cleanup incomplete rootfs
  scripts/gdb: fix lx-version string output
  kernel/kcov.c: mark write_comp_data() as notrace
  kernel/sysctl: add panic_print into sysctl
  panic: add options to print system info when panic happens
  bfs: extra sanity checking and static inode bitmap
  exec: separate MM_ANONPAGES and RLIMIT_STACK accounting
  ...
--- a/lib/find_bit_benchmark.c
+++ b/lib/find_bit_benchmark.c
@@ -108,14 +108,13 @@ static int __init test_find_next_and_bit(const void *bitmap,
 		const void *bitmap2, unsigned long len)
 {
 	unsigned long i, cnt;
-	cycles_t cycles;
+	ktime_t time;
 
-	cycles = get_cycles();
+	time = ktime_get();
 	for (cnt = i = 0; i < BITMAP_LEN; cnt++)
-		i = find_next_and_bit(bitmap, bitmap2, BITMAP_LEN, i+1);
-	cycles = get_cycles() - cycles;
-	pr_err("find_next_and_bit:\t\t%llu cycles, %ld iterations\n",
-		(u64)cycles, cnt);
+		i = find_next_and_bit(bitmap, bitmap2, BITMAP_LEN, i + 1);
+	time = ktime_get() - time;
+	pr_err("find_next_and_bit: %18llu ns, %6ld iterations\n", time, cnt);
 
 	return 0;
 }
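[Note] For context, here is a minimal sketch of the ktime_get()-based timing pattern the benchmark switches to. The measured loop and the function name are placeholders, not the real find_next_and_bit() workload:

#include <linux/ktime.h>
#include <linux/printk.h>

/* Sketch only: measure an arbitrary loop in nanoseconds with ktime_get(). */
static void time_placeholder_loop(void)
{
	volatile unsigned long sink = 0;
	unsigned long i;
	ktime_t time;

	time = ktime_get();			/* monotonic start time */
	for (i = 0; i < 1000000; i++)
		sink += i;			/* placeholder workload */
	time = ktime_get() - time;		/* elapsed time in ns (ktime_t is s64) */

	pr_err("placeholder loop: %llu ns\n", (unsigned long long)time);
}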
--- a/lib/genalloc.c
+++ b/lib/genalloc.c
@@ -187,7 +187,7 @@ int gen_pool_add_virt(struct gen_pool *pool, unsigned long virt, phys_addr_t phy
 	int nbytes = sizeof(struct gen_pool_chunk) +
 				BITS_TO_LONGS(nbits) * sizeof(long);
 
-	chunk = kzalloc_node(nbytes, GFP_KERNEL, nid);
+	chunk = vzalloc_node(nbytes, nid);
 	if (unlikely(chunk == NULL))
 		return -ENOMEM;
 
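[Note] The switch to vzalloc_node() matters because the chunk bitmap scales with chunk size, and a large physically contiguous kzalloc_node() is presumably what was failing. A back-of-the-envelope illustration in plain userspace C, with made-up chunk size and order:

#include <stdio.h>

/* Illustrative only: bitmap size for a 1 GiB chunk with 32-byte allocation units. */
int main(void)
{
	unsigned long chunk_size = 1UL << 30;		/* 1 GiB chunk */
	int min_alloc_order = 5;			/* 32-byte units */
	unsigned long nbits = chunk_size >> min_alloc_order;
	unsigned long longs = (nbits + 8 * sizeof(long) - 1) / (8 * sizeof(long));
	unsigned long bytes = longs * sizeof(long);	/* BITS_TO_LONGS(nbits) * sizeof(long) */

	/* Roughly 4 MiB of bitmap: asking kzalloc_node() for that much physically
	 * contiguous memory is fragile, while vzalloc_node() only needs virtually
	 * contiguous pages. */
	printf("bitmap needs %lu bytes (%.1f MiB)\n", bytes, bytes / (1024.0 * 1024.0));
	return 0;
}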
@@ -251,7 +251,7 @@ void gen_pool_destroy(struct gen_pool *pool)
 		bit = find_next_bit(chunk->bits, end_bit, 0);
 		BUG_ON(bit < end_bit);
 
-		kfree(chunk);
+		vfree(chunk);
 	}
 	kfree_const(pool->name);
 	kfree(pool);
@@ -311,7 +311,7 @@ unsigned long gen_pool_alloc_algo(struct gen_pool *pool, size_t size,
 		end_bit = chunk_size(chunk) >> order;
 retry:
 		start_bit = algo(chunk->bits, end_bit, start_bit,
-				 nbits, data, pool);
+				 nbits, data, pool, chunk->start_addr);
 		if (start_bit >= end_bit)
 			continue;
 		remain = bitmap_set_ll(chunk->bits, start_bit, nbits);
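[Note] Every allocation callback now receives the chunk's start_addr as a trailing argument, so any out-of-tree algorithm has to grow the same parameter. A minimal sketch against the signature visible in this diff (my_first_fit is a made-up name; it ignores start_addr and simply mirrors the stock first-fit search):

#include <linux/bitmap.h>
#include <linux/genalloc.h>

/* Sketch only: a custom allocation callback carrying the new start_addr
 * parameter; behaves like plain first-fit. */
static unsigned long my_first_fit(unsigned long *map, unsigned long size,
		unsigned long start, unsigned int nr, void *data,
		struct gen_pool *pool, unsigned long start_addr)
{
	return bitmap_find_next_zero_area(map, size, start, nr, 0);
}

It would be registered with gen_pool_set_algo(pool, my_first_fit, NULL) exactly as before; only the callback signature changes.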
@@ -525,7 +525,7 @@ EXPORT_SYMBOL(gen_pool_set_algo);
  */
 unsigned long gen_pool_first_fit(unsigned long *map, unsigned long size,
 		unsigned long start, unsigned int nr, void *data,
-		struct gen_pool *pool)
+		struct gen_pool *pool, unsigned long start_addr)
 {
 	return bitmap_find_next_zero_area(map, size, start, nr, 0);
 }
@@ -543,16 +543,19 @@ EXPORT_SYMBOL(gen_pool_first_fit);
  */
 unsigned long gen_pool_first_fit_align(unsigned long *map, unsigned long size,
 		unsigned long start, unsigned int nr, void *data,
-		struct gen_pool *pool)
+		struct gen_pool *pool, unsigned long start_addr)
 {
 	struct genpool_data_align *alignment;
-	unsigned long align_mask;
+	unsigned long align_mask, align_off;
 	int order;
 
 	alignment = data;
 	order = pool->min_alloc_order;
 	align_mask = ((alignment->align + (1UL << order) - 1) >> order) - 1;
-	return bitmap_find_next_zero_area(map, size, start, nr, align_mask);
+	align_off = (start_addr & (alignment->align - 1)) >> order;
+
+	return bitmap_find_next_zero_area_off(map, size, start, nr,
+					      align_mask, align_off);
 }
 EXPORT_SYMBOL(gen_pool_first_fit_align);
 
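[Note] The align_off term is what actually fixes aligned allocation from a chunk whose start_addr is not itself aligned. A self-contained arithmetic check with made-up numbers (plain userspace C, mirroring the two expressions above):

#include <assert.h>
#include <stdio.h>

/* Illustrative numbers only: a chunk that starts 1 KiB past a 4 KiB boundary. */
int main(void)
{
	unsigned long start_addr = 0x10000400UL;	/* chunk->start_addr, not 4 KiB aligned */
	int order = 5;					/* pool->min_alloc_order: 32-byte units */
	unsigned long align = 0x1000UL;			/* caller wants 4 KiB alignment */

	unsigned long align_mask = ((align + (1UL << order) - 1) >> order) - 1;	/* 0x7f */
	unsigned long align_off  = (start_addr & (align - 1)) >> order;		/* 32   */

	/* Old behaviour: the first index on the 128-bit grid is 0, so the address
	 * stays at start_addr, which is NOT 4 KiB aligned. */
	unsigned long old_bit  = 0;
	unsigned long old_addr = start_addr + (old_bit << order);

	/* New behaviour: the first index with (index + align_off) % (align_mask + 1) == 0
	 * is 96, so the returned address lands on the next 4 KiB boundary. */
	unsigned long new_bit  = (align_mask + 1) - align_off;			/* 96   */
	unsigned long new_addr = start_addr + (new_bit << order);

	printf("old: 0x%lx (aligned: %d)\n", old_addr, !(old_addr & (align - 1)));
	printf("new: 0x%lx (aligned: %d)\n", new_addr, !(new_addr & (align - 1)));
	assert(new_addr % align == 0);
	return 0;
}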
@@ -567,7 +570,7 @@ EXPORT_SYMBOL(gen_pool_first_fit_align);
  */
 unsigned long gen_pool_fixed_alloc(unsigned long *map, unsigned long size,
 		unsigned long start, unsigned int nr, void *data,
-		struct gen_pool *pool)
+		struct gen_pool *pool, unsigned long start_addr)
 {
 	struct genpool_data_fixed *fixed_data;
 	int order;
@@ -601,7 +604,8 @@ EXPORT_SYMBOL(gen_pool_fixed_alloc);
  */
 unsigned long gen_pool_first_fit_order_align(unsigned long *map,
 		unsigned long size, unsigned long start,
-		unsigned int nr, void *data, struct gen_pool *pool)
+		unsigned int nr, void *data, struct gen_pool *pool,
+		unsigned long start_addr)
 {
 	unsigned long align_mask = roundup_pow_of_two(nr) - 1;
 
@@ -624,7 +628,7 @@ EXPORT_SYMBOL(gen_pool_first_fit_order_align);
  */
 unsigned long gen_pool_best_fit(unsigned long *map, unsigned long size,
 		unsigned long start, unsigned int nr, void *data,
-		struct gen_pool *pool)
+		struct gen_pool *pool, unsigned long start_addr)
 {
 	unsigned long start_bit = size;
 	unsigned long len = size + 1;
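[Note] Finally, a hedged sketch of how a caller would exercise the start_addr-aware first-fit through the unchanged public genalloc API; the addresses and sizes are invented for illustration:

#include <linux/genalloc.h>
#include <linux/sizes.h>

/* Sketch only: carve a 4 KiB-aligned buffer out of a chunk whose start
 * address is deliberately NOT 4 KiB aligned (addresses are made up). */
static unsigned long demo_aligned_alloc(void)
{
	struct genpool_data_align align_data = { .align = SZ_4K };
	struct gen_pool *pool;
	unsigned long addr = 0;

	pool = gen_pool_create(5, -1);		/* 32-byte minimum allocation unit */
	if (!pool)
		return 0;

	/* Manage a 1 MiB range that starts 1 KiB past a 4 KiB boundary. */
	if (gen_pool_add(pool, 0x10000400UL, SZ_1M, -1))
		goto out;

	/* With start_addr threaded into the algorithm, addr really is 4 KiB aligned. */
	addr = gen_pool_alloc_algo(pool, SZ_4K, gen_pool_first_fit_align,
				   &align_data);
	if (addr)
		gen_pool_free(pool, addr, SZ_4K);
out:
	gen_pool_destroy(pool);
	return addr;
}

Before this change, the same call could only return addresses of the form 0x10000400 + n * 0x1000, which is never 4 KiB aligned for this chunk.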