x86: Use memblock to replace early_res
1. Replace find_e820_area() with memblock_find_in_range().
2. Replace reserve_early() with memblock_x86_reserve_range().
3. Replace free_early() with memblock_x86_free_range().
4. NO_BOOTMEM switches to memblock as well.
5. Keep the _e820/_early wrappers in this patch; a follow-up patch will
   convert all of their callers.
6. Because memblock_x86_free_range() supports partial frees, some of the
   old special-case handling can be removed.
7. memblock_find_in_range() must only be called after memblock_x86_fill(),
   so the corruption check and the mptable update are moved later in
   setup.c::setup_arch() (see the ordering sketch below the commit
   metadata).

-v2: Move reserve_brk() before memblock_x86_fill(). If the E820 table has
     more than 128 RAM entries, memblock_x86_fill() may call
     memblock_find_in_range() to find a new home for the
     memblock.memory.region array, which could otherwise overlap the brk
     area. extend_brk() is not needed after the fill, so reserving brk
     early is safe.
-v3: Move find_smp_config() early, so that memblock_find_in_range() cannot
     hand out a range covering an mptable that the BIOS placed in an
     unexpected location.
-v4: Treat E820_RESERVED_KERN as RAM in memblock.memory; those ranges are
     already in memblock.reserved. Use __NOT_KEEP_MEMBLOCK so the
     memblock-related code can be freed later.
-v5: The generic __memblock_find_in_range() searches from high to low
     addresses, and on 32-bit the active regions include highmem pages, so
     replace the limit with memblock.default_alloc_limit, i.e.
     get_max_mapped().
-v6: Use current_limit instead.
-v7: Check against MEMBLOCK_ERROR instead of -1ULL or -1L.
-v8: Set memblock_can_resize early to handle EFI maps with more than 128
     RAM entries.
-v9: Update after the kmemleak changes in mainline.

Suggested-by: David S. Miller <davem@davemloft.net>
Suggested-by: Benjamin Herrenschmidt <benh@kernel.crashing.org>
Suggested-by: Thomas Gleixner <tglx@linutronix.de>
Signed-off-by: Yinghai Lu <yinghai@kernel.org>
Signed-off-by: H. Peter Anvin <hpa@zytor.com>
Committed by: H. Peter Anvin
Parent: 301ff3e88e
Commit: 72d7c3b33c
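To make point 7 and the -v2/-v3 notes concrete, here is a minimal sketch of the call ordering this patch expects in setup.c::setup_arch(). It is illustrative only, not a verbatim copy of setup.c; surrounding calls are elided and the exact call sites are assumptions based on the commit message.

/* Illustrative ordering only -- not a verbatim copy of setup.c. */
void __init setup_arch(char **cmdline_p)
{
        /* ... e820 map parsed, max_pfn / max_pfn_mapped computed ... */

        reserve_brk();          /* -v2: reserve brk before memblock can hand
                                 * out a range that overlaps it */
        find_smp_config();      /* -v3: pin down the mptable before memblock
                                 * allocations can land on top of it */

        memblock.current_limit = get_max_mapped();      /* -v5/-v6 */
        memblock_x86_fill();    /* E820_RAM / E820_RESERVED_KERN ranges go
                                 * into memblock.memory */

        /*
         * Only from here on is memblock_find_in_range() reliable, which is
         * why the corruption check and the mptable update (point 7) were
         * moved below this point in setup_arch().
         */

        /* ... rest of setup_arch() ... */
}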
@@ -15,6 +15,7 @@
 #include <linux/pfn.h>
 #include <linux/suspend.h>
 #include <linux/firmware-map.h>
+#include <linux/memblock.h>

 #include <asm/e820.h>
 #include <asm/proto.h>
@@ -742,69 +743,29 @@ core_initcall(e820_mark_nvs_memory);
  */
 u64 __init find_e820_area(u64 start, u64 end, u64 size, u64 align)
 {
-        int i;
+        u64 mem = memblock_find_in_range(start, end, size, align);

-        for (i = 0; i < e820.nr_map; i++) {
-                struct e820entry *ei = &e820.map[i];
-                u64 addr;
-                u64 ei_start, ei_last;
+        if (mem == MEMBLOCK_ERROR)
+                return -1ULL;

-                if (ei->type != E820_RAM)
-                        continue;
-
-                ei_last = ei->addr + ei->size;
-                ei_start = ei->addr;
-                addr = find_early_area(ei_start, ei_last, start, end,
-                                        size, align);
-
-                if (addr != -1ULL)
-                        return addr;
-        }
-        return -1ULL;
+        return mem;
 }

 u64 __init find_fw_memmap_area(u64 start, u64 end, u64 size, u64 align)
 {
         return find_e820_area(start, end, size, align);
 }

 u64 __init get_max_mapped(void)
 {
         u64 end = max_pfn_mapped;

         end <<= PAGE_SHIFT;

         return end;
 }
 /*
  * Find next free range after *start
  */
 u64 __init find_e820_area_size(u64 start, u64 *sizep, u64 align)
 {
-        int i;
+        u64 mem = memblock_x86_find_in_range_size(start, sizep, align);

-        for (i = 0; i < e820.nr_map; i++) {
-                struct e820entry *ei = &e820.map[i];
-                u64 addr;
-                u64 ei_start, ei_last;
+        if (mem == MEMBLOCK_ERROR)
+                return -1ULL;

-                if (ei->type != E820_RAM)
-                        continue;
-
-                ei_last = ei->addr + ei->size;
-                ei_start = ei->addr;
-                addr = find_early_area_size(ei_start, ei_last, start,
-                                        sizep, align);
-
-                if (addr != -1ULL)
-                        return addr;
-        }
-
-        return -1ULL;
+        return mem;
 }

 /*
- * pre allocated 4k and reserved it in e820
+ * pre allocated 4k and reserved it in memblock and e820_saved
  */
 u64 __init early_reserve_e820(u64 startt, u64 sizet, u64 align)
 {
@@ -813,8 +774,8 @@ u64 __init early_reserve_e820(u64 startt, u64 sizet, u64 align)
         u64 start;

         for (start = startt; ; start += size) {
-                start = find_e820_area_size(start, &size, align);
-                if (!(start + 1))
+                start = memblock_x86_find_in_range_size(start, &size, align);
+                if (start == MEMBLOCK_ERROR)
                         return 0;
                 if (size >= sizet)
                         break;
@@ -830,10 +791,9 @@ u64 __init early_reserve_e820(u64 startt, u64 sizet, u64 align)
         addr = round_down(start + size - sizet, align);
         if (addr < start)
                 return 0;
-        e820_update_range(addr, sizet, E820_RAM, E820_RESERVED);
+        memblock_x86_reserve_range(addr, addr + sizet, "new next");
         e820_update_range_saved(addr, sizet, E820_RAM, E820_RESERVED);
-        printk(KERN_INFO "update e820 for early_reserve_e820\n");
-        update_e820();
         printk(KERN_INFO "update e820_saved for early_reserve_e820\n");
         update_e820_saved();

         return addr;
@@ -895,52 +855,12 @@ unsigned long __init e820_end_of_low_ram_pfn(void)
 {
         return e820_end_pfn(1UL<<(32 - PAGE_SHIFT), E820_RAM);
 }
-/*
- * Finds an active region in the address range from start_pfn to last_pfn and
- * returns its range in ei_startpfn and ei_endpfn for the e820 entry.
- */
-int __init e820_find_active_region(const struct e820entry *ei,
-                                   unsigned long start_pfn,
-                                   unsigned long last_pfn,
-                                   unsigned long *ei_startpfn,
-                                   unsigned long *ei_endpfn)
-{
-        u64 align = PAGE_SIZE;
-
-        *ei_startpfn = round_up(ei->addr, align) >> PAGE_SHIFT;
-        *ei_endpfn = round_down(ei->addr + ei->size, align) >> PAGE_SHIFT;
-
-        /* Skip map entries smaller than a page */
-        if (*ei_startpfn >= *ei_endpfn)
-                return 0;
-
-        /* Skip if map is outside the node */
-        if (ei->type != E820_RAM || *ei_endpfn <= start_pfn ||
-                                    *ei_startpfn >= last_pfn)
-                return 0;
-
-        /* Check for overlaps */
-        if (*ei_startpfn < start_pfn)
-                *ei_startpfn = start_pfn;
-        if (*ei_endpfn > last_pfn)
-                *ei_endpfn = last_pfn;
-
-        return 1;
-}

 /* Walk the e820 map and register active regions within a node */
 void __init e820_register_active_regions(int nid, unsigned long start_pfn,
                                          unsigned long last_pfn)
 {
-        unsigned long ei_startpfn;
-        unsigned long ei_endpfn;
-        int i;
-
-        for (i = 0; i < e820.nr_map; i++)
-                if (e820_find_active_region(&e820.map[i],
-                                            start_pfn, last_pfn,
-                                            &ei_startpfn, &ei_endpfn))
-                        add_active_range(nid, ei_startpfn, ei_endpfn);
+        memblock_x86_register_active_regions(nid, start_pfn, last_pfn);
 }

 /*
@@ -950,18 +870,16 @@ void __init e820_register_active_regions(int nid, unsigned long start_pfn,
  */
 u64 __init e820_hole_size(u64 start, u64 end)
 {
-        unsigned long start_pfn = start >> PAGE_SHIFT;
-        unsigned long last_pfn = end >> PAGE_SHIFT;
-        unsigned long ei_startpfn, ei_endpfn, ram = 0;
-        int i;
+        return memblock_x86_hole_size(start, end);
+}

-        for (i = 0; i < e820.nr_map; i++) {
-                if (e820_find_active_region(&e820.map[i],
-                                            start_pfn, last_pfn,
-                                            &ei_startpfn, &ei_endpfn))
-                        ram += ei_endpfn - ei_startpfn;
-        }
-        return end - start - ((u64)ram << PAGE_SHIFT);
+void reserve_early(u64 start, u64 end, char *name)
+{
+        memblock_x86_reserve_range(start, end, name);
+}
+void free_early(u64 start, u64 end)
+{
+        memblock_x86_free_range(start, end);
 }

 static void early_panic(char *msg)
@@ -1210,3 +1128,32 @@ void __init setup_memory_map(void)
         printk(KERN_INFO "BIOS-provided physical RAM map:\n");
         e820_print_map(who);
 }
+
+void __init memblock_x86_fill(void)
+{
+        int i;
+        u64 end;
+
+        /*
+         * EFI may have more than 128 entries
+         * We are safe to enable resizing, because memblock_x86_fill()
+         * is rather later for x86
+         */
+        memblock_can_resize = 1;
+
+        for (i = 0; i < e820.nr_map; i++) {
+                struct e820entry *ei = &e820.map[i];
+
+                end = ei->addr + ei->size;
+                if (end != (resource_size_t)end)
+                        continue;
+
+                if (ei->type != E820_RAM && ei->type != E820_RESERVED_KERN)
+                        continue;
+
+                memblock_add(ei->addr, ei->size);
+        }
+
+        memblock_analyze();
+        memblock_dump_all();
+}
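For readers following the conversion, a minimal caller-side sketch of the allocation pattern the wrappers above now delegate to. The range, size, and "EXAMPLE" label are made up for illustration; the calls themselves (memblock_find_in_range(), memblock_x86_reserve_range(), memblock_x86_free_range()) and the MEMBLOCK_ERROR check from -v7 are the ones this patch uses.

/* Hypothetical early-boot caller; range and "EXAMPLE" label are illustrative. */
static u64 __init example_early_alloc(void)
{
        u64 addr = memblock_find_in_range(0, get_max_mapped(),
                                          PAGE_SIZE, PAGE_SIZE);
        if (addr == MEMBLOCK_ERROR)     /* -v7: no more -1ULL checks */
                return 0;

        memblock_x86_reserve_range(addr, addr + PAGE_SIZE, "EXAMPLE");

        /* ... use the page; partial frees are fine now (point 6) ... */
        memblock_x86_free_range(addr, addr + PAGE_SIZE);

        return addr;
}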