Merge commit 'gcl/next' into next
@@ -135,7 +135,7 @@ unsigned long __init mmu_mapin_ram(unsigned long top)
 	/* If the size of RAM is not an exact power of two, we may not
 	 * have covered RAM in its entirety with 16 and 4 MiB
 	 * pages. Consequently, restrict the top end of RAM currently
-	 * allocable so that calls to the LMB to allocate PTEs for "tail"
+	 * allocable so that calls to the MEMBLOCK to allocate PTEs for "tail"
 	 * coverage with normal-sized pages (or other reasons) do not
 	 * attempt to allocate outside the allowed range.
 	 */
@@ -31,7 +31,7 @@
 #include <linux/cache.h>
 #include <linux/init.h>
 #include <linux/signal.h>
-#include <linux/lmb.h>
+#include <linux/memblock.h>
 
 #include <asm/processor.h>
 #include <asm/pgtable.h>
@@ -384,8 +384,8 @@ static int __init htab_dt_scan_hugepage_blocks(unsigned long node,
 	printk(KERN_INFO "Huge page(16GB) memory: "
 			"addr = 0x%lX size = 0x%lX pages = %d\n",
 			phys_addr, block_size, expected_pages);
-	if (phys_addr + (16 * GB) <= lmb_end_of_DRAM()) {
-		lmb_reserve(phys_addr, block_size * expected_pages);
+	if (phys_addr + (16 * GB) <= memblock_end_of_DRAM()) {
+		memblock_reserve(phys_addr, block_size * expected_pages);
 		add_gpage(phys_addr, block_size, expected_pages);
 	}
 	return 0;
@@ -458,7 +458,7 @@ static void __init htab_init_page_sizes(void)
 	 * and we have at least 1G of RAM at boot
 	 */
 	if (mmu_psize_defs[MMU_PAGE_16M].shift &&
-	    lmb_phys_mem_size() >= 0x40000000)
+	    memblock_phys_mem_size() >= 0x40000000)
 		mmu_vmemmap_psize = MMU_PAGE_16M;
 	else if (mmu_psize_defs[MMU_PAGE_64K].shift)
 		mmu_vmemmap_psize = MMU_PAGE_64K;
@@ -520,7 +520,7 @@ static unsigned long __init htab_get_table_size(void)
 		return 1UL << ppc64_pft_size;
 
 	/* round mem_size up to next power of 2 */
-	mem_size = lmb_phys_mem_size();
+	mem_size = memblock_phys_mem_size();
 	rnd_mem_size = 1UL << __ilog2(mem_size);
 	if (rnd_mem_size < mem_size)
 		rnd_mem_size <<= 1;
@@ -627,7 +627,7 @@ static void __init htab_initialize(void)
 	else
 		limit = 0;
 
-	table = lmb_alloc_base(htab_size_bytes, htab_size_bytes, limit);
+	table = memblock_alloc_base(htab_size_bytes, htab_size_bytes, limit);
 
 	DBG("Hash table allocated at %lx, size: %lx\n", table,
 	    htab_size_bytes);
@@ -647,9 +647,9 @@ static void __init htab_initialize(void)
 	prot = pgprot_val(PAGE_KERNEL);
 
 #ifdef CONFIG_DEBUG_PAGEALLOC
-	linear_map_hash_count = lmb_end_of_DRAM() >> PAGE_SHIFT;
-	linear_map_hash_slots = __va(lmb_alloc_base(linear_map_hash_count,
-						    1, lmb.rmo_size));
+	linear_map_hash_count = memblock_end_of_DRAM() >> PAGE_SHIFT;
+	linear_map_hash_slots = __va(memblock_alloc_base(linear_map_hash_count,
+						    1, memblock.rmo_size));
 	memset(linear_map_hash_slots, 0, linear_map_hash_count);
 #endif /* CONFIG_DEBUG_PAGEALLOC */
 
@@ -659,16 +659,16 @@ static void __init htab_initialize(void)
 	 */
 
 	/* create bolted the linear mapping in the hash table */
-	for (i=0; i < lmb.memory.cnt; i++) {
-		base = (unsigned long)__va(lmb.memory.region[i].base);
-		size = lmb.memory.region[i].size;
+	for (i=0; i < memblock.memory.cnt; i++) {
+		base = (unsigned long)__va(memblock.memory.region[i].base);
+		size = memblock.memory.region[i].size;
 
 		DBG("creating mapping for region: %lx..%lx (prot: %lx)\n",
 		    base, size, prot);
 
 #ifdef CONFIG_U3_DART
 		/* Do not map the DART space. Fortunately, it will be aligned
-		 * in such a way that it will not cross two lmb regions and
+		 * in such a way that it will not cross two memblock regions and
 		 * will fit within a single 16Mb page.
 		 * The DART space is assumed to be a full 16Mb region even if
 		 * we only use 2Mb of that space. We will use more of it later
@@ -30,7 +30,7 @@
 #include <linux/highmem.h>
 #include <linux/initrd.h>
 #include <linux/pagemap.h>
-#include <linux/lmb.h>
+#include <linux/memblock.h>
 #include <linux/gfp.h>
 
 #include <asm/pgalloc.h>
@@ -136,17 +136,17 @@ void __init MMU_init(void)
 	/* parse args from command line */
 	MMU_setup();
 
-	if (lmb.memory.cnt > 1) {
+	if (memblock.memory.cnt > 1) {
 #ifndef CONFIG_WII
-		lmb.memory.cnt = 1;
-		lmb_analyze();
+		memblock.memory.cnt = 1;
+		memblock_analyze();
 		printk(KERN_WARNING "Only using first contiguous memory region");
 #else
 		wii_memory_fixups();
 #endif
 	}
 
-	total_lowmem = total_memory = lmb_end_of_DRAM() - memstart_addr;
+	total_lowmem = total_memory = memblock_end_of_DRAM() - memstart_addr;
 	lowmem_end_addr = memstart_addr + total_lowmem;
 
 #ifdef CONFIG_FSL_BOOKE
@@ -161,8 +161,8 @@ void __init MMU_init(void)
 		lowmem_end_addr = memstart_addr + total_lowmem;
 #ifndef CONFIG_HIGHMEM
 		total_memory = total_lowmem;
-		lmb_enforce_memory_limit(lowmem_end_addr);
-		lmb_analyze();
+		memblock_enforce_memory_limit(lowmem_end_addr);
+		memblock_analyze();
 #endif /* CONFIG_HIGHMEM */
 	}
 
@@ -200,7 +200,7 @@ void __init *early_get_page(void)
 	if (init_bootmem_done) {
 		p = alloc_bootmem_pages(PAGE_SIZE);
 	} else {
-		p = __va(lmb_alloc_base(PAGE_SIZE, PAGE_SIZE,
+		p = __va(memblock_alloc_base(PAGE_SIZE, PAGE_SIZE,
 					__initial_memory_limit_addr));
 	}
 	return p;
@@ -40,7 +40,7 @@
 #include <linux/nodemask.h>
 #include <linux/module.h>
 #include <linux/poison.h>
-#include <linux/lmb.h>
+#include <linux/memblock.h>
 #include <linux/hugetlb.h>
 #include <linux/slab.h>
 
@@ -32,7 +32,7 @@
 #include <linux/initrd.h>
 #include <linux/pagemap.h>
 #include <linux/suspend.h>
-#include <linux/lmb.h>
+#include <linux/memblock.h>
 #include <linux/hugetlb.h>
 
 #include <asm/pgalloc.h>
@@ -83,13 +83,13 @@ int page_is_ram(unsigned long pfn)
 #else
 	unsigned long paddr = (pfn << PAGE_SHIFT);
 	int i;
-	for (i=0; i < lmb.memory.cnt; i++) {
+	for (i=0; i < memblock.memory.cnt; i++) {
 		unsigned long base;
 
-		base = lmb.memory.region[i].base;
+		base = memblock.memory.region[i].base;
 
 		if ((paddr >= base) &&
-			(paddr < (base + lmb.memory.region[i].size))) {
+			(paddr < (base + memblock.memory.region[i].size))) {
 			return 1;
 		}
 	}
@@ -142,14 +142,14 @@ int arch_add_memory(int nid, u64 start, u64 size)
 /*
  * walk_memory_resource() needs to make sure there is no holes in a given
  * memory range. PPC64 does not maintain the memory layout in /proc/iomem.
- * Instead it maintains it in lmb.memory structures. Walk through the
+ * Instead it maintains it in memblock.memory structures. Walk through the
  * memory regions, find holes and callback for contiguous regions.
  */
 int
 walk_system_ram_range(unsigned long start_pfn, unsigned long nr_pages,
 		void *arg, int (*func)(unsigned long, unsigned long, void *))
 {
-	struct lmb_property res;
+	struct memblock_property res;
 	unsigned long pfn, len;
 	u64 end;
 	int ret = -1;
@@ -158,7 +158,7 @@ walk_system_ram_range(unsigned long start_pfn, unsigned long nr_pages,
 	res.size = (u64) nr_pages << PAGE_SHIFT;
 
 	end = res.base + res.size - 1;
-	while ((res.base < end) && (lmb_find(&res) >= 0)) {
+	while ((res.base < end) && (memblock_find(&res) >= 0)) {
 		pfn = (unsigned long)(res.base >> PAGE_SHIFT);
 		len = (unsigned long)(res.size >> PAGE_SHIFT);
 		ret = (*func)(pfn, len, arg);
@@ -184,8 +184,8 @@ void __init do_init_bootmem(void)
 	unsigned long total_pages;
 	int boot_mapsize;
 
-	max_low_pfn = max_pfn = lmb_end_of_DRAM() >> PAGE_SHIFT;
-	total_pages = (lmb_end_of_DRAM() - memstart_addr) >> PAGE_SHIFT;
+	max_low_pfn = max_pfn = memblock_end_of_DRAM() >> PAGE_SHIFT;
+	total_pages = (memblock_end_of_DRAM() - memstart_addr) >> PAGE_SHIFT;
 #ifdef CONFIG_HIGHMEM
 	total_pages = total_lowmem >> PAGE_SHIFT;
 	max_low_pfn = lowmem_end_addr >> PAGE_SHIFT;
@@ -198,16 +198,16 @@ void __init do_init_bootmem(void)
 	 */
 	bootmap_pages = bootmem_bootmap_pages(total_pages);
 
-	start = lmb_alloc(bootmap_pages << PAGE_SHIFT, PAGE_SIZE);
+	start = memblock_alloc(bootmap_pages << PAGE_SHIFT, PAGE_SIZE);
 
 	min_low_pfn = MEMORY_START >> PAGE_SHIFT;
 	boot_mapsize = init_bootmem_node(NODE_DATA(0), start >> PAGE_SHIFT, min_low_pfn, max_low_pfn);
 
 	/* Add active regions with valid PFNs */
-	for (i = 0; i < lmb.memory.cnt; i++) {
+	for (i = 0; i < memblock.memory.cnt; i++) {
 		unsigned long start_pfn, end_pfn;
-		start_pfn = lmb.memory.region[i].base >> PAGE_SHIFT;
-		end_pfn = start_pfn + lmb_size_pages(&lmb.memory, i);
+		start_pfn = memblock.memory.region[i].base >> PAGE_SHIFT;
+		end_pfn = start_pfn + memblock_size_pages(&memblock.memory, i);
 		add_active_range(0, start_pfn, end_pfn);
 	}
 
@@ -218,17 +218,17 @@ void __init do_init_bootmem(void)
 	free_bootmem_with_active_regions(0, lowmem_end_addr >> PAGE_SHIFT);
 
 	/* reserve the sections we're already using */
-	for (i = 0; i < lmb.reserved.cnt; i++) {
-		unsigned long addr = lmb.reserved.region[i].base +
-				lmb_size_bytes(&lmb.reserved, i) - 1;
+	for (i = 0; i < memblock.reserved.cnt; i++) {
+		unsigned long addr = memblock.reserved.region[i].base +
+				memblock_size_bytes(&memblock.reserved, i) - 1;
 		if (addr < lowmem_end_addr)
-			reserve_bootmem(lmb.reserved.region[i].base,
-					lmb_size_bytes(&lmb.reserved, i),
+			reserve_bootmem(memblock.reserved.region[i].base,
+					memblock_size_bytes(&memblock.reserved, i),
 					BOOTMEM_DEFAULT);
-		else if (lmb.reserved.region[i].base < lowmem_end_addr) {
+		else if (memblock.reserved.region[i].base < lowmem_end_addr) {
 			unsigned long adjusted_size = lowmem_end_addr -
-				lmb.reserved.region[i].base;
-			reserve_bootmem(lmb.reserved.region[i].base,
+				memblock.reserved.region[i].base;
+			reserve_bootmem(memblock.reserved.region[i].base,
 				adjusted_size, BOOTMEM_DEFAULT);
 		}
 	}
@@ -236,9 +236,9 @@ void __init do_init_bootmem(void)
 	free_bootmem_with_active_regions(0, max_pfn);
 
 	/* reserve the sections we're already using */
-	for (i = 0; i < lmb.reserved.cnt; i++)
-		reserve_bootmem(lmb.reserved.region[i].base,
-				lmb_size_bytes(&lmb.reserved, i),
+	for (i = 0; i < memblock.reserved.cnt; i++)
+		reserve_bootmem(memblock.reserved.region[i].base,
+				memblock_size_bytes(&memblock.reserved, i),
 				BOOTMEM_DEFAULT);
 
 #endif
@@ -251,20 +251,20 @@ void __init do_init_bootmem(void)
 /* mark pages that don't exist as nosave */
 static int __init mark_nonram_nosave(void)
 {
-	unsigned long lmb_next_region_start_pfn,
-		      lmb_region_max_pfn;
+	unsigned long memblock_next_region_start_pfn,
+		      memblock_region_max_pfn;
 	int i;
 
-	for (i = 0; i < lmb.memory.cnt - 1; i++) {
-		lmb_region_max_pfn =
-			(lmb.memory.region[i].base >> PAGE_SHIFT) +
-			(lmb.memory.region[i].size >> PAGE_SHIFT);
-		lmb_next_region_start_pfn =
-			lmb.memory.region[i+1].base >> PAGE_SHIFT;
+	for (i = 0; i < memblock.memory.cnt - 1; i++) {
+		memblock_region_max_pfn =
+			(memblock.memory.region[i].base >> PAGE_SHIFT) +
+			(memblock.memory.region[i].size >> PAGE_SHIFT);
+		memblock_next_region_start_pfn =
+			memblock.memory.region[i+1].base >> PAGE_SHIFT;
 
-		if (lmb_region_max_pfn < lmb_next_region_start_pfn)
-			register_nosave_region(lmb_region_max_pfn,
-					       lmb_next_region_start_pfn);
+		if (memblock_region_max_pfn < memblock_next_region_start_pfn)
+			register_nosave_region(memblock_region_max_pfn,
+					       memblock_next_region_start_pfn);
 	}
 
 	return 0;
@@ -275,8 +275,8 @@ static int __init mark_nonram_nosave(void)
  */
 void __init paging_init(void)
 {
-	unsigned long total_ram = lmb_phys_mem_size();
-	phys_addr_t top_of_ram = lmb_end_of_DRAM();
+	unsigned long total_ram = memblock_phys_mem_size();
+	phys_addr_t top_of_ram = memblock_end_of_DRAM();
 	unsigned long max_zone_pfns[MAX_NR_ZONES];
 
 #ifdef CONFIG_PPC32
@@ -327,7 +327,7 @@ void __init mem_init(void)
 		swiotlb_init(1);
 #endif
 
-	num_physpages = lmb.memory.size >> PAGE_SHIFT;
+	num_physpages = memblock.memory.size >> PAGE_SHIFT;
 	high_memory = (void *) __va(max_low_pfn * PAGE_SIZE);
 
 #ifdef CONFIG_NEED_MULTIPLE_NODES
@@ -364,7 +364,7 @@ void __init mem_init(void)
 		highmem_mapnr = lowmem_end_addr >> PAGE_SHIFT;
 		for (pfn = highmem_mapnr; pfn < max_mapnr; ++pfn) {
 			struct page *page = pfn_to_page(pfn);
-			if (lmb_is_reserved(pfn << PAGE_SHIFT))
+			if (memblock_is_reserved(pfn << PAGE_SHIFT))
 				continue;
 			ClearPageReserved(page);
 			init_page_count(page);
@@ -17,7 +17,7 @@
 #include <linux/nodemask.h>
 #include <linux/cpu.h>
 #include <linux/notifier.h>
-#include <linux/lmb.h>
+#include <linux/memblock.h>
 #include <linux/of.h>
 #include <linux/pfn.h>
 #include <asm/sparsemem.h>
@@ -407,7 +407,7 @@ struct of_drconf_cell {
 #define DRCONF_MEM_RESERVED	0x00000080
 
 /*
- * Read the next lmb list entry from the ibm,dynamic-memory property
+ * Read the next memblock list entry from the ibm,dynamic-memory property
  * and return the information in the provided of_drconf_cell structure.
  */
 static void read_drconf_cell(struct of_drconf_cell *drmem, const u32 **cellp)
@@ -428,8 +428,8 @@ static void read_drconf_cell(struct of_drconf_cell *drmem, const u32 **cellp)
 /*
  * Retreive and validate the ibm,dynamic-memory property of the device tree.
  *
- * The layout of the ibm,dynamic-memory property is a number N of lmb
- * list entries followed by N lmb list entries. Each lmb list entry
+ * The layout of the ibm,dynamic-memory property is a number N of memblock
+ * list entries followed by N memblock list entries. Each memblock list entry
  * contains information as layed out in the of_drconf_cell struct above.
 */
 static int of_get_drconf_memory(struct device_node *memory, const u32 **dm)
@@ -454,15 +454,15 @@ static int of_get_drconf_memory(struct device_node *memory, const u32 **dm)
 }
 
 /*
- * Retreive and validate the ibm,lmb-size property for drconf memory
+ * Retreive and validate the ibm,memblock-size property for drconf memory
  * from the device tree.
 */
-static u64 of_get_lmb_size(struct device_node *memory)
+static u64 of_get_memblock_size(struct device_node *memory)
 {
 	const u32 *prop;
 	u32 len;
 
-	prop = of_get_property(memory, "ibm,lmb-size", &len);
+	prop = of_get_property(memory, "ibm,memblock-size", &len);
 	if (!prop || len < sizeof(unsigned int))
 		return 0;
 
@@ -596,19 +596,19 @@ static unsigned long __init numa_enforce_memory_limit(unsigned long start,
 						      unsigned long size)
 {
 	/*
-	 * We use lmb_end_of_DRAM() in here instead of memory_limit because
+	 * We use memblock_end_of_DRAM() in here instead of memory_limit because
 	 * we've already adjusted it for the limit and it takes care of
 	 * having memory holes below the limit. Also, in the case of
 	 * iommu_is_off, memory_limit is not set but is implicitly enforced.
 	 */
 
-	if (start + size <= lmb_end_of_DRAM())
+	if (start + size <= memblock_end_of_DRAM())
 		return size;
 
-	if (start >= lmb_end_of_DRAM())
+	if (start >= memblock_end_of_DRAM())
 		return 0;
 
-	return lmb_end_of_DRAM() - start;
+	return memblock_end_of_DRAM() - start;
 }
 
 /*
@@ -618,7 +618,7 @@ static unsigned long __init numa_enforce_memory_limit(unsigned long start,
 static inline int __init read_usm_ranges(const u32 **usm)
 {
 	/*
-	 * For each lmb in ibm,dynamic-memory a corresponding
+	 * For each memblock in ibm,dynamic-memory a corresponding
 	 * entry in linux,drconf-usable-memory property contains
 	 * a counter followed by that many (base, size) duple.
 	 * read the counter from linux,drconf-usable-memory
@@ -634,7 +634,7 @@ static void __init parse_drconf_memory(struct device_node *memory)
 {
 	const u32 *dm, *usm;
 	unsigned int n, rc, ranges, is_kexec_kdump = 0;
-	unsigned long lmb_size, base, size, sz;
+	unsigned long memblock_size, base, size, sz;
 	int nid;
 	struct assoc_arrays aa;
 
@@ -642,8 +642,8 @@ static void __init parse_drconf_memory(struct device_node *memory)
 	if (!n)
 		return;
 
-	lmb_size = of_get_lmb_size(memory);
-	if (!lmb_size)
+	memblock_size = of_get_memblock_size(memory);
+	if (!memblock_size)
 		return;
 
 	rc = of_get_assoc_arrays(memory, &aa);
@@ -667,7 +667,7 @@ static void __init parse_drconf_memory(struct device_node *memory)
 			continue;
 
 		base = drmem.base_addr;
-		size = lmb_size;
+		size = memblock_size;
 		ranges = 1;
 
 		if (is_kexec_kdump) {
@@ -787,7 +787,7 @@ new_range:
 	}
 
 	/*
-	 * Now do the same thing for each LMB listed in the ibm,dynamic-memory
+	 * Now do the same thing for each MEMBLOCK listed in the ibm,dynamic-memory
 	 * property in the ibm,dynamic-reconfiguration-memory node.
 	 */
 	memory = of_find_node_by_path("/ibm,dynamic-reconfiguration-memory");
@@ -799,8 +799,8 @@ new_range:
 
 static void __init setup_nonnuma(void)
 {
-	unsigned long top_of_ram = lmb_end_of_DRAM();
-	unsigned long total_ram = lmb_phys_mem_size();
+	unsigned long top_of_ram = memblock_end_of_DRAM();
+	unsigned long total_ram = memblock_phys_mem_size();
 	unsigned long start_pfn, end_pfn;
 	unsigned int i, nid = 0;
 
@@ -809,9 +809,9 @@ static void __init setup_nonnuma(void)
 	printk(KERN_DEBUG "Memory hole size: %ldMB\n",
 	       (top_of_ram - total_ram) >> 20);
 
-	for (i = 0; i < lmb.memory.cnt; ++i) {
-		start_pfn = lmb.memory.region[i].base >> PAGE_SHIFT;
-		end_pfn = start_pfn + lmb_size_pages(&lmb.memory, i);
+	for (i = 0; i < memblock.memory.cnt; ++i) {
+		start_pfn = memblock.memory.region[i].base >> PAGE_SHIFT;
+		end_pfn = start_pfn + memblock_size_pages(&memblock.memory, i);
 
 		fake_numa_create_new_node(end_pfn, &nid);
 		add_active_range(nid, start_pfn, end_pfn);
@@ -869,7 +869,7 @@ static void __init dump_numa_memory_topology(void)
 
 		count = 0;
 
-		for (i = 0; i < lmb_end_of_DRAM();
+		for (i = 0; i < memblock_end_of_DRAM();
 		     i += (1 << SECTION_SIZE_BITS)) {
 			if (early_pfn_to_nid(i >> PAGE_SHIFT) == node) {
 				if (count == 0)
@@ -889,7 +889,7 @@ static void __init dump_numa_memory_topology(void)
 }
 
 /*
- * Allocate some memory, satisfying the lmb or bootmem allocator where
+ * Allocate some memory, satisfying the memblock or bootmem allocator where
  * required. nid is the preferred node and end is the physical address of
  * the highest address in the node.
 *
@@ -903,11 +903,11 @@ static void __init *careful_zallocation(int nid, unsigned long size,
 	int new_nid;
 	unsigned long ret_paddr;
 
-	ret_paddr = __lmb_alloc_base(size, align, end_pfn << PAGE_SHIFT);
+	ret_paddr = __memblock_alloc_base(size, align, end_pfn << PAGE_SHIFT);
 
 	/* retry over all memory */
 	if (!ret_paddr)
-		ret_paddr = __lmb_alloc_base(size, align, lmb_end_of_DRAM());
+		ret_paddr = __memblock_alloc_base(size, align, memblock_end_of_DRAM());
 
 	if (!ret_paddr)
 		panic("numa.c: cannot allocate %lu bytes for node %d",
@@ -917,14 +917,14 @@ static void __init *careful_zallocation(int nid, unsigned long size,
 
 	/*
 	 * We initialize the nodes in numeric order: 0, 1, 2...
-	 * and hand over control from the LMB allocator to the
+	 * and hand over control from the MEMBLOCK allocator to the
 	 * bootmem allocator. If this function is called for
 	 * node 5, then we know that all nodes <5 are using the
-	 * bootmem allocator instead of the LMB allocator.
+	 * bootmem allocator instead of the MEMBLOCK allocator.
 	 *
 	 * So, check the nid from which this allocation came
 	 * and double check to see if we need to use bootmem
-	 * instead of the LMB. We don't free the LMB memory
+	 * instead of the MEMBLOCK. We don't free the MEMBLOCK memory
 	 * since it would be useless.
 	 */
 	new_nid = early_pfn_to_nid(ret_paddr >> PAGE_SHIFT);
@@ -949,9 +949,9 @@ static void mark_reserved_regions_for_nid(int nid)
 	struct pglist_data *node = NODE_DATA(nid);
 	int i;
 
-	for (i = 0; i < lmb.reserved.cnt; i++) {
-		unsigned long physbase = lmb.reserved.region[i].base;
-		unsigned long size = lmb.reserved.region[i].size;
+	for (i = 0; i < memblock.reserved.cnt; i++) {
+		unsigned long physbase = memblock.reserved.region[i].base;
+		unsigned long size = memblock.reserved.region[i].size;
 		unsigned long start_pfn = physbase >> PAGE_SHIFT;
 		unsigned long end_pfn = PFN_UP(physbase + size);
 		struct node_active_region node_ar;
@@ -959,7 +959,7 @@ static void mark_reserved_regions_for_nid(int nid)
 				 node->node_spanned_pages;
 
 		/*
-		 * Check to make sure that this lmb.reserved area is
+		 * Check to make sure that this memblock.reserved area is
 		 * within the bounds of the node that we care about.
 		 * Checking the nid of the start and end points is not
 		 * sufficient because the reserved area could span the
@@ -1017,7 +1017,7 @@ void __init do_init_bootmem(void)
 	int nid;
 
 	min_low_pfn = 0;
-	max_low_pfn = lmb_end_of_DRAM() >> PAGE_SHIFT;
+	max_low_pfn = memblock_end_of_DRAM() >> PAGE_SHIFT;
 	max_pfn = max_low_pfn;
 
 	if (parse_numa_properties())
@@ -1094,7 +1094,7 @@ void __init paging_init(void)
 {
 	unsigned long max_zone_pfns[MAX_NR_ZONES];
 	memset(max_zone_pfns, 0, sizeof(max_zone_pfns));
-	max_zone_pfns[ZONE_DMA] = lmb_end_of_DRAM() >> PAGE_SHIFT;
+	max_zone_pfns[ZONE_DMA] = memblock_end_of_DRAM() >> PAGE_SHIFT;
 	free_area_init_nodes(max_zone_pfns);
 }
 
@@ -1128,7 +1128,7 @@ static int hot_add_drconf_scn_to_nid(struct device_node *memory,
 {
 	const u32 *dm;
 	unsigned int drconf_cell_cnt, rc;
-	unsigned long lmb_size;
+	unsigned long memblock_size;
 	struct assoc_arrays aa;
 	int nid = -1;
 
@@ -1136,8 +1136,8 @@ static int hot_add_drconf_scn_to_nid(struct device_node *memory,
 	if (!drconf_cell_cnt)
 		return -1;
 
-	lmb_size = of_get_lmb_size(memory);
-	if (!lmb_size)
+	memblock_size = of_get_memblock_size(memory);
+	if (!memblock_size)
 		return -1;
 
 	rc = of_get_assoc_arrays(memory, &aa);
@@ -1156,7 +1156,7 @@ static int hot_add_drconf_scn_to_nid(struct device_node *memory,
 			continue;
 
 		if ((scn_addr < drmem.base_addr)
-		    || (scn_addr >= (drmem.base_addr + lmb_size)))
+		    || (scn_addr >= (drmem.base_addr + memblock_size)))
 			continue;
 
 		nid = of_drconf_to_nid_single(&drmem, &aa);
@@ -1169,7 +1169,7 @@ static int hot_add_drconf_scn_to_nid(struct device_node *memory,
 /*
  * Find the node associated with a hot added memory section for memory
  * represented in the device tree as a node (i.e. memory@XXXX) for
- * each lmb.
+ * each memblock.
 */
 int hot_add_node_scn_to_nid(unsigned long scn_addr)
 {
@@ -1210,8 +1210,8 @@ int hot_add_node_scn_to_nid(unsigned long scn_addr)
 
 /*
  * Find the node associated with a hot added memory section. Section
- * corresponds to a SPARSEMEM section, not an LMB. It is assumed that
- * sections are fully contained within a single LMB.
+ * corresponds to a SPARSEMEM section, not an MEMBLOCK. It is assumed that
+ * sections are fully contained within a single MEMBLOCK.
 */
 int hot_add_scn_to_nid(unsigned long scn_addr)
 {
@@ -26,7 +26,7 @@
 #include <linux/vmalloc.h>
 #include <linux/init.h>
 #include <linux/highmem.h>
-#include <linux/lmb.h>
+#include <linux/memblock.h>
 #include <linux/slab.h>
 
 #include <asm/pgtable.h>
@@ -198,7 +198,7 @@ __ioremap_caller(phys_addr_t addr, unsigned long size, unsigned long flags,
 	 * mem_init() sets high_memory so only do the check after that.
 	 */
 	if (mem_init_done && (p < virt_to_phys(high_memory)) &&
-	    !(__allow_ioremap_reserved && lmb_is_region_reserved(p, size))) {
+	    !(__allow_ioremap_reserved && memblock_is_region_reserved(p, size))) {
 		printk("__ioremap(): phys addr 0x%llx is RAM lr %p\n",
 		       (unsigned long long)p, __builtin_return_address(0));
 		return NULL;
@@ -331,7 +331,7 @@ void __init mapin_ram(void)
 		s = mmu_mapin_ram(top);
 		__mapin_ram_chunk(s, top);
 
-		top = lmb_end_of_DRAM();
+		top = memblock_end_of_DRAM();
 		s = wii_mmu_mapin_mem2(top);
 		__mapin_ram_chunk(s, top);
 	}
@@ -34,7 +34,7 @@
 #include <linux/vmalloc.h>
 #include <linux/init.h>
 #include <linux/bootmem.h>
-#include <linux/lmb.h>
+#include <linux/memblock.h>
 #include <linux/slab.h>
 
 #include <asm/pgalloc.h>
@@ -67,7 +67,7 @@ static void *early_alloc_pgtable(unsigned long size)
 	if (init_bootmem_done)
 		pt = __alloc_bootmem(size, size, __pa(MAX_DMA_ADDRESS));
 	else
-		pt = __va(lmb_alloc_base(size, size,
+		pt = __va(memblock_alloc_base(size, size,
 					 __pa(MAX_DMA_ADDRESS)));
 	memset(pt, 0, size);
 
@@ -26,7 +26,7 @@
 #include <linux/mm.h>
 #include <linux/init.h>
 #include <linux/highmem.h>
-#include <linux/lmb.h>
+#include <linux/memblock.h>
 
 #include <asm/prom.h>
 #include <asm/mmu.h>
@@ -223,7 +223,7 @@ void __init MMU_init_hw(void)
 	 * Find some memory for the hash table.
 	 */
 	if ( ppc_md.progress ) ppc_md.progress("hash:find piece", 0x322);
-	Hash = __va(lmb_alloc_base(Hash_size, Hash_size,
+	Hash = __va(memblock_alloc_base(Hash_size, Hash_size,
 				   __initial_memory_limit_addr));
 	cacheable_memzero(Hash, Hash_size);
 	_SDR1 = __pa(Hash) | SDR1_LOW_BITS;
@@ -12,7 +12,7 @@
  * 2 of the License, or (at your option) any later version.
 */
 
-#include <linux/lmb.h>
+#include <linux/memblock.h>
 
 #include <asm/pgtable.h>
 #include <asm/mmu.h>
@@ -252,7 +252,7 @@ void __init stabs_alloc(void)
 		if (cpu == 0)
 			continue; /* stab for CPU 0 is statically allocated */
 
-		newstab = lmb_alloc_base(HW_PAGE_SIZE, HW_PAGE_SIZE,
+		newstab = memblock_alloc_base(HW_PAGE_SIZE, HW_PAGE_SIZE,
 					 1<<SID_SHIFT);
 		newstab = (unsigned long)__va(newstab);
 
@@ -34,7 +34,7 @@
 #include <linux/pagemap.h>
 #include <linux/preempt.h>
 #include <linux/spinlock.h>
-#include <linux/lmb.h>
+#include <linux/memblock.h>
 
 #include <asm/tlbflush.h>
 #include <asm/tlb.h>
@@ -503,7 +503,7 @@ static void __early_init_mmu(int boot_cpu)
 	/* Set the global containing the top of the linear mapping
 	 * for use by the TLB miss code
 	 */
-	linear_map_top = lmb_end_of_DRAM();
+	linear_map_top = memblock_end_of_DRAM();
 
 	/* A sync won't hurt us after mucking around with
 	 * the MMU configuration