ehea: Detect 16GB hugepages for firmware restriction
All kernel memory which is used for kernel/hardware data transfer must be registered with firmware using "memory regions". 16GB hugepages may not be part of a memory region due to firmware restrictions. This patch modifies the walk_memory_resource callback function to filter hugepages and add only standard memory to the busmap, which is later used for MR registration.

Signed-off-by: Thomas Klein <tklein@de.ibm.com>
Signed-off-by: Jeff Garzik <jgarzik@redhat.com>
committed by Jeff Garzik
parent 74d5e8acd9
commit 3fd09c45bf
@@ -632,10 +632,13 @@ static void ehea_rebuild_busmap(void)
 	}
 }
 
-static int ehea_update_busmap(unsigned long pfn, unsigned long pgnum, int add)
+static int ehea_update_busmap(unsigned long pfn, unsigned long nr_pages, int add)
 {
 	unsigned long i, start_section, end_section;
 
+	if (!nr_pages)
+		return 0;
+
 	if (!ehea_bmap) {
 		ehea_bmap = kzalloc(sizeof(struct ehea_bmap), GFP_KERNEL);
 		if (!ehea_bmap)
@@ -643,7 +646,7 @@ static int ehea_update_busmap(unsigned long pfn, unsigned long pgnum, int add)
 	}
 
 	start_section = (pfn * PAGE_SIZE) / EHEA_SECTSIZE;
-	end_section = start_section + ((pgnum * PAGE_SIZE) / EHEA_SECTSIZE);
+	end_section = start_section + ((nr_pages * PAGE_SIZE) / EHEA_SECTSIZE);
 	/* Mark entries as valid or invalid only; address is assigned later */
 	for (i = start_section; i < end_section; i++) {
 		u64 flag;
@@ -692,10 +695,54 @@ int ehea_rem_sect_bmap(unsigned long pfn, unsigned long nr_pages)
 	return ret;
 }
 
-static int ehea_create_busmap_callback(unsigned long pfn,
-				       unsigned long nr_pages, void *arg)
+static int ehea_is_hugepage(unsigned long pfn)
 {
-	return ehea_update_busmap(pfn, nr_pages, EHEA_BUSMAP_ADD_SECT);
+	int page_order;
+
+	if (pfn & EHEA_HUGEPAGE_PFN_MASK)
+		return 0;
+
+	page_order = compound_order(pfn_to_page(pfn));
+	if (page_order + PAGE_SHIFT != EHEA_HUGEPAGESHIFT)
+		return 0;
+
+	return 1;
+}
+
+static int ehea_create_busmap_callback(unsigned long initial_pfn,
+				       unsigned long total_nr_pages, void *arg)
+{
+	int ret;
+	unsigned long pfn, start_pfn, end_pfn, nr_pages;
+
+	if ((total_nr_pages * PAGE_SIZE) < EHEA_HUGEPAGE_SIZE)
+		return ehea_update_busmap(initial_pfn, total_nr_pages,
+					  EHEA_BUSMAP_ADD_SECT);
+
+	/* Given chunk is >= 16GB -> check for hugepages */
+	start_pfn = initial_pfn;
+	end_pfn = initial_pfn + total_nr_pages;
+	pfn = start_pfn;
+
+	while (pfn < end_pfn) {
+		if (ehea_is_hugepage(pfn)) {
+			/* Add mem found in front of the hugepage */
+			nr_pages = pfn - start_pfn;
+			ret = ehea_update_busmap(start_pfn, nr_pages,
+						 EHEA_BUSMAP_ADD_SECT);
+			if (ret)
+				return ret;
+
+			/* Skip the hugepage */
+			pfn += (EHEA_HUGEPAGE_SIZE / PAGE_SIZE);
+			start_pfn = pfn;
+		} else
+			pfn += (EHEA_SECTSIZE / PAGE_SIZE);
+	}
+
+	/* Add mem found behind the hugepage(s) */
+	nr_pages = pfn - start_pfn;
+	return ehea_update_busmap(start_pfn, nr_pages, EHEA_BUSMAP_ADD_SECT);
 }
 
 int ehea_create_busmap(void)
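Note: below is a minimal user-space sketch of the alignment test used by ehea_is_hugepage() in the last hunk. The constants EHEA_HUGEPAGESHIFT, EHEA_HUGEPAGE_SIZE and EHEA_HUGEPAGE_PFN_MASK come from a companion ehea.h change that is not part of the hunks shown here; the values used below (16GB hugepages on 4KB base pages) are assumptions chosen only to make the arithmetic concrete, not the driver's actual definitions.

/* Sketch only: mirrors the "does this PFN start a 16GB hugepage?" check
 * from ehea_is_hugepage(). Constant values are assumed, not taken from
 * the hunks above.
 */
#include <stdio.h>

#define PAGE_SHIFT              12      /* assumed 4KB base pages */
#define EHEA_HUGEPAGESHIFT      34      /* assumed: 2^34 bytes = 16GB */
#define EHEA_HUGEPAGE_SIZE      (1UL << EHEA_HUGEPAGESHIFT)
#define EHEA_HUGEPAGE_PFN_MASK  ((EHEA_HUGEPAGE_SIZE >> PAGE_SHIFT) - 1)

/* A PFN can only begin a 16GB hugepage if it is 16GB-aligned,
 * i.e. its low (EHEA_HUGEPAGESHIFT - PAGE_SHIFT) bits are all zero. */
static int pfn_hugepage_aligned(unsigned long pfn)
{
	return (pfn & EHEA_HUGEPAGE_PFN_MASK) == 0;
}

int main(void)
{
	unsigned long hp_pfns = EHEA_HUGEPAGE_SIZE >> PAGE_SHIFT; /* PFNs per 16GB page */
	unsigned long probe[] = { 0, hp_pfns, hp_pfns + 1, 3 * hp_pfns };
	unsigned long i;

	for (i = 0; i < sizeof(probe) / sizeof(probe[0]); i++)
		printf("pfn %lu -> %s\n", probe[i],
		       pfn_hugepage_aligned(probe[i]) ? "16GB-aligned" : "not aligned");
	return 0;
}

In the driver itself this alignment test is combined with compound_order() on the page, so only PFNs that both sit on a 16GB boundary and actually back a 16GB compound page are skipped; all other memory is still handed to ehea_update_busmap() for MR registration as before.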