PPC, KVM, CMA: use general CMA reserved area management framework
Now that we have a general CMA reserved area management framework, use it for future maintainability. There is no functional change.

Signed-off-by: Joonsoo Kim <iamjoonsoo.kim@lge.com>
Acked-by: Michal Nazarewicz <mina86@mina86.com>
Acked-by: Paolo Bonzini <pbonzini@redhat.com>
Tested-by: Aneesh Kumar K.V <aneesh.kumar@linux.vnet.ibm.com>
Cc: Alexander Graf <agraf@suse.de>
Cc: Aneesh Kumar K.V <aneesh.kumar@linux.vnet.ibm.com>
Cc: Gleb Natapov <gleb@kernel.org>
Acked-by: Marek Szyprowski <m.szyprowski@samsung.com>
Tested-by: Marek Szyprowski <m.szyprowski@samsung.com>
Cc: Minchan Kim <minchan@kernel.org>
Cc: Zhang Yanfei <zhangyanfei@cn.fujitsu.com>
Cc: Benjamin Herrenschmidt <benh@kernel.crashing.org>
Cc: Paul Mackerras <paulus@samba.org>
Signed-off-by: Andrew Morton <akpm@linux-foundation.org>
Signed-off-by: Linus Torvalds <torvalds@linux-foundation.org>
committed by Linus Torvalds
parent a254129e86
commit fc95ca7284
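The parent commit (a254129e86) added the generic interface this patch switches to: cma_declare_contiguous() reserves the area at boot, and cma_alloc()/cma_release() hand pages out and take them back at run time. Below is a minimal sketch of that usage pattern, following the argument order used at this point in the series; my_cma, my_cma_reserve(), my_alloc(), my_free() and the 16 MiB / 1 MiB numbers are placeholders for illustration, not code from this patch.

/*
 * Illustrative sketch only -- not part of this patch.  The CMA calls are
 * the ones introduced by the parent commit; my_cma, my_cma_reserve(),
 * my_alloc() and my_free() are made-up names, and 16 MiB is an arbitrary size.
 */
#include <linux/cma.h>
#include <linux/init.h>
#include <linux/mm.h>
#include <linux/printk.h>
#include <linux/sizes.h>

static struct cma *my_cma;	/* handle for the reserved area */

/* Boot time: carve out 16 MiB, 1 MiB aligned, one bitmap bit per page. */
void __init my_cma_reserve(void)
{
	if (cma_declare_contiguous(SZ_16M, 0, 0, SZ_1M, 0, &my_cma, false))
		pr_warn("my_cma: reservation failed\n");
}

/* Run time: take 8 pages from the area; the align argument is an order (0 = no constraint). */
static struct page *my_alloc(void)
{
	return cma_alloc(my_cma, 8, 0);
}

/* Return the same 8 pages to the area. */
static void my_free(struct page *page)
{
	cma_release(my_cma, page, 8);
}

In the diff that follows, kvm_cma_reserve() plays the boot-time role, while kvm_alloc_hpt()/kvm_alloc_rma() and their release counterparts are the run-time paths.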
@@ -16,12 +16,14 @@
 #include <linux/init.h>
 #include <linux/memblock.h>
 #include <linux/sizes.h>
+#include <linux/cma.h>
 
 #include <asm/cputable.h>
 #include <asm/kvm_ppc.h>
 #include <asm/kvm_book3s.h>
 
-#include "book3s_hv_cma.h"
+#define KVM_CMA_CHUNK_ORDER	18
+
 /*
  * Hash page table alignment on newer cpus(CPU_FTR_ARCH_206)
  * should be power of 2.
@@ -43,6 +45,8 @@ static unsigned long kvm_cma_resv_ratio = 5;
 unsigned long kvm_rma_pages = (1 << 27) >> PAGE_SHIFT;	/* 128MB */
 EXPORT_SYMBOL_GPL(kvm_rma_pages);
 
+static struct cma *kvm_cma;
+
 /* Work out RMLS (real mode limit selector) field value for a given RMA size.
    Assumes POWER7 or PPC970. */
 static inline int lpcr_rmls(unsigned long rma_size)
@@ -97,7 +101,7 @@ struct kvm_rma_info *kvm_alloc_rma()
 	ri = kmalloc(sizeof(struct kvm_rma_info), GFP_KERNEL);
 	if (!ri)
 		return NULL;
-	page = kvm_alloc_cma(kvm_rma_pages, kvm_rma_pages);
+	page = cma_alloc(kvm_cma, kvm_rma_pages, get_order(kvm_rma_pages));
 	if (!page)
 		goto err_out;
 	atomic_set(&ri->use_count, 1);
@@ -112,7 +116,7 @@ EXPORT_SYMBOL_GPL(kvm_alloc_rma);
 void kvm_release_rma(struct kvm_rma_info *ri)
 {
 	if (atomic_dec_and_test(&ri->use_count)) {
-		kvm_release_cma(pfn_to_page(ri->base_pfn), kvm_rma_pages);
+		cma_release(kvm_cma, pfn_to_page(ri->base_pfn), kvm_rma_pages);
 		kfree(ri);
 	}
 }
@@ -131,16 +135,18 @@ struct page *kvm_alloc_hpt(unsigned long nr_pages)
 {
 	unsigned long align_pages = HPT_ALIGN_PAGES;
 
+	VM_BUG_ON(get_order(nr_pages) < KVM_CMA_CHUNK_ORDER - PAGE_SHIFT);
+
 	/* Old CPUs require HPT aligned on a multiple of its size */
 	if (!cpu_has_feature(CPU_FTR_ARCH_206))
 		align_pages = nr_pages;
-	return kvm_alloc_cma(nr_pages, align_pages);
+	return cma_alloc(kvm_cma, nr_pages, get_order(align_pages));
 }
 EXPORT_SYMBOL_GPL(kvm_alloc_hpt);
 
 void kvm_release_hpt(struct page *page, unsigned long nr_pages)
 {
-	kvm_release_cma(page, nr_pages);
+	cma_release(kvm_cma, page, nr_pages);
 }
 EXPORT_SYMBOL_GPL(kvm_release_hpt);
 
@@ -179,7 +185,8 @@ void __init kvm_cma_reserve(void)
 		align_size = HPT_ALIGN_PAGES << PAGE_SHIFT;
 
 		align_size = max(kvm_rma_pages << PAGE_SHIFT, align_size);
-		kvm_cma_declare_contiguous(selected_size, align_size);
+		cma_declare_contiguous(selected_size, 0, 0, align_size,
+			KVM_CMA_CHUNK_ORDER - PAGE_SHIFT, &kvm_cma, false);
 	}
 }
 