Diffstat (limited to 'arch/powerpc/kvm')
-rw-r--r--	arch/powerpc/kvm/book3s_hv.c		 27
-rw-r--r--	arch/powerpc/kvm/book3s_hv_builtin.c	167
2 files changed, 58 insertions, 136 deletions
diff --git a/arch/powerpc/kvm/book3s_hv.c b/arch/powerpc/kvm/book3s_hv.c
index 2efa9dde741..2b95c45e17d 100644
--- a/arch/powerpc/kvm/book3s_hv.c
+++ b/arch/powerpc/kvm/book3s_hv.c
@@ -1511,10 +1511,10 @@ static inline int lpcr_rmls(unsigned long rma_size)
 
 static int kvm_rma_fault(struct vm_area_struct *vma, struct vm_fault *vmf)
 {
-	struct kvmppc_linear_info *ri = vma->vm_file->private_data;
 	struct page *page;
+	struct kvm_rma_info *ri = vma->vm_file->private_data;
 
-	if (vmf->pgoff >= ri->npages)
+	if (vmf->pgoff >= kvm_rma_pages)
 		return VM_FAULT_SIGBUS;
 
 	page = pfn_to_page(ri->base_pfn + vmf->pgoff);
@@ -1536,7 +1536,7 @@ static int kvm_rma_mmap(struct file *file, struct vm_area_struct *vma)
 
 static int kvm_rma_release(struct inode *inode, struct file *filp)
 {
-	struct kvmppc_linear_info *ri = filp->private_data;
+	struct kvm_rma_info *ri = filp->private_data;
 
 	kvm_release_rma(ri);
 	return 0;
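[Note: the fault and release handlers above are wired up through the RMA fd's ops tables, which this patch leaves untouched. Reconstructed from the surrounding book3s_hv.c for orientation, they look roughly like this:]

	static const struct vm_operations_struct kvm_rma_vm_ops = {
		.fault = kvm_rma_fault,
	};

	static int kvm_rma_mmap(struct file *file, struct vm_area_struct *vma)
	{
		/* every page of the mapping is served by kvm_rma_fault() */
		vma->vm_ops = &kvm_rma_vm_ops;
		return 0;
	}

	static const struct file_operations kvm_rma_fops = {
		.mmap		= kvm_rma_mmap,
		.release	= kvm_rma_release,
	};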
@@ -1549,8 +1549,17 @@ static const struct file_operations kvm_rma_fops = {
 
 long kvm_vm_ioctl_allocate_rma(struct kvm *kvm, struct kvm_allocate_rma *ret)
 {
-	struct kvmppc_linear_info *ri;
 	long fd;
+	struct kvm_rma_info *ri;
+	/*
+	 * Only do this on PPC970 in HV mode
+	 */
+	if (!cpu_has_feature(CPU_FTR_HVMODE) ||
+	    !cpu_has_feature(CPU_FTR_ARCH_201))
+		return -EINVAL;
+
+	if (!kvm_rma_pages)
+		return -EINVAL;
 
 	ri = kvm_alloc_rma();
 	if (!ri)
@@ -1560,7 +1569,7 @@ long kvm_vm_ioctl_allocate_rma(struct kvm *kvm, struct kvm_allocate_rma *ret)
 	if (fd < 0)
 		kvm_release_rma(ri);
 
-	ret->rma_size = ri->npages << PAGE_SHIFT;
+	ret->rma_size = kvm_rma_pages << PAGE_SHIFT;
 	return fd;
 }
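[Note: the ioctl hunks preserve the userspace contract: KVM_ALLOCATE_RMA still returns a new fd and reports the RMA size, only the size now comes from the global kvm_rma_pages. A minimal userspace sketch, assuming an already-created VM fd `vmfd` and the existing uapi (KVM_ALLOCATE_RMA, struct kvm_allocate_rma):]

	#include <sys/ioctl.h>
	#include <sys/mman.h>
	#include <linux/kvm.h>

	static void *map_rma(int vmfd, unsigned long *size)
	{
		struct kvm_allocate_rma rma;
		void *mem;
		/* on success this returns a new fd backed by kvm_rma_fops */
		int rmafd = ioctl(vmfd, KVM_ALLOCATE_RMA, &rma);

		if (rmafd < 0)
			return NULL;
		*size = rma.rma_size;	/* now kvm_rma_pages << PAGE_SHIFT */
		/* faults on this mapping are handled by kvm_rma_fault() */
		mem = mmap(NULL, rma.rma_size, PROT_READ | PROT_WRITE,
			   MAP_SHARED, rmafd, 0);
		return mem == MAP_FAILED ? NULL : mem;
	}

[Userspace can probe KVM_CHECK_EXTENSION with KVM_CAP_PPC_RMA first; it reports 2 when an RMA is required, as on PPC970.]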
@@ -1725,7 +1734,7 @@ static int kvmppc_hv_setup_htab_rma(struct kvm_vcpu *vcpu)
 {
 	int err = 0;
 	struct kvm *kvm = vcpu->kvm;
-	struct kvmppc_linear_info *ri = NULL;
+	struct kvm_rma_info *ri = NULL;
 	unsigned long hva;
 	struct kvm_memory_slot *memslot;
 	struct vm_area_struct *vma;
@@ -1803,7 +1812,7 @@ static int kvmppc_hv_setup_htab_rma(struct kvm_vcpu *vcpu)
 	} else {
 		/* Set up to use an RMO region */
-		rma_size = ri->npages;
+		rma_size = kvm_rma_pages;
 		if (rma_size > memslot->npages)
 			rma_size = memslot->npages;
 		rma_size <<= PAGE_SHIFT;
@@ -1831,14 +1840,14 @@ static int kvmppc_hv_setup_htab_rma(struct kvm_vcpu *vcpu)
 			/* POWER7 */
 			lpcr &= ~(LPCR_VPM0 | LPCR_VRMA_L);
 			lpcr |= rmls << LPCR_RMLS_SH;
-			kvm->arch.rmor = kvm->arch.rma->base_pfn << PAGE_SHIFT;
+			kvm->arch.rmor = ri->base_pfn << PAGE_SHIFT;
 		}
 		kvm->arch.lpcr = lpcr;
 		pr_info("KVM: Using RMO at %lx size %lx (LPCR = %lx)\n",
 			ri->base_pfn << PAGE_SHIFT, rma_size, lpcr);
 
 		/* Initialize phys addrs of pages in RMO */
-		npages = ri->npages;
+		npages = kvm_rma_pages;
 		porder = __ilog2(npages);
 		physp = memslot->arch.slot_phys;
 		if (physp) {
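[Note: the hunk ends inside the physp setup. For context, the loop body that follows in the file (unchanged by this patch, sketched from the surrounding code of this era, so treat details as approximate) records the real addresses of the RMA pages, with the page order encoded in the low bits:]

	if (physp) {
		for (i = 0; i < npages; ++i)
			physp[i] = ((ri->base_pfn + i) << PAGE_SHIFT) + porder;
		smp_wmb();	/* publish physp entries before they are used */
	}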
diff --git a/arch/powerpc/kvm/book3s_hv_builtin.c b/arch/powerpc/kvm/book3s_hv_builtin.c
index 4b865c55333..8cd0daebb82 100644
--- a/arch/powerpc/kvm/book3s_hv_builtin.c
+++ b/arch/powerpc/kvm/book3s_hv_builtin.c
@@ -21,13 +21,6 @@
 #include <asm/kvm_book3s.h>
 
 #include "book3s_hv_cma.h"
-
-#define KVM_LINEAR_RMA		0
-#define KVM_LINEAR_HPT		1
-
-static void __init kvm_linear_init_one(ulong size, int count, int type);
-static struct kvmppc_linear_info *kvm_alloc_linear(int type);
-static void kvm_release_linear(struct kvmppc_linear_info *ri);
 /*
  * Hash page table alignment on newer cpus(CPU_FTR_ARCH_206)
  * should be power of 2.
@@ -37,19 +30,17 @@ static void kvm_release_linear(struct kvmppc_linear_info *ri);
  * By default we reserve 5% of memory for hash pagetable allocation.
  */
 static unsigned long kvm_cma_resv_ratio = 5;
-
-/*************** RMA *************/
-
 /*
- * This maintains a list of RMAs (real mode areas) for KVM guests to use.
+ * We allocate RMAs (real mode areas) for KVM guests from the KVM CMA area.
  * Each RMA has to be physically contiguous and of a size that the
  * hardware supports. PPC970 and POWER7 support 64MB, 128MB and 256MB,
- * and other larger sizes. Since we are unlikely to be able to allocate that
+ * and other larger sizes. Since we are unlikely to be able to allocate that
  * much physically contiguous memory after the system is up and running,
- * we preallocate a set of RMAs in early boot for KVM to use.
+ * we preallocate a set of RMAs in early boot using CMA.
+ * should be power of 2.
  */
-static unsigned long kvm_rma_size = 64 << 20;	/* 64MB */
-static unsigned long kvm_rma_count;
+unsigned long kvm_rma_pages = (1 << 27) >> PAGE_SHIFT;	/* 128MB */
+EXPORT_SYMBOL_GPL(kvm_rma_pages);
 
 /* Work out RMLS (real mode limit selector) field value for a given RMA size.
    Assumes POWER7 or PPC970. */
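[Note: the size validation added below in early_parse_rma_size() relies on this same lpcr_rmls() helper, which is unchanged here. Its body is essentially a switch over the architected RMA sizes, returning the RMLS encoding or -1 (sketched from the unchanged code in this file):]

	static inline int lpcr_rmls(unsigned long rma_size)
	{
		switch (rma_size) {
		case 32ul << 20:	/* 32MB */
			if (cpu_has_feature(CPU_FTR_ARCH_206))
				return 8;	/* only supported on POWER7 */
			return -1;
		case 64ul << 20:	/* 64MB */
			return 3;
		case 128ul << 20:	/* 128MB */
			return 7;
		case 256ul << 20:	/* 256MB */
			return 4;
		case 1ul << 30:		/* 1GB */
			return 2;
		case 16ul << 30:	/* 16GB */
			return 1;
		case 256ul << 30:	/* 256GB */
			return 0;
		default:
			return -1;
		}
	}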
@@ -79,35 +70,50 @@ static inline int lpcr_rmls(unsigned long rma_size)
 
 static int __init early_parse_rma_size(char *p)
 {
-	if (!p)
-		return 1;
+	unsigned long kvm_rma_size;
 
+	pr_debug("%s(%s)\n", __func__, p);
+	if (!p)
+		return -EINVAL;
 	kvm_rma_size = memparse(p, &p);
-
+	/*
+	 * Check that the requested size is one supported in hardware
+	 */
+	if (lpcr_rmls(kvm_rma_size) < 0) {
+		pr_err("RMA size of 0x%lx not supported\n", kvm_rma_size);
+		return -EINVAL;
+	}
+	kvm_rma_pages = kvm_rma_size >> PAGE_SHIFT;
 	return 0;
 }
 early_param("kvm_rma_size", early_parse_rma_size);
 
-static int __init early_parse_rma_count(char *p)
+struct kvm_rma_info *kvm_alloc_rma()
 {
-	if (!p)
-		return 1;
-
-	kvm_rma_count = simple_strtoul(p, NULL, 0);
-
-	return 0;
-}
-early_param("kvm_rma_count", early_parse_rma_count);
-
-struct kvmppc_linear_info *kvm_alloc_rma(void)
-{
-	return kvm_alloc_linear(KVM_LINEAR_RMA);
+	struct page *page;
+	struct kvm_rma_info *ri;
+
+	ri = kmalloc(sizeof(struct kvm_rma_info), GFP_KERNEL);
+	if (!ri)
+		return NULL;
+	page = kvm_alloc_cma(kvm_rma_pages, kvm_rma_pages);
+	if (!page)
+		goto err_out;
+	atomic_set(&ri->use_count, 1);
+	ri->base_pfn = page_to_pfn(page);
+	return ri;
+err_out:
+	kfree(ri);
+	return NULL;
 }
 EXPORT_SYMBOL_GPL(kvm_alloc_rma);
 
-void kvm_release_rma(struct kvmppc_linear_info *ri)
+void kvm_release_rma(struct kvm_rma_info *ri)
 {
-	kvm_release_linear(ri);
+	if (atomic_dec_and_test(&ri->use_count)) {
+		kvm_release_cma(pfn_to_page(ri->base_pfn), kvm_rma_pages);
+		kfree(ri);
+	}
 }
 EXPORT_SYMBOL_GPL(kvm_release_rma);
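[Note: kvm_alloc_rma() and kvm_release_rma() lean on two pieces introduced alongside this patch; reconstructed roughly for reference (see asm/kvm_ppc.h and book3s_hv_cma.h in this series):]

	/* per-RMA descriptor, replacing struct kvmppc_linear_info */
	struct kvm_rma_info {
		atomic_t	use_count;	/* set to 1 at allocation, freed on last put */
		unsigned long	base_pfn;	/* first pfn of the contiguous area */
	};

	/*
	 * CMA backend: size and alignment are both kvm_rma_pages, so each
	 * RMA comes back naturally aligned, as the hardware requires.
	 */
	extern struct page *kvm_alloc_cma(unsigned long nr_pages,
					  unsigned long align_pages);
	extern bool kvm_release_cma(struct page *page, unsigned long nr_pages);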
@@ -137,101 +143,6 @@ void kvm_release_hpt(struct page *page, unsigned long nr_pages)
 }
 EXPORT_SYMBOL_GPL(kvm_release_hpt);
 
-/*************** generic *************/
-
-static LIST_HEAD(free_linears);
-static DEFINE_SPINLOCK(linear_lock);
-
-static void __init kvm_linear_init_one(ulong size, int count, int type)
-{
-	unsigned long i;
-	unsigned long j, npages;
-	void *linear;
-	struct page *pg;
-	const char *typestr;
-	struct kvmppc_linear_info *linear_info;
-
-	if (!count)
-		return;
-
-	typestr = (type == KVM_LINEAR_RMA) ? "RMA" : "HPT";
-
-	npages = size >> PAGE_SHIFT;
-	linear_info = alloc_bootmem(count * sizeof(struct kvmppc_linear_info));
-	for (i = 0; i < count; ++i) {
-		linear = alloc_bootmem_align(size, size);
-		pr_debug("Allocated KVM %s at %p (%ld MB)\n", typestr, linear,
-			 size >> 20);
-		linear_info[i].base_virt = linear;
-		linear_info[i].base_pfn = __pa(linear) >> PAGE_SHIFT;
-		linear_info[i].npages = npages;
-		linear_info[i].type = type;
-		list_add_tail(&linear_info[i].list, &free_linears);
-		atomic_set(&linear_info[i].use_count, 0);
-
-		pg = pfn_to_page(linear_info[i].base_pfn);
-		for (j = 0; j < npages; ++j) {
-			atomic_inc(&pg->_count);
-			++pg;
-		}
-	}
-}
-
-static struct kvmppc_linear_info *kvm_alloc_linear(int type)
-{
-	struct kvmppc_linear_info *ri, *ret;
-
-	ret = NULL;
-	spin_lock(&linear_lock);
-	list_for_each_entry(ri, &free_linears, list) {
-		if (ri->type != type)
-			continue;
-
-		list_del(&ri->list);
-		atomic_inc(&ri->use_count);
-		memset(ri->base_virt, 0, ri->npages << PAGE_SHIFT);
-		ret = ri;
-		break;
-	}
-	spin_unlock(&linear_lock);
-	return ret;
-}
-
-static void kvm_release_linear(struct kvmppc_linear_info *ri)
-{
-	if (atomic_dec_and_test(&ri->use_count)) {
-		spin_lock(&linear_lock);
-		list_add_tail(&ri->list, &free_linears);
-		spin_unlock(&linear_lock);
-
-	}
-}
-
-/*
- * Called at boot time while the bootmem allocator is active,
- * to allocate contiguous physical memory for the hash page
- * tables for guests.
- */
-void __init kvm_linear_init(void)
-{
-	/* RMA */
-	/* Only do this on PPC970 in HV mode */
-	if (!cpu_has_feature(CPU_FTR_HVMODE) ||
-	    !cpu_has_feature(CPU_FTR_ARCH_201))
-		return;
-
-	if (!kvm_rma_size || !kvm_rma_count)
-		return;
-
-	/* Check that the requested size is one supported in hardware */
-	if (lpcr_rmls(kvm_rma_size) < 0) {
-		pr_err("RMA size of 0x%lx not supported\n", kvm_rma_size);
-		return;
-	}
-
-	kvm_linear_init_one(kvm_rma_size, kvm_rma_count, KVM_LINEAR_RMA);
-}
-
 /**
  * kvm_cma_reserve() - reserve area for kvm hash pagetable
  *
@@ -265,6 +176,8 @@ void __init kvm_cma_reserve(void)
 			align_size = __rounddown_pow_of_two(selected_size);
 		else
 			align_size = HPT_ALIGN_PAGES << PAGE_SHIFT;
+
+		align_size = max(kvm_rma_pages << PAGE_SHIFT, align_size);
 		kvm_cma_declare_contiguous(selected_size, align_size);
 	}
 }
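[Note: the max() matters because, unlike the old bootmem path, RMAs are now carved out of the same CMA region as the hash page tables: aligning the reservation to at least one RMA keeps kvm_alloc_cma(kvm_rma_pages, kvm_rma_pages) satisfiable. A quick worked example with 4K pages and the default kvm_rma_pages = (1 << 27) >> 12 = 32768, assuming a hypothetical HPT-derived alignment of 16MB:]

	/* the HPT-derived align_size is bumped up to one full RMA */
	align_size = max(32768UL << 12, 16UL << 20);	/* = 128MB */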