Diffstat (limited to 'arch/ia64/kvm/kvm-ia64.c')
-rw-r--r--  arch/ia64/kvm/kvm-ia64.c | 131
1 file changed, 119 insertions(+), 12 deletions(-)
diff --git a/arch/ia64/kvm/kvm-ia64.c b/arch/ia64/kvm/kvm-ia64.c
index 4e586f6110a..28af6a731bb 100644
--- a/arch/ia64/kvm/kvm-ia64.c
+++ b/arch/ia64/kvm/kvm-ia64.c
@@ -70,7 +70,7 @@ static void kvm_flush_icache(unsigned long start, unsigned long len)
int l;
for (l = 0; l < (len + 32); l += 32)
- ia64_fc(start + l);
+ ia64_fc((void *)(start + l));
ia64_sync_i();
ia64_srlz_i();
@@ -182,7 +182,7 @@ int kvm_dev_ioctl_check_extension(long ext)
switch (ext) {
case KVM_CAP_IRQCHIP:
case KVM_CAP_MP_STATE:
-
+ case KVM_CAP_IRQ_INJECT_STATUS:
r = 1;
break;
case KVM_CAP_COALESCED_MMIO:
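
The new capability bit above is what userspace should probe before relying on injection-status reporting. A minimal sketch of that probe, assuming an already-open /dev/kvm fd (the fd name and helper are illustrative, not part of this patch):

#include <sys/ioctl.h>
#include <linux/kvm.h>

/* Returns nonzero when the host reports KVM_CAP_IRQ_INJECT_STATUS,
 * i.e. KVM_IRQ_LINE_STATUS may be used instead of KVM_IRQ_LINE.
 * kvm_fd is assumed to be an open handle to /dev/kvm. */
static int has_irq_inject_status(int kvm_fd)
{
	return ioctl(kvm_fd, KVM_CHECK_EXTENSION,
		     KVM_CAP_IRQ_INJECT_STATUS) > 0;
}
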
@@ -314,7 +314,7 @@ static struct kvm_vcpu *lid_to_vcpu(struct kvm *kvm, unsigned long id,
union ia64_lid lid;
int i;
- for (i = 0; i < KVM_MAX_VCPUS; i++) {
+ for (i = 0; i < kvm->arch.online_vcpus; i++) {
if (kvm->vcpus[i]) {
lid.val = VCPU_LID(kvm->vcpus[i]);
if (lid.id == id && lid.eid == eid)
@@ -388,7 +388,7 @@ static int handle_global_purge(struct kvm_vcpu *vcpu, struct kvm_run *kvm_run)
call_data.ptc_g_data = p->u.ptc_g_data;
- for (i = 0; i < KVM_MAX_VCPUS; i++) {
+ for (i = 0; i < kvm->arch.online_vcpus; i++) {
if (!kvm->vcpus[i] || kvm->vcpus[i]->arch.mp_state ==
KVM_MP_STATE_UNINITIALIZED ||
vcpu == kvm->vcpus[i])
@@ -788,6 +788,8 @@ struct kvm *kvm_arch_create_vm(void)
return ERR_PTR(-ENOMEM);
kvm_init_vm(kvm);
+ kvm->arch.online_vcpus = 0;
+
return kvm;
}
@@ -919,7 +921,13 @@ long kvm_arch_vm_ioctl(struct file *filp,
r = kvm_ioapic_init(kvm);
if (r)
goto out;
+ r = kvm_setup_default_irq_routing(kvm);
+ if (r) {
+ kfree(kvm->arch.vioapic);
+ goto out;
+ }
break;
+ case KVM_IRQ_LINE_STATUS:
case KVM_IRQ_LINE: {
struct kvm_irq_level irq_event;
@@ -927,10 +935,17 @@ long kvm_arch_vm_ioctl(struct file *filp,
if (copy_from_user(&irq_event, argp, sizeof irq_event))
goto out;
if (irqchip_in_kernel(kvm)) {
+ __s32 status;
mutex_lock(&kvm->lock);
- kvm_set_irq(kvm, KVM_USERSPACE_IRQ_SOURCE_ID,
+ status = kvm_set_irq(kvm, KVM_USERSPACE_IRQ_SOURCE_ID,
irq_event.irq, irq_event.level);
mutex_unlock(&kvm->lock);
+ if (ioctl == KVM_IRQ_LINE_STATUS) {
+ irq_event.status = status;
+ if (copy_to_user(argp, &irq_event,
+ sizeof irq_event))
+ goto out;
+ }
r = 0;
}
break;
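
The hunk above wires the injection result back to userspace: the return value of kvm_set_irq() is copied into irq_event.status when the caller used KVM_IRQ_LINE_STATUS. A hedged userspace sketch of the difference between the two ioctls, assuming an open VM fd with an in-kernel irqchip (the fd name and GSI number are illustrative):

#include <stdio.h>
#include <sys/ioctl.h>
#include <linux/kvm.h>

/* Sketch only: raises then lowers an interrupt line and reports
 * whether the injection was coalesced. vm_fd is assumed to be an
 * already-created VM with an in-kernel irqchip. */
static void pulse_irq(int vm_fd, unsigned int gsi)
{
	struct kvm_irq_level irq_event = { .irq = gsi, .level = 1 };

	/* Plain KVM_IRQ_LINE discards the kvm_set_irq() result. */
	ioctl(vm_fd, KVM_IRQ_LINE, &irq_event);

	/* KVM_IRQ_LINE_STATUS copies the result back into the same
	 * structure (irq and status share a union in the uapi header),
	 * so re-fill the request fields before each call. */
	irq_event.irq = gsi;
	irq_event.level = 0;
	if (ioctl(vm_fd, KVM_IRQ_LINE_STATUS, &irq_event) == 0 &&
	    irq_event.status == 0)
		fprintf(stderr, "gsi %u: interrupt was coalesced\n", gsi);
}
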
@@ -1149,7 +1164,7 @@ int kvm_arch_vcpu_init(struct kvm_vcpu *vcpu)
/*Initialize itc offset for vcpus*/
itc_offset = 0UL - ia64_getreg(_IA64_REG_AR_ITC);
- for (i = 0; i < KVM_MAX_VCPUS; i++) {
+ for (i = 0; i < kvm->arch.online_vcpus; i++) {
v = (struct kvm_vcpu *)((char *)vcpu +
sizeof(struct kvm_vcpu_data) * i);
v->arch.itc_offset = itc_offset;
@@ -1283,6 +1298,8 @@ struct kvm_vcpu *kvm_arch_vcpu_create(struct kvm *kvm,
goto fail;
}
+ kvm->arch.online_vcpus++;
+
return vcpu;
fail:
return ERR_PTR(r);
@@ -1303,8 +1320,8 @@ int kvm_arch_vcpu_ioctl_set_fpu(struct kvm_vcpu *vcpu, struct kvm_fpu *fpu)
return -EINVAL;
}
-int kvm_arch_vcpu_ioctl_debug_guest(struct kvm_vcpu *vcpu,
- struct kvm_debug_guest *dbg)
+int kvm_arch_vcpu_ioctl_set_guest_debug(struct kvm_vcpu *vcpu,
+ struct kvm_guest_debug *dbg)
{
return -EINVAL;
}
@@ -1337,6 +1354,10 @@ static void kvm_release_vm_pages(struct kvm *kvm)
}
}
+void kvm_arch_sync_events(struct kvm *kvm)
+{
+}
+
void kvm_arch_destroy_vm(struct kvm *kvm)
{
kvm_iommu_unmap_guest(kvm);
@@ -1417,6 +1438,23 @@ int kvm_arch_vcpu_ioctl_get_regs(struct kvm_vcpu *vcpu, struct kvm_regs *regs)
return 0;
}
+int kvm_arch_vcpu_ioctl_get_stack(struct kvm_vcpu *vcpu,
+ struct kvm_ia64_vcpu_stack *stack)
+{
+ memcpy(stack, vcpu, sizeof(struct kvm_ia64_vcpu_stack));
+ return 0;
+}
+
+int kvm_arch_vcpu_ioctl_set_stack(struct kvm_vcpu *vcpu,
+ struct kvm_ia64_vcpu_stack *stack)
+{
+ memcpy(vcpu + 1, &stack->stack[0] + sizeof(struct kvm_vcpu),
+ sizeof(struct kvm_ia64_vcpu_stack) - sizeof(struct kvm_vcpu));
+
+ vcpu->arch.exit_data = ((struct kvm_vcpu *)stack)->arch.exit_data;
+ return 0;
+}
+
void kvm_arch_vcpu_uninit(struct kvm_vcpu *vcpu)
{
@@ -1426,9 +1464,78 @@ void kvm_arch_vcpu_uninit(struct kvm_vcpu *vcpu)
long kvm_arch_vcpu_ioctl(struct file *filp,
- unsigned int ioctl, unsigned long arg)
+ unsigned int ioctl, unsigned long arg)
{
- return -EINVAL;
+ struct kvm_vcpu *vcpu = filp->private_data;
+ void __user *argp = (void __user *)arg;
+ struct kvm_ia64_vcpu_stack *stack = NULL;
+ long r;
+
+ switch (ioctl) {
+ case KVM_IA64_VCPU_GET_STACK: {
+ struct kvm_ia64_vcpu_stack __user *user_stack;
+ void __user *first_p = argp;
+
+ r = -EFAULT;
+ if (copy_from_user(&user_stack, first_p, sizeof(void *)))
+ goto out;
+
+ if (!access_ok(VERIFY_WRITE, user_stack,
+ sizeof(struct kvm_ia64_vcpu_stack))) {
+ printk(KERN_INFO "KVM_IA64_VCPU_GET_STACK: "
+ "Illegal user destination address for stack\n");
+ goto out;
+ }
+ stack = kzalloc(sizeof(struct kvm_ia64_vcpu_stack), GFP_KERNEL);
+ if (!stack) {
+ r = -ENOMEM;
+ goto out;
+ }
+
+ r = kvm_arch_vcpu_ioctl_get_stack(vcpu, stack);
+ if (r)
+ goto out;
+
+ if (copy_to_user(user_stack, stack,
+ sizeof(struct kvm_ia64_vcpu_stack)))
+ goto out;
+
+ break;
+ }
+ case KVM_IA64_VCPU_SET_STACK: {
+ struct kvm_ia64_vcpu_stack __user *user_stack;
+ void __user *first_p = argp;
+
+ r = -EFAULT;
+ if (copy_from_user(&user_stack, first_p, sizeof(void *)))
+ goto out;
+
+ if (!access_ok(VERIFY_READ, user_stack,
+ sizeof(struct kvm_ia64_vcpu_stack))) {
+ printk(KERN_INFO "KVM_IA64_VCPU_SET_STACK: "
+ "Illegal user address for stack\n");
+ goto out;
+ }
+ stack = kmalloc(sizeof(struct kvm_ia64_vcpu_stack), GFP_KERNEL);
+ if (!stack) {
+ r = -ENOMEM;
+ goto out;
+ }
+ if (copy_from_user(stack, user_stack,
+ sizeof(struct kvm_ia64_vcpu_stack)))
+ goto out;
+
+ r = kvm_arch_vcpu_ioctl_set_stack(vcpu, stack);
+ break;
+ }
+
+ default:
+ r = -EINVAL;
+ }
+
+out:
+ kfree(stack);
+ return r;
}
int kvm_arch_set_memory_region(struct kvm *kvm,
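
As the new vcpu ioctl handler above shows, the kernel reads the ioctl argument itself with copy_from_user(..., sizeof(void *)): userspace passes the address of a pointer to the stack buffer, not the buffer directly. A minimal sketch of a get/set round trip under that assumption (the fd names and error handling are illustrative, and the ioctl numbers are assumed to come from <linux/kvm.h>):

#include <errno.h>
#include <stdlib.h>
#include <sys/ioctl.h>
#include <linux/kvm.h>

/* Sketch only: copies the kernel-side vcpu stack area from one ia64
 * vcpu to another, e.g. for save/restore. Both fds are assumed to be
 * already-created vcpu fds. */
static int copy_vcpu_stack(int src_vcpu_fd, int dst_vcpu_fd)
{
	struct kvm_ia64_vcpu_stack *stack;
	int r;

	stack = malloc(sizeof(*stack));
	if (!stack)
		return -ENOMEM;

	/* The handler dereferences the argument to find the buffer,
	 * so pass the address of the pointer variable. */
	r = ioctl(src_vcpu_fd, KVM_IA64_VCPU_GET_STACK, &stack);
	if (r == 0)
		r = ioctl(dst_vcpu_fd, KVM_IA64_VCPU_SET_STACK, &stack);

	free(stack);
	return r;
}
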
@@ -1468,7 +1575,7 @@ void kvm_arch_flush_shadow(struct kvm *kvm)
}
long kvm_arch_dev_ioctl(struct file *filp,
- unsigned int ioctl, unsigned long arg)
+ unsigned int ioctl, unsigned long arg)
{
return -EINVAL;
}
@@ -1733,7 +1840,7 @@ struct kvm_vcpu *kvm_get_lowest_prio_vcpu(struct kvm *kvm, u8 vector,
struct kvm_vcpu *lvcpu = kvm->vcpus[0];
int i;
- for (i = 1; i < KVM_MAX_VCPUS; i++) {
+ for (i = 1; i < kvm->arch.online_vcpus; i++) {
if (!kvm->vcpus[i])
continue;
if (lvcpu->arch.xtp > kvm->vcpus[i]->arch.xtp)