Diffstat (limited to 'arch')
-rw-r--r--  arch/arm/kernel/ptrace.c                    24
-rw-r--r--  arch/arm/kernel/smp.c                        4
-rw-r--r--  arch/arm/kernel/sys_arm.c                    2
-rw-r--r--  arch/ia64/kvm/kvm-ia64.c                     2
-rw-r--r--  arch/powerpc/include/asm/exception-64s.h     7
-rw-r--r--  arch/powerpc/kernel/entry_64.S              18
-rw-r--r--  arch/powerpc/kernel/exceptions-64s.S         2
-rw-r--r--  arch/powerpc/kernel/irq.c                    8
-rw-r--r--  arch/powerpc/kernel/traps.c                 10
-rw-r--r--  arch/powerpc/kvm/book3s_64_mmu_hv.c         22
-rw-r--r--  arch/powerpc/kvm/book3s_hv.c                 2
-rw-r--r--  arch/x86/kernel/kvm.c                        9
-rw-r--r--  arch/x86/kernel/process_64.c                 1
-rw-r--r--  arch/x86/kernel/setup_percpu.c              14
-rw-r--r--  arch/x86/kvm/x86.c                           1
-rw-r--r--  arch/x86/xen/enlighten.c                    42
-rw-r--r--  arch/x86/xen/mmu.c                           7
17 files changed, 103 insertions(+), 72 deletions(-)
diff --git a/arch/arm/kernel/ptrace.c b/arch/arm/kernel/ptrace.c
index 80abafb9bf3..9650c143afc 100644
--- a/arch/arm/kernel/ptrace.c
+++ b/arch/arm/kernel/ptrace.c
@@ -906,27 +906,14 @@ long arch_ptrace(struct task_struct *child, long request,
return ret;
}
-#ifdef __ARMEB__
-#define AUDIT_ARCH_NR AUDIT_ARCH_ARMEB
-#else
-#define AUDIT_ARCH_NR AUDIT_ARCH_ARM
-#endif
-
asmlinkage int syscall_trace(int why, struct pt_regs *regs, int scno)
{
unsigned long ip;
- /*
- * Save IP. IP is used to denote syscall entry/exit:
- * IP = 0 -> entry, = 1 -> exit
- */
- ip = regs->ARM_ip;
- regs->ARM_ip = why;
-
- if (!ip)
+ if (why)
audit_syscall_exit(regs);
else
- audit_syscall_entry(AUDIT_ARCH_NR, scno, regs->ARM_r0,
+ audit_syscall_entry(AUDIT_ARCH_ARM, scno, regs->ARM_r0,
regs->ARM_r1, regs->ARM_r2, regs->ARM_r3);
if (!test_thread_flag(TIF_SYSCALL_TRACE))
@@ -936,6 +923,13 @@ asmlinkage int syscall_trace(int why, struct pt_regs *regs, int scno)
current_thread_info()->syscall = scno;
+ /*
+ * IP is used to denote syscall entry/exit:
+ * IP = 0 -> entry, =1 -> exit
+ */
+ ip = regs->ARM_ip;
+ regs->ARM_ip = why;
+
/* the 0x80 provides a way for the tracing parent to distinguish
between a syscall stop and SIGTRAP delivery */
ptrace_notify(SIGTRAP | ((current->ptrace & PT_TRACESYSGOOD)
diff --git a/arch/arm/kernel/smp.c b/arch/arm/kernel/smp.c
index f6a4d32b042..8f464465977 100644
--- a/arch/arm/kernel/smp.c
+++ b/arch/arm/kernel/smp.c
@@ -251,8 +251,6 @@ asmlinkage void __cpuinit secondary_start_kernel(void)
struct mm_struct *mm = &init_mm;
unsigned int cpu = smp_processor_id();
- printk("CPU%u: Booted secondary processor\n", cpu);
-
/*
* All kernel threads share the same mm context; grab a
* reference and switch to it.
@@ -264,6 +262,8 @@ asmlinkage void __cpuinit secondary_start_kernel(void)
enter_lazy_tlb(mm, current);
local_flush_tlb_all();
+ printk("CPU%u: Booted secondary processor\n", cpu);
+
cpu_init();
preempt_disable();
trace_hardirqs_off();
diff --git a/arch/arm/kernel/sys_arm.c b/arch/arm/kernel/sys_arm.c
index d2b177905cd..76cbb055dd0 100644
--- a/arch/arm/kernel/sys_arm.c
+++ b/arch/arm/kernel/sys_arm.c
@@ -115,7 +115,7 @@ int kernel_execve(const char *filename,
"Ir" (THREAD_START_SP - sizeof(regs)),
"r" (&regs),
"Ir" (sizeof(regs))
- : "r0", "r1", "r2", "r3", "ip", "lr", "memory");
+ : "r0", "r1", "r2", "r3", "r8", "r9", "ip", "lr", "memory");
out:
return ret;
diff --git a/arch/ia64/kvm/kvm-ia64.c b/arch/ia64/kvm/kvm-ia64.c
index f5104b7c52c..463fb3bbe11 100644
--- a/arch/ia64/kvm/kvm-ia64.c
+++ b/arch/ia64/kvm/kvm-ia64.c
@@ -1174,7 +1174,7 @@ out:
bool kvm_vcpu_compatible(struct kvm_vcpu *vcpu)
{
- return irqchip_in_kernel(vcpu->kcm) == (vcpu->arch.apic != NULL);
+ return irqchip_in_kernel(vcpu->kvm) == (vcpu->arch.apic != NULL);
}
int kvm_arch_vcpu_init(struct kvm_vcpu *vcpu)
diff --git a/arch/powerpc/include/asm/exception-64s.h b/arch/powerpc/include/asm/exception-64s.h
index 548da3aa0a3..d58fc4e4149 100644
--- a/arch/powerpc/include/asm/exception-64s.h
+++ b/arch/powerpc/include/asm/exception-64s.h
@@ -288,13 +288,6 @@ label##_hv: \
/* Exception addition: Hard disable interrupts */
#define DISABLE_INTS SOFT_DISABLE_INTS(r10,r11)
-/* Exception addition: Keep interrupt state */
-#define ENABLE_INTS \
- ld r11,PACAKMSR(r13); \
- ld r12,_MSR(r1); \
- rlwimi r11,r12,0,MSR_EE; \
- mtmsrd r11,1
-
#define ADD_NVGPRS \
bl .save_nvgprs
diff --git a/arch/powerpc/kernel/entry_64.S b/arch/powerpc/kernel/entry_64.S
index f8a7a1a1a9f..fc6015027a8 100644
--- a/arch/powerpc/kernel/entry_64.S
+++ b/arch/powerpc/kernel/entry_64.S
@@ -767,16 +767,6 @@ do_work:
SOFT_DISABLE_INTS(r3,r4)
1: bl .preempt_schedule_irq
- /* Hard-disable interrupts again (and update PACA) */
-#ifdef CONFIG_PPC_BOOK3E
- wrteei 0
-#else
- ld r10,PACAKMSR(r13) /* Get kernel MSR without EE */
- mtmsrd r10,1
-#endif /* CONFIG_PPC_BOOK3E */
- li r0,PACA_IRQ_HARD_DIS
- stb r0,PACAIRQHAPPENED(r13)
-
/* Re-test flags and eventually loop */
clrrdi r9,r1,THREAD_SHIFT
ld r4,TI_FLAGS(r9)
@@ -787,14 +777,6 @@ do_work:
user_work:
#endif /* CONFIG_PREEMPT */
- /* Enable interrupts */
-#ifdef CONFIG_PPC_BOOK3E
- wrteei 1
-#else
- ori r10,r10,MSR_EE
- mtmsrd r10,1
-#endif /* CONFIG_PPC_BOOK3E */
-
andi. r0,r4,_TIF_NEED_RESCHED
beq 1f
bl .restore_interrupts
diff --git a/arch/powerpc/kernel/exceptions-64s.S b/arch/powerpc/kernel/exceptions-64s.S
index cb705fdbb45..8f880bc77c5 100644
--- a/arch/powerpc/kernel/exceptions-64s.S
+++ b/arch/powerpc/kernel/exceptions-64s.S
@@ -768,8 +768,8 @@ alignment_common:
std r3,_DAR(r1)
std r4,_DSISR(r1)
bl .save_nvgprs
+ DISABLE_INTS
addi r3,r1,STACK_FRAME_OVERHEAD
- ENABLE_INTS
bl .alignment_exception
b .ret_from_except
diff --git a/arch/powerpc/kernel/irq.c b/arch/powerpc/kernel/irq.c
index 43eb74fcedd..c6c6f3b7f8c 100644
--- a/arch/powerpc/kernel/irq.c
+++ b/arch/powerpc/kernel/irq.c
@@ -260,11 +260,17 @@ EXPORT_SYMBOL(arch_local_irq_restore);
* if they are currently disabled. This is typically called before
* schedule() or do_signal() when returning to userspace. We do it
* in C to avoid the burden of dealing with lockdep etc...
+ *
+ * NOTE: This is called with interrupts hard disabled but not marked
+ * as such in paca->irq_happened, so we need to resync this.
*/
void restore_interrupts(void)
{
- if (irqs_disabled())
+ if (irqs_disabled()) {
+ local_paca->irq_happened |= PACA_IRQ_HARD_DIS;
local_irq_enable();
+ } else
+ __hard_irq_enable();
}
#endif /* CONFIG_PPC64 */
diff --git a/arch/powerpc/kernel/traps.c b/arch/powerpc/kernel/traps.c
index 6aa0c663e24..158972341a2 100644
--- a/arch/powerpc/kernel/traps.c
+++ b/arch/powerpc/kernel/traps.c
@@ -248,7 +248,7 @@ void _exception(int signr, struct pt_regs *regs, int code, unsigned long addr)
addr, regs->nip, regs->link, code);
}
- if (!arch_irq_disabled_regs(regs))
+ if (arch_irqs_disabled() && !arch_irq_disabled_regs(regs))
local_irq_enable();
memset(&info, 0, sizeof(info));
@@ -1019,7 +1019,9 @@ void __kprobes program_check_exception(struct pt_regs *regs)
return;
}
- local_irq_enable();
+ /* We restore the interrupt state now */
+ if (!arch_irq_disabled_regs(regs))
+ local_irq_enable();
#ifdef CONFIG_MATH_EMULATION
/* (reason & REASON_ILLEGAL) would be the obvious thing here,
@@ -1069,6 +1071,10 @@ void alignment_exception(struct pt_regs *regs)
{
int sig, code, fixed = 0;
+ /* We restore the interrupt state now */
+ if (!arch_irq_disabled_regs(regs))
+ local_irq_enable();
+
/* we don't implement logging of alignment exceptions */
if (!(current->thread.align_ctl & PR_UNALIGN_SIGBUS))
fixed = fix_alignment(regs);
diff --git a/arch/powerpc/kvm/book3s_64_mmu_hv.c b/arch/powerpc/kvm/book3s_64_mmu_hv.c
index ddc485a529f..c3beaeef3f6 100644
--- a/arch/powerpc/kvm/book3s_64_mmu_hv.c
+++ b/arch/powerpc/kvm/book3s_64_mmu_hv.c
@@ -258,6 +258,8 @@ static long kvmppc_get_guest_page(struct kvm *kvm, unsigned long gfn,
!(memslot->userspace_addr & (s - 1))) {
start &= ~(s - 1);
pgsize = s;
+ get_page(hpage);
+ put_page(page);
page = hpage;
}
}
@@ -281,11 +283,8 @@ static long kvmppc_get_guest_page(struct kvm *kvm, unsigned long gfn,
err = 0;
out:
- if (got) {
- if (PageHuge(page))
- page = compound_head(page);
+ if (got)
put_page(page);
- }
return err;
up_err:
@@ -678,8 +677,15 @@ int kvmppc_book3s_hv_page_fault(struct kvm_run *run, struct kvm_vcpu *vcpu,
SetPageDirty(page);
out_put:
- if (page)
- put_page(page);
+ if (page) {
+ /*
+ * We drop pages[0] here, not page because page might
+ * have been set to the head page of a compound, but
+ * we have to drop the reference on the correct tail
+ * page to match the get inside gup()
+ */
+ put_page(pages[0]);
+ }
return ret;
out_unlock:
@@ -979,6 +985,7 @@ void *kvmppc_pin_guest_page(struct kvm *kvm, unsigned long gpa,
pa = *physp;
}
page = pfn_to_page(pa >> PAGE_SHIFT);
+ get_page(page);
} else {
hva = gfn_to_hva_memslot(memslot, gfn);
npages = get_user_pages_fast(hva, 1, 1, pages);
@@ -991,8 +998,6 @@ void *kvmppc_pin_guest_page(struct kvm *kvm, unsigned long gpa,
page = compound_head(page);
psize <<= compound_order(page);
}
- if (!kvm->arch.using_mmu_notifiers)
- get_page(page);
offset = gpa & (psize - 1);
if (nb_ret)
*nb_ret = psize - offset;
@@ -1003,7 +1008,6 @@ void kvmppc_unpin_guest_page(struct kvm *kvm, void *va)
{
struct page *page = virt_to_page(va);
- page = compound_head(page);
put_page(page);
}
diff --git a/arch/powerpc/kvm/book3s_hv.c b/arch/powerpc/kvm/book3s_hv.c
index 01294a5099d..108d1f58017 100644
--- a/arch/powerpc/kvm/book3s_hv.c
+++ b/arch/powerpc/kvm/book3s_hv.c
@@ -1192,8 +1192,6 @@ static void unpin_slot(struct kvm *kvm, int slot_id)
continue;
pfn = physp[j] >> PAGE_SHIFT;
page = pfn_to_page(pfn);
- if (PageHuge(page))
- page = compound_head(page);
SetPageDirty(page);
put_page(page);
}
diff --git a/arch/x86/kernel/kvm.c b/arch/x86/kernel/kvm.c
index b8ba6e4a27e..e554e5ad2fe 100644
--- a/arch/x86/kernel/kvm.c
+++ b/arch/x86/kernel/kvm.c
@@ -79,7 +79,6 @@ struct kvm_task_sleep_node {
u32 token;
int cpu;
bool halted;
- struct mm_struct *mm;
};
static struct kvm_task_sleep_head {
@@ -126,9 +125,7 @@ void kvm_async_pf_task_wait(u32 token)
n.token = token;
n.cpu = smp_processor_id();
- n.mm = current->active_mm;
n.halted = idle || preempt_count() > 1;
- atomic_inc(&n.mm->mm_count);
init_waitqueue_head(&n.wq);
hlist_add_head(&n.link, &b->list);
spin_unlock(&b->lock);
@@ -161,9 +158,6 @@ EXPORT_SYMBOL_GPL(kvm_async_pf_task_wait);
static void apf_task_wake_one(struct kvm_task_sleep_node *n)
{
hlist_del_init(&n->link);
- if (!n->mm)
- return;
- mmdrop(n->mm);
if (n->halted)
smp_send_reschedule(n->cpu);
else if (waitqueue_active(&n->wq))
@@ -207,7 +201,7 @@ again:
* async PF was not yet handled.
* Add dummy entry for the token.
*/
- n = kmalloc(sizeof(*n), GFP_ATOMIC);
+ n = kzalloc(sizeof(*n), GFP_ATOMIC);
if (!n) {
/*
* Allocation failed! Busy wait while other cpu
@@ -219,7 +213,6 @@ again:
}
n->token = token;
n->cpu = smp_processor_id();
- n->mm = NULL;
init_waitqueue_head(&n->wq);
hlist_add_head(&n->link, &b->list);
} else
diff --git a/arch/x86/kernel/process_64.c b/arch/x86/kernel/process_64.c
index 733ca39f367..43d8b48b23e 100644
--- a/arch/x86/kernel/process_64.c
+++ b/arch/x86/kernel/process_64.c
@@ -423,6 +423,7 @@ void set_personality_ia32(bool x32)
current_thread_info()->status |= TS_COMPAT;
}
}
+EXPORT_SYMBOL_GPL(set_personality_ia32);
unsigned long get_wchan(struct task_struct *p)
{
diff --git a/arch/x86/kernel/setup_percpu.c b/arch/x86/kernel/setup_percpu.c
index 71f4727da37..5a98aa27218 100644
--- a/arch/x86/kernel/setup_percpu.c
+++ b/arch/x86/kernel/setup_percpu.c
@@ -185,10 +185,22 @@ void __init setup_per_cpu_areas(void)
#endif
rc = -EINVAL;
if (pcpu_chosen_fc != PCPU_FC_PAGE) {
- const size_t atom_size = cpu_has_pse ? PMD_SIZE : PAGE_SIZE;
const size_t dyn_size = PERCPU_MODULE_RESERVE +
PERCPU_DYNAMIC_RESERVE - PERCPU_FIRST_CHUNK_RESERVE;
+ size_t atom_size;
+ /*
+ * On 64bit, use PMD_SIZE for atom_size so that embedded
+ * percpu areas are aligned to PMD. This, in the future,
+ * can also allow using PMD mappings in vmalloc area. Use
+ * PAGE_SIZE on 32bit as vmalloc space is highly contended
+ * and large vmalloc area allocs can easily fail.
+ */
+#ifdef CONFIG_X86_64
+ atom_size = PMD_SIZE;
+#else
+ atom_size = PAGE_SIZE;
+#endif
rc = pcpu_embed_first_chunk(PERCPU_FIRST_CHUNK_RESERVE,
dyn_size, atom_size,
pcpu_cpu_distance,
diff --git a/arch/x86/kvm/x86.c b/arch/x86/kvm/x86.c
index 91a5e989abc..185a2b823a2 100644
--- a/arch/x86/kvm/x86.c
+++ b/arch/x86/kvm/x86.c
@@ -6581,6 +6581,7 @@ void kvm_arch_async_page_present(struct kvm_vcpu *vcpu,
kvm_inject_page_fault(vcpu, &fault);
}
vcpu->arch.apf.halted = false;
+ vcpu->arch.mp_state = KVM_MP_STATE_RUNNABLE;
}
bool kvm_arch_can_inject_async_page_present(struct kvm_vcpu *vcpu)
diff --git a/arch/x86/xen/enlighten.c b/arch/x86/xen/enlighten.c
index a8f8844b8d3..95dccce8e97 100644
--- a/arch/x86/xen/enlighten.c
+++ b/arch/x86/xen/enlighten.c
@@ -63,6 +63,7 @@
#include <asm/stackprotector.h>
#include <asm/hypervisor.h>
#include <asm/mwait.h>
+#include <asm/pci_x86.h>
#ifdef CONFIG_ACPI
#include <linux/acpi.h>
@@ -809,9 +810,40 @@ static void xen_io_delay(void)
}
#ifdef CONFIG_X86_LOCAL_APIC
+static unsigned long xen_set_apic_id(unsigned int x)
+{
+ WARN_ON(1);
+ return x;
+}
+static unsigned int xen_get_apic_id(unsigned long x)
+{
+ return ((x)>>24) & 0xFFu;
+}
static u32 xen_apic_read(u32 reg)
{
- return 0;
+ struct xen_platform_op op = {
+ .cmd = XENPF_get_cpuinfo,
+ .interface_version = XENPF_INTERFACE_VERSION,
+ .u.pcpu_info.xen_cpuid = 0,
+ };
+ int ret = 0;
+
+ /* Shouldn't need this as APIC is turned off for PV, and we only
+ * get called on the bootup processor. But just in case. */
+ if (!xen_initial_domain() || smp_processor_id())
+ return 0;
+
+ if (reg == APIC_LVR)
+ return 0x10;
+
+ if (reg != APIC_ID)
+ return 0;
+
+ ret = HYPERVISOR_dom0_op(&op);
+ if (ret)
+ return 0;
+
+ return op.u.pcpu_info.apic_id << 24;
}
static void xen_apic_write(u32 reg, u32 val)
@@ -849,6 +881,8 @@ static void set_xen_basic_apic_ops(void)
apic->icr_write = xen_apic_icr_write;
apic->wait_icr_idle = xen_apic_wait_icr_idle;
apic->safe_wait_icr_idle = xen_safe_apic_wait_icr_idle;
+ apic->set_apic_id = xen_set_apic_id;
+ apic->get_apic_id = xen_get_apic_id;
}
#endif
@@ -1365,8 +1399,10 @@ asmlinkage void __init xen_start_kernel(void)
/* Make sure ACS will be enabled */
pci_request_acs();
}
-
-
+#ifdef CONFIG_PCI
+ /* PCI BIOS service won't work from a PV guest. */
+ pci_probe &= ~PCI_PROBE_BIOS;
+#endif
xen_raw_console_write("about to get started...\n");
xen_setup_runstate_info(0);
diff --git a/arch/x86/xen/mmu.c b/arch/x86/xen/mmu.c
index b8e279479a6..69f5857660a 100644
--- a/arch/x86/xen/mmu.c
+++ b/arch/x86/xen/mmu.c
@@ -353,8 +353,13 @@ static pteval_t pte_mfn_to_pfn(pteval_t val)
{
if (val & _PAGE_PRESENT) {
unsigned long mfn = (val & PTE_PFN_MASK) >> PAGE_SHIFT;
+ unsigned long pfn = mfn_to_pfn(mfn);
+
pteval_t flags = val & PTE_FLAGS_MASK;
- val = ((pteval_t)mfn_to_pfn(mfn) << PAGE_SHIFT) | flags;
+ if (unlikely(pfn == ~0))
+ val = flags & ~_PAGE_PRESENT;
+ else
+ val = ((pteval_t)pfn << PAGE_SHIFT) | flags;
}
return val;