Diffstat (limited to 'arch/ia64/kernel')
 arch/ia64/kernel/cyclone.c     |  2 +-
 arch/ia64/kernel/entry.S       |  2 ++
 arch/ia64/kernel/pci-swiotlb.c |  2 +-
 arch/ia64/kernel/smp.c         | 14 ++++++--------
 arch/ia64/kernel/time.c        |  4 ++--
 5 files changed, 12 insertions(+), 12 deletions(-)
diff --git a/arch/ia64/kernel/cyclone.c b/arch/ia64/kernel/cyclone.c
index 790ef0d87e1..71e35864d2e 100644
--- a/arch/ia64/kernel/cyclone.c
+++ b/arch/ia64/kernel/cyclone.c
@@ -21,7 +21,7 @@ void __init cyclone_setup(void)
 
 static void __iomem *cyclone_mc;
 
-static cycle_t read_cyclone(void)
+static cycle_t read_cyclone(struct clocksource *cs)
 {
 	return (cycle_t)readq((void __iomem *)cyclone_mc);
 }
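The read_cyclone() change tracks the clocksource API update that passes the clocksource being read into its read callback, so a driver can recover per-instance state rather than relying on a file-scope global. A minimal sketch of how such a callback is wired into a registration; the names example_read, example_cs, and example_mmio are illustrative, not from the patch:

#include <linux/clocksource.h>
#include <asm/io.h>

static void __iomem *example_mmio;	/* stands in for cyclone_mc */

static cycle_t example_read(struct clocksource *cs)
{
	/*
	 * 'cs' identifies the instance being read; a driver that
	 * registers several counters can use container_of(cs, ...)
	 * to reach per-device state.
	 */
	return (cycle_t)readq(example_mmio);
}

static struct clocksource example_cs = {
	.name	= "example",
	.rating	= 100,
	.read	= example_read,	/* callback now takes struct clocksource * */
	.mask	= CLOCKSOURCE_MASK(64),
};

The itc_get_cycles() change in time.c below picks up the same signature.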
diff --git a/arch/ia64/kernel/entry.S b/arch/ia64/kernel/entry.S
index 8dc69669586..7bebac0e1d4 100644
--- a/arch/ia64/kernel/entry.S
+++ b/arch/ia64/kernel/entry.S
@@ -1803,6 +1803,8 @@ sys_call_table:
 	data8 sys_dup3
 	data8 sys_pipe2
 	data8 sys_inotify_init1
+	data8 sys_preadv
+	data8 sys_pwritev			// 1320
 
 	.org sys_call_table + 8*NR_syscalls	// guard against failures to increase NR_syscalls
 #endif /* __IA64_ASM_PARAVIRTUALIZED_NATIVE */
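The two new table slots hook up preadv() and pwritev() on ia64 (the // 1320 comment marks pwritev's syscall number), and the .org directive guards against forgetting to bump NR_syscalls. A userspace usage sketch, assuming a libc that already exposes the wrappers:

#include <fcntl.h>
#include <stdio.h>
#include <sys/uio.h>

int main(void)
{
	char hdr[4], body[12];
	struct iovec iov[2] = {
		{ .iov_base = hdr,  .iov_len = sizeof(hdr)  },
		{ .iov_base = body, .iov_len = sizeof(body) },
	};
	int fd = open("/etc/hostname", O_RDONLY);
	if (fd < 0)
		return 1;
	/*
	 * Scatter-read up to 16 bytes from offset 0 in a single call;
	 * unlike readv(), the file position is left untouched.
	 */
	ssize_t n = preadv(fd, iov, 2, 0);
	printf("read %zd bytes\n", n);
	return 0;
}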
diff --git a/arch/ia64/kernel/pci-swiotlb.c b/arch/ia64/kernel/pci-swiotlb.c
index 573f02c39a0..285aae8431c 100644
--- a/arch/ia64/kernel/pci-swiotlb.c
+++ b/arch/ia64/kernel/pci-swiotlb.c
@@ -16,7 +16,7 @@ EXPORT_SYMBOL(swiotlb);
 static void *ia64_swiotlb_alloc_coherent(struct device *dev, size_t size,
 					 dma_addr_t *dma_handle, gfp_t gfp)
 {
-	if (dev->coherent_dma_mask != DMA_64BIT_MASK)
+	if (dev->coherent_dma_mask != DMA_BIT_MASK(64))
 		gfp |= GFP_DMA;
 	return swiotlb_alloc_coherent(dev, size, dma_handle, gfp);
 }
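DMA_BIT_MASK(64) replaces the retired DMA_64BIT_MASK constant. The macro in linux/dma-mapping.h builds a mask of the low n bits, special-casing n == 64 because shifting a 64-bit value by 64 is undefined behavior in C:

#define DMA_BIT_MASK(n)	(((n) == 64) ? ~0ULL : ((1ULL << (n)) - 1))

The comparison above is therefore unchanged in effect: any device whose coherent mask is narrower than the full 64 bits gets its buffer from the GFP_DMA zone.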
diff --git a/arch/ia64/kernel/smp.c b/arch/ia64/kernel/smp.c
index 2ea4199d9c5..5230eaafd83 100644
--- a/arch/ia64/kernel/smp.c
+++ b/arch/ia64/kernel/smp.c
@@ -225,6 +225,7 @@ smp_send_reschedule (int cpu)
 {
 	platform_send_ipi(cpu, IA64_IPI_RESCHEDULE, IA64_IPI_DM_INT, 0);
 }
+EXPORT_SYMBOL_GPL(smp_send_reschedule);
 
 /*
  * Called with preemption disabled.
@@ -300,15 +301,12 @@ smp_flush_tlb_mm (struct mm_struct *mm)
 		return;
 	}
 
+	smp_call_function_mask(mm->cpu_vm_mask,
+		(void (*)(void *))local_finish_flush_tlb_mm, mm, 1);
+	local_irq_disable();
+	local_finish_flush_tlb_mm(mm);
+	local_irq_enable();
 	preempt_enable();
-	/*
-	 * We could optimize this further by using mm->cpu_vm_mask to track which CPUs
-	 * have been running in the address space.  It's not clear that this is worth the
-	 * trouble though: to avoid races, we have to raise the IPI on the target CPU
-	 * anyhow, and once a CPU is interrupted, the cost of local_flush_tlb_all() is
-	 * rather trivial.
-	 */
-	on_each_cpu((void (*)(void *))local_finish_flush_tlb_mm, mm, 1);
 }
 
 void arch_send_call_function_single_ipi(int cpu)
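The rewritten smp_flush_tlb_mm() interrupts only the CPUs recorded in mm->cpu_vm_mask, those that have actually run in the address space, instead of broadcasting via on_each_cpu(). Since smp_call_function_mask() does not invoke the function on the calling CPU, the local flush is issued explicitly, with interrupts disabled to mirror the IPI context the callback otherwise runs in. A condensed sketch of that pattern, with the hypothetical do_flush standing in for local_finish_flush_tlb_mm():

#include <linux/smp.h>
#include <linux/mm_types.h>

static void flush_on_users(struct mm_struct *mm, void (*do_flush)(void *))
{
	preempt_disable();
	/* IPI only CPUs that have run this mm; wait (last arg) is 1,
	 * and the calling CPU is skipped by smp_call_function_mask(). */
	smp_call_function_mask(mm->cpu_vm_mask, do_flush, mm, 1);
	/* Flush locally as well, irqs off as in an IPI handler. */
	local_irq_disable();
	do_flush(mm);
	local_irq_enable();
	preempt_enable();
}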
diff --git a/arch/ia64/kernel/time.c b/arch/ia64/kernel/time.c
index 641c8b61c4f..604c1a35db3 100644
--- a/arch/ia64/kernel/time.c
+++ b/arch/ia64/kernel/time.c
@@ -33,7 +33,7 @@
 #include "fsyscall_gtod_data.h"
 
-static cycle_t itc_get_cycles(void);
+static cycle_t itc_get_cycles(struct clocksource *cs);
 
 struct fsyscall_gtod_data_t fsyscall_gtod_data = {
 	.lock = SEQLOCK_UNLOCKED,
@@ -383,7 +383,7 @@ ia64_init_itm (void)
 	}
 }
 
-static cycle_t itc_get_cycles(void)
+static cycle_t itc_get_cycles(struct clocksource *cs)
 {
 	u64 lcycle, now, ret;