Diffstat (limited to 'arch/ia64')
-rw-r--r--  arch/ia64/kernel/gate.S                  |  1
-rw-r--r--  arch/ia64/kernel/mca.c                   | 60
-rw-r--r--  arch/ia64/kernel/mca_asm.S               | 12
-rw-r--r--  arch/ia64/kernel/mca_drv_asm.S           |  6
-rw-r--r--  arch/ia64/kernel/process.c               |  3
-rw-r--r--  arch/ia64/mm/tlb.c                       |  2
-rw-r--r--  arch/ia64/pci/pci.c                      | 22
-rw-r--r--  arch/ia64/sn/kernel/io_acpi_init.c       | 17
-rw-r--r--  arch/ia64/sn/kernel/io_init.c            | 20
-rw-r--r--  arch/ia64/sn/kernel/tiocx.c              |  2
-rw-r--r--  arch/ia64/sn/pci/pcibr/pcibr_provider.c  |  2
11 files changed, 106 insertions(+), 41 deletions(-)
diff --git a/arch/ia64/kernel/gate.S b/arch/ia64/kernel/gate.S
index 3274850cf27..74b1ccce4e8 100644
--- a/arch/ia64/kernel/gate.S
+++ b/arch/ia64/kernel/gate.S
@@ -30,6 +30,7 @@
.previous
#define BRL_COND_FSYS_BUBBLE_DOWN(pr) \
[1:](pr)brl.cond.sptk 0; \
+ ;; \
.xdata4 ".data.patch.brl_fsys_bubble_down", 1b-.
GLOBAL_ENTRY(__kernel_syscall_via_break)
diff --git a/arch/ia64/kernel/mca.c b/arch/ia64/kernel/mca.c
index 1ead5ea6c5c..4b5daa3cc0f 100644
--- a/arch/ia64/kernel/mca.c
+++ b/arch/ia64/kernel/mca.c
@@ -57,6 +57,9 @@
*
* 2006-09-15 Hidetoshi Seto <seto.hidetoshi@jp.fujitsu.com>
* Add printing support for MCA/INIT.
+ *
+ * 2007-04-27 Russ Anderson <rja@sgi.com>
+ * Support multiple cpus going through OS_MCA in the same event.
*/
#include <linux/types.h>
#include <linux/init.h>
@@ -96,7 +99,6 @@
#endif
/* Used by mca_asm.S */
-u32 ia64_mca_serialize;
DEFINE_PER_CPU(u64, ia64_mca_data); /* == __per_cpu_mca[smp_processor_id()] */
DEFINE_PER_CPU(u64, ia64_mca_per_cpu_pte); /* PTE to map per-CPU area */
DEFINE_PER_CPU(u64, ia64_mca_pal_pte); /* PTE to map PAL code */
@@ -963,11 +965,12 @@ ia64_mca_modify_original_stack(struct pt_regs *regs,
goto no_mod;
}
+ if (r13 != sos->prev_IA64_KR_CURRENT) {
+ msg = "inconsistent previous current and r13";
+ goto no_mod;
+ }
+
if (!mca_recover_range(ms->pmsa_iip)) {
- if (r13 != sos->prev_IA64_KR_CURRENT) {
- msg = "inconsistent previous current and r13";
- goto no_mod;
- }
if ((r12 - r13) >= KERNEL_STACK_SIZE) {
msg = "inconsistent r12 and r13";
goto no_mod;
@@ -1187,6 +1190,13 @@ all_in:
* further MCA logging is enabled by clearing logs.
* Monarch also has the duty of sending wakeup-IPIs to pull the
* slave processors out of rendezvous spinloop.
+ *
+ * If multiple processors call into OS_MCA, the first will become
+ * the monarch. Subsequent cpus will be recorded in the mca_cpu
+ * bitmask. After the first monarch has processed its MCA, it
+ * will wake up the next cpu in the mca_cpu bitmask and then go
+ * into the rendezvous loop. When all processors have serviced
+ * their MCA, the last monarch frees up the rest of the processors.
*/
void
ia64_mca_handler(struct pt_regs *regs, struct switch_stack *sw,
@@ -1196,16 +1206,32 @@ ia64_mca_handler(struct pt_regs *regs, struct switch_stack *sw,
struct task_struct *previous_current;
struct ia64_mca_notify_die nd =
{ .sos = sos, .monarch_cpu = &monarch_cpu };
+ static atomic_t mca_count;
+ static cpumask_t mca_cpu;
+ if (atomic_add_return(1, &mca_count) == 1) {
+ monarch_cpu = cpu;
+ sos->monarch = 1;
+ } else {
+ cpu_set(cpu, mca_cpu);
+ sos->monarch = 0;
+ }
mprintk(KERN_INFO "Entered OS MCA handler. PSP=%lx cpu=%d "
"monarch=%ld\n", sos->proc_state_param, cpu, sos->monarch);
previous_current = ia64_mca_modify_original_stack(regs, sw, sos, "MCA");
- monarch_cpu = cpu;
+
if (notify_die(DIE_MCA_MONARCH_ENTER, "MCA", regs, (long)&nd, 0, 0)
== NOTIFY_STOP)
ia64_mca_spin(__FUNCTION__);
- ia64_wait_for_slaves(cpu, "MCA");
+ if (sos->monarch) {
+ ia64_wait_for_slaves(cpu, "MCA");
+ } else {
+ ia64_mc_info.imi_rendez_checkin[cpu] = IA64_MCA_RENDEZ_CHECKIN_CONCURRENT_MCA;
+ while (cpu_isset(cpu, mca_cpu))
+ cpu_relax(); /* spin until monarch wakes us */
+ ia64_mc_info.imi_rendez_checkin[cpu] = IA64_MCA_RENDEZ_CHECKIN_NOTDONE;
+ }
/* Wakeup all the processors which are spinning in the rendezvous loop.
* They will leave SAL, then spin in the OS with interrupts disabled
@@ -1244,6 +1270,26 @@ ia64_mca_handler(struct pt_regs *regs, struct switch_stack *sw,
== NOTIFY_STOP)
ia64_mca_spin(__FUNCTION__);
+
+ if (atomic_dec_return(&mca_count) > 0) {
+ int i;
+
+ /* wake up the next monarch cpu,
+ * and put this cpu in the rendez loop.
+ */
+ ia64_mc_info.imi_rendez_checkin[cpu] = IA64_MCA_RENDEZ_CHECKIN_CONCURRENT_MCA;
+ for_each_online_cpu(i) {
+ if (cpu_isset(i, mca_cpu)) {
+ monarch_cpu = i;
+ cpu_clear(i, mca_cpu); /* wake next cpu */
+ while (monarch_cpu != -1)
+ cpu_relax(); /* spin until last cpu leaves */
+ ia64_mc_info.imi_rendez_checkin[cpu] = IA64_MCA_RENDEZ_CHECKIN_NOTDONE;
+ set_curr_task(cpu, previous_current);
+ return;
+ }
+ }
+ }
set_curr_task(cpu, previous_current);
monarch_cpu = -1;
}
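To make the control flow added above easier to follow, here is a simplified sketch of the serialization scheme in kernel-style C. It is an illustration only, not the real handler: mca_entry() is a hypothetical stand-in for ia64_mca_handler(), the sketch declares its own model of monarch_cpu, and the stack switch, the notify_die() hooks, and the final rendezvous spin of outgoing monarchs are omitted.

#include <linux/cpumask.h>
#include <linux/sched.h>                /* cpu_relax() */
#include <asm/atomic.h>

static atomic_t mca_count;              /* CPUs currently inside OS_MCA */
static cpumask_t mca_cpu;               /* CPUs queued to become monarch */
static volatile int monarch_cpu = -1;   /* model of the handler's monarch_cpu */

static void mca_entry(int cpu)          /* hypothetical stand-in, illustration only */
{
        if (atomic_add_return(1, &mca_count) == 1) {
                monarch_cpu = cpu;              /* first CPU in becomes monarch */
        } else {
                cpu_set(cpu, mca_cpu);          /* later CPUs queue up ... */
                while (cpu_isset(cpu, mca_cpu))
                        cpu_relax();            /* ... until a monarch wakes them */
        }

        /* ... process this CPU's MCA ... */

        if (atomic_dec_return(&mca_count) > 0) {
                int next;

                /* hand the monarch role to the next queued CPU */
                for_each_online_cpu(next) {
                        if (cpu_isset(next, mca_cpu)) {
                                monarch_cpu = next;
                                cpu_clear(next, mca_cpu);
                                break;
                        }
                }
        } else {
                monarch_cpu = -1;               /* last CPU out releases the slaves */
        }
}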
diff --git a/arch/ia64/kernel/mca_asm.S b/arch/ia64/kernel/mca_asm.S
index 8c9c26aa6ae..0f5965fcdf8 100644
--- a/arch/ia64/kernel/mca_asm.S
+++ b/arch/ia64/kernel/mca_asm.S
@@ -133,14 +133,6 @@ ia64_do_tlb_purge:
//StartMain////////////////////////////////////////////////////////////////////
ia64_os_mca_dispatch:
- // Serialize all MCA processing
- mov r3=1;;
- LOAD_PHYSICAL(p0,r2,ia64_mca_serialize);;
-ia64_os_mca_spin:
- xchg4 r4=[r2],r3;;
- cmp.ne p6,p0=r4,r0
-(p6) br ia64_os_mca_spin
-
mov r3=IA64_MCA_CPU_MCA_STACK_OFFSET // use the MCA stack
LOAD_PHYSICAL(p0,r2,1f) // return address
mov r19=1 // All MCA events are treated as monarch (for now)
@@ -291,10 +283,6 @@ END(ia64_os_mca_virtual_begin)
mov b0=r12 // SAL_CHECK return address
- // release lock
- LOAD_PHYSICAL(p0,r3,ia64_mca_serialize);;
- st4.rel [r3]=r0
-
br b0
//EndMain//////////////////////////////////////////////////////////////////////
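The assembly removed above implemented a plain xchg-based spinlock that serialized all of OS_MCA behind ia64_mca_serialize. Its C equivalent looks roughly like the sketch below (illustration only; mca_serialize_acquire() and mca_serialize_release() are hypothetical names, and the real lock lived entirely in mca_asm.S). With per-event monarch election now done in ia64_mca_handler(), this global lock is no longer needed.

#include <linux/types.h>
#include <asm/system.h>                 /* xchg() */
#include <asm/processor.h>              /* cpu_relax() */

static u32 ia64_mca_serialize;          /* the variable removed from mca.c above */

static void mca_serialize_acquire(void) /* the xchg4/cmp.ne/br spinloop */
{
        while (xchg(&ia64_mca_serialize, 1) != 0)
                cpu_relax();
}

static void mca_serialize_release(void) /* the store before returning to SAL */
{
        /* plain store here; the removed asm used st4.rel for release ordering */
        ia64_mca_serialize = 0;
}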
diff --git a/arch/ia64/kernel/mca_drv_asm.S b/arch/ia64/kernel/mca_drv_asm.S
index f2d4900751b..3bccb06c8d2 100644
--- a/arch/ia64/kernel/mca_drv_asm.S
+++ b/arch/ia64/kernel/mca_drv_asm.S
@@ -40,7 +40,11 @@ GLOBAL_ENTRY(mca_handler_bhhook)
mov b6=loc1
;;
mov loc1=rp
- ssm psr.i | psr.ic
+ ssm psr.ic
+ ;;
+ srlz.i
+ ;;
+ ssm psr.i
br.call.sptk.many rp=b6 // does not return ...
;;
mov ar.pfs=loc0
diff --git a/arch/ia64/kernel/process.c b/arch/ia64/kernel/process.c
index af73b8dfde2..fa40cba4335 100644
--- a/arch/ia64/kernel/process.c
+++ b/arch/ia64/kernel/process.c
@@ -513,7 +513,8 @@ copy_thread (int nr, unsigned long clone_flags,
static void
do_copy_task_regs (struct task_struct *task, struct unw_frame_info *info, void *arg)
{
- unsigned long mask, sp, nat_bits = 0, ip, ar_rnat, urbs_end, cfm;
+ unsigned long mask, sp, nat_bits = 0, ar_rnat, urbs_end, cfm;
+ unsigned long uninitialized_var(ip); /* GCC be quiet */
elf_greg_t *dst = arg;
struct pt_regs *pt;
char nat;
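uninitialized_var(), used here and again in tlb.c, tiocx.c, and pcibr_provider.c below, silences a false gcc "may be used uninitialized" warning without generating extra code. For reference, its definition in include/linux/compiler-gcc*.h at the time was essentially the following (shown for context, not part of this patch):

/* self-assignment convinces gcc the variable is initialized; the optimizer drops it */
#define uninitialized_var(x) x = x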
diff --git a/arch/ia64/mm/tlb.c b/arch/ia64/mm/tlb.c
index fa4e6d4810f..1682fc63903 100644
--- a/arch/ia64/mm/tlb.c
+++ b/arch/ia64/mm/tlb.c
@@ -175,7 +175,7 @@ EXPORT_SYMBOL(flush_tlb_range);
void __devinit
ia64_tlb_init (void)
{
- ia64_ptce_info_t ptce_info;
+ ia64_ptce_info_t uninitialized_var(ptce_info); /* GCC be quiet */
unsigned long tr_pgbits;
long status;
diff --git a/arch/ia64/pci/pci.c b/arch/ia64/pci/pci.c
index 73696b4a2ee..07d0e92742c 100644
--- a/arch/ia64/pci/pci.c
+++ b/arch/ia64/pci/pci.c
@@ -591,6 +591,9 @@ int
pci_mmap_page_range (struct pci_dev *dev, struct vm_area_struct *vma,
enum pci_mmap_state mmap_state, int write_combine)
{
+ unsigned long size = vma->vm_end - vma->vm_start;
+ pgprot_t prot;
+
/*
* I/O space cannot be accessed via normal processor loads and
* stores on this platform.
@@ -604,15 +607,24 @@ pci_mmap_page_range (struct pci_dev *dev, struct vm_area_struct *vma,
*/
return -EINVAL;
+ if (!valid_mmap_phys_addr_range(vma->vm_pgoff, size))
+ return -EINVAL;
+
+ prot = phys_mem_access_prot(NULL, vma->vm_pgoff, size,
+ vma->vm_page_prot);
+
/*
- * Leave vm_pgoff as-is, the PCI space address is the physical
- * address on this platform.
+ * If the user requested WC, the kernel uses UC or WC for this region,
+ * and the chipset supports WC, we can use WC. Otherwise, we have to
+ * use the same attribute the kernel uses.
*/
- if (write_combine && efi_range_is_wc(vma->vm_start,
- vma->vm_end - vma->vm_start))
+ if (write_combine &&
+ ((pgprot_val(prot) & _PAGE_MA_MASK) == _PAGE_MA_UC ||
+ (pgprot_val(prot) & _PAGE_MA_MASK) == _PAGE_MA_WC) &&
+ efi_range_is_wc(vma->vm_start, vma->vm_end - vma->vm_start))
vma->vm_page_prot = pgprot_writecombine(vma->vm_page_prot);
else
- vma->vm_page_prot = pgprot_noncached(vma->vm_page_prot);
+ vma->vm_page_prot = prot;
if (remap_pfn_range(vma, vma->vm_start, vma->vm_pgoff,
vma->vm_end - vma->vm_start, vma->vm_page_prot))
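The new write-combine test amounts to three conditions that must all hold: the caller asked for WC, the kernel's own attribute for the range is UC or WC (never WB), and the EFI memory map says the range supports WC. A standalone restatement might look like the helper below (can_use_write_combine() is a hypothetical name, not part of the patch). When any condition fails, the mapping falls back to the attribute the kernel already uses, which avoids attribute aliasing.

#include <linux/efi.h>
#include <asm/pgtable.h>

/* Illustrative helper only; mirrors the condition added above. */
static int can_use_write_combine(pgprot_t prot, unsigned long start,
                                 unsigned long size, int write_combine)
{
        unsigned long ma = pgprot_val(prot) & _PAGE_MA_MASK;

        return write_combine &&
               (ma == _PAGE_MA_UC || ma == _PAGE_MA_WC) &&
               efi_range_is_wc(start, size);
}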
diff --git a/arch/ia64/sn/kernel/io_acpi_init.c b/arch/ia64/sn/kernel/io_acpi_init.c
index c6216f454ff..3c7178f5dce 100644
--- a/arch/ia64/sn/kernel/io_acpi_init.c
+++ b/arch/ia64/sn/kernel/io_acpi_init.c
@@ -418,7 +418,7 @@ sn_acpi_slot_fixup(struct pci_dev *dev)
void __iomem *addr;
struct pcidev_info *pcidev_info = NULL;
struct sn_irq_info *sn_irq_info = NULL;
- size_t size;
+ size_t image_size, size;
if (sn_acpi_get_pcidev_info(dev, &pcidev_info, &sn_irq_info)) {
panic("%s: Failure obtaining pcidev_info for %s\n",
@@ -428,17 +428,16 @@ sn_acpi_slot_fixup(struct pci_dev *dev)
if (pcidev_info->pdi_pio_mapped_addr[PCI_ROM_RESOURCE]) {
/*
* A valid ROM image exists and has been shadowed by the
- * PROM. Setup the pci_dev ROM resource to point to
- * the shadowed copy.
+ * PROM. Setup the pci_dev ROM resource with the address
+ * of the shadowed copy, and the actual length of the ROM image.
*/
- size = dev->resource[PCI_ROM_RESOURCE].end -
- dev->resource[PCI_ROM_RESOURCE].start;
- addr =
- ioremap(pcidev_info->pdi_pio_mapped_addr[PCI_ROM_RESOURCE],
- size);
+ size = pci_resource_len(dev, PCI_ROM_RESOURCE);
+ addr = ioremap(pcidev_info->pdi_pio_mapped_addr[PCI_ROM_RESOURCE],
+ size);
+ image_size = pci_get_rom_size(addr, size);
dev->resource[PCI_ROM_RESOURCE].start = (unsigned long) addr;
dev->resource[PCI_ROM_RESOURCE].end =
- (unsigned long) addr + size;
+ (unsigned long) addr + image_size - 1;
dev->resource[PCI_ROM_RESOURCE].flags |= IORESOURCE_ROM_BIOS_COPY;
}
sn_pci_fixup_slot(dev, pcidev_info, sn_irq_info);
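This change and the io_init.c change below follow the same pattern: map the PROM-shadowed copy, let pci_get_rom_size() walk the ROM headers for the actual image length, and size the resource to the image instead of the BAR. A condensed sketch, assuming pci_get_rom_size() is visible to the caller as it is in this patch (sn_size_rom_resource() is a hypothetical name, and error handling is omitted):

#include <linux/pci.h>
#include <asm/io.h>

/* Illustrative helper only; shadow_addr is the PROM-shadowed ROM address. */
static void sn_size_rom_resource(struct pci_dev *dev, unsigned long shadow_addr)
{
        size_t size = pci_resource_len(dev, PCI_ROM_RESOURCE);  /* BAR-sized window */
        void __iomem *rom = ioremap(shadow_addr, size);
        size_t image_size = pci_get_rom_size(rom, size);        /* true image length */

        dev->resource[PCI_ROM_RESOURCE].start = (unsigned long) rom;
        dev->resource[PCI_ROM_RESOURCE].end   = (unsigned long) rom + image_size - 1;
        dev->resource[PCI_ROM_RESOURCE].flags |= IORESOURCE_ROM_BIOS_COPY;
}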
diff --git a/arch/ia64/sn/kernel/io_init.c b/arch/ia64/sn/kernel/io_init.c
index 6b10e5d2848..906b93674b7 100644
--- a/arch/ia64/sn/kernel/io_init.c
+++ b/arch/ia64/sn/kernel/io_init.c
@@ -259,9 +259,23 @@ sn_io_slot_fixup(struct pci_dev *dev)
insert_resource(&ioport_resource, &dev->resource[idx]);
else
insert_resource(&iomem_resource, &dev->resource[idx]);
- /* If ROM, mark as shadowed in PROM */
- if (idx == PCI_ROM_RESOURCE)
- dev->resource[idx].flags |= IORESOURCE_ROM_BIOS_COPY;
+ /*
+ * If ROM, set the actual ROM image size, and mark as
+ * shadowed in PROM.
+ */
+ if (idx == PCI_ROM_RESOURCE) {
+ size_t image_size;
+ void __iomem *rom;
+
+ rom = ioremap(pci_resource_start(dev, PCI_ROM_RESOURCE),
+ size + 1);
+ image_size = pci_get_rom_size(rom, size + 1);
+ dev->resource[PCI_ROM_RESOURCE].end =
+ dev->resource[PCI_ROM_RESOURCE].start +
+ image_size - 1;
+ dev->resource[PCI_ROM_RESOURCE].flags |=
+ IORESOURCE_ROM_BIOS_COPY;
+ }
}
/* Create a pci_window in the pci_controller struct for
* each device resource.
diff --git a/arch/ia64/sn/kernel/tiocx.c b/arch/ia64/sn/kernel/tiocx.c
index 493380b2c05..5a289e4de83 100644
--- a/arch/ia64/sn/kernel/tiocx.c
+++ b/arch/ia64/sn/kernel/tiocx.c
@@ -369,7 +369,7 @@ static void tio_corelet_reset(nasid_t nasid, int corelet)
static int is_fpga_tio(int nasid, int *bt)
{
- u16 ioboard_type;
+ u16 uninitialized_var(ioboard_type); /* GCC be quiet */
s64 rc;
rc = ia64_sn_sysctl_ioboard_get(nasid, &ioboard_type);
diff --git a/arch/ia64/sn/pci/pcibr/pcibr_provider.c b/arch/ia64/sn/pci/pcibr/pcibr_provider.c
index b42bfcae6f9..42485ad50ce 100644
--- a/arch/ia64/sn/pci/pcibr/pcibr_provider.c
+++ b/arch/ia64/sn/pci/pcibr/pcibr_provider.c
@@ -80,7 +80,7 @@ static int sal_pcibr_error_interrupt(struct pcibus_info *soft)
u16 sn_ioboard_to_pci_bus(struct pci_bus *pci_bus)
{
s64 rc;
- u16 ioboard;
+ u16 uninitialized_var(ioboard); /* GCC be quiet */
nasid_t nasid = NASID_GET(SN_PCIBUS_BUSSOFT(pci_bus)->bs_base);
rc = ia64_sn_sysctl_ioboard_get(nasid, &ioboard);