Diffstat (limited to 'arch/ia64')

 arch/ia64/Kconfig                  |   9
 arch/ia64/hp/common/hwsw_iommu.c   |  13
 arch/ia64/hp/common/sba_iommu.c    |  45
 arch/ia64/hp/sim/simscsi.c         |  29
 arch/ia64/kernel/acpi.c            |  15
 arch/ia64/kernel/entry.S           |   2
 arch/ia64/kernel/mca.c             |   5
 arch/ia64/kernel/mca_asm.S         |  96
 arch/ia64/kernel/mca_drv.c         | 135
 arch/ia64/kernel/mca_drv.h         |   2
 arch/ia64/kernel/mca_drv_asm.S     |  48
 arch/ia64/kernel/perfmon.c         |   5
 arch/ia64/kernel/setup.c           |  29
 arch/ia64/lib/Makefile             |   1
 arch/ia64/lib/dec_and_lock.c       |  42
 arch/ia64/lib/swiotlb.c            | 106
 arch/ia64/pci/pci.c                |  23
 arch/ia64/sn/kernel/tiocx.c        |  62
 arch/ia64/sn/pci/pcibr/pcibr_reg.c |  59
 arch/ia64/sn/pci/tioca_provider.c  |  32
 arch/ia64/sn/pci/tioce_provider.c  |  30

 21 files changed, 499 insertions(+), 289 deletions(-)
diff --git a/arch/ia64/Kconfig b/arch/ia64/Kconfig
index ed25d66c8d5..cb7fc19ff08 100644
--- a/arch/ia64/Kconfig
+++ b/arch/ia64/Kconfig
@@ -254,8 +254,8 @@ config SMP
 	  If you don't know what to do here, say N.
 
 config NR_CPUS
-	int "Maximum number of CPUs (2-512)"
-	range 2 512
+	int "Maximum number of CPUs (2-1024)"
+	range 2 1024
 	depends on SMP
 	default "64"
 	help
@@ -298,11 +298,6 @@ config PREEMPT
 
 source "mm/Kconfig"
 
-config HAVE_DEC_LOCK
-	bool
-	depends on (SMP || PREEMPT)
-	default y
-
 config IA32_SUPPORT
 	bool "Support for Linux/x86 binaries"
 	help
diff --git a/arch/ia64/hp/common/hwsw_iommu.c b/arch/ia64/hp/common/hwsw_iommu.c
index 80f8ef01393..317c334c5a1 100644
--- a/arch/ia64/hp/common/hwsw_iommu.c
+++ b/arch/ia64/hp/common/hwsw_iommu.c
@@ -17,7 +17,7 @@
 #include <asm/machvec.h>
 
 /* swiotlb declarations & definitions: */
-extern void swiotlb_init_with_default_size (size_t size);
+extern int swiotlb_late_init_with_default_size (size_t size);
 extern ia64_mv_dma_alloc_coherent	swiotlb_alloc_coherent;
 extern ia64_mv_dma_free_coherent	swiotlb_free_coherent;
 extern ia64_mv_dma_map_single	swiotlb_map_single;
@@ -67,7 +67,16 @@ void
 hwsw_init (void)
 {
 	/* default to a smallish 2MB sw I/O TLB */
-	swiotlb_init_with_default_size (2 * (1<<20));
+	if (swiotlb_late_init_with_default_size (2 * (1<<20)) != 0) {
+#ifdef CONFIG_IA64_GENERIC
+		/* Better to have normal DMA than panic */
+		printk(KERN_WARNING "%s: Failed to initialize software I/O TLB,"
+		       " reverting to hpzx1 platform vector\n", __FUNCTION__);
+		machvec_init("hpzx1");
+#else
+		panic("Unable to initialize software I/O TLB services");
+#endif
+	}
 }
 
 void *
diff --git a/arch/ia64/hp/common/sba_iommu.c b/arch/ia64/hp/common/sba_iommu.c
index 11957598a8b..e64ca04ace8 100644
--- a/arch/ia64/hp/common/sba_iommu.c
+++ b/arch/ia64/hp/common/sba_iommu.c
@@ -2028,9 +2028,40 @@ static struct acpi_driver acpi_sba_ioc_driver = {
 static int __init
 sba_init(void)
 {
+	if (!ia64_platform_is("hpzx1") && !ia64_platform_is("hpzx1_swiotlb"))
+		return 0;
+
 	acpi_bus_register_driver(&acpi_sba_ioc_driver);
-	if (!ioc_list)
+	if (!ioc_list) {
+#ifdef CONFIG_IA64_GENERIC
+		extern int swiotlb_late_init_with_default_size (size_t size);
+
+		/*
+		 * If we didn't find something sba_iommu can claim, we
+		 * need to setup the swiotlb and switch to the dig machvec.
+		 */
+		if (swiotlb_late_init_with_default_size(64 * (1<<20)) != 0)
+			panic("Unable to find SBA IOMMU or initialize "
+			      "software I/O TLB: Try machvec=dig boot option");
+		machvec_init("dig");
+#else
+		panic("Unable to find SBA IOMMU: Try a generic or DIG kernel");
+#endif
 		return 0;
+	}
+
+#if defined(CONFIG_IA64_GENERIC) || defined(CONFIG_IA64_HP_ZX1_SWIOTLB)
+	/*
+	 * hpzx1_swiotlb needs to have a fairly small swiotlb bounce
+	 * buffer setup to support devices with smaller DMA masks than
+	 * sba_iommu can handle.
+	 */
+	if (ia64_platform_is("hpzx1_swiotlb")) {
+		extern void hwsw_init(void);
+
+		hwsw_init();
+	}
+#endif
 
 #ifdef CONFIG_PCI
 	{
@@ -2048,18 +2079,6 @@ sba_init(void)
 
 subsys_initcall(sba_init); /* must be initialized after ACPI etc., but before any drivers... */
 
-extern void dig_setup(char**);
-/*
- * MAX_DMA_ADDRESS needs to be setup prior to paging_init to do any good,
- * so we use the platform_setup hook to fix it up.
- */
-void __init
-sba_setup(char **cmdline_p)
-{
-	MAX_DMA_ADDRESS = ~0UL;
-	dig_setup(cmdline_p);
-}
-
 static int __init
 nosbagart(char *str)
 {
diff --git a/arch/ia64/hp/sim/simscsi.c b/arch/ia64/hp/sim/simscsi.c
index 56405dbfd73..a18983a3c93 100644
--- a/arch/ia64/hp/sim/simscsi.c
+++ b/arch/ia64/hp/sim/simscsi.c
@@ -233,6 +233,23 @@ simscsi_readwrite10 (struct scsi_cmnd *sc, int mode)
 	simscsi_readwrite(sc, mode, offset, ((sc->cmnd[7] << 8) | sc->cmnd[8])*512);
 }
 
+static void simscsi_fillresult(struct scsi_cmnd *sc, char *buf, unsigned len)
+{
+
+	int scatterlen = sc->use_sg;
+	struct scatterlist *slp;
+
+	if (scatterlen == 0)
+		memcpy(sc->request_buffer, buf, len);
+	else for (slp = (struct scatterlist *)sc->buffer; scatterlen-- > 0 && len > 0; slp++) {
+		unsigned thislen = min(len, slp->length);
+
+		memcpy(page_address(slp->page) + slp->offset, buf, thislen);
+		slp++;
+		len -= thislen;
+	}
+}
+
 static int
 simscsi_queuecommand (struct scsi_cmnd *sc, void (*done)(struct scsi_cmnd *))
 {
@@ -240,6 +257,7 @@ simscsi_queuecommand (struct scsi_cmnd *sc, void (*done)(struct scsi_cmnd *))
 	char fname[MAX_ROOT_LEN+16];
 	size_t disk_size;
 	char *buf;
+	char localbuf[36];
 #if DEBUG_SIMSCSI
 	register long sp asm ("sp");
 
@@ -263,7 +281,7 @@ simscsi_queuecommand (struct scsi_cmnd *sc, void (*done)(struct scsi_cmnd *))
 			/* disk doesn't exist... */
 			break;
 		}
-		buf = sc->request_buffer;
+		buf = localbuf;
 		buf[0] = 0;	/* magnetic disk */
 		buf[1] = 0;	/* not a removable medium */
 		buf[2] = 2;	/* SCSI-2 compliant device */
@@ -273,6 +291,7 @@ simscsi_queuecommand (struct scsi_cmnd *sc, void (*done)(struct scsi_cmnd *))
 		buf[6] = 0;	/* reserved */
 		buf[7] = 0;	/* various flags */
 		memcpy(buf + 8, "HP SIMULATED DISK 0.00", 28);
+		simscsi_fillresult(sc, buf, 36);
 		sc->result = GOOD;
 		break;
 
@@ -304,16 +323,13 @@ simscsi_queuecommand (struct scsi_cmnd *sc, void (*done)(struct scsi_cmnd *))
 		simscsi_readwrite10(sc, SSC_WRITE);
 		break;
 
-
 	      case READ_CAPACITY:
 		if (desc[target_id] < 0 || sc->request_bufflen < 8) {
 			break;
 		}
-		buf = sc->request_buffer;
-
+		buf = localbuf;
 		disk_size = simscsi_get_disk_size(desc[target_id]);
 
-		/* pretend to be a 1GB disk (partition table contains real stuff): */
 		buf[0] = (disk_size >> 24) & 0xff;
 		buf[1] = (disk_size >> 16) & 0xff;
 		buf[2] = (disk_size >>  8) & 0xff;
@@ -323,13 +339,14 @@ simscsi_queuecommand (struct scsi_cmnd *sc, void (*done)(struct scsi_cmnd *))
 		buf[5] = 0;
 		buf[6] = 2;
 		buf[7] = 0;
+		simscsi_fillresult(sc, buf, 8);
 		sc->result = GOOD;
 		break;
 
 	      case MODE_SENSE:
 	      case MODE_SENSE_10:
 		/* sd.c uses this to determine whether disk does write-caching. */
-		memset(sc->request_buffer, 0, 128);
+		simscsi_fillresult(sc, (char *)empty_zero_page, sc->request_bufflen);
 		sc->result = GOOD;
 		break;
diff --git a/arch/ia64/kernel/acpi.c b/arch/ia64/kernel/acpi.c
index 28a4529fdd6..9ad94ddf668 100644
--- a/arch/ia64/kernel/acpi.c
+++ b/arch/ia64/kernel/acpi.c
@@ -838,7 +838,7 @@ EXPORT_SYMBOL(acpi_unmap_lsapic);
 #endif				/* CONFIG_ACPI_HOTPLUG_CPU */
 
 #ifdef CONFIG_ACPI_NUMA
-acpi_status __devinit
+static acpi_status __devinit
 acpi_map_iosapic(acpi_handle handle, u32 depth, void *context, void **ret)
 {
 	struct acpi_buffer buffer = { ACPI_ALLOCATE_BUFFER, NULL };
@@ -890,7 +890,16 @@ acpi_map_iosapic(acpi_handle handle, u32 depth, void *context, void **ret)
 	map_iosapic_to_node(gsi_base, node);
 	return AE_OK;
 }
-#endif /* CONFIG_NUMA */
+
+static int __init
+acpi_map_iosapics (void)
+{
+	acpi_get_devices(NULL, acpi_map_iosapic, NULL, NULL);
+	return 0;
+}
+
+fs_initcall(acpi_map_iosapics);
+#endif /* CONFIG_ACPI_NUMA */
 
 int acpi_register_ioapic(acpi_handle handle, u64 phys_addr, u32 gsi_base)
 {
@@ -899,7 +908,7 @@ int acpi_register_ioapic(acpi_handle handle, u64 phys_addr, u32 gsi_base)
 	if ((err = iosapic_init(phys_addr, gsi_base)))
 		return err;
 
-#if CONFIG_ACPI_NUMA
+#ifdef CONFIG_ACPI_NUMA
 	acpi_map_iosapic(handle, 0, NULL, NULL);
 #endif				/* CONFIG_ACPI_NUMA */
diff --git a/arch/ia64/kernel/entry.S b/arch/ia64/kernel/entry.S
index ba0b6a1f429..0741b066b98 100644
--- a/arch/ia64/kernel/entry.S
+++ b/arch/ia64/kernel/entry.S
@@ -491,7 +491,7 @@ GLOBAL_ENTRY(prefetch_stack)
 	;;
 	lfetch.fault [r16], 128
 	br.ret.sptk.many rp
-END(prefetch_switch_stack)
+END(prefetch_stack)
 
 GLOBAL_ENTRY(execve)
 	mov r15=__NR_execve			// put syscall number in place
diff --git a/arch/ia64/kernel/mca.c b/arch/ia64/kernel/mca.c
index 6dc726ad713..d0a5106fba2 100644
--- a/arch/ia64/kernel/mca.c
+++ b/arch/ia64/kernel/mca.c
@@ -1016,6 +1016,11 @@ ia64_mca_cmc_int_handler(int cmc_irq, void *arg, struct pt_regs *ptregs)
 
 			cmc_polling_enabled = 1;
 			spin_unlock(&cmc_history_lock);
+			/* If we're being hit with CMC interrupts, we won't
+			 * ever execute the schedule_work() below.  Need to
+			 * disable CMC interrupts on this processor now.
+			 */
+			ia64_mca_cmc_vector_disable(NULL);
 			schedule_work(&cmc_disable_work);
 
 			/*
diff --git a/arch/ia64/kernel/mca_asm.S b/arch/ia64/kernel/mca_asm.S
index 499a065f4e6..db32fc1d393 100644
--- a/arch/ia64/kernel/mca_asm.S
+++ b/arch/ia64/kernel/mca_asm.S
@@ -489,24 +489,27 @@ ia64_state_save:
 	;;
 	st8 [temp1]=r17,16	// pal_min_state
 	st8 [temp2]=r6,16	// prev_IA64_KR_CURRENT
+	mov r6=IA64_KR(CURRENT_STACK)
+	;;
+	st8 [temp1]=r6,16	// prev_IA64_KR_CURRENT_STACK
+	st8 [temp2]=r0,16	// prev_task, starts off as NULL
 	mov r6=cr.ifa
 	;;
-	st8 [temp1]=r0,16	// prev_task, starts off as NULL
-	st8 [temp2]=r12,16	// cr.isr
+	st8 [temp1]=r12,16	// cr.isr
+	st8 [temp2]=r6,16	// cr.ifa
 	mov r12=cr.itir
 	;;
-	st8 [temp1]=r6,16	// cr.ifa
-	st8 [temp2]=r12,16	// cr.itir
+	st8 [temp1]=r12,16	// cr.itir
+	st8 [temp2]=r11,16	// cr.iipa
 	mov r12=cr.iim
 	;;
-	st8 [temp1]=r11,16	// cr.iipa
-	st8 [temp2]=r12,16	// cr.iim
-	mov r6=cr.iha
+	st8 [temp1]=r12,16	// cr.iim
 (p1)	mov r12=IA64_MCA_COLD_BOOT
 (p2)	mov r12=IA64_INIT_WARM_BOOT
+	mov r6=cr.iha
 	;;
-	st8 [temp1]=r6,16	// cr.iha
-	st8 [temp2]=r12		// os_status, default is cold boot
+	st8 [temp2]=r6,16	// cr.iha
+	st8 [temp1]=r12		// os_status, default is cold boot
 	mov r6=IA64_MCA_SAME_CONTEXT
 	;;
 	st8 [temp1]=r6		// context, default is same context
@@ -823,9 +826,12 @@ ia64_state_restore:
 	ld8 r12=[temp1],16	// sal_ra
 	ld8 r9=[temp2],16	// sal_gp
 	;;
-	ld8 r22=[temp1],24	// pal_min_state, virtual.  skip prev_task
+	ld8 r22=[temp1],16	// pal_min_state, virtual
 	ld8 r21=[temp2],16	// prev_IA64_KR_CURRENT
 	;;
+	ld8 r16=[temp1],16	// prev_IA64_KR_CURRENT_STACK
+	ld8 r20=[temp2],16	// prev_task
+	;;
 	ld8 temp3=[temp1],16	// cr.isr
 	ld8 temp4=[temp2],16	// cr.ifa
 	;;
@@ -846,6 +852,45 @@ ia64_state_restore:
 	ld8 r8=[temp1]		// os_status
 	ld8 r10=[temp2]		// context
 
+	/* Wire IA64_TR_CURRENT_STACK to the stack that we are resuming to.  To
+	 * avoid any dependencies on the algorithm in ia64_switch_to(), just
+	 * purge any existing CURRENT_STACK mapping and insert the new one.
+	 *
+	 * r16 contains prev_IA64_KR_CURRENT_STACK, r21 contains
+	 * prev_IA64_KR_CURRENT, these values may have been changed by the C
+	 * code.  Do not use r8, r9, r10, r22, they contain values ready for
+	 * the return to SAL.
+	 */
+
+	mov r15=IA64_KR(CURRENT_STACK)		// physical granule mapped by IA64_TR_CURRENT_STACK
+	;;
+	shl r15=r15,IA64_GRANULE_SHIFT
+	;;
+	dep r15=-1,r15,61,3			// virtual granule
+	mov r18=IA64_GRANULE_SHIFT<<2		// for cr.itir.ps
+	;;
+	ptr.d r15,r18
+	;;
+	srlz.d
+
+	extr.u r19=r21,61,3			// r21 = prev_IA64_KR_CURRENT
+	shl r20=r16,IA64_GRANULE_SHIFT		// r16 = prev_IA64_KR_CURRENT_STACK
+	movl r21=PAGE_KERNEL			// page properties
+	;;
+	mov IA64_KR(CURRENT_STACK)=r16
+	cmp.ne p6,p0=RGN_KERNEL,r19		// new stack is in the kernel region?
+	or r21=r20,r21				// construct PA | page properties
+(p6)	br.spnt	1f				// the dreaded cpu 0 idle task in region 5:(
+	;;
+	mov cr.itir=r18
+	mov cr.ifa=r21
+	mov r20=IA64_TR_CURRENT_STACK
+	;;
+	itr.d dtr[r20]=r21
+	;;
+	srlz.d
+1:
+
 	br.sptk b0
 
 //EndStub//////////////////////////////////////////////////////////////////////
@@ -982,6 +1027,7 @@ ia64_set_kernel_registers:
 	add temp4=temp4, temp1	// &struct ia64_sal_os_state.os_gp
 	add r12=temp1, temp3	// kernel stack pointer on MCA/INIT stack
 	add r13=temp1, r3	// set current to start of MCA/INIT stack
+	add r20=temp1, r3	// physical start of MCA/INIT stack
 	;;
 	ld8 r1=[temp4]		// OS GP from SAL OS state
 	;;
@@ -991,7 +1037,35 @@ ia64_set_kernel_registers:
 	;;
 	mov IA64_KR(CURRENT)=r13
 
-	// FIXME: do I need to wire IA64_KR_CURRENT_STACK and IA64_TR_CURRENT_STACK?
+	/* Wire IA64_TR_CURRENT_STACK to the MCA/INIT handler stack.  To avoid
+	 * any dependencies on the algorithm in ia64_switch_to(), just purge
+	 * any existing CURRENT_STACK mapping and insert the new one.
+	 */
+
+	mov r16=IA64_KR(CURRENT_STACK)		// physical granule mapped by IA64_TR_CURRENT_STACK
+	;;
+	shl r16=r16,IA64_GRANULE_SHIFT
+	;;
+	dep r16=-1,r16,61,3			// virtual granule
+	mov r18=IA64_GRANULE_SHIFT<<2		// for cr.itir.ps
+	;;
+	ptr.d r16,r18
+	;;
+	srlz.d
+
+	shr.u r16=r20,IA64_GRANULE_SHIFT	// r20 = physical start of MCA/INIT stack
+	movl r21=PAGE_KERNEL			// page properties
+	;;
+	mov IA64_KR(CURRENT_STACK)=r16
+	or r21=r20,r21				// construct PA | page properties
+	;;
+	mov cr.itir=r18
+	mov cr.ifa=r13
+	mov r20=IA64_TR_CURRENT_STACK
+	;;
+	itr.d dtr[r20]=r21
+	;;
+	srlz.d
 
 	br.sptk b0
diff --git a/arch/ia64/kernel/mca_drv.c b/arch/ia64/kernel/mca_drv.c
index 6e683745af4..f081c60ab20 100644
--- a/arch/ia64/kernel/mca_drv.c
+++ b/arch/ia64/kernel/mca_drv.c
@@ -56,8 +56,9 @@ static struct page *page_isolate[MAX_PAGE_ISOLATE];
 static int num_page_isolate = 0;
 
 typedef enum {
-	ISOLATE_NG = 0,
-	ISOLATE_OK = 1
+	ISOLATE_NG,
+	ISOLATE_OK,
+	ISOLATE_NONE
 } isolate_status_t;
 
 /*
@@ -74,7 +75,7 @@ static struct {
  * @paddr:	poisoned memory location
  *
  * Return value:
- *	ISOLATE_OK / ISOLATE_NG
+ *	one of isolate_status_t, ISOLATE_OK/NG/NONE.
  */
 
 static isolate_status_t
@@ -84,23 +85,26 @@ mca_page_isolate(unsigned long paddr)
 	struct page *p;
 
 	/* whether physical address is valid or not */
-	if ( !ia64_phys_addr_valid(paddr) )
-		return ISOLATE_NG;
+	if (!ia64_phys_addr_valid(paddr))
+		return ISOLATE_NONE;
+
+	if (!pfn_valid(paddr))
+		return ISOLATE_NONE;
 
 	/* convert physical address to physical page number */
 	p = pfn_to_page(paddr>>PAGE_SHIFT);
 
 	/* check whether a page number have been already registered or not */
-	for( i = 0; i < num_page_isolate; i++ )
-		if( page_isolate[i] == p )
+	for (i = 0; i < num_page_isolate; i++)
+		if (page_isolate[i] == p)
 			return ISOLATE_OK; /* already listed */
 
 	/* limitation check */
-	if( num_page_isolate == MAX_PAGE_ISOLATE )
+	if (num_page_isolate == MAX_PAGE_ISOLATE)
 		return ISOLATE_NG;
 
 	/* kick pages having attribute 'SLAB' or 'Reserved' */
-	if( PageSlab(p) || PageReserved(p) )
+	if (PageSlab(p) || PageReserved(p))
 		return ISOLATE_NG;
 
 	/* add attribute 'Reserved' and register the page */
@@ -122,10 +126,15 @@ mca_handler_bh(unsigned long paddr)
 	       current->pid, current->comm);
 
 	spin_lock(&mca_bh_lock);
-	if (mca_page_isolate(paddr) == ISOLATE_OK) {
+	switch (mca_page_isolate(paddr)) {
+	case ISOLATE_OK:
 		printk(KERN_DEBUG "Page isolation: ( %lx ) success.\n", paddr);
-	} else {
+		break;
+	case ISOLATE_NG:
 		printk(KERN_DEBUG "Page isolation: ( %lx ) failure.\n", paddr);
+		break;
+	default:
+		break;
 	}
 	spin_unlock(&mca_bh_lock);
 
@@ -139,10 +148,10 @@ mca_handler_bh(unsigned long paddr)
  * @peidx: pointer to index of processor error section
  */
 
-static void 
+static void
 mca_make_peidx(sal_log_processor_info_t *slpi, peidx_table_t *peidx)
 {
-	/* 
+	/*
 	 * calculate the start address of
 	 *   "struct cpuid_info" and "sal_processor_static_info_t".
 	 */
@@ -164,7 +173,7 @@ mca_make_peidx(sal_log_processor_info_t *slpi, peidx_table_t *peidx)
 }
 
 /**
- * mca_make_slidx -  Make index of SAL error record 
+ * mca_make_slidx -  Make index of SAL error record
  * @buffer: pointer to SAL error record
  * @slidx: pointer to index of SAL error record
  *
@@ -172,12 +181,12 @@ mca_make_peidx(sal_log_processor_info_t *slpi, peidx_table_t *peidx)
  *	1 if record has platform error / 0 if not
  */
 #define LOG_INDEX_ADD_SECT_PTR(sect, ptr) \
-	{ slidx_list_t *hl = &slidx_pool.buffer[slidx_pool.cur_idx]; \
-	hl->hdr = ptr; \
-	list_add(&hl->list, &(sect)); \
-	slidx_pool.cur_idx = (slidx_pool.cur_idx + 1)%slidx_pool.max_idx; }
+	{slidx_list_t *hl = &slidx_pool.buffer[slidx_pool.cur_idx]; \
+	hl->hdr = ptr; \
+	list_add(&hl->list, &(sect)); \
+	slidx_pool.cur_idx = (slidx_pool.cur_idx + 1)%slidx_pool.max_idx; }
 
-static int 
+static int
 mca_make_slidx(void *buffer, slidx_table_t *slidx)
 {
 	int platform_err = 0;
@@ -214,28 +223,36 @@ mca_make_slidx(void *buffer, slidx_table_t *slidx)
 		sp = (sal_log_section_hdr_t *)((char*)buffer + ercd_pos);
 		if (!efi_guidcmp(sp->guid, SAL_PROC_DEV_ERR_SECT_GUID)) {
 			LOG_INDEX_ADD_SECT_PTR(slidx->proc_err, sp);
-		} else if (!efi_guidcmp(sp->guid, SAL_PLAT_MEM_DEV_ERR_SECT_GUID)) {
+		} else if (!efi_guidcmp(sp->guid,
+				SAL_PLAT_MEM_DEV_ERR_SECT_GUID)) {
 			platform_err = 1;
 			LOG_INDEX_ADD_SECT_PTR(slidx->mem_dev_err, sp);
-		} else if (!efi_guidcmp(sp->guid, SAL_PLAT_SEL_DEV_ERR_SECT_GUID)) {
+		} else if (!efi_guidcmp(sp->guid,
+				SAL_PLAT_SEL_DEV_ERR_SECT_GUID)) {
 			platform_err = 1;
 			LOG_INDEX_ADD_SECT_PTR(slidx->sel_dev_err, sp);
-		} else if (!efi_guidcmp(sp->guid, SAL_PLAT_PCI_BUS_ERR_SECT_GUID)) {
+		} else if (!efi_guidcmp(sp->guid,
+				SAL_PLAT_PCI_BUS_ERR_SECT_GUID)) {
 			platform_err = 1;
 			LOG_INDEX_ADD_SECT_PTR(slidx->pci_bus_err, sp);
-		} else if (!efi_guidcmp(sp->guid, SAL_PLAT_SMBIOS_DEV_ERR_SECT_GUID)) {
+		} else if (!efi_guidcmp(sp->guid,
+				SAL_PLAT_SMBIOS_DEV_ERR_SECT_GUID)) {
 			platform_err = 1;
 			LOG_INDEX_ADD_SECT_PTR(slidx->smbios_dev_err, sp);
-		} else if (!efi_guidcmp(sp->guid, SAL_PLAT_PCI_COMP_ERR_SECT_GUID)) {
+		} else if (!efi_guidcmp(sp->guid,
+				SAL_PLAT_PCI_COMP_ERR_SECT_GUID)) {
 			platform_err = 1;
 			LOG_INDEX_ADD_SECT_PTR(slidx->pci_comp_err, sp);
-		} else if (!efi_guidcmp(sp->guid, SAL_PLAT_SPECIFIC_ERR_SECT_GUID)) {
+		} else if (!efi_guidcmp(sp->guid,
+				SAL_PLAT_SPECIFIC_ERR_SECT_GUID)) {
 			platform_err = 1;
 			LOG_INDEX_ADD_SECT_PTR(slidx->plat_specific_err, sp);
-		} else if (!efi_guidcmp(sp->guid, SAL_PLAT_HOST_CTLR_ERR_SECT_GUID)) {
+		} else if (!efi_guidcmp(sp->guid,
+				SAL_PLAT_HOST_CTLR_ERR_SECT_GUID)) {
 			platform_err = 1;
 			LOG_INDEX_ADD_SECT_PTR(slidx->host_ctlr_err, sp);
-		} else if (!efi_guidcmp(sp->guid, SAL_PLAT_BUS_ERR_SECT_GUID)) {
+		} else if (!efi_guidcmp(sp->guid,
+				SAL_PLAT_BUS_ERR_SECT_GUID)) {
 			platform_err = 1;
 			LOG_INDEX_ADD_SECT_PTR(slidx->plat_bus_err, sp);
 		} else {
@@ -253,15 +270,16 @@ mca_make_slidx(void *buffer, slidx_table_t *slidx)
  * Return value:
  *	0 on Success / -ENOMEM on Failure
  */
-static int 
+static int
 init_record_index_pools(void)
 {
 	int i;
 	int rec_max_size;  /* Maximum size of SAL error records */
 	int sect_min_size; /* Minimum size of SAL error sections */
 	/* minimum size table of each section */
-	static int sal_log_sect_min_sizes[] = { 
-		sizeof(sal_log_processor_info_t) + sizeof(sal_processor_static_info_t),
+	static int sal_log_sect_min_sizes[] = {
+		sizeof(sal_log_processor_info_t)
+		+ sizeof(sal_processor_static_info_t),
 		sizeof(sal_log_mem_dev_err_info_t),
 		sizeof(sal_log_sel_dev_err_info_t),
 		sizeof(sal_log_pci_bus_err_info_t),
@@ -294,7 +312,8 @@ init_record_index_pools(void)
 
 	/* - 3 - */
 	slidx_pool.max_idx = (rec_max_size/sect_min_size) * 2 + 1;
-	slidx_pool.buffer = (slidx_list_t *) kmalloc(slidx_pool.max_idx * sizeof(slidx_list_t), GFP_KERNEL);
+	slidx_pool.buffer = (slidx_list_t *)
+		kmalloc(slidx_pool.max_idx * sizeof(slidx_list_t), GFP_KERNEL);
 
 	return slidx_pool.buffer ? 0 : -ENOMEM;
 }
@@ -308,6 +327,7 @@ init_record_index_pools(void)
  * is_mca_global - Check whether this MCA is global or not
  * @peidx: pointer of index of processor error section
  * @pbci: pointer to pal_bus_check_info_t
+ * @sos: pointer to hand off struct between SAL and OS
  *
  * Return value:
  *	MCA_IS_LOCAL / MCA_IS_GLOBAL
@@ -317,11 +337,12 @@ static mca_type_t
 is_mca_global(peidx_table_t *peidx, pal_bus_check_info_t *pbci,
 	      struct ia64_sal_os_state *sos)
 {
-	pal_processor_state_info_t *psp = (pal_processor_state_info_t*)peidx_psp(peidx);
+	pal_processor_state_info_t *psp =
+		(pal_processor_state_info_t*)peidx_psp(peidx);
 
-	/* 
+	/*
 	 * PAL can request a rendezvous, if the MCA has a global scope.
-	 * If "rz_always" flag is set, SAL requests MCA rendezvous 
+	 * If "rz_always" flag is set, SAL requests MCA rendezvous
 	 * in spite of global MCA.
 	 * Therefore it is local MCA when rendezvous has not been requested.
 	 * Failed to rendezvous, the system must be down.
@@ -381,13 +402,15 @@ is_mca_global(peidx_table_t *peidx, pal_bus_check_info_t *pbci,
 * @slidx: pointer of index of SAL error record
 * @peidx: pointer of index of processor error section
 * @pbci: pointer of pal_bus_check_info
+ * @sos: pointer to hand off struct between SAL and OS
 *
 * Return value:
 *	1 on Success / 0 on Failure
 */

static int
-recover_from_read_error(slidx_table_t *slidx, peidx_table_t *peidx, pal_bus_check_info_t *pbci,
+recover_from_read_error(slidx_table_t *slidx,
+			peidx_table_t *peidx, pal_bus_check_info_t *pbci,
 			struct ia64_sal_os_state *sos)
 {
 	sal_log_mod_error_info_t *smei;
@@ -453,24 +476,28 @@ recover_from_read_error(slidx_table_t *slidx, peidx_table_t *peidx, pal_bus_chec
 * @slidx: pointer of index of SAL error record
 * @peidx: pointer of index of processor error section
 * @pbci: pointer of pal_bus_check_info
+ * @sos: pointer to hand off struct between SAL and OS
 *
 * Return value:
 *	1 on Success / 0 on Failure
 */

static int
-recover_from_platform_error(slidx_table_t *slidx, peidx_table_t *peidx, pal_bus_check_info_t *pbci,
+recover_from_platform_error(slidx_table_t *slidx, peidx_table_t *peidx,
+			    pal_bus_check_info_t *pbci,
 			    struct ia64_sal_os_state *sos)
 {
 	int status = 0;
-	pal_processor_state_info_t *psp = (pal_processor_state_info_t*)peidx_psp(peidx);
+	pal_processor_state_info_t *psp =
+		(pal_processor_state_info_t*)peidx_psp(peidx);
 
 	if (psp->bc && pbci->eb && pbci->bsi == 0) {
 		switch(pbci->type) {
 		case 1: /* partial read */
 		case 3: /* full line(cpu) read */
 		case 9: /* I/O space read */
-			status = recover_from_read_error(slidx, peidx, pbci, sos);
+			status = recover_from_read_error(slidx, peidx, pbci,
+							 sos);
 			break;
 		case 0: /* unknown */
 		case 2: /* partial write */
@@ -481,7 +508,8 @@ recover_from_platform_error(slidx_table_t *slidx, peidx_table_t *peidx, pal_bus_
 		case 8: /* write coalescing transactions */
 		case 10: /* I/O space write */
 		case 11: /* inter-processor interrupt message(IPI) */
-		case 12: /* interrupt acknowledge or external task priority cycle */
+		case 12: /* interrupt acknowledge or
+				external task priority cycle */
 		default:
 			break;
 		}
@@ -496,6 +524,7 @@ recover_from_platform_error(slidx_table_t *slidx, peidx_table_t *peidx, pal_bus_
 * @slidx: pointer of index of SAL error record
 * @peidx: pointer of index of processor error section
 * @pbci: pointer of pal_bus_check_info
+ * @sos: pointer to hand off struct between SAL and OS
 *
 * Return value:
 *	1 on Success / 0 on Failure
@@ -509,15 +538,17 @@ recover_from_platform_error(slidx_table_t *slidx, peidx_table_t *peidx, pal_bus_
 */

static int
-recover_from_processor_error(int platform, slidx_table_t *slidx, peidx_table_t *peidx, pal_bus_check_info_t *pbci,
+recover_from_processor_error(int platform, slidx_table_t *slidx,
+			     peidx_table_t *peidx, pal_bus_check_info_t *pbci,
 			     struct ia64_sal_os_state *sos)
 {
-	pal_processor_state_info_t *psp = (pal_processor_state_info_t*)peidx_psp(peidx);
+	pal_processor_state_info_t *psp =
+		(pal_processor_state_info_t*)peidx_psp(peidx);
 
-	/* 
+	/*
 	 * We cannot recover errors with other than bus_check.
 	 */
-	if (psp->cc || psp->rc || psp->uc) 
+	if (psp->cc || psp->rc || psp->uc)
 		return 0;
 
 	/*
@@ -546,10 +577,10 @@ recover_from_processor_error(int platform, slidx_table_t *slidx, peidx_table_t *
 	 * (e.g. a load from poisoned memory)
 	 * This means "there are some platform errors".
 	 */
-	if (platform) 
+	if (platform)
 		return recover_from_platform_error(slidx, peidx, pbci, sos);
-	/* 
-	 * On account of strange SAL error record, we cannot recover. 
+	/*
+	 * On account of strange SAL error record, we cannot recover.
 	 */
 	return 0;
 }
@@ -557,14 +588,14 @@ recover_from_processor_error(int platform, slidx_table_t *slidx, peidx_table_t *
 /**
 * mca_try_to_recover - Try to recover from MCA
 * @rec: pointer to a SAL error record
+ * @sos: pointer to hand off struct between SAL and OS
 *
 * Return value:
 *	1 on Success / 0 on Failure
 */

 static int
-mca_try_to_recover(void *rec,
-	struct ia64_sal_os_state *sos)
+mca_try_to_recover(void *rec, struct ia64_sal_os_state *sos)
 {
 	int platform_err;
 	int n_proc_err;
@@ -588,7 +619,8 @@ mca_try_to_recover(void *rec,
 	}
 
 	/* Make index of processor error section */
-	mca_make_peidx((sal_log_processor_info_t*)slidx_first_entry(&slidx.proc_err)->hdr, &peidx);
+	mca_make_peidx((sal_log_processor_info_t*)
+		slidx_first_entry(&slidx.proc_err)->hdr, &peidx);
 
 	/* Extract Processor BUS_CHECK[0] */
 	*((u64*)&pbci) = peidx_check_info(&peidx, bus_check, 0);
@@ -598,7 +630,8 @@ mca_try_to_recover(void *rec,
 		return 0;
 
 	/* Try to recover a processor error */
-	return recover_from_processor_error(platform_err, &slidx, &peidx, &pbci, sos);
+	return recover_from_processor_error(platform_err, &slidx, &peidx,
+					    &pbci, sos);
 }
 
 /*
@@ -611,7 +644,7 @@ int __init mca_external_handler_init(void)
 		return -ENOMEM;
 
 	/* register external mca handlers */
-	if (ia64_reg_MCA_extension(mca_try_to_recover)){
+	if (ia64_reg_MCA_extension(mca_try_to_recover)) {
 		printk(KERN_ERR "ia64_reg_MCA_extension failed.\n");
 		kfree(slidx_pool.buffer);
 		return -EFAULT;
diff --git a/arch/ia64/kernel/mca_drv.h b/arch/ia64/kernel/mca_drv.h
index 0227b761f2c..e2f6fa1e0ef 100644
--- a/arch/ia64/kernel/mca_drv.h
+++ b/arch/ia64/kernel/mca_drv.h
@@ -6,7 +6,7 @@
 * Copyright (C) Hidetoshi Seto (seto.hidetoshi@jp.fujitsu.com)
 */
 /*
- * Processor error section: 
+ * Processor error section:
 *
 *  +-sal_log_processor_info_t *info-------------+
 *  | sal_log_section_hdr_t header;              |
diff --git a/arch/ia64/kernel/mca_drv_asm.S b/arch/ia64/kernel/mca_drv_asm.S
index 2d7e0217638..3f298ee4d00 100644
--- a/arch/ia64/kernel/mca_drv_asm.S
+++ b/arch/ia64/kernel/mca_drv_asm.S
@@ -13,45 +13,45 @@
 #include <asm/ptrace.h>
 
 GLOBAL_ENTRY(mca_handler_bhhook)
-	invala				// clear RSE ?
-	;;				//
-	cover				//
-	;;				//
-	clrrrb				//
+	invala				// clear RSE ?
+	;;
+	cover
+	;;
+	clrrrb
 	;;
-	alloc	r16=ar.pfs,0,2,1,0	// make a new frame
+	alloc	r16=ar.pfs,0,2,1,0	// make a new frame
 	;;
-	mov	ar.rsc=0
+	mov	ar.rsc=0
 	;;
-	mov	r13=IA64_KR(CURRENT)	// current task pointer
+	mov	r13=IA64_KR(CURRENT)	// current task pointer
 	;;
-	mov	r2=r13
+	mov	r2=r13
 	;;
-	addl	r22=IA64_RBS_OFFSET,r2
+	addl	r22=IA64_RBS_OFFSET,r2
 	;;
-	mov	ar.bspstore=r22
+	mov	ar.bspstore=r22
 	;;
-	addl	sp=IA64_STK_OFFSET-IA64_PT_REGS_SIZE,r2
+	addl	sp=IA64_STK_OFFSET-IA64_PT_REGS_SIZE,r2
 	;;
-	adds	r2=IA64_TASK_THREAD_ON_USTACK_OFFSET,r13
+	adds	r2=IA64_TASK_THREAD_ON_USTACK_OFFSET,r13
 	;;
-	st1	[r2]=r0			// clear current->thread.on_ustack flag
-	mov	loc0=r16
-	movl	loc1=mca_handler_bh	// recovery C function
+	st1	[r2]=r0			// clear current->thread.on_ustack flag
+	mov	loc0=r16
+	movl	loc1=mca_handler_bh	// recovery C function
 	;;
-	mov	out0=r8			// poisoned address
-	mov	b6=loc1
+	mov	out0=r8			// poisoned address
+	mov	b6=loc1
 	;;
-	mov	loc1=rp
+	mov	loc1=rp
 	;;
-	ssm	psr.i
+	ssm	psr.i
 	;;
-	br.call.sptk.many rp=b6		// does not return ...
+	br.call.sptk.many rp=b6		// does not return ...
 	;;
-	mov	ar.pfs=loc0
-	mov	rp=loc1
+	mov	ar.pfs=loc0
+	mov	rp=loc1
 	;;
-	mov	r8=r0
+	mov	r8=r0
 	br.ret.sptk.many rp
 	;;
END(mca_handler_bhhook)
diff --git a/arch/ia64/kernel/perfmon.c b/arch/ia64/kernel/perfmon.c
index 1650353e3f7..d71731ee5b6 100644
--- a/arch/ia64/kernel/perfmon.c
+++ b/arch/ia64/kernel/perfmon.c
@@ -574,7 +574,7 @@ pfm_protect_ctx_ctxsw(pfm_context_t *x)
 	return 0UL;
 }
 
-static inline unsigned long
+static inline void
 pfm_unprotect_ctx_ctxsw(pfm_context_t *x, unsigned long f)
 {
 	spin_unlock(&(x)->ctx_lock);
@@ -2218,12 +2218,13 @@ static void
 pfm_free_fd(int fd, struct file *file)
 {
 	struct files_struct *files = current->files;
-	struct fdtable *fdt = files_fdtable(files);
+	struct fdtable *fdt;
 
 	/*
 	 * there ie no fd_uninstall(), so we do it here
 	 */
 	spin_lock(&files->file_lock);
+	fdt = files_fdtable(files);
 	rcu_assign_pointer(fdt->fd[fd], NULL);
 	spin_unlock(&files->file_lock);
diff --git a/arch/ia64/kernel/setup.c b/arch/ia64/kernel/setup.c
index 1f5c26dbe70..e256b114bf4 100644
--- a/arch/ia64/kernel/setup.c
+++ b/arch/ia64/kernel/setup.c
@@ -244,28 +244,31 @@ find_initrd (void)
 static void __init
 io_port_init (void)
 {
-	extern unsigned long ia64_iobase;
 	unsigned long phys_iobase;
 
 	/*
-	 * Set `iobase' to the appropriate address in region 6 (uncached access range).
+	 * Set `iobase' based on the EFI memory map or, failing that, the
+	 * value firmware left in ar.k0.
 	 *
-	 * The EFI memory map is the "preferred" location to get the I/O port space base,
-	 * rather the relying on AR.KR0. This should become more clear in future SAL
-	 * specs. We'll fall back to getting it out of AR.KR0 if no appropriate entry is
-	 * found in the memory map.
+	 * Note that in ia32 mode, IN/OUT instructions use ar.k0 to compute
+	 * the port's virtual address, so ia32_load_state() loads it with a
+	 * user virtual address.  But in ia64 mode, glibc uses the
+	 * *physical* address in ar.k0 to mmap the appropriate area from
+	 * /dev/mem, and the inX()/outX() interfaces use MMIO.  In both
+	 * cases, user-mode can only use the legacy 0-64K I/O port space.
+	 *
+	 * ar.k0 is not involved in kernel I/O port accesses, which can use
+	 * any of the I/O port spaces and are done via MMIO using the
+	 * virtual mmio_base from the appropriate io_space[].
 	 */
 	phys_iobase = efi_get_iobase();
-	if (phys_iobase)
-		/* set AR.KR0 since this is all we use it for anyway */
-		ia64_set_kr(IA64_KR_IO_BASE, phys_iobase);
-	else {
+	if (!phys_iobase) {
 		phys_iobase = ia64_get_kr(IA64_KR_IO_BASE);
-		printk(KERN_INFO "No I/O port range found in EFI memory map, falling back "
-			"to AR.KR0\n");
-		printk(KERN_INFO "I/O port base = 0x%lx\n", phys_iobase);
+		printk(KERN_INFO "No I/O port range found in EFI memory map, "
+			"falling back to AR.KR0 (0x%lx)\n", phys_iobase);
 	}
 	ia64_iobase = (unsigned long) ioremap(phys_iobase, 0);
+	ia64_set_kr(IA64_KR_IO_BASE, __pa(ia64_iobase));
 
 	/* setup legacy IO port space */
 	io_space[0].mmio_base = ia64_iobase;
diff --git a/arch/ia64/lib/Makefile b/arch/ia64/lib/Makefile
index 799407e7726..cb1af597370 100644
--- a/arch/ia64/lib/Makefile
+++ b/arch/ia64/lib/Makefile
@@ -15,7 +15,6 @@ lib-$(CONFIG_ITANIUM)	+= copy_page.o copy_user.o memcpy.o
 lib-$(CONFIG_MCKINLEY)	+= copy_page_mck.o memcpy_mck.o
 lib-$(CONFIG_PERFMON)	+= carta_random.o
 lib-$(CONFIG_MD_RAID5)	+= xor.o
-lib-$(CONFIG_HAVE_DEC_LOCK) += dec_and_lock.o
 
 AFLAGS___divdi3.o	=
 AFLAGS___udivdi3.o	= -DUNSIGNED
diff --git a/arch/ia64/lib/dec_and_lock.c b/arch/ia64/lib/dec_and_lock.c
deleted file mode 100644
index c7ce92f968f..00000000000
--- a/arch/ia64/lib/dec_and_lock.c
+++ /dev/null
@@ -1,42 +0,0 @@
-/*
- * Copyright (C) 2003 Jerome Marchand, Bull S.A.
- *	Cleaned up by David Mosberger-Tang <davidm@hpl.hp.com>
- *
- * This file is released under the GPLv2, or at your option any later version.
- *
- * ia64 version of "atomic_dec_and_lock()" using the atomic "cmpxchg" instruction.  This
- * code is an adaptation of the x86 version of "atomic_dec_and_lock()".
- */
-
-#include <linux/compiler.h>
-#include <linux/module.h>
-#include <linux/spinlock.h>
-#include <asm/atomic.h>
-
-/*
- * Decrement REFCOUNT and if the count reaches zero, acquire the spinlock.  Both of these
- * operations have to be done atomically, so that the count doesn't drop to zero without
- * acquiring the spinlock first.
- */
-int
-_atomic_dec_and_lock (atomic_t *refcount, spinlock_t *lock)
-{
-	int old, new;
-
-	do {
-		old = atomic_read(refcount);
-		new = old - 1;
-
-		if (unlikely (old == 1)) {
-			/* oops, we may be decrementing to zero, do it the slow way... */
-			spin_lock(lock);
-			if (atomic_dec_and_test(refcount))
-				return 1;
-			spin_unlock(lock);
-			return 0;
-		}
-	} while (cmpxchg(&refcount->counter, old, new) != old);
-	return 0;
-}
-
-EXPORT_SYMBOL(_atomic_dec_and_lock);
diff --git a/arch/ia64/lib/swiotlb.c b/arch/ia64/lib/swiotlb.c
index dbc0b3e449c..48e5ff26eb1 100644
--- a/arch/ia64/lib/swiotlb.c
+++ b/arch/ia64/lib/swiotlb.c
@@ -49,6 +49,15 @@
 */
 #define IO_TLB_SHIFT 11
 
+#define SLABS_PER_PAGE (1 << (PAGE_SHIFT - IO_TLB_SHIFT))
+
+/*
+ * Minimum IO TLB size to bother booting with.  Systems with mainly
+ * 64bit capable cards will only lightly use the swiotlb.  If we can't
+ * allocate a contiguous 1MB, we're probably in trouble anyway.
+ */
+#define IO_TLB_MIN_SLABS ((1<<20) >> IO_TLB_SHIFT)
+
 int swiotlb_force;
 
 /*
@@ -123,8 +132,8 @@ swiotlb_init_with_default_size (size_t default_size)
 	/*
 	 * Get IO TLB memory from the low pages
 	 */
-	io_tlb_start = alloc_bootmem_low_pages(io_tlb_nslabs *
-					       (1 << IO_TLB_SHIFT));
+	io_tlb_start = alloc_bootmem_low_pages_limit(io_tlb_nslabs *
+					(1 << IO_TLB_SHIFT), 0x100000000);
 	if (!io_tlb_start)
 		panic("Cannot allocate SWIOTLB buffer");
 	io_tlb_end = io_tlb_start + io_tlb_nslabs * (1 << IO_TLB_SHIFT);
@@ -154,6 +163,99 @@ swiotlb_init (void)
 	swiotlb_init_with_default_size(64 * (1<<20));	/* default to 64MB */
 }
 
+/*
+ * Systems with larger DMA zones (those that don't support ISA) can
+ * initialize the swiotlb later using the slab allocator if needed.
+ * This should be just like above, but with some error catching.
+ */
+int
+swiotlb_late_init_with_default_size (size_t default_size)
+{
+	unsigned long i, req_nslabs = io_tlb_nslabs;
+	unsigned int order;
+
+	if (!io_tlb_nslabs) {
+		io_tlb_nslabs = (default_size >> IO_TLB_SHIFT);
+		io_tlb_nslabs = ALIGN(io_tlb_nslabs, IO_TLB_SEGSIZE);
+	}
+
+	/*
+	 * Get IO TLB memory from the low pages
+	 */
+	order = get_order(io_tlb_nslabs * (1 << IO_TLB_SHIFT));
+	io_tlb_nslabs = SLABS_PER_PAGE << order;
+
+	while ((SLABS_PER_PAGE << order) > IO_TLB_MIN_SLABS) {
+		io_tlb_start = (char *)__get_free_pages(GFP_DMA | __GFP_NOWARN,
+							order);
+		if (io_tlb_start)
+			break;
+		order--;
+	}
+
+	if (!io_tlb_start)
+		goto cleanup1;
+
+	if (order != get_order(io_tlb_nslabs * (1 << IO_TLB_SHIFT))) {
+		printk(KERN_WARNING "Warning: only able to allocate %ld MB "
+		       "for software IO TLB\n", (PAGE_SIZE << order) >> 20);
+		io_tlb_nslabs = SLABS_PER_PAGE << order;
+	}
+	io_tlb_end = io_tlb_start + io_tlb_nslabs * (1 << IO_TLB_SHIFT);
+	memset(io_tlb_start, 0, io_tlb_nslabs * (1 << IO_TLB_SHIFT));
+
+	/*
+	 * Allocate and initialize the free list array.  This array is used
+	 * to find contiguous free memory regions of size up to IO_TLB_SEGSIZE
+	 * between io_tlb_start and io_tlb_end.
+	 */
+	io_tlb_list = (unsigned int *)__get_free_pages(GFP_KERNEL,
+				get_order(io_tlb_nslabs * sizeof(int)));
+	if (!io_tlb_list)
+		goto cleanup2;
+
+	for (i = 0; i < io_tlb_nslabs; i++)
+		io_tlb_list[i] = IO_TLB_SEGSIZE - OFFSET(i, IO_TLB_SEGSIZE);
+	io_tlb_index = 0;
+
+	io_tlb_orig_addr = (unsigned char **)__get_free_pages(GFP_KERNEL,
+				get_order(io_tlb_nslabs * sizeof(char *)));
+	if (!io_tlb_orig_addr)
+		goto cleanup3;
+
+	memset(io_tlb_orig_addr, 0, io_tlb_nslabs * sizeof(char *));
+
+	/*
+	 * Get the overflow emergency buffer
+	 */
+	io_tlb_overflow_buffer = (void *)__get_free_pages(GFP_DMA,
+					get_order(io_tlb_overflow));
+	if (!io_tlb_overflow_buffer)
+		goto cleanup4;
+
+	printk(KERN_INFO "Placing %ldMB software IO TLB between 0x%lx - "
+	       "0x%lx\n", (io_tlb_nslabs * (1 << IO_TLB_SHIFT)) >> 20,
+	       virt_to_phys(io_tlb_start), virt_to_phys(io_tlb_end));
+
+	return 0;
+
+cleanup4:
+	free_pages((unsigned long)io_tlb_orig_addr, get_order(io_tlb_nslabs *
+							      sizeof(char *)));
+	io_tlb_orig_addr = NULL;
+cleanup3:
+	free_pages((unsigned long)io_tlb_list, get_order(io_tlb_nslabs *
+							 sizeof(int)));
+	io_tlb_list = NULL;
+	io_tlb_end = NULL;
+cleanup2:
+	free_pages((unsigned long)io_tlb_start, order);
+	io_tlb_start = NULL;
+cleanup1:
+	io_tlb_nslabs = req_nslabs;
+	return -ENOMEM;
+}
+
 static inline int
 address_needs_mapping(struct device *hwdev, dma_addr_t addr)
 {
diff --git a/arch/ia64/pci/pci.c b/arch/ia64/pci/pci.c
index 9b5de589b82..6bf48d7842c 100644
--- a/arch/ia64/pci/pci.c
+++ b/arch/ia64/pci/pci.c
@@ -120,29 +120,6 @@ struct pci_ops pci_root_ops = {
 	.write = pci_write,
 };
 
-#ifdef CONFIG_NUMA
-extern acpi_status acpi_map_iosapic(acpi_handle, u32, void *, void **);
-static void acpi_map_iosapics(void)
-{
-	acpi_get_devices(NULL, acpi_map_iosapic, NULL, NULL);
-}
-#else
-static void acpi_map_iosapics(void)
-{
-	return;
-}
-#endif /* CONFIG_NUMA */
-
-static int __init
-pci_acpi_init (void)
-{
-	acpi_map_iosapics();
-
-	return 0;
-}
-
-subsys_initcall(pci_acpi_init);
-
 /* Called by ACPI when it finds a new root bus.  */
 
 static struct pci_controller * __devinit
diff --git a/arch/ia64/sn/kernel/tiocx.c b/arch/ia64/sn/kernel/tiocx.c
index b45db5133f5..e0819ec5311 100644
--- a/arch/ia64/sn/kernel/tiocx.c
+++ b/arch/ia64/sn/kernel/tiocx.c
@@ -183,11 +183,12 @@ int cx_driver_unregister(struct cx_drv *cx_driver)
 * @part_num: device's part number
 * @mfg_num: device's manufacturer number
 * @hubdev: hub info associated with this device
+ * @bt: board type of the device
 *
 */
 int cx_device_register(nasid_t nasid, int part_num, int mfg_num,
-		       struct hubdev_info *hubdev)
+		       struct hubdev_info *hubdev, int bt)
 {
 	struct cx_dev *cx_dev;
 
@@ -200,6 +201,7 @@ cx_device_register(nasid_t nasid, int part_num, int mfg_num,
 	cx_dev->cx_id.mfg_num = mfg_num;
 	cx_dev->cx_id.nasid = nasid;
 	cx_dev->hubdev = hubdev;
+	cx_dev->bt = bt;
 
 	cx_dev->dev.parent = NULL;
 	cx_dev->dev.bus = &tiocx_bus_type;
@@ -238,7 +240,8 @@ static int cx_device_reload(struct cx_dev *cx_dev)
 {
 	cx_device_unregister(cx_dev);
 	return cx_device_register(cx_dev->cx_id.nasid, cx_dev->cx_id.part_num,
-				  cx_dev->cx_id.mfg_num, cx_dev->hubdev);
+				  cx_dev->cx_id.mfg_num, cx_dev->hubdev,
+				  cx_dev->bt);
 }
 
 static inline uint64_t tiocx_intr_alloc(nasid_t nasid, int widget,
@@ -365,26 +368,20 @@ static void tio_corelet_reset(nasid_t nasid, int corelet)
 	udelay(2000);
 }
 
-static int tiocx_btchar_get(int nasid)
+static int is_fpga_tio(int nasid, int *bt)
 {
-	moduleid_t module_id;
-	geoid_t geoid;
-	int cnodeid;
-
-	cnodeid = nasid_to_cnodeid(nasid);
-	geoid = cnodeid_get_geoid(cnodeid);
-	module_id = geo_module(geoid);
-	return MODULE_GET_BTCHAR(module_id);
-}
+	int ioboard_type;
 
-static int is_fpga_brick(int nasid)
-{
-	switch (tiocx_btchar_get(nasid)) {
+	ioboard_type = ia64_sn_sysctl_ioboard_get(nasid);
+
+	switch (ioboard_type) {
 	case L1_BRICKTYPE_SA:
 	case L1_BRICKTYPE_ATHENA:
-	case L1_BRICKTYPE_DAYTONA:
+	case L1_BOARDTYPE_DAYTONA:
+		*bt = ioboard_type;
 		return 1;
 	}
+
 	return 0;
 }
 
@@ -407,16 +404,22 @@ static int tiocx_reload(struct cx_dev *cx_dev)
 
 	if (bitstream_loaded(nasid)) {
 		uint64_t cx_id;
-
-		cx_id =
-		    *(volatile uint64_t *)(TIO_SWIN_BASE(nasid, TIOCX_CORELET) +
+		int rv;
+
+		rv = ia64_sn_sysctl_tio_clock_reset(nasid);
+		if (rv) {
+			printk(KERN_ALERT "CX port JTAG reset failed.\n");
+		} else {
+			cx_id = *(volatile uint64_t *)
+				(TIO_SWIN_BASE(nasid, TIOCX_CORELET) +
 					  WIDGET_ID);
-		part_num = XWIDGET_PART_NUM(cx_id);
-		mfg_num = XWIDGET_MFG_NUM(cx_id);
-		DBG("part= 0x%x, mfg= 0x%x\n", part_num, mfg_num);
-		/* just ignore it if it's a CE */
-		if (part_num == TIO_CE_ASIC_PARTNUM)
-			return 0;
+			part_num = XWIDGET_PART_NUM(cx_id);
+			mfg_num = XWIDGET_MFG_NUM(cx_id);
+			DBG("part= 0x%x, mfg= 0x%x\n", part_num, mfg_num);
+			/* just ignore it if it's a CE */
+			if (part_num == TIO_CE_ASIC_PARTNUM)
+				return 0;
+		}
 	}
 
 	cx_dev->cx_id.part_num = part_num;
@@ -436,10 +439,10 @@ static ssize_t show_cxdev_control(struct device *dev, struct device_attribute *a
 {
 	struct cx_dev *cx_dev = to_cx_dev(dev);
 
-	return sprintf(buf, "0x%x 0x%x 0x%x %d\n",
+	return sprintf(buf, "0x%x 0x%x 0x%x 0x%x\n",
 		       cx_dev->cx_id.nasid,
 		       cx_dev->cx_id.part_num, cx_dev->cx_id.mfg_num,
-		       tiocx_btchar_get(cx_dev->cx_id.nasid));
+		       cx_dev->bt);
 }
 
 static ssize_t store_cxdev_control(struct device *dev, struct device_attribute *attr, const char *buf,
@@ -488,11 +491,12 @@ static int __init tiocx_init(void)
 
 	for (cnodeid = 0; cnodeid < MAX_COMPACT_NODES; cnodeid++) {
 		nasid_t nasid;
+		int bt;
 
 		if ((nasid = cnodeid_to_nasid(cnodeid)) < 0)
 			break;	/* No more nasids .. bail out of loop */
 
-		if ((nasid & 0x1) && is_fpga_brick(nasid)) {
+		if ((nasid & 0x1) && is_fpga_tio(nasid, &bt)) {
 			struct hubdev_info *hubdev;
 			struct xwidget_info *widgetp;
 
@@ -512,7 +516,7 @@ static int __init tiocx_init(void)
 
 			if (cx_device_register
 			    (nasid, widgetp->xwi_hwid.part_num,
-			     widgetp->xwi_hwid.mfg_num, hubdev) < 0)
+			     widgetp->xwi_hwid.mfg_num, hubdev, bt) < 0)
 				return -ENXIO;
 			else
 				found_tiocx_device++;
diff --git a/arch/ia64/sn/pci/pcibr/pcibr_reg.c b/arch/ia64/sn/pci/pcibr/pcibr_reg.c
index 21426d02fbe..4f718c3e93d 100644
--- a/arch/ia64/sn/pci/pcibr/pcibr_reg.c
+++ b/arch/ia64/sn/pci/pcibr/pcibr_reg.c
@@ -8,6 +8,7 @@
 
 #include <linux/interrupt.h>
 #include <linux/types.h>
+#include <asm/sn/io.h>
 #include <asm/sn/pcibr_provider.h>
 #include <asm/sn/pcibus_provider_defs.h>
 #include <asm/sn/pcidev.h>
@@ -29,10 +30,10 @@ void pcireg_control_bit_clr(struct pcibus_info *pcibus_info, uint64_t bits)
 	if (pcibus_info) {
 		switch (pcibus_info->pbi_bridge_type) {
 		case PCIBR_BRIDGETYPE_TIOCP:
-			ptr->tio.cp_control &= ~bits;
+			__sn_clrq_relaxed(&ptr->tio.cp_control, bits);
 			break;
 		case PCIBR_BRIDGETYPE_PIC:
-			ptr->pic.p_wid_control &= ~bits;
+			__sn_clrq_relaxed(&ptr->pic.p_wid_control, bits);
 			break;
 		default:
 			panic
@@ -49,10 +50,10 @@ void pcireg_control_bit_set(struct pcibus_info *pcibus_info, uint64_t bits)
 	if (pcibus_info) {
 		switch (pcibus_info->pbi_bridge_type) {
 		case PCIBR_BRIDGETYPE_TIOCP:
-			ptr->tio.cp_control |= bits;
+			__sn_setq_relaxed(&ptr->tio.cp_control, bits);
 			break;
 		case PCIBR_BRIDGETYPE_PIC:
-			ptr->pic.p_wid_control |= bits;
+			__sn_setq_relaxed(&ptr->pic.p_wid_control, bits);
 			break;
 		default:
 			panic
@@ -73,10 +74,10 @@ uint64_t pcireg_tflush_get(struct pcibus_info *pcibus_info)
 	if (pcibus_info) {
 		switch (pcibus_info->pbi_bridge_type) {
 		case PCIBR_BRIDGETYPE_TIOCP:
-			ret = ptr->tio.cp_tflush;
+			ret = __sn_readq_relaxed(&ptr->tio.cp_tflush);
 			break;
 		case PCIBR_BRIDGETYPE_PIC:
-			ret = ptr->pic.p_wid_tflush;
+			ret = __sn_readq_relaxed(&ptr->pic.p_wid_tflush);
 			break;
 		default:
 			panic
@@ -103,10 +104,10 @@ uint64_t pcireg_intr_status_get(struct pcibus_info * pcibus_info)
 	if (pcibus_info) {
 		switch (pcibus_info->pbi_bridge_type) {
 		case PCIBR_BRIDGETYPE_TIOCP:
-			ret = ptr->tio.cp_int_status;
+			ret = __sn_readq_relaxed(&ptr->tio.cp_int_status);
 			break;
 		case PCIBR_BRIDGETYPE_PIC:
-			ret = ptr->pic.p_int_status;
+			ret = __sn_readq_relaxed(&ptr->pic.p_int_status);
 			break;
 		default:
 			panic
@@ -127,10 +128,10 @@ void pcireg_intr_enable_bit_clr(struct pcibus_info *pcibus_info, uint64_t bits)
 	if (pcibus_info) {
 		switch (pcibus_info->pbi_bridge_type) {
 		case PCIBR_BRIDGETYPE_TIOCP:
-			ptr->tio.cp_int_enable &= ~bits;
+			__sn_clrq_relaxed(&ptr->tio.cp_int_enable, bits);
 			break;
 		case PCIBR_BRIDGETYPE_PIC:
-			ptr->pic.p_int_enable &= ~bits;
+			__sn_clrq_relaxed(&ptr->pic.p_int_enable, ~bits);
 			break;
 		default:
 			panic
@@ -147,10 +148,10 @@ void pcireg_intr_enable_bit_set(struct pcibus_info *pcibus_info, uint64_t bits)
 	if (pcibus_info) {
 		switch (pcibus_info->pbi_bridge_type) {
 		case PCIBR_BRIDGETYPE_TIOCP:
-			ptr->tio.cp_int_enable |= bits;
+			__sn_setq_relaxed(&ptr->tio.cp_int_enable, bits);
 			break;
 		case PCIBR_BRIDGETYPE_PIC:
-			ptr->pic.p_int_enable |= bits;
+			__sn_setq_relaxed(&ptr->pic.p_int_enable, bits);
 			break;
 		default:
 			panic
@@ -171,14 +172,16 @@ void pcireg_intr_addr_addr_set(struct pcibus_info *pcibus_info, int int_n,
 	if (pcibus_info) {
 		switch (pcibus_info->pbi_bridge_type) {
 		case PCIBR_BRIDGETYPE_TIOCP:
-			ptr->tio.cp_int_addr[int_n] &= ~TIOCP_HOST_INTR_ADDR;
-			ptr->tio.cp_int_addr[int_n] |=
-			    (addr & TIOCP_HOST_INTR_ADDR);
+			__sn_clrq_relaxed(&ptr->tio.cp_int_addr[int_n],
+			    TIOCP_HOST_INTR_ADDR);
+			__sn_setq_relaxed(&ptr->tio.cp_int_addr[int_n],
+			    (addr & TIOCP_HOST_INTR_ADDR));
 			break;
 		case PCIBR_BRIDGETYPE_PIC:
-			ptr->pic.p_int_addr[int_n] &= ~PIC_HOST_INTR_ADDR;
-			ptr->pic.p_int_addr[int_n] |=
-			    (addr & PIC_HOST_INTR_ADDR);
+			__sn_clrq_relaxed(&ptr->pic.p_int_addr[int_n],
+			    PIC_HOST_INTR_ADDR);
+			__sn_setq_relaxed(&ptr->pic.p_int_addr[int_n],
+			    (addr & PIC_HOST_INTR_ADDR));
 			break;
 		default:
 			panic
@@ -198,10 +201,10 @@ void pcireg_force_intr_set(struct pcibus_info *pcibus_info, int int_n)
 	if (pcibus_info) {
 		switch (pcibus_info->pbi_bridge_type) {
 		case PCIBR_BRIDGETYPE_TIOCP:
-			ptr->tio.cp_force_pin[int_n] = 1;
+			writeq(1, &ptr->tio.cp_force_pin[int_n]);
 			break;
 		case PCIBR_BRIDGETYPE_PIC:
-			ptr->pic.p_force_pin[int_n] = 1;
+			writeq(1, &ptr->pic.p_force_pin[int_n]);
 			break;
 		default:
 			panic
@@ -222,10 +225,12 @@ uint64_t pcireg_wrb_flush_get(struct pcibus_info *pcibus_info, int device)
 	if (pcibus_info) {
 		switch (pcibus_info->pbi_bridge_type) {
 		case PCIBR_BRIDGETYPE_TIOCP:
-			ret = ptr->tio.cp_wr_req_buf[device];
+			ret =
+			    __sn_readq_relaxed(&ptr->tio.cp_wr_req_buf[device]);
 			break;
 		case PCIBR_BRIDGETYPE_PIC:
-			ret = ptr->pic.p_wr_req_buf[device];
+			ret =
+			    __sn_readq_relaxed(&ptr->pic.p_wr_req_buf[device]);
 			break;
 		default:
 		      panic("pcireg_wrb_flush_get: unknown bridgetype bridge 0x%p", (void *)ptr);
@@ -244,10 +249,10 @@ void pcireg_int_ate_set(struct pcibus_info *pcibus_info, int ate_index,
 	if (pcibus_info) {
 		switch (pcibus_info->pbi_bridge_type) {
 		case PCIBR_BRIDGETYPE_TIOCP:
-			ptr->tio.cp_int_ate_ram[ate_index] = (uint64_t) val;
+			writeq(val, &ptr->tio.cp_int_ate_ram[ate_index]);
 			break;
 		case PCIBR_BRIDGETYPE_PIC:
-			ptr->pic.p_int_ate_ram[ate_index] = (uint64_t) val;
+			writeq(val, &ptr->pic.p_int_ate_ram[ate_index]);
 			break;
 		default:
 			panic
@@ -265,12 +270,10 @@ uint64_t *pcireg_int_ate_addr(struct pcibus_info *pcibus_info, int ate_index)
 	if (pcibus_info) {
 		switch (pcibus_info->pbi_bridge_type) {
 		case PCIBR_BRIDGETYPE_TIOCP:
-			ret =
-			    (uint64_t *) & (ptr->tio.cp_int_ate_ram[ate_index]);
+			ret = &ptr->tio.cp_int_ate_ram[ate_index];
 			break;
 		case PCIBR_BRIDGETYPE_PIC:
-			ret =
-			    (uint64_t *) & (ptr->pic.p_int_ate_ram[ate_index]);
+			ret = &ptr->pic.p_int_ate_ram[ate_index];
 			break;
 		default:
 			panic
diff --git a/arch/ia64/sn/pci/tioca_provider.c b/arch/ia64/sn/pci/tioca_provider.c
index 19bced34d5f..46b646a6d34 100644
--- a/arch/ia64/sn/pci/tioca_provider.c
+++ b/arch/ia64/sn/pci/tioca_provider.c
@@ -11,6 +11,7 @@
 #include <linux/pci.h>
 #include <asm/sn/sn_sal.h>
 #include <asm/sn/addrs.h>
+#include <asm/sn/io.h>
 #include <asm/sn/pcidev.h>
 #include <asm/sn/pcibus_provider_defs.h>
 #include <asm/sn/tioca_provider.h>
@@ -37,7 +38,7 @@ tioca_gart_init(struct tioca_kernel *tioca_kern)
 	uint64_t offset;
 	struct page *tmp;
 	struct tioca_common *tioca_common;
-	volatile struct tioca *ca_base;
+	struct tioca *ca_base;
 
 	tioca_common = tioca_kern->ca_common;
 	ca_base = (struct tioca *)tioca_common->ca_common.bs_base;
@@ -174,27 +175,29 @@ tioca_gart_init(struct tioca_kernel *tioca_kern)
 	 * DISABLE GART PREFETCHING due to hw bug tracked in SGI PV930029
 	 */
 
-	ca_base->ca_control1 |= CA_AGPDMA_OP_ENB_COMBDELAY;	/* PV895469 ? */
-	ca_base->ca_control2 &= ~(CA_GART_MEM_PARAM);
-	ca_base->ca_control2 |= (0x2ull << CA_GART_MEM_PARAM_SHFT);
+	__sn_setq_relaxed(&ca_base->ca_control1,
+			CA_AGPDMA_OP_ENB_COMBDELAY);	/* PV895469 ? */
+	__sn_clrq_relaxed(&ca_base->ca_control2, CA_GART_MEM_PARAM);
+	__sn_setq_relaxed(&ca_base->ca_control2,
+			(0x2ull << CA_GART_MEM_PARAM_SHFT));
 	tioca_kern->ca_gart_iscoherent = 1;
-	ca_base->ca_control2 &=
-	    ~(CA_GART_WR_PREFETCH_ENB | CA_GART_RD_PREFETCH_ENB);
+	__sn_clrq_relaxed(&ca_base->ca_control2,
+	    (CA_GART_WR_PREFETCH_ENB | CA_GART_RD_PREFETCH_ENB));
 
 	/*
 	 * Unmask GART fetch error interrupts.  Clear residual errors first.
 	 */
 
-	ca_base->ca_int_status_alias = CA_GART_FETCH_ERR;
-	ca_base->ca_mult_error_alias = CA_GART_FETCH_ERR;
-	ca_base->ca_int_mask &= ~CA_GART_FETCH_ERR;
+	writeq(CA_GART_FETCH_ERR, &ca_base->ca_int_status_alias);
+	writeq(CA_GART_FETCH_ERR, &ca_base->ca_mult_error_alias);
+	__sn_clrq_relaxed(&ca_base->ca_int_mask, CA_GART_FETCH_ERR);
 
 	/*
 	 * Program the aperature and gart registers in TIOCA
 	 */
 
-	ca_base->ca_gart_aperature = ap_reg;
-	ca_base->ca_gart_ptr_table = tioca_kern->ca_gart_coretalk_addr | 1;
+	writeq(ap_reg, &ca_base->ca_gart_aperature);
+	writeq(tioca_kern->ca_gart_coretalk_addr|1, &ca_base->ca_gart_ptr_table);
 
 	return 0;
 }
@@ -211,7 +214,6 @@ void
 tioca_fastwrite_enable(struct tioca_kernel *tioca_kern)
 {
 	int cap_ptr;
-	uint64_t ca_control1;
 	uint32_t reg;
 	struct tioca *tioca_base;
 	struct pci_dev *pdev;
@@ -256,9 +258,7 @@ tioca_fastwrite_enable(struct tioca_kernel *tioca_kern)
 	 */
 
 	tioca_base = (struct tioca *)common->ca_common.bs_base;
-	ca_control1 = tioca_base->ca_control1;
-	ca_control1 |= CA_AGP_FW_ENABLE;
-	tioca_base->ca_control1 = ca_control1;
+	__sn_setq_relaxed(&tioca_base->ca_control1, CA_AGP_FW_ENABLE);
 }
 
 EXPORT_SYMBOL(tioca_fastwrite_enable);	/* used by agp-sgi */
@@ -345,7 +345,7 @@ tioca_dma_d48(struct pci_dev *pdev, uint64_t paddr)
 		return 0;
 	}
 
-	agp_dma_extn = ca_base->ca_agp_dma_addr_extn;
+	agp_dma_extn = __sn_readq_relaxed(&ca_base->ca_agp_dma_addr_extn);
 	if (node_upper != (agp_dma_extn >> CA_AGP_DMA_NODE_ID_SHFT)) {
 		printk(KERN_ERR "%s:  coretalk upper node (%u) "
 		       "mismatch with ca_agp_dma_addr_extn (%lu)\n",
diff --git a/arch/ia64/sn/pci/tioce_provider.c b/arch/ia64/sn/pci/tioce_provider.c
index 8e75db2b825..9f03d4e5121 100644
--- a/arch/ia64/sn/pci/tioce_provider.c
+++ b/arch/ia64/sn/pci/tioce_provider.c
@@ -11,6 +11,7 @@
 #include <linux/pci.h>
 #include <asm/sn/sn_sal.h>
 #include <asm/sn/addrs.h>
+#include <asm/sn/io.h>
 #include <asm/sn/pcidev.h>
 #include <asm/sn/pcibus_provider_defs.h>
 #include <asm/sn/tioce_provider.h>
@@ -227,7 +228,7 @@ tioce_alloc_map(struct tioce_kernel *ce_kern, int type, int port,
 
 		ate = ATE_MAKE(addr, pagesize);
 		ate_shadow[i + j] = ate;
-		ate_reg[i + j] = ate;
+		writeq(ate, &ate_reg[i + j]);
 		addr += pagesize;
 	}
 
@@ -268,10 +269,10 @@ tioce_dma_d32(struct pci_dev *pdev, uint64_t ct_addr)
 	pcidev_to_tioce(pdev, &ce_mmr, &ce_kern, &port);
 
 	if (ce_kern->ce_port[port].dirmap_refcnt == 0) {
-		volatile uint64_t tmp;
+		uint64_t tmp;
 
 		ce_kern->ce_port[port].dirmap_shadow = ct_upper;
-		ce_mmr->ce_ure_dir_map[port] = ct_upper;
+		writeq(ct_upper, &ce_mmr->ce_ure_dir_map[port]);
 		tmp = ce_mmr->ce_ure_dir_map[port];
 		dma_ok = 1;
 	} else
@@ -343,7 +344,7 @@ tioce_dma_unmap(struct pci_dev *pdev, dma_addr_t bus_addr, int dir)
 	if (TIOCE_D32_ADDR(bus_addr)) {
 		if (--ce_kern->ce_port[port].dirmap_refcnt == 0) {
 			ce_kern->ce_port[port].dirmap_shadow = 0;
-			ce_mmr->ce_ure_dir_map[port] = 0;
+			writeq(0, &ce_mmr->ce_ure_dir_map[port]);
 		}
 	} else {
 		struct tioce_dmamap *map;
@@ -582,18 +583,18 @@ tioce_kern_init(struct tioce_common *tioce_common)
 	 */
 
 	tioce_mmr = (struct tioce *)tioce_common->ce_pcibus.bs_base;
-	tioce_mmr->ce_ure_page_map &= ~CE_URE_PAGESIZE_MASK;
-	tioce_mmr->ce_ure_page_map |= CE_URE_256K_PAGESIZE;
+	__sn_clrq_relaxed(&tioce_mmr->ce_ure_page_map, CE_URE_PAGESIZE_MASK);
+	__sn_setq_relaxed(&tioce_mmr->ce_ure_page_map, CE_URE_256K_PAGESIZE);
 	tioce_kern->ce_ate3240_pagesize = KB(256);
 
 	for (i = 0; i < TIOCE_NUM_M40_ATES; i++) {
 		tioce_kern->ce_ate40_shadow[i] = 0;
-		tioce_mmr->ce_ure_ate40[i] = 0;
+		writeq(0, &tioce_mmr->ce_ure_ate40[i]);
 	}
 
 	for (i = 0; i < TIOCE_NUM_M3240_ATES; i++) {
 		tioce_kern->ce_ate3240_shadow[i] = 0;
-		tioce_mmr->ce_ure_ate3240[i] = 0;
+		writeq(0, &tioce_mmr->ce_ure_ate3240[i]);
 	}
 
 	return tioce_kern;
@@ -665,7 +666,7 @@ tioce_force_interrupt(struct sn_irq_info *sn_irq_info)
 	default:
 		return;
 	}
-	ce_mmr->ce_adm_force_int = force_int_val;
+	writeq(force_int_val, &ce_mmr->ce_adm_force_int);
 }
 
 /**
@@ -686,6 +687,7 @@ tioce_target_interrupt(struct sn_irq_info *sn_irq_info)
 	struct tioce_common *ce_common;
 	struct tioce *ce_mmr;
 	int bit;
+	uint64_t vector;
 
 	pcidev_info = (struct pcidev_info *)sn_irq_info->irq_pciioinfo;
 	if (!pcidev_info)
@@ -696,11 +698,11 @@ tioce_target_interrupt(struct sn_irq_info *sn_irq_info)
 
 	bit = sn_irq_info->irq_int_bit;
 
-	ce_mmr->ce_adm_int_mask |= (1UL << bit);
-	ce_mmr->ce_adm_int_dest[bit] =
-	    ((uint64_t)sn_irq_info->irq_irq << INTR_VECTOR_SHFT) |
-	    sn_irq_info->irq_xtalkaddr;
-	ce_mmr->ce_adm_int_mask &= ~(1UL << bit);
+	__sn_setq_relaxed(&ce_mmr->ce_adm_int_mask, (1UL << bit));
+	vector = (uint64_t)sn_irq_info->irq_irq << INTR_VECTOR_SHFT;
+	vector |= sn_irq_info->irq_xtalkaddr;
+	writeq(vector, &ce_mmr->ce_adm_int_dest[bit]);
+	__sn_clrq_relaxed(&ce_mmr->ce_adm_int_mask, (1UL << bit));
 
 	tioce_force_interrupt(sn_irq_info);
 }
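
Reviewer notes (not part of the patch):

1) The new swiotlb_late_init_with_default_size() unwinds partial setup with the
kernel's goto-label idiom: each allocation gets a matching cleanup label, and a
failure jumps to the label that releases everything acquired so far, in reverse
order, before returning -ENOMEM so callers such as hwsw_init() and sba_init()
can fall back or panic. Below is a minimal, self-contained sketch of that idiom
in plain C; the function and buffer names are hypothetical, not taken from the
patch.

    #include <stdio.h>
    #include <stdlib.h>

    /*
     * Sketch of the goto-unwind error-handling pattern used by
     * swiotlb_late_init_with_default_size(): on failure, jump to the
     * label that frees everything acquired so far, in reverse order.
     * All names here are hypothetical.
     */
    static int setup_three_buffers(size_t n)
    {
            char *list = NULL, *orig = NULL, *overflow = NULL;

            list = malloc(n);
            if (!list)
                    goto cleanup1;

            orig = malloc(n);
            if (!orig)
                    goto cleanup2;

            overflow = malloc(n);
            if (!overflow)
                    goto cleanup3;

            printf("all three buffers allocated\n");
            free(overflow);
            free(orig);
            free(list);
            return 0;

    cleanup3:
            free(orig);     /* undo the second allocation */
    cleanup2:
            free(list);     /* undo the first allocation */
    cleanup1:
            return -1;      /* caller falls back, as sba_init() does */
    }

    int main(void)
    {
            return setup_three_buffers(1 << 20) ? EXIT_FAILURE : EXIT_SUCCESS;
    }

The payoff is a single success path and exactly one release per acquired
resource, no matter where setup fails.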
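2) Deleting arch/ia64/lib/dec_and_lock.c (and its HAVE_DEC_LOCK Kconfig symbol)
means ia64 now uses the kernel's generic atomic_dec_and_lock(). The trick the
removed file implemented — decrement lock-free unless the count might reach
zero, and only then take the spinlock — can be sketched in portable C11 atomics
roughly as follows. This is an illustration of the idea under that assumption,
not the kernel's generic implementation.

    #include <stdatomic.h>
    #include <pthread.h>
    #include <stdbool.h>

    /*
     * Sketch of the cmpxchg-based dec-and-lock: decrement a refcount
     * without the lock on the fast path; if the count would reach zero,
     * take the lock first so zero is only ever observed under the lock.
     */
    static bool dec_and_lock(atomic_int *refcount, pthread_mutex_t *lock)
    {
            int old = atomic_load(refcount);

            for (;;) {
                    if (old == 1)
                            break;  /* may hit zero: do it the slow way */
                    /* fast path: CAS the decrement in, no lock taken */
                    if (atomic_compare_exchange_weak(refcount, &old, old - 1))
                            return false;
                    /* 'old' was reloaded by the failed CAS; retry */
            }

            pthread_mutex_lock(lock);
            if (atomic_fetch_sub(refcount, 1) == 1)
                    return true;    /* hit zero; caller holds the lock */
            pthread_mutex_unlock(lock);
            return false;
    }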
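3) The SN2 changes (pcibr_reg.c, tioca_provider.c, tioce_provider.c)
systematically replace direct loads and stores through volatile register
structs with writeq()/__sn_readq_relaxed()/__sn_setq_relaxed()-style accessors
from <asm/sn/io.h>. Routing every device access through an accessor guarantees
a real, correctly sized load or store and gives the platform one place to hang
its ordering rules. A stripped-down sketch of such accessors follows; the my_
prefix marks these as illustrative, and the real SN2 helpers do more than this.

    #include <stdint.h>

    /*
     * Minimal readq/writeq-style MMIO accessors plus the set/clear
     * helpers the SN2 code switches to.  The volatile access shows only
     * the core idea: never let the compiler cache, fold, or resize
     * device-register loads and stores.
     */
    static inline uint64_t my_readq(const volatile uint64_t *reg)
    {
            return *reg;                    /* always a real 64-bit load */
    }

    static inline void my_writeq(uint64_t val, volatile uint64_t *reg)
    {
            *reg = val;                     /* always a real 64-bit store */
    }

    static inline void my_setq(volatile uint64_t *reg, uint64_t bits)
    {
            my_writeq(my_readq(reg) | bits, reg);   /* read-modify-write */
    }

    static inline void my_clrq(volatile uint64_t *reg, uint64_t bits)
    {
            my_writeq(my_readq(reg) & ~bits, reg);
    }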
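4) The perfmon.c hunk moves the files_fdtable() lookup in pfm_free_fd() inside
file_lock: the fdtable pointer can be swapped by a concurrent resize, so a
value read before taking the lock may be stale by the time it is dereferenced.
The general shape of that fix — re-read a shared pointer only after acquiring
the lock its writers hold — looks like the sketch below, which uses hypothetical
names and user-space locking purely for illustration.

    #include <pthread.h>

    /*
     * Sketch of the pfm_free_fd() race fix: 'current_table' may be
     * replaced by a concurrent resize, so it must be re-read *after*
     * taking the lock that writers hold while swapping it.
     */
    struct table { int *slots; int n; };

    static pthread_mutex_t table_lock = PTHREAD_MUTEX_INITIALIZER;
    static struct table *current_table;

    static void clear_slot(int i)
    {
            struct table *t;

            pthread_mutex_lock(&table_lock);
            t = current_table;      /* safe: resizers also hold table_lock */
            if (t && i < t->n)
                    t->slots[i] = 0;
            pthread_mutex_unlock(&table_lock);
    }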