Diffstat (limited to 'arch/sparc/kernel')
33 files changed, 1167 insertions, 518 deletions
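[Editorial note] The bulk of this diff moves sparc's bus-specific DMA accessors (sbus_*, pci_*) behind the generic struct dma_map_ops table, so the arch exports a single dma_ops pointer instead of per-bus entry points. Below is a minimal, self-contained user-space model of that dispatch pattern; the field and function names mirror the diff, but struct device, the static pool, and the fake bus address are simplified stand-ins, not the kernel's definitions (the real alloc_coherent also takes a gfp_t, and the real dma_ops is non-const).

    #include <stdio.h>
    #include <stddef.h>

    typedef unsigned long long dma_addr_t;
    struct device { const char *bus; };

    struct dma_map_ops {
            void *(*alloc_coherent)(struct device *dev, size_t len,
                                    dma_addr_t *dma_handle);
            void (*free_coherent)(struct device *dev, size_t len,
                                  void *cpu_addr, dma_addr_t dma_handle);
    };

    /* One backend, sbus-flavoured: hands out a static pool and a fake
     * bus address so the example runs anywhere. */
    static char pool[4096];

    static void *sbus_alloc_coherent(struct device *dev, size_t len,
                                     dma_addr_t *dma_handle)
    {
            *dma_handle = (dma_addr_t)(size_t)pool;
            return len <= sizeof(pool) ? pool : NULL;
    }

    static void sbus_free_coherent(struct device *dev, size_t len,
                                   void *cpu_addr, dma_addr_t dma_handle)
    {
            /* nothing to reclaim for the static pool */
    }

    static const struct dma_map_ops sbus_dma_ops = {
            .alloc_coherent = sbus_alloc_coherent,
            .free_coherent  = sbus_free_coherent,
    };

    /* The single pointer the patched ioport.c exports; callers dispatch
     * through it without naming the bus. */
    static const struct dma_map_ops *dma_ops = &sbus_dma_ops;

    int main(void)
    {
            struct device dev = { "sbus" };
            dma_addr_t ba;
            void *va = dma_ops->alloc_coherent(&dev, 256, &ba);

            printf("cpu va %p, bus addr %#llx\n", va, ba);
            dma_ops->free_coherent(&dev, 256, va, ba);
            return 0;
    }

On sparc64 the same table shape is filled in by sun4u_dma_ops (iommu.c) and sun4v_dma_ops (pci_sun4v.c) in the hunks that follow.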
diff --git a/arch/sparc/kernel/Makefile b/arch/sparc/kernel/Makefile index 475ce4696ac..247cc620cee 100644 --- a/arch/sparc/kernel/Makefile +++ b/arch/sparc/kernel/Makefile @@ -41,6 +41,8 @@ obj-y += of_device_common.o obj-y += of_device_$(BITS).o obj-$(CONFIG_SPARC64) += prom_irqtrans.o +obj-$(CONFIG_SPARC_LEON)+= leon_kernel.o + obj-$(CONFIG_SPARC64) += reboot.o obj-$(CONFIG_SPARC64) += sysfs.o obj-$(CONFIG_SPARC64) += iommu.o @@ -61,7 +63,7 @@ obj-$(CONFIG_SPARC64_SMP) += cpumap.o obj-$(CONFIG_SPARC32) += devres.o devres-y := ../../../kernel/irq/devres.o -obj-$(CONFIG_SPARC32) += dma.o +obj-y += dma.o obj-$(CONFIG_SPARC32_PCI) += pcic.o @@ -101,3 +103,6 @@ obj-$(CONFIG_SUN_LDOMS) += ldc.o vio.o viohs.o ds.o obj-$(CONFIG_AUDIT) += audit.o audit--$(CONFIG_AUDIT) := compat_audit.o obj-$(CONFIG_COMPAT) += $(audit--y) + +pc--$(CONFIG_PERF_COUNTERS) := perf_counter.o +obj-$(CONFIG_SPARC64) += $(pc--y) diff --git a/arch/sparc/kernel/cpu.c b/arch/sparc/kernel/cpu.c index d85c3dc4953..1446df90ef8 100644 --- a/arch/sparc/kernel/cpu.c +++ b/arch/sparc/kernel/cpu.c @@ -312,7 +312,12 @@ void __cpuinit cpu_probe(void) psr = get_psr(); put_psr(psr | PSR_EF); +#ifdef CONFIG_SPARC_LEON + fpu_vers = 7; +#else fpu_vers = ((get_fsr() >> 17) & 0x7); +#endif + put_psr(psr); set_cpu_and_fpu(psr_impl, psr_vers, fpu_vers); diff --git a/arch/sparc/kernel/dma.c b/arch/sparc/kernel/dma.c index 524c32f97c5..e1ba8ee21b9 100644 --- a/arch/sparc/kernel/dma.c +++ b/arch/sparc/kernel/dma.c @@ -1,178 +1,13 @@ -/* dma.c: PCI and SBUS DMA accessors for 32-bit sparc. - * - * Copyright (C) 2008 David S. Miller <davem@davemloft.net> - */ - #include <linux/kernel.h> #include <linux/module.h> #include <linux/dma-mapping.h> -#include <linux/scatterlist.h> -#include <linux/mm.h> - -#ifdef CONFIG_PCI -#include <linux/pci.h> -#endif +#include <linux/dma-debug.h> -#include "dma.h" +#define PREALLOC_DMA_DEBUG_ENTRIES (1 << 15) -int dma_supported(struct device *dev, u64 mask) +static int __init dma_init(void) { -#ifdef CONFIG_PCI - if (dev->bus == &pci_bus_type) - return pci_dma_supported(to_pci_dev(dev), mask); -#endif + dma_debug_init(PREALLOC_DMA_DEBUG_ENTRIES); return 0; } -EXPORT_SYMBOL(dma_supported); - -int dma_set_mask(struct device *dev, u64 dma_mask) -{ -#ifdef CONFIG_PCI - if (dev->bus == &pci_bus_type) - return pci_set_dma_mask(to_pci_dev(dev), dma_mask); -#endif - return -EOPNOTSUPP; -} -EXPORT_SYMBOL(dma_set_mask); - -static void *dma32_alloc_coherent(struct device *dev, size_t size, - dma_addr_t *dma_handle, gfp_t flag) -{ -#ifdef CONFIG_PCI - if (dev->bus == &pci_bus_type) - return pci_alloc_consistent(to_pci_dev(dev), size, dma_handle); -#endif - return sbus_alloc_consistent(dev, size, dma_handle); -} - -static void dma32_free_coherent(struct device *dev, size_t size, - void *cpu_addr, dma_addr_t dma_handle) -{ -#ifdef CONFIG_PCI - if (dev->bus == &pci_bus_type) { - pci_free_consistent(to_pci_dev(dev), size, - cpu_addr, dma_handle); - return; - } -#endif - sbus_free_consistent(dev, size, cpu_addr, dma_handle); -} - -static dma_addr_t dma32_map_page(struct device *dev, struct page *page, - unsigned long offset, size_t size, - enum dma_data_direction direction) -{ -#ifdef CONFIG_PCI - if (dev->bus == &pci_bus_type) - return pci_map_page(to_pci_dev(dev), page, offset, - size, (int)direction); -#endif - return sbus_map_single(dev, page_address(page) + offset, - size, (int)direction); -} - -static void dma32_unmap_page(struct device *dev, dma_addr_t dma_address, - size_t size, enum dma_data_direction direction) -{ -#ifdef 
CONFIG_PCI - if (dev->bus == &pci_bus_type) { - pci_unmap_page(to_pci_dev(dev), dma_address, - size, (int)direction); - return; - } -#endif - sbus_unmap_single(dev, dma_address, size, (int)direction); -} - -static int dma32_map_sg(struct device *dev, struct scatterlist *sg, - int nents, enum dma_data_direction direction) -{ -#ifdef CONFIG_PCI - if (dev->bus == &pci_bus_type) - return pci_map_sg(to_pci_dev(dev), sg, nents, (int)direction); -#endif - return sbus_map_sg(dev, sg, nents, direction); -} - -void dma32_unmap_sg(struct device *dev, struct scatterlist *sg, - int nents, enum dma_data_direction direction) -{ -#ifdef CONFIG_PCI - if (dev->bus == &pci_bus_type) { - pci_unmap_sg(to_pci_dev(dev), sg, nents, (int)direction); - return; - } -#endif - sbus_unmap_sg(dev, sg, nents, (int)direction); -} - -static void dma32_sync_single_for_cpu(struct device *dev, dma_addr_t dma_handle, - size_t size, - enum dma_data_direction direction) -{ -#ifdef CONFIG_PCI - if (dev->bus == &pci_bus_type) { - pci_dma_sync_single_for_cpu(to_pci_dev(dev), dma_handle, - size, (int)direction); - return; - } -#endif - sbus_dma_sync_single_for_cpu(dev, dma_handle, size, (int) direction); -} - -static void dma32_sync_single_for_device(struct device *dev, - dma_addr_t dma_handle, size_t size, - enum dma_data_direction direction) -{ -#ifdef CONFIG_PCI - if (dev->bus == &pci_bus_type) { - pci_dma_sync_single_for_device(to_pci_dev(dev), dma_handle, - size, (int)direction); - return; - } -#endif - sbus_dma_sync_single_for_device(dev, dma_handle, size, (int) direction); -} - -static void dma32_sync_sg_for_cpu(struct device *dev, struct scatterlist *sg, - int nelems, enum dma_data_direction direction) -{ -#ifdef CONFIG_PCI - if (dev->bus == &pci_bus_type) { - pci_dma_sync_sg_for_cpu(to_pci_dev(dev), sg, - nelems, (int)direction); - return; - } -#endif - BUG(); -} - -static void dma32_sync_sg_for_device(struct device *dev, - struct scatterlist *sg, int nelems, - enum dma_data_direction direction) -{ -#ifdef CONFIG_PCI - if (dev->bus == &pci_bus_type) { - pci_dma_sync_sg_for_device(to_pci_dev(dev), sg, - nelems, (int)direction); - return; - } -#endif - BUG(); -} - -static const struct dma_ops dma32_dma_ops = { - .alloc_coherent = dma32_alloc_coherent, - .free_coherent = dma32_free_coherent, - .map_page = dma32_map_page, - .unmap_page = dma32_unmap_page, - .map_sg = dma32_map_sg, - .unmap_sg = dma32_unmap_sg, - .sync_single_for_cpu = dma32_sync_single_for_cpu, - .sync_single_for_device = dma32_sync_single_for_device, - .sync_sg_for_cpu = dma32_sync_sg_for_cpu, - .sync_sg_for_device = dma32_sync_sg_for_device, -}; - -const struct dma_ops *dma_ops = &dma32_dma_ops; -EXPORT_SYMBOL(dma_ops); +fs_initcall(dma_init); diff --git a/arch/sparc/kernel/dma.h b/arch/sparc/kernel/dma.h deleted file mode 100644 index f8d8951adb5..00000000000 --- a/arch/sparc/kernel/dma.h +++ /dev/null @@ -1,14 +0,0 @@ -void *sbus_alloc_consistent(struct device *dev, long len, u32 *dma_addrp); -void sbus_free_consistent(struct device *dev, long n, void *p, u32 ba); -dma_addr_t sbus_map_single(struct device *dev, void *va, - size_t len, int direction); -void sbus_unmap_single(struct device *dev, dma_addr_t ba, - size_t n, int direction); -int sbus_map_sg(struct device *dev, struct scatterlist *sg, - int n, int direction); -void sbus_unmap_sg(struct device *dev, struct scatterlist *sg, - int n, int direction); -void sbus_dma_sync_single_for_cpu(struct device *dev, dma_addr_t ba, - size_t size, int direction); -void sbus_dma_sync_single_for_device(struct device 
*dev, dma_addr_t ba, - size_t size, int direction); diff --git a/arch/sparc/kernel/head_32.S b/arch/sparc/kernel/head_32.S index 6b4d8acc4c8..439d82a95ac 100644 --- a/arch/sparc/kernel/head_32.S +++ b/arch/sparc/kernel/head_32.S @@ -809,6 +809,11 @@ found_version: nop got_prop: +#ifdef CONFIG_SPARC_LEON + /* no cpu-type check is needed, it is a SPARC-LEON */ + ba sun4c_continue_boot + nop +#endif set cputypval, %o2 ldub [%o2 + 0x4], %l1 diff --git a/arch/sparc/kernel/idprom.c b/arch/sparc/kernel/idprom.c index 57922f69c3f..52a15fe2db1 100644 --- a/arch/sparc/kernel/idprom.c +++ b/arch/sparc/kernel/idprom.c @@ -31,6 +31,8 @@ static struct Sun_Machine_Models Sun_Machines[NUM_SUN_MACHINES] = { { .name = "Sun 4/200 Series", .id_machtype = (SM_SUN4 | SM_4_260) }, { .name = "Sun 4/300 Series", .id_machtype = (SM_SUN4 | SM_4_330) }, { .name = "Sun 4/400 Series", .id_machtype = (SM_SUN4 | SM_4_470) }, +/* Now Leon */ +{ .name = "Leon3 System-on-a-Chip", .id_machtype = (M_LEON | M_LEON3_SOC) }, /* Now, Sun4c's */ { .name = "Sun4c SparcStation 1", .id_machtype = (SM_SUN4C | SM_4C_SS1) }, { .name = "Sun4c SparcStation IPC", .id_machtype = (SM_SUN4C | SM_4C_IPC) }, diff --git a/arch/sparc/kernel/iommu.c b/arch/sparc/kernel/iommu.c index 0aeaefe696b..7690cc219ec 100644 --- a/arch/sparc/kernel/iommu.c +++ b/arch/sparc/kernel/iommu.c @@ -353,7 +353,8 @@ static void dma_4u_free_coherent(struct device *dev, size_t size, static dma_addr_t dma_4u_map_page(struct device *dev, struct page *page, unsigned long offset, size_t sz, - enum dma_data_direction direction) + enum dma_data_direction direction, + struct dma_attrs *attrs) { struct iommu *iommu; struct strbuf *strbuf; @@ -474,7 +475,8 @@ do_flush_sync: } static void dma_4u_unmap_page(struct device *dev, dma_addr_t bus_addr, - size_t sz, enum dma_data_direction direction) + size_t sz, enum dma_data_direction direction, + struct dma_attrs *attrs) { struct iommu *iommu; struct strbuf *strbuf; @@ -520,7 +522,8 @@ static void dma_4u_unmap_page(struct device *dev, dma_addr_t bus_addr, } static int dma_4u_map_sg(struct device *dev, struct scatterlist *sglist, - int nelems, enum dma_data_direction direction) + int nelems, enum dma_data_direction direction, + struct dma_attrs *attrs) { struct scatterlist *s, *outs, *segstart; unsigned long flags, handle, prot, ctx; @@ -691,7 +694,8 @@ static unsigned long fetch_sg_ctx(struct iommu *iommu, struct scatterlist *sg) } static void dma_4u_unmap_sg(struct device *dev, struct scatterlist *sglist, - int nelems, enum dma_data_direction direction) + int nelems, enum dma_data_direction direction, + struct dma_attrs *attrs) { unsigned long flags, ctx; struct scatterlist *sg; @@ -822,7 +826,7 @@ static void dma_4u_sync_sg_for_cpu(struct device *dev, spin_unlock_irqrestore(&iommu->lock, flags); } -static const struct dma_ops sun4u_dma_ops = { +static struct dma_map_ops sun4u_dma_ops = { .alloc_coherent = dma_4u_alloc_coherent, .free_coherent = dma_4u_free_coherent, .map_page = dma_4u_map_page, @@ -833,9 +837,11 @@ static const struct dma_ops sun4u_dma_ops = { .sync_sg_for_cpu = dma_4u_sync_sg_for_cpu, }; -const struct dma_ops *dma_ops = &sun4u_dma_ops; +struct dma_map_ops *dma_ops = &sun4u_dma_ops; EXPORT_SYMBOL(dma_ops); +extern int pci64_dma_supported(struct pci_dev *pdev, u64 device_mask); + int dma_supported(struct device *dev, u64 device_mask) { struct iommu *iommu = dev->archdata.iommu; @@ -849,7 +855,7 @@ int dma_supported(struct device *dev, u64 device_mask) #ifdef CONFIG_PCI if (dev->bus == &pci_bus_type) - return 
pci_dma_supported(to_pci_dev(dev), device_mask); + return pci64_dma_supported(to_pci_dev(dev), device_mask); #endif return 0; diff --git a/arch/sparc/kernel/ioport.c b/arch/sparc/kernel/ioport.c index 87ea0d03d97..9f61fd8cbb7 100644 --- a/arch/sparc/kernel/ioport.c +++ b/arch/sparc/kernel/ioport.c @@ -35,6 +35,7 @@ #include <linux/slab.h> #include <linux/pci.h> /* struct pci_dev */ #include <linux/proc_fs.h> +#include <linux/seq_file.h> #include <linux/scatterlist.h> #include <linux/of_device.h> @@ -48,8 +49,6 @@ #include <asm/iommu.h> #include <asm/io-unit.h> -#include "dma.h" - #define mmu_inval_dma_area(p, l) /* Anton pulled it out for 2.4.0-xx */ static struct resource *_sparc_find_resource(struct resource *r, @@ -246,7 +245,8 @@ EXPORT_SYMBOL(sbus_set_sbus64); * Typically devices use them for control blocks. * CPU may access them without any explicit flushing. */ -void *sbus_alloc_consistent(struct device *dev, long len, u32 *dma_addrp) +static void *sbus_alloc_coherent(struct device *dev, size_t len, + dma_addr_t *dma_addrp, gfp_t gfp) { struct of_device *op = to_of_device(dev); unsigned long len_total = (len + PAGE_SIZE-1) & PAGE_MASK; @@ -299,7 +299,8 @@ err_nopages: return NULL; } -void sbus_free_consistent(struct device *dev, long n, void *p, u32 ba) +static void sbus_free_coherent(struct device *dev, size_t n, void *p, + dma_addr_t ba) { struct resource *res; struct page *pgv; @@ -317,7 +318,7 @@ void sbus_free_consistent(struct device *dev, long n, void *p, u32 ba) n = (n + PAGE_SIZE-1) & PAGE_MASK; if ((res->end-res->start)+1 != n) { - printk("sbus_free_consistent: region 0x%lx asked 0x%lx\n", + printk("sbus_free_consistent: region 0x%lx asked 0x%zx\n", (long)((res->end-res->start)+1), n); return; } @@ -337,8 +338,13 @@ void sbus_free_consistent(struct device *dev, long n, void *p, u32 ba) * CPU view of this memory may be inconsistent with * a device view and explicit flushing is necessary. */ -dma_addr_t sbus_map_single(struct device *dev, void *va, size_t len, int direction) +static dma_addr_t sbus_map_page(struct device *dev, struct page *page, + unsigned long offset, size_t len, + enum dma_data_direction dir, + struct dma_attrs *attrs) { + void *va = page_address(page) + offset; + /* XXX why are some lengths signed, others unsigned? 
*/ if (len <= 0) { return 0; @@ -350,12 +356,14 @@ dma_addr_t sbus_map_single(struct device *dev, void *va, size_t len, int directi return mmu_get_scsi_one(dev, va, len); } -void sbus_unmap_single(struct device *dev, dma_addr_t ba, size_t n, int direction) +static void sbus_unmap_page(struct device *dev, dma_addr_t ba, size_t n, + enum dma_data_direction dir, struct dma_attrs *attrs) { mmu_release_scsi_one(dev, ba, n); } -int sbus_map_sg(struct device *dev, struct scatterlist *sg, int n, int direction) +static int sbus_map_sg(struct device *dev, struct scatterlist *sg, int n, + enum dma_data_direction dir, struct dma_attrs *attrs) { mmu_get_scsi_sgl(dev, sg, n); @@ -366,19 +374,38 @@ int sbus_map_sg(struct device *dev, struct scatterlist *sg, int n, int direction return n; } -void sbus_unmap_sg(struct device *dev, struct scatterlist *sg, int n, int direction) +static void sbus_unmap_sg(struct device *dev, struct scatterlist *sg, int n, + enum dma_data_direction dir, struct dma_attrs *attrs) { mmu_release_scsi_sgl(dev, sg, n); } -void sbus_dma_sync_single_for_cpu(struct device *dev, dma_addr_t ba, size_t size, int direction) +static void sbus_sync_sg_for_cpu(struct device *dev, struct scatterlist *sg, + int n, enum dma_data_direction dir) { + BUG(); } -void sbus_dma_sync_single_for_device(struct device *dev, dma_addr_t ba, size_t size, int direction) +static void sbus_sync_sg_for_device(struct device *dev, struct scatterlist *sg, + int n, enum dma_data_direction dir) { + BUG(); } +struct dma_map_ops sbus_dma_ops = { + .alloc_coherent = sbus_alloc_coherent, + .free_coherent = sbus_free_coherent, + .map_page = sbus_map_page, + .unmap_page = sbus_unmap_page, + .map_sg = sbus_map_sg, + .unmap_sg = sbus_unmap_sg, + .sync_sg_for_cpu = sbus_sync_sg_for_cpu, + .sync_sg_for_device = sbus_sync_sg_for_device, +}; + +struct dma_map_ops *dma_ops = &sbus_dma_ops; +EXPORT_SYMBOL(dma_ops); + static int __init sparc_register_ioport(void) { register_proc_sparc_ioport(); @@ -395,7 +422,8 @@ arch_initcall(sparc_register_ioport); /* Allocate and map kernel buffer using consistent mode DMA for a device. * hwdev should be valid struct pci_dev pointer for PCI devices. */ -void *pci_alloc_consistent(struct pci_dev *pdev, size_t len, dma_addr_t *pba) +static void *pci32_alloc_coherent(struct device *dev, size_t len, + dma_addr_t *pba, gfp_t gfp) { unsigned long len_total = (len + PAGE_SIZE-1) & PAGE_MASK; unsigned long va; @@ -439,7 +467,6 @@ void *pci_alloc_consistent(struct pci_dev *pdev, size_t len, dma_addr_t *pba) *pba = virt_to_phys(va); /* equals virt_to_bus (R.I.P.) for us. */ return (void *) res->start; } -EXPORT_SYMBOL(pci_alloc_consistent); /* Free and unmap a consistent DMA buffer. * cpu_addr is what was returned from pci_alloc_consistent, @@ -449,7 +476,8 @@ EXPORT_SYMBOL(pci_alloc_consistent); * References to the memory and mappings associated with cpu_addr/dma_addr * past this call are illegal. */ -void pci_free_consistent(struct pci_dev *pdev, size_t n, void *p, dma_addr_t ba) +static void pci32_free_coherent(struct device *dev, size_t n, void *p, + dma_addr_t ba) { struct resource *res; unsigned long pgp; @@ -481,60 +509,18 @@ void pci_free_consistent(struct pci_dev *pdev, size_t n, void *p, dma_addr_t ba) free_pages(pgp, get_order(n)); } -EXPORT_SYMBOL(pci_free_consistent); - -/* Map a single buffer of the indicated size for DMA in streaming mode. - * The 32-bit bus address to use is returned. 
- * - * Once the device is given the dma address, the device owns this memory - * until either pci_unmap_single or pci_dma_sync_single_* is performed. - */ -dma_addr_t pci_map_single(struct pci_dev *hwdev, void *ptr, size_t size, - int direction) -{ - BUG_ON(direction == PCI_DMA_NONE); - /* IIep is write-through, not flushing. */ - return virt_to_phys(ptr); -} -EXPORT_SYMBOL(pci_map_single); - -/* Unmap a single streaming mode DMA translation. The dma_addr and size - * must match what was provided for in a previous pci_map_single call. All - * other usages are undefined. - * - * After this call, reads by the cpu to the buffer are guaranteed to see - * whatever the device wrote there. - */ -void pci_unmap_single(struct pci_dev *hwdev, dma_addr_t ba, size_t size, - int direction) -{ - BUG_ON(direction == PCI_DMA_NONE); - if (direction != PCI_DMA_TODEVICE) { - mmu_inval_dma_area((unsigned long)phys_to_virt(ba), - (size + PAGE_SIZE-1) & PAGE_MASK); - } -} -EXPORT_SYMBOL(pci_unmap_single); /* * Same as pci_map_single, but with pages. */ -dma_addr_t pci_map_page(struct pci_dev *hwdev, struct page *page, - unsigned long offset, size_t size, int direction) +static dma_addr_t pci32_map_page(struct device *dev, struct page *page, + unsigned long offset, size_t size, + enum dma_data_direction dir, + struct dma_attrs *attrs) { - BUG_ON(direction == PCI_DMA_NONE); /* IIep is write-through, not flushing. */ return page_to_phys(page) + offset; } -EXPORT_SYMBOL(pci_map_page); - -void pci_unmap_page(struct pci_dev *hwdev, - dma_addr_t dma_address, size_t size, int direction) -{ - BUG_ON(direction == PCI_DMA_NONE); - /* mmu_inval_dma_area XXX */ -} -EXPORT_SYMBOL(pci_unmap_page); /* Map a set of buffers described by scatterlist in streaming * mode for DMA. This is the scather-gather version of the @@ -551,13 +537,13 @@ EXPORT_SYMBOL(pci_unmap_page); * Device ownership issues as mentioned above for pci_map_single are * the same here. */ -int pci_map_sg(struct pci_dev *hwdev, struct scatterlist *sgl, int nents, - int direction) +static int pci32_map_sg(struct device *device, struct scatterlist *sgl, + int nents, enum dma_data_direction dir, + struct dma_attrs *attrs) { struct scatterlist *sg; int n; - BUG_ON(direction == PCI_DMA_NONE); /* IIep is write-through, not flushing. */ for_each_sg(sgl, sg, nents, n) { BUG_ON(page_address(sg_page(sg)) == NULL); @@ -566,20 +552,19 @@ int pci_map_sg(struct pci_dev *hwdev, struct scatterlist *sgl, int nents, } return nents; } -EXPORT_SYMBOL(pci_map_sg); /* Unmap a set of streaming mode DMA translations. * Again, cpu read rules concerning calls here are the same as for * pci_unmap_single() above. */ -void pci_unmap_sg(struct pci_dev *hwdev, struct scatterlist *sgl, int nents, - int direction) +static void pci32_unmap_sg(struct device *dev, struct scatterlist *sgl, + int nents, enum dma_data_direction dir, + struct dma_attrs *attrs) { struct scatterlist *sg; int n; - BUG_ON(direction == PCI_DMA_NONE); - if (direction != PCI_DMA_TODEVICE) { + if (dir != PCI_DMA_TODEVICE) { for_each_sg(sgl, sg, nents, n) { BUG_ON(page_address(sg_page(sg)) == NULL); mmu_inval_dma_area( @@ -588,7 +573,6 @@ void pci_unmap_sg(struct pci_dev *hwdev, struct scatterlist *sgl, int nents, } } } -EXPORT_SYMBOL(pci_unmap_sg); /* Make physical memory consistent for a single * streaming mode DMA translation before or after a transfer. @@ -600,25 +584,23 @@ EXPORT_SYMBOL(pci_unmap_sg); * must first perform a pci_dma_sync_for_device, and then the * device again owns the buffer. 
*/ -void pci_dma_sync_single_for_cpu(struct pci_dev *hwdev, dma_addr_t ba, size_t size, int direction) +static void pci32_sync_single_for_cpu(struct device *dev, dma_addr_t ba, + size_t size, enum dma_data_direction dir) { - BUG_ON(direction == PCI_DMA_NONE); - if (direction != PCI_DMA_TODEVICE) { + if (dir != PCI_DMA_TODEVICE) { mmu_inval_dma_area((unsigned long)phys_to_virt(ba), (size + PAGE_SIZE-1) & PAGE_MASK); } } -EXPORT_SYMBOL(pci_dma_sync_single_for_cpu); -void pci_dma_sync_single_for_device(struct pci_dev *hwdev, dma_addr_t ba, size_t size, int direction) +static void pci32_sync_single_for_device(struct device *dev, dma_addr_t ba, + size_t size, enum dma_data_direction dir) { - BUG_ON(direction == PCI_DMA_NONE); - if (direction != PCI_DMA_TODEVICE) { + if (dir != PCI_DMA_TODEVICE) { mmu_inval_dma_area((unsigned long)phys_to_virt(ba), (size + PAGE_SIZE-1) & PAGE_MASK); } } -EXPORT_SYMBOL(pci_dma_sync_single_for_device); /* Make physical memory consistent for a set of streaming * mode DMA translations after a transfer. @@ -626,13 +608,13 @@ EXPORT_SYMBOL(pci_dma_sync_single_for_device); * The same as pci_dma_sync_single_* but for a scatter-gather list, * same rules and usage. */ -void pci_dma_sync_sg_for_cpu(struct pci_dev *hwdev, struct scatterlist *sgl, int nents, int direction) +static void pci32_sync_sg_for_cpu(struct device *dev, struct scatterlist *sgl, + int nents, enum dma_data_direction dir) { struct scatterlist *sg; int n; - BUG_ON(direction == PCI_DMA_NONE); - if (direction != PCI_DMA_TODEVICE) { + if (dir != PCI_DMA_TODEVICE) { for_each_sg(sgl, sg, nents, n) { BUG_ON(page_address(sg_page(sg)) == NULL); mmu_inval_dma_area( @@ -641,15 +623,14 @@ void pci_dma_sync_sg_for_cpu(struct pci_dev *hwdev, struct scatterlist *sgl, int } } } -EXPORT_SYMBOL(pci_dma_sync_sg_for_cpu); -void pci_dma_sync_sg_for_device(struct pci_dev *hwdev, struct scatterlist *sgl, int nents, int direction) +static void pci32_sync_sg_for_device(struct device *device, struct scatterlist *sgl, + int nents, enum dma_data_direction dir) { struct scatterlist *sg; int n; - BUG_ON(direction == PCI_DMA_NONE); - if (direction != PCI_DMA_TODEVICE) { + if (dir != PCI_DMA_TODEVICE) { for_each_sg(sgl, sg, nents, n) { BUG_ON(page_address(sg_page(sg)) == NULL); mmu_inval_dma_area( @@ -658,31 +639,78 @@ void pci_dma_sync_sg_for_device(struct pci_dev *hwdev, struct scatterlist *sgl, } } } -EXPORT_SYMBOL(pci_dma_sync_sg_for_device); + +struct dma_map_ops pci32_dma_ops = { + .alloc_coherent = pci32_alloc_coherent, + .free_coherent = pci32_free_coherent, + .map_page = pci32_map_page, + .map_sg = pci32_map_sg, + .unmap_sg = pci32_unmap_sg, + .sync_single_for_cpu = pci32_sync_single_for_cpu, + .sync_single_for_device = pci32_sync_single_for_device, + .sync_sg_for_cpu = pci32_sync_sg_for_cpu, + .sync_sg_for_device = pci32_sync_sg_for_device, +}; +EXPORT_SYMBOL(pci32_dma_ops); + #endif /* CONFIG_PCI */ +/* + * Return whether the given PCI device DMA address mask can be + * supported properly. For example, if your device can only drive the + * low 24-bits during PCI bus mastering, then you would pass + * 0x00ffffff as the mask to this function. 
+ */ +int dma_supported(struct device *dev, u64 mask) +{ +#ifdef CONFIG_PCI + if (dev->bus == &pci_bus_type) + return 1; +#endif + return 0; +} +EXPORT_SYMBOL(dma_supported); + +int dma_set_mask(struct device *dev, u64 dma_mask) +{ +#ifdef CONFIG_PCI + if (dev->bus == &pci_bus_type) + return pci_set_dma_mask(to_pci_dev(dev), dma_mask); +#endif + return -EOPNOTSUPP; +} +EXPORT_SYMBOL(dma_set_mask); + + #ifdef CONFIG_PROC_FS -static int -_sparc_io_get_info(char *buf, char **start, off_t fpos, int length, int *eof, - void *data) +static int sparc_io_proc_show(struct seq_file *m, void *v) { - char *p = buf, *e = buf + length; - struct resource *r; + struct resource *root = m->private, *r; const char *nm; - for (r = ((struct resource *)data)->child; r != NULL; r = r->sibling) { - if (p + 32 >= e) /* Better than nothing */ - break; + for (r = root->child; r != NULL; r = r->sibling) { if ((nm = r->name) == 0) nm = "???"; - p += sprintf(p, "%016llx-%016llx: %s\n", + seq_printf(m, "%016llx-%016llx: %s\n", (unsigned long long)r->start, (unsigned long long)r->end, nm); } - return p-buf; + return 0; } +static int sparc_io_proc_open(struct inode *inode, struct file *file) +{ + return single_open(file, sparc_io_proc_show, PDE(inode)->data); +} + +static const struct file_operations sparc_io_proc_fops = { + .owner = THIS_MODULE, + .open = sparc_io_proc_open, + .read = seq_read, + .llseek = seq_lseek, + .release = single_release, +}; #endif /* CONFIG_PROC_FS */ /* @@ -707,7 +735,7 @@ static struct resource *_sparc_find_resource(struct resource *root, static void register_proc_sparc_ioport(void) { #ifdef CONFIG_PROC_FS - create_proc_read_entry("io_map",0,NULL,_sparc_io_get_info,&sparc_iomap); - create_proc_read_entry("dvma_map",0,NULL,_sparc_io_get_info,&_sparc_dvma); + proc_create_data("io_map", 0, NULL, &sparc_io_proc_fops, &sparc_iomap); + proc_create_data("dvma_map", 0, NULL, &sparc_io_proc_fops, &_sparc_dvma); #endif } diff --git a/arch/sparc/kernel/irq_32.c b/arch/sparc/kernel/irq_32.c index ad800b80c71..e1af4372832 100644 --- a/arch/sparc/kernel/irq_32.c +++ b/arch/sparc/kernel/irq_32.c @@ -45,6 +45,7 @@ #include <asm/pcic.h> #include <asm/cacheflush.h> #include <asm/irq_regs.h> +#include <asm/leon.h> #include "kernel.h" #include "irq.h" @@ -661,6 +662,10 @@ void __init init_IRQ(void) sun4d_init_IRQ(); break; + case sparc_leon: + leon_init_IRQ(); + break; + default: prom_printf("Cannot initialize IRQs on this Sun machine..."); break; diff --git a/arch/sparc/kernel/irq_64.c b/arch/sparc/kernel/irq_64.c index f0ee7905540..8daab33fc17 100644 --- a/arch/sparc/kernel/irq_64.c +++ b/arch/sparc/kernel/irq_64.c @@ -886,7 +886,7 @@ void notrace init_irqwork_curcpu(void) * Therefore you cannot make any OBP calls, not even prom_printf, * from these two routines. */ -static void __cpuinit register_one_mondo(unsigned long paddr, unsigned long type, unsigned long qmask) +static void __cpuinit notrace register_one_mondo(unsigned long paddr, unsigned long type, unsigned long qmask) { unsigned long num_entries = (qmask + 1) / 64; unsigned long status; diff --git a/arch/sparc/kernel/ktlb.S b/arch/sparc/kernel/ktlb.S index cef8defcd7a..3ea6e8cde8c 100644 --- a/arch/sparc/kernel/ktlb.S +++ b/arch/sparc/kernel/ktlb.S @@ -151,12 +151,46 @@ kvmap_dtlb_4v: * Must preserve %g1 and %g6 (TAG). */ kvmap_dtlb_tsb4m_miss: - sethi %hi(kpte_linear_bitmap), %g2 - or %g2, %lo(kpte_linear_bitmap), %g2 + /* Clear the PAGE_OFFSET top virtual bits, shift + * down to get PFN, and make sure PFN is in range. 
+ */ + sllx %g4, 21, %g5 - /* Clear the PAGE_OFFSET top virtual bits, then shift - * down to get a 256MB physical address index. + /* Check to see if we know about valid memory at the 4MB + * chunk this physical address will reside within. */ + srlx %g5, 21 + 41, %g2 + brnz,pn %g2, kvmap_dtlb_longpath + nop + + /* This unconditional branch and delay-slot nop gets patched + * by the sethi sequence once the bitmap is properly setup. + */ + .globl valid_addr_bitmap_insn +valid_addr_bitmap_insn: + ba,pt %xcc, 2f + nop + .subsection 2 + .globl valid_addr_bitmap_patch +valid_addr_bitmap_patch: + sethi %hi(sparc64_valid_addr_bitmap), %g7 + or %g7, %lo(sparc64_valid_addr_bitmap), %g7 + .previous + + srlx %g5, 21 + 22, %g2 + srlx %g2, 6, %g5 + and %g2, 63, %g2 + sllx %g5, 3, %g5 + ldx [%g7 + %g5], %g5 + mov 1, %g7 + sllx %g7, %g2, %g7 + andcc %g5, %g7, %g0 + be,pn %xcc, kvmap_dtlb_longpath + +2: sethi %hi(kpte_linear_bitmap), %g2 + or %g2, %lo(kpte_linear_bitmap), %g2 + + /* Get the 256MB physical address index. */ sllx %g4, 21, %g5 mov 1, %g7 srlx %g5, 21 + 28, %g5 diff --git a/arch/sparc/kernel/leon_kernel.c b/arch/sparc/kernel/leon_kernel.c new file mode 100644 index 00000000000..54d8a5bd482 --- /dev/null +++ b/arch/sparc/kernel/leon_kernel.c @@ -0,0 +1,203 @@ +/* + * Copyright (C) 2009 Daniel Hellstrom (daniel@gaisler.com) Aeroflex Gaisler AB + * Copyright (C) 2009 Konrad Eisele (konrad@gaisler.com) Aeroflex Gaisler AB + */ + +#include <linux/kernel.h> +#include <linux/module.h> +#include <linux/errno.h> +#include <linux/mutex.h> +#include <linux/slab.h> +#include <linux/of.h> +#include <linux/of_platform.h> +#include <linux/interrupt.h> +#include <linux/of_device.h> +#include <asm/oplib.h> +#include <asm/timer.h> +#include <asm/prom.h> +#include <asm/leon.h> +#include <asm/leon_amba.h> + +#include "prom.h" +#include "irq.h" + +struct leon3_irqctrl_regs_map *leon3_irqctrl_regs; /* interrupt controller base address, initialized by amba_init() */ +struct leon3_gptimer_regs_map *leon3_gptimer_regs; /* timer controller base address, initialized by amba_init() */ +struct amba_apb_device leon_percpu_timer_dev[16]; + +int leondebug_irq_disable; +int leon_debug_irqout; +static int dummy_master_l10_counter; + +unsigned long leon3_gptimer_irq; /* interrupt controller irq number, initialized by amba_init() */ +unsigned int sparc_leon_eirq; +#define LEON_IMASK ((&leon3_irqctrl_regs->mask[0])) + +/* Return the IRQ of the pending IRQ on the extended IRQ controller */ +int sparc_leon_eirq_get(int eirq, int cpu) +{ + return LEON3_BYPASS_LOAD_PA(&leon3_irqctrl_regs->intid[cpu]) & 0x1f; +} + +irqreturn_t sparc_leon_eirq_isr(int dummy, void *dev_id) +{ + printk(KERN_ERR "sparc_leon_eirq_isr: ERROR EXTENDED IRQ\n"); + return IRQ_HANDLED; +} + +/* The extended IRQ controller has been found, this function registers it */ +void sparc_leon_eirq_register(int eirq) +{ + int irq; + + /* Register a "BAD" handler for this interrupt, it should never happen */ + irq = request_irq(eirq, sparc_leon_eirq_isr, + (IRQF_DISABLED | SA_STATIC_ALLOC), "extirq", NULL); + + if (irq) { + printk(KERN_ERR + "sparc_leon_eirq_register: unable to attach IRQ%d\n", + eirq); + } else { + sparc_leon_eirq = eirq; + } + +} + +static inline unsigned long get_irqmask(unsigned int irq) +{ + unsigned long mask; + + if (!irq || ((irq > 0xf) && !sparc_leon_eirq) + || ((irq > 0x1f) && sparc_leon_eirq)) { + printk(KERN_ERR + "leon_get_irqmask: false irq number: %d\n", irq); + mask = 0; + } else { + mask = LEON_HARD_INT(irq); + } + return mask; +} + 
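/* [Editorial sketch, not part of this patch] The mask handling above is
 * plain bit arithmetic.  Assuming LEON_HARD_INT(x) expands to (1 << (x)),
 * as in asm/leon.h, enable/disable reduce to:
 *
 *	unsigned long imask = 0;
 *
 *	imask |= (1UL << 3);	// leon_enable_irq(3):  set bit 3
 *	imask &= ~(1UL << 3);	// leon_disable_irq(3): clear it again
 *
 * The real functions below do the same read-modify-write through
 * LEON3_BYPASS_LOAD_PA/STORE_PA on the controller's mask register,
 * bracketed by local_irq_save() so the update cannot race an interrupt
 * on the local CPU.
 */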
+static void leon_enable_irq(unsigned int irq_nr) +{ + unsigned long mask, flags; + mask = get_irqmask(irq_nr); + local_irq_save(flags); + LEON3_BYPASS_STORE_PA(LEON_IMASK, + (LEON3_BYPASS_LOAD_PA(LEON_IMASK) | (mask))); + local_irq_restore(flags); +} + +static void leon_disable_irq(unsigned int irq_nr) +{ + unsigned long mask, flags; + mask = get_irqmask(irq_nr); + local_irq_save(flags); + LEON3_BYPASS_STORE_PA(LEON_IMASK, + (LEON3_BYPASS_LOAD_PA(LEON_IMASK) & ~(mask))); + local_irq_restore(flags); + +} + +void __init leon_init_timers(irq_handler_t counter_fn) +{ + int irq; + + leondebug_irq_disable = 0; + leon_debug_irqout = 0; + master_l10_counter = (unsigned int *)&dummy_master_l10_counter; + dummy_master_l10_counter = 0; + + if (leon3_gptimer_regs && leon3_irqctrl_regs) { + LEON3_BYPASS_STORE_PA(&leon3_gptimer_regs->e[0].val, 0); + LEON3_BYPASS_STORE_PA(&leon3_gptimer_regs->e[0].rld, + (((1000000 / 100) - 1))); + LEON3_BYPASS_STORE_PA(&leon3_gptimer_regs->e[0].ctrl, 0); + + } else { + printk(KERN_ERR "No Timer/irqctrl found\n"); + BUG(); + } + + irq = request_irq(leon3_gptimer_irq, + counter_fn, + (IRQF_DISABLED | SA_STATIC_ALLOC), "timer", NULL); + + if (irq) { + printk(KERN_ERR "leon_time_init: unable to attach IRQ%d\n", + LEON_INTERRUPT_TIMER1); + prom_halt(); + } + + if (leon3_gptimer_regs) { + LEON3_BYPASS_STORE_PA(&leon3_gptimer_regs->e[0].ctrl, + LEON3_GPTIMER_EN | + LEON3_GPTIMER_RL | + LEON3_GPTIMER_LD | LEON3_GPTIMER_IRQEN); + } +} + +void leon_clear_clock_irq(void) +{ +} + +void leon_load_profile_irq(int cpu, unsigned int limit) +{ + BUG(); +} + + + + +void __init leon_trans_init(struct device_node *dp) +{ + if (strcmp(dp->type, "cpu") == 0 && strcmp(dp->name, "<NULL>") == 0) { + struct property *p; + p = of_find_property(dp, "mid", (void *)0); + if (p) { + int mid; + dp->name = prom_early_alloc(5 + 1); + memcpy(&mid, p->value, p->length); + sprintf((char *)dp->name, "cpu%.2d", mid); + } + } +} + +void __initdata (*prom_amba_init)(struct device_node *dp, struct device_node ***nextp) = 0; + +void __init leon_node_init(struct device_node *dp, struct device_node ***nextp) +{ + if (prom_amba_init && + strcmp(dp->type, "ambapp") == 0 && + strcmp(dp->name, "ambapp0") == 0) { + prom_amba_init(dp, nextp); + } +} + +void __init leon_init_IRQ(void) +{ + sparc_init_timers = leon_init_timers; + + BTFIXUPSET_CALL(enable_irq, leon_enable_irq, BTFIXUPCALL_NORM); + BTFIXUPSET_CALL(disable_irq, leon_disable_irq, BTFIXUPCALL_NORM); + BTFIXUPSET_CALL(enable_pil_irq, leon_enable_irq, BTFIXUPCALL_NORM); + BTFIXUPSET_CALL(disable_pil_irq, leon_disable_irq, BTFIXUPCALL_NORM); + + BTFIXUPSET_CALL(clear_clock_irq, leon_clear_clock_irq, + BTFIXUPCALL_NORM); + BTFIXUPSET_CALL(load_profile_irq, leon_load_profile_irq, + BTFIXUPCALL_NOP); + +#ifdef CONFIG_SMP + BTFIXUPSET_CALL(set_cpu_int, leon_set_cpu_int, BTFIXUPCALL_NORM); + BTFIXUPSET_CALL(clear_cpu_int, leon_clear_ipi, BTFIXUPCALL_NORM); + BTFIXUPSET_CALL(set_irq_udt, leon_set_udt, BTFIXUPCALL_NORM); +#endif + +} + +void __init leon_init(void) +{ + prom_build_more = &leon_node_init; +} diff --git a/arch/sparc/kernel/nmi.c b/arch/sparc/kernel/nmi.c index 2c0cc72d295..378eb53e077 100644 --- a/arch/sparc/kernel/nmi.c +++ b/arch/sparc/kernel/nmi.c @@ -19,6 +19,7 @@ #include <linux/delay.h> #include <linux/smp.h> +#include <asm/perf_counter.h> #include <asm/ptrace.h> #include <asm/local.h> #include <asm/pcr.h> @@ -31,13 +32,19 @@ * level 14 as our IRQ off level. 
*/ -static int nmi_watchdog_active; static int panic_on_timeout; -int nmi_usable; -EXPORT_SYMBOL_GPL(nmi_usable); +/* nmi_active: + * >0: the NMI watchdog is active, but can be disabled + * <0: the NMI watchdog has not been set up, and cannot be enabled + * 0: the NMI watchdog is disabled, but can be enabled + */ +atomic_t nmi_active = ATOMIC_INIT(0); /* oprofile uses this */ +EXPORT_SYMBOL(nmi_active); static unsigned int nmi_hz = HZ; +static DEFINE_PER_CPU(short, wd_enabled); +static int endflag __initdata; static DEFINE_PER_CPU(unsigned int, last_irq_sum); static DEFINE_PER_CPU(local_t, alert_counter); @@ -45,7 +52,7 @@ static DEFINE_PER_CPU(int, nmi_touch); void touch_nmi_watchdog(void) { - if (nmi_watchdog_active) { + if (atomic_read(&nmi_active)) { int cpu; for_each_present_cpu(cpu) { @@ -78,6 +85,7 @@ static void die_nmi(const char *str, struct pt_regs *regs, int do_panic) if (do_panic || panic_on_oops) panic("Non maskable interrupt"); + nmi_exit(); local_irq_enable(); do_exit(SIGBUS); } @@ -92,6 +100,8 @@ notrace __kprobes void perfctr_irq(int irq, struct pt_regs *regs) local_cpu_data().__nmi_count++; + nmi_enter(); + if (notify_die(DIE_NMI, "nmi", regs, 0, pt_regs_trap_type(regs), SIGINT) == NOTIFY_STOP) touched = 1; @@ -103,17 +113,19 @@ notrace __kprobes void perfctr_irq(int irq, struct pt_regs *regs) } if (!touched && __get_cpu_var(last_irq_sum) == sum) { local_inc(&__get_cpu_var(alert_counter)); - if (local_read(&__get_cpu_var(alert_counter)) == 5 * nmi_hz) + if (local_read(&__get_cpu_var(alert_counter)) == 30 * nmi_hz) die_nmi("BUG: NMI Watchdog detected LOCKUP", regs, panic_on_timeout); } else { __get_cpu_var(last_irq_sum) = sum; local_set(&__get_cpu_var(alert_counter), 0); } - if (nmi_usable) { + if (__get_cpu_var(wd_enabled)) { write_pic(picl_value(nmi_hz)); pcr_ops->write(pcr_enable); } + + nmi_exit(); } static inline unsigned int get_nmi_count(int cpu) @@ -121,8 +133,6 @@ static inline unsigned int get_nmi_count(int cpu) return cpu_data(cpu).__nmi_count; } -static int endflag __initdata; - static __init void nmi_cpu_busy(void *data) { local_irq_enable_in_hardirq(); @@ -143,12 +153,15 @@ static void report_broken_nmi(int cpu, int *prev_nmi_count) printk(KERN_WARNING "and attach the output of the 'dmesg' command.\n"); - nmi_usable = 0; + per_cpu(wd_enabled, cpu) = 0; + atomic_dec(&nmi_active); } -static void stop_watchdog(void *unused) +void stop_nmi_watchdog(void *unused) { pcr_ops->write(PCR_PIC_PRIV); + __get_cpu_var(wd_enabled) = 0; + atomic_dec(&nmi_active); } static int __init check_nmi_watchdog(void) @@ -156,6 +169,9 @@ static int __init check_nmi_watchdog(void) unsigned int *prev_nmi_count; int cpu, err; + if (!atomic_read(&nmi_active)) + return 0; + prev_nmi_count = kmalloc(nr_cpu_ids * sizeof(unsigned int), GFP_KERNEL); if (!prev_nmi_count) { err = -ENOMEM; @@ -172,12 +188,15 @@ static int __init check_nmi_watchdog(void) mdelay((20 * 1000) / nmi_hz); /* wait 20 ticks */ for_each_online_cpu(cpu) { + if (!per_cpu(wd_enabled, cpu)) + continue; if (get_nmi_count(cpu) - prev_nmi_count[cpu] <= 5) report_broken_nmi(cpu, prev_nmi_count); } endflag = 1; - if (!nmi_usable) { + if (!atomic_read(&nmi_active)) { kfree(prev_nmi_count); + atomic_set(&nmi_active, -1); err = -ENODEV; goto error; } @@ -188,12 +207,26 @@ static int __init check_nmi_watchdog(void) kfree(prev_nmi_count); return 0; error: - on_each_cpu(stop_watchdog, NULL, 1); + on_each_cpu(stop_nmi_watchdog, NULL, 1); return err; } -static void start_watchdog(void *unused) +void start_nmi_watchdog(void *unused) { + 
__get_cpu_var(wd_enabled) = 1; + atomic_inc(&nmi_active); + + pcr_ops->write(PCR_PIC_PRIV); + write_pic(picl_value(nmi_hz)); + + pcr_ops->write(pcr_enable); +} + +static void nmi_adjust_hz_one(void *unused) +{ + if (!__get_cpu_var(wd_enabled)) + return; + pcr_ops->write(PCR_PIC_PRIV); write_pic(picl_value(nmi_hz)); @@ -203,13 +236,13 @@ static void start_watchdog(void *unused) void nmi_adjust_hz(unsigned int new_hz) { nmi_hz = new_hz; - on_each_cpu(start_watchdog, NULL, 1); + on_each_cpu(nmi_adjust_hz_one, NULL, 1); } EXPORT_SYMBOL_GPL(nmi_adjust_hz); static int nmi_shutdown(struct notifier_block *nb, unsigned long cmd, void *p) { - on_each_cpu(stop_watchdog, NULL, 1); + on_each_cpu(stop_nmi_watchdog, NULL, 1); return 0; } @@ -221,18 +254,19 @@ int __init nmi_init(void) { int err; - nmi_usable = 1; - - on_each_cpu(start_watchdog, NULL, 1); + on_each_cpu(start_nmi_watchdog, NULL, 1); err = check_nmi_watchdog(); if (!err) { err = register_reboot_notifier(&nmi_reboot_notifier); if (err) { - nmi_usable = 0; - on_each_cpu(stop_watchdog, NULL, 1); + on_each_cpu(stop_nmi_watchdog, NULL, 1); + atomic_set(&nmi_active, -1); } } + if (!err) + init_hw_perf_counters(); + return err; } diff --git a/arch/sparc/kernel/of_device_32.c b/arch/sparc/kernel/of_device_32.c index 90396702ea2..4c26eb59e74 100644 --- a/arch/sparc/kernel/of_device_32.c +++ b/arch/sparc/kernel/of_device_32.c @@ -9,6 +9,8 @@ #include <linux/irq.h> #include <linux/of_device.h> #include <linux/of_platform.h> +#include <asm/leon.h> +#include <asm/leon_amba.h> #include "of_device_common.h" @@ -97,6 +99,35 @@ static unsigned long of_bus_sbus_get_flags(const u32 *addr, unsigned long flags) return IORESOURCE_MEM; } + /* + * AMBAPP bus specific translator + */ + +static int of_bus_ambapp_match(struct device_node *np) +{ + return !strcmp(np->name, "ambapp"); +} + +static void of_bus_ambapp_count_cells(struct device_node *child, + int *addrc, int *sizec) +{ + if (addrc) + *addrc = 1; + if (sizec) + *sizec = 1; +} + +static int of_bus_ambapp_map(u32 *addr, const u32 *range, + int na, int ns, int pna) +{ + return of_bus_default_map(addr, range, na, ns, pna); +} + +static unsigned long of_bus_ambapp_get_flags(const u32 *addr, + unsigned long flags) +{ + return IORESOURCE_MEM; +} /* * Array of bus specific translators @@ -121,6 +152,15 @@ static struct of_bus of_busses[] = { .map = of_bus_default_map, .get_flags = of_bus_sbus_get_flags, }, + /* AMBA */ + { + .name = "ambapp", + .addr_prop_name = "reg", + .match = of_bus_ambapp_match, + .count_cells = of_bus_ambapp_count_cells, + .map = of_bus_ambapp_map, + .get_flags = of_bus_ambapp_get_flags, + }, /* Default */ { .name = "default", diff --git a/arch/sparc/kernel/pci.c b/arch/sparc/kernel/pci.c index 57859ad2354..c6864866280 100644 --- a/arch/sparc/kernel/pci.c +++ b/arch/sparc/kernel/pci.c @@ -1039,7 +1039,7 @@ static void ali_sound_dma_hack(struct pci_dev *pdev, int set_bit) pci_dev_put(ali_isa_bridge); } -int pci_dma_supported(struct pci_dev *pdev, u64 device_mask) +int pci64_dma_supported(struct pci_dev *pdev, u64 device_mask) { u64 dma_addr_mask; diff --git a/arch/sparc/kernel/pci_sun4v.c b/arch/sparc/kernel/pci_sun4v.c index 2485eaa2310..23c33ff9c31 100644 --- a/arch/sparc/kernel/pci_sun4v.c +++ b/arch/sparc/kernel/pci_sun4v.c @@ -232,7 +232,8 @@ static void dma_4v_free_coherent(struct device *dev, size_t size, void *cpu, static dma_addr_t dma_4v_map_page(struct device *dev, struct page *page, unsigned long offset, size_t sz, - enum dma_data_direction direction) + enum dma_data_direction 
direction, + struct dma_attrs *attrs) { struct iommu *iommu; unsigned long flags, npages, oaddr; @@ -296,7 +297,8 @@ iommu_map_fail: } static void dma_4v_unmap_page(struct device *dev, dma_addr_t bus_addr, - size_t sz, enum dma_data_direction direction) + size_t sz, enum dma_data_direction direction, + struct dma_attrs *attrs) { struct pci_pbm_info *pbm; struct iommu *iommu; @@ -336,7 +338,8 @@ static void dma_4v_unmap_page(struct device *dev, dma_addr_t bus_addr, } static int dma_4v_map_sg(struct device *dev, struct scatterlist *sglist, - int nelems, enum dma_data_direction direction) + int nelems, enum dma_data_direction direction, + struct dma_attrs *attrs) { struct scatterlist *s, *outs, *segstart; unsigned long flags, handle, prot; @@ -478,7 +481,8 @@ iommu_map_failed: } static void dma_4v_unmap_sg(struct device *dev, struct scatterlist *sglist, - int nelems, enum dma_data_direction direction) + int nelems, enum dma_data_direction direction, + struct dma_attrs *attrs) { struct pci_pbm_info *pbm; struct scatterlist *sg; @@ -521,29 +525,13 @@ static void dma_4v_unmap_sg(struct device *dev, struct scatterlist *sglist, spin_unlock_irqrestore(&iommu->lock, flags); } -static void dma_4v_sync_single_for_cpu(struct device *dev, - dma_addr_t bus_addr, size_t sz, - enum dma_data_direction direction) -{ - /* Nothing to do... */ -} - -static void dma_4v_sync_sg_for_cpu(struct device *dev, - struct scatterlist *sglist, int nelems, - enum dma_data_direction direction) -{ - /* Nothing to do... */ -} - -static const struct dma_ops sun4v_dma_ops = { +static struct dma_map_ops sun4v_dma_ops = { .alloc_coherent = dma_4v_alloc_coherent, .free_coherent = dma_4v_free_coherent, .map_page = dma_4v_map_page, .unmap_page = dma_4v_unmap_page, .map_sg = dma_4v_map_sg, .unmap_sg = dma_4v_unmap_sg, - .sync_single_for_cpu = dma_4v_sync_single_for_cpu, - .sync_sg_for_cpu = dma_4v_sync_sg_for_cpu, }; static void __devinit pci_sun4v_scan_bus(struct pci_pbm_info *pbm, diff --git a/arch/sparc/kernel/pcr.c b/arch/sparc/kernel/pcr.c index 1ae8cdd7e70..68ff0010707 100644 --- a/arch/sparc/kernel/pcr.c +++ b/arch/sparc/kernel/pcr.c @@ -7,6 +7,8 @@ #include <linux/init.h> #include <linux/irq.h> +#include <linux/perf_counter.h> + #include <asm/pil.h> #include <asm/pcr.h> #include <asm/nmi.h> @@ -34,10 +36,20 @@ unsigned int picl_shift; */ void deferred_pcr_work_irq(int irq, struct pt_regs *regs) { + struct pt_regs *old_regs; + clear_softint(1 << PIL_DEFERRED_PCR_WORK); + + old_regs = set_irq_regs(regs); + irq_enter(); +#ifdef CONFIG_PERF_COUNTERS + perf_counter_do_pending(); +#endif + irq_exit(); + set_irq_regs(old_regs); } -void schedule_deferred_pcr_work(void) +void set_perf_counter_pending(void) { set_softint(1 << PIL_DEFERRED_PCR_WORK); } diff --git a/arch/sparc/kernel/perf_counter.c b/arch/sparc/kernel/perf_counter.c new file mode 100644 index 00000000000..09de4035eaa --- /dev/null +++ b/arch/sparc/kernel/perf_counter.c @@ -0,0 +1,557 @@ +/* Performance counter support for sparc64. + * + * Copyright (C) 2009 David S. 
Miller <davem@davemloft.net> + * + * This code is based almost entirely upon the x86 perf counter + * code, which is: + * + * Copyright (C) 2008 Thomas Gleixner <tglx@linutronix.de> + * Copyright (C) 2008-2009 Red Hat, Inc., Ingo Molnar + * Copyright (C) 2009 Jaswinder Singh Rajput + * Copyright (C) 2009 Advanced Micro Devices, Inc., Robert Richter + * Copyright (C) 2008-2009 Red Hat, Inc., Peter Zijlstra <pzijlstr@redhat.com> + */ + +#include <linux/perf_counter.h> +#include <linux/kprobes.h> +#include <linux/kernel.h> +#include <linux/kdebug.h> +#include <linux/mutex.h> + +#include <asm/cpudata.h> +#include <asm/atomic.h> +#include <asm/nmi.h> +#include <asm/pcr.h> + +/* Sparc64 chips have two performance counters, 32-bits each, with + * overflow interrupts generated on transition from 0xffffffff to 0. + * The counters are accessed in one go using a 64-bit register. + * + * Both counters are controlled using a single control register. The + * only way to stop all sampling is to clear all of the context (user, + * supervisor, hypervisor) sampling enable bits. But these bits apply + * to both counters, thus the two counters can't be enabled/disabled + * individually. + * + * The control register has two event fields, one for each of the two + * counters. It's thus nearly impossible to have one counter going + * while keeping the other one stopped. Therefore it is possible to + * get overflow interrupts for counters not currently "in use" and + * that condition must be checked in the overflow interrupt handler. + * + * So we use a hack, in that we program inactive counters with the + * "sw_count0" and "sw_count1" events. These count how many times + * the instruction "sethi %hi(0xfc000), %g0" is executed. It's an + * unusual way to encode a NOP and therefore will not trigger in + * normal code. 
+ */ + +#define MAX_HWCOUNTERS 2 +#define MAX_PERIOD ((1UL << 32) - 1) + +#define PIC_UPPER_INDEX 0 +#define PIC_LOWER_INDEX 1 + +struct cpu_hw_counters { + struct perf_counter *counters[MAX_HWCOUNTERS]; + unsigned long used_mask[BITS_TO_LONGS(MAX_HWCOUNTERS)]; + unsigned long active_mask[BITS_TO_LONGS(MAX_HWCOUNTERS)]; + int enabled; +}; +DEFINE_PER_CPU(struct cpu_hw_counters, cpu_hw_counters) = { .enabled = 1, }; + +struct perf_event_map { + u16 encoding; + u8 pic_mask; +#define PIC_NONE 0x00 +#define PIC_UPPER 0x01 +#define PIC_LOWER 0x02 +}; + +struct sparc_pmu { + const struct perf_event_map *(*event_map)(int); + int max_events; + int upper_shift; + int lower_shift; + int event_mask; + int hv_bit; + int irq_bit; + int upper_nop; + int lower_nop; +}; + +static const struct perf_event_map ultra3i_perfmon_event_map[] = { + [PERF_COUNT_HW_CPU_CYCLES] = { 0x0000, PIC_UPPER | PIC_LOWER }, + [PERF_COUNT_HW_INSTRUCTIONS] = { 0x0001, PIC_UPPER | PIC_LOWER }, + [PERF_COUNT_HW_CACHE_REFERENCES] = { 0x0009, PIC_LOWER }, + [PERF_COUNT_HW_CACHE_MISSES] = { 0x0009, PIC_UPPER }, +}; + +static const struct perf_event_map *ultra3i_event_map(int event) +{ + return &ultra3i_perfmon_event_map[event]; +} + +static const struct sparc_pmu ultra3i_pmu = { + .event_map = ultra3i_event_map, + .max_events = ARRAY_SIZE(ultra3i_perfmon_event_map), + .upper_shift = 11, + .lower_shift = 4, + .event_mask = 0x3f, + .upper_nop = 0x1c, + .lower_nop = 0x14, +}; + +static const struct perf_event_map niagara2_perfmon_event_map[] = { + [PERF_COUNT_HW_CPU_CYCLES] = { 0x02ff, PIC_UPPER | PIC_LOWER }, + [PERF_COUNT_HW_INSTRUCTIONS] = { 0x02ff, PIC_UPPER | PIC_LOWER }, + [PERF_COUNT_HW_CACHE_REFERENCES] = { 0x0208, PIC_UPPER | PIC_LOWER }, + [PERF_COUNT_HW_CACHE_MISSES] = { 0x0302, PIC_UPPER | PIC_LOWER }, + [PERF_COUNT_HW_BRANCH_INSTRUCTIONS] = { 0x0201, PIC_UPPER | PIC_LOWER }, + [PERF_COUNT_HW_BRANCH_MISSES] = { 0x0202, PIC_UPPER | PIC_LOWER }, +}; + +static const struct perf_event_map *niagara2_event_map(int event) +{ + return &niagara2_perfmon_event_map[event]; +} + +static const struct sparc_pmu niagara2_pmu = { + .event_map = niagara2_event_map, + .max_events = ARRAY_SIZE(niagara2_perfmon_event_map), + .upper_shift = 19, + .lower_shift = 6, + .event_mask = 0xfff, + .hv_bit = 0x8, + .irq_bit = 0x03, + .upper_nop = 0x220, + .lower_nop = 0x220, +}; + +static const struct sparc_pmu *sparc_pmu __read_mostly; + +static u64 event_encoding(u64 event, int idx) +{ + if (idx == PIC_UPPER_INDEX) + event <<= sparc_pmu->upper_shift; + else + event <<= sparc_pmu->lower_shift; + return event; +} + +static u64 mask_for_index(int idx) +{ + return event_encoding(sparc_pmu->event_mask, idx); +} + +static u64 nop_for_index(int idx) +{ + return event_encoding(idx == PIC_UPPER_INDEX ? 
+ sparc_pmu->upper_nop : + sparc_pmu->lower_nop, idx); +} + +static inline void sparc_pmu_enable_counter(struct hw_perf_counter *hwc, + int idx) +{ + u64 val, mask = mask_for_index(idx); + + val = pcr_ops->read(); + pcr_ops->write((val & ~mask) | hwc->config); +} + +static inline void sparc_pmu_disable_counter(struct hw_perf_counter *hwc, + int idx) +{ + u64 mask = mask_for_index(idx); + u64 nop = nop_for_index(idx); + u64 val = pcr_ops->read(); + + pcr_ops->write((val & ~mask) | nop); +} + +void hw_perf_enable(void) +{ + struct cpu_hw_counters *cpuc = &__get_cpu_var(cpu_hw_counters); + u64 val; + int i; + + if (cpuc->enabled) + return; + + cpuc->enabled = 1; + barrier(); + + val = pcr_ops->read(); + + for (i = 0; i < MAX_HWCOUNTERS; i++) { + struct perf_counter *cp = cpuc->counters[i]; + struct hw_perf_counter *hwc; + + if (!cp) + continue; + hwc = &cp->hw; + val |= hwc->config_base; + } + + pcr_ops->write(val); +} + +void hw_perf_disable(void) +{ + struct cpu_hw_counters *cpuc = &__get_cpu_var(cpu_hw_counters); + u64 val; + + if (!cpuc->enabled) + return; + + cpuc->enabled = 0; + + val = pcr_ops->read(); + val &= ~(PCR_UTRACE | PCR_STRACE | + sparc_pmu->hv_bit | sparc_pmu->irq_bit); + pcr_ops->write(val); +} + +static u32 read_pmc(int idx) +{ + u64 val; + + read_pic(val); + if (idx == PIC_UPPER_INDEX) + val >>= 32; + + return val & 0xffffffff; +} + +static void write_pmc(int idx, u64 val) +{ + u64 shift, mask, pic; + + shift = 0; + if (idx == PIC_UPPER_INDEX) + shift = 32; + + mask = ((u64) 0xffffffff) << shift; + val <<= shift; + + read_pic(pic); + pic &= ~mask; + pic |= val; + write_pic(pic); +} + +static int sparc_perf_counter_set_period(struct perf_counter *counter, + struct hw_perf_counter *hwc, int idx) +{ + s64 left = atomic64_read(&hwc->period_left); + s64 period = hwc->sample_period; + int ret = 0; + + if (unlikely(left <= -period)) { + left = period; + atomic64_set(&hwc->period_left, left); + hwc->last_period = period; + ret = 1; + } + + if (unlikely(left <= 0)) { + left += period; + atomic64_set(&hwc->period_left, left); + hwc->last_period = period; + ret = 1; + } + if (left > MAX_PERIOD) + left = MAX_PERIOD; + + atomic64_set(&hwc->prev_count, (u64)-left); + + write_pmc(idx, (u64)(-left) & 0xffffffff); + + perf_counter_update_userpage(counter); + + return ret; +} + +static int sparc_pmu_enable(struct perf_counter *counter) +{ + struct cpu_hw_counters *cpuc = &__get_cpu_var(cpu_hw_counters); + struct hw_perf_counter *hwc = &counter->hw; + int idx = hwc->idx; + + if (test_and_set_bit(idx, cpuc->used_mask)) + return -EAGAIN; + + sparc_pmu_disable_counter(hwc, idx); + + cpuc->counters[idx] = counter; + set_bit(idx, cpuc->active_mask); + + sparc_perf_counter_set_period(counter, hwc, idx); + sparc_pmu_enable_counter(hwc, idx); + perf_counter_update_userpage(counter); + return 0; +} + +static u64 sparc_perf_counter_update(struct perf_counter *counter, + struct hw_perf_counter *hwc, int idx) +{ + int shift = 64 - 32; + u64 prev_raw_count, new_raw_count; + s64 delta; + +again: + prev_raw_count = atomic64_read(&hwc->prev_count); + new_raw_count = read_pmc(idx); + + if (atomic64_cmpxchg(&hwc->prev_count, prev_raw_count, + new_raw_count) != prev_raw_count) + goto again; + + delta = (new_raw_count << shift) - (prev_raw_count << shift); + delta >>= shift; + + atomic64_add(delta, &counter->count); + atomic64_sub(delta, &hwc->period_left); + + return new_raw_count; +} + +static void sparc_pmu_disable(struct perf_counter *counter) +{ + struct cpu_hw_counters *cpuc = 
&__get_cpu_var(cpu_hw_counters); + struct hw_perf_counter *hwc = &counter->hw; + int idx = hwc->idx; + + clear_bit(idx, cpuc->active_mask); + sparc_pmu_disable_counter(hwc, idx); + + barrier(); + + sparc_perf_counter_update(counter, hwc, idx); + cpuc->counters[idx] = NULL; + clear_bit(idx, cpuc->used_mask); + + perf_counter_update_userpage(counter); +} + +static void sparc_pmu_read(struct perf_counter *counter) +{ + struct hw_perf_counter *hwc = &counter->hw; + sparc_perf_counter_update(counter, hwc, hwc->idx); +} + +static void sparc_pmu_unthrottle(struct perf_counter *counter) +{ + struct hw_perf_counter *hwc = &counter->hw; + sparc_pmu_enable_counter(hwc, hwc->idx); +} + +static atomic_t active_counters = ATOMIC_INIT(0); +static DEFINE_MUTEX(pmc_grab_mutex); + +void perf_counter_grab_pmc(void) +{ + if (atomic_inc_not_zero(&active_counters)) + return; + + mutex_lock(&pmc_grab_mutex); + if (atomic_read(&active_counters) == 0) { + if (atomic_read(&nmi_active) > 0) { + on_each_cpu(stop_nmi_watchdog, NULL, 1); + BUG_ON(atomic_read(&nmi_active) != 0); + } + atomic_inc(&active_counters); + } + mutex_unlock(&pmc_grab_mutex); +} + +void perf_counter_release_pmc(void) +{ + if (atomic_dec_and_mutex_lock(&active_counters, &pmc_grab_mutex)) { + if (atomic_read(&nmi_active) == 0) + on_each_cpu(start_nmi_watchdog, NULL, 1); + mutex_unlock(&pmc_grab_mutex); + } +} + +static void hw_perf_counter_destroy(struct perf_counter *counter) +{ + perf_counter_release_pmc(); +} + +static int __hw_perf_counter_init(struct perf_counter *counter) +{ + struct perf_counter_attr *attr = &counter->attr; + struct hw_perf_counter *hwc = &counter->hw; + const struct perf_event_map *pmap; + u64 enc; + + if (atomic_read(&nmi_active) < 0) + return -ENODEV; + + if (attr->type != PERF_TYPE_HARDWARE) + return -EOPNOTSUPP; + + if (attr->config >= sparc_pmu->max_events) + return -EINVAL; + + perf_counter_grab_pmc(); + counter->destroy = hw_perf_counter_destroy; + + /* We save the enable bits in the config_base. So to + * turn off sampling just write 'config', and to enable + * things write 'config | config_base'. 
+ */ + hwc->config_base = sparc_pmu->irq_bit; + if (!attr->exclude_user) + hwc->config_base |= PCR_UTRACE; + if (!attr->exclude_kernel) + hwc->config_base |= PCR_STRACE; + if (!attr->exclude_hv) + hwc->config_base |= sparc_pmu->hv_bit; + + if (!hwc->sample_period) { + hwc->sample_period = MAX_PERIOD; + hwc->last_period = hwc->sample_period; + atomic64_set(&hwc->period_left, hwc->sample_period); + } + + pmap = sparc_pmu->event_map(attr->config); + + enc = pmap->encoding; + if (pmap->pic_mask & PIC_UPPER) { + hwc->idx = PIC_UPPER_INDEX; + enc <<= sparc_pmu->upper_shift; + } else { + hwc->idx = PIC_LOWER_INDEX; + enc <<= sparc_pmu->lower_shift; + } + + hwc->config |= enc; + return 0; +} + +static const struct pmu pmu = { + .enable = sparc_pmu_enable, + .disable = sparc_pmu_disable, + .read = sparc_pmu_read, + .unthrottle = sparc_pmu_unthrottle, +}; + +const struct pmu *hw_perf_counter_init(struct perf_counter *counter) +{ + int err = __hw_perf_counter_init(counter); + + if (err) + return ERR_PTR(err); + return &pmu; +} + +void perf_counter_print_debug(void) +{ + unsigned long flags; + u64 pcr, pic; + int cpu; + + if (!sparc_pmu) + return; + + local_irq_save(flags); + + cpu = smp_processor_id(); + + pcr = pcr_ops->read(); + read_pic(pic); + + pr_info("\n"); + pr_info("CPU#%d: PCR[%016llx] PIC[%016llx]\n", + cpu, pcr, pic); + + local_irq_restore(flags); +} + +static int __kprobes perf_counter_nmi_handler(struct notifier_block *self, + unsigned long cmd, void *__args) +{ + struct die_args *args = __args; + struct perf_sample_data data; + struct cpu_hw_counters *cpuc; + struct pt_regs *regs; + int idx; + + if (!atomic_read(&active_counters)) + return NOTIFY_DONE; + + switch (cmd) { + case DIE_NMI: + break; + + default: + return NOTIFY_DONE; + } + + regs = args->regs; + + data.regs = regs; + data.addr = 0; + + cpuc = &__get_cpu_var(cpu_hw_counters); + for (idx = 0; idx < MAX_HWCOUNTERS; idx++) { + struct perf_counter *counter = cpuc->counters[idx]; + struct hw_perf_counter *hwc; + u64 val; + + if (!test_bit(idx, cpuc->active_mask)) + continue; + hwc = &counter->hw; + val = sparc_perf_counter_update(counter, hwc, idx); + if (val & (1ULL << 31)) + continue; + + data.period = counter->hw.last_period; + if (!sparc_perf_counter_set_period(counter, hwc, idx)) + continue; + + if (perf_counter_overflow(counter, 1, &data)) + sparc_pmu_disable_counter(hwc, idx); + } + + return NOTIFY_STOP; +} + +static __read_mostly struct notifier_block perf_counter_nmi_notifier = { + .notifier_call = perf_counter_nmi_handler, +}; + +static bool __init supported_pmu(void) +{ + if (!strcmp(sparc_pmu_type, "ultra3i")) { + sparc_pmu = &ultra3i_pmu; + return true; + } + if (!strcmp(sparc_pmu_type, "niagara2")) { + sparc_pmu = &niagara2_pmu; + return true; + } + return false; +} + +void __init init_hw_perf_counters(void) +{ + pr_info("Performance counters: "); + + if (!supported_pmu()) { + pr_cont("No support for PMU type '%s'\n", sparc_pmu_type); + return; + } + + pr_cont("Supported PMU type is '%s'\n", sparc_pmu_type); + + /* All sparc64 PMUs currently have 2 counters. But this simple + * driver only supports one active counter at a time. 
diff --git a/arch/sparc/kernel/process_64.c b/arch/sparc/kernel/process_64.c
index 4041f94e772..18d67854a1b 100644
--- a/arch/sparc/kernel/process_64.c
+++ b/arch/sparc/kernel/process_64.c
@@ -251,7 +251,7 @@ static void __global_reg_poll(struct global_reg_snapshot *gp)
 	}
 }
 
-void __trigger_all_cpu_backtrace(void)
+void arch_trigger_all_cpu_backtrace(void)
 {
 	struct thread_info *tp = current_thread_info();
 	struct pt_regs *regs = get_irq_regs();
@@ -304,7 +304,7 @@ void __trigger_all_cpu_backtrace(void)
 
 static void sysrq_handle_globreg(int key, struct tty_struct *tty)
 {
-	__trigger_all_cpu_backtrace();
+	arch_trigger_all_cpu_backtrace();
 }
 
 static struct sysrq_key_op sparc_globalreg_op = {
diff --git a/arch/sparc/kernel/prom_32.c b/arch/sparc/kernel/prom_32.c
index fe43e80772d..0a37e8cfd16 100644
--- a/arch/sparc/kernel/prom_32.c
+++ b/arch/sparc/kernel/prom_32.c
@@ -24,6 +24,8 @@
 
 #include <asm/prom.h>
 #include <asm/oplib.h>
+#include <asm/leon.h>
+#include <asm/leon_amba.h>
 
 #include "prom.h"
 
@@ -131,6 +133,35 @@ static void __init ebus_path_component(struct device_node *dp, char *tmp_buf)
 		regs->which_io, regs->phys_addr);
 }
 
+/* "name:vendor:device@irq,addrlo" */
+static void __init ambapp_path_component(struct device_node *dp, char *tmp_buf)
+{
+	struct amba_prom_registers *regs; unsigned int *intr;
+	unsigned int *device, *vendor;
+	struct property *prop;
+
+	prop = of_find_property(dp, "reg", NULL);
+	if (!prop)
+		return;
+	regs = prop->value;
+	prop = of_find_property(dp, "interrupts", NULL);
+	if (!prop)
+		return;
+	intr = prop->value;
+	prop = of_find_property(dp, "vendor", NULL);
+	if (!prop)
+		return;
+	vendor = prop->value;
+	prop = of_find_property(dp, "device", NULL);
+	if (!prop)
+		return;
+	device = prop->value;
+
+	sprintf(tmp_buf, "%s:%d:%d@%x,%x",
+		dp->name, *vendor, *device,
+		*intr, regs->phys_addr);
+}
+
 static void __init __build_path_component(struct device_node *dp, char *tmp_buf)
 {
 	struct device_node *parent = dp->parent;
@@ -143,6 +174,8 @@ static void __init __build_path_component(struct device_node *dp, char *tmp_buf)
 		return sbus_path_component(dp, tmp_buf);
 	if (!strcmp(parent->type, "ebus"))
 		return ebus_path_component(dp, tmp_buf);
+	if (!strcmp(parent->type, "ambapp"))
+		return ambapp_path_component(dp, tmp_buf);
 
 	/* "isa" is handled with platform naming */
 }
diff --git a/arch/sparc/kernel/prom_common.c b/arch/sparc/kernel/prom_common.c
index 0fb5789d43c..138910c6720 100644
--- a/arch/sparc/kernel/prom_common.c
+++ b/arch/sparc/kernel/prom_common.c
@@ -22,9 +22,12 @@
 #include <linux/of.h>
 #include <asm/prom.h>
 #include <asm/oplib.h>
+#include <asm/leon.h>
 
 #include "prom.h"
 
+void (*prom_build_more)(struct device_node *dp, struct device_node ***nextp);
+
 struct device_node *of_console_device;
 EXPORT_SYMBOL(of_console_device);
 
@@ -161,7 +164,7 @@ static struct property * __init build_one_prop(phandle node, char *prev,
 		name = prom_nextprop(node, prev, p->name);
 	}
 
-	if (strlen(name) == 0) {
+	if (!name || strlen(name) == 0) {
 		tmp = p;
 		return NULL;
 	}
@@ -242,7 +245,7 @@ static struct device_node * __init prom_create_node(phandle node,
 	return dp;
 }
 
-static char * __init build_full_name(struct device_node *dp)
+char * __init build_full_name(struct device_node *dp)
 {
 	int len, ourlen, plen;
 	char *n;
@@ -289,6 +292,9 @@ static struct device_node * __init prom_build_tree(struct device_node *parent,
 
 		dp->child = prom_build_tree(dp, prom_getchild(node), nextp);
 
+		if (prom_build_more)
+			prom_build_more(dp, nextp);
+
 		node = prom_getsibling(node);
 	}
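The prom_build_more hook added above lets a platform graft extra nodes onto the device tree as prom_build_tree() walks it; the LEON port uses it to synthesize nodes for AMBA plug&play devices discovered by bus scan. A minimal sketch of wiring up such a hook, with a hypothetical callback name (the real LEON callback lives elsewhere in the port):

	/* Sketch only: registering a prom_build_more callback.
	 * example_build_more() is hypothetical; a real callback would
	 * allocate device_node structures and append them via *nextp.
	 */
	static void __init example_build_more(struct device_node *dp,
					      struct device_node ***nextp)
	{
		/* inspect dp, synthesize children, advance *nextp */
	}

	static void __init example_install_hook(void)
	{
		prom_build_more = example_build_more;
	}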
diff --git a/arch/sparc/kernel/setup_32.c b/arch/sparc/kernel/setup_32.c
index 998cadb4e7f..9be2af55c5c 100644
--- a/arch/sparc/kernel/setup_32.c
+++ b/arch/sparc/kernel/setup_32.c
@@ -235,6 +235,8 @@ void __init setup_arch(char **cmdline_p)
 		sparc_cpu_model = sun4e;
 	if (!strcmp(&cputypval,"sun4u"))
 		sparc_cpu_model = sun4u;
+	if (!strncmp(&cputypval, "leon" , 4))
+		sparc_cpu_model = sparc_leon;
 
 	printk("ARCH: ");
 	switch(sparc_cpu_model) {
@@ -256,6 +258,9 @@ void __init setup_arch(char **cmdline_p)
 	case sun4u:
 		printk("SUN4U\n");
 		break;
+	case sparc_leon:
+		printk("LEON\n");
+		break;
 	default:
 		printk("UNKNOWN!\n");
 		break;
@@ -263,8 +268,6 @@ void __init setup_arch(char **cmdline_p)
 
 #ifdef CONFIG_DUMMY_CONSOLE
 	conswitchp = &dummy_con;
-#elif defined(CONFIG_PROM_CONSOLE)
-	conswitchp = &prom_con;
 #endif
 
 	boot_flags_init(*cmdline_p);
diff --git a/arch/sparc/kernel/setup_64.c b/arch/sparc/kernel/setup_64.c
index f2bcfd2967d..21180339cb0 100644
--- a/arch/sparc/kernel/setup_64.c
+++ b/arch/sparc/kernel/setup_64.c
@@ -295,8 +295,6 @@ void __init setup_arch(char **cmdline_p)
 
 #ifdef CONFIG_DUMMY_CONSOLE
 	conswitchp = &dummy_con;
-#elif defined(CONFIG_PROM_CONSOLE)
-	conswitchp = &prom_con;
 #endif
 
 	idprom_init();
diff --git a/arch/sparc/kernel/signal_32.c b/arch/sparc/kernel/signal_32.c
index 181d069a2d4..7ce1a1005b1 100644
--- a/arch/sparc/kernel/signal_32.c
+++ b/arch/sparc/kernel/signal_32.c
@@ -590,6 +590,8 @@ void do_notify_resume(struct pt_regs *regs, unsigned long orig_i0,
 	if (thread_info_flags & _TIF_NOTIFY_RESUME) {
 		clear_thread_flag(TIF_NOTIFY_RESUME);
 		tracehook_notify_resume(regs);
+		if (current->replacement_session_keyring)
+			key_replace_session_keyring();
 	}
 }
 
diff --git a/arch/sparc/kernel/signal_64.c b/arch/sparc/kernel/signal_64.c
index ec82d76dc6f..647afbda7ae 100644
--- a/arch/sparc/kernel/signal_64.c
+++ b/arch/sparc/kernel/signal_64.c
@@ -613,5 +613,8 @@ void do_notify_resume(struct pt_regs *regs, unsigned long orig_i0, unsigned long
 	if (thread_info_flags & _TIF_NOTIFY_RESUME) {
 		clear_thread_flag(TIF_NOTIFY_RESUME);
 		tracehook_notify_resume(regs);
+		if (current->replacement_session_keyring)
+			key_replace_session_keyring();
 	}
 }
+
diff --git a/arch/sparc/kernel/smp_64.c b/arch/sparc/kernel/smp_64.c
index fa44eaf8d89..ff68373ce6d 100644
--- a/arch/sparc/kernel/smp_64.c
+++ b/arch/sparc/kernel/smp_64.c
@@ -1389,8 +1389,8 @@ void smp_send_stop(void)
  * RETURNS:
  * Pointer to the allocated area on success, NULL on failure.
  */
-static void * __init pcpu_alloc_bootmem(unsigned int cpu, unsigned long size,
-					unsigned long align)
+static void * __init pcpu_alloc_bootmem(unsigned int cpu, size_t size,
+					size_t align)
 {
 	const unsigned long goal = __pa(MAX_DMA_ADDRESS);
 #ifdef CONFIG_NEED_MULTIPLE_NODES
@@ -1415,127 +1415,35 @@ static void * __init pcpu_alloc_bootmem(unsigned int cpu, unsigned long size,
 #endif
 }
 
-static size_t pcpur_size __initdata;
-static void **pcpur_ptrs __initdata;
-
-static struct page * __init pcpur_get_page(unsigned int cpu, int pageno)
+static void __init pcpu_free_bootmem(void *ptr, size_t size)
 {
-	size_t off = (size_t)pageno << PAGE_SHIFT;
-
-	if (off >= pcpur_size)
-		return NULL;
-
-	return virt_to_page(pcpur_ptrs[cpu] + off);
+	free_bootmem(__pa(ptr), size);
 }
 
-#define PCPU_CHUNK_SIZE (4UL * 1024UL * 1024UL)
-
-static void __init pcpu_map_range(unsigned long start, unsigned long end,
-				  struct page *page)
+static int pcpu_cpu_distance(unsigned int from, unsigned int to)
 {
-	unsigned long pfn = page_to_pfn(page);
-	unsigned long pte_base;
-
-	BUG_ON((pfn<<PAGE_SHIFT)&(PCPU_CHUNK_SIZE - 1UL));
-
-	pte_base = (_PAGE_VALID | _PAGE_SZ4MB_4U |
-		    _PAGE_CP_4U | _PAGE_CV_4U |
-		    _PAGE_P_4U | _PAGE_W_4U);
-	if (tlb_type == hypervisor)
-		pte_base = (_PAGE_VALID | _PAGE_SZ4MB_4V |
-			    _PAGE_CP_4V | _PAGE_CV_4V |
-			    _PAGE_P_4V | _PAGE_W_4V);
-
-	while (start < end) {
-		pgd_t *pgd = pgd_offset_k(start);
-		unsigned long this_end;
-		pud_t *pud;
-		pmd_t *pmd;
-		pte_t *pte;
-
-		pud = pud_offset(pgd, start);
-		if (pud_none(*pud)) {
-			pmd_t *new;
-
-			new = __alloc_bootmem(PAGE_SIZE, PAGE_SIZE, PAGE_SIZE);
-			pud_populate(&init_mm, pud, new);
-		}
-
-		pmd = pmd_offset(pud, start);
-		if (!pmd_present(*pmd)) {
-			pte_t *new;
-
-			new = __alloc_bootmem(PAGE_SIZE, PAGE_SIZE, PAGE_SIZE);
-			pmd_populate_kernel(&init_mm, pmd, new);
-		}
-
-		pte = pte_offset_kernel(pmd, start);
-		this_end = (start + PMD_SIZE) & PMD_MASK;
-		if (this_end > end)
-			this_end = end;
-
-		while (start < this_end) {
-			unsigned long paddr = pfn << PAGE_SHIFT;
-
-			pte_val(*pte) = (paddr | pte_base);
-
-			start += PAGE_SIZE;
-			pte++;
-			pfn++;
-		}
-	}
+	if (cpu_to_node(from) == cpu_to_node(to))
+		return LOCAL_DISTANCE;
+	else
+		return REMOTE_DISTANCE;
 }
 
 void __init setup_per_cpu_areas(void)
 {
-	size_t dyn_size, static_size = __per_cpu_end - __per_cpu_start;
-	static struct vm_struct vm;
-	unsigned long delta, cpu;
-	size_t pcpu_unit_size;
-	size_t ptrs_size;
-
-	pcpur_size = PFN_ALIGN(static_size + PERCPU_MODULE_RESERVE +
-			       PERCPU_DYNAMIC_RESERVE);
-	dyn_size = pcpur_size - static_size - PERCPU_MODULE_RESERVE;
-
+	unsigned long delta;
+	unsigned int cpu;
+	int rc;
 
-	ptrs_size = PFN_ALIGN(num_possible_cpus() * sizeof(pcpur_ptrs[0]));
-	pcpur_ptrs = alloc_bootmem(ptrs_size);
-
-	for_each_possible_cpu(cpu) {
-		pcpur_ptrs[cpu] = pcpu_alloc_bootmem(cpu, PCPU_CHUNK_SIZE,
-						     PCPU_CHUNK_SIZE);
-
-		free_bootmem(__pa(pcpur_ptrs[cpu] + pcpur_size),
-			     PCPU_CHUNK_SIZE - pcpur_size);
-
-		memcpy(pcpur_ptrs[cpu], __per_cpu_load, static_size);
-	}
-
-	/* allocate address and map */
-	vm.flags = VM_ALLOC;
-	vm.size = num_possible_cpus() * PCPU_CHUNK_SIZE;
-	vm_area_register_early(&vm, PCPU_CHUNK_SIZE);
-
-	for_each_possible_cpu(cpu) {
-		unsigned long start = (unsigned long) vm.addr;
-		unsigned long end;
-
-		start += cpu * PCPU_CHUNK_SIZE;
-		end = start + PCPU_CHUNK_SIZE;
-		pcpu_map_range(start, end, virt_to_page(pcpur_ptrs[cpu]));
-	}
-
-	pcpu_unit_size = pcpu_setup_first_chunk(pcpur_get_page, static_size,
-						PERCPU_MODULE_RESERVE, dyn_size,
-						PCPU_CHUNK_SIZE, vm.addr, NULL);
-
-	free_bootmem(__pa(pcpur_ptrs), ptrs_size);
+	rc = pcpu_embed_first_chunk(PERCPU_MODULE_RESERVE,
+				    PERCPU_DYNAMIC_RESERVE, 4 << 20,
+				    pcpu_cpu_distance, pcpu_alloc_bootmem,
+				    pcpu_free_bootmem);
+	if (rc)
+		panic("failed to initialize first chunk (%d)", rc);
 
 	delta = (unsigned long)pcpu_base_addr - (unsigned long)__per_cpu_start;
-	for_each_possible_cpu(cpu) {
-		__per_cpu_offset(cpu) = delta + cpu * pcpu_unit_size;
-	}
+	for_each_possible_cpu(cpu)
+		__per_cpu_offset(cpu) = delta + pcpu_unit_offsets[cpu];
 
 	/* Setup %g5 for the boot cpu. */
 	__local_per_cpu_offset = __per_cpu_offset(smp_processor_id());
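After this conversion the generic allocator owns first-chunk construction: the arch supplies only the bootmem alloc/free helpers plus pcpu_cpu_distance(), which reports LOCAL_DISTANCE for same-node CPUs so the allocator can group units by NUMA node, and each CPU's offset falls out as delta + pcpu_unit_offsets[cpu]. A short sketch of what those offsets buy, using a hypothetical per-CPU variable:

	/* Sketch only: example_counter is hypothetical.  per_cpu()
	 * resolves to the variable's link address plus
	 * __per_cpu_offset(cpu), which setup_per_cpu_areas() above now
	 * computes as delta + pcpu_unit_offsets[cpu].
	 */
	DEFINE_PER_CPU(unsigned long, example_counter);

	static unsigned long example_read_on(unsigned int cpu)
	{
		return per_cpu(example_counter, cpu);
	}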
diff --git a/arch/sparc/kernel/sun4d_smp.c b/arch/sparc/kernel/sun4d_smp.c
index 54fb02468f0..68791cad7b7 100644
--- a/arch/sparc/kernel/sun4d_smp.c
+++ b/arch/sparc/kernel/sun4d_smp.c
@@ -162,9 +162,6 @@ extern void cpu_panic(void);
  */
 extern struct linux_prom_registers smp_penguin_ctable;
 
-extern unsigned long trapbase_cpu1[];
-extern unsigned long trapbase_cpu2[];
-extern unsigned long trapbase_cpu3[];
 
 void __init smp4d_boot_cpus(void)
 {
@@ -235,25 +232,6 @@ void __init smp4d_smp_done(void)
 	*prev = first;
 	local_flush_cache_all();
 
-	/* Free unneeded trap tables */
-	ClearPageReserved(virt_to_page(trapbase_cpu1));
-	init_page_count(virt_to_page(trapbase_cpu1));
-	free_page((unsigned long)trapbase_cpu1);
-	totalram_pages++;
-	num_physpages++;
-
-	ClearPageReserved(virt_to_page(trapbase_cpu2));
-	init_page_count(virt_to_page(trapbase_cpu2));
-	free_page((unsigned long)trapbase_cpu2);
-	totalram_pages++;
-	num_physpages++;
-
-	ClearPageReserved(virt_to_page(trapbase_cpu3));
-	init_page_count(virt_to_page(trapbase_cpu3));
-	free_page((unsigned long)trapbase_cpu3);
-	totalram_pages++;
-	num_physpages++;
-
 	/* Ok, they are spinning and ready to go. */
 	smp_processors_ready = 1;
 	sun4d_distribute_irqs();
diff --git a/arch/sparc/kernel/sun4m_smp.c b/arch/sparc/kernel/sun4m_smp.c
index 960b113d000..762d6eedd94 100644
--- a/arch/sparc/kernel/sun4m_smp.c
+++ b/arch/sparc/kernel/sun4m_smp.c
@@ -121,9 +121,6 @@ void __cpuinit smp4m_callin(void)
  */
 extern struct linux_prom_registers smp_penguin_ctable;
 
-extern unsigned long trapbase_cpu1[];
-extern unsigned long trapbase_cpu2[];
-extern unsigned long trapbase_cpu3[];
 
 void __init smp4m_boot_cpus(void)
 {
@@ -193,29 +190,6 @@ void __init smp4m_smp_done(void)
 	*prev = first;
 	local_flush_cache_all();
 
-	/* Free unneeded trap tables */
-	if (!cpu_isset(1, cpu_present_map)) {
-		ClearPageReserved(virt_to_page(trapbase_cpu1));
-		init_page_count(virt_to_page(trapbase_cpu1));
-		free_page((unsigned long)trapbase_cpu1);
-		totalram_pages++;
-		num_physpages++;
-	}
-	if (!cpu_isset(2, cpu_present_map)) {
-		ClearPageReserved(virt_to_page(trapbase_cpu2));
-		init_page_count(virt_to_page(trapbase_cpu2));
-		free_page((unsigned long)trapbase_cpu2);
-		totalram_pages++;
-		num_physpages++;
-	}
-	if (!cpu_isset(3, cpu_present_map)) {
-		ClearPageReserved(virt_to_page(trapbase_cpu3));
-		init_page_count(virt_to_page(trapbase_cpu3));
-		free_page((unsigned long)trapbase_cpu3);
-		totalram_pages++;
-		num_physpages++;
-	}
-
 	/* Ok, they are spinning and ready to go. */
 }
diff --git a/arch/sparc/kernel/sys32.S b/arch/sparc/kernel/sys32.S
index f061c4dda9e..e7061138c98 100644
--- a/arch/sparc/kernel/sys32.S
+++ b/arch/sparc/kernel/sys32.S
@@ -121,7 +121,7 @@ SIGN2(sys32_syslog, sys_syslog, %o0, %o2)
 SIGN1(sys32_umask, sys_umask, %o0)
 SIGN3(sys32_tgkill, sys_tgkill, %o0, %o1, %o2)
 SIGN1(sys32_sendto, sys_sendto, %o0)
-SIGN1(sys32_recvfrom, sys_recvfrom, %o0)
+SIGN1(sys32_recvfrom, compat_sys_recvfrom, %o0)
 SIGN3(sys32_socket, sys_socket, %o0, %o1, %o2)
 SIGN2(sys32_connect, sys_connect, %o0, %o2)
 SIGN2(sys32_bind, sys_bind, %o0, %o2)
@@ -134,10 +134,12 @@ SIGN1(sys32_getpeername, sys_getpeername, %o0)
 SIGN1(sys32_getsockname, sys_getsockname, %o0)
 SIGN2(sys32_ioprio_get, sys_ioprio_get, %o0, %o1)
 SIGN3(sys32_ioprio_set, sys_ioprio_set, %o0, %o1, %o2)
-SIGN2(sys32_splice, sys_splice, %o0, %o1)
+SIGN2(sys32_splice, sys_splice, %o0, %o2)
 SIGN2(sys32_sync_file_range, compat_sync_file_range, %o0, %o5)
 SIGN2(sys32_tee, sys_tee, %o0, %o1)
 SIGN1(sys32_vmsplice, compat_sys_vmsplice, %o0)
+SIGN1(sys32_truncate, sys_truncate, %o1)
+SIGN1(sys32_ftruncate, sys_ftruncate, %o1)
 
 	.globl		sys32_mmap2
 sys32_mmap2:
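Background for the SIGN changes: a compat task's 32-bit int arguments arrive in 64-bit registers with undefined upper halves, and each SIGNn wrapper sign-extends the named %oN registers before branching to the 64-bit handler. The splice line now extends %o2 (the second fd) instead of %o1, which carries the off_in pointer and must not be mangled, and truncate/ftruncate gain wrappers so a negative 32-bit length is extended in %o1. A C analogue of the new truncate wrapper, illustrative only (the real wrapper is the SIGN1 assembly macro above):

	/* Sketch only: the C-level effect of
	 * SIGN1(sys32_truncate, sys_truncate, %o1).  A negative 32-bit
	 * length must reach the 64-bit handler as e.g. -1, not 0xffffffff.
	 */
	long sys32_truncate_example(const char *path, unsigned int length32)
	{
		long length = (int)length32;	/* sign-extend %o1 */

		return sys_truncate(path, length);
	}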
diff --git a/arch/sparc/kernel/sysfs.c b/arch/sparc/kernel/sysfs.c
index d28f496f466..ca39c606fe8 100644
--- a/arch/sparc/kernel/sysfs.c
+++ b/arch/sparc/kernel/sysfs.c
@@ -2,6 +2,7 @@
  *
  * Copyright (C) 2007 David S. Miller <davem@davemloft.net>
  */
+#include <linux/sched.h>
 #include <linux/sysdev.h>
 #include <linux/cpu.h>
 #include <linux/smp.h>
diff --git a/arch/sparc/kernel/systbls_32.S b/arch/sparc/kernel/systbls_32.S
index 69090165729..04181577cb6 100644
--- a/arch/sparc/kernel/systbls_32.S
+++ b/arch/sparc/kernel/systbls_32.S
@@ -82,5 +82,5 @@ sys_call_table:
/*310*/	.long sys_utimensat, sys_signalfd, sys_timerfd_create, sys_eventfd, sys_fallocate
/*315*/	.long sys_timerfd_settime, sys_timerfd_gettime, sys_signalfd4, sys_eventfd2, sys_epoll_create1
/*320*/	.long sys_dup3, sys_pipe2, sys_inotify_init1, sys_accept4, sys_preadv
-/*325*/	.long sys_pwritev, sys_rt_tgsigqueueinfo
+/*325*/	.long sys_pwritev, sys_rt_tgsigqueueinfo, sys_perf_counter_open
diff --git a/arch/sparc/kernel/systbls_64.S b/arch/sparc/kernel/systbls_64.S
index 6b3ee88e253..91b06b7f7ac 100644
--- a/arch/sparc/kernel/systbls_64.S
+++ b/arch/sparc/kernel/systbls_64.S
@@ -43,8 +43,8 @@ sys_call_table32:
/*110*/	.word sys_setresgid, sys_getresgid, sys_setregid, sys_nis_syscall, sys_nis_syscall
	.word sys32_getgroups, compat_sys_gettimeofday, sys32_getrusage, sys_nis_syscall, sys_getcwd
/*120*/	.word compat_sys_readv, compat_sys_writev, compat_sys_settimeofday, sys_fchown16, sys_fchmod
-	.word sys_nis_syscall, sys_setreuid16, sys_setregid16, sys_rename, sys_truncate
-/*130*/	.word sys_ftruncate, sys_flock, compat_sys_lstat64, sys_nis_syscall, sys_nis_syscall
+	.word sys_nis_syscall, sys_setreuid16, sys_setregid16, sys_rename, sys32_truncate
+/*130*/	.word sys32_ftruncate, sys_flock, compat_sys_lstat64, sys_nis_syscall, sys_nis_syscall
	.word sys_nis_syscall, sys32_mkdir, sys_rmdir, compat_sys_utimes, compat_sys_stat64
/*140*/	.word sys32_sendfile64, sys_nis_syscall, sys32_futex, sys_gettid, compat_sys_getrlimit
	.word compat_sys_setrlimit, sys_pivot_root, sys32_prctl, sys_pciconfig_read, sys_pciconfig_write
@@ -83,7 +83,7 @@ sys_call_table32:
/*310*/	.word compat_sys_utimensat, compat_sys_signalfd, sys_timerfd_create, sys_eventfd, compat_sys_fallocate
	.word compat_sys_timerfd_settime, compat_sys_timerfd_gettime, compat_sys_signalfd4, sys_eventfd2, sys_epoll_create1
/*320*/	.word sys_dup3, sys_pipe2, sys_inotify_init1, sys_accept4, compat_sys_preadv
-	.word compat_sys_pwritev, compat_sys_rt_tgsigqueueinfo
+	.word compat_sys_pwritev, compat_sys_rt_tgsigqueueinfo, sys_perf_counter_open
 
 #endif /* CONFIG_COMPAT */
 
@@ -158,4 +158,4 @@ sys_call_table:
/*310*/	.word sys_utimensat, sys_signalfd, sys_timerfd_create, sys_eventfd, sys_fallocate
	.word sys_timerfd_settime, sys_timerfd_gettime, sys_signalfd4, sys_eventfd2, sys_epoll_create1
/*320*/	.word sys_dup3, sys_pipe2, sys_inotify_init1, sys_accept4, sys_preadv
-	.word sys_pwritev, sys_rt_tgsigqueueinfo
+	.word sys_pwritev, sys_rt_tgsigqueueinfo, sys_perf_counter_open
diff --git a/arch/sparc/kernel/vmlinux.lds.S b/arch/sparc/kernel/vmlinux.lds.S
index fcbbd000ec0..866390feb68 100644
--- a/arch/sparc/kernel/vmlinux.lds.S
+++ b/arch/sparc/kernel/vmlinux.lds.S
@@ -171,12 +171,8 @@ SECTIONS
 	}
 	_end = . ;
 
-	/DISCARD/ : {
-		EXIT_TEXT
-		EXIT_DATA
-		*(.exitcall.exit)
-	}
-
 	STABS_DEBUG
 	DWARF_DEBUG
+
+	DISCARDS
 }
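With the three table rows above, the new entry slots in after rt_tgsigqueueinfo; counting from the /*320*/ row, that makes perf_counter_open syscall number 327 on both the native and compat tables (verify against this tree's asm/unistd.h). A minimal userspace sketch, assuming the perf_counter_attr layout from this era's linux/perf_counter.h and the usual (attr, pid, cpu, group_fd, flags) argument order:

	/* Sketch only: 327 is inferred from the tables above, and the
	 * attr fields are assumptions to check against linux/perf_counter.h.
	 */
	#include <stdio.h>
	#include <string.h>
	#include <unistd.h>
	#include <sys/syscall.h>
	#include <linux/perf_counter.h>

	int main(void)
	{
		struct perf_counter_attr attr;
		long fd;

		memset(&attr, 0, sizeof(attr));
		attr.size = sizeof(attr);
		attr.type = PERF_TYPE_HARDWARE;
		attr.config = PERF_COUNT_HW_CPU_CYCLES;

		/* count CPU cycles for the calling task on any CPU */
		fd = syscall(327, &attr, 0, -1, -1, 0);
		if (fd < 0) {
			perror("perf_counter_open");
			return 1;
		}
		close(fd);
		return 0;
	}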