Diffstat (limited to 'arch/powerpc/kernel')
-rw-r--r--  arch/powerpc/kernel/Makefile                |   1
-rw-r--r--  arch/powerpc/kernel/btext.c                 |   2
-rw-r--r--  arch/powerpc/kernel/cputable.c              |  88
-rw-r--r--  arch/powerpc/kernel/head_64.S               |  48
-rw-r--r--  arch/powerpc/kernel/iommu.c                 |  77
-rw-r--r--  arch/powerpc/kernel/misc_32.S               |  74
-rw-r--r--  arch/powerpc/kernel/misc_64.S               | 124
-rw-r--r--  arch/powerpc/kernel/module_32.c             |  39
-rw-r--r--  arch/powerpc/kernel/module_64.c             |  49
-rw-r--r--  arch/powerpc/kernel/pci_32.c                |  12
-rw-r--r--  arch/powerpc/kernel/pci_64.c                |  10
-rw-r--r--  arch/powerpc/kernel/perfmon_fsl_booke.c     | 221
-rw-r--r--  arch/powerpc/kernel/pmc.c                   |   2
-rw-r--r--  arch/powerpc/kernel/process.c               |  10
-rw-r--r--  arch/powerpc/kernel/prom.c                  |   8
-rw-r--r--  arch/powerpc/kernel/rtas_flash.c            |  47
-rw-r--r--  arch/powerpc/kernel/setup_32.c              |   8
-rw-r--r--  arch/powerpc/kernel/setup_64.c              |  11
-rw-r--r--  arch/powerpc/kernel/time.c                  | 105
-rw-r--r--  arch/powerpc/kernel/traps.c                 |  21
-rw-r--r--  arch/powerpc/kernel/vdso.c                  |  43
-rw-r--r--  arch/powerpc/kernel/vdso32/vdso32.lds.S     |  12
-rw-r--r--  arch/powerpc/kernel/vdso64/gettimeofday.S   |   6
-rw-r--r--  arch/powerpc/kernel/vdso64/vdso64.lds.S     |  10
-rw-r--r--  arch/powerpc/kernel/vio.c                   |   4
-rw-r--r--  arch/powerpc/kernel/vmlinux.lds.S           |   8
26 files changed, 393 insertions, 647 deletions
diff --git a/arch/powerpc/kernel/Makefile b/arch/powerpc/kernel/Makefile
index 8b133afbdc2..7af23c43fd4 100644
--- a/arch/powerpc/kernel/Makefile
+++ b/arch/powerpc/kernel/Makefile
@@ -38,7 +38,6 @@ obj-$(CONFIG_6xx) += idle_6xx.o l2cr_6xx.o cpu_setup_6xx.o
obj-$(CONFIG_TAU) += tau_6xx.o
obj32-$(CONFIG_SOFTWARE_SUSPEND) += swsusp_32.o
obj32-$(CONFIG_MODULES) += module_32.o
-obj-$(CONFIG_E500) += perfmon_fsl_booke.o
ifeq ($(CONFIG_PPC_MERGE),y)
diff --git a/arch/powerpc/kernel/btext.c b/arch/powerpc/kernel/btext.c
index 995fcef156f..93f21aaf7c8 100644
--- a/arch/powerpc/kernel/btext.c
+++ b/arch/powerpc/kernel/btext.c
@@ -182,7 +182,7 @@ int btext_initialize(struct device_node *np)
prop = get_property(np, "linux,bootx-linebytes", NULL);
if (prop == NULL)
prop = get_property(np, "linebytes", NULL);
- if (prop)
+ if (prop && *prop != 0xffffffffu)
pitch = *prop;
if (pitch == 1)
pitch = 0x1000;
diff --git a/arch/powerpc/kernel/cputable.c b/arch/powerpc/kernel/cputable.c
index 47a613cdd77..bfd499ee375 100644
--- a/arch/powerpc/kernel/cputable.c
+++ b/arch/powerpc/kernel/cputable.c
@@ -18,6 +18,7 @@
#include <asm/oprofile_impl.h>
#include <asm/cputable.h>
+#include <asm/prom.h> /* for PTRRELOC on ARCH=ppc */
struct cpu_spec* cur_cpu_spec = NULL;
EXPORT_SYMBOL(cur_cpu_spec);
@@ -73,7 +74,7 @@ extern void __restore_cpu_ppc970(void);
#define PPC_FEATURE_SPE_COMP 0
#endif
-struct cpu_spec cpu_specs[] = {
+static struct cpu_spec cpu_specs[] = {
#ifdef CONFIG_PPC64
{ /* Power3 */
.pvr_mask = 0xffff0000,
@@ -227,6 +228,21 @@ struct cpu_spec cpu_specs[] = {
.oprofile_type = PPC_OPROFILE_POWER4,
.platform = "ppc970",
},
+ { /* PPC970GX */
+ .pvr_mask = 0xffff0000,
+ .pvr_value = 0x00450000,
+ .cpu_name = "PPC970GX",
+ .cpu_features = CPU_FTRS_PPC970,
+ .cpu_user_features = COMMON_USER_POWER4 |
+ PPC_FEATURE_HAS_ALTIVEC_COMP,
+ .icache_bsize = 128,
+ .dcache_bsize = 128,
+ .num_pmcs = 8,
+ .cpu_setup = __setup_cpu_ppc970,
+ .oprofile_cpu_type = "ppc64/970",
+ .oprofile_type = PPC_OPROFILE_POWER4,
+ .platform = "ppc970",
+ },
{ /* Power5 GR */
.pvr_mask = 0xffff0000,
.pvr_value = 0x003a0000,
@@ -268,7 +284,7 @@ struct cpu_spec cpu_specs[] = {
.cpu_user_features = COMMON_USER_POWER6,
.icache_bsize = 128,
.dcache_bsize = 128,
- .num_pmcs = 8,
+ .num_pmcs = 6,
.oprofile_cpu_type = "ppc64/power6",
.oprofile_type = PPC_OPROFILE_POWER4,
.oprofile_mmcra_sihv = POWER6_MMCRA_SIHV,
@@ -1152,3 +1168,71 @@ struct cpu_spec cpu_specs[] = {
#endif /* !CLASSIC_PPC */
#endif /* CONFIG_PPC32 */
};
+
+struct cpu_spec *identify_cpu(unsigned long offset)
+{
+ struct cpu_spec *s = cpu_specs;
+ struct cpu_spec **cur = &cur_cpu_spec;
+ unsigned int pvr = mfspr(SPRN_PVR);
+ int i;
+
+ s = PTRRELOC(s);
+ cur = PTRRELOC(cur);
+
+ if (*cur != NULL)
+ return PTRRELOC(*cur);
+
+ for (i = 0; i < ARRAY_SIZE(cpu_specs); i++,s++)
+ if ((pvr & s->pvr_mask) == s->pvr_value) {
+ *cur = cpu_specs + i;
+#ifdef CONFIG_PPC64
+ /* ppc64 expects identify_cpu to also call setup_cpu
+ * for that processor. I will consolidate that at a
+ * later time, for now, just use our friend #ifdef.
+ * we also don't need to PTRRELOC the function pointer
+ * on ppc64 as we are running at 0 in real mode.
+ */
+ if (s->cpu_setup) {
+ s->cpu_setup(offset, s);
+ }
+#endif /* CONFIG_PPC64 */
+ return s;
+ }
+ BUG();
+ return NULL;
+}
+
+void do_feature_fixups(unsigned long value, void *fixup_start, void *fixup_end)
+{
+ struct fixup_entry {
+ unsigned long mask;
+ unsigned long value;
+ long start_off;
+ long end_off;
+ } *fcur, *fend;
+
+ fcur = fixup_start;
+ fend = fixup_end;
+
+ for (; fcur < fend; fcur++) {
+ unsigned int *pstart, *pend, *p;
+
+ if ((value & fcur->mask) == fcur->value)
+ continue;
+
+ /* These PTRRELOCs will disappear once the new scheme for
+ * modules and vdso is implemented
+ */
+ pstart = ((unsigned int *)fcur) + (fcur->start_off / 4);
+ pend = ((unsigned int *)fcur) + (fcur->end_off / 4);
+
+ for (p = pstart; p < pend; p++) {
+ *p = 0x60000000u;
+ asm volatile ("dcbst 0, %0" : : "r" (p));
+ }
+ asm volatile ("sync" : : : "memory");
+ for (p = pstart; p < pend; p++)
+ asm volatile ("icbi 0,%0" : : "r" (p));
+ asm volatile ("sync; isync" : : : "memory");
+ }
+}
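
The nop-patching scheme that do_feature_fixups() introduces above can be sketched outside the kernel: each fixup entry carries a feature mask/value pair plus start/end offsets, relative to the entry itself, of an instruction range to overwrite with nops when the running CPU lacks the feature. The following is a minimal userspace sketch under assumed names (the fixup_entry layout mirrors the code above, but FTR_ALTIVEC, the sample table and the omitted cache flushing and PTRRELOC handling are illustrative, not the real __ftr_fixup section processing):

#include <stdint.h>
#include <stdio.h>

#define PPC_NOP		0x60000000u	/* the nop encoding written above */
#define FTR_ALTIVEC	0x1ul		/* hypothetical feature bit */

struct fixup_entry {
	unsigned long mask;		/* feature bits this entry tests */
	unsigned long value;		/* required value of those bits */
	long start_off;			/* patched range, relative to the entry */
	long end_off;
};

static void apply_fixups(unsigned long features,
			 struct fixup_entry *fcur, struct fixup_entry *fend)
{
	for (; fcur < fend; fcur++) {
		uint32_t *p, *pstart, *pend;

		if ((features & fcur->mask) == fcur->value)
			continue;	/* feature present: keep the code */

		/* the kernel does (uint *)fcur + off/4; same byte offset */
		pstart = (uint32_t *)((uintptr_t)fcur + fcur->start_off);
		pend   = (uint32_t *)((uintptr_t)fcur + fcur->end_off);
		for (p = pstart; p < pend; p++)
			*p = PPC_NOP;	/* kernel also flushes d/i-cache here */
	}
}

int main(void)
{
	uint32_t code[2] = { 0x7c0802a6, 0x4e800020 };	/* mflr r0; blr */
	struct fixup_entry e = { .mask = FTR_ALTIVEC, .value = FTR_ALTIVEC };

	e.start_off = (long)((uintptr_t)code - (uintptr_t)&e);
	e.end_off   = (long)((uintptr_t)(code + 2) - (uintptr_t)&e);

	apply_fixups(0, &e, &e + 1);		/* CPU without the feature */
	printf("%08x %08x\n", code[0], code[1]);	/* both become nops */
	return 0;
}
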
diff --git a/arch/powerpc/kernel/head_64.S b/arch/powerpc/kernel/head_64.S
index 645c7f10fb2..e720729f3e5 100644
--- a/arch/powerpc/kernel/head_64.S
+++ b/arch/powerpc/kernel/head_64.S
@@ -487,7 +487,7 @@ BEGIN_FTR_SECTION
rlwimi r13,r12,16,0x20
mfcr r12
cmpwi r13,0x2c
- beq .do_stab_bolted_pSeries
+ beq do_stab_bolted_pSeries
mtcrf 0x80,r12
mfspr r12,SPRN_SPRG2
END_FTR_SECTION_IFCLR(CPU_FTR_SLB)
@@ -600,7 +600,7 @@ system_call_pSeries:
STD_EXCEPTION_PSERIES(0xf00, performance_monitor)
.align 7
-_GLOBAL(do_stab_bolted_pSeries)
+do_stab_bolted_pSeries:
mtcrf 0x80,r12
mfspr r12,SPRN_SPRG2
EXCEPTION_PROLOG_PSERIES(PACA_EXSLB, .do_stab_bolted)
@@ -1046,7 +1046,7 @@ slb_miss_fault:
li r5,0
std r4,_DAR(r1)
std r5,_DSISR(r1)
- b .handle_page_fault
+ b handle_page_fault
unrecov_user_slb:
EXCEPTION_PROLOG_COMMON(0x4200, PACA_EXGEN)
@@ -1174,12 +1174,13 @@ program_check_common:
.globl fp_unavailable_common
fp_unavailable_common:
EXCEPTION_PROLOG_COMMON(0x800, PACA_EXGEN)
- bne .load_up_fpu /* if from user, just load it up */
+ bne 1f /* if from user, just load it up */
bl .save_nvgprs
addi r3,r1,STACK_FRAME_OVERHEAD
ENABLE_INTS
bl .kernel_fp_unavailable_exception
BUG_OPCODE
+1: b .load_up_fpu
.align 7
.globl altivec_unavailable_common
@@ -1279,10 +1280,10 @@ _GLOBAL(do_hash_page)
std r4,_DSISR(r1)
andis. r0,r4,0xa450 /* weird error? */
- bne- .handle_page_fault /* if not, try to insert a HPTE */
+ bne- handle_page_fault /* if not, try to insert a HPTE */
BEGIN_FTR_SECTION
andis. r0,r4,0x0020 /* Is it a segment table fault? */
- bne- .do_ste_alloc /* If so handle it */
+ bne- do_ste_alloc /* If so handle it */
END_FTR_SECTION_IFCLR(CPU_FTR_SLB)
/*
@@ -1324,7 +1325,7 @@ BEGIN_FW_FTR_SECTION
* because ret_from_except_lite will check for and handle pending
* interrupts if necessary.
*/
- beq .ret_from_except_lite
+ beq 13f
/* For a hash failure, we don't bother re-enabling interrupts */
ble- 12f
@@ -1346,14 +1347,14 @@ BEGIN_FW_FTR_SECTION
END_FW_FTR_SECTION_IFCLR(FW_FEATURE_ISERIES)
/* Here we have a page fault that hash_page can't handle. */
-_GLOBAL(handle_page_fault)
+handle_page_fault:
ENABLE_INTS
11: ld r4,_DAR(r1)
ld r5,_DSISR(r1)
addi r3,r1,STACK_FRAME_OVERHEAD
bl .do_page_fault
cmpdi r3,0
- beq+ .ret_from_except_lite
+ beq+ 13f
bl .save_nvgprs
mr r5,r3
addi r3,r1,STACK_FRAME_OVERHEAD
@@ -1370,12 +1371,14 @@ _GLOBAL(handle_page_fault)
bl .low_hash_fault
b .ret_from_except
+13: b .ret_from_except_lite
+
/* here we have a segment miss */
-_GLOBAL(do_ste_alloc)
+do_ste_alloc:
bl .ste_allocate /* try to insert stab entry */
cmpdi r3,0
- beq+ fast_exception_return
- b .handle_page_fault
+ bne- handle_page_fault
+ b fast_exception_return
/*
* r13 points to the PACA, r9 contains the saved CR,
@@ -1580,11 +1583,6 @@ _STATIC(__start_initialization_iSeries)
li r0,0
stdu r0,-STACK_FRAME_OVERHEAD(r1)
- LOAD_REG_IMMEDIATE(r3,cpu_specs)
- LOAD_REG_IMMEDIATE(r4,cur_cpu_spec)
- li r5,0
- bl .identify_cpu
-
LOAD_REG_IMMEDIATE(r2,__toc_start)
addi r2,r2,0x4000
addi r2,r2,0x4000
@@ -1646,6 +1644,8 @@ _GLOBAL(__start_initialization_multiplatform)
cmpwi r0,0x3c /* 970FX */
beq 1f
cmpwi r0,0x44 /* 970MP */
+ beq 1f
+ cmpwi r0,0x45 /* 970GX */
bne 2f
1: bl .__cpu_preinit_ppc970
2:
@@ -1964,13 +1964,6 @@ _STATIC(start_here_multiplatform)
addi r2,r2,0x4000
add r2,r2,r26
- LOAD_REG_IMMEDIATE(r3, cpu_specs)
- add r3,r3,r26
- LOAD_REG_IMMEDIATE(r4,cur_cpu_spec)
- add r4,r4,r26
- mr r5,r26
- bl .identify_cpu
-
/* Do very early kernel initializations, including initial hash table,
* stab and slb setup before we turn on relocation. */
@@ -2000,13 +1993,6 @@ _STATIC(start_here_common)
li r0,0
stdu r0,-STACK_FRAME_OVERHEAD(r1)
- /* Apply the CPUs-specific fixups (nop out sections not relevant
- * to this CPU
- */
- li r3,0
- bl .do_cpu_ftr_fixups
- bl .do_fw_ftr_fixups
-
/* ptr to current */
LOAD_REG_IMMEDIATE(r4, init_task)
std r4,PACACURRENT(r13)
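
Both the new 970GX cputable entry and the extra cmpwi above key off the PVR (processor version register): mask the PVR, compare against each table entry, first match wins. A standalone sketch of that lookup, reusing the 970 PVR values visible in this patch (the catch-all entry and the trimmed-down struct are simplifications for illustration, not the kernel's cpu_spec):

#include <stdint.h>
#include <stdio.h>

struct cpu_spec {
	uint32_t pvr_mask;
	uint32_t pvr_value;
	const char *cpu_name;
};

static const struct cpu_spec cpu_specs[] = {
	{ 0xffff0000, 0x003c0000, "PPC970FX" },
	{ 0xffff0000, 0x00440000, "PPC970MP" },
	{ 0xffff0000, 0x00450000, "PPC970GX" },
	{ 0x00000000, 0x00000000, "unknown"  },	/* catch-all, always matches */
};

static const struct cpu_spec *identify_cpu(uint32_t pvr)
{
	const struct cpu_spec *s = cpu_specs;

	/* first entry whose masked PVR matches wins, as in the kernel table */
	while ((pvr & s->pvr_mask) != s->pvr_value)
		s++;
	return s;
}

int main(void)
{
	printf("%s\n", identify_cpu(0x00450200)->cpu_name);	/* PPC970GX */
	printf("%s\n", identify_cpu(0x12345678)->cpu_name);	/* unknown */
	return 0;
}
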
diff --git a/arch/powerpc/kernel/iommu.c b/arch/powerpc/kernel/iommu.c
index f88a2a675d9..ba6b7256084 100644
--- a/arch/powerpc/kernel/iommu.c
+++ b/arch/powerpc/kernel/iommu.c
@@ -47,6 +47,17 @@ static int novmerge = 0;
static int novmerge = 1;
#endif
+static inline unsigned long iommu_num_pages(unsigned long vaddr,
+ unsigned long slen)
+{
+ unsigned long npages;
+
+ npages = IOMMU_PAGE_ALIGN(vaddr + slen) - (vaddr & IOMMU_PAGE_MASK);
+ npages >>= IOMMU_PAGE_SHIFT;
+
+ return npages;
+}
+
static int __init setup_iommu(char *str)
{
if (!strcmp(str, "novmerge"))
@@ -178,10 +189,10 @@ static dma_addr_t iommu_alloc(struct iommu_table *tbl, void *page,
}
entry += tbl->it_offset; /* Offset into real TCE table */
- ret = entry << PAGE_SHIFT; /* Set the return dma address */
+ ret = entry << IOMMU_PAGE_SHIFT; /* Set the return dma address */
/* Put the TCEs in the HW table */
- ppc_md.tce_build(tbl, entry, npages, (unsigned long)page & PAGE_MASK,
+ ppc_md.tce_build(tbl, entry, npages, (unsigned long)page & IOMMU_PAGE_MASK,
direction);
@@ -203,7 +214,7 @@ static void __iommu_free(struct iommu_table *tbl, dma_addr_t dma_addr,
unsigned long entry, free_entry;
unsigned long i;
- entry = dma_addr >> PAGE_SHIFT;
+ entry = dma_addr >> IOMMU_PAGE_SHIFT;
free_entry = entry - tbl->it_offset;
if (((free_entry + npages) > tbl->it_size) ||
@@ -270,7 +281,7 @@ int iommu_map_sg(struct device *dev, struct iommu_table *tbl,
/* Init first segment length for backout at failure */
outs->dma_length = 0;
- DBG("mapping %d elements:\n", nelems);
+ DBG("sg mapping %d elements:\n", nelems);
spin_lock_irqsave(&(tbl->it_lock), flags);
@@ -285,9 +296,8 @@ int iommu_map_sg(struct device *dev, struct iommu_table *tbl,
}
/* Allocate iommu entries for that segment */
vaddr = (unsigned long)page_address(s->page) + s->offset;
- npages = PAGE_ALIGN(vaddr + slen) - (vaddr & PAGE_MASK);
- npages >>= PAGE_SHIFT;
- entry = iommu_range_alloc(tbl, npages, &handle, mask >> PAGE_SHIFT, 0);
+ npages = iommu_num_pages(vaddr, slen);
+ entry = iommu_range_alloc(tbl, npages, &handle, mask >> IOMMU_PAGE_SHIFT, 0);
DBG(" - vaddr: %lx, size: %lx\n", vaddr, slen);
@@ -301,14 +311,14 @@ int iommu_map_sg(struct device *dev, struct iommu_table *tbl,
/* Convert entry to a dma_addr_t */
entry += tbl->it_offset;
- dma_addr = entry << PAGE_SHIFT;
- dma_addr |= s->offset;
+ dma_addr = entry << IOMMU_PAGE_SHIFT;
+ dma_addr |= (s->offset & ~IOMMU_PAGE_MASK);
- DBG(" - %lx pages, entry: %lx, dma_addr: %lx\n",
+ DBG(" - %lu pages, entry: %lx, dma_addr: %lx\n",
npages, entry, dma_addr);
/* Insert into HW table */
- ppc_md.tce_build(tbl, entry, npages, vaddr & PAGE_MASK, direction);
+ ppc_md.tce_build(tbl, entry, npages, vaddr & IOMMU_PAGE_MASK, direction);
/* If we are in an open segment, try merging */
if (segstart != s) {
@@ -323,7 +333,7 @@ int iommu_map_sg(struct device *dev, struct iommu_table *tbl,
DBG(" can't merge, new segment.\n");
} else {
outs->dma_length += s->length;
- DBG(" merged, new len: %lx\n", outs->dma_length);
+ DBG(" merged, new len: %ux\n", outs->dma_length);
}
}
@@ -367,9 +377,8 @@ int iommu_map_sg(struct device *dev, struct iommu_table *tbl,
if (s->dma_length != 0) {
unsigned long vaddr, npages;
- vaddr = s->dma_address & PAGE_MASK;
- npages = (PAGE_ALIGN(s->dma_address + s->dma_length) - vaddr)
- >> PAGE_SHIFT;
+ vaddr = s->dma_address & IOMMU_PAGE_MASK;
+ npages = iommu_num_pages(s->dma_address, s->dma_length);
__iommu_free(tbl, vaddr, npages);
s->dma_address = DMA_ERROR_CODE;
s->dma_length = 0;
@@ -398,8 +407,7 @@ void iommu_unmap_sg(struct iommu_table *tbl, struct scatterlist *sglist,
if (sglist->dma_length == 0)
break;
- npages = (PAGE_ALIGN(dma_handle + sglist->dma_length)
- - (dma_handle & PAGE_MASK)) >> PAGE_SHIFT;
+ npages = iommu_num_pages(dma_handle,sglist->dma_length);
__iommu_free(tbl, dma_handle, npages);
sglist++;
}
@@ -532,12 +540,11 @@ dma_addr_t iommu_map_single(struct iommu_table *tbl, void *vaddr,
BUG_ON(direction == DMA_NONE);
uaddr = (unsigned long)vaddr;
- npages = PAGE_ALIGN(uaddr + size) - (uaddr & PAGE_MASK);
- npages >>= PAGE_SHIFT;
+ npages = iommu_num_pages(uaddr, size);
if (tbl) {
dma_handle = iommu_alloc(tbl, vaddr, npages, direction,
- mask >> PAGE_SHIFT, 0);
+ mask >> IOMMU_PAGE_SHIFT, 0);
if (dma_handle == DMA_ERROR_CODE) {
if (printk_ratelimit()) {
printk(KERN_INFO "iommu_alloc failed, "
@@ -545,7 +552,7 @@ dma_addr_t iommu_map_single(struct iommu_table *tbl, void *vaddr,
tbl, vaddr, npages);
}
} else
- dma_handle |= (uaddr & ~PAGE_MASK);
+ dma_handle |= (uaddr & ~IOMMU_PAGE_MASK);
}
return dma_handle;
@@ -554,11 +561,14 @@ dma_addr_t iommu_map_single(struct iommu_table *tbl, void *vaddr,
void iommu_unmap_single(struct iommu_table *tbl, dma_addr_t dma_handle,
size_t size, enum dma_data_direction direction)
{
+ unsigned int npages;
+
BUG_ON(direction == DMA_NONE);
- if (tbl)
- iommu_free(tbl, dma_handle, (PAGE_ALIGN(dma_handle + size) -
- (dma_handle & PAGE_MASK)) >> PAGE_SHIFT);
+ if (tbl) {
+ npages = iommu_num_pages(dma_handle, size);
+ iommu_free(tbl, dma_handle, npages);
+ }
}
/* Allocates a contiguous real buffer and creates mappings over it.
@@ -570,11 +580,11 @@ void *iommu_alloc_coherent(struct iommu_table *tbl, size_t size,
{
void *ret = NULL;
dma_addr_t mapping;
- unsigned int npages, order;
+ unsigned int order;
+ unsigned int nio_pages, io_order;
struct page *page;
size = PAGE_ALIGN(size);
- npages = size >> PAGE_SHIFT;
order = get_order(size);
/*
@@ -598,8 +608,10 @@ void *iommu_alloc_coherent(struct iommu_table *tbl, size_t size,
memset(ret, 0, size);
/* Set up tces to cover the allocated range */
- mapping = iommu_alloc(tbl, ret, npages, DMA_BIDIRECTIONAL,
- mask >> PAGE_SHIFT, order);
+ nio_pages = size >> IOMMU_PAGE_SHIFT;
+ io_order = get_iommu_order(size);
+ mapping = iommu_alloc(tbl, ret, nio_pages, DMA_BIDIRECTIONAL,
+ mask >> IOMMU_PAGE_SHIFT, io_order);
if (mapping == DMA_ERROR_CODE) {
free_pages((unsigned long)ret, order);
return NULL;
@@ -611,12 +623,13 @@ void *iommu_alloc_coherent(struct iommu_table *tbl, size_t size,
void iommu_free_coherent(struct iommu_table *tbl, size_t size,
void *vaddr, dma_addr_t dma_handle)
{
- unsigned int npages;
-
if (tbl) {
+ unsigned int nio_pages;
+
+ size = PAGE_ALIGN(size);
+ nio_pages = size >> IOMMU_PAGE_SHIFT;
+ iommu_free(tbl, dma_handle, nio_pages);
size = PAGE_ALIGN(size);
- npages = size >> PAGE_SHIFT;
- iommu_free(tbl, dma_handle, npages);
free_pages((unsigned long)vaddr, get_order(size));
}
}
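
The open-coded PAGE_ALIGN/PAGE_SHIFT arithmetic that this patch replaces is now centralized in iommu_num_pages(), expressed in the IOMMU's own page size rather than the CPU page size. A minimal standalone sketch of the same calculation, assuming a 4 KB I/O page (the real IOMMU_PAGE_* constants live in the iommu headers):

#include <stdio.h>

#define IOMMU_PAGE_SHIFT	12			/* assumed 4 KB I/O page */
#define IOMMU_PAGE_SIZE		(1UL << IOMMU_PAGE_SHIFT)
#define IOMMU_PAGE_MASK		(~(IOMMU_PAGE_SIZE - 1))
#define IOMMU_PAGE_ALIGN(a)	(((a) + IOMMU_PAGE_SIZE - 1) & IOMMU_PAGE_MASK)

/* number of I/O pages needed to map [vaddr, vaddr + slen) */
static unsigned long iommu_num_pages(unsigned long vaddr, unsigned long slen)
{
	return (IOMMU_PAGE_ALIGN(vaddr + slen) - (vaddr & IOMMU_PAGE_MASK))
		>> IOMMU_PAGE_SHIFT;
}

int main(void)
{
	/* 100 bytes straddling a page boundary still cost two I/O pages */
	printf("%lu\n", iommu_num_pages(0x1fe0, 100));	/* prints 2 */
	/* a page-aligned 8 KB buffer costs exactly two */
	printf("%lu\n", iommu_num_pages(0x2000, 8192));	/* prints 2 */
	return 0;
}
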
diff --git a/arch/powerpc/kernel/misc_32.S b/arch/powerpc/kernel/misc_32.S
index 88fd73fdf04..412bea3cf81 100644
--- a/arch/powerpc/kernel/misc_32.S
+++ b/arch/powerpc/kernel/misc_32.S
@@ -102,80 +102,6 @@ _GLOBAL(reloc_got2)
blr
/*
- * identify_cpu,
- * called with r3 = data offset and r4 = CPU number
- * doesn't change r3
- */
-_GLOBAL(identify_cpu)
- addis r8,r3,cpu_specs@ha
- addi r8,r8,cpu_specs@l
- mfpvr r7
-1:
- lwz r5,CPU_SPEC_PVR_MASK(r8)
- and r5,r5,r7
- lwz r6,CPU_SPEC_PVR_VALUE(r8)
- cmplw 0,r6,r5
- beq 1f
- addi r8,r8,CPU_SPEC_ENTRY_SIZE
- b 1b
-1:
- addis r6,r3,cur_cpu_spec@ha
- addi r6,r6,cur_cpu_spec@l
- sub r8,r8,r3
- stw r8,0(r6)
- blr
-
-/*
- * do_cpu_ftr_fixups - goes through the list of CPU feature fixups
- * and writes nop's over sections of code that don't apply for this cpu.
- * r3 = data offset (not changed)
- */
-_GLOBAL(do_cpu_ftr_fixups)
- /* Get CPU 0 features */
- addis r6,r3,cur_cpu_spec@ha
- addi r6,r6,cur_cpu_spec@l
- lwz r4,0(r6)
- add r4,r4,r3
- lwz r4,CPU_SPEC_FEATURES(r4)
-
- /* Get the fixup table */
- addis r6,r3,__start___ftr_fixup@ha
- addi r6,r6,__start___ftr_fixup@l
- addis r7,r3,__stop___ftr_fixup@ha
- addi r7,r7,__stop___ftr_fixup@l
-
- /* Do the fixup */
-1: cmplw 0,r6,r7
- bgelr
- addi r6,r6,16
- lwz r8,-16(r6) /* mask */
- and r8,r8,r4
- lwz r9,-12(r6) /* value */
- cmplw 0,r8,r9
- beq 1b
- lwz r8,-8(r6) /* section begin */
- lwz r9,-4(r6) /* section end */
- subf. r9,r8,r9
- beq 1b
- /* write nops over the section of code */
- /* todo: if large section, add a branch at the start of it */
- srwi r9,r9,2
- mtctr r9
- add r8,r8,r3
- lis r0,0x60000000@h /* nop */
-3: stw r0,0(r8)
- andi. r10,r4,CPU_FTR_SPLIT_ID_CACHE@l
- beq 2f
- dcbst 0,r8 /* suboptimal, but simpler */
- sync
- icbi 0,r8
-2: addi r8,r8,4
- bdnz 3b
- sync /* additional sync needed on g4 */
- isync
- b 1b
-
-/*
* call_setup_cpu - call the setup_cpu function for this cpu
* r3 = data offset, r24 = cpu number
*
diff --git a/arch/powerpc/kernel/misc_64.S b/arch/powerpc/kernel/misc_64.S
index c70e20708a1..21fd2c662a9 100644
--- a/arch/powerpc/kernel/misc_64.S
+++ b/arch/powerpc/kernel/misc_64.S
@@ -246,130 +246,6 @@ _GLOBAL(__flush_dcache_icache)
isync
blr
-/*
- * identify_cpu and calls setup_cpu
- * In: r3 = base of the cpu_specs array
- * r4 = address of cur_cpu_spec
- * r5 = relocation offset
- */
-_GLOBAL(identify_cpu)
- mfpvr r7
-1:
- lwz r8,CPU_SPEC_PVR_MASK(r3)
- and r8,r8,r7
- lwz r9,CPU_SPEC_PVR_VALUE(r3)
- cmplw 0,r9,r8
- beq 1f
- addi r3,r3,CPU_SPEC_ENTRY_SIZE
- b 1b
-1:
- sub r0,r3,r5
- std r0,0(r4)
- ld r4,CPU_SPEC_SETUP(r3)
- cmpdi 0,r4,0
- add r4,r4,r5
- beqlr
- ld r4,0(r4)
- add r4,r4,r5
- mtctr r4
- /* Calling convention for cpu setup is r3=offset, r4=cur_cpu_spec */
- mr r4,r3
- mr r3,r5
- bctr
-
-/*
- * do_cpu_ftr_fixups - goes through the list of CPU feature fixups
- * and writes nop's over sections of code that don't apply for this cpu.
- * r3 = data offset (not changed)
- */
-_GLOBAL(do_cpu_ftr_fixups)
- /* Get CPU 0 features */
- LOAD_REG_IMMEDIATE(r6,cur_cpu_spec)
- sub r6,r6,r3
- ld r4,0(r6)
- sub r4,r4,r3
- ld r4,CPU_SPEC_FEATURES(r4)
- /* Get the fixup table */
- LOAD_REG_IMMEDIATE(r6,__start___ftr_fixup)
- sub r6,r6,r3
- LOAD_REG_IMMEDIATE(r7,__stop___ftr_fixup)
- sub r7,r7,r3
- /* Do the fixup */
-1: cmpld r6,r7
- bgelr
- addi r6,r6,32
- ld r8,-32(r6) /* mask */
- and r8,r8,r4
- ld r9,-24(r6) /* value */
- cmpld r8,r9
- beq 1b
- ld r8,-16(r6) /* section begin */
- ld r9,-8(r6) /* section end */
- subf. r9,r8,r9
- beq 1b
- /* write nops over the section of code */
- /* todo: if large section, add a branch at the start of it */
- srwi r9,r9,2
- mtctr r9
- sub r8,r8,r3
- lis r0,0x60000000@h /* nop */
-3: stw r0,0(r8)
- andi. r10,r4,CPU_FTR_SPLIT_ID_CACHE@l
- beq 2f
- dcbst 0,r8 /* suboptimal, but simpler */
- sync
- icbi 0,r8
-2: addi r8,r8,4
- bdnz 3b
- sync /* additional sync needed on g4 */
- isync
- b 1b
-
-/*
- * do_fw_ftr_fixups - goes through the list of firmware feature fixups
- * and writes nop's over sections of code that don't apply for this firmware.
- * r3 = data offset (not changed)
- */
-_GLOBAL(do_fw_ftr_fixups)
- /* Get firmware features */
- LOAD_REG_IMMEDIATE(r6,powerpc_firmware_features)
- sub r6,r6,r3
- ld r4,0(r6)
- /* Get the fixup table */
- LOAD_REG_IMMEDIATE(r6,__start___fw_ftr_fixup)
- sub r6,r6,r3
- LOAD_REG_IMMEDIATE(r7,__stop___fw_ftr_fixup)
- sub r7,r7,r3
- /* Do the fixup */
-1: cmpld r6,r7
- bgelr
- addi r6,r6,32
- ld r8,-32(r6) /* mask */
- and r8,r8,r4
- ld r9,-24(r6) /* value */
- cmpld r8,r9
- beq 1b
- ld r8,-16(r6) /* section begin */
- ld r9,-8(r6) /* section end */
- subf. r9,r8,r9
- beq 1b
- /* write nops over the section of code */
- /* todo: if large section, add a branch at the start of it */
- srwi r9,r9,2
- mtctr r9
- sub r8,r8,r3
- lis r0,0x60000000@h /* nop */
-3: stw r0,0(r8)
-BEGIN_FTR_SECTION
- dcbst 0,r8 /* suboptimal, but simpler */
- sync
- icbi 0,r8
-END_FTR_SECTION_IFSET(CPU_FTR_SPLIT_ID_CACHE)
- addi r8,r8,4
- bdnz 3b
- sync /* additional sync needed on g4 */
- isync
- b 1b
#if defined(CONFIG_PPC_PMAC) || defined(CONFIG_PPC_MAPLE)
/*
diff --git a/arch/powerpc/kernel/module_32.c b/arch/powerpc/kernel/module_32.c
index 92f4e5f64f0..e2c3c6a85f3 100644
--- a/arch/powerpc/kernel/module_32.c
+++ b/arch/powerpc/kernel/module_32.c
@@ -24,6 +24,8 @@
#include <linux/kernel.h>
#include <linux/cache.h>
+#include "setup.h"
+
#if 0
#define DEBUGP printk
#else
@@ -269,33 +271,50 @@ int apply_relocate_add(Elf32_Shdr *sechdrs,
return 0;
}
+static const Elf_Shdr *find_section(const Elf_Ehdr *hdr,
+ const Elf_Shdr *sechdrs,
+ const char *name)
+{
+ char *secstrings;
+ unsigned int i;
+
+ secstrings = (char *)hdr + sechdrs[hdr->e_shstrndx].sh_offset;
+ for (i = 1; i < hdr->e_shnum; i++)
+ if (strcmp(secstrings+sechdrs[i].sh_name, name) == 0)
+ return &sechdrs[i];
+ return NULL;
+}
+
int module_finalize(const Elf_Ehdr *hdr,
const Elf_Shdr *sechdrs,
struct module *me)
{
- char *secstrings;
- unsigned int i;
+ const Elf_Shdr *sect;
me->arch.bug_table = NULL;
me->arch.num_bugs = 0;
/* Find the __bug_table section, if present */
- secstrings = (char *)hdr + sechdrs[hdr->e_shstrndx].sh_offset;
- for (i = 1; i < hdr->e_shnum; i++) {
- if (strcmp(secstrings+sechdrs[i].sh_name, "__bug_table"))
- continue;
- me->arch.bug_table = (void *) sechdrs[i].sh_addr;
- me->arch.num_bugs = sechdrs[i].sh_size / sizeof(struct bug_entry);
- break;
+ sect = find_section(hdr, sechdrs, "__bug_table");
+ if (sect != NULL) {
+ me->arch.bug_table = (void *) sect->sh_addr;
+ me->arch.num_bugs = sect->sh_size / sizeof(struct bug_entry);
}
- /*
+ /*
* Strictly speaking this should have a spinlock to protect against
* traversals, but since we only traverse on BUG()s, a spinlock
* could potentially lead to deadlock and thus be counter-productive.
*/
list_add(&me->arch.bug_list, &module_bug_list);
+ /* Apply feature fixups */
+ sect = find_section(hdr, sechdrs, "__ftr_fixup");
+ if (sect != NULL)
+ do_feature_fixups(cur_cpu_spec->cpu_features,
+ (void *)sect->sh_addr,
+ (void *)sect->sh_addr + sect->sh_size);
+
return 0;
}
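
find_section() above resolves a section by name through the ELF string table indexed by e_shstrndx. The same walk works on any ELF file; a small userspace sketch for 64-bit ELF follows (it reads the headers at e_shoff from a mapped file, whereas module_finalize() is handed loader-adjusted section headers; most error handling is omitted):

/* Build: cc -o findsec findsec.c    Run: ./findsec /path/to/elf __bug_table */
#include <elf.h>
#include <fcntl.h>
#include <stdio.h>
#include <string.h>
#include <sys/mman.h>
#include <sys/stat.h>
#include <unistd.h>

int main(int argc, char **argv)
{
	const Elf64_Ehdr *hdr;
	const Elf64_Shdr *sechdrs;
	const char *image, *secstrings;
	struct stat st;
	unsigned int i;
	int fd;

	if (argc != 3)
		return 1;
	fd = open(argv[1], O_RDONLY);
	if (fd < 0 || fstat(fd, &st) < 0)
		return 1;

	image = mmap(NULL, st.st_size, PROT_READ, MAP_PRIVATE, fd, 0);
	if (image == MAP_FAILED)
		return 1;

	hdr = (const Elf64_Ehdr *)image;
	sechdrs = (const Elf64_Shdr *)(image + hdr->e_shoff);
	/* section names live in the section indexed by e_shstrndx */
	secstrings = image + sechdrs[hdr->e_shstrndx].sh_offset;

	for (i = 1; i < hdr->e_shnum; i++) {	/* index 0 is SHN_UNDEF */
		if (strcmp(secstrings + sechdrs[i].sh_name, argv[2]) == 0) {
			printf("%s: offset 0x%lx size 0x%lx\n", argv[2],
			       (unsigned long)sechdrs[i].sh_offset,
			       (unsigned long)sechdrs[i].sh_size);
			return 0;
		}
	}
	printf("%s not found\n", argv[2]);
	return 1;
}
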
diff --git a/arch/powerpc/kernel/module_64.c b/arch/powerpc/kernel/module_64.c
index ba34001fca8..8dd1f0aae5d 100644
--- a/arch/powerpc/kernel/module_64.c
+++ b/arch/powerpc/kernel/module_64.c
@@ -22,6 +22,9 @@
#include <linux/vmalloc.h>
#include <asm/module.h>
#include <asm/uaccess.h>
+#include <asm/firmware.h>
+
+#include "setup.h"
/* FIXME: We don't do .init separately. To do this, we'd need to have
a separate r2 value in the init and core section, and stub between
@@ -400,6 +403,11 @@ int apply_relocate_add(Elf64_Shdr *sechdrs,
| (value & 0x03fffffc);
break;
+ case R_PPC64_REL64:
+ /* 64 bits relative (used by features fixups) */
+ *location = value - (unsigned long)location;
+ break;
+
default:
printk("%s: Unknown ADD relocation: %lu\n",
me->name,
@@ -413,23 +421,33 @@ int apply_relocate_add(Elf64_Shdr *sechdrs,
LIST_HEAD(module_bug_list);
-int module_finalize(const Elf_Ehdr *hdr,
- const Elf_Shdr *sechdrs, struct module *me)
+static const Elf_Shdr *find_section(const Elf_Ehdr *hdr,
+ const Elf_Shdr *sechdrs,
+ const char *name)
{
char *secstrings;
unsigned int i;
+ secstrings = (char *)hdr + sechdrs[hdr->e_shstrndx].sh_offset;
+ for (i = 1; i < hdr->e_shnum; i++)
+ if (strcmp(secstrings+sechdrs[i].sh_name, name) == 0)
+ return &sechdrs[i];
+ return NULL;
+}
+
+int module_finalize(const Elf_Ehdr *hdr,
+ const Elf_Shdr *sechdrs, struct module *me)
+{
+ const Elf_Shdr *sect;
+
me->arch.bug_table = NULL;
me->arch.num_bugs = 0;
/* Find the __bug_table section, if present */
- secstrings = (char *)hdr + sechdrs[hdr->e_shstrndx].sh_offset;
- for (i = 1; i < hdr->e_shnum; i++) {
- if (strcmp(secstrings+sechdrs[i].sh_name, "__bug_table"))
- continue;
- me->arch.bug_table = (void *) sechdrs[i].sh_addr;
- me->arch.num_bugs = sechdrs[i].sh_size / sizeof(struct bug_entry);
- break;
+ sect = find_section(hdr, sechdrs, "__bug_table");
+ if (sect != NULL) {
+ me->arch.bug_table = (void *) sect->sh_addr;
+ me->arch.num_bugs = sect->sh_size / sizeof(struct bug_entry);
}
/*
@@ -439,6 +457,19 @@ int module_finalize(const Elf_Ehdr *hdr,
*/
list_add(&me->arch.bug_list, &module_bug_list);
+ /* Apply feature fixups */
+ sect = find_section(hdr, sechdrs, "__ftr_fixup");
+ if (sect != NULL)
+ do_feature_fixups(cur_cpu_spec->cpu_features,
+ (void *)sect->sh_addr,
+ (void *)sect->sh_addr + sect->sh_size);
+
+ sect = find_section(hdr, sechdrs, "__fw_ftr_fixup");
+ if (sect != NULL)
+ do_feature_fixups(powerpc_firmware_features,
+ (void *)sect->sh_addr,
+ (void *)sect->sh_addr + sect->sh_size);
+
return 0;
}
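
The new R_PPC64_REL64 case stores a 64-bit PC-relative value, which is what the feature-fixup entries in modules need: their start/end fields are offsets from the entry itself (see do_feature_fixups() above), so the relocation is simply "target minus the address being patched". A tiny sketch of the arithmetic, with stand-in variables instead of real relocation records:

#include <stdint.h>
#include <stdio.h>

/* R_PPC64_REL64: the word at 'location' receives (value - location) */
static void apply_rel64(uint64_t *location, uint64_t value)
{
	*location = value - (uint64_t)(uintptr_t)location;
}

int main(void)
{
	uint64_t target = 0;	/* stand-in for a code label */
	uint64_t slot = 0;	/* stand-in for a fixup-table field */

	apply_rel64(&slot, (uint64_t)(uintptr_t)&target);

	/* a consumer recovers the target by adding the slot's own address */
	printf("round trip ok: %d\n",
	       (uint64_t)(uintptr_t)&slot + slot == (uint64_t)(uintptr_t)&target);
	return 0;
}
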
diff --git a/arch/powerpc/kernel/pci_32.c b/arch/powerpc/kernel/pci_32.c
index 9b49f8691d2..0d9ff72e285 100644
--- a/arch/powerpc/kernel/pci_32.c
+++ b/arch/powerpc/kernel/pci_32.c
@@ -441,14 +441,14 @@ update_bridge_base(struct pci_bus *bus, int i)
end = res->end - off;
io_base_lo = (start >> 8) & PCI_IO_RANGE_MASK;
io_limit_lo = (end >> 8) & PCI_IO_RANGE_MASK;
- if (end > 0xffff) {
- pci_write_config_word(dev, PCI_IO_BASE_UPPER16,
- start >> 16);
- pci_write_config_word(dev, PCI_IO_LIMIT_UPPER16,
- end >> 16);
+ if (end > 0xffff)
io_base_lo |= PCI_IO_RANGE_TYPE_32;
- } else
+ else
io_base_lo |= PCI_IO_RANGE_TYPE_16;
+ pci_write_config_word(dev, PCI_IO_BASE_UPPER16,
+ start >> 16);
+ pci_write_config_word(dev, PCI_IO_LIMIT_UPPER16,
+ end >> 16);
pci_write_config_byte(dev, PCI_IO_BASE, io_base_lo);
pci_write_config_byte(dev, PCI_IO_LIMIT, io_limit_lo);
diff --git a/arch/powerpc/kernel/pci_64.c b/arch/powerpc/kernel/pci_64.c
index 78d3c0fc8df..9bae8a5bf67 100644
--- a/arch/powerpc/kernel/pci_64.c
+++ b/arch/powerpc/kernel/pci_64.c
@@ -199,8 +199,14 @@ struct pci_controller * pcibios_alloc_controller(struct device_node *dev)
pci_setup_pci_controller(phb);
phb->arch_data = dev;
phb->is_dynamic = mem_init_done;
- if (dev)
- PHB_SET_NODE(phb, of_node_to_nid(dev));
+ if (dev) {
+ int nid = of_node_to_nid(dev);
+
+ if (nid < 0 || !node_online(nid))
+ nid = -1;
+
+ PHB_SET_NODE(phb, nid);
+ }
return phb;
}
diff --git a/arch/powerpc/kernel/perfmon_fsl_booke.c b/arch/powerpc/kernel/perfmon_fsl_booke.c
deleted file mode 100644
index e0dcf2b41fb..00000000000
--- a/arch/powerpc/kernel/perfmon_fsl_booke.c
+++ /dev/null
@@ -1,221 +0,0 @@
-/* arch/powerpc/kernel/perfmon_fsl_booke.c
- * Freescale Book-E Performance Monitor code
- *
- * Author: Andy Fleming
- * Copyright (c) 2004 Freescale Semiconductor, Inc
- *
- * This program is free software; you can redistribute it and/or
- * modify it under the terms of the GNU General Public License
- * as published by the Free Software Foundation; either version
- * 2 of the License, or (at your option) any later version.
- */
-
-#include <linux/errno.h>
-#include <linux/sched.h>
-#include <linux/kernel.h>
-#include <linux/mm.h>
-#include <linux/stddef.h>
-#include <linux/unistd.h>
-#include <linux/ptrace.h>
-#include <linux/slab.h>
-#include <linux/user.h>
-#include <linux/a.out.h>
-#include <linux/interrupt.h>
-#include <linux/init.h>
-#include <linux/module.h>
-#include <linux/prctl.h>
-
-#include <asm/pgtable.h>
-#include <asm/uaccess.h>
-#include <asm/system.h>
-#include <asm/io.h>
-#include <asm/reg.h>
-#include <asm/xmon.h>
-#include <asm/pmc.h>
-
-static inline u32 get_pmlca(int ctr);
-static inline void set_pmlca(int ctr, u32 pmlca);
-
-static inline u32 get_pmlca(int ctr)
-{
- u32 pmlca;
-
- switch (ctr) {
- case 0:
- pmlca = mfpmr(PMRN_PMLCA0);
- break;
- case 1:
- pmlca = mfpmr(PMRN_PMLCA1);
- break;
- case 2:
- pmlca = mfpmr(PMRN_PMLCA2);
- break;
- case 3:
- pmlca = mfpmr(PMRN_PMLCA3);
- break;
- default:
- panic("Bad ctr number\n");
- }
-
- return pmlca;
-}
-
-static inline void set_pmlca(int ctr, u32 pmlca)
-{
- switch (ctr) {
- case 0:
- mtpmr(PMRN_PMLCA0, pmlca);
- break;
- case 1:
- mtpmr(PMRN_PMLCA1, pmlca);
- break;
- case 2:
- mtpmr(PMRN_PMLCA2, pmlca);
- break;
- case 3:
- mtpmr(PMRN_PMLCA3, pmlca);
- break;
- default:
- panic("Bad ctr number\n");
- }
-}
-
-void init_pmc_stop(int ctr)
-{
- u32 pmlca = (PMLCA_FC | PMLCA_FCS | PMLCA_FCU |
- PMLCA_FCM1 | PMLCA_FCM0);
- u32 pmlcb = 0;
-
- switch (ctr) {
- case 0:
- mtpmr(PMRN_PMLCA0, pmlca);
- mtpmr(PMRN_PMLCB0, pmlcb);
- break;
- case 1:
- mtpmr(PMRN_PMLCA1, pmlca);
- mtpmr(PMRN_PMLCB1, pmlcb);
- break;
- case 2:
- mtpmr(PMRN_PMLCA2, pmlca);
- mtpmr(PMRN_PMLCB2, pmlcb);
- break;
- case 3:
- mtpmr(PMRN_PMLCA3, pmlca);
- mtpmr(PMRN_PMLCB3, pmlcb);
- break;
- default:
- panic("Bad ctr number!\n");
- }
-}
-
-void set_pmc_event(int ctr, int event)
-{
- u32 pmlca;
-
- pmlca = get_pmlca(ctr);
-
- pmlca = (pmlca & ~PMLCA_EVENT_MASK) |
- ((event << PMLCA_EVENT_SHIFT) &
- PMLCA_EVENT_MASK);
-
- set_pmlca(ctr, pmlca);
-}
-
-void set_pmc_user_kernel(int ctr, int user, int kernel)
-{
- u32 pmlca;
-
- pmlca = get_pmlca(ctr);
-
- if(user)
- pmlca &= ~PMLCA_FCU;
- else
- pmlca |= PMLCA_FCU;
-
- if(kernel)
- pmlca &= ~PMLCA_FCS;
- else
- pmlca |= PMLCA_FCS;
-
- set_pmlca(ctr, pmlca);
-}
-
-void set_pmc_marked(int ctr, int mark0, int mark1)
-{
- u32 pmlca = get_pmlca(ctr);
-
- if(mark0)
- pmlca &= ~PMLCA_FCM0;
- else
- pmlca |= PMLCA_FCM0;
-
- if(mark1)
- pmlca &= ~PMLCA_FCM1;
- else
- pmlca |= PMLCA_FCM1;
-
- set_pmlca(ctr, pmlca);
-}
-
-void pmc_start_ctr(int ctr, int enable)
-{
- u32 pmlca = get_pmlca(ctr);
-
- pmlca &= ~PMLCA_FC;
-
- if (enable)
- pmlca |= PMLCA_CE;
- else
- pmlca &= ~PMLCA_CE;
-
- set_pmlca(ctr, pmlca);
-}
-
-void pmc_start_ctrs(int enable)
-{
- u32 pmgc0 = mfpmr(PMRN_PMGC0);
-
- pmgc0 &= ~PMGC0_FAC;
- pmgc0 |= PMGC0_FCECE;
-
- if (enable)
- pmgc0 |= PMGC0_PMIE;
- else
- pmgc0 &= ~PMGC0_PMIE;
-
- mtpmr(PMRN_PMGC0, pmgc0);
-}
-
-void pmc_stop_ctrs(void)
-{
- u32 pmgc0 = mfpmr(PMRN_PMGC0);
-
- pmgc0 |= PMGC0_FAC;
-
- pmgc0 &= ~(PMGC0_PMIE | PMGC0_FCECE);
-
- mtpmr(PMRN_PMGC0, pmgc0);
-}
-
-void dump_pmcs(void)
-{
- printk("pmgc0: %x\n", mfpmr(PMRN_PMGC0));
- printk("pmc\t\tpmlca\t\tpmlcb\n");
- printk("%8x\t%8x\t%8x\n", mfpmr(PMRN_PMC0),
- mfpmr(PMRN_PMLCA0), mfpmr(PMRN_PMLCB0));
- printk("%8x\t%8x\t%8x\n", mfpmr(PMRN_PMC1),
- mfpmr(PMRN_PMLCA1), mfpmr(PMRN_PMLCB1));
- printk("%8x\t%8x\t%8x\n", mfpmr(PMRN_PMC2),
- mfpmr(PMRN_PMLCA2), mfpmr(PMRN_PMLCB2));
- printk("%8x\t%8x\t%8x\n", mfpmr(PMRN_PMC3),
- mfpmr(PMRN_PMLCA3), mfpmr(PMRN_PMLCB3));
-}
-
-EXPORT_SYMBOL(init_pmc_stop);
-EXPORT_SYMBOL(set_pmc_event);
-EXPORT_SYMBOL(set_pmc_user_kernel);
-EXPORT_SYMBOL(set_pmc_marked);
-EXPORT_SYMBOL(pmc_start_ctr);
-EXPORT_SYMBOL(pmc_start_ctrs);
-EXPORT_SYMBOL(pmc_stop_ctrs);
-EXPORT_SYMBOL(dump_pmcs);
diff --git a/arch/powerpc/kernel/pmc.c b/arch/powerpc/kernel/pmc.c
index a0a2efadeab..3d8f6f44641 100644
--- a/arch/powerpc/kernel/pmc.c
+++ b/arch/powerpc/kernel/pmc.c
@@ -71,7 +71,7 @@ int reserve_pmc_hardware(perf_irq_t new_perf_irq)
}
pmc_owner_caller = __builtin_return_address(0);
- perf_irq = new_perf_irq ? : dummy_perf;
+ perf_irq = new_perf_irq ? new_perf_irq : dummy_perf;
out:
spin_unlock(&pmc_owner_lock);
diff --git a/arch/powerpc/kernel/process.c b/arch/powerpc/kernel/process.c
index 7b2f6452ba7..f3d4dd580dd 100644
--- a/arch/powerpc/kernel/process.c
+++ b/arch/powerpc/kernel/process.c
@@ -341,13 +341,6 @@ struct task_struct *__switch_to(struct task_struct *prev,
static int instructions_to_print = 16;
-#ifdef CONFIG_PPC64
-#define BAD_PC(pc) ((REGION_ID(pc) != KERNEL_REGION_ID) && \
- (REGION_ID(pc) != VMALLOC_REGION_ID))
-#else
-#define BAD_PC(pc) ((pc) < KERNELBASE)
-#endif
-
static void show_instructions(struct pt_regs *regs)
{
int i;
@@ -366,7 +359,8 @@ static void show_instructions(struct pt_regs *regs)
* bad address because the pc *should* only be a
* kernel address.
*/
- if (BAD_PC(pc) || __get_user(instr, (unsigned int __user *)pc)) {
+ if (!__kernel_text_address(pc) ||
+ __get_user(instr, (unsigned int __user *)pc)) {
printk("XXXXXXXX ");
} else {
if (regs->nip == pc)
diff --git a/arch/powerpc/kernel/prom.c b/arch/powerpc/kernel/prom.c
index 865b9648d0d..bdb412d4b74 100644
--- a/arch/powerpc/kernel/prom.c
+++ b/arch/powerpc/kernel/prom.c
@@ -1014,7 +1014,7 @@ EXPORT_SYMBOL(find_all_nodes);
/** Checks if the given "compat" string matches one of the strings in
* the device's "compatible" property
*/
-int device_is_compatible(struct device_node *device, const char *compat)
+int device_is_compatible(const struct device_node *device, const char *compat)
{
const char* cp;
int cplen, l;
@@ -1491,7 +1491,8 @@ static int __init prom_reconfig_setup(void)
__initcall(prom_reconfig_setup);
#endif
-struct property *of_find_property(struct device_node *np, const char *name,
+struct property *of_find_property(const struct device_node *np,
+ const char *name,
int *lenp)
{
struct property *pp;
@@ -1512,7 +1513,8 @@ struct property *of_find_property(struct device_node *np, const char *name,
* Find a property with a given name for a given node
* and return the value.
*/
-const void *get_property(struct device_node *np, const char *name, int *lenp)
+const void *get_property(const struct device_node *np, const char *name,
+ int *lenp)
{
struct property *pp = of_find_property(np,name,lenp);
return pp ? pp->value : NULL;
diff --git a/arch/powerpc/kernel/rtas_flash.c b/arch/powerpc/kernel/rtas_flash.c
index 1442b63a75d..6f6fc977cb3 100644
--- a/arch/powerpc/kernel/rtas_flash.c
+++ b/arch/powerpc/kernel/rtas_flash.c
@@ -72,6 +72,10 @@
#define VALIDATE_BUF_SIZE 4096
#define RTAS_MSG_MAXLEN 64
+/* Quirk - RTAS requires 4k list length and block size */
+#define RTAS_BLKLIST_LENGTH 4096
+#define RTAS_BLK_SIZE 4096
+
struct flash_block {
char *data;
unsigned long length;
@@ -83,7 +87,7 @@ struct flash_block {
* into a version/length and translate the pointers
* to absolute.
*/
-#define FLASH_BLOCKS_PER_NODE ((PAGE_SIZE - 16) / sizeof(struct flash_block))
+#define FLASH_BLOCKS_PER_NODE ((RTAS_BLKLIST_LENGTH - 16) / sizeof(struct flash_block))
struct flash_block_list {
unsigned long num_blocks;
struct flash_block_list *next;
@@ -96,6 +100,9 @@ struct flash_block_list_header { /* just the header of flash_block_list */
static struct flash_block_list_header rtas_firmware_flash_list = {0, NULL};
+/* Use slab cache to guarantee 4k alignment */
+static kmem_cache_t *flash_block_cache = NULL;
+
#define FLASH_BLOCK_LIST_VERSION (1UL)
/* Local copy of the flash block list.
@@ -153,7 +160,7 @@ static int flash_list_valid(struct flash_block_list *flist)
return FLASH_IMG_NULL_DATA;
}
block_size = f->blocks[i].length;
- if (block_size <= 0 || block_size > PAGE_SIZE) {
+ if (block_size <= 0 || block_size > RTAS_BLK_SIZE) {
return FLASH_IMG_BAD_LEN;
}
image_size += block_size;
@@ -177,9 +184,9 @@ static void free_flash_list(struct flash_block_list *f)
while (f) {
for (i = 0; i < f->num_blocks; i++)
- free_page((unsigned long)(f->blocks[i].data));
+ kmem_cache_free(flash_block_cache, f->blocks[i].data);
next = f->next;
- free_page((unsigned long)f);
+ kmem_cache_free(flash_block_cache, f);
f = next;
}
}
@@ -278,6 +285,12 @@ static ssize_t rtas_flash_read(struct file *file, char __user *buf,
return msglen;
}
+/* constructor for flash_block_cache */
+void rtas_block_ctor(void *ptr, kmem_cache_t *cache, unsigned long flags)
+{
+ memset(ptr, 0, RTAS_BLK_SIZE);
+}
+
/* We could be much more efficient here. But to keep this function
* simple we allocate a page to the block list no matter how small the
* count is. If the system is low on memory it will be just as well
@@ -302,7 +315,7 @@ static ssize_t rtas_flash_write(struct file *file, const char __user *buffer,
* proc file
*/
if (uf->flist == NULL) {
- uf->flist = (struct flash_block_list *) get_zeroed_page(GFP_KERNEL);
+ uf->flist = kmem_cache_alloc(flash_block_cache, GFP_KERNEL);
if (!uf->flist)
return -ENOMEM;
}
@@ -313,21 +326,21 @@ static ssize_t rtas_flash_write(struct file *file, const char __user *buffer,
next_free = fl->num_blocks;
if (next_free == FLASH_BLOCKS_PER_NODE) {
/* Need to allocate another block_list */
- fl->next = (struct flash_block_list *)get_zeroed_page(GFP_KERNEL);
+ fl->next = kmem_cache_alloc(flash_block_cache, GFP_KERNEL);
if (!fl->next)
return -ENOMEM;
fl = fl->next;
next_free = 0;
}
- if (count > PAGE_SIZE)
- count = PAGE_SIZE;
- p = (char *)get_zeroed_page(GFP_KERNEL);
+ if (count > RTAS_BLK_SIZE)
+ count = RTAS_BLK_SIZE;
+ p = kmem_cache_alloc(flash_block_cache, GFP_KERNEL);
if (!p)
return -ENOMEM;
if(copy_from_user(p, buffer, count)) {
- free_page((unsigned long)p);
+ kmem_cache_free(flash_block_cache, p);
return -EFAULT;
}
fl->blocks[next_free].data = p;
@@ -791,6 +804,16 @@ int __init rtas_flash_init(void)
goto cleanup;
rtas_flash_term_hook = rtas_flash_firmware;
+
+ flash_block_cache = kmem_cache_create("rtas_flash_cache",
+ RTAS_BLK_SIZE, RTAS_BLK_SIZE, 0,
+ rtas_block_ctor, NULL);
+ if (!flash_block_cache) {
+ printk(KERN_ERR "%s: failed to create block cache\n",
+ __FUNCTION__);
+ rc = -ENOMEM;
+ goto cleanup;
+ }
return 0;
cleanup:
@@ -805,6 +828,10 @@ cleanup:
void __exit rtas_flash_cleanup(void)
{
rtas_flash_term_hook = NULL;
+
+ if (flash_block_cache)
+ kmem_cache_destroy(flash_block_cache);
+
remove_flash_pde(firmware_flash_pde);
remove_flash_pde(firmware_update_pde);
remove_flash_pde(validate_pde);
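
The switch from get_zeroed_page() to a dedicated slab cache makes the RTAS constraint explicit: passing 4 KB as both object size and alignment to kmem_cache_create() guarantees every block starts on a 4 KB boundary. A minimal sketch of the pattern, using the same 2.6.18-era six-argument kmem_cache_create() signature that appears above (cache name and wrappers here are illustrative; note the constructor only runs when the slab first creates an object, so reused blocks are not re-zeroed the way fresh get_zeroed_page() pages were):

#include <linux/errno.h>
#include <linux/slab.h>
#include <linux/string.h>

#define BLK_SIZE 4096	/* illustrative; RTAS_BLK_SIZE in the code above */

static kmem_cache_t *blk_cache;

/* runs once per newly constructed slab object */
static void blk_ctor(void *ptr, kmem_cache_t *cache, unsigned long flags)
{
	memset(ptr, 0, BLK_SIZE);
}

static int blk_cache_init(void)
{
	/* size == align: every object is naturally 4 KB aligned */
	blk_cache = kmem_cache_create("example_blk_cache", BLK_SIZE, BLK_SIZE,
				      0, blk_ctor, NULL);
	return blk_cache ? 0 : -ENOMEM;
}

static void *blk_alloc(void)
{
	return kmem_cache_alloc(blk_cache, GFP_KERNEL);
}

static void blk_free(void *p)
{
	kmem_cache_free(blk_cache, p);
}
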
diff --git a/arch/powerpc/kernel/setup_32.c b/arch/powerpc/kernel/setup_32.c
index 191d0ab0922..a4c2964a3ca 100644
--- a/arch/powerpc/kernel/setup_32.c
+++ b/arch/powerpc/kernel/setup_32.c
@@ -91,6 +91,7 @@ int ucache_bsize;
unsigned long __init early_init(unsigned long dt_ptr)
{
unsigned long offset = reloc_offset();
+ struct cpu_spec *spec;
/* First zero the BSS -- use memset_io, some platforms don't have
* caches on yet */
@@ -100,8 +101,11 @@ unsigned long __init early_init(unsigned long dt_ptr)
* Identify the CPU type and fix up code sections
* that depend on which cpu we have.
*/
- identify_cpu(offset, 0);
- do_cpu_ftr_fixups(offset);
+ spec = identify_cpu(offset);
+
+ do_feature_fixups(spec->cpu_features,
+ PTRRELOC(&__start___ftr_fixup),
+ PTRRELOC(&__stop___ftr_fixup));
return KERNELBASE + offset;
}
diff --git a/arch/powerpc/kernel/setup_64.c b/arch/powerpc/kernel/setup_64.c
index 4b2e32eab9d..16278968dab 100644
--- a/arch/powerpc/kernel/setup_64.c
+++ b/arch/powerpc/kernel/setup_64.c
@@ -170,6 +170,9 @@ void __init setup_paca(int cpu)
void __init early_setup(unsigned long dt_ptr)
{
+ /* Identify CPU type */
+ identify_cpu(0);
+
/* Assume we're on cpu 0 for now. Don't write to the paca yet! */
setup_paca(0);
@@ -348,6 +351,14 @@ void __init setup_system(void)
{
DBG(" -> setup_system()\n");
+ /* Apply the CPUs-specific and firmware specific fixups to kernel
+ * text (nop out sections not relevant to this CPU or this firmware)
+ */
+ do_feature_fixups(cur_cpu_spec->cpu_features,
+ &__start___ftr_fixup, &__stop___ftr_fixup);
+ do_feature_fixups(powerpc_firmware_features,
+ &__start___fw_ftr_fixup, &__stop___fw_ftr_fixup);
+
/*
* Unflatten the device-tree passed by prom_init or kexec
*/
diff --git a/arch/powerpc/kernel/time.c b/arch/powerpc/kernel/time.c
index 5b59bc18dfe..46a24de36fe 100644
--- a/arch/powerpc/kernel/time.c
+++ b/arch/powerpc/kernel/time.c
@@ -220,11 +220,8 @@ static void account_process_time(struct pt_regs *regs)
*/
struct cpu_purr_data {
int initialized; /* thread is running */
- u64 tb0; /* timebase at origin time */
- u64 purr0; /* PURR at origin time */
u64 tb; /* last TB value read */
u64 purr; /* last PURR value read */
- u64 stolen; /* stolen time so far */
spinlock_t lock;
};
@@ -234,10 +231,8 @@ static void snapshot_tb_and_purr(void *data)
{
struct cpu_purr_data *p = &__get_cpu_var(cpu_purr_data);
- p->tb0 = mftb();
- p->purr0 = mfspr(SPRN_PURR);
- p->tb = p->tb0;
- p->purr = 0;
+ p->tb = mftb();
+ p->purr = mfspr(SPRN_PURR);
wmb();
p->initialized = 1;
}
@@ -258,37 +253,24 @@ void snapshot_timebases(void)
void calculate_steal_time(void)
{
- u64 tb, purr, t0;
+ u64 tb, purr;
s64 stolen;
- struct cpu_purr_data *p0, *pme, *phim;
- int cpu;
+ struct cpu_purr_data *pme;
if (!cpu_has_feature(CPU_FTR_PURR))
return;
- cpu = smp_processor_id();
- pme = &per_cpu(cpu_purr_data, cpu);
+ pme = &per_cpu(cpu_purr_data, smp_processor_id());
if (!pme->initialized)
return; /* this can happen in early boot */
- p0 = &per_cpu(cpu_purr_data, cpu & ~1);
- phim = &per_cpu(cpu_purr_data, cpu ^ 1);
- spin_lock(&p0->lock);
+ spin_lock(&pme->lock);
tb = mftb();
- purr = mfspr(SPRN_PURR) - pme->purr0;
- if (!phim->initialized || !cpu_online(cpu ^ 1)) {
- stolen = (tb - pme->tb) - (purr - pme->purr);
- } else {
- t0 = pme->tb0;
- if (phim->tb0 < t0)
- t0 = phim->tb0;
- stolen = phim->tb - t0 - phim->purr - purr - p0->stolen;
- }
- if (stolen > 0) {
+ purr = mfspr(SPRN_PURR);
+ stolen = (tb - pme->tb) - (purr - pme->purr);
+ if (stolen > 0)
account_steal_time(current, stolen);
- p0->stolen += stolen;
- }
pme->tb = tb;
pme->purr = purr;
- spin_unlock(&p0->lock);
+ spin_unlock(&pme->lock);
}
/*
@@ -297,30 +279,17 @@ void calculate_steal_time(void)
*/
static void snapshot_purr(void)
{
- int cpu;
- u64 purr;
- struct cpu_purr_data *p0, *pme, *phim;
+ struct cpu_purr_data *pme;
unsigned long flags;
if (!cpu_has_feature(CPU_FTR_PURR))
return;
- cpu = smp_processor_id();
- pme = &per_cpu(cpu_purr_data, cpu);
- p0 = &per_cpu(cpu_purr_data, cpu & ~1);
- phim = &per_cpu(cpu_purr_data, cpu ^ 1);
- spin_lock_irqsave(&p0->lock, flags);
- pme->tb = pme->tb0 = mftb();
- purr = mfspr(SPRN_PURR);
- if (!phim->initialized) {
- pme->purr = 0;
- pme->purr0 = purr;
- } else {
- /* set p->purr and p->purr0 for no change in p0->stolen */
- pme->purr = phim->tb - phim->tb0 - phim->purr - p0->stolen;
- pme->purr0 = purr - pme->purr;
- }
+ pme = &per_cpu(cpu_purr_data, smp_processor_id());
+ spin_lock_irqsave(&pme->lock, flags);
+ pme->tb = mftb();
+ pme->purr = mfspr(SPRN_PURR);
pme->initialized = 1;
- spin_unlock_irqrestore(&p0->lock, flags);
+ spin_unlock_irqrestore(&pme->lock, flags);
}
#endif /* CONFIG_PPC_SPLPAR */
@@ -1045,48 +1014,6 @@ void __init time_init(void)
set_dec(tb_ticks_per_jiffy);
}
-#ifdef CONFIG_RTC_CLASS
-static int set_rtc_class_time(struct rtc_time *tm)
-{
- int err;
- struct class_device *class_dev =
- rtc_class_open(CONFIG_RTC_HCTOSYS_DEVICE);
-
- if (class_dev == NULL)
- return -ENODEV;
-
- err = rtc_set_time(class_dev, tm);
-
- rtc_class_close(class_dev);
-
- return 0;
-}
-
-static void get_rtc_class_time(struct rtc_time *tm)
-{
- int err;
- struct class_device *class_dev =
- rtc_class_open(CONFIG_RTC_HCTOSYS_DEVICE);
-
- if (class_dev == NULL)
- return;
-
- err = rtc_read_time(class_dev, tm);
-
- rtc_class_close(class_dev);
-
- return;
-}
-
-int __init rtc_class_hookup(void)
-{
- ppc_md.get_rtc_time = get_rtc_class_time;
- ppc_md.set_rtc_time = set_rtc_class_time;
-
- return 0;
-}
-#endif /* CONFIG_RTC_CLASS */
-
#define FEBRUARY 2
#define STARTOFTIME 1970
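
With the per-thread snapshots above, stolen time falls out of two deltas: the timebase always advances, while the PURR advances (roughly) only while this thread is actually dispatched, so stolen = (tb - last_tb) - (purr - last_purr). A small worked sketch with made-up register values:

#include <stdint.h>
#include <stdio.h>

struct cpu_purr_data {
	uint64_t tb;	/* timebase at last snapshot */
	uint64_t purr;	/* PURR at last snapshot */
};

/* ticks the hypervisor spent elsewhere since the last snapshot */
static int64_t steal_since(struct cpu_purr_data *p, uint64_t tb, uint64_t purr)
{
	int64_t stolen = (int64_t)(tb - p->tb) - (int64_t)(purr - p->purr);

	p->tb = tb;
	p->purr = purr;
	return stolen;
}

int main(void)
{
	struct cpu_purr_data p = { .tb = 1000, .purr = 400 };

	/* 5000 timebase ticks elapsed but the PURR moved by only 3500:
	 * the partition was not dispatched for 1500 of those ticks */
	printf("stolen = %lld\n", (long long)steal_since(&p, 6000, 3900));
	return 0;
}
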
diff --git a/arch/powerpc/kernel/traps.c b/arch/powerpc/kernel/traps.c
index d9f10f2fc37..c66b4771ef4 100644
--- a/arch/powerpc/kernel/traps.c
+++ b/arch/powerpc/kernel/traps.c
@@ -843,7 +843,7 @@ void __kprobes program_check_exception(struct pt_regs *regs)
void alignment_exception(struct pt_regs *regs)
{
- int fixed = 0;
+ int sig, code, fixed = 0;
/* we don't implement logging of alignment exceptions */
if (!(current->thread.align_ctl & PR_UNALIGN_SIGBUS))
@@ -857,14 +857,16 @@ void alignment_exception(struct pt_regs *regs)
/* Operand address was bad */
if (fixed == -EFAULT) {
- if (user_mode(regs))
- _exception(SIGSEGV, regs, SEGV_ACCERR, regs->dar);
- else
- /* Search exception table */
- bad_page_fault(regs, regs->dar, SIGSEGV);
- return;
+ sig = SIGSEGV;
+ code = SEGV_ACCERR;
+ } else {
+ sig = SIGBUS;
+ code = BUS_ADRALN;
}
- _exception(SIGBUS, regs, BUS_ADRALN, regs->dar);
+ if (user_mode(regs))
+ _exception(sig, regs, code, regs->dar);
+ else
+ bad_page_fault(regs, regs->dar, sig);
}
void StackOverflow(struct pt_regs *regs)
@@ -900,14 +902,13 @@ void kernel_fp_unavailable_exception(struct pt_regs *regs)
void altivec_unavailable_exception(struct pt_regs *regs)
{
-#if !defined(CONFIG_ALTIVEC)
if (user_mode(regs)) {
/* A user program has executed an altivec instruction,
but this kernel doesn't support altivec. */
_exception(SIGILL, regs, ILL_ILLOPC, regs->nip);
return;
}
-#endif
+
printk(KERN_EMERG "Unrecoverable VMX/Altivec Unavailable Exception "
"%lx at %lx\n", regs->trap, regs->nip);
die("Unrecoverable VMX/Altivec Unavailable Exception", regs, SIGABRT);
diff --git a/arch/powerpc/kernel/vdso.c b/arch/powerpc/kernel/vdso.c
index 1a7e19cdab3..c913ad5cad2 100644
--- a/arch/powerpc/kernel/vdso.c
+++ b/arch/powerpc/kernel/vdso.c
@@ -36,6 +36,8 @@
#include <asm/vdso.h>
#include <asm/vdso_datapage.h>
+#include "setup.h"
+
#undef DEBUG
#ifdef DEBUG
@@ -586,6 +588,43 @@ static __init int vdso_fixup_datapage(struct lib32_elfinfo *v32,
return 0;
}
+
+static __init int vdso_fixup_features(struct lib32_elfinfo *v32,
+ struct lib64_elfinfo *v64)
+{
+ void *start32;
+ unsigned long size32;
+
+#ifdef CONFIG_PPC64
+ void *start64;
+ unsigned long size64;
+
+ start64 = find_section64(v64->hdr, "__ftr_fixup", &size64);
+ if (start64)
+ do_feature_fixups(cur_cpu_spec->cpu_features,
+ start64, start64 + size64);
+
+ start64 = find_section64(v64->hdr, "__fw_ftr_fixup", &size64);
+ if (start64)
+ do_feature_fixups(powerpc_firmware_features,
+ start64, start64 + size64);
+#endif /* CONFIG_PPC64 */
+
+ start32 = find_section32(v32->hdr, "__ftr_fixup", &size32);
+ if (start32)
+ do_feature_fixups(cur_cpu_spec->cpu_features,
+ start32, start32 + size32);
+
+#ifdef CONFIG_PPC64
+ start32 = find_section32(v32->hdr, "__fw_ftr_fixup", &size32);
+ if (start32)
+ do_feature_fixups(powerpc_firmware_features,
+ start32, start32 + size32);
+#endif /* CONFIG_PPC64 */
+
+ return 0;
+}
+
static __init int vdso_fixup_alt_funcs(struct lib32_elfinfo *v32,
struct lib64_elfinfo *v64)
{
@@ -634,6 +673,9 @@ static __init int vdso_setup(void)
if (vdso_fixup_datapage(&v32, &v64))
return -1;
+ if (vdso_fixup_features(&v32, &v64))
+ return -1;
+
if (vdso_fixup_alt_funcs(&v32, &v64))
return -1;
@@ -714,6 +756,7 @@ void __init vdso_init(void)
* Setup the syscall map in the vDOS
*/
vdso_setup_syscall_map();
+
/*
* Initialize the vDSO images in memory, that is do necessary
* fixups of vDSO symbols, locate trampolines, etc...
diff --git a/arch/powerpc/kernel/vdso32/vdso32.lds.S b/arch/powerpc/kernel/vdso32/vdso32.lds.S
index 6187af2d54c..26e138c4ce1 100644
--- a/arch/powerpc/kernel/vdso32/vdso32.lds.S
+++ b/arch/powerpc/kernel/vdso32/vdso32.lds.S
@@ -32,6 +32,18 @@ SECTIONS
PROVIDE (_etext = .);
PROVIDE (etext = .);
+ . = ALIGN(8);
+ __ftr_fixup : {
+ *(__ftr_fixup)
+ }
+
+#ifdef CONFIG_PPC64
+ . = ALIGN(8);
+ __fw_ftr_fixup : {
+ *(__fw_ftr_fixup)
+ }
+#endif
+
/* Other stuff is appended to the text segment: */
.rodata : { *(.rodata .rodata.* .gnu.linkonce.r.*) }
.rodata1 : { *(.rodata1) }
diff --git a/arch/powerpc/kernel/vdso64/gettimeofday.S b/arch/powerpc/kernel/vdso64/gettimeofday.S
index 56e76ff5498..40ffd9b6cef 100644
--- a/arch/powerpc/kernel/vdso64/gettimeofday.S
+++ b/arch/powerpc/kernel/vdso64/gettimeofday.S
@@ -229,8 +229,10 @@ V_FUNCTION_BEGIN(__do_get_xsec)
xor r0,r8,r8 /* create dependency */
add r3,r3,r0
- /* Get TB & offset it */
- mftb r7
+ /* Get TB & offset it. We use the MFTB macro which will generate
+ * workaround code for Cell.
+ */
+ MFTB(r7)
ld r9,CFG_TB_ORIG_STAMP(r3)
subf r7,r9,r7
diff --git a/arch/powerpc/kernel/vdso64/vdso64.lds.S b/arch/powerpc/kernel/vdso64/vdso64.lds.S
index 4a2b6dc0960..2d70f35d50b 100644
--- a/arch/powerpc/kernel/vdso64/vdso64.lds.S
+++ b/arch/powerpc/kernel/vdso64/vdso64.lds.S
@@ -31,6 +31,16 @@ SECTIONS
PROVIDE (_etext = .);
PROVIDE (etext = .);
+ . = ALIGN(8);
+ __ftr_fixup : {
+ *(__ftr_fixup)
+ }
+
+ . = ALIGN(8);
+ __fw_ftr_fixup : {
+ *(__fw_ftr_fixup)
+ }
+
/* Other stuff is appended to the text segment: */
.rodata : { *(.rodata .rodata.* .gnu.linkonce.r.*) }
.rodata1 : { *(.rodata1) }
diff --git a/arch/powerpc/kernel/vio.c b/arch/powerpc/kernel/vio.c
index cb87e71eec6..ed007878d1b 100644
--- a/arch/powerpc/kernel/vio.c
+++ b/arch/powerpc/kernel/vio.c
@@ -92,9 +92,9 @@ static struct iommu_table *vio_build_iommu_table(struct vio_dev *dev)
&tbl->it_index, &offset, &size);
/* TCE table size - measured in tce entries */
- tbl->it_size = size >> PAGE_SHIFT;
+ tbl->it_size = size >> IOMMU_PAGE_SHIFT;
/* offset for VIO should always be 0 */
- tbl->it_offset = offset >> PAGE_SHIFT;
+ tbl->it_offset = offset >> IOMMU_PAGE_SHIFT;
tbl->it_busno = 0;
tbl->it_type = TCE_VB;
diff --git a/arch/powerpc/kernel/vmlinux.lds.S b/arch/powerpc/kernel/vmlinux.lds.S
index cb0e8d46c3e..e8342d86753 100644
--- a/arch/powerpc/kernel/vmlinux.lds.S
+++ b/arch/powerpc/kernel/vmlinux.lds.S
@@ -108,13 +108,7 @@ SECTIONS
.initcall.init : {
__initcall_start = .;
- *(.initcall1.init)
- *(.initcall2.init)
- *(.initcall3.init)
- *(.initcall4.init)
- *(.initcall5.init)
- *(.initcall6.init)
- *(.initcall7.init)
+ INITCALLS
__initcall_end = .;
}