Diffstat (limited to 'arch/powerpc/kernel')
-rw-r--r--  arch/powerpc/kernel/asm-offsets.c      7
-rw-r--r--  arch/powerpc/kernel/cacheinfo.c       10
-rw-r--r--  arch/powerpc/kernel/dma-iommu.c        4
-rw-r--r--  arch/powerpc/kernel/head_64.S          9
-rw-r--r--  arch/powerpc/kernel/head_fsl_booke.S   8
-rw-r--r--  arch/powerpc/kernel/iommu.c           12
-rw-r--r--  arch/powerpc/kernel/lparcfg.c         10
-rw-r--r--  arch/powerpc/kernel/machine_kexec.c   25
-rw-r--r--  arch/powerpc/kernel/pci-common.c       5
-rw-r--r--  arch/powerpc/kernel/pci_64.c           6
-rw-r--r--  arch/powerpc/kernel/prom.c             5
-rw-r--r--  arch/powerpc/kernel/setup_64.c         6
-rw-r--r--  arch/powerpc/kernel/vio.c              7
-rw-r--r--  arch/powerpc/kernel/vmlinux.lds.S      4
14 files changed, 81 insertions, 37 deletions
diff --git a/arch/powerpc/kernel/asm-offsets.c b/arch/powerpc/kernel/asm-offsets.c
index 9937fe44555..19ee491e9e2 100644
--- a/arch/powerpc/kernel/asm-offsets.c
+++ b/arch/powerpc/kernel/asm-offsets.c
@@ -56,6 +56,10 @@
#include "head_booke.h"
#endif
+#if defined(CONFIG_FSL_BOOKE)
+#include "../mm/mmu_decl.h"
+#endif
+
int main(void)
{
DEFINE(THREAD, offsetof(struct task_struct, thread));
@@ -382,6 +386,9 @@ int main(void)
DEFINE(PGD_T_LOG2, PGD_T_LOG2);
DEFINE(PTE_T_LOG2, PTE_T_LOG2);
#endif
+#ifdef CONFIG_FSL_BOOKE
+ DEFINE(TLBCAM_SIZE, sizeof(struct tlbcam));
+#endif
#ifdef CONFIG_KVM_EXIT_TIMING
DEFINE(VCPU_TIMING_EXIT_TBU, offsetof(struct kvm_vcpu,
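Note: the hunk above exports TLBCAM_SIZE to assembly so that head_fsl_booke.S (further down) can index the TLBCAM array by its real element size instead of the hard-coded 20. A minimal userland sketch of the asm-offsets DEFINE() trick, using a stand-in struct (the real struct tlbcam lives under arch/powerpc/mm and its layout is not shown here):

#include <stddef.h>

/* stand-in for the kernel's struct tlbcam; field names are illustrative */
struct tlbcam_like {
	unsigned long mas0, mas1, mas2, mas3, mas7;
};

/* same pattern the kernel uses: emit the constant into the generated
 * assembly, where a build-time script turns it into a #define */
#define DEFINE(sym, val) \
	asm volatile("\n->" #sym " %0 " #val : : "i" (val))

int main(void)
{
	DEFINE(TLBCAM_SIZE, sizeof(struct tlbcam_like));
	DEFINE(TLBCAM_ENTRY_MAS2, offsetof(struct tlbcam_like, mas2));
	return 0;
}

Compiling this with gcc -S and grepping for "->" shows the generated name/value pairs; the kernel build does the same and writes them into asm-offsets.h.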
diff --git a/arch/powerpc/kernel/cacheinfo.c b/arch/powerpc/kernel/cacheinfo.c
index b33f0417a4b..bb37b1d19a5 100644
--- a/arch/powerpc/kernel/cacheinfo.c
+++ b/arch/powerpc/kernel/cacheinfo.c
@@ -113,7 +113,7 @@ struct cache {
struct cache *next_local; /* next cache of >= level */
};
-static DEFINE_PER_CPU(struct cache_dir *, cache_dir);
+static DEFINE_PER_CPU(struct cache_dir *, cache_dir_pcpu);
/* traversal/modification of this list occurs only at cpu hotplug time;
* access is serialized by cpu hotplug locking
@@ -468,9 +468,9 @@ static struct cache_dir *__cpuinit cacheinfo_create_cache_dir(unsigned int cpu_i
cache_dir->kobj = kobj;
- WARN_ON_ONCE(per_cpu(cache_dir, cpu_id) != NULL);
+ WARN_ON_ONCE(per_cpu(cache_dir_pcpu, cpu_id) != NULL);
- per_cpu(cache_dir, cpu_id) = cache_dir;
+ per_cpu(cache_dir_pcpu, cpu_id) = cache_dir;
return cache_dir;
err:
@@ -820,13 +820,13 @@ void cacheinfo_cpu_offline(unsigned int cpu_id)
/* Prevent userspace from seeing inconsistent state - remove
* the sysfs hierarchy first */
- cache_dir = per_cpu(cache_dir, cpu_id);
+ cache_dir = per_cpu(cache_dir_pcpu, cpu_id);
/* careful, sysfs population may have failed */
if (cache_dir)
remove_cache_dir(cache_dir);
- per_cpu(cache_dir, cpu_id) = NULL;
+ per_cpu(cache_dir_pcpu, cpu_id) = NULL;
/* clear the CPU's bit in its cache chain, possibly freeing
* cache objects */
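Note: the per-cpu variable is renamed presumably to avoid a clash with the local pointer also called cache_dir that the same functions use (visible in the hunks above), since the per_cpu() accessor refers to the backing symbol by name. A userland sketch of that shadowing hazard, with made-up names:

#include <stdio.h>

struct cache_dir { int id; };

/* userland stand-in for a per-CPU pointer: one slot per CPU */
static struct cache_dir *cache_dir_pcpu[4];

#define per_cpu_sketch(var, cpu) ((var)[(cpu)])

int main(void)
{
	static struct cache_dir d = { .id = 7 };
	cache_dir_pcpu[1] = &d;

	/* a local named exactly like the backing symbol would shadow it
	 * inside the accessor; the _pcpu suffix keeps the two apart */
	struct cache_dir *cache_dir = per_cpu_sketch(cache_dir_pcpu, 1);
	printf("cpu1 cache dir id = %d\n", cache_dir->id);
	return 0;
}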
diff --git a/arch/powerpc/kernel/dma-iommu.c b/arch/powerpc/kernel/dma-iommu.c
index 14183af1b3f..2983adac8cc 100644
--- a/arch/powerpc/kernel/dma-iommu.c
+++ b/arch/powerpc/kernel/dma-iommu.c
@@ -79,10 +79,10 @@ static int dma_iommu_dma_supported(struct device *dev, u64 mask)
"Warning: IOMMU offset too big for device mask\n");
if (tbl)
printk(KERN_INFO
- "mask: 0x%08lx, table offset: 0x%08lx\n",
+ "mask: 0x%08llx, table offset: 0x%08lx\n",
mask, tbl->it_offset);
else
- printk(KERN_INFO "mask: 0x%08lx, table unavailable\n",
+ printk(KERN_INFO "mask: 0x%08llx, table unavailable\n",
mask);
return 0;
} else
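Note: the %lx to %llx changes here (and the similar ones in iommu.c, lparcfg.c, pci_64.c and setup_64.c below) follow from powerpc64's u64 being defined as unsigned long long (the int-ll64.h convention), so %lx no longer matches the argument type. A standalone illustration of the specifier rule:

#include <stdio.h>

typedef unsigned long long u64;	/* mirrors the 64-bit kernel type */

int main(void)
{
	u64 mask = 0xffffffffffULL;

	/* printf("mask: 0x%08lx\n", mask);    -- %lx expects unsigned long: mismatched */
	printf("mask: 0x%08llx\n", mask);	/* %llx matches unsigned long long */
	return 0;
}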
diff --git a/arch/powerpc/kernel/head_64.S b/arch/powerpc/kernel/head_64.S
index b4bcf5a930f..ebaedafc8e6 100644
--- a/arch/powerpc/kernel/head_64.S
+++ b/arch/powerpc/kernel/head_64.S
@@ -1518,6 +1518,15 @@ _GLOBAL(pmac_secondary_start)
/* turn on 64-bit mode */
bl .enable_64b_mode
+ li r0,0
+ mfspr r3,SPRN_HID4
+ rldimi r3,r0,40,23 /* clear bit 23 (rm_ci) */
+ sync
+ mtspr SPRN_HID4,r3
+ isync
+ sync
+ slbia
+
/* get TOC pointer (real address) */
bl .relative_toc
diff --git a/arch/powerpc/kernel/head_fsl_booke.S b/arch/powerpc/kernel/head_fsl_booke.S
index 11b549acc03..36ffb3504a4 100644
--- a/arch/powerpc/kernel/head_fsl_booke.S
+++ b/arch/powerpc/kernel/head_fsl_booke.S
@@ -389,10 +389,6 @@ skpinv: addi r6,r6,1 /* Increment */
#endif
#endif
- mfspr r3,SPRN_TLB1CFG
- andi. r3,r3,0xfff
- lis r4,num_tlbcam_entries@ha
- stw r3,num_tlbcam_entries@l(r4)
/*
* Decide what sort of machine this is and initialize the MMU.
*/
@@ -711,7 +707,7 @@ interrupt_base:
EXCEPTION(0x2060, PerformanceMonitor, performance_monitor_exception, EXC_XFER_STD)
#ifdef CONFIG_PPC_E500MC
- EXCEPTION(0x2070, Doorbell, unknown_exception, EXC_XFER_EE)
+ EXCEPTION(0x2070, Doorbell, unknown_exception, EXC_XFER_STD)
#endif
/* Debug Interrupt */
@@ -909,7 +905,7 @@ KernelSPE:
_GLOBAL(loadcam_entry)
lis r4,TLBCAM@ha
addi r4,r4,TLBCAM@l
- mulli r5,r3,20
+ mulli r5,r3,TLBCAM_SIZE
add r3,r5,r4
lwz r4,0(r3)
mtspr SPRN_MAS0,r4
diff --git a/arch/powerpc/kernel/iommu.c b/arch/powerpc/kernel/iommu.c
index 1bfa706b96e..fd51578e29d 100644
--- a/arch/powerpc/kernel/iommu.c
+++ b/arch/powerpc/kernel/iommu.c
@@ -239,12 +239,12 @@ static void __iommu_free(struct iommu_table *tbl, dma_addr_t dma_addr,
if (printk_ratelimit()) {
printk(KERN_INFO "iommu_free: invalid entry\n");
printk(KERN_INFO "\tentry = 0x%lx\n", entry);
- printk(KERN_INFO "\tdma_addr = 0x%lx\n", (u64)dma_addr);
- printk(KERN_INFO "\tTable = 0x%lx\n", (u64)tbl);
- printk(KERN_INFO "\tbus# = 0x%lx\n", (u64)tbl->it_busno);
- printk(KERN_INFO "\tsize = 0x%lx\n", (u64)tbl->it_size);
- printk(KERN_INFO "\tstartOff = 0x%lx\n", (u64)tbl->it_offset);
- printk(KERN_INFO "\tindex = 0x%lx\n", (u64)tbl->it_index);
+ printk(KERN_INFO "\tdma_addr = 0x%llx\n", (u64)dma_addr);
+ printk(KERN_INFO "\tTable = 0x%llx\n", (u64)tbl);
+ printk(KERN_INFO "\tbus# = 0x%llx\n", (u64)tbl->it_busno);
+ printk(KERN_INFO "\tsize = 0x%llx\n", (u64)tbl->it_size);
+ printk(KERN_INFO "\tstartOff = 0x%llx\n", (u64)tbl->it_offset);
+ printk(KERN_INFO "\tindex = 0x%llx\n", (u64)tbl->it_index);
WARN_ON(1);
}
return;
diff --git a/arch/powerpc/kernel/lparcfg.c b/arch/powerpc/kernel/lparcfg.c
index d051e8cbcd0..182e0f642f3 100644
--- a/arch/powerpc/kernel/lparcfg.c
+++ b/arch/powerpc/kernel/lparcfg.c
@@ -240,7 +240,7 @@ static void parse_ppp_data(struct seq_file *m)
if (rc)
return;
- seq_printf(m, "partition_entitled_capacity=%ld\n",
+ seq_printf(m, "partition_entitled_capacity=%lld\n",
ppp_data.entitlement);
seq_printf(m, "group=%d\n", ppp_data.group_num);
seq_printf(m, "system_active_processors=%d\n",
@@ -265,7 +265,7 @@ static void parse_ppp_data(struct seq_file *m)
ppp_data.unallocated_weight);
seq_printf(m, "capacity_weight=%d\n", ppp_data.weight);
seq_printf(m, "capped=%d\n", ppp_data.capped);
- seq_printf(m, "unallocated_capacity=%ld\n",
+ seq_printf(m, "unallocated_capacity=%lld\n",
ppp_data.unallocated_entitlement);
}
@@ -509,10 +509,10 @@ static ssize_t update_ppp(u64 *entitlement, u8 *weight)
} else
return -EINVAL;
- pr_debug("%s: current_entitled = %lu, current_weight = %u\n",
+ pr_debug("%s: current_entitled = %llu, current_weight = %u\n",
__func__, ppp_data.entitlement, ppp_data.weight);
- pr_debug("%s: new_entitled = %lu, new_weight = %u\n",
+ pr_debug("%s: new_entitled = %llu, new_weight = %u\n",
__func__, new_entitled, new_weight);
retval = plpar_hcall_norets(H_SET_PPP, new_entitled, new_weight);
@@ -558,7 +558,7 @@ static ssize_t update_mpp(u64 *entitlement, u8 *weight)
pr_debug("%s: current_entitled = %lu, current_weight = %u\n",
__func__, mpp_data.entitled_mem, mpp_data.mem_weight);
- pr_debug("%s: new_entitled = %lu, new_weight = %u\n",
+ pr_debug("%s: new_entitled = %llu, new_weight = %u\n",
__func__, new_entitled, new_weight);
rc = plpar_hcall_norets(H_SET_MPP, new_entitled, new_weight);
diff --git a/arch/powerpc/kernel/machine_kexec.c b/arch/powerpc/kernel/machine_kexec.c
index b3abebb7ee6..d59e2b1bdcb 100644
--- a/arch/powerpc/kernel/machine_kexec.c
+++ b/arch/powerpc/kernel/machine_kexec.c
@@ -93,10 +93,35 @@ void __init reserve_crashkernel(void)
KDUMP_KERNELBASE);
crashk_res.start = KDUMP_KERNELBASE;
+#else
+ if (!crashk_res.start) {
+ /*
+ * unspecified address, choose a region of specified size
+ * can overlap with initrd (ignoring corruption when retained)
+ * ppc64 requires kernel and some stacks to be in first segment
+ */
+ crashk_res.start = KDUMP_KERNELBASE;
+ }
+
+ crash_base = PAGE_ALIGN(crashk_res.start);
+ if (crash_base != crashk_res.start) {
+ printk("Crash kernel base must be aligned to 0x%lx\n",
+ PAGE_SIZE);
+ crashk_res.start = crash_base;
+ }
+
#endif
crash_size = PAGE_ALIGN(crash_size);
crashk_res.end = crashk_res.start + crash_size - 1;
+ /* The crash region must not overlap the current kernel */
+ if (overlaps_crashkernel(__pa(_stext), _end - _stext)) {
+ printk(KERN_WARNING
+ "Crash kernel can not overlap current kernel\n");
+ crashk_res.start = crashk_res.end = 0;
+ return;
+ }
+
/* Crash kernel trumps memory limit */
if (memory_limit && memory_limit <= crashk_res.end) {
memory_limit = crashk_res.end + 1;
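Note: the new code page-aligns a user-requested crash base and then rejects a region that would overlap the running kernel (overlaps_crashkernel() is fed __pa(_stext) and _end - _stext above). The two checks can be sketched in isolation; the addresses below are made up:

#include <stdio.h>

#define PAGE_SIZE	0x1000UL
#define PAGE_ALIGN(x)	(((x) + PAGE_SIZE - 1) & ~(PAGE_SIZE - 1))

/* true if [a_start, a_start+a_len) intersects [b_start, b_start+b_len) */
static int overlaps(unsigned long a_start, unsigned long a_len,
		    unsigned long b_start, unsigned long b_len)
{
	return a_start < b_start + b_len && b_start < a_start + a_len;
}

int main(void)
{
	unsigned long crash_start  = 0x400100;		/* unaligned request */
	unsigned long crash_size   = 0x1000000;
	unsigned long kernel_start = 0x0;
	unsigned long kernel_len   = 0x1400000;		/* running kernel image */

	unsigned long base = PAGE_ALIGN(crash_start);
	if (base != crash_start)
		printf("crash base rounded up to 0x%lx\n", base);

	if (overlaps(base, crash_size, kernel_start, kernel_len))
		printf("crash region overlaps the running kernel: rejected\n");
	return 0;
}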
diff --git a/arch/powerpc/kernel/pci-common.c b/arch/powerpc/kernel/pci-common.c
index da5a3855a0c..19b12d2cbb4 100644
--- a/arch/powerpc/kernel/pci-common.c
+++ b/arch/powerpc/kernel/pci-common.c
@@ -16,8 +16,6 @@
* 2 of the License, or (at your option) any later version.
*/
-#define DEBUG
-
#include <linux/kernel.h>
#include <linux/pci.h>
#include <linux/string.h>
@@ -258,7 +256,8 @@ int pci_read_irq_line(struct pci_dev *pci_dev)
} else {
pr_debug(" Got one, spec %d cells (0x%08x 0x%08x...) on %s\n",
oirq.size, oirq.specifier[0], oirq.specifier[1],
- oirq.controller->full_name);
+ oirq.controller ? oirq.controller->full_name :
+ "<default>");
virq = irq_create_of_mapping(oirq.controller, oirq.specifier,
oirq.size);
diff --git a/arch/powerpc/kernel/pci_64.c b/arch/powerpc/kernel/pci_64.c
index 586962f65c2..ea8eda8c87c 100644
--- a/arch/powerpc/kernel/pci_64.c
+++ b/arch/powerpc/kernel/pci_64.c
@@ -470,7 +470,7 @@ int __devinit pcibios_map_io_space(struct pci_bus *bus)
if (bus->self) {
pr_debug("IO mapping for PCI-PCI bridge %s\n",
pci_name(bus->self));
- pr_debug(" virt=0x%016lx...0x%016lx\n",
+ pr_debug(" virt=0x%016llx...0x%016llx\n",
bus->resource[0]->start + _IO_BASE,
bus->resource[0]->end + _IO_BASE);
return 0;
@@ -502,7 +502,7 @@ int __devinit pcibios_map_io_space(struct pci_bus *bus)
hose->io_base_phys - phys_page);
pr_debug("IO mapping for PHB %s\n", hose->dn->full_name);
- pr_debug(" phys=0x%016lx, virt=0x%p (alloc=0x%p)\n",
+ pr_debug(" phys=0x%016llx, virt=0x%p (alloc=0x%p)\n",
hose->io_base_phys, hose->io_base_virt, hose->io_base_alloc);
pr_debug(" size=0x%016lx (alloc=0x%016lx)\n",
hose->pci_io_size, size_page);
@@ -517,7 +517,7 @@ int __devinit pcibios_map_io_space(struct pci_bus *bus)
hose->io_resource.start += io_virt_offset;
hose->io_resource.end += io_virt_offset;
- pr_debug(" hose->io_resource=0x%016lx...0x%016lx\n",
+ pr_debug(" hose->io_resource=0x%016llx...0x%016llx\n",
hose->io_resource.start, hose->io_resource.end);
return 0;
diff --git a/arch/powerpc/kernel/prom.c b/arch/powerpc/kernel/prom.c
index c09cffafb6e..f00f83109ab 100644
--- a/arch/powerpc/kernel/prom.c
+++ b/arch/powerpc/kernel/prom.c
@@ -590,6 +590,11 @@ static void __init check_cpu_slb_size(unsigned long node)
{
u32 *slb_size_ptr;
+ slb_size_ptr = of_get_flat_dt_prop(node, "slb-size", NULL);
+ if (slb_size_ptr != NULL) {
+ mmu_slb_size = *slb_size_ptr;
+ return;
+ }
slb_size_ptr = of_get_flat_dt_prop(node, "ibm,slb-size", NULL);
if (slb_size_ptr != NULL) {
mmu_slb_size = *slb_size_ptr;
diff --git a/arch/powerpc/kernel/setup_64.c b/arch/powerpc/kernel/setup_64.c
index d8bd2161e73..2d34196bba8 100644
--- a/arch/powerpc/kernel/setup_64.c
+++ b/arch/powerpc/kernel/setup_64.c
@@ -434,8 +434,8 @@ void __init setup_system(void)
printk("Starting Linux PPC64 %s\n", init_utsname()->version);
printk("-----------------------------------------------------\n");
- printk("ppc64_pft_size = 0x%lx\n", ppc64_pft_size);
- printk("physicalMemorySize = 0x%lx\n", lmb_phys_mem_size());
+ printk("ppc64_pft_size = 0x%llx\n", ppc64_pft_size);
+ printk("physicalMemorySize = 0x%llx\n", lmb_phys_mem_size());
if (ppc64_caches.dline_size != 0x80)
printk("ppc64_caches.dcache_line_size = 0x%x\n",
ppc64_caches.dline_size);
@@ -493,7 +493,7 @@ static void __init emergency_stack_init(void)
* bringup, we need to get at them in real mode. This means they
* must also be within the RMO region.
*/
- limit = min(0x10000000UL, lmb.rmo_size);
+ limit = min(0x10000000ULL, lmb.rmo_size);
for_each_possible_cpu(i) {
unsigned long sp;
diff --git a/arch/powerpc/kernel/vio.c b/arch/powerpc/kernel/vio.c
index 94aa7b011b2..d3694498f3a 100644
--- a/arch/powerpc/kernel/vio.c
+++ b/arch/powerpc/kernel/vio.c
@@ -492,14 +492,14 @@ static void *vio_dma_iommu_alloc_coherent(struct device *dev, size_t size,
struct vio_dev *viodev = to_vio_dev(dev);
void *ret;
- if (vio_cmo_alloc(viodev, roundup(size, IOMMU_PAGE_SIZE))) {
+ if (vio_cmo_alloc(viodev, roundup(size, PAGE_SIZE))) {
atomic_inc(&viodev->cmo.allocs_failed);
return NULL;
}
ret = dma_iommu_ops.alloc_coherent(dev, size, dma_handle, flag);
if (unlikely(ret == NULL)) {
- vio_cmo_dealloc(viodev, roundup(size, IOMMU_PAGE_SIZE));
+ vio_cmo_dealloc(viodev, roundup(size, PAGE_SIZE));
atomic_inc(&viodev->cmo.allocs_failed);
}
@@ -513,7 +513,7 @@ static void vio_dma_iommu_free_coherent(struct device *dev, size_t size,
dma_iommu_ops.free_coherent(dev, size, vaddr, dma_handle);
- vio_cmo_dealloc(viodev, roundup(size, IOMMU_PAGE_SIZE));
+ vio_cmo_dealloc(viodev, roundup(size, PAGE_SIZE));
}
static dma_addr_t vio_dma_iommu_map_page(struct device *dev, struct page *page,
@@ -572,6 +572,7 @@ static int vio_dma_iommu_map_sg(struct device *dev, struct scatterlist *sglist,
if (unlikely(!ret)) {
vio_cmo_dealloc(viodev, alloc_size);
atomic_inc(&viodev->cmo.allocs_failed);
+ return ret;
}
for (sgl = sglist, count = 0; count < ret; count++, sgl++)
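Note: the CMO entitlement here is now charged in system PAGE_SIZE units rather than IOMMU_PAGE_SIZE units, presumably because the coherent allocation consumes whole system pages; the map_sg path also gains an early return so a failed mapping no longer falls through to the success loop. A standalone look at how much the two granularities differ on a 64K-page configuration (the page sizes below are assumptions):

#include <stdio.h>

#define IOMMU_PAGE_SIZE	0x1000UL	/* 4K I/O page */
#define PAGE_SIZE	0x10000UL	/* e.g. a 64K-page ppc64 config */
#define roundup(x, y)	((((x) + (y) - 1) / (y)) * (y))

int main(void)
{
	unsigned long size = 6000;	/* arbitrary allocation size */

	printf("charged at IOMMU page granularity:  %lu bytes\n",
	       roundup(size, IOMMU_PAGE_SIZE));
	printf("charged at system page granularity: %lu bytes\n",
	       roundup(size, PAGE_SIZE));
	return 0;
}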
diff --git a/arch/powerpc/kernel/vmlinux.lds.S b/arch/powerpc/kernel/vmlinux.lds.S
index 47bf15cd2c9..161b9b9691f 100644
--- a/arch/powerpc/kernel/vmlinux.lds.S
+++ b/arch/powerpc/kernel/vmlinux.lds.S
@@ -87,7 +87,9 @@ SECTIONS
/* The dummy segment contents for the bug workaround mentioned above
near PHDRS. */
.dummy : AT(ADDR(.dummy) - LOAD_OFFSET) {
- LONG(0xf177)
+ LONG(0)
+ LONG(0)
+ LONG(0)
} :kernel :dummy
/*