Diffstat (limited to 'arch/powerpc/mm')
-rw-r--r-- | arch/powerpc/mm/fault.c          |   6
-rw-r--r-- | arch/powerpc/mm/gup.c            |  37
-rw-r--r-- | arch/powerpc/mm/hash_native_64.c |  12
-rw-r--r-- | arch/powerpc/mm/hash_utils_64.c  |   2
-rw-r--r-- | arch/powerpc/mm/init_32.c        |   2
-rw-r--r-- | arch/powerpc/mm/mem.c            |   2
-rw-r--r-- | arch/powerpc/mm/numa.c           | 161
-rw-r--r-- | arch/powerpc/mm/slb.c            |   9
-rw-r--r-- | arch/powerpc/mm/subpage-prot.c   |   4
9 files changed, 144 insertions, 91 deletions
diff --git a/arch/powerpc/mm/fault.c b/arch/powerpc/mm/fault.c
index 8726779e140..76d8e7cc780 100644
--- a/arch/powerpc/mm/fault.c
+++ b/arch/powerpc/mm/fault.c
@@ -443,8 +443,12 @@ good_area:
                                      regs, address);
 #ifdef CONFIG_PPC_SMLPAR
        if (firmware_has_feature(FW_FEATURE_CMO)) {
+               u32 page_ins;
+
                preempt_disable();
-               get_lppaca()->page_ins += (1 << PAGE_FACTOR);
+               page_ins = be32_to_cpu(get_lppaca()->page_ins);
+               page_ins += 1 << PAGE_FACTOR;
+               get_lppaca()->page_ins = cpu_to_be32(page_ins);
                preempt_enable();
        }
 #endif /* CONFIG_PPC_SMLPAR */
diff --git a/arch/powerpc/mm/gup.c b/arch/powerpc/mm/gup.c
index 49822d90ea9..6936547018b 100644
--- a/arch/powerpc/mm/gup.c
+++ b/arch/powerpc/mm/gup.c
@@ -117,8 +117,8 @@ static int gup_pud_range(pgd_t pgd, unsigned long addr, unsigned long end,
        return 1;
 }
 
-int get_user_pages_fast(unsigned long start, int nr_pages, int write,
-                       struct page **pages)
+int __get_user_pages_fast(unsigned long start, int nr_pages, int write,
+                         struct page **pages)
 {
        struct mm_struct *mm = current->mm;
        unsigned long addr, len, end;
@@ -135,7 +135,7 @@ int get_user_pages_fast(unsigned long start, int nr_pages, int write,
 
        if (unlikely(!access_ok(write ? VERIFY_WRITE : VERIFY_READ,
                                        start, len)))
-               goto slow_irqon;
+               return 0;
 
        pr_devel("  aligned: %lx .. %lx\n", start, end);
@@ -166,30 +166,35 @@ int get_user_pages_fast(unsigned long start, int nr_pages, int write,
                         (void *)pgd_val(pgd));
                next = pgd_addr_end(addr, end);
                if (pgd_none(pgd))
-                       goto slow;
+                       break;
                if (pgd_huge(pgd)) {
                        if (!gup_hugepte((pte_t *)pgdp, PGDIR_SIZE, addr, next,
                                         write, pages, &nr))
-                               goto slow;
+                               break;
                } else if (is_hugepd(pgdp)) {
                        if (!gup_hugepd((hugepd_t *)pgdp, PGDIR_SHIFT,
                                        addr, next, write, pages, &nr))
-                               goto slow;
+                               break;
                } else if (!gup_pud_range(pgd, addr, next, write, pages, &nr))
-                       goto slow;
+                       break;
        } while (pgdp++, addr = next, addr != end);
 
        local_irq_enable();
-       VM_BUG_ON(nr != (end - start) >> PAGE_SHIFT);
        return nr;
+}
 
-       {
-               int ret;
+int get_user_pages_fast(unsigned long start, int nr_pages, int write,
+                       struct page **pages)
+{
+       struct mm_struct *mm = current->mm;
+       int nr, ret;
+
+       start &= PAGE_MASK;
+       nr = __get_user_pages_fast(start, nr_pages, write, pages);
+       ret = nr;
 
-slow:
-               local_irq_enable();
-slow_irqon:
+       if (nr < nr_pages) {
                pr_devel("  slow path ! nr = %d\n", nr);
 
                /* Try to get the remaining pages with get_user_pages */
@@ -198,7 +203,7 @@ slow_irqon:
 
                down_read(&mm->mmap_sem);
                ret = get_user_pages(current, mm, start,
-                       (end - start) >> PAGE_SHIFT, write, 0, pages, NULL);
+                                    nr_pages - nr, write, 0, pages, NULL);
                up_read(&mm->mmap_sem);
 
                /* Have to be a bit careful with return values */
@@ -208,9 +213,9 @@ slow_irqon:
                        else
                                ret += nr;
                }
-
-               return ret;
        }
+
+       return ret;
 }
 
 #endif /* __HAVE_ARCH_PTE_SPECIAL */
diff --git a/arch/powerpc/mm/hash_native_64.c b/arch/powerpc/mm/hash_native_64.c
index 3f0c30ae479..c33d939120c 100644
--- a/arch/powerpc/mm/hash_native_64.c
+++ b/arch/powerpc/mm/hash_native_64.c
@@ -43,6 +43,7 @@ static inline void __tlbie(unsigned long vpn, int psize, int apsize, int ssize)
 {
        unsigned long va;
        unsigned int penc;
+       unsigned long sllp;
 
        /*
         * We need 14 to 65 bits of va for a tlibe of 4K page
@@ -64,7 +65,9 @@ static inline void __tlbie(unsigned long vpn, int psize, int apsize, int ssize)
                /* clear out bits after (52) [0....52.....63] */
                va &= ~((1ul << (64 - 52)) - 1);
                va |= ssize << 8;
-               va |= mmu_psize_defs[apsize].sllp << 6;
+               sllp = ((mmu_psize_defs[apsize].sllp & SLB_VSID_L) >> 6) |
+                       ((mmu_psize_defs[apsize].sllp & SLB_VSID_LP) >> 4);
+               va |= sllp << 5;
                asm volatile(ASM_FTR_IFCLR("tlbie %0,0", PPC_TLBIE(%1,%0), %2)
                             : : "r" (va), "r"(0), "i" (CPU_FTR_ARCH_206)
                             : "memory");
@@ -98,6 +101,7 @@ static inline void __tlbiel(unsigned long vpn, int psize, int apsize, int ssize)
 {
        unsigned long va;
        unsigned int penc;
+       unsigned long sllp;
 
        /* VPN_SHIFT can be atmost 12 */
        va = vpn << VPN_SHIFT;
@@ -113,7 +117,9 @@ static inline void __tlbiel(unsigned long vpn, int psize, int apsize, int ssize)
                /* clear out bits after(52) [0....52.....63] */
                va &= ~((1ul << (64 - 52)) - 1);
                va |= ssize << 8;
-               va |= mmu_psize_defs[apsize].sllp << 6;
+               sllp = ((mmu_psize_defs[apsize].sllp & SLB_VSID_L) >> 6) |
+                       ((mmu_psize_defs[apsize].sllp & SLB_VSID_LP) >> 4);
+               va |= sllp << 5;
                asm volatile(".long 0x7c000224 | (%0 << 11) | (0 << 21)"
                             : : "r"(va) : "memory");
                break;
@@ -554,6 +560,7 @@ static void hpte_decode(struct hash_pte *hpte, unsigned long slot,
                        seg_off |= vpi << shift;
                }
                *vpn = vsid << (SID_SHIFT - VPN_SHIFT) | seg_off >> VPN_SHIFT;
+               break;
        case MMU_SEGSIZE_1T:
                /* We only have 40 - 23 bits of seg_off in avpn */
                seg_off = (avpn & 0x1ffff) << 23;
@@ -563,6 +570,7 @@ static void hpte_decode(struct hash_pte *hpte, unsigned long slot,
                        seg_off |= vpi << shift;
                }
                *vpn = vsid << (SID_SHIFT_1T - VPN_SHIFT) | seg_off >> VPN_SHIFT;
+               break;
        default:
                *vpn = size = 0;
        }
diff --git a/arch/powerpc/mm/hash_utils_64.c b/arch/powerpc/mm/hash_utils_64.c
index 6ecc38bd5b2..bde8b558975 100644
--- a/arch/powerpc/mm/hash_utils_64.c
+++ b/arch/powerpc/mm/hash_utils_64.c
@@ -907,7 +907,7 @@ static int subpage_protection(struct mm_struct *mm, unsigned long ea)
        if (ea >= spt->maxaddr)
                return 0;
 
-       if (ea < 0x100000000) {
+       if (ea < 0x100000000UL) {
                /* addresses below 4GB use spt->low_prot */
                sbpm = spt->low_prot;
        } else {
diff --git a/arch/powerpc/mm/init_32.c b/arch/powerpc/mm/init_32.c
index 01e2db97a21..d47d3dab487 100644
--- a/arch/powerpc/mm/init_32.c
+++ b/arch/powerpc/mm/init_32.c
@@ -52,7 +52,7 @@
 #if defined(CONFIG_KERNEL_START_BOOL) || defined(CONFIG_LOWMEM_SIZE_BOOL)
 /* The amount of lowmem must be within 0xF0000000 - KERNELBASE. */
 #if (CONFIG_LOWMEM_SIZE > (0xF0000000 - PAGE_OFFSET))
-#error "You must adjust CONFIG_LOWMEM_SIZE or CONFIG_START_KERNEL"
+#error "You must adjust CONFIG_LOWMEM_SIZE or CONFIG_KERNEL_START"
 #endif
 #endif
 #define MAX_LOW_MEM    CONFIG_LOWMEM_SIZE
diff --git a/arch/powerpc/mm/mem.c b/arch/powerpc/mm/mem.c
index 7f4bea16202..1cf9c5b67f2 100644
--- a/arch/powerpc/mm/mem.c
+++ b/arch/powerpc/mm/mem.c
@@ -514,7 +514,7 @@ static int add_system_ram_resources(void)
                        res->name = "System RAM";
                        res->start = base;
                        res->end = base + size - 1;
-                       res->flags = IORESOURCE_MEM;
+                       res->flags = IORESOURCE_MEM | IORESOURCE_BUSY;
                        WARN_ON(request_resource(&iomem_resource, res) < 0);
                }
        }
diff --git a/arch/powerpc/mm/numa.c b/arch/powerpc/mm/numa.c
index 08397217e8a..c916127f10c 100644
--- a/arch/powerpc/mm/numa.c
+++ b/arch/powerpc/mm/numa.c
@@ -27,6 +27,7 @@
 #include <linux/seq_file.h>
 #include <linux/uaccess.h>
 #include <linux/slab.h>
+#include <asm/cputhreads.h>
 #include <asm/sparsemem.h>
 #include <asm/prom.h>
 #include <asm/smp.h>
@@ -57,7 +58,7 @@ static int form1_affinity;
 
 #define MAX_DISTANCE_REF_POINTS 4
 static int distance_ref_points_depth;
-static const unsigned int *distance_ref_points;
+static const __be32 *distance_ref_points;
 static int distance_lookup_table[MAX_NUMNODES][MAX_DISTANCE_REF_POINTS];
 
 /*
@@ -178,7 +179,7 @@ static void unmap_cpu_from_node(unsigned long cpu)
 #endif /* CONFIG_HOTPLUG_CPU || CONFIG_PPC_SPLPAR */
 
 /* must hold reference to node during call */
-static const int *of_get_associativity(struct device_node *dev)
+static const __be32 *of_get_associativity(struct device_node *dev)
 {
        return of_get_property(dev, "ibm,associativity", NULL);
 }
@@ -188,9 +189,9 @@ static const int *of_get_associativity(struct device_node *dev)
  * it exists (the property exists only in kexec/kdump kernels,
  * added by kexec-tools)
  */
-static const u32 *of_get_usable_memory(struct device_node *memory)
+static const __be32 *of_get_usable_memory(struct device_node *memory)
 {
-       const u32 *prop;
+       const __be32 *prop;
        u32 len;
        prop = of_get_property(memory, "linux,drconf-usable-memory", &len);
        if (!prop || len < sizeof(unsigned int))
@@ -218,7 +219,7 @@ int __node_distance(int a, int b)
 }
 
 static void initialize_distance_lookup_table(int nid,
-               const unsigned int *associativity)
+               const __be32 *associativity)
 {
        int i;
 
@@ -226,29 +227,32 @@ static void initialize_distance_lookup_table(int nid,
                return;
 
        for (i = 0; i < distance_ref_points_depth; i++) {
-               distance_lookup_table[nid][i] =
-                       associativity[distance_ref_points[i]];
+               const __be32 *entry;
+
+               entry = &associativity[be32_to_cpu(distance_ref_points[i])];
+               distance_lookup_table[nid][i] = of_read_number(entry, 1);
        }
 }
 
 /* Returns nid in the range [0..MAX_NUMNODES-1], or -1 if no useful numa
  * info is found.
  */
-static int associativity_to_nid(const unsigned int *associativity)
+static int associativity_to_nid(const __be32 *associativity)
 {
        int nid = -1;
 
        if (min_common_depth == -1)
                goto out;
 
-       if (associativity[0] >= min_common_depth)
-               nid = associativity[min_common_depth];
+       if (of_read_number(associativity, 1) >= min_common_depth)
+               nid = of_read_number(&associativity[min_common_depth], 1);
 
        /* POWER4 LPAR uses 0xffff as invalid node */
        if (nid == 0xffff || nid >= MAX_NUMNODES)
                nid = -1;
 
-       if (nid > 0 && associativity[0] >= distance_ref_points_depth)
+       if (nid > 0 &&
+           of_read_number(associativity, 1) >= distance_ref_points_depth)
                initialize_distance_lookup_table(nid, associativity);
 
 out:
@@ -261,7 +265,7 @@ out:
 static int of_node_to_nid_single(struct device_node *device)
 {
        int nid = -1;
-       const unsigned int *tmp;
+       const __be32 *tmp;
 
        tmp = of_get_associativity(device);
        if (tmp)
@@ -333,7 +337,7 @@ static int __init find_min_common_depth(void)
        }
 
        if (form1_affinity) {
-               depth = distance_ref_points[0];
+               depth = of_read_number(distance_ref_points, 1);
        } else {
                if (distance_ref_points_depth < 2) {
                        printk(KERN_WARNING "NUMA: "
@@ -341,7 +345,7 @@
                        goto err;
                }
 
-               depth = distance_ref_points[1];
+               depth = of_read_number(&distance_ref_points[1], 1);
        }
 
        /*
@@ -375,12 +379,12 @@ static void __init get_n_mem_cells(int *n_addr_cells, int *n_size_cells)
        of_node_put(memory);
 }
 
-static unsigned long read_n_cells(int n, const unsigned int **buf)
+static unsigned long read_n_cells(int n, const __be32 **buf)
 {
        unsigned long result = 0;
 
        while (n--) {
-               result = (result << 32) | **buf;
+               result = (result << 32) | of_read_number(*buf, 1);
                (*buf)++;
        }
        return result;
@@ -390,17 +394,17 @@ static unsigned long read_n_cells(int n, const unsigned int **buf)
  * Read the next memblock list entry from the ibm,dynamic-memory property
  * and return the information in the provided of_drconf_cell structure.
  */
-static void read_drconf_cell(struct of_drconf_cell *drmem, const u32 **cellp)
+static void read_drconf_cell(struct of_drconf_cell *drmem, const __be32 **cellp)
 {
-       const u32 *cp;
+       const __be32 *cp;
 
        drmem->base_addr = read_n_cells(n_mem_addr_cells, cellp);
 
        cp = *cellp;
-       drmem->drc_index = cp[0];
-       drmem->reserved = cp[1];
-       drmem->aa_index = cp[2];
-       drmem->flags = cp[3];
+       drmem->drc_index = of_read_number(cp, 1);
+       drmem->reserved = of_read_number(&cp[1], 1);
+       drmem->aa_index = of_read_number(&cp[2], 1);
+       drmem->flags = of_read_number(&cp[3], 1);
 
        *cellp = cp + 4;
 }
@@ -412,16 +416,16 @@ static void read_drconf_cell(struct of_drconf_cell *drmem, const u32 **cellp)
  * list entries followed by N memblock list entries.  Each memblock list entry
  * contains information as laid out in the of_drconf_cell struct above.
  */
-static int of_get_drconf_memory(struct device_node *memory, const u32 **dm)
+static int of_get_drconf_memory(struct device_node *memory, const __be32 **dm)
 {
-       const u32 *prop;
+       const __be32 *prop;
        u32 len, entries;
 
        prop = of_get_property(memory, "ibm,dynamic-memory", &len);
        if (!prop || len < sizeof(unsigned int))
                return 0;
 
-       entries = *prop++;
+       entries = of_read_number(prop++, 1);
 
        /* Now that we know the number of entries, revalidate the size
         * of the property read in to ensure we have everything
@@ -439,7 +443,7 @@ static int of_get_drconf_memory(struct device_node *memory, const u32 **dm)
  */
 static u64 of_get_lmb_size(struct device_node *memory)
 {
-       const u32 *prop;
+       const __be32 *prop;
        u32 len;
 
        prop = of_get_property(memory, "ibm,lmb-size", &len);
@@ -452,7 +456,7 @@ static u64 of_get_lmb_size(struct device_node *memory)
 struct assoc_arrays {
        u32     n_arrays;
        u32     array_sz;
-       const u32 *arrays;
+       const __be32 *arrays;
 };
 
 /*
@@ -468,15 +472,15 @@ struct assoc_arrays {
 static int of_get_assoc_arrays(struct device_node *memory,
                               struct assoc_arrays *aa)
 {
-       const u32 *prop;
+       const __be32 *prop;
        u32 len;
 
        prop = of_get_property(memory, "ibm,associativity-lookup-arrays", &len);
        if (!prop || len < 2 * sizeof(unsigned int))
                return -1;
 
-       aa->n_arrays = *prop++;
-       aa->array_sz = *prop++;
+       aa->n_arrays = of_read_number(prop++, 1);
+       aa->array_sz = of_read_number(prop++, 1);
 
        /* Now that we know the number of arrays and size of each array,
         * revalidate the size of the property read in.
@@ -503,7 +507,7 @@ static int of_drconf_to_nid_single(struct of_drconf_cell *drmem,
            !(drmem->flags & DRCONF_MEM_AI_INVALID) &&
            drmem->aa_index < aa->n_arrays) {
                index = drmem->aa_index * aa->array_sz + min_common_depth - 1;
-               nid = aa->arrays[index];
+               nid = of_read_number(&aa->arrays[index], 1);
 
                if (nid == 0xffff || nid >= MAX_NUMNODES)
                        nid = default_nid;
@@ -594,7 +598,7 @@ static unsigned long __init numa_enforce_memory_limit(unsigned long start,
  * Reads the counter for a given entry in
  * linux,drconf-usable-memory property
  */
-static inline int __init read_usm_ranges(const u32 **usm)
+static inline int __init read_usm_ranges(const __be32 **usm)
 {
        /*
         * For each lmb in ibm,dynamic-memory a corresponding
@@ -611,7 +615,7 @@ static inline int __init read_usm_ranges(const u32 **usm)
  */
 static void __init parse_drconf_memory(struct device_node *memory)
 {
-       const u32 *uninitialized_var(dm), *usm;
+       const __be32 *uninitialized_var(dm), *usm;
        unsigned int n, rc, ranges, is_kexec_kdump = 0;
        unsigned long lmb_size, base, size, sz;
        int nid;
@@ -720,7 +724,7 @@ static int __init parse_numa_properties(void)
                unsigned long size;
                int nid;
                int ranges;
-               const unsigned int *memcell_buf;
+               const __be32 *memcell_buf;
                unsigned int len;
 
                memcell_buf = of_get_property(memory,
@@ -1105,7 +1109,7 @@ early_param("numa", early_numa);
 static int hot_add_drconf_scn_to_nid(struct device_node *memory,
                                     unsigned long scn_addr)
 {
-       const u32 *dm;
+       const __be32 *dm;
        unsigned int drconf_cell_cnt, rc;
        unsigned long lmb_size;
        struct assoc_arrays aa;
@@ -1158,7 +1162,7 @@ int hot_add_node_scn_to_nid(unsigned long scn_addr)
        for_each_node_by_type(memory, "memory") {
                unsigned long start, size;
                int ranges;
-               const unsigned int *memcell_buf;
+               const __be32 *memcell_buf;
                unsigned int len;
 
                memcell_buf = of_get_property(memory, "reg", &len);
@@ -1231,7 +1235,7 @@ static u64 hot_add_drconf_memory_max(void)
        struct device_node *memory = NULL;
        unsigned int drconf_cell_cnt = 0;
        u64 lmb_size = 0;
-       const u32 *dm = 0;
+       const __be32 *dm = 0;
 
        memory = of_find_node_by_path("/ibm,dynamic-reconfiguration-memory");
        if (memory) {
@@ -1318,7 +1322,8 @@ static int update_cpu_associativity_changes_mask(void)
                        }
                }
                if (changed) {
-                       cpumask_set_cpu(cpu, changes);
+                       cpumask_or(changes, changes, cpu_sibling_mask(cpu));
+                       cpu = cpu_last_thread_sibling(cpu);
                }
        }
 
@@ -1335,40 +1340,41 @@ static int update_cpu_associativity_changes_mask(void)
  * Convert the associativity domain numbers returned from the hypervisor
  * to the sequence they would appear in the ibm,associativity property.
  */
-static int vphn_unpack_associativity(const long *packed, unsigned int *unpacked)
+static int vphn_unpack_associativity(const long *packed, __be32 *unpacked)
 {
        int i, nr_assoc_doms = 0;
-       const u16 *field = (const u16*) packed;
+       const __be16 *field = (const __be16 *) packed;
 
 #define VPHN_FIELD_UNUSED      (0xffff)
 #define VPHN_FIELD_MSB         (0x8000)
 #define VPHN_FIELD_MASK                (~VPHN_FIELD_MSB)
 
        for (i = 1; i < VPHN_ASSOC_BUFSIZE; i++) {
-               if (*field == VPHN_FIELD_UNUSED) {
+               if (be16_to_cpup(field) == VPHN_FIELD_UNUSED) {
                        /* All significant fields processed, and remaining
                         * fields contain the reserved value of all 1's.
                         * Just store them.
                         */
-                       unpacked[i] = *((u32*)field);
+                       unpacked[i] = *((__be32 *)field);
                        field += 2;
-               } else if (*field & VPHN_FIELD_MSB) {
+               } else if (be16_to_cpup(field) & VPHN_FIELD_MSB) {
                        /* Data is in the lower 15 bits of this field */
-                       unpacked[i] = *field & VPHN_FIELD_MASK;
+                       unpacked[i] = cpu_to_be32(
+                               be16_to_cpup(field) & VPHN_FIELD_MASK);
                        field++;
                        nr_assoc_doms++;
                } else {
                        /* Data is in the lower 15 bits of this field
                         * concatenated with the next 16 bit field
                         */
-                       unpacked[i] = *((u32*)field);
+                       unpacked[i] = *((__be32 *)field);
                        field += 2;
                        nr_assoc_doms++;
                }
        }
 
        /* The first cell contains the length of the property */
-       unpacked[0] = nr_assoc_doms;
+       unpacked[0] = cpu_to_be32(nr_assoc_doms);
 
        return nr_assoc_doms;
 }
@@ -1377,7 +1383,7 @@ static int vphn_unpack_associativity(const long *packed, unsigned int *unpacked)
  * Retrieve the new associativity information for a virtual processor's
  * home node.
  */
-static long hcall_vphn(unsigned long cpu, unsigned int *associativity)
+static long hcall_vphn(unsigned long cpu, __be32 *associativity)
 {
        long rc;
        long retbuf[PLPAR_HCALL9_BUFSIZE] = {0};
@@ -1391,7 +1397,7 @@ static long hcall_vphn(unsigned long cpu, unsigned int *associativity)
 }
 
 static long vphn_get_associativity(unsigned long cpu,
-                                       unsigned int *associativity)
+                                       __be32 *associativity)
 {
        long rc;
 
@@ -1426,7 +1432,7 @@ static int update_cpu_topology(void *data)
        if (!data)
                return -EINVAL;
 
-       cpu = get_cpu();
+       cpu = smp_processor_id();
 
        for (update = data; update; update = update->next) {
                if (cpu != update->cpu)
@@ -1446,12 +1452,12 @@ static int update_cpu_topology(void *data)
  */
 int arch_update_cpu_topology(void)
 {
-       unsigned int cpu, changed = 0;
+       unsigned int cpu, sibling, changed = 0;
        struct topology_update_data *updates, *ud;
-       unsigned int associativity[VPHN_ASSOC_BUFSIZE] = {0};
+       __be32 associativity[VPHN_ASSOC_BUFSIZE] = {0};
        cpumask_t updated_cpus;
        struct device *dev;
-       int weight, i = 0;
+       int weight, new_nid, i = 0;
 
        weight = cpumask_weight(&cpu_associativity_changes_mask);
        if (!weight)
@@ -1464,19 +1470,46 @@ int arch_update_cpu_topology(void)
        cpumask_clear(&updated_cpus);
 
        for_each_cpu(cpu, &cpu_associativity_changes_mask) {
-               ud = &updates[i++];
-               ud->cpu = cpu;
-               vphn_get_associativity(cpu, associativity);
-               ud->new_nid = associativity_to_nid(associativity);
-
-               if (ud->new_nid < 0 || !node_online(ud->new_nid))
-                       ud->new_nid = first_online_node;
+               /*
+                * If siblings aren't flagged for changes, updates list
+                * will be too short. Skip on this update and set for next
+                * update.
+                */
+               if (!cpumask_subset(cpu_sibling_mask(cpu),
+                                       &cpu_associativity_changes_mask)) {
+                       pr_info("Sibling bits not set for associativity "
+                                       "change, cpu%d\n", cpu);
+                       cpumask_or(&cpu_associativity_changes_mask,
+                                       &cpu_associativity_changes_mask,
+                                       cpu_sibling_mask(cpu));
+                       cpu = cpu_last_thread_sibling(cpu);
+                       continue;
+               }
 
-               ud->old_nid = numa_cpu_lookup_table[cpu];
-               cpumask_set_cpu(cpu, &updated_cpus);
+               /* Use associativity from first thread for all siblings */
+               vphn_get_associativity(cpu, associativity);
+               new_nid = associativity_to_nid(associativity);
+               if (new_nid < 0 || !node_online(new_nid))
+                       new_nid = first_online_node;
+
+               if (new_nid == numa_cpu_lookup_table[cpu]) {
+                       cpumask_andnot(&cpu_associativity_changes_mask,
+                                       &cpu_associativity_changes_mask,
+                                       cpu_sibling_mask(cpu));
+                       cpu = cpu_last_thread_sibling(cpu);
+                       continue;
+               }
 
-               if (i < weight)
-                       ud->next = &updates[i];
+               for_each_cpu(sibling, cpu_sibling_mask(cpu)) {
+                       ud = &updates[i++];
+                       ud->cpu = sibling;
+                       ud->new_nid = new_nid;
+                       ud->old_nid = numa_cpu_lookup_table[sibling];
+                       cpumask_set_cpu(sibling, &updated_cpus);
+                       if (i < weight)
+                               ud->next = &updates[i];
+               }
+               cpu = cpu_last_thread_sibling(cpu);
        }
 
        stop_machine(update_cpu_topology, &updates[0], &updated_cpus);
@@ -1580,7 +1613,7 @@ int start_topology_update(void)
 #endif
                }
        } else if (firmware_has_feature(FW_FEATURE_VPHN) &&
-                  get_lppaca()->shared_proc) {
+                  lppaca_shared_proc(get_lppaca())) {
                if (!vphn_enabled) {
                        prrn_enabled = 0;
                        vphn_enabled = 1;
diff --git a/arch/powerpc/mm/slb.c b/arch/powerpc/mm/slb.c
index a538c80db2d..9d1d33cd2be 100644
--- a/arch/powerpc/mm/slb.c
+++ b/arch/powerpc/mm/slb.c
@@ -66,8 +66,10 @@ static inline void slb_shadow_update(unsigned long ea, int ssize,
         * we only update the current CPU's SLB shadow buffer.
         */
        get_slb_shadow()->save_area[entry].esid = 0;
-       get_slb_shadow()->save_area[entry].vsid = mk_vsid_data(ea, ssize, flags);
-       get_slb_shadow()->save_area[entry].esid = mk_esid_data(ea, ssize, entry);
+       get_slb_shadow()->save_area[entry].vsid =
+               cpu_to_be64(mk_vsid_data(ea, ssize, flags));
+       get_slb_shadow()->save_area[entry].esid =
+               cpu_to_be64(mk_esid_data(ea, ssize, entry));
 }
 
 static inline void slb_shadow_clear(unsigned long entry)
@@ -112,7 +114,8 @@ static void __slb_flush_and_rebolt(void)
        } else {
                /* Update stack entry; others don't change */
                slb_shadow_update(get_paca()->kstack, mmu_kernel_ssize, lflags, 2);
-               ksp_vsid_data = get_slb_shadow()->save_area[2].vsid;
+               ksp_vsid_data =
+                       be64_to_cpu(get_slb_shadow()->save_area[2].vsid);
        }
 
        /* We need to do this all in asm, so we're sure we don't touch
diff --git a/arch/powerpc/mm/subpage-prot.c b/arch/powerpc/mm/subpage-prot.c
index aa74acb0fdf..a770df2dae7 100644
--- a/arch/powerpc/mm/subpage-prot.c
+++ b/arch/powerpc/mm/subpage-prot.c
@@ -105,7 +105,7 @@ static void subpage_prot_clear(unsigned long addr, unsigned long len)
                limit = spt->maxaddr;
        for (; addr < limit; addr = next) {
                next = pmd_addr_end(addr, limit);
-               if (addr < 0x100000000) {
+               if (addr < 0x100000000UL) {
                        spm = spt->low_prot;
                } else {
                        spm = spt->protptrs[addr >> SBP_L3_SHIFT];
@@ -219,7 +219,7 @@ long sys_subpage_prot(unsigned long addr, unsigned long len, u32 __user *map)
        for (limit = addr + len; addr < limit; addr = next) {
                next = pmd_addr_end(addr, limit);
                err = -ENOMEM;
-               if (addr < 0x100000000) {
+               if (addr < 0x100000000UL) {
                        spm = spt->low_prot;
                } else {
                        spm = spt->protptrs[addr >> SBP_L3_SHIFT];
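
Note on the recurring pattern above: device-tree cells and firmware-shared fields (the lppaca, the SLB shadow save area) are stored big-endian, so a little-endian kernel must byte-swap on each access instead of dereferencing the raw value. The standalone userspace sketch below illustrates the idea; it is not kernel code, and read_be32() is a hypothetical stand-in for the kernel's be32_to_cpu()/of_read_number(cell, 1). The property layout ("first cell holds the domain count") is assumed for illustration only.

/*
 * Standalone sketch of big-endian cell decoding (not kernel code).
 * read_be32() mimics what be32_to_cpu()/of_read_number(cell, 1) do in
 * the patched numa.c: always byte-swap a big-endian cell on access.
 */
#include <arpa/inet.h>  /* htonl(), used only to build big-endian test data */
#include <stdint.h>
#include <stdio.h>

typedef uint32_t be32;  /* a big-endian 32-bit cell, as stored in the DT */

static uint32_t read_be32(const be32 *cell)
{
        const uint8_t *p = (const uint8_t *)cell;

        /* Assemble the value byte by byte, so host endianness is irrelevant. */
        return ((uint32_t)p[0] << 24) | ((uint32_t)p[1] << 16) |
               ((uint32_t)p[2] << 8) | (uint32_t)p[3];
}

int main(void)
{
        /* Fake "ibm,associativity"-style property: cell 0 is the domain count. */
        be32 assoc[5];
        uint32_t i, depth;

        assoc[0] = htonl(4);
        for (i = 1; i <= 4; i++)
                assoc[i] = htonl(i * 10);

        depth = read_be32(&assoc[0]);   /* correct on LE and BE hosts alike */
        for (i = 1; i <= depth; i++)
                printf("domain %u: %u\n", i, read_be32(&assoc[i]));

        return 0;
}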