Diffstat (limited to 'arch/arm/include/asm')
22 files changed, 664 insertions, 301 deletions
diff --git a/arch/arm/include/asm/assembler.h b/arch/arm/include/asm/assembler.h
index 29035e86a59..b6e65dedfd7 100644
--- a/arch/arm/include/asm/assembler.h
+++ b/arch/arm/include/asm/assembler.h
@@ -187,6 +187,17 @@
 #endif
 /*
+ * Instruction barrier
+ */
+	.macro	instr_sync
+#if __LINUX_ARM_ARCH__ >= 7
+	isb
+#elif __LINUX_ARM_ARCH__ == 6
+	mcr	p15, 0, r0, c7, c5, 4
+#endif
+	.endm
+
+/*
  * SMP data memory barrier
  */
 	.macro	smp_dmb mode
diff --git a/arch/arm/include/asm/cti.h b/arch/arm/include/asm/cti.h
new file mode 100644
index 00000000000..a0ada3ea435
--- /dev/null
+++ b/arch/arm/include/asm/cti.h
@@ -0,0 +1,179 @@
+#ifndef __ASMARM_CTI_H
+#define __ASMARM_CTI_H
+
+#include <asm/io.h>
+
+/* The registers' definition is from section 3.2 of
+ * Embedded Cross Trigger Revision: r0p0
+ */
+#define CTICONTROL		0x000
+#define CTISTATUS		0x004
+#define CTILOCK			0x008
+#define CTIPROTECTION		0x00C
+#define CTIINTACK		0x010
+#define CTIAPPSET		0x014
+#define CTIAPPCLEAR		0x018
+#define CTIAPPPULSE		0x01c
+#define CTIINEN			0x020
+#define CTIOUTEN		0x0A0
+#define CTITRIGINSTATUS		0x130
+#define CTITRIGOUTSTATUS	0x134
+#define CTICHINSTATUS		0x138
+#define CTICHOUTSTATUS		0x13c
+#define CTIPERIPHID0		0xFE0
+#define CTIPERIPHID1		0xFE4
+#define CTIPERIPHID2		0xFE8
+#define CTIPERIPHID3		0xFEC
+#define CTIPCELLID0		0xFF0
+#define CTIPCELLID1		0xFF4
+#define CTIPCELLID2		0xFF8
+#define CTIPCELLID3		0xFFC
+
+/* The below are from section 3.6.4 of
+ * CoreSight v1.0 Architecture Specification
+ */
+#define LOCKACCESS		0xFB0
+#define LOCKSTATUS		0xFB4
+
+/* write this value to LOCKACCESS will unlock the module, and
+ * other value will lock the module
+ */
+#define LOCKCODE		0xC5ACCE55
+
+/**
+ * struct cti - cross trigger interface struct
+ * @base: mapped virtual address for the cti base
+ * @irq: irq number for the cti
+ * @trig_out_for_irq: triger out number which will cause
+ *	the @irq happen
+ *
+ * cti struct used to operate cti registers.
+ */
+struct cti {
+	void __iomem *base;
+	int irq;
+	int trig_out_for_irq;
+};
+
+/**
+ * cti_init - initialize the cti instance
+ * @cti: cti instance
+ * @base: mapped virtual address for the cti base
+ * @irq: irq number for the cti
+ * @trig_out: triger out number which will cause
+ *	the @irq happen
+ *
+ * called by machine code to pass the board dependent
+ * @base, @irq and @trig_out to cti.
+ */
+static inline void cti_init(struct cti *cti,
+	void __iomem *base, int irq, int trig_out)
+{
+	cti->base = base;
+	cti->irq = irq;
+	cti->trig_out_for_irq = trig_out;
+}
+
+/**
+ * cti_map_trigger - use the @chan to map @trig_in to @trig_out
+ * @cti: cti instance
+ * @trig_in: trigger in number
+ * @trig_out: trigger out number
+ * @channel: channel number
+ *
+ * This function maps one trigger in of @trig_in to one trigger
+ * out of @trig_out using the channel @chan.
+ */
+static inline void cti_map_trigger(struct cti *cti,
+	int trig_in, int trig_out, int chan)
+{
+	void __iomem *base = cti->base;
+	unsigned long val;
+
+	val = __raw_readl(base + CTIINEN + trig_in * 4);
+	val |= BIT(chan);
+	__raw_writel(val, base + CTIINEN + trig_in * 4);
+
+	val = __raw_readl(base + CTIOUTEN + trig_out * 4);
+	val |= BIT(chan);
+	__raw_writel(val, base + CTIOUTEN + trig_out * 4);
+}
+
+/**
+ * cti_enable - enable the cti module
+ * @cti: cti instance
+ *
+ * enable the cti module
+ */
+static inline void cti_enable(struct cti *cti)
+{
+	__raw_writel(0x1, cti->base + CTICONTROL);
+}
+
+/**
+ * cti_disable - disable the cti module
+ * @cti: cti instance
+ *
+ * enable the cti module
+ */
+static inline void cti_disable(struct cti *cti)
+{
+	__raw_writel(0, cti->base + CTICONTROL);
+}
+
+/**
+ * cti_irq_ack - clear the cti irq
+ * @cti: cti instance
+ *
+ * clear the cti irq
+ */
+static inline void cti_irq_ack(struct cti *cti)
+{
+	void __iomem *base = cti->base;
+	unsigned long val;
+
+	val = __raw_readl(base + CTIINTACK);
+	val |= BIT(cti->trig_out_for_irq);
+	__raw_writel(val, base + CTIINTACK);
+}
+
+/**
+ * cti_unlock - unlock cti module
+ * @cti: cti instance
+ *
+ * unlock the cti module, or else any writes to the cti
+ * module is not allowed.
+ */
+static inline void cti_unlock(struct cti *cti)
+{
+	void __iomem *base = cti->base;
+	unsigned long val;
+
+	val = __raw_readl(base + LOCKSTATUS);
+
+	if (val & 1) {
+		val = LOCKCODE;
+		__raw_writel(val, base + LOCKACCESS);
+	}
+}
+
+/**
+ * cti_lock - lock cti module
+ * @cti: cti instance
+ *
+ * lock the cti module, so any writes to the cti
+ * module will be not allowed.
+ */
+static inline void cti_lock(struct cti *cti)
+{
+	void __iomem *base = cti->base;
+	unsigned long val;
+
+	val = __raw_readl(base + LOCKSTATUS);
+
+	if (!(val & 1)) {
+		val = ~LOCKCODE;
+		__raw_writel(val, base + LOCKACCESS);
+	}
+}
+#endif
diff --git a/arch/arm/include/asm/entry-macro-vic2.S b/arch/arm/include/asm/entry-macro-vic2.S
deleted file mode 100644
index 3ceb85e4385..00000000000
--- a/arch/arm/include/asm/entry-macro-vic2.S
+++ /dev/null
@@ -1,57 +0,0 @@
-/* arch/arm/include/asm/entry-macro-vic2.S
- *
- * Originally arch/arm/mach-s3c6400/include/mach/entry-macro.S
- *
- * Copyright 2008 Openmoko, Inc.
- * Copyright 2008 Simtec Electronics
- *	http://armlinux.simtec.co.uk/
- *	Ben Dooks <ben@simtec.co.uk>
- *
- * Low-level IRQ helper macros for a device with two VICs
- *
- * This file is licensed under the terms of the GNU General Public
- * License version 2. This program is licensed "as is" without any
- * warranty of any kind, whether express or implied.
-*/
-
-/* This should be included from <mach/entry-macro.S> with the necessary
- * defines for virtual addresses and IRQ bases for the two vics.
- *
- * The code needs the following defined:
- *	IRQ_VIC0_BASE	IRQ number of VIC0's first IRQ
- *	IRQ_VIC1_BASE	IRQ number of VIC1's first IRQ
- *	VA_VIC0		Virtual address of VIC0
- *	VA_VIC1		Virtual address of VIC1
- *
- * Note, code assumes VIC0's virtual address is an ARM immediate constant
- * away from VIC1.
-*/
-
-#include <asm/hardware/vic.h>
-
-	.macro	disable_fiq
-	.endm
-
-	.macro	get_irqnr_preamble, base, tmp
-	ldr	\base, =VA_VIC0
-	.endm
-
-	.macro	arch_ret_to_user, tmp1, tmp2
-	.endm
-
-	.macro	get_irqnr_and_base, irqnr, irqstat, base, tmp
-
-	@ check the vic0
-	mov	\irqnr, #IRQ_VIC0_BASE + 31
-	ldr	\irqstat, [ \base, # VIC_IRQ_STATUS ]
-	teq	\irqstat, #0
-
-	@ otherwise try vic1
-	addeq	\tmp, \base, #(VA_VIC1 - VA_VIC0)
-	addeq	\irqnr, \irqnr, #(IRQ_VIC1_BASE - IRQ_VIC0_BASE)
-	ldreq	\irqstat, [ \tmp, # VIC_IRQ_STATUS ]
-	teqeq	\irqstat, #0
-
-	clzne	\irqstat, \irqstat
-	subne	\irqnr, \irqnr, \irqstat
-	.endm
diff --git a/arch/arm/include/asm/hardware/entry-macro-gic.S b/arch/arm/include/asm/hardware/entry-macro-gic.S
deleted file mode 100644
index 74ebc803904..00000000000
--- a/arch/arm/include/asm/hardware/entry-macro-gic.S
+++ /dev/null
@@ -1,60 +0,0 @@
-/*
- * arch/arm/include/asm/hardware/entry-macro-gic.S
- *
- * Low-level IRQ helper macros for GIC
- *
- * This file is licensed under the terms of the GNU General Public
- * License version 2. This program is licensed "as is" without any
- * warranty of any kind, whether express or implied.
- */
-
-#include <asm/hardware/gic.h>
-
-#ifndef HAVE_GET_IRQNR_PREAMBLE
-	.macro	get_irqnr_preamble, base, tmp
-	ldr	\base, =gic_cpu_base_addr
-	ldr	\base, [\base]
-	.endm
-#endif
-
-/*
- * The interrupt numbering scheme is defined in the
- * interrupt controller spec. To wit:
- *
- * Interrupts 0-15 are IPI
- * 16-31 are local. We allow 30 to be used for the watchdog.
- * 32-1020 are global
- * 1021-1022 are reserved
- * 1023 is "spurious" (no interrupt)
- *
- * A simple read from the controller will tell us the number of the highest
- * priority enabled interrupt. We then just need to check whether it is in the
- * valid range for an IRQ (30-1020 inclusive).
- */
-
-	.macro	get_irqnr_and_base, irqnr, irqstat, base, tmp
-
-	ldr	\irqstat, [\base, #GIC_CPU_INTACK]
-	/* bits 12-10 = src CPU, 9-0 = int # */
-
-	ldr	\tmp, =1021
-	bic	\irqnr, \irqstat, #0x1c00
-	cmp	\irqnr, #15
-	cmpcc	\irqnr, \irqnr
-	cmpne	\irqnr, \tmp
-	cmpcs	\irqnr, \irqnr
-	.endm
-
-/* We assume that irqstat (the raw value of the IRQ acknowledge
- * register) is preserved from the macro above.
- * If there is an IPI, we immediately signal end of interrupt on the
- * controller, since this requires the original irqstat value which
- * we won't easily be able to recreate later.
- */
-
-	.macro	test_for_ipi, irqnr, irqstat, base, tmp
-	bic	\irqnr, \irqstat, #0x1c00
-	cmp	\irqnr, #16
-	strcc	\irqstat, [\base, #GIC_CPU_EOI]
-	cmpcs	\irqnr, \irqnr
-	.endm
diff --git a/arch/arm/include/asm/hardware/gic.h b/arch/arm/include/asm/hardware/gic.h
index 3e91f22046f..4bdfe001869 100644
--- a/arch/arm/include/asm/hardware/gic.h
+++ b/arch/arm/include/asm/hardware/gic.h
@@ -36,30 +36,22 @@
 #include <linux/irqdomain.h>
 struct device_node;
-extern void __iomem *gic_cpu_base_addr;
 extern struct irq_chip gic_arch_extn;
-void gic_init(unsigned int, int, void __iomem *, void __iomem *);
+void gic_init_bases(unsigned int, int, void __iomem *, void __iomem *,
+	u32 offset);
 int gic_of_init(struct device_node *node, struct device_node *parent);
 void gic_secondary_init(unsigned int);
+void gic_handle_irq(struct pt_regs *regs);
 void gic_cascade_irq(unsigned int gic_nr, unsigned int irq);
 void gic_raise_softirq(const struct cpumask *mask, unsigned int irq);
-struct gic_chip_data {
-	void __iomem *dist_base;
-	void __iomem *cpu_base;
-#ifdef CONFIG_CPU_PM
-	u32 saved_spi_enable[DIV_ROUND_UP(1020, 32)];
-	u32 saved_spi_conf[DIV_ROUND_UP(1020, 16)];
-	u32 saved_spi_target[DIV_ROUND_UP(1020, 4)];
-	u32 __percpu *saved_ppi_enable;
-	u32 __percpu *saved_ppi_conf;
-#endif
-#ifdef CONFIG_IRQ_DOMAIN
-	struct irq_domain domain;
-#endif
-	unsigned int gic_irqs;
-};
+static inline void gic_init(unsigned int nr, int start,
+	void __iomem *dist , void __iomem *cpu)
+{
+	gic_init_bases(nr, start, dist, cpu, 0);
+}
+
 #endif
 #endif
diff --git a/arch/arm/include/asm/hardware/vic.h b/arch/arm/include/asm/hardware/vic.h
index 5d72550a809..f42ebd61959 100644
--- a/arch/arm/include/asm/hardware/vic.h
+++ b/arch/arm/include/asm/hardware/vic.h
@@ -41,7 +41,15 @@
 #define VIC_PL192_VECT_ADDR 0xF00
 #ifndef __ASSEMBLY__
+#include <linux/compiler.h>
+#include <linux/types.h>
+
+struct device_node;
+struct pt_regs;
+
 void vic_init(void __iomem *base, unsigned int irq_start, u32 vic_sources, u32 resume_sources);
-#endif
+int vic_of_init(struct device_node *node, struct device_node *parent);
+void vic_handle_irq(struct pt_regs *regs);
+#endif /* __ASSEMBLY__ */
 #endif
diff --git a/arch/arm/include/asm/idmap.h b/arch/arm/include/asm/idmap.h
new file mode 100644
index 00000000000..bf863edb517
--- /dev/null
+++ b/arch/arm/include/asm/idmap.h
@@ -0,0 +1,14 @@
+#ifndef __ASM_IDMAP_H
+#define __ASM_IDMAP_H
+
+#include <linux/compiler.h>
+#include <asm/pgtable.h>
+
+/* Tag a function as requiring to be executed via an identity mapping. */
+#define __idmap __section(.idmap.text) noinline notrace
+
+extern pgd_t *idmap_pgd;
+
+void setup_mm_for_reboot(void);
+
+#endif /* __ASM_IDMAP_H */
diff --git a/arch/arm/include/asm/mach/arch.h b/arch/arm/include/asm/mach/arch.h
index 2b0efc3104a..bcb0c883e21 100644
--- a/arch/arm/include/asm/mach/arch.h
+++ b/arch/arm/include/asm/mach/arch.h
@@ -31,10 +31,10 @@ struct machine_desc {
 	unsigned int video_start;	/* start of video RAM	*/
 	unsigned int video_end;		/* end of video RAM	*/
-	unsigned int reserve_lp0 :1;	/* never has lp0	*/
-	unsigned int reserve_lp1 :1;	/* never has lp1	*/
-	unsigned int reserve_lp2 :1;	/* never has lp2	*/
-	unsigned int soft_reboot :1;	/* soft reboot		*/
+	unsigned char reserve_lp0 :1;	/* never has lp0	*/
+	unsigned char reserve_lp1 :1;	/* never has lp1	*/
+	unsigned char reserve_lp2 :1;	/* never has lp2	*/
+	char restart_mode;		/* default restart mode	*/
 	void (*fixup)(struct tag *, char **, struct meminfo *);
 	void (*reserve)(void);/* reserve mem blocks	*/
@@ -46,6 +46,7 @@ struct machine_desc {
 #ifdef CONFIG_MULTI_IRQ_HANDLER
 	void (*handle_irq)(struct pt_regs *);
 #endif
+	void (*restart)(char, const char *);
 };
 /*
diff --git a/arch/arm/include/asm/page.h b/arch/arm/include/asm/page.h
index ca94653f1ec..97b440c25c5 100644
--- a/arch/arm/include/asm/page.h
+++ b/arch/arm/include/asm/page.h
@@ -151,7 +151,11 @@ extern void __cpu_copy_user_highpage(struct page *to, struct page *from,
 #define clear_page(page) memset((void *)(page), 0, PAGE_SIZE)
 extern void copy_page(void *to, const void *from);
+#ifdef CONFIG_ARM_LPAE
+#include <asm/pgtable-3level-types.h>
+#else
 #include <asm/pgtable-2level-types.h>
+#endif
 #endif /* CONFIG_MMU */
diff --git a/arch/arm/include/asm/perf_event.h b/arch/arm/include/asm/perf_event.h
index 0f8e3827a89..99cfe360798 100644
--- a/arch/arm/include/asm/perf_event.h
+++ b/arch/arm/include/asm/perf_event.h
@@ -32,7 +32,4 @@ enum arm_perf_pmu_ids {
 extern enum arm_perf_pmu_ids
 armpmu_get_pmu_id(void);
-extern int
-armpmu_get_max_events(void);
-
 #endif /* __ARM_PERF_EVENT_H__ */
diff --git a/arch/arm/include/asm/pgalloc.h b/arch/arm/include/asm/pgalloc.h
index 3e08fd3fbb6..943504f53f5 100644
--- a/arch/arm/include/asm/pgalloc.h
+++ b/arch/arm/include/asm/pgalloc.h
@@ -25,12 +25,34 @@
 #define _PAGE_USER_TABLE (PMD_TYPE_TABLE | PMD_BIT4 | PMD_DOMAIN(DOMAIN_USER))
 #define _PAGE_KERNEL_TABLE (PMD_TYPE_TABLE | PMD_BIT4 | PMD_DOMAIN(DOMAIN_KERNEL))
+#ifdef CONFIG_ARM_LPAE
+
+static inline pmd_t *pmd_alloc_one(struct mm_struct *mm, unsigned long addr)
+{
+	return (pmd_t *)get_zeroed_page(GFP_KERNEL | __GFP_REPEAT);
+}
+
+static inline void pmd_free(struct mm_struct *mm, pmd_t *pmd)
+{
+	BUG_ON((unsigned long)pmd & (PAGE_SIZE-1));
+	free_page((unsigned long)pmd);
+}
+
+static inline void pud_populate(struct mm_struct *mm, pud_t *pud, pmd_t *pmd)
+{
+	set_pud(pud, __pud(__pa(pmd) | PMD_TYPE_TABLE));
+}
+
+#else	/* !CONFIG_ARM_LPAE */
+
 /*
  * Since we have only two-level page tables, these are trivial
  */
 #define pmd_alloc_one(mm,addr)	({ BUG(); ((pmd_t *)2); })
 #define pmd_free(mm, pmd)	do { } while (0)
-#define pgd_populate(mm,pmd,pte)	BUG()
+#define pud_populate(mm,pmd,pte)	BUG()
+
+#endif	/* CONFIG_ARM_LPAE */
 extern pgd_t *pgd_alloc(struct mm_struct *mm);
 extern void pgd_free(struct mm_struct *mm, pgd_t *pgd);
@@ -109,7 +131,9 @@ static inline void __pmd_populate(pmd_t *pmdp, phys_addr_t pte,
 {
 	pmdval_t pmdval = (pte + PTE_HWTABLE_OFF) | prot;
 	pmdp[0] = __pmd(pmdval);
+#ifndef CONFIG_ARM_LPAE
 	pmdp[1] = __pmd(pmdval + 256 * sizeof(pte_t));
+#endif
 	flush_pmd_entry(pmdp);
 }
diff --git a/arch/arm/include/asm/pgtable-2level.h b/arch/arm/include/asm/pgtable-2level.h
index 470457e1cfc..2317a71c8f8 100644
--- a/arch/arm/include/asm/pgtable-2level.h
+++ b/arch/arm/include/asm/pgtable-2level.h
@@ -140,4 +140,45 @@
 #define L_PTE_MT_DEV_CACHED	(_AT(pteval_t, 0x0b) << 2)	/* 1011 */
 #define L_PTE_MT_MASK		(_AT(pteval_t, 0x0f) << 2)
+#ifndef __ASSEMBLY__
+
+/*
+ * The "pud_xxx()" functions here are trivial when the pmd is folded into
+ * the pud: the pud entry is never bad, always exists, and can't be set or
+ * cleared.
+ */
+#define pud_none(pud)		(0)
+#define pud_bad(pud)		(0)
+#define pud_present(pud)	(1)
+#define pud_clear(pudp)		do { } while (0)
+#define set_pud(pud,pudp)	do { } while (0)
+
+static inline pmd_t *pmd_offset(pud_t *pud, unsigned long addr)
+{
+	return (pmd_t *)pud;
+}
+
+#define pmd_bad(pmd)		(pmd_val(pmd) & 2)
+
+#define copy_pmd(pmdpd,pmdps)		\
+	do {				\
+		pmdpd[0] = pmdps[0];	\
+		pmdpd[1] = pmdps[1];	\
+		flush_pmd_entry(pmdpd);	\
+	} while (0)
+
+#define pmd_clear(pmdp)			\
+	do {				\
+		pmdp[0] = __pmd(0);	\
+		pmdp[1] = __pmd(0);	\
+		clean_pmd_entry(pmdp);	\
+	} while (0)
+
+/* we don't need complex calculations here as the pmd is folded into the pgd */
+#define pmd_addr_end(addr,end) (end)
+
+#define set_pte_ext(ptep,pte,ext) cpu_set_pte_ext(ptep,pte,ext)
+
+#endif /* __ASSEMBLY__ */
+
 #endif /* _ASM_PGTABLE_2LEVEL_H */
diff --git a/arch/arm/include/asm/pgtable-3level-hwdef.h b/arch/arm/include/asm/pgtable-3level-hwdef.h
new file mode 100644
index 00000000000..d7952824c5c
--- /dev/null
+++ b/arch/arm/include/asm/pgtable-3level-hwdef.h
@@ -0,0 +1,77 @@
+/*
+ * arch/arm/include/asm/pgtable-3level-hwdef.h
+ *
+ * Copyright (C) 2011 ARM Ltd.
+ * Author: Catalin Marinas <catalin.marinas@arm.com>
+ *
+ * This program is free software; you can redistribute it and/or modify
+ * it under the terms of the GNU General Public License version 2 as
+ * published by the Free Software Foundation.
+ *
+ * This program is distributed in the hope that it will be useful,
+ * but WITHOUT ANY WARRANTY; without even the implied warranty of
+ * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
+ * GNU General Public License for more details.
+ *
+ * You should have received a copy of the GNU General Public License
+ * along with this program; if not, write to the Free Software
+ * Foundation, Inc., 59 Temple Place, Suite 330, Boston, MA 02111-1307 USA
+ */
+#ifndef _ASM_PGTABLE_3LEVEL_HWDEF_H
+#define _ASM_PGTABLE_3LEVEL_HWDEF_H
+
+/*
+ * Hardware page table definitions.
+ *
+ * + Level 1/2 descriptor
+ *   - common
+ */
+#define PMD_TYPE_MASK		(_AT(pmdval_t, 3) << 0)
+#define PMD_TYPE_FAULT		(_AT(pmdval_t, 0) << 0)
+#define PMD_TYPE_TABLE		(_AT(pmdval_t, 3) << 0)
+#define PMD_TYPE_SECT		(_AT(pmdval_t, 1) << 0)
+#define PMD_BIT4		(_AT(pmdval_t, 0))
+#define PMD_DOMAIN(x)		(_AT(pmdval_t, 0))
+
+/*
+ *   - section
+ */
+#define PMD_SECT_BUFFERABLE	(_AT(pmdval_t, 1) << 2)
+#define PMD_SECT_CACHEABLE	(_AT(pmdval_t, 1) << 3)
+#define PMD_SECT_S		(_AT(pmdval_t, 3) << 8)
+#define PMD_SECT_AF		(_AT(pmdval_t, 1) << 10)
+#define PMD_SECT_nG		(_AT(pmdval_t, 1) << 11)
+#define PMD_SECT_XN		(_AT(pmdval_t, 1) << 54)
+#define PMD_SECT_AP_WRITE	(_AT(pmdval_t, 0))
+#define PMD_SECT_AP_READ	(_AT(pmdval_t, 0))
+#define PMD_SECT_TEX(x)		(_AT(pmdval_t, 0))
+
+/*
+ * AttrIndx[2:0] encoding (mapping attributes defined in the MAIR* registers).
+ */
+#define PMD_SECT_UNCACHED	(_AT(pmdval_t, 0) << 2)	/* strongly ordered */
+#define PMD_SECT_BUFFERED	(_AT(pmdval_t, 1) << 2)	/* normal non-cacheable */
+#define PMD_SECT_WT		(_AT(pmdval_t, 2) << 2)	/* normal inner write-through */
+#define PMD_SECT_WB		(_AT(pmdval_t, 3) << 2)	/* normal inner write-back */
+#define PMD_SECT_WBWA		(_AT(pmdval_t, 7) << 2)	/* normal inner write-alloc */
+
+/*
+ * + Level 3 descriptor (PTE)
+ */
+#define PTE_TYPE_MASK		(_AT(pteval_t, 3) << 0)
+#define PTE_TYPE_FAULT		(_AT(pteval_t, 0) << 0)
+#define PTE_TYPE_PAGE		(_AT(pteval_t, 3) << 0)
+#define PTE_BUFFERABLE		(_AT(pteval_t, 1) << 2)	/* AttrIndx[0] */
+#define PTE_CACHEABLE		(_AT(pteval_t, 1) << 3)	/* AttrIndx[1] */
+#define PTE_EXT_SHARED		(_AT(pteval_t, 3) << 8)	/* SH[1:0], inner shareable */
+#define PTE_EXT_AF		(_AT(pteval_t, 1) << 10)	/* Access Flag */
+#define PTE_EXT_NG		(_AT(pteval_t, 1) << 11)	/* nG */
+#define PTE_EXT_XN		(_AT(pteval_t, 1) << 54)	/* XN */
+
+/*
+ * 40-bit physical address supported.
+ */
+#define PHYS_MASK_SHIFT		(40)
+#define PHYS_MASK		((1ULL << PHYS_MASK_SHIFT) - 1)
+
+#endif
diff --git a/arch/arm/include/asm/pgtable-3level-types.h b/arch/arm/include/asm/pgtable-3level-types.h
new file mode 100644
index 00000000000..921aa30259c
--- /dev/null
+++ b/arch/arm/include/asm/pgtable-3level-types.h
@@ -0,0 +1,70 @@
+/*
+ * arch/arm/include/asm/pgtable-3level-types.h
+ *
+ * Copyright (C) 2011 ARM Ltd.
+ * Author: Catalin Marinas <catalin.marinas@arm.com>
+ *
+ * This program is free software; you can redistribute it and/or modify
+ * it under the terms of the GNU General Public License version 2 as
+ * published by the Free Software Foundation.
+ *
+ * This program is distributed in the hope that it will be useful,
+ * but WITHOUT ANY WARRANTY; without even the implied warranty of
+ * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
+ * GNU General Public License for more details.
+ *
+ * You should have received a copy of the GNU General Public License
+ * along with this program; if not, write to the Free Software
+ * Foundation, Inc., 59 Temple Place, Suite 330, Boston, MA 02111-1307 USA
+ */
+#ifndef _ASM_PGTABLE_3LEVEL_TYPES_H
+#define _ASM_PGTABLE_3LEVEL_TYPES_H
+
+#include <asm/types.h>
+
+typedef u64 pteval_t;
+typedef u64 pmdval_t;
+typedef u64 pgdval_t;
+
+#undef STRICT_MM_TYPECHECKS
+
+#ifdef STRICT_MM_TYPECHECKS
+
+/*
+ * These are used to make use of C type-checking..
+ */
+typedef struct { pteval_t pte; } pte_t;
+typedef struct { pmdval_t pmd; } pmd_t;
+typedef struct { pgdval_t pgd; } pgd_t;
+typedef struct { pteval_t pgprot; } pgprot_t;
+
+#define pte_val(x)	((x).pte)
+#define pmd_val(x)	((x).pmd)
+#define pgd_val(x)	((x).pgd)
+#define pgprot_val(x)	((x).pgprot)
+
+#define __pte(x)	((pte_t) { (x) } )
+#define __pmd(x)	((pmd_t) { (x) } )
+#define __pgd(x)	((pgd_t) { (x) } )
+#define __pgprot(x)	((pgprot_t) { (x) } )
+
+#else	/* !STRICT_MM_TYPECHECKS */
+
+typedef pteval_t pte_t;
+typedef pmdval_t pmd_t;
+typedef pgdval_t pgd_t;
+typedef pteval_t pgprot_t;
+
+#define pte_val(x)	(x)
+#define pmd_val(x)	(x)
+#define pgd_val(x)	(x)
+#define pgprot_val(x)	(x)
+
+#define __pte(x)	(x)
+#define __pmd(x)	(x)
+#define __pgd(x)	(x)
+#define __pgprot(x)	(x)
+
+#endif	/* STRICT_MM_TYPECHECKS */
+
+#endif	/* _ASM_PGTABLE_3LEVEL_TYPES_H */
diff --git a/arch/arm/include/asm/pgtable-3level.h b/arch/arm/include/asm/pgtable-3level.h
new file mode 100644
index 00000000000..759af70f9a0
--- /dev/null
+++ b/arch/arm/include/asm/pgtable-3level.h
@@ -0,0 +1,155 @@
+/*
+ * arch/arm/include/asm/pgtable-3level.h
+ *
+ * Copyright (C) 2011 ARM Ltd.
+ * Author: Catalin Marinas <catalin.marinas@arm.com>
+ *
+ * This program is free software; you can redistribute it and/or modify
+ * it under the terms of the GNU General Public License version 2 as
+ * published by the Free Software Foundation.
+ *
+ * This program is distributed in the hope that it will be useful,
+ * but WITHOUT ANY WARRANTY; without even the implied warranty of
+ * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
+ * GNU General Public License for more details.
+ *
+ * You should have received a copy of the GNU General Public License
+ * along with this program; if not, write to the Free Software
+ * Foundation, Inc., 59 Temple Place, Suite 330, Boston, MA 02111-1307 USA
+ */
+#ifndef _ASM_PGTABLE_3LEVEL_H
+#define _ASM_PGTABLE_3LEVEL_H
+
+/*
+ * With LPAE, there are 3 levels of page tables. Each level has 512 entries of
+ * 8 bytes each, occupying a 4K page. The first level table covers a range of
+ * 512GB, each entry representing 1GB. Since we are limited to 4GB input
+ * address range, only 4 entries in the PGD are used.
+ *
+ * There are enough spare bits in a page table entry for the kernel specific
+ * state.
+ */
+#define PTRS_PER_PTE		512
+#define PTRS_PER_PMD		512
+#define PTRS_PER_PGD		4
+
+#define PTE_HWTABLE_PTRS	(PTRS_PER_PTE)
+#define PTE_HWTABLE_OFF		(0)
+#define PTE_HWTABLE_SIZE	(PTRS_PER_PTE * sizeof(u64))
+
+/*
+ * PGDIR_SHIFT determines the size a top-level page table entry can map.
+ */
+#define PGDIR_SHIFT		30
+
+/*
+ * PMD_SHIFT determines the size a middle-level page table entry can map.
+ */
+#define PMD_SHIFT		21
+
+#define PMD_SIZE		(1UL << PMD_SHIFT)
+#define PMD_MASK		(~(PMD_SIZE-1))
+#define PGDIR_SIZE		(1UL << PGDIR_SHIFT)
+#define PGDIR_MASK		(~(PGDIR_SIZE-1))
+
+/*
+ * section address mask and size definitions.
+ */
+#define SECTION_SHIFT		21
+#define SECTION_SIZE		(1UL << SECTION_SHIFT)
+#define SECTION_MASK		(~(SECTION_SIZE-1))
+
+#define USER_PTRS_PER_PGD	(PAGE_OFFSET / PGDIR_SIZE)
+
+/*
+ * "Linux" PTE definitions for LPAE.
+ *
+ * These bits overlap with the hardware bits but the naming is preserved for
+ * consistency with the classic page table format.
+ */
+#define L_PTE_PRESENT		(_AT(pteval_t, 3) << 0)		/* Valid */
+#define L_PTE_FILE		(_AT(pteval_t, 1) << 2)		/* only when !PRESENT */
+#define L_PTE_BUFFERABLE	(_AT(pteval_t, 1) << 2)		/* AttrIndx[0] */
+#define L_PTE_CACHEABLE		(_AT(pteval_t, 1) << 3)		/* AttrIndx[1] */
+#define L_PTE_USER		(_AT(pteval_t, 1) << 6)		/* AP[1] */
+#define L_PTE_RDONLY		(_AT(pteval_t, 1) << 7)		/* AP[2] */
+#define L_PTE_SHARED		(_AT(pteval_t, 3) << 8)		/* SH[1:0], inner shareable */
+#define L_PTE_YOUNG		(_AT(pteval_t, 1) << 10)	/* AF */
+#define L_PTE_XN		(_AT(pteval_t, 1) << 54)	/* XN */
+#define L_PTE_DIRTY		(_AT(pteval_t, 1) << 55)	/* unused */
+#define L_PTE_SPECIAL		(_AT(pteval_t, 1) << 56)	/* unused */
+
+/*
+ * To be used in assembly code with the upper page attributes.
+ */
+#define L_PTE_XN_HIGH		(1 << (54 - 32))
+#define L_PTE_DIRTY_HIGH	(1 << (55 - 32))
+
+/*
+ * AttrIndx[2:0] encoding (mapping attributes defined in the MAIR* registers).
+ */
+#define L_PTE_MT_UNCACHED	(_AT(pteval_t, 0) << 2)	/* strongly ordered */
+#define L_PTE_MT_BUFFERABLE	(_AT(pteval_t, 1) << 2)	/* normal non-cacheable */
+#define L_PTE_MT_WRITETHROUGH	(_AT(pteval_t, 2) << 2)	/* normal inner write-through */
+#define L_PTE_MT_WRITEBACK	(_AT(pteval_t, 3) << 2)	/* normal inner write-back */
+#define L_PTE_MT_WRITEALLOC	(_AT(pteval_t, 7) << 2)	/* normal inner write-alloc */
+#define L_PTE_MT_DEV_SHARED	(_AT(pteval_t, 4) << 2)	/* device */
+#define L_PTE_MT_DEV_NONSHARED	(_AT(pteval_t, 4) << 2)	/* device */
+#define L_PTE_MT_DEV_WC		(_AT(pteval_t, 1) << 2)	/* normal non-cacheable */
+#define L_PTE_MT_DEV_CACHED	(_AT(pteval_t, 3) << 2)	/* normal inner write-back */
+#define L_PTE_MT_MASK		(_AT(pteval_t, 7) << 2)
+
+/*
+ * Software PGD flags.
+ */
+#define L_PGD_SWAPPER		(_AT(pgdval_t, 1) << 55)	/* swapper_pg_dir entry */
+
+#ifndef __ASSEMBLY__
+
+#define pud_none(pud)		(!pud_val(pud))
+#define pud_bad(pud)		(!(pud_val(pud) & 2))
+#define pud_present(pud)	(pud_val(pud))
+
+#define pud_clear(pudp)			\
+	do {				\
+		*pudp = __pud(0);	\
+		clean_pmd_entry(pudp);	\
+	} while (0)
+
+#define set_pud(pudp, pud)		\
+	do {				\
+		*pudp = pud;		\
+		flush_pmd_entry(pudp);	\
+	} while (0)
+
+static inline pmd_t *pud_page_vaddr(pud_t pud)
+{
+	return __va(pud_val(pud) & PHYS_MASK & (s32)PAGE_MASK);
+}
+
+/* Find an entry in the second-level page table.. */
+#define pmd_index(addr)		(((addr) >> PMD_SHIFT) & (PTRS_PER_PMD - 1))
+static inline pmd_t *pmd_offset(pud_t *pud, unsigned long addr)
+{
+	return (pmd_t *)pud_page_vaddr(*pud) + pmd_index(addr);
+}
+
+#define pmd_bad(pmd)		(!(pmd_val(pmd) & 2))
+
+#define copy_pmd(pmdpd,pmdps)		\
+	do {				\
+		*pmdpd = *pmdps;	\
+		flush_pmd_entry(pmdpd);	\
+	} while (0)
+
+#define pmd_clear(pmdp)			\
+	do {				\
+		*pmdp = __pmd(0);	\
+		clean_pmd_entry(pmdp);	\
+	} while (0)
+
+#define set_pte_ext(ptep,pte,ext)	cpu_set_pte_ext(ptep,__pte(pte_val(pte)|(ext)))
+
+#endif /* __ASSEMBLY__ */
+
+#endif /* _ASM_PGTABLE_3LEVEL_H */
diff --git a/arch/arm/include/asm/pgtable-hwdef.h b/arch/arm/include/asm/pgtable-hwdef.h
index 183111164ce..8426229ba29 100644
--- a/arch/arm/include/asm/pgtable-hwdef.h
+++ b/arch/arm/include/asm/pgtable-hwdef.h
@@ -10,6 +10,10 @@
 #ifndef _ASMARM_PGTABLE_HWDEF_H
 #define _ASMARM_PGTABLE_HWDEF_H
+#ifdef CONFIG_ARM_LPAE
+#include <asm/pgtable-3level-hwdef.h>
+#else
 #include <asm/pgtable-2level-hwdef.h>
+#endif
 #endif
diff --git a/arch/arm/include/asm/pgtable.h b/arch/arm/include/asm/pgtable.h
index 2f659e23972..f66626d71e7 100644
--- a/arch/arm/include/asm/pgtable.h
+++ b/arch/arm/include/asm/pgtable.h
@@ -11,20 +11,24 @@
 #define _ASMARM_PGTABLE_H
 #include <linux/const.h>
-#include <asm-generic/4level-fixup.h>
 #include <asm/proc-fns.h>
 #ifndef CONFIG_MMU
+#include <asm-generic/4level-fixup.h>
 #include "pgtable-nommu.h"
 #else
+#include <asm-generic/pgtable-nopud.h>
 #include <asm/memory.h>
-#include <mach/vmalloc.h>
 #include <asm/pgtable-hwdef.h>
+#ifdef CONFIG_ARM_LPAE
+#include <asm/pgtable-3level.h>
+#else
 #include <asm/pgtable-2level.h>
+#endif
 /*
  * Just any arbitrary offset to the start of the vmalloc VM area: the
@@ -33,15 +37,10 @@
  * any out-of-bounds memory accesses will hopefully be caught.
  * The vmalloc() routines leaves a hole of 4kB between each vmalloced
  * area for the same reason. ;)
- *
- * Note that platforms may override VMALLOC_START, but they must provide
- * VMALLOC_END. VMALLOC_END defines the (exclusive) limit of this space,
- * which may not overlap IO space.
 */
-#ifndef VMALLOC_START
 #define VMALLOC_OFFSET		(8*1024*1024)
 #define VMALLOC_START		(((unsigned long)high_memory + VMALLOC_OFFSET) & ~(VMALLOC_OFFSET-1))
-#endif
+#define VMALLOC_END		0xff000000UL
 #define LIBRARY_TEXT_START	0x0c000000
@@ -163,39 +162,8 @@ extern pgd_t swapper_pg_dir[PTRS_PER_PGD];
 /* to find an entry in a kernel page-table-directory */
 #define pgd_offset_k(addr)	pgd_offset(&init_mm, addr)
-/*
- * The "pgd_xxx()" functions here are trivial for a folded two-level
- * setup: the pgd is never bad, and a pmd always exists (as it's folded
- * into the pgd entry)
- */
-#define pgd_none(pgd)		(0)
-#define pgd_bad(pgd)		(0)
-#define pgd_present(pgd)	(1)
-#define pgd_clear(pgdp)		do { } while (0)
-#define set_pgd(pgd,pgdp)	do { } while (0)
-#define set_pud(pud,pudp)	do { } while (0)
-
-
-/* Find an entry in the second-level page table.. */
-#define pmd_offset(dir, addr)	((pmd_t *)(dir))
-
 #define pmd_none(pmd)		(!pmd_val(pmd))
 #define pmd_present(pmd)	(pmd_val(pmd))
-#define pmd_bad(pmd)		(pmd_val(pmd) & 2)
-
-#define copy_pmd(pmdpd,pmdps)		\
-	do {				\
-		pmdpd[0] = pmdps[0];	\
-		pmdpd[1] = pmdps[1];	\
-		flush_pmd_entry(pmdpd);	\
-	} while (0)
-
-#define pmd_clear(pmdp)			\
-	do {				\
-		pmdp[0] = __pmd(0);	\
-		pmdp[1] = __pmd(0);	\
-		clean_pmd_entry(pmdp);	\
-	} while (0)
-
 static inline pte_t *pmd_page_vaddr(pmd_t pmd)
 {
@@ -204,10 +172,6 @@ static inline pte_t *pmd_page_vaddr(pmd_t pmd)
 #define pmd_page(pmd)		pfn_to_page(__phys_to_pfn(pmd_val(pmd) & PHYS_MASK))
-/* we don't need complex calculations here as the pmd is folded into the pgd */
-#define pmd_addr_end(addr,end) (end)
-
-
 #ifndef CONFIG_HIGHPTE
 #define __pte_map(pmd)		pmd_page_vaddr(*(pmd))
 #define __pte_unmap(pte)	do { } while (0)
@@ -229,7 +193,6 @@ static inline pte_t *pmd_page_vaddr(pmd_t pmd)
 #define pte_page(pte)		pfn_to_page(pte_pfn(pte))
 #define mk_pte(page,prot)	pfn_pte(page_to_pfn(page), prot)
-#define set_pte_ext(ptep,pte,ext) cpu_set_pte_ext(ptep,pte,ext)
 #define pte_clear(mm,addr,ptep)	set_pte_ext(ptep, __pte(0), 0)
 #if __LINUX_ARM_ARCH__ < 6
@@ -347,9 +310,6 @@ static inline pte_t pte_modify(pte_t pte, pgprot_t newprot)
 #define pgtable_cache_init() do { } while (0)
-void identity_mapping_add(pgd_t *, unsigned long, unsigned long);
-void identity_mapping_del(pgd_t *, unsigned long, unsigned long);
-
 #endif /* !__ASSEMBLY__ */
 #endif /* CONFIG_MMU */
diff --git a/arch/arm/include/asm/pmu.h b/arch/arm/include/asm/pmu.h
index 0bda22c094a..b5a5be2536c 100644
--- a/arch/arm/include/asm/pmu.h
+++ b/arch/arm/include/asm/pmu.h
@@ -27,13 +27,22 @@ enum arm_pmu_type {
 /*
  * struct arm_pmu_platdata - ARM PMU platform data
  *
- * @handle_irq: an optional handler which will be called from the interrupt and
- * passed the address of the low level handler, and can be used to implement
- * any platform specific handling before or after calling it.
+ * @handle_irq: an optional handler which will be called from the
+ *	interrupt and passed the address of the low level handler,
+ *	and can be used to implement any platform specific handling
+ *	before or after calling it.
+ * @enable_irq: an optional handler which will be called after
+ *	request_irq and be used to handle some platform specific
+ *	irq enablement
+ * @disable_irq: an optional handler which will be called before
+ *	free_irq and be used to handle some platform specific
+ *	irq disablement
 */
 struct arm_pmu_platdata {
 	irqreturn_t (*handle_irq)(int irq, void *dev, irq_handler_t pmu_handler);
+	void (*enable_irq)(int irq);
+	void (*disable_irq)(int irq);
 };
 #ifdef CONFIG_CPU_HAS_PMU
diff --git a/arch/arm/include/asm/proc-fns.h b/arch/arm/include/asm/proc-fns.h
index 9e92cb205e6..f3628fb3d2b 100644
--- a/arch/arm/include/asm/proc-fns.h
+++ b/arch/arm/include/asm/proc-fns.h
@@ -65,7 +65,11 @@ extern struct processor {
 	 * Set a possibly extended PTE. Non-extended PTEs should
 	 * ignore 'ext'.
 	 */
+#ifdef CONFIG_ARM_LPAE
+	void (*set_pte_ext)(pte_t *ptep, pte_t pte);
+#else
 	void (*set_pte_ext)(pte_t *ptep, pte_t pte, unsigned int ext);
+#endif
 	/* Suspend/resume */
 	unsigned int suspend_size;
@@ -79,7 +83,11 @@ extern void cpu_proc_fin(void);
 extern int cpu_do_idle(void);
 extern void cpu_dcache_clean_area(void *, int);
 extern void cpu_do_switch_mm(unsigned long pgd_phys, struct mm_struct *mm);
+#ifdef CONFIG_ARM_LPAE
+extern void cpu_set_pte_ext(pte_t *ptep, pte_t pte);
+#else
 extern void cpu_set_pte_ext(pte_t *ptep, pte_t pte, unsigned int ext);
+#endif
 extern void cpu_reset(unsigned long addr) __attribute__((noreturn));
 /* These three are private to arch/arm/kernel/suspend.c */
@@ -107,6 +115,18 @@ extern void cpu_resume(void);
 #define cpu_switch_mm(pgd,mm) cpu_do_switch_mm(virt_to_phys(pgd),mm)
+#ifdef CONFIG_ARM_LPAE
+#define cpu_get_pgd()	\
+	({						\
+		unsigned long pg, pg2;			\
+		__asm__("mrrc	p15, 0, %0, %1, c2"	\
+			: "=r" (pg), "=r" (pg2)		\
+			:				\
+			: "cc");			\
+		pg &= ~(PTRS_PER_PGD*sizeof(pgd_t)-1);	\
+		(pgd_t *)phys_to_virt(pg);		\
+	})
+#else
 #define cpu_get_pgd()	\
 	({						\
 		unsigned long pg;			\
@@ -115,6 +135,7 @@ extern void cpu_resume(void);
 		pg &= ~0x3fff;				\
 		(pgd_t *)phys_to_virt(pg);		\
 	})
+#endif
 #endif
diff --git a/arch/arm/include/asm/sched_clock.h b/arch/arm/include/asm/sched_clock.h
index c8e6ddf3e86..e3f75726343 100644
--- a/arch/arm/include/asm/sched_clock.h
+++ b/arch/arm/include/asm/sched_clock.h
@@ -8,113 +8,7 @@
 #ifndef ASM_SCHED_CLOCK
 #define ASM_SCHED_CLOCK
-#include <linux/kernel.h>
-#include <linux/types.h>
-
-struct clock_data {
-	u64 epoch_ns;
-	u32 epoch_cyc;
-	u32 epoch_cyc_copy;
-	u32 mult;
-	u32 shift;
-};
-
-#define DEFINE_CLOCK_DATA(name)	struct clock_data name
-
-static inline u64 cyc_to_ns(u64 cyc, u32 mult, u32 shift)
-{
-	return (cyc * mult) >> shift;
-}
-
-/*
- * Atomically update the sched_clock epoch. Your update callback will
- * be called from a timer before the counter wraps - read the current
- * counter value, and call this function to safely move the epochs
- * forward. Only use this from the update callback.
- */
-static inline void update_sched_clock(struct clock_data *cd, u32 cyc, u32 mask)
-{
-	unsigned long flags;
-	u64 ns = cd->epoch_ns +
-		cyc_to_ns((cyc - cd->epoch_cyc) & mask, cd->mult, cd->shift);
-
-	/*
-	 * Write epoch_cyc and epoch_ns in a way that the update is
-	 * detectable in cyc_to_fixed_sched_clock().
-	 */
-	raw_local_irq_save(flags);
-	cd->epoch_cyc = cyc;
-	smp_wmb();
-	cd->epoch_ns = ns;
-	smp_wmb();
-	cd->epoch_cyc_copy = cyc;
-	raw_local_irq_restore(flags);
-}
-
-/*
- * If your clock rate is known at compile time, using this will allow
- * you to optimize the mult/shift loads away. This is paired with
- * init_fixed_sched_clock() to ensure that your mult/shift are correct.
- */
-static inline unsigned long long cyc_to_fixed_sched_clock(struct clock_data *cd,
-	u32 cyc, u32 mask, u32 mult, u32 shift)
-{
-	u64 epoch_ns;
-	u32 epoch_cyc;
-
-	/*
-	 * Load the epoch_cyc and epoch_ns atomically. We do this by
-	 * ensuring that we always write epoch_cyc, epoch_ns and
-	 * epoch_cyc_copy in strict order, and read them in strict order.
-	 * If epoch_cyc and epoch_cyc_copy are not equal, then we're in
-	 * the middle of an update, and we should repeat the load.
-	 */
-	do {
-		epoch_cyc = cd->epoch_cyc;
-		smp_rmb();
-		epoch_ns = cd->epoch_ns;
-		smp_rmb();
-	} while (epoch_cyc != cd->epoch_cyc_copy);
-
-	return epoch_ns + cyc_to_ns((cyc - epoch_cyc) & mask, mult, shift);
-}
-
-/*
- * Otherwise, you need to use this, which will obtain the mult/shift
- * from the clock_data structure. Use init_sched_clock() with this.
- */
-static inline unsigned long long cyc_to_sched_clock(struct clock_data *cd,
-	u32 cyc, u32 mask)
-{
-	return cyc_to_fixed_sched_clock(cd, cyc, mask, cd->mult, cd->shift);
-}
-
-/*
- * Initialize the clock data - calculate the appropriate multiplier
- * and shift. Also setup a timer to ensure that the epoch is refreshed
- * at the appropriate time interval, which will call your update
- * handler.
- */
-void init_sched_clock(struct clock_data *, void (*)(void),
-	unsigned int, unsigned long);
-
-/*
- * Use this initialization function rather than init_sched_clock() if
- * you're using cyc_to_fixed_sched_clock, which will warn if your
- * constants are incorrect.
- */
-static inline void init_fixed_sched_clock(struct clock_data *cd,
-	void (*update)(void), unsigned int bits, unsigned long rate,
-	u32 mult, u32 shift)
-{
-	init_sched_clock(cd, update, bits, rate);
-	if (cd->mult != mult || cd->shift != shift) {
-		pr_crit("sched_clock: wrong multiply/shift: %u>>%u vs calculated %u>>%u\n"
-			"sched_clock: fix multiply/shift to avoid scheduler hiccups\n",
-			mult, shift, cd->mult, cd->shift);
-	}
-}
-
 extern void sched_clock_postinit(void);
+extern void setup_sched_clock(u32 (*read)(void), int bits, unsigned long rate);
 #endif
diff --git a/arch/arm/include/asm/system.h b/arch/arm/include/asm/system.h
index 984014b9264..53785828744 100644
--- a/arch/arm/include/asm/system.h
+++ b/arch/arm/include/asm/system.h
@@ -80,6 +80,14 @@ struct siginfo;
 void arm_notify_die(const char *str, struct pt_regs *regs, struct siginfo *info, unsigned long err, unsigned long trap);
+#ifdef CONFIG_ARM_LPAE
+#define FAULT_CODE_ALIGNMENT	33
+#define FAULT_CODE_DEBUG	34
+#else
+#define FAULT_CODE_ALIGNMENT	1
+#define FAULT_CODE_DEBUG	2
+#endif
+
 void hook_fault_code(int nr, int (*fn)(unsigned long, unsigned int, struct pt_regs *), int sig, int code, const char *name);
@@ -101,6 +109,7 @@ extern int __pure cpu_architecture(void);
 extern void cpu_init(void);
 void arm_machine_restart(char mode, const char *cmd);
+void soft_restart(unsigned long);
 extern void (*arm_pm_restart)(char str, const char *cmd);
 #define UDBG_UNDEFINED	(1 << 0)
diff --git a/arch/arm/include/asm/tlb.h b/arch/arm/include/asm/tlb.h
index 265f908c4a6..5d3ed7e3856 100644
--- a/arch/arm/include/asm/tlb.h
+++ b/arch/arm/include/asm/tlb.h
@@ -202,8 +202,18 @@ static inline void __pte_free_tlb(struct mmu_gather *tlb, pgtable_t pte,
 	tlb_remove_page(tlb, pte);
 }
+static inline void __pmd_free_tlb(struct mmu_gather *tlb, pmd_t *pmdp,
+	unsigned long addr)
+{
+#ifdef CONFIG_ARM_LPAE
+	tlb_add_flush(tlb, addr);
+	tlb_remove_page(tlb, virt_to_page(pmdp));
+#endif
+}
+
 #define pte_free_tlb(tlb, ptep, addr)	__pte_free_tlb(tlb, ptep, addr)
-#define pmd_free_tlb(tlb, pmdp, addr)	pmd_free((tlb)->mm, pmdp)
+#define pmd_free_tlb(tlb, pmdp, addr)	__pmd_free_tlb(tlb, pmdp, addr)
+#define pud_free_tlb(tlb, pudp, addr)	pud_free((tlb)->mm, pudp)
 #define tlb_migrate_finish(mm)		do { } while (0)
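Illustrative usage sketch (not part of the commit): the snippet below shows how board code might chain the new <asm/cti.h> helpers introduced above — init, unlock, map a trigger, enable, and acknowledge from the interrupt handler. The instance name, base address, IRQ number and trigger/channel numbers are hypothetical placeholders, not values taken from this diff.

#include <asm/cti.h>

static struct cti example_cti;		/* hypothetical CTI instance */

static void example_cti_setup(void __iomem *cti_base)
{
	/* CTI raises trigger-out 6, which is wired to IRQ 54 (made-up numbers) */
	cti_init(&example_cti, cti_base, 54, 6);
	cti_unlock(&example_cti);	/* register writes are ignored while locked */
	/* route trigger-in 1 to trigger-out 6 through channel 2 */
	cti_map_trigger(&example_cti, 1, 6, 2);
	cti_enable(&example_cti);
}

static void example_cti_irq(void)
{
	cti_irq_ack(&example_cti);	/* clear the cross-triggered interrupt */
}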