author     Chris Metcalf <cmetcalf@tilera.com>    2012-03-29 13:58:43 -0400
committer  Chris Metcalf <cmetcalf@tilera.com>    2012-05-25 12:48:24 -0400
commit     d5d14ed6f2db7287a5088e1350cf422bf72140b3 (patch)
tree       19f0bc20bb6f1995a1e4f75dc58e388c047f7d23 /arch/tile/include
parent     47d632f9f8f3ed62b21f725e98b726d65769b6d7 (diff)
arch/tile: Allow tilegx to build with either 16K or 64K page size
This change introduces new flags for the hv_install_context()
API that passes a page table pointer to the hypervisor. Clients
can explicitly request 4K, 16K, or 64K small pages when they
install a new context. In practice, the page size is fixed at
kernel compile time and the same size is always requested every
time a new page table is installed.
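As a sketch of the usage (condensed from the mmu_context.h hunk below;
CTX_PAGE_FLAG comes from <asm/page.h> and expands to 0, HV_CTX_PG_SM_16K,
or HV_CTX_PG_SM_64K depending on the kernel config):

    /* Install a new page table, requesting the compile-time page size. */
    int rc = hv_install_context(__pa(pgdir), prot, asid,
                                HV_CTX_DIRECTIO | CTX_PAGE_FLAG);
    if (rc < 0)
            panic("hv_install_context failed: %d", rc);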
The <hv/hypervisor.h> header changes so that it provides more abstract
macros for managing "page" things like PFNs and page tables. For
example there is now a HV_DEFAULT_PAGE_SIZE_SMALL instead of the old
HV_PAGE_SIZE_SMALL. The various PFN routines have been eliminated and
only PA- or PTFN-based ones remain (since PTFNs are always expressed
in terms of a fixed 2KB "page" size). The page-table management macros are
renamed with a leading underscore and take page-size arguments with
the presumption that clients will use those macros in some single
place to provide the "real" macros they will use themselves.
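For example, pgtable_32.h (below) now derives its level-2 constants in
one place from the generic macros:

    #define PTRS_PER_PTE   _HV_L2_ENTRIES(HPAGE_SHIFT, PAGE_SHIFT)
    #define PTE_INDEX(va)  _HV_L2_INDEX(va, HPAGE_SHIFT, PAGE_SHIFT)
    #define SIZEOF_PTE     _HV_L2_SIZE(HPAGE_SHIFT, PAGE_SHIFT)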
I happened to notice that the old hv_set_caching() API was totally
broken (it assumed 4KB pages), so I changed it to nominally work
correctly with other page sizes.
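Under the new semantics each bit in the bitmask covers 4KB of L2 cache,
so the bits contributed by one priority page could be computed roughly
as follows (a sketch based on the new doc comment below, ignoring
wraparound within the cache and assuming a 64-bit unsigned long):

    unsigned long base  = (pa % CHIP_L2_CACHE_SIZE()) >> 12; /* 4KB units */
    unsigned long nbits = PAGE_SIZE >> 12;    /* one bit per 4KB of page */
    unsigned long bits  = ((1UL << nbits) - 1) << base;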
Tag modules with the page size so you can't load a module built with
a conflicting page size. (And add a test for SMP while we're at it.)
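For instance, a tilegx kernel configured with CONFIG_PAGE_SIZE_16KB and
CONFIG_SMP would produce (assuming CHIP_ARCH_NAME expands to "tilegx"):

    MODULE_ARCH_VERMAGIC == "tilegx 16KB"

so a module built against a 64KB-page kernel is rejected at load time.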
Signed-off-by: Chris Metcalf <cmetcalf@tilera.com>
Diffstat (limited to 'arch/tile/include')
-rw-r--r--  arch/tile/include/asm/Kbuild              1
-rw-r--r--  arch/tile/include/asm/mmu.h               2
-rw-r--r--  arch/tile/include/asm/mmu_context.h       8
-rw-r--r--  arch/tile/include/asm/module.h           40
-rw-r--r--  arch/tile/include/asm/page.h             13
-rw-r--r--  arch/tile/include/asm/pgalloc.h          92
-rw-r--r--  arch/tile/include/asm/pgtable.h          10
-rw-r--r--  arch/tile/include/asm/pgtable_32.h       14
-rw-r--r--  arch/tile/include/asm/pgtable_64.h       28
-rw-r--r--  arch/tile/include/hv/drv_xgbe_intf.h      2
-rw-r--r--  arch/tile/include/hv/hypervisor.h       214
11 files changed, 276 insertions(+), 148 deletions(-)
diff --git a/arch/tile/include/asm/Kbuild b/arch/tile/include/asm/Kbuild
index 0bb42642343..6b2e681695e 100644
--- a/arch/tile/include/asm/Kbuild
+++ b/arch/tile/include/asm/Kbuild
@@ -21,7 +21,6 @@
 generic-y += ipcbuf.h
 generic-y += irq_regs.h
 generic-y += kdebug.h
 generic-y += local.h
-generic-y += module.h
 generic-y += msgbuf.h
 generic-y += mutex.h
 generic-y += param.h
diff --git a/arch/tile/include/asm/mmu.h b/arch/tile/include/asm/mmu.h
index 92f94c77b6e..e2c78909679 100644
--- a/arch/tile/include/asm/mmu.h
+++ b/arch/tile/include/asm/mmu.h
@@ -21,7 +21,7 @@ struct mm_context {
          * Written under the mmap_sem semaphore; read without the
          * semaphore but atomically, but it is conservatively set.
          */
-        unsigned int priority_cached;
+        unsigned long priority_cached;
 };
 
 typedef struct mm_context mm_context_t;
diff --git a/arch/tile/include/asm/mmu_context.h b/arch/tile/include/asm/mmu_context.h
index 15fb2464112..37f0b741dee 100644
--- a/arch/tile/include/asm/mmu_context.h
+++ b/arch/tile/include/asm/mmu_context.h
@@ -30,11 +30,15 @@ init_new_context(struct task_struct *tsk, struct mm_struct *mm)
         return 0;
 }
 
-/* Note that arch/tile/kernel/head.S also calls hv_install_context() */
+/*
+ * Note that arch/tile/kernel/head_NN.S and arch/tile/mm/migrate_NN.S
+ * also call hv_install_context().
+ */
 static inline void __install_page_table(pgd_t *pgdir, int asid, pgprot_t prot)
 {
         /* FIXME: DIRECTIO should not always be set. FIXME. */
-        int rc = hv_install_context(__pa(pgdir), prot, asid, HV_CTX_DIRECTIO);
+        int rc = hv_install_context(__pa(pgdir), prot, asid,
+                                    HV_CTX_DIRECTIO | CTX_PAGE_FLAG);
         if (rc < 0)
                 panic("hv_install_context failed: %d", rc);
 }
diff --git a/arch/tile/include/asm/module.h b/arch/tile/include/asm/module.h
new file mode 100644
index 00000000000..44ed07ccd3d
--- /dev/null
+++ b/arch/tile/include/asm/module.h
@@ -0,0 +1,40 @@
+/*
+ * Copyright 2011 Tilera Corporation. All Rights Reserved.
+ *
+ * This program is free software; you can redistribute it and/or
+ * modify it under the terms of the GNU General Public License
+ * as published by the Free Software Foundation, version 2.
+ *
+ * This program is distributed in the hope that it will be useful, but
+ * WITHOUT ANY WARRANTY; without even the implied warranty of
+ * MERCHANTABILITY OR FITNESS FOR A PARTICULAR PURPOSE, GOOD TITLE or
+ * NON INFRINGEMENT. See the GNU General Public License for
+ * more details.
+ */
+
+#ifndef _ASM_TILE_MODULE_H
+#define _ASM_TILE_MODULE_H
+
+#include <arch/chip.h>
+
+#include <asm-generic/module.h>
+
+/* We can't use modules built with different page sizes. */
+#if defined(CONFIG_PAGE_SIZE_16KB)
+# define MODULE_PGSZ " 16KB"
+#elif defined(CONFIG_PAGE_SIZE_64KB)
+# define MODULE_PGSZ " 64KB"
+#else
+# define MODULE_PGSZ ""
+#endif
+
+/* We don't really support no-SMP so tag if someone tries. */
+#ifdef CONFIG_SMP
+#define MODULE_NOSMP ""
+#else
+#define MODULE_NOSMP " nosmp"
+#endif
+
+#define MODULE_ARCH_VERMAGIC CHIP_ARCH_NAME MODULE_PGSZ MODULE_NOSMP
+
+#endif /* _ASM_TILE_MODULE_H */
diff --git a/arch/tile/include/asm/page.h b/arch/tile/include/asm/page.h
index db93518fac0..c750943f961 100644
--- a/arch/tile/include/asm/page.h
+++ b/arch/tile/include/asm/page.h
@@ -20,8 +20,17 @@
 #include <arch/chip.h>
 
 /* PAGE_SHIFT and HPAGE_SHIFT determine the page sizes. */
-#define PAGE_SHIFT      HV_LOG2_PAGE_SIZE_SMALL
-#define HPAGE_SHIFT     HV_LOG2_PAGE_SIZE_LARGE
+#if defined(CONFIG_PAGE_SIZE_16KB)
+#define PAGE_SHIFT      14
+#define CTX_PAGE_FLAG   HV_CTX_PG_SM_16K
+#elif defined(CONFIG_PAGE_SIZE_64KB)
+#define PAGE_SHIFT      16
+#define CTX_PAGE_FLAG   HV_CTX_PG_SM_64K
+#else
+#define PAGE_SHIFT      HV_LOG2_DEFAULT_PAGE_SIZE_SMALL
+#define CTX_PAGE_FLAG   0
+#endif
+#define HPAGE_SHIFT     HV_LOG2_DEFAULT_PAGE_SIZE_LARGE
 
 #define PAGE_SIZE       (_AC(1, UL) << PAGE_SHIFT)
 #define HPAGE_SIZE      (_AC(1, UL) << HPAGE_SHIFT)
diff --git a/arch/tile/include/asm/pgalloc.h b/arch/tile/include/asm/pgalloc.h
index e919c0bdc22..1b902508b66 100644
--- a/arch/tile/include/asm/pgalloc.h
+++ b/arch/tile/include/asm/pgalloc.h
@@ -19,24 +19,24 @@
 #include <linux/mm.h>
 #include <linux/mmzone.h>
 #include <asm/fixmap.h>
+#include <asm/page.h>
 #include <hv/hypervisor.h>
 
 /* Bits for the size of the second-level page table. */
-#define L2_KERNEL_PGTABLE_SHIFT \
-  (HV_LOG2_PAGE_SIZE_LARGE - HV_LOG2_PAGE_SIZE_SMALL + HV_LOG2_PTE_SIZE)
+#define L2_KERNEL_PGTABLE_SHIFT _HV_LOG2_L2_SIZE(HPAGE_SHIFT, PAGE_SHIFT)
+
+/* How big is a kernel L2 page table? */
+#define L2_KERNEL_PGTABLE_SIZE (1UL << L2_KERNEL_PGTABLE_SHIFT)
 
 /* We currently allocate user L2 page tables by page (unlike kernel L2s). */
-#if L2_KERNEL_PGTABLE_SHIFT < HV_LOG2_PAGE_SIZE_SMALL
-#define L2_USER_PGTABLE_SHIFT HV_LOG2_PAGE_SIZE_SMALL
+#if L2_KERNEL_PGTABLE_SHIFT < PAGE_SHIFT
+#define L2_USER_PGTABLE_SHIFT PAGE_SHIFT
 #else
 #define L2_USER_PGTABLE_SHIFT L2_KERNEL_PGTABLE_SHIFT
 #endif
 
 /* How many pages do we need, as an "order", for a user L2 page table? */
-#define L2_USER_PGTABLE_ORDER (L2_USER_PGTABLE_SHIFT - HV_LOG2_PAGE_SIZE_SMALL)
-
-/* How big is a kernel L2 page table? */
-#define L2_KERNEL_PGTABLE_SIZE (1 << L2_KERNEL_PGTABLE_SHIFT)
+#define L2_USER_PGTABLE_ORDER (L2_USER_PGTABLE_SHIFT - PAGE_SHIFT)
 
 static inline void set_pmd(pmd_t *pmdp, pmd_t pmd)
 {
@@ -50,14 +50,14 @@ static inline void set_pmd(pmd_t *pmdp, pmd_t pmd)
 static inline void pmd_populate_kernel(struct mm_struct *mm,
                                        pmd_t *pmd, pte_t *ptep)
 {
-        set_pmd(pmd, ptfn_pmd(__pa(ptep) >> HV_LOG2_PAGE_TABLE_ALIGN,
+        set_pmd(pmd, ptfn_pmd(HV_CPA_TO_PTFN(__pa(ptep)),
                               __pgprot(_PAGE_PRESENT)));
 }
 
 static inline void pmd_populate(struct mm_struct *mm, pmd_t *pmd,
                                 pgtable_t page)
 {
-        set_pmd(pmd, ptfn_pmd(HV_PFN_TO_PTFN(page_to_pfn(page)),
+        set_pmd(pmd, ptfn_pmd(HV_CPA_TO_PTFN(PFN_PHYS(page_to_pfn(page))),
                               __pgprot(_PAGE_PRESENT)));
 }
 
@@ -68,8 +68,20 @@ static inline void pmd_populate(struct mm_struct *mm, pmd_t *pmd,
 extern pgd_t *pgd_alloc(struct mm_struct *mm);
 extern void pgd_free(struct mm_struct *mm, pgd_t *pgd);
 
-extern pgtable_t pte_alloc_one(struct mm_struct *mm, unsigned long address);
-extern void pte_free(struct mm_struct *mm, struct page *pte);
+extern pgtable_t pgtable_alloc_one(struct mm_struct *mm, unsigned long address,
+                                   int order);
+extern void pgtable_free(struct mm_struct *mm, struct page *pte, int order);
+
+static inline pgtable_t pte_alloc_one(struct mm_struct *mm,
+                                      unsigned long address)
+{
+        return pgtable_alloc_one(mm, address, L2_USER_PGTABLE_ORDER);
+}
+
+static inline void pte_free(struct mm_struct *mm, struct page *pte)
+{
+        pgtable_free(mm, pte, L2_USER_PGTABLE_ORDER);
+}
 
 #define pmd_pgtable(pmd) pmd_page(pmd)
 
@@ -85,8 +97,13 @@ static inline void pte_free_kernel(struct mm_struct *mm, pte_t *pte)
         pte_free(mm, virt_to_page(pte));
 }
 
-extern void __pte_free_tlb(struct mmu_gather *tlb, struct page *pte,
-                           unsigned long address);
+extern void __pgtable_free_tlb(struct mmu_gather *tlb, struct page *pte,
+                               unsigned long address, int order);
+static inline void __pte_free_tlb(struct mmu_gather *tlb, struct page *pte,
+                                  unsigned long address)
+{
+        __pgtable_free_tlb(tlb, pte, address, L2_USER_PGTABLE_ORDER);
+}
 
 #define check_pgt_cache()       do { } while (0)
 
@@ -104,19 +121,44 @@ void shatter_pmd(pmd_t *pmd);
 void shatter_huge_page(unsigned long addr);
 
 #ifdef __tilegx__
-/* We share a single page allocator for both L1 and L2 page tables. */
-#if HV_L1_SIZE != HV_L2_SIZE
-# error Rework assumption that L1 and L2 page tables are same size.
-#endif
-#define L1_USER_PGTABLE_ORDER L2_USER_PGTABLE_ORDER
+
 #define pud_populate(mm, pud, pmd) \
         pmd_populate_kernel((mm), (pmd_t *)(pud), (pte_t *)(pmd))
-#define pmd_alloc_one(mm, addr) \
-        ((pmd_t *)page_to_virt(pte_alloc_one((mm), (addr))))
-#define pmd_free(mm, pmdp) \
-        pte_free((mm), virt_to_page(pmdp))
-#define __pmd_free_tlb(tlb, pmdp, address) \
-        __pte_free_tlb((tlb), virt_to_page(pmdp), (address))
+
+/* Bits for the size of the L1 (intermediate) page table. */
+#define L1_KERNEL_PGTABLE_SHIFT _HV_LOG2_L1_SIZE(HPAGE_SHIFT)
+
+/* How big is a kernel L2 page table? */
+#define L1_KERNEL_PGTABLE_SIZE (1UL << L1_KERNEL_PGTABLE_SHIFT)
+
+/* We currently allocate L1 page tables by page. */
+#if L1_KERNEL_PGTABLE_SHIFT < PAGE_SHIFT
+#define L1_USER_PGTABLE_SHIFT PAGE_SHIFT
+#else
+#define L1_USER_PGTABLE_SHIFT L1_KERNEL_PGTABLE_SHIFT
 #endif
+
+/* How many pages do we need, as an "order", for an L1 page table? */
+#define L1_USER_PGTABLE_ORDER (L1_USER_PGTABLE_SHIFT - PAGE_SHIFT)
+
+static inline pmd_t *pmd_alloc_one(struct mm_struct *mm, unsigned long address)
+{
+        struct page *p = pgtable_alloc_one(mm, address, L1_USER_PGTABLE_ORDER);
+        return (pmd_t *)page_to_virt(p);
+}
+
+static inline void pmd_free(struct mm_struct *mm, pmd_t *pmdp)
+{
+        pgtable_free(mm, virt_to_page(pmdp), L1_USER_PGTABLE_ORDER);
+}
+
+static inline void __pmd_free_tlb(struct mmu_gather *tlb, pmd_t *pmdp,
+                                  unsigned long address)
+{
+        __pgtable_free_tlb(tlb, virt_to_page(pmdp), address,
+                           L1_USER_PGTABLE_ORDER);
+}
+
+#endif /* __tilegx__ */
+
 #endif /* _ASM_TILE_PGALLOC_H */
diff --git a/arch/tile/include/asm/pgtable.h b/arch/tile/include/asm/pgtable.h
index ec907d4dbd7..319f4826d97 100644
--- a/arch/tile/include/asm/pgtable.h
+++ b/arch/tile/include/asm/pgtable.h
@@ -27,8 +27,10 @@
 #include <linux/slab.h>
 #include <linux/list.h>
 #include <linux/spinlock.h>
+#include <linux/pfn.h>
 #include <asm/processor.h>
 #include <asm/fixmap.h>
+#include <asm/page.h>
 
 struct mm_struct;
 struct vm_area_struct;
@@ -162,7 +164,7 @@ extern void set_page_homes(void);
         (pgprot_t) { ((oldprot).val & ~_PAGE_ALL) | (newprot).val }
 
 /* Just setting the PFN to zero suffices. */
-#define pte_pgprot(x) hv_pte_set_pfn((x), 0)
+#define pte_pgprot(x) hv_pte_set_pa((x), 0)
 
 /*
  * For PTEs and PDEs, we must clear the Present bit first when
@@ -262,7 +264,7 @@ static inline int pte_none(pte_t pte)
 
 static inline unsigned long pte_pfn(pte_t pte)
 {
-        return hv_pte_get_pfn(pte);
+        return PFN_DOWN(hv_pte_get_pa(pte));
 }
 
 /* Set or get the remote cache cpu in a pgprot with remote caching. */
@@ -271,7 +273,7 @@ extern int get_remote_cache_cpu(pgprot_t prot);
 
 static inline pte_t pfn_pte(unsigned long pfn, pgprot_t prot)
 {
-        return hv_pte_set_pfn(prot, pfn);
+        return hv_pte_set_pa(prot, PFN_PHYS(pfn));
 }
 
 /* Support for priority mappings. */
@@ -471,7 +473,7 @@ static inline unsigned long pmd_page_vaddr(pmd_t pmd)
  * OK for pte_lockptr(), since we just end up with potentially one
  * lock being used for several pte_t arrays.
  */
-#define pmd_page(pmd) pfn_to_page(HV_PTFN_TO_PFN(pmd_ptfn(pmd)))
+#define pmd_page(pmd) pfn_to_page(PFN_DOWN(HV_PTFN_TO_CPA(pmd_ptfn(pmd))))
 
 static inline void pmd_clear(pmd_t *pmdp)
 {
diff --git a/arch/tile/include/asm/pgtable_32.h b/arch/tile/include/asm/pgtable_32.h
index 27e20f6844a..4ce4a7a99c2 100644
--- a/arch/tile/include/asm/pgtable_32.h
+++ b/arch/tile/include/asm/pgtable_32.h
@@ -20,11 +20,12 @@
  * The level-1 index is defined by the huge page size.  A PGD is composed
  * of PTRS_PER_PGD pgd_t's and is the top level of the page table.
  */
-#define PGDIR_SHIFT     HV_LOG2_PAGE_SIZE_LARGE
-#define PGDIR_SIZE      HV_PAGE_SIZE_LARGE
+#define PGDIR_SHIFT     HPAGE_SHIFT
+#define PGDIR_SIZE      HPAGE_SIZE
 #define PGDIR_MASK      (~(PGDIR_SIZE-1))
-#define PTRS_PER_PGD    (1 << (32 - PGDIR_SHIFT))
-#define SIZEOF_PGD      (PTRS_PER_PGD * sizeof(pgd_t))
+#define PTRS_PER_PGD    _HV_L1_ENTRIES(HPAGE_SHIFT)
+#define PGD_INDEX(va)   _HV_L1_INDEX(va, HPAGE_SHIFT)
+#define SIZEOF_PGD      _HV_L1_SIZE(HPAGE_SHIFT)
 
 /*
  * The level-2 index is defined by the difference between the huge
@@ -33,8 +34,9 @@
  * Note that the hypervisor docs use PTE for what we call pte_t, so
  * this nomenclature is somewhat confusing.
  */
-#define PTRS_PER_PTE (1 << (HV_LOG2_PAGE_SIZE_LARGE - HV_LOG2_PAGE_SIZE_SMALL))
-#define SIZEOF_PTE      (PTRS_PER_PTE * sizeof(pte_t))
+#define PTRS_PER_PTE    _HV_L2_ENTRIES(HPAGE_SHIFT, PAGE_SHIFT)
+#define PTE_INDEX(va)   _HV_L2_INDEX(va, HPAGE_SHIFT, PAGE_SHIFT)
+#define SIZEOF_PTE      _HV_L2_SIZE(HPAGE_SHIFT, PAGE_SHIFT)
 
 #ifndef __ASSEMBLY__
diff --git a/arch/tile/include/asm/pgtable_64.h b/arch/tile/include/asm/pgtable_64.h
index e105f3ada65..2492fa5478e 100644
--- a/arch/tile/include/asm/pgtable_64.h
+++ b/arch/tile/include/asm/pgtable_64.h
@@ -21,17 +21,19 @@
 #define PGDIR_SIZE      HV_L1_SPAN
 #define PGDIR_MASK      (~(PGDIR_SIZE-1))
 #define PTRS_PER_PGD    HV_L0_ENTRIES
-#define SIZEOF_PGD      (PTRS_PER_PGD * sizeof(pgd_t))
+#define PGD_INDEX(va)   HV_L0_INDEX(va)
+#define SIZEOF_PGD      HV_L0_SIZE
 
 /*
  * The level-1 index is defined by the huge page size.  A PMD is composed
  * of PTRS_PER_PMD pgd_t's and is the middle level of the page table.
  */
-#define PMD_SHIFT       HV_LOG2_PAGE_SIZE_LARGE
-#define PMD_SIZE        HV_PAGE_SIZE_LARGE
+#define PMD_SHIFT       HPAGE_SHIFT
+#define PMD_SIZE        HPAGE_SIZE
 #define PMD_MASK        (~(PMD_SIZE-1))
-#define PTRS_PER_PMD    (1 << (PGDIR_SHIFT - PMD_SHIFT))
-#define SIZEOF_PMD      (PTRS_PER_PMD * sizeof(pmd_t))
+#define PTRS_PER_PMD    _HV_L1_ENTRIES(HPAGE_SHIFT)
+#define PMD_INDEX(va)   _HV_L1_INDEX(va, HPAGE_SHIFT)
+#define SIZEOF_PMD      _HV_L1_SIZE(HPAGE_SHIFT)
 
 /*
  * The level-2 index is defined by the difference between the huge
@@ -40,17 +42,19 @@
  * Note that the hypervisor docs use PTE for what we call pte_t, so
  * this nomenclature is somewhat confusing.
 */
-#define PTRS_PER_PTE (1 << (HV_LOG2_PAGE_SIZE_LARGE - HV_LOG2_PAGE_SIZE_SMALL))
-#define SIZEOF_PTE      (PTRS_PER_PTE * sizeof(pte_t))
+#define PTRS_PER_PTE    _HV_L2_ENTRIES(HPAGE_SHIFT, PAGE_SHIFT)
+#define PTE_INDEX(va)   _HV_L2_INDEX(va, HPAGE_SHIFT, PAGE_SHIFT)
+#define SIZEOF_PTE      _HV_L2_SIZE(HPAGE_SHIFT, PAGE_SHIFT)
 
 /*
- * Align the vmalloc area to an L2 page table, and leave a guard page
- * at the beginning and end.  The vmalloc code also puts in an internal
+ * Align the vmalloc area to an L2 page table.  Omit guard pages at
+ * the beginning and end for simplicity (particularly in the per-cpu
+ * memory allocation code).  The vmalloc code puts in an internal
  * guard page between each allocation.
  */
 #define _VMALLOC_END    HUGE_VMAP_BASE
-#define VMALLOC_END     (_VMALLOC_END - PAGE_SIZE)
-#define VMALLOC_START   (_VMALLOC_START + PAGE_SIZE)
+#define VMALLOC_END     _VMALLOC_END
+#define VMALLOC_START   _VMALLOC_START
 
 #define HUGE_VMAP_END   (HUGE_VMAP_BASE + PGDIR_SIZE)
@@ -98,7 +102,7 @@ static inline int pud_bad(pud_t pud)
  * A pud_t points to a pmd_t array.  Since we can have multiple per
  * page, we don't have a one-to-one mapping of pud_t's to pages.
  */
-#define pud_page(pud) pfn_to_page(HV_PTFN_TO_PFN(pud_ptfn(pud)))
+#define pud_page(pud) pfn_to_page(PFN_DOWN(HV_PTFN_TO_CPA(pud_ptfn(pud))))
 
 static inline unsigned long pud_index(unsigned long address)
 {
diff --git a/arch/tile/include/hv/drv_xgbe_intf.h b/arch/tile/include/hv/drv_xgbe_intf.h
index f13188ac281..2a20b266d94 100644
--- a/arch/tile/include/hv/drv_xgbe_intf.h
+++ b/arch/tile/include/hv/drv_xgbe_intf.h
@@ -460,7 +460,7 @@ typedef void* lepp_comp_t;
  * linux's "MAX_SKB_FRAGS", and presumably over-estimates by one, for
  * our page size of exactly 65536.  We add one for a "body" fragment.
  */
-#define LEPP_MAX_FRAGS (65536 / HV_PAGE_SIZE_SMALL + 2 + 1)
+#define LEPP_MAX_FRAGS (65536 / HV_DEFAULT_PAGE_SIZE_SMALL + 2 + 1)
 
 /** Total number of bytes needed for an lepp_tso_cmd_t. */
 #define LEPP_TSO_CMD_SIZE(num_frags, header_size) \
diff --git a/arch/tile/include/hv/hypervisor.h b/arch/tile/include/hv/hypervisor.h
index df74223944b..f27871775b7 100644
--- a/arch/tile/include/hv/hypervisor.h
+++ b/arch/tile/include/hv/hypervisor.h
@@ -17,8 +17,8 @@
  * The hypervisor's public API.
  */
 
-#ifndef _TILE_HV_H
-#define _TILE_HV_H
+#ifndef _HV_HV_H
+#define _HV_HV_H
 
 #include <arch/chip.h>
 
@@ -42,25 +42,29 @@
  */
 #define HV_L1_SPAN (__HV_SIZE_ONE << HV_LOG2_L1_SPAN)
 
-/** The log2 of the size of small pages, in bytes. This value should
- * be verified at runtime by calling hv_sysconf(HV_SYSCONF_PAGE_SIZE_SMALL).
+/** The log2 of the initial size of small pages, in bytes.
+ * See HV_DEFAULT_PAGE_SIZE_SMALL.
  */
-#define HV_LOG2_PAGE_SIZE_SMALL 16
+#define HV_LOG2_DEFAULT_PAGE_SIZE_SMALL 16
 
-/** The size of small pages, in bytes. This value should be verified
+/** The initial size of small pages, in bytes. This value should be verified
  * at runtime by calling hv_sysconf(HV_SYSCONF_PAGE_SIZE_SMALL).
+ * It may also be modified when installing a new context.
  */
-#define HV_PAGE_SIZE_SMALL (__HV_SIZE_ONE << HV_LOG2_PAGE_SIZE_SMALL)
+#define HV_DEFAULT_PAGE_SIZE_SMALL \
+  (__HV_SIZE_ONE << HV_LOG2_DEFAULT_PAGE_SIZE_SMALL)
 
-/** The log2 of the size of large pages, in bytes. This value should be
- * verified at runtime by calling hv_sysconf(HV_SYSCONF_PAGE_SIZE_LARGE).
+/** The log2 of the initial size of large pages, in bytes.
+ * See HV_DEFAULT_PAGE_SIZE_LARGE.
  */
-#define HV_LOG2_PAGE_SIZE_LARGE 24
+#define HV_LOG2_DEFAULT_PAGE_SIZE_LARGE 24
 
-/** The size of large pages, in bytes. This value should be verified
+/** The initial size of large pages, in bytes. This value should be verified
  * at runtime by calling hv_sysconf(HV_SYSCONF_PAGE_SIZE_LARGE).
+ * It may also be modified when installing a new context.
  */
-#define HV_PAGE_SIZE_LARGE (__HV_SIZE_ONE << HV_LOG2_PAGE_SIZE_LARGE)
+#define HV_DEFAULT_PAGE_SIZE_LARGE \
+  (__HV_SIZE_ONE << HV_LOG2_DEFAULT_PAGE_SIZE_LARGE)
 
 /** The log2 of the granularity at which page tables must be aligned;
  * in other words, the CPA for a page table must have this many zero
@@ -401,7 +405,13 @@ typedef enum {
    * that the temperature has hit an upper limit and is no longer being
    * accurately tracked.
    */
-  HV_SYSCONF_BOARD_TEMP = 6
+  HV_SYSCONF_BOARD_TEMP = 6,
+
+  /** Legal page size bitmask for hv_install_context().
+   * For example, if 16KB and 64KB small pages are supported,
+   * it would return "HV_CTX_PG_SM_16K | HV_CTX_PG_SM_64K".
+   */
+  HV_SYSCONF_VALID_PAGE_SIZES = 7,
 
 } HV_SysconfQuery;
 
@@ -654,6 +664,12 @@ void hv_set_rtc(HV_RTCTime time);
  * new page table does not need to contain any mapping for the
  * hv_install_context address itself.
  *
+ * At most one HV_CTX_PG_SM_* flag may be specified in "flags";
+ * if multiple flags are specified, HV_EINVAL is returned.
+ * Specifying none of the flags results in using the default page size.
+ * All cores participating in a given client must request the same
+ * page size, or the results are undefined.
+ *
  * @param page_table Root of the page table.
  * @param access PTE providing info on how to read the page table.  This
  *   value must be consistent between multiple tiles sharing a page table,
@@ -672,6 +688,11 @@ int hv_install_context(HV_PhysAddr page_table, HV_PTE access, HV_ASID asid,
 
 #define HV_CTX_DIRECTIO     0x1   /**< Direct I/O requests are accepted from
                                        PL0. */
 
+#define HV_CTX_PG_SM_4K     0x10  /**< Use 4K small pages, if available. */
+#define HV_CTX_PG_SM_16K    0x20  /**< Use 16K small pages, if available. */
+#define HV_CTX_PG_SM_64K    0x40  /**< Use 64K small pages, if available. */
+#define HV_CTX_PG_SM_MASK   0xf0  /**< Mask of all possible small pages. */
+
 #ifndef __ASSEMBLER__
 
 /** Value returned from hv_inquire_context(). */
@@ -1248,11 +1269,14 @@ HV_Errno hv_set_command_line(HV_VirtAddr buf, int length);
  * with the existing priority pages) or "red/black" (if they don't).
  * The bitmask provides information on which parts of the cache
  * have been used for pinned pages so far on this tile; if (1 << N)
- * appears in the bitmask, that indicates that a page has been marked
- * "priority" whose PFN equals N, mod 8.
+ * appears in the bitmask, that indicates that a 4KB region of the
+ * cache starting at (N * 4KB) is in use by a "priority" page.
+ * The portion of cache used by a particular page can be computed
+ * by taking the page's PA, modulo CHIP_L2_CACHE_SIZE(), and setting
+ * all the "4KB" bits corresponding to the actual page size.
  * @param bitmask A bitmap of priority page set values
  */
-void hv_set_caching(unsigned int bitmask);
+void hv_set_caching(unsigned long bitmask);
 
 /** Zero out a specified number of pages.
@@ -1884,15 +1908,6 @@ int hv_flush_remote(HV_PhysAddr cache_pa, unsigned long cache_control,
                                               of word */
 #define HV_PTE_PTFN_BITS             29  /**< Number of bits in a PTFN */
 
-/** Position of the PFN field within the PTE (subset of the PTFN). */
-#define HV_PTE_INDEX_PFN (HV_PTE_INDEX_PTFN + (HV_LOG2_PAGE_SIZE_SMALL - \
-                                               HV_LOG2_PAGE_TABLE_ALIGN))
-
-/** Length of the PFN field within the PTE (subset of the PTFN). */
-#define HV_PTE_INDEX_PFN_BITS (HV_PTE_INDEX_PTFN_BITS - \
-                               (HV_LOG2_PAGE_SIZE_SMALL - \
-                                HV_LOG2_PAGE_TABLE_ALIGN))
-
 /*
  * Legal values for the PTE's mode field
  */
@@ -2245,40 +2260,11 @@ hv_pte_set_mode(HV_PTE pte, unsigned int val)
  *
  * This field contains the upper bits of the CPA (client physical
  * address) of the target page; the complete CPA is this field with
- * HV_LOG2_PAGE_SIZE_SMALL zero bits appended to it.
+ * HV_LOG2_PAGE_TABLE_ALIGN zero bits appended to it.
  *
- * For PTEs in a level-1 page table where the Page bit is set, the
- * CPA must be aligned modulo the large page size.
- */
-static __inline unsigned int
-hv_pte_get_pfn(const HV_PTE pte)
-{
-  return pte.val >> HV_PTE_INDEX_PFN;
-}
-
-
-/** Set the page frame number into a PTE.  See hv_pte_get_pfn. */
-static __inline HV_PTE
-hv_pte_set_pfn(HV_PTE pte, unsigned int val)
-{
-  /*
-   * Note that the use of "PTFN" in the next line is intentional; we
-   * don't want any garbage lower bits left in that field.
-   */
-  pte.val &= ~(((1ULL << HV_PTE_PTFN_BITS) - 1) << HV_PTE_INDEX_PTFN);
-  pte.val |= (__hv64) val << HV_PTE_INDEX_PFN;
-  return pte;
-}
-
-/** Get the page table frame number from the PTE.
- *
- * This field contains the upper bits of the CPA (client physical
- * address) of the target page table; the complete CPA is this field with
- * with HV_PAGE_TABLE_ALIGN zero bits appended to it.
- *
- * For PTEs in a level-1 page table when the Page bit is not set, the
- * CPA must be aligned modulo the sticter of HV_PAGE_TABLE_ALIGN and
- * the level-2 page table size.
+ * For all PTEs in the lowest-level page table, and for all PTEs with
+ * the Page bit set in all page tables, the CPA must be aligned modulo
+ * the relevant page size.
  */
 static __inline unsigned long
 hv_pte_get_ptfn(const HV_PTE pte)
@@ -2286,7 +2272,6 @@ hv_pte_get_ptfn(const HV_PTE pte)
   return pte.val >> HV_PTE_INDEX_PTFN;
 }
 
-
 /** Set the page table frame number into a PTE.  See hv_pte_get_ptfn. */
 static __inline HV_PTE
 hv_pte_set_ptfn(HV_PTE pte, unsigned long val)
@@ -2296,6 +2281,20 @@ hv_pte_set_ptfn(HV_PTE pte, unsigned long val)
   return pte;
 }
 
+/** Get the client physical address from the PTE.  See hv_pte_set_ptfn. */
+static __inline HV_PhysAddr
+hv_pte_get_pa(const HV_PTE pte)
+{
+  return (__hv64) hv_pte_get_ptfn(pte) << HV_LOG2_PAGE_TABLE_ALIGN;
+}
+
+/** Set the client physical address into a PTE.  See hv_pte_get_ptfn. */
+static __inline HV_PTE
+hv_pte_set_pa(HV_PTE pte, HV_PhysAddr pa)
+{
+  return hv_pte_set_ptfn(pte, pa >> HV_LOG2_PAGE_TABLE_ALIGN);
+}
+
 /** Get the remote tile caching this page.
  *
@@ -2331,28 +2330,20 @@ hv_pte_set_lotar(HV_PTE pte, unsigned int val)
 
 #endif /* !__ASSEMBLER__ */
 
-/** Converts a client physical address to a pfn. */
-#define HV_CPA_TO_PFN(p) ((p) >> HV_LOG2_PAGE_SIZE_SMALL)
-
-/** Converts a pfn to a client physical address. */
-#define HV_PFN_TO_CPA(p) (((HV_PhysAddr)(p)) << HV_LOG2_PAGE_SIZE_SMALL)
-
 /** Converts a client physical address to a ptfn. */
 #define HV_CPA_TO_PTFN(p) ((p) >> HV_LOG2_PAGE_TABLE_ALIGN)
 
 /** Converts a ptfn to a client physical address. */
 #define HV_PTFN_TO_CPA(p) (((HV_PhysAddr)(p)) << HV_LOG2_PAGE_TABLE_ALIGN)
 
-/** Converts a ptfn to a pfn. */
-#define HV_PTFN_TO_PFN(p) \
-  ((p) >> (HV_LOG2_PAGE_SIZE_SMALL - HV_LOG2_PAGE_TABLE_ALIGN))
-
-/** Converts a pfn to a ptfn. */
-#define HV_PFN_TO_PTFN(p) \
-  ((p) << (HV_LOG2_PAGE_SIZE_SMALL - HV_LOG2_PAGE_TABLE_ALIGN))
-
 #if CHIP_VA_WIDTH() > 32
 
+/*
+ * Note that we currently do not allow customizing the page size
+ * of the L0 pages, but fix them at 4GB, so we do not use the
+ * "_HV_xxx" nomenclature for the L0 macros.
+ */
+
 /** Log number of HV_PTE entries in L0 page table */
 #define HV_LOG2_L0_ENTRIES (CHIP_VA_WIDTH() - HV_LOG2_L1_SPAN)
 
@@ -2382,69 +2373,104 @@ hv_pte_set_lotar(HV_PTE pte, unsigned int val)
 #endif /* CHIP_VA_WIDTH() > 32 */
 
 /** Log number of HV_PTE entries in L1 page table */
-#define HV_LOG2_L1_ENTRIES (HV_LOG2_L1_SPAN - HV_LOG2_PAGE_SIZE_LARGE)
+#define _HV_LOG2_L1_ENTRIES(log2_page_size_large) \
+  (HV_LOG2_L1_SPAN - log2_page_size_large)
 
 /** Number of HV_PTE entries in L1 page table */
-#define HV_L1_ENTRIES (1 << HV_LOG2_L1_ENTRIES)
+#define _HV_L1_ENTRIES(log2_page_size_large) \
+  (1 << _HV_LOG2_L1_ENTRIES(log2_page_size_large))
 
 /** Log size of L1 page table in bytes */
-#define HV_LOG2_L1_SIZE (HV_LOG2_PTE_SIZE + HV_LOG2_L1_ENTRIES)
+#define _HV_LOG2_L1_SIZE(log2_page_size_large) \
+  (HV_LOG2_PTE_SIZE + _HV_LOG2_L1_ENTRIES(log2_page_size_large))
 
 /** Size of L1 page table in bytes */
-#define HV_L1_SIZE (1 << HV_LOG2_L1_SIZE)
+#define _HV_L1_SIZE(log2_page_size_large) \
+  (1 << _HV_LOG2_L1_SIZE(log2_page_size_large))
 
 /** Log number of HV_PTE entries in level-2 page table */
-#define HV_LOG2_L2_ENTRIES (HV_LOG2_PAGE_SIZE_LARGE - HV_LOG2_PAGE_SIZE_SMALL)
+#define _HV_LOG2_L2_ENTRIES(log2_page_size_large, log2_page_size_small) \
+  (log2_page_size_large - log2_page_size_small)
 
 /** Number of HV_PTE entries in level-2 page table */
-#define HV_L2_ENTRIES (1 << HV_LOG2_L2_ENTRIES)
+#define _HV_L2_ENTRIES(log2_page_size_large, log2_page_size_small) \
+  (1 << _HV_LOG2_L2_ENTRIES(log2_page_size_large, log2_page_size_small))
 
 /** Log size of level-2 page table in bytes */
-#define HV_LOG2_L2_SIZE (HV_LOG2_PTE_SIZE + HV_LOG2_L2_ENTRIES)
+#define _HV_LOG2_L2_SIZE(log2_page_size_large, log2_page_size_small) \
+  (HV_LOG2_PTE_SIZE + \
+   _HV_LOG2_L2_ENTRIES(log2_page_size_large, log2_page_size_small))
 
 /** Size of level-2 page table in bytes */
-#define HV_L2_SIZE (1 << HV_LOG2_L2_SIZE)
+#define _HV_L2_SIZE(log2_page_size_large, log2_page_size_small) \
+  (1 << _HV_LOG2_L2_SIZE(log2_page_size_large, log2_page_size_small))
 
 #ifdef __ASSEMBLER__
 
 #if CHIP_VA_WIDTH() > 32
 
 /** Index in L1 for a specific VA */
-#define HV_L1_INDEX(va) \
-  (((va) >> HV_LOG2_PAGE_SIZE_LARGE) & (HV_L1_ENTRIES - 1))
+#define _HV_L1_INDEX(va, log2_page_size_large) \
+  (((va) >> log2_page_size_large) & (_HV_L1_ENTRIES(log2_page_size_large) - 1))
 
 #else /* CHIP_VA_WIDTH() > 32 */
 
 /** Index in L1 for a specific VA */
-#define HV_L1_INDEX(va) \
-  (((va) >> HV_LOG2_PAGE_SIZE_LARGE))
+#define _HV_L1_INDEX(va, log2_page_size_large) \
+  (((va) >> log2_page_size_large))
 
 #endif /* CHIP_VA_WIDTH() > 32 */
 
 /** Index in level-2 page table for a specific VA */
-#define HV_L2_INDEX(va) \
-  (((va) >> HV_LOG2_PAGE_SIZE_SMALL) & (HV_L2_ENTRIES - 1))
+#define _HV_L2_INDEX(va, log2_page_size_large, log2_page_size_small) \
+  (((va) >> log2_page_size_small) & \
+   (_HV_L2_ENTRIES(log2_page_size_large, log2_page_size_small) - 1))
 
 #else /* __ASSEMBLER __ */
 
 #if CHIP_VA_WIDTH() > 32
 
 /** Index in L1 for a specific VA */
-#define HV_L1_INDEX(va) \
-  (((HV_VirtAddr)(va) >> HV_LOG2_PAGE_SIZE_LARGE) & (HV_L1_ENTRIES - 1))
+#define _HV_L1_INDEX(va, log2_page_size_large) \
+  (((HV_VirtAddr)(va) >> log2_page_size_large) & \
+   (_HV_L1_ENTRIES(log2_page_size_large) - 1))
 
 #else /* CHIP_VA_WIDTH() > 32 */
 
 /** Index in L1 for a specific VA */
-#define HV_L1_INDEX(va) \
-  (((HV_VirtAddr)(va) >> HV_LOG2_PAGE_SIZE_LARGE))
+#define _HV_L1_INDEX(va, log2_page_size_large) \
+  (((HV_VirtAddr)(va) >> log2_page_size_large))
 
 #endif /* CHIP_VA_WIDTH() > 32 */
 
 /** Index in level-2 page table for a specific VA */
-#define HV_L2_INDEX(va) \
-  (((HV_VirtAddr)(va) >> HV_LOG2_PAGE_SIZE_SMALL) & (HV_L2_ENTRIES - 1))
+#define _HV_L2_INDEX(va, log2_page_size_large, log2_page_size_small) \
+  (((HV_VirtAddr)(va) >> log2_page_size_small) & \
+   (_HV_L2_ENTRIES(log2_page_size_large, log2_page_size_small) - 1))
 
 #endif /* __ASSEMBLER __ */
 
-#endif /* _TILE_HV_H */
+/** Position of the PFN field within the PTE (subset of the PTFN). */
+#define _HV_PTE_INDEX_PFN(log2_page_size) \
+  (HV_PTE_INDEX_PTFN + (log2_page_size - HV_LOG2_PAGE_TABLE_ALIGN))
+
+/** Length of the PFN field within the PTE (subset of the PTFN). */
+#define _HV_PTE_INDEX_PFN_BITS(log2_page_size) \
+  (HV_PTE_INDEX_PTFN_BITS - (log2_page_size - HV_LOG2_PAGE_TABLE_ALIGN))
+
+/** Converts a client physical address to a pfn. */
+#define _HV_CPA_TO_PFN(p, log2_page_size) ((p) >> log2_page_size)
+
+/** Converts a pfn to a client physical address. */
+#define _HV_PFN_TO_CPA(p, log2_page_size) \
+  (((HV_PhysAddr)(p)) << log2_page_size)
+
+/** Converts a ptfn to a pfn. */
+#define _HV_PTFN_TO_PFN(p, log2_page_size) \
+  ((p) >> (log2_page_size - HV_LOG2_PAGE_TABLE_ALIGN))
+
+/** Converts a pfn to a ptfn. */
+#define _HV_PFN_TO_PTFN(p, log2_page_size) \
+  ((p) << (log2_page_size - HV_LOG2_PAGE_TABLE_ALIGN))
+
+#endif /* _HV_HV_H */