Diffstat (limited to 'arch/tile/include/asm')
-rw-r--r--   arch/tile/include/asm/Kbuild            1
-rw-r--r--   arch/tile/include/asm/atomic.h          2
-rw-r--r--   arch/tile/include/asm/bitops.h          3
-rw-r--r--   arch/tile/include/asm/bitops_32.h       2
-rw-r--r--   arch/tile/include/asm/cache.h           2
-rw-r--r--   arch/tile/include/asm/cacheflush.h     55
-rw-r--r--   arch/tile/include/asm/edac.h           29
-rw-r--r--   arch/tile/include/asm/futex.h          27
-rw-r--r--   arch/tile/include/asm/hugetlb.h         2
-rw-r--r--   arch/tile/include/asm/irqflags.h       18
-rw-r--r--   arch/tile/include/asm/page.h           34
-rw-r--r--   arch/tile/include/asm/pgalloc.h         7
-rw-r--r--   arch/tile/include/asm/pgtable.h        31
-rw-r--r--   arch/tile/include/asm/pgtable_32.h      8
-rw-r--r--   arch/tile/include/asm/processor.h       1
-rw-r--r--   arch/tile/include/asm/ptrace.h          3
-rw-r--r--   arch/tile/include/asm/spinlock_32.h    83
-rw-r--r--   arch/tile/include/asm/stack.h           3
-rw-r--r--   arch/tile/include/asm/system.h         19
-rw-r--r--   arch/tile/include/asm/thread_info.h     3
-rw-r--r--   arch/tile/include/asm/timex.h           3
21 files changed, 141 insertions, 195 deletions
diff --git a/arch/tile/include/asm/Kbuild b/arch/tile/include/asm/Kbuild index 3b8f55b82de..849ab2fa1f5 100644 --- a/arch/tile/include/asm/Kbuild +++ b/arch/tile/include/asm/Kbuild @@ -1,3 +1,4 @@ include include/asm-generic/Kbuild.asm header-y += ucontext.h +header-y += hardwall.h diff --git a/arch/tile/include/asm/atomic.h b/arch/tile/include/asm/atomic.h index b8c49f98a44..75a16028a95 100644 --- a/arch/tile/include/asm/atomic.h +++ b/arch/tile/include/asm/atomic.h @@ -32,7 +32,7 @@ */ static inline int atomic_read(const atomic_t *v) { - return v->counter; + return ACCESS_ONCE(v->counter); } /** diff --git a/arch/tile/include/asm/bitops.h b/arch/tile/include/asm/bitops.h index 6d4f0ff2c68..132e6bbd07e 100644 --- a/arch/tile/include/asm/bitops.h +++ b/arch/tile/include/asm/bitops.h @@ -122,7 +122,6 @@ static inline unsigned long __arch_hweight64(__u64 w) #include <asm-generic/bitops/lock.h> #include <asm-generic/bitops/find.h> #include <asm-generic/bitops/sched.h> -#include <asm-generic/bitops/ext2-non-atomic.h> -#include <asm-generic/bitops/minix.h> +#include <asm-generic/bitops/le.h> #endif /* _ASM_TILE_BITOPS_H */ diff --git a/arch/tile/include/asm/bitops_32.h b/arch/tile/include/asm/bitops_32.h index 7a93c001ac1..2638be51a16 100644 --- a/arch/tile/include/asm/bitops_32.h +++ b/arch/tile/include/asm/bitops_32.h @@ -122,7 +122,7 @@ static inline int test_and_change_bit(unsigned nr, return (_atomic_xor(addr, mask) & mask) != 0; } -/* See discussion at smp_mb__before_atomic_dec() in <asm/atomic.h>. */ +/* See discussion at smp_mb__before_atomic_dec() in <asm/atomic_32.h>. */ #define smp_mb__before_clear_bit() smp_mb() #define smp_mb__after_clear_bit() do {} while (0) diff --git a/arch/tile/include/asm/cache.h b/arch/tile/include/asm/cache.h index 08a2815b5e4..392e5333dd8 100644 --- a/arch/tile/include/asm/cache.h +++ b/arch/tile/include/asm/cache.h @@ -40,7 +40,7 @@ #define INTERNODE_CACHE_BYTES L2_CACHE_BYTES /* Group together read-mostly things to avoid cache false sharing */ -#define __read_mostly __attribute__((__section__(".data.read_mostly"))) +#define __read_mostly __attribute__((__section__(".data..read_mostly"))) /* * Attribute for data that is kept read/write coherent until the end of diff --git a/arch/tile/include/asm/cacheflush.h b/arch/tile/include/asm/cacheflush.h index 14a3f8556ac..12fb0fb330e 100644 --- a/arch/tile/include/asm/cacheflush.h +++ b/arch/tile/include/asm/cacheflush.h @@ -138,55 +138,12 @@ static inline void finv_buffer(void *buffer, size_t size) } /* - * Flush & invalidate a VA range that is homed remotely on a single core, - * waiting until the memory controller holds the flushed values. + * Flush and invalidate a VA range that is homed remotely, waiting + * until the memory controller holds the flushed values. If "hfh" is + * true, we will do a more expensive flush involving additional loads + * to make sure we have touched all the possible home cpus of a buffer + * that is homed with "hash for home". */ -static inline void finv_buffer_remote(void *buffer, size_t size) -{ - char *p; - int i; - - /* - * Flush and invalidate the buffer out of the local L1/L2 - * and request the home cache to flush and invalidate as well. - */ - __finv_buffer(buffer, size); - - /* - * Wait for the home cache to acknowledge that it has processed - * all the flush-and-invalidate requests. This does not mean - * that the flushed data has reached the memory controller yet, - * but it does mean the home cache is processing the flushes. 
- */ - __insn_mf(); - - /* - * Issue a load to the last cache line, which can't complete - * until all the previously-issued flushes to the same memory - * controller have also completed. If we weren't striping - * memory, that one load would be sufficient, but since we may - * be, we also need to back up to the last load issued to - * another memory controller, which would be the point where - * we crossed an 8KB boundary (the granularity of striping - * across memory controllers). Keep backing up and doing this - * until we are before the beginning of the buffer, or have - * hit all the controllers. - */ - for (i = 0, p = (char *)buffer + size - 1; - i < (1 << CHIP_LOG_NUM_MSHIMS()) && p >= (char *)buffer; - ++i) { - const unsigned long STRIPE_WIDTH = 8192; - - /* Force a load instruction to issue. */ - *(volatile char *)p; - - /* Jump to end of previous stripe. */ - p -= STRIPE_WIDTH; - p = (char *)((unsigned long)p | (STRIPE_WIDTH - 1)); - } - - /* Wait for the loads (and thus flushes) to have completed. */ - __insn_mf(); -} +void finv_buffer_remote(void *buffer, size_t size, int hfh); #endif /* _ASM_TILE_CACHEFLUSH_H */ diff --git a/arch/tile/include/asm/edac.h b/arch/tile/include/asm/edac.h new file mode 100644 index 00000000000..87fc83eeaff --- /dev/null +++ b/arch/tile/include/asm/edac.h @@ -0,0 +1,29 @@ +/* + * Copyright 2011 Tilera Corporation. All Rights Reserved. + * + * This program is free software; you can redistribute it and/or + * modify it under the terms of the GNU General Public License + * as published by the Free Software Foundation, version 2. + * + * This program is distributed in the hope that it will be useful, but + * WITHOUT ANY WARRANTY; without even the implied warranty of + * MERCHANTABILITY OR FITNESS FOR A PARTICULAR PURPOSE, GOOD TITLE or + * NON INFRINGEMENT. See the GNU General Public License for + * more details. + */ + +#ifndef _ASM_TILE_EDAC_H +#define _ASM_TILE_EDAC_H + +/* ECC atomic, DMA, SMP and interrupt safe scrub function */ + +static inline void atomic_scrub(void *va, u32 size) +{ + /* + * These is nothing to be done here because CE is + * corrected by the mshim. 
+ */ + return; +} + +#endif /* _ASM_TILE_EDAC_H */ diff --git a/arch/tile/include/asm/futex.h b/arch/tile/include/asm/futex.h index fe0d10dcae5..d03ec124a59 100644 --- a/arch/tile/include/asm/futex.h +++ b/arch/tile/include/asm/futex.h @@ -29,16 +29,16 @@ #include <linux/uaccess.h> #include <linux/errno.h> -extern struct __get_user futex_set(int __user *v, int i); -extern struct __get_user futex_add(int __user *v, int n); -extern struct __get_user futex_or(int __user *v, int n); -extern struct __get_user futex_andn(int __user *v, int n); -extern struct __get_user futex_cmpxchg(int __user *v, int o, int n); +extern struct __get_user futex_set(u32 __user *v, int i); +extern struct __get_user futex_add(u32 __user *v, int n); +extern struct __get_user futex_or(u32 __user *v, int n); +extern struct __get_user futex_andn(u32 __user *v, int n); +extern struct __get_user futex_cmpxchg(u32 __user *v, int o, int n); #ifndef __tilegx__ -extern struct __get_user futex_xor(int __user *v, int n); +extern struct __get_user futex_xor(u32 __user *v, int n); #else -static inline struct __get_user futex_xor(int __user *uaddr, int n) +static inline struct __get_user futex_xor(u32 __user *uaddr, int n) { struct __get_user asm_ret = __get_user_4(uaddr); if (!asm_ret.err) { @@ -53,7 +53,7 @@ static inline struct __get_user futex_xor(int __user *uaddr, int n) } #endif -static inline int futex_atomic_op_inuser(int encoded_op, int __user *uaddr) +static inline int futex_atomic_op_inuser(int encoded_op, u32 __user *uaddr) { int op = (encoded_op >> 28) & 7; int cmp = (encoded_op >> 24) & 15; @@ -65,7 +65,7 @@ static inline int futex_atomic_op_inuser(int encoded_op, int __user *uaddr) if (encoded_op & (FUTEX_OP_OPARG_SHIFT << 28)) oparg = 1 << oparg; - if (!access_ok(VERIFY_WRITE, uaddr, sizeof(int))) + if (!access_ok(VERIFY_WRITE, uaddr, sizeof(u32))) return -EFAULT; pagefault_disable(); @@ -119,16 +119,17 @@ static inline int futex_atomic_op_inuser(int encoded_op, int __user *uaddr) return ret; } -static inline int futex_atomic_cmpxchg_inatomic(int __user *uaddr, int oldval, - int newval) +static inline int futex_atomic_cmpxchg_inatomic(u32 *uval, u32 __user *uaddr, + u32 oldval, u32 newval) { struct __get_user asm_ret; - if (!access_ok(VERIFY_WRITE, uaddr, sizeof(int))) + if (!access_ok(VERIFY_WRITE, uaddr, sizeof(u32))) return -EFAULT; asm_ret = futex_cmpxchg(uaddr, oldval, newval); - return asm_ret.err ? asm_ret.err : asm_ret.val; + *uval = asm_ret.val; + return asm_ret.err; } #ifndef __tilegx__ diff --git a/arch/tile/include/asm/hugetlb.h b/arch/tile/include/asm/hugetlb.h index 0521c277bbd..d396d180516 100644 --- a/arch/tile/include/asm/hugetlb.h +++ b/arch/tile/include/asm/hugetlb.h @@ -54,7 +54,7 @@ static inline void hugetlb_free_pgd_range(struct mmu_gather *tlb, static inline void set_huge_pte_at(struct mm_struct *mm, unsigned long addr, pte_t *ptep, pte_t pte) { - set_pte_order(ptep, pte, HUGETLB_PAGE_ORDER); + set_pte(ptep, pte); } static inline pte_t huge_ptep_get_and_clear(struct mm_struct *mm, diff --git a/arch/tile/include/asm/irqflags.h b/arch/tile/include/asm/irqflags.h index 641e4ff3d80..5db0ce54284 100644 --- a/arch/tile/include/asm/irqflags.h +++ b/arch/tile/include/asm/irqflags.h @@ -18,6 +18,8 @@ #include <arch/interrupts.h> #include <arch/chip.h> +#if !defined(__tilegx__) && defined(__ASSEMBLY__) + /* * The set of interrupts we want to allow when interrupts are nominally * disabled. 
The remainder are effectively "NMI" interrupts from @@ -25,6 +27,16 @@ * interrupts (aka "non-queued") are not blocked by the mask in any case. */ #if CHIP_HAS_AUX_PERF_COUNTERS() +#define LINUX_MASKABLE_INTERRUPTS_HI \ + (~(INT_MASK_HI(INT_PERF_COUNT) | INT_MASK_HI(INT_AUX_PERF_COUNT))) +#else +#define LINUX_MASKABLE_INTERRUPTS_HI \ + (~(INT_MASK_HI(INT_PERF_COUNT))) +#endif + +#else + +#if CHIP_HAS_AUX_PERF_COUNTERS() #define LINUX_MASKABLE_INTERRUPTS \ (~(INT_MASK(INT_PERF_COUNT) | INT_MASK(INT_AUX_PERF_COUNT))) #else @@ -32,6 +44,8 @@ (~(INT_MASK(INT_PERF_COUNT))) #endif +#endif + #ifndef __ASSEMBLY__ /* NOTE: we can't include <linux/percpu.h> due to #include dependencies. */ @@ -224,11 +238,11 @@ DECLARE_PER_CPU(unsigned long long, interrupts_enabled_mask); #define IRQ_DISABLE(tmp0, tmp1) \ { \ movei tmp0, -1; \ - moveli tmp1, lo16(LINUX_MASKABLE_INTERRUPTS) \ + moveli tmp1, lo16(LINUX_MASKABLE_INTERRUPTS_HI) \ }; \ { \ mtspr SPR_INTERRUPT_MASK_SET_K_0, tmp0; \ - auli tmp1, tmp1, ha16(LINUX_MASKABLE_INTERRUPTS) \ + auli tmp1, tmp1, ha16(LINUX_MASKABLE_INTERRUPTS_HI) \ }; \ mtspr SPR_INTERRUPT_MASK_SET_K_1, tmp1 diff --git a/arch/tile/include/asm/page.h b/arch/tile/include/asm/page.h index 7979a45430d..3eb53525bf9 100644 --- a/arch/tile/include/asm/page.h +++ b/arch/tile/include/asm/page.h @@ -16,10 +16,11 @@ #define _ASM_TILE_PAGE_H #include <linux/const.h> +#include <hv/pagesize.h> /* PAGE_SHIFT and HPAGE_SHIFT determine the page sizes. */ -#define PAGE_SHIFT 16 -#define HPAGE_SHIFT 24 +#define PAGE_SHIFT HV_LOG2_PAGE_SIZE_SMALL +#define HPAGE_SHIFT HV_LOG2_PAGE_SIZE_LARGE #define PAGE_SIZE (_AC(1, UL) << PAGE_SHIFT) #define HPAGE_SIZE (_AC(1, UL) << HPAGE_SHIFT) @@ -29,25 +30,18 @@ #ifdef __KERNEL__ -#include <hv/hypervisor.h> -#include <arch/chip.h> - /* - * The {,H}PAGE_SHIFT values must match the HV_LOG2_PAGE_SIZE_xxx - * definitions in <hv/hypervisor.h>. We validate this at build time - * here, and again at runtime during early boot. We provide a - * separate definition since userspace doesn't have <hv/hypervisor.h>. - * - * Be careful to distinguish PAGE_SHIFT from HV_PTE_INDEX_PFN, since - * they are the same on i386 but not TILE. + * If the Kconfig doesn't specify, set a maximum zone order that + * is enough so that we can create huge pages from small pages given + * the respective sizes of the two page types. See <linux/mmzone.h>. */ -#if HV_LOG2_PAGE_SIZE_SMALL != PAGE_SHIFT -# error Small page size mismatch in Linux -#endif -#if HV_LOG2_PAGE_SIZE_LARGE != HPAGE_SHIFT -# error Huge page size mismatch in Linux +#ifndef CONFIG_FORCE_MAX_ZONEORDER +#define CONFIG_FORCE_MAX_ZONEORDER (HPAGE_SHIFT - PAGE_SHIFT + 1) #endif +#include <hv/hypervisor.h> +#include <arch/chip.h> + #ifndef __ASSEMBLY__ #include <linux/types.h> @@ -81,12 +75,6 @@ static inline void copy_user_page(void *to, void *from, unsigned long vaddr, * Hypervisor page tables are made of the same basic structure. 
*/ -typedef __u64 pteval_t; -typedef __u64 pmdval_t; -typedef __u64 pudval_t; -typedef __u64 pgdval_t; -typedef __u64 pgprotval_t; - typedef HV_PTE pte_t; typedef HV_PTE pgd_t; typedef HV_PTE pgprot_t; diff --git a/arch/tile/include/asm/pgalloc.h b/arch/tile/include/asm/pgalloc.h index cf52791a550..e919c0bdc22 100644 --- a/arch/tile/include/asm/pgalloc.h +++ b/arch/tile/include/asm/pgalloc.h @@ -41,9 +41,9 @@ static inline void set_pmd(pmd_t *pmdp, pmd_t pmd) { #ifdef CONFIG_64BIT - set_pte_order(pmdp, pmd, L2_USER_PGTABLE_ORDER); + set_pte(pmdp, pmd); #else - set_pte_order(&pmdp->pud.pgd, pmd.pud.pgd, L2_USER_PGTABLE_ORDER); + set_pte(&pmdp->pud.pgd, pmd.pud.pgd); #endif } @@ -100,6 +100,9 @@ pte_t *get_prealloc_pte(unsigned long pfn); /* During init, we can shatter kernel huge pages if needed. */ void shatter_pmd(pmd_t *pmd); +/* After init, a more complex technique is required. */ +void shatter_huge_page(unsigned long addr); + #ifdef __tilegx__ /* We share a single page allocator for both L1 and L2 page tables. */ #if HV_L1_SIZE != HV_L2_SIZE diff --git a/arch/tile/include/asm/pgtable.h b/arch/tile/include/asm/pgtable.h index a6604e9485d..1a20b7ef8ea 100644 --- a/arch/tile/include/asm/pgtable.h +++ b/arch/tile/include/asm/pgtable.h @@ -233,15 +233,23 @@ static inline void __pte_clear(pte_t *ptep) #define pgd_ERROR(e) \ pr_err("%s:%d: bad pgd 0x%016llx.\n", __FILE__, __LINE__, pgd_val(e)) +/* Return PA and protection info for a given kernel VA. */ +int va_to_cpa_and_pte(void *va, phys_addr_t *cpa, pte_t *pte); + +/* + * __set_pte() ensures we write the 64-bit PTE with 32-bit words in + * the right order on 32-bit platforms and also allows us to write + * hooks to check valid PTEs, etc., if we want. + */ +void __set_pte(pte_t *ptep, pte_t pte); + /* - * set_pte_order() sets the given PTE and also sanity-checks the + * set_pte() sets the given PTE and also sanity-checks the * requested PTE against the page homecaching. Unspecified parts * of the PTE are filled in when it is written to memory, i.e. all * caching attributes if "!forcecache", or the home cpu if "anyhome". */ -extern void set_pte_order(pte_t *ptep, pte_t pte, int order); - -#define set_pte(ptep, pteval) set_pte_order(ptep, pteval, 0) +extern void set_pte(pte_t *ptep, pte_t pte); #define set_pte_at(mm, addr, ptep, pteval) set_pte(ptep, pteval) #define set_pte_atomic(pteptr, pteval) set_pte(pteptr, pteval) @@ -293,21 +301,6 @@ extern void check_mm_caching(struct mm_struct *prev, struct mm_struct *next); #define __swp_entry_to_pte(swp) ((pte_t) { (((long long) ((swp).val)) << 32) }) /* - * clone_pgd_range(pgd_t *dst, pgd_t *src, int count); - * - * dst - pointer to pgd range anwhere on a pgd page - * src - "" - * count - the number of pgds to copy. - * - * dst and src can be on the same page, but the range must not overlap, - * and must not cross a page boundary. - */ -static inline void clone_pgd_range(pgd_t *dst, pgd_t *src, int count) -{ - memcpy(dst, src, count * sizeof(pgd_t)); -} - -/* * Conversion functions: convert a page and protection to a page entry, * and a page entry and page directory to the page they refer to. 
*/ diff --git a/arch/tile/include/asm/pgtable_32.h b/arch/tile/include/asm/pgtable_32.h index 53ec3488474..9f98529761f 100644 --- a/arch/tile/include/asm/pgtable_32.h +++ b/arch/tile/include/asm/pgtable_32.h @@ -24,6 +24,7 @@ #define PGDIR_SIZE HV_PAGE_SIZE_LARGE #define PGDIR_MASK (~(PGDIR_SIZE-1)) #define PTRS_PER_PGD (1 << (32 - PGDIR_SHIFT)) +#define SIZEOF_PGD (PTRS_PER_PGD * sizeof(pgd_t)) /* * The level-2 index is defined by the difference between the huge @@ -33,6 +34,7 @@ * this nomenclature is somewhat confusing. */ #define PTRS_PER_PTE (1 << (HV_LOG2_PAGE_SIZE_LARGE - HV_LOG2_PAGE_SIZE_SMALL)) +#define SIZEOF_PTE (PTRS_PER_PTE * sizeof(pte_t)) #ifndef __ASSEMBLY__ @@ -94,7 +96,6 @@ static inline int pgd_addr_invalid(unsigned long addr) */ #define __HAVE_ARCH_PTEP_TEST_AND_CLEAR_YOUNG #define __HAVE_ARCH_PTEP_SET_WRPROTECT -#define __HAVE_ARCH_PTEP_GET_AND_CLEAR extern int ptep_test_and_clear_young(struct vm_area_struct *, unsigned long addr, pte_t *); @@ -110,6 +111,11 @@ static inline pte_t ptep_get_and_clear(struct mm_struct *mm, return pte; } +static inline void __set_pmd(pmd_t *pmdp, pmd_t pmdval) +{ + set_pte(&pmdp->pud.pgd, pmdval.pud.pgd); +} + /* Create a pmd from a PTFN. */ static inline pmd_t ptfn_pmd(unsigned long ptfn, pgprot_t prot) { diff --git a/arch/tile/include/asm/processor.h b/arch/tile/include/asm/processor.h index a9e7c876033..e6889474038 100644 --- a/arch/tile/include/asm/processor.h +++ b/arch/tile/include/asm/processor.h @@ -269,7 +269,6 @@ extern char chip_model[64]; /* Data on which physical memory controller corresponds to which NUMA node. */ extern int node_controller[]; - /* Do we dump information to the console when a user application crashes? */ extern int show_crashinfo; diff --git a/arch/tile/include/asm/ptrace.h b/arch/tile/include/asm/ptrace.h index ac6d343129d..6be2246e015 100644 --- a/arch/tile/include/asm/ptrace.h +++ b/arch/tile/include/asm/ptrace.h @@ -141,6 +141,9 @@ struct single_step_state { /* Single-step the instruction at regs->pc */ extern void single_step_once(struct pt_regs *regs); +/* Clean up after execve(). */ +extern void single_step_execve(void); + struct task_struct; extern void send_sigtrap(struct task_struct *tsk, struct pt_regs *regs, diff --git a/arch/tile/include/asm/spinlock_32.h b/arch/tile/include/asm/spinlock_32.h index 88efdde8dd2..a8f2c6e31a8 100644 --- a/arch/tile/include/asm/spinlock_32.h +++ b/arch/tile/include/asm/spinlock_32.h @@ -78,13 +78,6 @@ void arch_spin_unlock_wait(arch_spinlock_t *lock); #define _RD_COUNT_SHIFT 24 #define _RD_COUNT_WIDTH 8 -/* Internal functions; do not use. */ -void arch_read_lock_slow(arch_rwlock_t *, u32); -int arch_read_trylock_slow(arch_rwlock_t *); -void arch_read_unlock_slow(arch_rwlock_t *); -void arch_write_lock_slow(arch_rwlock_t *, u32); -void arch_write_unlock_slow(arch_rwlock_t *, u32); - /** * arch_read_can_lock() - would read_trylock() succeed? */ @@ -104,94 +97,32 @@ static inline int arch_write_can_lock(arch_rwlock_t *rwlock) /** * arch_read_lock() - acquire a read lock. */ -static inline void arch_read_lock(arch_rwlock_t *rwlock) -{ - u32 val = __insn_tns((int *)&rwlock->lock); - if (unlikely(val << _RD_COUNT_WIDTH)) { - arch_read_lock_slow(rwlock, val); - return; - } - rwlock->lock = val + (1 << _RD_COUNT_SHIFT); -} +void arch_read_lock(arch_rwlock_t *rwlock); /** - * arch_read_lock() - acquire a write lock. + * arch_write_lock() - acquire a write lock. 
*/ -static inline void arch_write_lock(arch_rwlock_t *rwlock) -{ - u32 val = __insn_tns((int *)&rwlock->lock); - if (unlikely(val != 0)) { - arch_write_lock_slow(rwlock, val); - return; - } - rwlock->lock = 1 << _WR_NEXT_SHIFT; -} +void arch_write_lock(arch_rwlock_t *rwlock); /** * arch_read_trylock() - try to acquire a read lock. */ -static inline int arch_read_trylock(arch_rwlock_t *rwlock) -{ - int locked; - u32 val = __insn_tns((int *)&rwlock->lock); - if (unlikely(val & 1)) - return arch_read_trylock_slow(rwlock); - locked = (val << _RD_COUNT_WIDTH) == 0; - rwlock->lock = val + (locked << _RD_COUNT_SHIFT); - return locked; -} +int arch_read_trylock(arch_rwlock_t *rwlock); /** * arch_write_trylock() - try to acquire a write lock. */ -static inline int arch_write_trylock(arch_rwlock_t *rwlock) -{ - u32 val = __insn_tns((int *)&rwlock->lock); - - /* - * If a tns is in progress, or there's a waiting or active locker, - * or active readers, we can't take the lock, so give up. - */ - if (unlikely(val != 0)) { - if (!(val & 1)) - rwlock->lock = val; - return 0; - } - - /* Set the "next" field to mark it locked. */ - rwlock->lock = 1 << _WR_NEXT_SHIFT; - return 1; -} +int arch_write_trylock(arch_rwlock_t *rwlock); /** * arch_read_unlock() - release a read lock. */ -static inline void arch_read_unlock(arch_rwlock_t *rwlock) -{ - u32 val; - mb(); /* guarantee anything modified under the lock is visible */ - val = __insn_tns((int *)&rwlock->lock); - if (unlikely(val & 1)) { - arch_read_unlock_slow(rwlock); - return; - } - rwlock->lock = val - (1 << _RD_COUNT_SHIFT); -} +void arch_read_unlock(arch_rwlock_t *rwlock); /** * arch_write_unlock() - release a write lock. */ -static inline void arch_write_unlock(arch_rwlock_t *rwlock) -{ - u32 val; - mb(); /* guarantee anything modified under the lock is visible */ - val = __insn_tns((int *)&rwlock->lock); - if (unlikely(val != (1 << _WR_NEXT_SHIFT))) { - arch_write_unlock_slow(rwlock, val); - return; - } - rwlock->lock = 0; -} +void arch_write_unlock(arch_rwlock_t *rwlock); #define arch_read_lock_flags(lock, flags) arch_read_lock(lock) #define arch_write_lock_flags(lock, flags) arch_write_lock(lock) diff --git a/arch/tile/include/asm/stack.h b/arch/tile/include/asm/stack.h index f908473c322..4d97a2db932 100644 --- a/arch/tile/include/asm/stack.h +++ b/arch/tile/include/asm/stack.h @@ -18,13 +18,14 @@ #include <linux/types.h> #include <linux/sched.h> #include <asm/backtrace.h> +#include <asm/page.h> #include <hv/hypervisor.h> /* Everything we need to keep track of a backtrace iteration */ struct KBacktraceIterator { BacktraceIterator it; struct task_struct *task; /* task we are backtracing */ - HV_PTE *pgtable; /* page table for user space access */ + pte_t *pgtable; /* page table for user space access */ int end; /* iteration complete. */ int new_context; /* new context is starting */ int profile; /* profiling, so stop on async intrpt */ diff --git a/arch/tile/include/asm/system.h b/arch/tile/include/asm/system.h index 5388850deeb..23d1842f483 100644 --- a/arch/tile/include/asm/system.h +++ b/arch/tile/include/asm/system.h @@ -90,7 +90,24 @@ #endif #if !CHIP_HAS_MF_WAITS_FOR_VICTIMS() -int __mb_incoherent(void); /* Helper routine for mb_incoherent(). */ +#include <hv/syscall_public.h> +/* + * Issue an uncacheable load to each memory controller, then + * wait until those loads have completed. 
+ */ +static inline void __mb_incoherent(void) +{ + long clobber_r10; + asm volatile("swint2" + : "=R10" (clobber_r10) + : "R10" (HV_SYS_fence_incoherent) + : "r0", "r1", "r2", "r3", "r4", + "r5", "r6", "r7", "r8", "r9", + "r11", "r12", "r13", "r14", + "r15", "r16", "r17", "r18", "r19", + "r20", "r21", "r22", "r23", "r24", + "r25", "r26", "r27", "r28", "r29"); +} #endif /* Fence to guarantee visibility of stores to incoherent memory. */ diff --git a/arch/tile/include/asm/thread_info.h b/arch/tile/include/asm/thread_info.h index 3872f2b345d..3405b52853b 100644 --- a/arch/tile/include/asm/thread_info.h +++ b/arch/tile/include/asm/thread_info.h @@ -68,6 +68,7 @@ struct thread_info { #else #define THREAD_SIZE_ORDER (0) #endif +#define THREAD_SIZE_PAGES (1 << THREAD_SIZE_ORDER) #define THREAD_SIZE (PAGE_SIZE << THREAD_SIZE_ORDER) #define LOG2_THREAD_SIZE (PAGE_SHIFT + THREAD_SIZE_ORDER) @@ -83,7 +84,7 @@ register unsigned long stack_pointer __asm__("sp"); ((struct thread_info *)(stack_pointer & -THREAD_SIZE)) #define __HAVE_ARCH_THREAD_INFO_ALLOCATOR -extern struct thread_info *alloc_thread_info(struct task_struct *task); +extern struct thread_info *alloc_thread_info_node(struct task_struct *task, int node); extern void free_thread_info(struct thread_info *info); /* Sit on a nap instruction until interrupted. */ diff --git a/arch/tile/include/asm/timex.h b/arch/tile/include/asm/timex.h index 3baf5fc4c0a..29921f0b86d 100644 --- a/arch/tile/include/asm/timex.h +++ b/arch/tile/include/asm/timex.h @@ -38,6 +38,9 @@ static inline cycles_t get_cycles(void) cycles_t get_clock_rate(void); +/* Convert nanoseconds to core clock cycles. */ +cycles_t ns2cycles(unsigned long nsecs); + /* Called at cpu initialization to set some low-level constants. */ void setup_clock(void); |
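
For context on the cacheflush.h change above: the inline finv_buffer_remote() is replaced by an out-of-line function that takes an extra "hfh" argument for hash-for-home buffers. A minimal caller sketch follows; the function and buffer names are hypothetical and not part of the patch, only the finv_buffer_remote() prototype comes from the diff.

#include <linux/types.h>
#include <asm/cacheflush.h>

/*
 * Hypothetical example: push a remotely-homed buffer out to memory
 * before handing it to a device.  Pass nonzero "hash_for_home" when
 * the buffer is homed with hash-for-home, so the more expensive flush
 * that touches all possible home cpus is used (per the new comment in
 * cacheflush.h).
 */
static void example_flush_for_device(void *buf, size_t len, int hash_for_home)
{
	finv_buffer_remote(buf, len, hash_for_home);
}

Moving the body out of line keeps the memory-controller striping and hash-for-home details in one implementation rather than expanded into every caller.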