Diffstat (limited to 'arch/tile/include/asm')
-rw-r--r--  arch/tile/include/asm/atomic_32.h    | 37
-rw-r--r--  arch/tile/include/asm/backtrace.h    |  4
-rw-r--r--  arch/tile/include/asm/bitops.h       |  9
-rw-r--r--  arch/tile/include/asm/cache.h        |  5
-rw-r--r--  arch/tile/include/asm/highmem.h      |  2
-rw-r--r--  arch/tile/include/asm/page.h         |  6
-rw-r--r--  arch/tile/include/asm/scatterlist.h  | 21
-rw-r--r--  arch/tile/include/asm/setup.h        |  8
-rw-r--r--  arch/tile/include/asm/siginfo.h      |  4
-rw-r--r--  arch/tile/include/asm/uaccess.h      |  4
10 files changed, 28 insertions(+), 72 deletions(-)
diff --git a/arch/tile/include/asm/atomic_32.h b/arch/tile/include/asm/atomic_32.h
index 40a5a3a876d..ed359aee883 100644
--- a/arch/tile/include/asm/atomic_32.h
+++ b/arch/tile/include/asm/atomic_32.h
@@ -255,43 +255,6 @@ static inline void atomic64_set(atomic64_t *v, u64 n)
#define smp_mb__after_atomic_dec() do { } while (0)
#define smp_mb__after_atomic_inc() do { } while (0)
-
-/*
- * Support "tns" atomic integers. These are atomic integers that can
- * hold any value but "1". They are more efficient than regular atomic
- * operations because the "lock" (aka acquire) step is a single "tns"
- * in the uncontended case, and the "unlock" (aka release) step is a
- * single "store" without an mf. (However, note that on tilepro the
- * "tns" will evict the local cache line, so it's not all upside.)
- *
- * Note that you can ONLY observe the value stored in the pointer
- * using these operations; a direct read of the value may confusingly
- * return the special value "1".
- */
-
-int __tns_atomic_acquire(atomic_t *);
-void __tns_atomic_release(atomic_t *p, int v);
-
-static inline void tns_atomic_set(atomic_t *v, int i)
-{
- __tns_atomic_acquire(v);
- __tns_atomic_release(v, i);
-}
-
-static inline int tns_atomic_cmpxchg(atomic_t *v, int o, int n)
-{
- int ret = __tns_atomic_acquire(v);
- __tns_atomic_release(v, (ret == o) ? n : ret);
- return ret;
-}
-
-static inline int tns_atomic_xchg(atomic_t *v, int n)
-{
- int ret = __tns_atomic_acquire(v);
- __tns_atomic_release(v, n);
- return ret;
-}
-
#endif /* !__ASSEMBLY__ */
/*
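
The comment removed above explains the "tns" scheme: acquire is a single test-and-set ("tns") that writes the reserved value 1, and release is a plain store of the real value. A conceptual sketch of that protocol in plain GCC builtins (illustrative only; the kernel's __tns_atomic_acquire/__tns_atomic_release were out-of-line routines, and the spin details here are assumptions):

static int tns_acquire(volatile int *p)
{
	int old;

	/* atomically store the "busy" marker 1 and fetch the old value;
	 * reading back 1 means another cpu holds the word, so spin */
	while ((old = __sync_lock_test_and_set(p, 1)) == 1)
		;
	return old;
}

static void tns_release(volatile int *p, int v)
{
	/* per the removed comment, a plain store suffices on tile;
	 * portable code would need a release barrier here */
	*p = v;
}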
diff --git a/arch/tile/include/asm/backtrace.h b/arch/tile/include/asm/backtrace.h
index 6970bfcad54..758ca4619d5 100644
--- a/arch/tile/include/asm/backtrace.h
+++ b/arch/tile/include/asm/backtrace.h
@@ -21,7 +21,9 @@
#include <arch/chip.h>
-#if CHIP_VA_WIDTH() > 32
+#if defined(__tile__)
+typedef unsigned long VirtualAddress;
+#elif CHIP_VA_WIDTH() > 32
typedef unsigned long long VirtualAddress;
#else
typedef unsigned int VirtualAddress;
diff --git a/arch/tile/include/asm/bitops.h b/arch/tile/include/asm/bitops.h
index 84600f3514d..6832b4be899 100644
--- a/arch/tile/include/asm/bitops.h
+++ b/arch/tile/include/asm/bitops.h
@@ -98,26 +98,27 @@ static inline int fls64(__u64 w)
return (sizeof(__u64) * 8) - __builtin_clzll(w);
}
-static inline unsigned int hweight32(unsigned int w)
+static inline unsigned int __arch_hweight32(unsigned int w)
{
return __builtin_popcount(w);
}
-static inline unsigned int hweight16(unsigned int w)
+static inline unsigned int __arch_hweight16(unsigned int w)
{
return __builtin_popcount(w & 0xffff);
}
-static inline unsigned int hweight8(unsigned int w)
+static inline unsigned int __arch_hweight8(unsigned int w)
{
return __builtin_popcount(w & 0xff);
}
-static inline unsigned long hweight64(__u64 w)
+static inline unsigned long __arch_hweight64(__u64 w)
{
return __builtin_popcountll(w);
}
+#include <asm-generic/bitops/const_hweight.h>
#include <asm-generic/bitops/lock.h>
#include <asm-generic/bitops/sched.h>
#include <asm-generic/bitops/ext2-non-atomic.h>
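
Renaming hweight*() to __arch_hweight*() frees the generic names for asm-generic/bitops/const_hweight.h, which folds compile-time-constant arguments and only calls the arch popcount for runtime values. A simplified sketch of the dispatch it provides (the real header counts constant bits with a pure macro rather than __builtin_popcount):

#define hweight32(w)						\
	(__builtin_constant_p(w) ? __builtin_popcount(w)	\
				 : __arch_hweight32(w))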
diff --git a/arch/tile/include/asm/cache.h b/arch/tile/include/asm/cache.h
index f6101840c9e..08a2815b5e4 100644
--- a/arch/tile/include/asm/cache.h
+++ b/arch/tile/include/asm/cache.h
@@ -27,11 +27,10 @@
#define L2_CACHE_ALIGN(x) (((x)+(L2_CACHE_BYTES-1)) & -L2_CACHE_BYTES)
/*
- * TILE-Gx is fully coherents so we don't need to define
- * ARCH_KMALLOC_MINALIGN.
+ * TILE-Gx is fully coherent so we don't need to define ARCH_DMA_MINALIGN.
*/
#ifndef __tilegx__
-#define ARCH_KMALLOC_MINALIGN L2_CACHE_BYTES
+#define ARCH_DMA_MINALIGN L2_CACHE_BYTES
#endif
/* use the cache line size for the L2, which is where it counts */
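
The switch from ARCH_KMALLOC_MINALIGN to ARCH_DMA_MINALIGN lets the arch header state only the DMA requirement, with the slab layer deriving kmalloc's minimum alignment from it. A simplified sketch of the consuming logic, modeled on <linux/slab.h> of this era:

#ifdef ARCH_DMA_MINALIGN
/* non-coherent DMA: kmalloc buffers get a full L2 line to themselves */
#define ARCH_KMALLOC_MINALIGN	ARCH_DMA_MINALIGN
#else
#define ARCH_KMALLOC_MINALIGN	__alignof__(unsigned long long)
#endif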
diff --git a/arch/tile/include/asm/highmem.h b/arch/tile/include/asm/highmem.h
index efdd12e9102..d155db6fa9b 100644
--- a/arch/tile/include/asm/highmem.h
+++ b/arch/tile/include/asm/highmem.h
@@ -60,7 +60,7 @@ void *kmap_fix_kpte(struct page *page, int finished);
/* This macro is used only in map_new_virtual() to map "page". */
#define kmap_prot page_to_kpgprot(page)
-void kunmap_atomic(void *kvaddr, enum km_type type);
+void kunmap_atomic_notypecheck(void *kvaddr, enum km_type type);
void *kmap_atomic_pfn(unsigned long pfn, enum km_type type);
void *kmap_atomic_prot_pfn(unsigned long pfn, enum km_type type, pgprot_t prot);
struct page *kmap_atomic_to_page(void *ptr);
diff --git a/arch/tile/include/asm/page.h b/arch/tile/include/asm/page.h
index f894a9016da..7d90641cf18 100644
--- a/arch/tile/include/asm/page.h
+++ b/arch/tile/include/asm/page.h
@@ -129,6 +129,11 @@ static inline u64 pmd_val(pmd_t pmd)
#endif
+static inline __attribute_const__ int get_order(unsigned long size)
+{
+ return BITS_PER_LONG - __builtin_clzl((size - 1) >> PAGE_SHIFT);
+}
+
#endif /* !__ASSEMBLY__ */
#define HUGETLB_PAGE_ORDER (HPAGE_SHIFT - PAGE_SHIFT)
@@ -332,7 +337,6 @@ extern pte_t *virt_to_pte(struct mm_struct *mm, unsigned long addr);
(VM_READ | VM_WRITE | VM_MAYREAD | VM_MAYWRITE | VM_MAYEXEC)
#include <asm-generic/memory_model.h>
-#include <asm-generic/getorder.h>
#endif /* __KERNEL__ */
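
The new inline get_order() replaces the asm-generic/getorder.h include removed above with a branch-free count-leading-zeros computation: the allocation order is the position of the highest bit in the page-rounded size. A standalone check of the arithmetic (PAGE_SHIFT = 16 is an assumption, matching tile's 64KB pages; note that __builtin_clzl(0) is undefined in portable C, while tile's clz instruction is defined for 0, which is presumably why the one-page case needs no special handling in the kernel version):

#include <stdio.h>

#define PAGE_SHIFT	16
#define BITS_PER_LONG	64

static int get_order(unsigned long size)
{
	return BITS_PER_LONG - __builtin_clzl((size - 1) >> PAGE_SHIFT);
}

int main(void)
{
	printf("%d\n", get_order((1UL << PAGE_SHIFT) + 1));	/* 1: just over one page */
	printf("%d\n", get_order(3UL << PAGE_SHIFT));		/* 2: 3 pages round up to 4 */
	printf("%d\n", get_order(1UL << 24));			/* 8: 16MB is 256 pages */
	return 0;
}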
diff --git a/arch/tile/include/asm/scatterlist.h b/arch/tile/include/asm/scatterlist.h
index c5604242c0d..35d786fe93a 100644
--- a/arch/tile/include/asm/scatterlist.h
+++ b/arch/tile/include/asm/scatterlist.h
@@ -1,22 +1 @@
-/*
- * Copyright 2010 Tilera Corporation. All Rights Reserved.
- *
- * This program is free software; you can redistribute it and/or
- * modify it under the terms of the GNU General Public License
- * as published by the Free Software Foundation, version 2.
- *
- * This program is distributed in the hope that it will be useful, but
- * WITHOUT ANY WARRANTY; without even the implied warranty of
- * MERCHANTABILITY OR FITNESS FOR A PARTICULAR PURPOSE, GOOD TITLE or
- * NON INFRINGEMENT. See the GNU General Public License for
- * more details.
- */
-
-#ifndef _ASM_TILE_SCATTERLIST_H
-#define _ASM_TILE_SCATTERLIST_H
-
-#define ISA_DMA_THRESHOLD (~0UL)
-
#include <asm-generic/scatterlist.h>
-
-#endif /* _ASM_TILE_SCATTERLIST_H */
diff --git a/arch/tile/include/asm/setup.h b/arch/tile/include/asm/setup.h
index 823ddd47ff6..7caf0f36b03 100644
--- a/arch/tile/include/asm/setup.h
+++ b/arch/tile/include/asm/setup.h
@@ -15,6 +15,10 @@
#ifndef _ASM_TILE_SETUP_H
#define _ASM_TILE_SETUP_H
+#define COMMAND_LINE_SIZE 2048
+
+#ifdef __KERNEL__
+
#include <linux/pfn.h>
#include <linux/init.h>
@@ -23,10 +27,10 @@
*/
#define MAXMEM_PFN PFN_DOWN(MAXMEM)
-#define COMMAND_LINE_SIZE 2048
-
void early_panic(const char *fmt, ...);
void warn_early_printk(void);
void __init disable_early_printk(void);
+#endif /* __KERNEL__ */
+
#endif /* _ASM_TILE_SETUP_H */
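
Hoisting COMMAND_LINE_SIZE above the #ifdef __KERNEL__ guard makes it visible through the exported <asm/setup.h>, presumably for userspace tools that must bound the kernel command line. A hedged sketch of such a consumer (check_cmdline is a hypothetical helper and assumes the sanitized kernel headers are installed):

#include <asm/setup.h>	/* COMMAND_LINE_SIZE, now outside __KERNEL__ */
#include <string.h>

int check_cmdline(const char *cmdline)
{
	/* reject command lines the kernel could not store */
	return strlen(cmdline) + 1 <= COMMAND_LINE_SIZE;
}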
diff --git a/arch/tile/include/asm/siginfo.h b/arch/tile/include/asm/siginfo.h
index 0c12d1b9ddf..56d661bb010 100644
--- a/arch/tile/include/asm/siginfo.h
+++ b/arch/tile/include/asm/siginfo.h
@@ -17,6 +17,10 @@
#define __ARCH_SI_TRAPNO
+#ifdef __LP64__
+# define __ARCH_SI_PREAMBLE_SIZE (4 * sizeof(int))
+#endif
+
#include <asm-generic/siginfo.h>
/*
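
Defining __ARCH_SI_PREAMBLE_SIZE as 4 ints on LP64 tells asm-generic/siginfo.h how much of the 128-byte siginfo_t the header consumes before the _sifields union: 3 ints of si_signo/si_errno/si_code plus 1 int of padding, since the pointer-bearing union must start 8-byte aligned. A compile-time check of that arithmetic (si_sketch is a hypothetical mirror of the generic layout):

#include <stddef.h>

struct si_sketch {
	int si_signo;
	int si_errno;
	int si_code;
	union {
		void *ptr;	/* pointers give the union 8-byte alignment */
		int val;
	} _sifields;
};

/* 3 ints of header + 1 int of alignment padding = a 4-int preamble */
_Static_assert(offsetof(struct si_sketch, _sifields) == 4 * sizeof(int),
	       "LP64 siginfo preamble is 4 ints");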
diff --git a/arch/tile/include/asm/uaccess.h b/arch/tile/include/asm/uaccess.h
index ed17a80ec0e..ef34d2caa5b 100644
--- a/arch/tile/include/asm/uaccess.h
+++ b/arch/tile/include/asm/uaccess.h
@@ -389,14 +389,14 @@ static inline unsigned long __must_check copy_from_user(void *to,
* Returns number of bytes that could not be copied.
* On success, this will be zero.
*/
-extern unsigned long __copy_in_user_asm(
+extern unsigned long __copy_in_user_inatomic(
void __user *to, const void __user *from, unsigned long n);
static inline unsigned long __must_check
__copy_in_user(void __user *to, const void __user *from, unsigned long n)
{
might_sleep();
- return __copy_in_user_asm(to, from, n);
+ return __copy_in_user_inatomic(to, from, n);
}
static inline unsigned long __must_check
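
The renamed __copy_in_user_inatomic keeps the convention stated in the comment above: the return value is the number of bytes left uncopied, zero on success. A sketch of the usual call pattern (kernel context assumed; hypothetical_copy is illustrative, not from the patch):

static long hypothetical_copy(void __user *dst, const void __user *src,
			      unsigned long len)
{
	if (copy_in_user(dst, src, len))
		return -EFAULT;	/* some bytes could not be copied */
	return 0;
}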