Diffstat (limited to 'include/asm-ia64')
-rw-r--r--  include/asm-ia64/bitops.h      | 50
-rw-r--r--  include/asm-ia64/compat.h      |  2
-rw-r--r--  include/asm-ia64/gcc_intrin.h  |  2
-rw-r--r--  include/asm-ia64/mca.h         |  6
-rw-r--r--  include/asm-ia64/mca_asm.h     |  3
-rw-r--r--  include/asm-ia64/pgalloc.h     | 16
-rw-r--r--  include/asm-ia64/processor.h   |  5
-rw-r--r--  include/asm-ia64/sal.h         | 14
8 files changed, 50 insertions(+), 48 deletions(-)
diff --git a/include/asm-ia64/bitops.h b/include/asm-ia64/bitops.h
index a1b9719f5fb..953d3df9dd2 100644
--- a/include/asm-ia64/bitops.h
+++ b/include/asm-ia64/bitops.h
@@ -122,38 +122,40 @@ clear_bit_unlock (int nr, volatile void *addr)
}
/**
- * __clear_bit_unlock - Non-atomically clear a bit with release
+ * __clear_bit_unlock - Non-atomically clears a bit in memory with release
+ * @nr: Bit to clear
+ * @addr: Address to start counting from
*
- * This is like clear_bit_unlock, but the implementation uses a store
+ * Similarly to clear_bit_unlock, the implementation uses a store
* with release semantics. See also __raw_spin_unlock().
*/
static __inline__ void
-__clear_bit_unlock(int nr, volatile void *addr)
+__clear_bit_unlock(int nr, void *addr)
{
- __u32 mask, new;
- volatile __u32 *m;
+ __u32 * const m = (__u32 *) addr + (nr >> 5);
+ __u32 const new = *m & ~(1 << (nr & 31));
- m = (volatile __u32 *)addr + (nr >> 5);
- mask = ~(1 << (nr & 31));
- new = *m & mask;
- barrier();
ia64_st4_rel_nta(m, new);
}
/**
* __clear_bit - Clears a bit in memory (non-atomic version)
+ * @nr: the bit to clear
+ * @addr: the address to start counting from
+ *
+ * Unlike clear_bit(), this function is non-atomic and may be reordered.
+ * If it's called on the same region of memory simultaneously, the effect
+ * may be that only one operation succeeds.
*/
static __inline__ void
__clear_bit (int nr, volatile void *addr)
{
- volatile __u32 *p = (__u32 *) addr + (nr >> 5);
- __u32 m = 1 << (nr & 31);
- *p &= ~m;
+ *((__u32 *) addr + (nr >> 5)) &= ~(1 << (nr & 31));
}
/**
* change_bit - Toggle a bit in memory
- * @nr: Bit to clear
+ * @nr: Bit to toggle
* @addr: Address to start counting from
*
* change_bit() is atomic and may not be reordered.
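[Note on the hunk above] The arithmetic is worth spelling out: bit nr lives in 32-bit word nr >> 5 under mask 1 << (nr & 31), and the release store guarantees that writes made while the lock was held become visible before the bit clears. A minimal userspace sketch, with GCC's __atomic_store_n standing in for ia64_st4_rel_nta(); the bitmap and function names are illustrative only:

#include <stdint.h>
#include <stdio.h>

static uint32_t bitmap[4];                      /* 128 bits */

static void sketch_clear_bit_unlock(int nr, uint32_t *addr)
{
        uint32_t *m = addr + (nr >> 5);          /* word holding bit nr */
        uint32_t new = *m & ~(1u << (nr & 31));  /* mask the bit out */

        /* release store: earlier writes become visible first */
        __atomic_store_n(m, new, __ATOMIC_RELEASE);
}

int main(void)
{
        bitmap[2] = 1u << 7;                     /* bit 71 = word 2, bit 7 */
        sketch_clear_bit_unlock(71, bitmap);
        printf("word 2 = %#x\n", (unsigned)bitmap[2]);  /* prints 0 */
        return 0;
}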
@@ -178,7 +180,7 @@ change_bit (int nr, volatile void *addr)
/**
* __change_bit - Toggle a bit in memory
- * @nr: the bit to set
+ * @nr: the bit to toggle
* @addr: the address to start counting from
*
* Unlike change_bit(), this function is non-atomic and may be reordered.
@@ -197,7 +199,7 @@ __change_bit (int nr, volatile void *addr)
* @addr: Address to count from
*
* This operation is atomic and cannot be reordered.
- * It also implies a memory barrier.
+ * It also implies the acquisition side of the memory barrier.
*/
static __inline__ int
test_and_set_bit (int nr, volatile void *addr)
@@ -247,11 +249,11 @@ __test_and_set_bit (int nr, volatile void *addr)
/**
* test_and_clear_bit - Clear a bit and return its old value
- * @nr: Bit to set
+ * @nr: Bit to clear
* @addr: Address to count from
*
* This operation is atomic and cannot be reordered.
- * It also implies a memory barrier.
+ * It also implies the acquisition side of the memory barrier.
*/
static __inline__ int
test_and_clear_bit (int nr, volatile void *addr)
@@ -272,7 +274,7 @@ test_and_clear_bit (int nr, volatile void *addr)
/**
* __test_and_clear_bit - Clear a bit and return its old value
- * @nr: Bit to set
+ * @nr: Bit to clear
* @addr: Address to count from
*
* This operation is non-atomic and can be reordered.
@@ -292,11 +294,11 @@ __test_and_clear_bit(int nr, volatile void * addr)
/**
* test_and_change_bit - Change a bit and return its old value
- * @nr: Bit to set
+ * @nr: Bit to change
* @addr: Address to count from
*
* This operation is atomic and cannot be reordered.
- * It also implies a memory barrier.
+ * It also implies the acquisition side of the memory barrier.
*/
static __inline__ int
test_and_change_bit (int nr, volatile void *addr)
@@ -315,8 +317,12 @@ test_and_change_bit (int nr, volatile void *addr)
return (old & bit) != 0;
}
-/*
- * WARNING: non atomic version.
+/**
+ * __test_and_change_bit - Change a bit and return its old value
+ * @nr: Bit to change
+ * @addr: Address to count from
+ *
+ * This operation is non-atomic and can be reordered.
*/
static __inline__ int
__test_and_change_bit (int nr, void *addr)
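[Note on bitops.h] The atomic test_and_* routines documented above all follow the same compare-exchange-with-acquire loop (the header builds it on ia64_cmpxchg4_acq, which these hunks do not show). A portable sketch of that loop, with GCC's builtin standing in for the ia64 intrinsic and an illustrative function name:

#include <stdint.h>

static int sketch_test_and_set_bit(int nr, uint32_t *addr)
{
        uint32_t *m = addr + (nr >> 5);
        uint32_t bit = 1u << (nr & 31);
        uint32_t old = *m;

        /* retry until the unchanged word is swapped for word|bit;
         * on failure, old is reloaded with the current value */
        while (!__atomic_compare_exchange_n(m, &old, old | bit, 0,
                                            __ATOMIC_ACQUIRE,
                                            __ATOMIC_RELAXED))
                ;
        return (old & bit) != 0;        /* previous state of the bit */
}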
diff --git a/include/asm-ia64/compat.h b/include/asm-ia64/compat.h
index 0f6e5264ab8..dfcf75b8426 100644
--- a/include/asm-ia64/compat.h
+++ b/include/asm-ia64/compat.h
@@ -181,7 +181,7 @@ struct compat_shmid64_ds {
/*
* A pointer passed in from user mode. This should not be used for syscall parameters,
* just declare them as pointers because the syscall entry code will have appropriately
- * comverted them already.
+ * converted them already.
*/
typedef u32 compat_uptr_t;
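[Note on compat.h] The comment being corrected describes how compat_uptr_t is meant to be used; its usual companion is a widening helper along these lines (a sketch of the common compat_ptr() pattern, not code taken from this header):

#include <stdint.h>

typedef uint32_t compat_uptr_t;

/* widen a 32-bit user pointer; zero-extension is the usual rule */
static inline void *sketch_compat_ptr(compat_uptr_t uptr)
{
        return (void *)(unsigned long)uptr;
}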
diff --git a/include/asm-ia64/gcc_intrin.h b/include/asm-ia64/gcc_intrin.h
index 5b6665c754c..de2ed2cbdd8 100644
--- a/include/asm-ia64/gcc_intrin.h
+++ b/include/asm-ia64/gcc_intrin.h
@@ -24,7 +24,9 @@
extern void ia64_bad_param_for_setreg (void);
extern void ia64_bad_param_for_getreg (void);
+#ifdef __KERNEL__
register unsigned long ia64_r13 asm ("r13") __used;
+#endif
#define ia64_setreg(regnum, val) \
({ \
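[Note on gcc_intrin.h] The new #ifdef matters because a global register variable reserves that register in every translation unit that includes the header; userspace builds must not lose r13, which the ia64 kernel dedicates to the current task pointer. A sketch of the construct, compiling only when targeting ia64, with an illustrative variable name:

/* pin a C variable to a fixed machine register (GCC extension) */
register unsigned long task_base asm ("r13");

static inline unsigned long sketch_read_task_base(void)
{
        return task_base;       /* emits a plain read of r13 */
}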
diff --git a/include/asm-ia64/mca.h b/include/asm-ia64/mca.h
index 823553bf12e..f1663aa94a5 100644
--- a/include/asm-ia64/mca.h
+++ b/include/asm-ia64/mca.h
@@ -3,9 +3,9 @@
* Purpose: Machine check handling specific defines
*
* Copyright (C) 1999, 2004 Silicon Graphics, Inc.
- * Copyright (C) Vijay Chander (vijay@engr.sgi.com)
- * Copyright (C) Srinivasa Thirumalachar (sprasad@engr.sgi.com)
- * Copyright (C) Russ Anderson (rja@sgi.com)
+ * Copyright (C) Vijay Chander <vijay@engr.sgi.com>
+ * Copyright (C) Srinivasa Thirumalachar <sprasad@engr.sgi.com>
+ * Copyright (C) Russ Anderson <rja@sgi.com>
*/
#ifndef _ASM_IA64_MCA_H
diff --git a/include/asm-ia64/mca_asm.h b/include/asm-ia64/mca_asm.h
index 76203f9a871..dd2a5b13439 100644
--- a/include/asm-ia64/mca_asm.h
+++ b/include/asm-ia64/mca_asm.h
@@ -1,8 +1,9 @@
/*
* File: mca_asm.h
+ * Purpose: Machine check handling specific defines
*
* Copyright (C) 1999 Silicon Graphics, Inc.
- * Copyright (C) Vijay Chander (vijay@engr.sgi.com)
+ * Copyright (C) Vijay Chander <vijay@engr.sgi.com>
* Copyright (C) Srinivasa Thirumalachar <sprasad@engr.sgi.com>
* Copyright (C) 2000 Hewlett-Packard Co.
* Copyright (C) 2000 David Mosberger-Tang <davidm@hpl.hp.com>
diff --git a/include/asm-ia64/pgalloc.h b/include/asm-ia64/pgalloc.h
index 67552cad517..556d988123a 100644
--- a/include/asm-ia64/pgalloc.h
+++ b/include/asm-ia64/pgalloc.h
@@ -27,7 +27,7 @@ static inline pgd_t *pgd_alloc(struct mm_struct *mm)
return quicklist_alloc(0, GFP_KERNEL, NULL);
}
-static inline void pgd_free(pgd_t * pgd)
+static inline void pgd_free(struct mm_struct *mm, pgd_t *pgd)
{
quicklist_free(0, NULL, pgd);
}
@@ -44,11 +44,11 @@ static inline pud_t *pud_alloc_one(struct mm_struct *mm, unsigned long addr)
return quicklist_alloc(0, GFP_KERNEL, NULL);
}
-static inline void pud_free(pud_t * pud)
+static inline void pud_free(struct mm_struct *mm, pud_t *pud)
{
quicklist_free(0, NULL, pud);
}
-#define __pud_free_tlb(tlb, pud) pud_free(pud)
+#define __pud_free_tlb(tlb, pud) pud_free((tlb)->mm, pud)
#endif /* CONFIG_PGTABLE_4 */
static inline void
@@ -62,12 +62,12 @@ static inline pmd_t *pmd_alloc_one(struct mm_struct *mm, unsigned long addr)
return quicklist_alloc(0, GFP_KERNEL, NULL);
}
-static inline void pmd_free(pmd_t * pmd)
+static inline void pmd_free(struct mm_struct *mm, pmd_t *pmd)
{
quicklist_free(0, NULL, pmd);
}
-#define __pmd_free_tlb(tlb, pmd) pmd_free(pmd)
+#define __pmd_free_tlb(tlb, pmd) pmd_free((tlb)->mm, pmd)
static inline void
pmd_populate(struct mm_struct *mm, pmd_t * pmd_entry, struct page *pte)
@@ -94,12 +94,12 @@ static inline pte_t *pte_alloc_one_kernel(struct mm_struct *mm,
return quicklist_alloc(0, GFP_KERNEL, NULL);
}
-static inline void pte_free(struct page *pte)
+static inline void pte_free(struct mm_struct *mm, struct page *pte)
{
quicklist_free_page(0, NULL, pte);
}
-static inline void pte_free_kernel(pte_t * pte)
+static inline void pte_free_kernel(struct mm_struct *mm, pte_t *pte)
{
quicklist_free(0, NULL, pte);
}
@@ -109,6 +109,6 @@ static inline void check_pgt_cache(void)
quicklist_trim(0, NULL, 25, 16);
}
-#define __pte_free_tlb(tlb, pte) pte_free(pte)
+#define __pte_free_tlb(tlb, pte) pte_free((tlb)->mm, pte)
#endif /* _ASM_IA64_PGALLOC_H */
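[Note on pgalloc.h] Every free routine above gains a struct mm_struct * so the prototypes line up across architectures; ia64's quicklist backend simply ignores it. A self-contained sketch of the new convention, using stand-in types (none of these names are the kernel's):

#include <stdlib.h>

struct mm_struct;                               /* opaque stand-in */
typedef struct { unsigned long val; } pgd_t;

static pgd_t *sketch_pgd_alloc(struct mm_struct *mm)
{
        (void)mm;
        return calloc(1024, sizeof(pgd_t));
}

static void sketch_pgd_free(struct mm_struct *mm, pgd_t *pgd)
{
        (void)mm;               /* unused here, kept for the common API */
        free(pgd);
}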
diff --git a/include/asm-ia64/processor.h b/include/asm-ia64/processor.h
index be3b0ae4327..741f7ecb986 100644
--- a/include/asm-ia64/processor.h
+++ b/include/asm-ia64/processor.h
@@ -31,7 +31,8 @@
* each (assuming 8KB page size), for a total of 8TB of user virtual
* address space.
*/
-#define TASK_SIZE (current->thread.task_size)
+#define TASK_SIZE_OF(tsk) ((tsk)->thread.task_size)
+#define TASK_SIZE TASK_SIZE_OF(current)
/*
* This decides where the kernel will search for a free chunk of vm
@@ -472,7 +473,7 @@ ia64_set_psr (__u64 psr)
{
ia64_stop();
ia64_setreg(_IA64_REG_PSR_L, psr);
- ia64_srlz_d();
+ ia64_srlz_i();
}
/*
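[Note on processor.h] TASK_SIZE used to be expressible only in terms of current; TASK_SIZE_OF() lets callers ask about any task. A self-contained sketch with minimal stand-ins for the kernel structs:

struct thread_struct { unsigned long task_size; };
struct task_struct  { struct thread_struct thread; };

#define TASK_SIZE_OF(tsk)       ((tsk)->thread.task_size)

/* a range check that can now name the task it applies to */
static int sketch_in_user_range(struct task_struct *tsk, unsigned long addr)
{
        return addr < TASK_SIZE_OF(tsk);
}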
diff --git a/include/asm-ia64/sal.h b/include/asm-ia64/sal.h
index 1f5412d6f9b..2251118894a 100644
--- a/include/asm-ia64/sal.h
+++ b/include/asm-ia64/sal.h
@@ -649,17 +649,6 @@ typedef struct err_rec {
* Now define a couple of inline functions for improved type checking
* and convenience.
*/
-static inline long
-ia64_sal_freq_base (unsigned long which, unsigned long *ticks_per_second,
- unsigned long *drift_info)
-{
- struct ia64_sal_retval isrv;
-
- SAL_CALL(isrv, SAL_FREQ_BASE, which, 0, 0, 0, 0, 0, 0);
- *ticks_per_second = isrv.v0;
- *drift_info = isrv.v1;
- return isrv.status;
-}
extern s64 ia64_sal_cache_flush (u64 cache_type);
extern void __init check_sal_cache_flush (void);
@@ -841,6 +830,9 @@ extern int ia64_sal_oemcall_nolock(struct ia64_sal_retval *, u64, u64, u64,
u64, u64, u64, u64, u64);
extern int ia64_sal_oemcall_reentrant(struct ia64_sal_retval *, u64, u64, u64,
u64, u64, u64, u64, u64);
+extern long
+ia64_sal_freq_base (unsigned long which, unsigned long *ticks_per_second,
+                    unsigned long *drift_info);
#ifdef CONFIG_HOTPLUG_CPU
/*
* System Abstraction Layer Specification
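[Note on sal.h] With the inline body removed, ia64_sal_freq_base() is only declared here, so existing callers compile unchanged. A sketch of a typical call, assuming the SAL_FREQ_BASE_PLATFORM selector defined elsewhere in this header; the helper name and fallback value are illustrative:

static unsigned long sketch_platform_freq(void)
{
        unsigned long ticks_per_second, drift_info;

        if (ia64_sal_freq_base(SAL_FREQ_BASE_PLATFORM,
                               &ticks_per_second, &drift_info) != 0)
                return 0;       /* nonzero status: the SAL call failed */
        return ticks_per_second;
}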