| author | Jes Sorensen <jes@sgi.com> | 2006-02-17 05:18:43 -0500 |
| --- | --- | --- |
| committer | Tony Luck <tony.luck@intel.com> | 2006-02-27 15:26:58 -0800 |
| commit | 7aa6ba41362a7f888ad11fdcfe51ca8d92226cd3 (patch) | |
| tree | 3d7b177ee966f21d1ac6e630061bdb0b256e76d8 /include/asm-ia64 | |
| parent | e95a9ec1bb66e07b138861c743192f06e7b3e4de (diff) | |
[IA64-SGI] SN2-XP reduce kmalloc wrapper inlining
Take advantage of kzalloc() as well as reduce the size of code generated
for the error returns in xpc_setup_infrastructure().
Signed-off-by: Jes Sorensen <jes@sgi.com>
Acked-by: Dean Nelson <dcn@sgi.com>
Signed-off-by: Tony Luck <tony.luck@intel.com>
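The commit message describes two effects: switching allocations to kzalloc(), which returns zeroed memory so callers can drop a separate memset(), and shrinking the code generated for the error returns in xpc_setup_infrastructure(), which is typically achieved by funnelling all failure paths through a shared unwind label instead of duplicating the cleanup at every early return. The sketch below only illustrates that general pattern; it is not the patch body, and the struct and function names (xpc_example_part, xpc_example_setup) are hypothetical.

```c
/*
 * Minimal sketch of kzalloc() plus a single unwind path for error
 * returns.  Illustrative only -- names and fields are hypothetical.
 */
#include <linux/errno.h>
#include <linux/slab.h>
#include <linux/types.h>

struct xpc_example_part {	/* hypothetical per-partition state */
	void *local_msgqueue;
	void *remote_msgqueue;
};

static int
xpc_example_setup(struct xpc_example_part *part, size_t nbytes, gfp_t flags)
{
	/* kzalloc() zeroes the buffer, so no follow-up memset() is needed */
	part->local_msgqueue = kzalloc(nbytes, flags);
	if (part->local_msgqueue == NULL)
		goto out_nomem;

	part->remote_msgqueue = kzalloc(nbytes, flags);
	if (part->remote_msgqueue == NULL)
		goto out_free_local;

	return 0;

	/* one shared cleanup path keeps the emitted error-return code small */
out_free_local:
	kfree(part->local_msgqueue);
	part->local_msgqueue = NULL;
out_nomem:
	return -ENOMEM;
}
```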
Diffstat (limited to 'include/asm-ia64')
-rw-r--r-- | include/asm-ia64/sn/xpc.h | 22 |
1 file changed, 0 insertions, 22 deletions
```diff
diff --git a/include/asm-ia64/sn/xpc.h b/include/asm-ia64/sn/xpc.h
index df7f5f4f3cd..aa3b8ace903 100644
--- a/include/asm-ia64/sn/xpc.h
+++ b/include/asm-ia64/sn/xpc.h
@@ -1227,28 +1227,6 @@ xpc_map_bte_errors(bte_result_t error)
-static inline void *
-xpc_kmalloc_cacheline_aligned(size_t size, gfp_t flags, void **base)
-{
-	/* see if kmalloc will give us cachline aligned memory by default */
-	*base = kmalloc(size, flags);
-	if (*base == NULL) {
-		return NULL;
-	}
-	if ((u64) *base == L1_CACHE_ALIGN((u64) *base)) {
-		return *base;
-	}
-	kfree(*base);
-
-	/* nope, we'll have to do it ourselves */
-	*base = kmalloc(size + L1_CACHE_BYTES, flags);
-	if (*base == NULL) {
-		return NULL;
-	}
-	return (void *) L1_CACHE_ALIGN((u64) *base);
-}
-
-
 /*
  * Check to see if there is any channel activity to/from the specified
  * partition.
```
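Because the diffstat is limited to include/asm-ia64, only the header-side removal is visible here; the .c-side replacement is not part of this diff. Assuming the wrapper was moved out of line and switched to kzalloc() as the commit message suggests, it could look roughly like the sketch below; the name xpc_kzalloc_cacheline_aligned and its exact location are assumptions, not taken from this page.

```c
/*
 * Assumed shape of an out-of-line, kzalloc()-based replacement for the
 * removed inline wrapper: try a plain allocation first and, if it is not
 * L1-cacheline aligned, over-allocate by one cacheline and return an
 * aligned pointer inside it.  *base keeps the address to pass to kfree().
 */
#include <linux/cache.h>
#include <linux/slab.h>
#include <linux/types.h>

void *
xpc_kzalloc_cacheline_aligned(size_t size, gfp_t flags, void **base)
{
	/* see if kzalloc() hands back cacheline-aligned memory by default */
	*base = kzalloc(size, flags);
	if (*base == NULL)
		return NULL;
	if ((u64)*base == L1_CACHE_ALIGN((u64)*base))
		return *base;
	kfree(*base);

	/* nope, over-allocate and align the returned pointer ourselves */
	*base = kzalloc(size + L1_CACHE_BYTES, flags);
	if (*base == NULL)
		return NULL;
	return (void *)L1_CACHE_ALIGN((u64)*base);
}
```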