author     Catalin Marinas <catalin.marinas@arm.com>   2015-02-05 18:01:53 +0000
committer  Catalin Marinas <catalin.marinas@arm.com>   2015-02-27 18:05:55 +0000
commit     a1e50a82256ed2f1312e70c52a84323e2e378f49 (patch)
tree       d1a3059412f8323c6644f2fceb7649cba84a093c
parent     f6242cac10427c546271050b31c891a078e490cd (diff)
arm64: Increase the swiotlb buffer size to 64MB
With commit 3690951fc6d4 (arm64: Use swiotlb late initialisation), the swiotlb buffer size is limited to MAX_ORDER_NR_PAGES. However, there are platforms with 32-bit only devices that require bounce buffering via swiotlb.

This patch changes the swiotlb initialisation to an early 64MB memblock allocation. In order to get the swiotlb buffer correctly allocated (via memblock_virt_alloc_low_nopanic), this patch also defines ARCH_LOW_ADDRESS_LIMIT to the maximum physical address capable of 32-bit DMA.

Reported-by: Kefeng Wang <wangkefeng.wang@huawei.com>
Tested-by: Kefeng Wang <wangkefeng.wang@huawei.com>
Signed-off-by: Catalin Marinas <catalin.marinas@arm.com>
-rw-r--r--   arch/arm64/include/asm/processor.h    3
-rw-r--r--   arch/arm64/mm/dma-mapping.c          16
-rw-r--r--   arch/arm64/mm/init.c                 14
3 files changed, 14 insertions, 19 deletions
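For context on the second half of the commit message: memblock_virt_alloc_low_nopanic() allocates below ARCH_LOW_ADDRESS_LIMIT, so redefining that limit to the 32-bit DMA boundary is what steers the early swiotlb buffer into memory that 32-bit-only devices can reach. The standalone C sketch below only illustrates the arithmetic; the dram_start/dram_end values are made up, and the "4GB window above the 4GB-aligned base of DRAM" rule is an assumption about what max_zone_dma_phys() in arch/arm64/mm/init.c computes.

/*
 * Illustration only: how a "maximum physical address capable of 32-bit
 * DMA" limit can be derived from the DRAM layout.  The real computation
 * is max_zone_dma_phys() in arch/arm64/mm/init.c; the DRAM start/end
 * values here are invented for the example.
 */
#include <stdint.h>
#include <stdio.h>

int main(void)
{
	uint64_t dram_start = 0x8000000000ULL;	/* hypothetical base of DRAM */
	uint64_t dram_end   = 0x8100000000ULL;	/* hypothetical end of DRAM  */

	/* 4GB window starting at the 4GB-aligned base of DRAM (assumed rule) */
	uint64_t offset = dram_start & ~((1ULL << 32) - 1);
	uint64_t dma_phys_limit = offset + (1ULL << 32);
	if (dma_phys_limit > dram_end)
		dma_phys_limit = dram_end;

	/* ARCH_LOW_ADDRESS_LIMIT is the highest addressable byte, hence -1 */
	printf("arm64_dma_phys_limit   = %#llx\n",
	       (unsigned long long)dma_phys_limit);
	printf("ARCH_LOW_ADDRESS_LIMIT = %#llx\n",
	       (unsigned long long)(dma_phys_limit - 1));
	return 0;
}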
diff --git a/arch/arm64/include/asm/processor.h b/arch/arm64/include/asm/processor.h
index f9be30ea1cb..20e9591a60c 100644
--- a/arch/arm64/include/asm/processor.h
+++ b/arch/arm64/include/asm/processor.h
@@ -45,7 +45,8 @@
#define STACK_TOP STACK_TOP_MAX
#endif /* CONFIG_COMPAT */
-#define ARCH_LOW_ADDRESS_LIMIT PHYS_MASK
+extern phys_addr_t arm64_dma_phys_limit;
+#define ARCH_LOW_ADDRESS_LIMIT (arm64_dma_phys_limit - 1)
#endif /* __KERNEL__ */
struct debug_info {
diff --git a/arch/arm64/mm/dma-mapping.c b/arch/arm64/mm/dma-mapping.c
index 0a24b9b8c69..58e0c2bdde0 100644
--- a/arch/arm64/mm/dma-mapping.c
+++ b/arch/arm64/mm/dma-mapping.c
@@ -348,8 +348,6 @@ static struct dma_map_ops swiotlb_dma_ops = {
.mapping_error = swiotlb_dma_mapping_error,
};
-extern int swiotlb_late_init_with_default_size(size_t default_size);
-
static int __init atomic_pool_init(void)
{
pgprot_t prot = __pgprot(PROT_NORMAL_NC);
@@ -411,21 +409,13 @@ out:
return -ENOMEM;
}
-static int __init swiotlb_late_init(void)
+static int __init arm64_dma_init(void)
{
- size_t swiotlb_size = min(SZ_64M, MAX_ORDER_NR_PAGES << PAGE_SHIFT);
+ int ret;
dma_ops = &swiotlb_dma_ops;
- return swiotlb_late_init_with_default_size(swiotlb_size);
-}
-
-static int __init arm64_dma_init(void)
-{
- int ret = 0;
-
- ret |= swiotlb_late_init();
- ret |= atomic_pool_init();
+ ret = atomic_pool_init();
return ret;
}
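The hunk above removes the cap the commit message refers to: swiotlb_late_init_with_default_size() gets its buffer from the page allocator, which cannot hand out more than a MAX_ORDER-1 block, so the old min(SZ_64M, MAX_ORDER_NR_PAGES << PAGE_SHIFT) fell far short of 64MB on a typical configuration. A quick standalone check of that arithmetic, assuming the common defaults MAX_ORDER = 11 and 4KB pages:

/*
 * Back-of-the-envelope check of the cap in the removed swiotlb_late_init().
 * MAX_ORDER = 11 and 4KB pages are assumptions for this example.
 */
#include <stdio.h>

int main(void)
{
	unsigned long page_shift = 12;			/* 4KB pages (assumed)        */
	unsigned long max_order  = 11;			/* default MAX_ORDER (assumed) */
	unsigned long max_order_nr_pages = 1UL << (max_order - 1);
	unsigned long cap = max_order_nr_pages << page_shift;

	/* min(SZ_64M, MAX_ORDER_NR_PAGES << PAGE_SHIFT) from the removed code */
	unsigned long sz_64m = 64UL << 20;
	unsigned long swiotlb_size = cap < sz_64m ? cap : sz_64m;

	printf("page-allocator cap: %lu MB\n", cap >> 20);		/* 4 MB */
	printf("late-init swiotlb:  %lu MB\n", swiotlb_size >> 20);	/* 4 MB */
	return 0;
}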
diff --git a/arch/arm64/mm/init.c b/arch/arm64/mm/init.c
index 71145f95207..ae85da6307b 100644
--- a/arch/arm64/mm/init.c
+++ b/arch/arm64/mm/init.c
@@ -33,6 +33,7 @@
#include <linux/dma-mapping.h>
#include <linux/dma-contiguous.h>
#include <linux/efi.h>
+#include <linux/swiotlb.h>
#include <asm/fixmap.h>
#include <asm/memory.h>
@@ -45,6 +46,7 @@
#include "mm.h"
phys_addr_t memstart_addr __read_mostly = 0;
+phys_addr_t arm64_dma_phys_limit __read_mostly;
#ifdef CONFIG_BLK_DEV_INITRD
static int __init early_initrd(char *p)
@@ -85,7 +87,7 @@ static void __init zone_sizes_init(unsigned long min, unsigned long max)
/* 4GB maximum for 32-bit only capable devices */
if (IS_ENABLED(CONFIG_ZONE_DMA)) {
- max_dma = PFN_DOWN(max_zone_dma_phys());
+ max_dma = PFN_DOWN(arm64_dma_phys_limit);
zone_size[ZONE_DMA] = max_dma - min;
}
zone_size[ZONE_NORMAL] = max - max_dma;
@@ -156,8 +158,6 @@ early_param("mem", early_mem);
void __init arm64_memblock_init(void)
{
- phys_addr_t dma_phys_limit = 0;
-
memblock_enforce_memory_limit(memory_limit);
/*
@@ -174,8 +174,10 @@ void __init arm64_memblock_init(void)
/* 4GB maximum for 32-bit only capable devices */
if (IS_ENABLED(CONFIG_ZONE_DMA))
- dma_phys_limit = max_zone_dma_phys();
- dma_contiguous_reserve(dma_phys_limit);
+ arm64_dma_phys_limit = max_zone_dma_phys();
+ else
+ arm64_dma_phys_limit = PHYS_MASK + 1;
+ dma_contiguous_reserve(arm64_dma_phys_limit);
memblock_allow_resize();
memblock_dump_all();
@@ -276,6 +278,8 @@ static void __init free_unused_memmap(void)
*/
void __init mem_init(void)
{
+ swiotlb_init(1);
+
set_max_mapnr(pfn_to_page(max_pfn) - mem_map);
#ifndef CONFIG_SPARSEMEM_VMEMMAP
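Putting the three files together, the intended ordering after this patch is: arm64_memblock_init() records arm64_dma_phys_limit, mem_init() calls swiotlb_init(1) so the default 64MB bounce buffer comes from an early memblock allocation below ARCH_LOW_ADDRESS_LIMIT, and arm64_dma_init() is left to install swiotlb_dma_ops and set up the atomic pool. The stubbed sketch below illustrates that ordering only; it is not kernel code, and the placeholder bodies and printed values are invented.

/* Stubbed outline of the init ordering after this patch (illustration only). */
#include <stdio.h>

static unsigned long long arm64_dma_phys_limit;

static void arm64_memblock_init(void)
{
	/* ZONE_DMA case: limit DMA-able memory to the lowest 4GB of RAM */
	arm64_dma_phys_limit = 1ULL << 32;	/* placeholder value */
	printf("memblock init: arm64_dma_phys_limit = %#llx\n",
	       arm64_dma_phys_limit);
}

static void mem_init(void)
{
	/* swiotlb_init(1): early 64MB memblock allocation below
	 * ARCH_LOW_ADDRESS_LIMIT (== arm64_dma_phys_limit - 1) */
	printf("mem_init: swiotlb bounce buffer reserved below %#llx\n",
	       arm64_dma_phys_limit - 1);
}

static int arm64_dma_init(void)
{
	/* now only installs swiotlb_dma_ops and the atomic pool */
	printf("arm64_dma_init: dma_ops = &swiotlb_dma_ops\n");
	return 0;
}

int main(void)
{
	arm64_memblock_init();	/* early, before zones are sized */
	mem_init();		/* core mm bring-up              */
	arm64_dma_init();	/* initcall, later in boot       */
	return 0;
}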