Diffstat (limited to 'arch')
-rw-r--r-- | arch/s390/Kconfig                   | 16
-rw-r--r-- | arch/s390/appldata/appldata_base.c  |  2
-rw-r--r-- | arch/s390/include/asm/kvm_virtio.h  |  2
-rw-r--r-- | arch/s390/include/asm/mmu.h         |  3
-rw-r--r-- | arch/s390/include/asm/mmu_context.h | 19
-rw-r--r-- | arch/s390/include/asm/pgtable.h     |  8
-rw-r--r-- | arch/s390/include/asm/thread_info.h |  5
-rw-r--r-- | arch/s390/kernel/smp.c              | 24
-rw-r--r-- | arch/s390/mm/pgtable.c              | 16
-rw-r--r-- | arch/x86/include/asm/dma-mapping.h  |  4
-rw-r--r-- | arch/x86/kernel/genx2apic_uv_x.c    |  7
-rw-r--r-- | arch/x86/kernel/pci-swiotlb_64.c    | 14
-rw-r--r-- | arch/x86/mm/init_64.c               | 14
-rw-r--r-- | arch/x86/xen/mmu.c                  | 18
14 files changed, 91 insertions, 61 deletions
diff --git a/arch/s390/Kconfig b/arch/s390/Kconfig
index 70b7645ce74..8116a3328a1 100644
--- a/arch/s390/Kconfig
+++ b/arch/s390/Kconfig
@@ -241,19 +241,17 @@ config PACK_STACK
           Say Y if you are unsure.
 
 config SMALL_STACK
-        bool "Use 4kb/8kb for kernel stack instead of 8kb/16kb"
-        depends on PACK_STACK && !LOCKDEP
+        bool "Use 8kb for kernel stack instead of 16kb"
+        depends on PACK_STACK && 64BIT && !LOCKDEP
         help
           If you say Y here and the compiler supports the -mkernel-backchain
-          option the kernel will use a smaller kernel stack size. For 31 bit
-          the reduced size is 4kb instead of 8kb and for 64 bit it is 8kb
-          instead of 16kb. This allows to run more thread on a system and
-          reduces the pressure on the memory management for higher order
-          page allocations.
+          option the kernel will use a smaller kernel stack size. The reduced
+          size is 8kb instead of 16kb. This allows to run more threads on a
+          system and reduces the pressure on the memory management for higher
+          order page allocations.
 
           Say N if you are unsure.
 
-
 config CHECK_STACK
         bool "Detect kernel stack overflow"
         help
@@ -384,7 +382,7 @@ config IPL
 choice
         prompt "IPL method generated into head.S"
         depends on IPL
-        default IPL_TAPE
+        default IPL_VM
         help
           Select "tape" if you want to IPL the image from a Tape.
diff --git a/arch/s390/appldata/appldata_base.c b/arch/s390/appldata/appldata_base.c
index a7f8979fb92..a06a47cdd5e 100644
--- a/arch/s390/appldata/appldata_base.c
+++ b/arch/s390/appldata/appldata_base.c
@@ -424,7 +424,7 @@ out:
  */
 int appldata_register_ops(struct appldata_ops *ops)
 {
-        if ((ops->size > APPLDATA_MAX_REC_SIZE) || (ops->size < 0))
+        if (ops->size > APPLDATA_MAX_REC_SIZE)
                 return -EINVAL;
 
         ops->ctl_table = kzalloc(4 * sizeof(struct ctl_table), GFP_KERNEL);
diff --git a/arch/s390/include/asm/kvm_virtio.h b/arch/s390/include/asm/kvm_virtio.h
index 146100224de..c13568b9351 100644
--- a/arch/s390/include/asm/kvm_virtio.h
+++ b/arch/s390/include/asm/kvm_virtio.h
@@ -52,7 +52,7 @@ struct kvm_vqconfig {
 #ifdef __KERNEL__
 
 /* early virtio console setup */
-#ifdef CONFIG_VIRTIO_CONSOLE
+#ifdef CONFIG_S390_GUEST
 extern void s390_virtio_console_init(void);
 #else
 static inline void s390_virtio_console_init(void)
diff --git a/arch/s390/include/asm/mmu.h b/arch/s390/include/asm/mmu.h
index 5dd5e7b3476..d2b4ff83147 100644
--- a/arch/s390/include/asm/mmu.h
+++ b/arch/s390/include/asm/mmu.h
@@ -7,7 +7,8 @@ typedef struct {
         unsigned long asce_bits;
         unsigned long asce_limit;
         int noexec;
-        int pgstes;
+        int has_pgste;   /* The mmu context has extended page tables */
+        int alloc_pgste; /* cloned contexts will have extended page tables */
 } mm_context_t;
 
 #endif
diff --git a/arch/s390/include/asm/mmu_context.h b/arch/s390/include/asm/mmu_context.h
index 4c2fbf48c9c..28ec870655a 100644
--- a/arch/s390/include/asm/mmu_context.h
+++ b/arch/s390/include/asm/mmu_context.h
@@ -20,12 +20,25 @@ static inline int init_new_context(struct task_struct *tsk,
 #ifdef CONFIG_64BIT
         mm->context.asce_bits |= _ASCE_TYPE_REGION3;
 #endif
-        if (current->mm->context.pgstes) {
+        if (current->mm->context.alloc_pgste) {
+                /*
+                 * alloc_pgste indicates, that any NEW context will be created
+                 * with extended page tables. The old context is unchanged. The
+                 * page table allocation and the page table operations will
+                 * look at has_pgste to distinguish normal and extended page
+                 * tables. The only way to create extended page tables is to
+                 * set alloc_pgste and then create a new context (e.g. dup_mm).
+                 * The page table allocation is called after init_new_context
+                 * and if has_pgste is set, it will create extended page
+                 * tables.
+                 */
                 mm->context.noexec = 0;
-                mm->context.pgstes = 1;
+                mm->context.has_pgste = 1;
+                mm->context.alloc_pgste = 1;
         } else {
                 mm->context.noexec = s390_noexec;
-                mm->context.pgstes = 0;
+                mm->context.has_pgste = 0;
+                mm->context.alloc_pgste = 0;
         }
         mm->context.asce_limit = STACK_TOP_MAX;
         crst_table_init((unsigned long *) mm->pgd, pgd_entry_type(mm));
diff --git a/arch/s390/include/asm/pgtable.h b/arch/s390/include/asm/pgtable.h
index 1a928f84afd..7fc76133b3e 100644
--- a/arch/s390/include/asm/pgtable.h
+++ b/arch/s390/include/asm/pgtable.h
@@ -679,7 +679,7 @@ static inline void pmd_clear(pmd_t *pmd)
 
 static inline void pte_clear(struct mm_struct *mm, unsigned long addr, pte_t *ptep)
 {
-        if (mm->context.pgstes)
+        if (mm->context.has_pgste)
                 ptep_rcp_copy(ptep);
         pte_val(*ptep) = _PAGE_TYPE_EMPTY;
         if (mm->context.noexec)
@@ -763,7 +763,7 @@ static inline int kvm_s390_test_and_clear_page_dirty(struct mm_struct *mm,
         struct page *page;
         unsigned int skey;
 
-        if (!mm->context.pgstes)
+        if (!mm->context.has_pgste)
                 return -EINVAL;
         rcp_lock(ptep);
         pgste = (unsigned long *) (ptep + PTRS_PER_PTE);
@@ -794,7 +794,7 @@ static inline int ptep_test_and_clear_young(struct vm_area_struct *vma,
         int young;
         unsigned long *pgste;
 
-        if (!vma->vm_mm->context.pgstes)
+        if (!vma->vm_mm->context.has_pgste)
                 return 0;
         physpage = pte_val(*ptep) & PAGE_MASK;
         pgste = (unsigned long *) (ptep + PTRS_PER_PTE);
@@ -844,7 +844,7 @@ static inline void __ptep_ipte(unsigned long address, pte_t *ptep)
 static inline void ptep_invalidate(struct mm_struct *mm,
                                    unsigned long address, pte_t *ptep)
 {
-        if (mm->context.pgstes) {
+        if (mm->context.has_pgste) {
                 rcp_lock(ptep);
                 __ptep_ipte(address, ptep);
                 ptep_rcp_copy(ptep);
diff --git a/arch/s390/include/asm/thread_info.h b/arch/s390/include/asm/thread_info.h
index de3fad60c68..c1eaf9604da 100644
--- a/arch/s390/include/asm/thread_info.h
+++ b/arch/s390/include/asm/thread_info.h
@@ -15,13 +15,8 @@
  * Size of kernel stack for each process
  */
 #ifndef __s390x__
-#ifndef __SMALL_STACK
 #define THREAD_ORDER 1
 #define ASYNC_ORDER 1
-#else
-#define THREAD_ORDER 0
-#define ASYNC_ORDER 0
-#endif
 #else /* __s390x__ */
 #ifndef __SMALL_STACK
 #define THREAD_ORDER 2
diff --git a/arch/s390/kernel/smp.c b/arch/s390/kernel/smp.c
index 9e8b1f9b8f4..b5595688a47 100644
--- a/arch/s390/kernel/smp.c
+++ b/arch/s390/kernel/smp.c
@@ -1119,9 +1119,7 @@ out:
         return rc;
 }
 
-static ssize_t __ref rescan_store(struct sys_device *dev,
-                                  struct sysdev_attribute *attr,
-                                  const char *buf,
+static ssize_t __ref rescan_store(struct sysdev_class *class, const char *buf,
                                   size_t count)
 {
         int rc;
@@ -1129,12 +1127,10 @@ static ssize_t __ref rescan_store(struct sys_device *dev,
         rc = smp_rescan_cpus();
         return rc ? rc : count;
 }
-static SYSDEV_ATTR(rescan, 0200, NULL, rescan_store);
+static SYSDEV_CLASS_ATTR(rescan, 0200, NULL, rescan_store);
 #endif /* CONFIG_HOTPLUG_CPU */
 
-static ssize_t dispatching_show(struct sys_device *dev,
-                                struct sysdev_attribute *attr,
-                                char *buf)
+static ssize_t dispatching_show(struct sysdev_class *class, char *buf)
 {
         ssize_t count;
 
@@ -1144,9 +1140,8 @@ static ssize_t dispatching_show(struct sys_device *dev,
         return count;
 }
 
-static ssize_t dispatching_store(struct sys_device *dev,
-                                 struct sysdev_attribute *attr,
-                                 const char *buf, size_t count)
+static ssize_t dispatching_store(struct sysdev_class *dev, const char *buf,
+                                 size_t count)
 {
         int val, rc;
         char delim;
@@ -1168,7 +1163,8 @@ out:
         put_online_cpus();
         return rc ? rc : count;
 }
-static SYSDEV_ATTR(dispatching, 0644, dispatching_show, dispatching_store);
+static SYSDEV_CLASS_ATTR(dispatching, 0644, dispatching_show,
+                         dispatching_store);
 
 static int __init topology_init(void)
 {
@@ -1178,13 +1174,11 @@ static int __init topology_init(void)
         register_cpu_notifier(&smp_cpu_nb);
 
 #ifdef CONFIG_HOTPLUG_CPU
-        rc = sysfs_create_file(&cpu_sysdev_class.kset.kobj,
-                               &attr_rescan.attr);
+        rc = sysdev_class_create_file(&cpu_sysdev_class, &attr_rescan);
         if (rc)
                 return rc;
 #endif
-        rc = sysfs_create_file(&cpu_sysdev_class.kset.kobj,
-                               &attr_dispatching.attr);
+        rc = sysdev_class_create_file(&cpu_sysdev_class, &attr_dispatching);
         if (rc)
                 return rc;
         for_each_present_cpu(cpu) {
diff --git a/arch/s390/mm/pgtable.c b/arch/s390/mm/pgtable.c
index 3d98ba82ea6..ef3635b52fc 100644
--- a/arch/s390/mm/pgtable.c
+++ b/arch/s390/mm/pgtable.c
@@ -169,7 +169,7 @@ unsigned long *page_table_alloc(struct mm_struct *mm)
         unsigned long *table;
         unsigned long bits;
 
-        bits = (mm->context.noexec || mm->context.pgstes) ? 3UL : 1UL;
+        bits = (mm->context.noexec || mm->context.has_pgste) ? 3UL : 1UL;
         spin_lock(&mm->page_table_lock);
         page = NULL;
         if (!list_empty(&mm->context.pgtable_list)) {
@@ -186,7 +186,7 @@ unsigned long *page_table_alloc(struct mm_struct *mm)
                 pgtable_page_ctor(page);
                 page->flags &= ~FRAG_MASK;
                 table = (unsigned long *) page_to_phys(page);
-                if (mm->context.pgstes)
+                if (mm->context.has_pgste)
                         clear_table_pgstes(table);
                 else
                         clear_table(table, _PAGE_TYPE_EMPTY, PAGE_SIZE);
@@ -210,7 +210,7 @@ void page_table_free(struct mm_struct *mm, unsigned long *table)
         struct page *page;
         unsigned long bits;
 
-        bits = (mm->context.noexec || mm->context.pgstes) ? 3UL : 1UL;
+        bits = (mm->context.noexec || mm->context.has_pgste) ? 3UL : 1UL;
         bits <<= (__pa(table) & (PAGE_SIZE - 1)) / 256 / sizeof(unsigned long);
         page = pfn_to_page(__pa(table) >> PAGE_SHIFT);
         spin_lock(&mm->page_table_lock);
@@ -257,7 +257,7 @@ int s390_enable_sie(void)
         struct mm_struct *mm, *old_mm;
 
         /* Do we have pgstes? if yes, we are done */
-        if (tsk->mm->context.pgstes)
+        if (tsk->mm->context.has_pgste)
                 return 0;
 
         /* lets check if we are allowed to replace the mm */
@@ -269,14 +269,14 @@ int s390_enable_sie(void)
         }
         task_unlock(tsk);
 
-        /* we copy the mm with pgstes enabled */
-        tsk->mm->context.pgstes = 1;
+        /* we copy the mm and let dup_mm create the page tables with_pgstes */
+        tsk->mm->context.alloc_pgste = 1;
         mm = dup_mm(tsk);
-        tsk->mm->context.pgstes = 0;
+        tsk->mm->context.alloc_pgste = 0;
         if (!mm)
                 return -ENOMEM;
 
-        /* Now lets check again if somebody attached ptrace etc */
+        /* Now lets check again if something happened */
         task_lock(tsk);
         if (!tsk->mm || atomic_read(&tsk->mm->mm_users) > 1 ||
             tsk->mm != tsk->active_mm || tsk->mm->ioctx_list) {
diff --git a/arch/x86/include/asm/dma-mapping.h b/arch/x86/include/asm/dma-mapping.h
index 4a5397bfce2..7f225a4b2a2 100644
--- a/arch/x86/include/asm/dma-mapping.h
+++ b/arch/x86/include/asm/dma-mapping.h
@@ -255,9 +255,11 @@ static inline unsigned long dma_alloc_coherent_mask(struct device *dev,
 
 static inline gfp_t dma_alloc_coherent_gfp_flags(struct device *dev, gfp_t gfp)
 {
-#ifdef CONFIG_X86_64
         unsigned long dma_mask = dma_alloc_coherent_mask(dev, gfp);
 
+        if (dma_mask <= DMA_24BIT_MASK)
+                gfp |= GFP_DMA;
+#ifdef CONFIG_X86_64
         if (dma_mask <= DMA_32BIT_MASK && !(gfp & GFP_DMA))
                 gfp |= GFP_DMA32;
 #endif
diff --git a/arch/x86/kernel/genx2apic_uv_x.c b/arch/x86/kernel/genx2apic_uv_x.c
index 680a06557c5..2c7dbdb9827 100644
--- a/arch/x86/kernel/genx2apic_uv_x.c
+++ b/arch/x86/kernel/genx2apic_uv_x.c
@@ -15,7 +15,6 @@
 #include <linux/ctype.h>
 #include <linux/init.h>
 #include <linux/sched.h>
-#include <linux/bootmem.h>
 #include <linux/module.h>
 #include <linux/hardirq.h>
 #include <asm/smp.h>
@@ -398,16 +397,16 @@ void __init uv_system_init(void)
         printk(KERN_DEBUG "UV: Found %d blades\n", uv_num_possible_blades());
 
         bytes = sizeof(struct uv_blade_info) * uv_num_possible_blades();
-        uv_blade_info = alloc_bootmem_pages(bytes);
+        uv_blade_info = kmalloc(bytes, GFP_KERNEL);
 
         get_lowmem_redirect(&lowmem_redir_base, &lowmem_redir_size);
 
         bytes = sizeof(uv_node_to_blade[0]) * num_possible_nodes();
-        uv_node_to_blade = alloc_bootmem_pages(bytes);
+        uv_node_to_blade = kmalloc(bytes, GFP_KERNEL);
         memset(uv_node_to_blade, 255, bytes);
 
         bytes = sizeof(uv_cpu_to_blade[0]) * num_possible_cpus();
-        uv_cpu_to_blade = alloc_bootmem_pages(bytes);
+        uv_cpu_to_blade = kmalloc(bytes, GFP_KERNEL);
         memset(uv_cpu_to_blade, 255, bytes);
 
         blade = 0;
diff --git a/arch/x86/kernel/pci-swiotlb_64.c b/arch/x86/kernel/pci-swiotlb_64.c
index c4ce0332759..3c539d111ab 100644
--- a/arch/x86/kernel/pci-swiotlb_64.c
+++ b/arch/x86/kernel/pci-swiotlb_64.c
@@ -18,9 +18,21 @@ swiotlb_map_single_phys(struct device *hwdev, phys_addr_t paddr, size_t size,
         return swiotlb_map_single(hwdev, phys_to_virt(paddr), size, direction);
 }
 
+static void *x86_swiotlb_alloc_coherent(struct device *hwdev, size_t size,
+                                        dma_addr_t *dma_handle, gfp_t flags)
+{
+        void *vaddr;
+
+        vaddr = dma_generic_alloc_coherent(hwdev, size, dma_handle, flags);
+        if (vaddr)
+                return vaddr;
+
+        return swiotlb_alloc_coherent(hwdev, size, dma_handle, flags);
+}
+
 struct dma_mapping_ops swiotlb_dma_ops = {
         .mapping_error = swiotlb_dma_mapping_error,
-        .alloc_coherent = swiotlb_alloc_coherent,
+        .alloc_coherent = x86_swiotlb_alloc_coherent,
         .free_coherent = swiotlb_free_coherent,
         .map_single = swiotlb_map_single_phys,
         .unmap_single = swiotlb_unmap_single,
diff --git a/arch/x86/mm/init_64.c b/arch/x86/mm/init_64.c
index b8e461d4941..f79a02f64d1 100644
--- a/arch/x86/mm/init_64.c
+++ b/arch/x86/mm/init_64.c
@@ -350,8 +350,10 @@ phys_pte_init(pte_t *pte_page, unsigned long addr, unsigned long end,
                  * pagetable pages as RO. So assume someone who pre-setup
                  * these mappings are more intelligent.
                  */
-                if (pte_val(*pte))
+                if (pte_val(*pte)) {
+                        pages++;
                         continue;
+                }
 
                 if (0)
                         printk("   pte=%p addr=%lx pte=%016lx\n",
@@ -418,8 +420,10 @@ phys_pmd_init(pmd_t *pmd_page, unsigned long address, unsigned long end,
                          * not differ with respect to page frame and
                          * attributes.
                          */
-                        if (page_size_mask & (1 << PG_LEVEL_2M))
+                        if (page_size_mask & (1 << PG_LEVEL_2M)) {
+                                pages++;
                                 continue;
+                        }
 
                         new_prot = pte_pgprot(pte_clrhuge(*(pte_t *)pmd));
                 }
@@ -499,8 +503,10 @@ phys_pud_init(pud_t *pud_page, unsigned long addr, unsigned long end,
                          * not differ with respect to page frame and
                          * attributes.
                          */
-                        if (page_size_mask & (1 << PG_LEVEL_1G))
+                        if (page_size_mask & (1 << PG_LEVEL_1G)) {
+                                pages++;
                                 continue;
+                        }
 
                         prot = pte_pgprot(pte_clrhuge(*(pte_t *)pud));
                 }
@@ -831,7 +837,7 @@ int arch_add_memory(int nid, u64 start, u64 size)
         unsigned long nr_pages = size >> PAGE_SHIFT;
         int ret;
 
-        last_mapped_pfn = init_memory_mapping(start, start + size-1);
+        last_mapped_pfn = init_memory_mapping(start, start + size);
         if (last_mapped_pfn > max_pfn_mapped)
                 max_pfn_mapped = last_mapped_pfn;
diff --git a/arch/x86/xen/mmu.c b/arch/x86/xen/mmu.c
index d4d52f5a1cf..aba77b2b7d1 100644
--- a/arch/x86/xen/mmu.c
+++ b/arch/x86/xen/mmu.c
@@ -246,11 +246,21 @@ xmaddr_t arbitrary_virt_to_machine(void *vaddr)
 {
         unsigned long address = (unsigned long)vaddr;
         unsigned int level;
-        pte_t *pte = lookup_address(address, &level);
-        unsigned offset = address & ~PAGE_MASK;
+        pte_t *pte;
+        unsigned offset;
 
-        BUG_ON(pte == NULL);
+        /*
+         * if the PFN is in the linear mapped vaddr range, we can just use
+         * the (quick) virt_to_machine() p2m lookup
+         */
+        if (virt_addr_valid(vaddr))
+                return virt_to_machine(vaddr);
 
+        /* otherwise we have to do a (slower) full page-table walk */
+
+        pte = lookup_address(address, &level);
+        BUG_ON(pte == NULL);
+        offset = address & ~PAGE_MASK;
         return XMADDR(((phys_addr_t)pte_mfn(*pte) << PAGE_SHIFT) + offset);
 }
 
@@ -410,7 +420,7 @@ void xen_ptep_modify_prot_commit(struct mm_struct *mm, unsigned long addr,
 
         xen_mc_batch();
 
-        u.ptr = virt_to_machine(ptep).maddr | MMU_PT_UPDATE_PRESERVE_AD;
+        u.ptr = arbitrary_virt_to_machine(ptep).maddr | MMU_PT_UPDATE_PRESERVE_AD;
         u.val = pte_val_ma(pte);
         xen_extend_mmu_update(&u);
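
Aside on the s390 hunks above: the comment added to mmu_context.h describes a two-flag handshake between alloc_pgste and has_pgste. The following standalone C sketch models only that handshake; it is illustrative, not kernel code — the struct layout and the model_init_new_context() helper are made up for the example, and in the real patch the extended (pgste) page tables are actually built by page_table_alloc().

#include <stdio.h>
#include <string.h>

/* Toy stand-ins for the two mm_context_t flags introduced by the patch:
 * has_pgste marks a context that already uses extended page tables,
 * alloc_pgste asks that contexts cloned from it be created with them. */
struct mm_context {
        int has_pgste;
        int alloc_pgste;
};

struct mm {
        struct mm_context context;
};

/* Rough model of init_new_context(): only a parent with alloc_pgste set
 * produces a child context flagged has_pgste. */
static void model_init_new_context(const struct mm *parent, struct mm *child)
{
        memset(&child->context, 0, sizeof(child->context));
        if (parent->context.alloc_pgste) {
                child->context.has_pgste = 1;
                child->context.alloc_pgste = 1;
        }
}

int main(void)
{
        struct mm old_mm = { { 0, 0 } }, new_mm;

        /* s390_enable_sie()-style sequence: request pgstes, clone, reset. */
        old_mm.context.alloc_pgste = 1;
        model_init_new_context(&old_mm, &new_mm); /* what dup_mm() triggers */
        old_mm.context.alloc_pgste = 0;

        printf("old mm: has_pgste=%d, new mm: has_pgste=%d\n",
               old_mm.context.has_pgste, new_mm.context.has_pgste);
        return 0;
}

Run, the sketch prints old mm: has_pgste=0, new mm: has_pgste=1, mirroring how s390_enable_sie() leaves the original context untouched while the duplicated mm gets extended page tables.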