-rw-r--r--  arch/ia64/ia32/binfmt_elf32.c |  3
-rw-r--r--  arch/x86/kernel/sys_x86_64.c  | 14
-rw-r--r--  arch/x86/mm/mmap_64.c         |  5
-rw-r--r--  fs/binfmt_elf.c               |  7
4 files changed, 15 insertions, 14 deletions
diff --git a/arch/ia64/ia32/binfmt_elf32.c b/arch/ia64/ia32/binfmt_elf32.c
index 2a662215359..4f0c30c38e9 100644
--- a/arch/ia64/ia32/binfmt_elf32.c
+++ b/arch/ia64/ia32/binfmt_elf32.c
@@ -222,7 +222,8 @@ elf32_set_personality (void)
 }
 
 static unsigned long
-elf32_map (struct file *filep, unsigned long addr, struct elf_phdr *eppnt, int prot, int type, unsigned long unused)
+elf32_map(struct file *filep, unsigned long addr, struct elf_phdr *eppnt,
+	  int prot, int type, unsigned long unused)
 {
 	unsigned long pgoff = (eppnt->p_vaddr) & ~IA32_PAGE_MASK;
 
diff --git a/arch/x86/kernel/sys_x86_64.c b/arch/x86/kernel/sys_x86_64.c
index 95485e63fd2..bd802a5e1aa 100644
--- a/arch/x86/kernel/sys_x86_64.c
+++ b/arch/x86/kernel/sys_x86_64.c
@@ -182,9 +182,9 @@ arch_get_unmapped_area_topdown(struct file *filp, const unsigned long addr0,
 
 	/* check if free_area_cache is useful for us */
 	if (len <= mm->cached_hole_size) {
-                mm->cached_hole_size = 0;
-                mm->free_area_cache = mm->mmap_base;
-        }
+		mm->cached_hole_size = 0;
+		mm->free_area_cache = mm->mmap_base;
+	}
 
 	/* either no address requested or can't fit in requested address hole */
 	addr = mm->free_area_cache;
@@ -213,9 +213,9 @@ arch_get_unmapped_area_topdown(struct file *filp, const unsigned long addr0,
 			/* remember the address as a hint for next time */
 			return (mm->free_area_cache = addr);
 
-                /* remember the largest hole we saw so far */
-                if (addr + mm->cached_hole_size < vma->vm_start)
-                        mm->cached_hole_size = vma->vm_start - addr;
+		/* remember the largest hole we saw so far */
+		if (addr + mm->cached_hole_size < vma->vm_start)
+			mm->cached_hole_size = vma->vm_start - addr;
 
 		/* try just below the current vma->vm_start */
 		addr = vma->vm_start-len;
@@ -229,7 +229,7 @@ bottomup:
 	 * allocations.
 	 */
 	mm->cached_hole_size = ~0UL;
-        mm->free_area_cache = TASK_UNMAPPED_BASE;
+	mm->free_area_cache = TASK_UNMAPPED_BASE;
 	addr = arch_get_unmapped_area(filp, addr0, len, pgoff, flags);
 	/*
 	 * Restore the topdown base:
diff --git a/arch/x86/mm/mmap_64.c b/arch/x86/mm/mmap_64.c
index 8cf03ea651f..65b34f226f1 100644
--- a/arch/x86/mm/mmap_64.c
+++ b/arch/x86/mm/mmap_64.c
@@ -100,7 +100,7 @@ void arch_pick_mmap_layout(struct mm_struct *mm)
 		/* ia32_pick_mmap_layout has its own. */
 		return ia32_pick_mmap_layout(mm);
 #endif
-	} else if(mmap_is_legacy()) {
+	} else if (mmap_is_legacy()) {
 		mm->mmap_base = TASK_UNMAPPED_BASE;
 		mm->get_unmapped_area = arch_get_unmapped_area;
 		mm->unmap_area = arch_unmap_area;
@@ -111,7 +111,6 @@ void arch_pick_mmap_layout(struct mm_struct *mm)
 		if (current->flags & PF_RANDOMIZE)
 			rnd = -rnd;
 	}
-	if (current->flags & PF_RANDOMIZE) {
+	if (current->flags & PF_RANDOMIZE)
 		mm->mmap_base += ((long)rnd) << PAGE_SHIFT;
-	}
 }
diff --git a/fs/binfmt_elf.c b/fs/binfmt_elf.c
index 8193d24be15..b8bca1ebc1a 100644
--- a/fs/binfmt_elf.c
+++ b/fs/binfmt_elf.c
@@ -45,7 +45,8 @@
 
 static int load_elf_binary(struct linux_binprm *bprm, struct pt_regs *regs);
 static int load_elf_library(struct file *);
-static unsigned long elf_map (struct file *, unsigned long, struct elf_phdr *, int, int, unsigned long);
+static unsigned long elf_map(struct file *, unsigned long, struct elf_phdr *,
+				int, int, unsigned long);
 
 /*
  * If we don't support core dumping, then supply a NULL so we
@@ -435,7 +436,7 @@ static unsigned long load_elf_interp(struct elfhdr *interp_elf_ex,
 			load_addr = -vaddr;
 
 		map_addr = elf_map(interpreter, load_addr + vaddr,
-				eppnt, elf_prot, elf_type, total_size);
+				   eppnt, elf_prot, elf_type, total_size);
 		total_size = 0;
 		if (!*interp_map_addr)
 			*interp_map_addr = map_addr;
@@ -936,7 +937,7 @@ static int load_elf_binary(struct linux_binprm *bprm, struct pt_regs *regs)
 		}
 
 		error = elf_map(bprm->file, load_bias + vaddr, elf_ppnt,
-				elf_prot, elf_flags,0);
+				elf_prot, elf_flags, 0);
 		if (BAD_ADDR(error)) {
 			send_sig(SIGKILL, current, 0);
 			retval = IS_ERR((void *)error) ?
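
All four hunks above are coding-style cleanups with no functional change: the overlong elf_map()/elf32_map() prototypes are wrapped, "else if(" gains a space after the keyword, the redundant braces around the single-statement PF_RANDOMIZE block are dropped, and indentation and argument spacing are normalized. As a quick reference, a minimal standalone sketch of the same conventions follows; the function example_pick_base and the shift of 12 (standing in for PAGE_SHIFT) are hypothetical and not part of the patch.

/* Hypothetical sketch only -- none of these names appear in the patch above. */

/* Long parameter lists wrap at the open parenthesis; no space before '(' */
static unsigned long example_pick_base(unsigned long base, int legacy,
				       int randomize, long rnd)
{
	if (legacy) {
		rnd = 0;		/* multi-statement branches keep braces */
	} else if (randomize) {		/* space between the keyword and '(' */
		rnd = -rnd;
	}

	if (randomize)			/* single statement: no braces needed */
		base += (unsigned long)rnd << 12;

	return base;
}

The last two statements mirror the mmap_64.c hunk, where mm->mmap_base += ((long)rnd) << PAGE_SHIFT; loses its braces once the surrounding if controls only that one statement.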