-rw-r--r--   fs/exec.c                |  6 ++++++
-rw-r--r--   include/linux/sched.h    |  4 ++++
-rw-r--r--   kernel/sched.c           |  5 +----
-rw-r--r--   kernel/sched_debug.c     |  8 +++++---
-rw-r--r--   kernel/sched_stats.h     |  3 ++-
-rw-r--r--   kernel/time/tick-sched.c |  2 ++
-rw-r--r--   mm/page_alloc.c          |  1 -
-rw-r--r--   mm/shmem.c               |  5 +++--
8 files changed, 23 insertions(+), 11 deletions(-)
diff --git a/fs/exec.c b/fs/exec.c
index 4ccaaa4b13b..282240afe99 100644
--- a/fs/exec.c
+++ b/fs/exec.c
@@ -1780,6 +1780,12 @@ int do_coredump(long signr, int exit_code, struct pt_regs * regs)
 	   but keep the previous behaviour for now. */
 	if (!ispipe && !S_ISREG(inode->i_mode))
 		goto close_fail;
+	/*
+	 * Dont allow local users get cute and trick others to coredump
+	 * into their pre-created files:
+	 */
+	if (inode->i_uid != current->fsuid)
+		goto close_fail;
 	if (!file->f_op)
 		goto close_fail;
 	if (!file->f_op->write)
diff --git a/include/linux/sched.h b/include/linux/sched.h
index ee800e7a70d..ac3d496fbd2 100644
--- a/include/linux/sched.h
+++ b/include/linux/sched.h
@@ -282,6 +282,10 @@ static inline void touch_all_softlockup_watchdogs(void)
 
 /* Attach to any functions which should be ignored in wchan output. */
 #define __sched		__attribute__((__section__(".sched.text")))
+
+/* Linker adds these: start and end of __sched functions */
+extern char __sched_text_start[], __sched_text_end[];
+
 /* Is this address in the __sched functions? */
 extern int in_sched_functions(unsigned long addr);
 
diff --git a/kernel/sched.c b/kernel/sched.c
index 38933cafea8..98dcdf272db 100644
--- a/kernel/sched.c
+++ b/kernel/sched.c
@@ -5466,7 +5466,7 @@ sd_alloc_ctl_domain_table(struct sched_domain *sd)
 	return table;
 }
 
-static ctl_table * sd_alloc_ctl_cpu_table(int cpu)
+static ctl_table *sd_alloc_ctl_cpu_table(int cpu)
 {
 	struct ctl_table *entry, *table;
 	struct sched_domain *sd;
@@ -6708,9 +6708,6 @@ void __init sched_init_smp(void)
 
 int in_sched_functions(unsigned long addr)
 {
-	/* Linker adds these: start and end of __sched functions */
-	extern char __sched_text_start[], __sched_text_end[];
-
 	return in_lock_functions(addr) ||
 		(addr >= (unsigned long)__sched_text_start
 		&& addr < (unsigned long)__sched_text_end);
diff --git a/kernel/sched_debug.c b/kernel/sched_debug.c
index 5d0d623a546..d30467b47dd 100644
--- a/kernel/sched_debug.c
+++ b/kernel/sched_debug.c
@@ -327,10 +327,12 @@ void proc_sched_show_task(struct task_struct *p, struct seq_file *m)
 			avg_atom = -1LL;
 
 		avg_per_cpu = p->se.sum_exec_runtime;
-		if (p->se.nr_migrations)
-			avg_per_cpu = div64_64(avg_per_cpu, p->se.nr_migrations);
-		else
+		if (p->se.nr_migrations) {
+			avg_per_cpu = div64_64(avg_per_cpu,
+					       p->se.nr_migrations);
+		} else {
 			avg_per_cpu = -1LL;
+		}
 
 		__PN(avg_atom);
 		__PN(avg_per_cpu);
diff --git a/kernel/sched_stats.h b/kernel/sched_stats.h
index 630178e53bb..5b32433e7ee 100644
--- a/kernel/sched_stats.h
+++ b/kernel/sched_stats.h
@@ -52,7 +52,8 @@ static int show_schedstat(struct seq_file *seq, void *v)
 				    sd->lb_nobusyq[itype],
 				    sd->lb_nobusyg[itype]);
 			}
-			seq_printf(seq, " %u %u %u %u %u %u %u %u %u %u %u %u\n",
+			seq_printf(seq,
+				   " %u %u %u %u %u %u %u %u %u %u %u %u\n",
 			    sd->alb_count, sd->alb_failed, sd->alb_pushed,
 			    sd->sbe_count, sd->sbe_balanced, sd->sbe_pushed,
 			    sd->sbf_count, sd->sbf_balanced, sd->sbf_pushed,
diff --git a/kernel/time/tick-sched.c b/kernel/time/tick-sched.c
index 27a2338deb4..cb89fa8db11 100644
--- a/kernel/time/tick-sched.c
+++ b/kernel/time/tick-sched.c
@@ -133,6 +133,8 @@ void tick_nohz_update_jiffies(void)
 	if (!ts->tick_stopped)
 		return;
 
+	touch_softlockup_watchdog();
+
 	cpu_clear(cpu, nohz_cpu_mask);
 	now = ktime_get();
 
diff --git a/mm/page_alloc.c b/mm/page_alloc.c
index 12376ae3f73..4ffed1cd158 100644
--- a/mm/page_alloc.c
+++ b/mm/page_alloc.c
@@ -305,7 +305,6 @@ static inline void prep_zero_page(struct page *page, int order,
 			gfp_t gfp_flags)
 {
 	int i;
-	VM_BUG_ON((gfp_flags & (__GFP_WAIT | __GFP_HIGHMEM)) == __GFP_HIGHMEM);
 	/*
 	 * clear_highpage() will use KM_USER0, so it's a bug to use __GFP_ZERO
 	 * and __GFP_HIGHMEM from hard or soft interrupt context.
diff --git a/mm/shmem.c b/mm/shmem.c
index 253d205914b..51b3d6ccdda 100644
--- a/mm/shmem.c
+++ b/mm/shmem.c
@@ -1072,7 +1072,7 @@ shmem_alloc_page(gfp_t gfp, struct shmem_inode_info *info,
 	pvma.vm_policy = mpol_shared_policy_lookup(&info->policy, idx);
 	pvma.vm_pgoff = idx;
 	pvma.vm_end = PAGE_SIZE;
-	page = alloc_page_vma(gfp | __GFP_ZERO, &pvma, 0);
+	page = alloc_page_vma(gfp, &pvma, 0);
 	mpol_free(pvma.vm_policy);
 	return page;
 }
@@ -1093,7 +1093,7 @@ shmem_swapin(struct shmem_inode_info *info,swp_entry_t entry,unsigned long idx)
 
 static inline struct page *
 shmem_alloc_page(gfp_t gfp,struct shmem_inode_info *info, unsigned long idx)
 {
-	return alloc_page(gfp | __GFP_ZERO);
+	return alloc_page(gfp);
 }
 #endif
@@ -1306,6 +1306,7 @@ repeat:
 		info->alloced++;
 		spin_unlock(&info->lock);
 
+		clear_highpage(filepage);
 		flush_dcache_page(filepage);
 		SetPageUptodate(filepage);
 	}