Diffstat (limited to 'kernel')
-rw-r--r--  kernel/Kconfig.instrumentation | 49
-rw-r--r--  kernel/Makefile | 5
-rw-r--r--  kernel/capability.c | 113
-rw-r--r--  kernel/exit.c | 9
-rw-r--r--  kernel/fork.c | 24
-rw-r--r--  kernel/hrtimer.c | 9
-rw-r--r--  kernel/kallsyms.c | 11
-rw-r--r--  kernel/kprobes.c | 9
-rw-r--r--  kernel/latency.c | 280
-rw-r--r--  kernel/notifier.c | 1
-rw-r--r--  kernel/params.c | 2
-rw-r--r--  kernel/pm_qos_params.c | 425
-rw-r--r--  kernel/posix-timers.c | 11
-rw-r--r--  kernel/power/Kconfig | 65
-rw-r--r--  kernel/power/disk.c | 208
-rw-r--r--  kernel/power/main.c | 171
-rw-r--r--  kernel/power/power.h | 90
-rw-r--r--  kernel/power/snapshot.c | 35
-rw-r--r--  kernel/power/swap.c | 33
-rw-r--r--  kernel/power/swsusp.c | 48
-rw-r--r--  kernel/power/user.c | 109
-rw-r--r--  kernel/printk.c | 36
-rw-r--r--  kernel/ptrace.c | 11
-rw-r--r--  kernel/relay.c | 25
-rw-r--r--  kernel/signal.c | 110
-rw-r--r--  kernel/softlockup.c | 30
-rw-r--r--  kernel/srcu.c | 3
-rw-r--r--  kernel/stop_machine.c | 6
-rw-r--r--  kernel/sys.c | 37
-rw-r--r--  kernel/sys_ni.c | 7
-rw-r--r--  kernel/sysctl.c | 61
-rw-r--r--  kernel/sysctl_check.c | 7
-rw-r--r--  kernel/test_kprobes.c | 16
-rw-r--r--  kernel/time.c | 13
-rw-r--r--  kernel/time/clocksource.c | 19
-rw-r--r--  kernel/timer.c | 10
36 files changed, 1277 insertions, 821 deletions
diff --git a/kernel/Kconfig.instrumentation b/kernel/Kconfig.instrumentation
deleted file mode 100644
index 468f47ad750..00000000000
--- a/kernel/Kconfig.instrumentation
+++ /dev/null
@@ -1,49 +0,0 @@
-menuconfig INSTRUMENTATION
- bool "Instrumentation Support"
- default y
- ---help---
- Say Y here to get to see options related to performance measurement,
- system-wide debugging, and testing. This option alone does not add any
- kernel code.
-
- If you say N, all options in this submenu will be skipped and
- disabled. If you're trying to debug the kernel itself, go see the
- Kernel Hacking menu.
-
-if INSTRUMENTATION
-
-config PROFILING
- bool "Profiling support (EXPERIMENTAL)"
- help
- Say Y here to enable the extended profiling support mechanisms used
- by profilers such as OProfile.
-
-config OPROFILE
- tristate "OProfile system profiling (EXPERIMENTAL)"
- depends on PROFILING && !UML
- depends on ARCH_SUPPORTS_OPROFILE || ALPHA || ARM || BLACKFIN || IA64 || M32R || PARISC || PPC || S390 || SUPERH || SPARC
- help
- OProfile is a profiling system capable of profiling the
- whole system, include the kernel, kernel modules, libraries,
- and applications.
-
- If unsure, say N.
-
-config KPROBES
- bool "Kprobes"
- depends on KALLSYMS && MODULES && !UML
- depends on X86_32 || IA64 || PPC || S390 || SPARC64 || X86_64 || AVR32
- help
- Kprobes allows you to trap at almost any kernel address and
- execute a callback function. register_kprobe() establishes
- a probepoint and specifies the callback. Kprobes is useful
- for kernel debugging, non-intrusive instrumentation and testing.
- If in doubt, say "N".
-
-config MARKERS
- bool "Activate markers"
- help
- Place an empty function call at each marker site. Can be
- dynamically changed for a probe function.
-
-endif # INSTRUMENTATION
diff --git a/kernel/Makefile b/kernel/Makefile
index 8885627ea02..135a1b94344 100644
--- a/kernel/Makefile
+++ b/kernel/Makefile
@@ -8,8 +8,8 @@ obj-y = sched.o fork.o exec_domain.o panic.o printk.o profile.o \
signal.o sys.o kmod.o workqueue.o pid.o \
rcupdate.o extable.o params.o posix-timers.o \
kthread.o wait.o kfifo.o sys_ni.o posix-cpu-timers.o mutex.o \
- hrtimer.o rwsem.o latency.o nsproxy.o srcu.o \
- utsname.o notifier.o
+ hrtimer.o rwsem.o nsproxy.o srcu.o \
+ utsname.o notifier.o ksysfs.o pm_qos_params.o
obj-$(CONFIG_SYSCTL) += sysctl_check.o
obj-$(CONFIG_STACKTRACE) += stacktrace.o
@@ -49,7 +49,6 @@ obj-$(CONFIG_AUDIT) += audit.o auditfilter.o
obj-$(CONFIG_AUDITSYSCALL) += auditsc.o
obj-$(CONFIG_AUDIT_TREE) += audit_tree.o
obj-$(CONFIG_KPROBES) += kprobes.o
-obj-$(CONFIG_SYSFS) += ksysfs.o
obj-$(CONFIG_DETECT_SOFTLOCKUP) += softlockup.o
obj-$(CONFIG_GENERIC_HARDIRQS) += irq/
obj-$(CONFIG_SECCOMP) += seccomp.o
diff --git a/kernel/capability.c b/kernel/capability.c
index efbd9cdce13..39e8193b41e 100644
--- a/kernel/capability.c
+++ b/kernel/capability.c
@@ -22,6 +22,37 @@
static DEFINE_SPINLOCK(task_capability_lock);
/*
+ * Leveraged for setting/resetting capabilities
+ */
+
+const kernel_cap_t __cap_empty_set = CAP_EMPTY_SET;
+const kernel_cap_t __cap_full_set = CAP_FULL_SET;
+const kernel_cap_t __cap_init_eff_set = CAP_INIT_EFF_SET;
+
+EXPORT_SYMBOL(__cap_empty_set);
+EXPORT_SYMBOL(__cap_full_set);
+EXPORT_SYMBOL(__cap_init_eff_set);
+
+/*
+ * More recent versions of libcap are available from:
+ *
+ * http://www.kernel.org/pub/linux/libs/security/linux-privs/
+ */
+
+static void warn_legacy_capability_use(void)
+{
+ static int warned;
+ if (!warned) {
+ char name[sizeof(current->comm)];
+
+ printk(KERN_INFO "warning: `%s' uses 32-bit capabilities"
+ " (legacy support in use)\n",
+ get_task_comm(name, current));
+ warned = 1;
+ }
+}
+
+/*
* For sys_getproccap() and sys_setproccap(), any of the three
* capability set pointers may be NULL -- indicating that that set is
* uninteresting and/or not to be changed.
@@ -42,12 +73,21 @@ asmlinkage long sys_capget(cap_user_header_t header, cap_user_data_t dataptr)
pid_t pid;
__u32 version;
struct task_struct *target;
- struct __user_cap_data_struct data;
+ unsigned tocopy;
+ kernel_cap_t pE, pI, pP;
if (get_user(version, &header->version))
return -EFAULT;
- if (version != _LINUX_CAPABILITY_VERSION) {
+ switch (version) {
+ case _LINUX_CAPABILITY_VERSION_1:
+ warn_legacy_capability_use();
+ tocopy = _LINUX_CAPABILITY_U32S_1;
+ break;
+ case _LINUX_CAPABILITY_VERSION_2:
+ tocopy = _LINUX_CAPABILITY_U32S_2;
+ break;
+ default:
if (put_user(_LINUX_CAPABILITY_VERSION, &header->version))
return -EFAULT;
return -EINVAL;
@@ -71,14 +111,47 @@ asmlinkage long sys_capget(cap_user_header_t header, cap_user_data_t dataptr)
} else
target = current;
- ret = security_capget(target, &data.effective, &data.inheritable, &data.permitted);
+ ret = security_capget(target, &pE, &pI, &pP);
out:
read_unlock(&tasklist_lock);
spin_unlock(&task_capability_lock);
- if (!ret && copy_to_user(dataptr, &data, sizeof data))
- return -EFAULT;
+ if (!ret) {
+ struct __user_cap_data_struct kdata[_LINUX_CAPABILITY_U32S];
+ unsigned i;
+
+ for (i = 0; i < tocopy; i++) {
+ kdata[i].effective = pE.cap[i];
+ kdata[i].permitted = pP.cap[i];
+ kdata[i].inheritable = pI.cap[i];
+ }
+
+ /*
+ * Note: in the case tocopy < _LINUX_CAPABILITY_U32S,
+ * we silently drop the upper capabilities here. This
+ * has the effect of making older libcap
+ * implementations implicitly drop upper capability
+ * bits when they perform a: capget/modify/capset
+ * sequence.
+ *
+ * This behavior is considered fail-safe
+ * behavior. Upgrading the application to a newer
+ * version of libcap will enable access to the newer
+ * capabilities.
+ *
+ * An alternative would be to return an error here
+ * (-ERANGE), but that causes legacy applications to
+ * unexpectedly fail; the capget/modify/capset aborts
+ * before modification is attempted and the application
+ * fails.
+ */
+
+ if (copy_to_user(dataptr, kdata, tocopy
+ * sizeof(struct __user_cap_data_struct))) {
+ return -EFAULT;
+ }
+ }
return ret;
}
@@ -167,6 +240,8 @@ static inline int cap_set_all(kernel_cap_t *effective,
*/
asmlinkage long sys_capset(cap_user_header_t header, const cap_user_data_t data)
{
+ struct __user_cap_data_struct kdata[_LINUX_CAPABILITY_U32S];
+ unsigned i, tocopy;
kernel_cap_t inheritable, permitted, effective;
__u32 version;
struct task_struct *target;
@@ -176,7 +251,15 @@ asmlinkage long sys_capset(cap_user_header_t header, const cap_user_data_t data)
if (get_user(version, &header->version))
return -EFAULT;
- if (version != _LINUX_CAPABILITY_VERSION) {
+ switch (version) {
+ case _LINUX_CAPABILITY_VERSION_1:
+ warn_legacy_capability_use();
+ tocopy = _LINUX_CAPABILITY_U32S_1;
+ break;
+ case _LINUX_CAPABILITY_VERSION_2:
+ tocopy = _LINUX_CAPABILITY_U32S_2;
+ break;
+ default:
if (put_user(_LINUX_CAPABILITY_VERSION, &header->version))
return -EFAULT;
return -EINVAL;
@@ -188,10 +271,22 @@ asmlinkage long sys_capset(cap_user_header_t header, const cap_user_data_t data)
if (pid && pid != task_pid_vnr(current) && !capable(CAP_SETPCAP))
return -EPERM;
- if (copy_from_user(&effective, &data->effective, sizeof(effective)) ||
- copy_from_user(&inheritable, &data->inheritable, sizeof(inheritable)) ||
- copy_from_user(&permitted, &data->permitted, sizeof(permitted)))
+ if (copy_from_user(&kdata, data, tocopy
+ * sizeof(struct __user_cap_data_struct))) {
return -EFAULT;
+ }
+
+ for (i = 0; i < tocopy; i++) {
+ effective.cap[i] = kdata[i].effective;
+ permitted.cap[i] = kdata[i].permitted;
+ inheritable.cap[i] = kdata[i].inheritable;
+ }
+ while (i < _LINUX_CAPABILITY_U32S) {
+ effective.cap[i] = 0;
+ permitted.cap[i] = 0;
+ inheritable.cap[i] = 0;
+ i++;
+ }
spin_lock(&task_capability_lock);
read_lock(&tasklist_lock);
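The versioned sys_capget()/sys_capset() paths above copy one __user_cap_data_struct block for legacy (V1) callers and two blocks for 64-bit-capable (V2) callers, rewriting header->version when they reject an unknown version. A minimal user-space sketch of that contract, assuming the _LINUX_CAPABILITY_VERSION_* constants from <linux/capability.h> that this series introduces; it is an illustration, not part of the patch:

/* Hedged sketch: probe which capability ABI the running kernel speaks.
 * Error handling is deliberately minimal. */
#include <linux/capability.h>
#include <sys/syscall.h>
#include <unistd.h>
#include <string.h>
#include <stdio.h>

int main(void)
{
        struct __user_cap_header_struct hdr;
        struct __user_cap_data_struct data[2];  /* room for two u32 blocks (V2) */

        memset(&hdr, 0, sizeof(hdr));
        hdr.version = _LINUX_CAPABILITY_VERSION_2;  /* 64-bit sets: two blocks */
        hdr.pid = 0;                                /* 0 means the calling task */

        if (syscall(SYS_capget, &hdr, data) == 0) {
                printf("pE[0]=%#x pE[1]=%#x\n",
                       data[0].effective, data[1].effective);
        } else {
                /* Older kernel: it rewrites hdr.version to the version it
                 * supports, exactly as the default: branch above does. */
                printf("kernel prefers capability version %#x\n", hdr.version);
        }
        return 0;
}

A V1 caller against this kernel still works, but trips warn_legacy_capability_use() once and silently loses any capability bits above the first 32, which is the fail-safe behavior the long comment above describes.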
diff --git a/kernel/exit.c b/kernel/exit.c
index bfb1c0e940e..eb9934a82fc 100644
--- a/kernel/exit.c
+++ b/kernel/exit.c
@@ -50,8 +50,6 @@
#include <asm/pgtable.h>
#include <asm/mmu_context.h>
-extern void sem_exit (void);
-
static void exit_mm(struct task_struct * tsk);
static void __unhash_process(struct task_struct *p)
@@ -1085,11 +1083,12 @@ do_group_exit(int exit_code)
struct signal_struct *const sig = current->signal;
struct sighand_struct *const sighand = current->sighand;
spin_lock_irq(&sighand->siglock);
- if (sig->flags & SIGNAL_GROUP_EXIT)
+ if (signal_group_exit(sig))
/* Another thread got here before we took the lock. */
exit_code = sig->group_exit_code;
else {
sig->group_exit_code = exit_code;
+ sig->flags = SIGNAL_GROUP_EXIT;
zap_other_threads(current);
}
spin_unlock_irq(&sighand->siglock);
@@ -1591,8 +1590,6 @@ repeat:
goto repeat;
if (retval != 0) /* He released the lock. */
goto end;
- } else if (p->exit_state == EXIT_DEAD) {
- continue;
} else if (p->exit_state == EXIT_ZOMBIE) {
/*
* Eligible but we cannot release it yet:
@@ -1607,7 +1604,7 @@ repeat:
/* He released the lock. */
if (retval != 0)
goto end;
- } else {
+ } else if (p->exit_state != EXIT_DEAD) {
check_continued:
/*
* It's running now, so it might later
diff --git a/kernel/fork.c b/kernel/fork.c
index 05e0b6f4365..3995297567a 100644
--- a/kernel/fork.c
+++ b/kernel/fork.c
@@ -325,7 +325,7 @@ static inline int mm_alloc_pgd(struct mm_struct * mm)
static inline void mm_free_pgd(struct mm_struct * mm)
{
- pgd_free(mm->pgd);
+ pgd_free(mm, mm->pgd);
}
#else
#define dup_mmap(mm, oldmm) (0)
@@ -1118,6 +1118,7 @@ static struct task_struct *copy_process(unsigned long clone_flags,
#ifdef CONFIG_SECURITY
p->security = NULL;
#endif
+ p->cap_bset = current->cap_bset;
p->io_context = NULL;
p->audit_context = NULL;
cgroup_fork(p);
@@ -1398,7 +1399,7 @@ fork_out:
return ERR_PTR(retval);
}
-noinline struct pt_regs * __devinit __attribute__((weak)) idle_regs(struct pt_regs *regs)
+noinline struct pt_regs * __cpuinit __attribute__((weak)) idle_regs(struct pt_regs *regs)
{
memset(regs, 0, sizeof(struct pt_regs));
return regs;
@@ -1450,6 +1451,23 @@ long do_fork(unsigned long clone_flags,
int trace = 0;
long nr;
+ /*
+ * We hope to recycle these flags after 2.6.26
+ */
+ if (unlikely(clone_flags & CLONE_STOPPED)) {
+ static int __read_mostly count = 100;
+
+ if (count > 0 && printk_ratelimit()) {
+ char comm[TASK_COMM_LEN];
+
+ count--;
+ printk(KERN_INFO "fork(): process `%s' used deprecated "
+ "clone flags 0x%lx\n",
+ get_task_comm(comm, current),
+ clone_flags & CLONE_STOPPED);
+ }
+ }
+
if (unlikely(current->ptrace)) {
trace = fork_traceflag (clone_flags);
if (trace)
@@ -1492,7 +1510,7 @@ long do_fork(unsigned long clone_flags,
if (!(clone_flags & CLONE_STOPPED))
wake_up_new_task(p, clone_flags);
else
- p->state = TASK_STOPPED;
+ __set_task_state(p, TASK_STOPPED);
if (unlikely (trace)) {
current->ptrace_message = nr;
diff --git a/kernel/hrtimer.c b/kernel/hrtimer.c
index 1069998fe25..668f3967eb3 100644
--- a/kernel/hrtimer.c
+++ b/kernel/hrtimer.c
@@ -306,7 +306,7 @@ EXPORT_SYMBOL_GPL(ktime_sub_ns);
/*
* Divide a ktime value by a nanosecond value
*/
-unsigned long ktime_divns(const ktime_t kt, s64 div)
+u64 ktime_divns(const ktime_t kt, s64 div)
{
u64 dclc, inc, dns;
int sft = 0;
@@ -321,7 +321,7 @@ unsigned long ktime_divns(const ktime_t kt, s64 div)
dclc >>= sft;
do_div(dclc, (unsigned long) div);
- return (unsigned long) dclc;
+ return dclc;
}
#endif /* BITS_PER_LONG >= 64 */
@@ -656,10 +656,9 @@ void unlock_hrtimer_base(const struct hrtimer *timer, unsigned long *flags)
* Forward the timer expiry so it will expire in the future.
* Returns the number of overruns.
*/
-unsigned long
-hrtimer_forward(struct hrtimer *timer, ktime_t now, ktime_t interval)
+u64 hrtimer_forward(struct hrtimer *timer, ktime_t now, ktime_t interval)
{
- unsigned long orun = 1;
+ u64 orun = 1;
ktime_t delta;
delta = ktime_sub(now, timer->expires);
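The hrtimer.c hunks above widen the return type of ktime_divns() and hrtimer_forward() from unsigned long to u64, so a large overrun count no longer truncates on 32-bit machines (the posix-timers callers later in this diff cast it back down explicitly). A small arithmetic sketch of why the wider type matters; plain user-space C, not kernel code:

/* Hedged illustration: a 1 usec interval that has fallen ~83 minutes behind
 * produces about 5e9 overruns, which does not fit in a 32-bit counter. */
#include <stdint.h>
#include <stdio.h>

int main(void)
{
        uint64_t interval_ns = 1000;                     /* 1 usec period */
        uint64_t behind_ns   = 5000ULL * 1000000000ULL;  /* 5000 s late */
        uint64_t orun = behind_ns / interval_ns;         /* ~5e9 overruns */

        printf("true overruns: %llu, truncated to 32 bits: %lu\n",
               (unsigned long long)orun, (unsigned long)(uint32_t)orun);
        return 0;
}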
diff --git a/kernel/kallsyms.c b/kernel/kallsyms.c
index 7dadc71ce51..f091d13def0 100644
--- a/kernel/kallsyms.c
+++ b/kernel/kallsyms.c
@@ -53,14 +53,6 @@ static inline int is_kernel_inittext(unsigned long addr)
return 0;
}
-static inline int is_kernel_extratext(unsigned long addr)
-{
- if (addr >= (unsigned long)_sextratext
- && addr <= (unsigned long)_eextratext)
- return 1;
- return 0;
-}
-
static inline int is_kernel_text(unsigned long addr)
{
if (addr >= (unsigned long)_stext && addr <= (unsigned long)_etext)
@@ -80,8 +72,7 @@ static int is_ksym_addr(unsigned long addr)
if (all_var)
return is_kernel(addr);
- return is_kernel_text(addr) || is_kernel_inittext(addr) ||
- is_kernel_extratext(addr);
+ return is_kernel_text(addr) || is_kernel_inittext(addr);
}
/* expand a compressed symbol data into the resulting uncompressed string,
diff --git a/kernel/kprobes.c b/kernel/kprobes.c
index d0493eafea3..7a86e643233 100644
--- a/kernel/kprobes.c
+++ b/kernel/kprobes.c
@@ -699,6 +699,12 @@ static int __kprobes pre_handler_kretprobe(struct kprobe *p,
struct kretprobe_instance, uflist);
ri->rp = rp;
ri->task = current;
+
+ if (rp->entry_handler && rp->entry_handler(ri, regs)) {
+ spin_unlock_irqrestore(&kretprobe_lock, flags);
+ return 0;
+ }
+
arch_prepare_kretprobe(ri, regs);
/* XXX(hch): why is there no hlist_move_head? */
@@ -745,7 +751,8 @@ int __kprobes register_kretprobe(struct kretprobe *rp)
INIT_HLIST_HEAD(&rp->used_instances);
INIT_HLIST_HEAD(&rp->free_instances);
for (i = 0; i < rp->maxactive; i++) {
- inst = kmalloc(sizeof(struct kretprobe_instance), GFP_KERNEL);
+ inst = kmalloc(sizeof(struct kretprobe_instance) +
+ rp->data_size, GFP_KERNEL);
if (inst == NULL) {
free_rp_inst(rp);
return -ENOMEM;
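The kprobes.c hunks above add two pieces to kretprobes: an optional entry_handler that can veto accounting of a particular call, and data_size extra bytes allocated per kretprobe_instance for the handlers to share. A hedged sketch of a module using both, under the assumption that the per-instance storage is reachable through ri->data as in the in-tree kretprobe examples; the probed symbol is a hypothetical choice:

/* Hedged sketch of the new entry_handler/data_size pair: stamp entry time
 * in per-instance data, report call latency on return. Illustration only,
 * not a tested module. */
#include <linux/kernel.h>
#include <linux/module.h>
#include <linux/kprobes.h>
#include <linux/ktime.h>

struct probe_data {
        ktime_t entry_stamp;
};

static int entry_handler(struct kretprobe_instance *ri, struct pt_regs *regs)
{
        struct probe_data *d = (struct probe_data *)ri->data;

        d->entry_stamp = ktime_get();
        return 0;       /* 0 == account this instance; non-zero skips it */
}

static int ret_handler(struct kretprobe_instance *ri, struct pt_regs *regs)
{
        struct probe_data *d = (struct probe_data *)ri->data;
        s64 delta = ktime_to_ns(ktime_sub(ktime_get(), d->entry_stamp));

        printk(KERN_INFO "probed call took %lld ns\n", (long long)delta);
        return 0;
}

static struct kretprobe my_kretprobe = {
        .handler        = ret_handler,
        .entry_handler  = entry_handler,
        .data_size      = sizeof(struct probe_data), /* extra bytes/instance */
        .maxactive      = 20,
};

static int __init probe_init(void)
{
        my_kretprobe.kp.symbol_name = "do_fork";     /* hypothetical target */
        return register_kretprobe(&my_kretprobe);
}

static void __exit probe_exit(void)
{
        unregister_kretprobe(&my_kretprobe);
}

module_init(probe_init);
module_exit(probe_exit);
MODULE_LICENSE("GPL");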
diff --git a/kernel/latency.c b/kernel/latency.c
deleted file mode 100644
index e63fcacb61a..00000000000
--- a/kernel/latency.c
+++ /dev/null
@@ -1,280 +0,0 @@
-/*
- * latency.c: Explicit system-wide latency-expectation infrastructure
- *
- * The purpose of this infrastructure is to allow device drivers to set
- * latency constraint they have and to collect and summarize these
- * expectations globally. The cummulated result can then be used by
- * power management and similar users to make decisions that have
- * tradoffs with a latency component.
- *
- * An example user of this are the x86 C-states; each higher C state saves
- * more power, but has a higher exit latency. For the idle loop power
- * code to make a good decision which C-state to use, information about
- * acceptable latencies is required.
- *
- * An example announcer of latency is an audio driver that knowns it
- * will get an interrupt when the hardware has 200 usec of samples
- * left in the DMA buffer; in that case the driver can set a latency
- * constraint of, say, 150 usec.
- *
- * Multiple drivers can each announce their maximum accepted latency,
- * to keep these appart, a string based identifier is used.
- *
- *
- * (C) Copyright 2006 Intel Corporation
- * Author: Arjan van de Ven <arjan@linux.intel.com>
- *
- * This program is free software; you can redistribute it and/or
- * modify it under the terms of the GNU General Public License
- * as published by the Free Software Foundation; version 2
- * of the License.
- */
-
-#include <linux/latency.h>
-#include <linux/list.h>
-#include <linux/spinlock.h>
-#include <linux/slab.h>
-#include <linux/module.h>
-#include <linux/notifier.h>
-#include <linux/jiffies.h>
-#include <asm/atomic.h>
-
-struct latency_info {
- struct list_head list;
- int usecs;
- char *identifier;
-};
-
-/*
- * locking rule: all modifications to current_max_latency and
- * latency_list need to be done while holding the latency_lock.
- * latency_lock needs to be taken _irqsave.
- */
-static atomic_t current_max_latency;
-static DEFINE_SPINLOCK(latency_lock);
-
-static LIST_HEAD(latency_list);
-static BLOCKING_NOTIFIER_HEAD(latency_notifier);
-
-/*
- * This function returns the maximum latency allowed, which
- * happens to be the minimum of all maximum latencies on the
- * list.
- */
-static int __find_max_latency(void)
-{
- int min = INFINITE_LATENCY;
- struct latency_info *info;
-
- list_for_each_entry(info, &latency_list, list) {
- if (info->usecs < min)
- min = info->usecs;
- }
- return min;
-}
-
-/**
- * set_acceptable_latency - sets the maximum latency acceptable
- * @identifier: string that identifies this driver
- * @usecs: maximum acceptable latency for this driver
- *
- * This function informs the kernel that this device(driver)
- * can accept at most usecs latency. This setting is used for
- * power management and similar tradeoffs.
- *
- * This function sleeps and can only be called from process
- * context.
- * Calling this function with an existing identifier is valid
- * and will cause the existing latency setting to be changed.
- */
-void set_acceptable_latency(char *identifier, int usecs)
-{
- struct latency_info *info, *iter;
- unsigned long flags;
- int found_old = 0;
-
- info = kzalloc(sizeof(struct latency_info), GFP_KERNEL);
- if (!info)
- return;
- info->usecs = usecs;
- info->identifier = kstrdup(identifier, GFP_KERNEL);
- if (!info->identifier)
- goto free_info;
-
- spin_lock_irqsave(&latency_lock, flags);
- list_for_each_entry(iter, &latency_list, list) {
- if (strcmp(iter->identifier, identifier)==0) {
- found_old = 1;
- iter->usecs = usecs;
- break;
- }
- }
- if (!found_old)
- list_add(&info->list, &latency_list);
-
- if (usecs < atomic_read(&current_max_latency))
- atomic_set(&current_max_latency, usecs);
-
- spin_unlock_irqrestore(&latency_lock, flags);
-
- blocking_notifier_call_chain(&latency_notifier,
- atomic_read(&current_max_latency), NULL);
-
- /*
- * if we inserted the new one, we're done; otherwise there was
- * an existing one so we need to free the redundant data
- */
- if (!found_old)
- return;
-
- kfree(info->identifier);
-free_info:
- kfree(info);
-}
-EXPORT_SYMBOL_GPL(set_acceptable_latency);
-
-/**
- * modify_acceptable_latency - changes the maximum latency acceptable
- * @identifier: string that identifies this driver
- * @usecs: maximum acceptable latency for this driver
- *
- * This function informs the kernel that this device(driver)
- * can accept at most usecs latency. This setting is used for
- * power management and similar tradeoffs.
- *
- * This function does not sleep and can be called in any context.
- * Trying to use a non-existing identifier silently gets ignored.
- *
- * Due to the atomic nature of this function, the modified latency
- * value will only be used for future decisions; past decisions
- * can still lead to longer latencies in the near future.
- */
-void modify_acceptable_latency(char *identifier, int usecs)
-{
- struct latency_info *iter;
- unsigned long flags;
-
- spin_lock_irqsave(&latency_lock, flags);
- list_for_each_entry(iter, &latency_list, list) {
- if (strcmp(iter->identifier, identifier) == 0) {
- iter->usecs = usecs;
- break;
- }
- }
- if (usecs < atomic_read(&current_max_latency))
- atomic_set(&current_max_latency, usecs);
- spin_unlock_irqrestore(&latency_lock, flags);
-}
-EXPORT_SYMBOL_GPL(modify_acceptable_latency);
-
-/**
- * remove_acceptable_latency - removes the maximum latency acceptable
- * @identifier: string that identifies this driver
- *
- * This function removes a previously set maximum latency setting
- * for the driver and frees up any resources associated with the
- * bookkeeping needed for this.
- *
- * This function does not sleep and can be called in any context.
- * Trying to use a non-existing identifier silently gets ignored.
- */
-void remove_acceptable_latency(char *identifier)
-{
- unsigned long flags;
- int newmax = 0;
- struct latency_info *iter, *temp;
-
- spin_lock_irqsave(&latency_lock, flags);
-
- list_for_each_entry_safe(iter, temp, &latency_list, list) {
- if (strcmp(iter->identifier, identifier) == 0) {
- list_del(&iter->list);
- newmax = iter->usecs;
- kfree(iter->identifier);
- kfree(iter);
- break;
- }
- }
-
- /* If we just deleted the system wide value, we need to
- * recalculate with a full search
- */
- if (newmax == atomic_read(&current_max_latency)) {
- newmax = __find_max_latency();
- atomic_set(&current_max_latency, newmax);
- }
- spin_unlock_irqrestore(&latency_lock, flags);
-}
-EXPORT_SYMBOL_GPL(remove_acceptable_latency);
-
-/**
- * system_latency_constraint - queries the system wide latency maximum
- *
- * This function returns the system wide maximum latency in
- * microseconds.
- *
- * This function does not sleep and can be called in any context.
- */
-int system_latency_constraint(void)
-{
- return atomic_read(&current_max_latency);
-}
-EXPORT_SYMBOL_GPL(system_latency_constraint);
-
-/**
- * synchronize_acceptable_latency - recalculates all latency decisions
- *
- * This function will cause a callback to various kernel pieces that
- * will make those pieces rethink their latency decisions. This implies
- * that if there are overlong latencies in hardware state already, those
- * latencies get taken right now. When this call completes no overlong
- * latency decisions should be active anymore.
- *
- * Typical usecase of this is after a modify_acceptable_latency() call,
- * which in itself is non-blocking and non-synchronizing.
- *
- * This function blocks and should not be called with locks held.
- */
-
-void synchronize_acceptable_latency(void)
-{
- blocking_notifier_call_chain(&latency_notifier,
- atomic_read(&current_max_latency), NULL);
-}
-EXPORT_SYMBOL_GPL(synchronize_acceptable_latency);
-
-/*
- * Latency notifier: this notifier gets called when a non-atomic new
- * latency value gets set. The expectation nof the caller of the
- * non-atomic set is that when the call returns, future latencies
- * are within bounds, so the functions on the notifier list are
- * expected to take the overlong latencies immediately, inside the
- * callback, and not make a overlong latency decision anymore.
- *
- * The callback gets called when the new latency value is made
- * active so system_latency_constraint() returns the new latency.
- */
-int register_latency_notifier(struct notifier_block * nb)
-{
- return blocking_notifier_chain_register(&latency_notifier, nb);
-}
-EXPORT_SYMBOL_GPL(register_latency_notifier);
-
-int unregister_latency_notifier(struct notifier_block * nb)
-{
- return blocking_notifier_chain_unregister(&latency_notifier, nb);
-}
-EXPORT_SYMBOL_GPL(unregister_latency_notifier);
-
-static __init int latency_init(void)
-{
- atomic_set(&current_max_latency, INFINITE_LATENCY);
- /*
- * we don't want by default to have longer latencies than 2 ticks,
- * since that would cause lost ticks
- */
- set_acceptable_latency("kernel", 2*1000000/HZ);
- return 0;
-}
-
-module_init(latency_init);
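The latency-expectation infrastructure deleted here is replaced by kernel/pm_qos_params.c later in this diff. A driver that previously announced its tolerance with set_acceptable_latency() would move to the pm_qos requirement API; a rough before/after sketch, using only identifiers visible in the two files (the "my_audio" requirement name is hypothetical):

/* Hedged migration sketch: old kernel/latency.c calls shown in comments,
 * replacement pm_qos calls from later in this diff below them. */
#include <linux/pm_qos_params.h>

static void audio_driver_latency_budget(void)
{
        /* was: set_acceptable_latency("my_audio", 150); */
        if (pm_qos_add_requirement(PM_QOS_CPU_DMA_LATENCY, "my_audio", 150))
                return;         /* -ENOMEM */

        /* was: modify_acceptable_latency("my_audio", 300); */
        pm_qos_update_requirement(PM_QOS_CPU_DMA_LATENCY, "my_audio", 300);

        /* was: remove_acceptable_latency("my_audio"); */
        pm_qos_remove_requirement(PM_QOS_CPU_DMA_LATENCY, "my_audio");
}

The string identifier keyed per-driver entries in the old API; the new API keeps the same naming scheme but adds a pm_qos_class so latency and throughput requirements live on separate lists.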
diff --git a/kernel/notifier.c b/kernel/notifier.c
index 4253f472f06..643360d1bb1 100644
--- a/kernel/notifier.c
+++ b/kernel/notifier.c
@@ -4,6 +4,7 @@
#include <linux/notifier.h>
#include <linux/rcupdate.h>
#include <linux/vmalloc.h>
+#include <linux/reboot.h>
/*
* Notifier list for kernel code which wants to be called
diff --git a/kernel/params.c b/kernel/params.c
index 42fe5e6126c..e28c70628bb 100644
--- a/kernel/params.c
+++ b/kernel/params.c
@@ -272,7 +272,7 @@ static int param_array(const char *name,
unsigned int min, unsigned int max,
void *elem, int elemsize,
int (*set)(const char *, struct kernel_param *kp),
- int *num)
+ unsigned int *num)
{
int ret;
struct kernel_param kp;
diff --git a/kernel/pm_qos_params.c b/kernel/pm_qos_params.c
new file mode 100644
index 00000000000..0afe32be4c8
--- /dev/null
+++ b/kernel/pm_qos_params.c
@@ -0,0 +1,425 @@
+/*
+ * This module exposes the interface to kernel space for specifying
+ * QoS dependencies. It provides infrastructure for registration of:
+ *
+ * Dependents on a QoS value : register requirements
+ * Watchers of QoS value : get notified when target QoS value changes
+ *
+ * This QoS design is best effort based. Dependents register their QoS needs.
+ * Watchers register to keep track of the current QoS needs of the system.
+ *
+ * There are 3 basic classes of QoS parameter: latency, timeout, throughput
+ * each has defined units:
+ * latency: usec
+ * timeout: usec <-- currently not used.
+ * throughput: kbs (kilo byte / sec)
+ *
+ * There are lists of pm_qos_objects each one wrapping requirements, notifiers
+ *
+ * User mode requirements on a QOS parameter register themselves to the
+ * subsystem by opening the device node /dev/... and writing their request to
+ * the node. As long as the process holds a file handle open to the node, the
+ * client continues to be accounted for. Upon file release the usermode
+ * requirement is removed and a new qos target is computed. This way the
+ * requirement an application holds is cleaned up when it closes the file
+ * descriptor or exits, and the pm_qos_object gets an opportunity to clean up.
+ *
+ * mark gross mgross@linux.intel.com
+ */
+
+#include <linux/pm_qos_params.h>
+#include <linux/sched.h>
+#include <linux/spinlock.h>
+#include <linux/slab.h>
+#include <linux/time.h>
+#include <linux/fs.h>
+#include <linux/device.h>
+#include <linux/miscdevice.h>
+#include <linux/string.h>
+#include <linux/platform_device.h>
+#include <linux/init.h>
+
+#include <linux/uaccess.h>
+
+/*
+ * locking rule: all changes to target_value or requirements or notifiers lists
+ * or pm_qos_object list and pm_qos_objects need to happen with pm_qos_lock
+ * held, taken with _irqsave. One lock to rule them all
+ */
+struct requirement_list {
+ struct list_head list;
+ union {
+ s32 value;
+ s32 usec;
+ s32 kbps;
+ };
+ char *name;
+};
+
+static s32 max_compare(s32 v1, s32 v2);
+static s32 min_compare(s32 v1, s32 v2);
+
+struct pm_qos_object {
+ struct requirement_list requirements;
+ struct blocking_notifier_head *notifiers;
+ struct miscdevice pm_qos_power_miscdev;
+ char *name;
+ s32 default_value;
+ s32 target_value;
+ s32 (*comparitor)(s32, s32);
+};
+
+static struct pm_qos_object null_pm_qos;
+static BLOCKING_NOTIFIER_HEAD(cpu_dma_lat_notifier);
+static struct pm_qos_object cpu_dma_pm_qos = {
+ .requirements = {LIST_HEAD_INIT(cpu_dma_pm_qos.requirements.list)},
+ .notifiers = &cpu_dma_lat_notifier,
+ .name = "cpu_dma_latency",
+ .default_value = 2000 * USEC_PER_SEC,
+ .target_value = 2000 * USEC_PER_SEC,
+ .comparitor = min_compare
+};
+
+static BLOCKING_NOTIFIER_HEAD(network_lat_notifier);
+static struct pm_qos_object network_lat_pm_qos = {
+ .requirements = {LIST_HEAD_INIT(network_lat_pm_qos.requirements.list)},
+ .notifiers = &network_lat_notifier,
+ .name = "network_latency",
+ .default_value = 2000 * USEC_PER_SEC,
+ .target_value = 2000 * USEC_PER_SEC,
+ .comparitor = min_compare
+};
+
+
+static BLOCKING_NOTIFIER_HEAD(network_throughput_notifier);
+static struct pm_qos_object network_throughput_pm_qos = {
+ .requirements =
+ {LIST_HEAD_INIT(network_throughput_pm_qos.requirements.list)},
+ .notifiers = &network_throughput_notifier,
+ .name = "network_throughput",
+ .default_value = 0,
+ .target_value = 0,
+ .comparitor = max_compare
+};
+
+
+static struct pm_qos_object *pm_qos_array[] = {
+ &null_pm_qos,
+ &cpu_dma_pm_qos,
+ &network_lat_pm_qos,
+ &network_throughput_pm_qos
+};
+
+static DEFINE_SPINLOCK(pm_qos_lock);
+
+static ssize_t pm_qos_power_write(struct file *filp, const char __user *buf,
+ size_t count, loff_t *f_pos);
+static int pm_qos_power_open(struct inode *inode, struct file *filp);
+static int pm_qos_power_release(struct inode *inode, struct file *filp);
+
+static const struct file_operations pm_qos_power_fops = {
+ .write = pm_qos_power_write,
+ .open = pm_qos_power_open,
+ .release = pm_qos_power_release,
+};
+
+/* static helper functions */
+static s32 max_compare(s32 v1, s32 v2)
+{
+ return max(v1, v2);
+}
+
+static s32 min_compare(s32 v1, s32 v2)
+{
+ return min(v1, v2);
+}
+
+
+static void update_target(int target)
+{
+ s32 extreme_value;
+ struct requirement_list *node;
+ unsigned long flags;
+ int call_notifier = 0;
+
+ spin_lock_irqsave(&pm_qos_lock, flags);
+ extreme_value = pm_qos_array[target]->default_value;
+ list_for_each_entry(node,
+ &pm_qos_array[target]->requirements.list, list) {
+ extreme_value = pm_qos_array[target]->comparitor(
+ extreme_value, node->value);
+ }
+ if (pm_qos_array[target]->target_value != extreme_value) {
+ call_notifier = 1;
+ pm_qos_array[target]->target_value = extreme_value;
+ pr_debug("new target for qos %d is %d\n", target,
+ pm_qos_array[target]->target_value);
+ }
+ spin_unlock_irqrestore(&pm_qos_lock, flags);
+
+ if (call_notifier)
+ blocking_notifier_call_chain(pm_qos_array[target]->notifiers,
+ (unsigned long) extreme_value, NULL);
+}
+
+static int register_pm_qos_misc(struct pm_qos_object *qos)
+{
+ qos->pm_qos_power_miscdev.minor = MISC_DYNAMIC_MINOR;
+ qos->pm_qos_power_miscdev.name = qos->name;
+ qos->pm_qos_power_miscdev.fops = &pm_qos_power_fops;
+
+ return misc_register(&qos->pm_qos_power_miscdev);
+}
+
+static int find_pm_qos_object_by_minor(int minor)
+{
+ int pm_qos_class;
+
+ for (pm_qos_class = 0;
+ pm_qos_class < PM_QOS_NUM_CLASSES; pm_qos_class++) {
+ if (minor ==
+ pm_qos_array[pm_qos_class]->pm_qos_power_miscdev.minor)
+ return pm_qos_class;
+ }
+ return -1;
+}
+
+/**
+ * pm_qos_requirement - returns current system wide qos expectation
+ * @pm_qos_class: identification of which qos value is requested
+ *
+ * This function returns the current target value in an atomic manner.
+ */
+int pm_qos_requirement(int pm_qos_class)
+{
+ int ret_val;
+ unsigned long flags;
+
+ spin_lock_irqsave(&pm_qos_lock, flags);
+ ret_val = pm_qos_array[pm_qos_class]->target_value;
+ spin_unlock_irqrestore(&pm_qos_lock, flags);
+
+ return ret_val;
+}
+EXPORT_SYMBOL_GPL(pm_qos_requirement);
+
+/**
+ * pm_qos_add_requirement - inserts new qos request into the list
+ * @pm_qos_class: identifies which list of qos request to use
+ * @name: identifies the request
+ * @value: defines the qos request
+ *
+ * This function inserts a new entry in the pm_qos_class list of requested qos
+ * performance characteristics. It recomputes the aggregate QoS expectations for
+ * the pm_qos_class of parameters.
+ */
+int pm_qos_add_requirement(int pm_qos_class, char *name, s32 value)
+{
+ struct requirement_list *dep;
+ unsigned long flags;
+
+ dep = kzalloc(sizeof(struct requirement_list), GFP_KERNEL);
+ if (dep) {
+ if (value == PM_QOS_DEFAULT_VALUE)
+ dep->value = pm_qos_array[pm_qos_class]->default_value;
+ else
+ dep->value = value;
+ dep->name = kstrdup(name, GFP_KERNEL);
+ if (!dep->name)
+ goto cleanup;
+
+ spin_lock_irqsave(&pm_qos_lock, flags);
+ list_add(&dep->list,
+ &pm_qos_array[pm_qos_class]->requirements.list);
+ spin_unlock_irqrestore(&pm_qos_lock, flags);
+ update_target(pm_qos_class);
+
+ return 0;
+ }
+
+cleanup:
+ kfree(dep);
+ return -ENOMEM;
+}
+EXPORT_SYMBOL_GPL(pm_qos_add_requirement);
+
+/**
+ * pm_qos_update_requirement - modifies an existing qos request
+ * @pm_qos_class: identifies which list of qos request to use
+ * @name: identifies the request
+ * @value: defines the qos request
+ *
+ * Updates an existing qos requirement for the pm_qos_class of parameters along
+ * with updating the target pm_qos_class value.
+ *
+ * If the named request isn't in the list then no change is made.
+ */
+int pm_qos_update_requirement(int pm_qos_class, char *name, s32 new_value)
+{
+ unsigned long flags;
+ struct requirement_list *node;
+ int pending_update = 0;
+
+ spin_lock_irqsave(&pm_qos_lock, flags);
+ list_for_each_entry(node,
+ &pm_qos_array[pm_qos_class]->requirements.list, list) {
+ if (strcmp(node->name, name) == 0) {
+ if (new_value == PM_QOS_DEFAULT_VALUE)
+ node->value =
+ pm_qos_array[pm_qos_class]->default_value;
+ else
+ node->value = new_value;
+ pending_update = 1;
+ break;
+ }
+ }
+ spin_unlock_irqrestore(&pm_qos_lock, flags);
+ if (pending_update)
+ update_target(pm_qos_class);
+
+ return 0;
+}
+EXPORT_SYMBOL_GPL(pm_qos_update_requirement);
+
+/**
+ * pm_qos_remove_requirement - modifies an existing qos request
+ * @pm_qos_class: identifies which list of qos request to use
+ * @name: identifies the request
+ *
+ * Will remove the named qos request from the pm_qos_class list of parameters and
+ * recompute the current target value for the pm_qos_class.
+ */
+void pm_qos_remove_requirement(int pm_qos_class, char *name)
+{
+ unsigned long flags;
+ struct requirement_list *node;
+ int pending_update = 0;
+
+ spin_lock_irqsave(&pm_qos_lock, flags);
+ list_for_each_entry(node,
+ &pm_qos_array[pm_qos_class]->requirements.list, list) {
+ if (strcmp(node->name, name) == 0) {
+ kfree(node->name);
+ list_del(&node->list);
+ kfree(node);
+ pending_update = 1;
+ break;
+ }
+ }
+ spin_unlock_irqrestore(&pm_qos_lock, flags);
+ if (pending_update)
+ update_target(pm_qos_class);
+}
+EXPORT_SYMBOL_GPL(pm_qos_remove_requirement);
+
+/**
+ * pm_qos_add_notifier - sets notification entry for changes to target value
+ * @pm_qos_class: identifies which qos target changes should be notified.
+ * @notifier: notifier block managed by caller.
+ *
+ * will register the notifier into a notification chain that gets called
+ * upon changes to the pm_qos_class target value.
+ */
+ int pm_qos_add_notifier(int pm_qos_class, struct notifier_block *notifier)
+{
+ int retval;
+
+ retval = blocking_notifier_chain_register(
+ pm_qos_array[pm_qos_class]->notifiers, notifier);
+
+ return retval;
+}
+EXPORT_SYMBOL_GPL(pm_qos_add_notifier);
+
+/**
+ * pm_qos_remove_notifier - deletes notification entry from chain.
+ * @pm_qos_class: identifies which qos target changes are notified.
+ * @notifier: notifier block to be removed.
+ *
+ * will remove the notifier from the notification chain that gets called
+ * upon changes to the pm_qos_class target value.
+ */
+int pm_qos_remove_notifier(int pm_qos_class, struct notifier_block *notifier)
+{
+ int retval;
+
+ retval = blocking_notifier_chain_unregister(
+ pm_qos_array[pm_qos_class]->notifiers, notifier);
+
+ return retval;
+}
+EXPORT_SYMBOL_GPL(pm_qos_remove_notifier);
+
+#define PID_NAME_LEN sizeof("process_1234567890")
+static char name[PID_NAME_LEN];
+
+static int pm_qos_power_open(struct inode *inode, struct file *filp)
+{
+ int ret;
+ long pm_qos_class;
+
+ pm_qos_class = find_pm_qos_object_by_minor(iminor(inode));
+ if (pm_qos_class >= 0) {
+ filp->private_data = (void *)pm_qos_class;
+ sprintf(name, "process_%d", current->pid);
+ ret = pm_qos_add_requirement(pm_qos_class, name,
+ PM_QOS_DEFAULT_VALUE);
+ if (ret >= 0)
+ return 0;
+ }
+
+ return -EPERM;
+}
+
+static int pm_qos_power_release(struct inode *inode, struct file *filp)
+{
+ int pm_qos_class;
+
+ pm_qos_class = (long)filp->private_data;
+ sprintf(name, "process_%d", current->pid);
+ pm_qos_remove_requirement(pm_qos_class, name);
+
+ return 0;
+}
+
+static ssize_t pm_qos_power_write(struct file *filp, const char __user *buf,
+ size_t count, loff_t *f_pos)
+{
+ s32 value;
+ int pm_qos_class;
+
+ pm_qos_class = (long)filp->private_data;
+ if (count != sizeof(s32))
+ return -EINVAL;
+ if (copy_from_user(&value, buf, sizeof(s32)))
+ return -EFAULT;
+ sprintf(name, "process_%d", current->pid);
+ pm_qos_update_requirement(pm_qos_class, name, value);
+
+ return sizeof(s32);
+}
+
+
+static int __init pm_qos_power_init(void)
+{
+ int ret = 0;
+
+ ret = register_pm_qos_misc(&cpu_dma_pm_qos);
+ if (ret < 0) {
+ printk(KERN_ERR "pm_qos_param: cpu_dma_latency setup failed\n");
+ return ret;
+ }
+ ret = register_pm_qos_misc(&network_lat_pm_qos);
+ if (ret < 0) {
+ printk(KERN_ERR "pm_qos_param: network_latency setup failed\n");
+ return ret;
+ }
+ ret = register_pm_qos_misc(&network_throughput_pm_qos);
+ if (ret < 0)
+ printk(KERN_ERR
+ "pm_qos_param: network_throughput setup failed\n");
+
+ return ret;
+}
+
+late_initcall(pm_qos_power_init);
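The misc devices registered above (named after the pm_qos_object definitions: cpu_dma_latency, network_latency, network_throughput) accept one binary s32 and keep the requirement alive for as long as the file stays open, since pm_qos_power_release() drops it on close. A minimal user-space sketch of that contract, assuming the device node appears as /dev/cpu_dma_latency:

/* Hedged sketch of the user-space side: write one s32 to the misc device
 * and hold the fd open; closing it (or exiting) removes the requirement
 * and the target value is recomputed. */
#include <fcntl.h>
#include <stdint.h>
#include <stdio.h>
#include <unistd.h>

int main(void)
{
        int32_t max_latency_us = 50;    /* our tolerance, in usec */
        int fd = open("/dev/cpu_dma_latency", O_WRONLY);

        if (fd < 0) {
                perror("open /dev/cpu_dma_latency");
                return 1;
        }
        if (write(fd, &max_latency_us, sizeof(max_latency_us))
            != sizeof(max_latency_us)) {
                perror("write");
                close(fd);
                return 1;
        }

        /* ... latency-sensitive work; the requirement lasts while fd is open ... */
        sleep(10);

        close(fd);      /* requirement removed, target recomputed */
        return 0;
}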
diff --git a/kernel/posix-timers.c b/kernel/posix-timers.c
index 35b4bbfc78f..122d5c787fe 100644
--- a/kernel/posix-timers.c
+++ b/kernel/posix-timers.c
@@ -256,8 +256,9 @@ static void schedule_next_timer(struct k_itimer *timr)
if (timr->it.real.interval.tv64 == 0)
return;
- timr->it_overrun += hrtimer_forward(timer, timer->base->get_time(),
- timr->it.real.interval);
+ timr->it_overrun += (unsigned int) hrtimer_forward(timer,
+ timer->base->get_time(),
+ timr->it.real.interval);
timr->it_overrun_last = timr->it_overrun;
timr->it_overrun = -1;
@@ -386,7 +387,7 @@ static enum hrtimer_restart posix_timer_fn(struct hrtimer *timer)
now = ktime_add(now, kj);
}
#endif
- timr->it_overrun +=
+ timr->it_overrun += (unsigned int)
hrtimer_forward(timer, now,
timr->it.real.interval);
ret = HRTIMER_RESTART;
@@ -493,7 +494,7 @@ sys_timer_create(const clockid_t which_clock,
goto retry;
else if (error) {
/*
- * Wierd looking, but we return EAGAIN if the IDR is
+ * Weird looking, but we return EAGAIN if the IDR is
* full (proper POSIX return value for this)
*/
error = -EAGAIN;
@@ -662,7 +663,7 @@ common_timer_get(struct k_itimer *timr, struct itimerspec *cur_setting)
*/
if (iv.tv64 && (timr->it_requeue_pending & REQUEUE_PENDING ||
(timr->it_sigev_notify & ~SIGEV_THREAD_ID) == SIGEV_NONE))
- timr->it_overrun += hrtimer_forward(timer, now, iv);
+ timr->it_overrun += (unsigned int) hrtimer_forward(timer, now, iv);
remaining = ktime_sub(timer->expires, now);
/* Return 0 only, when the timer is expired and not pending */
diff --git a/kernel/power/Kconfig b/kernel/power/Kconfig
index 8e186c67814..ef9b802738a 100644
--- a/kernel/power/Kconfig
+++ b/kernel/power/Kconfig
@@ -44,9 +44,30 @@ config PM_VERBOSE
---help---
This option enables verbose messages from the Power Management code.
+config CAN_PM_TRACE
+ def_bool y
+ depends on PM_DEBUG && PM_SLEEP && EXPERIMENTAL
+
config PM_TRACE
+ bool
+ help
+ This enables code to save the last PM event point across
+ reboot. The architecture needs to support this, x86 for
+ example does by saving things in the RTC, see below.
+
+ The architecture specific code must provide the extern
+ functions from <linux/resume-trace.h> as well as the
+ <asm/resume-trace.h> header with a TRACE_RESUME() macro.
+
+ The way the information is presented is architecture-
+ dependent, x86 will print the information during a
+ late_initcall.
+
+config PM_TRACE_RTC
bool "Suspend/resume event tracing"
- depends on PM_DEBUG && X86 && PM_SLEEP && EXPERIMENTAL
+ depends on CAN_PM_TRACE
+ depends on X86
+ select PM_TRACE
default n
---help---
This enables some cheesy code to save the last PM event point in the
@@ -63,7 +84,8 @@ config PM_TRACE
config PM_SLEEP_SMP
bool
- depends on SUSPEND_SMP_POSSIBLE || HIBERNATION_SMP_POSSIBLE
+ depends on SMP
+ depends on ARCH_SUSPEND_POSSIBLE || ARCH_HIBERNATION_POSSIBLE
depends on PM_SLEEP
select HOTPLUG_CPU
default y
@@ -73,46 +95,29 @@ config PM_SLEEP
depends on SUSPEND || HIBERNATION
default y
-config SUSPEND_UP_POSSIBLE
- bool
- depends on (X86 && !X86_VOYAGER) || PPC || ARM || BLACKFIN || MIPS \
- || SUPERH || FRV
- depends on !SMP
- default y
-
-config SUSPEND_SMP_POSSIBLE
- bool
- depends on (X86 && !X86_VOYAGER) \
- || (PPC && (PPC_PSERIES || PPC_PMAC)) || ARM
- depends on SMP
- default y
-
config SUSPEND
bool "Suspend to RAM and standby"
- depends on PM
- depends on SUSPEND_UP_POSSIBLE || SUSPEND_SMP_POSSIBLE
+ depends on PM && ARCH_SUSPEND_POSSIBLE
default y
---help---
Allow the system to enter sleep states in which main memory is
powered and thus its contents are preserved, such as the
- suspend-to-RAM state (i.e. the ACPI S3 state).
+ suspend-to-RAM state (e.g. the ACPI S3 state).
-config HIBERNATION_UP_POSSIBLE
- bool
- depends on X86 || PPC64_SWSUSP || PPC32
- depends on !SMP
+config SUSPEND_FREEZER
+ bool "Enable freezer for suspend to RAM/standby" \
+ if ARCH_WANTS_FREEZER_CONTROL || BROKEN
+ depends on SUSPEND
default y
+ help
+ This allows you to turn off the freezer for suspend. If this is
+ done, no tasks are frozen for suspend to RAM/standby.
-config HIBERNATION_SMP_POSSIBLE
- bool
- depends on (X86 && !X86_VOYAGER) || PPC64_SWSUSP
- depends on SMP
- default y
+ Turning OFF this setting is NOT recommended! If in doubt, say Y.
config HIBERNATION
bool "Hibernation (aka 'suspend to disk')"
- depends on PM && SWAP
- depends on HIBERNATION_UP_POSSIBLE || HIBERNATION_SMP_POSSIBLE
+ depends on PM && SWAP && ARCH_HIBERNATION_POSSIBLE
---help---
Enable the suspend to disk (STD) functionality, which is usually
called "hibernation" in user interfaces. STD checkpoints the
diff --git a/kernel/power/disk.c b/kernel/power/disk.c
index b138b431e27..859a8e59773 100644
--- a/kernel/power/disk.c
+++ b/kernel/power/disk.c
@@ -26,7 +26,7 @@
static int noresume = 0;
-char resume_file[256] = CONFIG_PM_STD_PARTITION;
+static char resume_file[256] = CONFIG_PM_STD_PARTITION;
dev_t swsusp_resume_device;
sector_t swsusp_resume_block;
@@ -54,8 +54,8 @@ static struct platform_hibernation_ops *hibernation_ops;
void hibernation_set_ops(struct platform_hibernation_ops *ops)
{
- if (ops && !(ops->start && ops->pre_snapshot && ops->finish
- && ops->prepare && ops->enter && ops->pre_restore
+ if (ops && !(ops->begin && ops->end && ops->pre_snapshot
+ && ops->prepare && ops->finish && ops->enter && ops->pre_restore
&& ops->restore_cleanup)) {
WARN_ON(1);
return;
@@ -70,15 +70,55 @@ void hibernation_set_ops(struct platform_hibernation_ops *ops)
mutex_unlock(&pm_mutex);
}
+#ifdef CONFIG_PM_DEBUG
+static void hibernation_debug_sleep(void)
+{
+ printk(KERN_INFO "hibernation debug: Waiting for 5 seconds.\n");
+ mdelay(5000);
+}
+
+static int hibernation_testmode(int mode)
+{
+ if (hibernation_mode == mode) {
+ hibernation_debug_sleep();
+ return 1;
+ }
+ return 0;
+}
+
+static int hibernation_test(int level)
+{
+ if (pm_test_level == level) {
+ hibernation_debug_sleep();
+ return 1;
+ }
+ return 0;
+}
+#else /* !CONFIG_PM_DEBUG */
+static int hibernation_testmode(int mode) { return 0; }
+static int hibernation_test(int level) { return 0; }
+#endif /* !CONFIG_PM_DEBUG */
+
/**
- * platform_start - tell the platform driver that we're starting
+ * platform_begin - tell the platform driver that we're starting
* hibernation
*/
-static int platform_start(int platform_mode)
+static int platform_begin(int platform_mode)
{
return (platform_mode && hibernation_ops) ?
- hibernation_ops->start() : 0;
+ hibernation_ops->begin() : 0;
+}
+
+/**
+ * platform_end - tell the platform driver that we've entered the
+ * working state
+ */
+
+static void platform_end(int platform_mode)
+{
+ if (platform_mode && hibernation_ops)
+ hibernation_ops->end();
}
/**
@@ -145,7 +185,7 @@ static void platform_restore_cleanup(int platform_mode)
* reappears in this routine after a restore.
*/
-int create_image(int platform_mode)
+static int create_image(int platform_mode)
{
int error;
@@ -162,19 +202,25 @@ int create_image(int platform_mode)
*/
error = device_power_down(PMSG_FREEZE);
if (error) {
- printk(KERN_ERR "Some devices failed to power down, "
- KERN_ERR "aborting suspend\n");
+ printk(KERN_ERR "PM: Some devices failed to power down, "
+ "aborting hibernation\n");
goto Enable_irqs;
}
+ if (hibernation_test(TEST_CORE))
+ goto Power_up;
+
+ in_suspend = 1;
save_processor_state();
error = swsusp_arch_suspend();
if (error)
- printk(KERN_ERR "Error %d while creating the image\n", error);
+ printk(KERN_ERR "PM: Error %d creating hibernation image\n",
+ error);
/* Restore control flow magically appears here */
restore_processor_state();
if (!in_suspend)
platform_leave(platform_mode);
+ Power_up:
/* NOTE: device_power_up() is just a resume() for devices
* that suspended with irqs off ... no overall powerup.
*/
@@ -202,36 +248,90 @@ int hibernation_snapshot(int platform_mode)
if (error)
return error;
- error = platform_start(platform_mode);
+ error = platform_begin(platform_mode);
if (error)
- return error;
+ goto Close;
suspend_console();
error = device_suspend(PMSG_FREEZE);
if (error)
goto Resume_console;
- error = platform_pre_snapshot(platform_mode);
- if (error)
+ if (hibernation_test(TEST_DEVICES))
goto Resume_devices;
+ error = platform_pre_snapshot(platform_mode);
+ if (error || hibernation_test(TEST_PLATFORM))
+ goto Finish;
+
error = disable_nonboot_cpus();
if (!error) {
- if (hibernation_mode != HIBERNATION_TEST) {
- in_suspend = 1;
- error = create_image(platform_mode);
- /* Control returns here after successful restore */
- } else {
- printk("swsusp debug: Waiting for 5 seconds.\n");
- mdelay(5000);
- }
+ if (hibernation_test(TEST_CPUS))
+ goto Enable_cpus;
+
+ if (hibernation_testmode(HIBERNATION_TEST))
+ goto Enable_cpus;
+
+ error = create_image(platform_mode);
+ /* Control returns here after successful restore */
}
+ Enable_cpus:
enable_nonboot_cpus();
- Resume_devices:
+ Finish:
platform_finish(platform_mode);
+ Resume_devices:
device_resume();
Resume_console:
resume_console();
+ Close:
+ platform_end(platform_mode);
+ return error;
+}
+
+/**
+ * resume_target_kernel - prepare devices that need to be suspended with
+ * interrupts off, restore the contents of highmem that have not been
+ * restored yet from the image and run the low level code that will restore
+ * the remaining contents of memory and switch to the just restored target
+ * kernel.
+ */
+
+static int resume_target_kernel(void)
+{
+ int error;
+
+ local_irq_disable();
+ error = device_power_down(PMSG_PRETHAW);
+ if (error) {
+ printk(KERN_ERR "PM: Some devices failed to power down, "
+ "aborting resume\n");
+ goto Enable_irqs;
+ }
+ /* We'll ignore saved state, but this gets preempt count (etc) right */
+ save_processor_state();
+ error = restore_highmem();
+ if (!error) {
+ error = swsusp_arch_resume();
+ /*
+ * The code below is only ever reached in case of a failure.
+ * Otherwise execution continues at place where
+ * swsusp_arch_suspend() was called
+ */
+ BUG_ON(!error);
+ /* This call to restore_highmem() undoes the previous one */
+ restore_highmem();
+ }
+ /*
+ * The only reason why swsusp_arch_resume() can fail is memory being
+ * very tight, so we have to free it as soon as we can to avoid
+ * subsequent failures
+ */
+ swsusp_free();
+ restore_processor_state();
+ touch_softlockup_watchdog();
+ device_power_up();
+ Enable_irqs:
+ local_irq_enable();
return error;
}
@@ -258,7 +358,7 @@ int hibernation_restore(int platform_mode)
if (!error) {
error = disable_nonboot_cpus();
if (!error)
- error = swsusp_resume();
+ error = resume_target_kernel();
enable_nonboot_cpus();
}
platform_restore_cleanup(platform_mode);
@@ -286,9 +386,9 @@ int hibernation_platform_enter(void)
* hibernation_ops->finish() before saving the image, so we should let
* the firmware know that we're going to enter the sleep state after all
*/
- error = hibernation_ops->start();
+ error = hibernation_ops->begin();
if (error)
- return error;
+ goto Close;
suspend_console();
error = device_suspend(PMSG_SUSPEND);
@@ -322,6 +422,8 @@ int hibernation_platform_enter(void)
device_resume();
Resume_console:
resume_console();
+ Close:
+ hibernation_ops->end();
return error;
}
@@ -352,24 +454,17 @@ static void power_down(void)
* Valid image is on the disk, if we continue we risk serious data
* corruption after resume.
*/
- printk(KERN_CRIT "Please power me down manually\n");
+ printk(KERN_CRIT "PM: Please power down manually\n");
while(1);
}
-static void unprepare_processes(void)
-{
- thaw_processes();
- pm_restore_console();
-}
-
static int prepare_processes(void)
{
int error = 0;
- pm_prepare_console();
if (freeze_processes()) {
error = -EBUSY;
- unprepare_processes();
+ thaw_processes();
}
return error;
}
@@ -389,6 +484,7 @@ int hibernate(void)
goto Unlock;
}
+ pm_prepare_console();
error = pm_notifier_call_chain(PM_HIBERNATION_PREPARE);
if (error)
goto Exit;
@@ -398,7 +494,7 @@ int hibernate(void)
if (error)
goto Exit;
- printk("Syncing filesystems ... ");
+ printk(KERN_INFO "PM: Syncing filesystems ... ");
sys_sync();
printk("done.\n");
@@ -406,11 +502,12 @@ int hibernate(void)
if (error)
goto Finish;
- if (hibernation_mode == HIBERNATION_TESTPROC) {
- printk("swsusp debug: Waiting for 5 seconds.\n");
- mdelay(5000);
+ if (hibernation_test(TEST_FREEZER))
goto Thaw;
- }
+
+ if (hibernation_testmode(HIBERNATION_TESTPROC))
+ goto Thaw;
+
error = hibernation_snapshot(hibernation_mode == HIBERNATION_PLATFORM);
if (in_suspend && !error) {
unsigned int flags = 0;
@@ -427,11 +524,12 @@ int hibernate(void)
swsusp_free();
}
Thaw:
- unprepare_processes();
+ thaw_processes();
Finish:
free_basic_memory_bitmaps();
Exit:
pm_notifier_call_chain(PM_POST_HIBERNATION);
+ pm_restore_console();
atomic_inc(&snapshot_device_available);
Unlock:
mutex_unlock(&pm_mutex);
@@ -473,22 +571,23 @@ static int software_resume(void)
return -ENOENT;
}
swsusp_resume_device = name_to_dev_t(resume_file);
- pr_debug("swsusp: Resume From Partition %s\n", resume_file);
+ pr_debug("PM: Resume from partition %s\n", resume_file);
} else {
- pr_debug("swsusp: Resume From Partition %d:%d\n",
- MAJOR(swsusp_resume_device), MINOR(swsusp_resume_device));
+ pr_debug("PM: Resume from partition %d:%d\n",
+ MAJOR(swsusp_resume_device),
+ MINOR(swsusp_resume_device));
}
if (noresume) {
/**
- * FIXME: If noresume is specified, we need to find the partition
- * and reset it back to normal swap space.
+ * FIXME: If noresume is specified, we need to find the
+ * partition and reset it back to normal swap space.
*/
mutex_unlock(&pm_mutex);
return 0;
}
- pr_debug("PM: Checking swsusp image.\n");
+ pr_debug("PM: Checking hibernation image.\n");
error = swsusp_check();
if (error)
goto Unlock;
@@ -499,6 +598,11 @@ static int software_resume(void)
goto Unlock;
}
+ pm_prepare_console();
+ error = pm_notifier_call_chain(PM_RESTORE_PREPARE);
+ if (error)
+ goto Finish;
+
error = create_basic_memory_bitmaps();
if (error)
goto Finish;
@@ -510,7 +614,7 @@ static int software_resume(void)
goto Done;
}
- pr_debug("PM: Reading swsusp image.\n");
+ pr_debug("PM: Reading hibernation image.\n");
error = swsusp_read(&flags);
if (!error)
@@ -518,10 +622,12 @@ static int software_resume(void)
printk(KERN_ERR "PM: Restore failed, recovering.\n");
swsusp_free();
- unprepare_processes();
+ thaw_processes();
Done:
free_basic_memory_bitmaps();
Finish:
+ pm_notifier_call_chain(PM_POST_RESTORE);
+ pm_restore_console();
atomic_inc(&snapshot_device_available);
/* For success case, the suspend path will release the lock */
Unlock:
@@ -636,7 +742,7 @@ static ssize_t disk_store(struct kobject *kobj, struct kobj_attribute *attr,
error = -EINVAL;
if (!error)
- pr_debug("PM: suspend-to-disk mode set to '%s'\n",
+ pr_debug("PM: Hibernation mode set to '%s'\n",
hibernation_modes[mode]);
mutex_unlock(&pm_mutex);
return error ? error : n;
@@ -668,7 +774,7 @@ static ssize_t resume_store(struct kobject *kobj, struct kobj_attribute *attr,
mutex_lock(&pm_mutex);
swsusp_resume_device = res;
mutex_unlock(&pm_mutex);
- printk("Attempting manual resume\n");
+ printk(KERN_INFO "PM: Starting manual resume from disk\n");
noresume = 0;
software_resume();
ret = n;
diff --git a/kernel/power/main.c b/kernel/power/main.c
index efc08360e62..6a6d5eb3524 100644
--- a/kernel/power/main.c
+++ b/kernel/power/main.c
@@ -24,13 +24,112 @@
#include "power.h"
-BLOCKING_NOTIFIER_HEAD(pm_chain_head);
-
DEFINE_MUTEX(pm_mutex);
unsigned int pm_flags;
EXPORT_SYMBOL(pm_flags);
+#ifdef CONFIG_PM_SLEEP
+
+/* Routines for PM-transition notifications */
+
+static BLOCKING_NOTIFIER_HEAD(pm_chain_head);
+
+int register_pm_notifier(struct notifier_block *nb)
+{
+ return blocking_notifier_chain_register(&pm_chain_head, nb);
+}
+EXPORT_SYMBOL_GPL(register_pm_notifier);
+
+int unregister_pm_notifier(struct notifier_block *nb)
+{
+ return blocking_notifier_chain_unregister(&pm_chain_head, nb);
+}
+EXPORT_SYMBOL_GPL(unregister_pm_notifier);
+
+int pm_notifier_call_chain(unsigned long val)
+{
+ return (blocking_notifier_call_chain(&pm_chain_head, val, NULL)
+ == NOTIFY_BAD) ? -EINVAL : 0;
+}
+
+#ifdef CONFIG_PM_DEBUG
+int pm_test_level = TEST_NONE;
+
+static int suspend_test(int level)
+{
+ if (pm_test_level == level) {
+ printk(KERN_INFO "suspend debug: Waiting for 5 seconds.\n");
+ mdelay(5000);
+ return 1;
+ }
+ return 0;
+}
+
+static const char * const pm_tests[__TEST_AFTER_LAST] = {
+ [TEST_NONE] = "none",
+ [TEST_CORE] = "core",
+ [TEST_CPUS] = "processors",
+ [TEST_PLATFORM] = "platform",
+ [TEST_DEVICES] = "devices",
+ [TEST_FREEZER] = "freezer",
+};
+
+static ssize_t pm_test_show(struct kobject *kobj, struct kobj_attribute *attr,
+ char *buf)
+{
+ char *s = buf;
+ int level;
+
+ for (level = TEST_FIRST; level <= TEST_MAX; level++)
+ if (pm_tests[level]) {
+ if (level == pm_test_level)
+ s += sprintf(s, "[%s] ", pm_tests[level]);
+ else
+ s += sprintf(s, "%s ", pm_tests[level]);
+ }
+
+ if (s != buf)
+ /* convert the last space to a newline */
+ *(s-1) = '\n';
+
+ return (s - buf);
+}
+
+static ssize_t pm_test_store(struct kobject *kobj, struct kobj_attribute *attr,
+ const char *buf, size_t n)
+{
+ const char * const *s;
+ int level;
+ char *p;
+ int len;
+ int error = -EINVAL;
+
+ p = memchr(buf, '\n', n);
+ len = p ? p - buf : n;
+
+ mutex_lock(&pm_mutex);
+
+ level = TEST_FIRST;
+ for (s = &pm_tests[level]; level <= TEST_MAX; s++, level++)
+ if (*s && len == strlen(*s) && !strncmp(buf, *s, len)) {
+ pm_test_level = level;
+ error = 0;
+ break;
+ }
+
+ mutex_unlock(&pm_mutex);
+
+ return error ? error : n;
+}
+
+power_attr(pm_test);
+#else /* !CONFIG_PM_DEBUG */
+static inline int suspend_test(int level) { return 0; }
+#endif /* !CONFIG_PM_DEBUG */
+
+#endif /* CONFIG_PM_SLEEP */
+
#ifdef CONFIG_SUSPEND
/* This is just an arbitrary number */
@@ -76,13 +175,13 @@ static int suspend_prepare(void)
if (!suspend_ops || !suspend_ops->enter)
return -EPERM;
+ pm_prepare_console();
+
error = pm_notifier_call_chain(PM_SUSPEND_PREPARE);
if (error)
goto Finish;
- pm_prepare_console();
-
- if (freeze_processes()) {
+ if (suspend_freeze_processes()) {
error = -EAGAIN;
goto Thaw;
}
@@ -100,10 +199,10 @@ static int suspend_prepare(void)
return 0;
Thaw:
- thaw_processes();
- pm_restore_console();
+ suspend_thaw_processes();
Finish:
pm_notifier_call_chain(PM_POST_SUSPEND);
+ pm_restore_console();
return error;
}
@@ -133,10 +232,13 @@ static int suspend_enter(suspend_state_t state)
BUG_ON(!irqs_disabled());
if ((error = device_power_down(PMSG_SUSPEND))) {
- printk(KERN_ERR "Some devices failed to power down\n");
+ printk(KERN_ERR "PM: Some devices failed to power down\n");
goto Done;
}
- error = suspend_ops->enter(state);
+
+ if (!suspend_test(TEST_CORE))
+ error = suspend_ops->enter(state);
+
device_power_up();
Done:
arch_suspend_enable_irqs();
@@ -145,8 +247,8 @@ static int suspend_enter(suspend_state_t state)
}
/**
- * suspend_devices_and_enter - suspend devices and enter the desired system sleep
- * state.
+ * suspend_devices_and_enter - suspend devices and enter the desired system
+ * sleep state.
* @state: state to enter
*/
int suspend_devices_and_enter(suspend_state_t state)
@@ -156,33 +258,45 @@ int suspend_devices_and_enter(suspend_state_t state)
if (!suspend_ops)
return -ENOSYS;
- if (suspend_ops->set_target) {
- error = suspend_ops->set_target(state);
+ if (suspend_ops->begin) {
+ error = suspend_ops->begin(state);
if (error)
- return error;
+ goto Close;
}
suspend_console();
error = device_suspend(PMSG_SUSPEND);
if (error) {
- printk(KERN_ERR "Some devices failed to suspend\n");
+ printk(KERN_ERR "PM: Some devices failed to suspend\n");
goto Resume_console;
}
+
+ if (suspend_test(TEST_DEVICES))
+ goto Resume_devices;
+
if (suspend_ops->prepare) {
error = suspend_ops->prepare();
if (error)
goto Resume_devices;
}
+
+ if (suspend_test(TEST_PLATFORM))
+ goto Finish;
+
error = disable_nonboot_cpus();
- if (!error)
+ if (!error && !suspend_test(TEST_CPUS))
suspend_enter(state);
enable_nonboot_cpus();
+ Finish:
if (suspend_ops->finish)
suspend_ops->finish();
Resume_devices:
device_resume();
Resume_console:
resume_console();
+ Close:
+ if (suspend_ops->end)
+ suspend_ops->end();
return error;
}
@@ -194,9 +308,9 @@ int suspend_devices_and_enter(suspend_state_t state)
*/
static void suspend_finish(void)
{
- thaw_processes();
- pm_restore_console();
+ suspend_thaw_processes();
pm_notifier_call_chain(PM_POST_SUSPEND);
+ pm_restore_console();
}
@@ -238,17 +352,22 @@ static int enter_state(suspend_state_t state)
if (!mutex_trylock(&pm_mutex))
return -EBUSY;
- printk("Syncing filesystems ... ");
+ printk(KERN_INFO "PM: Syncing filesystems ... ");
sys_sync();
printk("done.\n");
pr_debug("PM: Preparing system for %s sleep\n", pm_states[state]);
- if ((error = suspend_prepare()))
+ error = suspend_prepare();
+ if (error)
goto Unlock;
+ if (suspend_test(TEST_FREEZER))
+ goto Finish;
+
pr_debug("PM: Entering %s sleep\n", pm_states[state]);
error = suspend_devices_and_enter(state);
+ Finish:
pr_debug("PM: Finishing wakeup.\n");
suspend_finish();
Unlock:
@@ -369,18 +488,18 @@ pm_trace_store(struct kobject *kobj, struct kobj_attribute *attr,
}
power_attr(pm_trace);
+#endif /* CONFIG_PM_TRACE */
static struct attribute * g[] = {
&state_attr.attr,
+#ifdef CONFIG_PM_TRACE
&pm_trace_attr.attr,
+#endif
+#if defined(CONFIG_PM_SLEEP) && defined(CONFIG_PM_DEBUG)
+ &pm_test_attr.attr,
+#endif
NULL,
};
-#else
-static struct attribute * g[] = {
- &state_attr.attr,
- NULL,
-};
-#endif /* CONFIG_PM_TRACE */
static struct attribute_group attr_group = {
.attrs = g,
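
The pm_test attribute wired into the group above is driven purely through
sysfs. A minimal user-space sketch of exercising it (assuming the file shows
up as /sys/power/pm_test and that "devices" is one of the names in the
pm_tests[] table used by pm_test_store(); both the path and the name are
assumptions here, not shown in this hunk):

/* Hypothetical sketch: select a suspend test level via sysfs. */
#include <fcntl.h>
#include <stdio.h>
#include <unistd.h>

int main(void)
{
        char buf[128];
        ssize_t n;
        int fd;

        fd = open("/sys/power/pm_test", O_WRONLY);
        if (fd < 0 || write(fd, "devices\n", 8) < 0)
                perror("select test level");
        if (fd >= 0)
                close(fd);

        fd = open("/sys/power/pm_test", O_RDONLY);
        if (fd >= 0) {
                n = read(fd, buf, sizeof(buf) - 1);
                if (n > 0) {
                        buf[n] = '\0';
                        fputs(buf, stdout); /* active level appears in [...] */
                }
                close(fd);
        }
        return 0;
}

Reading the attribute back lists every level with the active one in square
brackets, which is exactly what pm_test_show() formats above.
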
diff --git a/kernel/power/power.h b/kernel/power/power.h
index 2093c3a9a99..700f44ec840 100644
--- a/kernel/power/power.h
+++ b/kernel/power/power.h
@@ -1,5 +1,7 @@
#include <linux/suspend.h>
+#include <linux/suspend_ioctls.h>
#include <linux/utsname.h>
+#include <linux/freezer.h>
struct swsusp_info {
struct new_utsname uts;
@@ -128,42 +130,12 @@ struct snapshot_handle {
#define data_of(handle) ((handle).buffer + (handle).buf_offset)
extern unsigned int snapshot_additional_pages(struct zone *zone);
+extern unsigned long snapshot_get_image_size(void);
extern int snapshot_read_next(struct snapshot_handle *handle, size_t count);
extern int snapshot_write_next(struct snapshot_handle *handle, size_t count);
extern void snapshot_write_finalize(struct snapshot_handle *handle);
extern int snapshot_image_loaded(struct snapshot_handle *handle);
-/*
- * This structure is used to pass the values needed for the identification
- * of the resume swap area from a user space to the kernel via the
- * SNAPSHOT_SET_SWAP_AREA ioctl
- */
-struct resume_swap_area {
- loff_t offset;
- u_int32_t dev;
-} __attribute__((packed));
-
-#define SNAPSHOT_IOC_MAGIC '3'
-#define SNAPSHOT_FREEZE _IO(SNAPSHOT_IOC_MAGIC, 1)
-#define SNAPSHOT_UNFREEZE _IO(SNAPSHOT_IOC_MAGIC, 2)
-#define SNAPSHOT_ATOMIC_SNAPSHOT _IOW(SNAPSHOT_IOC_MAGIC, 3, void *)
-#define SNAPSHOT_ATOMIC_RESTORE _IO(SNAPSHOT_IOC_MAGIC, 4)
-#define SNAPSHOT_FREE _IO(SNAPSHOT_IOC_MAGIC, 5)
-#define SNAPSHOT_SET_IMAGE_SIZE _IOW(SNAPSHOT_IOC_MAGIC, 6, unsigned long)
-#define SNAPSHOT_AVAIL_SWAP _IOR(SNAPSHOT_IOC_MAGIC, 7, void *)
-#define SNAPSHOT_GET_SWAP_PAGE _IOR(SNAPSHOT_IOC_MAGIC, 8, void *)
-#define SNAPSHOT_FREE_SWAP_PAGES _IO(SNAPSHOT_IOC_MAGIC, 9)
-#define SNAPSHOT_SET_SWAP_FILE _IOW(SNAPSHOT_IOC_MAGIC, 10, unsigned int)
-#define SNAPSHOT_S2RAM _IO(SNAPSHOT_IOC_MAGIC, 11)
-#define SNAPSHOT_PMOPS _IOW(SNAPSHOT_IOC_MAGIC, 12, unsigned int)
-#define SNAPSHOT_SET_SWAP_AREA _IOW(SNAPSHOT_IOC_MAGIC, 13, \
- struct resume_swap_area)
-#define SNAPSHOT_IOC_MAXNR 13
-
-#define PMOPS_PREPARE 1
-#define PMOPS_ENTER 2
-#define PMOPS_FINISH 3
-
/* If unset, the snapshot device cannot be open. */
extern atomic_t snapshot_device_available;
@@ -181,7 +153,6 @@ extern int swsusp_swap_in_use(void);
extern int swsusp_check(void);
extern int swsusp_shrink_memory(void);
extern void swsusp_free(void);
-extern int swsusp_resume(void);
extern int swsusp_read(unsigned int *flags_p);
extern int swsusp_write(unsigned int flags);
extern void swsusp_close(void);
@@ -201,11 +172,56 @@ static inline int suspend_devices_and_enter(suspend_state_t state)
}
#endif /* !CONFIG_SUSPEND */
-/* kernel/power/common.c */
-extern struct blocking_notifier_head pm_chain_head;
+#ifdef CONFIG_PM_SLEEP
+/* kernel/power/main.c */
+extern int pm_notifier_call_chain(unsigned long val);
+#endif
+
+#ifdef CONFIG_HIGHMEM
+unsigned int count_highmem_pages(void);
+int restore_highmem(void);
+#else
+static inline unsigned int count_highmem_pages(void) { return 0; }
+static inline int restore_highmem(void) { return 0; }
+#endif
+
+/*
+ * Suspend test levels
+ */
+enum {
+ /* keep first */
+ TEST_NONE,
+ TEST_CORE,
+ TEST_CPUS,
+ TEST_PLATFORM,
+ TEST_DEVICES,
+ TEST_FREEZER,
+ /* keep last */
+ __TEST_AFTER_LAST
+};
+
+#define TEST_FIRST TEST_NONE
+#define TEST_MAX (__TEST_AFTER_LAST - 1)
+
+extern int pm_test_level;
+
+#ifdef CONFIG_SUSPEND_FREEZER
+static inline int suspend_freeze_processes(void)
+{
+ return freeze_processes();
+}
-static inline int pm_notifier_call_chain(unsigned long val)
+static inline void suspend_thaw_processes(void)
{
- return (blocking_notifier_call_chain(&pm_chain_head, val, NULL)
- == NOTIFY_BAD) ? -EINVAL : 0;
+ thaw_processes();
}
+#else
+static inline int suspend_freeze_processes(void)
+{
+ return 0;
+}
+
+static inline void suspend_thaw_processes(void)
+{
+}
+#endif
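
The TEST_* enum and the pm_tests[] name table are kept in step by position,
which is what lets pm_test_store() map a written string to a level with one
linear scan. A stand-alone model of that lookup, with illustrative names
(the real table is defined next to pm_test_show()/pm_test_store() above):

/* Stand-alone model of the TEST_* lookup performed by pm_test_store(). */
#include <stdio.h>
#include <string.h>

enum { TEST_NONE, TEST_CORE, TEST_CPUS, TEST_PLATFORM,
       TEST_DEVICES, TEST_FREEZER, __TEST_AFTER_LAST };
#define TEST_FIRST      TEST_NONE
#define TEST_MAX        (__TEST_AFTER_LAST - 1)

static const char * const pm_tests[__TEST_AFTER_LAST] = {
        [TEST_NONE]     = "none",       /* names are illustrative */
        [TEST_CORE]     = "core",
        [TEST_CPUS]     = "processors",
        [TEST_PLATFORM] = "platform",
        [TEST_DEVICES]  = "devices",
        [TEST_FREEZER]  = "freezer",
};

static int lookup_level(const char *buf, size_t len)
{
        int level;

        for (level = TEST_FIRST; level <= TEST_MAX; level++)
                if (pm_tests[level] && len == strlen(pm_tests[level]) &&
                    !strncmp(buf, pm_tests[level], len))
                        return level;
        return -1;
}

int main(void)
{
        printf("\"devices\" -> level %d\n", lookup_level("devices", 7));
        printf("\"bogus\"   -> level %d\n", lookup_level("bogus", 5));
        return 0;
}
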
diff --git a/kernel/power/snapshot.c b/kernel/power/snapshot.c
index 78039b477d2..95250d7c8d9 100644
--- a/kernel/power/snapshot.c
+++ b/kernel/power/snapshot.c
@@ -635,7 +635,7 @@ __register_nosave_region(unsigned long start_pfn, unsigned long end_pfn,
region->end_pfn = end_pfn;
list_add_tail(&region->list, &nosave_regions);
Report:
- printk("swsusp: Registered nosave memory region: %016lx - %016lx\n",
+ printk(KERN_INFO "PM: Registered nosave memory: %016lx - %016lx\n",
start_pfn << PAGE_SHIFT, end_pfn << PAGE_SHIFT);
}
@@ -704,7 +704,7 @@ static void mark_nosave_pages(struct memory_bitmap *bm)
list_for_each_entry(region, &nosave_regions, list) {
unsigned long pfn;
- printk("swsusp: Marking nosave pages: %016lx - %016lx\n",
+ pr_debug("PM: Marking nosave pages: %016lx - %016lx\n",
region->start_pfn << PAGE_SHIFT,
region->end_pfn << PAGE_SHIFT);
@@ -749,7 +749,7 @@ int create_basic_memory_bitmaps(void)
free_pages_map = bm2;
mark_nosave_pages(forbidden_pages_map);
- printk("swsusp: Basic memory bitmaps created\n");
+ pr_debug("PM: Basic memory bitmaps created\n");
return 0;
@@ -784,7 +784,7 @@ void free_basic_memory_bitmaps(void)
memory_bm_free(bm2, PG_UNSAFE_CLEAR);
kfree(bm2);
- printk("swsusp: Basic memory bitmaps freed\n");
+ pr_debug("PM: Basic memory bitmaps freed\n");
}
/**
@@ -872,7 +872,6 @@ unsigned int count_highmem_pages(void)
}
#else
static inline void *saveable_highmem_page(unsigned long pfn) { return NULL; }
-static inline unsigned int count_highmem_pages(void) { return 0; }
#endif /* CONFIG_HIGHMEM */
/**
@@ -1089,7 +1088,7 @@ static int enough_free_mem(unsigned int nr_pages, unsigned int nr_highmem)
}
nr_pages += count_pages_for_highmem(nr_highmem);
- pr_debug("swsusp: Normal pages needed: %u + %u + %u, available pages: %u\n",
+ pr_debug("PM: Normal pages needed: %u + %u + %u, available pages: %u\n",
nr_pages, PAGES_FOR_IO, meta, free);
return free > nr_pages + PAGES_FOR_IO + meta;
@@ -1202,27 +1201,27 @@ asmlinkage int swsusp_save(void)
{
unsigned int nr_pages, nr_highmem;
- printk("swsusp: critical section: \n");
+ printk(KERN_INFO "PM: Creating hibernation image: \n");
- drain_local_pages();
+ drain_local_pages(NULL);
nr_pages = count_data_pages();
nr_highmem = count_highmem_pages();
- printk("swsusp: Need to copy %u pages\n", nr_pages + nr_highmem);
+ printk(KERN_INFO "PM: Need to copy %u pages\n", nr_pages + nr_highmem);
if (!enough_free_mem(nr_pages, nr_highmem)) {
- printk(KERN_ERR "swsusp: Not enough free memory\n");
+ printk(KERN_ERR "PM: Not enough free memory\n");
return -ENOMEM;
}
if (swsusp_alloc(&orig_bm, &copy_bm, nr_pages, nr_highmem)) {
- printk(KERN_ERR "swsusp: Memory allocation failed\n");
+ printk(KERN_ERR "PM: Memory allocation failed\n");
return -ENOMEM;
}
/* During allocating of suspend pagedir, new cold pages may appear.
* Kill them.
*/
- drain_local_pages();
+ drain_local_pages(NULL);
copy_data_pages(&copy_bm, &orig_bm);
/*
@@ -1235,7 +1234,8 @@ asmlinkage int swsusp_save(void)
nr_copy_pages = nr_pages;
nr_meta_pages = DIV_ROUND_UP(nr_pages * sizeof(long), PAGE_SIZE);
- printk("swsusp: critical section: done (%d pages copied)\n", nr_pages);
+ printk(KERN_INFO "PM: Hibernation image created (%d pages copied)\n",
+ nr_pages);
return 0;
}
@@ -1264,12 +1264,17 @@ static char *check_image_kernel(struct swsusp_info *info)
}
#endif /* CONFIG_ARCH_HIBERNATION_HEADER */
+unsigned long snapshot_get_image_size(void)
+{
+ return nr_copy_pages + nr_meta_pages + 1;
+}
+
static int init_header(struct swsusp_info *info)
{
memset(info, 0, sizeof(struct swsusp_info));
info->num_physpages = num_physpages;
info->image_pages = nr_copy_pages;
- info->pages = nr_copy_pages + nr_meta_pages + 1;
+ info->pages = snapshot_get_image_size();
info->size = info->pages;
info->size <<= PAGE_SHIFT;
return init_header_complete(info);
@@ -1429,7 +1434,7 @@ static int check_header(struct swsusp_info *info)
if (!reason && info->num_physpages != num_physpages)
reason = "memory size";
if (reason) {
- printk(KERN_ERR "swsusp: Resume mismatch: %s\n", reason);
+ printk(KERN_ERR "PM: Image mismatch: %s\n", reason);
return -EPERM;
}
return 0;
diff --git a/kernel/power/swap.c b/kernel/power/swap.c
index 917aba10057..a0abf9a463f 100644
--- a/kernel/power/swap.c
+++ b/kernel/power/swap.c
@@ -28,8 +28,6 @@
#include "power.h"
-extern char resume_file[];
-
#define SWSUSP_SIG "S1SUSPEND"
struct swsusp_header {
@@ -73,7 +71,8 @@ static int submit(int rw, pgoff_t page_off, struct page *page,
bio->bi_end_io = end_swap_bio_read;
if (bio_add_page(bio, page, PAGE_SIZE, 0) < PAGE_SIZE) {
- printk("swsusp: ERROR: adding page to bio at %ld\n", page_off);
+ printk(KERN_ERR "PM: Adding page to bio failed at %ld\n",
+ page_off);
bio_put(bio);
return -EFAULT;
}
@@ -153,7 +152,7 @@ static int mark_swapfiles(sector_t start, unsigned int flags)
error = bio_write_page(swsusp_resume_block,
swsusp_header, NULL);
} else {
- printk(KERN_ERR "swsusp: Swap header not found!\n");
+ printk(KERN_ERR "PM: Swap header not found!\n");
error = -ENODEV;
}
return error;
@@ -325,7 +324,8 @@ static int save_image(struct swap_map_handle *handle,
struct timeval start;
struct timeval stop;
- printk("Saving image data pages (%u pages) ... ", nr_to_write);
+ printk(KERN_INFO "PM: Saving image data pages (%u pages) ... ",
+ nr_to_write);
m = nr_to_write / 100;
if (!m)
m = 1;
@@ -365,7 +365,7 @@ static int enough_swap(unsigned int nr_pages)
{
unsigned int free_swap = count_swap_pages(root_swap, 1);
- pr_debug("swsusp: free swap pages: %u\n", free_swap);
+ pr_debug("PM: Free swap pages: %u\n", free_swap);
return free_swap > nr_pages + PAGES_FOR_IO;
}
@@ -388,7 +388,7 @@ int swsusp_write(unsigned int flags)
error = swsusp_swap_check();
if (error) {
- printk(KERN_ERR "swsusp: Cannot find swap device, try "
+ printk(KERN_ERR "PM: Cannot find swap device, try "
"swapon -a.\n");
return error;
}
@@ -402,7 +402,7 @@ int swsusp_write(unsigned int flags)
}
header = (struct swsusp_info *)data_of(snapshot);
if (!enough_swap(header->pages)) {
- printk(KERN_ERR "swsusp: Not enough free swap\n");
+ printk(KERN_ERR "PM: Not enough free swap\n");
error = -ENOSPC;
goto out;
}
@@ -417,7 +417,7 @@ int swsusp_write(unsigned int flags)
if (!error) {
flush_swap_writer(&handle);
- printk("S");
+ printk(KERN_INFO "PM: S");
error = mark_swapfiles(start, flags);
printk("|\n");
}
@@ -507,7 +507,8 @@ static int load_image(struct swap_map_handle *handle,
int err2;
unsigned nr_pages;
- printk("Loading image data pages (%u pages) ... ", nr_to_read);
+ printk(KERN_INFO "PM: Loading image data pages (%u pages) ... ",
+ nr_to_read);
m = nr_to_read / 100;
if (!m)
m = 1;
@@ -558,7 +559,7 @@ int swsusp_read(unsigned int *flags_p)
*flags_p = swsusp_header->flags;
if (IS_ERR(resume_bdev)) {
- pr_debug("swsusp: block device not initialised\n");
+ pr_debug("PM: Image device not initialised\n");
return PTR_ERR(resume_bdev);
}
@@ -577,9 +578,9 @@ int swsusp_read(unsigned int *flags_p)
blkdev_put(resume_bdev);
if (!error)
- pr_debug("swsusp: Reading resume file was successful\n");
+ pr_debug("PM: Image successfully loaded\n");
else
- pr_debug("swsusp: Error %d resuming\n", error);
+ pr_debug("PM: Error %d resuming\n", error);
return error;
}
@@ -611,13 +612,13 @@ int swsusp_check(void)
if (error)
blkdev_put(resume_bdev);
else
- pr_debug("swsusp: Signature found, resuming\n");
+ pr_debug("PM: Signature found, resuming\n");
} else {
error = PTR_ERR(resume_bdev);
}
if (error)
- pr_debug("swsusp: Error %d check for resume file\n", error);
+ pr_debug("PM: Error %d checking image file\n", error);
return error;
}
@@ -629,7 +630,7 @@ int swsusp_check(void)
void swsusp_close(void)
{
if (IS_ERR(resume_bdev)) {
- pr_debug("swsusp: block device not initialised\n");
+ pr_debug("PM: Image device not initialised\n");
return;
}
diff --git a/kernel/power/swsusp.c b/kernel/power/swsusp.c
index e1722d3155f..023ff2a31d8 100644
--- a/kernel/power/swsusp.c
+++ b/kernel/power/swsusp.c
@@ -64,14 +64,6 @@ unsigned long image_size = 500 * 1024 * 1024;
int in_suspend __nosavedata = 0;
-#ifdef CONFIG_HIGHMEM
-unsigned int count_highmem_pages(void);
-int restore_highmem(void);
-#else
-static inline int restore_highmem(void) { return 0; }
-static inline unsigned int count_highmem_pages(void) { return 0; }
-#endif
-
/**
* The following functions are used for tracing the allocated
* swap pages, so that they can be freed in case of an error.
@@ -196,7 +188,8 @@ void swsusp_show_speed(struct timeval *start, struct timeval *stop,
centisecs = 1; /* avoid div-by-zero */
k = nr_pages * (PAGE_SIZE / 1024);
kps = (k * 100) / centisecs;
- printk("%s %d kbytes in %d.%02d seconds (%d.%02d MB/s)\n", msg, k,
+ printk(KERN_INFO "PM: %s %d kbytes in %d.%02d seconds (%d.%02d MB/s)\n",
+ msg, k,
centisecs / 100, centisecs % 100,
kps / 1000, (kps % 1000) / 10);
}
@@ -227,7 +220,7 @@ int swsusp_shrink_memory(void)
char *p = "-\\|/";
struct timeval start, stop;
- printk("Shrinking memory... ");
+ printk(KERN_INFO "PM: Shrinking memory... ");
do_gettimeofday(&start);
do {
long size, highmem_size;
@@ -269,38 +262,3 @@ int swsusp_shrink_memory(void)
return 0;
}
-
-int swsusp_resume(void)
-{
- int error;
-
- local_irq_disable();
- /* NOTE: device_power_down() is just a suspend() with irqs off;
- * it has no special "power things down" semantics
- */
- if (device_power_down(PMSG_PRETHAW))
- printk(KERN_ERR "Some devices failed to power down, very bad\n");
- /* We'll ignore saved state, but this gets preempt count (etc) right */
- save_processor_state();
- error = restore_highmem();
- if (!error) {
- error = swsusp_arch_resume();
- /* The code below is only ever reached in case of a failure.
- * Otherwise execution continues at place where
- * swsusp_arch_suspend() was called
- */
- BUG_ON(!error);
- /* This call to restore_highmem() undos the previous one */
- restore_highmem();
- }
- /* The only reason why swsusp_arch_resume() can fail is memory being
- * very tight, so we have to free it as soon as we can to avoid
- * subsequent failures
- */
- swsusp_free();
- restore_processor_state();
- touch_softlockup_watchdog();
- device_power_up();
- local_irq_enable();
- return error;
-}
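
swsusp_show_speed(), now tagged "PM:" like the rest of these messages, builds
its output from integer-only arithmetic on centiseconds and page counts. A
small stand-alone version of the same calculation, fed made-up numbers, shows
how the kbytes and MB/s figures in that printk are derived:

/* Stand-alone model of the throughput arithmetic in swsusp_show_speed(). */
#include <stdio.h>

#define PAGE_SIZE 4096          /* illustrative; matches most configurations */

static void show_speed(long centisecs, unsigned int nr_pages, const char *msg)
{
        unsigned int k, kps;

        if (centisecs == 0)
                centisecs = 1;  /* avoid div-by-zero */
        k = nr_pages * (PAGE_SIZE / 1024);
        kps = (k * 100) / centisecs;
        printf("PM: %s %u kbytes in %ld.%02ld seconds (%u.%02u MB/s)\n",
               msg, k, centisecs / 100, centisecs % 100,
               kps / 1000, (kps % 1000) / 10);
}

int main(void)
{
        show_speed(342, 65536, "Wrote");        /* 256 MB in 3.42 s */
        return 0;
}
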
diff --git a/kernel/power/user.c b/kernel/power/user.c
index 5bd321bcbb7..f5512cb3aa8 100644
--- a/kernel/power/user.c
+++ b/kernel/power/user.c
@@ -28,6 +28,29 @@
#include "power.h"
+/*
+ * NOTE: The SNAPSHOT_SET_SWAP_FILE and SNAPSHOT_PMOPS ioctls are obsolete and
+ * will be removed in the future. They are only preserved here for
+ * compatibility with existing userland utilities.
+ */
+#define SNAPSHOT_SET_SWAP_FILE _IOW(SNAPSHOT_IOC_MAGIC, 10, unsigned int)
+#define SNAPSHOT_PMOPS _IOW(SNAPSHOT_IOC_MAGIC, 12, unsigned int)
+
+#define PMOPS_PREPARE 1
+#define PMOPS_ENTER 2
+#define PMOPS_FINISH 3
+
+/*
+ * NOTE: The following ioctl definitions are wrong and have been replaced with
+ * correct ones. They are only preserved here for compatibility with existing
+ * userland utilities and will be removed in the future.
+ */
+#define SNAPSHOT_ATOMIC_SNAPSHOT _IOW(SNAPSHOT_IOC_MAGIC, 3, void *)
+#define SNAPSHOT_SET_IMAGE_SIZE _IOW(SNAPSHOT_IOC_MAGIC, 6, unsigned long)
+#define SNAPSHOT_AVAIL_SWAP _IOR(SNAPSHOT_IOC_MAGIC, 7, void *)
+#define SNAPSHOT_GET_SWAP_PAGE _IOR(SNAPSHOT_IOC_MAGIC, 8, void *)
+
+
#define SNAPSHOT_MINOR 231
static struct snapshot_data {
@@ -36,7 +59,7 @@ static struct snapshot_data {
int mode;
char frozen;
char ready;
- char platform_suspend;
+ char platform_support;
} snapshot_state;
atomic_t snapshot_device_available = ATOMIC_INIT(1);
@@ -44,6 +67,7 @@ atomic_t snapshot_device_available = ATOMIC_INIT(1);
static int snapshot_open(struct inode *inode, struct file *filp)
{
struct snapshot_data *data;
+ int error;
if (!atomic_add_unless(&snapshot_device_available, -1, 0))
return -EBUSY;
@@ -64,13 +88,23 @@ static int snapshot_open(struct inode *inode, struct file *filp)
data->swap = swsusp_resume_device ?
swap_type_of(swsusp_resume_device, 0, NULL) : -1;
data->mode = O_RDONLY;
+ error = pm_notifier_call_chain(PM_RESTORE_PREPARE);
+ if (error)
+ pm_notifier_call_chain(PM_POST_RESTORE);
} else {
data->swap = -1;
data->mode = O_WRONLY;
+ error = pm_notifier_call_chain(PM_HIBERNATION_PREPARE);
+ if (error)
+ pm_notifier_call_chain(PM_POST_HIBERNATION);
+ }
+ if (error) {
+ atomic_inc(&snapshot_device_available);
+ return error;
}
data->frozen = 0;
data->ready = 0;
- data->platform_suspend = 0;
+ data->platform_support = 0;
return 0;
}
@@ -88,6 +122,8 @@ static int snapshot_release(struct inode *inode, struct file *filp)
thaw_processes();
mutex_unlock(&pm_mutex);
}
+ pm_notifier_call_chain(data->mode == O_WRONLY ?
+ PM_POST_HIBERNATION : PM_POST_RESTORE);
atomic_inc(&snapshot_device_available);
return 0;
}
@@ -133,7 +169,7 @@ static int snapshot_ioctl(struct inode *inode, struct file *filp,
{
int error = 0;
struct snapshot_data *data;
- loff_t avail;
+ loff_t size;
sector_t offset;
if (_IOC_TYPE(cmd) != SNAPSHOT_IOC_MAGIC)
@@ -151,18 +187,13 @@ static int snapshot_ioctl(struct inode *inode, struct file *filp,
if (data->frozen)
break;
mutex_lock(&pm_mutex);
- error = pm_notifier_call_chain(PM_HIBERNATION_PREPARE);
- if (!error) {
- printk("Syncing filesystems ... ");
- sys_sync();
- printk("done.\n");
-
- error = freeze_processes();
- if (error)
- thaw_processes();
- }
+ printk("Syncing filesystems ... ");
+ sys_sync();
+ printk("done.\n");
+
+ error = freeze_processes();
if (error)
- pm_notifier_call_chain(PM_POST_HIBERNATION);
+ thaw_processes();
mutex_unlock(&pm_mutex);
if (!error)
data->frozen = 1;
@@ -173,19 +204,19 @@ static int snapshot_ioctl(struct inode *inode, struct file *filp,
break;
mutex_lock(&pm_mutex);
thaw_processes();
- pm_notifier_call_chain(PM_POST_HIBERNATION);
mutex_unlock(&pm_mutex);
data->frozen = 0;
break;
+ case SNAPSHOT_CREATE_IMAGE:
case SNAPSHOT_ATOMIC_SNAPSHOT:
if (data->mode != O_RDONLY || !data->frozen || data->ready) {
error = -EPERM;
break;
}
- error = hibernation_snapshot(data->platform_suspend);
+ error = hibernation_snapshot(data->platform_support);
if (!error)
- error = put_user(in_suspend, (unsigned int __user *)arg);
+ error = put_user(in_suspend, (int __user *)arg);
if (!error)
data->ready = 1;
break;
@@ -197,7 +228,7 @@ static int snapshot_ioctl(struct inode *inode, struct file *filp,
error = -EPERM;
break;
}
- error = hibernation_restore(data->platform_suspend);
+ error = hibernation_restore(data->platform_support);
break;
case SNAPSHOT_FREE:
@@ -206,16 +237,29 @@ static int snapshot_ioctl(struct inode *inode, struct file *filp,
data->ready = 0;
break;
+ case SNAPSHOT_PREF_IMAGE_SIZE:
case SNAPSHOT_SET_IMAGE_SIZE:
image_size = arg;
break;
+ case SNAPSHOT_GET_IMAGE_SIZE:
+ if (!data->ready) {
+ error = -ENODATA;
+ break;
+ }
+ size = snapshot_get_image_size();
+ size <<= PAGE_SHIFT;
+ error = put_user(size, (loff_t __user *)arg);
+ break;
+
+ case SNAPSHOT_AVAIL_SWAP_SIZE:
case SNAPSHOT_AVAIL_SWAP:
- avail = count_swap_pages(data->swap, 1);
- avail <<= PAGE_SHIFT;
- error = put_user(avail, (loff_t __user *)arg);
+ size = count_swap_pages(data->swap, 1);
+ size <<= PAGE_SHIFT;
+ error = put_user(size, (loff_t __user *)arg);
break;
+ case SNAPSHOT_ALLOC_SWAP_PAGE:
case SNAPSHOT_GET_SWAP_PAGE:
if (data->swap < 0 || data->swap >= MAX_SWAPFILES) {
error = -ENODEV;
@@ -224,7 +268,7 @@ static int snapshot_ioctl(struct inode *inode, struct file *filp,
offset = alloc_swapdev_block(data->swap);
if (offset) {
offset <<= PAGE_SHIFT;
- error = put_user(offset, (sector_t __user *)arg);
+ error = put_user(offset, (loff_t __user *)arg);
} else {
error = -ENOSPC;
}
@@ -238,7 +282,7 @@ static int snapshot_ioctl(struct inode *inode, struct file *filp,
free_all_swap_pages(data->swap);
break;
- case SNAPSHOT_SET_SWAP_FILE:
+ case SNAPSHOT_SET_SWAP_FILE: /* This ioctl is deprecated */
if (!swsusp_swap_in_use()) {
/*
* User space encodes device types as two-byte values,
@@ -275,26 +319,33 @@ static int snapshot_ioctl(struct inode *inode, struct file *filp,
mutex_unlock(&pm_mutex);
break;
- case SNAPSHOT_PMOPS:
+ case SNAPSHOT_PLATFORM_SUPPORT:
+ data->platform_support = !!arg;
+ break;
+
+ case SNAPSHOT_POWER_OFF:
+ if (data->platform_support)
+ error = hibernation_platform_enter();
+ break;
+
+ case SNAPSHOT_PMOPS: /* This ioctl is deprecated */
error = -EINVAL;
switch (arg) {
case PMOPS_PREPARE:
- data->platform_suspend = 1;
+ data->platform_support = 1;
error = 0;
break;
case PMOPS_ENTER:
- if (data->platform_suspend)
+ if (data->platform_support)
error = hibernation_platform_enter();
-
break;
case PMOPS_FINISH:
- if (data->platform_suspend)
+ if (data->platform_support)
error = 0;
-
break;
default:
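
With the renamed ioctls every size reported to user space travels as a
loff_t. A hedged user-space sketch of the new SNAPSHOT_GET_IMAGE_SIZE call
(it assumes the snapshot misc device is reachable as /dev/snapshot and that
<linux/suspend_ioctls.h>, added by this series, is exported to user space;
per snapshot_ioctl() above the call fails with ENODATA until an image has
actually been created):

/* Hypothetical sketch: query the hibernation image size from user space. */
#include <fcntl.h>
#include <stdio.h>
#include <sys/ioctl.h>
#include <unistd.h>
#include <linux/suspend_ioctls.h>

int main(void)
{
        long long size;         /* sized to match the kernel's loff_t put_user() */
        int fd = open("/dev/snapshot", O_RDONLY);

        if (fd < 0) {
                perror("open /dev/snapshot");
                return 1;
        }
        if (ioctl(fd, SNAPSHOT_GET_IMAGE_SIZE, &size) < 0)
                perror("SNAPSHOT_GET_IMAGE_SIZE");      /* ENODATA if no image */
        else
                printf("image size: %lld bytes\n", size);
        close(fd);
        return 0;
}

The deprecated names (SNAPSHOT_ATOMIC_SNAPSHOT, SNAPSHOT_SET_SWAP_FILE,
SNAPSHOT_PMOPS and the others preserved above) keep working through the same
switch, so existing suspend utilities are not broken by the renaming.
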
diff --git a/kernel/printk.c b/kernel/printk.c
index 29ae1e99cde..4a090621f37 100644
--- a/kernel/printk.c
+++ b/kernel/printk.c
@@ -93,16 +93,16 @@ static int console_locked, console_suspended;
*/
static DEFINE_SPINLOCK(logbuf_lock);
-#define LOG_BUF_MASK (log_buf_len-1)
+#define LOG_BUF_MASK (log_buf_len-1)
#define LOG_BUF(idx) (log_buf[(idx) & LOG_BUF_MASK])
/*
* The indices into log_buf are not constrained to log_buf_len - they
* must be masked before subscripting
*/
-static unsigned long log_start; /* Index into log_buf: next char to be read by syslog() */
-static unsigned long con_start; /* Index into log_buf: next char to be sent to consoles */
-static unsigned long log_end; /* Index into log_buf: most-recently-written-char + 1 */
+static unsigned log_start; /* Index into log_buf: next char to be read by syslog() */
+static unsigned con_start; /* Index into log_buf: next char to be sent to consoles */
+static unsigned log_end; /* Index into log_buf: most-recently-written-char + 1 */
/*
* Array of consoles built from command line options (console=)
@@ -128,17 +128,17 @@ static int console_may_schedule;
static char __log_buf[__LOG_BUF_LEN];
static char *log_buf = __log_buf;
static int log_buf_len = __LOG_BUF_LEN;
-static unsigned long logged_chars; /* Number of chars produced since last read+clear operation */
+static unsigned logged_chars; /* Number of chars produced since last read+clear operation */
static int __init log_buf_len_setup(char *str)
{
- unsigned long size = memparse(str, &str);
+ unsigned size = memparse(str, &str);
unsigned long flags;
if (size)
size = roundup_pow_of_two(size);
if (size > log_buf_len) {
- unsigned long start, dest_idx, offset;
+ unsigned start, dest_idx, offset;
char *new_log_buf;
new_log_buf = alloc_bootmem(size);
@@ -295,7 +295,7 @@ int log_buf_read(int idx)
*/
int do_syslog(int type, char __user *buf, int len)
{
- unsigned long i, j, limit, count;
+ unsigned i, j, limit, count;
int do_clear = 0;
char c;
int error = 0;
@@ -436,7 +436,7 @@ asmlinkage long sys_syslog(int type, char __user *buf, int len)
/*
* Call the console drivers on a range of log_buf
*/
-static void __call_console_drivers(unsigned long start, unsigned long end)
+static void __call_console_drivers(unsigned start, unsigned end)
{
struct console *con;
@@ -463,8 +463,8 @@ early_param("ignore_loglevel", ignore_loglevel_setup);
/*
* Write out chars from start to end - 1 inclusive
*/
-static void _call_console_drivers(unsigned long start,
- unsigned long end, int msg_log_level)
+static void _call_console_drivers(unsigned start,
+ unsigned end, int msg_log_level)
{
if ((msg_log_level < console_loglevel || ignore_loglevel) &&
console_drivers && start != end) {
@@ -484,12 +484,12 @@ static void _call_console_drivers(unsigned long start,
* log_buf[start] to log_buf[end - 1].
* The console_sem must be held.
*/
-static void call_console_drivers(unsigned long start, unsigned long end)
+static void call_console_drivers(unsigned start, unsigned end)
{
- unsigned long cur_index, start_print;
+ unsigned cur_index, start_print;
static int msg_level = -1;
- BUG_ON(((long)(start - end)) > 0);
+ BUG_ON(((int)(start - end)) > 0);
cur_index = start;
start_print = start;
@@ -790,7 +790,7 @@ asmlinkage long sys_syslog(int type, char __user *buf, int len)
return -ENOSYS;
}
-static void call_console_drivers(unsigned long start, unsigned long end)
+static void call_console_drivers(unsigned start, unsigned end)
{
}
@@ -983,8 +983,8 @@ void wake_up_klogd(void)
void release_console_sem(void)
{
unsigned long flags;
- unsigned long _con_start, _log_end;
- unsigned long wake_klogd = 0;
+ unsigned _con_start, _log_end;
+ unsigned wake_klogd = 0;
if (console_suspended) {
up(&secondary_console_sem);
@@ -1275,7 +1275,7 @@ void tty_write_message(struct tty_struct *tty, char *msg)
int __printk_ratelimit(int ratelimit_jiffies, int ratelimit_burst)
{
static DEFINE_SPINLOCK(ratelimit_lock);
- static unsigned long toks = 10 * 5 * HZ;
+ static unsigned toks = 10 * 5 * HZ;
static unsigned long last_msg;
static int missed;
unsigned long flags;
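
Shrinking the log indices from unsigned long to unsigned is safe because they
are free-running counters that only ever reach the buffer through LOG_BUF()'s
power-of-two mask, and distances between them are taken with unsigned
arithmetic, so wrap-around is harmless. A stand-alone model of that indexing
pattern:

/* Stand-alone model of masked, free-running ring-buffer indices. */
#include <stdio.h>

#define BUF_LEN  16u                    /* must be a power of two */
#define BUF_MASK (BUF_LEN - 1)

static char buf[BUF_LEN];
static unsigned int start, end;         /* never reset, only masked */

static void emit_char(char c)
{
        buf[end++ & BUF_MASK] = c;
        if (end - start > BUF_LEN)      /* oldest data overwritten */
                start = end - BUF_LEN;
}

int main(void)
{
        const char *msg = "hello, ring buffer";
        unsigned int i;

        for (i = 0; msg[i]; i++)
                emit_char(msg[i]);
        for (i = start; i != end; i++)
                putchar(buf[i & BUF_MASK]);
        putchar('\n');
        return 0;
}
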
diff --git a/kernel/ptrace.c b/kernel/ptrace.c
index b0d4ab4dfd3..628b03ab88a 100644
--- a/kernel/ptrace.c
+++ b/kernel/ptrace.c
@@ -20,6 +20,7 @@
#include <linux/signal.h>
#include <linux/audit.h>
#include <linux/pid_namespace.h>
+#include <linux/syscalls.h>
#include <asm/pgtable.h>
#include <asm/uaccess.h>
@@ -53,7 +54,7 @@ void ptrace_untrace(struct task_struct *child)
spin_lock(&child->sighand->siglock);
if (task_is_traced(child)) {
if (child->signal->flags & SIGNAL_STOP_STOPPED) {
- child->state = TASK_STOPPED;
+ __set_task_state(child, TASK_STOPPED);
} else {
signal_wake_up(child, 1);
}
@@ -103,18 +104,16 @@ int ptrace_check_attach(struct task_struct *child, int kill)
&& child->signal != NULL) {
ret = 0;
spin_lock_irq(&child->sighand->siglock);
- if (task_is_stopped(child)) {
+ if (task_is_stopped(child))
child->state = TASK_TRACED;
- } else if (!task_is_traced(child) && !kill) {
+ else if (!task_is_traced(child) && !kill)
ret = -ESRCH;
- }
spin_unlock_irq(&child->sighand->siglock);
}
read_unlock(&tasklist_lock);
- if (!ret && !kill) {
+ if (!ret && !kill)
wait_task_inactive(child);
- }
/* All systems go.. */
return ret;
diff --git a/kernel/relay.c b/kernel/relay.c
index 61134eb7a0c..d080b9d161a 100644
--- a/kernel/relay.c
+++ b/kernel/relay.c
@@ -37,37 +37,31 @@ static void relay_file_mmap_close(struct vm_area_struct *vma)
}
/*
- * nopage() vm_op implementation for relay file mapping.
+ * fault() vm_op implementation for relay file mapping.
*/
-static struct page *relay_buf_nopage(struct vm_area_struct *vma,
- unsigned long address,
- int *type)
+static int relay_buf_fault(struct vm_area_struct *vma, struct vm_fault *vmf)
{
struct page *page;
struct rchan_buf *buf = vma->vm_private_data;
- unsigned long offset = address - vma->vm_start;
+ pgoff_t pgoff = vmf->pgoff;
- if (address > vma->vm_end)
- return NOPAGE_SIGBUS; /* Disallow mremap */
if (!buf)
- return NOPAGE_OOM;
+ return VM_FAULT_OOM;
- page = vmalloc_to_page(buf->start + offset);
+ page = vmalloc_to_page(buf->start + (pgoff << PAGE_SHIFT));
if (!page)
- return NOPAGE_OOM;
+ return VM_FAULT_SIGBUS;
get_page(page);
+ vmf->page = page;
- if (type)
- *type = VM_FAULT_MINOR;
-
- return page;
+ return 0;
}
/*
* vm_ops for relay file mappings.
*/
static struct vm_operations_struct relay_file_mmap_ops = {
- .nopage = relay_buf_nopage,
+ .fault = relay_buf_fault,
.close = relay_file_mmap_close,
};
@@ -92,6 +86,7 @@ static int relay_mmap_buf(struct rchan_buf *buf, struct vm_area_struct *vma)
return -EINVAL;
vma->vm_ops = &relay_file_mmap_ops;
+ vma->vm_flags |= VM_DONTEXPAND;
vma->vm_private_data = buf;
buf->chan->cb->buf_mapped(buf, filp);
diff --git a/kernel/signal.c b/kernel/signal.c
index 4333b6dbb42..5d30ff56184 100644
--- a/kernel/signal.c
+++ b/kernel/signal.c
@@ -911,27 +911,6 @@ __group_complete_signal(int sig, struct task_struct *p)
} while_each_thread(p, t);
return;
}
-
- /*
- * There will be a core dump. We make all threads other
- * than the chosen one go into a group stop so that nothing
- * happens until it gets scheduled, takes the signal off
- * the shared queue, and does the core dump. This is a
- * little more complicated than strictly necessary, but it
- * keeps the signal state that winds up in the core dump
- * unchanged from the death state, e.g. which thread had
- * the core-dump signal unblocked.
- */
- rm_from_queue(SIG_KERNEL_STOP_MASK, &t->pending);
- rm_from_queue(SIG_KERNEL_STOP_MASK, &p->signal->shared_pending);
- p->signal->group_stop_count = 0;
- p->signal->group_exit_task = t;
- p = t;
- do {
- p->signal->group_stop_count++;
- signal_wake_up(t, t == p);
- } while_each_thread(p, t);
- return;
}
/*
@@ -978,7 +957,6 @@ void zap_other_threads(struct task_struct *p)
{
struct task_struct *t;
- p->signal->flags = SIGNAL_GROUP_EXIT;
p->signal->group_stop_count = 0;
for (t = next_thread(p); t != p; t = next_thread(t)) {
@@ -1600,6 +1578,17 @@ static inline int may_ptrace_stop(void)
}
/*
+ * Return nonzero if there is a SIGKILL that should be waking us up.
+ * Called with the siglock held.
+ */
+static int sigkill_pending(struct task_struct *tsk)
+{
+ return ((sigismember(&tsk->pending.signal, SIGKILL) ||
+ sigismember(&tsk->signal->shared_pending.signal, SIGKILL)) &&
+ !unlikely(sigismember(&tsk->blocked, SIGKILL)));
+}
+
+/*
* This must be called with current->sighand->siglock held.
*
* This should be the path for all ptrace stops.
@@ -1612,6 +1601,26 @@ static inline int may_ptrace_stop(void)
*/
static void ptrace_stop(int exit_code, int nostop_code, siginfo_t *info)
{
+ int killed = 0;
+
+ if (arch_ptrace_stop_needed(exit_code, info)) {
+ /*
+ * The arch code has something special to do before a
+ * ptrace stop. This is allowed to block, e.g. for faults
+ * on user stack pages. We can't keep the siglock while
+ * calling arch_ptrace_stop, so we must release it now.
+ * To preserve proper semantics, we must do this before
+ * any signal bookkeeping like checking group_stop_count.
+ * Meanwhile, a SIGKILL could come in before we retake the
+ * siglock. That must prevent us from sleeping in TASK_TRACED.
+ * So after regaining the lock, we must check for SIGKILL.
+ */
+ spin_unlock_irq(&current->sighand->siglock);
+ arch_ptrace_stop(exit_code, info);
+ spin_lock_irq(&current->sighand->siglock);
+ killed = sigkill_pending(current);
+ }
+
/*
* If there is a group stop in progress,
* we must participate in the bookkeeping.
@@ -1623,11 +1632,11 @@ static void ptrace_stop(int exit_code, int nostop_code, siginfo_t *info)
current->exit_code = exit_code;
/* Let the debugger run. */
- set_current_state(TASK_TRACED);
+ __set_current_state(TASK_TRACED);
spin_unlock_irq(&current->sighand->siglock);
try_to_freeze();
read_lock(&tasklist_lock);
- if (may_ptrace_stop()) {
+ if (!unlikely(killed) && may_ptrace_stop()) {
do_notify_parent_cldstop(current, CLD_TRAPPED);
read_unlock(&tasklist_lock);
schedule();
@@ -1709,9 +1718,6 @@ static int do_signal_stop(int signr)
struct signal_struct *sig = current->signal;
int stop_count;
- if (!likely(sig->flags & SIGNAL_STOP_DEQUEUED))
- return 0;
-
if (sig->group_stop_count > 0) {
/*
* There is a group stop in progress. We don't need to
@@ -1719,12 +1725,15 @@ static int do_signal_stop(int signr)
*/
stop_count = --sig->group_stop_count;
} else {
+ struct task_struct *t;
+
+ if (!likely(sig->flags & SIGNAL_STOP_DEQUEUED) ||
+ unlikely(sig->group_exit_task))
+ return 0;
/*
* There is no group stop already in progress.
* We must initiate one now.
*/
- struct task_struct *t;
-
sig->group_exit_code = signr;
stop_count = 0;
@@ -1752,47 +1761,6 @@ static int do_signal_stop(int signr)
return 1;
}
-/*
- * Do appropriate magic when group_stop_count > 0.
- * We return nonzero if we stopped, after releasing the siglock.
- * We return zero if we still hold the siglock and should look
- * for another signal without checking group_stop_count again.
- */
-static int handle_group_stop(void)
-{
- int stop_count;
-
- if (current->signal->group_exit_task == current) {
- /*
- * Group stop is so we can do a core dump,
- * We are the initiating thread, so get on with it.
- */
- current->signal->group_exit_task = NULL;
- return 0;
- }
-
- if (current->signal->flags & SIGNAL_GROUP_EXIT)
- /*
- * Group stop is so another thread can do a core dump,
- * or else we are racing against a death signal.
- * Just punt the stop so we can get the next signal.
- */
- return 0;
-
- /*
- * There is a group stop in progress. We stop
- * without any associated signal being in our queue.
- */
- stop_count = --current->signal->group_stop_count;
- if (stop_count == 0)
- current->signal->flags = SIGNAL_STOP_STOPPED;
- current->exit_code = current->signal->group_exit_code;
- set_current_state(TASK_STOPPED);
- spin_unlock_irq(&current->sighand->siglock);
- finish_stop(stop_count);
- return 1;
-}
-
int get_signal_to_deliver(siginfo_t *info, struct k_sigaction *return_ka,
struct pt_regs *regs, void *cookie)
{
@@ -1807,7 +1775,7 @@ relock:
struct k_sigaction *ka;
if (unlikely(current->signal->group_stop_count > 0) &&
- handle_group_stop())
+ do_signal_stop(0))
goto relock;
signr = dequeue_signal(current, mask, info);
diff --git a/kernel/softlockup.c b/kernel/softlockup.c
index c1d76552446..7c2da88db4e 100644
--- a/kernel/softlockup.c
+++ b/kernel/softlockup.c
@@ -101,6 +101,10 @@ void softlockup_tick(void)
now = get_timestamp(this_cpu);
+ /* Wake up the high-prio watchdog task every second: */
+ if (now > (touch_timestamp + 1))
+ wake_up_process(per_cpu(watchdog_task, this_cpu));
+
/* Warn about unreasonable delays: */
if (now <= (touch_timestamp + softlockup_thresh))
return;
@@ -191,11 +195,11 @@ static void check_hung_uninterruptible_tasks(int this_cpu)
read_lock(&tasklist_lock);
do_each_thread(g, t) {
if (!--max_count)
- break;
+ goto unlock;
if (t->state & TASK_UNINTERRUPTIBLE)
check_hung_task(t, now);
} while_each_thread(g, t);
-
+ unlock:
read_unlock(&tasklist_lock);
}
@@ -218,14 +222,19 @@ static int watchdog(void *__bind_cpu)
* debug-printout triggers in softlockup_tick().
*/
while (!kthread_should_stop()) {
+ set_current_state(TASK_INTERRUPTIBLE);
touch_softlockup_watchdog();
- msleep_interruptible(10000);
+ schedule();
+
+ if (kthread_should_stop())
+ break;
if (this_cpu != check_cpu)
continue;
if (sysctl_hung_task_timeout_secs)
check_hung_uninterruptible_tasks(this_cpu);
+
}
return 0;
@@ -259,13 +268,6 @@ cpu_callback(struct notifier_block *nfb, unsigned long action, void *hcpu)
wake_up_process(per_cpu(watchdog_task, hotcpu));
break;
#ifdef CONFIG_HOTPLUG_CPU
- case CPU_UP_CANCELED:
- case CPU_UP_CANCELED_FROZEN:
- if (!per_cpu(watchdog_task, hotcpu))
- break;
- /* Unbind so it can run. Fall thru. */
- kthread_bind(per_cpu(watchdog_task, hotcpu),
- any_online_cpu(cpu_online_map));
case CPU_DOWN_PREPARE:
case CPU_DOWN_PREPARE_FROZEN:
if (hotcpu == check_cpu) {
@@ -275,6 +277,14 @@ cpu_callback(struct notifier_block *nfb, unsigned long action, void *hcpu)
check_cpu = any_online_cpu(temp_cpu_online_map);
}
break;
+
+ case CPU_UP_CANCELED:
+ case CPU_UP_CANCELED_FROZEN:
+ if (!per_cpu(watchdog_task, hotcpu))
+ break;
+ /* Unbind so it can run. Fall thru. */
+ kthread_bind(per_cpu(watchdog_task, hotcpu),
+ any_online_cpu(cpu_online_map));
case CPU_DEAD:
case CPU_DEAD_FROZEN:
p = per_cpu(watchdog_task, hotcpu);
diff --git a/kernel/srcu.c b/kernel/srcu.c
index 3507cabe963..b0aeeaf22ce 100644
--- a/kernel/srcu.c
+++ b/kernel/srcu.c
@@ -74,7 +74,7 @@ static int srcu_readers_active_idx(struct srcu_struct *sp, int idx)
* severe errors when invoked on an active srcu_struct. That said, it
* can be useful as an error check at cleanup time.
*/
-int srcu_readers_active(struct srcu_struct *sp)
+static int srcu_readers_active(struct srcu_struct *sp)
{
return srcu_readers_active_idx(sp, 0) + srcu_readers_active_idx(sp, 1);
}
@@ -255,4 +255,3 @@ EXPORT_SYMBOL_GPL(srcu_read_lock);
EXPORT_SYMBOL_GPL(srcu_read_unlock);
EXPORT_SYMBOL_GPL(synchronize_srcu);
EXPORT_SYMBOL_GPL(srcu_batches_completed);
-EXPORT_SYMBOL_GPL(srcu_readers_active);
diff --git a/kernel/stop_machine.c b/kernel/stop_machine.c
index 51b5ee53571..6f4e0e13f70 100644
--- a/kernel/stop_machine.c
+++ b/kernel/stop_machine.c
@@ -29,7 +29,6 @@ enum stopmachine_state {
static enum stopmachine_state stopmachine_state;
static unsigned int stopmachine_num_threads;
static atomic_t stopmachine_thread_ack;
-static DECLARE_MUTEX(stopmachine_mutex);
static int stopmachine(void *cpu)
{
@@ -170,6 +169,7 @@ static int do_stop(void *_smdata)
struct task_struct *__stop_machine_run(int (*fn)(void *), void *data,
unsigned int cpu)
{
+ static DEFINE_MUTEX(stopmachine_mutex);
struct stop_machine_data smdata;
struct task_struct *p;
@@ -177,7 +177,7 @@ struct task_struct *__stop_machine_run(int (*fn)(void *), void *data,
smdata.data = data;
init_completion(&smdata.done);
- down(&stopmachine_mutex);
+ mutex_lock(&stopmachine_mutex);
/* If they don't care which CPU fn runs on, bind to any online one. */
if (cpu == NR_CPUS)
@@ -193,7 +193,7 @@ struct task_struct *__stop_machine_run(int (*fn)(void *), void *data,
wake_up_process(p);
wait_for_completion(&smdata.done);
}
- up(&stopmachine_mutex);
+ mutex_unlock(&stopmachine_mutex);
return p;
}
diff --git a/kernel/sys.c b/kernel/sys.c
index d1fe71eb454..e3c08d4324d 100644
--- a/kernel/sys.c
+++ b/kernel/sys.c
@@ -315,7 +315,7 @@ static void kernel_kexec(void)
#endif
}
-void kernel_shutdown_prepare(enum system_states state)
+static void kernel_shutdown_prepare(enum system_states state)
{
blocking_notifier_call_chain(&reboot_notifier_list,
(state == SYSTEM_HALT)?SYS_HALT:SYS_POWER_OFF, NULL);
@@ -1145,16 +1145,16 @@ static int groups_to_user(gid_t __user *grouplist,
struct group_info *group_info)
{
int i;
- int count = group_info->ngroups;
+ unsigned int count = group_info->ngroups;
for (i = 0; i < group_info->nblocks; i++) {
- int cp_count = min(NGROUPS_PER_BLOCK, count);
- int off = i * NGROUPS_PER_BLOCK;
- int len = cp_count * sizeof(*grouplist);
+ unsigned int cp_count = min(NGROUPS_PER_BLOCK, count);
+ unsigned int len = cp_count * sizeof(*grouplist);
- if (copy_to_user(grouplist+off, group_info->blocks[i], len))
+ if (copy_to_user(grouplist, group_info->blocks[i], len))
return -EFAULT;
+ grouplist += NGROUPS_PER_BLOCK;
count -= cp_count;
}
return 0;
@@ -1165,16 +1165,16 @@ static int groups_from_user(struct group_info *group_info,
gid_t __user *grouplist)
{
int i;
- int count = group_info->ngroups;
+ unsigned int count = group_info->ngroups;
for (i = 0; i < group_info->nblocks; i++) {
- int cp_count = min(NGROUPS_PER_BLOCK, count);
- int off = i * NGROUPS_PER_BLOCK;
- int len = cp_count * sizeof(*grouplist);
+ unsigned int cp_count = min(NGROUPS_PER_BLOCK, count);
+ unsigned int len = cp_count * sizeof(*grouplist);
- if (copy_from_user(group_info->blocks[i], grouplist+off, len))
+ if (copy_from_user(group_info->blocks[i], grouplist, len))
return -EFAULT;
+ grouplist += NGROUPS_PER_BLOCK;
count -= cp_count;
}
return 0;
@@ -1472,7 +1472,7 @@ asmlinkage long sys_setrlimit(unsigned int resource, struct rlimit __user *rlim)
if ((new_rlim.rlim_max > old_rlim->rlim_max) &&
!capable(CAP_SYS_RESOURCE))
return -EPERM;
- if (resource == RLIMIT_NOFILE && new_rlim.rlim_max > NR_OPEN)
+ if (resource == RLIMIT_NOFILE && new_rlim.rlim_max > sysctl_nr_open)
return -EPERM;
retval = security_task_setrlimit(resource, &new_rlim);
@@ -1637,7 +1637,7 @@ asmlinkage long sys_umask(int mask)
mask = xchg(&current->fs->umask, mask & S_IRWXUGO);
return mask;
}
-
+
asmlinkage long sys_prctl(int option, unsigned long arg2, unsigned long arg3,
unsigned long arg4, unsigned long arg5)
{
@@ -1742,6 +1742,17 @@ asmlinkage long sys_prctl(int option, unsigned long arg2, unsigned long arg3,
error = prctl_set_seccomp(arg2);
break;
+ case PR_CAPBSET_READ:
+ if (!cap_valid(arg2))
+ return -EINVAL;
+ return !!cap_raised(current->cap_bset, arg2);
+ case PR_CAPBSET_DROP:
+#ifdef CONFIG_SECURITY_FILE_CAPABILITIES
+ return cap_prctl_drop(arg2);
+#else
+ return -EINVAL;
+#endif
+
default:
error = -EINVAL;
break;
diff --git a/kernel/sys_ni.c b/kernel/sys_ni.c
index beee5b3b68a..5b9b467de07 100644
--- a/kernel/sys_ni.c
+++ b/kernel/sys_ni.c
@@ -154,7 +154,10 @@ cond_syscall(sys_ioprio_get);
/* New file descriptors */
cond_syscall(sys_signalfd);
-cond_syscall(sys_timerfd);
cond_syscall(compat_sys_signalfd);
-cond_syscall(compat_sys_timerfd);
+cond_syscall(sys_timerfd_create);
+cond_syscall(sys_timerfd_settime);
+cond_syscall(sys_timerfd_gettime);
+cond_syscall(compat_sys_timerfd_settime);
+cond_syscall(compat_sys_timerfd_gettime);
cond_syscall(sys_eventfd);
diff --git a/kernel/sysctl.c b/kernel/sysctl.c
index 7cb1ac3e6ff..86daaa26d12 100644
--- a/kernel/sysctl.c
+++ b/kernel/sysctl.c
@@ -84,8 +84,11 @@ extern int sysctl_stat_interval;
extern int latencytop_enabled;
/* Constants used for minimum and maximum */
-#ifdef CONFIG_DETECT_SOFTLOCKUP
+#if defined(CONFIG_DETECT_SOFTLOCKUP) || defined(CONFIG_HIGHMEM)
static int one = 1;
+#endif
+
+#ifdef CONFIG_DETECT_SOFTLOCKUP
static int sixty = 60;
#endif
@@ -416,15 +419,6 @@ static struct ctl_table kern_table[] = {
.proc_handler = &proc_dointvec,
},
#endif
-#ifdef CONFIG_SECURITY_CAPABILITIES
- {
- .procname = "cap-bound",
- .data = &cap_bset,
- .maxlen = sizeof(kernel_cap_t),
- .mode = 0600,
- .proc_handler = &proc_dointvec_bset,
- },
-#endif /* def CONFIG_SECURITY_CAPABILITIES */
#ifdef CONFIG_BLK_DEV_INITRD
{
.ctl_name = KERN_REALROOTDEV,
@@ -1150,6 +1144,19 @@ static struct ctl_table vm_table[] = {
.extra1 = &zero,
},
#endif
+#ifdef CONFIG_HIGHMEM
+ {
+ .ctl_name = CTL_UNNUMBERED,
+ .procname = "highmem_is_dirtyable",
+ .data = &vm_highmem_is_dirtyable,
+ .maxlen = sizeof(vm_highmem_is_dirtyable),
+ .mode = 0644,
+ .proc_handler = &proc_dointvec_minmax,
+ .strategy = &sysctl_intvec,
+ .extra1 = &zero,
+ .extra2 = &one,
+ },
+#endif
/*
* NOTE: do not add new entries to this table unless you have read
* Documentation/sysctl/ctl_unnumbered.txt
@@ -1196,6 +1203,14 @@ static struct ctl_table fs_table[] = {
.proc_handler = &proc_dointvec,
},
{
+ .ctl_name = CTL_UNNUMBERED,
+ .procname = "nr_open",
+ .data = &sysctl_nr_open,
+ .maxlen = sizeof(int),
+ .mode = 0644,
+ .proc_handler = &proc_dointvec,
+ },
+ {
.ctl_name = FS_DENTRY,
.procname = "dentry-state",
.data = &dentry_stat,
@@ -2080,26 +2095,6 @@ static int do_proc_dointvec_bset_conv(int *negp, unsigned long *lvalp,
return 0;
}
-#ifdef CONFIG_SECURITY_CAPABILITIES
-/*
- * init may raise the set.
- */
-
-int proc_dointvec_bset(struct ctl_table *table, int write, struct file *filp,
- void __user *buffer, size_t *lenp, loff_t *ppos)
-{
- int op;
-
- if (write && !capable(CAP_SYS_MODULE)) {
- return -EPERM;
- }
-
- op = is_global_init(current) ? OP_SET : OP_AND;
- return do_proc_dointvec(table,write,filp,buffer,lenp,ppos,
- do_proc_dointvec_bset_conv,&op);
-}
-#endif /* def CONFIG_SECURITY_CAPABILITIES */
-
/*
* Taint values can only be increased
*/
@@ -2513,12 +2508,6 @@ int proc_dointvec(struct ctl_table *table, int write, struct file *filp,
return -ENOSYS;
}
-int proc_dointvec_bset(struct ctl_table *table, int write, struct file *filp,
- void __user *buffer, size_t *lenp, loff_t *ppos)
-{
- return -ENOSYS;
-}
-
int proc_dointvec_minmax(struct ctl_table *table, int write, struct file *filp,
void __user *buffer, size_t *lenp, loff_t *ppos)
{
diff --git a/kernel/sysctl_check.c b/kernel/sysctl_check.c
index c3206fa5004..006365b69ea 100644
--- a/kernel/sysctl_check.c
+++ b/kernel/sysctl_check.c
@@ -37,10 +37,6 @@ static struct trans_ctl_table trans_kern_table[] = {
{ KERN_NODENAME, "hostname" },
{ KERN_DOMAINNAME, "domainname" },
-#ifdef CONFIG_SECURITY_CAPABILITIES
- { KERN_CAP_BSET, "cap-bound" },
-#endif /* def CONFIG_SECURITY_CAPABILITIES */
-
{ KERN_PANIC, "panic" },
{ KERN_REALROOTDEV, "real-root-dev" },
@@ -1498,9 +1494,6 @@ int sysctl_check_table(struct nsproxy *namespaces, struct ctl_table *table)
(table->strategy == sysctl_ms_jiffies) ||
(table->proc_handler == proc_dostring) ||
(table->proc_handler == proc_dointvec) ||
-#ifdef CONFIG_SECURITY_CAPABILITIES
- (table->proc_handler == proc_dointvec_bset) ||
-#endif /* def CONFIG_SECURITY_CAPABILITIES */
(table->proc_handler == proc_dointvec_minmax) ||
(table->proc_handler == proc_dointvec_jiffies) ||
(table->proc_handler == proc_dointvec_userhz_jiffies) ||
diff --git a/kernel/test_kprobes.c b/kernel/test_kprobes.c
index 88cdb109e13..06b6395b45b 100644
--- a/kernel/test_kprobes.c
+++ b/kernel/test_kprobes.c
@@ -135,6 +135,12 @@ static int test_jprobe(void)
#ifdef CONFIG_KRETPROBES
static u32 krph_val;
+static int entry_handler(struct kretprobe_instance *ri, struct pt_regs *regs)
+{
+ krph_val = (rand1 / div_factor);
+ return 0;
+}
+
static int return_handler(struct kretprobe_instance *ri, struct pt_regs *regs)
{
unsigned long ret = regs_return_value(regs);
@@ -144,13 +150,19 @@ static int return_handler(struct kretprobe_instance *ri, struct pt_regs *regs)
printk(KERN_ERR "Kprobe smoke test failed: "
"incorrect value in kretprobe handler\n");
}
+ if (krph_val == 0) {
+ handler_errors++;
+ printk(KERN_ERR "Kprobe smoke test failed: "
+ "call to kretprobe entry handler failed\n");
+ }
- krph_val = (rand1 / div_factor);
+ krph_val = rand1;
return 0;
}
static struct kretprobe rp = {
.handler = return_handler,
+ .entry_handler = entry_handler,
.kp.symbol_name = "kprobe_target"
};
@@ -167,7 +179,7 @@ static int test_kretprobe(void)
ret = kprobe_target(rand1);
unregister_kretprobe(&rp);
- if (krph_val == 0) {
+ if (krph_val != rand1) {
printk(KERN_ERR "Kprobe smoke test failed: "
"kretprobe handler not called\n");
handler_errors++;
diff --git a/kernel/time.c b/kernel/time.c
index 4064c0566e7..33af3e55570 100644
--- a/kernel/time.c
+++ b/kernel/time.c
@@ -566,7 +566,11 @@ EXPORT_SYMBOL(jiffies_to_timeval);
clock_t jiffies_to_clock_t(long x)
{
#if (TICK_NSEC % (NSEC_PER_SEC / USER_HZ)) == 0
+# if HZ < USER_HZ
+ return x * (USER_HZ / HZ);
+# else
return x / (HZ / USER_HZ);
+# endif
#else
u64 tmp = (u64)x * TICK_NSEC;
do_div(tmp, (NSEC_PER_SEC / USER_HZ));
@@ -599,7 +603,14 @@ EXPORT_SYMBOL(clock_t_to_jiffies);
u64 jiffies_64_to_clock_t(u64 x)
{
#if (TICK_NSEC % (NSEC_PER_SEC / USER_HZ)) == 0
+# if HZ < USER_HZ
+ x *= USER_HZ;
+ do_div(x, HZ);
+# elif HZ > USER_HZ
do_div(x, HZ / USER_HZ);
+# else
+ /* Nothing to do */
+# endif
#else
/*
* There are better ways that don't overflow early,
@@ -611,7 +622,6 @@ u64 jiffies_64_to_clock_t(u64 x)
#endif
return x;
}
-
EXPORT_SYMBOL(jiffies_64_to_clock_t);
u64 nsec_to_clock_t(u64 x)
@@ -646,7 +656,6 @@ u64 get_jiffies_64(void)
} while (read_seqretry(&xtime_lock, seq));
return ret;
}
-
EXPORT_SYMBOL(get_jiffies_64);
#endif
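
The new HZ < USER_HZ branches exist because HZ / USER_HZ truncates to zero
when HZ is the smaller value, so the old expression divided by zero on such
configurations. A stand-alone sketch of the exact-division case, with
illustrative HZ values:

/* Stand-alone check of the jiffies_to_clock_t() fix for HZ < USER_HZ. */
#include <stdio.h>

#define USER_HZ 100

static long to_clock_t(long x, long hz)
{
        if (hz < USER_HZ)
                return x * (USER_HZ / hz);      /* new branch */
        return x / (hz / USER_HZ);              /* pre-existing expression */
}

int main(void)
{
        /* three seconds worth of ticks in two configurations */
        printf("HZ=1000: %ld\n", to_clock_t(3 * 1000, 1000));
        printf("HZ=50:   %ld\n", to_clock_t(3 * 50, 50));
        return 0;
}

Both calls print 300, i.e. three seconds expressed in USER_HZ units, which is
what the exact-division path is meant to return regardless of which side of
USER_HZ the kernel's HZ falls on.
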
diff --git a/kernel/time/clocksource.c b/kernel/time/clocksource.c
index 6e9259a5d50..81afb3927ec 100644
--- a/kernel/time/clocksource.c
+++ b/kernel/time/clocksource.c
@@ -363,15 +363,13 @@ void clocksource_unregister(struct clocksource *cs)
static ssize_t
sysfs_show_current_clocksources(struct sys_device *dev, char *buf)
{
- char *curr = buf;
+ ssize_t count = 0;
spin_lock_irq(&clocksource_lock);
- curr += sprintf(curr, "%s ", curr_clocksource->name);
+ count = snprintf(buf, PAGE_SIZE, "%s\n", curr_clocksource->name);
spin_unlock_irq(&clocksource_lock);
- curr += sprintf(curr, "\n");
-
- return curr - buf;
+ return count;
}
/**
@@ -439,17 +437,20 @@ static ssize_t
sysfs_show_available_clocksources(struct sys_device *dev, char *buf)
{
struct clocksource *src;
- char *curr = buf;
+ ssize_t count = 0;
spin_lock_irq(&clocksource_lock);
list_for_each_entry(src, &clocksource_list, list) {
- curr += sprintf(curr, "%s ", src->name);
+ count += snprintf(buf + count,
+ max((ssize_t)PAGE_SIZE - count, (ssize_t)0),
+ "%s ", src->name);
}
spin_unlock_irq(&clocksource_lock);
- curr += sprintf(curr, "\n");
+ count += snprintf(buf + count,
+ max((ssize_t)PAGE_SIZE - count, (ssize_t)0), "\n");
- return curr - buf;
+ return count;
}
/*
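
Both sysfs show routines now accumulate with snprintf() against a PAGE_SIZE
limit instead of unbounded sprintf(). A stand-alone model of that clamped
accumulation (the buffer limit here merely stands in for PAGE_SIZE):

/* Stand-alone model of bounded snprintf() accumulation into a sysfs buffer. */
#include <stdio.h>

#define BUF_SIZE 32                     /* stands in for PAGE_SIZE */

int main(void)
{
        const char *names[] = { "tsc", "hpet", "acpi_pm", "jiffies" };
        char buf[BUF_SIZE];
        int count = 0;
        unsigned int i;

        for (i = 0; i < sizeof(names) / sizeof(names[0]); i++)
                count += snprintf(buf + count,
                                  count < BUF_SIZE ? BUF_SIZE - count : 0,
                                  "%s ", names[i]);
        count += snprintf(buf + count,
                          count < BUF_SIZE ? BUF_SIZE - count : 0, "\n");
        printf("%d bytes requested, buffer holds \"%s\"\n", count, buf);
        return 0;
}
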
diff --git a/kernel/timer.c b/kernel/timer.c
index 9fbb472b8cf..70b29b59343 100644
--- a/kernel/timer.c
+++ b/kernel/timer.c
@@ -818,12 +818,14 @@ unsigned long next_timer_interrupt(void)
#ifndef CONFIG_VIRT_CPU_ACCOUNTING
void account_process_tick(struct task_struct *p, int user_tick)
{
+ cputime_t one_jiffy = jiffies_to_cputime(1);
+
if (user_tick) {
- account_user_time(p, jiffies_to_cputime(1));
- account_user_time_scaled(p, jiffies_to_cputime(1));
+ account_user_time(p, one_jiffy);
+ account_user_time_scaled(p, cputime_to_scaled(one_jiffy));
} else {
- account_system_time(p, HARDIRQ_OFFSET, jiffies_to_cputime(1));
- account_system_time_scaled(p, jiffies_to_cputime(1));
+ account_system_time(p, HARDIRQ_OFFSET, one_jiffy);
+ account_system_time_scaled(p, cputime_to_scaled(one_jiffy));
}
}
#endif