Diffstat (limited to 'kernel')
-rw-r--r--  kernel/Makefile       |   2
-rw-r--r--  kernel/audit.c        | 112
-rw-r--r--  kernel/auditsc.c      |  47
-rw-r--r--  kernel/exit.c         |  15
-rw-r--r--  kernel/futex.c        |   3
-rw-r--r--  kernel/itimer.c       |   6
-rw-r--r--  kernel/kallsyms.c     |  13
-rw-r--r--  kernel/kprobes.c      | 142
-rw-r--r--  kernel/kthread.c      |   2
-rw-r--r--  kernel/module.c       |   2
-rw-r--r--  kernel/panic.c        |   4
-rw-r--r--  kernel/power/swsusp.c |   2
-rw-r--r--  kernel/printk.c       |  27
-rw-r--r--  kernel/profile.c      |   4
-rw-r--r--  kernel/ptrace.c       |   5
-rw-r--r--  kernel/rcupdate.c     |  22
-rw-r--r--  kernel/sched.c        |  28
-rw-r--r--  kernel/signal.c       |   9
-rw-r--r--  kernel/stop_machine.c |  10
-rw-r--r--  kernel/sys.c          |  27
-rw-r--r--  kernel/sys_ni.c       |   1
-rw-r--r--  kernel/sysctl.c       |   2
-rw-r--r--  kernel/time.c         |   8
-rw-r--r--  kernel/timer.c        |   2
24 files changed, 356 insertions, 139 deletions
diff --git a/kernel/Makefile b/kernel/Makefile
index eb88b446c2c..b01d26fe8db 100644
--- a/kernel/Makefile
+++ b/kernel/Makefile
@@ -29,7 +29,7 @@ obj-$(CONFIG_SYSFS) += ksysfs.o
obj-$(CONFIG_GENERIC_HARDIRQS) += irq/
obj-$(CONFIG_SECCOMP) += seccomp.o
-ifneq ($(CONFIG_IA64),y)
+ifneq ($(CONFIG_SCHED_NO_NO_OMIT_FRAME_POINTER),y)
# According to Alan Modra <alan@linuxcare.com.au>, the -fno-omit-frame-pointer is
# needed for x86 only. Why this used to be enabled for all architectures is beyond
# me. I suspect most platforms don't need this, but until we know that for sure
diff --git a/kernel/audit.c b/kernel/audit.c
index 0f84dd7af2c..9c4f1af0c79 100644
--- a/kernel/audit.c
+++ b/kernel/audit.c
@@ -1,4 +1,4 @@
-/* audit.c -- Auditing support -*- linux-c -*-
+/* audit.c -- Auditing support
* Gateway between the kernel (e.g., selinux) and the user-space audit daemon.
* System-call specific features have moved to auditsc.c
*
@@ -38,7 +38,7 @@
* 6) Support low-overhead kernel-based filtering to minimize the
* information that must be passed to user-space.
*
- * Example user-space utilities: http://people.redhat.com/faith/audit/
+ * Example user-space utilities: http://people.redhat.com/sgrubb/audit/
*/
#include <linux/init.h>
@@ -142,7 +142,6 @@ struct audit_buffer {
int total;
int type;
int pid;
- int count; /* Times requeued */
};
void audit_set_type(struct audit_buffer *ab, int type)
@@ -239,36 +238,36 @@ void audit_log_lost(const char *message)
}
-static int audit_set_rate_limit(int limit)
+static int audit_set_rate_limit(int limit, uid_t loginuid)
{
int old = audit_rate_limit;
audit_rate_limit = limit;
- audit_log(current->audit_context, "audit_rate_limit=%d old=%d",
- audit_rate_limit, old);
+ audit_log(NULL, "audit_rate_limit=%d old=%d by auid %u",
+ audit_rate_limit, old, loginuid);
return old;
}
-static int audit_set_backlog_limit(int limit)
+static int audit_set_backlog_limit(int limit, uid_t loginuid)
{
int old = audit_backlog_limit;
audit_backlog_limit = limit;
- audit_log(current->audit_context, "audit_backlog_limit=%d old=%d",
- audit_backlog_limit, old);
+ audit_log(NULL, "audit_backlog_limit=%d old=%d by auid %u",
+ audit_backlog_limit, old, loginuid);
return old;
}
-static int audit_set_enabled(int state)
+static int audit_set_enabled(int state, uid_t loginuid)
{
int old = audit_enabled;
if (state != 0 && state != 1)
return -EINVAL;
audit_enabled = state;
- audit_log(current->audit_context, "audit_enabled=%d old=%d",
- audit_enabled, old);
+ audit_log(NULL, "audit_enabled=%d old=%d by auid %u",
+ audit_enabled, old, loginuid);
return old;
}
-static int audit_set_failure(int state)
+static int audit_set_failure(int state, uid_t loginuid)
{
int old = audit_failure;
if (state != AUDIT_FAIL_SILENT
@@ -276,8 +275,8 @@ static int audit_set_failure(int state)
&& state != AUDIT_FAIL_PANIC)
return -EINVAL;
audit_failure = state;
- audit_log(current->audit_context, "audit_failure=%d old=%d",
- audit_failure, old);
+ audit_log(NULL, "audit_failure=%d old=%d by auid %u",
+ audit_failure, old, loginuid);
return old;
}
@@ -344,6 +343,7 @@ static int audit_receive_msg(struct sk_buff *skb, struct nlmsghdr *nlh)
int err;
struct audit_buffer *ab;
u16 msg_type = nlh->nlmsg_type;
+ uid_t loginuid; /* loginuid of sender */
err = audit_netlink_ok(NETLINK_CB(skb).eff_cap, msg_type);
if (err)
@@ -351,6 +351,7 @@ static int audit_receive_msg(struct sk_buff *skb, struct nlmsghdr *nlh)
pid = NETLINK_CREDS(skb)->pid;
uid = NETLINK_CREDS(skb)->uid;
+ loginuid = NETLINK_CB(skb).loginuid;
seq = nlh->nlmsg_seq;
data = NLMSG_DATA(nlh);
@@ -371,34 +372,36 @@ static int audit_receive_msg(struct sk_buff *skb, struct nlmsghdr *nlh)
return -EINVAL;
status_get = (struct audit_status *)data;
if (status_get->mask & AUDIT_STATUS_ENABLED) {
- err = audit_set_enabled(status_get->enabled);
+ err = audit_set_enabled(status_get->enabled, loginuid);
if (err < 0) return err;
}
if (status_get->mask & AUDIT_STATUS_FAILURE) {
- err = audit_set_failure(status_get->failure);
+ err = audit_set_failure(status_get->failure, loginuid);
if (err < 0) return err;
}
if (status_get->mask & AUDIT_STATUS_PID) {
int old = audit_pid;
audit_pid = status_get->pid;
- audit_log(current->audit_context,
- "audit_pid=%d old=%d", audit_pid, old);
+ audit_log(NULL, "audit_pid=%d old=%d by auid %u",
+ audit_pid, old, loginuid);
}
if (status_get->mask & AUDIT_STATUS_RATE_LIMIT)
- audit_set_rate_limit(status_get->rate_limit);
+ audit_set_rate_limit(status_get->rate_limit, loginuid);
if (status_get->mask & AUDIT_STATUS_BACKLOG_LIMIT)
- audit_set_backlog_limit(status_get->backlog_limit);
+ audit_set_backlog_limit(status_get->backlog_limit,
+ loginuid);
break;
case AUDIT_USER:
ab = audit_log_start(NULL);
if (!ab)
break; /* audit_panic has been called */
audit_log_format(ab,
- "user pid=%d uid=%d length=%d msg='%.1024s'",
+ "user pid=%d uid=%d length=%d loginuid=%u"
+ " msg='%.1024s'",
pid, uid,
(int)(nlh->nlmsg_len
- ((char *)data - (char *)nlh)),
- (char *)data);
+ loginuid, (char *)data);
ab->type = AUDIT_USER;
ab->pid = pid;
audit_log_end(ab);
@@ -411,7 +414,7 @@ static int audit_receive_msg(struct sk_buff *skb, struct nlmsghdr *nlh)
case AUDIT_LIST:
#ifdef CONFIG_AUDITSYSCALL
err = audit_receive_filter(nlh->nlmsg_type, NETLINK_CB(skb).pid,
- uid, seq, data);
+ uid, seq, data, loginuid);
#else
err = -EOPNOTSUPP;
#endif
@@ -427,7 +430,7 @@ static int audit_receive_msg(struct sk_buff *skb, struct nlmsghdr *nlh)
/* Get message from skb (based on rtnetlink_rcv_skb). Each message is
* processed by audit_receive_msg. Malformed skbs with wrong length are
* discarded silently. */
-static int audit_receive_skb(struct sk_buff *skb)
+static void audit_receive_skb(struct sk_buff *skb)
{
int err;
struct nlmsghdr *nlh;
@@ -436,7 +439,7 @@ static int audit_receive_skb(struct sk_buff *skb)
while (skb->len >= NLMSG_SPACE(0)) {
nlh = (struct nlmsghdr *)skb->data;
if (nlh->nlmsg_len < sizeof(*nlh) || skb->len < nlh->nlmsg_len)
- return 0;
+ return;
rlen = NLMSG_ALIGN(nlh->nlmsg_len);
if (rlen > skb->len)
rlen = skb->len;
@@ -446,23 +449,20 @@ static int audit_receive_skb(struct sk_buff *skb)
netlink_ack(skb, nlh, 0);
skb_pull(skb, rlen);
}
- return 0;
}
/* Receive messages from netlink socket. */
static void audit_receive(struct sock *sk, int length)
{
struct sk_buff *skb;
+ unsigned int qlen;
- if (down_trylock(&audit_netlink_sem))
- return;
+ down(&audit_netlink_sem);
- /* FIXME: this must not cause starvation */
- while ((skb = skb_dequeue(&sk->sk_receive_queue))) {
- if (audit_receive_skb(skb) && skb->len)
- skb_queue_head(&sk->sk_receive_queue, skb);
- else
- kfree_skb(skb);
+ for (qlen = skb_queue_len(&sk->sk_receive_queue); qlen; qlen--) {
+ skb = skb_dequeue(&sk->sk_receive_queue);
+ audit_receive_skb(skb);
+ kfree_skb(skb);
}
up(&audit_netlink_sem);
}
@@ -483,7 +483,7 @@ static void audit_log_move(struct audit_buffer *ab)
if (ab->len == 0)
return;
- skb = skb_peek(&ab->sklist);
+ skb = skb_peek_tail(&ab->sklist);
if (!skb || skb_tailroom(skb) <= ab->len + extra) {
skb = alloc_skb(2 * ab->len + extra, GFP_ATOMIC);
if (!skb) {
@@ -522,9 +522,9 @@ static inline int audit_log_drain(struct audit_buffer *ab)
retval = netlink_unicast(audit_sock, skb, audit_pid,
MSG_DONTWAIT);
}
- if (retval == -EAGAIN && ab->count < 5) {
- ++ab->count;
- skb_queue_tail(&ab->sklist, skb);
+ if (retval == -EAGAIN &&
+ (atomic_read(&audit_backlog)) < audit_backlog_limit) {
+ skb_queue_head(&ab->sklist, skb);
audit_log_end_irq(ab);
return 1;
}
@@ -540,8 +540,8 @@ static inline int audit_log_drain(struct audit_buffer *ab)
if (!audit_pid) { /* No daemon */
int offset = ab->nlh ? NLMSG_SPACE(0) : 0;
int len = skb->len - offset;
- printk(KERN_ERR "%*.*s\n",
- len, len, skb->data + offset);
+ skb->data[offset + len] = '\0';
+ printk(KERN_ERR "%s\n", skb->data + offset);
}
kfree_skb(skb);
ab->nlh = NULL;
@@ -620,7 +620,7 @@ struct audit_buffer *audit_log_start(struct audit_context *ctx)
struct audit_buffer *ab = NULL;
unsigned long flags;
struct timespec t;
- int serial = 0;
+ unsigned int serial;
if (!audit_initialized)
return NULL;
@@ -662,15 +662,16 @@ struct audit_buffer *audit_log_start(struct audit_context *ctx)
ab->total = 0;
ab->type = AUDIT_KERNEL;
ab->pid = 0;
- ab->count = 0;
#ifdef CONFIG_AUDITSYSCALL
if (ab->ctx)
audit_get_stamp(ab->ctx, &t, &serial);
else
#endif
+ {
t = CURRENT_TIME;
-
+ serial = 0;
+ }
audit_log_format(ab, "audit(%lu.%03lu:%u): ",
t.tv_sec, t.tv_nsec/1000000, serial);
return ab;
@@ -720,6 +721,29 @@ void audit_log_format(struct audit_buffer *ab, const char *fmt, ...)
va_end(args);
}
+void audit_log_hex(struct audit_buffer *ab, const unsigned char *buf, size_t len)
+{
+ int i;
+
+ for (i=0; i<len; i++)
+ audit_log_format(ab, "%02x", buf[i]);
+}
+
+void audit_log_untrustedstring(struct audit_buffer *ab, const char *string)
+{
+ const unsigned char *p = string;
+
+ while (*p) {
+ if (*p == '"' || *p == ' ' || *p < 0x20 || *p > 0x7f) {
+ audit_log_hex(ab, string, strlen(string));
+ return;
+ }
+ p++;
+ }
+ audit_log_format(ab, "\"%s\"", string);
+}
+
+
/* This is a helper-function to print the d_path without using a static
* buffer or allocating another buffer in addition to the one in
* audit_buffer. */
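
For reference, a minimal user-space sketch of the escaping rule the new audit_log_untrustedstring() applies above; the helper name render_untrusted() and the sample strings are illustrative only, not part of the patch:

#include <stdio.h>
#include <string.h>

/* Mirrors the check above: a '"', a space, or any byte outside 0x20-0x7f
 * forces the whole string to be logged as a hex dump instead of quoted. */
static void render_untrusted(const char *string)
{
	const unsigned char *p = (const unsigned char *)string;
	size_t i, len = strlen(string);

	for (; *p; p++) {
		if (*p == '"' || *p == ' ' || *p < 0x20 || *p > 0x7f) {
			for (i = 0; i < len; i++)
				printf("%02x", (unsigned char)string[i]);
			putchar('\n');
			return;
		}
	}
	printf("\"%s\"\n", string);
}

int main(void)
{
	render_untrusted("passwd");      /* clean: printed as "passwd" */
	render_untrusted("two words");   /* contains a space: printed as hex */
	return 0;
}
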
diff --git a/kernel/auditsc.c b/kernel/auditsc.c
index 6f1931381bc..37b3ac94bc4 100644
--- a/kernel/auditsc.c
+++ b/kernel/auditsc.c
@@ -1,4 +1,4 @@
-/* auditsc.c -- System-call auditing support -*- linux-c -*-
+/* auditsc.c -- System-call auditing support
* Handles all system-call specific auditing features.
*
* Copyright 2003-2004 Red Hat Inc., Durham, North Carolina.
@@ -123,7 +123,7 @@ struct audit_context {
int major; /* syscall number */
unsigned long argv[4]; /* syscall arguments */
int return_valid; /* return code is valid */
- int return_code;/* syscall return code */
+ long return_code;/* syscall return code */
int auditable; /* 1 if record should be written */
int name_count;
struct audit_names names[AUDIT_NAMES];
@@ -135,6 +135,7 @@ struct audit_context {
uid_t uid, euid, suid, fsuid;
gid_t gid, egid, sgid, fsgid;
unsigned long personality;
+ int arch;
#if AUDIT_DEBUG
int put_count;
@@ -250,7 +251,8 @@ static int audit_copy_rule(struct audit_rule *d, struct audit_rule *s)
return 0;
}
-int audit_receive_filter(int type, int pid, int uid, int seq, void *data)
+int audit_receive_filter(int type, int pid, int uid, int seq, void *data,
+ uid_t loginuid)
{
u32 flags;
struct audit_entry *entry;
@@ -285,6 +287,7 @@ int audit_receive_filter(int type, int pid, int uid, int seq, void *data)
err = audit_add_rule(entry, &audit_entlist);
if (!err && (flags & AUDIT_AT_EXIT))
err = audit_add_rule(entry, &audit_extlist);
+ audit_log(NULL, "auid %u added an audit rule\n", loginuid);
break;
case AUDIT_DEL:
flags =((struct audit_rule *)data)->flags;
@@ -294,6 +297,7 @@ int audit_receive_filter(int type, int pid, int uid, int seq, void *data)
err = audit_del_rule(data, &audit_entlist);
if (!err && (flags & AUDIT_AT_EXIT))
err = audit_del_rule(data, &audit_extlist);
+ audit_log(NULL, "auid %u removed an audit rule\n", loginuid);
break;
default:
return -EINVAL;
@@ -348,6 +352,10 @@ static int audit_filter_rules(struct task_struct *tsk,
case AUDIT_PERS:
result = (tsk->personality == value);
break;
+ case AUDIT_ARCH:
+ if (ctx)
+ result = (ctx->arch == value);
+ break;
case AUDIT_EXIT:
if (ctx && ctx->return_valid)
@@ -355,7 +363,7 @@ static int audit_filter_rules(struct task_struct *tsk,
break;
case AUDIT_SUCCESS:
if (ctx && ctx->return_valid)
- result = (ctx->return_code >= 0);
+ result = (ctx->return_valid == AUDITSC_SUCCESS);
break;
case AUDIT_DEVMAJOR:
if (ctx) {
@@ -648,8 +656,11 @@ static void audit_log_exit(struct audit_context *context)
audit_log_format(ab, "syscall=%d", context->major);
if (context->personality != PER_LINUX)
audit_log_format(ab, " per=%lx", context->personality);
+ audit_log_format(ab, " arch=%x", context->arch);
if (context->return_valid)
- audit_log_format(ab, " exit=%d", context->return_code);
+ audit_log_format(ab, " success=%s exit=%ld",
+ (context->return_valid==AUDITSC_SUCCESS)?"yes":"no",
+ context->return_code);
audit_log_format(ab,
" a0=%lx a1=%lx a2=%lx a3=%lx items=%d"
" pid=%d loginuid=%d uid=%d gid=%d"
@@ -696,9 +707,10 @@ static void audit_log_exit(struct audit_context *context)
if (!ab)
continue; /* audit_panic has been called */
audit_log_format(ab, "item=%d", i);
- if (context->names[i].name)
- audit_log_format(ab, " name=%s",
- context->names[i].name);
+ if (context->names[i].name) {
+ audit_log_format(ab, " name=");
+ audit_log_untrustedstring(ab, context->names[i].name);
+ }
if (context->names[i].ino != (unsigned long)-1)
audit_log_format(ab, " inode=%lu dev=%02x:%02x mode=%#o"
" uid=%d gid=%d rdev=%02x:%02x",
@@ -772,7 +784,7 @@ static inline unsigned int audit_serial(void)
* then the record will be written at syscall exit time (otherwise, it
* will only be written if another part of the kernel requests that it
* be written). */
-void audit_syscall_entry(struct task_struct *tsk, int major,
+void audit_syscall_entry(struct task_struct *tsk, int arch, int major,
unsigned long a1, unsigned long a2,
unsigned long a3, unsigned long a4)
{
@@ -826,6 +838,7 @@ void audit_syscall_entry(struct task_struct *tsk, int major,
if (!audit_enabled)
return;
+ context->arch = arch;
context->major = major;
context->argv[0] = a1;
context->argv[1] = a2;
@@ -849,13 +862,13 @@ void audit_syscall_entry(struct task_struct *tsk, int major,
* filtering, or because some other part of the kernel write an audit
* message), then write out the syscall information. In call cases,
* free the names stored from getname(). */
-void audit_syscall_exit(struct task_struct *tsk, int return_code)
+void audit_syscall_exit(struct task_struct *tsk, int valid, long return_code)
{
struct audit_context *context;
get_task_struct(tsk);
task_lock(tsk);
- context = audit_get_context(tsk, 1, return_code);
+ context = audit_get_context(tsk, valid, return_code);
task_unlock(tsk);
/* Not having a context here is ok, since the parent may have
@@ -868,6 +881,7 @@ void audit_syscall_exit(struct task_struct *tsk, int return_code)
context->in_syscall = 0;
context->auditable = 0;
+
if (context->previous) {
struct audit_context *new_context = context->previous;
context->previous = NULL;
@@ -981,7 +995,7 @@ void audit_inode(const char *name, const struct inode *inode)
}
void audit_get_stamp(struct audit_context *ctx,
- struct timespec *t, int *serial)
+ struct timespec *t, unsigned int *serial)
{
if (ctx) {
t->tv_sec = ctx->ctime.tv_sec;
@@ -996,20 +1010,21 @@ void audit_get_stamp(struct audit_context *ctx,
extern int audit_set_type(struct audit_buffer *ab, int type);
-int audit_set_loginuid(struct audit_context *ctx, uid_t loginuid)
+int audit_set_loginuid(struct task_struct *task, uid_t loginuid)
{
- if (ctx) {
+ if (task->audit_context) {
struct audit_buffer *ab;
ab = audit_log_start(NULL);
if (ab) {
audit_log_format(ab, "login pid=%d uid=%u "
"old loginuid=%u new loginuid=%u",
- ctx->pid, ctx->uid, ctx->loginuid, loginuid);
+ task->pid, task->uid,
+ task->audit_context->loginuid, loginuid);
audit_set_type(ab, AUDIT_LOGIN);
audit_log_end(ab);
}
- ctx->loginuid = loginuid;
+ task->audit_context->loginuid = loginuid;
}
return 0;
}
diff --git a/kernel/exit.c b/kernel/exit.c
index 39d35935b37..edaa50b5bbf 100644
--- a/kernel/exit.c
+++ b/kernel/exit.c
@@ -27,6 +27,7 @@
#include <linux/mempolicy.h>
#include <linux/cpuset.h>
#include <linux/syscalls.h>
+#include <linux/signal.h>
#include <asm/uaccess.h>
#include <asm/unistd.h>
@@ -38,6 +39,8 @@ extern struct task_struct *child_reaper;
int getrusage(struct task_struct *, int, struct rusage __user *);
+static void exit_mm(struct task_struct * tsk);
+
static void __unhash_process(struct task_struct *p)
{
nr_threads--;
@@ -209,7 +212,7 @@ static inline int has_stopped_jobs(int pgrp)
}
/**
- * reparent_to_init() - Reparent the calling kernel thread to the init task.
+ * reparent_to_init - Reparent the calling kernel thread to the init task.
*
* If a kernel thread is launched as a result of a system call, or if
* it ever exits, it should generally reparent itself to init so that
@@ -277,7 +280,7 @@ void set_special_pids(pid_t session, pid_t pgrp)
*/
int allow_signal(int sig)
{
- if (sig < 1 || sig > _NSIG)
+ if (!valid_signal(sig) || sig < 1)
return -EINVAL;
spin_lock_irq(&current->sighand->siglock);
@@ -298,7 +301,7 @@ EXPORT_SYMBOL(allow_signal);
int disallow_signal(int sig)
{
- if (sig < 1 || sig > _NSIG)
+ if (!valid_signal(sig) || sig < 1)
return -EINVAL;
spin_lock_irq(&current->sighand->siglock);
@@ -473,7 +476,7 @@ EXPORT_SYMBOL_GPL(exit_fs);
* Turn us into a lazy TLB process if we
* aren't already..
*/
-void exit_mm(struct task_struct * tsk)
+static void exit_mm(struct task_struct * tsk)
{
struct mm_struct *mm = tsk->mm;
@@ -517,8 +520,6 @@ static inline void choose_new_parent(task_t *p, task_t *reaper, task_t *child_re
*/
BUG_ON(p == reaper || reaper->exit_state >= EXIT_ZOMBIE);
p->real_parent = reaper;
- if (p->parent == p->real_parent)
- BUG();
}
static inline void reparent_thread(task_t *p, task_t *father, int traced)
@@ -845,6 +846,8 @@ fastcall NORET_TYPE void do_exit(long code)
for (;;) ;
}
+EXPORT_SYMBOL_GPL(do_exit);
+
NORET_TYPE void complete_and_exit(struct completion *comp, long code)
{
if (comp)
diff --git a/kernel/futex.c b/kernel/futex.c
index 7b54a672d0a..c7130f86106 100644
--- a/kernel/futex.c
+++ b/kernel/futex.c
@@ -39,6 +39,7 @@
#include <linux/mount.h>
#include <linux/pagemap.h>
#include <linux/syscalls.h>
+#include <linux/signal.h>
#define FUTEX_HASHBITS (CONFIG_BASE_SMALL ? 4 : 8)
@@ -654,7 +655,7 @@ static int futex_fd(unsigned long uaddr, int signal)
int ret, err;
ret = -EINVAL;
- if (signal < 0 || signal > _NSIG)
+ if (!valid_signal(signal))
goto out;
ret = get_unused_fd();
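
Several hunks in this patch (exit.c and futex.c above, ptrace.c, signal.c and sys.c below) replace open-coded "sig < 0 || sig > _NSIG" range checks with valid_signal() from <linux/signal.h>. A minimal user-space sketch of what such a helper amounts to, assuming the usual _NSIG of 64 and an unsigned parameter so negative values wrap around and are rejected:

#include <stdio.h>

#define _NSIG 64   /* illustrative value */

static inline int valid_signal(unsigned long sig)
{
	return sig <= _NSIG ? 1 : 0;
}

int main(void)
{
	/* prints 1 0 0: in-range signal, out-of-range signal, negative input */
	printf("%d %d %d\n", valid_signal(9), valid_signal(65),
	       valid_signal((unsigned long)-1));
	return 0;
}

Because the unsigned comparison already rejects negative values, callers such as allow_signal() only add an explicit "sig < 1" where zero must be excluded as well.
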
diff --git a/kernel/itimer.c b/kernel/itimer.c
index e9a40e947e0..1dc988e0d2c 100644
--- a/kernel/itimer.c
+++ b/kernel/itimer.c
@@ -123,7 +123,11 @@ static inline void it_real_arm(struct task_struct *p, unsigned long interval)
return;
if (interval > (unsigned long) LONG_MAX)
interval = LONG_MAX;
- p->signal->real_timer.expires = jiffies + interval;
+ /* the "+ 1" below makes sure that the timer doesn't go off before
+ * the interval requested. This could happen if
+ * time requested % (usecs per jiffy) is more than the usecs left
+ * in the current jiffy */
+ p->signal->real_timer.expires = jiffies + interval + 1;
add_timer(&p->signal->real_timer);
}
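
A worked example of the "+ 1" added above, using an illustrative HZ of 1000 (one millisecond per jiffy): if a one-jiffy interval is requested 0.9 ms after the last tick, expiring at "jiffies + interval" could fire only about 0.1 ms later, i.e. earlier than requested, while the extra jiffy guarantees at least the full interval. None of the numbers below come from the patch:

#include <stdio.h>

int main(void)
{
	unsigned long interval = 1;      /* requested delay: 1 jiffy = 1 ms */
	double elapsed_in_jiffy = 0.9;   /* ms already gone in the current jiffy */

	double old_earliest = (double)interval - elapsed_in_jiffy;
	double new_earliest = (double)(interval + 1) - elapsed_in_jiffy;

	printf("earliest expiry: %.1f ms without +1, %.1f ms with +1\n",
	       old_earliest, new_earliest);   /* 0.1 ms vs 1.1 ms */
	return 0;
}
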
diff --git a/kernel/kallsyms.c b/kernel/kallsyms.c
index 1627f8d6e0c..13bcec151b5 100644
--- a/kernel/kallsyms.c
+++ b/kernel/kallsyms.c
@@ -46,6 +46,14 @@ static inline int is_kernel_inittext(unsigned long addr)
return 0;
}
+static inline int is_kernel_extratext(unsigned long addr)
+{
+ if (addr >= (unsigned long)_sextratext
+ && addr <= (unsigned long)_eextratext)
+ return 1;
+ return 0;
+}
+
static inline int is_kernel_text(unsigned long addr)
{
if (addr >= (unsigned long)_stext && addr <= (unsigned long)_etext)
@@ -169,8 +177,9 @@ const char *kallsyms_lookup(unsigned long addr,
namebuf[0] = 0;
if ((all_var && is_kernel(addr)) ||
- (!all_var && (is_kernel_text(addr) || is_kernel_inittext(addr)))) {
- unsigned long symbol_end=0;
+ (!all_var && (is_kernel_text(addr) || is_kernel_inittext(addr) ||
+ is_kernel_extratext(addr)))) {
+ unsigned long symbol_end = 0;
/* do a binary search on the sorted kallsyms_addresses array */
low = 0;
diff --git a/kernel/kprobes.c b/kernel/kprobes.c
index 1d5dd1337bd..037142b72a4 100644
--- a/kernel/kprobes.c
+++ b/kernel/kprobes.c
@@ -44,6 +44,7 @@ static struct hlist_head kprobe_table[KPROBE_TABLE_SIZE];
unsigned int kprobe_cpu = NR_CPUS;
static DEFINE_SPINLOCK(kprobe_lock);
+static struct kprobe *curr_kprobe;
/* Locks kprobe: irqs must be disabled */
void lock_kprobes(void)
@@ -73,22 +74,139 @@ struct kprobe *get_kprobe(void *addr)
return NULL;
}
+/*
+ * Aggregate handlers for multiple kprobes support - these handlers
+ * take care of invoking the individual kprobe handlers on p->list
+ */
+int aggr_pre_handler(struct kprobe *p, struct pt_regs *regs)
+{
+ struct kprobe *kp;
+
+ list_for_each_entry(kp, &p->list, list) {
+ if (kp->pre_handler) {
+ curr_kprobe = kp;
+ kp->pre_handler(kp, regs);
+ curr_kprobe = NULL;
+ }
+ }
+ return 0;
+}
+
+void aggr_post_handler(struct kprobe *p, struct pt_regs *regs,
+ unsigned long flags)
+{
+ struct kprobe *kp;
+
+ list_for_each_entry(kp, &p->list, list) {
+ if (kp->post_handler) {
+ curr_kprobe = kp;
+ kp->post_handler(kp, regs, flags);
+ curr_kprobe = NULL;
+ }
+ }
+ return;
+}
+
+int aggr_fault_handler(struct kprobe *p, struct pt_regs *regs, int trapnr)
+{
+ /*
+ * if we faulted "during" the execution of a user specified
+ * probe handler, invoke just that probe's fault handler
+ */
+ if (curr_kprobe && curr_kprobe->fault_handler) {
+ if (curr_kprobe->fault_handler(curr_kprobe, regs, trapnr))
+ return 1;
+ }
+ return 0;
+}
+
+/*
+ * Fill in the required fields of the "manager kprobe". Replace the
+ * earlier kprobe in the hlist with the manager kprobe
+ */
+static inline void add_aggr_kprobe(struct kprobe *ap, struct kprobe *p)
+{
+ ap->addr = p->addr;
+ ap->opcode = p->opcode;
+ memcpy(&ap->ainsn, &p->ainsn, sizeof(struct arch_specific_insn));
+
+ ap->pre_handler = aggr_pre_handler;
+ ap->post_handler = aggr_post_handler;
+ ap->fault_handler = aggr_fault_handler;
+
+ INIT_LIST_HEAD(&ap->list);
+ list_add(&p->list, &ap->list);
+
+ INIT_HLIST_NODE(&ap->hlist);
+ hlist_del(&p->hlist);
+ hlist_add_head(&ap->hlist,
+ &kprobe_table[hash_ptr(ap->addr, KPROBE_HASH_BITS)]);
+}
+
+/*
+ * This is the second or subsequent kprobe at the address - handle
+ * the intricacies
+ * TODO: Move kcalloc outside the spinlock
+ */
+static int register_aggr_kprobe(struct kprobe *old_p, struct kprobe *p)
+{
+ int ret = 0;
+ struct kprobe *ap;
+
+ if (old_p->break_handler || p->break_handler) {
+ ret = -EEXIST; /* kprobe and jprobe can't (yet) coexist */
+ } else if (old_p->pre_handler == aggr_pre_handler) {
+ list_add(&p->list, &old_p->list);
+ } else {
+ ap = kcalloc(1, sizeof(struct kprobe), GFP_ATOMIC);
+ if (!ap)
+ return -ENOMEM;
+ add_aggr_kprobe(ap, old_p);
+ list_add(&p->list, &ap->list);
+ }
+ return ret;
+}
+
+/* kprobe removal house-keeping routines */
+static inline void cleanup_kprobe(struct kprobe *p, unsigned long flags)
+{
+ *p->addr = p->opcode;
+ hlist_del(&p->hlist);
+ flush_icache_range((unsigned long) p->addr,
+ (unsigned long) p->addr + sizeof(kprobe_opcode_t));
+ spin_unlock_irqrestore(&kprobe_lock, flags);
+ arch_remove_kprobe(p);
+}
+
+static inline void cleanup_aggr_kprobe(struct kprobe *old_p,
+ struct kprobe *p, unsigned long flags)
+{
+ list_del(&p->list);
+ if (list_empty(&old_p->list)) {
+ cleanup_kprobe(old_p, flags);
+ kfree(old_p);
+ } else
+ spin_unlock_irqrestore(&kprobe_lock, flags);
+}
+
int register_kprobe(struct kprobe *p)
{
int ret = 0;
unsigned long flags = 0;
+ struct kprobe *old_p;
if ((ret = arch_prepare_kprobe(p)) != 0) {
goto rm_kprobe;
}
spin_lock_irqsave(&kprobe_lock, flags);
- INIT_HLIST_NODE(&p->hlist);
- if (get_kprobe(p->addr)) {
- ret = -EEXIST;
+ old_p = get_kprobe(p->addr);
+ if (old_p) {
+ ret = register_aggr_kprobe(old_p, p);
goto out;
}
- arch_copy_kprobe(p);
+ arch_copy_kprobe(p);
+ INIT_HLIST_NODE(&p->hlist);
hlist_add_head(&p->hlist,
&kprobe_table[hash_ptr(p->addr, KPROBE_HASH_BITS)]);
@@ -107,13 +225,17 @@ rm_kprobe:
void unregister_kprobe(struct kprobe *p)
{
unsigned long flags;
- arch_remove_kprobe(p);
+ struct kprobe *old_p;
+
spin_lock_irqsave(&kprobe_lock, flags);
- *p->addr = p->opcode;
- hlist_del(&p->hlist);
- flush_icache_range((unsigned long) p->addr,
- (unsigned long) p->addr + sizeof(kprobe_opcode_t));
- spin_unlock_irqrestore(&kprobe_lock, flags);
+ old_p = get_kprobe(p->addr);
+ if (old_p) {
+ if (old_p->pre_handler == aggr_pre_handler)
+ cleanup_aggr_kprobe(old_p, p, flags);
+ else
+ cleanup_kprobe(p, flags);
+ } else
+ spin_unlock_irqrestore(&kprobe_lock, flags);
}
static struct notifier_block kprobe_exceptions_nb = {
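
The aggregation code above means register_kprobe() no longer returns -EEXIST when a probe already exists at an address; the handlers are instead chained behind a manager kprobe. A hedged sketch of a module that relies on this, with the probed address passed in as a parameter and the handler bodies purely illustrative:

#include <linux/kernel.h>
#include <linux/module.h>
#include <linux/kprobes.h>

static unsigned long target_addr;   /* resolved by the loader, e.g. from System.map */
module_param(target_addr, ulong, 0);

static int first_pre(struct kprobe *p, struct pt_regs *regs)
{
	printk(KERN_INFO "first probe hit at %p\n", p->addr);
	return 0;
}

static int second_pre(struct kprobe *p, struct pt_regs *regs)
{
	printk(KERN_INFO "second probe hit at %p\n", p->addr);
	return 0;
}

static struct kprobe kp1 = { .pre_handler = first_pre };
static struct kprobe kp2 = { .pre_handler = second_pre };

static int __init aggr_demo_init(void)
{
	kp1.addr = kp2.addr = (kprobe_opcode_t *)target_addr;
	register_kprobe(&kp1);
	register_kprobe(&kp2);   /* used to fail with -EEXIST; now aggregated */
	return 0;
}

static void __exit aggr_demo_exit(void)
{
	unregister_kprobe(&kp2);
	unregister_kprobe(&kp1);
}

module_init(aggr_demo_init);
module_exit(aggr_demo_exit);
MODULE_LICENSE("GPL");
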
diff --git a/kernel/kthread.c b/kernel/kthread.c
index e377e224410..f50f174e92d 100644
--- a/kernel/kthread.c
+++ b/kernel/kthread.c
@@ -174,7 +174,7 @@ int kthread_stop(struct task_struct *k)
/* Must init completion *before* thread sees kthread_stop_info.k */
init_completion(&kthread_stop_info.done);
- wmb();
+ smp_wmb();
/* Now set kthread_should_stop() to true, and wake it up. */
kthread_stop_info.k = k;
diff --git a/kernel/module.c b/kernel/module.c
index 2dbfa0773fa..5734ab09d3f 100644
--- a/kernel/module.c
+++ b/kernel/module.c
@@ -1801,7 +1801,7 @@ sys_init_module(void __user *umod,
/* Init routine failed: abort. Try to protect us from
buggy refcounters. */
mod->state = MODULE_STATE_GOING;
- synchronize_kernel();
+ synchronize_sched();
if (mod->unsafe)
printk(KERN_ERR "%s: module is now stuck!\n",
mod->name);
diff --git a/kernel/panic.c b/kernel/panic.c
index 0fa3f3a66fb..081f7465fc8 100644
--- a/kernel/panic.c
+++ b/kernel/panic.c
@@ -102,9 +102,9 @@ NORET_TYPE void panic(const char * fmt, ...)
#ifdef __sparc__
{
extern int stop_a_enabled;
- /* Make sure the user can actually press L1-A */
+ /* Make sure the user can actually press Stop-A (L1-A) */
stop_a_enabled = 1;
- printk(KERN_EMERG "Press L1-A to return to the boot prom\n");
+ printk(KERN_EMERG "Press Stop-A (L1-A) to return to the boot prom\n");
}
#endif
#if defined(CONFIG_ARCH_S390)
diff --git a/kernel/power/swsusp.c b/kernel/power/swsusp.c
index ae5bebc3b18..90b3b68dee3 100644
--- a/kernel/power/swsusp.c
+++ b/kernel/power/swsusp.c
@@ -1099,7 +1099,7 @@ static struct pbe * swsusp_pagedir_relocate(struct pbe *pblist)
return pblist;
}
-/**
+/*
* Using bio to read from swap.
* This code requires a bit more work than just using buffer heads
* but, it is the recommended way for 2.5/2.6.
diff --git a/kernel/printk.c b/kernel/printk.c
index 1498689548d..290a07ce2c8 100644
--- a/kernel/printk.c
+++ b/kernel/printk.c
@@ -85,10 +85,6 @@ static int console_locked;
*/
static DEFINE_SPINLOCK(logbuf_lock);
-static char __log_buf[__LOG_BUF_LEN];
-static char *log_buf = __log_buf;
-static int log_buf_len = __LOG_BUF_LEN;
-
#define LOG_BUF_MASK (log_buf_len-1)
#define LOG_BUF(idx) (log_buf[(idx) & LOG_BUF_MASK])
@@ -99,7 +95,6 @@ static int log_buf_len = __LOG_BUF_LEN;
static unsigned long log_start; /* Index into log_buf: next char to be read by syslog() */
static unsigned long con_start; /* Index into log_buf: next char to be sent to consoles */
static unsigned long log_end; /* Index into log_buf: most-recently-written-char + 1 */
-static unsigned long logged_chars; /* Number of chars produced since last read+clear operation */
/*
* Array of consoles built from command line options (console=)
@@ -120,6 +115,13 @@ static int preferred_console = -1;
/* Flag: console code may call schedule() */
static int console_may_schedule;
+#ifdef CONFIG_PRINTK
+
+static char __log_buf[__LOG_BUF_LEN];
+static char *log_buf = __log_buf;
+static int log_buf_len = __LOG_BUF_LEN;
+static unsigned long logged_chars; /* Number of chars produced since last read+clear operation */
+
/*
* Setup a list of consoles. Called from init/main.c
*/
@@ -535,6 +537,7 @@ __setup("time", printk_time_setup);
* then changes console_loglevel may break. This is because console_loglevel
* is inspected when the actual printing occurs.
*/
+
asmlinkage int printk(const char *fmt, ...)
{
va_list args;
@@ -655,6 +658,18 @@ out:
EXPORT_SYMBOL(printk);
EXPORT_SYMBOL(vprintk);
+#else
+
+asmlinkage long sys_syslog(int type, char __user * buf, int len)
+{
+ return 0;
+}
+
+int do_syslog(int type, char __user * buf, int len) { return 0; }
+static void call_console_drivers(unsigned long start, unsigned long end) {}
+
+#endif
+
/**
* acquire_console_sem - lock the console system for exclusive use.
*
@@ -931,7 +946,7 @@ int unregister_console(struct console * console)
return res;
}
EXPORT_SYMBOL(unregister_console);
-
+
/**
* tty_write_message - write a message to a certain tty, not just the console.
*
diff --git a/kernel/profile.c b/kernel/profile.c
index a38fa70075f..0221a50ca86 100644
--- a/kernel/profile.c
+++ b/kernel/profile.c
@@ -184,7 +184,7 @@ void unregister_timer_hook(int (*hook)(struct pt_regs *))
WARN_ON(hook != timer_hook);
timer_hook = NULL;
/* make sure all CPUs see the NULL hook */
- synchronize_kernel();
+ synchronize_sched(); /* Allow ongoing interrupts to complete. */
}
EXPORT_SYMBOL_GPL(register_timer_hook);
@@ -522,7 +522,7 @@ static int __init create_hash_tables(void)
return 0;
out_cleanup:
prof_on = 0;
- mb();
+ smp_mb();
on_each_cpu(profile_nop, NULL, 0, 1);
for_each_online_cpu(cpu) {
struct page *page;
diff --git a/kernel/ptrace.c b/kernel/ptrace.c
index 88b306c4e84..8dcb8f6288b 100644
--- a/kernel/ptrace.c
+++ b/kernel/ptrace.c
@@ -16,6 +16,7 @@
#include <linux/smp_lock.h>
#include <linux/ptrace.h>
#include <linux/security.h>
+#include <linux/signal.h>
#include <asm/pgtable.h>
#include <asm/uaccess.h>
@@ -135,7 +136,7 @@ int ptrace_attach(struct task_struct *task)
(current->gid != task->sgid) ||
(current->gid != task->gid)) && !capable(CAP_SYS_PTRACE))
goto bad;
- rmb();
+ smp_rmb();
if (!task->mm->dumpable && !capable(CAP_SYS_PTRACE))
goto bad;
/* the same process cannot be attached many times */
@@ -166,7 +167,7 @@ bad:
int ptrace_detach(struct task_struct *child, unsigned int data)
{
- if ((unsigned long) data > _NSIG)
+ if (!valid_signal(data))
return -EIO;
/* Architecture-specific hardware disable .. */
diff --git a/kernel/rcupdate.c b/kernel/rcupdate.c
index d00eded75d7..f436993bd59 100644
--- a/kernel/rcupdate.c
+++ b/kernel/rcupdate.c
@@ -444,15 +444,18 @@ static void wakeme_after_rcu(struct rcu_head *head)
}
/**
- * synchronize_kernel - wait until a grace period has elapsed.
+ * synchronize_rcu - wait until a grace period has elapsed.
*
* Control will return to the caller some time after a full grace
* period has elapsed, in other words after all currently executing RCU
* read-side critical sections have completed. RCU read-side critical
* sections are delimited by rcu_read_lock() and rcu_read_unlock(),
* and may be nested.
+ *
+ * If your read-side code is not protected by rcu_read_lock(), do -not-
+ * use synchronize_rcu().
*/
-void synchronize_kernel(void)
+void synchronize_rcu(void)
{
struct rcu_synchronize rcu;
@@ -464,7 +467,16 @@ void synchronize_kernel(void)
wait_for_completion(&rcu.completion);
}
+/*
+ * Deprecated, use synchronize_rcu() or synchronize_sched() instead.
+ */
+void synchronize_kernel(void)
+{
+ synchronize_rcu();
+}
+
module_param(maxbatch, int, 0);
-EXPORT_SYMBOL_GPL(call_rcu);
-EXPORT_SYMBOL_GPL(call_rcu_bh);
-EXPORT_SYMBOL_GPL(synchronize_kernel);
+EXPORT_SYMBOL(call_rcu); /* WARNING: GPL-only in April 2006. */
+EXPORT_SYMBOL(call_rcu_bh); /* WARNING: GPL-only in April 2006. */
+EXPORT_SYMBOL_GPL(synchronize_rcu);
+EXPORT_SYMBOL(synchronize_kernel); /* WARNING: GPL-only in April 2006. */
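
synchronize_kernel() is split above into synchronize_rcu() (for read sides under rcu_read_lock()) and synchronize_sched() (for read sides that merely disable preemption or interrupts, as in the module.c and profile.c hunks above). A minimal sketch of the writer-side pattern synchronize_rcu() is meant for; the list, the locking, and all names here are illustrative, not taken from the patch:

#include <linux/list.h>
#include <linux/rcupdate.h>
#include <linux/slab.h>
#include <linux/spinlock.h>

struct item {
	struct list_head node;
	int value;
};

static LIST_HEAD(items);
static DEFINE_SPINLOCK(items_lock);

/* Read side: no lock shared with the writer, only rcu_read_lock(). */
static int item_present(int value)
{
	struct item *it;
	int found = 0;

	rcu_read_lock();
	list_for_each_entry_rcu(it, &items, node) {
		if (it->value == value) {
			found = 1;
			break;
		}
	}
	rcu_read_unlock();
	return found;
}

/* Write side: unlink under the lock, wait out current readers, then free. */
static void item_remove(struct item *it)
{
	spin_lock(&items_lock);
	list_del_rcu(&it->node);
	spin_unlock(&items_lock);

	synchronize_rcu();   /* every reader that could still see "it" is done */
	kfree(it);
}
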
diff --git a/kernel/sched.c b/kernel/sched.c
index 9bb7489ee64..0dc3158667a 100644
--- a/kernel/sched.c
+++ b/kernel/sched.c
@@ -2906,6 +2906,7 @@ static void __wake_up_common(wait_queue_head_t *q, unsigned int mode,
* @q: the waitqueue
* @mode: which threads
* @nr_exclusive: how many wake-one or wake-many threads to wake up
+ * @key: is directly passed to the wakeup function
*/
void fastcall __wake_up(wait_queue_head_t *q, unsigned int mode,
int nr_exclusive, void *key)
@@ -2928,7 +2929,7 @@ void fastcall __wake_up_locked(wait_queue_head_t *q, unsigned int mode)
}
/**
- * __wake_up - sync- wake up threads blocked on a waitqueue.
+ * __wake_up_sync - wake up threads blocked on a waitqueue.
* @q: the waitqueue
* @mode: which threads
* @nr_exclusive: how many wake-one or wake-many threads to wake up
@@ -3223,6 +3224,19 @@ out_unlock:
EXPORT_SYMBOL(set_user_nice);
+/*
+ * can_nice - check if a task can reduce its nice value
+ * @p: task
+ * @nice: nice value
+ */
+int can_nice(const task_t *p, const int nice)
+{
+ /* convert nice value [19,-20] to rlimit style value [0,39] */
+ int nice_rlim = 19 - nice;
+ return (nice_rlim <= p->signal->rlim[RLIMIT_NICE].rlim_cur ||
+ capable(CAP_SYS_NICE));
+}
+
#ifdef __ARCH_WANT_SYS_NICE
/*
@@ -3242,12 +3256,8 @@ asmlinkage long sys_nice(int increment)
* We don't have to worry. Conceptually one call occurs first
* and we have a single winner.
*/
- if (increment < 0) {
- if (!capable(CAP_SYS_NICE))
- return -EPERM;
- if (increment < -40)
- increment = -40;
- }
+ if (increment < -40)
+ increment = -40;
if (increment > 40)
increment = 40;
@@ -3257,6 +3267,9 @@ asmlinkage long sys_nice(int increment)
if (nice > 19)
nice = 19;
+ if (increment < 0 && !can_nice(current, nice))
+ return -EPERM;
+
retval = security_task_setnice(current, nice);
if (retval)
return retval;
@@ -3372,6 +3385,7 @@ recheck:
return -EINVAL;
if ((policy == SCHED_FIFO || policy == SCHED_RR) &&
+ param->sched_priority > p->signal->rlim[RLIMIT_RTPRIO].rlim_cur &&
!capable(CAP_SYS_NICE))
return -EPERM;
if ((current->euid != p->euid) && (current->euid != p->uid) &&
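
The new can_nice() above folds the RLIMIT_NICE check into one place: a nice value n maps onto the rlimit scale as 19 - n (so nice 19 -> 0, nice -20 -> 39) and is allowed when that value does not exceed rlim_cur or the task has CAP_SYS_NICE. A small user-space sketch of the arithmetic, with an illustrative soft limit:

#include <stdio.h>

static int can_nice_sketch(int nice_val, unsigned long rlim_cur)
{
	int nice_rlim = 19 - nice_val;   /* nice 19 -> 0, nice -20 -> 39 */
	return (unsigned long)nice_rlim <= rlim_cur;
}

int main(void)
{
	unsigned long rlim_cur = 30;     /* e.g. an RLIMIT_NICE soft limit of 30 */
	int nice_val;

	for (nice_val = -20; nice_val <= 19; nice_val += 13)
		printf("nice %3d allowed: %d\n",
		       nice_val, can_nice_sketch(nice_val, rlim_cur));
	/* With rlim_cur = 30, everything down to nice -11 (19 - (-11) = 30)
	 * is allowed; going lower would need CAP_SYS_NICE. */
	return 0;
}
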
diff --git a/kernel/signal.c b/kernel/signal.c
index e6567d7f2b6..8f3debc77c5 100644
--- a/kernel/signal.c
+++ b/kernel/signal.c
@@ -23,6 +23,7 @@
#include <linux/syscalls.h>
#include <linux/ptrace.h>
#include <linux/posix-timers.h>
+#include <linux/signal.h>
#include <asm/param.h>
#include <asm/uaccess.h>
#include <asm/unistd.h>
@@ -646,7 +647,7 @@ static int check_kill_permission(int sig, struct siginfo *info,
struct task_struct *t)
{
int error = -EINVAL;
- if (sig < 0 || sig > _NSIG)
+ if (!valid_signal(sig))
return error;
error = -EPERM;
if ((!info || ((unsigned long)info != 1 &&
@@ -1245,7 +1246,7 @@ send_sig_info(int sig, struct siginfo *info, struct task_struct *p)
* Make sure legacy kernel users don't send in bad values
* (normal paths check this in check_kill_permission).
*/
- if (sig < 0 || sig > _NSIG)
+ if (!valid_signal(sig))
return -EINVAL;
/*
@@ -1520,7 +1521,7 @@ void do_notify_parent(struct task_struct *tsk, int sig)
if (psig->action[SIGCHLD-1].sa.sa_handler == SIG_IGN)
sig = 0;
}
- if (sig > 0 && sig <= _NSIG)
+ if (valid_signal(sig) && sig > 0)
__group_send_sig_info(sig, &info, tsk->parent);
__wake_up_parent(tsk, tsk->parent);
spin_unlock_irqrestore(&psig->siglock, flags);
@@ -2364,7 +2365,7 @@ do_sigaction(int sig, const struct k_sigaction *act, struct k_sigaction *oact)
{
struct k_sigaction *k;
- if (sig < 1 || sig > _NSIG || (act && sig_kernel_only(sig)))
+ if (!valid_signal(sig) || sig < 1 || (act && sig_kernel_only(sig)))
return -EINVAL;
k = &current->sighand->action[sig-1];
diff --git a/kernel/stop_machine.c b/kernel/stop_machine.c
index c39ed70af17..6116b25aa7c 100644
--- a/kernel/stop_machine.c
+++ b/kernel/stop_machine.c
@@ -33,7 +33,7 @@ static int stopmachine(void *cpu)
set_cpus_allowed(current, cpumask_of_cpu((int)(long)cpu));
/* Ack: we are alive */
- mb(); /* Theoretically the ack = 0 might not be on this CPU yet. */
+ smp_mb(); /* Theoretically the ack = 0 might not be on this CPU yet. */
atomic_inc(&stopmachine_thread_ack);
/* Simple state machine */
@@ -43,14 +43,14 @@ static int stopmachine(void *cpu)
local_irq_disable();
irqs_disabled = 1;
/* Ack: irqs disabled. */
- mb(); /* Must read state first. */
+ smp_mb(); /* Must read state first. */
atomic_inc(&stopmachine_thread_ack);
} else if (stopmachine_state == STOPMACHINE_PREPARE
&& !prepared) {
/* Everyone is in place, hold CPU. */
preempt_disable();
prepared = 1;
- mb(); /* Must read state first. */
+ smp_mb(); /* Must read state first. */
atomic_inc(&stopmachine_thread_ack);
}
/* Yield in first stage: migration threads need to
@@ -62,7 +62,7 @@ static int stopmachine(void *cpu)
}
/* Ack: we are exiting. */
- mb(); /* Must read state first. */
+ smp_mb(); /* Must read state first. */
atomic_inc(&stopmachine_thread_ack);
if (irqs_disabled)
@@ -77,7 +77,7 @@ static int stopmachine(void *cpu)
static void stopmachine_set_state(enum stopmachine_state state)
{
atomic_set(&stopmachine_thread_ack, 0);
- wmb();
+ smp_wmb();
stopmachine_state = state;
while (atomic_read(&stopmachine_thread_ack) != stopmachine_num_threads)
cpu_relax();
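
The mb()/rmb()/wmb() to smp_mb()/smp_rmb()/smp_wmb() conversions in this file and in the kthread.c, ptrace.c, profile.c and sys.c hunks elsewhere in the patch rely on the usual barrier definitions, sketched here rather than copied from any one architecture header: on SMP builds the smp_ variants are real memory barriers, while on uniprocessor builds they collapse to a compiler barrier, which is all these orderings need.

#ifdef CONFIG_SMP
#define smp_mb()	mb()
#define smp_rmb()	rmb()
#define smp_wmb()	wmb()
#else
#define smp_mb()	barrier()
#define smp_rmb()	barrier()
#define smp_wmb()	barrier()
#endif
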
diff --git a/kernel/sys.c b/kernel/sys.c
index 462d78d5589..f006632c2ba 100644
--- a/kernel/sys.c
+++ b/kernel/sys.c
@@ -25,6 +25,7 @@
#include <linux/dcookies.h>
#include <linux/suspend.h>
#include <linux/tty.h>
+#include <linux/signal.h>
#include <linux/compat.h>
#include <linux/syscalls.h>
@@ -227,7 +228,7 @@ static int set_one_prio(struct task_struct *p, int niceval, int error)
error = -EPERM;
goto out;
}
- if (niceval < task_nice(p) && !capable(CAP_SYS_NICE)) {
+ if (niceval < task_nice(p) && !can_nice(p, niceval)) {
error = -EACCES;
goto out;
}
@@ -525,7 +526,7 @@ asmlinkage long sys_setregid(gid_t rgid, gid_t egid)
if (new_egid != old_egid)
{
current->mm->dumpable = 0;
- wmb();
+ smp_wmb();
}
if (rgid != (gid_t) -1 ||
(egid != (gid_t) -1 && egid != old_rgid))
@@ -556,7 +557,7 @@ asmlinkage long sys_setgid(gid_t gid)
if(old_egid != gid)
{
current->mm->dumpable=0;
- wmb();
+ smp_wmb();
}
current->gid = current->egid = current->sgid = current->fsgid = gid;
}
@@ -565,7 +566,7 @@ asmlinkage long sys_setgid(gid_t gid)
if(old_egid != gid)
{
current->mm->dumpable=0;
- wmb();
+ smp_wmb();
}
current->egid = current->fsgid = gid;
}
@@ -596,7 +597,7 @@ static int set_user(uid_t new_ruid, int dumpclear)
if(dumpclear)
{
current->mm->dumpable = 0;
- wmb();
+ smp_wmb();
}
current->uid = new_ruid;
return 0;
@@ -653,7 +654,7 @@ asmlinkage long sys_setreuid(uid_t ruid, uid_t euid)
if (new_euid != old_euid)
{
current->mm->dumpable=0;
- wmb();
+ smp_wmb();
}
current->fsuid = current->euid = new_euid;
if (ruid != (uid_t) -1 ||
@@ -703,7 +704,7 @@ asmlinkage long sys_setuid(uid_t uid)
if (old_euid != uid)
{
current->mm->dumpable = 0;
- wmb();
+ smp_wmb();
}
current->fsuid = current->euid = uid;
current->suid = new_suid;
@@ -748,7 +749,7 @@ asmlinkage long sys_setresuid(uid_t ruid, uid_t euid, uid_t suid)
if (euid != current->euid)
{
current->mm->dumpable = 0;
- wmb();
+ smp_wmb();
}
current->euid = euid;
}
@@ -798,7 +799,7 @@ asmlinkage long sys_setresgid(gid_t rgid, gid_t egid, gid_t sgid)
if (egid != current->egid)
{
current->mm->dumpable = 0;
- wmb();
+ smp_wmb();
}
current->egid = egid;
}
@@ -845,7 +846,7 @@ asmlinkage long sys_setfsuid(uid_t uid)
if (uid != old_fsuid)
{
current->mm->dumpable = 0;
- wmb();
+ smp_wmb();
}
current->fsuid = uid;
}
@@ -875,7 +876,7 @@ asmlinkage long sys_setfsgid(gid_t gid)
if (gid != old_fsgid)
{
current->mm->dumpable = 0;
- wmb();
+ smp_wmb();
}
current->fsgid = gid;
key_fsgid_changed(current);
@@ -1194,7 +1195,7 @@ static int groups_from_user(struct group_info *group_info,
return 0;
}
-/* a simple shell-metzner sort */
+/* a simple Shell sort */
static void groups_sort(struct group_info *group_info)
{
int base, max, stride;
@@ -1637,7 +1638,7 @@ asmlinkage long sys_prctl(int option, unsigned long arg2, unsigned long arg3,
switch (option) {
case PR_SET_PDEATHSIG:
sig = arg2;
- if (sig < 0 || sig > _NSIG) {
+ if (!valid_signal(sig)) {
error = -EINVAL;
break;
}
diff --git a/kernel/sys_ni.c b/kernel/sys_ni.c
index 1802a311dd3..0dda70ed1f9 100644
--- a/kernel/sys_ni.c
+++ b/kernel/sys_ni.c
@@ -52,6 +52,7 @@ cond_syscall(sys_msgsnd);
cond_syscall(sys_msgrcv);
cond_syscall(sys_msgctl);
cond_syscall(sys_shmget);
+cond_syscall(sys_shmat);
cond_syscall(sys_shmdt);
cond_syscall(sys_shmctl);
cond_syscall(sys_mq_open);
diff --git a/kernel/sysctl.c b/kernel/sysctl.c
index 79dbd93bd69..701d12c6306 100644
--- a/kernel/sysctl.c
+++ b/kernel/sysctl.c
@@ -1991,6 +1991,8 @@ int proc_dointvec_userhz_jiffies(ctl_table *table, int write, struct file *filp,
* @filp: the file structure
* @buffer: the user buffer
* @lenp: the size of the user buffer
+ * @ppos: file position
+ * @ppos: the current position in the file
*
* Reads/writes up to table->maxlen/sizeof(unsigned int) integer
* values from/to the user buffer, treated as an ASCII string.
diff --git a/kernel/time.c b/kernel/time.c
index 96fd0f49963..d4335c1c884 100644
--- a/kernel/time.c
+++ b/kernel/time.c
@@ -516,14 +516,6 @@ int do_settimeofday (struct timespec *tv)
write_seqlock_irq(&xtime_lock);
{
- /*
- * This is revolting. We need to set "xtime" correctly. However, the value
- * in this location is the value at the most recent update of wall time.
- * Discover what correction gettimeofday would have done, and then undo
- * it!
- */
- nsec -= time_interpolator_get_offset();
-
wtm_sec = wall_to_monotonic.tv_sec + (xtime.tv_sec - sec);
wtm_nsec = wall_to_monotonic.tv_nsec + (xtime.tv_nsec - nsec);
diff --git a/kernel/timer.c b/kernel/timer.c
index ecb3d67c0e1..207aa4f0aa1 100644
--- a/kernel/timer.c
+++ b/kernel/timer.c
@@ -1007,7 +1007,7 @@ asmlinkage long sys_getppid(void)
* Make sure we read the pid before re-reading the
* parent pointer:
*/
- rmb();
+ smp_rmb();
parent = me->group_leader->real_parent;
if (old != parent)
continue;