Diffstat (limited to 'include')
-rw-r--r--  include/linux/c2port.h            |   3
-rw-r--r--  include/linux/fs.h                |   5
-rw-r--r--  include/linux/gfp.h               |  14
-rw-r--r--  include/linux/interrupt.h         |  14
-rw-r--r--  include/linux/kmemcheck.h         | 153
-rw-r--r--  include/linux/mm_types.h          |   8
-rw-r--r--  include/linux/ring_buffer.h       |   4
-rw-r--r--  include/linux/skbuff.h            |   7
-rw-r--r--  include/linux/slab.h              |   7
-rw-r--r--  include/linux/slab_def.h          |  81
-rw-r--r--  include/linux/stacktrace.h        |   3
-rw-r--r--  include/net/inet_sock.h           |  14
-rw-r--r--  include/net/inet_timewait_sock.h  |   5
-rw-r--r--  include/net/sock.h                |   2
14 files changed, 312 insertions(+), 8 deletions(-)
diff --git a/include/linux/c2port.h b/include/linux/c2port.h
index 7b5a2388ba6..2a5cd867c36 100644
--- a/include/linux/c2port.h
+++ b/include/linux/c2port.h
@@ -10,6 +10,7 @@
*/
#include <linux/device.h>
+#include <linux/kmemcheck.h>
#define C2PORT_NAME_LEN 32
@@ -20,8 +21,10 @@
/* Main struct */
struct c2port_ops;
struct c2port_device {
+ kmemcheck_bitfield_begin(flags);
unsigned int access:1;
unsigned int flash_access:1;
+ kmemcheck_bitfield_end(flags);
int id;
char name[C2PORT_NAME_LEN];
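The bitfield markers above are inert until the allocation site annotates
them; the c2port core would do that right after allocating the device,
roughly as follows (a sketch; the matching call site would live in
drivers/misc/c2port/core.c, outside this 'include'-limited diffstat):

        struct c2port_device *c2dev;

        c2dev = kmalloc(sizeof(struct c2port_device), GFP_KERNEL);
        if (unlikely(!c2dev))
                return ERR_PTR(-ENOMEM);

        /* Tell kmemcheck the flags bitfield bytes are initialized, so
         * reads of access/flash_access do not trigger false warnings. */
        kmemcheck_annotate_bitfield(c2dev, flags);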
diff --git a/include/linux/fs.h b/include/linux/fs.h
index ede84fa7da5..6d12174fbe1 100644
--- a/include/linux/fs.h
+++ b/include/linux/fs.h
@@ -1919,8 +1919,9 @@ extern void __init vfs_caches_init(unsigned long);
extern struct kmem_cache *names_cachep;
-#define __getname() kmem_cache_alloc(names_cachep, GFP_KERNEL)
-#define __putname(name) kmem_cache_free(names_cachep, (void *)(name))
+#define __getname_gfp(gfp) kmem_cache_alloc(names_cachep, (gfp))
+#define __getname() __getname_gfp(GFP_KERNEL)
+#define __putname(name) kmem_cache_free(names_cachep, (void *)(name))
#ifndef CONFIG_AUDITSYSCALL
#define putname(name) __putname(name)
#else
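The split lets callers that need non-default allocation flags obtain a
pathname buffer too; a hypothetical caller:

        static int example_with_name(void)
        {
                char *tmp = __getname_gfp(GFP_NOFS);    /* PATH_MAX-sized buffer */

                if (unlikely(!tmp))
                        return -ENOMEM;
                /* ...build or copy a path into tmp... */
                __putname(tmp);
                return 0;
        }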
diff --git a/include/linux/gfp.h b/include/linux/gfp.h
index 3760e7c5de0..80e14b8c2e7 100644
--- a/include/linux/gfp.h
+++ b/include/linux/gfp.h
@@ -52,7 +52,19 @@ struct vm_area_struct;
#define __GFP_RECLAIMABLE ((__force gfp_t)0x80000u) /* Page is reclaimable */
#define __GFP_MOVABLE ((__force gfp_t)0x100000u) /* Page is movable */
-#define __GFP_BITS_SHIFT 21 /* Room for 21 __GFP_FOO bits */
+#ifdef CONFIG_KMEMCHECK
+#define __GFP_NOTRACK ((__force gfp_t)0x200000u) /* Don't track with kmemcheck */
+#else
+#define __GFP_NOTRACK ((__force gfp_t)0)
+#endif
+
+/*
+ * This may seem redundant, but it's a way of annotating false positives vs.
+ * allocations that simply cannot be supported (e.g. page tables).
+ */
+#define __GFP_NOTRACK_FALSE_POSITIVE (__GFP_NOTRACK)
+
+#define __GFP_BITS_SHIFT 22 /* Room for 22 __GFP_FOO bits */
#define __GFP_BITS_MASK ((__force gfp_t)((1 << __GFP_BITS_SHIFT) - 1))
/* This equals 0, but use constants in case they ever change */
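For concreteness, a hypothetical call site: because __GFP_NOTRACK is
(__force gfp_t)0 without CONFIG_KMEMCHECK, callers need no #ifdef.

        #include <linux/gfp.h>
        #include <linux/slab.h>

        /* Hypothetical example: silence a known kmemcheck false positive
         * for this allocation; compiles to plain GFP_KERNEL when
         * CONFIG_KMEMCHECK is off. */
        static void *example_alloc_untracked(size_t size)
        {
                return kmalloc(size, GFP_KERNEL | __GFP_NOTRACK_FALSE_POSITIVE);
        }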
diff --git a/include/linux/interrupt.h b/include/linux/interrupt.h
index c41e812e9d5..2721f07e935 100644
--- a/include/linux/interrupt.h
+++ b/include/linux/interrupt.h
@@ -472,6 +472,20 @@ static inline void tasklet_hi_schedule(struct tasklet_struct *t)
__tasklet_hi_schedule(t);
}
+extern void __tasklet_hi_schedule_first(struct tasklet_struct *t);
+
+/*
+ * This version avoids touching any other tasklets. Needed for kmemcheck
+ * in order not to take any page faults while enqueueing this tasklet;
+ * consider VERY carefully whether you really need this or
+ * tasklet_hi_schedule()...
+ */
+static inline void tasklet_hi_schedule_first(struct tasklet_struct *t)
+{
+ if (!test_and_set_bit(TASKLET_STATE_SCHED, &t->state))
+ __tasklet_hi_schedule_first(t);
+}
+
static inline void tasklet_disable_nosync(struct tasklet_struct *t)
{
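A usage sketch (hypothetical driver code, not part of this patch): the
_first variant is only appropriate when, as the comment above warns, you
cannot afford a page fault while enqueueing.

        #include <linux/interrupt.h>

        static void example_func(unsigned long data)
        {
                /* deferred work, runs at HI_SOFTIRQ priority */
        }
        static DECLARE_TASKLET(example_tasklet, example_func, 0);

        static void example_trigger(void)
        {
                /* Head-of-queue insert that never touches other queued
                 * tasklets; per the comment above, prefer
                 * tasklet_hi_schedule() unless a page fault here is
                 * truly not an option. */
                tasklet_hi_schedule_first(&example_tasklet);
        }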
diff --git a/include/linux/kmemcheck.h b/include/linux/kmemcheck.h
new file mode 100644
index 00000000000..47b39b7c7e8
--- /dev/null
+++ b/include/linux/kmemcheck.h
@@ -0,0 +1,153 @@
+#ifndef LINUX_KMEMCHECK_H
+#define LINUX_KMEMCHECK_H
+
+#include <linux/mm_types.h>
+#include <linux/types.h>
+
+#ifdef CONFIG_KMEMCHECK
+extern int kmemcheck_enabled;
+
+/* The slab-related functions. */
+void kmemcheck_alloc_shadow(struct page *page, int order, gfp_t flags, int node);
+void kmemcheck_free_shadow(struct page *page, int order);
+void kmemcheck_slab_alloc(struct kmem_cache *s, gfp_t gfpflags, void *object,
+ size_t size);
+void kmemcheck_slab_free(struct kmem_cache *s, void *object, size_t size);
+
+void kmemcheck_pagealloc_alloc(struct page *p, unsigned int order,
+ gfp_t gfpflags);
+
+void kmemcheck_show_pages(struct page *p, unsigned int n);
+void kmemcheck_hide_pages(struct page *p, unsigned int n);
+
+bool kmemcheck_page_is_tracked(struct page *p);
+
+void kmemcheck_mark_unallocated(void *address, unsigned int n);
+void kmemcheck_mark_uninitialized(void *address, unsigned int n);
+void kmemcheck_mark_initialized(void *address, unsigned int n);
+void kmemcheck_mark_freed(void *address, unsigned int n);
+
+void kmemcheck_mark_unallocated_pages(struct page *p, unsigned int n);
+void kmemcheck_mark_uninitialized_pages(struct page *p, unsigned int n);
+void kmemcheck_mark_initialized_pages(struct page *p, unsigned int n);
+
+int kmemcheck_show_addr(unsigned long address);
+int kmemcheck_hide_addr(unsigned long address);
+
+#else
+#define kmemcheck_enabled 0
+
+static inline void
+kmemcheck_alloc_shadow(struct page *page, int order, gfp_t flags, int node)
+{
+}
+
+static inline void
+kmemcheck_free_shadow(struct page *page, int order)
+{
+}
+
+static inline void
+kmemcheck_slab_alloc(struct kmem_cache *s, gfp_t gfpflags, void *object,
+ size_t size)
+{
+}
+
+static inline void kmemcheck_slab_free(struct kmem_cache *s, void *object,
+ size_t size)
+{
+}
+
+static inline void kmemcheck_pagealloc_alloc(struct page *p,
+ unsigned int order, gfp_t gfpflags)
+{
+}
+
+static inline bool kmemcheck_page_is_tracked(struct page *p)
+{
+ return false;
+}
+
+static inline void kmemcheck_mark_unallocated(void *address, unsigned int n)
+{
+}
+
+static inline void kmemcheck_mark_uninitialized(void *address, unsigned int n)
+{
+}
+
+static inline void kmemcheck_mark_initialized(void *address, unsigned int n)
+{
+}
+
+static inline void kmemcheck_mark_freed(void *address, unsigned int n)
+{
+}
+
+static inline void kmemcheck_mark_unallocated_pages(struct page *p,
+ unsigned int n)
+{
+}
+
+static inline void kmemcheck_mark_uninitialized_pages(struct page *p,
+ unsigned int n)
+{
+}
+
+static inline void kmemcheck_mark_initialized_pages(struct page *p,
+ unsigned int n)
+{
+}
+
+#endif /* CONFIG_KMEMCHECK */
+
+/*
+ * Bitfield annotations
+ *
+ * How to use: If you have a struct using bitfields, for example
+ *
+ * struct a {
+ * int x:8, y:8;
+ * };
+ *
+ * then this should be rewritten as
+ *
+ * struct a {
+ * kmemcheck_bitfield_begin(flags);
+ * int x:8, y:8;
+ * kmemcheck_bitfield_end(flags);
+ * };
+ *
+ * Now the "flags_begin" and "flags_end" members may be used to refer to the
+ * beginning and end, respectively, of the bitfield (and things like
+ * &x.flags_begin are allowed). As soon as the struct is allocated, the bit-
+ * fields should be annotated:
+ *
+ * struct a *a = kmalloc(sizeof(struct a), GFP_KERNEL);
+ * kmemcheck_annotate_bitfield(a, flags);
+ *
+ * Note: We provide the same definitions for both kmemcheck and non-
+ * kmemcheck kernels. This makes it harder to introduce accidental errors. It
+ * is also allowed to pass NULL pointers to kmemcheck_annotate_bitfield().
+ */
+#define kmemcheck_bitfield_begin(name) \
+ int name##_begin[0];
+
+#define kmemcheck_bitfield_end(name) \
+ int name##_end[0];
+
+#define kmemcheck_annotate_bitfield(ptr, name) \
+ do if (ptr) { \
+ int _n = (long) &((ptr)->name##_end) \
+ - (long) &((ptr)->name##_begin); \
+ BUILD_BUG_ON(_n < 0); \
+ \
+ kmemcheck_mark_initialized(&((ptr)->name##_begin), _n); \
+ } while (0)
+
+#define kmemcheck_annotate_variable(var) \
+ do { \
+ kmemcheck_mark_initialized(&(var), sizeof(var)); \
+	} while (0)
+
+#endif /* LINUX_KMEMCHECK_H */
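For clarity, hand-expanding the markers on the struct a example from the
comment block gives the following (purely illustrative, not part of the
patch):

        struct a {
                int flags_begin[0];     /* zero-sized marker: same address as x */
                int x:8, y:8;
                int flags_end[0];       /* zero-sized marker: first byte past y */
        };

        /*
         * kmemcheck_annotate_bitfield(a, flags) then computes
         *      _n = (long) &a->flags_end - (long) &a->flags_begin
         * and calls kmemcheck_mark_initialized(&a->flags_begin, _n), i.e. it
         * marks exactly the bytes between the two markers as initialized.
         */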
diff --git a/include/linux/mm_types.h b/include/linux/mm_types.h
index 0e80e26ecf2..0042090a4d7 100644
--- a/include/linux/mm_types.h
+++ b/include/linux/mm_types.h
@@ -98,6 +98,14 @@ struct page {
#ifdef CONFIG_WANT_PAGE_DEBUG_FLAGS
unsigned long debug_flags; /* Use atomic bitops on this */
#endif
+
+#ifdef CONFIG_KMEMCHECK
+ /*
+ * kmemcheck wants to track the status of each byte in a page; this
+ * is a pointer to such a status block. NULL if not tracked.
+ */
+ void *shadow;
+#endif
};
/*
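In other words, the field doubles as the tracking flag: a page is tracked
exactly when shadow is non-NULL. A trivial sketch of a lookup helper under
that assumption (hypothetical name):

        /* Hypothetical helper: returns the per-byte status block, or
         * NULL if kmemcheck is not tracking this page. */
        static inline void *example_page_shadow(struct page *p)
        {
        #ifdef CONFIG_KMEMCHECK
                return p->shadow;
        #else
                return NULL;
        #endif
        }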
diff --git a/include/linux/ring_buffer.h b/include/linux/ring_buffer.h
index 8670f1575fe..29f8599e6be 100644
--- a/include/linux/ring_buffer.h
+++ b/include/linux/ring_buffer.h
@@ -1,6 +1,7 @@
#ifndef _LINUX_RING_BUFFER_H
#define _LINUX_RING_BUFFER_H
+#include <linux/kmemcheck.h>
#include <linux/mm.h>
#include <linux/seq_file.h>
@@ -11,7 +12,10 @@ struct ring_buffer_iter;
* Don't refer to this struct directly, use functions below.
*/
struct ring_buffer_event {
+ kmemcheck_bitfield_begin(bitfield);
u32 type_len:5, time_delta:27;
+ kmemcheck_bitfield_end(bitfield);
+
u32 array[];
};
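As elsewhere in this series, the markers take effect only once the writer
path annotates them; schematically (hypothetical helper, the real call
site would be in kernel/trace/ring_buffer.c):

        static void example_init_event(struct ring_buffer_event *event,
                                       u32 type, u32 delta)
        {
                /* Annotate before the field-by-field writes below, so
                 * reads of the packed header bytes are not flagged as
                 * uninitialized. */
                kmemcheck_annotate_bitfield(event, bitfield);
                event->type_len = type;
                event->time_delta = delta;
        }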
diff --git a/include/linux/skbuff.h b/include/linux/skbuff.h
index fa51293f270..63ef24bc01d 100644
--- a/include/linux/skbuff.h
+++ b/include/linux/skbuff.h
@@ -15,6 +15,7 @@
#define _LINUX_SKBUFF_H
#include <linux/kernel.h>
+#include <linux/kmemcheck.h>
#include <linux/compiler.h>
#include <linux/time.h>
#include <linux/cache.h>
@@ -343,6 +344,7 @@ struct sk_buff {
};
};
__u32 priority;
+ kmemcheck_bitfield_begin(flags1);
__u8 local_df:1,
cloned:1,
ip_summed:2,
@@ -353,6 +355,7 @@ struct sk_buff {
ipvs_property:1,
peeked:1,
nf_trace:1;
+ kmemcheck_bitfield_end(flags1);
__be16 protocol;
void (*destructor)(struct sk_buff *skb);
@@ -372,12 +375,16 @@ struct sk_buff {
__u16 tc_verd; /* traffic control verdict */
#endif
#endif
+
+ kmemcheck_bitfield_begin(flags2);
#ifdef CONFIG_IPV6_NDISC_NODETYPE
__u8 ndisc_nodetype:2;
#endif
#if defined(CONFIG_MAC80211) || defined(CONFIG_MAC80211_MODULE)
__u8 do_not_encrypt:1;
#endif
+ kmemcheck_bitfield_end(flags2);
+
/* 0/13/14 bit hole */
#ifdef CONFIG_NET_DMA
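The matching annotations would land in the skb allocation path
(net/core/skbuff.c, outside this 'include'-only diffstat); schematically:

        static struct sk_buff *example_alloc_annotated(struct kmem_cache *cache,
                                                       gfp_t gfp_mask, int node)
        {
                struct sk_buff *skb;

                skb = kmem_cache_alloc_node(cache, gfp_mask & ~__GFP_DMA, node);
                if (skb) {
                        /* flags1/flags2 are filled in piecemeal during
                         * protocol processing; mark both ranges
                         * initialized up front. */
                        kmemcheck_annotate_bitfield(skb, flags1);
                        kmemcheck_annotate_bitfield(skb, flags2);
                }
                return skb;
        }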
diff --git a/include/linux/slab.h b/include/linux/slab.h
index 219b8fb4651..2da8372519f 100644
--- a/include/linux/slab.h
+++ b/include/linux/slab.h
@@ -64,6 +64,13 @@
#define SLAB_NOLEAKTRACE 0x00800000UL /* Avoid kmemleak tracing */
+/* Don't track use of uninitialized memory */
+#ifdef CONFIG_KMEMCHECK
+# define SLAB_NOTRACK 0x01000000UL
+#else
+# define SLAB_NOTRACK 0x00000000UL
+#endif
+
/* The following flags affect the page allocator grouping pages by mobility */
#define SLAB_RECLAIM_ACCOUNT 0x00020000UL /* Objects are reclaimable */
#define SLAB_TEMPORARY SLAB_RECLAIM_ACCOUNT /* Objects are short-lived */
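A hypothetical cache set up with the new flag; like __GFP_NOTRACK,
SLAB_NOTRACK is defined as 0 when kmemcheck is off, so creation sites
need no #ifdef:

        #include <linux/errno.h>
        #include <linux/init.h>
        #include <linux/slab.h>

        struct example { int x; };      /* hypothetical object type */

        static struct kmem_cache *example_cachep;

        static int __init example_init(void)
        {
                /* Objects from this cache are exempt from kmemcheck
                 * tracking. */
                example_cachep = kmem_cache_create("example_cache",
                                                   sizeof(struct example), 0,
                                                   SLAB_HWCACHE_ALIGN | SLAB_NOTRACK,
                                                   NULL);
                return example_cachep ? 0 : -ENOMEM;
        }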
diff --git a/include/linux/slab_def.h b/include/linux/slab_def.h
index 713f841ecaa..850d057500d 100644
--- a/include/linux/slab_def.h
+++ b/include/linux/slab_def.h
@@ -16,6 +16,87 @@
#include <linux/compiler.h>
#include <linux/kmemtrace.h>
+/*
+ * struct kmem_cache
+ *
+ * manages a cache.
+ */
+
+struct kmem_cache {
+/* 1) per-cpu data, touched during every alloc/free */
+ struct array_cache *array[NR_CPUS];
+/* 2) Cache tunables. Protected by cache_chain_mutex */
+ unsigned int batchcount;
+ unsigned int limit;
+ unsigned int shared;
+
+ unsigned int buffer_size;
+ u32 reciprocal_buffer_size;
+/* 3) touched by every alloc & free from the backend */
+
+ unsigned int flags; /* constant flags */
+ unsigned int num; /* # of objs per slab */
+
+/* 4) cache_grow/shrink */
+ /* order of pgs per slab (2^n) */
+ unsigned int gfporder;
+
+ /* force GFP flags, e.g. GFP_DMA */
+ gfp_t gfpflags;
+
+ size_t colour; /* cache colouring range */
+ unsigned int colour_off; /* colour offset */
+ struct kmem_cache *slabp_cache;
+ unsigned int slab_size;
+ unsigned int dflags; /* dynamic flags */
+
+ /* constructor func */
+ void (*ctor)(void *obj);
+
+/* 5) cache creation/removal */
+ const char *name;
+ struct list_head next;
+
+/* 6) statistics */
+#ifdef CONFIG_DEBUG_SLAB
+ unsigned long num_active;
+ unsigned long num_allocations;
+ unsigned long high_mark;
+ unsigned long grown;
+ unsigned long reaped;
+ unsigned long errors;
+ unsigned long max_freeable;
+ unsigned long node_allocs;
+ unsigned long node_frees;
+ unsigned long node_overflow;
+ atomic_t allochit;
+ atomic_t allocmiss;
+ atomic_t freehit;
+ atomic_t freemiss;
+
+ /*
+ * If debugging is enabled, then the allocator can add additional
+ * fields and/or padding to every object. buffer_size contains the total
+ * object size including these internal fields; the following two
+ * variables contain the offset to the user object and its size.
+ */
+ int obj_offset;
+ int obj_size;
+#endif /* CONFIG_DEBUG_SLAB */
+
+ /*
+ * We put nodelists[] at the end of kmem_cache, because we want to size
+ * this array to nr_node_ids slots instead of MAX_NUMNODES
+ * (see kmem_cache_init())
+ * We still use [MAX_NUMNODES] and not [1] or [0] because cache_cache
+ * is statically defined, so we reserve the max number of nodes.
+ */
+ struct kmem_list3 *nodelists[MAX_NUMNODES];
+ /*
+ * Do not add fields after nodelists[]
+ */
+};
+
/* Size description struct for general caches. */
struct cache_sizes {
size_t cs_size;
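One non-obvious field above is reciprocal_buffer_size: it caches a
precomputed reciprocal so converting an object pointer to its slab index
avoids a hardware divide on the hot path. A sketch of the pattern (the
helpers reciprocal_value()/reciprocal_divide() are in
linux/reciprocal_div.h; the real helper along these lines is
obj_to_index() in mm/slab.c):

        #include <linux/reciprocal_div.h>

        /* At cache setup time:
         *      cachep->reciprocal_buffer_size = reciprocal_value(size);
         * On the hot path, dividing by buffer_size becomes a
         * multiply-and-shift: */
        static inline unsigned int example_obj_to_index(const struct kmem_cache *cache,
                                                        const void *slab_base,
                                                        const void *obj)
        {
                u32 offset = obj - slab_base;

                return reciprocal_divide(offset, cache->reciprocal_buffer_size);
        }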
diff --git a/include/linux/stacktrace.h b/include/linux/stacktrace.h
index 1a8cecc4f38..51efbef38fb 100644
--- a/include/linux/stacktrace.h
+++ b/include/linux/stacktrace.h
@@ -4,6 +4,8 @@
struct task_struct;
#ifdef CONFIG_STACKTRACE
+struct task_struct;
+
struct stack_trace {
unsigned int nr_entries, max_entries;
unsigned long *entries;
@@ -11,6 +13,7 @@ struct stack_trace {
};
extern void save_stack_trace(struct stack_trace *trace);
+extern void save_stack_trace_bp(struct stack_trace *trace, unsigned long bp);
extern void save_stack_trace_tsk(struct task_struct *tsk,
struct stack_trace *trace);
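A usage sketch of the new entry point (hypothetical function;
print_stack_trace() is the existing companion API in this header): it
lets a caller such as kmemcheck walk the stack from a saved frame pointer
rather than from the current frame.

        #include <linux/kernel.h>
        #include <linux/stacktrace.h>

        static void example_dump_from_bp(unsigned long bp)
        {
                unsigned long entries[16];
                struct stack_trace trace = {
                        .max_entries    = ARRAY_SIZE(entries),
                        .entries        = entries,
                };

                /* Walk the stack starting at the given frame pointer. */
                save_stack_trace_bp(&trace, bp);
                print_stack_trace(&trace, 0);
        }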
diff --git a/include/net/inet_sock.h b/include/net/inet_sock.h
index 20a6957af87..47004f35cc7 100644
--- a/include/net/inet_sock.h
+++ b/include/net/inet_sock.h
@@ -17,6 +17,7 @@
#define _INET_SOCK_H
+#include <linux/kmemcheck.h>
#include <linux/string.h>
#include <linux/types.h>
#include <linux/jhash.h>
@@ -66,14 +67,16 @@ struct inet_request_sock {
__be32 loc_addr;
__be32 rmt_addr;
__be16 rmt_port;
- u16 snd_wscale : 4,
- rcv_wscale : 4,
+ kmemcheck_bitfield_begin(flags);
+ u16 snd_wscale : 4,
+ rcv_wscale : 4,
tstamp_ok : 1,
sack_ok : 1,
wscale_ok : 1,
ecn_ok : 1,
acked : 1,
no_srccheck: 1;
+ kmemcheck_bitfield_end(flags);
struct ip_options *opt;
};
@@ -199,9 +202,12 @@ static inline int inet_sk_ehashfn(const struct sock *sk)
static inline struct request_sock *inet_reqsk_alloc(struct request_sock_ops *ops)
{
struct request_sock *req = reqsk_alloc(ops);
+ struct inet_request_sock *ireq = inet_rsk(req);
- if (req != NULL)
- inet_rsk(req)->opt = NULL;
+ if (req != NULL) {
+ kmemcheck_annotate_bitfield(ireq, flags);
+ ireq->opt = NULL;
+ }
return req;
}
diff --git a/include/net/inet_timewait_sock.h b/include/net/inet_timewait_sock.h
index 4b8ece22b8e..b63b80fac56 100644
--- a/include/net/inet_timewait_sock.h
+++ b/include/net/inet_timewait_sock.h
@@ -16,6 +16,7 @@
#define _INET_TIMEWAIT_SOCK_
+#include <linux/kmemcheck.h>
#include <linux/list.h>
#include <linux/module.h>
#include <linux/timer.h>
@@ -127,10 +128,12 @@ struct inet_timewait_sock {
__be32 tw_rcv_saddr;
__be16 tw_dport;
__u16 tw_num;
+ kmemcheck_bitfield_begin(flags);
/* And these are ours. */
__u8 tw_ipv6only:1,
tw_transparent:1;
- /* 15 bits hole, try to pack */
+ /* 14 bits hole, try to pack */
+ kmemcheck_bitfield_end(flags);
__u16 tw_ipv6_offset;
unsigned long tw_ttd;
struct inet_bind_bucket *tw_tb;
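As with inet_reqsk_alloc() above, the timewait allocation path would
annotate the field right after allocation; schematically (the real site
would be inet_twsk_alloc() in net/ipv4/inet_timewait_sock.c, outside this
diffstat):

        struct inet_timewait_sock *tw;

        tw = kmem_cache_alloc(sk->sk_prot_creator->twsk_prot->twsk_slab,
                              GFP_ATOMIC);
        if (tw != NULL) {
                kmemcheck_annotate_bitfield(tw, flags);
                /* ...usual timewait initialization follows... */
        }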
diff --git a/include/net/sock.h b/include/net/sock.h
index 010e14a93c9..95bd3fd75f9 100644
--- a/include/net/sock.h
+++ b/include/net/sock.h
@@ -218,9 +218,11 @@ struct sock {
#define sk_hash __sk_common.skc_hash
#define sk_prot __sk_common.skc_prot
#define sk_net __sk_common.skc_net
+ kmemcheck_bitfield_begin(flags);
unsigned char sk_shutdown : 2,
sk_no_check : 2,
sk_userlocks : 4;
+ kmemcheck_bitfield_end(flags);
unsigned char sk_protocol;
unsigned short sk_type;
int sk_rcvbuf;
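The same pattern completes the picture for struct sock itself: the
allocator annotates the range once per object, schematically (the real
site would be sk_prot_alloc() in net/core/sock.c):

        struct sock *sk = kmem_cache_alloc(prot->slab, priority);

        if (sk != NULL)
                kmemcheck_annotate_bitfield(sk, flags);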