Diffstat (limited to 'include/linux')
-rw-r--r--  include/linux/ata.h                      36
-rw-r--r--  include/linux/backing-dev.h              55
-rw-r--r--  include/linux/binfmts.h                   1
-rw-r--r--  include/linux/cpu.h                      17
-rw-r--r--  include/linux/cred.h                     69
-rw-r--r--  include/linux/crypto.h                   43
-rw-r--r--  include/linux/device-mapper.h             4
-rw-r--r--  include/linux/dm-log-userspace.h         13
-rw-r--r--  include/linux/dma-mapping.h               5
-rw-r--r--  include/linux/dmi.h                      13
-rw-r--r--  include/linux/fips.h                     10
-rw-r--r--  include/linux/fs.h                       29
-rw-r--r--  include/linux/ftrace_event.h             51
-rw-r--r--  include/linux/hardirq.h                  10
-rw-r--r--  include/linux/init_task.h                11
-rw-r--r--  include/linux/interrupt.h                 4
-rw-r--r--  include/linux/irq.h                      18
-rw-r--r--  include/linux/irqnr.h                     6
-rw-r--r--  include/linux/kernel.h                    5
-rw-r--r--  include/linux/key.h                       8
-rw-r--r--  include/linux/keyctl.h                    1
-rw-r--r--  include/linux/kmemcheck.h                 7
-rw-r--r--  include/linux/kmemleak.h                 18
-rw-r--r--  include/linux/libata.h                    3
-rw-r--r--  include/linux/lockdep.h                  18
-rw-r--r--  include/linux/lsm_audit.h                12
-rw-r--r--  include/linux/module.h                   14
-rw-r--r--  include/linux/nfs4.h                      1
-rw-r--r--  include/linux/nfs_fs_sb.h                 9
-rw-r--r--  include/linux/nmi.h                      19
-rw-r--r--  include/linux/oprofile.h                  5
-rw-r--r--  include/linux/pagemap.h                   4
-rw-r--r--  include/linux/pci_ids.h                   4
-rw-r--r--  include/linux/perf_counter.h              7
-rw-r--r--  include/linux/rcuclassic.h              178
-rw-r--r--  include/linux/rcupdate.h                 98
-rw-r--r--  include/linux/rcupreempt.h              127
-rw-r--r--  include/linux/rcupreempt_trace.h         97
-rw-r--r--  include/linux/rcutree.h                 262
-rw-r--r--  include/linux/ring_buffer.h              24
-rw-r--r--  include/linux/sched.h                   129
-rw-r--r--  include/linux/security.h                154
-rw-r--r--  include/linux/shmem_fs.h                  2
-rw-r--r--  include/linux/spinlock.h                 64
-rw-r--r--  include/linux/spinlock_api_smp.h        394
-rw-r--r--  include/linux/sunrpc/cache.h             40
-rw-r--r--  include/linux/sunrpc/clnt.h              43
-rw-r--r--  include/linux/sunrpc/msg_prot.h          17
-rw-r--r--  include/linux/sunrpc/rpc_pipe_fs.h       20
-rw-r--r--  include/linux/sunrpc/xdr.h               10
-rw-r--r--  include/linux/sunrpc/xprt.h               2
-rw-r--r--  include/linux/swiotlb.h                  11
-rw-r--r--  include/linux/syscalls.h                131
-rw-r--r--  include/linux/topology.h                168
-rw-r--r--  include/linux/tracepoint.h               29
-rw-r--r--  include/linux/tty.h                       4
-rw-r--r--  include/linux/workqueue.h                15
-rw-r--r--  include/linux/writeback.h                23
-rw-r--r--  include/linux/xattr.h                     1
59 files changed, 1553 insertions(+), 1020 deletions(-)
diff --git a/include/linux/ata.h b/include/linux/ata.h
index 9c75921f0c1..6299a259ed1 100644
--- a/include/linux/ata.h
+++ b/include/linux/ata.h
@@ -210,15 +210,25 @@ enum {
ATA_CMD_STANDBY = 0xE2, /* place in standby power mode */
ATA_CMD_IDLE = 0xE3, /* place in idle power mode */
ATA_CMD_EDD = 0x90, /* execute device diagnostic */
+ ATA_CMD_DOWNLOAD_MICRO = 0x92,
+ ATA_CMD_NOP = 0x00,
ATA_CMD_FLUSH = 0xE7,
ATA_CMD_FLUSH_EXT = 0xEA,
ATA_CMD_ID_ATA = 0xEC,
ATA_CMD_ID_ATAPI = 0xA1,
+ ATA_CMD_SERVICE = 0xA2,
ATA_CMD_READ = 0xC8,
ATA_CMD_READ_EXT = 0x25,
+ ATA_CMD_READ_QUEUED = 0x26,
+ ATA_CMD_READ_STREAM_EXT = 0x2B,
+ ATA_CMD_READ_STREAM_DMA_EXT = 0x2A,
ATA_CMD_WRITE = 0xCA,
ATA_CMD_WRITE_EXT = 0x35,
+ ATA_CMD_WRITE_QUEUED = 0x36,
+ ATA_CMD_WRITE_STREAM_EXT = 0x3B,
+ ATA_CMD_WRITE_STREAM_DMA_EXT = 0x3A,
ATA_CMD_WRITE_FUA_EXT = 0x3D,
+ ATA_CMD_WRITE_QUEUED_FUA_EXT = 0x3E,
ATA_CMD_FPDMA_READ = 0x60,
ATA_CMD_FPDMA_WRITE = 0x61,
ATA_CMD_PIO_READ = 0x20,
@@ -235,6 +245,7 @@ enum {
ATA_CMD_PACKET = 0xA0,
ATA_CMD_VERIFY = 0x40,
ATA_CMD_VERIFY_EXT = 0x42,
+ ATA_CMD_WRITE_UNCORR_EXT = 0x45,
ATA_CMD_STANDBYNOW1 = 0xE0,
ATA_CMD_IDLEIMMEDIATE = 0xE1,
ATA_CMD_SLEEP = 0xE6,
@@ -243,15 +254,34 @@ enum {
ATA_CMD_READ_NATIVE_MAX_EXT = 0x27,
ATA_CMD_SET_MAX = 0xF9,
ATA_CMD_SET_MAX_EXT = 0x37,
- ATA_CMD_READ_LOG_EXT = 0x2f,
+ ATA_CMD_READ_LOG_EXT = 0x2F,
+ ATA_CMD_WRITE_LOG_EXT = 0x3F,
+ ATA_CMD_READ_LOG_DMA_EXT = 0x47,
+ ATA_CMD_WRITE_LOG_DMA_EXT = 0x57,
+ ATA_CMD_TRUSTED_RCV = 0x5C,
+ ATA_CMD_TRUSTED_RCV_DMA = 0x5D,
+ ATA_CMD_TRUSTED_SND = 0x5E,
+ ATA_CMD_TRUSTED_SND_DMA = 0x5F,
ATA_CMD_PMP_READ = 0xE4,
ATA_CMD_PMP_WRITE = 0xE8,
ATA_CMD_CONF_OVERLAY = 0xB1,
+ ATA_CMD_SEC_SET_PASS = 0xF1,
+ ATA_CMD_SEC_UNLOCK = 0xF2,
+ ATA_CMD_SEC_ERASE_PREP = 0xF3,
+ ATA_CMD_SEC_ERASE_UNIT = 0xF4,
ATA_CMD_SEC_FREEZE_LOCK = 0xF5,
+ ATA_CMD_SEC_DISABLE_PASS = 0xF6,
+ ATA_CMD_CONFIG_STREAM = 0x51,
ATA_CMD_SMART = 0xB0,
ATA_CMD_MEDIA_LOCK = 0xDE,
ATA_CMD_MEDIA_UNLOCK = 0xDF,
ATA_CMD_DSM = 0x06,
+ ATA_CMD_CHK_MED_CRD_TYP = 0xD1,
+ ATA_CMD_CFA_REQ_EXT_ERR = 0x03,
+ ATA_CMD_CFA_WRITE_NE = 0x38,
+ ATA_CMD_CFA_TRANS_SECT = 0x87,
+ ATA_CMD_CFA_ERASE = 0xC0,
+ ATA_CMD_CFA_WRITE_MULT_NE = 0xCD,
/* marked obsolete in the ATA/ATAPI-7 spec */
ATA_CMD_RESTORE = 0x10,
@@ -306,6 +336,7 @@ enum {
/* SETFEATURE Sector counts for SATA features */
SATA_AN = 0x05, /* Asynchronous Notification */
SATA_DIPM = 0x03, /* Device Initiated Power Management */
+ SATA_FPDMA_AA = 0x02, /* DMA Setup FIS Auto-Activate */
/* feature values for SET_MAX */
ATA_SET_MAX_ADDR = 0x00,
@@ -525,6 +556,9 @@ static inline int ata_is_data(u8 prot)
#define ata_id_has_atapi_AN(id) \
( (((id)[76] != 0x0000) && ((id)[76] != 0xffff)) && \
((id)[78] & (1 << 5)) )
+#define ata_id_has_fpdma_aa(id) \
+ ( (((id)[76] != 0x0000) && ((id)[76] != 0xffff)) && \
+ ((id)[78] & (1 << 2)) )
#define ata_id_iordy_disable(id) ((id)[ATA_ID_CAPABILITY] & (1 << 10))
#define ata_id_has_iordy(id) ((id)[ATA_ID_CAPABILITY] & (1 << 11))
#define ata_id_u32(id,n) \
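
The new ata_id_has_fpdma_aa() test mirrors ata_id_has_atapi_AN(): word 76 of the IDENTIFY data must be valid (neither 0x0000 nor 0xFFFF, i.e. a SATA device) and bit 2 of word 78 must be set. A minimal sketch of the same check as a helper, assuming a u16 id[] buffer filled from IDENTIFY DEVICE (the helper name is hypothetical):

static bool dev_supports_fpdma_aa(const u16 *id)
{
	/* word 76 is all-zeroes/all-ones on non-SATA or invalid data */
	if (id[76] == 0x0000 || id[76] == 0xffff)
		return false;
	/* word 78 bit 2: DMA Setup FIS Auto-Activate supported */
	return (id[78] & (1 << 2)) != 0;
}

A driver that finds the bit set can then enable the feature with SET FEATURES using the new SATA_FPDMA_AA (0x02) subcommand, which is what the ATA_FLAG_FPDMA_AA host flag below gates.
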
diff --git a/include/linux/backing-dev.h b/include/linux/backing-dev.h
index 1d52425a611..f169bcb90b5 100644
--- a/include/linux/backing-dev.h
+++ b/include/linux/backing-dev.h
@@ -13,6 +13,8 @@
#include <linux/proportions.h>
#include <linux/kernel.h>
#include <linux/fs.h>
+#include <linux/sched.h>
+#include <linux/writeback.h>
#include <asm/atomic.h>
struct page;
@@ -23,9 +25,11 @@ struct dentry;
* Bits in backing_dev_info.state
*/
enum bdi_state {
- BDI_pdflush, /* A pdflush thread is working this device */
+ BDI_pending, /* On its way to being activated */
+ BDI_wb_alloc, /* Default embedded wb allocated */
BDI_async_congested, /* The async (write) queue is getting full */
BDI_sync_congested, /* The sync queue is getting full */
+ BDI_registered, /* bdi_register() was done */
BDI_unused, /* Available bits start here */
};
@@ -39,7 +43,22 @@ enum bdi_stat_item {
#define BDI_STAT_BATCH (8*(1+ilog2(nr_cpu_ids)))
+struct bdi_writeback {
+ struct list_head list; /* hangs off the bdi */
+
+ struct backing_dev_info *bdi; /* our parent bdi */
+ unsigned int nr;
+
+ unsigned long last_old_flush; /* last old data flush */
+
+ struct task_struct *task; /* writeback task */
+ struct list_head b_dirty; /* dirty inodes */
+ struct list_head b_io; /* parked for writeback */
+ struct list_head b_more_io; /* parked for more writeback */
+};
+
struct backing_dev_info {
+ struct list_head bdi_list;
unsigned long ra_pages; /* max readahead in PAGE_CACHE_SIZE units */
unsigned long state; /* Always use atomic bitops on this */
unsigned int capabilities; /* Device capabilities */
@@ -48,6 +67,8 @@ struct backing_dev_info {
void (*unplug_io_fn)(struct backing_dev_info *, struct page *);
void *unplug_io_data;
+ char *name;
+
struct percpu_counter bdi_stat[NR_BDI_STAT_ITEMS];
struct prop_local_percpu completions;
@@ -56,6 +77,14 @@ struct backing_dev_info {
unsigned int min_ratio;
unsigned int max_ratio, max_prop_frac;
+ struct bdi_writeback wb; /* default writeback info for this bdi */
+ spinlock_t wb_lock; /* protects update side of wb_list */
+ struct list_head wb_list; /* the flusher threads hanging off this bdi */
+ unsigned long wb_mask; /* bitmask of registered tasks */
+ unsigned int wb_cnt; /* number of registered tasks */
+
+ struct list_head work_list;
+
struct device *dev;
#ifdef CONFIG_DEBUG_FS
@@ -71,6 +100,19 @@ int bdi_register(struct backing_dev_info *bdi, struct device *parent,
const char *fmt, ...);
int bdi_register_dev(struct backing_dev_info *bdi, dev_t dev);
void bdi_unregister(struct backing_dev_info *bdi);
+void bdi_start_writeback(struct writeback_control *wbc);
+int bdi_writeback_task(struct bdi_writeback *wb);
+int bdi_has_dirty_io(struct backing_dev_info *bdi);
+
+extern spinlock_t bdi_lock;
+extern struct list_head bdi_list;
+
+static inline int wb_has_dirty_io(struct bdi_writeback *wb)
+{
+ return !list_empty(&wb->b_dirty) ||
+ !list_empty(&wb->b_io) ||
+ !list_empty(&wb->b_more_io);
+}
static inline void __add_bdi_stat(struct backing_dev_info *bdi,
enum bdi_stat_item item, s64 amount)
@@ -261,6 +303,11 @@ static inline bool bdi_cap_swap_backed(struct backing_dev_info *bdi)
return bdi->capabilities & BDI_CAP_SWAP_BACKED;
}
+static inline bool bdi_cap_flush_forker(struct backing_dev_info *bdi)
+{
+ return bdi == &default_backing_dev_info;
+}
+
static inline bool mapping_cap_writeback_dirty(struct address_space *mapping)
{
return bdi_cap_writeback_dirty(mapping->backing_dev_info);
@@ -276,4 +323,10 @@ static inline bool mapping_cap_swap_backed(struct address_space *mapping)
return bdi_cap_swap_backed(mapping->backing_dev_info);
}
+static inline int bdi_sched_wait(void *word)
+{
+ schedule();
+ return 0;
+}
+
#endif /* _LINUX_BACKING_DEV_H */
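
bdi_sched_wait() has the shape of a wait_on_bit() action routine: schedule once, report "not interrupted". A sketch of the intended use, under the assumption that a caller wants to block until a bdi leaves the BDI_pending state:

	/* sleep until the forker thread has activated this bdi */
	wait_on_bit(&bdi->state, BDI_pending, bdi_sched_wait,
		    TASK_UNINTERRUPTIBLE);
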
diff --git a/include/linux/binfmts.h b/include/linux/binfmts.h
index 61ee18c1bdb..2046b5b8af4 100644
--- a/include/linux/binfmts.h
+++ b/include/linux/binfmts.h
@@ -117,6 +117,7 @@ extern int setup_arg_pages(struct linux_binprm * bprm,
int executable_stack);
extern int bprm_mm_init(struct linux_binprm *bprm);
extern int copy_strings_kernel(int argc,char ** argv,struct linux_binprm *bprm);
+extern int prepare_bprm_creds(struct linux_binprm *bprm);
extern void install_exec_creds(struct linux_binprm *bprm);
extern void do_coredump(long signr, int exit_code, struct pt_regs *regs);
extern int set_binfmt(struct linux_binfmt *new);
diff --git a/include/linux/cpu.h b/include/linux/cpu.h
index 4d668e05d45..47536197ffd 100644
--- a/include/linux/cpu.h
+++ b/include/linux/cpu.h
@@ -48,6 +48,15 @@ struct notifier_block;
#ifdef CONFIG_SMP
/* Need to know about CPUs going up/down? */
+#if defined(CONFIG_HOTPLUG_CPU) || !defined(MODULE)
+#define cpu_notifier(fn, pri) { \
+ static struct notifier_block fn##_nb __cpuinitdata = \
+ { .notifier_call = fn, .priority = pri }; \
+ register_cpu_notifier(&fn##_nb); \
+}
+#else /* #if defined(CONFIG_HOTPLUG_CPU) || !defined(MODULE) */
+#define cpu_notifier(fn, pri) do { (void)(fn); } while (0)
+#endif /* #else #if defined(CONFIG_HOTPLUG_CPU) || !defined(MODULE) */
#ifdef CONFIG_HOTPLUG_CPU
extern int register_cpu_notifier(struct notifier_block *nb);
extern void unregister_cpu_notifier(struct notifier_block *nb);
@@ -74,6 +83,8 @@ extern void cpu_maps_update_done(void);
#else /* CONFIG_SMP */
+#define cpu_notifier(fn, pri) do { (void)(fn); } while (0)
+
static inline int register_cpu_notifier(struct notifier_block *nb)
{
return 0;
@@ -99,11 +110,7 @@ extern struct sysdev_class cpu_sysdev_class;
extern void get_online_cpus(void);
extern void put_online_cpus(void);
-#define hotcpu_notifier(fn, pri) { \
- static struct notifier_block fn##_nb __cpuinitdata = \
- { .notifier_call = fn, .priority = pri }; \
- register_cpu_notifier(&fn##_nb); \
-}
+#define hotcpu_notifier(fn, pri) cpu_notifier(fn, pri)
#define register_hotcpu_notifier(nb) register_cpu_notifier(nb)
#define unregister_hotcpu_notifier(nb) unregister_cpu_notifier(nb)
int cpu_down(unsigned int cpu);
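
cpu_notifier() folds the declare-and-register boilerplate into one statement and is usable by built-in code even without CONFIG_HOTPLUG_CPU. A minimal sketch (callback name and actions illustrative):

static int __cpuinit my_cpu_callback(struct notifier_block *nb,
				     unsigned long action, void *hcpu)
{
	unsigned int cpu = (unsigned long)hcpu;

	switch (action) {
	case CPU_ONLINE:
		/* set up per-CPU state for 'cpu' */
		break;
	case CPU_DEAD:
		/* and tear it down again */
		break;
	}
	return NOTIFY_OK;
}

	/* somewhere in init code: */
	cpu_notifier(my_cpu_callback, 0);
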
diff --git a/include/linux/cred.h b/include/linux/cred.h
index 4fa99969631..24520a539c6 100644
--- a/include/linux/cred.h
+++ b/include/linux/cred.h
@@ -114,6 +114,13 @@ struct thread_group_cred {
*/
struct cred {
atomic_t usage;
+#ifdef CONFIG_DEBUG_CREDENTIALS
+ atomic_t subscribers; /* number of processes subscribed */
+ void *put_addr;
+ unsigned magic;
+#define CRED_MAGIC 0x43736564
+#define CRED_MAGIC_DEAD 0x44656144
+#endif
uid_t uid; /* real UID of the task */
gid_t gid; /* real GID of the task */
uid_t suid; /* saved UID of the task */
@@ -143,7 +150,9 @@ struct cred {
};
extern void __put_cred(struct cred *);
+extern void exit_creds(struct task_struct *);
extern int copy_creds(struct task_struct *, unsigned long);
+extern struct cred *cred_alloc_blank(void);
extern struct cred *prepare_creds(void);
extern struct cred *prepare_exec_creds(void);
extern struct cred *prepare_usermodehelper_creds(void);
@@ -158,6 +167,60 @@ extern int set_security_override_from_ctx(struct cred *, const char *);
extern int set_create_files_as(struct cred *, struct inode *);
extern void __init cred_init(void);
+/*
+ * check for validity of credentials
+ */
+#ifdef CONFIG_DEBUG_CREDENTIALS
+extern void __invalid_creds(const struct cred *, const char *, unsigned);
+extern void __validate_process_creds(struct task_struct *,
+ const char *, unsigned);
+
+static inline bool creds_are_invalid(const struct cred *cred)
+{
+ if (cred->magic != CRED_MAGIC)
+ return true;
+ if (atomic_read(&cred->usage) < atomic_read(&cred->subscribers))
+ return true;
+#ifdef CONFIG_SECURITY_SELINUX
+ if ((unsigned long) cred->security < PAGE_SIZE)
+ return true;
+ if ((*(u32*)cred->security & 0xffffff00) ==
+ (POISON_FREE << 24 | POISON_FREE << 16 | POISON_FREE << 8))
+ return true;
+#endif
+ return false;
+}
+
+static inline void __validate_creds(const struct cred *cred,
+ const char *file, unsigned line)
+{
+ if (unlikely(creds_are_invalid(cred)))
+ __invalid_creds(cred, file, line);
+}
+
+#define validate_creds(cred) \
+do { \
+ __validate_creds((cred), __FILE__, __LINE__); \
+} while(0)
+
+#define validate_process_creds() \
+do { \
+ __validate_process_creds(current, __FILE__, __LINE__); \
+} while(0)
+
+extern void validate_creds_for_do_exit(struct task_struct *);
+#else
+static inline void validate_creds(const struct cred *cred)
+{
+}
+static inline void validate_creds_for_do_exit(struct task_struct *tsk)
+{
+}
+static inline void validate_process_creds(void)
+{
+}
+#endif
+
/**
* get_new_cred - Get a reference on a new set of credentials
* @cred: The new credentials to reference
@@ -186,7 +249,9 @@ static inline struct cred *get_new_cred(struct cred *cred)
*/
static inline const struct cred *get_cred(const struct cred *cred)
{
- return get_new_cred((struct cred *) cred);
+ struct cred *nonconst_cred = (struct cred *) cred;
+ validate_creds(cred);
+ return get_new_cred(nonconst_cred);
}
/**
@@ -204,7 +269,7 @@ static inline void put_cred(const struct cred *_cred)
{
struct cred *cred = (struct cred *) _cred;
- BUG_ON(atomic_read(&(cred)->usage) <= 0);
+ validate_creds(cred);
if (atomic_dec_and_test(&(cred)->usage))
__put_cred(cred);
}
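
With CONFIG_DEBUG_CREDENTIALS enabled, get_cred() and put_cred() now validate the magic number and the usage/subscribers counts on every reference operation, so refcounting bugs fire at the point of misuse rather than as a later use-after-free. The calling pattern itself is unchanged; a sketch:

	const struct cred *cred;

	cred = get_cred(current_cred());	/* validates, then takes a ref */
	/* ... inspect cred->uid, cred->fsuid, ... */
	put_cred(cred);				/* validates, then drops the ref */
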
diff --git a/include/linux/crypto.h b/include/linux/crypto.h
index ec29fa268b9..fd929889e8d 100644
--- a/include/linux/crypto.h
+++ b/include/linux/crypto.h
@@ -115,7 +115,6 @@ struct crypto_async_request;
struct crypto_aead;
struct crypto_blkcipher;
struct crypto_hash;
-struct crypto_ahash;
struct crypto_rng;
struct crypto_tfm;
struct crypto_type;
@@ -146,16 +145,6 @@ struct ablkcipher_request {
void *__ctx[] CRYPTO_MINALIGN_ATTR;
};
-struct ahash_request {
- struct crypto_async_request base;
-
- unsigned int nbytes;
- struct scatterlist *src;
- u8 *result;
-
- void *__ctx[] CRYPTO_MINALIGN_ATTR;
-};
-
/**
* struct aead_request - AEAD request
* @base: Common attributes for async crypto requests
@@ -220,18 +209,6 @@ struct ablkcipher_alg {
unsigned int ivsize;
};
-struct ahash_alg {
- int (*init)(struct ahash_request *req);
- int (*reinit)(struct ahash_request *req);
- int (*update)(struct ahash_request *req);
- int (*final)(struct ahash_request *req);
- int (*digest)(struct ahash_request *req);
- int (*setkey)(struct crypto_ahash *tfm, const u8 *key,
- unsigned int keylen);
-
- unsigned int digestsize;
-};
-
struct aead_alg {
int (*setkey)(struct crypto_aead *tfm, const u8 *key,
unsigned int keylen);
@@ -318,7 +295,6 @@ struct rng_alg {
#define cra_cipher cra_u.cipher
#define cra_digest cra_u.digest
#define cra_hash cra_u.hash
-#define cra_ahash cra_u.ahash
#define cra_compress cra_u.compress
#define cra_rng cra_u.rng
@@ -346,7 +322,6 @@ struct crypto_alg {
struct cipher_alg cipher;
struct digest_alg digest;
struct hash_alg hash;
- struct ahash_alg ahash;
struct compress_alg compress;
struct rng_alg rng;
} cra_u;
@@ -433,18 +408,6 @@ struct hash_tfm {
unsigned int digestsize;
};
-struct ahash_tfm {
- int (*init)(struct ahash_request *req);
- int (*update)(struct ahash_request *req);
- int (*final)(struct ahash_request *req);
- int (*digest)(struct ahash_request *req);
- int (*setkey)(struct crypto_ahash *tfm, const u8 *key,
- unsigned int keylen);
-
- unsigned int digestsize;
- unsigned int reqsize;
-};
-
struct compress_tfm {
int (*cot_compress)(struct crypto_tfm *tfm,
const u8 *src, unsigned int slen,
@@ -465,7 +428,6 @@ struct rng_tfm {
#define crt_blkcipher crt_u.blkcipher
#define crt_cipher crt_u.cipher
#define crt_hash crt_u.hash
-#define crt_ahash crt_u.ahash
#define crt_compress crt_u.compress
#define crt_rng crt_u.rng
@@ -479,7 +441,6 @@ struct crypto_tfm {
struct blkcipher_tfm blkcipher;
struct cipher_tfm cipher;
struct hash_tfm hash;
- struct ahash_tfm ahash;
struct compress_tfm compress;
struct rng_tfm rng;
} crt_u;
@@ -770,7 +731,7 @@ static inline struct ablkcipher_request *ablkcipher_request_alloc(
static inline void ablkcipher_request_free(struct ablkcipher_request *req)
{
- kfree(req);
+ kzfree(req);
}
static inline void ablkcipher_request_set_callback(
@@ -901,7 +862,7 @@ static inline struct aead_request *aead_request_alloc(struct crypto_aead *tfm,
static inline void aead_request_free(struct aead_request *req)
{
- kfree(req);
+ kzfree(req);
}
static inline void aead_request_set_callback(struct aead_request *req,
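
ablkcipher_request_free() and aead_request_free() switch from kfree() to kzfree() so that request memory, which can hold IVs and other sensitive material, is cleared before going back to the allocator. As a rough sketch of the semantics (not the exact mm/util.c source, which also handles ZERO_SIZE_PTR):

static void kzfree_sketch(const void *p)
{
	void *mem = (void *)p;

	if (!mem)
		return;
	memset(mem, 0, ksize(mem));	/* wipe the whole allocation */
	kfree(mem);
}
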
diff --git a/include/linux/device-mapper.h b/include/linux/device-mapper.h
index 655e7721580..df7607e6dce 100644
--- a/include/linux/device-mapper.h
+++ b/include/linux/device-mapper.h
@@ -91,6 +91,9 @@ typedef int (*dm_iterate_devices_fn) (struct dm_target *ti,
iterate_devices_callout_fn fn,
void *data);
+typedef void (*dm_io_hints_fn) (struct dm_target *ti,
+ struct queue_limits *limits);
+
/*
* Returns:
* 0: The target can handle the next I/O immediately.
@@ -151,6 +154,7 @@ struct target_type {
dm_merge_fn merge;
dm_busy_fn busy;
dm_iterate_devices_fn iterate_devices;
+ dm_io_hints_fn io_hints;
/* For internal device-mapper use. */
struct list_head list;
diff --git a/include/linux/dm-log-userspace.h b/include/linux/dm-log-userspace.h
index 642e3017b51..8a1f972c0fe 100644
--- a/include/linux/dm-log-userspace.h
+++ b/include/linux/dm-log-userspace.h
@@ -371,7 +371,18 @@
(DM_ULOG_REQUEST_MASK & (request_type))
struct dm_ulog_request {
- char uuid[DM_UUID_LEN]; /* Ties a request to a specific mirror log */
+ /*
+ * The local unique identifier (luid) and the universally unique
+ * identifier (uuid) are used to tie a request to a specific
+ * mirror log. A single machine log could probably make do with
+ * just the 'luid', but a cluster-aware log must use the 'uuid' and
+ * the 'luid'. The uuid is what is required for node to node
+ * communication concerning a particular log, but the 'luid' helps
+ * differentiate between logs that are being swapped and have the
+ * same 'uuid'. (Think "live" and "inactive" device-mapper tables.)
+ */
+ uint64_t luid;
+ char uuid[DM_UUID_LEN];
char padding[7]; /* Padding because DM_UUID_LEN = 129 */
int32_t error; /* Used to report back processing errors */
diff --git a/include/linux/dma-mapping.h b/include/linux/dma-mapping.h
index 07dfd460d28..c0f6c3cd788 100644
--- a/include/linux/dma-mapping.h
+++ b/include/linux/dma-mapping.h
@@ -98,11 +98,6 @@ static inline int is_device_dma_capable(struct device *dev)
return dev->dma_mask != NULL && *dev->dma_mask != DMA_MASK_NONE;
}
-static inline int is_buffer_dma_capable(u64 mask, dma_addr_t addr, size_t size)
-{
- return addr + size <= mask;
-}
-
#ifdef CONFIG_HAS_DMA
#include <asm/dma-mapping.h>
#else
diff --git a/include/linux/dmi.h b/include/linux/dmi.h
index bb5489c82c9..a8a3e1ac281 100644
--- a/include/linux/dmi.h
+++ b/include/linux/dmi.h
@@ -43,7 +43,7 @@ extern const char * dmi_get_system_info(int field);
extern const struct dmi_device * dmi_find_device(int type, const char *name,
const struct dmi_device *from);
extern void dmi_scan_machine(void);
-extern int dmi_get_year(int field);
+extern bool dmi_get_date(int field, int *yearp, int *monthp, int *dayp);
extern int dmi_name_in_vendors(const char *str);
extern int dmi_name_in_serial(const char *str);
extern int dmi_available;
@@ -58,7 +58,16 @@ static inline const char * dmi_get_system_info(int field) { return NULL; }
static inline const struct dmi_device * dmi_find_device(int type, const char *name,
const struct dmi_device *from) { return NULL; }
static inline void dmi_scan_machine(void) { return; }
-static inline int dmi_get_year(int year) { return 0; }
+static inline bool dmi_get_date(int field, int *yearp, int *monthp, int *dayp)
+{
+ if (yearp)
+ *yearp = 0;
+ if (monthp)
+ *monthp = 0;
+ if (dayp)
+ *dayp = 0;
+ return false;
+}
static inline int dmi_name_in_vendors(const char *s) { return 0; }
static inline int dmi_name_in_serial(const char *s) { return 0; }
#define dmi_available 0
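
dmi_get_date() supersedes dmi_get_year() and parses a full mm/dd/yyyy DMI date string; any of the output pointers may be NULL, and as the stub above shows, missing data comes back as 0/false. A sketch of a caller that only cares about the year (the 2001 cutoff is illustrative):

	int year;

	if (!dmi_get_date(DMI_BIOS_DATE, &year, NULL, NULL) || year < 2001)
		return false;	/* BIOS too old, or date unknown */
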
diff --git a/include/linux/fips.h b/include/linux/fips.h
new file mode 100644
index 00000000000..f8fb07b0b6b
--- /dev/null
+++ b/include/linux/fips.h
@@ -0,0 +1,10 @@
+#ifndef _FIPS_H
+#define _FIPS_H
+
+#ifdef CONFIG_CRYPTO_FIPS
+extern int fips_enabled;
+#else
+#define fips_enabled 0
+#endif
+
+#endif
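
Because fips_enabled degrades to the constant 0 without CONFIG_CRYPTO_FIPS, callers can test it unconditionally and the compiler discards the branch. A hypothetical gate in a driver:

	if (fips_enabled) {
		pr_err("mydrv: algorithm disallowed in FIPS mode\n");
		return -EINVAL;
	}
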
diff --git a/include/linux/fs.h b/include/linux/fs.h
index 73e9b643e45..a79f48373e7 100644
--- a/include/linux/fs.h
+++ b/include/linux/fs.h
@@ -715,7 +715,7 @@ struct posix_acl;
struct inode {
struct hlist_node i_hash;
- struct list_head i_list;
+ struct list_head i_list; /* backing dev IO list */
struct list_head i_sb_list;
struct list_head i_dentry;
unsigned long i_ino;
@@ -1336,9 +1336,6 @@ struct super_block {
struct xattr_handler **s_xattr;
struct list_head s_inodes; /* all inodes */
- struct list_head s_dirty; /* dirty inodes */
- struct list_head s_io; /* parked for writeback */
- struct list_head s_more_io; /* parked for more writeback */
struct hlist_head s_anon; /* anonymous dentries for (nfs) exporting */
struct list_head s_files;
/* s_dentry_lru and s_nr_dentry_unused are protected by dcache_lock */
@@ -1528,6 +1525,7 @@ struct inode_operations {
void (*put_link) (struct dentry *, struct nameidata *, void *);
void (*truncate) (struct inode *);
int (*permission) (struct inode *, int);
+ int (*check_acl)(struct inode *, int);
int (*setattr) (struct dentry *, struct iattr *);
int (*getattr) (struct vfsmount *mnt, struct dentry *, struct kstat *);
int (*setxattr) (struct dentry *, const char *,const void *,size_t,int);
@@ -1788,6 +1786,7 @@ extern int get_sb_pseudo(struct file_system_type *, char *,
struct vfsmount *mnt);
extern void simple_set_mnt(struct vfsmount *mnt, struct super_block *sb);
int __put_super_and_need_restart(struct super_block *sb);
+void put_super(struct super_block *sb);
/* Alas, no aliases. Too much hassle with bringing module.h everywhere */
#define fops_get(fops) \
@@ -1998,12 +1997,25 @@ extern void bd_release_from_disk(struct block_device *, struct gendisk *);
#define CHRDEV_MAJOR_HASH_SIZE 255
extern int alloc_chrdev_region(dev_t *, unsigned, unsigned, const char *);
extern int register_chrdev_region(dev_t, unsigned, const char *);
-extern int register_chrdev(unsigned int, const char *,
- const struct file_operations *);
-extern void unregister_chrdev(unsigned int, const char *);
+extern int __register_chrdev(unsigned int major, unsigned int baseminor,
+ unsigned int count, const char *name,
+ const struct file_operations *fops);
+extern void __unregister_chrdev(unsigned int major, unsigned int baseminor,
+ unsigned int count, const char *name);
extern void unregister_chrdev_region(dev_t, unsigned);
extern void chrdev_show(struct seq_file *,off_t);
+static inline int register_chrdev(unsigned int major, const char *name,
+ const struct file_operations *fops)
+{
+ return __register_chrdev(major, 0, 256, name, fops);
+}
+
+static inline void unregister_chrdev(unsigned int major, const char *name)
+{
+ __unregister_chrdev(major, 0, 256, name);
+}
+
/* fs/block_dev.c */
#define BDEVNAME_SIZE 32 /* Largest string for a blockdev identifier */
#define BDEVT_SIZE 10 /* Largest string for MAJ:MIN for blkdev */
@@ -2070,8 +2082,6 @@ static inline void invalidate_remote_inode(struct inode *inode)
extern int invalidate_inode_pages2(struct address_space *mapping);
extern int invalidate_inode_pages2_range(struct address_space *mapping,
pgoff_t start, pgoff_t end);
-extern void generic_sync_sb_inodes(struct super_block *sb,
- struct writeback_control *wbc);
extern int write_inode_now(struct inode *, int);
extern int filemap_fdatawrite(struct address_space *);
extern int filemap_flush(struct address_space *);
@@ -2186,7 +2196,6 @@ extern int bdev_read_only(struct block_device *);
extern int set_blocksize(struct block_device *, int);
extern int sb_set_blocksize(struct super_block *, int);
extern int sb_min_blocksize(struct super_block *, int);
-extern int sb_has_dirty_inodes(struct super_block *);
extern int generic_file_mmap(struct file *, struct vm_area_struct *);
extern int generic_file_readonly_mmap(struct file *, struct vm_area_struct *);
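
register_chrdev()/unregister_chrdev() keep their historical behaviour (minors 0-255) as inline wrappers, while the new __register_chrdev()/__unregister_chrdev() let a driver claim only the minor range it needs. A sketch (device name and fops hypothetical):

	int major;

	/* dynamically allocate a major, but only take minors 0..15 */
	major = __register_chrdev(0, 0, 16, "mydev", &mydev_fops);
	if (major < 0)
		return major;
	/* ... */
	__unregister_chrdev(major, 0, 16, "mydev");
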
diff --git a/include/linux/ftrace_event.h b/include/linux/ftrace_event.h
index a81170de7f6..23f7179bf74 100644
--- a/include/linux/ftrace_event.h
+++ b/include/linux/ftrace_event.h
@@ -93,16 +93,22 @@ void tracing_generic_entry_update(struct trace_entry *entry,
unsigned long flags,
int pc);
struct ring_buffer_event *
-trace_current_buffer_lock_reserve(int type, unsigned long len,
+trace_current_buffer_lock_reserve(struct ring_buffer **current_buffer,
+ int type, unsigned long len,
unsigned long flags, int pc);
-void trace_current_buffer_unlock_commit(struct ring_buffer_event *event,
+void trace_current_buffer_unlock_commit(struct ring_buffer *buffer,
+ struct ring_buffer_event *event,
unsigned long flags, int pc);
-void trace_nowake_buffer_unlock_commit(struct ring_buffer_event *event,
+void trace_nowake_buffer_unlock_commit(struct ring_buffer *buffer,
+ struct ring_buffer_event *event,
unsigned long flags, int pc);
-void trace_current_buffer_discard_commit(struct ring_buffer_event *event);
+void trace_current_buffer_discard_commit(struct ring_buffer *buffer,
+ struct ring_buffer_event *event);
void tracing_record_cmdline(struct task_struct *tsk);
+struct event_filter;
+
struct ftrace_event_call {
struct list_head list;
char *name;
@@ -110,16 +116,18 @@ struct ftrace_event_call {
struct dentry *dir;
struct trace_event *event;
int enabled;
- int (*regfunc)(void);
- void (*unregfunc)(void);
+ int (*regfunc)(void *);
+ void (*unregfunc)(void *);
int id;
int (*raw_init)(void);
- int (*show_format)(struct trace_seq *s);
- int (*define_fields)(void);
+ int (*show_format)(struct ftrace_event_call *call,
+ struct trace_seq *s);
+ int (*define_fields)(struct ftrace_event_call *);
struct list_head fields;
int filter_active;
- void *filter;
+ struct event_filter *filter;
void *mod;
+ void *data;
atomic_t profile_count;
int (*profile_enable)(struct ftrace_event_call *);
@@ -129,15 +137,25 @@ struct ftrace_event_call {
#define MAX_FILTER_PRED 32
#define MAX_FILTER_STR_VAL 128
-extern int init_preds(struct ftrace_event_call *call);
extern void destroy_preds(struct ftrace_event_call *call);
extern int filter_match_preds(struct ftrace_event_call *call, void *rec);
-extern int filter_current_check_discard(struct ftrace_event_call *call,
+extern int filter_current_check_discard(struct ring_buffer *buffer,
+ struct ftrace_event_call *call,
void *rec,
struct ring_buffer_event *event);
-extern int trace_define_field(struct ftrace_event_call *call, char *type,
- char *name, int offset, int size, int is_signed);
+enum {
+ FILTER_OTHER = 0,
+ FILTER_STATIC_STRING,
+ FILTER_DYN_STRING,
+ FILTER_PTR_STRING,
+};
+
+extern int trace_define_field(struct ftrace_event_call *call,
+ const char *type, const char *name,
+ int offset, int size, int is_signed,
+ int filter_type);
+extern int trace_define_common_fields(struct ftrace_event_call *call);
#define is_signed_type(type) (((type)(-1)) < 0)
@@ -162,11 +180,4 @@ do { \
__trace_printk(ip, fmt, ##args); \
} while (0)
-#define __common_field(type, item, is_signed) \
- ret = trace_define_field(event_call, #type, "common_" #item, \
- offsetof(typeof(field.ent), item), \
- sizeof(field.ent.item), is_signed); \
- if (ret) \
- return ret;
-
#endif /* _LINUX_FTRACE_EVENT_H */
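
trace_define_field() now takes const strings plus a filter_type, and the removed __common_field macro is replaced by trace_define_common_fields(). A sketch of a define_fields callback under the new signature (the event and its entry struct are hypothetical):

static int my_event_define_fields(struct ftrace_event_call *call)
{
	int ret;

	ret = trace_define_common_fields(call);
	if (ret)
		return ret;

	return trace_define_field(call, "unsigned long", "ip",
				  offsetof(struct my_entry, ip),
				  sizeof(unsigned long), 0, FILTER_OTHER);
}
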
diff --git a/include/linux/hardirq.h b/include/linux/hardirq.h
index 8246c697863..6d527ee82b2 100644
--- a/include/linux/hardirq.h
+++ b/include/linux/hardirq.h
@@ -64,6 +64,12 @@
#define HARDIRQ_OFFSET (1UL << HARDIRQ_SHIFT)
#define NMI_OFFSET (1UL << NMI_SHIFT)
+#ifndef PREEMPT_ACTIVE
+#define PREEMPT_ACTIVE_BITS 1
+#define PREEMPT_ACTIVE_SHIFT (NMI_SHIFT + NMI_BITS)
+#define PREEMPT_ACTIVE (__IRQ_MASK(PREEMPT_ACTIVE_BITS) << PREEMPT_ACTIVE_SHIFT)
+#endif
+
#if PREEMPT_ACTIVE < (1 << (NMI_SHIFT + NMI_BITS))
#error PREEMPT_ACTIVE is too low!
#endif
@@ -132,7 +138,7 @@ static inline void account_system_vtime(struct task_struct *tsk)
}
#endif
-#if defined(CONFIG_NO_HZ) && !defined(CONFIG_CLASSIC_RCU)
+#if defined(CONFIG_NO_HZ)
extern void rcu_irq_enter(void);
extern void rcu_irq_exit(void);
extern void rcu_nmi_enter(void);
@@ -142,7 +148,7 @@ extern void rcu_nmi_exit(void);
# define rcu_irq_exit() do { } while (0)
# define rcu_nmi_enter() do { } while (0)
# define rcu_nmi_exit() do { } while (0)
-#endif /* #if defined(CONFIG_NO_HZ) && !defined(CONFIG_CLASSIC_RCU) */
+#endif /* #if defined(CONFIG_NO_HZ) */
/*
* It is safe to do non-atomic ops on ->hardirq_context,
diff --git a/include/linux/init_task.h b/include/linux/init_task.h
index 7fc01b13be4..9e7f2e8fc66 100644
--- a/include/linux/init_task.h
+++ b/include/linux/init_task.h
@@ -94,6 +94,16 @@ extern struct group_info init_groups;
# define CAP_INIT_BSET CAP_INIT_EFF_SET
#endif
+#ifdef CONFIG_TREE_PREEMPT_RCU
+#define INIT_TASK_RCU_PREEMPT(tsk) \
+ .rcu_read_lock_nesting = 0, \
+ .rcu_read_unlock_special = 0, \
+ .rcu_blocked_node = NULL, \
+ .rcu_node_entry = LIST_HEAD_INIT(tsk.rcu_node_entry),
+#else
+#define INIT_TASK_RCU_PREEMPT(tsk)
+#endif
+
extern struct cred init_cred;
#ifdef CONFIG_PERF_COUNTERS
@@ -173,6 +183,7 @@ extern struct cred init_cred;
INIT_LOCKDEP \
INIT_FTRACE_GRAPH \
INIT_TRACE_RECURSION \
+ INIT_TASK_RCU_PREEMPT(tsk) \
}
diff --git a/include/linux/interrupt.h b/include/linux/interrupt.h
index 35e7df1e9f3..1ac57e522a1 100644
--- a/include/linux/interrupt.h
+++ b/include/linux/interrupt.h
@@ -50,6 +50,9 @@
* IRQF_IRQPOLL - Interrupt is used for polling (only the interrupt that is
* registered first in an shared interrupt is considered for
* performance reasons)
+ * IRQF_ONESHOT - Interrupt is not re-enabled after the hardirq handler has finished.
+ * Used by threaded interrupts which need to keep the
+ * irq line disabled until the threaded handler has been run.
*/
#define IRQF_DISABLED 0x00000020
#define IRQF_SAMPLE_RANDOM 0x00000040
@@ -59,6 +62,7 @@
#define IRQF_PERCPU 0x00000400
#define IRQF_NOBALANCING 0x00000800
#define IRQF_IRQPOLL 0x00001000
+#define IRQF_ONESHOT 0x00002000
/*
* Bits used by threaded handlers:
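
IRQF_ONESHOT exists for threaded handlers on level-triggered lines: the irq stays masked after the (possibly default) hardirq handler returns and is only unmasked once the thread finishes, so the device can be quiesced from sleepable context. A typical request, sketched for a hypothetical I2C device:

	ret = request_threaded_irq(client->irq, NULL, mydev_irq_thread,
				   IRQF_TRIGGER_LOW | IRQF_ONESHOT,
				   "mydev", mydev);
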
diff --git a/include/linux/irq.h b/include/linux/irq.h
index cb2e77a3f7f..ae9653dbcd7 100644
--- a/include/linux/irq.h
+++ b/include/linux/irq.h
@@ -69,6 +69,8 @@ typedef void (*irq_flow_handler_t)(unsigned int irq,
#define IRQ_MOVE_PCNTXT 0x01000000 /* IRQ migration from process context */
#define IRQ_AFFINITY_SET 0x02000000 /* IRQ affinity was set from userspace*/
#define IRQ_SUSPENDED 0x04000000 /* IRQ has gone through suspend sequence */
+#define IRQ_ONESHOT 0x08000000 /* IRQ is not unmasked after hardirq */
+#define IRQ_NESTED_THREAD 0x10000000 /* IRQ is nested into another, no own handler thread */
#ifdef CONFIG_IRQ_PER_CPU
# define CHECK_IRQ_PER_CPU(var) ((var) & IRQ_PER_CPU)
@@ -100,6 +102,9 @@ struct msi_desc;
* @set_type: set the flow type (IRQ_TYPE_LEVEL/etc.) of an IRQ
* @set_wake: enable/disable power-management wake-on of an IRQ
*
+ * @bus_lock: function to lock access to slow bus (i2c) chips
+ * @bus_sync_unlock: function to sync and unlock slow bus (i2c) chips
+ *
* @release: release function solely used by UML
* @typename: obsoleted by name, kept as migration helper
*/
@@ -123,6 +128,9 @@ struct irq_chip {
int (*set_type)(unsigned int irq, unsigned int flow_type);
int (*set_wake)(unsigned int irq, unsigned int on);
+ void (*bus_lock)(unsigned int irq);
+ void (*bus_sync_unlock)(unsigned int irq);
+
/* Currently used only by UML, might disappear one day.*/
#ifdef CONFIG_IRQ_RELEASE_METHOD
void (*release)(unsigned int irq, void *dev_id);
@@ -220,13 +228,6 @@ static inline struct irq_desc *move_irq_desc(struct irq_desc *desc, int node)
extern struct irq_desc *irq_to_desc_alloc_node(unsigned int irq, int node);
/*
- * Migration helpers for obsolete names, they will go away:
- */
-#define hw_interrupt_type irq_chip
-#define no_irq_type no_irq_chip
-typedef struct irq_desc irq_desc_t;
-
-/*
* Pick up the arch-dependent methods:
*/
#include <asm/hw_irq.h>
@@ -289,6 +290,7 @@ extern void handle_edge_irq(unsigned int irq, struct irq_desc *desc);
extern void handle_simple_irq(unsigned int irq, struct irq_desc *desc);
extern void handle_percpu_irq(unsigned int irq, struct irq_desc *desc);
extern void handle_bad_irq(unsigned int irq, struct irq_desc *desc);
+extern void handle_nested_irq(unsigned int irq);
/*
* Monolithic do_IRQ implementation.
@@ -379,6 +381,8 @@ set_irq_chained_handler(unsigned int irq,
__set_irq_handler(irq, handle, 1, NULL);
}
+extern void set_irq_nested_thread(unsigned int irq, int nest);
+
extern void set_irq_noprobe(unsigned int irq);
extern void set_irq_probe(unsigned int irq);
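
The bus_lock/bus_sync_unlock pair lets an irq_chip whose registers sit behind a slow bus (I2C/SPI) cache mask/unmask changes and flush them from sleepable context. A sketch of the shape such a chip might take (device specifics hypothetical):

static void my_bus_lock(unsigned int irq)
{
	mutex_lock(&my_chip_mutex);
}

static void my_bus_sync_unlock(unsigned int irq)
{
	/* push any cached mask/unmask bits out over I2C here */
	mutex_unlock(&my_chip_mutex);
}

static struct irq_chip my_irq_chip = {
	.name		 = "my-i2c-chip",
	.bus_lock	 = my_bus_lock,
	.bus_sync_unlock = my_bus_sync_unlock,
	/* .mask/.unmask only update the cached bits */
};
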
diff --git a/include/linux/irqnr.h b/include/linux/irqnr.h
index ec87b212ff7..7bf89bc8cbc 100644
--- a/include/linux/irqnr.h
+++ b/include/linux/irqnr.h
@@ -41,6 +41,12 @@ extern struct irq_desc *irq_to_desc(unsigned int irq);
; \
else
+#ifdef CONFIG_SMP
+#define irq_node(irq) (irq_to_desc(irq)->node)
+#else
+#define irq_node(irq) 0
+#endif
+
#endif /* CONFIG_GENERIC_HARDIRQS */
#define for_each_irq_nr(irq) \
diff --git a/include/linux/kernel.h b/include/linux/kernel.h
index d6320a3e8de..2b5b1e0899a 100644
--- a/include/linux/kernel.h
+++ b/include/linux/kernel.h
@@ -125,7 +125,7 @@ extern int _cond_resched(void);
#endif
#ifdef CONFIG_DEBUG_SPINLOCK_SLEEP
- void __might_sleep(char *file, int line);
+ void __might_sleep(char *file, int line, int preempt_offset);
/**
* might_sleep - annotation for functions that can sleep
*
@@ -137,8 +137,9 @@ extern int _cond_resched(void);
* supposed to.
*/
# define might_sleep() \
- do { __might_sleep(__FILE__, __LINE__); might_resched(); } while (0)
+ do { __might_sleep(__FILE__, __LINE__, 0); might_resched(); } while (0)
#else
+ static inline void __might_sleep(char *file, int line, int preempt_offset) { }
# define might_sleep() do { might_resched(); } while (0)
#endif
diff --git a/include/linux/key.h b/include/linux/key.h
index e544f466d69..cd50dfa1d4c 100644
--- a/include/linux/key.h
+++ b/include/linux/key.h
@@ -129,7 +129,10 @@ struct key {
struct rw_semaphore sem; /* change vs change sem */
struct key_user *user; /* owner of this key */
void *security; /* security data for this key */
- time_t expiry; /* time at which key expires (or 0) */
+ union {
+ time_t expiry; /* time at which key expires (or 0) */
+ time_t revoked_at; /* time at which key was revoked */
+ };
uid_t uid;
gid_t gid;
key_perm_t perm; /* access permissions */
@@ -275,6 +278,8 @@ static inline key_serial_t key_serial(struct key *key)
extern ctl_table key_sysctls[];
#endif
+extern void key_replace_session_keyring(void);
+
/*
* the userspace interface
*/
@@ -297,6 +302,7 @@ extern void key_init(void);
#define key_fsuid_changed(t) do { } while(0)
#define key_fsgid_changed(t) do { } while(0)
#define key_init() do { } while(0)
+#define key_replace_session_keyring() do { } while(0)
#endif /* CONFIG_KEYS */
#endif /* __KERNEL__ */
diff --git a/include/linux/keyctl.h b/include/linux/keyctl.h
index c0688eb7209..bd383f1944f 100644
--- a/include/linux/keyctl.h
+++ b/include/linux/keyctl.h
@@ -52,5 +52,6 @@
#define KEYCTL_SET_TIMEOUT 15 /* set key timeout */
#define KEYCTL_ASSUME_AUTHORITY 16 /* assume request_key() authorisation */
#define KEYCTL_GET_SECURITY 17 /* get key security label */
+#define KEYCTL_SESSION_TO_PARENT 18 /* apply session keyring to parent process */
#endif /* _LINUX_KEYCTL_H */
diff --git a/include/linux/kmemcheck.h b/include/linux/kmemcheck.h
index 47b39b7c7e8..dc2fd545db0 100644
--- a/include/linux/kmemcheck.h
+++ b/include/linux/kmemcheck.h
@@ -34,6 +34,8 @@ void kmemcheck_mark_initialized_pages(struct page *p, unsigned int n);
int kmemcheck_show_addr(unsigned long address);
int kmemcheck_hide_addr(unsigned long address);
+bool kmemcheck_is_obj_initialized(unsigned long addr, size_t size);
+
#else
#define kmemcheck_enabled 0
@@ -99,6 +101,11 @@ static inline void kmemcheck_mark_initialized_pages(struct page *p,
{
}
+static inline bool kmemcheck_is_obj_initialized(unsigned long addr, size_t size)
+{
+ return true;
+}
+
#endif /* CONFIG_KMEMCHECK */
/*
diff --git a/include/linux/kmemleak.h b/include/linux/kmemleak.h
index 6a63807f714..3c7497d46ee 100644
--- a/include/linux/kmemleak.h
+++ b/include/linux/kmemleak.h
@@ -23,18 +23,18 @@
#ifdef CONFIG_DEBUG_KMEMLEAK
-extern void kmemleak_init(void);
+extern void kmemleak_init(void) __ref;
extern void kmemleak_alloc(const void *ptr, size_t size, int min_count,
- gfp_t gfp);
-extern void kmemleak_free(const void *ptr);
-extern void kmemleak_free_part(const void *ptr, size_t size);
+ gfp_t gfp) __ref;
+extern void kmemleak_free(const void *ptr) __ref;
+extern void kmemleak_free_part(const void *ptr, size_t size) __ref;
extern void kmemleak_padding(const void *ptr, unsigned long offset,
- size_t size);
-extern void kmemleak_not_leak(const void *ptr);
-extern void kmemleak_ignore(const void *ptr);
+ size_t size) __ref;
+extern void kmemleak_not_leak(const void *ptr) __ref;
+extern void kmemleak_ignore(const void *ptr) __ref;
extern void kmemleak_scan_area(const void *ptr, unsigned long offset,
- size_t length, gfp_t gfp);
-extern void kmemleak_no_scan(const void *ptr);
+ size_t length, gfp_t gfp) __ref;
+extern void kmemleak_no_scan(const void *ptr) __ref;
static inline void kmemleak_alloc_recursive(const void *ptr, size_t size,
int min_count, unsigned long flags,
diff --git a/include/linux/libata.h b/include/linux/libata.h
index e5b6e33c657..76319bf03e3 100644
--- a/include/linux/libata.h
+++ b/include/linux/libata.h
@@ -143,7 +143,6 @@ enum {
ATA_DFLAG_PIO = (1 << 12), /* device limited to PIO mode */
ATA_DFLAG_NCQ_OFF = (1 << 13), /* device limited to non-NCQ mode */
- ATA_DFLAG_SPUNDOWN = (1 << 14), /* XXX: for spindown_compat */
ATA_DFLAG_SLEEPING = (1 << 15), /* device is sleeping */
ATA_DFLAG_DUBIOUS_XFER = (1 << 16), /* data transfer not verified */
ATA_DFLAG_NO_UNLOAD = (1 << 17), /* device doesn't support unload */
@@ -190,6 +189,7 @@ enum {
ATA_FLAG_NO_POWEROFF_SPINDOWN = (1 << 11), /* don't spindown before poweroff */
ATA_FLAG_NO_HIBERNATE_SPINDOWN = (1 << 12), /* don't spindown before hibernation */
ATA_FLAG_DEBUGMSG = (1 << 13),
+ ATA_FLAG_FPDMA_AA = (1 << 14), /* driver supports Auto-Activate */
ATA_FLAG_IGN_SIMPLEX = (1 << 15), /* ignore SIMPLEX */
ATA_FLAG_NO_IORDY = (1 << 16), /* controller lacks iordy */
ATA_FLAG_ACPI_SATA = (1 << 17), /* need native SATA ACPI layout */
@@ -386,6 +386,7 @@ enum {
ATA_HORKAGE_FIRMWARE_WARN = (1 << 12), /* firmware update warning */
ATA_HORKAGE_1_5_GBPS = (1 << 13), /* force 1.5 Gbps */
ATA_HORKAGE_NOSETXFER = (1 << 14), /* skip SETXFER, SATA only */
+ ATA_HORKAGE_BROKEN_FPDMA_AA = (1 << 15), /* skip AA */
/* DMA mask for user DMA control: User visible values; DO NOT
renumber */
diff --git a/include/linux/lockdep.h b/include/linux/lockdep.h
index b25d1b53df0..9ccf0e286b2 100644
--- a/include/linux/lockdep.h
+++ b/include/linux/lockdep.h
@@ -149,6 +149,12 @@ struct lock_list {
struct lock_class *class;
struct stack_trace trace;
int distance;
+
+ /*
+ * The parent field is used to implement breadth-first search, and the
+ * bit 0 is reused to indicate if the lock has been accessed in BFS.
+ */
+ struct lock_list *parent;
};
/*
@@ -208,10 +214,12 @@ struct held_lock {
* interrupt context:
*/
unsigned int irq_context:2; /* bit 0 - soft, bit 1 - hard */
- unsigned int trylock:1;
+ unsigned int trylock:1; /* 16 bits */
+
unsigned int read:2; /* see lock_acquire() comment */
unsigned int check:2; /* see lock_acquire() comment */
unsigned int hardirqs_off:1;
+ unsigned int references:11; /* 32 bits */
};
/*
@@ -291,6 +299,10 @@ extern void lock_acquire(struct lockdep_map *lock, unsigned int subclass,
extern void lock_release(struct lockdep_map *lock, int nested,
unsigned long ip);
+#define lockdep_is_held(lock) lock_is_held(&(lock)->dep_map)
+
+extern int lock_is_held(struct lockdep_map *lock);
+
extern void lock_set_class(struct lockdep_map *lock, const char *name,
struct lock_class_key *key, unsigned int subclass,
unsigned long ip);
@@ -309,6 +321,8 @@ extern void lockdep_trace_alloc(gfp_t mask);
#define lockdep_depth(tsk) (debug_locks ? (tsk)->lockdep_depth : 0)
+#define lockdep_assert_held(l) WARN_ON(debug_locks && !lockdep_is_held(l))
+
#else /* !LOCKDEP */
static inline void lockdep_off(void)
@@ -353,6 +367,8 @@ struct lock_class_key { };
#define lockdep_depth(tsk) (0)
+#define lockdep_assert_held(l) do { } while (0)
+
#endif /* !LOCKDEP */
#ifdef CONFIG_LOCK_STAT
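
lockdep_assert_held() turns a "caller must hold the lock" comment into a checked assertion that compiles away without CONFIG_LOCKDEP. A sketch (struct hypothetical):

static void counter_bump(struct my_counter *c)
{
	lockdep_assert_held(&c->lock);	/* warn if called unlocked */
	c->value++;
}
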
diff --git a/include/linux/lsm_audit.h b/include/linux/lsm_audit.h
index e461b2c3d71..190c3785487 100644
--- a/include/linux/lsm_audit.h
+++ b/include/linux/lsm_audit.h
@@ -33,6 +33,7 @@ struct common_audit_data {
#define LSM_AUDIT_DATA_IPC 4
#define LSM_AUDIT_DATA_TASK 5
#define LSM_AUDIT_DATA_KEY 6
+#define LSM_AUDIT_NO_AUDIT 7
struct task_struct *tsk;
union {
struct {
@@ -66,16 +67,19 @@ struct common_audit_data {
} key_struct;
#endif
} u;
- const char *function;
/* this union contains LSM specific data */
union {
+#ifdef CONFIG_SECURITY_SMACK
/* SMACK data */
struct smack_audit_data {
+ const char *function;
char *subject;
char *object;
char *request;
int result;
} smack_audit_data;
+#endif
+#ifdef CONFIG_SECURITY_SELINUX
/* SELinux data */
struct {
u32 ssid;
@@ -83,10 +87,12 @@ struct common_audit_data {
u16 tclass;
u32 requested;
u32 audited;
+ u32 denied;
struct av_decision *avd;
int result;
} selinux_audit_data;
- } lsm_priv;
+#endif
+ };
/* these callback will be implemented by a specific LSM */
void (*lsm_pre_audit)(struct audit_buffer *, void *);
void (*lsm_post_audit)(struct audit_buffer *, void *);
@@ -104,7 +110,7 @@ int ipv6_skb_to_auditdata(struct sk_buff *skb,
/* Initialize an LSM audit data structure. */
#define COMMON_AUDIT_DATA_INIT(_d, _t) \
{ memset((_d), 0, sizeof(struct common_audit_data)); \
- (_d)->type = LSM_AUDIT_DATA_##_t; (_d)->function = __func__; }
+ (_d)->type = LSM_AUDIT_DATA_##_t; }
void common_lsm_audit(struct common_audit_data *a);
diff --git a/include/linux/module.h b/include/linux/module.h
index 098bdb7bfac..f8f92d015ef 100644
--- a/include/linux/module.h
+++ b/include/linux/module.h
@@ -17,10 +17,12 @@
#include <linux/moduleparam.h>
#include <linux/marker.h>
#include <linux/tracepoint.h>
-#include <asm/local.h>
+#include <asm/local.h>
#include <asm/module.h>
+#include <trace/events/module.h>
+
/* Not Yet Implemented */
#define MODULE_SUPPORTED_DEVICE(name)
@@ -462,7 +464,10 @@ static inline local_t *__module_ref_addr(struct module *mod, int cpu)
static inline void __module_get(struct module *module)
{
if (module) {
- local_inc(__module_ref_addr(module, get_cpu()));
+ unsigned int cpu = get_cpu();
+ local_inc(__module_ref_addr(module, cpu));
+ trace_module_get(module, _THIS_IP_,
+ local_read(__module_ref_addr(module, cpu)));
put_cpu();
}
}
@@ -473,8 +478,11 @@ static inline int try_module_get(struct module *module)
if (module) {
unsigned int cpu = get_cpu();
- if (likely(module_is_live(module)))
+ if (likely(module_is_live(module))) {
local_inc(__module_ref_addr(module, cpu));
+ trace_module_get(module, _THIS_IP_,
+ local_read(__module_ref_addr(module, cpu)));
+ }
else
ret = 0;
put_cpu();
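
The get paths now emit a trace_module_get event (with the post-increment refcount) on every successful reference, which makes module refcount leaks traceable. The calling convention is unchanged; a sketch:

	if (!try_module_get(owner))
		return -ENODEV;	/* module is unloading */
	/* ... use the module's code/data ... */
	module_put(owner);
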
diff --git a/include/linux/nfs4.h b/include/linux/nfs4.h
index bd2eba53066..33b283601f6 100644
--- a/include/linux/nfs4.h
+++ b/include/linux/nfs4.h
@@ -472,6 +472,7 @@ enum lock_type4 {
#define NFSPROC4_NULL 0
#define NFSPROC4_COMPOUND 1
+#define NFS4_VERSION 4
#define NFS4_MINOR_VERSION 0
#if defined(CONFIG_NFS_V4_1)
diff --git a/include/linux/nfs_fs_sb.h b/include/linux/nfs_fs_sb.h
index 19fe15d1204..320569eabe3 100644
--- a/include/linux/nfs_fs_sb.h
+++ b/include/linux/nfs_fs_sb.h
@@ -167,6 +167,15 @@ struct nfs_server {
#define NFS_CAP_SYMLINKS (1U << 2)
#define NFS_CAP_ACLS (1U << 3)
#define NFS_CAP_ATOMIC_OPEN (1U << 4)
+#define NFS_CAP_CHANGE_ATTR (1U << 5)
+#define NFS_CAP_FILEID (1U << 6)
+#define NFS_CAP_MODE (1U << 7)
+#define NFS_CAP_NLINK (1U << 8)
+#define NFS_CAP_OWNER (1U << 9)
+#define NFS_CAP_OWNER_GROUP (1U << 10)
+#define NFS_CAP_ATIME (1U << 11)
+#define NFS_CAP_CTIME (1U << 12)
+#define NFS_CAP_MTIME (1U << 13)
/* maximum number of slots to use */
diff --git a/include/linux/nmi.h b/include/linux/nmi.h
index 29af2d5df09..b752e807add 100644
--- a/include/linux/nmi.h
+++ b/include/linux/nmi.h
@@ -28,8 +28,23 @@ static inline void acpi_nmi_disable(void) { }
static inline void acpi_nmi_enable(void) { }
#endif
-#ifndef trigger_all_cpu_backtrace
-#define trigger_all_cpu_backtrace() do { } while (0)
+/*
+ * Create trigger_all_cpu_backtrace() out of the arch-provided
+ * base function. Return whether such support was available,
+ * to allow calling code to fall back to some other mechanism:
+ */
+#ifdef arch_trigger_all_cpu_backtrace
+static inline bool trigger_all_cpu_backtrace(void)
+{
+ arch_trigger_all_cpu_backtrace();
+
+ return true;
+}
+#else
+static inline bool trigger_all_cpu_backtrace(void)
+{
+ return false;
+}
#endif
#endif
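
trigger_all_cpu_backtrace() now reports whether an arch-level (typically NMI-based) implementation exists, so callers can degrade gracefully. One possible fallback:

	if (!trigger_all_cpu_backtrace())
		dump_stack();	/* no arch support: dump this CPU only */
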
diff --git a/include/linux/oprofile.h b/include/linux/oprofile.h
index 1d9518bc4c5..5171639ecf0 100644
--- a/include/linux/oprofile.h
+++ b/include/linux/oprofile.h
@@ -67,6 +67,9 @@ struct oprofile_operations {
/* Initiate a stack backtrace. Optional. */
void (*backtrace)(struct pt_regs * const regs, unsigned int depth);
+
+ /* Multiplex between different events. Optional. */
+ int (*switch_events)(void);
/* CPU identification string. */
char * cpu_type;
};
@@ -171,7 +174,6 @@ struct op_sample;
struct op_entry {
struct ring_buffer_event *event;
struct op_sample *sample;
- unsigned long irq_flags;
unsigned long size;
unsigned long *data;
};
@@ -180,6 +182,7 @@ void oprofile_write_reserve(struct op_entry *entry,
struct pt_regs * const regs,
unsigned long pc, int code, int size);
int oprofile_add_data(struct op_entry *entry, unsigned long val);
+int oprofile_add_data64(struct op_entry *entry, u64 val);
int oprofile_write_commit(struct op_entry *entry);
#endif /* OPROFILE_H */
diff --git a/include/linux/pagemap.h b/include/linux/pagemap.h
index aec3252afcf..ed5d7501e18 100644
--- a/include/linux/pagemap.h
+++ b/include/linux/pagemap.h
@@ -132,7 +132,7 @@ static inline int page_cache_get_speculative(struct page *page)
{
VM_BUG_ON(in_interrupt());
-#if !defined(CONFIG_SMP) && defined(CONFIG_CLASSIC_RCU)
+#if !defined(CONFIG_SMP) && defined(CONFIG_TREE_RCU)
# ifdef CONFIG_PREEMPT
VM_BUG_ON(!in_atomic());
# endif
@@ -170,7 +170,7 @@ static inline int page_cache_add_speculative(struct page *page, int count)
{
VM_BUG_ON(in_interrupt());
-#if !defined(CONFIG_SMP) && defined(CONFIG_CLASSIC_RCU)
+#if !defined(CONFIG_SMP) && defined(CONFIG_TREE_RCU)
# ifdef CONFIG_PREEMPT
VM_BUG_ON(!in_atomic());
# endif
diff --git a/include/linux/pci_ids.h b/include/linux/pci_ids.h
index 73b46b6b904..c8fdcadce43 100644
--- a/include/linux/pci_ids.h
+++ b/include/linux/pci_ids.h
@@ -376,6 +376,9 @@
#define PCI_DEVICE_ID_ATI_IXP600_IDE 0x438c
#define PCI_DEVICE_ID_ATI_IXP700_SATA 0x4390
#define PCI_DEVICE_ID_ATI_IXP700_IDE 0x439c
+/* AMD SB Chipset */
+#define PCI_DEVICE_ID_AMD_SB900_IDE 0x780c
+#define PCI_DEVICE_ID_AMD_SB900_SATA_IDE 0x7800
#define PCI_VENDOR_ID_VLSI 0x1004
#define PCI_DEVICE_ID_VLSI_82C592 0x0005
@@ -537,6 +540,7 @@
#define PCI_DEVICE_ID_AMD_8131_BRIDGE 0x7450
#define PCI_DEVICE_ID_AMD_8131_APIC 0x7451
#define PCI_DEVICE_ID_AMD_8132_BRIDGE 0x7458
+#define PCI_DEVICE_ID_AMD_CS5535_IDE 0x208F
#define PCI_DEVICE_ID_AMD_CS5536_ISA 0x2090
#define PCI_DEVICE_ID_AMD_CS5536_FLASH 0x2091
#define PCI_DEVICE_ID_AMD_CS5536_AUDIO 0x2093
diff --git a/include/linux/perf_counter.h b/include/linux/perf_counter.h
index b53f7006cc4..972f90d7a32 100644
--- a/include/linux/perf_counter.h
+++ b/include/linux/perf_counter.h
@@ -216,6 +216,7 @@ struct perf_counter_attr {
#define PERF_COUNTER_IOC_REFRESH _IO ('$', 2)
#define PERF_COUNTER_IOC_RESET _IO ('$', 3)
#define PERF_COUNTER_IOC_PERIOD _IOW('$', 4, u64)
+#define PERF_COUNTER_IOC_SET_OUTPUT _IO ('$', 5)
enum perf_counter_ioc_flags {
PERF_IOC_FLAG_GROUP = 1U << 0,
@@ -415,6 +416,9 @@ enum perf_callchain_context {
PERF_CONTEXT_MAX = (__u64)-4095,
};
+#define PERF_FLAG_FD_NO_GROUP (1U << 0)
+#define PERF_FLAG_FD_OUTPUT (1U << 1)
+
#ifdef __KERNEL__
/*
* Kernel-internal data types and definitions:
@@ -536,6 +540,7 @@ struct perf_counter {
struct list_head sibling_list;
int nr_siblings;
struct perf_counter *group_leader;
+ struct perf_counter *output;
const struct pmu *pmu;
enum perf_counter_active_state state;
@@ -761,6 +766,8 @@ extern int sysctl_perf_counter_mlock;
extern int sysctl_perf_counter_sample_rate;
extern void perf_counter_init(void);
+extern void perf_tpcounter_event(int event_id, u64 addr, u64 count,
+ void *record, int entry_size);
#ifndef perf_misc_flags
#define perf_misc_flags(regs) (user_mode(regs) ? PERF_EVENT_MISC_USER : \
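
PERF_COUNTER_IOC_SET_OUTPUT (with PERF_FLAG_FD_OUTPUT as its counter-creation analogue) redirects one counter's events into another counter's mmap ring buffer, so a tool can multiplex several counters through a single buffer. A userspace sketch, assuming fd1 and fd2 came from earlier perf counter syscalls:

	/* deliver fd2's samples into fd1's ring buffer */
	if (ioctl(fd2, PERF_COUNTER_IOC_SET_OUTPUT, fd1) < 0)
		perror("PERF_COUNTER_IOC_SET_OUTPUT");
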
diff --git a/include/linux/rcuclassic.h b/include/linux/rcuclassic.h
deleted file mode 100644
index bfd92e1e5d2..00000000000
--- a/include/linux/rcuclassic.h
+++ /dev/null
@@ -1,178 +0,0 @@
-/*
- * Read-Copy Update mechanism for mutual exclusion (classic version)
- *
- * This program is free software; you can redistribute it and/or modify
- * it under the terms of the GNU General Public License as published by
- * the Free Software Foundation; either version 2 of the License, or
- * (at your option) any later version.
- *
- * This program is distributed in the hope that it will be useful,
- * but WITHOUT ANY WARRANTY; without even the implied warranty of
- * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
- * GNU General Public License for more details.
- *
- * You should have received a copy of the GNU General Public License
- * along with this program; if not, write to the Free Software
- * Foundation, Inc., 59 Temple Place - Suite 330, Boston, MA 02111-1307, USA.
- *
- * Copyright IBM Corporation, 2001
- *
- * Author: Dipankar Sarma <dipankar@in.ibm.com>
- *
- * Based on the original work by Paul McKenney <paulmck@us.ibm.com>
- * and inputs from Rusty Russell, Andrea Arcangeli and Andi Kleen.
- * Papers:
- * http://www.rdrop.com/users/paulmck/paper/rclockpdcsproof.pdf
- * http://lse.sourceforge.net/locking/rclock_OLS.2001.05.01c.sc.pdf (OLS2001)
- *
- * For detailed explanation of Read-Copy Update mechanism see -
- * Documentation/RCU
- *
- */
-
-#ifndef __LINUX_RCUCLASSIC_H
-#define __LINUX_RCUCLASSIC_H
-
-#include <linux/cache.h>
-#include <linux/spinlock.h>
-#include <linux/threads.h>
-#include <linux/cpumask.h>
-#include <linux/seqlock.h>
-
-#ifdef CONFIG_RCU_CPU_STALL_DETECTOR
-#define RCU_SECONDS_TILL_STALL_CHECK (10 * HZ) /* for rcp->jiffies_stall */
-#define RCU_SECONDS_TILL_STALL_RECHECK (30 * HZ) /* for rcp->jiffies_stall */
-#endif /* #ifdef CONFIG_RCU_CPU_STALL_DETECTOR */
-
-/* Global control variables for rcupdate callback mechanism. */
-struct rcu_ctrlblk {
- long cur; /* Current batch number. */
- long completed; /* Number of the last completed batch */
- long pending; /* Number of the last pending batch */
-#ifdef CONFIG_RCU_CPU_STALL_DETECTOR
- unsigned long gp_start; /* Time at which GP started in jiffies. */
- unsigned long jiffies_stall;
- /* Time at which to check for CPU stalls. */
-#endif /* #ifdef CONFIG_RCU_CPU_STALL_DETECTOR */
-
- int signaled;
-
- spinlock_t lock ____cacheline_internodealigned_in_smp;
- DECLARE_BITMAP(cpumask, NR_CPUS); /* CPUs that need to switch for */
- /* current batch to proceed. */
-} ____cacheline_internodealigned_in_smp;
-
-/* Is batch a before batch b ? */
-static inline int rcu_batch_before(long a, long b)
-{
- return (a - b) < 0;
-}
-
-/* Is batch a after batch b ? */
-static inline int rcu_batch_after(long a, long b)
-{
- return (a - b) > 0;
-}
-
-/* Per-CPU data for Read-Copy UPdate. */
-struct rcu_data {
- /* 1) quiescent state handling : */
- long quiescbatch; /* Batch # for grace period */
- int passed_quiesc; /* User-mode/idle loop etc. */
- int qs_pending; /* core waits for quiesc state */
-
- /* 2) batch handling */
- /*
- * if nxtlist is not NULL, then:
- * batch:
- * The batch # for the last entry of nxtlist
- * [*nxttail[1], NULL = *nxttail[2]):
- * Entries that batch # <= batch
- * [*nxttail[0], *nxttail[1]):
- * Entries that batch # <= batch - 1
- * [nxtlist, *nxttail[0]):
- * Entries that batch # <= batch - 2
- * The grace period for these entries has completed, and
- * the other grace-period-completed entries may be moved
- * here temporarily in rcu_process_callbacks().
- */
- long batch;
- struct rcu_head *nxtlist;
- struct rcu_head **nxttail[3];
- long qlen; /* # of queued callbacks */
- struct rcu_head *donelist;
- struct rcu_head **donetail;
- long blimit; /* Upper limit on a processed batch */
- int cpu;
- struct rcu_head barrier;
-};
-
-/*
- * Increment the quiescent state counter.
- * The counter is a bit degenerated: We do not need to know
- * how many quiescent states passed, just if there was at least
- * one since the start of the grace period. Thus just a flag.
- */
-extern void rcu_qsctr_inc(int cpu);
-extern void rcu_bh_qsctr_inc(int cpu);
-
-extern int rcu_pending(int cpu);
-extern int rcu_needs_cpu(int cpu);
-
-#ifdef CONFIG_DEBUG_LOCK_ALLOC
-extern struct lockdep_map rcu_lock_map;
-# define rcu_read_acquire() \
- lock_acquire(&rcu_lock_map, 0, 0, 2, 1, NULL, _THIS_IP_)
-# define rcu_read_release() lock_release(&rcu_lock_map, 1, _THIS_IP_)
-#else
-# define rcu_read_acquire() do { } while (0)
-# define rcu_read_release() do { } while (0)
-#endif
-
-#define __rcu_read_lock() \
- do { \
- preempt_disable(); \
- __acquire(RCU); \
- rcu_read_acquire(); \
- } while (0)
-#define __rcu_read_unlock() \
- do { \
- rcu_read_release(); \
- __release(RCU); \
- preempt_enable(); \
- } while (0)
-#define __rcu_read_lock_bh() \
- do { \
- local_bh_disable(); \
- __acquire(RCU_BH); \
- rcu_read_acquire(); \
- } while (0)
-#define __rcu_read_unlock_bh() \
- do { \
- rcu_read_release(); \
- __release(RCU_BH); \
- local_bh_enable(); \
- } while (0)
-
-#define __synchronize_sched() synchronize_rcu()
-
-#define call_rcu_sched(head, func) call_rcu(head, func)
-
-extern void __rcu_init(void);
-#define rcu_init_sched() do { } while (0)
-extern void rcu_check_callbacks(int cpu, int user);
-extern void rcu_restart_cpu(int cpu);
-
-extern long rcu_batches_completed(void);
-extern long rcu_batches_completed_bh(void);
-
-#define rcu_enter_nohz() do { } while (0)
-#define rcu_exit_nohz() do { } while (0)
-
-/* A context switch is a grace period for rcuclassic. */
-static inline int rcu_blocking_is_gp(void)
-{
- return num_online_cpus() == 1;
-}
-
-#endif /* __LINUX_RCUCLASSIC_H */
diff --git a/include/linux/rcupdate.h b/include/linux/rcupdate.h
index 15fbb3ca634..95e0615f4d7 100644
--- a/include/linux/rcupdate.h
+++ b/include/linux/rcupdate.h
@@ -51,18 +51,26 @@ struct rcu_head {
void (*func)(struct rcu_head *head);
};
-/* Internal to kernel, but needed by rcupreempt.h. */
+/* Exported common interfaces */
+extern void synchronize_rcu(void);
+extern void synchronize_rcu_bh(void);
+extern void rcu_barrier(void);
+extern void rcu_barrier_bh(void);
+extern void rcu_barrier_sched(void);
+extern void synchronize_sched_expedited(void);
+extern int sched_expedited_torture_stats(char *page);
+
+/* Internal to kernel */
+extern void rcu_init(void);
+extern void rcu_scheduler_starting(void);
+extern int rcu_needs_cpu(int cpu);
extern int rcu_scheduler_active;
-#if defined(CONFIG_CLASSIC_RCU)
-#include <linux/rcuclassic.h>
-#elif defined(CONFIG_TREE_RCU)
+#if defined(CONFIG_TREE_RCU) || defined(CONFIG_TREE_PREEMPT_RCU)
#include <linux/rcutree.h>
-#elif defined(CONFIG_PREEMPT_RCU)
-#include <linux/rcupreempt.h>
#else
#error "Unknown RCU implementation specified to kernel configuration"
-#endif /* #else #if defined(CONFIG_CLASSIC_RCU) */
+#endif
#define RCU_HEAD_INIT { .next = NULL, .func = NULL }
#define RCU_HEAD(head) struct rcu_head head = RCU_HEAD_INIT
@@ -70,6 +78,16 @@ extern int rcu_scheduler_active;
(ptr)->next = NULL; (ptr)->func = NULL; \
} while (0)
+#ifdef CONFIG_DEBUG_LOCK_ALLOC
+extern struct lockdep_map rcu_lock_map;
+# define rcu_read_acquire() \
+ lock_acquire(&rcu_lock_map, 0, 0, 2, 1, NULL, _THIS_IP_)
+# define rcu_read_release() lock_release(&rcu_lock_map, 1, _THIS_IP_)
+#else
+# define rcu_read_acquire() do { } while (0)
+# define rcu_read_release() do { } while (0)
+#endif
+
/**
* rcu_read_lock - mark the beginning of an RCU read-side critical section.
*
@@ -99,7 +117,12 @@ extern int rcu_scheduler_active;
*
* It is illegal to block while in an RCU read-side critical section.
*/
-#define rcu_read_lock() __rcu_read_lock()
+static inline void rcu_read_lock(void)
+{
+ __rcu_read_lock();
+ __acquire(RCU);
+ rcu_read_acquire();
+}
/**
* rcu_read_unlock - marks the end of an RCU read-side critical section.
@@ -116,7 +139,12 @@ extern int rcu_scheduler_active;
* used as well. RCU does not care how the writers keep out of each
* others' way, as long as they do so.
*/
-#define rcu_read_unlock() __rcu_read_unlock()
+static inline void rcu_read_unlock(void)
+{
+ rcu_read_release();
+ __release(RCU);
+ __rcu_read_unlock();
+}
/**
* rcu_read_lock_bh - mark the beginning of a softirq-only RCU critical section
@@ -129,14 +157,24 @@ extern int rcu_scheduler_active;
* can use just rcu_read_lock().
*
*/
-#define rcu_read_lock_bh() __rcu_read_lock_bh()
+static inline void rcu_read_lock_bh(void)
+{
+ __rcu_read_lock_bh();
+ __acquire(RCU_BH);
+ rcu_read_acquire();
+}
/*
* rcu_read_unlock_bh - marks the end of a softirq-only RCU critical section
*
* See rcu_read_lock_bh() for more information.
*/
-#define rcu_read_unlock_bh() __rcu_read_unlock_bh()
+static inline void rcu_read_unlock_bh(void)
+{
+ rcu_read_release();
+ __release(RCU_BH);
+ __rcu_read_unlock_bh();
+}
/**
* rcu_read_lock_sched - mark the beginning of a RCU-classic critical section
@@ -147,17 +185,34 @@ extern int rcu_scheduler_active;
* - call_rcu_sched() and rcu_barrier_sched()
 * on the write-side to ensure proper synchronization.
*/
-#define rcu_read_lock_sched() preempt_disable()
-#define rcu_read_lock_sched_notrace() preempt_disable_notrace()
+static inline void rcu_read_lock_sched(void)
+{
+ preempt_disable();
+ __acquire(RCU_SCHED);
+ rcu_read_acquire();
+}
+static inline notrace void rcu_read_lock_sched_notrace(void)
+{
+ preempt_disable_notrace();
+ __acquire(RCU_SCHED);
+}
/*
* rcu_read_unlock_sched - marks the end of a RCU-classic critical section
*
* See rcu_read_lock_sched for more information.
*/
-#define rcu_read_unlock_sched() preempt_enable()
-#define rcu_read_unlock_sched_notrace() preempt_enable_notrace()
-
+static inline void rcu_read_unlock_sched(void)
+{
+ rcu_read_release();
+ __release(RCU_SCHED);
+ preempt_enable();
+}
+static inline notrace void rcu_read_unlock_sched_notrace(void)
+{
+ __release(RCU_SCHED);
+ preempt_enable_notrace();
+}
/**
@@ -259,15 +314,4 @@ extern void call_rcu(struct rcu_head *head,
extern void call_rcu_bh(struct rcu_head *head,
void (*func)(struct rcu_head *head));
-/* Exported common interfaces */
-extern void synchronize_rcu(void);
-extern void rcu_barrier(void);
-extern void rcu_barrier_bh(void);
-extern void rcu_barrier_sched(void);
-
-/* Internal to kernel */
-extern void rcu_init(void);
-extern void rcu_scheduler_starting(void);
-extern int rcu_needs_cpu(int cpu);
-
#endif /* __LINUX_RCUPDATE_H */
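/*
 * Editor's sketch, not part of this patch: rcu_read_lock() and
 * rcu_read_unlock() are now inline functions that add the sparse and
 * lockdep annotations, but reader code is unchanged. The struct and
 * list below are hypothetical; assumes <linux/rculist.h>.
 */
struct example {
	struct list_head list;
	int key;
};

static int example_lookup(struct list_head *head, int key)
{
	struct example *p;
	int found = 0;

	rcu_read_lock();		/* begin read-side critical section */
	list_for_each_entry_rcu(p, head, list)
		if (p->key == key) {
			found = 1;
			break;
		}
	rcu_read_unlock();		/* end read-side critical section */
	return found;
}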
diff --git a/include/linux/rcupreempt.h b/include/linux/rcupreempt.h
deleted file mode 100644
index fce522782ff..00000000000
--- a/include/linux/rcupreempt.h
+++ /dev/null
@@ -1,127 +0,0 @@
-/*
- * Read-Copy Update mechanism for mutual exclusion (RT implementation)
- *
- * This program is free software; you can redistribute it and/or modify
- * it under the terms of the GNU General Public License as published by
- * the Free Software Foundation; either version 2 of the License, or
- * (at your option) any later version.
- *
- * This program is distributed in the hope that it will be useful,
- * but WITHOUT ANY WARRANTY; without even the implied warranty of
- * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
- * GNU General Public License for more details.
- *
- * You should have received a copy of the GNU General Public License
- * along with this program; if not, write to the Free Software
- * Foundation, Inc., 59 Temple Place - Suite 330, Boston, MA 02111-1307, USA.
- *
- * Copyright (C) IBM Corporation, 2006
- *
- * Author: Paul McKenney <paulmck@us.ibm.com>
- *
- * Based on the original work by Paul McKenney <paul.mckenney@us.ibm.com>
- * and inputs from Rusty Russell, Andrea Arcangeli and Andi Kleen.
- * Papers:
- * http://www.rdrop.com/users/paulmck/paper/rclockpdcsproof.pdf
- * http://lse.sourceforge.net/locking/rclock_OLS.2001.05.01c.sc.pdf (OLS2001)
- *
- * For detailed explanation of Read-Copy Update mechanism see -
- * Documentation/RCU
- *
- */
-
-#ifndef __LINUX_RCUPREEMPT_H
-#define __LINUX_RCUPREEMPT_H
-
-#include <linux/cache.h>
-#include <linux/spinlock.h>
-#include <linux/threads.h>
-#include <linux/smp.h>
-#include <linux/cpumask.h>
-#include <linux/seqlock.h>
-
-extern void rcu_qsctr_inc(int cpu);
-static inline void rcu_bh_qsctr_inc(int cpu) { }
-
-/*
- * Someone might want to pass call_rcu_bh as a function pointer.
- * So this needs to just be a rename and not a macro function.
- * (no parentheses)
- */
-#define call_rcu_bh call_rcu
-
-/**
- * call_rcu_sched - Queue RCU callback for invocation after sched grace period.
- * @head: structure to be used for queueing the RCU updates.
- * @func: actual update function to be invoked after the grace period
- *
- * The update function will be invoked some time after a full
- * synchronize_sched()-style grace period elapses, in other words after
- * all currently executing preempt-disabled sections of code (including
- * hardirq handlers, NMI handlers, and local_irq_save() blocks) have
- * completed.
- */
-extern void call_rcu_sched(struct rcu_head *head,
- void (*func)(struct rcu_head *head));
-
-extern void __rcu_read_lock(void) __acquires(RCU);
-extern void __rcu_read_unlock(void) __releases(RCU);
-extern int rcu_pending(int cpu);
-extern int rcu_needs_cpu(int cpu);
-
-#define __rcu_read_lock_bh() { rcu_read_lock(); local_bh_disable(); }
-#define __rcu_read_unlock_bh() { local_bh_enable(); rcu_read_unlock(); }
-
-extern void __synchronize_sched(void);
-
-extern void __rcu_init(void);
-extern void rcu_init_sched(void);
-extern void rcu_check_callbacks(int cpu, int user);
-extern void rcu_restart_cpu(int cpu);
-extern long rcu_batches_completed(void);
-
-/*
- * Return the number of RCU batches processed thus far. Useful for
- * debugging and statistics. The _bh variant is identical to straight RCU.
- */
-static inline long rcu_batches_completed_bh(void)
-{
- return rcu_batches_completed();
-}
-
-#ifdef CONFIG_RCU_TRACE
-struct rcupreempt_trace;
-extern long *rcupreempt_flipctr(int cpu);
-extern long rcupreempt_data_completed(void);
-extern int rcupreempt_flip_flag(int cpu);
-extern int rcupreempt_mb_flag(int cpu);
-extern char *rcupreempt_try_flip_state_name(void);
-extern struct rcupreempt_trace *rcupreempt_trace_cpu(int cpu);
-#endif
-
-struct softirq_action;
-
-#ifdef CONFIG_NO_HZ
-extern void rcu_enter_nohz(void);
-extern void rcu_exit_nohz(void);
-#else
-# define rcu_enter_nohz() do { } while (0)
-# define rcu_exit_nohz() do { } while (0)
-#endif
-
-/*
- * A context switch is a grace period for rcupreempt synchronize_rcu()
- * only during early boot, before the scheduler has been initialized.
- * So, how the heck do we get a context switch? Well, if the caller
- * invokes synchronize_rcu(), they are willing to accept a context
- * switch, so we simply pretend that one happened.
- *
- * After boot, there might be a blocked or preempted task in an RCU
- * read-side critical section, so we cannot then take the fastpath.
- */
-static inline int rcu_blocking_is_gp(void)
-{
- return num_online_cpus() == 1 && !rcu_scheduler_active;
-}
-
-#endif /* __LINUX_RCUPREEMPT_H */
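/*
 * Editor's sketch, not part of this patch: call_rcu_sched(), whose
 * kernel-doc is deleted above (the declaration moves to rcutree.h),
 * is typically used to defer freeing until all preempt-disabled
 * regions have finished. Names are hypothetical; assumes
 * <linux/slab.h> for kfree().
 */
struct example_obj {
	struct rcu_head rcu;
	/* ...payload... */
};

static void example_free_cb(struct rcu_head *head)
{
	kfree(container_of(head, struct example_obj, rcu));
}

static void example_defer_free(struct example_obj *obj)
{
	call_rcu_sched(&obj->rcu, example_free_cb); /* freed after sched GP */
}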
diff --git a/include/linux/rcupreempt_trace.h b/include/linux/rcupreempt_trace.h
deleted file mode 100644
index b99ae073192..00000000000
--- a/include/linux/rcupreempt_trace.h
+++ /dev/null
@@ -1,97 +0,0 @@
-/*
- * Read-Copy Update mechanism for mutual exclusion (RT implementation)
- *
- * This program is free software; you can redistribute it and/or modify
- * it under the terms of the GNU General Public License as published by
- * the Free Software Foundation; either version 2 of the License, or
- * (at your option) any later version.
- *
- * This program is distributed in the hope that it will be useful,
- * but WITHOUT ANY WARRANTY; without even the implied warranty of
- * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
- * GNU General Public License for more details.
- *
- * You should have received a copy of the GNU General Public License
- * along with this program; if not, write to the Free Software
- * Foundation, Inc., 59 Temple Place - Suite 330, Boston, MA 02111-1307, USA.
- *
- * Copyright (C) IBM Corporation, 2006
- *
- * Author: Paul McKenney <paulmck@us.ibm.com>
- *
- * Based on the original work by Paul McKenney <paul.mckenney@us.ibm.com>
- * and inputs from Rusty Russell, Andrea Arcangeli and Andi Kleen.
- * Papers:
- * http://www.rdrop.com/users/paulmck/paper/rclockpdcsproof.pdf
- * http://lse.sourceforge.net/locking/rclock_OLS.2001.05.01c.sc.pdf (OLS2001)
- *
- * For detailed explanation of the Preemptible Read-Copy Update mechanism see -
- * http://lwn.net/Articles/253651/
- */
-
-#ifndef __LINUX_RCUPREEMPT_TRACE_H
-#define __LINUX_RCUPREEMPT_TRACE_H
-
-#include <linux/types.h>
-#include <linux/kernel.h>
-
-#include <asm/atomic.h>
-
-/*
- * PREEMPT_RCU data structures.
- */
-
-struct rcupreempt_trace {
- long next_length;
- long next_add;
- long wait_length;
- long wait_add;
- long done_length;
- long done_add;
- long done_remove;
- atomic_t done_invoked;
- long rcu_check_callbacks;
- atomic_t rcu_try_flip_1;
- atomic_t rcu_try_flip_e1;
- long rcu_try_flip_i1;
- long rcu_try_flip_ie1;
- long rcu_try_flip_g1;
- long rcu_try_flip_a1;
- long rcu_try_flip_ae1;
- long rcu_try_flip_a2;
- long rcu_try_flip_z1;
- long rcu_try_flip_ze1;
- long rcu_try_flip_z2;
- long rcu_try_flip_m1;
- long rcu_try_flip_me1;
- long rcu_try_flip_m2;
-};
-
-#ifdef CONFIG_RCU_TRACE
-#define RCU_TRACE(fn, arg) fn(arg);
-#else
-#define RCU_TRACE(fn, arg)
-#endif
-
-extern void rcupreempt_trace_move2done(struct rcupreempt_trace *trace);
-extern void rcupreempt_trace_move2wait(struct rcupreempt_trace *trace);
-extern void rcupreempt_trace_try_flip_1(struct rcupreempt_trace *trace);
-extern void rcupreempt_trace_try_flip_e1(struct rcupreempt_trace *trace);
-extern void rcupreempt_trace_try_flip_i1(struct rcupreempt_trace *trace);
-extern void rcupreempt_trace_try_flip_ie1(struct rcupreempt_trace *trace);
-extern void rcupreempt_trace_try_flip_g1(struct rcupreempt_trace *trace);
-extern void rcupreempt_trace_try_flip_a1(struct rcupreempt_trace *trace);
-extern void rcupreempt_trace_try_flip_ae1(struct rcupreempt_trace *trace);
-extern void rcupreempt_trace_try_flip_a2(struct rcupreempt_trace *trace);
-extern void rcupreempt_trace_try_flip_z1(struct rcupreempt_trace *trace);
-extern void rcupreempt_trace_try_flip_ze1(struct rcupreempt_trace *trace);
-extern void rcupreempt_trace_try_flip_z2(struct rcupreempt_trace *trace);
-extern void rcupreempt_trace_try_flip_m1(struct rcupreempt_trace *trace);
-extern void rcupreempt_trace_try_flip_me1(struct rcupreempt_trace *trace);
-extern void rcupreempt_trace_try_flip_m2(struct rcupreempt_trace *trace);
-extern void rcupreempt_trace_check_callbacks(struct rcupreempt_trace *trace);
-extern void rcupreempt_trace_done_remove(struct rcupreempt_trace *trace);
-extern void rcupreempt_trace_invoke(struct rcupreempt_trace *trace);
-extern void rcupreempt_trace_next_add(struct rcupreempt_trace *trace);
-
-#endif /* __LINUX_RCUPREEMPT_TRACE_H */
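/*
 * Editor's note, not part of this patch: the RCU_TRACE() wrapper above
 * keeps call sites unconditional while compiling to nothing when
 * CONFIG_RCU_TRACE is off, e.g.:
 *
 *	RCU_TRACE(rcupreempt_trace_invoke, trace);
 */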
diff --git a/include/linux/rcutree.h b/include/linux/rcutree.h
index 5a5153806c4..a8930771782 100644
--- a/include/linux/rcutree.h
+++ b/include/linux/rcutree.h
@@ -30,264 +30,57 @@
#ifndef __LINUX_RCUTREE_H
#define __LINUX_RCUTREE_H
-#include <linux/cache.h>
-#include <linux/spinlock.h>
-#include <linux/threads.h>
-#include <linux/cpumask.h>
-#include <linux/seqlock.h>
+extern void rcu_sched_qs(int cpu);
+extern void rcu_bh_qs(int cpu);
-/*
- * Define shape of hierarchy based on NR_CPUS and CONFIG_RCU_FANOUT.
- * In theory, it should be possible to add more levels straightforwardly.
- * In practice, this has not been tested, so there is probably some
- * bug somewhere.
- */
-#define MAX_RCU_LVLS 3
-#define RCU_FANOUT (CONFIG_RCU_FANOUT)
-#define RCU_FANOUT_SQ (RCU_FANOUT * RCU_FANOUT)
-#define RCU_FANOUT_CUBE (RCU_FANOUT_SQ * RCU_FANOUT)
-
-#if NR_CPUS <= RCU_FANOUT
-# define NUM_RCU_LVLS 1
-# define NUM_RCU_LVL_0 1
-# define NUM_RCU_LVL_1 (NR_CPUS)
-# define NUM_RCU_LVL_2 0
-# define NUM_RCU_LVL_3 0
-#elif NR_CPUS <= RCU_FANOUT_SQ
-# define NUM_RCU_LVLS 2
-# define NUM_RCU_LVL_0 1
-# define NUM_RCU_LVL_1 (((NR_CPUS) + RCU_FANOUT - 1) / RCU_FANOUT)
-# define NUM_RCU_LVL_2 (NR_CPUS)
-# define NUM_RCU_LVL_3 0
-#elif NR_CPUS <= RCU_FANOUT_CUBE
-# define NUM_RCU_LVLS 3
-# define NUM_RCU_LVL_0 1
-# define NUM_RCU_LVL_1 (((NR_CPUS) + RCU_FANOUT_SQ - 1) / RCU_FANOUT_SQ)
-# define NUM_RCU_LVL_2 (((NR_CPUS) + (RCU_FANOUT) - 1) / (RCU_FANOUT))
-# define NUM_RCU_LVL_3 NR_CPUS
-#else
-# error "CONFIG_RCU_FANOUT insufficient for NR_CPUS"
-#endif /* #if (NR_CPUS) <= RCU_FANOUT */
-
-#define RCU_SUM (NUM_RCU_LVL_0 + NUM_RCU_LVL_1 + NUM_RCU_LVL_2 + NUM_RCU_LVL_3)
-#define NUM_RCU_NODES (RCU_SUM - NR_CPUS)
-
-/*
- * Dynticks per-CPU state.
- */
-struct rcu_dynticks {
- int dynticks_nesting; /* Track nesting level, sort of. */
- int dynticks; /* Even value for dynticks-idle, else odd. */
- int dynticks_nmi; /* Even value for either dynticks-idle or */
- /* not in nmi handler, else odd. So this */
- /* remains even for nmi from irq handler. */
-};
-
-/*
- * Definition for node within the RCU grace-period-detection hierarchy.
- */
-struct rcu_node {
- spinlock_t lock;
- unsigned long qsmask; /* CPUs or groups that need to switch in */
- /* order for current grace period to proceed.*/
- unsigned long qsmaskinit;
- /* Per-GP initialization for qsmask. */
- unsigned long grpmask; /* Mask to apply to parent qsmask. */
- int grplo; /* lowest-numbered CPU or group here. */
- int grphi; /* highest-numbered CPU or group here. */
- u8 grpnum; /* CPU/group number for next level up. */
- u8 level; /* root is at level 0. */
- struct rcu_node *parent;
-} ____cacheline_internodealigned_in_smp;
-
-/* Index values for nxttail array in struct rcu_data. */
-#define RCU_DONE_TAIL 0 /* Also RCU_WAIT head. */
-#define RCU_WAIT_TAIL 1 /* Also RCU_NEXT_READY head. */
-#define RCU_NEXT_READY_TAIL 2 /* Also RCU_NEXT head. */
-#define RCU_NEXT_TAIL 3
-#define RCU_NEXT_SIZE 4
-
-/* Per-CPU data for read-copy update. */
-struct rcu_data {
- /* 1) quiescent-state and grace-period handling : */
- long completed; /* Track rsp->completed gp number */
- /* in order to detect GP end. */
- long gpnum; /* Highest gp number that this CPU */
- /* is aware of having started. */
- long passed_quiesc_completed;
- /* Value of completed at time of qs. */
- bool passed_quiesc; /* User-mode/idle loop etc. */
- bool qs_pending; /* Core waits for quiesc state. */
- bool beenonline; /* CPU online at least once. */
- struct rcu_node *mynode; /* This CPU's leaf of hierarchy */
- unsigned long grpmask; /* Mask to apply to leaf qsmask. */
-
- /* 2) batch handling */
- /*
- * If nxtlist is not NULL, it is partitioned as follows.
- * Any of the partitions might be empty, in which case the
- * pointer to that partition will be equal to the pointer for
- * the following partition. When the list is empty, all of
- * the nxttail elements point to nxtlist, which is NULL.
- *
- * [*nxttail[RCU_NEXT_READY_TAIL], NULL = *nxttail[RCU_NEXT_TAIL]):
- * Entries that might have arrived after current GP ended
- * [*nxttail[RCU_WAIT_TAIL], *nxttail[RCU_NEXT_READY_TAIL]):
- * Entries known to have arrived before current GP ended
- * [*nxttail[RCU_DONE_TAIL], *nxttail[RCU_WAIT_TAIL]):
- * Entries that batch # <= ->completed - 1: waiting for current GP
- * [nxtlist, *nxttail[RCU_DONE_TAIL]):
- * Entries that batch # <= ->completed
- * The grace period for these entries has completed, and
- * the other grace-period-completed entries may be moved
- * here temporarily in rcu_process_callbacks().
- */
- struct rcu_head *nxtlist;
- struct rcu_head **nxttail[RCU_NEXT_SIZE];
- long qlen; /* # of queued callbacks */
- long blimit; /* Upper limit on a processed batch */
-
-#ifdef CONFIG_NO_HZ
- /* 3) dynticks interface. */
- struct rcu_dynticks *dynticks; /* Shared per-CPU dynticks state. */
- int dynticks_snap; /* Per-GP tracking for dynticks. */
- int dynticks_nmi_snap; /* Per-GP tracking for dynticks_nmi. */
-#endif /* #ifdef CONFIG_NO_HZ */
-
- /* 4) reasons this CPU needed to be kicked by force_quiescent_state */
-#ifdef CONFIG_NO_HZ
- unsigned long dynticks_fqs; /* Kicked due to dynticks idle. */
-#endif /* #ifdef CONFIG_NO_HZ */
- unsigned long offline_fqs; /* Kicked due to being offline. */
- unsigned long resched_ipi; /* Sent a resched IPI. */
-
- /* 5) __rcu_pending() statistics. */
- long n_rcu_pending; /* rcu_pending() calls since boot. */
- long n_rp_qs_pending;
- long n_rp_cb_ready;
- long n_rp_cpu_needs_gp;
- long n_rp_gp_completed;
- long n_rp_gp_started;
- long n_rp_need_fqs;
- long n_rp_need_nothing;
-
- int cpu;
-};
-
-/* Values for signaled field in struct rcu_state. */
-#define RCU_GP_INIT 0 /* Grace period being initialized. */
-#define RCU_SAVE_DYNTICK 1 /* Need to scan dyntick state. */
-#define RCU_FORCE_QS 2 /* Need to force quiescent state. */
-#ifdef CONFIG_NO_HZ
-#define RCU_SIGNAL_INIT RCU_SAVE_DYNTICK
-#else /* #ifdef CONFIG_NO_HZ */
-#define RCU_SIGNAL_INIT RCU_FORCE_QS
-#endif /* #else #ifdef CONFIG_NO_HZ */
-
-#define RCU_JIFFIES_TILL_FORCE_QS 3 /* for rsp->jiffies_force_qs */
-#ifdef CONFIG_RCU_CPU_STALL_DETECTOR
-#define RCU_SECONDS_TILL_STALL_CHECK (10 * HZ) /* for rsp->jiffies_stall */
-#define RCU_SECONDS_TILL_STALL_RECHECK (30 * HZ) /* for rsp->jiffies_stall */
-#define RCU_STALL_RAT_DELAY 2 /* Allow other CPUs time */
- /* to take at least one */
- /* scheduling clock irq */
- /* before ratting on them. */
-
-#endif /* #ifdef CONFIG_RCU_CPU_STALL_DETECTOR */
-
-/*
- * RCU global state, including node hierarchy. This hierarchy is
- * represented in "heap" form in a dense array. The root (first level)
- * of the hierarchy is in ->node[0] (referenced by ->level[0]), the second
- * level in ->node[1] through ->node[m] (->node[1] referenced by ->level[1]),
- * and the third level in ->node[m+1] and following (->node[m+1] referenced
- * by ->level[2]). The number of levels is determined by the number of
- * CPUs and by CONFIG_RCU_FANOUT. Small systems will have a "hierarchy"
- * consisting of a single rcu_node.
- */
-struct rcu_state {
- struct rcu_node node[NUM_RCU_NODES]; /* Hierarchy. */
- struct rcu_node *level[NUM_RCU_LVLS]; /* Hierarchy levels. */
- u32 levelcnt[MAX_RCU_LVLS + 1]; /* # nodes in each level. */
- u8 levelspread[NUM_RCU_LVLS]; /* kids/node in each level. */
- struct rcu_data *rda[NR_CPUS]; /* array of rdp pointers. */
-
- /* The following fields are guarded by the root rcu_node's lock. */
-
- u8 signaled ____cacheline_internodealigned_in_smp;
- /* Force QS state. */
- long gpnum; /* Current gp number. */
- long completed; /* # of last completed gp. */
- spinlock_t onofflock; /* exclude on/offline and */
- /* starting new GP. */
- spinlock_t fqslock; /* Only one task forcing */
- /* quiescent states. */
- unsigned long jiffies_force_qs; /* Time at which to invoke */
- /* force_quiescent_state(). */
- unsigned long n_force_qs; /* Number of calls to */
- /* force_quiescent_state(). */
- unsigned long n_force_qs_lh; /* ~Number of calls leaving */
- /* due to lock unavailable. */
- unsigned long n_force_qs_ngp; /* Number of calls leaving */
- /* due to no GP active. */
-#ifdef CONFIG_RCU_CPU_STALL_DETECTOR
- unsigned long gp_start; /* Time at which GP started, */
- /* but in jiffies. */
- unsigned long jiffies_stall; /* Time at which to check */
- /* for CPU stalls. */
-#endif /* #ifdef CONFIG_RCU_CPU_STALL_DETECTOR */
-#ifdef CONFIG_NO_HZ
- long dynticks_completed; /* Value of completed @ snap. */
-#endif /* #ifdef CONFIG_NO_HZ */
-};
+extern int rcu_needs_cpu(int cpu);
-extern void rcu_qsctr_inc(int cpu);
-extern void rcu_bh_qsctr_inc(int cpu);
+#ifdef CONFIG_TREE_PREEMPT_RCU
-extern int rcu_pending(int cpu);
-extern int rcu_needs_cpu(int cpu);
+extern void __rcu_read_lock(void);
+extern void __rcu_read_unlock(void);
+extern void exit_rcu(void);
-#ifdef CONFIG_DEBUG_LOCK_ALLOC
-extern struct lockdep_map rcu_lock_map;
-# define rcu_read_acquire() \
- lock_acquire(&rcu_lock_map, 0, 0, 2, 1, NULL, _THIS_IP_)
-# define rcu_read_release() lock_release(&rcu_lock_map, 1, _THIS_IP_)
-#else
-# define rcu_read_acquire() do { } while (0)
-# define rcu_read_release() do { } while (0)
-#endif
+#else /* #ifdef CONFIG_TREE_PREEMPT_RCU */
static inline void __rcu_read_lock(void)
{
preempt_disable();
- __acquire(RCU);
- rcu_read_acquire();
}
+
static inline void __rcu_read_unlock(void)
{
- rcu_read_release();
- __release(RCU);
preempt_enable();
}
+
+static inline void exit_rcu(void)
+{
+}
+
+#endif /* #else #ifdef CONFIG_TREE_PREEMPT_RCU */
+
static inline void __rcu_read_lock_bh(void)
{
local_bh_disable();
- __acquire(RCU_BH);
- rcu_read_acquire();
}
static inline void __rcu_read_unlock_bh(void)
{
- rcu_read_release();
- __release(RCU_BH);
local_bh_enable();
}
#define __synchronize_sched() synchronize_rcu()
-#define call_rcu_sched(head, func) call_rcu(head, func)
+extern void call_rcu_sched(struct rcu_head *head,
+ void (*func)(struct rcu_head *rcu));
-static inline void rcu_init_sched(void)
+static inline void synchronize_rcu_expedited(void)
{
+ synchronize_sched_expedited();
+}
+
+static inline void synchronize_rcu_bh_expedited(void)
+{
+ synchronize_sched_expedited();
}
extern void __rcu_init(void);
@@ -296,6 +89,11 @@ extern void rcu_restart_cpu(int cpu);
extern long rcu_batches_completed(void);
extern long rcu_batches_completed_bh(void);
+extern long rcu_batches_completed_sched(void);
+
+static inline void rcu_init_sched(void)
+{
+}
#ifdef CONFIG_NO_HZ
void rcu_enter_nohz(void);
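/*
 * Editor's note, not part of this patch: the fanout macros removed
 * above size the rcu_node tree from NR_CPUS and CONFIG_RCU_FANOUT.
 * Worked example with NR_CPUS = 128 and RCU_FANOUT = 64:
 *
 *	RCU_FANOUT_SQ = 64 * 64 = 4096, and 128 <= 4096, so:
 *	NUM_RCU_LVLS  = 2
 *	NUM_RCU_LVL_0 = 1			(single root node)
 *	NUM_RCU_LVL_1 = (128 + 63) / 64 = 2	(two leaf nodes)
 *	NUM_RCU_LVL_2 = 128			(the CPUs themselves)
 *	NUM_RCU_NODES = (1 + 2 + 128) - 128 = 3
 */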
diff --git a/include/linux/ring_buffer.h b/include/linux/ring_buffer.h
index 29f8599e6be..5fcc31ed577 100644
--- a/include/linux/ring_buffer.h
+++ b/include/linux/ring_buffer.h
@@ -75,20 +75,6 @@ ring_buffer_event_time_delta(struct ring_buffer_event *event)
}
/*
- * ring_buffer_event_discard can discard any event in the ring buffer.
- * it is up to the caller to protect against a reader from
- * consuming it or a writer from wrapping and replacing it.
- *
- * No external protection is needed if this is called before
- * the event is committed. But in that case it would be better to
- * use ring_buffer_discard_commit.
- *
- * Note, if an event that has not been committed is discarded
- * with ring_buffer_event_discard, it must still be committed.
- */
-void ring_buffer_event_discard(struct ring_buffer_event *event);
-
-/*
* ring_buffer_discard_commit will remove an event that has not
 * been committed yet. If this is used, then ring_buffer_unlock_commit
* must not be called on the discarded event. This function
@@ -154,8 +140,17 @@ unsigned long ring_buffer_size(struct ring_buffer *buffer);
void ring_buffer_reset_cpu(struct ring_buffer *buffer, int cpu);
void ring_buffer_reset(struct ring_buffer *buffer);
+#ifdef CONFIG_RING_BUFFER_ALLOW_SWAP
int ring_buffer_swap_cpu(struct ring_buffer *buffer_a,
struct ring_buffer *buffer_b, int cpu);
+#else
+static inline int
+ring_buffer_swap_cpu(struct ring_buffer *buffer_a,
+ struct ring_buffer *buffer_b, int cpu)
+{
+ return -ENODEV;
+}
+#endif
int ring_buffer_empty(struct ring_buffer *buffer);
int ring_buffer_empty_cpu(struct ring_buffer *buffer, int cpu);
@@ -170,7 +165,6 @@ unsigned long ring_buffer_overruns(struct ring_buffer *buffer);
unsigned long ring_buffer_entries_cpu(struct ring_buffer *buffer, int cpu);
unsigned long ring_buffer_overrun_cpu(struct ring_buffer *buffer, int cpu);
unsigned long ring_buffer_commit_overrun_cpu(struct ring_buffer *buffer, int cpu);
-unsigned long ring_buffer_nmi_dropped_cpu(struct ring_buffer *buffer, int cpu);
u64 ring_buffer_time_stamp(struct ring_buffer *buffer, int cpu);
void ring_buffer_normalize_time_stamp(struct ring_buffer *buffer,
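/*
 * Editor's sketch, not part of this patch: the new stub lets callers
 * invoke ring_buffer_swap_cpu() unconditionally and simply handle
 * -ENODEV when CONFIG_RING_BUFFER_ALLOW_SWAP is not set. The wrapper
 * name is hypothetical.
 */
static int example_try_swap(struct ring_buffer *a, struct ring_buffer *b,
			    int cpu)
{
	int ret = ring_buffer_swap_cpu(a, b, cpu);

	if (ret == -ENODEV)
		pr_debug("per-CPU buffer swap not configured\n");
	return ret;
}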
diff --git a/include/linux/sched.h b/include/linux/sched.h
index 0f1ea4a6695..f3d74bd04d1 100644
--- a/include/linux/sched.h
+++ b/include/linux/sched.h
@@ -38,6 +38,8 @@
#define SCHED_BATCH 3
/* SCHED_ISO: reserved but not implemented yet */
#define SCHED_IDLE 5
+/* Can be ORed in to make sure the process is reverted to SCHED_NORMAL on fork */
+#define SCHED_RESET_ON_FORK 0x40000000
#ifdef __KERNEL__
@@ -796,18 +798,19 @@ enum cpu_idle_type {
#define SCHED_LOAD_SCALE_FUZZ SCHED_LOAD_SCALE
#ifdef CONFIG_SMP
-#define SD_LOAD_BALANCE 1 /* Do load balancing on this domain. */
-#define SD_BALANCE_NEWIDLE 2 /* Balance when about to become idle */
-#define SD_BALANCE_EXEC 4 /* Balance on exec */
-#define SD_BALANCE_FORK 8 /* Balance on fork, clone */
-#define SD_WAKE_IDLE 16 /* Wake to idle CPU on task wakeup */
-#define SD_WAKE_AFFINE 32 /* Wake task to waking CPU */
-#define SD_WAKE_BALANCE 64 /* Perform balancing at task wakeup */
-#define SD_SHARE_CPUPOWER 128 /* Domain members share cpu power */
-#define SD_POWERSAVINGS_BALANCE 256 /* Balance for power savings */
-#define SD_SHARE_PKG_RESOURCES 512 /* Domain members share cpu pkg resources */
-#define SD_SERIALIZE 1024 /* Only a single load balancing instance */
-#define SD_WAKE_IDLE_FAR 2048 /* Gain latency sacrificing cache hit */
+#define SD_LOAD_BALANCE 0x0001 /* Do load balancing on this domain. */
+#define SD_BALANCE_NEWIDLE 0x0002 /* Balance when about to become idle */
+#define SD_BALANCE_EXEC 0x0004 /* Balance on exec */
+#define SD_BALANCE_FORK 0x0008 /* Balance on fork, clone */
+#define SD_WAKE_IDLE 0x0010 /* Wake to idle CPU on task wakeup */
+#define SD_WAKE_AFFINE 0x0020 /* Wake task to waking CPU */
+#define SD_WAKE_BALANCE 0x0040 /* Perform balancing at task wakeup */
+#define SD_SHARE_CPUPOWER 0x0080 /* Domain members share cpu power */
+#define SD_POWERSAVINGS_BALANCE 0x0100 /* Balance for power savings */
+#define SD_SHARE_PKG_RESOURCES 0x0200 /* Domain members share cpu pkg resources */
+#define SD_SERIALIZE 0x0400 /* Only a single load balancing instance */
+#define SD_WAKE_IDLE_FAR 0x0800 /* Gain latency sacrificing cache hit */
+#define SD_PREFER_SIBLING 0x1000 /* Prefer to place tasks in a sibling domain */
enum powersavings_balance_level {
POWERSAVINGS_BALANCE_NONE = 0, /* No power saving load balance */
@@ -827,7 +830,7 @@ static inline int sd_balance_for_mc_power(void)
if (sched_smt_power_savings)
return SD_POWERSAVINGS_BALANCE;
- return 0;
+ return SD_PREFER_SIBLING;
}
static inline int sd_balance_for_package_power(void)
@@ -835,7 +838,7 @@ static inline int sd_balance_for_package_power(void)
if (sched_mc_power_savings | sched_smt_power_savings)
return SD_POWERSAVINGS_BALANCE;
- return 0;
+ return SD_PREFER_SIBLING;
}
/*
@@ -857,15 +860,9 @@ struct sched_group {
/*
* CPU power of this group, SCHED_LOAD_SCALE being max power for a
- * single CPU. This is read only (except for setup, hotplug CPU).
- * Note: Never change cpu_power without recomputing its reciprocal
+ * single CPU.
*/
- unsigned int __cpu_power;
- /*
- * reciprocal value of cpu_power to avoid expensive divides
- * (see include/linux/reciprocal_div.h)
- */
- u32 reciprocal_cpu_power;
+ unsigned int cpu_power;
/*
* The CPUs this group covers.
@@ -918,6 +915,7 @@ struct sched_domain {
unsigned int newidle_idx;
unsigned int wake_idx;
unsigned int forkexec_idx;
+ unsigned int smt_gain;
int flags; /* See SD_* */
enum sched_domain_level level;
@@ -1045,7 +1043,6 @@ struct sched_class {
struct rq *busiest, struct sched_domain *sd,
enum cpu_idle_type idle);
void (*pre_schedule) (struct rq *this_rq, struct task_struct *task);
- int (*needs_post_schedule) (struct rq *this_rq);
void (*post_schedule) (struct rq *this_rq);
void (*task_wake_up) (struct rq *this_rq, struct task_struct *task);
@@ -1110,6 +1107,8 @@ struct sched_entity {
u64 wait_max;
u64 wait_count;
u64 wait_sum;
+ u64 iowait_count;
+ u64 iowait_sum;
u64 sleep_start;
u64 sleep_max;
@@ -1163,6 +1162,8 @@ struct sched_rt_entity {
#endif
};
+struct rcu_node;
+
struct task_struct {
volatile long state; /* -1 unrunnable, 0 runnable, >0 stopped */
void *stack;
@@ -1206,10 +1207,12 @@ struct task_struct {
unsigned int policy;
cpumask_t cpus_allowed;
-#ifdef CONFIG_PREEMPT_RCU
+#ifdef CONFIG_TREE_PREEMPT_RCU
int rcu_read_lock_nesting;
- int rcu_flipctr_idx;
-#endif /* #ifdef CONFIG_PREEMPT_RCU */
+ char rcu_read_unlock_special;
+ struct rcu_node *rcu_blocked_node;
+ struct list_head rcu_node_entry;
+#endif /* #ifdef CONFIG_TREE_PREEMPT_RCU */
#if defined(CONFIG_SCHEDSTATS) || defined(CONFIG_TASK_DELAY_ACCT)
struct sched_info sched_info;
@@ -1230,11 +1233,19 @@ struct task_struct {
unsigned did_exec:1;
unsigned in_execve:1; /* Tell the LSMs that the process is doing an
* execve */
+ unsigned in_iowait:1;
+
+
+ /* Revert to default priority/policy when forking */
+ unsigned sched_reset_on_fork:1;
+
pid_t pid;
pid_t tgid;
+#ifdef CONFIG_CC_STACKPROTECTOR
/* Canary value for the -fstack-protector gcc feature */
unsigned long stack_canary;
+#endif
/*
* pointers to (original) parent process, youngest child, younger sibling,
@@ -1292,6 +1303,7 @@ struct task_struct {
struct mutex cred_guard_mutex; /* guard against foreign influences on
* credential calculations
* (notably. ptrace) */
+ struct cred *replacement_session_keyring; /* for KEYCTL_SESSION_TO_PARENT */
char comm[TASK_COMM_LEN]; /* executable name excluding path
- access with [gs]et_task_comm (which lock
@@ -1724,6 +1736,28 @@ extern cputime_t task_gtime(struct task_struct *p);
#define tsk_used_math(p) ((p)->flags & PF_USED_MATH)
#define used_math() tsk_used_math(current)
+#ifdef CONFIG_TREE_PREEMPT_RCU
+
+#define RCU_READ_UNLOCK_BLOCKED (1 << 0) /* blocked while in RCU read-side. */
+#define RCU_READ_UNLOCK_NEED_QS (1 << 1) /* RCU core needs CPU response. */
+#define RCU_READ_UNLOCK_GOT_QS (1 << 2) /* CPU has responded to RCU core. */
+
+static inline void rcu_copy_process(struct task_struct *p)
+{
+ p->rcu_read_lock_nesting = 0;
+ p->rcu_read_unlock_special = 0;
+ p->rcu_blocked_node = NULL;
+ INIT_LIST_HEAD(&p->rcu_node_entry);
+}
+
+#else
+
+static inline void rcu_copy_process(struct task_struct *p)
+{
+}
+
+#endif
+
#ifdef CONFIG_SMP
extern int set_cpus_allowed_ptr(struct task_struct *p,
const struct cpumask *new_mask);
@@ -1813,11 +1847,12 @@ extern unsigned int sysctl_sched_min_granularity;
extern unsigned int sysctl_sched_wakeup_granularity;
extern unsigned int sysctl_sched_shares_ratelimit;
extern unsigned int sysctl_sched_shares_thresh;
-#ifdef CONFIG_SCHED_DEBUG
extern unsigned int sysctl_sched_child_runs_first;
+#ifdef CONFIG_SCHED_DEBUG
extern unsigned int sysctl_sched_features;
extern unsigned int sysctl_sched_migration_cost;
extern unsigned int sysctl_sched_nr_migrate;
+extern unsigned int sysctl_sched_time_avg;
extern unsigned int sysctl_timer_migration;
int sched_nr_latency_handler(struct ctl_table *table, int write,
@@ -2077,7 +2112,7 @@ static inline unsigned long wait_task_inactive(struct task_struct *p,
#define for_each_process(p) \
for (p = &init_task ; (p = next_task(p)) != &init_task ; )
-extern bool is_single_threaded(struct task_struct *);
+extern bool current_is_single_threaded(void);
/*
* Careful: do_each_thread/while_each_thread is a double loop so
@@ -2281,23 +2316,31 @@ static inline int need_resched(void)
* cond_resched_softirq() will enable bhs before scheduling.
*/
extern int _cond_resched(void);
-#ifdef CONFIG_PREEMPT_BKL
-static inline int cond_resched(void)
-{
- return 0;
-}
+
+#define cond_resched() ({ \
+ __might_sleep(__FILE__, __LINE__, 0); \
+ _cond_resched(); \
+})
+
+extern int __cond_resched_lock(spinlock_t *lock);
+
+#ifdef CONFIG_PREEMPT
+#define PREEMPT_LOCK_OFFSET PREEMPT_OFFSET
#else
-static inline int cond_resched(void)
-{
- return _cond_resched();
-}
+#define PREEMPT_LOCK_OFFSET 0
#endif
-extern int cond_resched_lock(spinlock_t * lock);
-extern int cond_resched_softirq(void);
-static inline int cond_resched_bkl(void)
-{
- return _cond_resched();
-}
+
+#define cond_resched_lock(lock) ({ \
+ __might_sleep(__FILE__, __LINE__, PREEMPT_LOCK_OFFSET); \
+ __cond_resched_lock(lock); \
+})
+
+extern int __cond_resched_softirq(void);
+
+#define cond_resched_softirq() ({ \
+ __might_sleep(__FILE__, __LINE__, SOFTIRQ_OFFSET); \
+ __cond_resched_softirq(); \
+})
/*
* Does a critical section need to be broken due to another
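/*
 * Editor's sketch, not part of this patch: SCHED_RESET_ON_FORK is ORed
 * into the policy from userspace, e.g. so that children of a realtime
 * task start life back at SCHED_NORMAL:
 *
 *	struct sched_param sp = { .sched_priority = 10 };
 *
 *	if (sched_setscheduler(0, SCHED_FIFO | SCHED_RESET_ON_FORK, &sp))
 *		perror("sched_setscheduler");
 */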
diff --git a/include/linux/security.h b/include/linux/security.h
index 1f16eea2017..d050b66ab9e 100644
--- a/include/linux/security.h
+++ b/include/linux/security.h
@@ -53,7 +53,7 @@ struct audit_krule;
extern int cap_capable(struct task_struct *tsk, const struct cred *cred,
int cap, int audit);
extern int cap_settime(struct timespec *ts, struct timezone *tz);
-extern int cap_ptrace_may_access(struct task_struct *child, unsigned int mode);
+extern int cap_ptrace_access_check(struct task_struct *child, unsigned int mode);
extern int cap_ptrace_traceme(struct task_struct *parent);
extern int cap_capget(struct task_struct *target, kernel_cap_t *effective, kernel_cap_t *inheritable, kernel_cap_t *permitted);
extern int cap_capset(struct cred *new, const struct cred *old,
@@ -653,6 +653,11 @@ static inline void security_free_mnt_opts(struct security_mnt_opts *opts)
* manual page for definitions of the @clone_flags.
* @clone_flags contains the flags indicating what should be shared.
* Return 0 if permission is granted.
+ * @cred_alloc_blank:
+ * @cred points to the credentials.
+ * @gfp indicates the atomicity of any memory allocations.
+ * Only allocate sufficient memory and attach to @cred such that
+ * cred_transfer() will not get ENOMEM.
* @cred_free:
* @cred points to the credentials.
* Deallocate and clear the cred->security field in a set of credentials.
@@ -665,6 +670,10 @@ static inline void security_free_mnt_opts(struct security_mnt_opts *opts)
* @new points to the new credentials.
* @old points to the original credentials.
* Install a new set of credentials.
+ * @cred_transfer:
+ * @new points to the new credentials.
+ * @old points to the original credentials.
+ * Transfer data from original creds to new creds
* @kernel_act_as:
* Set the credentials for a kernel service to act as (subjective context).
* @new points to the credentials to be modified.
@@ -678,6 +687,10 @@ static inline void security_free_mnt_opts(struct security_mnt_opts *opts)
* @inode points to the inode to use as a reference.
* The current task must be the one that nominated @inode.
* Return 0 if successful.
+ * @kernel_module_request:
+ * Ability to trigger the kernel to automatically upcall to userspace for
+ * userspace to load a kernel module with the given name.
+ * Return 0 if successful.
* @task_setuid:
* Check permission before setting one or more of the user identity
* attributes of the current process. The @flags parameter indicates
@@ -994,6 +1007,17 @@ static inline void security_free_mnt_opts(struct security_mnt_opts *opts)
* Sets the connection's peersid to the secmark on skb.
* @req_classify_flow:
* Sets the flow's sid to the openreq sid.
+ * @tun_dev_create:
+ * Check permissions prior to creating a new TUN device.
+ * @tun_dev_post_create:
+ * This hook allows a module to update or allocate a per-socket security
+ * structure.
+ * @sk contains the newly created sock structure.
+ * @tun_dev_attach:
+ * Check permissions prior to attaching to a persistent TUN device. This
+ * hook can also be used by the module to update any security state
+ * associated with the TUN device's sock structure.
+ * @sk contains the existing sock structure.
*
* Security hooks for XFRM operations.
*
@@ -1088,6 +1112,13 @@ static inline void security_free_mnt_opts(struct security_mnt_opts *opts)
* Return the length of the string (including terminating NUL) or -ve if
* an error.
* May also return 0 (and a NULL buffer pointer) if there is no label.
+ * @key_session_to_parent:
+ * Forcibly assign the session keyring from a process to its parent
+ * process.
+ * @cred: Pointer to process's credentials
+ * @parent_cred: Pointer to parent process's credentials
+ * @keyring: Proposed new session keyring
+ * Return 0 if permission is granted, -ve error otherwise.
*
* Security hooks affecting all System V IPC operations.
*
@@ -1229,7 +1260,7 @@ static inline void security_free_mnt_opts(struct security_mnt_opts *opts)
* @alter contains the flag indicating whether changes are to be made.
* Return 0 if permission is granted.
*
- * @ptrace_may_access:
+ * @ptrace_access_check:
* Check permission before allowing the current process to trace the
* @child process.
* Security modules may also want to perform a process tracing check
@@ -1244,7 +1275,7 @@ static inline void security_free_mnt_opts(struct security_mnt_opts *opts)
* Check that the @parent process has sufficient permission to trace the
* current process before allowing the current process to present itself
* to the @parent process for tracing.
- * The parent process will still have to undergo the ptrace_may_access
+ * The parent process will still have to undergo the ptrace_access_check
* checks before it is allowed to trace this one.
* @parent contains the task_struct structure for debugger process.
* Return 0 if permission is granted.
@@ -1351,12 +1382,47 @@ static inline void security_free_mnt_opts(struct security_mnt_opts *opts)
* audit_rule_init.
* @rule contains the allocated rule
*
+ * @inode_notifysecctx:
+ * Notify the security module of what the security context of an inode
+ * should be. Initializes the incore security context managed by the
+ * security module for this inode. Example usage: NFS client invokes
+ * this hook to initialize the security context in its incore inode to the
+ * value provided by the server for the file when the server returned the
+ * file's attributes to the client.
+ *
+ * Must be called with inode->i_mutex locked.
+ *
+ * @inode we wish to set the security context of.
+ * @ctx contains the string which we wish to set in the inode.
+ * @ctxlen contains the length of @ctx.
+ *
+ * @inode_setsecctx:
+ * Change the security context of an inode. Updates the
+ * incore security context managed by the security module and invokes the
+ * fs code as needed (via __vfs_setxattr_noperm) to update any backing
+ * xattrs that represent the context. Example usage: NFS server invokes
+ * this hook to change the security context in its incore inode and on the
+ * backing filesystem to a value provided by the client on a SETATTR
+ * operation.
+ *
+ * Must be called with inode->i_mutex locked.
+ *
+ * @dentry contains the inode we wish to set the security context of.
+ * @ctx contains the string which we wish to set in the inode.
+ * @ctxlen contains the length of @ctx.
+ *
+ * @inode_getsecctx:
+ * Returns a string containing all relevant security context information
+ *
+ * @inode we wish to get the security context of.
+ * @ctx is a pointer in which to place the allocated security context.
+ * @ctxlen points to the place to put the length of @ctx.
* This is the main security structure.
*/
struct security_operations {
char name[SECURITY_NAME_MAX + 1];
- int (*ptrace_may_access) (struct task_struct *child, unsigned int mode);
+ int (*ptrace_access_check) (struct task_struct *child, unsigned int mode);
int (*ptrace_traceme) (struct task_struct *parent);
int (*capget) (struct task_struct *target,
kernel_cap_t *effective,
@@ -1483,12 +1549,15 @@ struct security_operations {
int (*dentry_open) (struct file *file, const struct cred *cred);
int (*task_create) (unsigned long clone_flags);
+ int (*cred_alloc_blank) (struct cred *cred, gfp_t gfp);
void (*cred_free) (struct cred *cred);
int (*cred_prepare)(struct cred *new, const struct cred *old,
gfp_t gfp);
void (*cred_commit)(struct cred *new, const struct cred *old);
+ void (*cred_transfer)(struct cred *new, const struct cred *old);
int (*kernel_act_as)(struct cred *new, u32 secid);
int (*kernel_create_files_as)(struct cred *new, struct inode *inode);
+ int (*kernel_module_request)(void);
int (*task_setuid) (uid_t id0, uid_t id1, uid_t id2, int flags);
int (*task_fix_setuid) (struct cred *new, const struct cred *old,
int flags);
@@ -1556,6 +1625,10 @@ struct security_operations {
int (*secctx_to_secid) (const char *secdata, u32 seclen, u32 *secid);
void (*release_secctx) (char *secdata, u32 seclen);
+ int (*inode_notifysecctx)(struct inode *inode, void *ctx, u32 ctxlen);
+ int (*inode_setsecctx)(struct dentry *dentry, void *ctx, u32 ctxlen);
+ int (*inode_getsecctx)(struct inode *inode, void **ctx, u32 *ctxlen);
+
#ifdef CONFIG_SECURITY_NETWORK
int (*unix_stream_connect) (struct socket *sock,
struct socket *other, struct sock *newsk);
@@ -1592,6 +1665,9 @@ struct security_operations {
void (*inet_csk_clone) (struct sock *newsk, const struct request_sock *req);
void (*inet_conn_established) (struct sock *sk, struct sk_buff *skb);
void (*req_classify_flow) (const struct request_sock *req, struct flowi *fl);
+ int (*tun_dev_create)(void);
+ void (*tun_dev_post_create)(struct sock *sk);
+ int (*tun_dev_attach)(struct sock *sk);
#endif /* CONFIG_SECURITY_NETWORK */
#ifdef CONFIG_SECURITY_NETWORK_XFRM
@@ -1620,6 +1696,9 @@ struct security_operations {
const struct cred *cred,
key_perm_t perm);
int (*key_getsecurity)(struct key *key, char **_buffer);
+ int (*key_session_to_parent)(const struct cred *cred,
+ const struct cred *parent_cred,
+ struct key *key);
#endif /* CONFIG_KEYS */
#ifdef CONFIG_AUDIT
@@ -1637,7 +1716,7 @@ extern int security_module_enable(struct security_operations *ops);
extern int register_security(struct security_operations *ops);
/* Security operations */
-int security_ptrace_may_access(struct task_struct *child, unsigned int mode);
+int security_ptrace_access_check(struct task_struct *child, unsigned int mode);
int security_ptrace_traceme(struct task_struct *parent);
int security_capget(struct task_struct *target,
kernel_cap_t *effective,
@@ -1736,11 +1815,14 @@ int security_file_send_sigiotask(struct task_struct *tsk,
int security_file_receive(struct file *file);
int security_dentry_open(struct file *file, const struct cred *cred);
int security_task_create(unsigned long clone_flags);
+int security_cred_alloc_blank(struct cred *cred, gfp_t gfp);
void security_cred_free(struct cred *cred);
int security_prepare_creds(struct cred *new, const struct cred *old, gfp_t gfp);
void security_commit_creds(struct cred *new, const struct cred *old);
+void security_transfer_creds(struct cred *new, const struct cred *old);
int security_kernel_act_as(struct cred *new, u32 secid);
int security_kernel_create_files_as(struct cred *new, struct inode *inode);
+int security_kernel_module_request(void);
int security_task_setuid(uid_t id0, uid_t id1, uid_t id2, int flags);
int security_task_fix_setuid(struct cred *new, const struct cred *old,
int flags);
@@ -1796,6 +1878,9 @@ int security_secid_to_secctx(u32 secid, char **secdata, u32 *seclen);
int security_secctx_to_secid(const char *secdata, u32 seclen, u32 *secid);
void security_release_secctx(char *secdata, u32 seclen);
+int security_inode_notifysecctx(struct inode *inode, void *ctx, u32 ctxlen);
+int security_inode_setsecctx(struct dentry *dentry, void *ctx, u32 ctxlen);
+int security_inode_getsecctx(struct inode *inode, void **ctx, u32 *ctxlen);
#else /* CONFIG_SECURITY */
struct security_mnt_opts {
};
@@ -1818,10 +1903,10 @@ static inline int security_init(void)
return 0;
}
-static inline int security_ptrace_may_access(struct task_struct *child,
+static inline int security_ptrace_access_check(struct task_struct *child,
unsigned int mode)
{
- return cap_ptrace_may_access(child, mode);
+ return cap_ptrace_access_check(child, mode);
}
static inline int security_ptrace_traceme(struct task_struct *parent)
@@ -2266,6 +2351,11 @@ static inline int security_task_create(unsigned long clone_flags)
return 0;
}
+static inline int security_cred_alloc_blank(struct cred *cred, gfp_t gfp)
+{
+ return 0;
+}
+
static inline void security_cred_free(struct cred *cred)
{ }
@@ -2281,6 +2371,11 @@ static inline void security_commit_creds(struct cred *new,
{
}
+static inline void security_transfer_creds(struct cred *new,
+ const struct cred *old)
+{
+}
+
static inline int security_kernel_act_as(struct cred *cred, u32 secid)
{
return 0;
@@ -2292,6 +2387,11 @@ static inline int security_kernel_create_files_as(struct cred *cred,
return 0;
}
+static inline int security_kernel_module_request(void)
+{
+ return 0;
+}
+
static inline int security_task_setuid(uid_t id0, uid_t id1, uid_t id2,
int flags)
{
@@ -2537,6 +2637,19 @@ static inline int security_secctx_to_secid(const char *secdata,
static inline void security_release_secctx(char *secdata, u32 seclen)
{
}
+
+static inline int security_inode_notifysecctx(struct inode *inode, void *ctx, u32 ctxlen)
+{
+ return -EOPNOTSUPP;
+}
+static inline int security_inode_setsecctx(struct dentry *dentry, void *ctx, u32 ctxlen)
+{
+ return -EOPNOTSUPP;
+}
+static inline int security_inode_getsecctx(struct inode *inode, void **ctx, u32 *ctxlen)
+{
+ return -EOPNOTSUPP;
+}
#endif /* CONFIG_SECURITY */
#ifdef CONFIG_SECURITY_NETWORK
@@ -2575,6 +2688,9 @@ void security_inet_csk_clone(struct sock *newsk,
const struct request_sock *req);
void security_inet_conn_established(struct sock *sk,
struct sk_buff *skb);
+int security_tun_dev_create(void);
+void security_tun_dev_post_create(struct sock *sk);
+int security_tun_dev_attach(struct sock *sk);
#else /* CONFIG_SECURITY_NETWORK */
static inline int security_unix_stream_connect(struct socket *sock,
@@ -2725,6 +2841,20 @@ static inline void security_inet_conn_established(struct sock *sk,
struct sk_buff *skb)
{
}
+
+static inline int security_tun_dev_create(void)
+{
+ return 0;
+}
+
+static inline void security_tun_dev_post_create(struct sock *sk)
+{
+}
+
+static inline int security_tun_dev_attach(struct sock *sk)
+{
+ return 0;
+}
#endif /* CONFIG_SECURITY_NETWORK */
#ifdef CONFIG_SECURITY_NETWORK_XFRM
@@ -2881,6 +3011,9 @@ void security_key_free(struct key *key);
int security_key_permission(key_ref_t key_ref,
const struct cred *cred, key_perm_t perm);
int security_key_getsecurity(struct key *key, char **_buffer);
+int security_key_session_to_parent(const struct cred *cred,
+ const struct cred *parent_cred,
+ struct key *key);
#else
@@ -2908,6 +3041,13 @@ static inline int security_key_getsecurity(struct key *key, char **_buffer)
return 0;
}
+static inline int security_key_session_to_parent(const struct cred *cred,
+ const struct cred *parent_cred,
+ struct key *key)
+{
+ return 0;
+}
+
#endif
#endif /* CONFIG_KEYS */
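/*
 * Editor's sketch, not part of this patch: a security module adopting
 * the renamed ptrace hook and one of the new TUN hooks might wire them
 * up like this (module name and policy are hypothetical; the TUN hooks
 * require CONFIG_SECURITY_NETWORK):
 */
static int example_ptrace_access_check(struct task_struct *child,
				       unsigned int mode)
{
	return cap_ptrace_access_check(child, mode); /* defer to capabilities */
}

static int example_tun_dev_create(void)
{
	return 0;				/* permit TUN device creation */
}

static struct security_operations example_ops = {
	.name			= "example",
	.ptrace_access_check	= example_ptrace_access_check,
	.tun_dev_create		= example_tun_dev_create,
};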
diff --git a/include/linux/shmem_fs.h b/include/linux/shmem_fs.h
index abff6c9b413..6d3f2f449ea 100644
--- a/include/linux/shmem_fs.h
+++ b/include/linux/shmem_fs.h
@@ -39,7 +39,7 @@ static inline struct shmem_inode_info *SHMEM_I(struct inode *inode)
}
#ifdef CONFIG_TMPFS_POSIX_ACL
-int shmem_permission(struct inode *, int);
+int shmem_check_acl(struct inode *, int);
int shmem_acl_init(struct inode *, struct inode *);
extern struct xattr_handler shmem_xattr_acl_access_handler;
diff --git a/include/linux/spinlock.h b/include/linux/spinlock.h
index 4be57ab0347..f0ca7a7a175 100644
--- a/include/linux/spinlock.h
+++ b/include/linux/spinlock.h
@@ -143,15 +143,6 @@ static inline void smp_mb__after_lock(void) { smp_mb(); }
*/
#define spin_unlock_wait(lock) __raw_spin_unlock_wait(&(lock)->raw_lock)
-/*
- * Pull the _spin_*()/_read_*()/_write_*() functions/declarations:
- */
-#if defined(CONFIG_SMP) || defined(CONFIG_DEBUG_SPINLOCK)
-# include <linux/spinlock_api_smp.h>
-#else
-# include <linux/spinlock_api_up.h>
-#endif
-
#ifdef CONFIG_DEBUG_SPINLOCK
extern void _raw_spin_lock(spinlock_t *lock);
#define _raw_spin_lock_flags(lock, flags) _raw_spin_lock(lock)
@@ -268,50 +259,16 @@ static inline void smp_mb__after_lock(void) { smp_mb(); }
#define spin_lock_irq(lock) _spin_lock_irq(lock)
#define spin_lock_bh(lock) _spin_lock_bh(lock)
-
#define read_lock_irq(lock) _read_lock_irq(lock)
#define read_lock_bh(lock) _read_lock_bh(lock)
-
#define write_lock_irq(lock) _write_lock_irq(lock)
#define write_lock_bh(lock) _write_lock_bh(lock)
-
-/*
- * We inline the unlock functions in the nondebug case:
- */
-#if defined(CONFIG_DEBUG_SPINLOCK) || defined(CONFIG_PREEMPT) || \
- !defined(CONFIG_SMP)
-# define spin_unlock(lock) _spin_unlock(lock)
-# define read_unlock(lock) _read_unlock(lock)
-# define write_unlock(lock) _write_unlock(lock)
-# define spin_unlock_irq(lock) _spin_unlock_irq(lock)
-# define read_unlock_irq(lock) _read_unlock_irq(lock)
-# define write_unlock_irq(lock) _write_unlock_irq(lock)
-#else
-# define spin_unlock(lock) \
- do {__raw_spin_unlock(&(lock)->raw_lock); __release(lock); } while (0)
-# define read_unlock(lock) \
- do {__raw_read_unlock(&(lock)->raw_lock); __release(lock); } while (0)
-# define write_unlock(lock) \
- do {__raw_write_unlock(&(lock)->raw_lock); __release(lock); } while (0)
-# define spin_unlock_irq(lock) \
-do { \
- __raw_spin_unlock(&(lock)->raw_lock); \
- __release(lock); \
- local_irq_enable(); \
-} while (0)
-# define read_unlock_irq(lock) \
-do { \
- __raw_read_unlock(&(lock)->raw_lock); \
- __release(lock); \
- local_irq_enable(); \
-} while (0)
-# define write_unlock_irq(lock) \
-do { \
- __raw_write_unlock(&(lock)->raw_lock); \
- __release(lock); \
- local_irq_enable(); \
-} while (0)
-#endif
+#define spin_unlock(lock) _spin_unlock(lock)
+#define read_unlock(lock) _read_unlock(lock)
+#define write_unlock(lock) _write_unlock(lock)
+#define spin_unlock_irq(lock) _spin_unlock_irq(lock)
+#define read_unlock_irq(lock) _read_unlock_irq(lock)
+#define write_unlock_irq(lock) _write_unlock_irq(lock)
#define spin_unlock_irqrestore(lock, flags) \
do { \
@@ -380,4 +337,13 @@ extern int _atomic_dec_and_lock(atomic_t *atomic, spinlock_t *lock);
*/
#define spin_can_lock(lock) (!spin_is_locked(lock))
+/*
+ * Pull the _spin_*()/_read_*()/_write_*() functions/declarations:
+ */
+#if defined(CONFIG_SMP) || defined(CONFIG_DEBUG_SPINLOCK)
+# include <linux/spinlock_api_smp.h>
+#else
+# include <linux/spinlock_api_up.h>
+#endif
+
#endif /* __LINUX_SPINLOCK_H */
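/*
 * Editor's note, not part of this patch: with the unlock wrappers now
 * unconditional macros, the spinlock_api_*.h include is pulled in at
 * the end of the header instead; caller-side usage is unchanged, e.g.
 * the usual irqsave pattern:
 *
 *	unsigned long flags;
 *
 *	spin_lock_irqsave(&lock, flags);
 *	...critical section...
 *	spin_unlock_irqrestore(&lock, flags);
 */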
diff --git a/include/linux/spinlock_api_smp.h b/include/linux/spinlock_api_smp.h
index d79845d034b..7a7e18fc241 100644
--- a/include/linux/spinlock_api_smp.h
+++ b/include/linux/spinlock_api_smp.h
@@ -60,4 +60,398 @@ void __lockfunc _read_unlock_irqrestore(rwlock_t *lock, unsigned long flags)
void __lockfunc _write_unlock_irqrestore(rwlock_t *lock, unsigned long flags)
__releases(lock);
+/*
+ * We inline the unlock functions in the nondebug case:
+ */
+#if !defined(CONFIG_DEBUG_SPINLOCK) && !defined(CONFIG_PREEMPT)
+#define __always_inline__spin_unlock
+#define __always_inline__read_unlock
+#define __always_inline__write_unlock
+#define __always_inline__spin_unlock_irq
+#define __always_inline__read_unlock_irq
+#define __always_inline__write_unlock_irq
+#endif
+
+#ifndef CONFIG_DEBUG_SPINLOCK
+#ifndef CONFIG_GENERIC_LOCKBREAK
+
+#ifdef __always_inline__spin_lock
+#define _spin_lock(lock) __spin_lock(lock)
+#endif
+
+#ifdef __always_inline__read_lock
+#define _read_lock(lock) __read_lock(lock)
+#endif
+
+#ifdef __always_inline__write_lock
+#define _write_lock(lock) __write_lock(lock)
+#endif
+
+#ifdef __always_inline__spin_lock_bh
+#define _spin_lock_bh(lock) __spin_lock_bh(lock)
+#endif
+
+#ifdef __always_inline__read_lock_bh
+#define _read_lock_bh(lock) __read_lock_bh(lock)
+#endif
+
+#ifdef __always_inline__write_lock_bh
+#define _write_lock_bh(lock) __write_lock_bh(lock)
+#endif
+
+#ifdef __always_inline__spin_lock_irq
+#define _spin_lock_irq(lock) __spin_lock_irq(lock)
+#endif
+
+#ifdef __always_inline__read_lock_irq
+#define _read_lock_irq(lock) __read_lock_irq(lock)
+#endif
+
+#ifdef __always_inline__write_lock_irq
+#define _write_lock_irq(lock) __write_lock_irq(lock)
+#endif
+
+#ifdef __always_inline__spin_lock_irqsave
+#define _spin_lock_irqsave(lock) __spin_lock_irqsave(lock)
+#endif
+
+#ifdef __always_inline__read_lock_irqsave
+#define _read_lock_irqsave(lock) __read_lock_irqsave(lock)
+#endif
+
+#ifdef __always_inline__write_lock_irqsave
+#define _write_lock_irqsave(lock) __write_lock_irqsave(lock)
+#endif
+
+#endif /* !CONFIG_GENERIC_LOCKBREAK */
+
+#ifdef __always_inline__spin_trylock
+#define _spin_trylock(lock) __spin_trylock(lock)
+#endif
+
+#ifdef __always_inline__read_trylock
+#define _read_trylock(lock) __read_trylock(lock)
+#endif
+
+#ifdef __always_inline__write_trylock
+#define _write_trylock(lock) __write_trylock(lock)
+#endif
+
+#ifdef __always_inline__spin_trylock_bh
+#define _spin_trylock_bh(lock) __spin_trylock_bh(lock)
+#endif
+
+#ifdef __always_inline__spin_unlock
+#define _spin_unlock(lock) __spin_unlock(lock)
+#endif
+
+#ifdef __always_inline__read_unlock
+#define _read_unlock(lock) __read_unlock(lock)
+#endif
+
+#ifdef __always_inline__write_unlock
+#define _write_unlock(lock) __write_unlock(lock)
+#endif
+
+#ifdef __always_inline__spin_unlock_bh
+#define _spin_unlock_bh(lock) __spin_unlock_bh(lock)
+#endif
+
+#ifdef __always_inline__read_unlock_bh
+#define _read_unlock_bh(lock) __read_unlock_bh(lock)
+#endif
+
+#ifdef __always_inline__write_unlock_bh
+#define _write_unlock_bh(lock) __write_unlock_bh(lock)
+#endif
+
+#ifdef __always_inline__spin_unlock_irq
+#define _spin_unlock_irq(lock) __spin_unlock_irq(lock)
+#endif
+
+#ifdef __always_inline__read_unlock_irq
+#define _read_unlock_irq(lock) __read_unlock_irq(lock)
+#endif
+
+#ifdef __always_inline__write_unlock_irq
+#define _write_unlock_irq(lock) __write_unlock_irq(lock)
+#endif
+
+#ifdef __always_inline__spin_unlock_irqrestore
+#define _spin_unlock_irqrestore(lock, flags) __spin_unlock_irqrestore(lock, flags)
+#endif
+
+#ifdef __always_inline__read_unlock_irqrestore
+#define _read_unlock_irqrestore(lock, flags) __read_unlock_irqrestore(lock, flags)
+#endif
+
+#ifdef __always_inline__write_unlock_irqrestore
+#define _write_unlock_irqrestore(lock, flags) __write_unlock_irqrestore(lock, flags)
+#endif
+
+#endif /* CONFIG_DEBUG_SPINLOCK */
+
+static inline int __spin_trylock(spinlock_t *lock)
+{
+ preempt_disable();
+ if (_raw_spin_trylock(lock)) {
+ spin_acquire(&lock->dep_map, 0, 1, _RET_IP_);
+ return 1;
+ }
+ preempt_enable();
+ return 0;
+}
+
+static inline int __read_trylock(rwlock_t *lock)
+{
+ preempt_disable();
+ if (_raw_read_trylock(lock)) {
+ rwlock_acquire_read(&lock->dep_map, 0, 1, _RET_IP_);
+ return 1;
+ }
+ preempt_enable();
+ return 0;
+}
+
+static inline int __write_trylock(rwlock_t *lock)
+{
+ preempt_disable();
+ if (_raw_write_trylock(lock)) {
+ rwlock_acquire(&lock->dep_map, 0, 1, _RET_IP_);
+ return 1;
+ }
+ preempt_enable();
+ return 0;
+}
+
+/*
+ * If lockdep is enabled then we use the non-preemption spin-ops
+ * even on CONFIG_PREEMPT, because lockdep assumes that interrupts are
+ * not re-enabled during lock-acquire (which the preempt-spin-ops do):
+ */
+#if !defined(CONFIG_GENERIC_LOCKBREAK) || defined(CONFIG_DEBUG_LOCK_ALLOC)
+
+static inline void __read_lock(rwlock_t *lock)
+{
+ preempt_disable();
+ rwlock_acquire_read(&lock->dep_map, 0, 0, _RET_IP_);
+ LOCK_CONTENDED(lock, _raw_read_trylock, _raw_read_lock);
+}
+
+static inline unsigned long __spin_lock_irqsave(spinlock_t *lock)
+{
+ unsigned long flags;
+
+ local_irq_save(flags);
+ preempt_disable();
+ spin_acquire(&lock->dep_map, 0, 0, _RET_IP_);
+ /*
+ * On lockdep we dont want the hand-coded irq-enable of
+ * _raw_spin_lock_flags() code, because lockdep assumes
+ * that interrupts are not re-enabled during lock-acquire:
+ */
+#ifdef CONFIG_LOCKDEP
+ LOCK_CONTENDED(lock, _raw_spin_trylock, _raw_spin_lock);
+#else
+ _raw_spin_lock_flags(lock, &flags);
+#endif
+ return flags;
+}
+
+static inline void __spin_lock_irq(spinlock_t *lock)
+{
+ local_irq_disable();
+ preempt_disable();
+ spin_acquire(&lock->dep_map, 0, 0, _RET_IP_);
+ LOCK_CONTENDED(lock, _raw_spin_trylock, _raw_spin_lock);
+}
+
+static inline void __spin_lock_bh(spinlock_t *lock)
+{
+ local_bh_disable();
+ preempt_disable();
+ spin_acquire(&lock->dep_map, 0, 0, _RET_IP_);
+ LOCK_CONTENDED(lock, _raw_spin_trylock, _raw_spin_lock);
+}
+
+static inline unsigned long __read_lock_irqsave(rwlock_t *lock)
+{
+ unsigned long flags;
+
+ local_irq_save(flags);
+ preempt_disable();
+ rwlock_acquire_read(&lock->dep_map, 0, 0, _RET_IP_);
+ LOCK_CONTENDED_FLAGS(lock, _raw_read_trylock, _raw_read_lock,
+ _raw_read_lock_flags, &flags);
+ return flags;
+}
+
+static inline void __read_lock_irq(rwlock_t *lock)
+{
+ local_irq_disable();
+ preempt_disable();
+ rwlock_acquire_read(&lock->dep_map, 0, 0, _RET_IP_);
+ LOCK_CONTENDED(lock, _raw_read_trylock, _raw_read_lock);
+}
+
+static inline void __read_lock_bh(rwlock_t *lock)
+{
+ local_bh_disable();
+ preempt_disable();
+ rwlock_acquire_read(&lock->dep_map, 0, 0, _RET_IP_);
+ LOCK_CONTENDED(lock, _raw_read_trylock, _raw_read_lock);
+}
+
+static inline unsigned long __write_lock_irqsave(rwlock_t *lock)
+{
+ unsigned long flags;
+
+ local_irq_save(flags);
+ preempt_disable();
+ rwlock_acquire(&lock->dep_map, 0, 0, _RET_IP_);
+ LOCK_CONTENDED_FLAGS(lock, _raw_write_trylock, _raw_write_lock,
+ _raw_write_lock_flags, &flags);
+ return flags;
+}
+
+static inline void __write_lock_irq(rwlock_t *lock)
+{
+ local_irq_disable();
+ preempt_disable();
+ rwlock_acquire(&lock->dep_map, 0, 0, _RET_IP_);
+ LOCK_CONTENDED(lock, _raw_write_trylock, _raw_write_lock);
+}
+
+static inline void __write_lock_bh(rwlock_t *lock)
+{
+ local_bh_disable();
+ preempt_disable();
+ rwlock_acquire(&lock->dep_map, 0, 0, _RET_IP_);
+ LOCK_CONTENDED(lock, _raw_write_trylock, _raw_write_lock);
+}
+
+static inline void __spin_lock(spinlock_t *lock)
+{
+ preempt_disable();
+ spin_acquire(&lock->dep_map, 0, 0, _RET_IP_);
+ LOCK_CONTENDED(lock, _raw_spin_trylock, _raw_spin_lock);
+}
+
+static inline void __write_lock(rwlock_t *lock)
+{
+ preempt_disable();
+ rwlock_acquire(&lock->dep_map, 0, 0, _RET_IP_);
+ LOCK_CONTENDED(lock, _raw_write_trylock, _raw_write_lock);
+}
+
+#endif /* !CONFIG_GENERIC_LOCKBREAK || CONFIG_DEBUG_LOCK_ALLOC */
+
+static inline void __spin_unlock(spinlock_t *lock)
+{
+ spin_release(&lock->dep_map, 1, _RET_IP_);
+ _raw_spin_unlock(lock);
+ preempt_enable();
+}
+
+static inline void __write_unlock(rwlock_t *lock)
+{
+ rwlock_release(&lock->dep_map, 1, _RET_IP_);
+ _raw_write_unlock(lock);
+ preempt_enable();
+}
+
+static inline void __read_unlock(rwlock_t *lock)
+{
+ rwlock_release(&lock->dep_map, 1, _RET_IP_);
+ _raw_read_unlock(lock);
+ preempt_enable();
+}
+
+static inline void __spin_unlock_irqrestore(spinlock_t *lock,
+ unsigned long flags)
+{
+ spin_release(&lock->dep_map, 1, _RET_IP_);
+ _raw_spin_unlock(lock);
+ local_irq_restore(flags);
+ preempt_enable();
+}
+
+static inline void __spin_unlock_irq(spinlock_t *lock)
+{
+ spin_release(&lock->dep_map, 1, _RET_IP_);
+ _raw_spin_unlock(lock);
+ local_irq_enable();
+ preempt_enable();
+}
+
+static inline void __spin_unlock_bh(spinlock_t *lock)
+{
+ spin_release(&lock->dep_map, 1, _RET_IP_);
+ _raw_spin_unlock(lock);
+ preempt_enable_no_resched();
+ local_bh_enable_ip((unsigned long)__builtin_return_address(0));
+}
+
+static inline void __read_unlock_irqrestore(rwlock_t *lock, unsigned long flags)
+{
+ rwlock_release(&lock->dep_map, 1, _RET_IP_);
+ _raw_read_unlock(lock);
+ local_irq_restore(flags);
+ preempt_enable();
+}
+
+static inline void __read_unlock_irq(rwlock_t *lock)
+{
+ rwlock_release(&lock->dep_map, 1, _RET_IP_);
+ _raw_read_unlock(lock);
+ local_irq_enable();
+ preempt_enable();
+}
+
+static inline void __read_unlock_bh(rwlock_t *lock)
+{
+ rwlock_release(&lock->dep_map, 1, _RET_IP_);
+ _raw_read_unlock(lock);
+ preempt_enable_no_resched();
+ local_bh_enable_ip((unsigned long)__builtin_return_address(0));
+}
+
+static inline void __write_unlock_irqrestore(rwlock_t *lock,
+ unsigned long flags)
+{
+ rwlock_release(&lock->dep_map, 1, _RET_IP_);
+ _raw_write_unlock(lock);
+ local_irq_restore(flags);
+ preempt_enable();
+}
+
+static inline void __write_unlock_irq(rwlock_t *lock)
+{
+ rwlock_release(&lock->dep_map, 1, _RET_IP_);
+ _raw_write_unlock(lock);
+ local_irq_enable();
+ preempt_enable();
+}
+
+static inline void __write_unlock_bh(rwlock_t *lock)
+{
+ rwlock_release(&lock->dep_map, 1, _RET_IP_);
+ _raw_write_unlock(lock);
+ preempt_enable_no_resched();
+ local_bh_enable_ip((unsigned long)__builtin_return_address(0));
+}
+
+static inline int __spin_trylock_bh(spinlock_t *lock)
+{
+ local_bh_disable();
+ preempt_disable();
+ if (_raw_spin_trylock(lock)) {
+ spin_acquire(&lock->dep_map, 0, 1, _RET_IP_);
+ return 1;
+ }
+ preempt_enable_no_resched();
+ local_bh_enable_ip((unsigned long)__builtin_return_address(0));
+ return 0;
+}
+
#endif /* __LINUX_SPINLOCK_API_SMP_H */
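
Note the ordering in the _bh unlock paths above: preempt_enable_no_resched() followed by local_bh_enable_ip(). Re-enabling bottom halves is itself a scheduling point, so the resched check is left to the bh-enable, and the caller's return address is passed so the enable is attributed to the real call site. A minimal sketch of the lock/unlock pairing these helpers back, with a hypothetical list shared with softirq context (assumes <linux/spinlock.h> and <linux/list.h>):

	static LIST_HEAD(pending);
	static DEFINE_SPINLOCK(pending_lock);

	static void queue_item(struct list_head *item)
	{
		spin_lock_bh(&pending_lock);	/* also masks local softirqs */
		list_add_tail(item, &pending);
		spin_unlock_bh(&pending_lock);	/* __spin_unlock_bh() above */
	}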
diff --git a/include/linux/sunrpc/cache.h b/include/linux/sunrpc/cache.h
index 2d8b211b932..6f52b4d7c44 100644
--- a/include/linux/sunrpc/cache.h
+++ b/include/linux/sunrpc/cache.h
@@ -59,6 +59,15 @@ struct cache_head {
#define CACHE_NEW_EXPIRY 120 /* keep new things pending confirmation for 120 seconds */
+struct cache_detail_procfs {
+ struct proc_dir_entry *proc_ent;
+ struct proc_dir_entry *flush_ent, *channel_ent, *content_ent;
+};
+
+struct cache_detail_pipefs {
+ struct dentry *dir;
+};
+
struct cache_detail {
struct module * owner;
int hash_size;
@@ -70,15 +79,17 @@ struct cache_detail {
char *name;
void (*cache_put)(struct kref *);
- void (*cache_request)(struct cache_detail *cd,
- struct cache_head *h,
- char **bpp, int *blen);
+ int (*cache_upcall)(struct cache_detail *,
+ struct cache_head *);
+
int (*cache_parse)(struct cache_detail *,
char *buf, int len);
int (*cache_show)(struct seq_file *m,
struct cache_detail *cd,
struct cache_head *h);
+ void (*warn_no_listener)(struct cache_detail *cd,
+ int has_died);
struct cache_head * (*alloc)(void);
int (*match)(struct cache_head *orig, struct cache_head *new);
@@ -96,13 +107,15 @@ struct cache_detail {
/* fields for communication over channel */
struct list_head queue;
- struct proc_dir_entry *proc_ent;
- struct proc_dir_entry *flush_ent, *channel_ent, *content_ent;
 	atomic_t		readers;		/* how many times is /channel open */
time_t last_close; /* if no readers, when did last close */
time_t last_warn; /* when we last warned about no readers */
- void (*warn_no_listener)(struct cache_detail *cd);
+
+ union {
+ struct cache_detail_procfs procfs;
+ struct cache_detail_pipefs pipefs;
+ } u;
};
@@ -127,6 +140,10 @@ struct cache_deferred_req {
};
+extern const struct file_operations cache_file_operations_pipefs;
+extern const struct file_operations content_file_operations_pipefs;
+extern const struct file_operations cache_flush_operations_pipefs;
+
extern struct cache_head *
sunrpc_cache_lookup(struct cache_detail *detail,
struct cache_head *key, int hash);
@@ -134,6 +151,13 @@ extern struct cache_head *
sunrpc_cache_update(struct cache_detail *detail,
struct cache_head *new, struct cache_head *old, int hash);
+extern int
+sunrpc_cache_pipe_upcall(struct cache_detail *detail, struct cache_head *h,
+ void (*cache_request)(struct cache_detail *,
+ struct cache_head *,
+ char **,
+ int *));
+
extern void cache_clean_deferred(void *owner);
@@ -171,6 +195,10 @@ extern void cache_purge(struct cache_detail *detail);
extern int cache_register(struct cache_detail *cd);
extern void cache_unregister(struct cache_detail *cd);
+extern int sunrpc_cache_register_pipefs(struct dentry *parent, const char *,
+ mode_t, struct cache_detail *);
+extern void sunrpc_cache_unregister_pipefs(struct cache_detail *);
+
extern void qword_add(char **bpp, int *lp, char *str);
extern void qword_addhex(char **bpp, int *lp, char *buf, int blen);
extern int qword_get(char **bpp, char *dest, int bufsize);
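
The cache_request formatter is replaced by the coarser cache_upcall hook, while the old formatting style survives via sunrpc_cache_pipe_upcall(), which takes the formatter as an argument. A hedged sketch of how a cache could bridge the two; my_cache_request, my_cache_upcall and my_cache are hypothetical names:

	/* Hypothetical formatter in the old cache_request style. */
	static void my_cache_request(struct cache_detail *cd,
				     struct cache_head *h,
				     char **bpp, int *blen)
	{
		/* format the upcall for this entry into *bpp here */
	}

	static int my_cache_upcall(struct cache_detail *cd,
				   struct cache_head *h)
	{
		/* delegate channel queueing to the generic helper */
		return sunrpc_cache_pipe_upcall(cd, h, my_cache_request);
	}

	static struct cache_detail my_cache = {
		/* ...hash table, name, match/alloc callbacks... */
		.cache_upcall	= my_cache_upcall,
	};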
diff --git a/include/linux/sunrpc/clnt.h b/include/linux/sunrpc/clnt.h
index 37881f1a0bd..ab3f6e90caa 100644
--- a/include/linux/sunrpc/clnt.h
+++ b/include/linux/sunrpc/clnt.h
@@ -9,6 +9,10 @@
#ifndef _LINUX_SUNRPC_CLNT_H
#define _LINUX_SUNRPC_CLNT_H
+#include <linux/socket.h>
+#include <linux/in.h>
+#include <linux/in6.h>
+
#include <linux/sunrpc/msg_prot.h>
#include <linux/sunrpc/sched.h>
#include <linux/sunrpc/xprt.h>
@@ -17,6 +21,7 @@
#include <linux/sunrpc/xdr.h>
#include <linux/sunrpc/timer.h>
#include <asm/signal.h>
+#include <linux/path.h>
struct rpc_inode;
@@ -50,9 +55,7 @@ struct rpc_clnt {
int cl_nodelen; /* nodename length */
char cl_nodename[UNX_MAXNODENAME];
- char cl_pathname[30];/* Path in rpc_pipe_fs */
- struct vfsmount * cl_vfsmnt;
- struct dentry * cl_dentry; /* inode */
+ struct path cl_path;
struct rpc_clnt * cl_parent; /* Points to parent of clones */
struct rpc_rtt cl_rtt_default;
struct rpc_timeout cl_timeout_default;
@@ -151,5 +154,39 @@ void rpc_force_rebind(struct rpc_clnt *);
size_t rpc_peeraddr(struct rpc_clnt *, struct sockaddr *, size_t);
const char *rpc_peeraddr2str(struct rpc_clnt *, enum rpc_display_format_t);
+size_t rpc_ntop(const struct sockaddr *, char *, const size_t);
+size_t rpc_pton(const char *, const size_t,
+ struct sockaddr *, const size_t);
+char * rpc_sockaddr2uaddr(const struct sockaddr *);
+size_t rpc_uaddr2sockaddr(const char *, const size_t,
+ struct sockaddr *, const size_t);
+
+static inline unsigned short rpc_get_port(const struct sockaddr *sap)
+{
+ switch (sap->sa_family) {
+ case AF_INET:
+ return ntohs(((struct sockaddr_in *)sap)->sin_port);
+ case AF_INET6:
+ return ntohs(((struct sockaddr_in6 *)sap)->sin6_port);
+ }
+ return 0;
+}
+
+static inline void rpc_set_port(struct sockaddr *sap,
+ const unsigned short port)
+{
+ switch (sap->sa_family) {
+ case AF_INET:
+ ((struct sockaddr_in *)sap)->sin_port = htons(port);
+ break;
+ case AF_INET6:
+ ((struct sockaddr_in6 *)sap)->sin6_port = htons(port);
+ break;
+ }
+}
+
+#define IPV6_SCOPE_DELIMITER '%'
+#define IPV6_SCOPE_ID_LEN sizeof("%nnnnnnnnnn")
+
#endif /* __KERNEL__ */
#endif /* _LINUX_SUNRPC_CLNT_H */
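
rpc_get_port() and rpc_set_port() hide the AF_INET/AF_INET6 split behind a single sockaddr interface; an unknown family reads back as port 0, and a set on one is silently ignored. A small illustrative usage (addresses and ports are examples only):

	struct sockaddr_in sin = {
		.sin_family	= AF_INET,
		.sin_port	= htons(2049),		/* e.g. NFS */
	};
	struct sockaddr *sap = (struct sockaddr *)&sin;

	unsigned short port = rpc_get_port(sap);	/* 2049 */
	rpc_set_port(sap, 111);				/* rebind, e.g. rpcbind */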
diff --git a/include/linux/sunrpc/msg_prot.h b/include/linux/sunrpc/msg_prot.h
index 70df4f1d884..77e62488339 100644
--- a/include/linux/sunrpc/msg_prot.h
+++ b/include/linux/sunrpc/msg_prot.h
@@ -189,7 +189,22 @@ typedef __be32 rpc_fraghdr;
* Additionally, the two alternative forms specified in Section 2.2 of
* [RFC2373] are also acceptable.
*/
-#define RPCBIND_MAXUADDRLEN (56u)
+
+#include <linux/inet.h>
+
+/* Maximum size of the port number part of a universal address */
+#define RPCBIND_MAXUADDRPLEN sizeof(".255.255")
+
+/* Maximum size of an IPv4 universal address */
+#define RPCBIND_MAXUADDR4LEN \
+ (INET_ADDRSTRLEN + RPCBIND_MAXUADDRPLEN)
+
+/* Maximum size of an IPv6 universal address */
+#define RPCBIND_MAXUADDR6LEN \
+ (INET6_ADDRSTRLEN + RPCBIND_MAXUADDRPLEN)
+
+/* Assume INET6_ADDRSTRLEN will always be larger than INET_ADDRSTRLEN... */
+#define RPCBIND_MAXUADDRLEN RPCBIND_MAXUADDR6LEN
#endif /* __KERNEL__ */
#endif /* _LINUX_SUNRPC_MSGPROT_H_ */
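
A universal address appends the port as two extra dotted decimal octets after the textual address, which is where the ".255.255" bound behind RPCBIND_MAXUADDRPLEN comes from. For example (values illustrative):

	/* port 2049 = 0x0801 -> high octet 8, low octet 1 */
	const char *uaddr4 = "192.0.2.10.8.1";	/* 192.0.2.10, port 2049 */
	const char *uaddr6 = "::1.0.111";	/* ::1, port 111 (0.111) */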
diff --git a/include/linux/sunrpc/rpc_pipe_fs.h b/include/linux/sunrpc/rpc_pipe_fs.h
index cea764c2359..cf14db975da 100644
--- a/include/linux/sunrpc/rpc_pipe_fs.h
+++ b/include/linux/sunrpc/rpc_pipe_fs.h
@@ -3,6 +3,8 @@
#ifdef __KERNEL__
+#include <linux/workqueue.h>
+
struct rpc_pipe_msg {
struct list_head list;
void *data;
@@ -32,8 +34,8 @@ struct rpc_inode {
wait_queue_head_t waitq;
#define RPC_PIPE_WAIT_FOR_OPEN 1
int flags;
- struct rpc_pipe_ops *ops;
struct delayed_work queue_timeout;
+ const struct rpc_pipe_ops *ops;
};
static inline struct rpc_inode *
@@ -44,9 +46,19 @@ RPC_I(struct inode *inode)
extern int rpc_queue_upcall(struct inode *, struct rpc_pipe_msg *);
-extern struct dentry *rpc_mkdir(char *, struct rpc_clnt *);
-extern int rpc_rmdir(struct dentry *);
-extern struct dentry *rpc_mkpipe(struct dentry *, const char *, void *, struct rpc_pipe_ops *, int flags);
+struct rpc_clnt;
+extern struct dentry *rpc_create_client_dir(struct dentry *, struct qstr *, struct rpc_clnt *);
+extern int rpc_remove_client_dir(struct dentry *);
+
+struct cache_detail;
+extern struct dentry *rpc_create_cache_dir(struct dentry *,
+ struct qstr *,
+ mode_t umode,
+ struct cache_detail *);
+extern void rpc_remove_cache_dir(struct dentry *);
+
+extern struct dentry *rpc_mkpipe(struct dentry *, const char *, void *,
+ const struct rpc_pipe_ops *, int flags);
extern int rpc_unlink(struct dentry *);
extern struct vfsmount *rpc_get_mount(void);
extern void rpc_put_mount(void);
diff --git a/include/linux/sunrpc/xdr.h b/include/linux/sunrpc/xdr.h
index b99c625fddf..7da466ba4b0 100644
--- a/include/linux/sunrpc/xdr.h
+++ b/include/linux/sunrpc/xdr.h
@@ -117,17 +117,15 @@ static inline __be32 *xdr_encode_array(__be32 *p, const void *s, unsigned int le
static inline __be32 *
xdr_encode_hyper(__be32 *p, __u64 val)
{
- *p++ = htonl(val >> 32);
- *p++ = htonl(val & 0xFFFFFFFF);
- return p;
+ *(__be64 *)p = cpu_to_be64(val);
+ return p + 2;
}
static inline __be32 *
xdr_decode_hyper(__be32 *p, __u64 *valp)
{
- *valp = ((__u64) ntohl(*p++)) << 32;
- *valp |= ntohl(*p++);
- return p;
+ *valp = be64_to_cpup((__be64 *)p);
+ return p + 2;
}
/*
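
The rewritten hyper helpers treat the two adjacent 32-bit XDR words as a single big-endian 64-bit quantity, assuming the buffer is laid out as XDR requires (two contiguous __be32 slots) and suitably aligned for the __be64 cast. A round-trip sketch:

	__be32 buf[2];
	__u64 val = 0x0123456789abcdefULL, out;

	xdr_encode_hyper(buf, val);	/* a single cpu_to_be64() store */
	xdr_decode_hyper(buf, &out);	/* out == val again */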
diff --git a/include/linux/sunrpc/xprt.h b/include/linux/sunrpc/xprt.h
index 1175d58efc2..c090df44257 100644
--- a/include/linux/sunrpc/xprt.h
+++ b/include/linux/sunrpc/xprt.h
@@ -38,10 +38,8 @@ enum rpc_display_format_t {
RPC_DISPLAY_ADDR = 0,
RPC_DISPLAY_PORT,
RPC_DISPLAY_PROTO,
- RPC_DISPLAY_ALL,
RPC_DISPLAY_HEX_ADDR,
RPC_DISPLAY_HEX_PORT,
- RPC_DISPLAY_UNIVERSAL_ADDR,
RPC_DISPLAY_NETID,
RPC_DISPLAY_MAX,
};
diff --git a/include/linux/swiotlb.h b/include/linux/swiotlb.h
index cb1a6631b8f..73b1f1cec42 100644
--- a/include/linux/swiotlb.h
+++ b/include/linux/swiotlb.h
@@ -14,7 +14,6 @@ struct scatterlist;
*/
#define IO_TLB_SEGSIZE 128
-
/*
* log of the size of each IO TLB slab. The number of slabs is command line
* controllable.
@@ -24,16 +23,6 @@ struct scatterlist;
extern void
swiotlb_init(void);
-extern void *swiotlb_alloc_boot(size_t bytes, unsigned long nslabs);
-extern void *swiotlb_alloc(unsigned order, unsigned long nslabs);
-
-extern dma_addr_t swiotlb_phys_to_bus(struct device *hwdev,
- phys_addr_t address);
-extern phys_addr_t swiotlb_bus_to_phys(struct device *hwdev,
- dma_addr_t address);
-
-extern int swiotlb_arch_range_needs_mapping(phys_addr_t paddr, size_t size);
-
extern void
*swiotlb_alloc_coherent(struct device *hwdev, size_t size,
dma_addr_t *dma_handle, gfp_t flags);
diff --git a/include/linux/syscalls.h b/include/linux/syscalls.h
index 80de7003d8c..a8e37821cc6 100644
--- a/include/linux/syscalls.h
+++ b/include/linux/syscalls.h
@@ -64,6 +64,7 @@ struct perf_counter_attr;
#include <linux/sem.h>
#include <asm/siginfo.h>
#include <asm/signal.h>
+#include <linux/unistd.h>
#include <linux/quota.h>
#include <linux/key.h>
#include <trace/syscall.h>
@@ -97,6 +98,53 @@ struct perf_counter_attr;
#define __SC_TEST5(t5, a5, ...) __SC_TEST(t5); __SC_TEST4(__VA_ARGS__)
#define __SC_TEST6(t6, a6, ...) __SC_TEST(t6); __SC_TEST5(__VA_ARGS__)
+#ifdef CONFIG_EVENT_PROFILE
+#define TRACE_SYS_ENTER_PROFILE(sname) \
+static int prof_sysenter_enable_##sname(struct ftrace_event_call *event_call) \
+{ \
+ int ret = 0; \
+ if (!atomic_inc_return(&event_enter_##sname.profile_count)) \
+ ret = reg_prof_syscall_enter("sys"#sname); \
+ return ret; \
+} \
+ \
+static void prof_sysenter_disable_##sname(struct ftrace_event_call *event_call)\
+{ \
+ if (atomic_add_negative(-1, &event_enter_##sname.profile_count)) \
+ unreg_prof_syscall_enter("sys"#sname); \
+}
+
+#define TRACE_SYS_EXIT_PROFILE(sname) \
+static int prof_sysexit_enable_##sname(struct ftrace_event_call *event_call) \
+{ \
+ int ret = 0; \
+ if (!atomic_inc_return(&event_exit_##sname.profile_count)) \
+ ret = reg_prof_syscall_exit("sys"#sname); \
+ return ret; \
+} \
+ \
+static void prof_sysexit_disable_##sname(struct ftrace_event_call *event_call) \
+{ \
+ if (atomic_add_negative(-1, &event_exit_##sname.profile_count)) \
+ unreg_prof_syscall_exit("sys"#sname); \
+}
+
+#define TRACE_SYS_ENTER_PROFILE_INIT(sname) \
+ .profile_count = ATOMIC_INIT(-1), \
+ .profile_enable = prof_sysenter_enable_##sname, \
+ .profile_disable = prof_sysenter_disable_##sname,
+
+#define TRACE_SYS_EXIT_PROFILE_INIT(sname) \
+ .profile_count = ATOMIC_INIT(-1), \
+ .profile_enable = prof_sysexit_enable_##sname, \
+ .profile_disable = prof_sysexit_disable_##sname,
+#else
+#define TRACE_SYS_ENTER_PROFILE(sname)
+#define TRACE_SYS_ENTER_PROFILE_INIT(sname)
+#define TRACE_SYS_EXIT_PROFILE(sname)
+#define TRACE_SYS_EXIT_PROFILE_INIT(sname)
+#endif
+
#ifdef CONFIG_FTRACE_SYSCALLS
#define __SC_STR_ADECL1(t, a) #a
#define __SC_STR_ADECL2(t, a, ...) #a, __SC_STR_ADECL1(__VA_ARGS__)
@@ -112,7 +160,81 @@ struct perf_counter_attr;
#define __SC_STR_TDECL5(t, a, ...) #t, __SC_STR_TDECL4(__VA_ARGS__)
#define __SC_STR_TDECL6(t, a, ...) #t, __SC_STR_TDECL5(__VA_ARGS__)
+#define SYSCALL_TRACE_ENTER_EVENT(sname) \
+ static struct ftrace_event_call event_enter_##sname; \
+ struct trace_event enter_syscall_print_##sname = { \
+ .trace = print_syscall_enter, \
+ }; \
+ static int init_enter_##sname(void) \
+ { \
+ int num, id; \
+ num = syscall_name_to_nr("sys"#sname); \
+ if (num < 0) \
+ return -ENOSYS; \
+ id = register_ftrace_event(&enter_syscall_print_##sname);\
+ if (!id) \
+ return -ENODEV; \
+ event_enter_##sname.id = id; \
+ set_syscall_enter_id(num, id); \
+ INIT_LIST_HEAD(&event_enter_##sname.fields); \
+ return 0; \
+ } \
+ TRACE_SYS_ENTER_PROFILE(sname); \
+ static struct ftrace_event_call __used \
+ __attribute__((__aligned__(4))) \
+ __attribute__((section("_ftrace_events"))) \
+ event_enter_##sname = { \
+ .name = "sys_enter"#sname, \
+ .system = "syscalls", \
+ .event = &event_syscall_enter, \
+ .raw_init = init_enter_##sname, \
+ .show_format = syscall_enter_format, \
+ .define_fields = syscall_enter_define_fields, \
+ .regfunc = reg_event_syscall_enter, \
+ .unregfunc = unreg_event_syscall_enter, \
+ .data = "sys"#sname, \
+ TRACE_SYS_ENTER_PROFILE_INIT(sname) \
+ }
+
+#define SYSCALL_TRACE_EXIT_EVENT(sname) \
+ static struct ftrace_event_call event_exit_##sname; \
+ struct trace_event exit_syscall_print_##sname = { \
+ .trace = print_syscall_exit, \
+ }; \
+ static int init_exit_##sname(void) \
+ { \
+ int num, id; \
+ num = syscall_name_to_nr("sys"#sname); \
+ if (num < 0) \
+ return -ENOSYS; \
+ id = register_ftrace_event(&exit_syscall_print_##sname);\
+ if (!id) \
+ return -ENODEV; \
+ event_exit_##sname.id = id; \
+ set_syscall_exit_id(num, id); \
+ INIT_LIST_HEAD(&event_exit_##sname.fields); \
+ return 0; \
+ } \
+ TRACE_SYS_EXIT_PROFILE(sname); \
+ static struct ftrace_event_call __used \
+ __attribute__((__aligned__(4))) \
+ __attribute__((section("_ftrace_events"))) \
+ event_exit_##sname = { \
+ .name = "sys_exit"#sname, \
+ .system = "syscalls", \
+ .event = &event_syscall_exit, \
+ .raw_init = init_exit_##sname, \
+ .show_format = syscall_exit_format, \
+ .define_fields = syscall_exit_define_fields, \
+ .regfunc = reg_event_syscall_exit, \
+ .unregfunc = unreg_event_syscall_exit, \
+ .data = "sys"#sname, \
+ TRACE_SYS_EXIT_PROFILE_INIT(sname) \
+ }
+
#define SYSCALL_METADATA(sname, nb) \
+ SYSCALL_TRACE_ENTER_EVENT(sname); \
+ SYSCALL_TRACE_EXIT_EVENT(sname); \
static const struct syscall_metadata __used \
__attribute__((__aligned__(4))) \
__attribute__((section("__syscalls_metadata"))) \
@@ -121,18 +243,23 @@ struct perf_counter_attr;
.nb_args = nb, \
.types = types_##sname, \
.args = args_##sname, \
- }
+ .enter_event = &event_enter_##sname, \
+ .exit_event = &event_exit_##sname, \
+ };
#define SYSCALL_DEFINE0(sname) \
+ SYSCALL_TRACE_ENTER_EVENT(_##sname); \
+ SYSCALL_TRACE_EXIT_EVENT(_##sname); \
static const struct syscall_metadata __used \
__attribute__((__aligned__(4))) \
__attribute__((section("__syscalls_metadata"))) \
__syscall_meta_##sname = { \
.name = "sys_"#sname, \
.nb_args = 0, \
+ .enter_event = &event_enter__##sname, \
+ .exit_event = &event_exit__##sname, \
}; \
asmlinkage long sys_##sname(void)
-
#else
#define SYSCALL_DEFINE0(name) asmlinkage long sys_##name(void)
#endif
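
With these macros in place, every traced SYSCALL_DEFINEn() site also emits a pair of ftrace event descriptors into the _ftrace_events section. Roughly, and only as a hedged illustration, SYSCALL_DEFINE0(getpid) would expand along these lines:

	/*
	 * SYSCALL_TRACE_ENTER_EVENT(_getpid);	-> event_enter__getpid
	 * SYSCALL_TRACE_EXIT_EVENT(_getpid);	-> event_exit__getpid
	 * static const struct syscall_metadata __syscall_meta_getpid = {
	 *	.name		= "sys_getpid",
	 *	.nb_args	= 0,
	 *	.enter_event	= &event_enter__getpid,
	 *	.exit_event	= &event_exit__getpid,
	 * };
	 * asmlinkage long sys_getpid(void)
	 */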
diff --git a/include/linux/topology.h b/include/linux/topology.h
index 7402c1a27c4..85e8cf7d393 100644
--- a/include/linux/topology.h
+++ b/include/linux/topology.h
@@ -85,20 +85,29 @@ int arch_update_cpu_topology(void);
#define ARCH_HAS_SCHED_WAKE_IDLE
/* Common values for SMT siblings */
#ifndef SD_SIBLING_INIT
-#define SD_SIBLING_INIT (struct sched_domain) { \
- .min_interval = 1, \
- .max_interval = 2, \
- .busy_factor = 64, \
- .imbalance_pct = 110, \
- .flags = SD_LOAD_BALANCE \
- | SD_BALANCE_NEWIDLE \
- | SD_BALANCE_FORK \
- | SD_BALANCE_EXEC \
- | SD_WAKE_AFFINE \
- | SD_WAKE_BALANCE \
- | SD_SHARE_CPUPOWER, \
- .last_balance = jiffies, \
- .balance_interval = 1, \
+#define SD_SIBLING_INIT (struct sched_domain) { \
+ .min_interval = 1, \
+ .max_interval = 2, \
+ .busy_factor = 64, \
+ .imbalance_pct = 110, \
+ \
+ .flags = 1*SD_LOAD_BALANCE \
+ | 1*SD_BALANCE_NEWIDLE \
+ | 1*SD_BALANCE_EXEC \
+ | 1*SD_BALANCE_FORK \
+ | 0*SD_WAKE_IDLE \
+ | 1*SD_WAKE_AFFINE \
+ | 1*SD_WAKE_BALANCE \
+ | 1*SD_SHARE_CPUPOWER \
+ | 0*SD_POWERSAVINGS_BALANCE \
+ | 0*SD_SHARE_PKG_RESOURCES \
+ | 0*SD_SERIALIZE \
+ | 0*SD_WAKE_IDLE_FAR \
+ | 0*SD_PREFER_SIBLING \
+ , \
+ .last_balance = jiffies, \
+ .balance_interval = 1, \
+ .smt_gain = 1178, /* 15% */ \
}
#endif
#endif /* CONFIG_SCHED_SMT */
@@ -106,69 +115,94 @@ int arch_update_cpu_topology(void);
#ifdef CONFIG_SCHED_MC
 /* Common values for MC siblings. For now, mostly derived from SD_CPU_INIT */
#ifndef SD_MC_INIT
-#define SD_MC_INIT (struct sched_domain) { \
- .min_interval = 1, \
- .max_interval = 4, \
- .busy_factor = 64, \
- .imbalance_pct = 125, \
- .cache_nice_tries = 1, \
- .busy_idx = 2, \
- .wake_idx = 1, \
- .forkexec_idx = 1, \
- .flags = SD_LOAD_BALANCE \
- | SD_BALANCE_FORK \
- | SD_BALANCE_EXEC \
- | SD_WAKE_AFFINE \
- | SD_WAKE_BALANCE \
- | SD_SHARE_PKG_RESOURCES\
- | sd_balance_for_mc_power()\
- | sd_power_saving_flags(),\
- .last_balance = jiffies, \
- .balance_interval = 1, \
+#define SD_MC_INIT (struct sched_domain) { \
+ .min_interval = 1, \
+ .max_interval = 4, \
+ .busy_factor = 64, \
+ .imbalance_pct = 125, \
+ .cache_nice_tries = 1, \
+ .busy_idx = 2, \
+ .wake_idx = 1, \
+ .forkexec_idx = 1, \
+ \
+ .flags = 1*SD_LOAD_BALANCE \
+ | 1*SD_BALANCE_NEWIDLE \
+ | 1*SD_BALANCE_EXEC \
+ | 1*SD_BALANCE_FORK \
+ | 1*SD_WAKE_IDLE \
+ | 1*SD_WAKE_AFFINE \
+ | 1*SD_WAKE_BALANCE \
+ | 0*SD_SHARE_CPUPOWER \
+ | 1*SD_SHARE_PKG_RESOURCES \
+ | 0*SD_SERIALIZE \
+ | 0*SD_WAKE_IDLE_FAR \
+ | sd_balance_for_mc_power() \
+ | sd_power_saving_flags() \
+ , \
+ .last_balance = jiffies, \
+ .balance_interval = 1, \
}
#endif
#endif /* CONFIG_SCHED_MC */
/* Common values for CPUs */
#ifndef SD_CPU_INIT
-#define SD_CPU_INIT (struct sched_domain) { \
- .min_interval = 1, \
- .max_interval = 4, \
- .busy_factor = 64, \
- .imbalance_pct = 125, \
- .cache_nice_tries = 1, \
- .busy_idx = 2, \
- .idle_idx = 1, \
- .newidle_idx = 2, \
- .wake_idx = 1, \
- .forkexec_idx = 1, \
- .flags = SD_LOAD_BALANCE \
- | SD_BALANCE_EXEC \
- | SD_BALANCE_FORK \
- | SD_WAKE_AFFINE \
- | SD_WAKE_BALANCE \
- | sd_balance_for_package_power()\
- | sd_power_saving_flags(),\
- .last_balance = jiffies, \
- .balance_interval = 1, \
+#define SD_CPU_INIT (struct sched_domain) { \
+ .min_interval = 1, \
+ .max_interval = 4, \
+ .busy_factor = 64, \
+ .imbalance_pct = 125, \
+ .cache_nice_tries = 1, \
+ .busy_idx = 2, \
+ .idle_idx = 1, \
+ .newidle_idx = 2, \
+ .wake_idx = 1, \
+ .forkexec_idx = 1, \
+ \
+ .flags = 1*SD_LOAD_BALANCE \
+ | 1*SD_BALANCE_NEWIDLE \
+ | 1*SD_BALANCE_EXEC \
+ | 1*SD_BALANCE_FORK \
+ | 1*SD_WAKE_IDLE \
+ | 0*SD_WAKE_AFFINE \
+ | 1*SD_WAKE_BALANCE \
+ | 0*SD_SHARE_CPUPOWER \
+ | 0*SD_SHARE_PKG_RESOURCES \
+ | 0*SD_SERIALIZE \
+ | 0*SD_WAKE_IDLE_FAR \
+ | sd_balance_for_package_power() \
+ | sd_power_saving_flags() \
+ , \
+ .last_balance = jiffies, \
+ .balance_interval = 1, \
}
#endif
/* sched_domains SD_ALLNODES_INIT for NUMA machines */
-#define SD_ALLNODES_INIT (struct sched_domain) { \
- .min_interval = 64, \
- .max_interval = 64*num_online_cpus(), \
- .busy_factor = 128, \
- .imbalance_pct = 133, \
- .cache_nice_tries = 1, \
- .busy_idx = 3, \
- .idle_idx = 3, \
- .flags = SD_LOAD_BALANCE \
- | SD_BALANCE_NEWIDLE \
- | SD_WAKE_AFFINE \
- | SD_SERIALIZE, \
- .last_balance = jiffies, \
- .balance_interval = 64, \
+#define SD_ALLNODES_INIT (struct sched_domain) { \
+ .min_interval = 64, \
+ .max_interval = 64*num_online_cpus(), \
+ .busy_factor = 128, \
+ .imbalance_pct = 133, \
+ .cache_nice_tries = 1, \
+ .busy_idx = 3, \
+ .idle_idx = 3, \
+ .flags = 1*SD_LOAD_BALANCE \
+ | 1*SD_BALANCE_NEWIDLE \
+ | 0*SD_BALANCE_EXEC \
+ | 0*SD_BALANCE_FORK \
+ | 0*SD_WAKE_IDLE \
+ | 1*SD_WAKE_AFFINE \
+ | 0*SD_WAKE_BALANCE \
+ | 0*SD_SHARE_CPUPOWER \
+ | 0*SD_POWERSAVINGS_BALANCE \
+ | 0*SD_SHARE_PKG_RESOURCES \
+ | 1*SD_SERIALIZE \
+ | 1*SD_WAKE_IDLE_FAR \
+ | 0*SD_PREFER_SIBLING \
+ , \
+ .last_balance = jiffies, \
+ .balance_interval = 64, \
}
#ifdef CONFIG_NUMA
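
The reworked initializers spell every flag with an explicit 0* or 1* multiplier instead of simply omitting cleared flags, so each domain level documents its complete flag policy and toggling a flag is a one-character change. The idiom relies on nothing more than 0*x == 0 and 1*x == x:

	/* illustrative: both masks are identical */
	unsigned int a = SD_LOAD_BALANCE | SD_SERIALIZE;
	unsigned int b = 1*SD_LOAD_BALANCE
		       | 0*SD_BALANCE_EXEC	/* explicitly off */
		       | 1*SD_SERIALIZE;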
diff --git a/include/linux/tracepoint.h b/include/linux/tracepoint.h
index b9dc4ca0246..63a3f7a8058 100644
--- a/include/linux/tracepoint.h
+++ b/include/linux/tracepoint.h
@@ -23,6 +23,8 @@ struct tracepoint;
struct tracepoint {
const char *name; /* Tracepoint name */
int state; /* State. */
+ void (*regfunc)(void);
+ void (*unregfunc)(void);
void **funcs;
} __attribute__((aligned(32))); /*
* Aligned on 32 bytes because it is
@@ -78,12 +80,16 @@ struct tracepoint {
return tracepoint_probe_unregister(#name, (void *)probe);\
}
-#define DEFINE_TRACE(name) \
+
+#define DEFINE_TRACE_FN(name, reg, unreg) \
static const char __tpstrtab_##name[] \
__attribute__((section("__tracepoints_strings"))) = #name; \
struct tracepoint __tracepoint_##name \
__attribute__((section("__tracepoints"), aligned(32))) = \
- { __tpstrtab_##name, 0, NULL }
+ { __tpstrtab_##name, 0, reg, unreg, NULL }
+
+#define DEFINE_TRACE(name) \
+ DEFINE_TRACE_FN(name, NULL, NULL);
#define EXPORT_TRACEPOINT_SYMBOL_GPL(name) \
EXPORT_SYMBOL_GPL(__tracepoint_##name)
@@ -108,6 +114,7 @@ extern void tracepoint_update_probe_range(struct tracepoint *begin,
return -ENOSYS; \
}
+#define DEFINE_TRACE_FN(name, reg, unreg)
#define DEFINE_TRACE(name)
#define EXPORT_TRACEPOINT_SYMBOL_GPL(name)
#define EXPORT_TRACEPOINT_SYMBOL(name)
@@ -158,6 +165,15 @@ static inline void tracepoint_synchronize_unregister(void)
#define PARAMS(args...) args
+#endif /* _LINUX_TRACEPOINT_H */
+
+/*
+ * Note: we keep the TRACE_EVENT outside the include file ifdef protection.
+ * This is due to the way trace events work. If a file includes two
+ * trace event headers under one "CREATE_TRACE_POINTS", the first include
+ * will override the TRACE_EVENT and break the second include.
+ */
+
#ifndef TRACE_EVENT
/*
* For use with the TRACE_EVENT macro:
@@ -259,10 +275,15 @@ static inline void tracepoint_synchronize_unregister(void)
* can also by used by generic instrumentation like SystemTap), and
* it is also used to expose a structured trace record in
* /sys/kernel/debug/tracing/events/.
+ *
+ * A set of (un)registration functions can be passed to the variant
+ * TRACE_EVENT_FN to perform any (un)registration work.
*/
#define TRACE_EVENT(name, proto, args, struct, assign, print) \
DECLARE_TRACE(name, PARAMS(proto), PARAMS(args))
-#endif
+#define TRACE_EVENT_FN(name, proto, args, struct, \
+ assign, print, reg, unreg) \
+ DECLARE_TRACE(name, PARAMS(proto), PARAMS(args))
-#endif
+#endif /* ifdef TRACE_EVENT (see note above) */
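
TRACE_EVENT_FN lets an event supply hooks that run when its first probe registers and its last probe unregisters (the syscall events above are the first user). A hedged sketch of the intended shape in an events header; the event name and reg/unreg pair are hypothetical:

	static void my_event_reg(void)   { /* arm supporting machinery */ }
	static void my_event_unreg(void) { /* and disarm it again */ }

	TRACE_EVENT_FN(my_event,
		TP_PROTO(int value),
		TP_ARGS(value),
		TP_STRUCT__entry(__field(int, value)),
		TP_fast_assign(__entry->value = value;),
		TP_printk("value=%d", __entry->value),
		my_event_reg, my_event_unreg
	);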
diff --git a/include/linux/tty.h b/include/linux/tty.h
index e8c6c9136c9..0d3974f59c5 100644
--- a/include/linux/tty.h
+++ b/include/linux/tty.h
@@ -23,7 +23,7 @@
*/
#define NR_UNIX98_PTY_DEFAULT 4096 /* Default maximum for Unix98 ptys */
#define NR_UNIX98_PTY_MAX (1 << MINORBITS) /* Absolute limit */
-#define NR_LDISCS 19
+#define NR_LDISCS 20
/* line disciplines */
#define N_TTY 0
@@ -47,6 +47,8 @@
#define N_SLCAN 17 /* Serial / USB serial CAN Adaptors */
#define N_PPS 18 /* Pulse per Second */
+#define N_V253 19 /* Codec control over voice modem */
+
/*
* This character is the same as _POSIX_VDISABLE: it cannot be used as
* a c_cc[] character, but indicates that a particular special character
diff --git a/include/linux/workqueue.h b/include/linux/workqueue.h
index 13e1adf55c4..6273fa97b52 100644
--- a/include/linux/workqueue.h
+++ b/include/linux/workqueue.h
@@ -240,6 +240,21 @@ static inline int cancel_delayed_work(struct delayed_work *work)
return ret;
}
+/*
+ * Like cancel_delayed_work() above, but uses del_timer() instead of
+ * del_timer_sync().  This means that if it returns 0, the timer function
+ * may still be running and the queueing may still be in progress.
+ */
+static inline int __cancel_delayed_work(struct delayed_work *work)
+{
+ int ret;
+
+ ret = del_timer(&work->timer);
+ if (ret)
+ work_clear_pending(&work->work);
+ return ret;
+}
+
extern int cancel_delayed_work_sync(struct delayed_work *work);
 /* Obsolete. Use cancel_delayed_work_sync() */
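
__cancel_delayed_work() trades del_timer_sync()'s guarantee for callability in contexts where the sync variant would deadlock, e.g. under a lock that the work function itself takes. A hedged usage sketch; struct my_dev and its fields are hypothetical:

	struct my_dev {
		spinlock_t		lock;
		bool			stopped;
		struct delayed_work	poll_work;
	};

	static void stop_poll(struct my_dev *dev)
	{
		spin_lock_bh(&dev->lock);
		dev->stopped = true;
		/*
		 * Non-sync cancel: safe under dev->lock even though the
		 * work function also takes it.  On a 0 return the work
		 * may still run once, so it must recheck dev->stopped.
		 */
		__cancel_delayed_work(&dev->poll_work);
		spin_unlock_bh(&dev->lock);
	}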
diff --git a/include/linux/writeback.h b/include/linux/writeback.h
index 3224820c851..78b1e4684cc 100644
--- a/include/linux/writeback.h
+++ b/include/linux/writeback.h
@@ -14,17 +14,6 @@ extern struct list_head inode_in_use;
extern struct list_head inode_unused;
/*
- * Yes, writeback.h requires sched.h
- * No, sched.h is not included from here.
- */
-static inline int task_is_pdflush(struct task_struct *task)
-{
- return task->flags & PF_FLUSHER;
-}
-
-#define current_is_pdflush() task_is_pdflush(current)
-
-/*
* fs/fs-writeback.c
*/
enum writeback_sync_modes {
@@ -40,6 +29,8 @@ enum writeback_sync_modes {
struct writeback_control {
struct backing_dev_info *bdi; /* If !NULL, only write back this
queue */
+ struct super_block *sb; /* if !NULL, only write inodes from
+ this super_block */
enum writeback_sync_modes sync_mode;
unsigned long *older_than_this; /* If !NULL, only write back inodes
older than this */
@@ -76,9 +67,13 @@ struct writeback_control {
/*
* fs/fs-writeback.c
*/
-void writeback_inodes(struct writeback_control *wbc);
+struct bdi_writeback;
int inode_wait(void *);
-void sync_inodes_sb(struct super_block *, int wait);
+long writeback_inodes_sb(struct super_block *);
+long sync_inodes_sb(struct super_block *);
+void writeback_inodes_wbc(struct writeback_control *wbc);
+long wb_do_writeback(struct bdi_writeback *wb, int force_wait);
+void wakeup_flusher_threads(long nr_pages);
/* writeback.h requires fs.h; it, too, is not included from here. */
static inline void wait_on_inode(struct inode *inode)
@@ -98,7 +93,6 @@ static inline void inode_sync_wait(struct inode *inode)
/*
* mm/page-writeback.c
*/
-int wakeup_pdflush(long nr_pages);
void laptop_io_completion(void);
void laptop_sync_completion(void);
void throttle_vm_writeout(gfp_t gfp_mask);
@@ -150,7 +144,6 @@ balance_dirty_pages_ratelimited(struct address_space *mapping)
typedef int (*writepage_t)(struct page *page, struct writeback_control *wbc,
void *data);
-int pdflush_operation(void (*fn)(unsigned long), unsigned long arg0);
int generic_writepages(struct address_space *mapping,
struct writeback_control *wbc);
int write_cache_pages(struct address_space *mapping,
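
The per-super-block entry points split writeback by intent: writeback_inodes_sb() kicks off background writeback and returns, while sync_inodes_sb() writes and waits, replacing the old sync_inodes_sb(sb, wait) flag argument. A hedged sketch of a filesystem's ->sync_fs-style caller (my_sync_fs is hypothetical):

	static int my_sync_fs(struct super_block *sb, int wait)
	{
		if (wait)
			sync_inodes_sb(sb);		/* write and wait */
		else
			writeback_inodes_sb(sb);	/* start writeback only */
		return 0;
	}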
diff --git a/include/linux/xattr.h b/include/linux/xattr.h
index d131e352cfe..5c84af8c5f6 100644
--- a/include/linux/xattr.h
+++ b/include/linux/xattr.h
@@ -49,6 +49,7 @@ struct xattr_handler {
ssize_t xattr_getsecurity(struct inode *, const char *, void *, size_t);
ssize_t vfs_getxattr(struct dentry *, const char *, void *, size_t);
ssize_t vfs_listxattr(struct dentry *d, char *list, size_t size);
+int __vfs_setxattr_noperm(struct dentry *, const char *, const void *, size_t, int);
int vfs_setxattr(struct dentry *, const char *, const void *, size_t, int);
int vfs_removexattr(struct dentry *, const char *);
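
__vfs_setxattr_noperm() is the permission-check-free core of vfs_setxattr(), for callers that have already made their own access decision. A hedged sketch of such a caller; the helper name and attribute are hypothetical, and it is assumed (as with vfs_setxattr()'s own use of the core) that the caller holds the inode mutex:

	/* Hypothetical LSM-style helper: the write is pre-authorized. */
	static int set_security_label(struct dentry *dentry,
				      const void *ctx, size_t len)
	{
		/* skips the permission checks that vfs_setxattr() performs */
		return __vfs_setxattr_noperm(dentry, "security.hypothetical",
					     ctx, len, 0);
	}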