Diffstat (limited to 'include')
-rw-r--r--  include/asm-generic/bitops/atomic.h | 10
-rw-r--r--  include/asm-generic/bug.h | 8
-rw-r--r--  include/asm-generic/mman-common.h | 5
-rw-r--r--  include/asm-generic/percpu.h | 5
-rw-r--r--  include/linux/atmel-mci.h | 4
-rw-r--r--  include/linux/cs5535.h | 172
-rw-r--r--  include/linux/ctype.h | 3
-rw-r--r--  include/linux/device-mapper.h | 8
-rw-r--r--  include/linux/dm-dirty-log.h | 6
-rw-r--r--  include/linux/dm-ioctl.h | 13
-rw-r--r--  include/linux/dm-region-hash.h | 3
-rw-r--r--  include/linux/dynamic_debug.h | 13
-rw-r--r--  include/linux/efi.h | 6
-rw-r--r--  include/linux/err.h | 5
-rw-r--r--  include/linux/hrtimer.h | 2
-rw-r--r--  include/linux/hugetlb.h | 6
-rw-r--r--  include/linux/hw_breakpoint.h | 2
-rw-r--r--  include/linux/i2c.h | 92
-rw-r--r--  include/linux/i2c/tps65010.h | 19
-rw-r--r--  include/linux/i2c/twl.h (renamed from include/linux/i2c/twl4030.h) | 209
-rw-r--r--  include/linux/init_task.h | 10
-rw-r--r--  include/linux/irq.h | 2
-rw-r--r--  include/linux/kallsyms.h | 12
-rw-r--r--  include/linux/kernel.h | 55
-rw-r--r--  include/linux/ksm.h | 88
-rw-r--r--  include/linux/lis3lv02d.h | 15
-rw-r--r--  include/linux/memory_hotplug.h | 1
-rw-r--r--  include/linux/mempolicy.h | 3
-rw-r--r--  include/linux/mfd/88pm8607.h | 217
-rw-r--r--  include/linux/mfd/ab4500.h | 262
-rw-r--r--  include/linux/mfd/adp5520.h | 299
-rw-r--r--  include/linux/mfd/ezx-pcap.h | 3
-rw-r--r--  include/linux/mfd/mc13783-private.h | 208
-rw-r--r--  include/linux/mfd/mc13783.h | 120
-rw-r--r--  include/linux/mfd/pcf50633/core.h | 17
-rw-r--r--  include/linux/mfd/pcf50633/mbc.h | 1
-rw-r--r--  include/linux/mfd/wm831x/core.h | 43
-rw-r--r--  include/linux/mfd/wm831x/pdata.h | 1
-rw-r--r--  include/linux/mfd/wm8350/core.h | 14
-rw-r--r--  include/linux/mfd/wm8350/gpio.h | 18
-rw-r--r--  include/linux/migrate.h | 8
-rw-r--r--  include/linux/mm.h | 25
-rw-r--r--  include/linux/nfs4.h | 3
-rw-r--r--  include/linux/nfs_fs_sb.h | 1
-rw-r--r--  include/linux/nfs_xdr.h | 13
-rw-r--r--  include/linux/node.h | 16
-rw-r--r--  include/linux/nodemask.h | 33
-rw-r--r--  include/linux/numa.h | 2
-rw-r--r--  include/linux/page-flags.h | 8
-rw-r--r--  include/linux/pci.h | 2
-rw-r--r--  include/linux/percpu-defs.h | 1
-rw-r--r--  include/linux/percpu.h | 434
-rw-r--r--  include/linux/perf_event.h | 2
-rw-r--r--  include/linux/plist.h | 43
-rw-r--r--  include/linux/pm.h | 2
-rw-r--r--  include/linux/raid/pq.h | 19
-rw-r--r--  include/linux/rmap.h | 43
-rw-r--r--  include/linux/rtmutex.h | 6
-rw-r--r--  include/linux/rwlock.h | 125
-rw-r--r--  include/linux/rwlock_api_smp.h | 282
-rw-r--r--  include/linux/rwlock_types.h | 56
-rw-r--r--  include/linux/rwsem-spinlock.h | 6
-rw-r--r--  include/linux/sched.h | 4
-rw-r--r--  include/linux/slab_def.h | 4
-rw-r--r--  include/linux/slub_def.h | 4
-rw-r--r--  include/linux/spi/mpc52xx_spi.h | 10
-rw-r--r--  include/linux/spi/sh_msiof.h | 10
-rw-r--r--  include/linux/spi/xilinx_spi.h | 20
-rw-r--r--  include/linux/spinlock.h | 377
-rw-r--r--  include/linux/spinlock_api_smp.h | 360
-rw-r--r--  include/linux/spinlock_api_up.h | 66
-rw-r--r--  include/linux/spinlock_types.h | 92
-rw-r--r--  include/linux/spinlock_types_up.h | 12
-rw-r--r--  include/linux/spinlock_up.h | 42
-rw-r--r--  include/linux/string.h | 10
-rw-r--r--  include/linux/sunrpc/sched.h | 2
-rw-r--r--  include/linux/swap.h | 67
-rw-r--r--  include/linux/tty.h | 2
-rw-r--r--  include/linux/vmstat.h | 12
-rw-r--r--  include/linux/vt.h | 15
-rw-r--r--  include/net/neighbour.h | 7
-rw-r--r--  include/net/netfilter/nf_conntrack.h | 4
-rw-r--r--  include/net/snmp.h | 50
-rw-r--r--  include/pcmcia/cs.h | 4
-rw-r--r--  include/pcmcia/ds.h | 6
-rw-r--r--  include/pcmcia/mem_op.h | 2
-rw-r--r--  include/pcmcia/ss.h | 12
87 files changed, 3141 insertions, 1163 deletions
diff --git a/include/asm-generic/bitops/atomic.h b/include/asm-generic/bitops/atomic.h
index c8946465e63..ecc44a8e2b4 100644
--- a/include/asm-generic/bitops/atomic.h
+++ b/include/asm-generic/bitops/atomic.h
@@ -15,19 +15,19 @@
# define ATOMIC_HASH_SIZE 4
# define ATOMIC_HASH(a) (&(__atomic_hash[ (((unsigned long) a)/L1_CACHE_BYTES) & (ATOMIC_HASH_SIZE-1) ]))
-extern raw_spinlock_t __atomic_hash[ATOMIC_HASH_SIZE] __lock_aligned;
+extern arch_spinlock_t __atomic_hash[ATOMIC_HASH_SIZE] __lock_aligned;
/* Can't use raw_spin_lock_irq because of #include problems, so
* this is the substitute */
#define _atomic_spin_lock_irqsave(l,f) do { \
- raw_spinlock_t *s = ATOMIC_HASH(l); \
+ arch_spinlock_t *s = ATOMIC_HASH(l); \
local_irq_save(f); \
- __raw_spin_lock(s); \
+ arch_spin_lock(s); \
} while(0)
#define _atomic_spin_unlock_irqrestore(l,f) do { \
- raw_spinlock_t *s = ATOMIC_HASH(l); \
- __raw_spin_unlock(s); \
+ arch_spinlock_t *s = ATOMIC_HASH(l); \
+ arch_spin_unlock(s); \
local_irq_restore(f); \
} while(0)
diff --git a/include/asm-generic/bug.h b/include/asm-generic/bug.h
index 4b6755984d2..18c435d7c08 100644
--- a/include/asm-generic/bug.h
+++ b/include/asm-generic/bug.h
@@ -113,22 +113,22 @@ extern void warn_slowpath_null(const char *file, const int line);
#endif
#define WARN_ON_ONCE(condition) ({ \
- static int __warned; \
+ static bool __warned; \
int __ret_warn_once = !!(condition); \
\
if (unlikely(__ret_warn_once)) \
if (WARN_ON(!__warned)) \
- __warned = 1; \
+ __warned = true; \
unlikely(__ret_warn_once); \
})
#define WARN_ONCE(condition, format...) ({ \
- static int __warned; \
+ static bool __warned; \
int __ret_warn_once = !!(condition); \
\
if (unlikely(__ret_warn_once)) \
if (WARN(!__warned, format)) \
- __warned = 1; \
+ __warned = true; \
unlikely(__ret_warn_once); \
})
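
The hunk above only changes the type of the one-shot guard flag; callers are unaffected. A minimal usage sketch of the once-only warning macros (illustrative only: struct my_dev, ring_full() and my_dev_submit() are stand-ins, not part of this diff):

#include <linux/kernel.h>
#include <linux/bug.h>
#include <linux/errno.h>

struct my_dev {
	unsigned int fw_ver;
	unsigned int ring_used, ring_size;
};

static bool ring_full(struct my_dev *dev)
{
	return dev->ring_used == dev->ring_size;
}

static int my_dev_submit(struct my_dev *dev)
{
	/* Warns (with backtrace) only the first time the ring overflows. */
	if (WARN_ON_ONCE(ring_full(dev)))
		return -EBUSY;

	/* Formatted warning, likewise emitted at most once. */
	WARN_ONCE(dev->fw_ver < 2,
		  "old firmware rev %u, expect reduced throughput\n", dev->fw_ver);
	return 0;
}
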
diff --git a/include/asm-generic/mman-common.h b/include/asm-generic/mman-common.h
index 5ee13b2fd22..20111265afd 100644
--- a/include/asm-generic/mman-common.h
+++ b/include/asm-generic/mman-common.h
@@ -19,6 +19,11 @@
#define MAP_TYPE 0x0f /* Mask for type of mapping */
#define MAP_FIXED 0x10 /* Interpret addr exactly */
#define MAP_ANONYMOUS 0x20 /* don't use a file */
+#ifdef CONFIG_MMAP_ALLOW_UNINITIALIZED
+# define MAP_UNINITIALIZED 0x4000000 /* For anonymous mmap, memory could be uninitialized */
+#else
+# define MAP_UNINITIALIZED 0x0 /* Don't support this flag */
+#endif
#define MS_ASYNC 1 /* sync memory asynchronously */
#define MS_INVALIDATE 2 /* invalidate the caches */
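
A hedged userspace sketch of the new flag: the fallback value below mirrors the asm-generic definition above, and the program simply ORs it into an anonymous mapping. On kernels built without CONFIG_MMAP_ALLOW_UNINITIALIZED the flag is 0, so the call degenerates to a normal zeroed mapping.

#include <sys/mman.h>
#include <stdio.h>

#ifndef MAP_UNINITIALIZED
# define MAP_UNINITIALIZED 0x4000000	/* same value as the header above */
#endif

int main(void)
{
	size_t len = 1 << 20;
	void *p = mmap(NULL, len, PROT_READ | PROT_WRITE,
		       MAP_PRIVATE | MAP_ANONYMOUS | MAP_UNINITIALIZED, -1, 0);

	if (p == MAP_FAILED) {
		perror("mmap");
		return 1;
	}
	/* Contents may be stale (non-zero) only on no-MMU kernels that enable
	 * CONFIG_MMAP_ALLOW_UNINITIALIZED; elsewhere the pages are zeroed. */
	munmap(p, len);
	return 0;
}
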
diff --git a/include/asm-generic/percpu.h b/include/asm-generic/percpu.h
index 90079c373f1..8087b90d467 100644
--- a/include/asm-generic/percpu.h
+++ b/include/asm-generic/percpu.h
@@ -56,6 +56,9 @@ extern unsigned long __per_cpu_offset[NR_CPUS];
#define __raw_get_cpu_var(var) \
(*SHIFT_PERCPU_PTR(&per_cpu_var(var), __my_cpu_offset))
+#define this_cpu_ptr(ptr) SHIFT_PERCPU_PTR(ptr, my_cpu_offset)
+#define __this_cpu_ptr(ptr) SHIFT_PERCPU_PTR(ptr, __my_cpu_offset)
+
#ifdef CONFIG_HAVE_SETUP_PER_CPU_AREA
extern void setup_per_cpu_areas(void);
@@ -66,6 +69,8 @@ extern void setup_per_cpu_areas(void);
#define per_cpu(var, cpu) (*((void)(cpu), &per_cpu_var(var)))
#define __get_cpu_var(var) per_cpu_var(var)
#define __raw_get_cpu_var(var) per_cpu_var(var)
+#define this_cpu_ptr(ptr) per_cpu_ptr(ptr, 0)
+#define __this_cpu_ptr(ptr) this_cpu_ptr(ptr)
#endif /* SMP */
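
A minimal kernel-side sketch of the new accessor (the per-cpu variable pkt_stats and account_rx() are illustrative, not from this patch):

#include <linux/percpu.h>
#include <linux/types.h>

struct pkt_stats {
	u64 rx;
	u64 tx;
};
static DEFINE_PER_CPU(struct pkt_stats, pkt_stats);

static void account_rx(void)
{
	struct pkt_stats *s;

	preempt_disable();
	s = this_cpu_ptr(&pkt_stats);	/* pointer into this CPU's copy */
	s->rx++;
	preempt_enable();
}
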
diff --git a/include/linux/atmel-mci.h b/include/linux/atmel-mci.h
index 57b1846a3c8..3e09b345f4d 100644
--- a/include/linux/atmel-mci.h
+++ b/include/linux/atmel-mci.h
@@ -3,8 +3,6 @@
#define ATMEL_MCI_MAX_NR_SLOTS 2
-#include <linux/dw_dmac.h>
-
/**
* struct mci_slot_pdata - board-specific per-slot configuration
* @bus_width: Number of data lines wired up the slot
@@ -34,7 +32,7 @@ struct mci_slot_pdata {
* @slot: Per-slot configuration data.
*/
struct mci_platform_data {
- struct dw_dma_slave dma_slave;
+ struct mci_dma_data *dma_slave;
struct mci_slot_pdata slot[ATMEL_MCI_MAX_NR_SLOTS];
};
diff --git a/include/linux/cs5535.h b/include/linux/cs5535.h
new file mode 100644
index 00000000000..d5a1d4810b8
--- /dev/null
+++ b/include/linux/cs5535.h
@@ -0,0 +1,172 @@
+/*
+ * AMD CS5535/CS5536 definitions
+ * Copyright (C) 2006 Advanced Micro Devices, Inc.
+ * Copyright (C) 2009 Andres Salomon <dilinger@collabora.co.uk>
+ *
+ * This program is free software; you can redistribute it and/or
+ * modify it under the terms of version 2 of the GNU General Public License
+ * as published by the Free Software Foundation.
+ */
+
+#ifndef _CS5535_H
+#define _CS5535_H
+
+/* MSRs */
+#define MSR_GLIU_P2D_RO0 0x10000029
+
+#define MSR_LX_GLD_MSR_CONFIG 0x48002001
+#define MSR_LX_MSR_PADSEL 0x48002011 /* NOT 0x48000011; the data
+ * sheet has the wrong value */
+#define MSR_GLCP_SYS_RSTPLL 0x4C000014
+#define MSR_GLCP_DOTPLL 0x4C000015
+
+#define MSR_LBAR_SMB 0x5140000B
+#define MSR_LBAR_GPIO 0x5140000C
+#define MSR_LBAR_MFGPT 0x5140000D
+#define MSR_LBAR_ACPI 0x5140000E
+#define MSR_LBAR_PMS 0x5140000F
+
+#define MSR_DIVIL_SOFT_RESET 0x51400017
+
+#define MSR_PIC_YSEL_LOW 0x51400020
+#define MSR_PIC_YSEL_HIGH 0x51400021
+#define MSR_PIC_ZSEL_LOW 0x51400022
+#define MSR_PIC_ZSEL_HIGH 0x51400023
+#define MSR_PIC_IRQM_LPC 0x51400025
+
+#define MSR_MFGPT_IRQ 0x51400028
+#define MSR_MFGPT_NR 0x51400029
+#define MSR_MFGPT_SETUP 0x5140002B
+
+#define MSR_LX_SPARE_MSR 0x80000011 /* DC-specific */
+
+#define MSR_GX_GLD_MSR_CONFIG 0xC0002001
+#define MSR_GX_MSR_PADSEL 0xC0002011
+
+/* resource sizes */
+#define LBAR_GPIO_SIZE 0xFF
+#define LBAR_MFGPT_SIZE 0x40
+#define LBAR_ACPI_SIZE 0x40
+#define LBAR_PMS_SIZE 0x80
+
+/* VSA2 magic values */
+#define VSA_VRC_INDEX 0xAC1C
+#define VSA_VRC_DATA 0xAC1E
+#define VSA_VR_UNLOCK 0xFC53 /* unlock virtual register */
+#define VSA_VR_SIGNATURE 0x0003
+#define VSA_VR_MEM_SIZE 0x0200
+#define AMD_VSA_SIG 0x4132 /* signature is ascii 'VSA2' */
+#define GSW_VSA_SIG 0x534d /* General Software signature */
+
+#include <linux/io.h>
+
+static inline int cs5535_has_vsa2(void)
+{
+ static int has_vsa2 = -1;
+
+ if (has_vsa2 == -1) {
+ uint16_t val;
+
+ /*
+ * The VSA has virtual registers that we can query for a
+ * signature.
+ */
+ outw(VSA_VR_UNLOCK, VSA_VRC_INDEX);
+ outw(VSA_VR_SIGNATURE, VSA_VRC_INDEX);
+
+ val = inw(VSA_VRC_DATA);
+ has_vsa2 = (val == AMD_VSA_SIG || val == GSW_VSA_SIG);
+ }
+
+ return has_vsa2;
+}
+
+/* GPIOs */
+#define GPIO_OUTPUT_VAL 0x00
+#define GPIO_OUTPUT_ENABLE 0x04
+#define GPIO_OUTPUT_OPEN_DRAIN 0x08
+#define GPIO_OUTPUT_INVERT 0x0C
+#define GPIO_OUTPUT_AUX1 0x10
+#define GPIO_OUTPUT_AUX2 0x14
+#define GPIO_PULL_UP 0x18
+#define GPIO_PULL_DOWN 0x1C
+#define GPIO_INPUT_ENABLE 0x20
+#define GPIO_INPUT_INVERT 0x24
+#define GPIO_INPUT_FILTER 0x28
+#define GPIO_INPUT_EVENT_COUNT 0x2C
+#define GPIO_READ_BACK 0x30
+#define GPIO_INPUT_AUX1 0x34
+#define GPIO_EVENTS_ENABLE 0x38
+#define GPIO_LOCK_ENABLE 0x3C
+#define GPIO_POSITIVE_EDGE_EN 0x40
+#define GPIO_NEGATIVE_EDGE_EN 0x44
+#define GPIO_POSITIVE_EDGE_STS 0x48
+#define GPIO_NEGATIVE_EDGE_STS 0x4C
+
+#define GPIO_MAP_X 0xE0
+#define GPIO_MAP_Y 0xE4
+#define GPIO_MAP_Z 0xE8
+#define GPIO_MAP_W 0xEC
+
+void cs5535_gpio_set(unsigned offset, unsigned int reg);
+void cs5535_gpio_clear(unsigned offset, unsigned int reg);
+int cs5535_gpio_isset(unsigned offset, unsigned int reg);
+
+/* MFGPTs */
+
+#define MFGPT_MAX_TIMERS 8
+#define MFGPT_TIMER_ANY (-1)
+
+#define MFGPT_DOMAIN_WORKING 1
+#define MFGPT_DOMAIN_STANDBY 2
+#define MFGPT_DOMAIN_ANY (MFGPT_DOMAIN_WORKING | MFGPT_DOMAIN_STANDBY)
+
+#define MFGPT_CMP1 0
+#define MFGPT_CMP2 1
+
+#define MFGPT_EVENT_IRQ 0
+#define MFGPT_EVENT_NMI 1
+#define MFGPT_EVENT_RESET 3
+
+#define MFGPT_REG_CMP1 0
+#define MFGPT_REG_CMP2 2
+#define MFGPT_REG_COUNTER 4
+#define MFGPT_REG_SETUP 6
+
+#define MFGPT_SETUP_CNTEN (1 << 15)
+#define MFGPT_SETUP_CMP2 (1 << 14)
+#define MFGPT_SETUP_CMP1 (1 << 13)
+#define MFGPT_SETUP_SETUP (1 << 12)
+#define MFGPT_SETUP_STOPEN (1 << 11)
+#define MFGPT_SETUP_EXTEN (1 << 10)
+#define MFGPT_SETUP_REVEN (1 << 5)
+#define MFGPT_SETUP_CLKSEL (1 << 4)
+
+struct cs5535_mfgpt_timer;
+
+extern uint16_t cs5535_mfgpt_read(struct cs5535_mfgpt_timer *timer,
+ uint16_t reg);
+extern void cs5535_mfgpt_write(struct cs5535_mfgpt_timer *timer, uint16_t reg,
+ uint16_t value);
+
+extern int cs5535_mfgpt_toggle_event(struct cs5535_mfgpt_timer *timer, int cmp,
+ int event, int enable);
+extern int cs5535_mfgpt_set_irq(struct cs5535_mfgpt_timer *timer, int cmp,
+ int *irq, int enable);
+extern struct cs5535_mfgpt_timer *cs5535_mfgpt_alloc_timer(int timer,
+ int domain);
+extern void cs5535_mfgpt_free_timer(struct cs5535_mfgpt_timer *timer);
+
+static inline int cs5535_mfgpt_setup_irq(struct cs5535_mfgpt_timer *timer,
+ int cmp, int *irq)
+{
+ return cs5535_mfgpt_set_irq(timer, cmp, irq, 1);
+}
+
+static inline int cs5535_mfgpt_release_irq(struct cs5535_mfgpt_timer *timer,
+ int cmp, int *irq)
+{
+ return cs5535_mfgpt_set_irq(timer, cmp, irq, 0);
+}
+
+#endif
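
A rough sketch of how a driver might claim and program one of these MFGPT timers with the API declared above (error handling abbreviated; the 32768/HZ divisor is an assumption for a one-tick period, not taken from this header):

#include <linux/cs5535.h>
#include <linux/kernel.h>
#include <linux/errno.h>

static struct cs5535_mfgpt_timer *timer;
static int timer_irq;

static int example_mfgpt_init(void)
{
	timer = cs5535_mfgpt_alloc_timer(MFGPT_TIMER_ANY, MFGPT_DOMAIN_WORKING);
	if (!timer)
		return -ENODEV;

	if (cs5535_mfgpt_setup_irq(timer, MFGPT_CMP2, &timer_irq)) {
		cs5535_mfgpt_free_timer(timer);
		return -EIO;
	}

	/* Count from 0 up to CMP2 in the 32 kHz domain, then raise the IRQ. */
	cs5535_mfgpt_write(timer, MFGPT_REG_CMP2, 32768 / HZ);
	cs5535_mfgpt_write(timer, MFGPT_REG_COUNTER, 0);
	cs5535_mfgpt_write(timer, MFGPT_REG_SETUP,
			   MFGPT_SETUP_CNTEN | MFGPT_SETUP_CMP2);
	return 0;
}
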
diff --git a/include/linux/ctype.h b/include/linux/ctype.h
index afa36392297..a3d6ee0044f 100644
--- a/include/linux/ctype.h
+++ b/include/linux/ctype.h
@@ -15,7 +15,7 @@
#define _X 0x40 /* hex digit */
#define _SP 0x80 /* hard space (0x20) */
-extern unsigned char _ctype[];
+extern const unsigned char _ctype[];
#define __ismask(x) (_ctype[(int)(unsigned char)(x)])
@@ -27,6 +27,7 @@ extern unsigned char _ctype[];
#define islower(c) ((__ismask(c)&(_L)) != 0)
#define isprint(c) ((__ismask(c)&(_P|_U|_L|_D|_SP)) != 0)
#define ispunct(c) ((__ismask(c)&(_P)) != 0)
+/* Note: isspace() must return false for %NUL-terminator */
#define isspace(c) ((__ismask(c)&(_S)) != 0)
#define isupper(c) ((__ismask(c)&(_U)) != 0)
#define isxdigit(c) ((__ismask(c)&(_D|_X)) != 0)
diff --git a/include/linux/device-mapper.h b/include/linux/device-mapper.h
index df7607e6dce..d4c9c0b88ad 100644
--- a/include/linux/device-mapper.h
+++ b/include/linux/device-mapper.h
@@ -235,7 +235,7 @@ void dm_uevent_add(struct mapped_device *md, struct list_head *elist);
const char *dm_device_name(struct mapped_device *md);
int dm_copy_name_and_uuid(struct mapped_device *md, char *name, char *uuid);
struct gendisk *dm_disk(struct mapped_device *md);
-int dm_suspended(struct mapped_device *md);
+int dm_suspended(struct dm_target *ti);
int dm_noflush_suspending(struct dm_target *ti);
union map_info *dm_get_mapinfo(struct bio *bio);
union map_info *dm_get_rq_mapinfo(struct request *rq);
@@ -276,7 +276,7 @@ void dm_table_unplug_all(struct dm_table *t);
/*
* Table reference counting.
*/
-struct dm_table *dm_get_table(struct mapped_device *md);
+struct dm_table *dm_get_live_table(struct mapped_device *md);
void dm_table_get(struct dm_table *t);
void dm_table_put(struct dm_table *t);
@@ -295,8 +295,10 @@ void dm_table_event(struct dm_table *t);
/*
* The device must be suspended before calling this method.
+ * Returns the previous table, which the caller must destroy.
*/
-int dm_swap_table(struct mapped_device *md, struct dm_table *t);
+struct dm_table *dm_swap_table(struct mapped_device *md,
+ struct dm_table *t);
/*
* A wrapper around vmalloc.
diff --git a/include/linux/dm-dirty-log.h b/include/linux/dm-dirty-log.h
index 5e8b11d88f6..7084503c340 100644
--- a/include/linux/dm-dirty-log.h
+++ b/include/linux/dm-dirty-log.h
@@ -21,6 +21,7 @@ struct dm_dirty_log_type;
struct dm_dirty_log {
struct dm_dirty_log_type *type;
+ int (*flush_callback_fn)(struct dm_target *ti);
void *context;
};
@@ -136,8 +137,9 @@ int dm_dirty_log_type_unregister(struct dm_dirty_log_type *type);
* type->constructor/destructor() directly.
*/
struct dm_dirty_log *dm_dirty_log_create(const char *type_name,
- struct dm_target *ti,
- unsigned argc, char **argv);
+ struct dm_target *ti,
+ int (*flush_callback_fn)(struct dm_target *ti),
+ unsigned argc, char **argv);
void dm_dirty_log_destroy(struct dm_dirty_log *log);
#endif /* __KERNEL__ */
diff --git a/include/linux/dm-ioctl.h b/include/linux/dm-ioctl.h
index 2ab84c83c31..aa95508d2f9 100644
--- a/include/linux/dm-ioctl.h
+++ b/include/linux/dm-ioctl.h
@@ -1,6 +1,6 @@
/*
* Copyright (C) 2001 - 2003 Sistina Software (UK) Limited.
- * Copyright (C) 2004 - 2005 Red Hat, Inc. All rights reserved.
+ * Copyright (C) 2004 - 2009 Red Hat, Inc. All rights reserved.
*
* This file is released under the LGPL.
*/
@@ -266,9 +266,9 @@ enum {
#define DM_DEV_SET_GEOMETRY _IOWR(DM_IOCTL, DM_DEV_SET_GEOMETRY_CMD, struct dm_ioctl)
#define DM_VERSION_MAJOR 4
-#define DM_VERSION_MINOR 15
+#define DM_VERSION_MINOR 16
#define DM_VERSION_PATCHLEVEL 0
-#define DM_VERSION_EXTRA "-ioctl (2009-04-01)"
+#define DM_VERSION_EXTRA "-ioctl (2009-11-05)"
/* Status bits */
#define DM_READONLY_FLAG (1 << 0) /* In/Out */
@@ -309,4 +309,11 @@ enum {
*/
#define DM_NOFLUSH_FLAG (1 << 11) /* In */
+/*
+ * If set, any table information returned will relate to the inactive
+ * table instead of the live one. Always check DM_INACTIVE_PRESENT_FLAG
+ * is set before using the data returned.
+ */
+#define DM_QUERY_INACTIVE_TABLE_FLAG (1 << 12) /* In */
+
#endif /* _LINUX_DM_IOCTL_H */
diff --git a/include/linux/dm-region-hash.h b/include/linux/dm-region-hash.h
index a9e652a4137..9e2a7a401df 100644
--- a/include/linux/dm-region-hash.h
+++ b/include/linux/dm-region-hash.h
@@ -78,8 +78,7 @@ void dm_rh_dec(struct dm_region_hash *rh, region_t region);
/* Delay bios on regions. */
void dm_rh_delay(struct dm_region_hash *rh, struct bio *bio);
-void dm_rh_mark_nosync(struct dm_region_hash *rh,
- struct bio *bio, unsigned done, int error);
+void dm_rh_mark_nosync(struct dm_region_hash *rh, struct bio *bio);
/*
* Region recovery control.
diff --git a/include/linux/dynamic_debug.h b/include/linux/dynamic_debug.h
index a0d9422a156..f8c2e176750 100644
--- a/include/linux/dynamic_debug.h
+++ b/include/linux/dynamic_debug.h
@@ -57,8 +57,7 @@ extern int ddebug_remove_module(char *mod_name);
{ KBUILD_MODNAME, __func__, __FILE__, fmt, DEBUG_HASH, \
DEBUG_HASH2, __LINE__, _DPRINTK_FLAGS_DEFAULT }; \
if (__dynamic_dbg_enabled(descriptor)) \
- printk(KERN_DEBUG KBUILD_MODNAME ":" pr_fmt(fmt), \
- ##__VA_ARGS__); \
+ printk(KERN_DEBUG pr_fmt(fmt), ##__VA_ARGS__); \
} while (0)
@@ -69,9 +68,7 @@ extern int ddebug_remove_module(char *mod_name);
{ KBUILD_MODNAME, __func__, __FILE__, fmt, DEBUG_HASH, \
DEBUG_HASH2, __LINE__, _DPRINTK_FLAGS_DEFAULT }; \
if (__dynamic_dbg_enabled(descriptor)) \
- dev_printk(KERN_DEBUG, dev, \
- KBUILD_MODNAME ": " fmt, \
- ##__VA_ARGS__); \
+ dev_printk(KERN_DEBUG, dev, fmt, ##__VA_ARGS__); \
} while (0)
#else
@@ -81,8 +78,10 @@ static inline int ddebug_remove_module(char *mod)
return 0;
}
-#define dynamic_pr_debug(fmt, ...) do { } while (0)
-#define dynamic_dev_dbg(dev, format, ...) do { } while (0)
+#define dynamic_pr_debug(fmt, ...) \
+ do { if (0) printk(KERN_DEBUG pr_fmt(fmt), ##__VA_ARGS__); } while (0)
+#define dynamic_dev_dbg(dev, format, ...) \
+ do { if (0) dev_printk(KERN_DEBUG, dev, fmt, ##__VA_ARGS__); } while (0)
#endif
#endif
diff --git a/include/linux/efi.h b/include/linux/efi.h
index ce4581fbc08..fb737bc19a8 100644
--- a/include/linux/efi.h
+++ b/include/linux/efi.h
@@ -280,11 +280,7 @@ efi_guidcmp (efi_guid_t left, efi_guid_t right)
static inline char *
efi_guid_unparse(efi_guid_t *guid, char *out)
{
- sprintf(out, "%02x%02x%02x%02x-%02x%02x-%02x%02x-%02x%02x-%02x%02x%02x%02x%02x%02x",
- guid->b[3], guid->b[2], guid->b[1], guid->b[0],
- guid->b[5], guid->b[4], guid->b[7], guid->b[6],
- guid->b[8], guid->b[9], guid->b[10], guid->b[11],
- guid->b[12], guid->b[13], guid->b[14], guid->b[15]);
+ sprintf(out, "%pUl", guid->b);
return out;
}
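
The helper now simply delegates to the "%pUl" printk/sprintf extension, which can also be used directly when no intermediate buffer is needed; a small sketch (report_guid() is illustrative):

#include <linux/efi.h>
#include <linux/kernel.h>

static void report_guid(const efi_guid_t *guid)
{
	pr_info("vendor table GUID: %pUl\n", guid->b);
}
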
diff --git a/include/linux/err.h b/include/linux/err.h
index ec87f3142bf..1b12642636c 100644
--- a/include/linux/err.h
+++ b/include/linux/err.h
@@ -34,6 +34,11 @@ static inline long IS_ERR(const void *ptr)
return IS_ERR_VALUE((unsigned long)ptr);
}
+static inline long IS_ERR_OR_NULL(const void *ptr)
+{
+ return !ptr || IS_ERR_VALUE((unsigned long)ptr);
+}
+
/**
* ERR_CAST - Explicitly cast an error-valued pointer to another pointer type
* @ptr: The pointer to cast.
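
A short sketch of the pattern the new IS_ERR_OR_NULL() helper is meant for, i.e. APIs that can return NULL, an ERR_PTR() value, or a valid pointer (my_get_clk() and my_enable_clk() are illustrative stand-ins):

#include <linux/err.h>
#include <linux/errno.h>
#include <linux/clk.h>

static struct clk *my_get_clk(struct device *dev);	/* may return NULL or ERR_PTR() */

static int my_enable_clk(struct device *dev)
{
	struct clk *clk = my_get_clk(dev);

	if (IS_ERR_OR_NULL(clk))	/* covers both failure encodings */
		return clk ? PTR_ERR(clk) : -ENODEV;

	return clk_enable(clk);
}
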
diff --git a/include/linux/hrtimer.h b/include/linux/hrtimer.h
index af634e95871..5d86fb2309d 100644
--- a/include/linux/hrtimer.h
+++ b/include/linux/hrtimer.h
@@ -169,7 +169,7 @@ struct hrtimer_clock_base {
* @max_hang_time: Maximum time spent in hrtimer_interrupt
*/
struct hrtimer_cpu_base {
- spinlock_t lock;
+ raw_spinlock_t lock;
struct hrtimer_clock_base clock_base[HRTIMER_MAX_CLOCK_BASES];
#ifdef CONFIG_HIGH_RES_TIMERS
ktime_t expires_next;
diff --git a/include/linux/hugetlb.h b/include/linux/hugetlb.h
index 41a59afc70f..78b4bc64c00 100644
--- a/include/linux/hugetlb.h
+++ b/include/linux/hugetlb.h
@@ -23,6 +23,12 @@ void reset_vma_resv_huge_pages(struct vm_area_struct *vma);
int hugetlb_sysctl_handler(struct ctl_table *, int, void __user *, size_t *, loff_t *);
int hugetlb_overcommit_handler(struct ctl_table *, int, void __user *, size_t *, loff_t *);
int hugetlb_treat_movable_handler(struct ctl_table *, int, void __user *, size_t *, loff_t *);
+
+#ifdef CONFIG_NUMA
+int hugetlb_mempolicy_sysctl_handler(struct ctl_table *, int,
+ void __user *, size_t *, loff_t *);
+#endif
+
int copy_hugetlb_page_range(struct mm_struct *, struct mm_struct *, struct vm_area_struct *);
int follow_hugetlb_page(struct mm_struct *, struct vm_area_struct *,
struct page **, struct vm_area_struct **,
diff --git a/include/linux/hw_breakpoint.h b/include/linux/hw_breakpoint.h
index 69f07a9f127..41235c93e4e 100644
--- a/include/linux/hw_breakpoint.h
+++ b/include/linux/hw_breakpoint.h
@@ -93,7 +93,7 @@ register_user_hw_breakpoint(struct perf_event_attr *attr,
struct task_struct *tsk) { return NULL; }
static inline int
modify_user_hw_breakpoint(struct perf_event *bp,
- struct perf_event_attr *attr) { return NULL; }
+ struct perf_event_attr *attr) { return -ENOSYS; }
static inline struct perf_event *
register_wide_hw_breakpoint_cpu(struct perf_event_attr *attr,
perf_overflow_handler_t triggered,
diff --git a/include/linux/i2c.h b/include/linux/i2c.h
index 419ab546b26..02fc617782e 100644
--- a/include/linux/i2c.h
+++ b/include/linux/i2c.h
@@ -110,7 +110,7 @@ extern s32 i2c_smbus_write_i2c_block_data(struct i2c_client *client,
* @driver: Device driver model driver
* @id_table: List of I2C devices supported by this driver
* @detect: Callback for device detection
- * @address_data: The I2C addresses to probe (for detect)
+ * @address_list: The I2C addresses to probe (for detect)
* @clients: List of detected clients we created (for i2c-core use only)
*
* The driver.owner field should be set to the module owner of this driver.
@@ -161,8 +161,8 @@ struct i2c_driver {
const struct i2c_device_id *id_table;
/* Device detection callback for automatic device creation */
- int (*detect)(struct i2c_client *, int kind, struct i2c_board_info *);
- const struct i2c_client_address_data *address_data;
+ int (*detect)(struct i2c_client *, struct i2c_board_info *);
+ const unsigned short *address_list;
struct list_head clients;
};
#define to_i2c_driver(d) container_of(d, struct i2c_driver, driver)
@@ -391,14 +391,6 @@ static inline void i2c_unlock_adapter(struct i2c_adapter *adapter)
#define I2C_CLASS_DDC (1<<3) /* DDC bus on graphics adapters */
#define I2C_CLASS_SPD (1<<7) /* SPD EEPROMs and similar */
-/* i2c_client_address_data is the struct for holding default client
- * addresses for a driver and for the parameters supplied on the
- * command line
- */
-struct i2c_client_address_data {
- const unsigned short *normal_i2c;
-};
-
/* Internal numbers to terminate lists */
#define I2C_CLIENT_END 0xfffeU
@@ -576,82 +568,4 @@ union i2c_smbus_data {
#define I2C_SMBUS_BLOCK_PROC_CALL 7 /* SMBus 2.0 */
#define I2C_SMBUS_I2C_BLOCK_DATA 8
-
-#ifdef __KERNEL__
-
-/* These defines are used for probing i2c client addresses */
-/* The length of the option lists */
-#define I2C_CLIENT_MAX_OPTS 48
-
-/* Default fill of many variables */
-#define I2C_CLIENT_DEFAULTS {I2C_CLIENT_END, I2C_CLIENT_END, I2C_CLIENT_END, \
- I2C_CLIENT_END, I2C_CLIENT_END, I2C_CLIENT_END, \
- I2C_CLIENT_END, I2C_CLIENT_END, I2C_CLIENT_END, \
- I2C_CLIENT_END, I2C_CLIENT_END, I2C_CLIENT_END, \
- I2C_CLIENT_END, I2C_CLIENT_END, I2C_CLIENT_END, \
- I2C_CLIENT_END, I2C_CLIENT_END, I2C_CLIENT_END, \
- I2C_CLIENT_END, I2C_CLIENT_END, I2C_CLIENT_END, \
- I2C_CLIENT_END, I2C_CLIENT_END, I2C_CLIENT_END, \
- I2C_CLIENT_END, I2C_CLIENT_END, I2C_CLIENT_END, \
- I2C_CLIENT_END, I2C_CLIENT_END, I2C_CLIENT_END, \
- I2C_CLIENT_END, I2C_CLIENT_END, I2C_CLIENT_END, \
- I2C_CLIENT_END, I2C_CLIENT_END, I2C_CLIENT_END, \
- I2C_CLIENT_END, I2C_CLIENT_END, I2C_CLIENT_END, \
- I2C_CLIENT_END, I2C_CLIENT_END, I2C_CLIENT_END, \
- I2C_CLIENT_END, I2C_CLIENT_END, I2C_CLIENT_END, \
- I2C_CLIENT_END, I2C_CLIENT_END, I2C_CLIENT_END}
-
-/* I2C_CLIENT_MODULE_PARM creates a module parameter, and puts it in the
- module header */
-
-#define I2C_CLIENT_MODULE_PARM(var,desc) \
- static unsigned short var[I2C_CLIENT_MAX_OPTS] = I2C_CLIENT_DEFAULTS; \
- static unsigned int var##_num; \
- module_param_array(var, short, &var##_num, 0); \
- MODULE_PARM_DESC(var, desc)
-
-#define I2C_CLIENT_INSMOD_COMMON \
-static const struct i2c_client_address_data addr_data = { \
- .normal_i2c = normal_i2c, \
-}
-
-/* These are the ones you want to use in your own drivers. Pick the one
- which matches the number of devices the driver differenciates between. */
-#define I2C_CLIENT_INSMOD \
-I2C_CLIENT_INSMOD_COMMON
-
-#define I2C_CLIENT_INSMOD_1(chip1) \
-enum chips { any_chip, chip1 }; \
-I2C_CLIENT_INSMOD_COMMON
-
-#define I2C_CLIENT_INSMOD_2(chip1, chip2) \
-enum chips { any_chip, chip1, chip2 }; \
-I2C_CLIENT_INSMOD_COMMON
-
-#define I2C_CLIENT_INSMOD_3(chip1, chip2, chip3) \
-enum chips { any_chip, chip1, chip2, chip3 }; \
-I2C_CLIENT_INSMOD_COMMON
-
-#define I2C_CLIENT_INSMOD_4(chip1, chip2, chip3, chip4) \
-enum chips { any_chip, chip1, chip2, chip3, chip4 }; \
-I2C_CLIENT_INSMOD_COMMON
-
-#define I2C_CLIENT_INSMOD_5(chip1, chip2, chip3, chip4, chip5) \
-enum chips { any_chip, chip1, chip2, chip3, chip4, chip5 }; \
-I2C_CLIENT_INSMOD_COMMON
-
-#define I2C_CLIENT_INSMOD_6(chip1, chip2, chip3, chip4, chip5, chip6) \
-enum chips { any_chip, chip1, chip2, chip3, chip4, chip5, chip6 }; \
-I2C_CLIENT_INSMOD_COMMON
-
-#define I2C_CLIENT_INSMOD_7(chip1, chip2, chip3, chip4, chip5, chip6, chip7) \
-enum chips { any_chip, chip1, chip2, chip3, chip4, chip5, chip6, \
- chip7 }; \
-I2C_CLIENT_INSMOD_COMMON
-
-#define I2C_CLIENT_INSMOD_8(chip1, chip2, chip3, chip4, chip5, chip6, chip7, chip8) \
-enum chips { any_chip, chip1, chip2, chip3, chip4, chip5, chip6, \
- chip7, chip8 }; \
-I2C_CLIENT_INSMOD_COMMON
-#endif /* __KERNEL__ */
#endif /* _LINUX_I2C_H */
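
With the I2C_CLIENT_INSMOD machinery gone, a detecting driver now just supplies a flat address list and the two-argument detect() callback from the struct i2c_driver above. A hedged sketch under assumed values (the "foo" chip name, its addresses and its identification register are invented for illustration):

#include <linux/i2c.h>
#include <linux/string.h>
#include <linux/errno.h>

static const unsigned short foo_addrs[] = { 0x48, 0x49, I2C_CLIENT_END };

static int foo_detect(struct i2c_client *client, struct i2c_board_info *info)
{
	/* Assumed: register 0x00 holds a 0x42 chip ID on this part. */
	if (i2c_smbus_read_byte_data(client, 0x00) != 0x42)
		return -ENODEV;

	strlcpy(info->type, "foo", I2C_NAME_SIZE);
	return 0;
}

static struct i2c_driver foo_driver = {
	.driver		= { .name = "foo" },
	.class		= I2C_CLASS_HWMON,
	.detect		= foo_detect,
	.address_list	= foo_addrs,
	/* .probe, .remove and .id_table omitted for brevity */
};
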
diff --git a/include/linux/i2c/tps65010.h b/include/linux/i2c/tps65010.h
index 918c5354d9b..08aa92278d7 100644
--- a/include/linux/i2c/tps65010.h
+++ b/include/linux/i2c/tps65010.h
@@ -72,6 +72,21 @@
#define TPS_VDCDC1 0x0c
# define TPS_ENABLE_LP (1 << 3)
#define TPS_VDCDC2 0x0d
+# define TPS_LP_COREOFF (1 << 7)
+# define TPS_VCORE_1_8V (7<<4)
+# define TPS_VCORE_1_5V (6 << 4)
+# define TPS_VCORE_1_4V (5 << 4)
+# define TPS_VCORE_1_3V (4 << 4)
+# define TPS_VCORE_1_2V (3 << 4)
+# define TPS_VCORE_1_1V (2 << 4)
+# define TPS_VCORE_1_0V (1 << 4)
+# define TPS_VCORE_0_85V (0 << 4)
+# define TPS_VCORE_LP_1_2V (3 << 2)
+# define TPS_VCORE_LP_1_1V (2 << 2)
+# define TPS_VCORE_LP_1_0V (1 << 2)
+# define TPS_VCORE_LP_0_85V (0 << 2)
+# define TPS_VIB (1 << 1)
+# define TPS_VCORE_DISCH (1 << 0)
#define TPS_VREGS1 0x0e
# define TPS_LDO2_ENABLE (1 << 7)
# define TPS_LDO2_OFF (1 << 6)
@@ -152,6 +167,10 @@ extern int tps65010_config_vregs1(unsigned value);
*/
extern int tps65013_set_low_pwr(unsigned mode);
+/* tps65010_set_vdcdc2
+ * value to be written to VDCDC2
+ */
+extern int tps65010_config_vdcdc2(unsigned value);
struct i2c_client;
diff --git a/include/linux/i2c/twl4030.h b/include/linux/i2c/twl.h
index 5306a759cbd..bf1c5be1f5b 100644
--- a/include/linux/i2c/twl4030.h
+++ b/include/linux/i2c/twl.h
@@ -22,8 +22,8 @@
*
*/
-#ifndef __TWL4030_H_
-#define __TWL4030_H_
+#ifndef __TWL_H_
+#define __TWL_H_
#include <linux/types.h>
#include <linux/input/matrix_keypad.h>
@@ -61,28 +61,112 @@
#define TWL4030_MODULE_PWMA 0x0E
#define TWL4030_MODULE_PWMB 0x0F
+#define TWL5031_MODULE_ACCESSORY 0x10
+#define TWL5031_MODULE_INTERRUPTS 0x11
+
/* Slave 3 (i2c address 0x4b) */
-#define TWL4030_MODULE_BACKUP 0x10
-#define TWL4030_MODULE_INT 0x11
-#define TWL4030_MODULE_PM_MASTER 0x12
-#define TWL4030_MODULE_PM_RECEIVER 0x13
-#define TWL4030_MODULE_RTC 0x14
-#define TWL4030_MODULE_SECURED_REG 0x15
+#define TWL4030_MODULE_BACKUP 0x12
+#define TWL4030_MODULE_INT 0x13
+#define TWL4030_MODULE_PM_MASTER 0x14
+#define TWL4030_MODULE_PM_RECEIVER 0x15
+#define TWL4030_MODULE_RTC 0x16
+#define TWL4030_MODULE_SECURED_REG 0x17
+
+#define TWL_MODULE_USB TWL4030_MODULE_USB
+#define TWL_MODULE_AUDIO_VOICE TWL4030_MODULE_AUDIO_VOICE
+#define TWL_MODULE_PIH TWL4030_MODULE_PIH
+#define TWL_MODULE_MADC TWL4030_MODULE_MADC
+#define TWL_MODULE_MAIN_CHARGE TWL4030_MODULE_MAIN_CHARGE
+#define TWL_MODULE_PM_MASTER TWL4030_MODULE_PM_MASTER
+#define TWL_MODULE_PM_RECEIVER TWL4030_MODULE_PM_RECEIVER
+#define TWL_MODULE_RTC TWL4030_MODULE_RTC
+
+#define GPIO_INTR_OFFSET 0
+#define KEYPAD_INTR_OFFSET 1
+#define BCI_INTR_OFFSET 2
+#define MADC_INTR_OFFSET 3
+#define USB_INTR_OFFSET 4
+#define BCI_PRES_INTR_OFFSET 9
+#define USB_PRES_INTR_OFFSET 10
+#define RTC_INTR_OFFSET 11
+
+/*
+ * Offset from TWL6030_IRQ_BASE / pdata->irq_base
+ */
+#define PWR_INTR_OFFSET 0
+#define HOTDIE_INTR_OFFSET 12
+#define SMPSLDO_INTR_OFFSET 13
+#define BATDETECT_INTR_OFFSET 14
+#define SIMDETECT_INTR_OFFSET 15
+#define MMCDETECT_INTR_OFFSET 16
+#define GASGAUGE_INTR_OFFSET 17
+#define USBOTG_INTR_OFFSET 4
+#define CHARGER_INTR_OFFSET 2
+#define RSV_INTR_OFFSET 0
+
+/* INT register offsets */
+#define REG_INT_STS_A 0x00
+#define REG_INT_STS_B 0x01
+#define REG_INT_STS_C 0x02
+
+#define REG_INT_MSK_LINE_A 0x03
+#define REG_INT_MSK_LINE_B 0x04
+#define REG_INT_MSK_LINE_C 0x05
+
+#define REG_INT_MSK_STS_A 0x06
+#define REG_INT_MSK_STS_B 0x07
+#define REG_INT_MSK_STS_C 0x08
+
+/* MASK INT REG GROUP A */
+#define TWL6030_PWR_INT_MASK 0x07
+#define TWL6030_RTC_INT_MASK 0x18
+#define TWL6030_HOTDIE_INT_MASK 0x20
+#define TWL6030_SMPSLDOA_INT_MASK 0xC0
+
+/* MASK INT REG GROUP B */
+#define TWL6030_SMPSLDOB_INT_MASK 0x01
+#define TWL6030_BATDETECT_INT_MASK 0x02
+#define TWL6030_SIMDETECT_INT_MASK 0x04
+#define TWL6030_MMCDETECT_INT_MASK 0x08
+#define TWL6030_GPADC_INT_MASK 0x60
+#define TWL6030_GASGAUGE_INT_MASK 0x80
+
+/* MASK INT REG GROUP C */
+#define TWL6030_USBOTG_INT_MASK 0x0F
+#define TWL6030_CHARGER_CTRL_INT_MASK 0x10
+#define TWL6030_CHARGER_FAULT_INT_MASK 0x60
+
+
+#define TWL4030_CLASS_ID 0x4030
+#define TWL6030_CLASS_ID 0x6030
+unsigned int twl_rev(void);
+#define GET_TWL_REV (twl_rev())
+#define TWL_CLASS_IS(class, id) \
+static inline int twl_class_is_ ##class(void) \
+{ \
+ return ((id) == (GET_TWL_REV)) ? 1 : 0; \
+}
+
+TWL_CLASS_IS(4030, TWL4030_CLASS_ID)
+TWL_CLASS_IS(6030, TWL6030_CLASS_ID)
/*
* Read and write single 8-bit registers
*/
-int twl4030_i2c_write_u8(u8 mod_no, u8 val, u8 reg);
-int twl4030_i2c_read_u8(u8 mod_no, u8 *val, u8 reg);
+int twl_i2c_write_u8(u8 mod_no, u8 val, u8 reg);
+int twl_i2c_read_u8(u8 mod_no, u8 *val, u8 reg);
/*
* Read and write several 8-bit registers at once.
*
- * IMPORTANT: For twl4030_i2c_write(), allocate num_bytes + 1
+ * IMPORTANT: For twl_i2c_write(), allocate num_bytes + 1
* for the value, and populate your data starting at offset 1.
*/
-int twl4030_i2c_write(u8 mod_no, u8 *value, u8 reg, unsigned num_bytes);
-int twl4030_i2c_read(u8 mod_no, u8 *value, u8 reg, unsigned num_bytes);
+int twl_i2c_write(u8 mod_no, u8 *value, u8 reg, unsigned num_bytes);
+int twl_i2c_read(u8 mod_no, u8 *value, u8 reg, unsigned num_bytes);
+
+int twl6030_interrupt_unmask(u8 bit_mask, u8 offset);
+int twl6030_interrupt_mask(u8 bit_mask, u8 offset);
/*----------------------------------------------------------------------*/
@@ -221,6 +305,38 @@ int twl4030_i2c_read(u8 mod_no, u8 *value, u8 reg, unsigned num_bytes);
/*----------------------------------------------------------------------*/
+/*
+ * Accessory Interrupts
+ */
+#define TWL5031_ACIIMR_LSB 0x05
+#define TWL5031_ACIIMR_MSB 0x06
+#define TWL5031_ACIIDR_LSB 0x07
+#define TWL5031_ACIIDR_MSB 0x08
+#define TWL5031_ACCISR1 0x0F
+#define TWL5031_ACCIMR1 0x10
+#define TWL5031_ACCISR2 0x11
+#define TWL5031_ACCIMR2 0x12
+#define TWL5031_ACCSIR 0x13
+#define TWL5031_ACCEDR1 0x14
+#define TWL5031_ACCSIHCTRL 0x15
+
+/*----------------------------------------------------------------------*/
+
+/*
+ * Battery Charger Controller
+ */
+
+#define TWL5031_INTERRUPTS_BCIISR1 0x0
+#define TWL5031_INTERRUPTS_BCIIMR1 0x1
+#define TWL5031_INTERRUPTS_BCIISR2 0x2
+#define TWL5031_INTERRUPTS_BCIIMR2 0x3
+#define TWL5031_INTERRUPTS_BCISIR 0x4
+#define TWL5031_INTERRUPTS_BCIEDR1 0x5
+#define TWL5031_INTERRUPTS_BCIEDR2 0x6
+#define TWL5031_INTERRUPTS_BCISIHCTRL 0x7
+
+/*----------------------------------------------------------------------*/
+
/* Power bus message definitions */
/* The TWL4030/5030 splits its power-management resources (the various
@@ -250,6 +366,7 @@ int twl4030_i2c_read(u8 mod_no, u8 *value, u8 reg, unsigned num_bytes);
#define RES_TYPE_ALL 0x7
+/* Resource states */
#define RES_STATE_WRST 0xF
#define RES_STATE_ACTIVE 0xE
#define RES_STATE_SLEEP 0x8
@@ -310,8 +427,18 @@ int twl4030_i2c_read(u8 mod_no, u8 *value, u8 reg, unsigned num_bytes);
#define MSG_SINGULAR(devgrp, id, state) \
((devgrp) << 13 | 0 << 12 | (id) << 4 | (state))
+#define MSG_BROADCAST_ALL(devgrp, state) \
+ ((devgrp) << 5 | (state))
+
+#define MSG_BROADCAST_REF MSG_BROADCAST_ALL
+#define MSG_BROADCAST_PROV MSG_BROADCAST_ALL
+#define MSG_BROADCAST__CLK_RST MSG_BROADCAST_ALL
/*----------------------------------------------------------------------*/
+struct twl4030_clock_init_data {
+ bool ck32k_lowpwr_enable;
+};
+
struct twl4030_bci_platform_data {
int *battery_tmp_tbl;
unsigned int tblsize;
@@ -391,12 +518,15 @@ struct twl4030_resconfig {
u8 devgroup; /* Processor group that Power resource belongs to */
u8 type; /* Power resource addressed, 6 / broadcast message */
u8 type2; /* Power resource addressed, 3 / broadcast message */
+ u8 remap_off; /* off state remapping */
+ u8 remap_sleep; /* sleep state remapping */
};
struct twl4030_power_data {
struct twl4030_script **scripts;
unsigned num;
struct twl4030_resconfig *resource_config;
+#define TWL4030_RESCONFIG_UNDEF ((u8)-1)
};
extern void twl4030_power_init(struct twl4030_power_data *triton2_scripts);
@@ -421,6 +551,7 @@ struct twl4030_codec_data {
struct twl4030_platform_data {
unsigned irq_base, irq_end;
+ struct twl4030_clock_init_data *clock;
struct twl4030_bci_platform_data *bci;
struct twl4030_gpio_platform_data *gpio;
struct twl4030_madc_platform_data *madc;
@@ -429,19 +560,31 @@ struct twl4030_platform_data {
struct twl4030_power_data *power;
struct twl4030_codec_data *codec;
- /* LDO regulators */
+ /* Common LDO regulators for TWL4030/TWL6030 */
struct regulator_init_data *vdac;
+ struct regulator_init_data *vaux1;
+ struct regulator_init_data *vaux2;
+ struct regulator_init_data *vaux3;
+ /* TWL4030 LDO regulators */
struct regulator_init_data *vpll1;
struct regulator_init_data *vpll2;
struct regulator_init_data *vmmc1;
struct regulator_init_data *vmmc2;
struct regulator_init_data *vsim;
- struct regulator_init_data *vaux1;
- struct regulator_init_data *vaux2;
- struct regulator_init_data *vaux3;
struct regulator_init_data *vaux4;
-
- /* REVISIT more to come ... _nothing_ should be hard-wired */
+ struct regulator_init_data *vio;
+ struct regulator_init_data *vdd1;
+ struct regulator_init_data *vdd2;
+ struct regulator_init_data *vintana1;
+ struct regulator_init_data *vintana2;
+ struct regulator_init_data *vintdig;
+ /* TWL6030 LDO regulators */
+ struct regulator_init_data *vmmc;
+ struct regulator_init_data *vpp;
+ struct regulator_init_data *vusim;
+ struct regulator_init_data *vana;
+ struct regulator_init_data *vcxio;
+ struct regulator_init_data *vusb;
};
/*----------------------------------------------------------------------*/
@@ -473,6 +616,7 @@ int twl4030_sih_setup(int module);
* VIO is generally fixed.
*/
+/* TWL4030 SMPS/LDO's */
/* EXTERNAL dc-to-dc buck converters */
#define TWL4030_REG_VDD1 0
#define TWL4030_REG_VDD2 1
@@ -499,4 +643,31 @@ int twl4030_sih_setup(int module);
#define TWL4030_REG_VUSB1V8 18
#define TWL4030_REG_VUSB3V1 19
+/* TWL6030 SMPS/LDO's */
+/* EXTERNAL dc-to-dc buck convertor contollable via SR */
+#define TWL6030_REG_VDD1 30
+#define TWL6030_REG_VDD2 31
+#define TWL6030_REG_VDD3 32
+
+/* Non SR compliant dc-to-dc buck convertors */
+#define TWL6030_REG_VMEM 33
+#define TWL6030_REG_V2V1 34
+#define TWL6030_REG_V1V29 35
+#define TWL6030_REG_V1V8 36
+
+/* EXTERNAL LDOs */
+#define TWL6030_REG_VAUX1_6030 37
+#define TWL6030_REG_VAUX2_6030 38
+#define TWL6030_REG_VAUX3_6030 39
+#define TWL6030_REG_VMMC 40
+#define TWL6030_REG_VPP 41
+#define TWL6030_REG_VUSIM 42
+#define TWL6030_REG_VANA 43
+#define TWL6030_REG_VCXIO 44
+#define TWL6030_REG_VDAC 45
+#define TWL6030_REG_VUSB 46
+
+/* INTERNAL LDOs */
+#define TWL6030_REG_VRTC 47
+
#endif /* End of __TWL4030_H */
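
The renamed bulk helpers keep the quirk called out in the comment above: twl_i2c_write() wants num_bytes + 1 bytes allocated, with the payload starting at offset 1. A minimal sketch (the register offset 0 and the data bytes are placeholders):

#include <linux/i2c/twl.h>

static int example_twl_write(void)
{
	u8 buf[4 + 1];		/* one spare byte at offset 0 for the API */

	buf[1] = 0x01;		/* payload starts at offset 1 */
	buf[2] = 0x02;
	buf[3] = 0x03;
	buf[4] = 0x04;

	return twl_i2c_write(TWL_MODULE_RTC, buf, 0 /* reg */, 4);
}
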
diff --git a/include/linux/init_task.h b/include/linux/init_task.h
index 8d10aa7fd4c..5ed8b9c5035 100644
--- a/include/linux/init_task.h
+++ b/include/linux/init_task.h
@@ -111,6 +111,12 @@ extern struct cred init_cred;
# define INIT_PERF_EVENTS(tsk)
#endif
+#ifdef CONFIG_FS_JOURNAL_INFO
+#define INIT_JOURNAL_INFO .journal_info = NULL,
+#else
+#define INIT_JOURNAL_INFO
+#endif
+
/*
* INIT_TASK is used to set up the first task table, touch at
* your own risk!. Base=0, limit=0x1fffff (=2MB)
@@ -162,10 +168,9 @@ extern struct cred init_cred;
.signal = {{0}}}, \
.blocked = {{0}}, \
.alloc_lock = __SPIN_LOCK_UNLOCKED(tsk.alloc_lock), \
- .journal_info = NULL, \
.cpu_timers = INIT_CPU_TIMERS(tsk.cpu_timers), \
.fs_excl = ATOMIC_INIT(0), \
- .pi_lock = __SPIN_LOCK_UNLOCKED(tsk.pi_lock), \
+ .pi_lock = __RAW_SPIN_LOCK_UNLOCKED(tsk.pi_lock), \
.timer_slack_ns = 50000, /* 50 usec default slack */ \
.pids = { \
[PIDTYPE_PID] = INIT_PID_LINK(PIDTYPE_PID), \
@@ -173,6 +178,7 @@ extern struct cred init_cred;
[PIDTYPE_SID] = INIT_PID_LINK(PIDTYPE_SID), \
}, \
.dirties = INIT_PROP_LOCAL_SINGLE(dirties), \
+ INIT_JOURNAL_INFO \
INIT_IDS \
INIT_PERF_EVENTS(tsk) \
INIT_TRACE_IRQFLAGS \
diff --git a/include/linux/irq.h b/include/linux/irq.h
index a287cfc0b1a..451481c082b 100644
--- a/include/linux/irq.h
+++ b/include/linux/irq.h
@@ -192,7 +192,7 @@ struct irq_desc {
unsigned int irq_count; /* For detecting broken IRQs */
unsigned long last_unhandled; /* Aging timer for unhandled count */
unsigned int irqs_unhandled;
- spinlock_t lock;
+ raw_spinlock_t lock;
#ifdef CONFIG_SMP
cpumask_var_t affinity;
unsigned int node;
diff --git a/include/linux/kallsyms.h b/include/linux/kallsyms.h
index 792274269f2..d8e9b3d1c23 100644
--- a/include/linux/kallsyms.h
+++ b/include/linux/kallsyms.h
@@ -107,18 +107,6 @@ static inline void print_symbol(const char *fmt, unsigned long addr)
__builtin_extract_return_addr((void *)addr));
}
-/*
- * Pretty-print a function pointer. This function is deprecated.
- * Please use the "%pF" vsprintf format instead.
- */
-static inline void __deprecated print_fn_descriptor_symbol(const char *fmt, void *addr)
-{
-#if defined(CONFIG_IA64) || defined(CONFIG_PPC64)
- addr = *(void **)addr;
-#endif
- print_symbol(fmt, (unsigned long)addr);
-}
-
static inline void print_ip_sym(unsigned long ip)
{
printk("[<%p>] %pS\n", (void *) ip, (void *) ip);
diff --git a/include/linux/kernel.h b/include/linux/kernel.h
index 3fa4c590cf1..4d9c916d06d 100644
--- a/include/linux/kernel.h
+++ b/include/linux/kernel.h
@@ -251,10 +251,10 @@ extern int printk_delay_msec;
* Print a one-time message (analogous to WARN_ONCE() et al):
*/
#define printk_once(x...) ({ \
- static bool __print_once = true; \
+ static bool __print_once; \
\
- if (__print_once) { \
- __print_once = false; \
+ if (!__print_once) { \
+ __print_once = true; \
printk(x); \
} \
})
@@ -397,15 +397,58 @@ static inline char *pack_hex_byte(char *buf, u8 byte)
printk(KERN_DEBUG pr_fmt(fmt), ##__VA_ARGS__)
#elif defined(CONFIG_DYNAMIC_DEBUG)
/* dynamic_pr_debug() uses pr_fmt() internally so we don't need it here */
-#define pr_debug(fmt, ...) do { \
- dynamic_pr_debug(fmt, ##__VA_ARGS__); \
- } while (0)
+#define pr_debug(fmt, ...) \
+ dynamic_pr_debug(fmt, ##__VA_ARGS__)
#else
#define pr_debug(fmt, ...) \
({ if (0) printk(KERN_DEBUG pr_fmt(fmt), ##__VA_ARGS__); 0; })
#endif
/*
+ * ratelimited messages with local ratelimit_state,
+ * no local ratelimit_state used in the !PRINTK case
+ */
+#ifdef CONFIG_PRINTK
+#define printk_ratelimited(fmt, ...) ({ \
+ static struct ratelimit_state _rs = { \
+ .interval = DEFAULT_RATELIMIT_INTERVAL, \
+ .burst = DEFAULT_RATELIMIT_BURST, \
+ }; \
+ \
+ if (!__ratelimit(&_rs)) \
+ printk(fmt, ##__VA_ARGS__); \
+})
+#else
+/* No effect, but we still get type checking even in the !PRINTK case: */
+#define printk_ratelimited printk
+#endif
+
+#define pr_emerg_ratelimited(fmt, ...) \
+ printk_ratelimited(KERN_EMERG pr_fmt(fmt), ##__VA_ARGS__)
+#define pr_alert_ratelimited(fmt, ...) \
+ printk_ratelimited(KERN_ALERT pr_fmt(fmt), ##__VA_ARGS__)
+#define pr_crit_ratelimited(fmt, ...) \
+ printk_ratelimited(KERN_CRIT pr_fmt(fmt), ##__VA_ARGS__)
+#define pr_err_ratelimited(fmt, ...) \
+ printk_ratelimited(KERN_ERR pr_fmt(fmt), ##__VA_ARGS__)
+#define pr_warning_ratelimited(fmt, ...) \
+ printk_ratelimited(KERN_WARNING pr_fmt(fmt), ##__VA_ARGS__)
+#define pr_notice_ratelimited(fmt, ...) \
+ printk_ratelimited(KERN_NOTICE pr_fmt(fmt), ##__VA_ARGS__)
+#define pr_info_ratelimited(fmt, ...) \
+ printk_ratelimited(KERN_INFO pr_fmt(fmt), ##__VA_ARGS__)
+/* no pr_cont_ratelimited, don't do that... */
+/* If you are writing a driver, please use dev_dbg instead */
+#if defined(DEBUG)
+#define pr_debug_ratelimited(fmt, ...) \
+ printk_ratelimited(KERN_DEBUG pr_fmt(fmt), ##__VA_ARGS__)
+#else
+#define pr_debug_ratelimited(fmt, ...) \
+ ({ if (0) printk_ratelimited(KERN_DEBUG pr_fmt(fmt), \
+ ##__VA_ARGS__); 0; })
+#endif
+
+/*
* General tracing related utility functions - trace_printk(),
* tracing_on/tracing_off and tracing_start()/tracing_stop
*
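
A small sketch of the new rate-limited wrappers, useful when an error can fire at interrupt rate (report_hw_error() is an illustrative helper, not part of this patch):

#include <linux/kernel.h>
#include <linux/ratelimit.h>	/* DEFAULT_RATELIMIT_INTERVAL / _BURST */

static void report_hw_error(u32 status)
{
	/* At most DEFAULT_RATELIMIT_BURST messages per DEFAULT_RATELIMIT_INTERVAL. */
	pr_err_ratelimited("device error, status 0x%08x\n", status);
}
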
diff --git a/include/linux/ksm.h b/include/linux/ksm.h
index a485c14ecd5..bed5f16ba82 100644
--- a/include/linux/ksm.h
+++ b/include/linux/ksm.h
@@ -9,8 +9,12 @@
#include <linux/bitops.h>
#include <linux/mm.h>
+#include <linux/pagemap.h>
+#include <linux/rmap.h>
#include <linux/sched.h>
-#include <linux/vmstat.h>
+
+struct stable_node;
+struct mem_cgroup;
#ifdef CONFIG_KSM
int ksm_madvise(struct vm_area_struct *vma, unsigned long start,
@@ -34,23 +38,60 @@ static inline void ksm_exit(struct mm_struct *mm)
/*
* A KSM page is one of those write-protected "shared pages" or "merged pages"
* which KSM maps into multiple mms, wherever identical anonymous page content
- * is found in VM_MERGEABLE vmas. It's a PageAnon page, with NULL anon_vma.
+ * is found in VM_MERGEABLE vmas. It's a PageAnon page, pointing not to any
+ * anon_vma, but to that page's node of the stable tree.
*/
static inline int PageKsm(struct page *page)
{
- return ((unsigned long)page->mapping == PAGE_MAPPING_ANON);
+ return ((unsigned long)page->mapping & PAGE_MAPPING_FLAGS) ==
+ (PAGE_MAPPING_ANON | PAGE_MAPPING_KSM);
+}
+
+static inline struct stable_node *page_stable_node(struct page *page)
+{
+ return PageKsm(page) ? page_rmapping(page) : NULL;
+}
+
+static inline void set_page_stable_node(struct page *page,
+ struct stable_node *stable_node)
+{
+ page->mapping = (void *)stable_node +
+ (PAGE_MAPPING_ANON | PAGE_MAPPING_KSM);
}
/*
- * But we have to avoid the checking which page_add_anon_rmap() performs.
+ * When do_swap_page() first faults in from swap what used to be a KSM page,
+ * no problem, it will be assigned to this vma's anon_vma; but thereafter,
+ * it might be faulted into a different anon_vma (or perhaps to a different
+ * offset in the same anon_vma). do_swap_page() cannot do all the locking
+ * needed to reconstitute a cross-anon_vma KSM page: for now it has to make
+ * a copy, and leave remerging the pages to a later pass of ksmd.
+ *
+ * We'd like to make this conditional on vma->vm_flags & VM_MERGEABLE,
+ * but what if the vma was unmerged while the page was swapped out?
*/
-static inline void page_add_ksm_rmap(struct page *page)
+struct page *ksm_does_need_to_copy(struct page *page,
+ struct vm_area_struct *vma, unsigned long address);
+static inline struct page *ksm_might_need_to_copy(struct page *page,
+ struct vm_area_struct *vma, unsigned long address)
{
- if (atomic_inc_and_test(&page->_mapcount)) {
- page->mapping = (void *) PAGE_MAPPING_ANON;
- __inc_zone_page_state(page, NR_ANON_PAGES);
- }
+ struct anon_vma *anon_vma = page_anon_vma(page);
+
+ if (!anon_vma ||
+ (anon_vma == vma->anon_vma &&
+ page->index == linear_page_index(vma, address)))
+ return page;
+
+ return ksm_does_need_to_copy(page, vma, address);
}
+
+int page_referenced_ksm(struct page *page,
+ struct mem_cgroup *memcg, unsigned long *vm_flags);
+int try_to_unmap_ksm(struct page *page, enum ttu_flags flags);
+int rmap_walk_ksm(struct page *page, int (*rmap_one)(struct page *,
+ struct vm_area_struct *, unsigned long, void *), void *arg);
+void ksm_migrate_page(struct page *newpage, struct page *oldpage);
+
#else /* !CONFIG_KSM */
static inline int ksm_madvise(struct vm_area_struct *vma, unsigned long start,
@@ -73,7 +114,32 @@ static inline int PageKsm(struct page *page)
return 0;
}
-/* No stub required for page_add_ksm_rmap(page) */
+static inline struct page *ksm_might_need_to_copy(struct page *page,
+ struct vm_area_struct *vma, unsigned long address)
+{
+ return page;
+}
+
+static inline int page_referenced_ksm(struct page *page,
+ struct mem_cgroup *memcg, unsigned long *vm_flags)
+{
+ return 0;
+}
+
+static inline int try_to_unmap_ksm(struct page *page, enum ttu_flags flags)
+{
+ return 0;
+}
+
+static inline int rmap_walk_ksm(struct page *page, int (*rmap_one)(struct page*,
+ struct vm_area_struct *, unsigned long, void *), void *arg)
+{
+ return 0;
+}
+
+static inline void ksm_migrate_page(struct page *newpage, struct page *oldpage)
+{
+}
#endif /* !CONFIG_KSM */
-#endif
+#endif /* __LINUX_KSM_H */
diff --git a/include/linux/lis3lv02d.h b/include/linux/lis3lv02d.h
index 3cc2f2c53e4..f1ca0dcc162 100644
--- a/include/linux/lis3lv02d.h
+++ b/include/linux/lis3lv02d.h
@@ -43,6 +43,21 @@ struct lis3lv02d_platform_data {
#define LIS3_WAKEUP_Z_HI (1 << 5)
unsigned char wakeup_flags;
unsigned char wakeup_thresh;
+#define LIS3_NO_MAP 0
+#define LIS3_DEV_X 1
+#define LIS3_DEV_Y 2
+#define LIS3_DEV_Z 3
+#define LIS3_INV_DEV_X -1
+#define LIS3_INV_DEV_Y -2
+#define LIS3_INV_DEV_Z -3
+ s8 axis_x;
+ s8 axis_y;
+ s8 axis_z;
+ int (*setup_resources)(void);
+ int (*release_resources)(void);
+ /* Limits for selftest are specified in chip data sheet */
+ s16 st_min_limits[3]; /* min pass limit x, y, z */
+ s16 st_max_limits[3]; /* max pass limit x, y, z */
};
#endif /* __LIS3LV02D_H_ */
diff --git a/include/linux/memory_hotplug.h b/include/linux/memory_hotplug.h
index fed969281a4..35b07b773e6 100644
--- a/include/linux/memory_hotplug.h
+++ b/include/linux/memory_hotplug.h
@@ -69,7 +69,6 @@ extern void online_page(struct page *page);
/* VM interface that may be used by firmware interface */
extern int online_pages(unsigned long, unsigned long);
extern void __offline_isolated_pages(unsigned long, unsigned long);
-extern int offline_pages(unsigned long, unsigned long, unsigned long);
/* reasonably generic interface to expand the physical pages in a zone */
extern int __add_pages(int nid, struct zone *zone, unsigned long start_pfn,
diff --git a/include/linux/mempolicy.h b/include/linux/mempolicy.h
index 085c903fe0f..1cc966cd3e5 100644
--- a/include/linux/mempolicy.h
+++ b/include/linux/mempolicy.h
@@ -201,6 +201,7 @@ extern void mpol_fix_fork_child_flag(struct task_struct *p);
extern struct zonelist *huge_zonelist(struct vm_area_struct *vma,
unsigned long addr, gfp_t gfp_flags,
struct mempolicy **mpol, nodemask_t **nodemask);
+extern bool init_nodemask_of_mempolicy(nodemask_t *mask);
extern unsigned slab_node(struct mempolicy *policy);
extern enum zone_type policy_zone;
@@ -328,6 +329,8 @@ static inline struct zonelist *huge_zonelist(struct vm_area_struct *vma,
return node_zonelist(0, gfp_flags);
}
+static inline bool init_nodemask_of_mempolicy(nodemask_t *m) { return false; }
+
static inline int do_migrate_pages(struct mm_struct *mm,
const nodemask_t *from_nodes,
const nodemask_t *to_nodes, int flags)
diff --git a/include/linux/mfd/88pm8607.h b/include/linux/mfd/88pm8607.h
new file mode 100644
index 00000000000..f41b428d2ce
--- /dev/null
+++ b/include/linux/mfd/88pm8607.h
@@ -0,0 +1,217 @@
+/*
+ * Marvell 88PM8607 Interface
+ *
+ * Copyright (C) 2009 Marvell International Ltd.
+ * Haojian Zhuang <haojian.zhuang@marvell.com>
+ *
+ * This program is free software; you can redistribute it and/or modify
+ * it under the terms of the GNU General Public License version 2 as
+ * published by the Free Software Foundation.
+ */
+
+#ifndef __LINUX_MFD_88PM8607_H
+#define __LINUX_MFD_88PM8607_H
+
+enum {
+ PM8607_ID_BUCK1 = 0,
+ PM8607_ID_BUCK2,
+ PM8607_ID_BUCK3,
+
+ PM8607_ID_LDO1,
+ PM8607_ID_LDO2,
+ PM8607_ID_LDO3,
+ PM8607_ID_LDO4,
+ PM8607_ID_LDO5,
+ PM8607_ID_LDO6,
+ PM8607_ID_LDO7,
+ PM8607_ID_LDO8,
+ PM8607_ID_LDO9,
+ PM8607_ID_LDO10,
+ PM8607_ID_LDO12,
+ PM8607_ID_LDO14,
+
+ PM8607_ID_RG_MAX,
+};
+
+#define CHIP_ID (0x40)
+#define CHIP_ID_MASK (0xF8)
+
+/* Interrupt Registers */
+#define PM8607_STATUS_1 (0x01)
+#define PM8607_STATUS_2 (0x02)
+#define PM8607_INT_STATUS1 (0x03)
+#define PM8607_INT_STATUS2 (0x04)
+#define PM8607_INT_STATUS3 (0x05)
+#define PM8607_INT_MASK_1 (0x06)
+#define PM8607_INT_MASK_2 (0x07)
+#define PM8607_INT_MASK_3 (0x08)
+
+/* Regulator Control Registers */
+#define PM8607_LDO1 (0x10)
+#define PM8607_LDO2 (0x11)
+#define PM8607_LDO3 (0x12)
+#define PM8607_LDO4 (0x13)
+#define PM8607_LDO5 (0x14)
+#define PM8607_LDO6 (0x15)
+#define PM8607_LDO7 (0x16)
+#define PM8607_LDO8 (0x17)
+#define PM8607_LDO9 (0x18)
+#define PM8607_LDO10 (0x19)
+#define PM8607_LDO12 (0x1A)
+#define PM8607_LDO14 (0x1B)
+#define PM8607_SLEEP_MODE1 (0x1C)
+#define PM8607_SLEEP_MODE2 (0x1D)
+#define PM8607_SLEEP_MODE3 (0x1E)
+#define PM8607_SLEEP_MODE4 (0x1F)
+#define PM8607_GO (0x20)
+#define PM8607_SLEEP_BUCK1 (0x21)
+#define PM8607_SLEEP_BUCK2 (0x22)
+#define PM8607_SLEEP_BUCK3 (0x23)
+#define PM8607_BUCK1 (0x24)
+#define PM8607_BUCK2 (0x25)
+#define PM8607_BUCK3 (0x26)
+#define PM8607_BUCK_CONTROLS (0x27)
+#define PM8607_SUPPLIES_EN11 (0x2B)
+#define PM8607_SUPPLIES_EN12 (0x2C)
+#define PM8607_GROUP1 (0x2D)
+#define PM8607_GROUP2 (0x2E)
+#define PM8607_GROUP3 (0x2F)
+#define PM8607_GROUP4 (0x30)
+#define PM8607_GROUP5 (0x31)
+#define PM8607_GROUP6 (0x32)
+#define PM8607_SUPPLIES_EN21 (0x33)
+#define PM8607_SUPPLIES_EN22 (0x34)
+
+/* RTC Control Registers */
+#define PM8607_RTC1 (0xA0)
+#define PM8607_RTC_COUNTER1 (0xA1)
+#define PM8607_RTC_COUNTER2 (0xA2)
+#define PM8607_RTC_COUNTER3 (0xA3)
+#define PM8607_RTC_COUNTER4 (0xA4)
+#define PM8607_RTC_EXPIRE1 (0xA5)
+#define PM8607_RTC_EXPIRE2 (0xA6)
+#define PM8607_RTC_EXPIRE3 (0xA7)
+#define PM8607_RTC_EXPIRE4 (0xA8)
+#define PM8607_RTC_TRIM1 (0xA9)
+#define PM8607_RTC_TRIM2 (0xAA)
+#define PM8607_RTC_TRIM3 (0xAB)
+#define PM8607_RTC_TRIM4 (0xAC)
+#define PM8607_RTC_MISC1 (0xAD)
+#define PM8607_RTC_MISC2 (0xAE)
+#define PM8607_RTC_MISC3 (0xAF)
+
+/* Misc Registers */
+#define PM8607_CHIP_ID (0x00)
+#define PM8607_LDO1 (0x10)
+#define PM8607_DVC3 (0x26)
+#define PM8607_MISC1 (0x40)
+
+/* bit definitions for PM8607 events */
+#define PM8607_EVENT_ONKEY (1 << 0)
+#define PM8607_EVENT_EXTON (1 << 1)
+#define PM8607_EVENT_CHG (1 << 2)
+#define PM8607_EVENT_BAT (1 << 3)
+#define PM8607_EVENT_RTC (1 << 4)
+#define PM8607_EVENT_CC (1 << 5)
+#define PM8607_EVENT_VBAT (1 << 8)
+#define PM8607_EVENT_VCHG (1 << 9)
+#define PM8607_EVENT_VSYS (1 << 10)
+#define PM8607_EVENT_TINT (1 << 11)
+#define PM8607_EVENT_GPADC0 (1 << 12)
+#define PM8607_EVENT_GPADC1 (1 << 13)
+#define PM8607_EVENT_GPADC2 (1 << 14)
+#define PM8607_EVENT_GPADC3 (1 << 15)
+#define PM8607_EVENT_AUDIO_SHORT (1 << 16)
+#define PM8607_EVENT_PEN (1 << 17)
+#define PM8607_EVENT_HEADSET (1 << 18)
+#define PM8607_EVENT_HOOK (1 << 19)
+#define PM8607_EVENT_MICIN (1 << 20)
+#define PM8607_EVENT_CHG_TIMEOUT (1 << 21)
+#define PM8607_EVENT_CHG_DONE (1 << 22)
+#define PM8607_EVENT_CHG_FAULT (1 << 23)
+
+/* bit definitions of Status Query Interface */
+#define PM8607_STATUS_CC (1 << 3)
+#define PM8607_STATUS_PEN (1 << 4)
+#define PM8607_STATUS_HEADSET (1 << 5)
+#define PM8607_STATUS_HOOK (1 << 6)
+#define PM8607_STATUS_MICIN (1 << 7)
+#define PM8607_STATUS_ONKEY (1 << 8)
+#define PM8607_STATUS_EXTON (1 << 9)
+#define PM8607_STATUS_CHG (1 << 10)
+#define PM8607_STATUS_BAT (1 << 11)
+#define PM8607_STATUS_VBUS (1 << 12)
+#define PM8607_STATUS_OV (1 << 13)
+
+/* bit definitions of BUCK3 */
+#define PM8607_BUCK3_DOUBLE (1 << 6)
+
+/* bit definitions of Misc1 */
+#define PM8607_MISC1_PI2C (1 << 0)
+
+/* Interrupt Number in 88PM8607 */
+enum {
+ PM8607_IRQ_ONKEY = 0,
+ PM8607_IRQ_EXTON,
+ PM8607_IRQ_CHG,
+ PM8607_IRQ_BAT,
+ PM8607_IRQ_RTC,
+ PM8607_IRQ_VBAT = 8,
+ PM8607_IRQ_VCHG,
+ PM8607_IRQ_VSYS,
+ PM8607_IRQ_TINT,
+ PM8607_IRQ_GPADC0,
+ PM8607_IRQ_GPADC1,
+ PM8607_IRQ_GPADC2,
+ PM8607_IRQ_GPADC3,
+ PM8607_IRQ_AUDIO_SHORT = 16,
+ PM8607_IRQ_PEN,
+ PM8607_IRQ_HEADSET,
+ PM8607_IRQ_HOOK,
+ PM8607_IRQ_MICIN,
+ PM8607_IRQ_CHG_FAIL,
+ PM8607_IRQ_CHG_DONE,
+ PM8607_IRQ_CHG_FAULT,
+};
+
+enum {
+ PM8607_CHIP_A0 = 0x40,
+ PM8607_CHIP_A1 = 0x41,
+ PM8607_CHIP_B0 = 0x48,
+};
+
+
+struct pm8607_chip {
+ struct device *dev;
+ struct mutex io_lock;
+ struct i2c_client *client;
+
+ int (*read)(struct pm8607_chip *chip, int reg, int bytes, void *dest);
+ int (*write)(struct pm8607_chip *chip, int reg, int bytes, void *src);
+
+ int buck3_double; /* DVC ramp slope double */
+ unsigned char chip_id;
+
+};
+
+#define PM8607_MAX_REGULATOR 15 /* 3 Bucks, 12 LDOs */
+
+enum {
+ GI2C_PORT = 0,
+ PI2C_PORT,
+};
+
+struct pm8607_platform_data {
+ int i2c_port; /* Controlled by GI2C or PI2C */
+ struct regulator_init_data *regulator[PM8607_MAX_REGULATOR];
+};
+
+extern int pm8607_reg_read(struct pm8607_chip *, int);
+extern int pm8607_reg_write(struct pm8607_chip *, int, unsigned char);
+extern int pm8607_bulk_read(struct pm8607_chip *, int, int,
+ unsigned char *);
+extern int pm8607_bulk_write(struct pm8607_chip *, int, int,
+ unsigned char *);
+extern int pm8607_set_bits(struct pm8607_chip *, int, unsigned char,
+ unsigned char);
+#endif /* __LINUX_MFD_88PM8607_H */
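
A brief sketch of a sub-driver using the accessor helpers declared above to flip BUCK3 into its doubled DVC range (whether a given board wants this is platform policy; the function itself is illustrative, not part of this diff):

#include <linux/mfd/88pm8607.h>

static int pm8607_enable_buck3_double(struct pm8607_chip *chip)
{
	int ret = pm8607_reg_read(chip, PM8607_BUCK3);

	if (ret < 0)
		return ret;
	if (ret & PM8607_BUCK3_DOUBLE)
		return 0;	/* already enabled */

	return pm8607_set_bits(chip, PM8607_BUCK3, PM8607_BUCK3_DOUBLE,
			       PM8607_BUCK3_DOUBLE);
}
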
diff --git a/include/linux/mfd/ab4500.h b/include/linux/mfd/ab4500.h
new file mode 100644
index 00000000000..a42a7033ae5
--- /dev/null
+++ b/include/linux/mfd/ab4500.h
@@ -0,0 +1,262 @@
+/*
+ * Copyright (C) 2009 ST-Ericsson
+ *
+ * Author: Srinidhi KASAGAR <srinidhi.kasagar@stericsson.com>
+ *
+ * This program is free software; you can redistribute it and/or modify
+ * it under the terms of the GNU General Public License version 2, as
+ * published by the Free Software Foundation.
+ *
+ * AB4500 device core funtions, for client access
+ */
+#ifndef MFD_AB4500_H
+#define MFD_AB4500_H
+
+#include <linux/device.h>
+
+/*
+ * AB4500 bank addresses
+ */
+#define AB4500_SYS_CTRL1_BLOCK 0x1
+#define AB4500_SYS_CTRL2_BLOCK 0x2
+#define AB4500_REGU_CTRL1 0x3
+#define AB4500_REGU_CTRL2 0x4
+#define AB4500_USB 0x5
+#define AB4500_TVOUT 0x6
+#define AB4500_DBI 0x7
+#define AB4500_ECI_AV_ACC 0x8
+#define AB4500_RESERVED 0x9
+#define AB4500_GPADC 0xA
+#define AB4500_CHARGER 0xB
+#define AB4500_GAS_GAUGE 0xC
+#define AB4500_AUDIO 0xD
+#define AB4500_INTERRUPT 0xE
+#define AB4500_RTC 0xF
+#define AB4500_MISC 0x10
+#define AB4500_DEBUG 0x12
+#define AB4500_PROD_TEST 0x13
+#define AB4500_OTP_EMUL 0x15
+
+/*
+ * System control 1 register offsets.
+ * Bank = 0x01
+ */
+#define AB4500_TURNON_STAT_REG 0x0100
+#define AB4500_RESET_STAT_REG 0x0101
+#define AB4500_PONKEY1_PRESS_STAT_REG 0x0102
+
+#define AB4500_FSM_STAT1_REG 0x0140
+#define AB4500_FSM_STAT2_REG 0x0141
+#define AB4500_SYSCLK_REQ_STAT_REG 0x0142
+#define AB4500_USB_STAT1_REG 0x0143
+#define AB4500_USB_STAT2_REG 0x0144
+#define AB4500_STATUS_SPARE1_REG 0x0145
+#define AB4500_STATUS_SPARE2_REG 0x0146
+
+#define AB4500_CTRL1_REG 0x0180
+#define AB4500_CTRL2_REG 0x0181
+
+/*
+ * System control 2 register offsets.
+ * bank = 0x02
+ */
+#define AB4500_CTRL3_REG 0x0200
+#define AB4500_MAIN_WDOG_CTRL_REG 0x0201
+#define AB4500_MAIN_WDOG_TIMER_REG 0x0202
+#define AB4500_LOW_BAT_REG 0x0203
+#define AB4500_BATT_OK_REG 0x0204
+#define AB4500_SYSCLK_TIMER_REG 0x0205
+#define AB4500_SMPSCLK_CTRL_REG 0x0206
+#define AB4500_SMPSCLK_SEL1_REG 0x0207
+#define AB4500_SMPSCLK_SEL2_REG 0x0208
+#define AB4500_SMPSCLK_SEL3_REG 0x0209
+#define AB4500_SYSULPCLK_CONF_REG 0x020A
+#define AB4500_SYSULPCLK_CTRL1_REG 0x020B
+#define AB4500_SYSCLK_CTRL_REG 0x020C
+#define AB4500_SYSCLK_REQ1_VALID_REG 0x020D
+#define AB4500_SYSCLK_REQ_VALID_REG 0x020E
+#define AB4500_SYSCTRL_SPARE_REG 0x020F
+#define AB4500_PAD_CONF_REG 0x0210
+
+/*
+ * Regu control1 register offsets
+ * Bank = 0x03
+ */
+#define AB4500_REGU_SERIAL_CTRL1_REG 0x0300
+#define AB4500_REGU_SERIAL_CTRL2_REG 0x0301
+#define AB4500_REGU_SERIAL_CTRL3_REG 0x0302
+#define AB4500_REGU_REQ_CTRL1_REG 0x0303
+#define AB4500_REGU_REQ_CTRL2_REG 0x0304
+#define AB4500_REGU_REQ_CTRL3_REG 0x0305
+#define AB4500_REGU_REQ_CTRL4_REG 0x0306
+#define AB4500_REGU_MISC1_REG 0x0380
+#define AB4500_REGU_OTGSUPPLY_CTRL_REG 0x0381
+#define AB4500_REGU_VUSB_CTRL_REG 0x0382
+#define AB4500_REGU_VAUDIO_SUPPLY_REG 0x0383
+#define AB4500_REGU_CTRL1_SPARE_REG 0x0384
+
+/*
+ * Regu control2 Vmod register offsets
+ */
+#define AB4500_REGU_VMOD_REGU_REG 0x0440
+#define AB4500_REGU_VMOD_SEL1_REG 0x0441
+#define AB4500_REGU_VMOD_SEL2_REG 0x0442
+#define AB4500_REGU_CTRL_DISCH_REG 0x0443
+#define AB4500_REGU_CTRL_DISCH2_REG 0x0444
+
+/*
+ * USB/ULPI register offsets
+ * Bank : 0x5
+ */
+#define AB4500_USB_LINE_STAT_REG 0x0580
+#define AB4500_USB_LINE_CTRL1_REG 0x0581
+#define AB4500_USB_LINE_CTRL2_REG 0x0582
+#define AB4500_USB_LINE_CTRL3_REG 0x0583
+#define AB4500_USB_LINE_CTRL4_REG 0x0584
+#define AB4500_USB_LINE_CTRL5_REG 0x0585
+#define AB4500_USB_OTG_CTRL_REG 0x0587
+#define AB4500_USB_OTG_STAT_REG 0x0588
+#define AB4500_USB_CTRL_SPARE_REG 0x0589
+#define AB4500_USB_PHY_CTRL_REG 0x058A
+
+/*
+ * TVOUT / CTRL register offsets
+ * Bank : 0x06
+ */
+#define AB4500_TVOUT_CTRL_REG 0x0680
+
+/*
+ * DBI register offsets
+ * Bank : 0x07
+ */
+#define AB4500_DBI_REG1_REG 0x0700
+#define AB4500_DBI_REG2_REG 0x0701
+
+/*
+ * ECI register offsets
+ * Bank : 0x08
+ */
+#define AB4500_ECI_CTRL_REG 0x0800
+#define AB4500_ECI_HOOKLEVEL_REG 0x0801
+#define AB4500_ECI_DATAOUT_REG 0x0802
+#define AB4500_ECI_DATAIN_REG 0x0803
+
+/*
+ * AV Connector register offsets
+ * Bank : 0x08
+ */
+#define AB4500_AV_CONN_REG 0x0840
+
+/*
+ * Accessory detection register offsets
+ * Bank : 0x08
+ */
+#define AB4500_ACC_DET_DB1_REG 0x0880
+#define AB4500_ACC_DET_DB2_REG 0x0881
+
+/*
+ * GPADC register offsets
+ * Bank : 0x0A
+ */
+#define AB4500_GPADC_CTRL1_REG 0x0A00
+#define AB4500_GPADC_CTRL2_REG 0x0A01
+#define AB4500_GPADC_CTRL3_REG 0x0A02
+#define AB4500_GPADC_AUTO_TIMER_REG 0x0A03
+#define AB4500_GPADC_STAT_REG 0x0A04
+#define AB4500_GPADC_MANDATAL_REG 0x0A05
+#define AB4500_GPADC_MANDATAH_REG 0x0A06
+#define AB4500_GPADC_AUTODATAL_REG 0x0A07
+#define AB4500_GPADC_AUTODATAH_REG 0x0A08
+#define AB4500_GPADC_MUX_CTRL_REG 0x0A09
+
+/*
+ * Charger / status register offsets
+ * Bank : 0x0B
+ */
+#define AB4500_CH_STATUS1_REG 0x0B00
+#define AB4500_CH_STATUS2_REG 0x0B01
+#define AB4500_CH_USBCH_STAT1_REG 0x0B02
+#define AB4500_CH_USBCH_STAT2_REG 0x0B03
+#define AB4500_CH_FSM_STAT_REG 0x0B04
+#define AB4500_CH_STAT_REG 0x0B05
+
+/*
+ * Charger / control register offsets
+ * Bank : 0x0B
+ */
+#define AB4500_CH_VOLT_LVL_REG 0x0B40
+
+/*
+ * Charger / main control register offsets
+ * Bank : 0x0B
+ */
+#define AB4500_MCH_CTRL1 0x0B80
+#define AB4500_MCH_CTRL2 0x0B81
+#define AB4500_MCH_IPT_CURLVL_REG 0x0B82
+#define AB4500_CH_WD_REG 0x0B83
+
+/*
+ * Charger / USB control register offsets
+ * Bank : 0x0B
+ */
+#define AB4500_USBCH_CTRL1_REG 0x0BC0
+#define AB4500_USBCH_CTRL2_REG 0x0BC1
+#define AB4500_USBCH_IPT_CRNTLVL_REG 0x0BC2
+
+/*
+ * RTC bank register offsets
+ * Bank : 0xF
+ */
+#define AB4500_RTC_SOFF_STAT_REG 0x0F00
+#define AB4500_RTC_CC_CONF_REG 0x0F01
+#define AB4500_RTC_READ_REQ_REG 0x0F02
+#define AB4500_RTC_WATCH_TSECMID_REG 0x0F03
+#define AB4500_RTC_WATCH_TSECHI_REG 0x0F04
+#define AB4500_RTC_WATCH_TMIN_LOW_REG 0x0F05
+#define AB4500_RTC_WATCH_TMIN_MID_REG 0x0F06
+#define AB4500_RTC_WATCH_TMIN_HI_REG 0x0F07
+#define AB4500_RTC_ALRM_MIN_LOW_REG 0x0F08
+#define AB4500_RTC_ALRM_MIN_MID_REG 0x0F09
+#define AB4500_RTC_ALRM_MIN_HI_REG 0x0F0A
+#define AB4500_RTC_STAT_REG 0x0F0B
+#define AB4500_RTC_BKUP_CHG_REG 0x0F0C
+#define AB4500_RTC_FORCE_BKUP_REG 0x0F0D
+#define AB4500_RTC_CALIB_REG 0x0F0E
+#define AB4500_RTC_SWITCH_STAT_REG 0x0F0F
+
+/*
+ * PWM Out generators
+ * Bank: 0x10
+ */
+#define AB4500_PWM_OUT_CTRL1_REG 0x1060
+#define AB4500_PWM_OUT_CTRL2_REG 0x1061
+#define AB4500_PWM_OUT_CTRL3_REG 0x1062
+#define AB4500_PWM_OUT_CTRL4_REG 0x1063
+#define AB4500_PWM_OUT_CTRL5_REG 0x1064
+#define AB4500_PWM_OUT_CTRL6_REG 0x1065
+#define AB4500_PWM_OUT_CTRL7_REG 0x1066
+
+#define AB4500_I2C_PAD_CTRL_REG 0x1067
+#define AB4500_REV_REG 0x1080
+
+/**
+ * struct ab4500
+ * @spi: spi device structure
+ * @tx_buf: transmit buffer
+ * @rx_buf: receive buffer
+ * @lock: sync primitive
+ */
+struct ab4500 {
+ struct spi_device *spi;
+ unsigned long tx_buf[4];
+ unsigned long rx_buf[4];
+ struct mutex lock;
+};
+
+int ab4500_write(struct ab4500 *ab4500, unsigned char block,
+ unsigned long addr, unsigned char data);
+int ab4500_read(struct ab4500 *ab4500, unsigned char block,
+ unsigned long addr);
+
+#endif /* MFD_AB4500_H */
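
For reference, a sketch of how a client might call ab4500_read()/ab4500_write() with the bank and register constants above. This header does not spell out whether the register defines (which carry the bank in their high byte) are passed as-is or as bare in-bank offsets, so the addressing below is an assumption.

	/* Hypothetical client: read the chip revision, then poke PWM control 1. */
	static int ab4500_example(struct ab4500 *ab)
	{
		int rev;

		rev = ab4500_read(ab, AB4500_MISC, AB4500_REV_REG);
		if (rev < 0)
			return rev;

		return ab4500_write(ab, AB4500_MISC, AB4500_PWM_OUT_CTRL1_REG, 0x01);
	}
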
diff --git a/include/linux/mfd/adp5520.h b/include/linux/mfd/adp5520.h
new file mode 100644
index 00000000000..ac37558a467
--- /dev/null
+++ b/include/linux/mfd/adp5520.h
@@ -0,0 +1,299 @@
+/*
+ * Definitions and platform data for Analog Devices
+ * ADP5520/ADP5501 MFD PMICs (Backlight, LED, GPIO and Keys)
+ *
+ * Copyright 2009 Analog Devices Inc.
+ *
+ * Licensed under the GPL-2 or later.
+ */
+
+
+#ifndef __LINUX_MFD_ADP5520_H
+#define __LINUX_MFD_ADP5520_H
+
+#define ID_ADP5520 5520
+#define ID_ADP5501 5501
+
+/*
+ * ADP5520/ADP5501 Register Map
+ */
+
+#define ADP5520_MODE_STATUS 0x00
+#define ADP5520_INTERRUPT_ENABLE 0x01
+#define ADP5520_BL_CONTROL 0x02
+#define ADP5520_BL_TIME 0x03
+#define ADP5520_BL_FADE 0x04
+#define ADP5520_DAYLIGHT_MAX 0x05
+#define ADP5520_DAYLIGHT_DIM 0x06
+#define ADP5520_OFFICE_MAX 0x07
+#define ADP5520_OFFICE_DIM 0x08
+#define ADP5520_DARK_MAX 0x09
+#define ADP5520_DARK_DIM 0x0A
+#define ADP5520_BL_VALUE 0x0B
+#define ADP5520_ALS_CMPR_CFG 0x0C
+#define ADP5520_L2_TRIP 0x0D
+#define ADP5520_L2_HYS 0x0E
+#define ADP5520_L3_TRIP 0x0F
+#define ADP5520_L3_HYS 0x10
+#define ADP5520_LED_CONTROL 0x11
+#define ADP5520_LED_TIME 0x12
+#define ADP5520_LED_FADE 0x13
+#define ADP5520_LED1_CURRENT 0x14
+#define ADP5520_LED2_CURRENT 0x15
+#define ADP5520_LED3_CURRENT 0x16
+
+/*
+ * ADP5520 Register Map
+ */
+
+#define ADP5520_GPIO_CFG_1 0x17
+#define ADP5520_GPIO_CFG_2 0x18
+#define ADP5520_GPIO_IN 0x19
+#define ADP5520_GPIO_OUT 0x1A
+#define ADP5520_GPIO_INT_EN 0x1B
+#define ADP5520_GPIO_INT_STAT 0x1C
+#define ADP5520_GPIO_INT_LVL 0x1D
+#define ADP5520_GPIO_DEBOUNCE 0x1E
+#define ADP5520_GPIO_PULLUP 0x1F
+#define ADP5520_KP_INT_STAT_1 0x20
+#define ADP5520_KP_INT_STAT_2 0x21
+#define ADP5520_KR_INT_STAT_1 0x22
+#define ADP5520_KR_INT_STAT_2 0x23
+#define ADP5520_KEY_STAT_1 0x24
+#define ADP5520_KEY_STAT_2 0x25
+
+/*
+ * MODE_STATUS bits
+ */
+
+#define ADP5520_nSTNBY (1 << 7)
+#define ADP5520_BL_EN (1 << 6)
+#define ADP5520_DIM_EN (1 << 5)
+#define ADP5520_OVP_INT (1 << 4)
+#define ADP5520_CMPR_INT (1 << 3)
+#define ADP5520_GPI_INT (1 << 2)
+#define ADP5520_KR_INT (1 << 1)
+#define ADP5520_KP_INT (1 << 0)
+
+/*
+ * INTERRUPT_ENABLE bits
+ */
+
+#define ADP5520_AUTO_LD_EN (1 << 4)
+#define ADP5520_CMPR_IEN (1 << 3)
+#define ADP5520_OVP_IEN (1 << 2)
+#define ADP5520_KR_IEN (1 << 1)
+#define ADP5520_KP_IEN (1 << 0)
+
+/*
+ * BL_CONTROL bits
+ */
+
+#define ADP5520_BL_LVL(x)		((x) << 5)
+#define ADP5520_BL_LAW(x)		((x) << 4)
+#define ADP5520_BL_AUTO_ADJ (1 << 3)
+#define ADP5520_OVP_EN (1 << 2)
+#define ADP5520_FOVR (1 << 1)
+#define ADP5520_KP_BL_EN (1 << 0)
+
+/*
+ * ALS_CMPR_CFG bits
+ */
+
+#define ADP5520_L3_OUT (1 << 3)
+#define ADP5520_L2_OUT (1 << 2)
+#define ADP5520_L3_EN (1 << 1)
+
+#define ADP5020_MAX_BRIGHTNESS 0x7F
+
+#define FADE_VAL(in, out) ((0xF & (in)) | ((0xF & (out)) << 4))
+#define BL_CTRL_VAL(law, auto) (((1 & (auto)) << 3) | ((0x3 & (law)) << 4))
+#define ALS_CMPR_CFG_VAL(filt, l3_en)	(((0x7 & (filt)) << 5) | (l3_en))
+
+/*
+ * LEDs subdevice bits and masks
+ */
+
+#define ADP5520_01_MAXLEDS 3
+
+#define ADP5520_FLAG_LED_MASK 0x3
+#define ADP5520_FLAG_OFFT_SHIFT 8
+#define ADP5520_FLAG_OFFT_MASK 0x3
+
+#define ADP5520_R3_MODE (1 << 5)
+#define ADP5520_C3_MODE (1 << 4)
+#define ADP5520_LED_LAW (1 << 3)
+#define ADP5520_LED3_EN (1 << 2)
+#define ADP5520_LED2_EN (1 << 1)
+#define ADP5520_LED1_EN (1 << 0)
+
+/*
+ * GPIO subdevice bits and masks
+ */
+
+#define ADP5520_MAXGPIOS 8
+
+#define ADP5520_GPIO_C3 (1 << 7) /* LED2 or GPIO7 aka C3 */
+#define ADP5520_GPIO_C2 (1 << 6)
+#define ADP5520_GPIO_C1 (1 << 5)
+#define ADP5520_GPIO_C0 (1 << 4)
+#define ADP5520_GPIO_R3 (1 << 3) /* LED3 or GPIO3 aka R3 */
+#define ADP5520_GPIO_R2 (1 << 2)
+#define ADP5520_GPIO_R1 (1 << 1)
+#define ADP5520_GPIO_R0 (1 << 0)
+
+struct adp5520_gpio_platform_data {
+ unsigned gpio_start;
+ u8 gpio_en_mask;
+ u8 gpio_pullup_mask;
+};
+
+/*
+ * Keypad subdevice bits and masks
+ */
+
+#define ADP5520_MAXKEYS 16
+
+#define ADP5520_COL_C3 (1 << 7) /* LED2 or GPIO7 aka C3 */
+#define ADP5520_COL_C2 (1 << 6)
+#define ADP5520_COL_C1 (1 << 5)
+#define ADP5520_COL_C0 (1 << 4)
+#define ADP5520_ROW_R3 (1 << 3) /* LED3 or GPIO3 aka R3 */
+#define ADP5520_ROW_R2 (1 << 2)
+#define ADP5520_ROW_R1 (1 << 1)
+#define ADP5520_ROW_R0 (1 << 0)
+
+#define ADP5520_KEY(row, col)	((col) + (row) * 4)
+#define ADP5520_KEYMAPSIZE ADP5520_MAXKEYS
+
+struct adp5520_keys_platform_data {
+	int rows_en_mask;		/* Rows enable mask (ADP5520_ROW_*) */
+	int cols_en_mask;		/* Columns enable mask (ADP5520_COL_*) */
+ const unsigned short *keymap; /* Pointer to keymap */
+ unsigned short keymapsize; /* Keymap size */
+ unsigned repeat:1; /* Enable key repeat */
+};
+
+
+/*
+ * LEDs subdevice platform data
+ */
+
+#define FLAG_ID_ADP5520_LED1_ADP5501_LED0 1 /* ADP5520 PIN ILED */
+#define FLAG_ID_ADP5520_LED2_ADP5501_LED1 2 /* ADP5520 PIN C3 */
+#define FLAG_ID_ADP5520_LED3_ADP5501_LED2 3 /* ADP5520 PIN R3 */
+
+#define ADP5520_LED_DIS_BLINK (0 << ADP5520_FLAG_OFFT_SHIFT)
+#define ADP5520_LED_OFFT_600ms (1 << ADP5520_FLAG_OFFT_SHIFT)
+#define ADP5520_LED_OFFT_800ms (2 << ADP5520_FLAG_OFFT_SHIFT)
+#define ADP5520_LED_OFFT_1200ms (3 << ADP5520_FLAG_OFFT_SHIFT)
+
+#define ADP5520_LED_ONT_200ms 0
+#define ADP5520_LED_ONT_600ms 1
+#define ADP5520_LED_ONT_800ms 2
+#define ADP5520_LED_ONT_1200ms 3
+
+struct adp5520_leds_platform_data {
+ int num_leds;
+ struct led_info *leds;
+ u8 fade_in; /* Backlight Fade-In Timer */
+ u8 fade_out; /* Backlight Fade-Out Timer */
+ u8 led_on_time;
+};
+
+/*
+ * Backlight subdevice platform data
+ */
+
+#define ADP5520_FADE_T_DIS 0 /* Fade Timer Disabled */
+#define ADP5520_FADE_T_300ms 1 /* 0.3 Sec */
+#define ADP5520_FADE_T_600ms 2
+#define ADP5520_FADE_T_900ms 3
+#define ADP5520_FADE_T_1200ms 4
+#define ADP5520_FADE_T_1500ms 5
+#define ADP5520_FADE_T_1800ms 6
+#define ADP5520_FADE_T_2100ms 7
+#define ADP5520_FADE_T_2400ms 8
+#define ADP5520_FADE_T_2700ms 9
+#define ADP5520_FADE_T_3000ms 10
+#define ADP5520_FADE_T_3500ms 11
+#define ADP5520_FADE_T_4000ms 12
+#define ADP5520_FADE_T_4500ms 13
+#define ADP5520_FADE_T_5000ms 14
+#define ADP5520_FADE_T_5500ms 15 /* 5.5 Sec */
+
+#define ADP5520_BL_LAW_LINEAR 0
+#define ADP5520_BL_LAW_SQUARE 1
+#define ADP5520_BL_LAW_CUBIC1 2
+#define ADP5520_BL_LAW_CUBIC2 3
+
+#define ADP5520_BL_AMBL_FILT_80ms 0 /* Light sensor filter time */
+#define ADP5520_BL_AMBL_FILT_160ms 1
+#define ADP5520_BL_AMBL_FILT_320ms 2
+#define ADP5520_BL_AMBL_FILT_640ms 3
+#define ADP5520_BL_AMBL_FILT_1280ms 4
+#define ADP5520_BL_AMBL_FILT_2560ms 5
+#define ADP5520_BL_AMBL_FILT_5120ms 6
+#define ADP5520_BL_AMBL_FILT_10240ms 7 /* 10.24 sec */
+
+ /*
+  * Backlight current 0..30 mA
+  */
+#define ADP5520_BL_CUR_mA(I)		(((I) * 127) / 30)
+
+ /*
+  * L2 comparator current 0..1000 uA
+  */
+#define ADP5520_L2_COMP_CURR_uA(I)	(((I) * 255) / 1000)
+
+ /*
+  * L3 comparator current 0..127 uA
+  */
+#define ADP5520_L3_COMP_CURR_uA(I)	(((I) * 255) / 127)
+
+struct adp5520_backlight_platform_data {
+ u8 fade_in; /* Backlight Fade-In Timer */
+ u8 fade_out; /* Backlight Fade-Out Timer */
+ u8 fade_led_law; /* fade-on/fade-off transfer characteristic */
+
+ u8 en_ambl_sens; /* 1 = enable ambient light sensor */
+ u8 abml_filt; /* Light sensor filter time */
+ u8 l1_daylight_max; /* use BL_CUR_mA(I) 0 <= I <= 30 mA */
+ u8 l1_daylight_dim; /* typ = 0, use BL_CUR_mA(I) 0 <= I <= 30 mA */
+ u8 l2_office_max; /* use BL_CUR_mA(I) 0 <= I <= 30 mA */
+ u8 l2_office_dim; /* typ = 0, use BL_CUR_mA(I) 0 <= I <= 30 mA */
+ u8 l3_dark_max; /* use BL_CUR_mA(I) 0 <= I <= 30 mA */
+ u8 l3_dark_dim; /* typ = 0, use BL_CUR_mA(I) 0 <= I <= 30 mA */
+ u8 l2_trip; /* use L2_COMP_CURR_uA(I) 0 <= I <= 1000 uA */
+ u8 l2_hyst; /* use L2_COMP_CURR_uA(I) 0 <= I <= 1000 uA */
+ u8 l3_trip; /* use L3_COMP_CURR_uA(I) 0 <= I <= 127 uA */
+ u8 l3_hyst; /* use L3_COMP_CURR_uA(I) 0 <= I <= 127 uA */
+};
+
+/*
+ * MFD chip platform data
+ */
+
+struct adp5520_platform_data {
+ struct adp5520_keys_platform_data *keys;
+ struct adp5520_gpio_platform_data *gpio;
+ struct adp5520_leds_platform_data *leds;
+ struct adp5520_backlight_platform_data *backlight;
+};
+
+/*
+ * MFD chip functions
+ */
+
+extern int adp5520_read(struct device *dev, int reg, uint8_t *val);
+extern int adp5520_write(struct device *dev, int reg, u8 val);
+extern int adp5520_clr_bits(struct device *dev, int reg, uint8_t bit_mask);
+extern int adp5520_set_bits(struct device *dev, int reg, uint8_t bit_mask);
+
+extern int adp5520_register_notifier(struct device *dev,
+ struct notifier_block *nb, unsigned int events);
+
+extern int adp5520_unregister_notifier(struct device *dev,
+ struct notifier_block *nb, unsigned int events);
+
+#endif /* __LINUX_MFD_ADP5520_H */
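
For reference, a sketch of board-level platform data built from the helpers above; the variable names and the chosen timings/currents are illustrative assumptions, not values recommended by this header.

	/* Hypothetical board file: backlight platform data for an ADP5520. */
	static struct adp5520_backlight_platform_data example_bl_pdata = {
		.fade_in	= ADP5520_FADE_T_600ms,
		.fade_out	= ADP5520_FADE_T_600ms,
		.fade_led_law	= ADP5520_BL_LAW_LINEAR,
		.en_ambl_sens	= 1,
		.abml_filt	= ADP5520_BL_AMBL_FILT_640ms,
		.l1_daylight_max = ADP5520_BL_CUR_mA(15),
		.l2_office_max	= ADP5520_BL_CUR_mA(7),
		.l3_dark_max	= ADP5520_BL_CUR_mA(3),
		.l2_trip	= ADP5520_L2_COMP_CURR_uA(700),
		.l3_trip	= ADP5520_L3_COMP_CURR_uA(55),
	};

	static struct adp5520_platform_data example_adp5520_pdata = {
		.backlight = &example_bl_pdata,
	};
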
diff --git a/include/linux/mfd/ezx-pcap.h b/include/linux/mfd/ezx-pcap.h
index 3402042ddc3..40c372165f3 100644
--- a/include/linux/mfd/ezx-pcap.h
+++ b/include/linux/mfd/ezx-pcap.h
@@ -231,9 +231,6 @@ void pcap_set_ts_bits(struct pcap_chip *, u32);
#define PCAP_LED_4MA 1
#define PCAP_LED_5MA 2
#define PCAP_LED_9MA 3
-#define PCAP_LED_GPIO_VAL_MASK 0x00ffffff
-#define PCAP_LED_GPIO_EN 0x01000000
-#define PCAP_LED_GPIO_INVERT 0x02000000
#define PCAP_LED_T_MASK 0xf
#define PCAP_LED_C_MASK 0x3
#define PCAP_BL_MASK 0x1f
diff --git a/include/linux/mfd/mc13783-private.h b/include/linux/mfd/mc13783-private.h
index 47e698cb0f1..95cf9360553 100644
--- a/include/linux/mfd/mc13783-private.h
+++ b/include/linux/mfd/mc13783-private.h
@@ -24,52 +24,23 @@
#include <linux/platform_device.h>
#include <linux/mfd/mc13783.h>
-#include <linux/workqueue.h>
#include <linux/mutex.h>
-
-struct mc13783_irq {
- void (*handler)(int, void *);
- void *data;
-};
-
-#define MC13783_NUM_IRQ 2
-#define MC13783_IRQ_TS 0
-#define MC13783_IRQ_REGULATOR 1
-
-#define MC13783_ADC_MODE_TS 1
-#define MC13783_ADC_MODE_SINGLE_CHAN 2
-#define MC13783_ADC_MODE_MULT_CHAN 3
+#include <linux/interrupt.h>
struct mc13783 {
- int revision;
- struct device *dev;
- struct spi_device *spi_device;
-
- int (*read_dev)(void *data, char reg, int count, u32 *dst);
- int (*write_dev)(void *data, char reg, int count, const u32 *src);
-
- struct mutex io_lock;
- void *io_data;
+ struct spi_device *spidev;
+ struct mutex lock;
int irq;
- unsigned int flags;
+ int flags;
- struct mc13783_irq irq_handler[MC13783_NUM_IRQ];
- struct work_struct work;
- struct completion adc_done;
- unsigned int ts_active;
- struct mutex adc_conv_lock;
+ irq_handler_t irqhandler[MC13783_NUM_IRQ];
+ void *irqdata[MC13783_NUM_IRQ];
+ /* XXX these should go as platformdata to the regulator subdevice */
struct mc13783_regulator_init_data *regulators;
int num_regulators;
};
-int mc13783_reg_read(struct mc13783 *, int reg_num, u32 *);
-int mc13783_reg_write(struct mc13783 *, int, u32);
-int mc13783_set_bits(struct mc13783 *, int, u32, u32);
-int mc13783_free_irq(struct mc13783 *mc13783, int irq);
-int mc13783_register_irq(struct mc13783 *mc13783, int irq,
- void (*handler) (int, void *), void *data);
-
#define MC13783_REG_INTERRUPT_STATUS_0 0
#define MC13783_REG_INTERRUPT_MASK_0 1
#define MC13783_REG_INTERRUPT_SENSE_0 2
@@ -136,55 +107,6 @@ int mc13783_register_irq(struct mc13783 *mc13783, int irq,
#define MC13783_REG_TEST_3 63
#define MC13783_REG_NB 64
-
-/*
- * Interrupt Status
- */
-#define MC13783_INT_STAT_ADCDONEI (1 << 0)
-#define MC13783_INT_STAT_ADCBISDONEI (1 << 1)
-#define MC13783_INT_STAT_TSI (1 << 2)
-#define MC13783_INT_STAT_WHIGHI (1 << 3)
-#define MC13783_INT_STAT_WLOWI (1 << 4)
-#define MC13783_INT_STAT_CHGDETI (1 << 6)
-#define MC13783_INT_STAT_CHGOVI (1 << 7)
-#define MC13783_INT_STAT_CHGREVI (1 << 8)
-#define MC13783_INT_STAT_CHGSHORTI (1 << 9)
-#define MC13783_INT_STAT_CCCVI (1 << 10)
-#define MC13783_INT_STAT_CHGCURRI (1 << 11)
-#define MC13783_INT_STAT_BPONI (1 << 12)
-#define MC13783_INT_STAT_LOBATLI (1 << 13)
-#define MC13783_INT_STAT_LOBATHI (1 << 14)
-#define MC13783_INT_STAT_UDPI (1 << 15)
-#define MC13783_INT_STAT_USBI (1 << 16)
-#define MC13783_INT_STAT_IDI (1 << 19)
-#define MC13783_INT_STAT_Unused (1 << 20)
-#define MC13783_INT_STAT_SE1I (1 << 21)
-#define MC13783_INT_STAT_CKDETI (1 << 22)
-#define MC13783_INT_STAT_UDMI (1 << 23)
-
-/*
- * Interrupt Mask
- */
-#define MC13783_INT_MASK_ADCDONEM (1 << 0)
-#define MC13783_INT_MASK_ADCBISDONEM (1 << 1)
-#define MC13783_INT_MASK_TSM (1 << 2)
-#define MC13783_INT_MASK_WHIGHM (1 << 3)
-#define MC13783_INT_MASK_WLOWM (1 << 4)
-#define MC13783_INT_MASK_CHGDETM (1 << 6)
-#define MC13783_INT_MASK_CHGOVM (1 << 7)
-#define MC13783_INT_MASK_CHGREVM (1 << 8)
-#define MC13783_INT_MASK_CHGSHORTM (1 << 9)
-#define MC13783_INT_MASK_CCCVM (1 << 10)
-#define MC13783_INT_MASK_CHGCURRM (1 << 11)
-#define MC13783_INT_MASK_BPONM (1 << 12)
-#define MC13783_INT_MASK_LOBATLM (1 << 13)
-#define MC13783_INT_MASK_LOBATHM (1 << 14)
-#define MC13783_INT_MASK_UDPM (1 << 15)
-#define MC13783_INT_MASK_USBM (1 << 16)
-#define MC13783_INT_MASK_IDM (1 << 19)
-#define MC13783_INT_MASK_SE1M (1 << 21)
-#define MC13783_INT_MASK_CKDETM (1 << 22)
-
/*
* Reg Regulator Mode 0
*/
@@ -284,113 +206,15 @@ int mc13783_register_irq(struct mc13783 *mc13783, int irq,
#define MC13783_SWCTRL_SW3_STBY (1 << 21)
#define MC13783_SWCTRL_SW3_MODE (1 << 22)
-/*
- * ADC/Touch
- */
-#define MC13783_ADC0_LICELLCON (1 << 0)
-#define MC13783_ADC0_CHRGICON (1 << 1)
-#define MC13783_ADC0_BATICON (1 << 2)
-#define MC13783_ADC0_RTHEN (1 << 3)
-#define MC13783_ADC0_DTHEN (1 << 4)
-#define MC13783_ADC0_UIDEN (1 << 5)
-#define MC13783_ADC0_ADOUTEN (1 << 6)
-#define MC13783_ADC0_ADOUTPER (1 << 7)
-#define MC13783_ADC0_ADREFEN (1 << 10)
-#define MC13783_ADC0_ADREFMODE (1 << 11)
-#define MC13783_ADC0_TSMOD0 (1 << 12)
-#define MC13783_ADC0_TSMOD1 (1 << 13)
-#define MC13783_ADC0_TSMOD2 (1 << 14)
-#define MC13783_ADC0_CHRGRAWDIV (1 << 15)
-#define MC13783_ADC0_ADINC1 (1 << 16)
-#define MC13783_ADC0_ADINC2 (1 << 17)
-#define MC13783_ADC0_WCOMP (1 << 18)
-#define MC13783_ADC0_ADCBIS0 (1 << 23)
-
-#define MC13783_ADC1_ADEN (1 << 0)
-#define MC13783_ADC1_RAND (1 << 1)
-#define MC13783_ADC1_ADSEL (1 << 3)
-#define MC13783_ADC1_TRIGMASK (1 << 4)
-#define MC13783_ADC1_ADA10 (1 << 5)
-#define MC13783_ADC1_ADA11 (1 << 6)
-#define MC13783_ADC1_ADA12 (1 << 7)
-#define MC13783_ADC1_ADA20 (1 << 8)
-#define MC13783_ADC1_ADA21 (1 << 9)
-#define MC13783_ADC1_ADA22 (1 << 10)
-#define MC13783_ADC1_ATO0 (1 << 11)
-#define MC13783_ADC1_ATO1 (1 << 12)
-#define MC13783_ADC1_ATO2 (1 << 13)
-#define MC13783_ADC1_ATO3 (1 << 14)
-#define MC13783_ADC1_ATO4 (1 << 15)
-#define MC13783_ADC1_ATO5 (1 << 16)
-#define MC13783_ADC1_ATO6 (1 << 17)
-#define MC13783_ADC1_ATO7 (1 << 18)
-#define MC13783_ADC1_ATOX (1 << 19)
-#define MC13783_ADC1_ASC (1 << 20)
-#define MC13783_ADC1_ADTRIGIGN (1 << 21)
-#define MC13783_ADC1_ADONESHOT (1 << 22)
-#define MC13783_ADC1_ADCBIS1 (1 << 23)
-
-#define MC13783_ADC1_CHAN0_SHIFT 5
-#define MC13783_ADC1_CHAN1_SHIFT 8
-
-#define MC13783_ADC2_ADD10 (1 << 2)
-#define MC13783_ADC2_ADD11 (1 << 3)
-#define MC13783_ADC2_ADD12 (1 << 4)
-#define MC13783_ADC2_ADD13 (1 << 5)
-#define MC13783_ADC2_ADD14 (1 << 6)
-#define MC13783_ADC2_ADD15 (1 << 7)
-#define MC13783_ADC2_ADD16 (1 << 8)
-#define MC13783_ADC2_ADD17 (1 << 9)
-#define MC13783_ADC2_ADD18 (1 << 10)
-#define MC13783_ADC2_ADD19 (1 << 11)
-#define MC13783_ADC2_ADD20 (1 << 14)
-#define MC13783_ADC2_ADD21 (1 << 15)
-#define MC13783_ADC2_ADD22 (1 << 16)
-#define MC13783_ADC2_ADD23 (1 << 17)
-#define MC13783_ADC2_ADD24 (1 << 18)
-#define MC13783_ADC2_ADD25 (1 << 19)
-#define MC13783_ADC2_ADD26 (1 << 20)
-#define MC13783_ADC2_ADD27 (1 << 21)
-#define MC13783_ADC2_ADD28 (1 << 22)
-#define MC13783_ADC2_ADD29 (1 << 23)
+static inline int mc13783_set_bits(struct mc13783 *mc13783, unsigned int offset,
+		u32 mask, u32 val)
+{
+	int ret;
+
+	mc13783_lock(mc13783);
+	ret = mc13783_reg_rmw(mc13783, offset, mask, val);
+	mc13783_unlock(mc13783);
-#define MC13783_ADC3_WHIGH0 (1 << 0)
-#define MC13783_ADC3_WHIGH1 (1 << 1)
-#define MC13783_ADC3_WHIGH2 (1 << 2)
-#define MC13783_ADC3_WHIGH3 (1 << 3)
-#define MC13783_ADC3_WHIGH4 (1 << 4)
-#define MC13783_ADC3_WHIGH5 (1 << 5)
-#define MC13783_ADC3_ICID0 (1 << 6)
-#define MC13783_ADC3_ICID1 (1 << 7)
-#define MC13783_ADC3_ICID2 (1 << 8)
-#define MC13783_ADC3_WLOW0 (1 << 9)
-#define MC13783_ADC3_WLOW1 (1 << 10)
-#define MC13783_ADC3_WLOW2 (1 << 11)
-#define MC13783_ADC3_WLOW3 (1 << 12)
-#define MC13783_ADC3_WLOW4 (1 << 13)
-#define MC13783_ADC3_WLOW5 (1 << 14)
-#define MC13783_ADC3_ADCBIS2 (1 << 23)
-
-#define MC13783_ADC4_ADDBIS10 (1 << 2)
-#define MC13783_ADC4_ADDBIS11 (1 << 3)
-#define MC13783_ADC4_ADDBIS12 (1 << 4)
-#define MC13783_ADC4_ADDBIS13 (1 << 5)
-#define MC13783_ADC4_ADDBIS14 (1 << 6)
-#define MC13783_ADC4_ADDBIS15 (1 << 7)
-#define MC13783_ADC4_ADDBIS16 (1 << 8)
-#define MC13783_ADC4_ADDBIS17 (1 << 9)
-#define MC13783_ADC4_ADDBIS18 (1 << 10)
-#define MC13783_ADC4_ADDBIS19 (1 << 11)
-#define MC13783_ADC4_ADDBIS20 (1 << 14)
-#define MC13783_ADC4_ADDBIS21 (1 << 15)
-#define MC13783_ADC4_ADDBIS22 (1 << 16)
-#define MC13783_ADC4_ADDBIS23 (1 << 17)
-#define MC13783_ADC4_ADDBIS24 (1 << 18)
-#define MC13783_ADC4_ADDBIS25 (1 << 19)
-#define MC13783_ADC4_ADDBIS26 (1 << 20)
-#define MC13783_ADC4_ADDBIS27 (1 << 21)
-#define MC13783_ADC4_ADDBIS28 (1 << 22)
-#define MC13783_ADC4_ADDBIS29 (1 << 23)
+ return ret;
+}
#endif /* __LINUX_MFD_MC13783_PRIV_H */
-
diff --git a/include/linux/mfd/mc13783.h b/include/linux/mfd/mc13783.h
index b3a2a724357..35680409b8c 100644
--- a/include/linux/mfd/mc13783.h
+++ b/include/linux/mfd/mc13783.h
@@ -1,28 +1,50 @@
/*
- * Copyright 2009 Pengutronix, Sascha Hauer <s.hauer@pengutronix.de>
+ * Copyright 2009 Pengutronix
+ * Uwe Kleine-Koenig <u.kleine-koenig@pengutronix.de>
*
- * Initial development of this code was funded by
- * Phytec Messtechnik GmbH, http://www.phytec.de
- *
- * This program is free software; you can redistribute it and/or modify
- * it under the terms of the GNU General Public License as published by
- * the Free Software Foundation; either version 2 of the License, or
- * (at your option) any later version.
- *
- * This program is distributed in the hope that it will be useful,
- * but WITHOUT ANY WARRANTY; without even the implied warranty of
- * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
- * GNU General Public License for more details.
- *
- * You should have received a copy of the GNU General Public License
- * along with this program; if not, write to the Free Software
- * Foundation, Inc., 675 Mass Ave, Cambridge, MA 02139, USA.
+ * This program is free software; you can redistribute it and/or modify it under
+ * the terms of the GNU General Public License version 2 as published by the
+ * Free Software Foundation.
*/
+#ifndef __LINUX_MFD_MC13783_H
+#define __LINUX_MFD_MC13783_H
-#ifndef __INCLUDE_LINUX_MFD_MC13783_H
-#define __INCLUDE_LINUX_MFD_MC13783_H
+#include <linux/interrupt.h>
struct mc13783;
+
+void mc13783_lock(struct mc13783 *mc13783);
+void mc13783_unlock(struct mc13783 *mc13783);
+
+int mc13783_reg_read(struct mc13783 *mc13783, unsigned int offset, u32 *val);
+int mc13783_reg_write(struct mc13783 *mc13783, unsigned int offset, u32 val);
+int mc13783_reg_rmw(struct mc13783 *mc13783, unsigned int offset,
+ u32 mask, u32 val);
+
+int mc13783_irq_request(struct mc13783 *mc13783, int irq,
+ irq_handler_t handler, const char *name, void *dev);
+int mc13783_irq_request_nounmask(struct mc13783 *mc13783, int irq,
+ irq_handler_t handler, const char *name, void *dev);
+int mc13783_irq_free(struct mc13783 *mc13783, int irq, void *dev);
+int mc13783_ackirq(struct mc13783 *mc13783, int irq);
+
+int mc13783_mask(struct mc13783 *mc13783, int irq);
+int mc13783_unmask(struct mc13783 *mc13783, int irq);
+
+#define MC13783_ADC0 43
+#define MC13783_ADC0_ADREFEN (1 << 10)
+#define MC13783_ADC0_ADREFMODE (1 << 11)
+#define MC13783_ADC0_TSMOD0 (1 << 12)
+#define MC13783_ADC0_TSMOD1 (1 << 13)
+#define MC13783_ADC0_TSMOD2 (1 << 14)
+#define MC13783_ADC0_ADINC1 (1 << 16)
+#define MC13783_ADC0_ADINC2 (1 << 17)
+
+#define MC13783_ADC0_TSMOD_MASK (MC13783_ADC0_TSMOD0 | \
+ MC13783_ADC0_TSMOD1 | \
+ MC13783_ADC0_TSMOD2)
+
+/* to be cleaned up */
struct regulator_init_data;
struct mc13783_regulator_init_data {
@@ -30,23 +52,30 @@ struct mc13783_regulator_init_data {
struct regulator_init_data *init_data;
};
-struct mc13783_platform_data {
- struct mc13783_regulator_init_data *regulators;
+struct mc13783_regulator_platform_data {
int num_regulators;
- unsigned int flags;
+ struct mc13783_regulator_init_data *regulators;
};
-/* mc13783_platform_data flags */
+struct mc13783_platform_data {
+ int num_regulators;
+ struct mc13783_regulator_init_data *regulators;
+
#define MC13783_USE_TOUCHSCREEN (1 << 0)
#define MC13783_USE_CODEC (1 << 1)
#define MC13783_USE_ADC (1 << 2)
#define MC13783_USE_RTC (1 << 3)
#define MC13783_USE_REGULATOR (1 << 4)
+ unsigned int flags;
+};
+
+#define MC13783_ADC_MODE_TS 1
+#define MC13783_ADC_MODE_SINGLE_CHAN 2
+#define MC13783_ADC_MODE_MULT_CHAN 3
int mc13783_adc_do_conversion(struct mc13783 *mc13783, unsigned int mode,
unsigned int channel, unsigned int *sample);
-void mc13783_adc_set_ts_status(struct mc13783 *mc13783, unsigned int status);
#define MC13783_SW_SW1A 0
#define MC13783_SW_SW1B 1
@@ -80,5 +109,46 @@ void mc13783_adc_set_ts_status(struct mc13783 *mc13783, unsigned int status);
#define MC13783_REGU_V3 29
#define MC13783_REGU_V4 30
-#endif /* __INCLUDE_LINUX_MFD_MC13783_H */
+#define MC13783_IRQ_ADCDONE 0
+#define MC13783_IRQ_ADCBISDONE 1
+#define MC13783_IRQ_TS 2
+#define MC13783_IRQ_WHIGH 3
+#define MC13783_IRQ_WLOW 4
+#define MC13783_IRQ_CHGDET 6
+#define MC13783_IRQ_CHGOV 7
+#define MC13783_IRQ_CHGREV 8
+#define MC13783_IRQ_CHGSHORT 9
+#define MC13783_IRQ_CCCV 10
+#define MC13783_IRQ_CHGCURR 11
+#define MC13783_IRQ_BPON 12
+#define MC13783_IRQ_LOBATL 13
+#define MC13783_IRQ_LOBATH 14
+#define MC13783_IRQ_UDP 15
+#define MC13783_IRQ_USB 16
+#define MC13783_IRQ_ID 19
+#define MC13783_IRQ_SE1 21
+#define MC13783_IRQ_CKDET 22
+#define MC13783_IRQ_UDM 23
+#define MC13783_IRQ_1HZ 24
+#define MC13783_IRQ_TODA 25
+#define MC13783_IRQ_ONOFD1 27
+#define MC13783_IRQ_ONOFD2 28
+#define MC13783_IRQ_ONOFD3 29
+#define MC13783_IRQ_SYSRST 30
+#define MC13783_IRQ_RTCRST 31
+#define MC13783_IRQ_PC 32
+#define MC13783_IRQ_WARM 33
+#define MC13783_IRQ_MEMHLD 34
+#define MC13783_IRQ_PWRRDY 35
+#define MC13783_IRQ_THWARNL 36
+#define MC13783_IRQ_THWARNH 37
+#define MC13783_IRQ_CLK 38
+#define MC13783_IRQ_SEMAF 39
+#define MC13783_IRQ_MC2B 41
+#define MC13783_IRQ_HSDET 42
+#define MC13783_IRQ_HSL 43
+#define MC13783_IRQ_ALSPTH 44
+#define MC13783_IRQ_AHSSHORT 45
+#define MC13783_NUM_IRQ 46
+#endif /* __LINUX_MFD_MC13783_H */
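
For reference, a sketch of the reworked IRQ interface declared above; the handler, function names and the choice of MC13783_IRQ_ADCDONE are illustrative assumptions.

	/* Hypothetical sub-driver: claim and acknowledge the ADC-done interrupt. */
	static irqreturn_t example_adcdone_irq(int irq, void *data)
	{
		struct mc13783 *mc13783 = data;

		mc13783_ackirq(mc13783, MC13783_IRQ_ADCDONE);
		return IRQ_HANDLED;
	}

	static int example_setup(struct mc13783 *mc13783)
	{
		return mc13783_irq_request(mc13783, MC13783_IRQ_ADCDONE,
				example_adcdone_irq, "example-adcdone", mc13783);
	}
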
diff --git a/include/linux/mfd/pcf50633/core.h b/include/linux/mfd/pcf50633/core.h
index 9aba7b779fb..3398bd9aab1 100644
--- a/include/linux/mfd/pcf50633/core.h
+++ b/include/linux/mfd/pcf50633/core.h
@@ -29,7 +29,12 @@ struct pcf50633_platform_data {
char **batteries;
int num_batteries;
- int charging_restart_interval;
+ /*
+	 * Should be set according to the reference resistor used, see
+ * I_{ch(ref)} charger reference current in the pcf50633 User
+ * Manual.
+ */
+ int charger_reference_current_ma;
/* Callbacks */
void (*probe_done)(struct pcf50633 *);
@@ -40,10 +45,6 @@ struct pcf50633_platform_data {
u8 resumers[5];
};
-struct pcf50633_subdev_pdata {
- struct pcf50633 *pcf;
-};
-
struct pcf50633_irq {
void (*handler) (int, void *);
void *data;
@@ -217,5 +218,9 @@ enum pcf50633_reg_int5 {
#define PCF50633_REG_LEDCTL 0x2a
#define PCF50633_REG_LEDDIM 0x2b
-#endif
+static inline struct pcf50633 *dev_to_pcf50633(struct device *dev)
+{
+ return dev_get_drvdata(dev);
+}
+#endif
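
With pcf50633_subdev_pdata removed, subdevice drivers presumably recover the core instance from the parent device's driver data, which is what the new helper wraps; a hedged sketch (the probe function and the use of the parent device are assumptions):

	/* Hypothetical subdevice probe: the parent's drvdata is the pcf50633 core. */
	static int example_probe(struct platform_device *pdev)
	{
		struct pcf50633 *pcf = dev_to_pcf50633(pdev->dev.parent);

		platform_set_drvdata(pdev, pcf);
		return 0;
	}
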
diff --git a/include/linux/mfd/pcf50633/mbc.h b/include/linux/mfd/pcf50633/mbc.h
index 4119579acf2..df4f5fa88de 100644
--- a/include/linux/mfd/pcf50633/mbc.h
+++ b/include/linux/mfd/pcf50633/mbc.h
@@ -128,6 +128,7 @@ enum pcf50633_reg_mbcs3 {
int pcf50633_mbc_usb_curlim_set(struct pcf50633 *pcf, int ma);
int pcf50633_mbc_get_status(struct pcf50633 *);
+int pcf50633_mbc_get_usb_online_status(struct pcf50633 *);
#endif
diff --git a/include/linux/mfd/wm831x/core.h b/include/linux/mfd/wm831x/core.h
index 91eb493bf14..5184b79c700 100644
--- a/include/linux/mfd/wm831x/core.h
+++ b/include/linux/mfd/wm831x/core.h
@@ -16,7 +16,6 @@
#define __MFD_WM831X_CORE_H__
#include <linux/interrupt.h>
-#include <linux/workqueue.h>
/*
* Register values.
@@ -117,6 +116,7 @@
#define WM831X_DC3_SLEEP_CONTROL 0x4063
#define WM831X_DC4_CONTROL 0x4064
#define WM831X_DC4_SLEEP_CONTROL 0x4065
+#define WM832X_DC4_SLEEP_CONTROL 0x4067
#define WM831X_EPE1_CONTROL 0x4066
#define WM831X_EPE2_CONTROL 0x4067
#define WM831X_LDO1_CONTROL 0x4068
@@ -235,6 +235,8 @@
struct regulator_dev;
+#define WM831X_NUM_IRQ_REGS 5
+
struct wm831x {
struct mutex io_lock;
@@ -248,10 +250,11 @@ struct wm831x {
int irq; /* Our chip IRQ */
struct mutex irq_lock;
- struct workqueue_struct *irq_wq;
- struct work_struct irq_work;
unsigned int irq_base;
- int irq_masks[5];
+ int irq_masks_cur[WM831X_NUM_IRQ_REGS]; /* Currently active value */
+ int irq_masks_cache[WM831X_NUM_IRQ_REGS]; /* Cached hardware value */
+
+ int num_gpio;
struct mutex auxadc_lock;
@@ -278,12 +281,30 @@ int wm831x_bulk_read(struct wm831x *wm831x, unsigned short reg,
int wm831x_irq_init(struct wm831x *wm831x, int irq);
void wm831x_irq_exit(struct wm831x *wm831x);
-int __must_check wm831x_request_irq(struct wm831x *wm831x,
- unsigned int irq, irq_handler_t handler,
- unsigned long flags, const char *name,
- void *dev);
-void wm831x_free_irq(struct wm831x *wm831x, unsigned int, void *);
-void wm831x_disable_irq(struct wm831x *wm831x, int irq);
-void wm831x_enable_irq(struct wm831x *wm831x, int irq);
+static inline int __must_check wm831x_request_irq(struct wm831x *wm831x,
+ unsigned int irq,
+ irq_handler_t handler,
+ unsigned long flags,
+ const char *name,
+ void *dev)
+{
+ return request_threaded_irq(irq, NULL, handler, flags, name, dev);
+}
+
+static inline void wm831x_free_irq(struct wm831x *wm831x,
+ unsigned int irq, void *dev)
+{
+ free_irq(irq, dev);
+}
+
+static inline void wm831x_disable_irq(struct wm831x *wm831x, int irq)
+{
+ disable_irq(irq);
+}
+
+static inline void wm831x_enable_irq(struct wm831x *wm831x, int irq)
+{
+ enable_irq(irq);
+}
#endif
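
Since the wrappers above now resolve straight to genirq, a caller supplies an ordinary interrupt number, typically the chip's irq_base plus a chip-local offset; a hedged sketch (handler name, flags and the offset parameter are illustrative):

	static irqreturn_t example_handler(int irq, void *data)
	{
		return IRQ_HANDLED;
	}

	static int example_request(struct wm831x *wm831x, int offset)
	{
		return wm831x_request_irq(wm831x, wm831x->irq_base + offset,
					  example_handler, 0, "wm831x-example",
					  wm831x);
	}
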
diff --git a/include/linux/mfd/wm831x/pdata.h b/include/linux/mfd/wm831x/pdata.h
index 90d820260aa..415c228743d 100644
--- a/include/linux/mfd/wm831x/pdata.h
+++ b/include/linux/mfd/wm831x/pdata.h
@@ -91,6 +91,7 @@ struct wm831x_pdata {
/** Called after subdevices are set up */
int (*post_init)(struct wm831x *wm831x);
+ int irq_base;
int gpio_base;
struct wm831x_backlight_pdata *backlight;
struct wm831x_backup_pdata *backup;
diff --git a/include/linux/mfd/wm8350/core.h b/include/linux/mfd/wm8350/core.h
index 1d595de6a05..43868899bf4 100644
--- a/include/linux/mfd/wm8350/core.h
+++ b/include/linux/mfd/wm8350/core.h
@@ -15,7 +15,7 @@
#include <linux/kernel.h>
#include <linux/mutex.h>
-#include <linux/workqueue.h>
+#include <linux/interrupt.h>
#include <linux/mfd/wm8350/audio.h>
#include <linux/mfd/wm8350/gpio.h>
@@ -601,7 +601,7 @@ extern const u16 wm8352_mode3_defaults[];
struct wm8350;
struct wm8350_irq {
- void (*handler) (struct wm8350 *, int, void *);
+ irq_handler_t handler;
void *data;
};
@@ -646,10 +646,12 @@ struct wm8350 {
* @init: Function called during driver initialisation. Should be
* used by the platform to configure GPIO functions and similar.
* @irq_high: Set if WM8350 IRQ is active high.
+ * @irq_base: Base IRQ for genirq (not currently used).
*/
struct wm8350_platform_data {
int (*init)(struct wm8350 *wm8350);
int irq_high;
+ int irq_base;
};
@@ -676,11 +678,13 @@ int wm8350_block_write(struct wm8350 *wm8350, int reg, int size, u16 *src);
* WM8350 internal interrupts
*/
int wm8350_register_irq(struct wm8350 *wm8350, int irq,
- void (*handler) (struct wm8350 *, int, void *),
- void *data);
+ irq_handler_t handler, unsigned long flags,
+ const char *name, void *data);
int wm8350_free_irq(struct wm8350 *wm8350, int irq);
int wm8350_mask_irq(struct wm8350 *wm8350, int irq);
int wm8350_unmask_irq(struct wm8350 *wm8350, int irq);
-
+int wm8350_irq_init(struct wm8350 *wm8350, int irq,
+ struct wm8350_platform_data *pdata);
+int wm8350_irq_exit(struct wm8350 *wm8350);
#endif
diff --git a/include/linux/mfd/wm8350/gpio.h b/include/linux/mfd/wm8350/gpio.h
index ed91e8f5d29..71af3d6ebe9 100644
--- a/include/linux/mfd/wm8350/gpio.h
+++ b/include/linux/mfd/wm8350/gpio.h
@@ -173,6 +173,24 @@
#define WM8350_GPIO_DEBOUNCE_ON 1
/*
+ * R30 (0x1E) - GPIO Interrupt Status
+ */
+#define WM8350_GP12_EINT 0x1000
+#define WM8350_GP11_EINT 0x0800
+#define WM8350_GP10_EINT 0x0400
+#define WM8350_GP9_EINT 0x0200
+#define WM8350_GP8_EINT 0x0100
+#define WM8350_GP7_EINT 0x0080
+#define WM8350_GP6_EINT 0x0040
+#define WM8350_GP5_EINT 0x0020
+#define WM8350_GP4_EINT 0x0010
+#define WM8350_GP3_EINT 0x0008
+#define WM8350_GP2_EINT 0x0004
+#define WM8350_GP1_EINT 0x0002
+#define WM8350_GP0_EINT 0x0001
+
+
+/*
* R128 (0x80) - GPIO Debounce
*/
#define WM8350_GP12_DB 0x1000
diff --git a/include/linux/migrate.h b/include/linux/migrate.h
index 527602cdea1..7f085c97c79 100644
--- a/include/linux/migrate.h
+++ b/include/linux/migrate.h
@@ -12,7 +12,8 @@ typedef struct page *new_page_t(struct page *, unsigned long private, int **);
extern int putback_lru_pages(struct list_head *l);
extern int migrate_page(struct address_space *,
struct page *, struct page *);
-extern int migrate_pages(struct list_head *l, new_page_t x, unsigned long);
+extern int migrate_pages(struct list_head *l, new_page_t x,
+ unsigned long private, int offlining);
extern int fail_migrate_page(struct address_space *,
struct page *, struct page *);
@@ -26,10 +27,7 @@ extern int migrate_vmas(struct mm_struct *mm,
static inline int putback_lru_pages(struct list_head *l) { return 0; }
static inline int migrate_pages(struct list_head *l, new_page_t x,
- unsigned long private) { return -ENOSYS; }
-
-static inline int migrate_pages_to(struct list_head *pagelist,
- struct vm_area_struct *vma, int dest) { return 0; }
+ unsigned long private, int offlining) { return -ENOSYS; }
static inline int migrate_prep(void) { return -ENOSYS; }
diff --git a/include/linux/mm.h b/include/linux/mm.h
index 24c395694f4..9d65ae4ba0e 100644
--- a/include/linux/mm.h
+++ b/include/linux/mm.h
@@ -620,13 +620,22 @@ void page_address_init(void);
/*
* On an anonymous page mapped into a user virtual memory area,
* page->mapping points to its anon_vma, not to a struct address_space;
- * with the PAGE_MAPPING_ANON bit set to distinguish it.
+ * with the PAGE_MAPPING_ANON bit set to distinguish it. See rmap.h.
+ *
+ * On an anonymous page in a VM_MERGEABLE area, if CONFIG_KSM is enabled,
+ * the PAGE_MAPPING_KSM bit may be set along with the PAGE_MAPPING_ANON bit;
+ * and then page->mapping points, not to an anon_vma, but to a private
+ * structure which KSM associates with that merged page. See ksm.h.
+ *
+ * PAGE_MAPPING_KSM without PAGE_MAPPING_ANON is currently never used.
*
* Please note that, confusingly, "page_mapping" refers to the inode
* address_space which maps the page from disk; whereas "page_mapped"
* refers to user virtual address space into which the page is mapped.
*/
#define PAGE_MAPPING_ANON 1
+#define PAGE_MAPPING_KSM 2
+#define PAGE_MAPPING_FLAGS (PAGE_MAPPING_ANON | PAGE_MAPPING_KSM)
extern struct address_space swapper_space;
static inline struct address_space *page_mapping(struct page *page)
@@ -634,16 +643,19 @@ static inline struct address_space *page_mapping(struct page *page)
struct address_space *mapping = page->mapping;
VM_BUG_ON(PageSlab(page));
-#ifdef CONFIG_SWAP
if (unlikely(PageSwapCache(page)))
mapping = &swapper_space;
- else
-#endif
- if (unlikely((unsigned long)mapping & PAGE_MAPPING_ANON))
+ else if (unlikely((unsigned long)mapping & PAGE_MAPPING_ANON))
mapping = NULL;
return mapping;
}
+/* Neutral page->mapping pointer to address_space or anon_vma or other */
+static inline void *page_rmapping(struct page *page)
+{
+ return (void *)((unsigned long)page->mapping & ~PAGE_MAPPING_FLAGS);
+}
+
static inline int PageAnon(struct page *page)
{
return ((unsigned long)page->mapping & PAGE_MAPPING_ANON) != 0;
@@ -758,6 +770,7 @@ unsigned long unmap_vmas(struct mmu_gather **tlb,
* @pmd_entry: if set, called for each non-empty PMD (3rd-level) entry
* @pte_entry: if set, called for each non-empty PTE (4th-level) entry
* @pte_hole: if set, called for each hole at all levels
+ * @hugetlb_entry: if set, called for each hugetlb entry
*
* (see walk_page_range for more details)
*/
@@ -767,6 +780,8 @@ struct mm_walk {
int (*pmd_entry)(pmd_t *, unsigned long, unsigned long, struct mm_walk *);
int (*pte_entry)(pte_t *, unsigned long, unsigned long, struct mm_walk *);
int (*pte_hole)(unsigned long, unsigned long, struct mm_walk *);
+ int (*hugetlb_entry)(pte_t *, unsigned long, unsigned long,
+ struct mm_walk *);
struct mm_struct *mm;
void *private;
};
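
The new hugetlb_entry hook slots into the same walker as the existing callbacks; a hedged sketch follows (the callback, the counter passed through ->private and the caller are illustrative, and walk_page_range() is assumed to be declared alongside struct mm_walk):

	/* Hypothetical walker: count non-empty hugetlb entries in a range. */
	static int example_hugetlb_entry(pte_t *pte, unsigned long addr,
					 unsigned long end, struct mm_walk *walk)
	{
		unsigned long *count = walk->private;

		if (!pte_none(*pte))
			(*count)++;
		return 0;
	}

	static unsigned long example_count(struct mm_struct *mm,
					   unsigned long start, unsigned long end)
	{
		unsigned long count = 0;
		struct mm_walk walk = {
			.hugetlb_entry	= example_hugetlb_entry,
			.mm		= mm,
			.private	= &count,
		};

		walk_page_range(start, end, &walk);
		return count;
	}
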
diff --git a/include/linux/nfs4.h b/include/linux/nfs4.h
index c4c06020810..9b8299af374 100644
--- a/include/linux/nfs4.h
+++ b/include/linux/nfs4.h
@@ -128,6 +128,8 @@
#define SEQ4_STATUS_RECALLABLE_STATE_REVOKED 0x00000040
#define SEQ4_STATUS_LEASE_MOVED 0x00000080
#define SEQ4_STATUS_RESTART_RECLAIM_NEEDED 0x00000100
+#define SEQ4_STATUS_CB_PATH_DOWN_SESSION 0x00000200
+#define SEQ4_STATUS_BACKCHANNEL_FAULT 0x00000400
#define NFS4_MAX_UINT64 (~(u64)0)
@@ -528,6 +530,7 @@ enum {
NFSPROC4_CLNT_DESTROY_SESSION,
NFSPROC4_CLNT_SEQUENCE,
NFSPROC4_CLNT_GET_LEASE_TIME,
+ NFSPROC4_CLNT_RECLAIM_COMPLETE,
};
/* nfs41 types */
diff --git a/include/linux/nfs_fs_sb.h b/include/linux/nfs_fs_sb.h
index 320569eabe3..34fc6be5bfc 100644
--- a/include/linux/nfs_fs_sb.h
+++ b/include/linux/nfs_fs_sb.h
@@ -209,6 +209,7 @@ struct nfs4_session {
unsigned long session_state;
u32 hash_alg;
u32 ssv_len;
+ struct completion complete;
/* The fore and back channel */
struct nfs4_channel_attrs fc_attrs;
diff --git a/include/linux/nfs_xdr.h b/include/linux/nfs_xdr.h
index 62f63fb0c4c..51071b33575 100644
--- a/include/linux/nfs_xdr.h
+++ b/include/linux/nfs_xdr.h
@@ -170,8 +170,9 @@ struct nfs4_sequence_args {
struct nfs4_sequence_res {
struct nfs4_session *sr_session;
u8 sr_slotid; /* slot used to send request */
- unsigned long sr_renewal_time;
int sr_status; /* sequence operation status */
+ unsigned long sr_renewal_time;
+ u32 sr_status_flags;
};
struct nfs4_get_lease_time_args {
@@ -938,6 +939,16 @@ struct nfs41_create_session_args {
struct nfs41_create_session_res {
struct nfs_client *client;
};
+
+struct nfs41_reclaim_complete_args {
+ /* In the future extend to include curr_fh for use with migration */
+ unsigned char one_fs:1;
+ struct nfs4_sequence_args seq_args;
+};
+
+struct nfs41_reclaim_complete_res {
+ struct nfs4_sequence_res seq_res;
+};
#endif /* CONFIG_NFS_V4_1 */
struct nfs_page;
diff --git a/include/linux/node.h b/include/linux/node.h
index 681a697b9a8..06292dac3ea 100644
--- a/include/linux/node.h
+++ b/include/linux/node.h
@@ -21,13 +21,19 @@
#include <linux/sysdev.h>
#include <linux/cpumask.h>
+#include <linux/workqueue.h>
struct node {
struct sys_device sysdev;
+
+#if defined(CONFIG_MEMORY_HOTPLUG_SPARSE) && defined(CONFIG_HUGETLBFS)
+ struct work_struct node_work;
+#endif
};
struct memory_block;
extern struct node node_devices[];
+typedef void (*node_registration_func_t)(struct node *);
extern int register_node(struct node *, int, struct node *);
extern void unregister_node(struct node *node);
@@ -39,6 +45,11 @@ extern int unregister_cpu_under_node(unsigned int cpu, unsigned int nid);
extern int register_mem_sect_under_node(struct memory_block *mem_blk,
int nid);
extern int unregister_mem_sect_under_nodes(struct memory_block *mem_blk);
+
+#ifdef CONFIG_HUGETLBFS
+extern void register_hugetlbfs_with_node(node_registration_func_t doregister,
+ node_registration_func_t unregister);
+#endif
#else
static inline int register_one_node(int nid)
{
@@ -65,6 +76,11 @@ static inline int unregister_mem_sect_under_nodes(struct memory_block *mem_blk)
{
return 0;
}
+
+static inline void register_hugetlbfs_with_node(node_registration_func_t reg,
+ node_registration_func_t unreg)
+{
+}
#endif
#define to_node(sys_device) container_of(sys_device, struct node, sysdev)
diff --git a/include/linux/nodemask.h b/include/linux/nodemask.h
index b359c4a9ec9..454997cccbd 100644
--- a/include/linux/nodemask.h
+++ b/include/linux/nodemask.h
@@ -245,14 +245,19 @@ static inline int __next_node(int n, const nodemask_t *srcp)
return min_t(int,MAX_NUMNODES,find_next_bit(srcp->bits, MAX_NUMNODES, n+1));
}
+static inline void init_nodemask_of_node(nodemask_t *mask, int node)
+{
+ nodes_clear(*mask);
+ node_set(node, *mask);
+}
+
#define nodemask_of_node(node) \
({ \
typeof(_unused_nodemask_arg_) m; \
if (sizeof(m) == sizeof(unsigned long)) { \
- m.bits[0] = 1UL<<(node); \
+ m.bits[0] = 1UL << (node); \
} else { \
- nodes_clear(m); \
- node_set((node), m); \
+ init_nodemask_of_node(&m, (node)); \
} \
m; \
})
@@ -480,15 +485,17 @@ static inline int num_node_state(enum node_states state)
#define for_each_online_node(node) for_each_node_state(node, N_ONLINE)
/*
- * For nodemask scrach area.(See CPUMASK_ALLOC() in cpumask.h)
+ * For nodemask scratch area.
+ * NODEMASK_ALLOC(type, name, gfp_flags) allocates an object with a specified
+ * type and name.
*/
-
-#if NODES_SHIFT > 8 /* nodemask_t > 64 bytes */
-#define NODEMASK_ALLOC(x, m) struct x *m = kmalloc(sizeof(*m), GFP_KERNEL)
-#define NODEMASK_FREE(m) kfree(m)
+#if NODES_SHIFT > 8 /* MAX_NUMNODES > 256 */
+#define NODEMASK_ALLOC(type, name, gfp_flags) \
+ type *name = kmalloc(sizeof(*name), gfp_flags)
+#define NODEMASK_FREE(m) kfree(m)
#else
-#define NODEMASK_ALLOC(x, m) struct x _m, *m = &_m
-#define NODEMASK_FREE(m)
+#define NODEMASK_ALLOC(type, name, gfp_flags) type _name, *name = &_name
+#define NODEMASK_FREE(m) do {} while (0)
#endif
 /* An example structure for using NODEMASK_ALLOC, used in mempolicy. */
@@ -497,8 +504,10 @@ struct nodemask_scratch {
nodemask_t mask2;
};
-#define NODEMASK_SCRATCH(x) NODEMASK_ALLOC(nodemask_scratch, x)
-#define NODEMASK_SCRATCH_FREE(x) NODEMASK_FREE(x)
+#define NODEMASK_SCRATCH(x) \
+ NODEMASK_ALLOC(struct nodemask_scratch, x, \
+ GFP_KERNEL | __GFP_NORETRY)
+#define NODEMASK_SCRATCH_FREE(x) NODEMASK_FREE(x)
#endif /* __LINUX_NODEMASK_H */
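
For reference, a sketch of the reworked scratch interface; the calling function is illustrative.

	/* Hypothetical caller: the scratch helpers now pass gfp flags internally. */
	static int example_use_scratch(void)
	{
		NODEMASK_SCRATCH(scratch);

		if (!scratch)
			return -ENOMEM;

		init_nodemask_of_node(&scratch->mask2, 0);

		NODEMASK_SCRATCH_FREE(scratch);
		return 0;
	}
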
diff --git a/include/linux/numa.h b/include/linux/numa.h
index a31a7301b15..3aaa31603a8 100644
--- a/include/linux/numa.h
+++ b/include/linux/numa.h
@@ -10,4 +10,6 @@
#define MAX_NUMNODES (1 << NODES_SHIFT)
+#define NUMA_NO_NODE (-1)
+
#endif /* _LINUX_NUMA_H */
diff --git a/include/linux/page-flags.h b/include/linux/page-flags.h
index 6b202b17395..49e907bd067 100644
--- a/include/linux/page-flags.h
+++ b/include/linux/page-flags.h
@@ -99,7 +99,7 @@ enum pageflags {
PG_buddy, /* Page is free, on buddy lists */
PG_swapbacked, /* Page is backed by RAM/swap */
PG_unevictable, /* Page is "unevictable" */
-#ifdef CONFIG_HAVE_MLOCKED_PAGE_BIT
+#ifdef CONFIG_MMU
PG_mlocked, /* Page is vma mlocked */
#endif
#ifdef CONFIG_ARCH_USES_PG_UNCACHED
@@ -259,12 +259,10 @@ PAGEFLAG_FALSE(SwapCache)
PAGEFLAG(Unevictable, unevictable) __CLEARPAGEFLAG(Unevictable, unevictable)
TESTCLEARFLAG(Unevictable, unevictable)
-#ifdef CONFIG_HAVE_MLOCKED_PAGE_BIT
-#define MLOCK_PAGES 1
+#ifdef CONFIG_MMU
PAGEFLAG(Mlocked, mlocked) __CLEARPAGEFLAG(Mlocked, mlocked)
TESTSCFLAG(Mlocked, mlocked) __TESTCLEARFLAG(Mlocked, mlocked)
#else
-#define MLOCK_PAGES 0
PAGEFLAG_FALSE(Mlocked) SETPAGEFLAG_NOOP(Mlocked)
TESTCLEARFLAG_FALSE(Mlocked) __TESTCLEARFLAG_FALSE(Mlocked)
#endif
@@ -393,7 +391,7 @@ static inline void __ClearPageTail(struct page *page)
#endif /* !PAGEFLAGS_EXTENDED */
-#ifdef CONFIG_HAVE_MLOCKED_PAGE_BIT
+#ifdef CONFIG_MMU
#define __PG_MLOCKED (1 << PG_mlocked)
#else
#define __PG_MLOCKED 0
diff --git a/include/linux/pci.h b/include/linux/pci.h
index 04771b9c331..bf1e6708084 100644
--- a/include/linux/pci.h
+++ b/include/linux/pci.h
@@ -1255,7 +1255,7 @@ extern int pci_pci_problems;
extern unsigned long pci_cardbus_io_size;
extern unsigned long pci_cardbus_mem_size;
-extern u8 pci_dfl_cache_line_size;
+extern u8 __devinitdata pci_dfl_cache_line_size;
extern u8 pci_cache_line_size;
extern unsigned long pci_hotplug_io_size;
diff --git a/include/linux/percpu-defs.h b/include/linux/percpu-defs.h
index 9bd03193ecd..5a5d6ce4bd5 100644
--- a/include/linux/percpu-defs.h
+++ b/include/linux/percpu-defs.h
@@ -60,6 +60,7 @@
#define DEFINE_PER_CPU_SECTION(type, name, sec) \
__PCPU_DUMMY_ATTRS char __pcpu_scope_##name; \
+ extern __PCPU_DUMMY_ATTRS char __pcpu_unique_##name; \
__PCPU_DUMMY_ATTRS char __pcpu_unique_##name; \
__PCPU_ATTRS(sec) PER_CPU_DEF_ATTRIBUTES __weak \
__typeof__(type) per_cpu__##name
diff --git a/include/linux/percpu.h b/include/linux/percpu.h
index 878836ca999..cf5efbcf716 100644
--- a/include/linux/percpu.h
+++ b/include/linux/percpu.h
@@ -34,8 +34,6 @@
#ifdef CONFIG_SMP
-#ifndef CONFIG_HAVE_LEGACY_PER_CPU_AREA
-
/* minimum unit size, also is the maximum supported allocation size */
#define PCPU_MIN_UNIT_SIZE PFN_ALIGN(64 << 10)
@@ -130,30 +128,9 @@ extern int __init pcpu_page_first_chunk(size_t reserved_size,
#define per_cpu_ptr(ptr, cpu) SHIFT_PERCPU_PTR((ptr), per_cpu_offset((cpu)))
extern void *__alloc_reserved_percpu(size_t size, size_t align);
-
-#else /* CONFIG_HAVE_LEGACY_PER_CPU_AREA */
-
-struct percpu_data {
- void *ptrs[1];
-};
-
-/* pointer disguising messes up the kmemleak objects tracking */
-#ifndef CONFIG_DEBUG_KMEMLEAK
-#define __percpu_disguise(pdata) (struct percpu_data *)~(unsigned long)(pdata)
-#else
-#define __percpu_disguise(pdata) (struct percpu_data *)(pdata)
-#endif
-
-#define per_cpu_ptr(ptr, cpu) \
-({ \
- struct percpu_data *__p = __percpu_disguise(ptr); \
- (__typeof__(ptr))__p->ptrs[(cpu)]; \
-})
-
-#endif /* CONFIG_HAVE_LEGACY_PER_CPU_AREA */
-
extern void *__alloc_percpu(size_t size, size_t align);
extern void free_percpu(void *__pdata);
+extern phys_addr_t per_cpu_ptr_to_phys(void *addr);
#ifndef CONFIG_HAVE_SETUP_PER_CPU_AREA
extern void __init setup_per_cpu_areas(void);
@@ -179,6 +156,11 @@ static inline void free_percpu(void *p)
kfree(p);
}
+static inline phys_addr_t per_cpu_ptr_to_phys(void *addr)
+{
+ return __pa(addr);
+}
+
static inline void __init setup_per_cpu_areas(void) { }
static inline void *pcpu_lpage_remapped(void *kaddr)
@@ -188,8 +170,8 @@ static inline void *pcpu_lpage_remapped(void *kaddr)
#endif /* CONFIG_SMP */
-#define alloc_percpu(type) (type *)__alloc_percpu(sizeof(type), \
- __alignof__(type))
+#define alloc_percpu(type) \
+ (typeof(type) *)__alloc_percpu(sizeof(type), __alignof__(type))
/*
* Optional methods for optimized non-lvalue per-cpu variable access.
@@ -243,4 +225,404 @@ do { \
# define percpu_xor(var, val) __percpu_generic_to_op(var, (val), ^=)
#endif
+/*
+ * Branching function to split up a function into a set of functions that
+ * are called for different scalar sizes of the objects handled.
+ */
+
+extern void __bad_size_call_parameter(void);
+
+#define __pcpu_size_call_return(stem, variable) \
+({ typeof(variable) pscr_ret__; \
+ switch(sizeof(variable)) { \
+ case 1: pscr_ret__ = stem##1(variable);break; \
+ case 2: pscr_ret__ = stem##2(variable);break; \
+ case 4: pscr_ret__ = stem##4(variable);break; \
+ case 8: pscr_ret__ = stem##8(variable);break; \
+ default: \
+ __bad_size_call_parameter();break; \
+ } \
+ pscr_ret__; \
+})
+
+#define __pcpu_size_call(stem, variable, ...) \
+do { \
+ switch(sizeof(variable)) { \
+ case 1: stem##1(variable, __VA_ARGS__);break; \
+ case 2: stem##2(variable, __VA_ARGS__);break; \
+ case 4: stem##4(variable, __VA_ARGS__);break; \
+ case 8: stem##8(variable, __VA_ARGS__);break; \
+ default: \
+ __bad_size_call_parameter();break; \
+ } \
+} while (0)
+
+/*
+ * Optimized manipulation for memory allocated through the per cpu
+ * allocator or for addresses of per cpu variables (can be determined
+ * using per_cpu_var(xx)).
+ *
+ * These operations guarantee exclusivity of access with respect to other
+ * operations on the *same* processor. The assumption is that per cpu data
+ * is only accessed by a single processor instance (the current one).
+ *
+ * The first group is used for accesses that must be done in a
+ * preemption safe way since we know that the context is not preempt
+ * safe. Interrupts may occur. If the interrupt modifies the variable
+ * too then RMW actions will not be reliable.
+ *
+ * The arch code can provide optimized functions in two ways:
+ *
+ * 1. Override the function completely. F.e. define this_cpu_add().
+ *    The arch must then ensure that the various scalar formats passed
+ *    are handled correctly.
+ *
+ * 2. Provide functions for certain scalar sizes. F.e. provide
+ *    this_cpu_add_2() to provide per cpu atomic operations for 2 byte
+ *    sized RMW actions. If arch code does not provide operations for
+ *    a scalar size then the fallback in the generic code will be
+ *    used.
+ */
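
To make the comment above concrete, a hedged sketch of a caller of the operations defined below; the struct and field are illustrative, and the pointer is assumed to come from the per cpu allocator (alloc_percpu()).

	struct example_stats {
		unsigned long events;
	};

	/* stats points at memory obtained from alloc_percpu(struct example_stats) */
	static void example_bump(struct example_stats *stats)
	{
		/* expands to an arch this_cpu_add_8()/..._4() if one is provided,
		 * otherwise to the preempt-safe generic fallback */
		this_cpu_inc(stats->events);
	}
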
+
+#define _this_cpu_generic_read(pcp) \
+({ typeof(pcp) ret__; \
+ preempt_disable(); \
+ ret__ = *this_cpu_ptr(&(pcp)); \
+ preempt_enable(); \
+ ret__; \
+})
+
+#ifndef this_cpu_read
+# ifndef this_cpu_read_1
+# define this_cpu_read_1(pcp) _this_cpu_generic_read(pcp)
+# endif
+# ifndef this_cpu_read_2
+# define this_cpu_read_2(pcp) _this_cpu_generic_read(pcp)
+# endif
+# ifndef this_cpu_read_4
+# define this_cpu_read_4(pcp) _this_cpu_generic_read(pcp)
+# endif
+# ifndef this_cpu_read_8
+# define this_cpu_read_8(pcp) _this_cpu_generic_read(pcp)
+# endif
+# define this_cpu_read(pcp) __pcpu_size_call_return(this_cpu_read_, (pcp))
+#endif
+
+#define _this_cpu_generic_to_op(pcp, val, op) \
+do { \
+ preempt_disable(); \
+ *__this_cpu_ptr(&pcp) op val; \
+ preempt_enable(); \
+} while (0)
+
+#ifndef this_cpu_write
+# ifndef this_cpu_write_1
+# define this_cpu_write_1(pcp, val) _this_cpu_generic_to_op((pcp), (val), =)
+# endif
+# ifndef this_cpu_write_2
+# define this_cpu_write_2(pcp, val) _this_cpu_generic_to_op((pcp), (val), =)
+# endif
+# ifndef this_cpu_write_4
+# define this_cpu_write_4(pcp, val) _this_cpu_generic_to_op((pcp), (val), =)
+# endif
+# ifndef this_cpu_write_8
+# define this_cpu_write_8(pcp, val) _this_cpu_generic_to_op((pcp), (val), =)
+# endif
+# define this_cpu_write(pcp, val) __pcpu_size_call(this_cpu_write_, (pcp), (val))
+#endif
+
+#ifndef this_cpu_add
+# ifndef this_cpu_add_1
+# define this_cpu_add_1(pcp, val) _this_cpu_generic_to_op((pcp), (val), +=)
+# endif
+# ifndef this_cpu_add_2
+# define this_cpu_add_2(pcp, val) _this_cpu_generic_to_op((pcp), (val), +=)
+# endif
+# ifndef this_cpu_add_4
+# define this_cpu_add_4(pcp, val) _this_cpu_generic_to_op((pcp), (val), +=)
+# endif
+# ifndef this_cpu_add_8
+# define this_cpu_add_8(pcp, val) _this_cpu_generic_to_op((pcp), (val), +=)
+# endif
+# define this_cpu_add(pcp, val) __pcpu_size_call(this_cpu_add_, (pcp), (val))
+#endif
+
+#ifndef this_cpu_sub
+# define this_cpu_sub(pcp, val) this_cpu_add((pcp), -(val))
+#endif
+
+#ifndef this_cpu_inc
+# define this_cpu_inc(pcp) this_cpu_add((pcp), 1)
+#endif
+
+#ifndef this_cpu_dec
+# define this_cpu_dec(pcp) this_cpu_sub((pcp), 1)
+#endif
+
+#ifndef this_cpu_and
+# ifndef this_cpu_and_1
+# define this_cpu_and_1(pcp, val) _this_cpu_generic_to_op((pcp), (val), &=)
+# endif
+# ifndef this_cpu_and_2
+# define this_cpu_and_2(pcp, val) _this_cpu_generic_to_op((pcp), (val), &=)
+# endif
+# ifndef this_cpu_and_4
+# define this_cpu_and_4(pcp, val) _this_cpu_generic_to_op((pcp), (val), &=)
+# endif
+# ifndef this_cpu_and_8
+# define this_cpu_and_8(pcp, val) _this_cpu_generic_to_op((pcp), (val), &=)
+# endif
+# define this_cpu_and(pcp, val) __pcpu_size_call(this_cpu_and_, (pcp), (val))
+#endif
+
+#ifndef this_cpu_or
+# ifndef this_cpu_or_1
+# define this_cpu_or_1(pcp, val) _this_cpu_generic_to_op((pcp), (val), |=)
+# endif
+# ifndef this_cpu_or_2
+# define this_cpu_or_2(pcp, val) _this_cpu_generic_to_op((pcp), (val), |=)
+# endif
+# ifndef this_cpu_or_4
+# define this_cpu_or_4(pcp, val) _this_cpu_generic_to_op((pcp), (val), |=)
+# endif
+# ifndef this_cpu_or_8
+# define this_cpu_or_8(pcp, val) _this_cpu_generic_to_op((pcp), (val), |=)
+# endif
+# define this_cpu_or(pcp, val) __pcpu_size_call(this_cpu_or_, (pcp), (val))
+#endif
+
+#ifndef this_cpu_xor
+# ifndef this_cpu_xor_1
+# define this_cpu_xor_1(pcp, val) _this_cpu_generic_to_op((pcp), (val), ^=)
+# endif
+# ifndef this_cpu_xor_2
+# define this_cpu_xor_2(pcp, val) _this_cpu_generic_to_op((pcp), (val), ^=)
+# endif
+# ifndef this_cpu_xor_4
+# define this_cpu_xor_4(pcp, val) _this_cpu_generic_to_op((pcp), (val), ^=)
+# endif
+# ifndef this_cpu_xor_8
+# define this_cpu_xor_8(pcp, val) _this_cpu_generic_to_op((pcp), (val), ^=)
+# endif
+# define this_cpu_xor(pcp, val)	__pcpu_size_call(this_cpu_xor_, (pcp), (val))
+#endif
+
+/*
+ * Generic percpu operations that do not require preemption handling.
+ * Either we do not care about races or the caller has the
+ * responsibility of handling preemption issues. Arch code can still
+ * override these instructions since the arch per cpu code may be more
+ * efficient and may actually get race freeness for free (that is the
+ * case for x86 for example).
+ *
+ * If there is no other protection through preempt disable and/or
+ * disabling interrupts then one of these RMW operations can show unexpected
+ * behavior because the execution thread was rescheduled on another processor
+ * or an interrupt occurred and the same percpu variable was modified from
+ * the interrupt context.
+ */
+#ifndef __this_cpu_read
+# ifndef __this_cpu_read_1
+# define __this_cpu_read_1(pcp) (*__this_cpu_ptr(&(pcp)))
+# endif
+# ifndef __this_cpu_read_2
+# define __this_cpu_read_2(pcp) (*__this_cpu_ptr(&(pcp)))
+# endif
+# ifndef __this_cpu_read_4
+# define __this_cpu_read_4(pcp) (*__this_cpu_ptr(&(pcp)))
+# endif
+# ifndef __this_cpu_read_8
+# define __this_cpu_read_8(pcp) (*__this_cpu_ptr(&(pcp)))
+# endif
+# define __this_cpu_read(pcp) __pcpu_size_call_return(__this_cpu_read_, (pcp))
+#endif
+
+#define __this_cpu_generic_to_op(pcp, val, op) \
+do { \
+ *__this_cpu_ptr(&(pcp)) op val; \
+} while (0)
+
+#ifndef __this_cpu_write
+# ifndef __this_cpu_write_1
+# define __this_cpu_write_1(pcp, val) __this_cpu_generic_to_op((pcp), (val), =)
+# endif
+# ifndef __this_cpu_write_2
+# define __this_cpu_write_2(pcp, val) __this_cpu_generic_to_op((pcp), (val), =)
+# endif
+# ifndef __this_cpu_write_4
+# define __this_cpu_write_4(pcp, val) __this_cpu_generic_to_op((pcp), (val), =)
+# endif
+# ifndef __this_cpu_write_8
+# define __this_cpu_write_8(pcp, val) __this_cpu_generic_to_op((pcp), (val), =)
+# endif
+# define __this_cpu_write(pcp, val) __pcpu_size_call(__this_cpu_write_, (pcp), (val))
+#endif
+
+#ifndef __this_cpu_add
+# ifndef __this_cpu_add_1
+# define __this_cpu_add_1(pcp, val) __this_cpu_generic_to_op((pcp), (val), +=)
+# endif
+# ifndef __this_cpu_add_2
+# define __this_cpu_add_2(pcp, val) __this_cpu_generic_to_op((pcp), (val), +=)
+# endif
+# ifndef __this_cpu_add_4
+# define __this_cpu_add_4(pcp, val) __this_cpu_generic_to_op((pcp), (val), +=)
+# endif
+# ifndef __this_cpu_add_8
+# define __this_cpu_add_8(pcp, val) __this_cpu_generic_to_op((pcp), (val), +=)
+# endif
+# define __this_cpu_add(pcp, val) __pcpu_size_call(__this_cpu_add_, (pcp), (val))
+#endif
+
+#ifndef __this_cpu_sub
+# define __this_cpu_sub(pcp, val) __this_cpu_add((pcp), -(val))
+#endif
+
+#ifndef __this_cpu_inc
+# define __this_cpu_inc(pcp) __this_cpu_add((pcp), 1)
+#endif
+
+#ifndef __this_cpu_dec
+# define __this_cpu_dec(pcp) __this_cpu_sub((pcp), 1)
+#endif
+
+#ifndef __this_cpu_and
+# ifndef __this_cpu_and_1
+# define __this_cpu_and_1(pcp, val) __this_cpu_generic_to_op((pcp), (val), &=)
+# endif
+# ifndef __this_cpu_and_2
+# define __this_cpu_and_2(pcp, val) __this_cpu_generic_to_op((pcp), (val), &=)
+# endif
+# ifndef __this_cpu_and_4
+# define __this_cpu_and_4(pcp, val) __this_cpu_generic_to_op((pcp), (val), &=)
+# endif
+# ifndef __this_cpu_and_8
+# define __this_cpu_and_8(pcp, val) __this_cpu_generic_to_op((pcp), (val), &=)
+# endif
+# define __this_cpu_and(pcp, val) __pcpu_size_call(__this_cpu_and_, (pcp), (val))
+#endif
+
+#ifndef __this_cpu_or
+# ifndef __this_cpu_or_1
+# define __this_cpu_or_1(pcp, val) __this_cpu_generic_to_op((pcp), (val), |=)
+# endif
+# ifndef __this_cpu_or_2
+# define __this_cpu_or_2(pcp, val) __this_cpu_generic_to_op((pcp), (val), |=)
+# endif
+# ifndef __this_cpu_or_4
+# define __this_cpu_or_4(pcp, val) __this_cpu_generic_to_op((pcp), (val), |=)
+# endif
+# ifndef __this_cpu_or_8
+# define __this_cpu_or_8(pcp, val) __this_cpu_generic_to_op((pcp), (val), |=)
+# endif
+# define __this_cpu_or(pcp, val) __pcpu_size_call(__this_cpu_or_, (pcp), (val))
+#endif
+
+#ifndef __this_cpu_xor
+# ifndef __this_cpu_xor_1
+# define __this_cpu_xor_1(pcp, val) __this_cpu_generic_to_op((pcp), (val), ^=)
+# endif
+# ifndef __this_cpu_xor_2
+# define __this_cpu_xor_2(pcp, val) __this_cpu_generic_to_op((pcp), (val), ^=)
+# endif
+# ifndef __this_cpu_xor_4
+# define __this_cpu_xor_4(pcp, val) __this_cpu_generic_to_op((pcp), (val), ^=)
+# endif
+# ifndef __this_cpu_xor_8
+# define __this_cpu_xor_8(pcp, val) __this_cpu_generic_to_op((pcp), (val), ^=)
+# endif
+# define __this_cpu_xor(pcp, val) __pcpu_size_call(__this_cpu_xor_, (pcp), (val))
+#endif
+
+/*
+ * IRQ safe versions of the per cpu RMW operations. Note that these operations
+ * are *not* safe against modification of the same variable from another
+ * processor (which one gets when using regular atomic operations).
+ * They are guaranteed to be atomic vs. local interrupts and
+ * preemption only.
+ */
+#define irqsafe_cpu_generic_to_op(pcp, val, op) \
+do { \
+ unsigned long flags; \
+ local_irq_save(flags); \
+ *__this_cpu_ptr(&(pcp)) op val; \
+ local_irq_restore(flags); \
+} while (0)
+
+#ifndef irqsafe_cpu_add
+# ifndef irqsafe_cpu_add_1
+# define irqsafe_cpu_add_1(pcp, val) irqsafe_cpu_generic_to_op((pcp), (val), +=)
+# endif
+# ifndef irqsafe_cpu_add_2
+# define irqsafe_cpu_add_2(pcp, val) irqsafe_cpu_generic_to_op((pcp), (val), +=)
+# endif
+# ifndef irqsafe_cpu_add_4
+# define irqsafe_cpu_add_4(pcp, val) irqsafe_cpu_generic_to_op((pcp), (val), +=)
+# endif
+# ifndef irqsafe_cpu_add_8
+# define irqsafe_cpu_add_8(pcp, val) irqsafe_cpu_generic_to_op((pcp), (val), +=)
+# endif
+# define irqsafe_cpu_add(pcp, val) __pcpu_size_call(irqsafe_cpu_add_, (pcp), (val))
+#endif
+
+#ifndef irqsafe_cpu_sub
+# define irqsafe_cpu_sub(pcp, val) irqsafe_cpu_add((pcp), -(val))
+#endif
+
+#ifndef irqsafe_cpu_inc
+# define irqsafe_cpu_inc(pcp) irqsafe_cpu_add((pcp), 1)
+#endif
+
+#ifndef irqsafe_cpu_dec
+# define irqsafe_cpu_dec(pcp) irqsafe_cpu_sub((pcp), 1)
+#endif
+
+#ifndef irqsafe_cpu_and
+# ifndef irqsafe_cpu_and_1
+# define irqsafe_cpu_and_1(pcp, val) irqsafe_cpu_generic_to_op((pcp), (val), &=)
+# endif
+# ifndef irqsafe_cpu_and_2
+# define irqsafe_cpu_and_2(pcp, val) irqsafe_cpu_generic_to_op((pcp), (val), &=)
+# endif
+# ifndef irqsafe_cpu_and_4
+# define irqsafe_cpu_and_4(pcp, val) irqsafe_cpu_generic_to_op((pcp), (val), &=)
+# endif
+# ifndef irqsafe_cpu_and_8
+# define irqsafe_cpu_and_8(pcp, val) irqsafe_cpu_generic_to_op((pcp), (val), &=)
+# endif
+# define irqsafe_cpu_and(pcp, val) __pcpu_size_call(irqsafe_cpu_and_, (pcp), (val))
+#endif
+
+#ifndef irqsafe_cpu_or
+# ifndef irqsafe_cpu_or_1
+# define irqsafe_cpu_or_1(pcp, val) irqsafe_cpu_generic_to_op((pcp), (val), |=)
+# endif
+# ifndef irqsafe_cpu_or_2
+# define irqsafe_cpu_or_2(pcp, val) irqsafe_cpu_generic_to_op((pcp), (val), |=)
+# endif
+# ifndef irqsafe_cpu_or_4
+# define irqsafe_cpu_or_4(pcp, val) irqsafe_cpu_generic_to_op((pcp), (val), |=)
+# endif
+# ifndef irqsafe_cpu_or_8
+# define irqsafe_cpu_or_8(pcp, val) irqsafe_cpu_generic_to_op((pcp), (val), |=)
+# endif
+# define irqsafe_cpu_or(pcp, val) __pcpu_size_call(irqsafe_cpu_or_, (pcp), (val))
+#endif
+
+#ifndef irqsafe_cpu_xor
+# ifndef irqsafe_cpu_xor_1
+# define irqsafe_cpu_xor_1(pcp, val) irqsafe_cpu_generic_to_op((pcp), (val), ^=)
+# endif
+# ifndef irqsafe_cpu_xor_2
+# define irqsafe_cpu_xor_2(pcp, val) irqsafe_cpu_generic_to_op((pcp), (val), ^=)
+# endif
+# ifndef irqsafe_cpu_xor_4
+# define irqsafe_cpu_xor_4(pcp, val) irqsafe_cpu_generic_to_op((pcp), (val), ^=)
+# endif
+# ifndef irqsafe_cpu_xor_8
+# define irqsafe_cpu_xor_8(pcp, val) irqsafe_cpu_generic_to_op((pcp), (val), ^=)
+# endif
+# define irqsafe_cpu_xor(pcp, val) __pcpu_size_call(irqsafe_cpu_xor_, (pcp), (val))
+#endif
+
#endif /* __LINUX_PERCPU_H */
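
Illustrative sketch (not part of the patch): how the __this_cpu_*() and irqsafe_cpu_*() flavours described in the comments above might be combined for a counter touched from both process and interrupt context on the local CPU. All names below are made up.

#include <linux/percpu.h>
#include <linux/interrupt.h>

static DEFINE_PER_CPU(unsigned long, demo_events);

/* Process context: the counter is also written from IRQ context on this
 * CPU, so the read-modify-write has to be protected against interrupts. */
static void demo_count_event(void)
{
	irqsafe_cpu_inc(demo_events);
}

/* IRQ context: this handler is the only interrupt-context writer and the
 * same IRQ does not re-enter itself, so the plain __this_cpu op suffices. */
static irqreturn_t demo_irq(int irq, void *dev_id)
{
	__this_cpu_inc(demo_events);
	return IRQ_HANDLED;
}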
diff --git a/include/linux/perf_event.h b/include/linux/perf_event.h
index 64a53f74c9a..da7bdc23f27 100644
--- a/include/linux/perf_event.h
+++ b/include/linux/perf_event.h
@@ -681,7 +681,7 @@ struct perf_event_context {
* Protect the states of the events in the list,
* nr_active, and the list:
*/
- spinlock_t lock;
+ raw_spinlock_t lock;
/*
* Protect the list of events. Locking either mutex or lock
* is sufficient to ensure the list doesn't change; to change
diff --git a/include/linux/plist.h b/include/linux/plist.h
index 45926d77d6a..8227f717c70 100644
--- a/include/linux/plist.h
+++ b/include/linux/plist.h
@@ -81,7 +81,8 @@ struct plist_head {
struct list_head prio_list;
struct list_head node_list;
#ifdef CONFIG_DEBUG_PI_LIST
- spinlock_t *lock;
+ raw_spinlock_t *rawlock;
+ spinlock_t *spinlock;
#endif
};
@@ -91,9 +92,11 @@ struct plist_node {
};
#ifdef CONFIG_DEBUG_PI_LIST
-# define PLIST_HEAD_LOCK_INIT(_lock) .lock = _lock
+# define PLIST_HEAD_LOCK_INIT(_lock) .spinlock = _lock
+# define PLIST_HEAD_LOCK_INIT_RAW(_lock) .rawlock = _lock
#else
# define PLIST_HEAD_LOCK_INIT(_lock)
+# define PLIST_HEAD_LOCK_INIT_RAW(_lock)
#endif
#define _PLIST_HEAD_INIT(head) \
@@ -107,11 +110,22 @@ struct plist_node {
*/
#define PLIST_HEAD_INIT(head, _lock) \
{ \
- _PLIST_HEAD_INIT(head), \
+ _PLIST_HEAD_INIT(head), \
PLIST_HEAD_LOCK_INIT(&(_lock)) \
}
/**
+ * PLIST_HEAD_INIT_RAW - static struct plist_head initializer
+ * @head: struct plist_head variable name
+ * @_lock: lock to initialize for this list
+ */
+#define PLIST_HEAD_INIT_RAW(head, _lock) \
+{ \
+ _PLIST_HEAD_INIT(head), \
+ PLIST_HEAD_LOCK_INIT_RAW(&(_lock)) \
+}
+
+/**
* PLIST_NODE_INIT - static struct plist_node initializer
* @node: struct plist_node variable name
* @__prio: initial node priority
@@ -119,13 +133,13 @@ struct plist_node {
#define PLIST_NODE_INIT(node, __prio) \
{ \
.prio = (__prio), \
- .plist = { _PLIST_HEAD_INIT((node).plist) }, \
+ .plist = { _PLIST_HEAD_INIT((node).plist) }, \
}
/**
* plist_head_init - dynamic struct plist_head initializer
* @head: &struct plist_head pointer
- * @lock: list spinlock, remembered for debugging
+ * @lock: spinlock protecting the list (debugging)
*/
static inline void
plist_head_init(struct plist_head *head, spinlock_t *lock)
@@ -133,7 +147,24 @@ plist_head_init(struct plist_head *head, spinlock_t *lock)
INIT_LIST_HEAD(&head->prio_list);
INIT_LIST_HEAD(&head->node_list);
#ifdef CONFIG_DEBUG_PI_LIST
- head->lock = lock;
+ head->spinlock = lock;
+ head->rawlock = NULL;
+#endif
+}
+
+/**
+ * plist_head_init_raw - dynamic struct plist_head initializer
+ * @head: &struct plist_head pointer
+ * @lock: raw_spinlock protecting the list (debugging)
+ */
+static inline void
+plist_head_init_raw(struct plist_head *head, raw_spinlock_t *lock)
+{
+ INIT_LIST_HEAD(&head->prio_list);
+ INIT_LIST_HEAD(&head->node_list);
+#ifdef CONFIG_DEBUG_PI_LIST
+ head->rawlock = lock;
+ head->spinlock = NULL;
#endif
}
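
For illustration only (not in the patch): initializing a priority list whose debug lock is a raw_spinlock_t via the new plist_head_init_raw() helper, assuming the DEFINE_RAW_SPINLOCK initializer from the reworked spinlock headers. Names are hypothetical.

#include <linux/plist.h>
#include <linux/spinlock.h>

static DEFINE_RAW_SPINLOCK(demo_lock);
static struct plist_head demo_head;

static void demo_setup(void)
{
	/* On CONFIG_DEBUG_PI_LIST builds this remembers &demo_lock in
	 * head->rawlock and leaves head->spinlock NULL. */
	plist_head_init_raw(&demo_head, &demo_lock);
}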
diff --git a/include/linux/pm.h b/include/linux/pm.h
index 0d65934246a..198b8f9fe05 100644
--- a/include/linux/pm.h
+++ b/include/linux/pm.h
@@ -219,7 +219,7 @@ struct dev_pm_ops {
* to RAM and hibernation.
*/
#define SIMPLE_DEV_PM_OPS(name, suspend_fn, resume_fn) \
-struct dev_pm_ops name = { \
+const struct dev_pm_ops name = { \
.suspend = suspend_fn, \
.resume = resume_fn, \
.freeze = suspend_fn, \
diff --git a/include/linux/raid/pq.h b/include/linux/raid/pq.h
index d92480f8285..1cbbd2c11aa 100644
--- a/include/linux/raid/pq.h
+++ b/include/linux/raid/pq.h
@@ -78,6 +78,25 @@ struct raid6_calls {
/* Selected algorithm */
extern struct raid6_calls raid6_call;
+/* Various routine sets */
+extern const struct raid6_calls raid6_intx1;
+extern const struct raid6_calls raid6_intx2;
+extern const struct raid6_calls raid6_intx4;
+extern const struct raid6_calls raid6_intx8;
+extern const struct raid6_calls raid6_intx16;
+extern const struct raid6_calls raid6_intx32;
+extern const struct raid6_calls raid6_mmxx1;
+extern const struct raid6_calls raid6_mmxx2;
+extern const struct raid6_calls raid6_sse1x1;
+extern const struct raid6_calls raid6_sse1x2;
+extern const struct raid6_calls raid6_sse2x1;
+extern const struct raid6_calls raid6_sse2x2;
+extern const struct raid6_calls raid6_sse2x4;
+extern const struct raid6_calls raid6_altivec1;
+extern const struct raid6_calls raid6_altivec2;
+extern const struct raid6_calls raid6_altivec4;
+extern const struct raid6_calls raid6_altivec8;
+
/* Algorithm list */
extern const struct raid6_calls * const raid6_algos[];
int raid6_select_algo(void);
diff --git a/include/linux/rmap.h b/include/linux/rmap.h
index cb0ba703260..b019ae64e2a 100644
--- a/include/linux/rmap.h
+++ b/include/linux/rmap.h
@@ -26,6 +26,9 @@
*/
struct anon_vma {
spinlock_t lock; /* Serialize access to vma list */
+#ifdef CONFIG_KSM
+ atomic_t ksm_refcount;
+#endif
/*
* NOTE: the LSB of the head.next is set by
* mm_take_all_locks() _after_ taking the above lock. So the
@@ -38,6 +41,34 @@ struct anon_vma {
};
#ifdef CONFIG_MMU
+#ifdef CONFIG_KSM
+static inline void ksm_refcount_init(struct anon_vma *anon_vma)
+{
+ atomic_set(&anon_vma->ksm_refcount, 0);
+}
+
+static inline int ksm_refcount(struct anon_vma *anon_vma)
+{
+ return atomic_read(&anon_vma->ksm_refcount);
+}
+#else
+static inline void ksm_refcount_init(struct anon_vma *anon_vma)
+{
+}
+
+static inline int ksm_refcount(struct anon_vma *anon_vma)
+{
+ return 0;
+}
+#endif /* CONFIG_KSM */
+
+static inline struct anon_vma *page_anon_vma(struct page *page)
+{
+ if (((unsigned long)page->mapping & PAGE_MAPPING_FLAGS) !=
+ PAGE_MAPPING_ANON)
+ return NULL;
+ return page_rmapping(page);
+}
static inline void anon_vma_lock(struct vm_area_struct *vma)
{
@@ -62,6 +93,7 @@ void __anon_vma_merge(struct vm_area_struct *, struct vm_area_struct *);
void anon_vma_unlink(struct vm_area_struct *);
void anon_vma_link(struct vm_area_struct *);
void __anon_vma_link(struct vm_area_struct *);
+void anon_vma_free(struct anon_vma *);
/*
* rmap interfaces called when adding or removing pte of page
@@ -81,6 +113,9 @@ static inline void page_dup_rmap(struct page *page)
*/
int page_referenced(struct page *, int is_locked,
struct mem_cgroup *cnt, unsigned long *vm_flags);
+int page_referenced_one(struct page *, struct vm_area_struct *,
+ unsigned long address, unsigned int *mapcount, unsigned long *vm_flags);
+
enum ttu_flags {
TTU_UNMAP = 0, /* unmap mode */
TTU_MIGRATION = 1, /* migration mode */
@@ -94,6 +129,8 @@ enum ttu_flags {
#define TTU_ACTION(x) ((x) & TTU_ACTION_MASK)
int try_to_unmap(struct page *, enum ttu_flags flags);
+int try_to_unmap_one(struct page *, struct vm_area_struct *,
+ unsigned long address, enum ttu_flags flags);
/*
* Called from mm/filemap_xip.c to unmap empty zero page
@@ -127,6 +164,12 @@ struct anon_vma *page_lock_anon_vma(struct page *page);
void page_unlock_anon_vma(struct anon_vma *anon_vma);
int page_mapped_in_vma(struct page *page, struct vm_area_struct *vma);
+/*
+ * Called by migrate.c to remove migration ptes, but might be used more later.
+ */
+int rmap_walk(struct page *page, int (*rmap_one)(struct page *,
+ struct vm_area_struct *, unsigned long, void *), void *arg);
+
#else /* !CONFIG_MMU */
#define anon_vma_init() do {} while (0)
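
Hypothetical sketch (not part of the patch) of an rmap_walk() callback using the rmap_one signature declared above; SWAP_AGAIN (from linux/swap.h) is the conventional "keep walking" return value.

#include <linux/mm.h>
#include <linux/rmap.h>
#include <linux/swap.h>

static int demo_count_one(struct page *page, struct vm_area_struct *vma,
			  unsigned long address, void *arg)
{
	int *count = arg;

	(*count)++;		/* one mapping of @page found at @address in @vma */
	return SWAP_AGAIN;	/* continue with the remaining vmas */
}

static int demo_count_mappings(struct page *page)
{
	int count = 0;

	rmap_walk(page, demo_count_one, &count);
	return count;
}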
diff --git a/include/linux/rtmutex.h b/include/linux/rtmutex.h
index f19b00b7d53..281d8fd775e 100644
--- a/include/linux/rtmutex.h
+++ b/include/linux/rtmutex.h
@@ -24,7 +24,7 @@
* @owner: the mutex owner
*/
struct rt_mutex {
- spinlock_t wait_lock;
+ raw_spinlock_t wait_lock;
struct plist_head wait_list;
struct task_struct *owner;
#ifdef CONFIG_DEBUG_RT_MUTEXES
@@ -63,8 +63,8 @@ struct hrtimer_sleeper;
#endif
#define __RT_MUTEX_INITIALIZER(mutexname) \
- { .wait_lock = __SPIN_LOCK_UNLOCKED(mutexname.wait_lock) \
- , .wait_list = PLIST_HEAD_INIT(mutexname.wait_list, mutexname.wait_lock) \
+ { .wait_lock = __RAW_SPIN_LOCK_UNLOCKED(mutexname.wait_lock) \
+ , .wait_list = PLIST_HEAD_INIT_RAW(mutexname.wait_list, mutexname.wait_lock) \
, .owner = NULL \
__DEBUG_RT_MUTEX_INITIALIZER(mutexname)}
diff --git a/include/linux/rwlock.h b/include/linux/rwlock.h
new file mode 100644
index 00000000000..71e0b00b6f2
--- /dev/null
+++ b/include/linux/rwlock.h
@@ -0,0 +1,125 @@
+#ifndef __LINUX_RWLOCK_H
+#define __LINUX_RWLOCK_H
+
+#ifndef __LINUX_SPINLOCK_H
+# error "please don't include this file directly"
+#endif
+
+/*
+ * rwlock related methods
+ *
+ * split out from spinlock.h
+ *
+ * portions Copyright 2005, Red Hat, Inc., Ingo Molnar
+ * Released under the General Public License (GPL).
+ */
+
+#ifdef CONFIG_DEBUG_SPINLOCK
+ extern void __rwlock_init(rwlock_t *lock, const char *name,
+ struct lock_class_key *key);
+# define rwlock_init(lock) \
+do { \
+ static struct lock_class_key __key; \
+ \
+ __rwlock_init((lock), #lock, &__key); \
+} while (0)
+#else
+# define rwlock_init(lock) \
+ do { *(lock) = __RW_LOCK_UNLOCKED(lock); } while (0)
+#endif
+
+#ifdef CONFIG_DEBUG_SPINLOCK
+ extern void do_raw_read_lock(rwlock_t *lock);
+#define do_raw_read_lock_flags(lock, flags) do_raw_read_lock(lock)
+ extern int do_raw_read_trylock(rwlock_t *lock);
+ extern void do_raw_read_unlock(rwlock_t *lock);
+ extern void do_raw_write_lock(rwlock_t *lock);
+#define do_raw_write_lock_flags(lock, flags) do_raw_write_lock(lock)
+ extern int do_raw_write_trylock(rwlock_t *lock);
+ extern void do_raw_write_unlock(rwlock_t *lock);
+#else
+# define do_raw_read_lock(rwlock) arch_read_lock(&(rwlock)->raw_lock)
+# define do_raw_read_lock_flags(lock, flags) \
+ arch_read_lock_flags(&(lock)->raw_lock, *(flags))
+# define do_raw_read_trylock(rwlock) arch_read_trylock(&(rwlock)->raw_lock)
+# define do_raw_read_unlock(rwlock) arch_read_unlock(&(rwlock)->raw_lock)
+# define do_raw_write_lock(rwlock) arch_write_lock(&(rwlock)->raw_lock)
+# define do_raw_write_lock_flags(lock, flags) \
+ arch_write_lock_flags(&(lock)->raw_lock, *(flags))
+# define do_raw_write_trylock(rwlock) arch_write_trylock(&(rwlock)->raw_lock)
+# define do_raw_write_unlock(rwlock) arch_write_unlock(&(rwlock)->raw_lock)
+#endif
+
+#define read_can_lock(rwlock) arch_read_can_lock(&(rwlock)->raw_lock)
+#define write_can_lock(rwlock) arch_write_can_lock(&(rwlock)->raw_lock)
+
+/*
+ * Define the various rw_lock methods. Note we define these
+ * regardless of whether CONFIG_SMP or CONFIG_PREEMPT is set. The various
+ * methods are defined as nops when they are not required.
+ */
+#define read_trylock(lock) __cond_lock(lock, _raw_read_trylock(lock))
+#define write_trylock(lock) __cond_lock(lock, _raw_write_trylock(lock))
+
+#define write_lock(lock) _raw_write_lock(lock)
+#define read_lock(lock) _raw_read_lock(lock)
+
+#if defined(CONFIG_SMP) || defined(CONFIG_DEBUG_SPINLOCK)
+
+#define read_lock_irqsave(lock, flags) \
+ do { \
+ typecheck(unsigned long, flags); \
+ flags = _raw_read_lock_irqsave(lock); \
+ } while (0)
+#define write_lock_irqsave(lock, flags) \
+ do { \
+ typecheck(unsigned long, flags); \
+ flags = _raw_write_lock_irqsave(lock); \
+ } while (0)
+
+#else
+
+#define read_lock_irqsave(lock, flags) \
+ do { \
+ typecheck(unsigned long, flags); \
+ _raw_read_lock_irqsave(lock, flags); \
+ } while (0)
+#define write_lock_irqsave(lock, flags) \
+ do { \
+ typecheck(unsigned long, flags); \
+ _raw_write_lock_irqsave(lock, flags); \
+ } while (0)
+
+#endif
+
+#define read_lock_irq(lock) _raw_read_lock_irq(lock)
+#define read_lock_bh(lock) _raw_read_lock_bh(lock)
+#define write_lock_irq(lock) _raw_write_lock_irq(lock)
+#define write_lock_bh(lock) _raw_write_lock_bh(lock)
+#define read_unlock(lock) _raw_read_unlock(lock)
+#define write_unlock(lock) _raw_write_unlock(lock)
+#define read_unlock_irq(lock) _raw_read_unlock_irq(lock)
+#define write_unlock_irq(lock) _raw_write_unlock_irq(lock)
+
+#define read_unlock_irqrestore(lock, flags) \
+ do { \
+ typecheck(unsigned long, flags); \
+ _raw_read_unlock_irqrestore(lock, flags); \
+ } while (0)
+#define read_unlock_bh(lock) _raw_read_unlock_bh(lock)
+
+#define write_unlock_irqrestore(lock, flags) \
+ do { \
+ typecheck(unsigned long, flags); \
+ _raw_write_unlock_irqrestore(lock, flags); \
+ } while (0)
+#define write_unlock_bh(lock) _raw_write_unlock_bh(lock)
+
+#define write_trylock_irqsave(lock, flags) \
+({ \
+ local_irq_save(flags); \
+ write_trylock(lock) ? \
+ 1 : ({ local_irq_restore(flags); 0; }); \
+})
+
+#endif /* __LINUX_RWLOCK_H */
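
Usage sketch (illustrative only, not part of the new header): a reader path and an IRQ-safe writer path built from the methods defined above; the protected data is hypothetical.

#include <linux/spinlock.h>

static DEFINE_RWLOCK(demo_rwlock);
static int demo_value;

static int demo_read(void)
{
	int v;

	read_lock(&demo_rwlock);	/* multiple readers may hold the lock */
	v = demo_value;
	read_unlock(&demo_rwlock);
	return v;
}

static void demo_write(int v)
{
	unsigned long flags;

	write_lock_irqsave(&demo_rwlock, flags);	/* exclusive, local IRQs off */
	demo_value = v;
	write_unlock_irqrestore(&demo_rwlock, flags);
}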
diff --git a/include/linux/rwlock_api_smp.h b/include/linux/rwlock_api_smp.h
new file mode 100644
index 00000000000..9c9f0495d37
--- /dev/null
+++ b/include/linux/rwlock_api_smp.h
@@ -0,0 +1,282 @@
+#ifndef __LINUX_RWLOCK_API_SMP_H
+#define __LINUX_RWLOCK_API_SMP_H
+
+#ifndef __LINUX_SPINLOCK_API_SMP_H
+# error "please don't include this file directly"
+#endif
+
+/*
+ * include/linux/rwlock_api_smp.h
+ *
+ * spinlock API declarations on SMP (and debug)
+ * (implemented in kernel/spinlock.c)
+ *
+ * portions Copyright 2005, Red Hat, Inc., Ingo Molnar
+ * Released under the General Public License (GPL).
+ */
+
+void __lockfunc _raw_read_lock(rwlock_t *lock) __acquires(lock);
+void __lockfunc _raw_write_lock(rwlock_t *lock) __acquires(lock);
+void __lockfunc _raw_read_lock_bh(rwlock_t *lock) __acquires(lock);
+void __lockfunc _raw_write_lock_bh(rwlock_t *lock) __acquires(lock);
+void __lockfunc _raw_read_lock_irq(rwlock_t *lock) __acquires(lock);
+void __lockfunc _raw_write_lock_irq(rwlock_t *lock) __acquires(lock);
+unsigned long __lockfunc _raw_read_lock_irqsave(rwlock_t *lock)
+ __acquires(lock);
+unsigned long __lockfunc _raw_write_lock_irqsave(rwlock_t *lock)
+ __acquires(lock);
+int __lockfunc _raw_read_trylock(rwlock_t *lock);
+int __lockfunc _raw_write_trylock(rwlock_t *lock);
+void __lockfunc _raw_read_unlock(rwlock_t *lock) __releases(lock);
+void __lockfunc _raw_write_unlock(rwlock_t *lock) __releases(lock);
+void __lockfunc _raw_read_unlock_bh(rwlock_t *lock) __releases(lock);
+void __lockfunc _raw_write_unlock_bh(rwlock_t *lock) __releases(lock);
+void __lockfunc _raw_read_unlock_irq(rwlock_t *lock) __releases(lock);
+void __lockfunc _raw_write_unlock_irq(rwlock_t *lock) __releases(lock);
+void __lockfunc
+_raw_read_unlock_irqrestore(rwlock_t *lock, unsigned long flags)
+ __releases(lock);
+void __lockfunc
+_raw_write_unlock_irqrestore(rwlock_t *lock, unsigned long flags)
+ __releases(lock);
+
+#ifdef CONFIG_INLINE_READ_LOCK
+#define _raw_read_lock(lock) __raw_read_lock(lock)
+#endif
+
+#ifdef CONFIG_INLINE_WRITE_LOCK
+#define _raw_write_lock(lock) __raw_write_lock(lock)
+#endif
+
+#ifdef CONFIG_INLINE_READ_LOCK_BH
+#define _raw_read_lock_bh(lock) __raw_read_lock_bh(lock)
+#endif
+
+#ifdef CONFIG_INLINE_WRITE_LOCK_BH
+#define _raw_write_lock_bh(lock) __raw_write_lock_bh(lock)
+#endif
+
+#ifdef CONFIG_INLINE_READ_LOCK_IRQ
+#define _raw_read_lock_irq(lock) __raw_read_lock_irq(lock)
+#endif
+
+#ifdef CONFIG_INLINE_WRITE_LOCK_IRQ
+#define _raw_write_lock_irq(lock) __raw_write_lock_irq(lock)
+#endif
+
+#ifdef CONFIG_INLINE_READ_LOCK_IRQSAVE
+#define _raw_read_lock_irqsave(lock) __raw_read_lock_irqsave(lock)
+#endif
+
+#ifdef CONFIG_INLINE_WRITE_LOCK_IRQSAVE
+#define _raw_write_lock_irqsave(lock) __raw_write_lock_irqsave(lock)
+#endif
+
+#ifdef CONFIG_INLINE_READ_TRYLOCK
+#define _raw_read_trylock(lock) __raw_read_trylock(lock)
+#endif
+
+#ifdef CONFIG_INLINE_WRITE_TRYLOCK
+#define _raw_write_trylock(lock) __raw_write_trylock(lock)
+#endif
+
+#ifdef CONFIG_INLINE_READ_UNLOCK
+#define _raw_read_unlock(lock) __raw_read_unlock(lock)
+#endif
+
+#ifdef CONFIG_INLINE_WRITE_UNLOCK
+#define _raw_write_unlock(lock) __raw_write_unlock(lock)
+#endif
+
+#ifdef CONFIG_INLINE_READ_UNLOCK_BH
+#define _raw_read_unlock_bh(lock) __raw_read_unlock_bh(lock)
+#endif
+
+#ifdef CONFIG_INLINE_WRITE_UNLOCK_BH
+#define _raw_write_unlock_bh(lock) __raw_write_unlock_bh(lock)
+#endif
+
+#ifdef CONFIG_INLINE_READ_UNLOCK_IRQ
+#define _raw_read_unlock_irq(lock) __raw_read_unlock_irq(lock)
+#endif
+
+#ifdef CONFIG_INLINE_WRITE_UNLOCK_IRQ
+#define _raw_write_unlock_irq(lock) __raw_write_unlock_irq(lock)
+#endif
+
+#ifdef CONFIG_INLINE_READ_UNLOCK_IRQRESTORE
+#define _raw_read_unlock_irqrestore(lock, flags) \
+ __raw_read_unlock_irqrestore(lock, flags)
+#endif
+
+#ifdef CONFIG_INLINE_WRITE_UNLOCK_IRQRESTORE
+#define _raw_write_unlock_irqrestore(lock, flags) \
+ __raw_write_unlock_irqrestore(lock, flags)
+#endif
+
+static inline int __raw_read_trylock(rwlock_t *lock)
+{
+ preempt_disable();
+ if (do_raw_read_trylock(lock)) {
+ rwlock_acquire_read(&lock->dep_map, 0, 1, _RET_IP_);
+ return 1;
+ }
+ preempt_enable();
+ return 0;
+}
+
+static inline int __raw_write_trylock(rwlock_t *lock)
+{
+ preempt_disable();
+ if (do_raw_write_trylock(lock)) {
+ rwlock_acquire(&lock->dep_map, 0, 1, _RET_IP_);
+ return 1;
+ }
+ preempt_enable();
+ return 0;
+}
+
+/*
+ * If lockdep is enabled then we use the non-preemption spin-ops
+ * even on CONFIG_PREEMPT, because lockdep assumes that interrupts are
+ * not re-enabled during lock-acquire (which the preempt-spin-ops do):
+ */
+#if !defined(CONFIG_GENERIC_LOCKBREAK) || defined(CONFIG_DEBUG_LOCK_ALLOC)
+
+static inline void __raw_read_lock(rwlock_t *lock)
+{
+ preempt_disable();
+ rwlock_acquire_read(&lock->dep_map, 0, 0, _RET_IP_);
+ LOCK_CONTENDED(lock, do_raw_read_trylock, do_raw_read_lock);
+}
+
+static inline unsigned long __raw_read_lock_irqsave(rwlock_t *lock)
+{
+ unsigned long flags;
+
+ local_irq_save(flags);
+ preempt_disable();
+ rwlock_acquire_read(&lock->dep_map, 0, 0, _RET_IP_);
+ LOCK_CONTENDED_FLAGS(lock, do_raw_read_trylock, do_raw_read_lock,
+ do_raw_read_lock_flags, &flags);
+ return flags;
+}
+
+static inline void __raw_read_lock_irq(rwlock_t *lock)
+{
+ local_irq_disable();
+ preempt_disable();
+ rwlock_acquire_read(&lock->dep_map, 0, 0, _RET_IP_);
+ LOCK_CONTENDED(lock, do_raw_read_trylock, do_raw_read_lock);
+}
+
+static inline void __raw_read_lock_bh(rwlock_t *lock)
+{
+ local_bh_disable();
+ preempt_disable();
+ rwlock_acquire_read(&lock->dep_map, 0, 0, _RET_IP_);
+ LOCK_CONTENDED(lock, do_raw_read_trylock, do_raw_read_lock);
+}
+
+static inline unsigned long __raw_write_lock_irqsave(rwlock_t *lock)
+{
+ unsigned long flags;
+
+ local_irq_save(flags);
+ preempt_disable();
+ rwlock_acquire(&lock->dep_map, 0, 0, _RET_IP_);
+ LOCK_CONTENDED_FLAGS(lock, do_raw_write_trylock, do_raw_write_lock,
+ do_raw_write_lock_flags, &flags);
+ return flags;
+}
+
+static inline void __raw_write_lock_irq(rwlock_t *lock)
+{
+ local_irq_disable();
+ preempt_disable();
+ rwlock_acquire(&lock->dep_map, 0, 0, _RET_IP_);
+ LOCK_CONTENDED(lock, do_raw_write_trylock, do_raw_write_lock);
+}
+
+static inline void __raw_write_lock_bh(rwlock_t *lock)
+{
+ local_bh_disable();
+ preempt_disable();
+ rwlock_acquire(&lock->dep_map, 0, 0, _RET_IP_);
+ LOCK_CONTENDED(lock, do_raw_write_trylock, do_raw_write_lock);
+}
+
+static inline void __raw_write_lock(rwlock_t *lock)
+{
+ preempt_disable();
+ rwlock_acquire(&lock->dep_map, 0, 0, _RET_IP_);
+ LOCK_CONTENDED(lock, do_raw_write_trylock, do_raw_write_lock);
+}
+
+#endif /* !CONFIG_GENERIC_LOCKBREAK || CONFIG_DEBUG_LOCK_ALLOC */
+
+static inline void __raw_write_unlock(rwlock_t *lock)
+{
+ rwlock_release(&lock->dep_map, 1, _RET_IP_);
+ do_raw_write_unlock(lock);
+ preempt_enable();
+}
+
+static inline void __raw_read_unlock(rwlock_t *lock)
+{
+ rwlock_release(&lock->dep_map, 1, _RET_IP_);
+ do_raw_read_unlock(lock);
+ preempt_enable();
+}
+
+static inline void
+__raw_read_unlock_irqrestore(rwlock_t *lock, unsigned long flags)
+{
+ rwlock_release(&lock->dep_map, 1, _RET_IP_);
+ do_raw_read_unlock(lock);
+ local_irq_restore(flags);
+ preempt_enable();
+}
+
+static inline void __raw_read_unlock_irq(rwlock_t *lock)
+{
+ rwlock_release(&lock->dep_map, 1, _RET_IP_);
+ do_raw_read_unlock(lock);
+ local_irq_enable();
+ preempt_enable();
+}
+
+static inline void __raw_read_unlock_bh(rwlock_t *lock)
+{
+ rwlock_release(&lock->dep_map, 1, _RET_IP_);
+ do_raw_read_unlock(lock);
+ preempt_enable_no_resched();
+ local_bh_enable_ip((unsigned long)__builtin_return_address(0));
+}
+
+static inline void __raw_write_unlock_irqrestore(rwlock_t *lock,
+ unsigned long flags)
+{
+ rwlock_release(&lock->dep_map, 1, _RET_IP_);
+ do_raw_write_unlock(lock);
+ local_irq_restore(flags);
+ preempt_enable();
+}
+
+static inline void __raw_write_unlock_irq(rwlock_t *lock)
+{
+ rwlock_release(&lock->dep_map, 1, _RET_IP_);
+ do_raw_write_unlock(lock);
+ local_irq_enable();
+ preempt_enable();
+}
+
+static inline void __raw_write_unlock_bh(rwlock_t *lock)
+{
+ rwlock_release(&lock->dep_map, 1, _RET_IP_);
+ do_raw_write_unlock(lock);
+ preempt_enable_no_resched();
+ local_bh_enable_ip((unsigned long)__builtin_return_address(0));
+}
+
+#endif /* __LINUX_RWLOCK_API_SMP_H */
diff --git a/include/linux/rwlock_types.h b/include/linux/rwlock_types.h
new file mode 100644
index 00000000000..bd31808c7d8
--- /dev/null
+++ b/include/linux/rwlock_types.h
@@ -0,0 +1,56 @@
+#ifndef __LINUX_RWLOCK_TYPES_H
+#define __LINUX_RWLOCK_TYPES_H
+
+/*
+ * include/linux/rwlock_types.h - generic rwlock type definitions
+ * and initializers
+ *
+ * portions Copyright 2005, Red Hat, Inc., Ingo Molnar
+ * Released under the General Public License (GPL).
+ */
+typedef struct {
+ arch_rwlock_t raw_lock;
+#ifdef CONFIG_GENERIC_LOCKBREAK
+ unsigned int break_lock;
+#endif
+#ifdef CONFIG_DEBUG_SPINLOCK
+ unsigned int magic, owner_cpu;
+ void *owner;
+#endif
+#ifdef CONFIG_DEBUG_LOCK_ALLOC
+ struct lockdep_map dep_map;
+#endif
+} rwlock_t;
+
+#define RWLOCK_MAGIC 0xdeaf1eed
+
+#ifdef CONFIG_DEBUG_LOCK_ALLOC
+# define RW_DEP_MAP_INIT(lockname) .dep_map = { .name = #lockname }
+#else
+# define RW_DEP_MAP_INIT(lockname)
+#endif
+
+#ifdef CONFIG_DEBUG_SPINLOCK
+#define __RW_LOCK_UNLOCKED(lockname) \
+ (rwlock_t) { .raw_lock = __ARCH_RW_LOCK_UNLOCKED, \
+ .magic = RWLOCK_MAGIC, \
+ .owner = SPINLOCK_OWNER_INIT, \
+ .owner_cpu = -1, \
+ RW_DEP_MAP_INIT(lockname) }
+#else
+#define __RW_LOCK_UNLOCKED(lockname) \
+ (rwlock_t) { .raw_lock = __ARCH_RW_LOCK_UNLOCKED, \
+ RW_DEP_MAP_INIT(lockname) }
+#endif
+
+/*
+ * RW_LOCK_UNLOCKED defeats lockdep state tracking and is hence
+ * deprecated.
+ *
+ * Please use DEFINE_RWLOCK() or __RW_LOCK_UNLOCKED() as appropriate.
+ */
+#define RW_LOCK_UNLOCKED __RW_LOCK_UNLOCKED(old_style_rw_init)
+
+#define DEFINE_RWLOCK(x) rwlock_t x = __RW_LOCK_UNLOCKED(x)
+
+#endif /* __LINUX_RWLOCK_TYPES_H */
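
Purely illustrative (not in the file): the preferred static initializers mentioned above, for a standalone lock and for a lock embedded in a structure; the structure is made up.

#include <linux/spinlock.h>

static DEFINE_RWLOCK(demo_lock);		/* standalone rwlock */

struct demo_table {
	rwlock_t lock;
	int entries;
};

static struct demo_table demo_table = {
	.lock	 = __RW_LOCK_UNLOCKED(demo_table.lock),	/* rwlock inside a struct */
	.entries = 0,
};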
diff --git a/include/linux/rwsem-spinlock.h b/include/linux/rwsem-spinlock.h
index 6c3c0f6c261..bdfcc252797 100644
--- a/include/linux/rwsem-spinlock.h
+++ b/include/linux/rwsem-spinlock.h
@@ -68,11 +68,7 @@ extern int __down_write_trylock(struct rw_semaphore *sem);
extern void __up_read(struct rw_semaphore *sem);
extern void __up_write(struct rw_semaphore *sem);
extern void __downgrade_write(struct rw_semaphore *sem);
-
-static inline int rwsem_is_locked(struct rw_semaphore *sem)
-{
- return (sem->activity != 0);
-}
+extern int rwsem_is_locked(struct rw_semaphore *sem);
#endif /* __KERNEL__ */
#endif /* _LINUX_RWSEM_SPINLOCK_H */
diff --git a/include/linux/sched.h b/include/linux/sched.h
index 294eb2f8014..5c858f38e81 100644
--- a/include/linux/sched.h
+++ b/include/linux/sched.h
@@ -1409,7 +1409,7 @@ struct task_struct {
#endif
/* Protection of the PI data structures: */
- spinlock_t pi_lock;
+ raw_spinlock_t pi_lock;
#ifdef CONFIG_RT_MUTEXES
/* PI waiters blocked on a rt_mutex held by this task */
@@ -1446,8 +1446,10 @@ struct task_struct {
gfp_t lockdep_reclaim_gfp;
#endif
+#ifdef CONFIG_FS_JOURNAL_INFO
/* journalling filesystem info */
void *journal_info;
+#endif
/* stacked block device info */
struct bio *bio_list, **bio_tail;
diff --git a/include/linux/slab_def.h b/include/linux/slab_def.h
index 850d057500d..ca6b2b31799 100644
--- a/include/linux/slab_def.h
+++ b/include/linux/slab_def.h
@@ -110,7 +110,7 @@ extern struct cache_sizes malloc_sizes[];
void *kmem_cache_alloc(struct kmem_cache *, gfp_t);
void *__kmalloc(size_t size, gfp_t flags);
-#ifdef CONFIG_KMEMTRACE
+#ifdef CONFIG_TRACING
extern void *kmem_cache_alloc_notrace(struct kmem_cache *cachep, gfp_t flags);
extern size_t slab_buffer_size(struct kmem_cache *cachep);
#else
@@ -166,7 +166,7 @@ found:
extern void *__kmalloc_node(size_t size, gfp_t flags, int node);
extern void *kmem_cache_alloc_node(struct kmem_cache *, gfp_t flags, int node);
-#ifdef CONFIG_KMEMTRACE
+#ifdef CONFIG_TRACING
extern void *kmem_cache_alloc_node_notrace(struct kmem_cache *cachep,
gfp_t flags,
int nodeid);
diff --git a/include/linux/slub_def.h b/include/linux/slub_def.h
index 5ad70a60fd7..1e14beb23f9 100644
--- a/include/linux/slub_def.h
+++ b/include/linux/slub_def.h
@@ -217,7 +217,7 @@ static __always_inline struct kmem_cache *kmalloc_slab(size_t size)
void *kmem_cache_alloc(struct kmem_cache *, gfp_t);
void *__kmalloc(size_t size, gfp_t flags);
-#ifdef CONFIG_KMEMTRACE
+#ifdef CONFIG_TRACING
extern void *kmem_cache_alloc_notrace(struct kmem_cache *s, gfp_t gfpflags);
#else
static __always_inline void *
@@ -266,7 +266,7 @@ static __always_inline void *kmalloc(size_t size, gfp_t flags)
void *__kmalloc_node(size_t size, gfp_t flags, int node);
void *kmem_cache_alloc_node(struct kmem_cache *, gfp_t flags, int node);
-#ifdef CONFIG_KMEMTRACE
+#ifdef CONFIG_TRACING
extern void *kmem_cache_alloc_node_notrace(struct kmem_cache *s,
gfp_t gfpflags,
int node);
diff --git a/include/linux/spi/mpc52xx_spi.h b/include/linux/spi/mpc52xx_spi.h
deleted file mode 100644
index d1004cf0924..00000000000
--- a/include/linux/spi/mpc52xx_spi.h
+++ /dev/null
@@ -1,10 +0,0 @@
-
-#ifndef INCLUDE_MPC5200_SPI_H
-#define INCLUDE_MPC5200_SPI_H
-
-extern void mpc52xx_spi_set_premessage_hook(struct spi_master *master,
- void (*hook)(struct spi_message *m,
- void *context),
- void *hook_context);
-
-#endif
diff --git a/include/linux/spi/sh_msiof.h b/include/linux/spi/sh_msiof.h
new file mode 100644
index 00000000000..2e8db3d2d2e
--- /dev/null
+++ b/include/linux/spi/sh_msiof.h
@@ -0,0 +1,10 @@
+#ifndef __SPI_SH_MSIOF_H__
+#define __SPI_SH_MSIOF_H__
+
+struct sh_msiof_spi_info {
+ int tx_fifo_override;
+ int rx_fifo_override;
+ u16 num_chipselect;
+};
+
+#endif /* __SPI_SH_MSIOF_H__ */
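
Hypothetical board-code sketch (not part of the patch): handing sh_msiof_spi_info to the MSIOF controller as platform data. The platform device name "spi_sh_msiof" and all other names are assumptions.

#include <linux/platform_device.h>
#include <linux/spi/sh_msiof.h>

static struct sh_msiof_spi_info demo_msiof_info = {
	.tx_fifo_override = 0,		/* 0: keep the controller's default FIFO size */
	.rx_fifo_override = 0,
	.num_chipselect   = 1,
};

static struct platform_device demo_msiof_device = {
	.name = "spi_sh_msiof",		/* assumed driver name */
	.id   = 0,
	.dev  = {
		.platform_data = &demo_msiof_info,
	},
};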
diff --git a/include/linux/spi/xilinx_spi.h b/include/linux/spi/xilinx_spi.h
new file mode 100644
index 00000000000..6f17278810b
--- /dev/null
+++ b/include/linux/spi/xilinx_spi.h
@@ -0,0 +1,20 @@
+#ifndef __LINUX_SPI_XILINX_SPI_H
+#define __LINUX_SPI_XILINX_SPI_H
+
+/**
+ * struct xspi_platform_data - Platform data of the Xilinx SPI driver
+ * @num_chipselect: Number of chip selects supported by the IP.
+ * @little_endian: Whether the registers should be accessed as little endian.
+ * @bits_per_word: Number of bits per word.
+ * @devices: Devices to add when the driver is probed.
+ * @num_devices: Number of devices in the devices array.
+ */
+struct xspi_platform_data {
+ u16 num_chipselect;
+ bool little_endian;
+ u8 bits_per_word;
+ struct spi_board_info *devices;
+ u8 num_devices;
+};
+
+#endif /* __LINUX_SPI_XILINX_SPI_H */
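
Hypothetical platform-data sketch (not part of the patch) filling xspi_platform_data as documented above; the board info is made up and "spidev" is just an example modalias.

#include <linux/kernel.h>
#include <linux/spi/spi.h>
#include <linux/spi/xilinx_spi.h>

static struct spi_board_info demo_board_info[] = {
	{
		.modalias	= "spidev",	/* example slave device */
		.max_speed_hz	= 1000000,
		.chip_select	= 0,
	},
};

static struct xspi_platform_data demo_xspi_pdata = {
	.num_chipselect	= 1,
	.little_endian	= true,
	.bits_per_word	= 8,
	.devices	= demo_board_info,
	.num_devices	= ARRAY_SIZE(demo_board_info),
};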
diff --git a/include/linux/spinlock.h b/include/linux/spinlock.h
index 71dccfeb0d8..86088213334 100644
--- a/include/linux/spinlock.h
+++ b/include/linux/spinlock.h
@@ -8,13 +8,13 @@
*
* on SMP builds:
*
- * asm/spinlock_types.h: contains the raw_spinlock_t/raw_rwlock_t and the
+ * asm/spinlock_types.h: contains the arch_spinlock_t/arch_rwlock_t and the
* initializers
*
* linux/spinlock_types.h:
* defines the generic type and initializers
*
- * asm/spinlock.h: contains the __raw_spin_*()/etc. lowlevel
+ * asm/spinlock.h: contains the arch_spin_*()/etc. lowlevel
* implementations, mostly inline assembly code
*
* (also included on UP-debug builds:)
@@ -34,7 +34,7 @@
* defines the generic type and initializers
*
* linux/spinlock_up.h:
- * contains the __raw_spin_*()/etc. version of UP
+ * contains the arch_spin_*()/etc. version of UP
* builds. (which are NOPs on non-debug, non-preempt
* builds)
*
@@ -75,12 +75,12 @@
#define __lockfunc __attribute__((section(".spinlock.text")))
/*
- * Pull the raw_spinlock_t and raw_rwlock_t definitions:
+ * Pull the arch_spinlock_t and arch_rwlock_t definitions:
*/
#include <linux/spinlock_types.h>
/*
- * Pull the __raw*() functions/declarations (UP-nondebug doesnt need them):
+ * Pull the arch_spin*() functions/declarations (UP-nondebug doesn't need them):
*/
#ifdef CONFIG_SMP
# include <asm/spinlock.h>
@@ -89,45 +89,31 @@
#endif
#ifdef CONFIG_DEBUG_SPINLOCK
- extern void __spin_lock_init(spinlock_t *lock, const char *name,
- struct lock_class_key *key);
-# define spin_lock_init(lock) \
+ extern void __raw_spin_lock_init(raw_spinlock_t *lock, const char *name,
+ struct lock_class_key *key);
+# define raw_spin_lock_init(lock) \
do { \
static struct lock_class_key __key; \
\
- __spin_lock_init((lock), #lock, &__key); \
+ __raw_spin_lock_init((lock), #lock, &__key); \
} while (0)
#else
-# define spin_lock_init(lock) \
- do { *(lock) = __SPIN_LOCK_UNLOCKED(lock); } while (0)
+# define raw_spin_lock_init(lock) \
+ do { *(lock) = __RAW_SPIN_LOCK_UNLOCKED(lock); } while (0)
#endif
-#ifdef CONFIG_DEBUG_SPINLOCK
- extern void __rwlock_init(rwlock_t *lock, const char *name,
- struct lock_class_key *key);
-# define rwlock_init(lock) \
-do { \
- static struct lock_class_key __key; \
- \
- __rwlock_init((lock), #lock, &__key); \
-} while (0)
-#else
-# define rwlock_init(lock) \
- do { *(lock) = __RW_LOCK_UNLOCKED(lock); } while (0)
-#endif
-
-#define spin_is_locked(lock) __raw_spin_is_locked(&(lock)->raw_lock)
+#define raw_spin_is_locked(lock) arch_spin_is_locked(&(lock)->raw_lock)
#ifdef CONFIG_GENERIC_LOCKBREAK
-#define spin_is_contended(lock) ((lock)->break_lock)
+#define raw_spin_is_contended(lock) ((lock)->break_lock)
#else
-#ifdef __raw_spin_is_contended
-#define spin_is_contended(lock) __raw_spin_is_contended(&(lock)->raw_lock)
+#ifdef arch_spin_is_contended
+#define raw_spin_is_contended(lock) arch_spin_is_contended(&(lock)->raw_lock)
#else
-#define spin_is_contended(lock) (((void)(lock), 0))
-#endif /*__raw_spin_is_contended*/
+#define raw_spin_is_contended(lock) (((void)(lock), 0))
+#endif /*arch_spin_is_contended*/
#endif
/* The lock does not imply full memory barrier. */
@@ -136,182 +122,260 @@ static inline void smp_mb__after_lock(void) { smp_mb(); }
#endif
/**
- * spin_unlock_wait - wait until the spinlock gets unlocked
+ * raw_spin_unlock_wait - wait until the spinlock gets unlocked
* @lock: the spinlock in question.
*/
-#define spin_unlock_wait(lock) __raw_spin_unlock_wait(&(lock)->raw_lock)
+#define raw_spin_unlock_wait(lock) arch_spin_unlock_wait(&(lock)->raw_lock)
#ifdef CONFIG_DEBUG_SPINLOCK
- extern void _raw_spin_lock(spinlock_t *lock);
-#define _raw_spin_lock_flags(lock, flags) _raw_spin_lock(lock)
- extern int _raw_spin_trylock(spinlock_t *lock);
- extern void _raw_spin_unlock(spinlock_t *lock);
- extern void _raw_read_lock(rwlock_t *lock);
-#define _raw_read_lock_flags(lock, flags) _raw_read_lock(lock)
- extern int _raw_read_trylock(rwlock_t *lock);
- extern void _raw_read_unlock(rwlock_t *lock);
- extern void _raw_write_lock(rwlock_t *lock);
-#define _raw_write_lock_flags(lock, flags) _raw_write_lock(lock)
- extern int _raw_write_trylock(rwlock_t *lock);
- extern void _raw_write_unlock(rwlock_t *lock);
+ extern void do_raw_spin_lock(raw_spinlock_t *lock);
+#define do_raw_spin_lock_flags(lock, flags) do_raw_spin_lock(lock)
+ extern int do_raw_spin_trylock(raw_spinlock_t *lock);
+ extern void do_raw_spin_unlock(raw_spinlock_t *lock);
#else
-# define _raw_spin_lock(lock) __raw_spin_lock(&(lock)->raw_lock)
-# define _raw_spin_lock_flags(lock, flags) \
- __raw_spin_lock_flags(&(lock)->raw_lock, *(flags))
-# define _raw_spin_trylock(lock) __raw_spin_trylock(&(lock)->raw_lock)
-# define _raw_spin_unlock(lock) __raw_spin_unlock(&(lock)->raw_lock)
-# define _raw_read_lock(rwlock) __raw_read_lock(&(rwlock)->raw_lock)
-# define _raw_read_lock_flags(lock, flags) \
- __raw_read_lock_flags(&(lock)->raw_lock, *(flags))
-# define _raw_read_trylock(rwlock) __raw_read_trylock(&(rwlock)->raw_lock)
-# define _raw_read_unlock(rwlock) __raw_read_unlock(&(rwlock)->raw_lock)
-# define _raw_write_lock(rwlock) __raw_write_lock(&(rwlock)->raw_lock)
-# define _raw_write_lock_flags(lock, flags) \
- __raw_write_lock_flags(&(lock)->raw_lock, *(flags))
-# define _raw_write_trylock(rwlock) __raw_write_trylock(&(rwlock)->raw_lock)
-# define _raw_write_unlock(rwlock) __raw_write_unlock(&(rwlock)->raw_lock)
+static inline void do_raw_spin_lock(raw_spinlock_t *lock)
+{
+ arch_spin_lock(&lock->raw_lock);
+}
+
+static inline void
+do_raw_spin_lock_flags(raw_spinlock_t *lock, unsigned long *flags)
+{
+ arch_spin_lock_flags(&lock->raw_lock, *flags);
+}
+
+static inline int do_raw_spin_trylock(raw_spinlock_t *lock)
+{
+ return arch_spin_trylock(&(lock)->raw_lock);
+}
+
+static inline void do_raw_spin_unlock(raw_spinlock_t *lock)
+{
+ arch_spin_unlock(&lock->raw_lock);
+}
#endif
-#define read_can_lock(rwlock) __raw_read_can_lock(&(rwlock)->raw_lock)
-#define write_can_lock(rwlock) __raw_write_can_lock(&(rwlock)->raw_lock)
-
/*
- * Define the various spin_lock and rw_lock methods. Note we define these
- * regardless of whether CONFIG_SMP or CONFIG_PREEMPT are set. The various
- * methods are defined as nops in the case they are not required.
+ * Define the various spin_lock methods. Note we define these
+ * regardless of whether CONFIG_SMP or CONFIG_PREEMPT is set. The
+ * various methods are defined as nops when they are not
+ * required.
*/
-#define spin_trylock(lock) __cond_lock(lock, _spin_trylock(lock))
-#define read_trylock(lock) __cond_lock(lock, _read_trylock(lock))
-#define write_trylock(lock) __cond_lock(lock, _write_trylock(lock))
+#define raw_spin_trylock(lock) __cond_lock(lock, _raw_spin_trylock(lock))
-#define spin_lock(lock) _spin_lock(lock)
+#define raw_spin_lock(lock) _raw_spin_lock(lock)
#ifdef CONFIG_DEBUG_LOCK_ALLOC
-# define spin_lock_nested(lock, subclass) _spin_lock_nested(lock, subclass)
-# define spin_lock_nest_lock(lock, nest_lock) \
+# define raw_spin_lock_nested(lock, subclass) \
+ _raw_spin_lock_nested(lock, subclass)
+
+# define raw_spin_lock_nest_lock(lock, nest_lock) \
do { \
typecheck(struct lockdep_map *, &(nest_lock)->dep_map);\
- _spin_lock_nest_lock(lock, &(nest_lock)->dep_map); \
+ _raw_spin_lock_nest_lock(lock, &(nest_lock)->dep_map); \
} while (0)
#else
-# define spin_lock_nested(lock, subclass) _spin_lock(lock)
-# define spin_lock_nest_lock(lock, nest_lock) _spin_lock(lock)
+# define raw_spin_lock_nested(lock, subclass) _raw_spin_lock(lock)
+# define raw_spin_lock_nest_lock(lock, nest_lock) _raw_spin_lock(lock)
#endif
-#define write_lock(lock) _write_lock(lock)
-#define read_lock(lock) _read_lock(lock)
-
#if defined(CONFIG_SMP) || defined(CONFIG_DEBUG_SPINLOCK)
-#define spin_lock_irqsave(lock, flags) \
+#define raw_spin_lock_irqsave(lock, flags) \
do { \
typecheck(unsigned long, flags); \
- flags = _spin_lock_irqsave(lock); \
- } while (0)
-#define read_lock_irqsave(lock, flags) \
- do { \
- typecheck(unsigned long, flags); \
- flags = _read_lock_irqsave(lock); \
- } while (0)
-#define write_lock_irqsave(lock, flags) \
- do { \
- typecheck(unsigned long, flags); \
- flags = _write_lock_irqsave(lock); \
+ flags = _raw_spin_lock_irqsave(lock); \
} while (0)
#ifdef CONFIG_DEBUG_LOCK_ALLOC
-#define spin_lock_irqsave_nested(lock, flags, subclass) \
+#define raw_spin_lock_irqsave_nested(lock, flags, subclass) \
do { \
typecheck(unsigned long, flags); \
- flags = _spin_lock_irqsave_nested(lock, subclass); \
+ flags = _raw_spin_lock_irqsave_nested(lock, subclass); \
} while (0)
#else
-#define spin_lock_irqsave_nested(lock, flags, subclass) \
+#define raw_spin_lock_irqsave_nested(lock, flags, subclass) \
do { \
typecheck(unsigned long, flags); \
- flags = _spin_lock_irqsave(lock); \
+ flags = _raw_spin_lock_irqsave(lock); \
} while (0)
#endif
#else
-#define spin_lock_irqsave(lock, flags) \
- do { \
- typecheck(unsigned long, flags); \
- _spin_lock_irqsave(lock, flags); \
- } while (0)
-#define read_lock_irqsave(lock, flags) \
- do { \
- typecheck(unsigned long, flags); \
- _read_lock_irqsave(lock, flags); \
- } while (0)
-#define write_lock_irqsave(lock, flags) \
+#define raw_spin_lock_irqsave(lock, flags) \
do { \
typecheck(unsigned long, flags); \
- _write_lock_irqsave(lock, flags); \
+ _raw_spin_lock_irqsave(lock, flags); \
} while (0)
-#define spin_lock_irqsave_nested(lock, flags, subclass) \
- spin_lock_irqsave(lock, flags)
-#endif
+#define raw_spin_lock_irqsave_nested(lock, flags, subclass) \
+ raw_spin_lock_irqsave(lock, flags)
-#define spin_lock_irq(lock) _spin_lock_irq(lock)
-#define spin_lock_bh(lock) _spin_lock_bh(lock)
-#define read_lock_irq(lock) _read_lock_irq(lock)
-#define read_lock_bh(lock) _read_lock_bh(lock)
-#define write_lock_irq(lock) _write_lock_irq(lock)
-#define write_lock_bh(lock) _write_lock_bh(lock)
-#define spin_unlock(lock) _spin_unlock(lock)
-#define read_unlock(lock) _read_unlock(lock)
-#define write_unlock(lock) _write_unlock(lock)
-#define spin_unlock_irq(lock) _spin_unlock_irq(lock)
-#define read_unlock_irq(lock) _read_unlock_irq(lock)
-#define write_unlock_irq(lock) _write_unlock_irq(lock)
-
-#define spin_unlock_irqrestore(lock, flags) \
- do { \
- typecheck(unsigned long, flags); \
- _spin_unlock_irqrestore(lock, flags); \
- } while (0)
-#define spin_unlock_bh(lock) _spin_unlock_bh(lock)
+#endif
-#define read_unlock_irqrestore(lock, flags) \
- do { \
- typecheck(unsigned long, flags); \
- _read_unlock_irqrestore(lock, flags); \
- } while (0)
-#define read_unlock_bh(lock) _read_unlock_bh(lock)
+#define raw_spin_lock_irq(lock) _raw_spin_lock_irq(lock)
+#define raw_spin_lock_bh(lock) _raw_spin_lock_bh(lock)
+#define raw_spin_unlock(lock) _raw_spin_unlock(lock)
+#define raw_spin_unlock_irq(lock) _raw_spin_unlock_irq(lock)
-#define write_unlock_irqrestore(lock, flags) \
- do { \
- typecheck(unsigned long, flags); \
- _write_unlock_irqrestore(lock, flags); \
+#define raw_spin_unlock_irqrestore(lock, flags) \
+ do { \
+ typecheck(unsigned long, flags); \
+ _raw_spin_unlock_irqrestore(lock, flags); \
} while (0)
-#define write_unlock_bh(lock) _write_unlock_bh(lock)
+#define raw_spin_unlock_bh(lock) _raw_spin_unlock_bh(lock)
-#define spin_trylock_bh(lock) __cond_lock(lock, _spin_trylock_bh(lock))
+#define raw_spin_trylock_bh(lock) \
+ __cond_lock(lock, _raw_spin_trylock_bh(lock))
-#define spin_trylock_irq(lock) \
+#define raw_spin_trylock_irq(lock) \
({ \
local_irq_disable(); \
- spin_trylock(lock) ? \
+ raw_spin_trylock(lock) ? \
1 : ({ local_irq_enable(); 0; }); \
})
-#define spin_trylock_irqsave(lock, flags) \
+#define raw_spin_trylock_irqsave(lock, flags) \
({ \
local_irq_save(flags); \
- spin_trylock(lock) ? \
+ raw_spin_trylock(lock) ? \
1 : ({ local_irq_restore(flags); 0; }); \
})
-#define write_trylock_irqsave(lock, flags) \
-({ \
- local_irq_save(flags); \
- write_trylock(lock) ? \
- 1 : ({ local_irq_restore(flags); 0; }); \
+/**
+ * raw_spin_can_lock - would raw_spin_trylock() succeed?
+ * @lock: the spinlock in question.
+ */
+#define raw_spin_can_lock(lock) (!raw_spin_is_locked(lock))
+
+/* Include rwlock functions */
+#include <linux/rwlock.h>
+
+/*
+ * Pull the _spin_*()/_read_*()/_write_*() functions/declarations:
+ */
+#if defined(CONFIG_SMP) || defined(CONFIG_DEBUG_SPINLOCK)
+# include <linux/spinlock_api_smp.h>
+#else
+# include <linux/spinlock_api_up.h>
+#endif
+
+/*
+ * Map the spin_lock functions to the raw variants for PREEMPT_RT=n
+ */
+
+static inline raw_spinlock_t *spinlock_check(spinlock_t *lock)
+{
+ return &lock->rlock;
+}
+
+#define spin_lock_init(_lock) \
+do { \
+ spinlock_check(_lock); \
+ raw_spin_lock_init(&(_lock)->rlock); \
+} while (0)
+
+static inline void spin_lock(spinlock_t *lock)
+{
+ raw_spin_lock(&lock->rlock);
+}
+
+static inline void spin_lock_bh(spinlock_t *lock)
+{
+ raw_spin_lock_bh(&lock->rlock);
+}
+
+static inline int spin_trylock(spinlock_t *lock)
+{
+ return raw_spin_trylock(&lock->rlock);
+}
+
+#define spin_lock_nested(lock, subclass) \
+do { \
+ raw_spin_lock_nested(spinlock_check(lock), subclass); \
+} while (0)
+
+#define spin_lock_nest_lock(lock, nest_lock) \
+do { \
+ raw_spin_lock_nest_lock(spinlock_check(lock), nest_lock); \
+} while (0)
+
+static inline void spin_lock_irq(spinlock_t *lock)
+{
+ raw_spin_lock_irq(&lock->rlock);
+}
+
+#define spin_lock_irqsave(lock, flags) \
+do { \
+ raw_spin_lock_irqsave(spinlock_check(lock), flags); \
+} while (0)
+
+#define spin_lock_irqsave_nested(lock, flags, subclass) \
+do { \
+ raw_spin_lock_irqsave_nested(spinlock_check(lock), flags, subclass); \
+} while (0)
+
+static inline void spin_unlock(spinlock_t *lock)
+{
+ raw_spin_unlock(&lock->rlock);
+}
+
+static inline void spin_unlock_bh(spinlock_t *lock)
+{
+ raw_spin_unlock_bh(&lock->rlock);
+}
+
+static inline void spin_unlock_irq(spinlock_t *lock)
+{
+ raw_spin_unlock_irq(&lock->rlock);
+}
+
+static inline void spin_unlock_irqrestore(spinlock_t *lock, unsigned long flags)
+{
+ raw_spin_unlock_irqrestore(&lock->rlock, flags);
+}
+
+static inline int spin_trylock_bh(spinlock_t *lock)
+{
+ return raw_spin_trylock_bh(&lock->rlock);
+}
+
+static inline int spin_trylock_irq(spinlock_t *lock)
+{
+ return raw_spin_trylock_irq(&lock->rlock);
+}
+
+#define spin_trylock_irqsave(lock, flags) \
+({ \
+ raw_spin_trylock_irqsave(spinlock_check(lock), flags); \
})
+static inline void spin_unlock_wait(spinlock_t *lock)
+{
+ raw_spin_unlock_wait(&lock->rlock);
+}
+
+static inline int spin_is_locked(spinlock_t *lock)
+{
+ return raw_spin_is_locked(&lock->rlock);
+}
+
+static inline int spin_is_contended(spinlock_t *lock)
+{
+ return raw_spin_is_contended(&lock->rlock);
+}
+
+static inline int spin_can_lock(spinlock_t *lock)
+{
+ return raw_spin_can_lock(&lock->rlock);
+}
+
+static inline void assert_spin_locked(spinlock_t *lock)
+{
+ assert_raw_spin_locked(&lock->rlock);
+}
+
/*
* Pull the atomic_t declaration:
* (asm-mips/atomic.h needs above definitions)
@@ -329,19 +393,4 @@ extern int _atomic_dec_and_lock(atomic_t *atomic, spinlock_t *lock);
#define atomic_dec_and_lock(atomic, lock) \
__cond_lock(lock, _atomic_dec_and_lock(atomic, lock))
-/**
- * spin_can_lock - would spin_trylock() succeed?
- * @lock: the spinlock in question.
- */
-#define spin_can_lock(lock) (!spin_is_locked(lock))
-
-/*
- * Pull the _spin_*()/_read_*()/_write_*() functions/declarations:
- */
-#if defined(CONFIG_SMP) || defined(CONFIG_DEBUG_SPINLOCK)
-# include <linux/spinlock_api_smp.h>
-#else
-# include <linux/spinlock_api_up.h>
-#endif
-
#endif /* __LINUX_SPINLOCK_H */
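
Sketch of the resulting split between the two lock types (illustrative, not part of the header): spinlock_t keeps the familiar spin_lock() API and simply forwards to the raw variant on !PREEMPT_RT, while raw_spinlock_t is used directly by core code. The lock names are made up, and DEFINE_RAW_SPINLOCK is assumed from the reworked spinlock_types.h.

#include <linux/spinlock.h>

static DEFINE_SPINLOCK(demo_lock);		/* spinlock_t: ordinary kernel code */
static DEFINE_RAW_SPINLOCK(demo_raw_lock);	/* raw_spinlock_t: low-level/core code */

static void demo(void)
{
	unsigned long flags;

	spin_lock(&demo_lock);			/* forwards to raw_spin_lock(&demo_lock.rlock) */
	spin_unlock(&demo_lock);

	raw_spin_lock_irqsave(&demo_raw_lock, flags);
	raw_spin_unlock_irqrestore(&demo_raw_lock, flags);
}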
diff --git a/include/linux/spinlock_api_smp.h b/include/linux/spinlock_api_smp.h
index 8264a7f459b..e253ccd7a60 100644
--- a/include/linux/spinlock_api_smp.h
+++ b/include/linux/spinlock_api_smp.h
@@ -17,165 +17,76 @@
int in_lock_functions(unsigned long addr);
-#define assert_spin_locked(x) BUG_ON(!spin_is_locked(x))
-
-void __lockfunc _spin_lock(spinlock_t *lock) __acquires(lock);
-void __lockfunc _spin_lock_nested(spinlock_t *lock, int subclass)
- __acquires(lock);
-void __lockfunc _spin_lock_nest_lock(spinlock_t *lock, struct lockdep_map *map)
- __acquires(lock);
-void __lockfunc _read_lock(rwlock_t *lock) __acquires(lock);
-void __lockfunc _write_lock(rwlock_t *lock) __acquires(lock);
-void __lockfunc _spin_lock_bh(spinlock_t *lock) __acquires(lock);
-void __lockfunc _read_lock_bh(rwlock_t *lock) __acquires(lock);
-void __lockfunc _write_lock_bh(rwlock_t *lock) __acquires(lock);
-void __lockfunc _spin_lock_irq(spinlock_t *lock) __acquires(lock);
-void __lockfunc _read_lock_irq(rwlock_t *lock) __acquires(lock);
-void __lockfunc _write_lock_irq(rwlock_t *lock) __acquires(lock);
-unsigned long __lockfunc _spin_lock_irqsave(spinlock_t *lock)
- __acquires(lock);
-unsigned long __lockfunc _spin_lock_irqsave_nested(spinlock_t *lock, int subclass)
- __acquires(lock);
-unsigned long __lockfunc _read_lock_irqsave(rwlock_t *lock)
- __acquires(lock);
-unsigned long __lockfunc _write_lock_irqsave(rwlock_t *lock)
- __acquires(lock);
-int __lockfunc _spin_trylock(spinlock_t *lock);
-int __lockfunc _read_trylock(rwlock_t *lock);
-int __lockfunc _write_trylock(rwlock_t *lock);
-int __lockfunc _spin_trylock_bh(spinlock_t *lock);
-void __lockfunc _spin_unlock(spinlock_t *lock) __releases(lock);
-void __lockfunc _read_unlock(rwlock_t *lock) __releases(lock);
-void __lockfunc _write_unlock(rwlock_t *lock) __releases(lock);
-void __lockfunc _spin_unlock_bh(spinlock_t *lock) __releases(lock);
-void __lockfunc _read_unlock_bh(rwlock_t *lock) __releases(lock);
-void __lockfunc _write_unlock_bh(rwlock_t *lock) __releases(lock);
-void __lockfunc _spin_unlock_irq(spinlock_t *lock) __releases(lock);
-void __lockfunc _read_unlock_irq(rwlock_t *lock) __releases(lock);
-void __lockfunc _write_unlock_irq(rwlock_t *lock) __releases(lock);
-void __lockfunc _spin_unlock_irqrestore(spinlock_t *lock, unsigned long flags)
- __releases(lock);
-void __lockfunc _read_unlock_irqrestore(rwlock_t *lock, unsigned long flags)
- __releases(lock);
-void __lockfunc _write_unlock_irqrestore(rwlock_t *lock, unsigned long flags)
- __releases(lock);
+#define assert_raw_spin_locked(x) BUG_ON(!raw_spin_is_locked(x))
+
+void __lockfunc _raw_spin_lock(raw_spinlock_t *lock) __acquires(lock);
+void __lockfunc _raw_spin_lock_nested(raw_spinlock_t *lock, int subclass)
+ __acquires(lock);
+void __lockfunc
+_raw_spin_lock_nest_lock(raw_spinlock_t *lock, struct lockdep_map *map)
+ __acquires(lock);
+void __lockfunc _raw_spin_lock_bh(raw_spinlock_t *lock) __acquires(lock);
+void __lockfunc _raw_spin_lock_irq(raw_spinlock_t *lock)
+ __acquires(lock);
+
+unsigned long __lockfunc _raw_spin_lock_irqsave(raw_spinlock_t *lock)
+ __acquires(lock);
+unsigned long __lockfunc
+_raw_spin_lock_irqsave_nested(raw_spinlock_t *lock, int subclass)
+ __acquires(lock);
+int __lockfunc _raw_spin_trylock(raw_spinlock_t *lock);
+int __lockfunc _raw_spin_trylock_bh(raw_spinlock_t *lock);
+void __lockfunc _raw_spin_unlock(raw_spinlock_t *lock) __releases(lock);
+void __lockfunc _raw_spin_unlock_bh(raw_spinlock_t *lock) __releases(lock);
+void __lockfunc _raw_spin_unlock_irq(raw_spinlock_t *lock) __releases(lock);
+void __lockfunc
+_raw_spin_unlock_irqrestore(raw_spinlock_t *lock, unsigned long flags)
+ __releases(lock);
#ifdef CONFIG_INLINE_SPIN_LOCK
-#define _spin_lock(lock) __spin_lock(lock)
-#endif
-
-#ifdef CONFIG_INLINE_READ_LOCK
-#define _read_lock(lock) __read_lock(lock)
-#endif
-
-#ifdef CONFIG_INLINE_WRITE_LOCK
-#define _write_lock(lock) __write_lock(lock)
+#define _raw_spin_lock(lock) __raw_spin_lock(lock)
#endif
#ifdef CONFIG_INLINE_SPIN_LOCK_BH
-#define _spin_lock_bh(lock) __spin_lock_bh(lock)
-#endif
-
-#ifdef CONFIG_INLINE_READ_LOCK_BH
-#define _read_lock_bh(lock) __read_lock_bh(lock)
-#endif
-
-#ifdef CONFIG_INLINE_WRITE_LOCK_BH
-#define _write_lock_bh(lock) __write_lock_bh(lock)
+#define _raw_spin_lock_bh(lock) __raw_spin_lock_bh(lock)
#endif
#ifdef CONFIG_INLINE_SPIN_LOCK_IRQ
-#define _spin_lock_irq(lock) __spin_lock_irq(lock)
-#endif
-
-#ifdef CONFIG_INLINE_READ_LOCK_IRQ
-#define _read_lock_irq(lock) __read_lock_irq(lock)
-#endif
-
-#ifdef CONFIG_INLINE_WRITE_LOCK_IRQ
-#define _write_lock_irq(lock) __write_lock_irq(lock)
+#define _raw_spin_lock_irq(lock) __raw_spin_lock_irq(lock)
#endif
#ifdef CONFIG_INLINE_SPIN_LOCK_IRQSAVE
-#define _spin_lock_irqsave(lock) __spin_lock_irqsave(lock)
-#endif
-
-#ifdef CONFIG_INLINE_READ_LOCK_IRQSAVE
-#define _read_lock_irqsave(lock) __read_lock_irqsave(lock)
-#endif
-
-#ifdef CONFIG_INLINE_WRITE_LOCK_IRQSAVE
-#define _write_lock_irqsave(lock) __write_lock_irqsave(lock)
+#define _raw_spin_lock_irqsave(lock) __raw_spin_lock_irqsave(lock)
#endif
#ifdef CONFIG_INLINE_SPIN_TRYLOCK
-#define _spin_trylock(lock) __spin_trylock(lock)
-#endif
-
-#ifdef CONFIG_INLINE_READ_TRYLOCK
-#define _read_trylock(lock) __read_trylock(lock)
-#endif
-
-#ifdef CONFIG_INLINE_WRITE_TRYLOCK
-#define _write_trylock(lock) __write_trylock(lock)
+#define _raw_spin_trylock(lock) __raw_spin_trylock(lock)
#endif
#ifdef CONFIG_INLINE_SPIN_TRYLOCK_BH
-#define _spin_trylock_bh(lock) __spin_trylock_bh(lock)
+#define _raw_spin_trylock_bh(lock) __raw_spin_trylock_bh(lock)
#endif
#ifdef CONFIG_INLINE_SPIN_UNLOCK
-#define _spin_unlock(lock) __spin_unlock(lock)
-#endif
-
-#ifdef CONFIG_INLINE_READ_UNLOCK
-#define _read_unlock(lock) __read_unlock(lock)
-#endif
-
-#ifdef CONFIG_INLINE_WRITE_UNLOCK
-#define _write_unlock(lock) __write_unlock(lock)
+#define _raw_spin_unlock(lock) __raw_spin_unlock(lock)
#endif
#ifdef CONFIG_INLINE_SPIN_UNLOCK_BH
-#define _spin_unlock_bh(lock) __spin_unlock_bh(lock)
-#endif
-
-#ifdef CONFIG_INLINE_READ_UNLOCK_BH
-#define _read_unlock_bh(lock) __read_unlock_bh(lock)
-#endif
-
-#ifdef CONFIG_INLINE_WRITE_UNLOCK_BH
-#define _write_unlock_bh(lock) __write_unlock_bh(lock)
+#define _raw_spin_unlock_bh(lock) __raw_spin_unlock_bh(lock)
#endif
#ifdef CONFIG_INLINE_SPIN_UNLOCK_IRQ
-#define _spin_unlock_irq(lock) __spin_unlock_irq(lock)
-#endif
-
-#ifdef CONFIG_INLINE_READ_UNLOCK_IRQ
-#define _read_unlock_irq(lock) __read_unlock_irq(lock)
-#endif
-
-#ifdef CONFIG_INLINE_WRITE_UNLOCK_IRQ
-#define _write_unlock_irq(lock) __write_unlock_irq(lock)
+#define _raw_spin_unlock_irq(lock) __raw_spin_unlock_irq(lock)
#endif
#ifdef CONFIG_INLINE_SPIN_UNLOCK_IRQRESTORE
-#define _spin_unlock_irqrestore(lock, flags) __spin_unlock_irqrestore(lock, flags)
-#endif
-
-#ifdef CONFIG_INLINE_READ_UNLOCK_IRQRESTORE
-#define _read_unlock_irqrestore(lock, flags) __read_unlock_irqrestore(lock, flags)
-#endif
-
-#ifdef CONFIG_INLINE_WRITE_UNLOCK_IRQRESTORE
-#define _write_unlock_irqrestore(lock, flags) __write_unlock_irqrestore(lock, flags)
+#define _raw_spin_unlock_irqrestore(lock, flags) __raw_spin_unlock_irqrestore(lock, flags)
#endif
-static inline int __spin_trylock(spinlock_t *lock)
+static inline int __raw_spin_trylock(raw_spinlock_t *lock)
{
preempt_disable();
- if (_raw_spin_trylock(lock)) {
+ if (do_raw_spin_trylock(lock)) {
spin_acquire(&lock->dep_map, 0, 1, _RET_IP_);
return 1;
}
@@ -183,28 +94,6 @@ static inline int __spin_trylock(spinlock_t *lock)
return 0;
}
-static inline int __read_trylock(rwlock_t *lock)
-{
- preempt_disable();
- if (_raw_read_trylock(lock)) {
- rwlock_acquire_read(&lock->dep_map, 0, 1, _RET_IP_);
- return 1;
- }
- preempt_enable();
- return 0;
-}
-
-static inline int __write_trylock(rwlock_t *lock)
-{
- preempt_disable();
- if (_raw_write_trylock(lock)) {
- rwlock_acquire(&lock->dep_map, 0, 1, _RET_IP_);
- return 1;
- }
- preempt_enable();
- return 0;
-}
-
/*
* If lockdep is enabled then we use the non-preemption spin-ops
* even on CONFIG_PREEMPT, because lockdep assumes that interrupts are
@@ -212,14 +101,7 @@ static inline int __write_trylock(rwlock_t *lock)
*/
#if !defined(CONFIG_GENERIC_LOCKBREAK) || defined(CONFIG_DEBUG_LOCK_ALLOC)
-static inline void __read_lock(rwlock_t *lock)
-{
- preempt_disable();
- rwlock_acquire_read(&lock->dep_map, 0, 0, _RET_IP_);
- LOCK_CONTENDED(lock, _raw_read_trylock, _raw_read_lock);
-}
-
-static inline unsigned long __spin_lock_irqsave(spinlock_t *lock)
+static inline unsigned long __raw_spin_lock_irqsave(raw_spinlock_t *lock)
{
unsigned long flags;
@@ -228,205 +110,79 @@ static inline unsigned long __spin_lock_irqsave(spinlock_t *lock)
spin_acquire(&lock->dep_map, 0, 0, _RET_IP_);
/*
* On lockdep we dont want the hand-coded irq-enable of
- * _raw_spin_lock_flags() code, because lockdep assumes
+ * do_raw_spin_lock_flags() code, because lockdep assumes
* that interrupts are not re-enabled during lock-acquire:
*/
#ifdef CONFIG_LOCKDEP
- LOCK_CONTENDED(lock, _raw_spin_trylock, _raw_spin_lock);
+ LOCK_CONTENDED(lock, do_raw_spin_trylock, do_raw_spin_lock);
#else
- _raw_spin_lock_flags(lock, &flags);
+ do_raw_spin_lock_flags(lock, &flags);
#endif
return flags;
}
-static inline void __spin_lock_irq(spinlock_t *lock)
+static inline void __raw_spin_lock_irq(raw_spinlock_t *lock)
{
local_irq_disable();
preempt_disable();
spin_acquire(&lock->dep_map, 0, 0, _RET_IP_);
- LOCK_CONTENDED(lock, _raw_spin_trylock, _raw_spin_lock);
+ LOCK_CONTENDED(lock, do_raw_spin_trylock, do_raw_spin_lock);
}
-static inline void __spin_lock_bh(spinlock_t *lock)
+static inline void __raw_spin_lock_bh(raw_spinlock_t *lock)
{
local_bh_disable();
preempt_disable();
spin_acquire(&lock->dep_map, 0, 0, _RET_IP_);
- LOCK_CONTENDED(lock, _raw_spin_trylock, _raw_spin_lock);
-}
-
-static inline unsigned long __read_lock_irqsave(rwlock_t *lock)
-{
- unsigned long flags;
-
- local_irq_save(flags);
- preempt_disable();
- rwlock_acquire_read(&lock->dep_map, 0, 0, _RET_IP_);
- LOCK_CONTENDED_FLAGS(lock, _raw_read_trylock, _raw_read_lock,
- _raw_read_lock_flags, &flags);
- return flags;
-}
-
-static inline void __read_lock_irq(rwlock_t *lock)
-{
- local_irq_disable();
- preempt_disable();
- rwlock_acquire_read(&lock->dep_map, 0, 0, _RET_IP_);
- LOCK_CONTENDED(lock, _raw_read_trylock, _raw_read_lock);
-}
-
-static inline void __read_lock_bh(rwlock_t *lock)
-{
- local_bh_disable();
- preempt_disable();
- rwlock_acquire_read(&lock->dep_map, 0, 0, _RET_IP_);
- LOCK_CONTENDED(lock, _raw_read_trylock, _raw_read_lock);
-}
-
-static inline unsigned long __write_lock_irqsave(rwlock_t *lock)
-{
- unsigned long flags;
-
- local_irq_save(flags);
- preempt_disable();
- rwlock_acquire(&lock->dep_map, 0, 0, _RET_IP_);
- LOCK_CONTENDED_FLAGS(lock, _raw_write_trylock, _raw_write_lock,
- _raw_write_lock_flags, &flags);
- return flags;
-}
-
-static inline void __write_lock_irq(rwlock_t *lock)
-{
- local_irq_disable();
- preempt_disable();
- rwlock_acquire(&lock->dep_map, 0, 0, _RET_IP_);
- LOCK_CONTENDED(lock, _raw_write_trylock, _raw_write_lock);
+ LOCK_CONTENDED(lock, do_raw_spin_trylock, do_raw_spin_lock);
}
-static inline void __write_lock_bh(rwlock_t *lock)
-{
- local_bh_disable();
- preempt_disable();
- rwlock_acquire(&lock->dep_map, 0, 0, _RET_IP_);
- LOCK_CONTENDED(lock, _raw_write_trylock, _raw_write_lock);
-}
-
-static inline void __spin_lock(spinlock_t *lock)
+static inline void __raw_spin_lock(raw_spinlock_t *lock)
{
preempt_disable();
spin_acquire(&lock->dep_map, 0, 0, _RET_IP_);
- LOCK_CONTENDED(lock, _raw_spin_trylock, _raw_spin_lock);
-}
-
-static inline void __write_lock(rwlock_t *lock)
-{
- preempt_disable();
- rwlock_acquire(&lock->dep_map, 0, 0, _RET_IP_);
- LOCK_CONTENDED(lock, _raw_write_trylock, _raw_write_lock);
+ LOCK_CONTENDED(lock, do_raw_spin_trylock, do_raw_spin_lock);
}
#endif /* CONFIG_PREEMPT */
-static inline void __spin_unlock(spinlock_t *lock)
+static inline void __raw_spin_unlock(raw_spinlock_t *lock)
{
spin_release(&lock->dep_map, 1, _RET_IP_);
- _raw_spin_unlock(lock);
- preempt_enable();
-}
-
-static inline void __write_unlock(rwlock_t *lock)
-{
- rwlock_release(&lock->dep_map, 1, _RET_IP_);
- _raw_write_unlock(lock);
- preempt_enable();
-}
-
-static inline void __read_unlock(rwlock_t *lock)
-{
- rwlock_release(&lock->dep_map, 1, _RET_IP_);
- _raw_read_unlock(lock);
+ do_raw_spin_unlock(lock);
preempt_enable();
}
-static inline void __spin_unlock_irqrestore(spinlock_t *lock,
+static inline void __raw_spin_unlock_irqrestore(raw_spinlock_t *lock,
unsigned long flags)
{
spin_release(&lock->dep_map, 1, _RET_IP_);
- _raw_spin_unlock(lock);
+ do_raw_spin_unlock(lock);
local_irq_restore(flags);
preempt_enable();
}
-static inline void __spin_unlock_irq(spinlock_t *lock)
+static inline void __raw_spin_unlock_irq(raw_spinlock_t *lock)
{
spin_release(&lock->dep_map, 1, _RET_IP_);
- _raw_spin_unlock(lock);
+ do_raw_spin_unlock(lock);
local_irq_enable();
preempt_enable();
}
-static inline void __spin_unlock_bh(spinlock_t *lock)
+static inline void __raw_spin_unlock_bh(raw_spinlock_t *lock)
{
spin_release(&lock->dep_map, 1, _RET_IP_);
- _raw_spin_unlock(lock);
- preempt_enable_no_resched();
- local_bh_enable_ip((unsigned long)__builtin_return_address(0));
-}
-
-static inline void __read_unlock_irqrestore(rwlock_t *lock, unsigned long flags)
-{
- rwlock_release(&lock->dep_map, 1, _RET_IP_);
- _raw_read_unlock(lock);
- local_irq_restore(flags);
- preempt_enable();
-}
-
-static inline void __read_unlock_irq(rwlock_t *lock)
-{
- rwlock_release(&lock->dep_map, 1, _RET_IP_);
- _raw_read_unlock(lock);
- local_irq_enable();
- preempt_enable();
-}
-
-static inline void __read_unlock_bh(rwlock_t *lock)
-{
- rwlock_release(&lock->dep_map, 1, _RET_IP_);
- _raw_read_unlock(lock);
+ do_raw_spin_unlock(lock);
preempt_enable_no_resched();
local_bh_enable_ip((unsigned long)__builtin_return_address(0));
}
-static inline void __write_unlock_irqrestore(rwlock_t *lock,
- unsigned long flags)
-{
- rwlock_release(&lock->dep_map, 1, _RET_IP_);
- _raw_write_unlock(lock);
- local_irq_restore(flags);
- preempt_enable();
-}
-
-static inline void __write_unlock_irq(rwlock_t *lock)
-{
- rwlock_release(&lock->dep_map, 1, _RET_IP_);
- _raw_write_unlock(lock);
- local_irq_enable();
- preempt_enable();
-}
-
-static inline void __write_unlock_bh(rwlock_t *lock)
-{
- rwlock_release(&lock->dep_map, 1, _RET_IP_);
- _raw_write_unlock(lock);
- preempt_enable_no_resched();
- local_bh_enable_ip((unsigned long)__builtin_return_address(0));
-}
-
-static inline int __spin_trylock_bh(spinlock_t *lock)
+static inline int __raw_spin_trylock_bh(raw_spinlock_t *lock)
{
local_bh_disable();
preempt_disable();
- if (_raw_spin_trylock(lock)) {
+ if (do_raw_spin_trylock(lock)) {
spin_acquire(&lock->dep_map, 0, 1, _RET_IP_);
return 1;
}
@@ -435,4 +191,6 @@ static inline int __spin_trylock_bh(spinlock_t *lock)
return 0;
}
+#include <linux/rwlock_api_smp.h>
+
#endif /* __LINUX_SPINLOCK_API_SMP_H */
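Note: the hunks above rename the SMP lock primitives from __spin_*() on spinlock_t to __raw_spin_*() on raw_spinlock_t and push the reader/writer variants out to <linux/rwlock_api_smp.h>. A minimal sketch of how a spinlock_t wrapper can now delegate to the renamed raw layer; the example_* names are illustrative, and only _raw_spin_lock()/_raw_spin_unlock() plus the .rlock member come from this series.

#include <linux/spinlock.h>

/*
 * Sketch only: a spinlock_t wrapper forwarding to the raw_spinlock_t
 * operations renamed in this patch.  "rlock" is the union member added
 * in spinlock_types.h further down.
 */
static __always_inline void example_spin_lock(spinlock_t *lock)
{
	_raw_spin_lock(&lock->rlock);
}

static __always_inline void example_spin_unlock(spinlock_t *lock)
{
	_raw_spin_unlock(&lock->rlock);
}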
diff --git a/include/linux/spinlock_api_up.h b/include/linux/spinlock_api_up.h
index 04e1d316457..af1f47229e7 100644
--- a/include/linux/spinlock_api_up.h
+++ b/include/linux/spinlock_api_up.h
@@ -16,7 +16,7 @@
#define in_lock_functions(ADDR) 0
-#define assert_spin_locked(lock) do { (void)(lock); } while (0)
+#define assert_raw_spin_locked(lock) do { (void)(lock); } while (0)
/*
* In the UP-nondebug case there's no real locking going on, so the
@@ -40,7 +40,8 @@
do { preempt_enable(); __release(lock); (void)(lock); } while (0)
#define __UNLOCK_BH(lock) \
- do { preempt_enable_no_resched(); local_bh_enable(); __release(lock); (void)(lock); } while (0)
+ do { preempt_enable_no_resched(); local_bh_enable(); \
+ __release(lock); (void)(lock); } while (0)
#define __UNLOCK_IRQ(lock) \
do { local_irq_enable(); __UNLOCK(lock); } while (0)
@@ -48,34 +49,37 @@
#define __UNLOCK_IRQRESTORE(lock, flags) \
do { local_irq_restore(flags); __UNLOCK(lock); } while (0)
-#define _spin_lock(lock) __LOCK(lock)
-#define _spin_lock_nested(lock, subclass) __LOCK(lock)
-#define _read_lock(lock) __LOCK(lock)
-#define _write_lock(lock) __LOCK(lock)
-#define _spin_lock_bh(lock) __LOCK_BH(lock)
-#define _read_lock_bh(lock) __LOCK_BH(lock)
-#define _write_lock_bh(lock) __LOCK_BH(lock)
-#define _spin_lock_irq(lock) __LOCK_IRQ(lock)
-#define _read_lock_irq(lock) __LOCK_IRQ(lock)
-#define _write_lock_irq(lock) __LOCK_IRQ(lock)
-#define _spin_lock_irqsave(lock, flags) __LOCK_IRQSAVE(lock, flags)
-#define _read_lock_irqsave(lock, flags) __LOCK_IRQSAVE(lock, flags)
-#define _write_lock_irqsave(lock, flags) __LOCK_IRQSAVE(lock, flags)
-#define _spin_trylock(lock) ({ __LOCK(lock); 1; })
-#define _read_trylock(lock) ({ __LOCK(lock); 1; })
-#define _write_trylock(lock) ({ __LOCK(lock); 1; })
-#define _spin_trylock_bh(lock) ({ __LOCK_BH(lock); 1; })
-#define _spin_unlock(lock) __UNLOCK(lock)
-#define _read_unlock(lock) __UNLOCK(lock)
-#define _write_unlock(lock) __UNLOCK(lock)
-#define _spin_unlock_bh(lock) __UNLOCK_BH(lock)
-#define _write_unlock_bh(lock) __UNLOCK_BH(lock)
-#define _read_unlock_bh(lock) __UNLOCK_BH(lock)
-#define _spin_unlock_irq(lock) __UNLOCK_IRQ(lock)
-#define _read_unlock_irq(lock) __UNLOCK_IRQ(lock)
-#define _write_unlock_irq(lock) __UNLOCK_IRQ(lock)
-#define _spin_unlock_irqrestore(lock, flags) __UNLOCK_IRQRESTORE(lock, flags)
-#define _read_unlock_irqrestore(lock, flags) __UNLOCK_IRQRESTORE(lock, flags)
-#define _write_unlock_irqrestore(lock, flags) __UNLOCK_IRQRESTORE(lock, flags)
+#define _raw_spin_lock(lock) __LOCK(lock)
+#define _raw_spin_lock_nested(lock, subclass) __LOCK(lock)
+#define _raw_read_lock(lock) __LOCK(lock)
+#define _raw_write_lock(lock) __LOCK(lock)
+#define _raw_spin_lock_bh(lock) __LOCK_BH(lock)
+#define _raw_read_lock_bh(lock) __LOCK_BH(lock)
+#define _raw_write_lock_bh(lock) __LOCK_BH(lock)
+#define _raw_spin_lock_irq(lock) __LOCK_IRQ(lock)
+#define _raw_read_lock_irq(lock) __LOCK_IRQ(lock)
+#define _raw_write_lock_irq(lock) __LOCK_IRQ(lock)
+#define _raw_spin_lock_irqsave(lock, flags) __LOCK_IRQSAVE(lock, flags)
+#define _raw_read_lock_irqsave(lock, flags) __LOCK_IRQSAVE(lock, flags)
+#define _raw_write_lock_irqsave(lock, flags) __LOCK_IRQSAVE(lock, flags)
+#define _raw_spin_trylock(lock) ({ __LOCK(lock); 1; })
+#define _raw_read_trylock(lock) ({ __LOCK(lock); 1; })
+#define _raw_write_trylock(lock) ({ __LOCK(lock); 1; })
+#define _raw_spin_trylock_bh(lock) ({ __LOCK_BH(lock); 1; })
+#define _raw_spin_unlock(lock) __UNLOCK(lock)
+#define _raw_read_unlock(lock) __UNLOCK(lock)
+#define _raw_write_unlock(lock) __UNLOCK(lock)
+#define _raw_spin_unlock_bh(lock) __UNLOCK_BH(lock)
+#define _raw_write_unlock_bh(lock) __UNLOCK_BH(lock)
+#define _raw_read_unlock_bh(lock) __UNLOCK_BH(lock)
+#define _raw_spin_unlock_irq(lock) __UNLOCK_IRQ(lock)
+#define _raw_read_unlock_irq(lock) __UNLOCK_IRQ(lock)
+#define _raw_write_unlock_irq(lock) __UNLOCK_IRQ(lock)
+#define _raw_spin_unlock_irqrestore(lock, flags) \
+ __UNLOCK_IRQRESTORE(lock, flags)
+#define _raw_read_unlock_irqrestore(lock, flags) \
+ __UNLOCK_IRQRESTORE(lock, flags)
+#define _raw_write_unlock_irqrestore(lock, flags) \
+ __UNLOCK_IRQRESTORE(lock, flags)
#endif /* __LINUX_SPINLOCK_API_UP_H */
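Note: on UP these _raw_* entry points do no real locking; __LOCK()/__UNLOCK() only toggle preemption (plus irq/bh state for the _irq/_bh variants) and keep the sparse annotations honest. A hedged sketch of what a plain critical section reduces to under them; example_lock, example_counter and example_bump are illustrative names.

#include <linux/spinlock.h>

static DEFINE_SPINLOCK(example_lock);
static unsigned long example_counter;

/* Sketch only: on UP-nondebug builds this pair compiles down to
 * preempt_disable()/preempt_enable() around the increment. */
static void example_bump(void)
{
	spin_lock(&example_lock);
	example_counter++;
	spin_unlock(&example_lock);
}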
diff --git a/include/linux/spinlock_types.h b/include/linux/spinlock_types.h
index 68d88f71f1a..851b7783720 100644
--- a/include/linux/spinlock_types.h
+++ b/include/linux/spinlock_types.h
@@ -17,8 +17,8 @@
#include <linux/lockdep.h>
-typedef struct {
- raw_spinlock_t raw_lock;
+typedef struct raw_spinlock {
+ arch_spinlock_t raw_lock;
#ifdef CONFIG_GENERIC_LOCKBREAK
unsigned int break_lock;
#endif
@@ -29,26 +29,10 @@ typedef struct {
#ifdef CONFIG_DEBUG_LOCK_ALLOC
struct lockdep_map dep_map;
#endif
-} spinlock_t;
+} raw_spinlock_t;
#define SPINLOCK_MAGIC 0xdead4ead
-typedef struct {
- raw_rwlock_t raw_lock;
-#ifdef CONFIG_GENERIC_LOCKBREAK
- unsigned int break_lock;
-#endif
-#ifdef CONFIG_DEBUG_SPINLOCK
- unsigned int magic, owner_cpu;
- void *owner;
-#endif
-#ifdef CONFIG_DEBUG_LOCK_ALLOC
- struct lockdep_map dep_map;
-#endif
-} rwlock_t;
-
-#define RWLOCK_MAGIC 0xdeaf1eed
-
#define SPINLOCK_OWNER_INIT ((void *)-1L)
#ifdef CONFIG_DEBUG_LOCK_ALLOC
@@ -57,44 +41,56 @@ typedef struct {
# define SPIN_DEP_MAP_INIT(lockname)
#endif
-#ifdef CONFIG_DEBUG_LOCK_ALLOC
-# define RW_DEP_MAP_INIT(lockname) .dep_map = { .name = #lockname }
+#ifdef CONFIG_DEBUG_SPINLOCK
+# define SPIN_DEBUG_INIT(lockname) \
+ .magic = SPINLOCK_MAGIC, \
+ .owner_cpu = -1, \
+ .owner = SPINLOCK_OWNER_INIT,
#else
-# define RW_DEP_MAP_INIT(lockname)
+# define SPIN_DEBUG_INIT(lockname)
#endif
-#ifdef CONFIG_DEBUG_SPINLOCK
-# define __SPIN_LOCK_UNLOCKED(lockname) \
- (spinlock_t) { .raw_lock = __RAW_SPIN_LOCK_UNLOCKED, \
- .magic = SPINLOCK_MAGIC, \
- .owner = SPINLOCK_OWNER_INIT, \
- .owner_cpu = -1, \
- SPIN_DEP_MAP_INIT(lockname) }
-#define __RW_LOCK_UNLOCKED(lockname) \
- (rwlock_t) { .raw_lock = __RAW_RW_LOCK_UNLOCKED, \
- .magic = RWLOCK_MAGIC, \
- .owner = SPINLOCK_OWNER_INIT, \
- .owner_cpu = -1, \
- RW_DEP_MAP_INIT(lockname) }
-#else
-# define __SPIN_LOCK_UNLOCKED(lockname) \
- (spinlock_t) { .raw_lock = __RAW_SPIN_LOCK_UNLOCKED, \
- SPIN_DEP_MAP_INIT(lockname) }
-#define __RW_LOCK_UNLOCKED(lockname) \
- (rwlock_t) { .raw_lock = __RAW_RW_LOCK_UNLOCKED, \
- RW_DEP_MAP_INIT(lockname) }
+#define __RAW_SPIN_LOCK_INITIALIZER(lockname) \
+ { \
+ .raw_lock = __ARCH_SPIN_LOCK_UNLOCKED, \
+ SPIN_DEBUG_INIT(lockname) \
+ SPIN_DEP_MAP_INIT(lockname) }
+
+#define __RAW_SPIN_LOCK_UNLOCKED(lockname) \
+ (raw_spinlock_t) __RAW_SPIN_LOCK_INITIALIZER(lockname)
+
+#define DEFINE_RAW_SPINLOCK(x) raw_spinlock_t x = __RAW_SPIN_LOCK_UNLOCKED(x)
+
+typedef struct spinlock {
+ union {
+ struct raw_spinlock rlock;
+
+#ifdef CONFIG_DEBUG_LOCK_ALLOC
+# define LOCK_PADSIZE (offsetof(struct raw_spinlock, dep_map))
+ struct {
+ u8 __padding[LOCK_PADSIZE];
+ struct lockdep_map dep_map;
+ };
#endif
+ };
+} spinlock_t;
+
+#define __SPIN_LOCK_INITIALIZER(lockname) \
+ { { .rlock = __RAW_SPIN_LOCK_INITIALIZER(lockname) } }
+
+#define __SPIN_LOCK_UNLOCKED(lockname) \
+ (spinlock_t ) __SPIN_LOCK_INITIALIZER(lockname)
/*
- * SPIN_LOCK_UNLOCKED and RW_LOCK_UNLOCKED defeat lockdep state tracking and
- * are hence deprecated.
- * Please use DEFINE_SPINLOCK()/DEFINE_RWLOCK() or
- * __SPIN_LOCK_UNLOCKED()/__RW_LOCK_UNLOCKED() as appropriate.
+ * SPIN_LOCK_UNLOCKED defeats lockdep state tracking and is hence
+ * deprecated.
+ * Please use DEFINE_SPINLOCK() or __SPIN_LOCK_UNLOCKED() as
+ * appropriate.
*/
#define SPIN_LOCK_UNLOCKED __SPIN_LOCK_UNLOCKED(old_style_spin_init)
-#define RW_LOCK_UNLOCKED __RW_LOCK_UNLOCKED(old_style_rw_init)
#define DEFINE_SPINLOCK(x) spinlock_t x = __SPIN_LOCK_UNLOCKED(x)
-#define DEFINE_RWLOCK(x) rwlock_t x = __RW_LOCK_UNLOCKED(x)
+
+#include <linux/rwlock_types.h>
#endif /* __LINUX_SPINLOCK_TYPES_H */
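Note: with the split above, raw_spinlock_t keeps the arch-backed lock while spinlock_t becomes a one-member wrapper around it (the union only exists to keep dep_map at a stable offset for lockdep). A short sketch of declaring both kinds with the new initializers; struct example_dev is illustrative, and the *_lock_init() calls assume the matching helpers in <linux/spinlock.h>.

#include <linux/spinlock.h>

/* Static initialization with the helpers introduced in this hunk. */
static DEFINE_RAW_SPINLOCK(example_raw_lock);	/* raw_spinlock_t */
static DEFINE_SPINLOCK(example_lock);		/* spinlock_t wrapping .rlock */

/* Run-time initialization of embedded locks (illustrative struct). */
struct example_dev {
	spinlock_t	lock;
	raw_spinlock_t	hw_lock;
};

static void example_dev_init(struct example_dev *dev)
{
	spin_lock_init(&dev->lock);
	raw_spin_lock_init(&dev->hw_lock);
}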
diff --git a/include/linux/spinlock_types_up.h b/include/linux/spinlock_types_up.h
index 04135b0e198..c09b6407ae1 100644
--- a/include/linux/spinlock_types_up.h
+++ b/include/linux/spinlock_types_up.h
@@ -16,22 +16,22 @@
typedef struct {
volatile unsigned int slock;
-} raw_spinlock_t;
+} arch_spinlock_t;
-#define __RAW_SPIN_LOCK_UNLOCKED { 1 }
+#define __ARCH_SPIN_LOCK_UNLOCKED { 1 }
#else
-typedef struct { } raw_spinlock_t;
+typedef struct { } arch_spinlock_t;
-#define __RAW_SPIN_LOCK_UNLOCKED { }
+#define __ARCH_SPIN_LOCK_UNLOCKED { }
#endif
typedef struct {
/* no debug version on UP */
-} raw_rwlock_t;
+} arch_rwlock_t;
-#define __RAW_RW_LOCK_UNLOCKED { }
+#define __ARCH_RW_LOCK_UNLOCKED { }
#endif /* __LINUX_SPINLOCK_TYPES_UP_H */
diff --git a/include/linux/spinlock_up.h b/include/linux/spinlock_up.h
index d4841ed8215..b14f6a91e19 100644
--- a/include/linux/spinlock_up.h
+++ b/include/linux/spinlock_up.h
@@ -18,21 +18,21 @@
*/
#ifdef CONFIG_DEBUG_SPINLOCK
-#define __raw_spin_is_locked(x) ((x)->slock == 0)
+#define arch_spin_is_locked(x) ((x)->slock == 0)
-static inline void __raw_spin_lock(raw_spinlock_t *lock)
+static inline void arch_spin_lock(arch_spinlock_t *lock)
{
lock->slock = 0;
}
static inline void
-__raw_spin_lock_flags(raw_spinlock_t *lock, unsigned long flags)
+arch_spin_lock_flags(arch_spinlock_t *lock, unsigned long flags)
{
local_irq_save(flags);
lock->slock = 0;
}
-static inline int __raw_spin_trylock(raw_spinlock_t *lock)
+static inline int arch_spin_trylock(arch_spinlock_t *lock)
{
char oldval = lock->slock;
@@ -41,7 +41,7 @@ static inline int __raw_spin_trylock(raw_spinlock_t *lock)
return oldval > 0;
}
-static inline void __raw_spin_unlock(raw_spinlock_t *lock)
+static inline void arch_spin_unlock(arch_spinlock_t *lock)
{
lock->slock = 1;
}
@@ -49,28 +49,28 @@ static inline void __raw_spin_unlock(raw_spinlock_t *lock)
/*
* Read-write spinlocks. No debug version.
*/
-#define __raw_read_lock(lock) do { (void)(lock); } while (0)
-#define __raw_write_lock(lock) do { (void)(lock); } while (0)
-#define __raw_read_trylock(lock) ({ (void)(lock); 1; })
-#define __raw_write_trylock(lock) ({ (void)(lock); 1; })
-#define __raw_read_unlock(lock) do { (void)(lock); } while (0)
-#define __raw_write_unlock(lock) do { (void)(lock); } while (0)
+#define arch_read_lock(lock) do { (void)(lock); } while (0)
+#define arch_write_lock(lock) do { (void)(lock); } while (0)
+#define arch_read_trylock(lock) ({ (void)(lock); 1; })
+#define arch_write_trylock(lock) ({ (void)(lock); 1; })
+#define arch_read_unlock(lock) do { (void)(lock); } while (0)
+#define arch_write_unlock(lock) do { (void)(lock); } while (0)
#else /* DEBUG_SPINLOCK */
-#define __raw_spin_is_locked(lock) ((void)(lock), 0)
+#define arch_spin_is_locked(lock) ((void)(lock), 0)
/* for sched.c and kernel_lock.c: */
-# define __raw_spin_lock(lock) do { (void)(lock); } while (0)
-# define __raw_spin_lock_flags(lock, flags) do { (void)(lock); } while (0)
-# define __raw_spin_unlock(lock) do { (void)(lock); } while (0)
-# define __raw_spin_trylock(lock) ({ (void)(lock); 1; })
+# define arch_spin_lock(lock) do { (void)(lock); } while (0)
+# define arch_spin_lock_flags(lock, flags) do { (void)(lock); } while (0)
+# define arch_spin_unlock(lock) do { (void)(lock); } while (0)
+# define arch_spin_trylock(lock) ({ (void)(lock); 1; })
#endif /* DEBUG_SPINLOCK */
-#define __raw_spin_is_contended(lock) (((void)(lock), 0))
+#define arch_spin_is_contended(lock) (((void)(lock), 0))
-#define __raw_read_can_lock(lock) (((void)(lock), 1))
-#define __raw_write_can_lock(lock) (((void)(lock), 1))
+#define arch_read_can_lock(lock) (((void)(lock), 1))
+#define arch_write_can_lock(lock) (((void)(lock), 1))
-#define __raw_spin_unlock_wait(lock) \
- do { cpu_relax(); } while (__raw_spin_is_locked(lock))
+#define arch_spin_unlock_wait(lock) \
+ do { cpu_relax(); } while (arch_spin_is_locked(lock))
#endif /* __LINUX_SPINLOCK_UP_H */
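Note: the UP stubs above define the renamed arch_* surface that SMP architectures implement against arch_spinlock_t. Purely as an illustration of that surface (not code from this patch, and assuming the architecture's xchg()/cpu_relax()/smp_mb() helpers), a naive test-and-set variant could look like this:

/* Illustration only: a toy test-and-set version of the renamed hooks. */
typedef struct {
	volatile unsigned int slock;		/* 0 = unlocked, 1 = locked */
} example_arch_spinlock_t;

static inline void example_arch_spin_lock(example_arch_spinlock_t *lock)
{
	while (xchg(&lock->slock, 1))
		cpu_relax();			/* spin until the holder stores 0 */
}

static inline int example_arch_spin_trylock(example_arch_spinlock_t *lock)
{
	return xchg(&lock->slock, 1) == 0;
}

static inline void example_arch_spin_unlock(example_arch_spinlock_t *lock)
{
	smp_mb();				/* illustrative release ordering */
	lock->slock = 0;
}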
diff --git a/include/linux/string.h b/include/linux/string.h
index b8508868d5a..651839a2a75 100644
--- a/include/linux/string.h
+++ b/include/linux/string.h
@@ -62,7 +62,15 @@ extern char * strnchr(const char *, size_t, int);
#ifndef __HAVE_ARCH_STRRCHR
extern char * strrchr(const char *,int);
#endif
-extern char * __must_check strstrip(char *);
+extern char * __must_check skip_spaces(const char *);
+
+extern char *strim(char *);
+
+static inline __must_check char *strstrip(char *str)
+{
+ return strim(str);
+}
+
#ifndef __HAVE_ARCH_STRSTR
extern char * strstr(const char *,const char *);
#endif
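Note: string.h replaces the old out-of-line strstrip() with strim() plus a skip_spaces() helper, keeping strstrip() as a __must_check inline wrapper so existing callers still build. A small usage sketch; example_trim() and the buffer contents are illustrative.

#include <linux/kernel.h>
#include <linux/string.h>

static void example_trim(void)
{
	char buf[] = "   key = value   \n";
	char *trimmed = strim(buf);	/* strips trailing, skips leading
					 * whitespace; buf is modified in place */

	pr_info("'%s' / '%s'\n", trimmed, skip_spaces("   42 MB"));
	/* prints: 'key = value' / '42 MB' */
}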
diff --git a/include/linux/sunrpc/sched.h b/include/linux/sunrpc/sched.h
index 401097781fc..1906782ec86 100644
--- a/include/linux/sunrpc/sched.h
+++ b/include/linux/sunrpc/sched.h
@@ -130,12 +130,14 @@ struct rpc_task_setup {
#define RPC_TASK_DYNAMIC 0x0080 /* task was kmalloc'ed */
#define RPC_TASK_KILLED 0x0100 /* task was killed */
#define RPC_TASK_SOFT 0x0200 /* Use soft timeouts */
+#define RPC_TASK_SOFTCONN 0x0400 /* Fail if can't connect */
#define RPC_IS_ASYNC(t) ((t)->tk_flags & RPC_TASK_ASYNC)
#define RPC_IS_SWAPPER(t) ((t)->tk_flags & RPC_TASK_SWAPPER)
#define RPC_DO_ROOTOVERRIDE(t) ((t)->tk_flags & RPC_TASK_ROOTCREDS)
#define RPC_ASSASSINATED(t) ((t)->tk_flags & RPC_TASK_KILLED)
#define RPC_IS_SOFT(t) ((t)->tk_flags & RPC_TASK_SOFT)
+#define RPC_IS_SOFTCONN(t) ((t)->tk_flags & RPC_TASK_SOFTCONN)
#define RPC_TASK_RUNNING 0
#define RPC_TASK_QUEUED 1
diff --git a/include/linux/swap.h b/include/linux/swap.h
index 4ec90019c1a..a2602a8207a 100644
--- a/include/linux/swap.h
+++ b/include/linux/swap.h
@@ -145,38 +145,43 @@ enum {
SWP_DISCARDABLE = (1 << 2), /* blkdev supports discard */
SWP_DISCARDING = (1 << 3), /* now discarding a free cluster */
SWP_SOLIDSTATE = (1 << 4), /* blkdev seeks are cheap */
+ SWP_CONTINUED = (1 << 5), /* swap_map has count continuation */
/* add others here before... */
SWP_SCANNING = (1 << 8), /* refcount in scan_swap_map */
};
#define SWAP_CLUSTER_MAX 32
-#define SWAP_MAP_MAX 0x7ffe
-#define SWAP_MAP_BAD 0x7fff
-#define SWAP_HAS_CACHE 0x8000 /* There is a swap cache of entry. */
-#define SWAP_COUNT_MASK (~SWAP_HAS_CACHE)
+#define SWAP_MAP_MAX 0x3e /* Max duplication count, in first swap_map */
+#define SWAP_MAP_BAD 0x3f /* Note pageblock is bad, in first swap_map */
+#define SWAP_HAS_CACHE 0x40 /* Flag page is cached, in first swap_map */
+#define SWAP_CONT_MAX 0x7f /* Max count, in each swap_map continuation */
+#define COUNT_CONTINUED 0x80 /* See swap_map continuation for full count */
+#define SWAP_MAP_SHMEM 0xbf /* Owned by shmem/tmpfs, in first swap_map */
+
/*
* The in-memory structure used to track swap areas.
*/
struct swap_info_struct {
- unsigned long flags;
- int prio; /* swap priority */
- int next; /* next entry on swap list */
- struct file *swap_file;
- struct block_device *bdev;
- struct list_head extent_list;
- struct swap_extent *curr_swap_extent;
- unsigned short *swap_map;
- unsigned int lowest_bit;
- unsigned int highest_bit;
+ unsigned long flags; /* SWP_USED etc: see above */
+ signed short prio; /* swap priority of this type */
+ signed char type; /* strange name for an index */
+ signed char next; /* next type on the swap list */
+ unsigned int max; /* extent of the swap_map */
+ unsigned char *swap_map; /* vmalloc'ed array of usage counts */
+ unsigned int lowest_bit; /* index of first free in swap_map */
+ unsigned int highest_bit; /* index of last free in swap_map */
+ unsigned int pages; /* total of usable pages of swap */
+ unsigned int inuse_pages; /* number of those currently in use */
+ unsigned int cluster_next; /* likely index for next allocation */
+ unsigned int cluster_nr; /* countdown to next cluster search */
unsigned int lowest_alloc; /* while preparing discard cluster */
unsigned int highest_alloc; /* while preparing discard cluster */
- unsigned int cluster_next;
- unsigned int cluster_nr;
- unsigned int pages;
- unsigned int max;
- unsigned int inuse_pages;
- unsigned int old_block_size;
+ struct swap_extent *curr_swap_extent;
+ struct swap_extent first_swap_extent;
+ struct block_device *bdev; /* swap device or bdev of swap file */
+ struct file *swap_file; /* seldom referenced */
+ unsigned int old_block_size; /* seldom referenced */
};
struct swap_list_t {
@@ -273,6 +278,7 @@ extern int scan_unevictable_register_node(struct node *node);
extern void scan_unevictable_unregister_node(struct node *node);
extern int kswapd_run(int nid);
+extern void kswapd_stop(int nid);
#ifdef CONFIG_MMU
/* linux/mm/shmem.c */
@@ -309,17 +315,18 @@ extern long total_swap_pages;
extern void si_swapinfo(struct sysinfo *);
extern swp_entry_t get_swap_page(void);
extern swp_entry_t get_swap_page_of_type(int);
-extern void swap_duplicate(swp_entry_t);
-extern int swapcache_prepare(swp_entry_t);
extern int valid_swaphandles(swp_entry_t, unsigned long *);
+extern int add_swap_count_continuation(swp_entry_t, gfp_t);
+extern void swap_shmem_alloc(swp_entry_t);
+extern int swap_duplicate(swp_entry_t);
+extern int swapcache_prepare(swp_entry_t);
extern void swap_free(swp_entry_t);
extern void swapcache_free(swp_entry_t, struct page *page);
extern int free_swap_and_cache(swp_entry_t);
extern int swap_type_of(dev_t, sector_t, struct block_device **);
extern unsigned int count_swap_pages(int, int);
-extern sector_t map_swap_page(struct swap_info_struct *, pgoff_t);
+extern sector_t map_swap_page(struct page *, struct block_device **);
extern sector_t swapdev_block(int, pgoff_t);
-extern struct swap_info_struct *get_swap_info_struct(unsigned);
extern int reuse_swap_page(struct page *);
extern int try_to_free_swap(struct page *);
struct backing_dev_info;
@@ -384,8 +391,18 @@ static inline void show_swap_cache_info(void)
#define free_swap_and_cache(swp) is_migration_entry(swp)
#define swapcache_prepare(swp) is_migration_entry(swp)
-static inline void swap_duplicate(swp_entry_t swp)
+static inline int add_swap_count_continuation(swp_entry_t swp, gfp_t gfp_mask)
{
+ return 0;
+}
+
+static inline void swap_shmem_alloc(swp_entry_t swp)
+{
+}
+
+static inline int swap_duplicate(swp_entry_t swp)
+{
+ return 0;
}
static inline void swap_free(swp_entry_t swp)
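Note: swap_duplicate() now returns an int because the per-entry reference count lives in a single byte (SWAP_MAP_MAX is 0x3e) and overflows into continuation pages; callers are expected to allocate a continuation with add_swap_count_continuation() and retry. A hedged sketch of that pattern; example_dup_swap() is an illustrative name and the -ENOMEM return value is an assumption based on the mm callers, not spelled out in this header.

#include <linux/errno.h>
#include <linux/gfp.h>
#include <linux/swap.h>

/* Sketch only: retry a duplicate that failed for want of a continuation. */
static int example_dup_swap(swp_entry_t entry)
{
	while (swap_duplicate(entry) == -ENOMEM) {
		int err = add_swap_count_continuation(entry, GFP_KERNEL);

		if (err)
			return err;	/* continuation page allocation failed */
	}
	return 0;
}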
diff --git a/include/linux/tty.h b/include/linux/tty.h
index 405a9035fe4..ef3a2947b10 100644
--- a/include/linux/tty.h
+++ b/include/linux/tty.h
@@ -350,8 +350,6 @@ extern void tty_write_flush(struct tty_struct *);
extern struct ktermios tty_std_termios;
-extern int kmsg_redirect;
-
extern void console_init(void);
extern int vcs_init(void);
diff --git a/include/linux/vmstat.h b/include/linux/vmstat.h
index 2d0f222388a..ee03bba9c5d 100644
--- a/include/linux/vmstat.h
+++ b/include/linux/vmstat.h
@@ -40,6 +40,8 @@ enum vm_event_item { PGPGIN, PGPGOUT, PSWPIN, PSWPOUT,
PGSCAN_ZONE_RECLAIM_FAILED,
#endif
PGINODESTEAL, SLABS_SCANNED, KSWAPD_STEAL, KSWAPD_INODESTEAL,
+ KSWAPD_LOW_WMARK_HIT_QUICKLY, KSWAPD_HIGH_WMARK_HIT_QUICKLY,
+ KSWAPD_SKIP_CONGESTION_WAIT,
PAGEOUTRUN, ALLOCSTALL, PGROTATED,
#ifdef CONFIG_HUGETLB_PAGE
HTLB_BUDDY_PGALLOC, HTLB_BUDDY_PGALLOC_FAIL,
@@ -76,24 +78,22 @@ DECLARE_PER_CPU(struct vm_event_state, vm_event_states);
static inline void __count_vm_event(enum vm_event_item item)
{
- __get_cpu_var(vm_event_states).event[item]++;
+ __this_cpu_inc(per_cpu_var(vm_event_states).event[item]);
}
static inline void count_vm_event(enum vm_event_item item)
{
- get_cpu_var(vm_event_states).event[item]++;
- put_cpu();
+ this_cpu_inc(per_cpu_var(vm_event_states).event[item]);
}
static inline void __count_vm_events(enum vm_event_item item, long delta)
{
- __get_cpu_var(vm_event_states).event[item] += delta;
+ __this_cpu_add(per_cpu_var(vm_event_states).event[item], delta);
}
static inline void count_vm_events(enum vm_event_item item, long delta)
{
- get_cpu_var(vm_event_states).event[item] += delta;
- put_cpu();
+ this_cpu_add(per_cpu_var(vm_event_states).event[item], delta);
}
extern void all_vm_events(unsigned long *);
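Note: the vm-event helpers switch from get_cpu_var()/put_cpu() pairs to this_cpu_*() operations, which are preempt-safe without pinning the CPU explicitly (on x86 they become a single per-cpu-addressed instruction). A hedged sketch of the same pattern on a private counter; example_events and the functions around it are illustrative, and per_cpu_var() is the same address-prefix wrapper used in the hunk above.

#include <linux/percpu.h>

static DEFINE_PER_CPU(unsigned long, example_events);

static void example_count_event(void)
{
	this_cpu_inc(per_cpu_var(example_events));	/* no get_cpu()/put_cpu() */
}

static unsigned long example_sum_events(void)
{
	unsigned long sum = 0;
	int cpu;

	for_each_possible_cpu(cpu)
		sum += per_cpu(example_events, cpu);
	return sum;
}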
diff --git a/include/linux/vt.h b/include/linux/vt.h
index 7ffa11f0623..3fb9944e50a 100644
--- a/include/linux/vt.h
+++ b/include/linux/vt.h
@@ -84,4 +84,19 @@ struct vt_setactivate {
#define VT_SETACTIVATE 0x560F /* Activate and set the mode of a console */
+#ifdef CONFIG_VT_CONSOLE
+
+extern int vt_kmsg_redirect(int new);
+
+#else
+
+static inline int vt_kmsg_redirect(int new)
+{
+ return 0;
+}
+
+#endif
+
+#define vt_get_kmsg_redirect() vt_kmsg_redirect(-1)
+
#endif /* _LINUX_VT_H */
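Note: together with the tty.h hunk above (which drops the kmsg_redirect global), console-message redirection now goes through vt_kmsg_redirect(): a non-negative argument installs a new target console and returns the previous one, while vt_get_kmsg_redirect() passes -1 to query without changing anything. A short, hedged usage sketch; the example_* functions and the console number 3 are illustrative.

#include <linux/vt.h>

/* Sketch only: redirect kernel messages to VT 3, then restore the old target. */
static void example_redirect_kmsg(void)
{
	int old = vt_kmsg_redirect(3);	/* install new target, get previous */

	/* ... emit messages ... */

	vt_kmsg_redirect(old);		/* restore */
}

static int example_query_kmsg_target(void)
{
	return vt_get_kmsg_redirect();	/* vt_kmsg_redirect(-1): query only */
}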
diff --git a/include/net/neighbour.h b/include/net/neighbour.h
index 0302f31a2fb..b0173202cad 100644
--- a/include/net/neighbour.h
+++ b/include/net/neighbour.h
@@ -88,12 +88,7 @@ struct neigh_statistics {
unsigned long unres_discards; /* number of unresolved drops */
};
-#define NEIGH_CACHE_STAT_INC(tbl, field) \
- do { \
- preempt_disable(); \
- (per_cpu_ptr((tbl)->stats, smp_processor_id())->field)++; \
- preempt_enable(); \
- } while (0)
+#define NEIGH_CACHE_STAT_INC(tbl, field) this_cpu_inc((tbl)->stats->field)
struct neighbour {
struct neighbour *next;
diff --git a/include/net/netfilter/nf_conntrack.h b/include/net/netfilter/nf_conntrack.h
index 5cf7270e3ff..a0904adfb8f 100644
--- a/include/net/netfilter/nf_conntrack.h
+++ b/include/net/netfilter/nf_conntrack.h
@@ -293,11 +293,11 @@ extern unsigned int nf_conntrack_htable_size;
extern unsigned int nf_conntrack_max;
#define NF_CT_STAT_INC(net, count) \
- (per_cpu_ptr((net)->ct.stat, raw_smp_processor_id())->count++)
+ __this_cpu_inc((net)->ct.stat->count)
#define NF_CT_STAT_INC_ATOMIC(net, count) \
do { \
local_bh_disable(); \
- per_cpu_ptr((net)->ct.stat, raw_smp_processor_id())->count++; \
+ __this_cpu_inc((net)->ct.stat->count); \
local_bh_enable(); \
} while (0)
diff --git a/include/net/snmp.h b/include/net/snmp.h
index 8c842e06bec..f0d756f2ac9 100644
--- a/include/net/snmp.h
+++ b/include/net/snmp.h
@@ -136,45 +136,31 @@ struct linux_xfrm_mib {
#define SNMP_STAT_BHPTR(name) (name[0])
#define SNMP_STAT_USRPTR(name) (name[1])
-#define SNMP_INC_STATS_BH(mib, field) \
- (per_cpu_ptr(mib[0], raw_smp_processor_id())->mibs[field]++)
-#define SNMP_INC_STATS_USER(mib, field) \
- do { \
- per_cpu_ptr(mib[1], get_cpu())->mibs[field]++; \
- put_cpu(); \
- } while (0)
-#define SNMP_INC_STATS(mib, field) \
- do { \
- per_cpu_ptr(mib[!in_softirq()], get_cpu())->mibs[field]++; \
- put_cpu(); \
- } while (0)
-#define SNMP_DEC_STATS(mib, field) \
- do { \
- per_cpu_ptr(mib[!in_softirq()], get_cpu())->mibs[field]--; \
- put_cpu(); \
- } while (0)
-#define SNMP_ADD_STATS(mib, field, addend) \
- do { \
- per_cpu_ptr(mib[!in_softirq()], get_cpu())->mibs[field] += addend; \
- put_cpu(); \
- } while (0)
-#define SNMP_ADD_STATS_BH(mib, field, addend) \
- (per_cpu_ptr(mib[0], raw_smp_processor_id())->mibs[field] += addend)
-#define SNMP_ADD_STATS_USER(mib, field, addend) \
- do { \
- per_cpu_ptr(mib[1], get_cpu())->mibs[field] += addend; \
- put_cpu(); \
- } while (0)
+#define SNMP_INC_STATS_BH(mib, field) \
+ __this_cpu_inc(mib[0]->mibs[field])
+#define SNMP_INC_STATS_USER(mib, field) \
+ this_cpu_inc(mib[1]->mibs[field])
+#define SNMP_INC_STATS(mib, field) \
+ this_cpu_inc(mib[!in_softirq()]->mibs[field])
+#define SNMP_DEC_STATS(mib, field) \
+ this_cpu_dec(mib[!in_softirq()]->mibs[field])
+#define SNMP_ADD_STATS_BH(mib, field, addend) \
+ __this_cpu_add(mib[0]->mibs[field], addend)
+#define SNMP_ADD_STATS_USER(mib, field, addend) \
+ this_cpu_add(mib[1]->mibs[field], addend)
#define SNMP_UPD_PO_STATS(mib, basefield, addend) \
do { \
- __typeof__(mib[0]) ptr = per_cpu_ptr(mib[!in_softirq()], get_cpu());\
+ __typeof__(mib[0]) ptr; \
+ preempt_disable(); \
+ ptr = this_cpu_ptr((mib)[!in_softirq()]); \
ptr->mibs[basefield##PKTS]++; \
ptr->mibs[basefield##OCTETS] += addend;\
- put_cpu(); \
+ preempt_enable(); \
} while (0)
#define SNMP_UPD_PO_STATS_BH(mib, basefield, addend) \
do { \
- __typeof__(mib[0]) ptr = per_cpu_ptr(mib[!in_softirq()], raw_smp_processor_id());\
+ __typeof__(mib[0]) ptr = \
+ __this_cpu_ptr((mib)[!in_softirq()]); \
ptr->mibs[basefield##PKTS]++; \
ptr->mibs[basefield##OCTETS] += addend;\
} while (0)
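Note: the neighbour, conntrack and SNMP statistics macros above all follow the same conversion: open-coded per_cpu_ptr()/get_cpu()/put_cpu() (or preempt_disable()) sequences collapse into this_cpu_*() and __this_cpu_*() operations. A hedged sketch of the pattern applied to a private stats block reached through an alloc_percpu() pointer; struct example_stats and the surrounding functions are illustrative.

#include <linux/errno.h>
#include <linux/percpu.h>

struct example_stats {
	unsigned long packets;
	unsigned long octets;
};

static struct example_stats *example_stats;	/* per-cpu, from alloc_percpu() */

static int example_stats_init(void)
{
	example_stats = alloc_percpu(struct example_stats);
	return example_stats ? 0 : -ENOMEM;
}

static void example_stats_rx(unsigned int len)
{
	this_cpu_inc(example_stats->packets);	/* preempt-safe, no cpu pinning */
	this_cpu_add(example_stats->octets, len);
}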
diff --git a/include/pcmcia/cs.h b/include/pcmcia/cs.h
index afc2bfb9e91..75fa3530345 100644
--- a/include/pcmcia/cs.h
+++ b/include/pcmcia/cs.h
@@ -126,8 +126,8 @@ typedef struct irq_req_t {
#define IRQ_TYPE_TIME 0x01
#define IRQ_TYPE_DYNAMIC_SHARING 0x02
#define IRQ_FORCED_PULSE 0x04
-#define IRQ_FIRST_SHARED 0x08
-//#define IRQ_HANDLE_PRESENT 0x10
+#define IRQ_FIRST_SHARED 0x08 /* unused */
+#define IRQ_HANDLE_PRESENT 0x10 /* unused */
#define IRQ_PULSE_ALLOCATED 0x100
/* Bits in IRQInfo1 field */
diff --git a/include/pcmcia/ds.h b/include/pcmcia/ds.h
index d403c12f797..ee148573c11 100644
--- a/include/pcmcia/ds.h
+++ b/include/pcmcia/ds.h
@@ -82,7 +82,7 @@ struct pcmcia_device {
/* the hardware "function" device; certain subdevices can
* share one hardware "function" device. */
u8 func;
- struct config_t* function_config;
+ struct config_t *function_config;
struct list_head socket_device_list;
@@ -121,14 +121,14 @@ struct pcmcia_device {
u16 manf_id;
u16 card_id;
- char * prod_id[4];
+ char *prod_id[4];
u64 dma_mask;
struct device dev;
#ifdef CONFIG_PCMCIA_IOCTL
/* device driver wanted by cardmgr */
- struct pcmcia_driver * cardmgr;
+ struct pcmcia_driver *cardmgr;
#endif
/* data private to drivers */
diff --git a/include/pcmcia/mem_op.h b/include/pcmcia/mem_op.h
index 8d19b9401a5..0fa06e5d537 100644
--- a/include/pcmcia/mem_op.h
+++ b/include/pcmcia/mem_op.h
@@ -15,8 +15,8 @@
#ifndef _LINUX_MEM_OP_H
#define _LINUX_MEM_OP_H
+#include <linux/io.h>
#include <asm/uaccess.h>
-#include <asm/io.h>
/*
If UNSAFE_MEMCPY is defined, we use the (optimized) system routines
diff --git a/include/pcmcia/ss.h b/include/pcmcia/ss.h
index 7c23be706f1..cbfba885eb8 100644
--- a/include/pcmcia/ss.h
+++ b/include/pcmcia/ss.h
@@ -154,7 +154,7 @@ struct pcmcia_socket {
struct list_head socket_list;
struct completion socket_released;
- /* deprecated */
+ /* deprecated */
unsigned int sock; /* socket number */
@@ -164,7 +164,7 @@ struct pcmcia_socket {
u_int map_size;
u_int io_offset;
u_int pci_irq;
- struct pci_dev * cb_dev;
+ struct pci_dev *cb_dev;
/* socket setup is done so resources should be able to be allocated.
@@ -179,9 +179,9 @@ struct pcmcia_socket {
u8 reserved:5;
/* socket operations */
- struct pccard_operations * ops;
- struct pccard_resource_ops * resource_ops;
- void * resource_data;
+ struct pccard_operations *ops;
+ struct pccard_resource_ops *resource_ops;
+ void *resource_data;
/* Zoom video behaviour is so chip specific its not worth adding
this to _ops */
@@ -245,7 +245,7 @@ struct pcmcia_socket {
/* cardbus (32-bit) */
#ifdef CONFIG_CARDBUS
- struct resource * cb_cis_res;
+ struct resource *cb_cis_res;
void __iomem *cb_cis_virt;
#endif /* CONFIG_CARDBUS */