Diffstat (limited to 'include/linux')
-rw-r--r--  include/linux/aio.h | 4
-rw-r--r--  include/linux/atmel-mci.h | 4
-rw-r--r--  include/linux/bitmap.h | 11
-rw-r--r--  include/linux/cpu.h | 15
-rw-r--r--  include/linux/cpumask.h | 2
-rw-r--r--  include/linux/cs5535.h | 172
-rw-r--r--  include/linux/ctype.h | 3
-rw-r--r--  include/linux/device-mapper.h | 8
-rw-r--r--  include/linux/dm-dirty-log.h | 6
-rw-r--r--  include/linux/dm-ioctl.h | 13
-rw-r--r--  include/linux/dm-region-hash.h | 3
-rw-r--r--  include/linux/dmaengine.h | 2
-rw-r--r--  include/linux/dynamic_debug.h | 13
-rw-r--r--  include/linux/efi.h | 6
-rw-r--r--  include/linux/err.h | 5
-rw-r--r--  include/linux/fs.h | 22
-rw-r--r--  include/linux/fsl_devices.h | 11
-rw-r--r--  include/linux/ftrace_event.h | 1
-rw-r--r--  include/linux/gpio.h | 6
-rw-r--r--  include/linux/hrtimer.h | 58
-rw-r--r--  include/linux/hugetlb.h | 6
-rw-r--r--  include/linux/hw_breakpoint.h | 40
-rw-r--r--  include/linux/i2c.h | 92
-rw-r--r--  include/linux/i2c/tps65010.h | 19
-rw-r--r--  include/linux/i2c/twl.h (renamed from include/linux/i2c/twl4030.h) | 209
-rw-r--r--  include/linux/i8042.h | 18
-rw-r--r--  include/linux/init_task.h | 10
-rw-r--r--  include/linux/intel-iommu.h | 1
-rw-r--r--  include/linux/iommu-helper.h | 3
-rw-r--r--  include/linux/ioport.h | 4
-rw-r--r--  include/linux/ipc_namespace.h | 2
-rw-r--r--  include/linux/irq.h | 2
-rw-r--r--  include/linux/kallsyms.h | 12
-rw-r--r--  include/linux/kernel.h | 55
-rw-r--r--  include/linux/kexec.h | 2
-rw-r--r--  include/linux/kmsg_dump.h | 60
-rw-r--r--  include/linux/ksm.h | 96
-rw-r--r--  include/linux/kvm.h | 1
-rw-r--r--  include/linux/lis3lv02d.h | 15
-rw-r--r--  include/linux/memcontrol.h | 17
-rw-r--r--  include/linux/memory_hotplug.h | 1
-rw-r--r--  include/linux/mempolicy.h | 3
-rw-r--r--  include/linux/mfd/88pm8607.h | 217
-rw-r--r--  include/linux/mfd/ab4500.h | 262
-rw-r--r--  include/linux/mfd/adp5520.h | 299
-rw-r--r--  include/linux/mfd/ezx-pcap.h | 3
-rw-r--r--  include/linux/mfd/mc13783-private.h | 208
-rw-r--r--  include/linux/mfd/mc13783.h | 120
-rw-r--r--  include/linux/mfd/pcf50633/core.h | 17
-rw-r--r--  include/linux/mfd/pcf50633/mbc.h | 1
-rw-r--r--  include/linux/mfd/wm831x/core.h | 43
-rw-r--r--  include/linux/mfd/wm831x/pdata.h | 1
-rw-r--r--  include/linux/mfd/wm8350/core.h | 14
-rw-r--r--  include/linux/mfd/wm8350/gpio.h | 18
-rw-r--r--  include/linux/migrate.h | 8
-rw-r--r--  include/linux/mlx4/device.h | 1
-rw-r--r--  include/linux/mm.h | 25
-rw-r--r--  include/linux/mtd/bbm.h | 35
-rw-r--r--  include/linux/mtd/cfi.h | 9
-rw-r--r--  include/linux/mtd/flashchip.h | 9
-rw-r--r--  include/linux/mtd/nand.h | 97
-rw-r--r--  include/linux/mtd/nand_ecc.h | 10
-rw-r--r--  include/linux/mtd/onenand.h | 23
-rw-r--r--  include/linux/mtd/onenand_regs.h | 2
-rw-r--r--  include/linux/nfs4.h | 3
-rw-r--r--  include/linux/nfs_fs_sb.h | 1
-rw-r--r--  include/linux/nfs_xdr.h | 13
-rw-r--r--  include/linux/node.h | 16
-rw-r--r--  include/linux/nodemask.h | 33
-rw-r--r--  include/linux/numa.h | 2
-rw-r--r--  include/linux/oom.h | 4
-rw-r--r--  include/linux/page-flags.h | 8
-rw-r--r--  include/linux/page_cgroup.h | 7
-rw-r--r--  include/linux/pci.h | 2
-rw-r--r--  include/linux/pci_ids.h | 14
-rw-r--r--  include/linux/percpu-defs.h | 1
-rw-r--r--  include/linux/percpu.h | 434
-rw-r--r--  include/linux/perf_event.h | 43
-rw-r--r--  include/linux/plist.h | 43
-rw-r--r--  include/linux/pm.h | 2
-rw-r--r--  include/linux/ptrace.h | 23
-rw-r--r--  include/linux/raid/pq.h | 19
-rw-r--r--  include/linux/reiserfs_fs.h | 35
-rw-r--r--  include/linux/rmap.h | 43
-rw-r--r--  include/linux/rtmutex.h | 6
-rw-r--r--  include/linux/rwlock.h | 125
-rw-r--r--  include/linux/rwlock_api_smp.h | 282
-rw-r--r--  include/linux/rwlock_types.h | 56
-rw-r--r--  include/linux/rwsem-spinlock.h | 6
-rw-r--r--  include/linux/sched.h | 41
-rw-r--r--  include/linux/sem.h | 5
-rw-r--r--  include/linux/slab_def.h | 4
-rw-r--r--  include/linux/slub_def.h | 4
-rw-r--r--  include/linux/sm501-regs.h | 2
-rw-r--r--  include/linux/spi/sh_msiof.h | 10
-rw-r--r--  include/linux/spi/xilinx_spi.h | 20
-rw-r--r--  include/linux/spinlock.h | 377
-rw-r--r--  include/linux/spinlock_api_smp.h | 360
-rw-r--r--  include/linux/spinlock_api_up.h | 66
-rw-r--r--  include/linux/spinlock_types.h | 92
-rw-r--r--  include/linux/spinlock_types_up.h | 12
-rw-r--r--  include/linux/spinlock_up.h | 42
-rw-r--r--  include/linux/string.h | 10
-rw-r--r--  include/linux/sunrpc/sched.h | 2
-rw-r--r--  include/linux/swap.h | 67
-rw-r--r--  include/linux/timb_gpio.h | 37
-rw-r--r--  include/linux/trace_seq.h | 7
-rw-r--r--  include/linux/tracehook.h | 7
-rw-r--r--  include/linux/tty.h | 2
-rw-r--r--  include/linux/usbdevice_fs.h | 26
-rw-r--r--  include/linux/videodev2.h | 123
-rw-r--r--  include/linux/vmstat.h | 12
-rw-r--r--  include/linux/vt.h | 15
113 files changed, 3625 insertions, 1394 deletions
diff --git a/include/linux/aio.h b/include/linux/aio.h
index aea219d7d8d..811dbb36937 100644
--- a/include/linux/aio.h
+++ b/include/linux/aio.h
@@ -102,7 +102,6 @@ struct kiocb {
} ki_obj;
__u64 ki_user_data; /* user's data for completion */
- wait_queue_t ki_wait;
loff_t ki_pos;
void *private;
@@ -140,7 +139,6 @@ struct kiocb {
(x)->ki_dtor = NULL; \
(x)->ki_obj.tsk = tsk; \
(x)->ki_user_data = 0; \
- init_wait((&(x)->ki_wait)); \
} while (0)
#define AIO_RING_MAGIC 0xa10a10a1
@@ -223,8 +221,6 @@ struct mm_struct;
static inline void exit_aio(struct mm_struct *mm) { }
#endif /* CONFIG_AIO */
-#define io_wait_to_kiocb(wait) container_of(wait, struct kiocb, ki_wait)
-
static inline struct kiocb *list_kiocb(struct list_head *h)
{
return list_entry(h, struct kiocb, ki_list);
diff --git a/include/linux/atmel-mci.h b/include/linux/atmel-mci.h
index 57b1846a3c8..3e09b345f4d 100644
--- a/include/linux/atmel-mci.h
+++ b/include/linux/atmel-mci.h
@@ -3,8 +3,6 @@
#define ATMEL_MCI_MAX_NR_SLOTS 2
-#include <linux/dw_dmac.h>
-
/**
* struct mci_slot_pdata - board-specific per-slot configuration
* @bus_width: Number of data lines wired up the slot
@@ -34,7 +32,7 @@ struct mci_slot_pdata {
* @slot: Per-slot configuration data.
*/
struct mci_platform_data {
- struct dw_dma_slave dma_slave;
+ struct mci_dma_data *dma_slave;
struct mci_slot_pdata slot[ATMEL_MCI_MAX_NR_SLOTS];
};
diff --git a/include/linux/bitmap.h b/include/linux/bitmap.h
index 756d78b8c1c..daf8c480c78 100644
--- a/include/linux/bitmap.h
+++ b/include/linux/bitmap.h
@@ -42,6 +42,9 @@
* bitmap_empty(src, nbits) Are all bits zero in *src?
* bitmap_full(src, nbits) Are all bits set in *src?
* bitmap_weight(src, nbits) Hamming Weight: number set bits
+ * bitmap_set(dst, pos, nbits) Set specified bit area
+ * bitmap_clear(dst, pos, nbits) Clear specified bit area
+ * bitmap_find_next_zero_area(buf, len, pos, n, mask) Find bit free area
* bitmap_shift_right(dst, src, n, nbits) *dst = *src >> n
* bitmap_shift_left(dst, src, n, nbits) *dst = *src << n
* bitmap_remap(dst, src, old, new, nbits) *dst = map(old, new)(src)
@@ -108,6 +111,14 @@ extern int __bitmap_subset(const unsigned long *bitmap1,
const unsigned long *bitmap2, int bits);
extern int __bitmap_weight(const unsigned long *bitmap, int bits);
+extern void bitmap_set(unsigned long *map, int i, int len);
+extern void bitmap_clear(unsigned long *map, int start, int nr);
+extern unsigned long bitmap_find_next_zero_area(unsigned long *map,
+ unsigned long size,
+ unsigned long start,
+ unsigned int nr,
+ unsigned long align_mask);
+
extern int bitmap_scnprintf(char *buf, unsigned int len,
const unsigned long *src, int nbits);
extern int __bitmap_parse(const char *buf, unsigned int buflen, int is_user,
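
[Editor's note: a minimal sketch of how the bitmap region helpers added above might be used; illustrative only, not part of this patch. The pool name and size are made up.]

#include <linux/bitmap.h>
#include <linux/errno.h>

#define POOL_SIZE 128	/* hypothetical number of slots being managed */

static DECLARE_BITMAP(pool_map, POOL_SIZE);

/* Reserve 'nr' contiguous slots; returns the first index or -ENOSPC. */
static int pool_alloc(unsigned int nr)
{
	unsigned long start;

	/* find a run of 'nr' zero bits, no alignment constraint (mask 0) */
	start = bitmap_find_next_zero_area(pool_map, POOL_SIZE, 0, nr, 0);
	if (start >= POOL_SIZE)
		return -ENOSPC;

	bitmap_set(pool_map, start, nr);	/* mark the area busy */
	return start;
}

static void pool_free(unsigned int start, unsigned int nr)
{
	bitmap_clear(pool_map, start, nr);	/* mark the area free again */
}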
diff --git a/include/linux/cpu.h b/include/linux/cpu.h
index 47536197ffd..e287863ac05 100644
--- a/include/linux/cpu.h
+++ b/include/linux/cpu.h
@@ -43,6 +43,8 @@ extern int sched_create_sysfs_power_savings_entries(struct sysdev_class *cls);
#ifdef CONFIG_HOTPLUG_CPU
extern void unregister_cpu(struct cpu *cpu);
+extern ssize_t arch_cpu_probe(const char *, size_t);
+extern ssize_t arch_cpu_release(const char *, size_t);
#endif
struct notifier_block;
@@ -115,6 +117,19 @@ extern void put_online_cpus(void);
#define unregister_hotcpu_notifier(nb) unregister_cpu_notifier(nb)
int cpu_down(unsigned int cpu);
+#ifdef CONFIG_ARCH_CPU_PROBE_RELEASE
+extern void cpu_hotplug_driver_lock(void);
+extern void cpu_hotplug_driver_unlock(void);
+#else
+static inline void cpu_hotplug_driver_lock(void)
+{
+}
+
+static inline void cpu_hotplug_driver_unlock(void)
+{
+}
+#endif
+
#else /* CONFIG_HOTPLUG_CPU */
#define get_online_cpus() do { } while (0)
diff --git a/include/linux/cpumask.h b/include/linux/cpumask.h
index 789cf5f920c..d77b54733c5 100644
--- a/include/linux/cpumask.h
+++ b/include/linux/cpumask.h
@@ -84,6 +84,7 @@ extern const struct cpumask *const cpu_active_mask;
#define num_online_cpus() cpumask_weight(cpu_online_mask)
#define num_possible_cpus() cpumask_weight(cpu_possible_mask)
#define num_present_cpus() cpumask_weight(cpu_present_mask)
+#define num_active_cpus() cpumask_weight(cpu_active_mask)
#define cpu_online(cpu) cpumask_test_cpu((cpu), cpu_online_mask)
#define cpu_possible(cpu) cpumask_test_cpu((cpu), cpu_possible_mask)
#define cpu_present(cpu) cpumask_test_cpu((cpu), cpu_present_mask)
@@ -92,6 +93,7 @@ extern const struct cpumask *const cpu_active_mask;
#define num_online_cpus() 1
#define num_possible_cpus() 1
#define num_present_cpus() 1
+#define num_active_cpus() 1
#define cpu_online(cpu) ((cpu) == 0)
#define cpu_possible(cpu) ((cpu) == 0)
#define cpu_present(cpu) ((cpu) == 0)
diff --git a/include/linux/cs5535.h b/include/linux/cs5535.h
new file mode 100644
index 00000000000..d5a1d4810b8
--- /dev/null
+++ b/include/linux/cs5535.h
@@ -0,0 +1,172 @@
+/*
+ * AMD CS5535/CS5536 definitions
+ * Copyright (C) 2006 Advanced Micro Devices, Inc.
+ * Copyright (C) 2009 Andres Salomon <dilinger@collabora.co.uk>
+ *
+ * This program is free software; you can redistribute it and/or
+ * modify it under the terms of version 2 of the GNU General Public License
+ * as published by the Free Software Foundation.
+ */
+
+#ifndef _CS5535_H
+#define _CS5535_H
+
+/* MSRs */
+#define MSR_GLIU_P2D_RO0 0x10000029
+
+#define MSR_LX_GLD_MSR_CONFIG 0x48002001
+#define MSR_LX_MSR_PADSEL 0x48002011 /* NOT 0x48000011; the data
+ * sheet has the wrong value */
+#define MSR_GLCP_SYS_RSTPLL 0x4C000014
+#define MSR_GLCP_DOTPLL 0x4C000015
+
+#define MSR_LBAR_SMB 0x5140000B
+#define MSR_LBAR_GPIO 0x5140000C
+#define MSR_LBAR_MFGPT 0x5140000D
+#define MSR_LBAR_ACPI 0x5140000E
+#define MSR_LBAR_PMS 0x5140000F
+
+#define MSR_DIVIL_SOFT_RESET 0x51400017
+
+#define MSR_PIC_YSEL_LOW 0x51400020
+#define MSR_PIC_YSEL_HIGH 0x51400021
+#define MSR_PIC_ZSEL_LOW 0x51400022
+#define MSR_PIC_ZSEL_HIGH 0x51400023
+#define MSR_PIC_IRQM_LPC 0x51400025
+
+#define MSR_MFGPT_IRQ 0x51400028
+#define MSR_MFGPT_NR 0x51400029
+#define MSR_MFGPT_SETUP 0x5140002B
+
+#define MSR_LX_SPARE_MSR 0x80000011 /* DC-specific */
+
+#define MSR_GX_GLD_MSR_CONFIG 0xC0002001
+#define MSR_GX_MSR_PADSEL 0xC0002011
+
+/* resource sizes */
+#define LBAR_GPIO_SIZE 0xFF
+#define LBAR_MFGPT_SIZE 0x40
+#define LBAR_ACPI_SIZE 0x40
+#define LBAR_PMS_SIZE 0x80
+
+/* VSA2 magic values */
+#define VSA_VRC_INDEX 0xAC1C
+#define VSA_VRC_DATA 0xAC1E
+#define VSA_VR_UNLOCK 0xFC53 /* unlock virtual register */
+#define VSA_VR_SIGNATURE 0x0003
+#define VSA_VR_MEM_SIZE 0x0200
+#define AMD_VSA_SIG 0x4132 /* signature is ascii 'VSA2' */
+#define GSW_VSA_SIG 0x534d /* General Software signature */
+
+#include <linux/io.h>
+
+static inline int cs5535_has_vsa2(void)
+{
+ static int has_vsa2 = -1;
+
+ if (has_vsa2 == -1) {
+ uint16_t val;
+
+ /*
+ * The VSA has virtual registers that we can query for a
+ * signature.
+ */
+ outw(VSA_VR_UNLOCK, VSA_VRC_INDEX);
+ outw(VSA_VR_SIGNATURE, VSA_VRC_INDEX);
+
+ val = inw(VSA_VRC_DATA);
+ has_vsa2 = (val == AMD_VSA_SIG || val == GSW_VSA_SIG);
+ }
+
+ return has_vsa2;
+}
+
+/* GPIOs */
+#define GPIO_OUTPUT_VAL 0x00
+#define GPIO_OUTPUT_ENABLE 0x04
+#define GPIO_OUTPUT_OPEN_DRAIN 0x08
+#define GPIO_OUTPUT_INVERT 0x0C
+#define GPIO_OUTPUT_AUX1 0x10
+#define GPIO_OUTPUT_AUX2 0x14
+#define GPIO_PULL_UP 0x18
+#define GPIO_PULL_DOWN 0x1C
+#define GPIO_INPUT_ENABLE 0x20
+#define GPIO_INPUT_INVERT 0x24
+#define GPIO_INPUT_FILTER 0x28
+#define GPIO_INPUT_EVENT_COUNT 0x2C
+#define GPIO_READ_BACK 0x30
+#define GPIO_INPUT_AUX1 0x34
+#define GPIO_EVENTS_ENABLE 0x38
+#define GPIO_LOCK_ENABLE 0x3C
+#define GPIO_POSITIVE_EDGE_EN 0x40
+#define GPIO_NEGATIVE_EDGE_EN 0x44
+#define GPIO_POSITIVE_EDGE_STS 0x48
+#define GPIO_NEGATIVE_EDGE_STS 0x4C
+
+#define GPIO_MAP_X 0xE0
+#define GPIO_MAP_Y 0xE4
+#define GPIO_MAP_Z 0xE8
+#define GPIO_MAP_W 0xEC
+
+void cs5535_gpio_set(unsigned offset, unsigned int reg);
+void cs5535_gpio_clear(unsigned offset, unsigned int reg);
+int cs5535_gpio_isset(unsigned offset, unsigned int reg);
+
+/* MFGPTs */
+
+#define MFGPT_MAX_TIMERS 8
+#define MFGPT_TIMER_ANY (-1)
+
+#define MFGPT_DOMAIN_WORKING 1
+#define MFGPT_DOMAIN_STANDBY 2
+#define MFGPT_DOMAIN_ANY (MFGPT_DOMAIN_WORKING | MFGPT_DOMAIN_STANDBY)
+
+#define MFGPT_CMP1 0
+#define MFGPT_CMP2 1
+
+#define MFGPT_EVENT_IRQ 0
+#define MFGPT_EVENT_NMI 1
+#define MFGPT_EVENT_RESET 3
+
+#define MFGPT_REG_CMP1 0
+#define MFGPT_REG_CMP2 2
+#define MFGPT_REG_COUNTER 4
+#define MFGPT_REG_SETUP 6
+
+#define MFGPT_SETUP_CNTEN (1 << 15)
+#define MFGPT_SETUP_CMP2 (1 << 14)
+#define MFGPT_SETUP_CMP1 (1 << 13)
+#define MFGPT_SETUP_SETUP (1 << 12)
+#define MFGPT_SETUP_STOPEN (1 << 11)
+#define MFGPT_SETUP_EXTEN (1 << 10)
+#define MFGPT_SETUP_REVEN (1 << 5)
+#define MFGPT_SETUP_CLKSEL (1 << 4)
+
+struct cs5535_mfgpt_timer;
+
+extern uint16_t cs5535_mfgpt_read(struct cs5535_mfgpt_timer *timer,
+ uint16_t reg);
+extern void cs5535_mfgpt_write(struct cs5535_mfgpt_timer *timer, uint16_t reg,
+ uint16_t value);
+
+extern int cs5535_mfgpt_toggle_event(struct cs5535_mfgpt_timer *timer, int cmp,
+ int event, int enable);
+extern int cs5535_mfgpt_set_irq(struct cs5535_mfgpt_timer *timer, int cmp,
+ int *irq, int enable);
+extern struct cs5535_mfgpt_timer *cs5535_mfgpt_alloc_timer(int timer,
+ int domain);
+extern void cs5535_mfgpt_free_timer(struct cs5535_mfgpt_timer *timer);
+
+static inline int cs5535_mfgpt_setup_irq(struct cs5535_mfgpt_timer *timer,
+ int cmp, int *irq)
+{
+ return cs5535_mfgpt_set_irq(timer, cmp, irq, 1);
+}
+
+static inline int cs5535_mfgpt_release_irq(struct cs5535_mfgpt_timer *timer,
+ int cmp, int *irq)
+{
+ return cs5535_mfgpt_set_irq(timer, cmp, irq, 0);
+}
+
+#endif
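
[Editor's note: a rough sketch of the MFGPT API declared in the new cs5535.h above; illustrative only, not part of this patch. The compare value and function names are made up.]

#include <linux/cs5535.h>
#include <linux/errno.h>

static struct cs5535_mfgpt_timer *example_timer;

/*
 * Grab any free MFGPT timer in the working power domain, route its
 * comparator-2 event to an IRQ, and start it counting.
 */
static int example_mfgpt_start(int *irq)
{
	int ret;

	example_timer = cs5535_mfgpt_alloc_timer(MFGPT_TIMER_ANY,
						 MFGPT_DOMAIN_WORKING);
	if (!example_timer)
		return -ENODEV;

	ret = cs5535_mfgpt_setup_irq(example_timer, MFGPT_CMP2, irq);
	if (ret) {
		cs5535_mfgpt_free_timer(example_timer);
		return ret;
	}

	/* program a compare value and enable the counter + CMP2 event */
	cs5535_mfgpt_write(example_timer, MFGPT_REG_CMP2, 0xFFFF);
	cs5535_mfgpt_write(example_timer, MFGPT_REG_SETUP,
			   MFGPT_SETUP_CNTEN | MFGPT_SETUP_CMP2);
	return 0;
}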
diff --git a/include/linux/ctype.h b/include/linux/ctype.h
index afa36392297..a3d6ee0044f 100644
--- a/include/linux/ctype.h
+++ b/include/linux/ctype.h
@@ -15,7 +15,7 @@
#define _X 0x40 /* hex digit */
#define _SP 0x80 /* hard space (0x20) */
-extern unsigned char _ctype[];
+extern const unsigned char _ctype[];
#define __ismask(x) (_ctype[(int)(unsigned char)(x)])
@@ -27,6 +27,7 @@ extern unsigned char _ctype[];
#define islower(c) ((__ismask(c)&(_L)) != 0)
#define isprint(c) ((__ismask(c)&(_P|_U|_L|_D|_SP)) != 0)
#define ispunct(c) ((__ismask(c)&(_P)) != 0)
+/* Note: isspace() must return false for %NUL-terminator */
#define isspace(c) ((__ismask(c)&(_S)) != 0)
#define isupper(c) ((__ismask(c)&(_U)) != 0)
#define isxdigit(c) ((__ismask(c)&(_D|_X)) != 0)
diff --git a/include/linux/device-mapper.h b/include/linux/device-mapper.h
index df7607e6dce..d4c9c0b88ad 100644
--- a/include/linux/device-mapper.h
+++ b/include/linux/device-mapper.h
@@ -235,7 +235,7 @@ void dm_uevent_add(struct mapped_device *md, struct list_head *elist);
const char *dm_device_name(struct mapped_device *md);
int dm_copy_name_and_uuid(struct mapped_device *md, char *name, char *uuid);
struct gendisk *dm_disk(struct mapped_device *md);
-int dm_suspended(struct mapped_device *md);
+int dm_suspended(struct dm_target *ti);
int dm_noflush_suspending(struct dm_target *ti);
union map_info *dm_get_mapinfo(struct bio *bio);
union map_info *dm_get_rq_mapinfo(struct request *rq);
@@ -276,7 +276,7 @@ void dm_table_unplug_all(struct dm_table *t);
/*
* Table reference counting.
*/
-struct dm_table *dm_get_table(struct mapped_device *md);
+struct dm_table *dm_get_live_table(struct mapped_device *md);
void dm_table_get(struct dm_table *t);
void dm_table_put(struct dm_table *t);
@@ -295,8 +295,10 @@ void dm_table_event(struct dm_table *t);
/*
* The device must be suspended before calling this method.
+ * Returns the previous table, which the caller must destroy.
*/
-int dm_swap_table(struct mapped_device *md, struct dm_table *t);
+struct dm_table *dm_swap_table(struct mapped_device *md,
+ struct dm_table *t);
/*
* A wrapper around vmalloc.
diff --git a/include/linux/dm-dirty-log.h b/include/linux/dm-dirty-log.h
index 5e8b11d88f6..7084503c340 100644
--- a/include/linux/dm-dirty-log.h
+++ b/include/linux/dm-dirty-log.h
@@ -21,6 +21,7 @@ struct dm_dirty_log_type;
struct dm_dirty_log {
struct dm_dirty_log_type *type;
+ int (*flush_callback_fn)(struct dm_target *ti);
void *context;
};
@@ -136,8 +137,9 @@ int dm_dirty_log_type_unregister(struct dm_dirty_log_type *type);
* type->constructor/destructor() directly.
*/
struct dm_dirty_log *dm_dirty_log_create(const char *type_name,
- struct dm_target *ti,
- unsigned argc, char **argv);
+ struct dm_target *ti,
+ int (*flush_callback_fn)(struct dm_target *ti),
+ unsigned argc, char **argv);
void dm_dirty_log_destroy(struct dm_dirty_log *log);
#endif /* __KERNEL__ */
diff --git a/include/linux/dm-ioctl.h b/include/linux/dm-ioctl.h
index 2ab84c83c31..aa95508d2f9 100644
--- a/include/linux/dm-ioctl.h
+++ b/include/linux/dm-ioctl.h
@@ -1,6 +1,6 @@
/*
* Copyright (C) 2001 - 2003 Sistina Software (UK) Limited.
- * Copyright (C) 2004 - 2005 Red Hat, Inc. All rights reserved.
+ * Copyright (C) 2004 - 2009 Red Hat, Inc. All rights reserved.
*
* This file is released under the LGPL.
*/
@@ -266,9 +266,9 @@ enum {
#define DM_DEV_SET_GEOMETRY _IOWR(DM_IOCTL, DM_DEV_SET_GEOMETRY_CMD, struct dm_ioctl)
#define DM_VERSION_MAJOR 4
-#define DM_VERSION_MINOR 15
+#define DM_VERSION_MINOR 16
#define DM_VERSION_PATCHLEVEL 0
-#define DM_VERSION_EXTRA "-ioctl (2009-04-01)"
+#define DM_VERSION_EXTRA "-ioctl (2009-11-05)"
/* Status bits */
#define DM_READONLY_FLAG (1 << 0) /* In/Out */
@@ -309,4 +309,11 @@ enum {
*/
#define DM_NOFLUSH_FLAG (1 << 11) /* In */
+/*
+ * If set, any table information returned will relate to the inactive
+ * table instead of the live one. Always check DM_INACTIVE_PRESENT_FLAG
+ * is set before using the data returned.
+ */
+#define DM_QUERY_INACTIVE_TABLE_FLAG (1 << 12) /* In */
+
#endif /* _LINUX_DM_IOCTL_H */
diff --git a/include/linux/dm-region-hash.h b/include/linux/dm-region-hash.h
index a9e652a4137..9e2a7a401df 100644
--- a/include/linux/dm-region-hash.h
+++ b/include/linux/dm-region-hash.h
@@ -78,8 +78,7 @@ void dm_rh_dec(struct dm_region_hash *rh, region_t region);
/* Delay bios on regions. */
void dm_rh_delay(struct dm_region_hash *rh, struct bio *bio);
-void dm_rh_mark_nosync(struct dm_region_hash *rh,
- struct bio *bio, unsigned done, int error);
+void dm_rh_mark_nosync(struct dm_region_hash *rh, struct bio *bio);
/*
* Region recovery control.
diff --git a/include/linux/dmaengine.h b/include/linux/dmaengine.h
index 2b9f2ac7ed6..78784982b33 100644
--- a/include/linux/dmaengine.h
+++ b/include/linux/dmaengine.h
@@ -74,7 +74,7 @@ enum dma_transaction_type {
* control completion, and communicate status.
* @DMA_PREP_INTERRUPT - trigger an interrupt (callback) upon completion of
* this transaction
- * @DMA_CTRL_ACK - the descriptor cannot be reused until the client
+ * @DMA_CTRL_ACK - if clear, the descriptor cannot be reused until the client
* acknowledges receipt, i.e. has has a chance to establish any dependency
* chains
* @DMA_COMPL_SKIP_SRC_UNMAP - set to disable dma-unmapping the source buffer(s)
diff --git a/include/linux/dynamic_debug.h b/include/linux/dynamic_debug.h
index a0d9422a156..f8c2e176750 100644
--- a/include/linux/dynamic_debug.h
+++ b/include/linux/dynamic_debug.h
@@ -57,8 +57,7 @@ extern int ddebug_remove_module(char *mod_name);
{ KBUILD_MODNAME, __func__, __FILE__, fmt, DEBUG_HASH, \
DEBUG_HASH2, __LINE__, _DPRINTK_FLAGS_DEFAULT }; \
if (__dynamic_dbg_enabled(descriptor)) \
- printk(KERN_DEBUG KBUILD_MODNAME ":" pr_fmt(fmt), \
- ##__VA_ARGS__); \
+ printk(KERN_DEBUG pr_fmt(fmt), ##__VA_ARGS__); \
} while (0)
@@ -69,9 +68,7 @@ extern int ddebug_remove_module(char *mod_name);
{ KBUILD_MODNAME, __func__, __FILE__, fmt, DEBUG_HASH, \
DEBUG_HASH2, __LINE__, _DPRINTK_FLAGS_DEFAULT }; \
if (__dynamic_dbg_enabled(descriptor)) \
- dev_printk(KERN_DEBUG, dev, \
- KBUILD_MODNAME ": " fmt, \
- ##__VA_ARGS__); \
+ dev_printk(KERN_DEBUG, dev, fmt, ##__VA_ARGS__); \
} while (0)
#else
@@ -81,8 +78,10 @@ static inline int ddebug_remove_module(char *mod)
return 0;
}
-#define dynamic_pr_debug(fmt, ...) do { } while (0)
-#define dynamic_dev_dbg(dev, format, ...) do { } while (0)
+#define dynamic_pr_debug(fmt, ...) \
+ do { if (0) printk(KERN_DEBUG pr_fmt(fmt), ##__VA_ARGS__); } while (0)
+#define dynamic_dev_dbg(dev, format, ...) \
+ do { if (0) dev_printk(KERN_DEBUG, dev, fmt, ##__VA_ARGS__); } while (0)
#endif
#endif
diff --git a/include/linux/efi.h b/include/linux/efi.h
index ce4581fbc08..fb737bc19a8 100644
--- a/include/linux/efi.h
+++ b/include/linux/efi.h
@@ -280,11 +280,7 @@ efi_guidcmp (efi_guid_t left, efi_guid_t right)
static inline char *
efi_guid_unparse(efi_guid_t *guid, char *out)
{
- sprintf(out, "%02x%02x%02x%02x-%02x%02x-%02x%02x-%02x%02x-%02x%02x%02x%02x%02x%02x",
- guid->b[3], guid->b[2], guid->b[1], guid->b[0],
- guid->b[5], guid->b[4], guid->b[7], guid->b[6],
- guid->b[8], guid->b[9], guid->b[10], guid->b[11],
- guid->b[12], guid->b[13], guid->b[14], guid->b[15]);
+ sprintf(out, "%pUl", guid->b);
return out;
}
diff --git a/include/linux/err.h b/include/linux/err.h
index ec87f3142bf..1b12642636c 100644
--- a/include/linux/err.h
+++ b/include/linux/err.h
@@ -34,6 +34,11 @@ static inline long IS_ERR(const void *ptr)
return IS_ERR_VALUE((unsigned long)ptr);
}
+static inline long IS_ERR_OR_NULL(const void *ptr)
+{
+ return !ptr || IS_ERR_VALUE((unsigned long)ptr);
+}
+
/**
* ERR_CAST - Explicitly cast an error-valued pointer to another pointer type
* @ptr: The pointer to cast.
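
[Editor's note: a short sketch of the IS_ERR_OR_NULL() helper added above; illustrative only, not part of this patch. example_lookup() is a hypothetical function that may return a valid pointer, NULL, or an ERR_PTR() value.]

#include <linux/err.h>
#include <linux/errno.h>

struct example_ctx;	/* opaque handle used purely for illustration */
extern struct example_ctx *example_lookup(const char *name);	/* hypothetical */

static int example_use(const char *name)
{
	struct example_ctx *ctx = example_lookup(name);

	/* one test covers both "not found" (NULL) and "failed" (ERR_PTR) */
	if (IS_ERR_OR_NULL(ctx))
		return ctx ? PTR_ERR(ctx) : -ENOENT;

	/* ... use ctx ... */
	return 0;
}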
diff --git a/include/linux/fs.h b/include/linux/fs.h
index a057f48eb15..b23a7018eb9 100644
--- a/include/linux/fs.h
+++ b/include/linux/fs.h
@@ -2264,9 +2264,11 @@ ssize_t __blockdev_direct_IO(int rw, struct kiocb *iocb, struct inode *inode,
int lock_type);
enum {
- DIO_LOCKING = 1, /* need locking between buffered and direct access */
- DIO_NO_LOCKING, /* bdev; no locking at all between buffered/direct */
- DIO_OWN_LOCKING, /* filesystem locks buffered and direct internally */
+ /* need locking between buffered and direct access */
+ DIO_LOCKING = 0x01,
+
+ /* filesystem does not support filling holes */
+ DIO_SKIP_HOLES = 0x02,
};
static inline ssize_t blockdev_direct_IO(int rw, struct kiocb *iocb,
@@ -2275,7 +2277,8 @@ static inline ssize_t blockdev_direct_IO(int rw, struct kiocb *iocb,
dio_iodone_t end_io)
{
return __blockdev_direct_IO(rw, iocb, inode, bdev, iov, offset,
- nr_segs, get_block, end_io, DIO_LOCKING);
+ nr_segs, get_block, end_io,
+ DIO_LOCKING | DIO_SKIP_HOLES);
}
static inline ssize_t blockdev_direct_IO_no_locking(int rw, struct kiocb *iocb,
@@ -2284,16 +2287,7 @@ static inline ssize_t blockdev_direct_IO_no_locking(int rw, struct kiocb *iocb,
dio_iodone_t end_io)
{
return __blockdev_direct_IO(rw, iocb, inode, bdev, iov, offset,
- nr_segs, get_block, end_io, DIO_NO_LOCKING);
-}
-
-static inline ssize_t blockdev_direct_IO_own_locking(int rw, struct kiocb *iocb,
- struct inode *inode, struct block_device *bdev, const struct iovec *iov,
- loff_t offset, unsigned long nr_segs, get_block_t get_block,
- dio_iodone_t end_io)
-{
- return __blockdev_direct_IO(rw, iocb, inode, bdev, iov, offset,
- nr_segs, get_block, end_io, DIO_OWN_LOCKING);
+ nr_segs, get_block, end_io, 0);
}
#endif
diff --git a/include/linux/fsl_devices.h b/include/linux/fsl_devices.h
index 43fc95d822d..28e33fea510 100644
--- a/include/linux/fsl_devices.h
+++ b/include/linux/fsl_devices.h
@@ -74,7 +74,12 @@ struct spi_device;
struct fsl_spi_platform_data {
u32 initial_spmode; /* initial SPMODE value */
s16 bus_num;
- bool qe_mode;
+ unsigned int flags;
+#define SPI_QE_CPU_MODE (1 << 0) /* QE CPU ("PIO") mode */
+#define SPI_CPM_MODE (1 << 1) /* CPM/QE ("DMA") mode */
+#define SPI_CPM1 (1 << 2) /* SPI unit is in CPM1 block */
+#define SPI_CPM2 (1 << 3) /* SPI unit is in CPM2 block */
+#define SPI_QE (1 << 4) /* SPI unit is in QE block */
/* board specific information */
u16 max_chipselect;
void (*cs_control)(struct spi_device *spi, bool on);
@@ -90,6 +95,10 @@ struct mpc8xx_pcmcia_ops {
* lead to a deep sleep (i.e. power removed from the core,
* instead of just the clock).
*/
+#if defined(CONFIG_PPC_83xx) && defined(CONFIG_SUSPEND)
int fsl_deep_sleep(void);
+#else
+static inline int fsl_deep_sleep(void) { return 0; }
+#endif
#endif /* _FSL_DEVICE_H_ */
diff --git a/include/linux/ftrace_event.h b/include/linux/ftrace_event.h
index 47bbdf9c38d..38f8d655383 100644
--- a/include/linux/ftrace_event.h
+++ b/include/linux/ftrace_event.h
@@ -57,6 +57,7 @@ struct trace_iterator {
/* The below is zeroed out in pipe_read */
struct trace_seq seq;
struct trace_entry *ent;
+ int leftover;
int cpu;
u64 ts;
diff --git a/include/linux/gpio.h b/include/linux/gpio.h
index 059bd189d35..4e949a5b5b8 100644
--- a/include/linux/gpio.h
+++ b/include/linux/gpio.h
@@ -99,6 +99,12 @@ static inline int gpio_export_link(struct device *dev, const char *name,
return -EINVAL;
}
+static inline int gpio_sysfs_set_active_low(unsigned gpio, int value)
+{
+ /* GPIO can never have been requested */
+ WARN_ON(1);
+ return -EINVAL;
+}
static inline void gpio_unexport(unsigned gpio)
{
diff --git a/include/linux/hrtimer.h b/include/linux/hrtimer.h
index 9bace4b9f4f..5d86fb2309d 100644
--- a/include/linux/hrtimer.h
+++ b/include/linux/hrtimer.h
@@ -162,18 +162,23 @@ struct hrtimer_clock_base {
* @expires_next: absolute time of the next event which was scheduled
* via clock_set_next_event()
* @hres_active: State of high resolution mode
- * @check_clocks: Indictator, when set evaluate time source and clock
- * event devices whether high resolution mode can be
- * activated.
- * @nr_events: Total number of timer interrupt events
+ * @hang_detected: The last hrtimer interrupt detected a hang
+ * @nr_events: Total number of hrtimer interrupt events
+ * @nr_retries: Total number of hrtimer interrupt retries
+ * @nr_hangs: Total number of hrtimer interrupt hangs
+ * @max_hang_time: Maximum time spent in hrtimer_interrupt
*/
struct hrtimer_cpu_base {
- spinlock_t lock;
+ raw_spinlock_t lock;
struct hrtimer_clock_base clock_base[HRTIMER_MAX_CLOCK_BASES];
#ifdef CONFIG_HIGH_RES_TIMERS
ktime_t expires_next;
int hres_active;
+ int hang_detected;
unsigned long nr_events;
+ unsigned long nr_retries;
+ unsigned long nr_hangs;
+ ktime_t max_hang_time;
#endif
};
@@ -435,47 +440,4 @@ extern u64 ktime_divns(const ktime_t kt, s64 div);
/* Show pending timers: */
extern void sysrq_timer_list_show(void);
-/*
- * Timer-statistics info:
- */
-#ifdef CONFIG_TIMER_STATS
-
-extern void timer_stats_update_stats(void *timer, pid_t pid, void *startf,
- void *timerf, char *comm,
- unsigned int timer_flag);
-
-static inline void timer_stats_account_hrtimer(struct hrtimer *timer)
-{
- if (likely(!timer_stats_active))
- return;
- timer_stats_update_stats(timer, timer->start_pid, timer->start_site,
- timer->function, timer->start_comm, 0);
-}
-
-extern void __timer_stats_hrtimer_set_start_info(struct hrtimer *timer,
- void *addr);
-
-static inline void timer_stats_hrtimer_set_start_info(struct hrtimer *timer)
-{
- __timer_stats_hrtimer_set_start_info(timer, __builtin_return_address(0));
-}
-
-static inline void timer_stats_hrtimer_clear_start_info(struct hrtimer *timer)
-{
- timer->start_site = NULL;
-}
-#else
-static inline void timer_stats_account_hrtimer(struct hrtimer *timer)
-{
-}
-
-static inline void timer_stats_hrtimer_set_start_info(struct hrtimer *timer)
-{
-}
-
-static inline void timer_stats_hrtimer_clear_start_info(struct hrtimer *timer)
-{
-}
-#endif
-
#endif
diff --git a/include/linux/hugetlb.h b/include/linux/hugetlb.h
index 41a59afc70f..78b4bc64c00 100644
--- a/include/linux/hugetlb.h
+++ b/include/linux/hugetlb.h
@@ -23,6 +23,12 @@ void reset_vma_resv_huge_pages(struct vm_area_struct *vma);
int hugetlb_sysctl_handler(struct ctl_table *, int, void __user *, size_t *, loff_t *);
int hugetlb_overcommit_handler(struct ctl_table *, int, void __user *, size_t *, loff_t *);
int hugetlb_treat_movable_handler(struct ctl_table *, int, void __user *, size_t *, loff_t *);
+
+#ifdef CONFIG_NUMA
+int hugetlb_mempolicy_sysctl_handler(struct ctl_table *, int,
+ void __user *, size_t *, loff_t *);
+#endif
+
int copy_hugetlb_page_range(struct mm_struct *, struct mm_struct *, struct vm_area_struct *);
int follow_hugetlb_page(struct mm_struct *, struct vm_area_struct *,
struct page **, struct vm_area_struct **,
diff --git a/include/linux/hw_breakpoint.h b/include/linux/hw_breakpoint.h
index a03daed08c5..41235c93e4e 100644
--- a/include/linux/hw_breakpoint.h
+++ b/include/linux/hw_breakpoint.h
@@ -20,19 +20,18 @@ enum {
#ifdef CONFIG_HAVE_HW_BREAKPOINT
-/* As it's for in-kernel or ptrace use, we want it to be pinned */
-#define DEFINE_BREAKPOINT_ATTR(name) \
-struct perf_event_attr name = { \
- .type = PERF_TYPE_BREAKPOINT, \
- .size = sizeof(name), \
- .pinned = 1, \
-};
-
static inline void hw_breakpoint_init(struct perf_event_attr *attr)
{
+ memset(attr, 0, sizeof(*attr));
+
attr->type = PERF_TYPE_BREAKPOINT;
attr->size = sizeof(*attr);
+ /*
+ * As it's for in-kernel or ptrace use, we want it to be pinned
+ * and to call its callback every hits.
+ */
attr->pinned = 1;
+ attr->sample_period = 1;
}
static inline unsigned long hw_breakpoint_addr(struct perf_event *bp)
@@ -52,27 +51,24 @@ static inline int hw_breakpoint_len(struct perf_event *bp)
extern struct perf_event *
register_user_hw_breakpoint(struct perf_event_attr *attr,
- perf_callback_t triggered,
+ perf_overflow_handler_t triggered,
struct task_struct *tsk);
/* FIXME: only change from the attr, and don't unregister */
-extern struct perf_event *
-modify_user_hw_breakpoint(struct perf_event *bp,
- struct perf_event_attr *attr,
- perf_callback_t triggered,
- struct task_struct *tsk);
+extern int
+modify_user_hw_breakpoint(struct perf_event *bp, struct perf_event_attr *attr);
/*
* Kernel breakpoints are not associated with any particular thread.
*/
extern struct perf_event *
register_wide_hw_breakpoint_cpu(struct perf_event_attr *attr,
- perf_callback_t triggered,
+ perf_overflow_handler_t triggered,
int cpu);
extern struct perf_event **
register_wide_hw_breakpoint(struct perf_event_attr *attr,
- perf_callback_t triggered);
+ perf_overflow_handler_t triggered);
extern int register_perf_hw_breakpoint(struct perf_event *bp);
extern int __register_perf_hw_breakpoint(struct perf_event *bp);
@@ -93,20 +89,18 @@ static inline struct arch_hw_breakpoint *counter_arch_bp(struct perf_event *bp)
static inline struct perf_event *
register_user_hw_breakpoint(struct perf_event_attr *attr,
- perf_callback_t triggered,
+ perf_overflow_handler_t triggered,
struct task_struct *tsk) { return NULL; }
-static inline struct perf_event *
+static inline int
modify_user_hw_breakpoint(struct perf_event *bp,
- struct perf_event_attr *attr,
- perf_callback_t triggered,
- struct task_struct *tsk) { return NULL; }
+ struct perf_event_attr *attr) { return -ENOSYS; }
static inline struct perf_event *
register_wide_hw_breakpoint_cpu(struct perf_event_attr *attr,
- perf_callback_t triggered,
+ perf_overflow_handler_t triggered,
int cpu) { return NULL; }
static inline struct perf_event **
register_wide_hw_breakpoint(struct perf_event_attr *attr,
- perf_callback_t triggered) { return NULL; }
+ perf_overflow_handler_t triggered) { return NULL; }
static inline int
register_perf_hw_breakpoint(struct perf_event *bp) { return -ENOSYS; }
static inline int
diff --git a/include/linux/i2c.h b/include/linux/i2c.h
index 419ab546b26..02fc617782e 100644
--- a/include/linux/i2c.h
+++ b/include/linux/i2c.h
@@ -110,7 +110,7 @@ extern s32 i2c_smbus_write_i2c_block_data(struct i2c_client *client,
* @driver: Device driver model driver
* @id_table: List of I2C devices supported by this driver
* @detect: Callback for device detection
- * @address_data: The I2C addresses to probe (for detect)
+ * @address_list: The I2C addresses to probe (for detect)
* @clients: List of detected clients we created (for i2c-core use only)
*
* The driver.owner field should be set to the module owner of this driver.
@@ -161,8 +161,8 @@ struct i2c_driver {
const struct i2c_device_id *id_table;
/* Device detection callback for automatic device creation */
- int (*detect)(struct i2c_client *, int kind, struct i2c_board_info *);
- const struct i2c_client_address_data *address_data;
+ int (*detect)(struct i2c_client *, struct i2c_board_info *);
+ const unsigned short *address_list;
struct list_head clients;
};
#define to_i2c_driver(d) container_of(d, struct i2c_driver, driver)
@@ -391,14 +391,6 @@ static inline void i2c_unlock_adapter(struct i2c_adapter *adapter)
#define I2C_CLASS_DDC (1<<3) /* DDC bus on graphics adapters */
#define I2C_CLASS_SPD (1<<7) /* SPD EEPROMs and similar */
-/* i2c_client_address_data is the struct for holding default client
- * addresses for a driver and for the parameters supplied on the
- * command line
- */
-struct i2c_client_address_data {
- const unsigned short *normal_i2c;
-};
-
/* Internal numbers to terminate lists */
#define I2C_CLIENT_END 0xfffeU
@@ -576,82 +568,4 @@ union i2c_smbus_data {
#define I2C_SMBUS_BLOCK_PROC_CALL 7 /* SMBus 2.0 */
#define I2C_SMBUS_I2C_BLOCK_DATA 8
-
-#ifdef __KERNEL__
-
-/* These defines are used for probing i2c client addresses */
-/* The length of the option lists */
-#define I2C_CLIENT_MAX_OPTS 48
-
-/* Default fill of many variables */
-#define I2C_CLIENT_DEFAULTS {I2C_CLIENT_END, I2C_CLIENT_END, I2C_CLIENT_END, \
- I2C_CLIENT_END, I2C_CLIENT_END, I2C_CLIENT_END, \
- I2C_CLIENT_END, I2C_CLIENT_END, I2C_CLIENT_END, \
- I2C_CLIENT_END, I2C_CLIENT_END, I2C_CLIENT_END, \
- I2C_CLIENT_END, I2C_CLIENT_END, I2C_CLIENT_END, \
- I2C_CLIENT_END, I2C_CLIENT_END, I2C_CLIENT_END, \
- I2C_CLIENT_END, I2C_CLIENT_END, I2C_CLIENT_END, \
- I2C_CLIENT_END, I2C_CLIENT_END, I2C_CLIENT_END, \
- I2C_CLIENT_END, I2C_CLIENT_END, I2C_CLIENT_END, \
- I2C_CLIENT_END, I2C_CLIENT_END, I2C_CLIENT_END, \
- I2C_CLIENT_END, I2C_CLIENT_END, I2C_CLIENT_END, \
- I2C_CLIENT_END, I2C_CLIENT_END, I2C_CLIENT_END, \
- I2C_CLIENT_END, I2C_CLIENT_END, I2C_CLIENT_END, \
- I2C_CLIENT_END, I2C_CLIENT_END, I2C_CLIENT_END, \
- I2C_CLIENT_END, I2C_CLIENT_END, I2C_CLIENT_END, \
- I2C_CLIENT_END, I2C_CLIENT_END, I2C_CLIENT_END}
-
-/* I2C_CLIENT_MODULE_PARM creates a module parameter, and puts it in the
- module header */
-
-#define I2C_CLIENT_MODULE_PARM(var,desc) \
- static unsigned short var[I2C_CLIENT_MAX_OPTS] = I2C_CLIENT_DEFAULTS; \
- static unsigned int var##_num; \
- module_param_array(var, short, &var##_num, 0); \
- MODULE_PARM_DESC(var, desc)
-
-#define I2C_CLIENT_INSMOD_COMMON \
-static const struct i2c_client_address_data addr_data = { \
- .normal_i2c = normal_i2c, \
-}
-
-/* These are the ones you want to use in your own drivers. Pick the one
- which matches the number of devices the driver differenciates between. */
-#define I2C_CLIENT_INSMOD \
-I2C_CLIENT_INSMOD_COMMON
-
-#define I2C_CLIENT_INSMOD_1(chip1) \
-enum chips { any_chip, chip1 }; \
-I2C_CLIENT_INSMOD_COMMON
-
-#define I2C_CLIENT_INSMOD_2(chip1, chip2) \
-enum chips { any_chip, chip1, chip2 }; \
-I2C_CLIENT_INSMOD_COMMON
-
-#define I2C_CLIENT_INSMOD_3(chip1, chip2, chip3) \
-enum chips { any_chip, chip1, chip2, chip3 }; \
-I2C_CLIENT_INSMOD_COMMON
-
-#define I2C_CLIENT_INSMOD_4(chip1, chip2, chip3, chip4) \
-enum chips { any_chip, chip1, chip2, chip3, chip4 }; \
-I2C_CLIENT_INSMOD_COMMON
-
-#define I2C_CLIENT_INSMOD_5(chip1, chip2, chip3, chip4, chip5) \
-enum chips { any_chip, chip1, chip2, chip3, chip4, chip5 }; \
-I2C_CLIENT_INSMOD_COMMON
-
-#define I2C_CLIENT_INSMOD_6(chip1, chip2, chip3, chip4, chip5, chip6) \
-enum chips { any_chip, chip1, chip2, chip3, chip4, chip5, chip6 }; \
-I2C_CLIENT_INSMOD_COMMON
-
-#define I2C_CLIENT_INSMOD_7(chip1, chip2, chip3, chip4, chip5, chip6, chip7) \
-enum chips { any_chip, chip1, chip2, chip3, chip4, chip5, chip6, \
- chip7 }; \
-I2C_CLIENT_INSMOD_COMMON
-
-#define I2C_CLIENT_INSMOD_8(chip1, chip2, chip3, chip4, chip5, chip6, chip7, chip8) \
-enum chips { any_chip, chip1, chip2, chip3, chip4, chip5, chip6, \
- chip7, chip8 }; \
-I2C_CLIENT_INSMOD_COMMON
-#endif /* __KERNEL__ */
#endif /* _LINUX_I2C_H */
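
[Editor's note: a minimal sketch of the new-style detection interface after the I2C_CLIENT_INSMOD removal above; illustrative only, not part of this patch. The "foo" driver, addresses and chip name are made up.]

#include <linux/i2c.h>
#include <linux/string.h>

/* addresses to probe, terminated by I2C_CLIENT_END */
static const unsigned short foo_address_list[] = {
	0x48, 0x49, 0x4a, I2C_CLIENT_END
};

/* two-argument detect(): probe the chip, fill info->type on success */
static int foo_detect(struct i2c_client *client, struct i2c_board_info *info)
{
	/* read an ID register here; return -ENODEV if it does not match */
	strlcpy(info->type, "foo", I2C_NAME_SIZE);
	return 0;
}

static const struct i2c_device_id foo_id[] = {
	{ "foo", 0 },
	{ }
};

static struct i2c_driver foo_driver = {
	.driver		= { .name = "foo" },
	.id_table	= foo_id,
	.detect		= foo_detect,
	.address_list	= foo_address_list,
	.class		= I2C_CLASS_HWMON,
};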
diff --git a/include/linux/i2c/tps65010.h b/include/linux/i2c/tps65010.h
index 918c5354d9b..08aa92278d7 100644
--- a/include/linux/i2c/tps65010.h
+++ b/include/linux/i2c/tps65010.h
@@ -72,6 +72,21 @@
#define TPS_VDCDC1 0x0c
# define TPS_ENABLE_LP (1 << 3)
#define TPS_VDCDC2 0x0d
+# define TPS_LP_COREOFF (1 << 7)
+# define TPS_VCORE_1_8V (7<<4)
+# define TPS_VCORE_1_5V (6 << 4)
+# define TPS_VCORE_1_4V (5 << 4)
+# define TPS_VCORE_1_3V (4 << 4)
+# define TPS_VCORE_1_2V (3 << 4)
+# define TPS_VCORE_1_1V (2 << 4)
+# define TPS_VCORE_1_0V (1 << 4)
+# define TPS_VCORE_0_85V (0 << 4)
+# define TPS_VCORE_LP_1_2V (3 << 2)
+# define TPS_VCORE_LP_1_1V (2 << 2)
+# define TPS_VCORE_LP_1_0V (1 << 2)
+# define TPS_VCORE_LP_0_85V (0 << 2)
+# define TPS_VIB (1 << 1)
+# define TPS_VCORE_DISCH (1 << 0)
#define TPS_VREGS1 0x0e
# define TPS_LDO2_ENABLE (1 << 7)
# define TPS_LDO2_OFF (1 << 6)
@@ -152,6 +167,10 @@ extern int tps65010_config_vregs1(unsigned value);
*/
extern int tps65013_set_low_pwr(unsigned mode);
+/* tps65010_set_vdcdc2
+ * value to be written to VDCDC2
+ */
+extern int tps65010_config_vdcdc2(unsigned value);
struct i2c_client;
diff --git a/include/linux/i2c/twl4030.h b/include/linux/i2c/twl.h
index 5306a759cbd..bf1c5be1f5b 100644
--- a/include/linux/i2c/twl4030.h
+++ b/include/linux/i2c/twl.h
@@ -22,8 +22,8 @@
*
*/
-#ifndef __TWL4030_H_
-#define __TWL4030_H_
+#ifndef __TWL_H_
+#define __TWL_H_
#include <linux/types.h>
#include <linux/input/matrix_keypad.h>
@@ -61,28 +61,112 @@
#define TWL4030_MODULE_PWMA 0x0E
#define TWL4030_MODULE_PWMB 0x0F
+#define TWL5031_MODULE_ACCESSORY 0x10
+#define TWL5031_MODULE_INTERRUPTS 0x11
+
/* Slave 3 (i2c address 0x4b) */
-#define TWL4030_MODULE_BACKUP 0x10
-#define TWL4030_MODULE_INT 0x11
-#define TWL4030_MODULE_PM_MASTER 0x12
-#define TWL4030_MODULE_PM_RECEIVER 0x13
-#define TWL4030_MODULE_RTC 0x14
-#define TWL4030_MODULE_SECURED_REG 0x15
+#define TWL4030_MODULE_BACKUP 0x12
+#define TWL4030_MODULE_INT 0x13
+#define TWL4030_MODULE_PM_MASTER 0x14
+#define TWL4030_MODULE_PM_RECEIVER 0x15
+#define TWL4030_MODULE_RTC 0x16
+#define TWL4030_MODULE_SECURED_REG 0x17
+
+#define TWL_MODULE_USB TWL4030_MODULE_USB
+#define TWL_MODULE_AUDIO_VOICE TWL4030_MODULE_AUDIO_VOICE
+#define TWL_MODULE_PIH TWL4030_MODULE_PIH
+#define TWL_MODULE_MADC TWL4030_MODULE_MADC
+#define TWL_MODULE_MAIN_CHARGE TWL4030_MODULE_MAIN_CHARGE
+#define TWL_MODULE_PM_MASTER TWL4030_MODULE_PM_MASTER
+#define TWL_MODULE_PM_RECEIVER TWL4030_MODULE_PM_RECEIVER
+#define TWL_MODULE_RTC TWL4030_MODULE_RTC
+
+#define GPIO_INTR_OFFSET 0
+#define KEYPAD_INTR_OFFSET 1
+#define BCI_INTR_OFFSET 2
+#define MADC_INTR_OFFSET 3
+#define USB_INTR_OFFSET 4
+#define BCI_PRES_INTR_OFFSET 9
+#define USB_PRES_INTR_OFFSET 10
+#define RTC_INTR_OFFSET 11
+
+/*
+ * Offset from TWL6030_IRQ_BASE / pdata->irq_base
+ */
+#define PWR_INTR_OFFSET 0
+#define HOTDIE_INTR_OFFSET 12
+#define SMPSLDO_INTR_OFFSET 13
+#define BATDETECT_INTR_OFFSET 14
+#define SIMDETECT_INTR_OFFSET 15
+#define MMCDETECT_INTR_OFFSET 16
+#define GASGAUGE_INTR_OFFSET 17
+#define USBOTG_INTR_OFFSET 4
+#define CHARGER_INTR_OFFSET 2
+#define RSV_INTR_OFFSET 0
+
+/* INT register offsets */
+#define REG_INT_STS_A 0x00
+#define REG_INT_STS_B 0x01
+#define REG_INT_STS_C 0x02
+
+#define REG_INT_MSK_LINE_A 0x03
+#define REG_INT_MSK_LINE_B 0x04
+#define REG_INT_MSK_LINE_C 0x05
+
+#define REG_INT_MSK_STS_A 0x06
+#define REG_INT_MSK_STS_B 0x07
+#define REG_INT_MSK_STS_C 0x08
+
+/* MASK INT REG GROUP A */
+#define TWL6030_PWR_INT_MASK 0x07
+#define TWL6030_RTC_INT_MASK 0x18
+#define TWL6030_HOTDIE_INT_MASK 0x20
+#define TWL6030_SMPSLDOA_INT_MASK 0xC0
+
+/* MASK INT REG GROUP B */
+#define TWL6030_SMPSLDOB_INT_MASK 0x01
+#define TWL6030_BATDETECT_INT_MASK 0x02
+#define TWL6030_SIMDETECT_INT_MASK 0x04
+#define TWL6030_MMCDETECT_INT_MASK 0x08
+#define TWL6030_GPADC_INT_MASK 0x60
+#define TWL6030_GASGAUGE_INT_MASK 0x80
+
+/* MASK INT REG GROUP C */
+#define TWL6030_USBOTG_INT_MASK 0x0F
+#define TWL6030_CHARGER_CTRL_INT_MASK 0x10
+#define TWL6030_CHARGER_FAULT_INT_MASK 0x60
+
+
+#define TWL4030_CLASS_ID 0x4030
+#define TWL6030_CLASS_ID 0x6030
+unsigned int twl_rev(void);
+#define GET_TWL_REV (twl_rev())
+#define TWL_CLASS_IS(class, id) \
+static inline int twl_class_is_ ##class(void) \
+{ \
+ return ((id) == (GET_TWL_REV)) ? 1 : 0; \
+}
+
+TWL_CLASS_IS(4030, TWL4030_CLASS_ID)
+TWL_CLASS_IS(6030, TWL6030_CLASS_ID)
/*
* Read and write single 8-bit registers
*/
-int twl4030_i2c_write_u8(u8 mod_no, u8 val, u8 reg);
-int twl4030_i2c_read_u8(u8 mod_no, u8 *val, u8 reg);
+int twl_i2c_write_u8(u8 mod_no, u8 val, u8 reg);
+int twl_i2c_read_u8(u8 mod_no, u8 *val, u8 reg);
/*
* Read and write several 8-bit registers at once.
*
- * IMPORTANT: For twl4030_i2c_write(), allocate num_bytes + 1
+ * IMPORTANT: For twl_i2c_write(), allocate num_bytes + 1
* for the value, and populate your data starting at offset 1.
*/
-int twl4030_i2c_write(u8 mod_no, u8 *value, u8 reg, unsigned num_bytes);
-int twl4030_i2c_read(u8 mod_no, u8 *value, u8 reg, unsigned num_bytes);
+int twl_i2c_write(u8 mod_no, u8 *value, u8 reg, unsigned num_bytes);
+int twl_i2c_read(u8 mod_no, u8 *value, u8 reg, unsigned num_bytes);
+
+int twl6030_interrupt_unmask(u8 bit_mask, u8 offset);
+int twl6030_interrupt_mask(u8 bit_mask, u8 offset);
/*----------------------------------------------------------------------*/
@@ -221,6 +305,38 @@ int twl4030_i2c_read(u8 mod_no, u8 *value, u8 reg, unsigned num_bytes);
/*----------------------------------------------------------------------*/
+/*
+ * Accessory Interrupts
+ */
+#define TWL5031_ACIIMR_LSB 0x05
+#define TWL5031_ACIIMR_MSB 0x06
+#define TWL5031_ACIIDR_LSB 0x07
+#define TWL5031_ACIIDR_MSB 0x08
+#define TWL5031_ACCISR1 0x0F
+#define TWL5031_ACCIMR1 0x10
+#define TWL5031_ACCISR2 0x11
+#define TWL5031_ACCIMR2 0x12
+#define TWL5031_ACCSIR 0x13
+#define TWL5031_ACCEDR1 0x14
+#define TWL5031_ACCSIHCTRL 0x15
+
+/*----------------------------------------------------------------------*/
+
+/*
+ * Battery Charger Controller
+ */
+
+#define TWL5031_INTERRUPTS_BCIISR1 0x0
+#define TWL5031_INTERRUPTS_BCIIMR1 0x1
+#define TWL5031_INTERRUPTS_BCIISR2 0x2
+#define TWL5031_INTERRUPTS_BCIIMR2 0x3
+#define TWL5031_INTERRUPTS_BCISIR 0x4
+#define TWL5031_INTERRUPTS_BCIEDR1 0x5
+#define TWL5031_INTERRUPTS_BCIEDR2 0x6
+#define TWL5031_INTERRUPTS_BCISIHCTRL 0x7
+
+/*----------------------------------------------------------------------*/
+
/* Power bus message definitions */
/* The TWL4030/5030 splits its power-management resources (the various
@@ -250,6 +366,7 @@ int twl4030_i2c_read(u8 mod_no, u8 *value, u8 reg, unsigned num_bytes);
#define RES_TYPE_ALL 0x7
+/* Resource states */
#define RES_STATE_WRST 0xF
#define RES_STATE_ACTIVE 0xE
#define RES_STATE_SLEEP 0x8
@@ -310,8 +427,18 @@ int twl4030_i2c_read(u8 mod_no, u8 *value, u8 reg, unsigned num_bytes);
#define MSG_SINGULAR(devgrp, id, state) \
((devgrp) << 13 | 0 << 12 | (id) << 4 | (state))
+#define MSG_BROADCAST_ALL(devgrp, state) \
+ ((devgrp) << 5 | (state))
+
+#define MSG_BROADCAST_REF MSG_BROADCAST_ALL
+#define MSG_BROADCAST_PROV MSG_BROADCAST_ALL
+#define MSG_BROADCAST__CLK_RST MSG_BROADCAST_ALL
/*----------------------------------------------------------------------*/
+struct twl4030_clock_init_data {
+ bool ck32k_lowpwr_enable;
+};
+
struct twl4030_bci_platform_data {
int *battery_tmp_tbl;
unsigned int tblsize;
@@ -391,12 +518,15 @@ struct twl4030_resconfig {
u8 devgroup; /* Processor group that Power resource belongs to */
u8 type; /* Power resource addressed, 6 / broadcast message */
u8 type2; /* Power resource addressed, 3 / broadcast message */
+ u8 remap_off; /* off state remapping */
+ u8 remap_sleep; /* sleep state remapping */
};
struct twl4030_power_data {
struct twl4030_script **scripts;
unsigned num;
struct twl4030_resconfig *resource_config;
+#define TWL4030_RESCONFIG_UNDEF ((u8)-1)
};
extern void twl4030_power_init(struct twl4030_power_data *triton2_scripts);
@@ -421,6 +551,7 @@ struct twl4030_codec_data {
struct twl4030_platform_data {
unsigned irq_base, irq_end;
+ struct twl4030_clock_init_data *clock;
struct twl4030_bci_platform_data *bci;
struct twl4030_gpio_platform_data *gpio;
struct twl4030_madc_platform_data *madc;
@@ -429,19 +560,31 @@ struct twl4030_platform_data {
struct twl4030_power_data *power;
struct twl4030_codec_data *codec;
- /* LDO regulators */
+ /* Common LDO regulators for TWL4030/TWL6030 */
struct regulator_init_data *vdac;
+ struct regulator_init_data *vaux1;
+ struct regulator_init_data *vaux2;
+ struct regulator_init_data *vaux3;
+ /* TWL4030 LDO regulators */
struct regulator_init_data *vpll1;
struct regulator_init_data *vpll2;
struct regulator_init_data *vmmc1;
struct regulator_init_data *vmmc2;
struct regulator_init_data *vsim;
- struct regulator_init_data *vaux1;
- struct regulator_init_data *vaux2;
- struct regulator_init_data *vaux3;
struct regulator_init_data *vaux4;
-
- /* REVISIT more to come ... _nothing_ should be hard-wired */
+ struct regulator_init_data *vio;
+ struct regulator_init_data *vdd1;
+ struct regulator_init_data *vdd2;
+ struct regulator_init_data *vintana1;
+ struct regulator_init_data *vintana2;
+ struct regulator_init_data *vintdig;
+ /* TWL6030 LDO regulators */
+ struct regulator_init_data *vmmc;
+ struct regulator_init_data *vpp;
+ struct regulator_init_data *vusim;
+ struct regulator_init_data *vana;
+ struct regulator_init_data *vcxio;
+ struct regulator_init_data *vusb;
};
/*----------------------------------------------------------------------*/
@@ -473,6 +616,7 @@ int twl4030_sih_setup(int module);
* VIO is generally fixed.
*/
+/* TWL4030 SMPS/LDO's */
/* EXTERNAL dc-to-dc buck converters */
#define TWL4030_REG_VDD1 0
#define TWL4030_REG_VDD2 1
@@ -499,4 +643,31 @@ int twl4030_sih_setup(int module);
#define TWL4030_REG_VUSB1V8 18
#define TWL4030_REG_VUSB3V1 19
+/* TWL6030 SMPS/LDO's */
+/* EXTERNAL dc-to-dc buck convertor contollable via SR */
+#define TWL6030_REG_VDD1 30
+#define TWL6030_REG_VDD2 31
+#define TWL6030_REG_VDD3 32
+
+/* Non SR compliant dc-to-dc buck convertors */
+#define TWL6030_REG_VMEM 33
+#define TWL6030_REG_V2V1 34
+#define TWL6030_REG_V1V29 35
+#define TWL6030_REG_V1V8 36
+
+/* EXTERNAL LDOs */
+#define TWL6030_REG_VAUX1_6030 37
+#define TWL6030_REG_VAUX2_6030 38
+#define TWL6030_REG_VAUX3_6030 39
+#define TWL6030_REG_VMMC 40
+#define TWL6030_REG_VPP 41
+#define TWL6030_REG_VUSIM 42
+#define TWL6030_REG_VANA 43
+#define TWL6030_REG_VCXIO 44
+#define TWL6030_REG_VDAC 45
+#define TWL6030_REG_VUSB 46
+
+/* INTERNAL LDOs */
+#define TWL6030_REG_VRTC 47
+
#endif /* End of __TWL4030_H */
diff --git a/include/linux/i8042.h b/include/linux/i8042.h
index 60c3360ef6a..9bf6870ee5f 100644
--- a/include/linux/i8042.h
+++ b/include/linux/i8042.h
@@ -39,6 +39,10 @@ void i8042_lock_chip(void);
void i8042_unlock_chip(void);
int i8042_command(unsigned char *param, int command);
bool i8042_check_port_owner(const struct serio *);
+int i8042_install_filter(bool (*filter)(unsigned char data, unsigned char str,
+ struct serio *serio));
+int i8042_remove_filter(bool (*filter)(unsigned char data, unsigned char str,
+ struct serio *serio));
#else
@@ -52,7 +56,7 @@ void i8042_unlock_chip(void)
int i8042_command(unsigned char *param, int command)
{
- return -ENOSYS;
+ return -ENODEV;
}
bool i8042_check_port_owner(const struct serio *serio)
@@ -60,6 +64,18 @@ bool i8042_check_port_owner(const struct serio *serio)
return false;
}
+int i8042_install_filter(bool (*filter)(unsigned char data, unsigned char str,
+ struct serio *serio))
+{
+ return -ENODEV;
+}
+
+int i8042_remove_filter(bool (*filter)(unsigned char data, unsigned char str,
+ struct serio *serio))
+{
+ return -ENODEV;
+}
+
#endif
#endif
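
[Editor's note: a sketch of the new i8042 filter hooks above; illustrative only, not part of this patch. The return-value convention assumed here (true = byte consumed by the filter, false = pass it on) should be checked against the i8042 core.]

#include <linux/kernel.h>
#include <linux/i8042.h>
#include <linux/serio.h>

static bool example_i8042_filter(unsigned char data, unsigned char str,
				 struct serio *serio)
{
	/* e.g. snoop for the extended-scancode prefix from the keyboard */
	if (data == 0xe0)
		pr_debug("example: extended scancode prefix seen\n");

	return false;	/* never swallow anything in this sketch */
}

static int example_install(void)
{
	return i8042_install_filter(example_i8042_filter);
}

static void example_remove(void)
{
	i8042_remove_filter(example_i8042_filter);
}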
diff --git a/include/linux/init_task.h b/include/linux/init_task.h
index 8d10aa7fd4c..5ed8b9c5035 100644
--- a/include/linux/init_task.h
+++ b/include/linux/init_task.h
@@ -111,6 +111,12 @@ extern struct cred init_cred;
# define INIT_PERF_EVENTS(tsk)
#endif
+#ifdef CONFIG_FS_JOURNAL_INFO
+#define INIT_JOURNAL_INFO .journal_info = NULL,
+#else
+#define INIT_JOURNAL_INFO
+#endif
+
/*
* INIT_TASK is used to set up the first task table, touch at
* your own risk!. Base=0, limit=0x1fffff (=2MB)
@@ -162,10 +168,9 @@ extern struct cred init_cred;
.signal = {{0}}}, \
.blocked = {{0}}, \
.alloc_lock = __SPIN_LOCK_UNLOCKED(tsk.alloc_lock), \
- .journal_info = NULL, \
.cpu_timers = INIT_CPU_TIMERS(tsk.cpu_timers), \
.fs_excl = ATOMIC_INIT(0), \
- .pi_lock = __SPIN_LOCK_UNLOCKED(tsk.pi_lock), \
+ .pi_lock = __RAW_SPIN_LOCK_UNLOCKED(tsk.pi_lock), \
.timer_slack_ns = 50000, /* 50 usec default slack */ \
.pids = { \
[PIDTYPE_PID] = INIT_PID_LINK(PIDTYPE_PID), \
@@ -173,6 +178,7 @@ extern struct cred init_cred;
[PIDTYPE_SID] = INIT_PID_LINK(PIDTYPE_SID), \
}, \
.dirties = INIT_PROP_LOCAL_SINGLE(dirties), \
+ INIT_JOURNAL_INFO \
INIT_IDS \
INIT_PERF_EVENTS(tsk) \
INIT_TRACE_IRQFLAGS \
diff --git a/include/linux/intel-iommu.h b/include/linux/intel-iommu.h
index 4f0a72a9740..9310c699a37 100644
--- a/include/linux/intel-iommu.h
+++ b/include/linux/intel-iommu.h
@@ -332,6 +332,7 @@ struct intel_iommu {
#ifdef CONFIG_INTR_REMAP
struct ir_table *ir_table; /* Interrupt remapping info */
#endif
+ int node;
};
static inline void __iommu_flush_cache(
diff --git a/include/linux/iommu-helper.h b/include/linux/iommu-helper.h
index 3b068e5b567..64d1b638745 100644
--- a/include/linux/iommu-helper.h
+++ b/include/linux/iommu-helper.h
@@ -14,14 +14,11 @@ static inline unsigned long iommu_device_max_index(unsigned long size,
extern int iommu_is_span_boundary(unsigned int index, unsigned int nr,
unsigned long shift,
unsigned long boundary_size);
-extern void iommu_area_reserve(unsigned long *map, unsigned long i, int len);
extern unsigned long iommu_area_alloc(unsigned long *map, unsigned long size,
unsigned long start, unsigned int nr,
unsigned long shift,
unsigned long boundary_size,
unsigned long align_mask);
-extern void iommu_area_free(unsigned long *map, unsigned long start,
- unsigned int nr);
extern unsigned long iommu_num_pages(unsigned long addr, unsigned long len,
unsigned long io_page_size);
diff --git a/include/linux/ioport.h b/include/linux/ioport.h
index 83aa81297ea..7129504e053 100644
--- a/include/linux/ioport.h
+++ b/include/linux/ioport.h
@@ -126,11 +126,11 @@ extern int allocate_resource(struct resource *root, struct resource *new,
int adjust_resource(struct resource *res, resource_size_t start,
resource_size_t size);
resource_size_t resource_alignment(struct resource *res);
-static inline resource_size_t resource_size(struct resource *res)
+static inline resource_size_t resource_size(const struct resource *res)
{
return res->end - res->start + 1;
}
-static inline unsigned long resource_type(struct resource *res)
+static inline unsigned long resource_type(const struct resource *res)
{
return res->flags & IORESOURCE_TYPE_BITS;
}
diff --git a/include/linux/ipc_namespace.h b/include/linux/ipc_namespace.h
index e408722a84c..07baa38bce3 100644
--- a/include/linux/ipc_namespace.h
+++ b/include/linux/ipc_namespace.h
@@ -87,7 +87,7 @@ extern int mq_init_ns(struct ipc_namespace *ns);
/* default values */
#define DFLT_QUEUESMAX 256 /* max number of message queues */
#define DFLT_MSGMAX 10 /* max number of messages in each queue */
-#define HARD_MSGMAX (131072/sizeof(void *))
+#define HARD_MSGMAX (32768*sizeof(void *)/4)
#define DFLT_MSGSIZEMAX 8192 /* max message size */
#else
static inline int mq_init_ns(struct ipc_namespace *ns) { return 0; }
diff --git a/include/linux/irq.h b/include/linux/irq.h
index a287cfc0b1a..451481c082b 100644
--- a/include/linux/irq.h
+++ b/include/linux/irq.h
@@ -192,7 +192,7 @@ struct irq_desc {
unsigned int irq_count; /* For detecting broken IRQs */
unsigned long last_unhandled; /* Aging timer for unhandled count */
unsigned int irqs_unhandled;
- spinlock_t lock;
+ raw_spinlock_t lock;
#ifdef CONFIG_SMP
cpumask_var_t affinity;
unsigned int node;
diff --git a/include/linux/kallsyms.h b/include/linux/kallsyms.h
index 792274269f2..d8e9b3d1c23 100644
--- a/include/linux/kallsyms.h
+++ b/include/linux/kallsyms.h
@@ -107,18 +107,6 @@ static inline void print_symbol(const char *fmt, unsigned long addr)
__builtin_extract_return_addr((void *)addr));
}
-/*
- * Pretty-print a function pointer. This function is deprecated.
- * Please use the "%pF" vsprintf format instead.
- */
-static inline void __deprecated print_fn_descriptor_symbol(const char *fmt, void *addr)
-{
-#if defined(CONFIG_IA64) || defined(CONFIG_PPC64)
- addr = *(void **)addr;
-#endif
- print_symbol(fmt, (unsigned long)addr);
-}
-
static inline void print_ip_sym(unsigned long ip)
{
printk("[<%p>] %pS\n", (void *) ip, (void *) ip);
diff --git a/include/linux/kernel.h b/include/linux/kernel.h
index 3fa4c590cf1..4d9c916d06d 100644
--- a/include/linux/kernel.h
+++ b/include/linux/kernel.h
@@ -251,10 +251,10 @@ extern int printk_delay_msec;
* Print a one-time message (analogous to WARN_ONCE() et al):
*/
#define printk_once(x...) ({ \
- static bool __print_once = true; \
+ static bool __print_once; \
\
- if (__print_once) { \
- __print_once = false; \
+ if (!__print_once) { \
+ __print_once = true; \
printk(x); \
} \
})
@@ -397,15 +397,58 @@ static inline char *pack_hex_byte(char *buf, u8 byte)
printk(KERN_DEBUG pr_fmt(fmt), ##__VA_ARGS__)
#elif defined(CONFIG_DYNAMIC_DEBUG)
/* dynamic_pr_debug() uses pr_fmt() internally so we don't need it here */
-#define pr_debug(fmt, ...) do { \
- dynamic_pr_debug(fmt, ##__VA_ARGS__); \
- } while (0)
+#define pr_debug(fmt, ...) \
+ dynamic_pr_debug(fmt, ##__VA_ARGS__)
#else
#define pr_debug(fmt, ...) \
({ if (0) printk(KERN_DEBUG pr_fmt(fmt), ##__VA_ARGS__); 0; })
#endif
/*
+ * ratelimited messages with local ratelimit_state,
+ * no local ratelimit_state used in the !PRINTK case
+ */
+#ifdef CONFIG_PRINTK
+#define printk_ratelimited(fmt, ...) ({ \
+ static struct ratelimit_state _rs = { \
+ .interval = DEFAULT_RATELIMIT_INTERVAL, \
+ .burst = DEFAULT_RATELIMIT_BURST, \
+ }; \
+ \
+ if (!__ratelimit(&_rs)) \
+ printk(fmt, ##__VA_ARGS__); \
+})
+#else
+/* No effect, but we still get type checking even in the !PRINTK case: */
+#define printk_ratelimited printk
+#endif
+
+#define pr_emerg_ratelimited(fmt, ...) \
+ printk_ratelimited(KERN_EMERG pr_fmt(fmt), ##__VA_ARGS__)
+#define pr_alert_ratelimited(fmt, ...) \
+ printk_ratelimited(KERN_ALERT pr_fmt(fmt), ##__VA_ARGS__)
+#define pr_crit_ratelimited(fmt, ...) \
+ printk_ratelimited(KERN_CRIT pr_fmt(fmt), ##__VA_ARGS__)
+#define pr_err_ratelimited(fmt, ...) \
+ printk_ratelimited(KERN_ERR pr_fmt(fmt), ##__VA_ARGS__)
+#define pr_warning_ratelimited(fmt, ...) \
+ printk_ratelimited(KERN_WARNING pr_fmt(fmt), ##__VA_ARGS__)
+#define pr_notice_ratelimited(fmt, ...) \
+ printk_ratelimited(KERN_NOTICE pr_fmt(fmt), ##__VA_ARGS__)
+#define pr_info_ratelimited(fmt, ...) \
+ printk_ratelimited(KERN_INFO pr_fmt(fmt), ##__VA_ARGS__)
+/* no pr_cont_ratelimited, don't do that... */
+/* If you are writing a driver, please use dev_dbg instead */
+#if defined(DEBUG)
+#define pr_debug_ratelimited(fmt, ...) \
+ printk_ratelimited(KERN_DEBUG pr_fmt(fmt), ##__VA_ARGS__)
+#else
+#define pr_debug_ratelimited(fmt, ...) \
+ ({ if (0) printk_ratelimited(KERN_DEBUG pr_fmt(fmt), \
+ ##__VA_ARGS__); 0; })
+#endif
+
+/*
* General tracing related utility functions - trace_printk(),
* tracing_on/tracing_off and tracing_start()/tracing_stop
*
diff --git a/include/linux/kexec.h b/include/linux/kexec.h
index adc34f2c6ef..c356b6914ff 100644
--- a/include/linux/kexec.h
+++ b/include/linux/kexec.h
@@ -206,6 +206,8 @@ extern size_t vmcoreinfo_max_size;
int __init parse_crashkernel(char *cmdline, unsigned long long system_ram,
unsigned long long *crash_size, unsigned long long *crash_base);
+int crash_shrink_memory(unsigned long new_size);
+size_t crash_get_memory_size(void);
#else /* !CONFIG_KEXEC */
struct pt_regs;
diff --git a/include/linux/kmsg_dump.h b/include/linux/kmsg_dump.h
new file mode 100644
index 00000000000..e32aa268efa
--- /dev/null
+++ b/include/linux/kmsg_dump.h
@@ -0,0 +1,60 @@
+/*
+ * linux/include/kmsg_dump.h
+ *
+ * Copyright (C) 2009 Net Insight AB
+ *
+ * Author: Simon Kagstrom <simon.kagstrom@netinsight.net>
+ *
+ * This file is subject to the terms and conditions of the GNU General Public
+ * License. See the file COPYING in the main directory of this archive
+ * for more details.
+ */
+#ifndef _LINUX_KMSG_DUMP_H
+#define _LINUX_KMSG_DUMP_H
+
+#include <linux/list.h>
+
+enum kmsg_dump_reason {
+ KMSG_DUMP_OOPS,
+ KMSG_DUMP_PANIC,
+};
+
+/**
+ * struct kmsg_dumper - kernel crash message dumper structure
+ * @dump: The callback which gets called on crashes. The buffer is passed
+ * as two sections, where s1 (length l1) contains the older
+ * messages and s2 (length l2) contains the newer.
+ * @list: Entry in the dumper list (private)
+ * @registered: Flag that specifies if this is already registered
+ */
+struct kmsg_dumper {
+ void (*dump)(struct kmsg_dumper *dumper, enum kmsg_dump_reason reason,
+ const char *s1, unsigned long l1,
+ const char *s2, unsigned long l2);
+ struct list_head list;
+ int registered;
+};
+
+#ifdef CONFIG_PRINTK
+void kmsg_dump(enum kmsg_dump_reason reason);
+
+int kmsg_dump_register(struct kmsg_dumper *dumper);
+
+int kmsg_dump_unregister(struct kmsg_dumper *dumper);
+#else
+static inline void kmsg_dump(enum kmsg_dump_reason reason)
+{
+}
+
+static inline int kmsg_dump_register(struct kmsg_dumper *dumper)
+{
+ return -EINVAL;
+}
+
+static inline int kmsg_dump_unregister(struct kmsg_dumper *dumper)
+{
+ return -EINVAL;
+}
+#endif
+
+#endif /* _LINUX_KMSG_DUMP_H */
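
Registration sketch (illustrative only, not taken from an in-tree user): a dumper copies the tail of the two log sections into its own buffer when an oops or panic occurs.

static char example_buf[4096];

static void example_do_dump(struct kmsg_dumper *dumper,
			    enum kmsg_dump_reason reason,
			    const char *s1, unsigned long l1,
			    const char *s2, unsigned long l2)
{
	unsigned long n1 = min_t(unsigned long, l1, sizeof(example_buf) / 2);
	unsigned long n2 = min_t(unsigned long, l2, sizeof(example_buf) / 2);

	/* s1 holds the older part of the log buffer, s2 the newer. */
	memcpy(example_buf, s1 + l1 - n1, n1);
	memcpy(example_buf + n1, s2 + l2 - n2, n2);
}

static struct kmsg_dumper example_dumper = {
	.dump = example_do_dump,
};

/* In the driver's init path: kmsg_dump_register(&example_dumper); */
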
diff --git a/include/linux/ksm.h b/include/linux/ksm.h
index a485c14ecd5..43bdab769fc 100644
--- a/include/linux/ksm.h
+++ b/include/linux/ksm.h
@@ -9,8 +9,12 @@
#include <linux/bitops.h>
#include <linux/mm.h>
+#include <linux/pagemap.h>
+#include <linux/rmap.h>
#include <linux/sched.h>
-#include <linux/vmstat.h>
+
+struct stable_node;
+struct mem_cgroup;
#ifdef CONFIG_KSM
int ksm_madvise(struct vm_area_struct *vma, unsigned long start,
@@ -34,46 +38,110 @@ static inline void ksm_exit(struct mm_struct *mm)
/*
* A KSM page is one of those write-protected "shared pages" or "merged pages"
* which KSM maps into multiple mms, wherever identical anonymous page content
- * is found in VM_MERGEABLE vmas. It's a PageAnon page, with NULL anon_vma.
+ * is found in VM_MERGEABLE vmas. It's a PageAnon page, pointing not to any
+ * anon_vma, but to that page's node of the stable tree.
*/
static inline int PageKsm(struct page *page)
{
- return ((unsigned long)page->mapping == PAGE_MAPPING_ANON);
+ return ((unsigned long)page->mapping & PAGE_MAPPING_FLAGS) ==
+ (PAGE_MAPPING_ANON | PAGE_MAPPING_KSM);
+}
+
+static inline struct stable_node *page_stable_node(struct page *page)
+{
+ return PageKsm(page) ? page_rmapping(page) : NULL;
+}
+
+static inline void set_page_stable_node(struct page *page,
+ struct stable_node *stable_node)
+{
+ page->mapping = (void *)stable_node +
+ (PAGE_MAPPING_ANON | PAGE_MAPPING_KSM);
}
/*
- * But we have to avoid the checking which page_add_anon_rmap() performs.
+ * When do_swap_page() first faults in from swap what used to be a KSM page,
+ * no problem, it will be assigned to this vma's anon_vma; but thereafter,
+ * it might be faulted into a different anon_vma (or perhaps to a different
+ * offset in the same anon_vma). do_swap_page() cannot do all the locking
+ * needed to reconstitute a cross-anon_vma KSM page: for now it has to make
+ * a copy, and leave remerging the pages to a later pass of ksmd.
+ *
+ * We'd like to make this conditional on vma->vm_flags & VM_MERGEABLE,
+ * but what if the vma was unmerged while the page was swapped out?
*/
-static inline void page_add_ksm_rmap(struct page *page)
+struct page *ksm_does_need_to_copy(struct page *page,
+ struct vm_area_struct *vma, unsigned long address);
+static inline struct page *ksm_might_need_to_copy(struct page *page,
+ struct vm_area_struct *vma, unsigned long address)
{
- if (atomic_inc_and_test(&page->_mapcount)) {
- page->mapping = (void *) PAGE_MAPPING_ANON;
- __inc_zone_page_state(page, NR_ANON_PAGES);
- }
+ struct anon_vma *anon_vma = page_anon_vma(page);
+
+ if (!anon_vma ||
+ (anon_vma == vma->anon_vma &&
+ page->index == linear_page_index(vma, address)))
+ return page;
+
+ return ksm_does_need_to_copy(page, vma, address);
}
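
Call-site sketch (simplified and illustrative; the real swap-in fault path also handles failure of the copy):

static struct page *example_swapin_fixup(struct page *page,
					 struct vm_area_struct *vma,
					 unsigned long address)
{
	/*
	 * Returns the original page when it can be reused directly,
	 * or a freshly copied page when the old KSM page cannot be
	 * attached to this vma's anon_vma.
	 */
	return ksm_might_need_to_copy(page, vma, address);
}
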
+
+int page_referenced_ksm(struct page *page,
+ struct mem_cgroup *memcg, unsigned long *vm_flags);
+int try_to_unmap_ksm(struct page *page, enum ttu_flags flags);
+int rmap_walk_ksm(struct page *page, int (*rmap_one)(struct page *,
+ struct vm_area_struct *, unsigned long, void *), void *arg);
+void ksm_migrate_page(struct page *newpage, struct page *oldpage);
+
#else /* !CONFIG_KSM */
+static inline int ksm_fork(struct mm_struct *mm, struct mm_struct *oldmm)
+{
+ return 0;
+}
+
+static inline void ksm_exit(struct mm_struct *mm)
+{
+}
+
+static inline int PageKsm(struct page *page)
+{
+ return 0;
+}
+
+#ifdef CONFIG_MMU
static inline int ksm_madvise(struct vm_area_struct *vma, unsigned long start,
unsigned long end, int advice, unsigned long *vm_flags)
{
return 0;
}
-static inline int ksm_fork(struct mm_struct *mm, struct mm_struct *oldmm)
+static inline struct page *ksm_might_need_to_copy(struct page *page,
+ struct vm_area_struct *vma, unsigned long address)
+{
+ return page;
+}
+
+static inline int page_referenced_ksm(struct page *page,
+ struct mem_cgroup *memcg, unsigned long *vm_flags)
{
return 0;
}
-static inline void ksm_exit(struct mm_struct *mm)
+static inline int try_to_unmap_ksm(struct page *page, enum ttu_flags flags)
{
+ return 0;
}
-static inline int PageKsm(struct page *page)
+static inline int rmap_walk_ksm(struct page *page, int (*rmap_one)(struct page*,
+ struct vm_area_struct *, unsigned long, void *), void *arg)
{
return 0;
}
-/* No stub required for page_add_ksm_rmap(page) */
+static inline void ksm_migrate_page(struct page *newpage, struct page *oldpage)
+{
+}
+#endif /* CONFIG_MMU */
#endif /* !CONFIG_KSM */
-#endif
+#endif /* __LINUX_KSM_H */
diff --git a/include/linux/kvm.h b/include/linux/kvm.h
index 2d241da0723..a24de0b1858 100644
--- a/include/linux/kvm.h
+++ b/include/linux/kvm.h
@@ -496,6 +496,7 @@ struct kvm_ioeventfd {
#define KVM_CAP_VCPU_EVENTS 41
#endif
#define KVM_CAP_S390_PSW 42
+#define KVM_CAP_PPC_SEGSTATE 43
#ifdef KVM_CAP_IRQ_ROUTING
diff --git a/include/linux/lis3lv02d.h b/include/linux/lis3lv02d.h
index 3cc2f2c53e4..f1ca0dcc162 100644
--- a/include/linux/lis3lv02d.h
+++ b/include/linux/lis3lv02d.h
@@ -43,6 +43,21 @@ struct lis3lv02d_platform_data {
#define LIS3_WAKEUP_Z_HI (1 << 5)
unsigned char wakeup_flags;
unsigned char wakeup_thresh;
+#define LIS3_NO_MAP 0
+#define LIS3_DEV_X 1
+#define LIS3_DEV_Y 2
+#define LIS3_DEV_Z 3
+#define LIS3_INV_DEV_X -1
+#define LIS3_INV_DEV_Y -2
+#define LIS3_INV_DEV_Z -3
+ s8 axis_x;
+ s8 axis_y;
+ s8 axis_z;
+ int (*setup_resources)(void);
+ int (*release_resources)(void);
+ /* Limits for selftest are specified in chip data sheet */
+ s16 st_min_limits[3]; /* min pass limit x, y, z */
+ s16 st_max_limits[3]; /* max pass limit x, y, z */
};
#endif /* __LIS3LV02D_H_ */
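
Board-code sketch (values are placeholders for illustration, not from a real board file) showing the new axis-mapping and selftest fields:

static struct lis3lv02d_platform_data example_lis3_pdata = {
	/* sensor mounted rotated: swap X/Y and invert Z */
	.axis_x = LIS3_DEV_Y,
	.axis_y = LIS3_DEV_X,
	.axis_z = LIS3_INV_DEV_Z,
	/* selftest pass window, per the chip data sheet */
	.st_min_limits = { 120, 120, 140 },
	.st_max_limits = { 550, 550, 750 },
};
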
diff --git a/include/linux/memcontrol.h b/include/linux/memcontrol.h
index bf9213b2db8..0b46c2068b9 100644
--- a/include/linux/memcontrol.h
+++ b/include/linux/memcontrol.h
@@ -54,6 +54,11 @@ extern void mem_cgroup_rotate_lru_list(struct page *page, enum lru_list lru);
extern void mem_cgroup_del_lru(struct page *page);
extern void mem_cgroup_move_lists(struct page *page,
enum lru_list from, enum lru_list to);
+
+/* For coalescing uncharge to reduce memcg's overhead */
+extern void mem_cgroup_uncharge_start(void);
+extern void mem_cgroup_uncharge_end(void);
+
extern void mem_cgroup_uncharge_page(struct page *page);
extern void mem_cgroup_uncharge_cache_page(struct page *page);
extern int mem_cgroup_shmem_charge_fallback(struct page *page,
@@ -117,7 +122,7 @@ static inline bool mem_cgroup_disabled(void)
}
extern bool mem_cgroup_oom_called(struct task_struct *task);
-void mem_cgroup_update_mapped_file_stat(struct page *page, int val);
+void mem_cgroup_update_file_mapped(struct page *page, int val);
unsigned long mem_cgroup_soft_limit_reclaim(struct zone *zone, int order,
gfp_t gfp_mask, int nid,
int zid);
@@ -151,6 +156,14 @@ static inline void mem_cgroup_cancel_charge_swapin(struct mem_cgroup *ptr)
{
}
+static inline void mem_cgroup_uncharge_start(void)
+{
+}
+
+static inline void mem_cgroup_uncharge_end(void)
+{
+}
+
static inline void mem_cgroup_uncharge_page(struct page *page)
{
}
@@ -274,7 +287,7 @@ mem_cgroup_print_oom_info(struct mem_cgroup *memcg, struct task_struct *p)
{
}
-static inline void mem_cgroup_update_mapped_file_stat(struct page *page,
+static inline void mem_cgroup_update_file_mapped(struct page *page,
int val)
{
}
diff --git a/include/linux/memory_hotplug.h b/include/linux/memory_hotplug.h
index fed969281a4..35b07b773e6 100644
--- a/include/linux/memory_hotplug.h
+++ b/include/linux/memory_hotplug.h
@@ -69,7 +69,6 @@ extern void online_page(struct page *page);
/* VM interface that may be used by firmware interface */
extern int online_pages(unsigned long, unsigned long);
extern void __offline_isolated_pages(unsigned long, unsigned long);
-extern int offline_pages(unsigned long, unsigned long, unsigned long);
/* reasonably generic interface to expand the physical pages in a zone */
extern int __add_pages(int nid, struct zone *zone, unsigned long start_pfn,
diff --git a/include/linux/mempolicy.h b/include/linux/mempolicy.h
index 085c903fe0f..1cc966cd3e5 100644
--- a/include/linux/mempolicy.h
+++ b/include/linux/mempolicy.h
@@ -201,6 +201,7 @@ extern void mpol_fix_fork_child_flag(struct task_struct *p);
extern struct zonelist *huge_zonelist(struct vm_area_struct *vma,
unsigned long addr, gfp_t gfp_flags,
struct mempolicy **mpol, nodemask_t **nodemask);
+extern bool init_nodemask_of_mempolicy(nodemask_t *mask);
extern unsigned slab_node(struct mempolicy *policy);
extern enum zone_type policy_zone;
@@ -328,6 +329,8 @@ static inline struct zonelist *huge_zonelist(struct vm_area_struct *vma,
return node_zonelist(0, gfp_flags);
}
+static inline bool init_nodemask_of_mempolicy(nodemask_t *m) { return false; }
+
static inline int do_migrate_pages(struct mm_struct *mm,
const nodemask_t *from_nodes,
const nodemask_t *to_nodes, int flags)
diff --git a/include/linux/mfd/88pm8607.h b/include/linux/mfd/88pm8607.h
new file mode 100644
index 00000000000..f41b428d2ce
--- /dev/null
+++ b/include/linux/mfd/88pm8607.h
@@ -0,0 +1,217 @@
+/*
+ * Marvell 88PM8607 Interface
+ *
+ * Copyright (C) 2009 Marvell International Ltd.
+ * Haojian Zhuang <haojian.zhuang@marvell.com>
+ *
+ * This program is free software; you can redistribute it and/or modify
+ * it under the terms of the GNU General Public License version 2 as
+ * published by the Free Software Foundation.
+ */
+
+#ifndef __LINUX_MFD_88PM8607_H
+#define __LINUX_MFD_88PM8607_H
+
+enum {
+ PM8607_ID_BUCK1 = 0,
+ PM8607_ID_BUCK2,
+ PM8607_ID_BUCK3,
+
+ PM8607_ID_LDO1,
+ PM8607_ID_LDO2,
+ PM8607_ID_LDO3,
+ PM8607_ID_LDO4,
+ PM8607_ID_LDO5,
+ PM8607_ID_LDO6,
+ PM8607_ID_LDO7,
+ PM8607_ID_LDO8,
+ PM8607_ID_LDO9,
+ PM8607_ID_LDO10,
+ PM8607_ID_LDO12,
+ PM8607_ID_LDO14,
+
+ PM8607_ID_RG_MAX,
+};
+
+#define CHIP_ID (0x40)
+#define CHIP_ID_MASK (0xF8)
+
+/* Interrupt Registers */
+#define PM8607_STATUS_1 (0x01)
+#define PM8607_STATUS_2 (0x02)
+#define PM8607_INT_STATUS1 (0x03)
+#define PM8607_INT_STATUS2 (0x04)
+#define PM8607_INT_STATUS3 (0x05)
+#define PM8607_INT_MASK_1 (0x06)
+#define PM8607_INT_MASK_2 (0x07)
+#define PM8607_INT_MASK_3 (0x08)
+
+/* Regulator Control Registers */
+#define PM8607_LDO1 (0x10)
+#define PM8607_LDO2 (0x11)
+#define PM8607_LDO3 (0x12)
+#define PM8607_LDO4 (0x13)
+#define PM8607_LDO5 (0x14)
+#define PM8607_LDO6 (0x15)
+#define PM8607_LDO7 (0x16)
+#define PM8607_LDO8 (0x17)
+#define PM8607_LDO9 (0x18)
+#define PM8607_LDO10 (0x19)
+#define PM8607_LDO12 (0x1A)
+#define PM8607_LDO14 (0x1B)
+#define PM8607_SLEEP_MODE1 (0x1C)
+#define PM8607_SLEEP_MODE2 (0x1D)
+#define PM8607_SLEEP_MODE3 (0x1E)
+#define PM8607_SLEEP_MODE4 (0x1F)
+#define PM8607_GO (0x20)
+#define PM8607_SLEEP_BUCK1 (0x21)
+#define PM8607_SLEEP_BUCK2 (0x22)
+#define PM8607_SLEEP_BUCK3 (0x23)
+#define PM8607_BUCK1 (0x24)
+#define PM8607_BUCK2 (0x25)
+#define PM8607_BUCK3 (0x26)
+#define PM8607_BUCK_CONTROLS (0x27)
+#define PM8607_SUPPLIES_EN11 (0x2B)
+#define PM8607_SUPPLIES_EN12 (0x2C)
+#define PM8607_GROUP1 (0x2D)
+#define PM8607_GROUP2 (0x2E)
+#define PM8607_GROUP3 (0x2F)
+#define PM8607_GROUP4 (0x30)
+#define PM8607_GROUP5 (0x31)
+#define PM8607_GROUP6 (0x32)
+#define PM8607_SUPPLIES_EN21 (0x33)
+#define PM8607_SUPPLIES_EN22 (0x34)
+
+/* RTC Control Registers */
+#define PM8607_RTC1 (0xA0)
+#define PM8607_RTC_COUNTER1 (0xA1)
+#define PM8607_RTC_COUNTER2 (0xA2)
+#define PM8607_RTC_COUNTER3 (0xA3)
+#define PM8607_RTC_COUNTER4 (0xA4)
+#define PM8607_RTC_EXPIRE1 (0xA5)
+#define PM8607_RTC_EXPIRE2 (0xA6)
+#define PM8607_RTC_EXPIRE3 (0xA7)
+#define PM8607_RTC_EXPIRE4 (0xA8)
+#define PM8607_RTC_TRIM1 (0xA9)
+#define PM8607_RTC_TRIM2 (0xAA)
+#define PM8607_RTC_TRIM3 (0xAB)
+#define PM8607_RTC_TRIM4 (0xAC)
+#define PM8607_RTC_MISC1 (0xAD)
+#define PM8607_RTC_MISC2 (0xAE)
+#define PM8607_RTC_MISC3 (0xAF)
+
+/* Misc Registers */
+#define PM8607_CHIP_ID (0x00)
+#define PM8607_LDO1 (0x10)
+#define PM8607_DVC3 (0x26)
+#define PM8607_MISC1 (0x40)
+
+/* bit definitions for PM8607 events */
+#define PM8607_EVENT_ONKEY (1 << 0)
+#define PM8607_EVENT_EXTON (1 << 1)
+#define PM8607_EVENT_CHG (1 << 2)
+#define PM8607_EVENT_BAT (1 << 3)
+#define PM8607_EVENT_RTC (1 << 4)
+#define PM8607_EVENT_CC (1 << 5)
+#define PM8607_EVENT_VBAT (1 << 8)
+#define PM8607_EVENT_VCHG (1 << 9)
+#define PM8607_EVENT_VSYS (1 << 10)
+#define PM8607_EVENT_TINT (1 << 11)
+#define PM8607_EVENT_GPADC0 (1 << 12)
+#define PM8607_EVENT_GPADC1 (1 << 13)
+#define PM8607_EVENT_GPADC2 (1 << 14)
+#define PM8607_EVENT_GPADC3 (1 << 15)
+#define PM8607_EVENT_AUDIO_SHORT (1 << 16)
+#define PM8607_EVENT_PEN (1 << 17)
+#define PM8607_EVENT_HEADSET (1 << 18)
+#define PM8607_EVENT_HOOK (1 << 19)
+#define PM8607_EVENT_MICIN (1 << 20)
+#define PM8607_EVENT_CHG_TIMEOUT (1 << 21)
+#define PM8607_EVENT_CHG_DONE (1 << 22)
+#define PM8607_EVENT_CHG_FAULT (1 << 23)
+
+/* bit definitions of Status Query Interface */
+#define PM8607_STATUS_CC (1 << 3)
+#define PM8607_STATUS_PEN (1 << 4)
+#define PM8607_STATUS_HEADSET (1 << 5)
+#define PM8607_STATUS_HOOK (1 << 6)
+#define PM8607_STATUS_MICIN (1 << 7)
+#define PM8607_STATUS_ONKEY (1 << 8)
+#define PM8607_STATUS_EXTON (1 << 9)
+#define PM8607_STATUS_CHG (1 << 10)
+#define PM8607_STATUS_BAT (1 << 11)
+#define PM8607_STATUS_VBUS (1 << 12)
+#define PM8607_STATUS_OV (1 << 13)
+
+/* bit definitions of BUCK3 */
+#define PM8607_BUCK3_DOUBLE (1 << 6)
+
+/* bit definitions of Misc1 */
+#define PM8607_MISC1_PI2C (1 << 0)
+
+/* Interrupt Number in 88PM8607 */
+enum {
+ PM8607_IRQ_ONKEY = 0,
+ PM8607_IRQ_EXTON,
+ PM8607_IRQ_CHG,
+ PM8607_IRQ_BAT,
+ PM8607_IRQ_RTC,
+ PM8607_IRQ_VBAT = 8,
+ PM8607_IRQ_VCHG,
+ PM8607_IRQ_VSYS,
+ PM8607_IRQ_TINT,
+ PM8607_IRQ_GPADC0,
+ PM8607_IRQ_GPADC1,
+ PM8607_IRQ_GPADC2,
+ PM8607_IRQ_GPADC3,
+ PM8607_IRQ_AUDIO_SHORT = 16,
+ PM8607_IRQ_PEN,
+ PM8607_IRQ_HEADSET,
+ PM8607_IRQ_HOOK,
+ PM8607_IRQ_MICIN,
+ PM8607_IRQ_CHG_FAIL,
+ PM8607_IRQ_CHG_DONE,
+ PM8607_IRQ_CHG_FAULT,
+};
+
+enum {
+ PM8607_CHIP_A0 = 0x40,
+ PM8607_CHIP_A1 = 0x41,
+ PM8607_CHIP_B0 = 0x48,
+};
+
+
+struct pm8607_chip {
+ struct device *dev;
+ struct mutex io_lock;
+ struct i2c_client *client;
+
+ int (*read)(struct pm8607_chip *chip, int reg, int bytes, void *dest);
+ int (*write)(struct pm8607_chip *chip, int reg, int bytes, void *src);
+
+ int buck3_double; /* DVC ramp slope double */
+ unsigned char chip_id;
+
+};
+
+#define PM8607_MAX_REGULATOR 15 /* 3 Bucks, 12 LDOs */
+
+enum {
+ GI2C_PORT = 0,
+ PI2C_PORT,
+};
+
+struct pm8607_platform_data {
+ int i2c_port; /* Controlled by GI2C or PI2C */
+ struct regulator_init_data *regulator[PM8607_MAX_REGULATOR];
+};
+
+extern int pm8607_reg_read(struct pm8607_chip *, int);
+extern int pm8607_reg_write(struct pm8607_chip *, int, unsigned char);
+extern int pm8607_bulk_read(struct pm8607_chip *, int, int,
+ unsigned char *);
+extern int pm8607_bulk_write(struct pm8607_chip *, int, int,
+ unsigned char *);
+extern int pm8607_set_bits(struct pm8607_chip *, int, unsigned char,
+ unsigned char);
+#endif /* __LINUX_MFD_88PM8607_H */
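
Usage sketch (illustrative only; the register choice and helper name are made up) of the accessors declared above:

static int example_pm8607_enable_pi2c(struct pm8607_chip *chip)
{
	int ret;

	/* read-modify-write MISC1 to route the power I2C port */
	ret = pm8607_set_bits(chip, PM8607_MISC1,
			      PM8607_MISC1_PI2C, PM8607_MISC1_PI2C);
	if (ret < 0)
		return ret;

	return pm8607_reg_read(chip, PM8607_CHIP_ID);
}
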
diff --git a/include/linux/mfd/ab4500.h b/include/linux/mfd/ab4500.h
new file mode 100644
index 00000000000..a42a7033ae5
--- /dev/null
+++ b/include/linux/mfd/ab4500.h
@@ -0,0 +1,262 @@
+/*
+ * Copyright (C) 2009 ST-Ericsson
+ *
+ * Author: Srinidhi KASAGAR <srinidhi.kasagar@stericsson.com>
+ *
+ * This program is free software; you can redistribute it and/or modify
+ * it under the terms of the GNU General Public License version 2, as
+ * published by the Free Software Foundation.
+ *
+ * AB4500 device core functions, for client access
+ */
+#ifndef MFD_AB4500_H
+#define MFD_AB4500_H
+
+#include <linux/device.h>
+
+/*
+ * AB4500 bank addresses
+ */
+#define AB4500_SYS_CTRL1_BLOCK 0x1
+#define AB4500_SYS_CTRL2_BLOCK 0x2
+#define AB4500_REGU_CTRL1 0x3
+#define AB4500_REGU_CTRL2 0x4
+#define AB4500_USB 0x5
+#define AB4500_TVOUT 0x6
+#define AB4500_DBI 0x7
+#define AB4500_ECI_AV_ACC 0x8
+#define AB4500_RESERVED 0x9
+#define AB4500_GPADC 0xA
+#define AB4500_CHARGER 0xB
+#define AB4500_GAS_GAUGE 0xC
+#define AB4500_AUDIO 0xD
+#define AB4500_INTERRUPT 0xE
+#define AB4500_RTC 0xF
+#define AB4500_MISC 0x10
+#define AB4500_DEBUG 0x12
+#define AB4500_PROD_TEST 0x13
+#define AB4500_OTP_EMUL 0x15
+
+/*
+ * System control 1 register offsets.
+ * Bank = 0x01
+ */
+#define AB4500_TURNON_STAT_REG 0x0100
+#define AB4500_RESET_STAT_REG 0x0101
+#define AB4500_PONKEY1_PRESS_STAT_REG 0x0102
+
+#define AB4500_FSM_STAT1_REG 0x0140
+#define AB4500_FSM_STAT2_REG 0x0141
+#define AB4500_SYSCLK_REQ_STAT_REG 0x0142
+#define AB4500_USB_STAT1_REG 0x0143
+#define AB4500_USB_STAT2_REG 0x0144
+#define AB4500_STATUS_SPARE1_REG 0x0145
+#define AB4500_STATUS_SPARE2_REG 0x0146
+
+#define AB4500_CTRL1_REG 0x0180
+#define AB4500_CTRL2_REG 0x0181
+
+/*
+ * System control 2 register offsets.
+ * bank = 0x02
+ */
+#define AB4500_CTRL3_REG 0x0200
+#define AB4500_MAIN_WDOG_CTRL_REG 0x0201
+#define AB4500_MAIN_WDOG_TIMER_REG 0x0202
+#define AB4500_LOW_BAT_REG 0x0203
+#define AB4500_BATT_OK_REG 0x0204
+#define AB4500_SYSCLK_TIMER_REG 0x0205
+#define AB4500_SMPSCLK_CTRL_REG 0x0206
+#define AB4500_SMPSCLK_SEL1_REG 0x0207
+#define AB4500_SMPSCLK_SEL2_REG 0x0208
+#define AB4500_SMPSCLK_SEL3_REG 0x0209
+#define AB4500_SYSULPCLK_CONF_REG 0x020A
+#define AB4500_SYSULPCLK_CTRL1_REG 0x020B
+#define AB4500_SYSCLK_CTRL_REG 0x020C
+#define AB4500_SYSCLK_REQ1_VALID_REG 0x020D
+#define AB4500_SYSCLK_REQ_VALID_REG 0x020E
+#define AB4500_SYSCTRL_SPARE_REG 0x020F
+#define AB4500_PAD_CONF_REG 0x0210
+
+/*
+ * Regu control1 register offsets
+ * Bank = 0x03
+ */
+#define AB4500_REGU_SERIAL_CTRL1_REG 0x0300
+#define AB4500_REGU_SERIAL_CTRL2_REG 0x0301
+#define AB4500_REGU_SERIAL_CTRL3_REG 0x0302
+#define AB4500_REGU_REQ_CTRL1_REG 0x0303
+#define AB4500_REGU_REQ_CTRL2_REG 0x0304
+#define AB4500_REGU_REQ_CTRL3_REG 0x0305
+#define AB4500_REGU_REQ_CTRL4_REG 0x0306
+#define AB4500_REGU_MISC1_REG 0x0380
+#define AB4500_REGU_OTGSUPPLY_CTRL_REG 0x0381
+#define AB4500_REGU_VUSB_CTRL_REG 0x0382
+#define AB4500_REGU_VAUDIO_SUPPLY_REG 0x0383
+#define AB4500_REGU_CTRL1_SPARE_REG 0x0384
+
+/*
+ * Regu control2 Vmod register offsets
+ */
+#define AB4500_REGU_VMOD_REGU_REG 0x0440
+#define AB4500_REGU_VMOD_SEL1_REG 0x0441
+#define AB4500_REGU_VMOD_SEL2_REG 0x0442
+#define AB4500_REGU_CTRL_DISCH_REG 0x0443
+#define AB4500_REGU_CTRL_DISCH2_REG 0x0444
+
+/*
+ * USB/ULPI register offsets
+ * Bank : 0x5
+ */
+#define AB4500_USB_LINE_STAT_REG 0x0580
+#define AB4500_USB_LINE_CTRL1_REG 0x0581
+#define AB4500_USB_LINE_CTRL2_REG 0x0582
+#define AB4500_USB_LINE_CTRL3_REG 0x0583
+#define AB4500_USB_LINE_CTRL4_REG 0x0584
+#define AB4500_USB_LINE_CTRL5_REG 0x0585
+#define AB4500_USB_OTG_CTRL_REG 0x0587
+#define AB4500_USB_OTG_STAT_REG 0x0588
+#define AB4500_USB_CTRL_SPARE_REG 0x0589
+#define AB4500_USB_PHY_CTRL_REG 0x058A
+
+/*
+ * TVOUT / CTRL register offsets
+ * Bank : 0x06
+ */
+#define AB4500_TVOUT_CTRL_REG 0x0680
+
+/*
+ * DBI register offsets
+ * Bank : 0x07
+ */
+#define AB4500_DBI_REG1_REG 0x0700
+#define AB4500_DBI_REG2_REG 0x0701
+
+/*
+ * ECI register offsets
+ * Bank : 0x08
+ */
+#define AB4500_ECI_CTRL_REG 0x0800
+#define AB4500_ECI_HOOKLEVEL_REG 0x0801
+#define AB4500_ECI_DATAOUT_REG 0x0802
+#define AB4500_ECI_DATAIN_REG 0x0803
+
+/*
+ * AV Connector register offsets
+ * Bank : 0x08
+ */
+#define AB4500_AV_CONN_REG 0x0840
+
+/*
+ * Accessory detection register offsets
+ * Bank : 0x08
+ */
+#define AB4500_ACC_DET_DB1_REG 0x0880
+#define AB4500_ACC_DET_DB2_REG 0x0881
+
+/*
+ * GPADC register offsets
+ * Bank : 0x0A
+ */
+#define AB4500_GPADC_CTRL1_REG 0x0A00
+#define AB4500_GPADC_CTRL2_REG 0x0A01
+#define AB4500_GPADC_CTRL3_REG 0x0A02
+#define AB4500_GPADC_AUTO_TIMER_REG 0x0A03
+#define AB4500_GPADC_STAT_REG 0x0A04
+#define AB4500_GPADC_MANDATAL_REG 0x0A05
+#define AB4500_GPADC_MANDATAH_REG 0x0A06
+#define AB4500_GPADC_AUTODATAL_REG 0x0A07
+#define AB4500_GPADC_AUTODATAH_REG 0x0A08
+#define AB4500_GPADC_MUX_CTRL_REG 0x0A09
+
+/*
+ * Charger / status register offsets
+ * Bank : 0x0B
+ */
+#define AB4500_CH_STATUS1_REG 0x0B00
+#define AB4500_CH_STATUS2_REG 0x0B01
+#define AB4500_CH_USBCH_STAT1_REG 0x0B02
+#define AB4500_CH_USBCH_STAT2_REG 0x0B03
+#define AB4500_CH_FSM_STAT_REG 0x0B04
+#define AB4500_CH_STAT_REG 0x0B05
+
+/*
+ * Charger / control register offsets
+ * Bank : 0x0B
+ */
+#define AB4500_CH_VOLT_LVL_REG 0x0B40
+
+/*
+ * Charger / main control register offsets
+ * Bank : 0x0B
+ */
+#define AB4500_MCH_CTRL1 0x0B80
+#define AB4500_MCH_CTRL2 0x0B81
+#define AB4500_MCH_IPT_CURLVL_REG 0x0B82
+#define AB4500_CH_WD_REG 0x0B83
+
+/*
+ * Charger / USB control register offsets
+ * Bank : 0x0B
+ */
+#define AB4500_USBCH_CTRL1_REG 0x0BC0
+#define AB4500_USBCH_CTRL2_REG 0x0BC1
+#define AB4500_USBCH_IPT_CRNTLVL_REG 0x0BC2
+
+/*
+ * RTC bank register offsets
+ * Bank : 0xF
+ */
+#define AB4500_RTC_SOFF_STAT_REG 0x0F00
+#define AB4500_RTC_CC_CONF_REG 0x0F01
+#define AB4500_RTC_READ_REQ_REG 0x0F02
+#define AB4500_RTC_WATCH_TSECMID_REG 0x0F03
+#define AB4500_RTC_WATCH_TSECHI_REG 0x0F04
+#define AB4500_RTC_WATCH_TMIN_LOW_REG 0x0F05
+#define AB4500_RTC_WATCH_TMIN_MID_REG 0x0F06
+#define AB4500_RTC_WATCH_TMIN_HI_REG 0x0F07
+#define AB4500_RTC_ALRM_MIN_LOW_REG 0x0F08
+#define AB4500_RTC_ALRM_MIN_MID_REG 0x0F09
+#define AB4500_RTC_ALRM_MIN_HI_REG 0x0F0A
+#define AB4500_RTC_STAT_REG 0x0F0B
+#define AB4500_RTC_BKUP_CHG_REG 0x0F0C
+#define AB4500_RTC_FORCE_BKUP_REG 0x0F0D
+#define AB4500_RTC_CALIB_REG 0x0F0E
+#define AB4500_RTC_SWITCH_STAT_REG 0x0F0F
+
+/*
+ * PWM Out generators
+ * Bank: 0x10
+ */
+#define AB4500_PWM_OUT_CTRL1_REG 0x1060
+#define AB4500_PWM_OUT_CTRL2_REG 0x1061
+#define AB4500_PWM_OUT_CTRL3_REG 0x1062
+#define AB4500_PWM_OUT_CTRL4_REG 0x1063
+#define AB4500_PWM_OUT_CTRL5_REG 0x1064
+#define AB4500_PWM_OUT_CTRL6_REG 0x1065
+#define AB4500_PWM_OUT_CTRL7_REG 0x1066
+
+#define AB4500_I2C_PAD_CTRL_REG 0x1067
+#define AB4500_REV_REG 0x1080
+
+/**
+ * struct ab4500
+ * @spi: spi device structure
+ * @tx_buf: transmit buffer
+ * @rx_buf: receive buffer
+ * @lock: sync primitive
+ */
+struct ab4500 {
+ struct spi_device *spi;
+ unsigned long tx_buf[4];
+ unsigned long rx_buf[4];
+ struct mutex lock;
+};
+
+int ab4500_write(struct ab4500 *ab4500, unsigned char block,
+ unsigned long addr, unsigned char data);
+int ab4500_read(struct ab4500 *ab4500, unsigned char block,
+ unsigned long addr);
+
+#endif /* MFD_AB4500_H */
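
Client usage sketch (illustrative only; the register and helper name are made up) of the two accessors declared above:

static int example_ab4500_kick_watchdog(struct ab4500 *ab)
{
	int ret;

	/* write the main watchdog control, then read it back */
	ret = ab4500_write(ab, AB4500_SYS_CTRL2_BLOCK,
			   AB4500_MAIN_WDOG_CTRL_REG, 0x1);
	if (ret < 0)
		return ret;

	return ab4500_read(ab, AB4500_SYS_CTRL2_BLOCK,
			   AB4500_MAIN_WDOG_CTRL_REG);
}
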
diff --git a/include/linux/mfd/adp5520.h b/include/linux/mfd/adp5520.h
new file mode 100644
index 00000000000..ac37558a467
--- /dev/null
+++ b/include/linux/mfd/adp5520.h
@@ -0,0 +1,299 @@
+/*
+ * Definitions and platform data for Analog Devices
+ * ADP5520/ADP5501 MFD PMICs (Backlight, LED, GPIO and Keys)
+ *
+ * Copyright 2009 Analog Devices Inc.
+ *
+ * Licensed under the GPL-2 or later.
+ */
+
+
+#ifndef __LINUX_MFD_ADP5520_H
+#define __LINUX_MFD_ADP5520_H
+
+#define ID_ADP5520 5520
+#define ID_ADP5501 5501
+
+/*
+ * ADP5520/ADP5501 Register Map
+ */
+
+#define ADP5520_MODE_STATUS 0x00
+#define ADP5520_INTERRUPT_ENABLE 0x01
+#define ADP5520_BL_CONTROL 0x02
+#define ADP5520_BL_TIME 0x03
+#define ADP5520_BL_FADE 0x04
+#define ADP5520_DAYLIGHT_MAX 0x05
+#define ADP5520_DAYLIGHT_DIM 0x06
+#define ADP5520_OFFICE_MAX 0x07
+#define ADP5520_OFFICE_DIM 0x08
+#define ADP5520_DARK_MAX 0x09
+#define ADP5520_DARK_DIM 0x0A
+#define ADP5520_BL_VALUE 0x0B
+#define ADP5520_ALS_CMPR_CFG 0x0C
+#define ADP5520_L2_TRIP 0x0D
+#define ADP5520_L2_HYS 0x0E
+#define ADP5520_L3_TRIP 0x0F
+#define ADP5520_L3_HYS 0x10
+#define ADP5520_LED_CONTROL 0x11
+#define ADP5520_LED_TIME 0x12
+#define ADP5520_LED_FADE 0x13
+#define ADP5520_LED1_CURRENT 0x14
+#define ADP5520_LED2_CURRENT 0x15
+#define ADP5520_LED3_CURRENT 0x16
+
+/*
+ * ADP5520 Register Map
+ */
+
+#define ADP5520_GPIO_CFG_1 0x17
+#define ADP5520_GPIO_CFG_2 0x18
+#define ADP5520_GPIO_IN 0x19
+#define ADP5520_GPIO_OUT 0x1A
+#define ADP5520_GPIO_INT_EN 0x1B
+#define ADP5520_GPIO_INT_STAT 0x1C
+#define ADP5520_GPIO_INT_LVL 0x1D
+#define ADP5520_GPIO_DEBOUNCE 0x1E
+#define ADP5520_GPIO_PULLUP 0x1F
+#define ADP5520_KP_INT_STAT_1 0x20
+#define ADP5520_KP_INT_STAT_2 0x21
+#define ADP5520_KR_INT_STAT_1 0x22
+#define ADP5520_KR_INT_STAT_2 0x23
+#define ADP5520_KEY_STAT_1 0x24
+#define ADP5520_KEY_STAT_2 0x25
+
+/*
+ * MODE_STATUS bits
+ */
+
+#define ADP5520_nSTNBY (1 << 7)
+#define ADP5520_BL_EN (1 << 6)
+#define ADP5520_DIM_EN (1 << 5)
+#define ADP5520_OVP_INT (1 << 4)
+#define ADP5520_CMPR_INT (1 << 3)
+#define ADP5520_GPI_INT (1 << 2)
+#define ADP5520_KR_INT (1 << 1)
+#define ADP5520_KP_INT (1 << 0)
+
+/*
+ * INTERRUPT_ENABLE bits
+ */
+
+#define ADP5520_AUTO_LD_EN (1 << 4)
+#define ADP5520_CMPR_IEN (1 << 3)
+#define ADP5520_OVP_IEN (1 << 2)
+#define ADP5520_KR_IEN (1 << 1)
+#define ADP5520_KP_IEN (1 << 0)
+
+/*
+ * BL_CONTROL bits
+ */
+
+#define ADP5520_BL_LVL(x)		((x) << 5)
+#define ADP5520_BL_LAW(x)		((x) << 4)
+#define ADP5520_BL_AUTO_ADJ (1 << 3)
+#define ADP5520_OVP_EN (1 << 2)
+#define ADP5520_FOVR (1 << 1)
+#define ADP5520_KP_BL_EN (1 << 0)
+
+/*
+ * ALS_CMPR_CFG bits
+ */
+
+#define ADP5520_L3_OUT (1 << 3)
+#define ADP5520_L2_OUT (1 << 2)
+#define ADP5520_L3_EN (1 << 1)
+
+#define ADP5020_MAX_BRIGHTNESS 0x7F
+
+#define FADE_VAL(in, out) ((0xF & (in)) | ((0xF & (out)) << 4))
+#define BL_CTRL_VAL(law, auto) (((1 & (auto)) << 3) | ((0x3 & (law)) << 4))
+#define ALS_CMPR_CFG_VAL(filt, l3_en) (((0x7 & filt) << 5) | l3_en)
+
+/*
+ * LEDs subdevice bits and masks
+ */
+
+#define ADP5520_01_MAXLEDS 3
+
+#define ADP5520_FLAG_LED_MASK 0x3
+#define ADP5520_FLAG_OFFT_SHIFT 8
+#define ADP5520_FLAG_OFFT_MASK 0x3
+
+#define ADP5520_R3_MODE (1 << 5)
+#define ADP5520_C3_MODE (1 << 4)
+#define ADP5520_LED_LAW (1 << 3)
+#define ADP5520_LED3_EN (1 << 2)
+#define ADP5520_LED2_EN (1 << 1)
+#define ADP5520_LED1_EN (1 << 0)
+
+/*
+ * GPIO subdevice bits and masks
+ */
+
+#define ADP5520_MAXGPIOS 8
+
+#define ADP5520_GPIO_C3 (1 << 7) /* LED2 or GPIO7 aka C3 */
+#define ADP5520_GPIO_C2 (1 << 6)
+#define ADP5520_GPIO_C1 (1 << 5)
+#define ADP5520_GPIO_C0 (1 << 4)
+#define ADP5520_GPIO_R3 (1 << 3) /* LED3 or GPIO3 aka R3 */
+#define ADP5520_GPIO_R2 (1 << 2)
+#define ADP5520_GPIO_R1 (1 << 1)
+#define ADP5520_GPIO_R0 (1 << 0)
+
+struct adp5520_gpio_platform_data {
+ unsigned gpio_start;
+ u8 gpio_en_mask;
+ u8 gpio_pullup_mask;
+};
+
+/*
+ * Keypad subdevice bits and masks
+ */
+
+#define ADP5520_MAXKEYS 16
+
+#define ADP5520_COL_C3 (1 << 7) /* LED2 or GPIO7 aka C3 */
+#define ADP5520_COL_C2 (1 << 6)
+#define ADP5520_COL_C1 (1 << 5)
+#define ADP5520_COL_C0 (1 << 4)
+#define ADP5520_ROW_R3 (1 << 3) /* LED3 or GPIO3 aka R3 */
+#define ADP5520_ROW_R2 (1 << 2)
+#define ADP5520_ROW_R1 (1 << 1)
+#define ADP5520_ROW_R0 (1 << 0)
+
+#define ADP5520_KEY(row, col) (col + row * 4)
+#define ADP5520_KEYMAPSIZE ADP5520_MAXKEYS
+
+struct adp5520_keys_platform_data {
+	int rows_en_mask;		/* Rows enable mask */
+	int cols_en_mask;		/* Columns enable mask */
+ const unsigned short *keymap; /* Pointer to keymap */
+ unsigned short keymapsize; /* Keymap size */
+ unsigned repeat:1; /* Enable key repeat */
+};
+
+
+/*
+ * LEDs subdevice platform data
+ */
+
+#define FLAG_ID_ADP5520_LED1_ADP5501_LED0 1 /* ADP5520 PIN ILED */
+#define FLAG_ID_ADP5520_LED2_ADP5501_LED1 2 /* ADP5520 PIN C3 */
+#define FLAG_ID_ADP5520_LED3_ADP5501_LED2 3 /* ADP5520 PIN R3 */
+
+#define ADP5520_LED_DIS_BLINK (0 << ADP5520_FLAG_OFFT_SHIFT)
+#define ADP5520_LED_OFFT_600ms (1 << ADP5520_FLAG_OFFT_SHIFT)
+#define ADP5520_LED_OFFT_800ms (2 << ADP5520_FLAG_OFFT_SHIFT)
+#define ADP5520_LED_OFFT_1200ms (3 << ADP5520_FLAG_OFFT_SHIFT)
+
+#define ADP5520_LED_ONT_200ms 0
+#define ADP5520_LED_ONT_600ms 1
+#define ADP5520_LED_ONT_800ms 2
+#define ADP5520_LED_ONT_1200ms 3
+
+struct adp5520_leds_platform_data {
+ int num_leds;
+ struct led_info *leds;
+ u8 fade_in; /* Backlight Fade-In Timer */
+ u8 fade_out; /* Backlight Fade-Out Timer */
+ u8 led_on_time;
+};
+
+/*
+ * Backlight subdevice platform data
+ */
+
+#define ADP5520_FADE_T_DIS 0 /* Fade Timer Disabled */
+#define ADP5520_FADE_T_300ms 1 /* 0.3 Sec */
+#define ADP5520_FADE_T_600ms 2
+#define ADP5520_FADE_T_900ms 3
+#define ADP5520_FADE_T_1200ms 4
+#define ADP5520_FADE_T_1500ms 5
+#define ADP5520_FADE_T_1800ms 6
+#define ADP5520_FADE_T_2100ms 7
+#define ADP5520_FADE_T_2400ms 8
+#define ADP5520_FADE_T_2700ms 9
+#define ADP5520_FADE_T_3000ms 10
+#define ADP5520_FADE_T_3500ms 11
+#define ADP5520_FADE_T_4000ms 12
+#define ADP5520_FADE_T_4500ms 13
+#define ADP5520_FADE_T_5000ms 14
+#define ADP5520_FADE_T_5500ms 15 /* 5.5 Sec */
+
+#define ADP5520_BL_LAW_LINEAR 0
+#define ADP5520_BL_LAW_SQUARE 1
+#define ADP5520_BL_LAW_CUBIC1 2
+#define ADP5520_BL_LAW_CUBIC2 3
+
+#define ADP5520_BL_AMBL_FILT_80ms 0 /* Light sensor filter time */
+#define ADP5520_BL_AMBL_FILT_160ms 1
+#define ADP5520_BL_AMBL_FILT_320ms 2
+#define ADP5520_BL_AMBL_FILT_640ms 3
+#define ADP5520_BL_AMBL_FILT_1280ms 4
+#define ADP5520_BL_AMBL_FILT_2560ms 5
+#define ADP5520_BL_AMBL_FILT_5120ms 6
+#define ADP5520_BL_AMBL_FILT_10240ms 7 /* 10.24 sec */
+
+ /*
+ * Backlight current 0..30mA
+ */
+#define ADP5520_BL_CUR_mA(I) ((I * 127) / 30)
+
+ /*
+ * L2 comparator current 0..1000uA
+ */
+#define ADP5520_L2_COMP_CURR_uA(I) ((I * 255) / 1000)
+
+ /*
+ * L3 comparator current 0..127uA
+ */
+#define ADP5520_L3_COMP_CURR_uA(I) ((I * 255) / 127)
+
+struct adp5520_backlight_platform_data {
+ u8 fade_in; /* Backlight Fade-In Timer */
+ u8 fade_out; /* Backlight Fade-Out Timer */
+ u8 fade_led_law; /* fade-on/fade-off transfer characteristic */
+
+ u8 en_ambl_sens; /* 1 = enable ambient light sensor */
+ u8 abml_filt; /* Light sensor filter time */
+ u8 l1_daylight_max; /* use BL_CUR_mA(I) 0 <= I <= 30 mA */
+ u8 l1_daylight_dim; /* typ = 0, use BL_CUR_mA(I) 0 <= I <= 30 mA */
+ u8 l2_office_max; /* use BL_CUR_mA(I) 0 <= I <= 30 mA */
+ u8 l2_office_dim; /* typ = 0, use BL_CUR_mA(I) 0 <= I <= 30 mA */
+ u8 l3_dark_max; /* use BL_CUR_mA(I) 0 <= I <= 30 mA */
+ u8 l3_dark_dim; /* typ = 0, use BL_CUR_mA(I) 0 <= I <= 30 mA */
+ u8 l2_trip; /* use L2_COMP_CURR_uA(I) 0 <= I <= 1000 uA */
+ u8 l2_hyst; /* use L2_COMP_CURR_uA(I) 0 <= I <= 1000 uA */
+ u8 l3_trip; /* use L3_COMP_CURR_uA(I) 0 <= I <= 127 uA */
+ u8 l3_hyst; /* use L3_COMP_CURR_uA(I) 0 <= I <= 127 uA */
+};
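
Platform-data sketch (current and trip values are placeholders for illustration) using the conversion macros documented in the comments above:

static struct adp5520_backlight_platform_data example_adp5520_bl = {
	.fade_in	 = ADP5520_FADE_T_600ms,
	.fade_out	 = ADP5520_FADE_T_600ms,
	.fade_led_law	 = ADP5520_BL_LAW_LINEAR,
	.en_ambl_sens	 = 1,
	.abml_filt	 = ADP5520_BL_AMBL_FILT_640ms,
	.l1_daylight_max = ADP5520_BL_CUR_mA(15),	/* 15 mA */
	.l2_office_max	 = ADP5520_BL_CUR_mA(7),
	.l3_dark_max	 = ADP5520_BL_CUR_mA(3),
	.l2_trip	 = ADP5520_L2_COMP_CURR_uA(400),
	.l2_hyst	 = ADP5520_L2_COMP_CURR_uA(50),
	.l3_trip	 = ADP5520_L3_COMP_CURR_uA(60),
	.l3_hyst	 = ADP5520_L3_COMP_CURR_uA(10),
};
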
+
+/*
+ * MFD chip platform data
+ */
+
+struct adp5520_platform_data {
+ struct adp5520_keys_platform_data *keys;
+ struct adp5520_gpio_platform_data *gpio;
+ struct adp5520_leds_platform_data *leds;
+ struct adp5520_backlight_platform_data *backlight;
+};
+
+/*
+ * MFD chip functions
+ */
+
+extern int adp5520_read(struct device *dev, int reg, uint8_t *val);
+extern int adp5520_write(struct device *dev, int reg, u8 val);
+extern int adp5520_clr_bits(struct device *dev, int reg, uint8_t bit_mask);
+extern int adp5520_set_bits(struct device *dev, int reg, uint8_t bit_mask);
+
+extern int adp5520_register_notifier(struct device *dev,
+ struct notifier_block *nb, unsigned int events);
+
+extern int adp5520_unregister_notifier(struct device *dev,
+ struct notifier_block *nb, unsigned int events);
+
+#endif /* __LINUX_MFD_ADP5520_H */
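
Subdevice sketch (illustrative only; the event mask and handler body are made up) using the register helpers and notifier interface above:

static int example_adp5520_notifier(struct notifier_block *nb,
				    unsigned long events, void *unused)
{
	/* called by the core when a keypad interrupt fires */
	return 0;
}

static struct notifier_block example_nb = {
	.notifier_call = example_adp5520_notifier,
};

static int example_adp5520_setup(struct device *master)
{
	int ret;

	/* enable keypress interrupts, then subscribe to them */
	ret = adp5520_set_bits(master, ADP5520_INTERRUPT_ENABLE,
			       ADP5520_KP_IEN | ADP5520_KR_IEN);
	if (ret)
		return ret;

	return adp5520_register_notifier(master, &example_nb,
					 ADP5520_KP_INT | ADP5520_KR_INT);
}
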
diff --git a/include/linux/mfd/ezx-pcap.h b/include/linux/mfd/ezx-pcap.h
index 3402042ddc3..40c372165f3 100644
--- a/include/linux/mfd/ezx-pcap.h
+++ b/include/linux/mfd/ezx-pcap.h
@@ -231,9 +231,6 @@ void pcap_set_ts_bits(struct pcap_chip *, u32);
#define PCAP_LED_4MA 1
#define PCAP_LED_5MA 2
#define PCAP_LED_9MA 3
-#define PCAP_LED_GPIO_VAL_MASK 0x00ffffff
-#define PCAP_LED_GPIO_EN 0x01000000
-#define PCAP_LED_GPIO_INVERT 0x02000000
#define PCAP_LED_T_MASK 0xf
#define PCAP_LED_C_MASK 0x3
#define PCAP_BL_MASK 0x1f
diff --git a/include/linux/mfd/mc13783-private.h b/include/linux/mfd/mc13783-private.h
index 47e698cb0f1..95cf9360553 100644
--- a/include/linux/mfd/mc13783-private.h
+++ b/include/linux/mfd/mc13783-private.h
@@ -24,52 +24,23 @@
#include <linux/platform_device.h>
#include <linux/mfd/mc13783.h>
-#include <linux/workqueue.h>
#include <linux/mutex.h>
-
-struct mc13783_irq {
- void (*handler)(int, void *);
- void *data;
-};
-
-#define MC13783_NUM_IRQ 2
-#define MC13783_IRQ_TS 0
-#define MC13783_IRQ_REGULATOR 1
-
-#define MC13783_ADC_MODE_TS 1
-#define MC13783_ADC_MODE_SINGLE_CHAN 2
-#define MC13783_ADC_MODE_MULT_CHAN 3
+#include <linux/interrupt.h>
struct mc13783 {
- int revision;
- struct device *dev;
- struct spi_device *spi_device;
-
- int (*read_dev)(void *data, char reg, int count, u32 *dst);
- int (*write_dev)(void *data, char reg, int count, const u32 *src);
-
- struct mutex io_lock;
- void *io_data;
+ struct spi_device *spidev;
+ struct mutex lock;
int irq;
- unsigned int flags;
+ int flags;
- struct mc13783_irq irq_handler[MC13783_NUM_IRQ];
- struct work_struct work;
- struct completion adc_done;
- unsigned int ts_active;
- struct mutex adc_conv_lock;
+ irq_handler_t irqhandler[MC13783_NUM_IRQ];
+ void *irqdata[MC13783_NUM_IRQ];
+ /* XXX these should go as platformdata to the regulator subdevice */
struct mc13783_regulator_init_data *regulators;
int num_regulators;
};
-int mc13783_reg_read(struct mc13783 *, int reg_num, u32 *);
-int mc13783_reg_write(struct mc13783 *, int, u32);
-int mc13783_set_bits(struct mc13783 *, int, u32, u32);
-int mc13783_free_irq(struct mc13783 *mc13783, int irq);
-int mc13783_register_irq(struct mc13783 *mc13783, int irq,
- void (*handler) (int, void *), void *data);
-
#define MC13783_REG_INTERRUPT_STATUS_0 0
#define MC13783_REG_INTERRUPT_MASK_0 1
#define MC13783_REG_INTERRUPT_SENSE_0 2
@@ -136,55 +107,6 @@ int mc13783_register_irq(struct mc13783 *mc13783, int irq,
#define MC13783_REG_TEST_3 63
#define MC13783_REG_NB 64
-
-/*
- * Interrupt Status
- */
-#define MC13783_INT_STAT_ADCDONEI (1 << 0)
-#define MC13783_INT_STAT_ADCBISDONEI (1 << 1)
-#define MC13783_INT_STAT_TSI (1 << 2)
-#define MC13783_INT_STAT_WHIGHI (1 << 3)
-#define MC13783_INT_STAT_WLOWI (1 << 4)
-#define MC13783_INT_STAT_CHGDETI (1 << 6)
-#define MC13783_INT_STAT_CHGOVI (1 << 7)
-#define MC13783_INT_STAT_CHGREVI (1 << 8)
-#define MC13783_INT_STAT_CHGSHORTI (1 << 9)
-#define MC13783_INT_STAT_CCCVI (1 << 10)
-#define MC13783_INT_STAT_CHGCURRI (1 << 11)
-#define MC13783_INT_STAT_BPONI (1 << 12)
-#define MC13783_INT_STAT_LOBATLI (1 << 13)
-#define MC13783_INT_STAT_LOBATHI (1 << 14)
-#define MC13783_INT_STAT_UDPI (1 << 15)
-#define MC13783_INT_STAT_USBI (1 << 16)
-#define MC13783_INT_STAT_IDI (1 << 19)
-#define MC13783_INT_STAT_Unused (1 << 20)
-#define MC13783_INT_STAT_SE1I (1 << 21)
-#define MC13783_INT_STAT_CKDETI (1 << 22)
-#define MC13783_INT_STAT_UDMI (1 << 23)
-
-/*
- * Interrupt Mask
- */
-#define MC13783_INT_MASK_ADCDONEM (1 << 0)
-#define MC13783_INT_MASK_ADCBISDONEM (1 << 1)
-#define MC13783_INT_MASK_TSM (1 << 2)
-#define MC13783_INT_MASK_WHIGHM (1 << 3)
-#define MC13783_INT_MASK_WLOWM (1 << 4)
-#define MC13783_INT_MASK_CHGDETM (1 << 6)
-#define MC13783_INT_MASK_CHGOVM (1 << 7)
-#define MC13783_INT_MASK_CHGREVM (1 << 8)
-#define MC13783_INT_MASK_CHGSHORTM (1 << 9)
-#define MC13783_INT_MASK_CCCVM (1 << 10)
-#define MC13783_INT_MASK_CHGCURRM (1 << 11)
-#define MC13783_INT_MASK_BPONM (1 << 12)
-#define MC13783_INT_MASK_LOBATLM (1 << 13)
-#define MC13783_INT_MASK_LOBATHM (1 << 14)
-#define MC13783_INT_MASK_UDPM (1 << 15)
-#define MC13783_INT_MASK_USBM (1 << 16)
-#define MC13783_INT_MASK_IDM (1 << 19)
-#define MC13783_INT_MASK_SE1M (1 << 21)
-#define MC13783_INT_MASK_CKDETM (1 << 22)
-
/*
* Reg Regulator Mode 0
*/
@@ -284,113 +206,15 @@ int mc13783_register_irq(struct mc13783 *mc13783, int irq,
#define MC13783_SWCTRL_SW3_STBY (1 << 21)
#define MC13783_SWCTRL_SW3_MODE (1 << 22)
-/*
- * ADC/Touch
- */
-#define MC13783_ADC0_LICELLCON (1 << 0)
-#define MC13783_ADC0_CHRGICON (1 << 1)
-#define MC13783_ADC0_BATICON (1 << 2)
-#define MC13783_ADC0_RTHEN (1 << 3)
-#define MC13783_ADC0_DTHEN (1 << 4)
-#define MC13783_ADC0_UIDEN (1 << 5)
-#define MC13783_ADC0_ADOUTEN (1 << 6)
-#define MC13783_ADC0_ADOUTPER (1 << 7)
-#define MC13783_ADC0_ADREFEN (1 << 10)
-#define MC13783_ADC0_ADREFMODE (1 << 11)
-#define MC13783_ADC0_TSMOD0 (1 << 12)
-#define MC13783_ADC0_TSMOD1 (1 << 13)
-#define MC13783_ADC0_TSMOD2 (1 << 14)
-#define MC13783_ADC0_CHRGRAWDIV (1 << 15)
-#define MC13783_ADC0_ADINC1 (1 << 16)
-#define MC13783_ADC0_ADINC2 (1 << 17)
-#define MC13783_ADC0_WCOMP (1 << 18)
-#define MC13783_ADC0_ADCBIS0 (1 << 23)
-
-#define MC13783_ADC1_ADEN (1 << 0)
-#define MC13783_ADC1_RAND (1 << 1)
-#define MC13783_ADC1_ADSEL (1 << 3)
-#define MC13783_ADC1_TRIGMASK (1 << 4)
-#define MC13783_ADC1_ADA10 (1 << 5)
-#define MC13783_ADC1_ADA11 (1 << 6)
-#define MC13783_ADC1_ADA12 (1 << 7)
-#define MC13783_ADC1_ADA20 (1 << 8)
-#define MC13783_ADC1_ADA21 (1 << 9)
-#define MC13783_ADC1_ADA22 (1 << 10)
-#define MC13783_ADC1_ATO0 (1 << 11)
-#define MC13783_ADC1_ATO1 (1 << 12)
-#define MC13783_ADC1_ATO2 (1 << 13)
-#define MC13783_ADC1_ATO3 (1 << 14)
-#define MC13783_ADC1_ATO4 (1 << 15)
-#define MC13783_ADC1_ATO5 (1 << 16)
-#define MC13783_ADC1_ATO6 (1 << 17)
-#define MC13783_ADC1_ATO7 (1 << 18)
-#define MC13783_ADC1_ATOX (1 << 19)
-#define MC13783_ADC1_ASC (1 << 20)
-#define MC13783_ADC1_ADTRIGIGN (1 << 21)
-#define MC13783_ADC1_ADONESHOT (1 << 22)
-#define MC13783_ADC1_ADCBIS1 (1 << 23)
-
-#define MC13783_ADC1_CHAN0_SHIFT 5
-#define MC13783_ADC1_CHAN1_SHIFT 8
-
-#define MC13783_ADC2_ADD10 (1 << 2)
-#define MC13783_ADC2_ADD11 (1 << 3)
-#define MC13783_ADC2_ADD12 (1 << 4)
-#define MC13783_ADC2_ADD13 (1 << 5)
-#define MC13783_ADC2_ADD14 (1 << 6)
-#define MC13783_ADC2_ADD15 (1 << 7)
-#define MC13783_ADC2_ADD16 (1 << 8)
-#define MC13783_ADC2_ADD17 (1 << 9)
-#define MC13783_ADC2_ADD18 (1 << 10)
-#define MC13783_ADC2_ADD19 (1 << 11)
-#define MC13783_ADC2_ADD20 (1 << 14)
-#define MC13783_ADC2_ADD21 (1 << 15)
-#define MC13783_ADC2_ADD22 (1 << 16)
-#define MC13783_ADC2_ADD23 (1 << 17)
-#define MC13783_ADC2_ADD24 (1 << 18)
-#define MC13783_ADC2_ADD25 (1 << 19)
-#define MC13783_ADC2_ADD26 (1 << 20)
-#define MC13783_ADC2_ADD27 (1 << 21)
-#define MC13783_ADC2_ADD28 (1 << 22)
-#define MC13783_ADC2_ADD29 (1 << 23)
+static inline int mc13783_set_bits(struct mc13783 *mc13783, unsigned int offset,
+ u32 mask, u32 val)
+{
+ int ret;
+ mc13783_lock(mc13783);
+ ret = mc13783_reg_rmw(mc13783, offset, mask, val);
+ mc13783_unlock(mc13783);
-#define MC13783_ADC3_WHIGH0 (1 << 0)
-#define MC13783_ADC3_WHIGH1 (1 << 1)
-#define MC13783_ADC3_WHIGH2 (1 << 2)
-#define MC13783_ADC3_WHIGH3 (1 << 3)
-#define MC13783_ADC3_WHIGH4 (1 << 4)
-#define MC13783_ADC3_WHIGH5 (1 << 5)
-#define MC13783_ADC3_ICID0 (1 << 6)
-#define MC13783_ADC3_ICID1 (1 << 7)
-#define MC13783_ADC3_ICID2 (1 << 8)
-#define MC13783_ADC3_WLOW0 (1 << 9)
-#define MC13783_ADC3_WLOW1 (1 << 10)
-#define MC13783_ADC3_WLOW2 (1 << 11)
-#define MC13783_ADC3_WLOW3 (1 << 12)
-#define MC13783_ADC3_WLOW4 (1 << 13)
-#define MC13783_ADC3_WLOW5 (1 << 14)
-#define MC13783_ADC3_ADCBIS2 (1 << 23)
-
-#define MC13783_ADC4_ADDBIS10 (1 << 2)
-#define MC13783_ADC4_ADDBIS11 (1 << 3)
-#define MC13783_ADC4_ADDBIS12 (1 << 4)
-#define MC13783_ADC4_ADDBIS13 (1 << 5)
-#define MC13783_ADC4_ADDBIS14 (1 << 6)
-#define MC13783_ADC4_ADDBIS15 (1 << 7)
-#define MC13783_ADC4_ADDBIS16 (1 << 8)
-#define MC13783_ADC4_ADDBIS17 (1 << 9)
-#define MC13783_ADC4_ADDBIS18 (1 << 10)
-#define MC13783_ADC4_ADDBIS19 (1 << 11)
-#define MC13783_ADC4_ADDBIS20 (1 << 14)
-#define MC13783_ADC4_ADDBIS21 (1 << 15)
-#define MC13783_ADC4_ADDBIS22 (1 << 16)
-#define MC13783_ADC4_ADDBIS23 (1 << 17)
-#define MC13783_ADC4_ADDBIS24 (1 << 18)
-#define MC13783_ADC4_ADDBIS25 (1 << 19)
-#define MC13783_ADC4_ADDBIS26 (1 << 20)
-#define MC13783_ADC4_ADDBIS27 (1 << 21)
-#define MC13783_ADC4_ADDBIS28 (1 << 22)
-#define MC13783_ADC4_ADDBIS29 (1 << 23)
+ return ret;
+}
#endif /* __LINUX_MFD_MC13783_PRIV_H */
-
diff --git a/include/linux/mfd/mc13783.h b/include/linux/mfd/mc13783.h
index b3a2a724357..35680409b8c 100644
--- a/include/linux/mfd/mc13783.h
+++ b/include/linux/mfd/mc13783.h
@@ -1,28 +1,50 @@
/*
- * Copyright 2009 Pengutronix, Sascha Hauer <s.hauer@pengutronix.de>
+ * Copyright 2009 Pengutronix
+ * Uwe Kleine-Koenig <u.kleine-koenig@pengutronix.de>
*
- * Initial development of this code was funded by
- * Phytec Messtechnik GmbH, http://www.phytec.de
- *
- * This program is free software; you can redistribute it and/or modify
- * it under the terms of the GNU General Public License as published by
- * the Free Software Foundation; either version 2 of the License, or
- * (at your option) any later version.
- *
- * This program is distributed in the hope that it will be useful,
- * but WITHOUT ANY WARRANTY; without even the implied warranty of
- * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
- * GNU General Public License for more details.
- *
- * You should have received a copy of the GNU General Public License
- * along with this program; if not, write to the Free Software
- * Foundation, Inc., 675 Mass Ave, Cambridge, MA 02139, USA.
+ * This program is free software; you can redistribute it and/or modify it under
+ * the terms of the GNU General Public License version 2 as published by the
+ * Free Software Foundation.
*/
+#ifndef __LINUX_MFD_MC13783_H
+#define __LINUX_MFD_MC13783_H
-#ifndef __INCLUDE_LINUX_MFD_MC13783_H
-#define __INCLUDE_LINUX_MFD_MC13783_H
+#include <linux/interrupt.h>
struct mc13783;
+
+void mc13783_lock(struct mc13783 *mc13783);
+void mc13783_unlock(struct mc13783 *mc13783);
+
+int mc13783_reg_read(struct mc13783 *mc13783, unsigned int offset, u32 *val);
+int mc13783_reg_write(struct mc13783 *mc13783, unsigned int offset, u32 val);
+int mc13783_reg_rmw(struct mc13783 *mc13783, unsigned int offset,
+ u32 mask, u32 val);
+
+int mc13783_irq_request(struct mc13783 *mc13783, int irq,
+ irq_handler_t handler, const char *name, void *dev);
+int mc13783_irq_request_nounmask(struct mc13783 *mc13783, int irq,
+ irq_handler_t handler, const char *name, void *dev);
+int mc13783_irq_free(struct mc13783 *mc13783, int irq, void *dev);
+int mc13783_ackirq(struct mc13783 *mc13783, int irq);
+
+int mc13783_mask(struct mc13783 *mc13783, int irq);
+int mc13783_unmask(struct mc13783 *mc13783, int irq);
+
+#define MC13783_ADC0 43
+#define MC13783_ADC0_ADREFEN (1 << 10)
+#define MC13783_ADC0_ADREFMODE (1 << 11)
+#define MC13783_ADC0_TSMOD0 (1 << 12)
+#define MC13783_ADC0_TSMOD1 (1 << 13)
+#define MC13783_ADC0_TSMOD2 (1 << 14)
+#define MC13783_ADC0_ADINC1 (1 << 16)
+#define MC13783_ADC0_ADINC2 (1 << 17)
+
+#define MC13783_ADC0_TSMOD_MASK (MC13783_ADC0_TSMOD0 | \
+ MC13783_ADC0_TSMOD1 | \
+ MC13783_ADC0_TSMOD2)
+
+/* to be cleaned up */
struct regulator_init_data;
struct mc13783_regulator_init_data {
@@ -30,23 +52,30 @@ struct mc13783_regulator_init_data {
struct regulator_init_data *init_data;
};
-struct mc13783_platform_data {
- struct mc13783_regulator_init_data *regulators;
+struct mc13783_regulator_platform_data {
int num_regulators;
- unsigned int flags;
+ struct mc13783_regulator_init_data *regulators;
};
-/* mc13783_platform_data flags */
+struct mc13783_platform_data {
+ int num_regulators;
+ struct mc13783_regulator_init_data *regulators;
+
#define MC13783_USE_TOUCHSCREEN (1 << 0)
#define MC13783_USE_CODEC (1 << 1)
#define MC13783_USE_ADC (1 << 2)
#define MC13783_USE_RTC (1 << 3)
#define MC13783_USE_REGULATOR (1 << 4)
+ unsigned int flags;
+};
+
+#define MC13783_ADC_MODE_TS 1
+#define MC13783_ADC_MODE_SINGLE_CHAN 2
+#define MC13783_ADC_MODE_MULT_CHAN 3
int mc13783_adc_do_conversion(struct mc13783 *mc13783, unsigned int mode,
unsigned int channel, unsigned int *sample);
-void mc13783_adc_set_ts_status(struct mc13783 *mc13783, unsigned int status);
#define MC13783_SW_SW1A 0
#define MC13783_SW_SW1B 1
@@ -80,5 +109,46 @@ void mc13783_adc_set_ts_status(struct mc13783 *mc13783, unsigned int status);
#define MC13783_REGU_V3 29
#define MC13783_REGU_V4 30
-#endif /* __INCLUDE_LINUX_MFD_MC13783_H */
+#define MC13783_IRQ_ADCDONE 0
+#define MC13783_IRQ_ADCBISDONE 1
+#define MC13783_IRQ_TS 2
+#define MC13783_IRQ_WHIGH 3
+#define MC13783_IRQ_WLOW 4
+#define MC13783_IRQ_CHGDET 6
+#define MC13783_IRQ_CHGOV 7
+#define MC13783_IRQ_CHGREV 8
+#define MC13783_IRQ_CHGSHORT 9
+#define MC13783_IRQ_CCCV 10
+#define MC13783_IRQ_CHGCURR 11
+#define MC13783_IRQ_BPON 12
+#define MC13783_IRQ_LOBATL 13
+#define MC13783_IRQ_LOBATH 14
+#define MC13783_IRQ_UDP 15
+#define MC13783_IRQ_USB 16
+#define MC13783_IRQ_ID 19
+#define MC13783_IRQ_SE1 21
+#define MC13783_IRQ_CKDET 22
+#define MC13783_IRQ_UDM 23
+#define MC13783_IRQ_1HZ 24
+#define MC13783_IRQ_TODA 25
+#define MC13783_IRQ_ONOFD1 27
+#define MC13783_IRQ_ONOFD2 28
+#define MC13783_IRQ_ONOFD3 29
+#define MC13783_IRQ_SYSRST 30
+#define MC13783_IRQ_RTCRST 31
+#define MC13783_IRQ_PC 32
+#define MC13783_IRQ_WARM 33
+#define MC13783_IRQ_MEMHLD 34
+#define MC13783_IRQ_PWRRDY 35
+#define MC13783_IRQ_THWARNL 36
+#define MC13783_IRQ_THWARNH 37
+#define MC13783_IRQ_CLK 38
+#define MC13783_IRQ_SEMAF 39
+#define MC13783_IRQ_MC2B 41
+#define MC13783_IRQ_HSDET 42
+#define MC13783_IRQ_HSL 43
+#define MC13783_IRQ_ALSPTH 44
+#define MC13783_IRQ_AHSSHORT 45
+#define MC13783_NUM_IRQ 46
+#endif /* __LINUX_MFD_MC13783_H */
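
Subdevice sketch (illustrative only; handler and register choice are made up) of the locked register access and IRQ request API declared above:

static irqreturn_t example_adcdone_irq(int irq, void *data)
{
	struct mc13783 *mc13783 = data;

	mc13783_ackirq(mc13783, irq);
	return IRQ_HANDLED;
}

static int example_mc13783_setup(struct mc13783 *mc13783)
{
	u32 adc0;
	int ret;

	mc13783_lock(mc13783);
	ret = mc13783_reg_read(mc13783, MC13783_ADC0, &adc0);
	mc13783_unlock(mc13783);
	if (ret)
		return ret;

	return mc13783_irq_request(mc13783, MC13783_IRQ_ADCDONE,
				   example_adcdone_irq, "example-adc",
				   mc13783);
}
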
diff --git a/include/linux/mfd/pcf50633/core.h b/include/linux/mfd/pcf50633/core.h
index 9aba7b779fb..3398bd9aab1 100644
--- a/include/linux/mfd/pcf50633/core.h
+++ b/include/linux/mfd/pcf50633/core.h
@@ -29,7 +29,12 @@ struct pcf50633_platform_data {
char **batteries;
int num_batteries;
- int charging_restart_interval;
+ /*
+ * Should be set accordingly to the reference resistor used, see
+ * I_{ch(ref)} charger reference current in the pcf50633 User
+ * Manual.
+ */
+ int charger_reference_current_ma;
/* Callbacks */
void (*probe_done)(struct pcf50633 *);
@@ -40,10 +45,6 @@ struct pcf50633_platform_data {
u8 resumers[5];
};
-struct pcf50633_subdev_pdata {
- struct pcf50633 *pcf;
-};
-
struct pcf50633_irq {
void (*handler) (int, void *);
void *data;
@@ -217,5 +218,9 @@ enum pcf50633_reg_int5 {
#define PCF50633_REG_LEDCTL 0x2a
#define PCF50633_REG_LEDDIM 0x2b
-#endif
+static inline struct pcf50633 *dev_to_pcf50633(struct device *dev)
+{
+ return dev_get_drvdata(dev);
+}
+#endif
diff --git a/include/linux/mfd/pcf50633/mbc.h b/include/linux/mfd/pcf50633/mbc.h
index 4119579acf2..df4f5fa88de 100644
--- a/include/linux/mfd/pcf50633/mbc.h
+++ b/include/linux/mfd/pcf50633/mbc.h
@@ -128,6 +128,7 @@ enum pcf50633_reg_mbcs3 {
int pcf50633_mbc_usb_curlim_set(struct pcf50633 *pcf, int ma);
int pcf50633_mbc_get_status(struct pcf50633 *);
+int pcf50633_mbc_get_usb_online_status(struct pcf50633 *);
#endif
diff --git a/include/linux/mfd/wm831x/core.h b/include/linux/mfd/wm831x/core.h
index 91eb493bf14..5184b79c700 100644
--- a/include/linux/mfd/wm831x/core.h
+++ b/include/linux/mfd/wm831x/core.h
@@ -16,7 +16,6 @@
#define __MFD_WM831X_CORE_H__
#include <linux/interrupt.h>
-#include <linux/workqueue.h>
/*
* Register values.
@@ -117,6 +116,7 @@
#define WM831X_DC3_SLEEP_CONTROL 0x4063
#define WM831X_DC4_CONTROL 0x4064
#define WM831X_DC4_SLEEP_CONTROL 0x4065
+#define WM832X_DC4_SLEEP_CONTROL 0x4067
#define WM831X_EPE1_CONTROL 0x4066
#define WM831X_EPE2_CONTROL 0x4067
#define WM831X_LDO1_CONTROL 0x4068
@@ -235,6 +235,8 @@
struct regulator_dev;
+#define WM831X_NUM_IRQ_REGS 5
+
struct wm831x {
struct mutex io_lock;
@@ -248,10 +250,11 @@ struct wm831x {
int irq; /* Our chip IRQ */
struct mutex irq_lock;
- struct workqueue_struct *irq_wq;
- struct work_struct irq_work;
unsigned int irq_base;
- int irq_masks[5];
+ int irq_masks_cur[WM831X_NUM_IRQ_REGS]; /* Currently active value */
+ int irq_masks_cache[WM831X_NUM_IRQ_REGS]; /* Cached hardware value */
+
+ int num_gpio;
struct mutex auxadc_lock;
@@ -278,12 +281,30 @@ int wm831x_bulk_read(struct wm831x *wm831x, unsigned short reg,
int wm831x_irq_init(struct wm831x *wm831x, int irq);
void wm831x_irq_exit(struct wm831x *wm831x);
-int __must_check wm831x_request_irq(struct wm831x *wm831x,
- unsigned int irq, irq_handler_t handler,
- unsigned long flags, const char *name,
- void *dev);
-void wm831x_free_irq(struct wm831x *wm831x, unsigned int, void *);
-void wm831x_disable_irq(struct wm831x *wm831x, int irq);
-void wm831x_enable_irq(struct wm831x *wm831x, int irq);
+static inline int __must_check wm831x_request_irq(struct wm831x *wm831x,
+ unsigned int irq,
+ irq_handler_t handler,
+ unsigned long flags,
+ const char *name,
+ void *dev)
+{
+ return request_threaded_irq(irq, NULL, handler, flags, name, dev);
+}
+
+static inline void wm831x_free_irq(struct wm831x *wm831x,
+ unsigned int irq, void *dev)
+{
+ free_irq(irq, dev);
+}
+
+static inline void wm831x_disable_irq(struct wm831x *wm831x, int irq)
+{
+ disable_irq(irq);
+}
+
+static inline void wm831x_enable_irq(struct wm831x *wm831x, int irq)
+{
+ enable_irq(irq);
+}
#endif
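
With the wrappers above the chip IRQs are ordinary genirq interrupts. Sketch of a subdevice requesting one (the IRQ offset macro is hypothetical; real per-function offsets live in the subdevice headers):

#define EXAMPLE_WM831X_IRQ_OFFSET	1	/* hypothetical offset */

static irqreturn_t example_wm831x_irq(int irq, void *data)
{
	return IRQ_HANDLED;
}

static int example_wm831x_attach(struct wm831x *wm831x)
{
	int irq = wm831x->irq_base + EXAMPLE_WM831X_IRQ_OFFSET;

	return wm831x_request_irq(wm831x, irq, example_wm831x_irq,
				  0, "example", wm831x);
}
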
diff --git a/include/linux/mfd/wm831x/pdata.h b/include/linux/mfd/wm831x/pdata.h
index 90d820260aa..415c228743d 100644
--- a/include/linux/mfd/wm831x/pdata.h
+++ b/include/linux/mfd/wm831x/pdata.h
@@ -91,6 +91,7 @@ struct wm831x_pdata {
/** Called after subdevices are set up */
int (*post_init)(struct wm831x *wm831x);
+ int irq_base;
int gpio_base;
struct wm831x_backlight_pdata *backlight;
struct wm831x_backup_pdata *backup;
diff --git a/include/linux/mfd/wm8350/core.h b/include/linux/mfd/wm8350/core.h
index 1d595de6a05..43868899bf4 100644
--- a/include/linux/mfd/wm8350/core.h
+++ b/include/linux/mfd/wm8350/core.h
@@ -15,7 +15,7 @@
#include <linux/kernel.h>
#include <linux/mutex.h>
-#include <linux/workqueue.h>
+#include <linux/interrupt.h>
#include <linux/mfd/wm8350/audio.h>
#include <linux/mfd/wm8350/gpio.h>
@@ -601,7 +601,7 @@ extern const u16 wm8352_mode3_defaults[];
struct wm8350;
struct wm8350_irq {
- void (*handler) (struct wm8350 *, int, void *);
+ irq_handler_t handler;
void *data;
};
@@ -646,10 +646,12 @@ struct wm8350 {
* @init: Function called during driver initialisation. Should be
* used by the platform to configure GPIO functions and similar.
* @irq_high: Set if WM8350 IRQ is active high.
+ * @irq_base: Base IRQ for genirq (not currently used).
*/
struct wm8350_platform_data {
int (*init)(struct wm8350 *wm8350);
int irq_high;
+ int irq_base;
};
@@ -676,11 +678,13 @@ int wm8350_block_write(struct wm8350 *wm8350, int reg, int size, u16 *src);
* WM8350 internal interrupts
*/
int wm8350_register_irq(struct wm8350 *wm8350, int irq,
- void (*handler) (struct wm8350 *, int, void *),
- void *data);
+ irq_handler_t handler, unsigned long flags,
+ const char *name, void *data);
int wm8350_free_irq(struct wm8350 *wm8350, int irq);
int wm8350_mask_irq(struct wm8350 *wm8350, int irq);
int wm8350_unmask_irq(struct wm8350 *wm8350, int irq);
-
+int wm8350_irq_init(struct wm8350 *wm8350, int irq,
+ struct wm8350_platform_data *pdata);
+int wm8350_irq_exit(struct wm8350 *wm8350);
#endif
diff --git a/include/linux/mfd/wm8350/gpio.h b/include/linux/mfd/wm8350/gpio.h
index ed91e8f5d29..71af3d6ebe9 100644
--- a/include/linux/mfd/wm8350/gpio.h
+++ b/include/linux/mfd/wm8350/gpio.h
@@ -173,6 +173,24 @@
#define WM8350_GPIO_DEBOUNCE_ON 1
/*
+ * R30 (0x1E) - GPIO Interrupt Status
+ */
+#define WM8350_GP12_EINT 0x1000
+#define WM8350_GP11_EINT 0x0800
+#define WM8350_GP10_EINT 0x0400
+#define WM8350_GP9_EINT 0x0200
+#define WM8350_GP8_EINT 0x0100
+#define WM8350_GP7_EINT 0x0080
+#define WM8350_GP6_EINT 0x0040
+#define WM8350_GP5_EINT 0x0020
+#define WM8350_GP4_EINT 0x0010
+#define WM8350_GP3_EINT 0x0008
+#define WM8350_GP2_EINT 0x0004
+#define WM8350_GP1_EINT 0x0002
+#define WM8350_GP0_EINT 0x0001
+
+
+/*
* R128 (0x80) - GPIO Debounce
*/
#define WM8350_GP12_DB 0x1000
diff --git a/include/linux/migrate.h b/include/linux/migrate.h
index 527602cdea1..7f085c97c79 100644
--- a/include/linux/migrate.h
+++ b/include/linux/migrate.h
@@ -12,7 +12,8 @@ typedef struct page *new_page_t(struct page *, unsigned long private, int **);
extern int putback_lru_pages(struct list_head *l);
extern int migrate_page(struct address_space *,
struct page *, struct page *);
-extern int migrate_pages(struct list_head *l, new_page_t x, unsigned long);
+extern int migrate_pages(struct list_head *l, new_page_t x,
+ unsigned long private, int offlining);
extern int fail_migrate_page(struct address_space *,
struct page *, struct page *);
@@ -26,10 +27,7 @@ extern int migrate_vmas(struct mm_struct *mm,
static inline int putback_lru_pages(struct list_head *l) { return 0; }
static inline int migrate_pages(struct list_head *l, new_page_t x,
- unsigned long private) { return -ENOSYS; }
-
-static inline int migrate_pages_to(struct list_head *pagelist,
- struct vm_area_struct *vma, int dest) { return 0; }
+ unsigned long private, int offlining) { return -ENOSYS; }
static inline int migrate_prep(void) { return -ENOSYS; }
diff --git a/include/linux/mlx4/device.h b/include/linux/mlx4/device.h
index ce7cc6c7bcb..e92d1bfdb33 100644
--- a/include/linux/mlx4/device.h
+++ b/include/linux/mlx4/device.h
@@ -61,6 +61,7 @@ enum {
MLX4_DEV_CAP_FLAG_BAD_PKEY_CNTR = 1 << 8,
MLX4_DEV_CAP_FLAG_BAD_QKEY_CNTR = 1 << 9,
MLX4_DEV_CAP_FLAG_DPDP = 1 << 12,
+ MLX4_DEV_CAP_FLAG_BLH = 1 << 15,
MLX4_DEV_CAP_FLAG_MEM_WINDOW = 1 << 16,
MLX4_DEV_CAP_FLAG_APM = 1 << 17,
MLX4_DEV_CAP_FLAG_ATOMIC = 1 << 18,
diff --git a/include/linux/mm.h b/include/linux/mm.h
index 24c395694f4..9d65ae4ba0e 100644
--- a/include/linux/mm.h
+++ b/include/linux/mm.h
@@ -620,13 +620,22 @@ void page_address_init(void);
/*
* On an anonymous page mapped into a user virtual memory area,
* page->mapping points to its anon_vma, not to a struct address_space;
- * with the PAGE_MAPPING_ANON bit set to distinguish it.
+ * with the PAGE_MAPPING_ANON bit set to distinguish it. See rmap.h.
+ *
+ * On an anonymous page in a VM_MERGEABLE area, if CONFIG_KSM is enabled,
+ * the PAGE_MAPPING_KSM bit may be set along with the PAGE_MAPPING_ANON bit;
+ * and then page->mapping points, not to an anon_vma, but to a private
+ * structure which KSM associates with that merged page. See ksm.h.
+ *
+ * PAGE_MAPPING_KSM without PAGE_MAPPING_ANON is currently never used.
*
* Please note that, confusingly, "page_mapping" refers to the inode
* address_space which maps the page from disk; whereas "page_mapped"
* refers to user virtual address space into which the page is mapped.
*/
#define PAGE_MAPPING_ANON 1
+#define PAGE_MAPPING_KSM 2
+#define PAGE_MAPPING_FLAGS (PAGE_MAPPING_ANON | PAGE_MAPPING_KSM)
extern struct address_space swapper_space;
static inline struct address_space *page_mapping(struct page *page)
@@ -634,16 +643,19 @@ static inline struct address_space *page_mapping(struct page *page)
struct address_space *mapping = page->mapping;
VM_BUG_ON(PageSlab(page));
-#ifdef CONFIG_SWAP
if (unlikely(PageSwapCache(page)))
mapping = &swapper_space;
- else
-#endif
- if (unlikely((unsigned long)mapping & PAGE_MAPPING_ANON))
+ else if (unlikely((unsigned long)mapping & PAGE_MAPPING_ANON))
mapping = NULL;
return mapping;
}
+/* Neutral page->mapping pointer to address_space or anon_vma or other */
+static inline void *page_rmapping(struct page *page)
+{
+ return (void *)((unsigned long)page->mapping & ~PAGE_MAPPING_FLAGS);
+}
+
static inline int PageAnon(struct page *page)
{
return ((unsigned long)page->mapping & PAGE_MAPPING_ANON) != 0;
@@ -758,6 +770,7 @@ unsigned long unmap_vmas(struct mmu_gather **tlb,
* @pmd_entry: if set, called for each non-empty PMD (3rd-level) entry
* @pte_entry: if set, called for each non-empty PTE (4th-level) entry
* @pte_hole: if set, called for each hole at all levels
+ * @hugetlb_entry: if set, called for each hugetlb entry
*
* (see walk_page_range for more details)
*/
@@ -767,6 +780,8 @@ struct mm_walk {
int (*pmd_entry)(pmd_t *, unsigned long, unsigned long, struct mm_walk *);
int (*pte_entry)(pte_t *, unsigned long, unsigned long, struct mm_walk *);
int (*pte_hole)(unsigned long, unsigned long, struct mm_walk *);
+ int (*hugetlb_entry)(pte_t *, unsigned long, unsigned long,
+ struct mm_walk *);
struct mm_struct *mm;
void *private;
};
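
Walker sketch (illustrative only; the counting done in the callback is made up) using the new hugetlb_entry hook:

static int example_hugetlb_entry(pte_t *pte, unsigned long addr,
				 unsigned long end, struct mm_walk *walk)
{
	unsigned long *count = walk->private;

	if (!pte_none(*pte))
		(*count)++;
	return 0;
}

/*
 * Caller sketch:
 *	struct mm_walk walk = {
 *		.hugetlb_entry	= example_hugetlb_entry,
 *		.mm		= mm,
 *		.private	= &count,
 *	};
 *	walk_page_range(start, end, &walk);
 */
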
diff --git a/include/linux/mtd/bbm.h b/include/linux/mtd/bbm.h
index fff8c53e543..9c3757c5759 100644
--- a/include/linux/mtd/bbm.h
+++ b/include/linux/mtd/bbm.h
@@ -19,22 +19,21 @@
/**
* struct nand_bbt_descr - bad block table descriptor
- * @options: options for this descriptor
- * @pages: the page(s) where we find the bbt, used with
- * option BBT_ABSPAGE when bbt is searched,
- * then we store the found bbts pages here.
- * Its an array and supports up to 8 chips now
- * @offs: offset of the pattern in the oob area of the page
- * @veroffs: offset of the bbt version counter in the oob area of the page
- * @version: version read from the bbt page during scan
- * @len: length of the pattern, if 0 no pattern check is performed
- * @maxblocks: maximum number of blocks to search for a bbt. This
- * number of blocks is reserved at the end of the device
- * where the tables are written.
- * @reserved_block_code: if non-0, this pattern denotes a reserved
- * (rather than bad) block in the stored bbt
- * @pattern: pattern to identify bad block table or factory marked
- * good / bad blocks, can be NULL, if len = 0
+ * @options: options for this descriptor
+ * @pages: the page(s) where we find the bbt, used with option BBT_ABSPAGE
+ * when bbt is searched, then we store the found bbts pages here.
+ *	It's an array and supports up to 8 chips now
+ * @offs: offset of the pattern in the oob area of the page
+ * @veroffs:	offset of the bbt version counter in the oob area of the page
+ * @version: version read from the bbt page during scan
+ * @len: length of the pattern, if 0 no pattern check is performed
+ * @maxblocks: maximum number of blocks to search for a bbt. This number of
+ * blocks is reserved at the end of the device where the tables are
+ * written.
+ * @reserved_block_code: if non-0, this pattern denotes a reserved (rather than
+ * bad) block in the stored bbt
+ * @pattern: pattern to identify bad block table or factory marked good /
+ * bad blocks, can be NULL, if len = 0
*
* Descriptor for the bad block table marker and the descriptor for the
* pattern which identifies good and bad blocks. The assumption is made
@@ -90,7 +89,9 @@ struct nand_bbt_descr {
/*
* Constants for oob configuration
*/
-#define ONENAND_BADBLOCK_POS 0
+#define NAND_SMALL_BADBLOCK_POS 5
+#define NAND_LARGE_BADBLOCK_POS 0
+#define ONENAND_BADBLOCK_POS 0
/*
* Bad block scanning errors
diff --git a/include/linux/mtd/cfi.h b/include/linux/mtd/cfi.h
index 88d3d8fbf9f..df89f427523 100644
--- a/include/linux/mtd/cfi.h
+++ b/include/linux/mtd/cfi.h
@@ -518,10 +518,11 @@ struct cfi_fixup {
#define CFI_MFR_ANY 0xffff
#define CFI_ID_ANY 0xffff
-#define CFI_MFR_AMD 0x0001
-#define CFI_MFR_ATMEL 0x001F
-#define CFI_MFR_SAMSUNG 0x00EC
-#define CFI_MFR_ST 0x0020 /* STMicroelectronics */
+#define CFI_MFR_AMD 0x0001
+#define CFI_MFR_INTEL 0x0089
+#define CFI_MFR_ATMEL 0x001F
+#define CFI_MFR_SAMSUNG 0x00EC
+#define CFI_MFR_ST 0x0020 /* STMicroelectronics */
void cfi_fixup(struct mtd_info *mtd, struct cfi_fixup* fixups);
diff --git a/include/linux/mtd/flashchip.h b/include/linux/mtd/flashchip.h
index d4f38c5fd44..d0bf422ae37 100644
--- a/include/linux/mtd/flashchip.h
+++ b/include/linux/mtd/flashchip.h
@@ -38,6 +38,15 @@ typedef enum {
FL_XIP_WHILE_ERASING,
FL_XIP_WHILE_WRITING,
FL_SHUTDOWN,
+ /* These 2 come from nand_state_t, which has been unified here */
+ FL_READING,
+ FL_CACHEDPRG,
+ /* These 4 come from onenand_state_t, which has been unified here */
+ FL_RESETING,
+ FL_OTPING,
+ FL_PREPARING_ERASE,
+ FL_VERIFYING_ERASE,
+
FL_UNKNOWN
} flstate_t;
diff --git a/include/linux/mtd/nand.h b/include/linux/mtd/nand.h
index 7a232a9bdd6..ccab9dfc521 100644
--- a/include/linux/mtd/nand.h
+++ b/include/linux/mtd/nand.h
@@ -21,6 +21,8 @@
#include <linux/wait.h>
#include <linux/spinlock.h>
#include <linux/mtd/mtd.h>
+#include <linux/mtd/flashchip.h>
+#include <linux/mtd/bbm.h>
struct mtd_info;
/* Scan and identify a NAND device */
@@ -168,7 +170,6 @@ typedef enum {
/* Chip does not allow subpage writes */
#define NAND_NO_SUBPAGE_WRITE 0x00000200
-
/* Options valid for Samsung large page devices */
#define NAND_SAMSUNG_LP_OPTIONS \
(NAND_NO_PADDING | NAND_CACHEPRG | NAND_COPYBACK)
@@ -194,6 +195,9 @@ typedef enum {
/* This option is defined if the board driver allocates its own buffers
 (e.g. because it needs them DMA-coherent) */
#define NAND_OWN_BUFFERS 0x00040000
+/* Chip may not exist, so silence any errors in scan */
+#define NAND_SCAN_SILENT_NODEV 0x00080000
+
/* Options set by nand scan */
/* Nand scan has allocated controller struct */
#define NAND_CONTROLLER_ALLOC 0x80000000
@@ -202,20 +206,6 @@ typedef enum {
#define NAND_CI_CHIPNR_MSK 0x03
#define NAND_CI_CELLTYPE_MSK 0x0C
-/*
- * nand_state_t - chip states
- * Enumeration for NAND flash chip state
- */
-typedef enum {
- FL_READY,
- FL_READING,
- FL_WRITING,
- FL_ERASING,
- FL_SYNCING,
- FL_CACHEDPRG,
- FL_PM_SUSPENDED,
-} nand_state_t;
-
/* Keep gcc happy */
struct nand_chip;
@@ -402,7 +392,7 @@ struct nand_chip {
uint8_t cellinfo;
int badblockpos;
- nand_state_t state;
+ flstate_t state;
uint8_t *oob_poi;
struct nand_hw_control *controller;
@@ -470,75 +460,6 @@ struct nand_manufacturers {
extern struct nand_flash_dev nand_flash_ids[];
extern struct nand_manufacturers nand_manuf_ids[];
-/**
- * struct nand_bbt_descr - bad block table descriptor
- * @options: options for this descriptor
- * @pages: the page(s) where we find the bbt, used with option BBT_ABSPAGE
- * when bbt is searched, then we store the found bbts pages here.
- * Its an array and supports up to 8 chips now
- * @offs: offset of the pattern in the oob area of the page
- * @veroffs: offset of the bbt version counter in the oob are of the page
- * @version: version read from the bbt page during scan
- * @len: length of the pattern, if 0 no pattern check is performed
- * @maxblocks: maximum number of blocks to search for a bbt. This number of
- * blocks is reserved at the end of the device where the tables are
- * written.
- * @reserved_block_code: if non-0, this pattern denotes a reserved (rather than
- * bad) block in the stored bbt
- * @pattern: pattern to identify bad block table or factory marked good /
- * bad blocks, can be NULL, if len = 0
- *
- * Descriptor for the bad block table marker and the descriptor for the
- * pattern which identifies good and bad blocks. The assumption is made
- * that the pattern and the version count are always located in the oob area
- * of the first block.
- */
-struct nand_bbt_descr {
- int options;
- int pages[NAND_MAX_CHIPS];
- int offs;
- int veroffs;
- uint8_t version[NAND_MAX_CHIPS];
- int len;
- int maxblocks;
- int reserved_block_code;
- uint8_t *pattern;
-};
-
-/* Options for the bad block table descriptors */
-
-/* The number of bits used per block in the bbt on the device */
-#define NAND_BBT_NRBITS_MSK 0x0000000F
-#define NAND_BBT_1BIT 0x00000001
-#define NAND_BBT_2BIT 0x00000002
-#define NAND_BBT_4BIT 0x00000004
-#define NAND_BBT_8BIT 0x00000008
-/* The bad block table is in the last good block of the device */
-#define NAND_BBT_LASTBLOCK 0x00000010
-/* The bbt is at the given page, else we must scan for the bbt */
-#define NAND_BBT_ABSPAGE 0x00000020
-/* The bbt is at the given page, else we must scan for the bbt */
-#define NAND_BBT_SEARCH 0x00000040
-/* bbt is stored per chip on multichip devices */
-#define NAND_BBT_PERCHIP 0x00000080
-/* bbt has a version counter at offset veroffs */
-#define NAND_BBT_VERSION 0x00000100
-/* Create a bbt if none axists */
-#define NAND_BBT_CREATE 0x00000200
-/* Search good / bad pattern through all pages of a block */
-#define NAND_BBT_SCANALLPAGES 0x00000400
-/* Scan block empty during good / bad block scan */
-#define NAND_BBT_SCANEMPTY 0x00000800
-/* Write bbt if neccecary */
-#define NAND_BBT_WRITE 0x00001000
-/* Read and write back block contents when writing bbt */
-#define NAND_BBT_SAVECONTENT 0x00002000
-/* Search good / bad pattern on the first and the second page */
-#define NAND_BBT_SCAN2NDPAGE 0x00004000
-
-/* The maximum number of blocks to scan for a bbt */
-#define NAND_BBT_SCAN_MAXBLOCKS 4
-
extern int nand_scan_bbt(struct mtd_info *mtd, struct nand_bbt_descr *bd);
extern int nand_update_bbt(struct mtd_info *mtd, loff_t offs);
extern int nand_default_bbt(struct mtd_info *mtd);
@@ -548,12 +469,6 @@ extern int nand_erase_nand(struct mtd_info *mtd, struct erase_info *instr,
extern int nand_do_read(struct mtd_info *mtd, loff_t from, size_t len,
size_t * retlen, uint8_t * buf);
-/*
-* Constants for oob configuration
-*/
-#define NAND_SMALL_BADBLOCK_POS 5
-#define NAND_LARGE_BADBLOCK_POS 0
-
/**
* struct platform_nand_chip - chip level device structure
* @nr_chips: max. number of chips to scan for
diff --git a/include/linux/mtd/nand_ecc.h b/include/linux/mtd/nand_ecc.h
index 052ea8ca243..41bc013571d 100644
--- a/include/linux/mtd/nand_ecc.h
+++ b/include/linux/mtd/nand_ecc.h
@@ -16,7 +16,13 @@
struct mtd_info;
/*
- * Calculate 3 byte ECC code for 256 byte block
+ * Calculate 3 byte ECC code for eccsize byte block
+ */
+void __nand_calculate_ecc(const u_char *dat, unsigned int eccsize,
+ u_char *ecc_code);
+
+/*
+ * Calculate 3 byte ECC code for 256/512 byte block
*/
int nand_calculate_ecc(struct mtd_info *mtd, const u_char *dat, u_char *ecc_code);
@@ -27,7 +33,7 @@ int __nand_correct_data(u_char *dat, u_char *read_ecc, u_char *calc_ecc,
unsigned int eccsize);
/*
- * Detect and correct a 1 bit error for 256 byte block
+ * Detect and correct a 1 bit error for 256/512 byte block
*/
int nand_correct_data(struct mtd_info *mtd, u_char *dat, u_char *read_ecc, u_char *calc_ecc);
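
A hedged usage sketch for the raw ECC helpers declared above, computing and verifying the 3-byte code over one 256-byte block; demo_check_block() is an illustrative name, not a kernel function:

#include <linux/mtd/nand_ecc.h>

static int demo_check_block(u_char *data, u_char *stored_ecc)
{
	u_char calc_ecc[3];

	/* 3-byte ECC over an eccsize-byte block (256 here; 512 also works). */
	__nand_calculate_ecc(data, 256, calc_ecc);

	/* Compares stored vs. calculated ECC and patches a single-bit error
	 * in place; a negative return means the block is uncorrectable. */
	return __nand_correct_data(data, stored_ecc, calc_ecc, 256);
}
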
diff --git a/include/linux/mtd/onenand.h b/include/linux/mtd/onenand.h
index 4e49f335067..5509eb06b32 100644
--- a/include/linux/mtd/onenand.h
+++ b/include/linux/mtd/onenand.h
@@ -1,7 +1,7 @@
/*
* linux/include/linux/mtd/onenand.h
*
- * Copyright (C) 2005-2007 Samsung Electronics
+ * Copyright © 2005-2009 Samsung Electronics
* Kyungmin Park <kyungmin.park@samsung.com>
*
* This program is free software; you can redistribute it and/or modify
@@ -14,6 +14,7 @@
#include <linux/spinlock.h>
#include <linux/completion.h>
+#include <linux/mtd/flashchip.h>
#include <linux/mtd/onenand_regs.h>
#include <linux/mtd/bbm.h>
@@ -25,22 +26,6 @@ extern int onenand_scan(struct mtd_info *mtd, int max_chips);
/* Free resources held by the OneNAND device */
extern void onenand_release(struct mtd_info *mtd);
-/*
- * onenand_state_t - chip states
- * Enumeration for OneNAND flash chip state
- */
-typedef enum {
- FL_READY,
- FL_READING,
- FL_WRITING,
- FL_ERASING,
- FL_SYNCING,
- FL_LOCKING,
- FL_RESETING,
- FL_OTPING,
- FL_PM_SUSPENDED,
-} onenand_state_t;
-
/**
* struct onenand_bufferram - OneNAND BufferRAM Data
* @blockpage: block & page address in BufferRAM
@@ -137,7 +122,7 @@ struct onenand_chip {
spinlock_t chip_lock;
wait_queue_head_t wq;
- onenand_state_t state;
+ flstate_t state;
unsigned char *page_buf;
unsigned char *oob_buf;
@@ -152,6 +137,8 @@ struct onenand_chip {
/*
* Helper macros
*/
+#define ONENAND_PAGES_PER_BLOCK (1<<6)
+
#define ONENAND_CURRENT_BUFFERRAM(this) (this->bufferram_index)
#define ONENAND_NEXT_BUFFERRAM(this) (this->bufferram_index ^ 1)
#define ONENAND_SET_NEXT_BUFFERRAM(this) (this->bufferram_index ^= 1)
diff --git a/include/linux/mtd/onenand_regs.h b/include/linux/mtd/onenand_regs.h
index acadbf53a69..cd6f3b43119 100644
--- a/include/linux/mtd/onenand_regs.h
+++ b/include/linux/mtd/onenand_regs.h
@@ -131,6 +131,8 @@
#define ONENAND_CMD_LOCK_TIGHT (0x2C)
#define ONENAND_CMD_UNLOCK_ALL (0x27)
#define ONENAND_CMD_ERASE (0x94)
+#define ONENAND_CMD_MULTIBLOCK_ERASE (0x95)
+#define ONENAND_CMD_ERASE_VERIFY (0x71)
#define ONENAND_CMD_RESET (0xF0)
#define ONENAND_CMD_OTP_ACCESS (0x65)
#define ONENAND_CMD_READID (0x90)
diff --git a/include/linux/nfs4.h b/include/linux/nfs4.h
index c4c06020810..9b8299af374 100644
--- a/include/linux/nfs4.h
+++ b/include/linux/nfs4.h
@@ -128,6 +128,8 @@
#define SEQ4_STATUS_RECALLABLE_STATE_REVOKED 0x00000040
#define SEQ4_STATUS_LEASE_MOVED 0x00000080
#define SEQ4_STATUS_RESTART_RECLAIM_NEEDED 0x00000100
+#define SEQ4_STATUS_CB_PATH_DOWN_SESSION 0x00000200
+#define SEQ4_STATUS_BACKCHANNEL_FAULT 0x00000400
#define NFS4_MAX_UINT64 (~(u64)0)
@@ -528,6 +530,7 @@ enum {
NFSPROC4_CLNT_DESTROY_SESSION,
NFSPROC4_CLNT_SEQUENCE,
NFSPROC4_CLNT_GET_LEASE_TIME,
+ NFSPROC4_CLNT_RECLAIM_COMPLETE,
};
/* nfs41 types */
diff --git a/include/linux/nfs_fs_sb.h b/include/linux/nfs_fs_sb.h
index 320569eabe3..34fc6be5bfc 100644
--- a/include/linux/nfs_fs_sb.h
+++ b/include/linux/nfs_fs_sb.h
@@ -209,6 +209,7 @@ struct nfs4_session {
unsigned long session_state;
u32 hash_alg;
u32 ssv_len;
+ struct completion complete;
/* The fore and back channel */
struct nfs4_channel_attrs fc_attrs;
diff --git a/include/linux/nfs_xdr.h b/include/linux/nfs_xdr.h
index 62f63fb0c4c..51071b33575 100644
--- a/include/linux/nfs_xdr.h
+++ b/include/linux/nfs_xdr.h
@@ -170,8 +170,9 @@ struct nfs4_sequence_args {
struct nfs4_sequence_res {
struct nfs4_session *sr_session;
u8 sr_slotid; /* slot used to send request */
- unsigned long sr_renewal_time;
int sr_status; /* sequence operation status */
+ unsigned long sr_renewal_time;
+ u32 sr_status_flags;
};
struct nfs4_get_lease_time_args {
@@ -938,6 +939,16 @@ struct nfs41_create_session_args {
struct nfs41_create_session_res {
struct nfs_client *client;
};
+
+struct nfs41_reclaim_complete_args {
+ /* In the future extend to include curr_fh for use with migration */
+ unsigned char one_fs:1;
+ struct nfs4_sequence_args seq_args;
+};
+
+struct nfs41_reclaim_complete_res {
+ struct nfs4_sequence_res seq_res;
+};
#endif /* CONFIG_NFS_V4_1 */
struct nfs_page;
diff --git a/include/linux/node.h b/include/linux/node.h
index 681a697b9a8..06292dac3ea 100644
--- a/include/linux/node.h
+++ b/include/linux/node.h
@@ -21,13 +21,19 @@
#include <linux/sysdev.h>
#include <linux/cpumask.h>
+#include <linux/workqueue.h>
struct node {
struct sys_device sysdev;
+
+#if defined(CONFIG_MEMORY_HOTPLUG_SPARSE) && defined(CONFIG_HUGETLBFS)
+ struct work_struct node_work;
+#endif
};
struct memory_block;
extern struct node node_devices[];
+typedef void (*node_registration_func_t)(struct node *);
extern int register_node(struct node *, int, struct node *);
extern void unregister_node(struct node *node);
@@ -39,6 +45,11 @@ extern int unregister_cpu_under_node(unsigned int cpu, unsigned int nid);
extern int register_mem_sect_under_node(struct memory_block *mem_blk,
int nid);
extern int unregister_mem_sect_under_nodes(struct memory_block *mem_blk);
+
+#ifdef CONFIG_HUGETLBFS
+extern void register_hugetlbfs_with_node(node_registration_func_t doregister,
+ node_registration_func_t unregister);
+#endif
#else
static inline int register_one_node(int nid)
{
@@ -65,6 +76,11 @@ static inline int unregister_mem_sect_under_nodes(struct memory_block *mem_blk)
{
return 0;
}
+
+static inline void register_hugetlbfs_with_node(node_registration_func_t reg,
+ node_registration_func_t unreg)
+{
+}
#endif
#define to_node(sys_device) container_of(sys_device, struct node, sysdev)
diff --git a/include/linux/nodemask.h b/include/linux/nodemask.h
index b359c4a9ec9..454997cccbd 100644
--- a/include/linux/nodemask.h
+++ b/include/linux/nodemask.h
@@ -245,14 +245,19 @@ static inline int __next_node(int n, const nodemask_t *srcp)
return min_t(int,MAX_NUMNODES,find_next_bit(srcp->bits, MAX_NUMNODES, n+1));
}
+static inline void init_nodemask_of_node(nodemask_t *mask, int node)
+{
+ nodes_clear(*mask);
+ node_set(node, *mask);
+}
+
#define nodemask_of_node(node) \
({ \
typeof(_unused_nodemask_arg_) m; \
if (sizeof(m) == sizeof(unsigned long)) { \
- m.bits[0] = 1UL<<(node); \
+ m.bits[0] = 1UL << (node); \
} else { \
- nodes_clear(m); \
- node_set((node), m); \
+ init_nodemask_of_node(&m, (node)); \
} \
m; \
})
@@ -480,15 +485,17 @@ static inline int num_node_state(enum node_states state)
#define for_each_online_node(node) for_each_node_state(node, N_ONLINE)
/*
- * For nodemask scrach area.(See CPUMASK_ALLOC() in cpumask.h)
+ * For nodemask scratch area.
+ * NODEMASK_ALLOC(type, name, gfp_flags) allocates an object with the
+ * specified type and name, using the given allocation flags.
*/
-
-#if NODES_SHIFT > 8 /* nodemask_t > 64 bytes */
-#define NODEMASK_ALLOC(x, m) struct x *m = kmalloc(sizeof(*m), GFP_KERNEL)
-#define NODEMASK_FREE(m) kfree(m)
+#if NODES_SHIFT > 8 /* nodemask_t > 32 bytes */
+#define NODEMASK_ALLOC(type, name, gfp_flags) \
+ type *name = kmalloc(sizeof(*name), gfp_flags)
+#define NODEMASK_FREE(m) kfree(m)
#else
-#define NODEMASK_ALLOC(x, m) struct x _m, *m = &_m
-#define NODEMASK_FREE(m)
+#define NODEMASK_ALLOC(type, name, gfp_flags) type _name, *name = &_name
+#define NODEMASK_FREE(m) do {} while (0)
#endif
/* An example structure for using NODEMASK_ALLOC, used in mempolicy. */
@@ -497,8 +504,10 @@ struct nodemask_scratch {
nodemask_t mask2;
};
-#define NODEMASK_SCRATCH(x) NODEMASK_ALLOC(nodemask_scratch, x)
-#define NODEMASK_SCRATCH_FREE(x) NODEMASK_FREE(x)
+#define NODEMASK_SCRATCH(x) \
+ NODEMASK_ALLOC(struct nodemask_scratch, x, \
+ GFP_KERNEL | __GFP_NORETRY)
+#define NODEMASK_SCRATCH_FREE(x) NODEMASK_FREE(x)
#endif /* __LINUX_NODEMASK_H */
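
A minimal sketch of the reworked allocation macros, which now take the full type, a variable name and explicit gfp flags; demo_fill_mask() is an illustrative name, and the NULL check only matters when NODES_SHIFT > 8 and kmalloc() is actually used:

#include <linux/errno.h>
#include <linux/nodemask.h>
#include <linux/slab.h>

static int demo_fill_mask(int nid)
{
	NODEMASK_ALLOC(nodemask_t, mask, GFP_KERNEL);

	if (!mask)
		return -ENOMEM;

	init_nodemask_of_node(mask, nid);	/* helper added above */
	/* ... use *mask ... */

	NODEMASK_FREE(mask);
	return 0;
}
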
diff --git a/include/linux/numa.h b/include/linux/numa.h
index a31a7301b15..3aaa31603a8 100644
--- a/include/linux/numa.h
+++ b/include/linux/numa.h
@@ -10,4 +10,6 @@
#define MAX_NUMNODES (1 << NODES_SHIFT)
+#define NUMA_NO_NODE (-1)
+
#endif /* _LINUX_NUMA_H */
diff --git a/include/linux/oom.h b/include/linux/oom.h
index 6aac5fe4f6f..53766231562 100644
--- a/include/linux/oom.h
+++ b/include/linux/oom.h
@@ -10,6 +10,7 @@
#ifdef __KERNEL__
#include <linux/types.h>
+#include <linux/nodemask.h>
struct zonelist;
struct notifier_block;
@@ -26,7 +27,8 @@ enum oom_constraint {
extern int try_set_zone_oom(struct zonelist *zonelist, gfp_t gfp_flags);
extern void clear_zonelist_oom(struct zonelist *zonelist, gfp_t gfp_flags);
-extern void out_of_memory(struct zonelist *zonelist, gfp_t gfp_mask, int order);
+extern void out_of_memory(struct zonelist *zonelist, gfp_t gfp_mask,
+ int order, nodemask_t *mask);
extern int register_oom_notifier(struct notifier_block *nb);
extern int unregister_oom_notifier(struct notifier_block *nb);
diff --git a/include/linux/page-flags.h b/include/linux/page-flags.h
index 6b202b17395..49e907bd067 100644
--- a/include/linux/page-flags.h
+++ b/include/linux/page-flags.h
@@ -99,7 +99,7 @@ enum pageflags {
PG_buddy, /* Page is free, on buddy lists */
PG_swapbacked, /* Page is backed by RAM/swap */
PG_unevictable, /* Page is "unevictable" */
-#ifdef CONFIG_HAVE_MLOCKED_PAGE_BIT
+#ifdef CONFIG_MMU
PG_mlocked, /* Page is vma mlocked */
#endif
#ifdef CONFIG_ARCH_USES_PG_UNCACHED
@@ -259,12 +259,10 @@ PAGEFLAG_FALSE(SwapCache)
PAGEFLAG(Unevictable, unevictable) __CLEARPAGEFLAG(Unevictable, unevictable)
TESTCLEARFLAG(Unevictable, unevictable)
-#ifdef CONFIG_HAVE_MLOCKED_PAGE_BIT
-#define MLOCK_PAGES 1
+#ifdef CONFIG_MMU
PAGEFLAG(Mlocked, mlocked) __CLEARPAGEFLAG(Mlocked, mlocked)
TESTSCFLAG(Mlocked, mlocked) __TESTCLEARFLAG(Mlocked, mlocked)
#else
-#define MLOCK_PAGES 0
PAGEFLAG_FALSE(Mlocked) SETPAGEFLAG_NOOP(Mlocked)
TESTCLEARFLAG_FALSE(Mlocked) __TESTCLEARFLAG_FALSE(Mlocked)
#endif
@@ -393,7 +391,7 @@ static inline void __ClearPageTail(struct page *page)
#endif /* !PAGEFLAGS_EXTENDED */
-#ifdef CONFIG_HAVE_MLOCKED_PAGE_BIT
+#ifdef CONFIG_MMU
#define __PG_MLOCKED (1 << PG_mlocked)
#else
#define __PG_MLOCKED 0
diff --git a/include/linux/page_cgroup.h b/include/linux/page_cgroup.h
index 4b938d4f3ac..b0e4eb12623 100644
--- a/include/linux/page_cgroup.h
+++ b/include/linux/page_cgroup.h
@@ -57,6 +57,8 @@ static inline void ClearPageCgroup##uname(struct page_cgroup *pc) \
static inline int TestClearPageCgroup##uname(struct page_cgroup *pc) \
{ return test_and_clear_bit(PCG_##lname, &pc->flags); }
+TESTPCGFLAG(Locked, LOCK)
+
/* Cache flag is set only once (at allocation) */
TESTPCGFLAG(Cache, CACHE)
CLEARPCGFLAG(Cache, CACHE)
@@ -86,11 +88,6 @@ static inline void lock_page_cgroup(struct page_cgroup *pc)
bit_spin_lock(PCG_LOCK, &pc->flags);
}
-static inline int trylock_page_cgroup(struct page_cgroup *pc)
-{
- return bit_spin_trylock(PCG_LOCK, &pc->flags);
-}
-
static inline void unlock_page_cgroup(struct page_cgroup *pc)
{
bit_spin_unlock(PCG_LOCK, &pc->flags);
diff --git a/include/linux/pci.h b/include/linux/pci.h
index 04771b9c331..bf1e6708084 100644
--- a/include/linux/pci.h
+++ b/include/linux/pci.h
@@ -1255,7 +1255,7 @@ extern int pci_pci_problems;
extern unsigned long pci_cardbus_io_size;
extern unsigned long pci_cardbus_mem_size;
-extern u8 pci_dfl_cache_line_size;
+extern u8 __devinitdata pci_dfl_cache_line_size;
extern u8 pci_cache_line_size;
extern unsigned long pci_hotplug_io_size;
diff --git a/include/linux/pci_ids.h b/include/linux/pci_ids.h
index eae1f864c93..cca8a044e2b 100644
--- a/include/linux/pci_ids.h
+++ b/include/linux/pci_ids.h
@@ -2295,6 +2295,20 @@
#define PCI_DEVICE_ID_MPC8536 0x0051
#define PCI_DEVICE_ID_P2020E 0x0070
#define PCI_DEVICE_ID_P2020 0x0071
+#define PCI_DEVICE_ID_P2010E 0x0078
+#define PCI_DEVICE_ID_P2010 0x0079
+#define PCI_DEVICE_ID_P1020E 0x0100
+#define PCI_DEVICE_ID_P1020 0x0101
+#define PCI_DEVICE_ID_P1011E 0x0108
+#define PCI_DEVICE_ID_P1011 0x0109
+#define PCI_DEVICE_ID_P1022E 0x0110
+#define PCI_DEVICE_ID_P1022 0x0111
+#define PCI_DEVICE_ID_P1013E 0x0118
+#define PCI_DEVICE_ID_P1013 0x0119
+#define PCI_DEVICE_ID_P4080E 0x0400
+#define PCI_DEVICE_ID_P4080 0x0401
+#define PCI_DEVICE_ID_P4040E 0x0408
+#define PCI_DEVICE_ID_P4040 0x0409
#define PCI_DEVICE_ID_MPC8641 0x7010
#define PCI_DEVICE_ID_MPC8641D 0x7011
#define PCI_DEVICE_ID_MPC8610 0x7018
diff --git a/include/linux/percpu-defs.h b/include/linux/percpu-defs.h
index 9bd03193ecd..5a5d6ce4bd5 100644
--- a/include/linux/percpu-defs.h
+++ b/include/linux/percpu-defs.h
@@ -60,6 +60,7 @@
#define DEFINE_PER_CPU_SECTION(type, name, sec) \
__PCPU_DUMMY_ATTRS char __pcpu_scope_##name; \
+ extern __PCPU_DUMMY_ATTRS char __pcpu_unique_##name; \
__PCPU_DUMMY_ATTRS char __pcpu_unique_##name; \
__PCPU_ATTRS(sec) PER_CPU_DEF_ATTRIBUTES __weak \
__typeof__(type) per_cpu__##name
diff --git a/include/linux/percpu.h b/include/linux/percpu.h
index 878836ca999..cf5efbcf716 100644
--- a/include/linux/percpu.h
+++ b/include/linux/percpu.h
@@ -34,8 +34,6 @@
#ifdef CONFIG_SMP
-#ifndef CONFIG_HAVE_LEGACY_PER_CPU_AREA
-
/* minimum unit size, also is the maximum supported allocation size */
#define PCPU_MIN_UNIT_SIZE PFN_ALIGN(64 << 10)
@@ -130,30 +128,9 @@ extern int __init pcpu_page_first_chunk(size_t reserved_size,
#define per_cpu_ptr(ptr, cpu) SHIFT_PERCPU_PTR((ptr), per_cpu_offset((cpu)))
extern void *__alloc_reserved_percpu(size_t size, size_t align);
-
-#else /* CONFIG_HAVE_LEGACY_PER_CPU_AREA */
-
-struct percpu_data {
- void *ptrs[1];
-};
-
-/* pointer disguising messes up the kmemleak objects tracking */
-#ifndef CONFIG_DEBUG_KMEMLEAK
-#define __percpu_disguise(pdata) (struct percpu_data *)~(unsigned long)(pdata)
-#else
-#define __percpu_disguise(pdata) (struct percpu_data *)(pdata)
-#endif
-
-#define per_cpu_ptr(ptr, cpu) \
-({ \
- struct percpu_data *__p = __percpu_disguise(ptr); \
- (__typeof__(ptr))__p->ptrs[(cpu)]; \
-})
-
-#endif /* CONFIG_HAVE_LEGACY_PER_CPU_AREA */
-
extern void *__alloc_percpu(size_t size, size_t align);
extern void free_percpu(void *__pdata);
+extern phys_addr_t per_cpu_ptr_to_phys(void *addr);
#ifndef CONFIG_HAVE_SETUP_PER_CPU_AREA
extern void __init setup_per_cpu_areas(void);
@@ -179,6 +156,11 @@ static inline void free_percpu(void *p)
kfree(p);
}
+static inline phys_addr_t per_cpu_ptr_to_phys(void *addr)
+{
+ return __pa(addr);
+}
+
static inline void __init setup_per_cpu_areas(void) { }
static inline void *pcpu_lpage_remapped(void *kaddr)
@@ -188,8 +170,8 @@ static inline void *pcpu_lpage_remapped(void *kaddr)
#endif /* CONFIG_SMP */
-#define alloc_percpu(type) (type *)__alloc_percpu(sizeof(type), \
- __alignof__(type))
+#define alloc_percpu(type) \
+ (typeof(type) *)__alloc_percpu(sizeof(type), __alignof__(type))
/*
* Optional methods for optimized non-lvalue per-cpu variable access.
@@ -243,4 +225,404 @@ do { \
# define percpu_xor(var, val) __percpu_generic_to_op(var, (val), ^=)
#endif
+/*
+ * Dispatch macros that branch a generic operation to size-specific
+ * implementations, selected by the scalar size of the object handled.
+ */
+
+extern void __bad_size_call_parameter(void);
+
+#define __pcpu_size_call_return(stem, variable) \
+({ typeof(variable) pscr_ret__; \
+ switch(sizeof(variable)) { \
+ case 1: pscr_ret__ = stem##1(variable);break; \
+ case 2: pscr_ret__ = stem##2(variable);break; \
+ case 4: pscr_ret__ = stem##4(variable);break; \
+ case 8: pscr_ret__ = stem##8(variable);break; \
+ default: \
+ __bad_size_call_parameter();break; \
+ } \
+ pscr_ret__; \
+})
+
+#define __pcpu_size_call(stem, variable, ...) \
+do { \
+ switch(sizeof(variable)) { \
+ case 1: stem##1(variable, __VA_ARGS__);break; \
+ case 2: stem##2(variable, __VA_ARGS__);break; \
+ case 4: stem##4(variable, __VA_ARGS__);break; \
+ case 8: stem##8(variable, __VA_ARGS__);break; \
+ default: \
+ __bad_size_call_parameter();break; \
+ } \
+} while (0)
+
+/*
+ * Optimized manipulation for memory allocated through the per cpu
+ * allocator or for addresses of per cpu variables (can be determined
+ * using per_cpu_var(xx)).
+ *
+ * These operations guarantee exclusivity of access for other operations
+ * on the *same* processor. The assumption is that per cpu data is only
+ * accessed by a single processor instance (the current one).
+ *
+ * The first group is used for accesses that must be done in a
+ * preemption safe way since we know that the context is not preempt
+ * safe. Interrupts may occur. If the interrupt modifies the variable
+ * too then RMW actions will not be reliable.
+ *
+ * The arch code can provide optimized functions in two ways:
+ *
+ * 1. Override the function completely. F.e. define this_cpu_add().
+ * The arch must then ensure that the various scalar format passed
+ * are handled correctly.
+ *
+ * 2. Provide functions for certain scalar sizes. F.e. provide
+ * this_cpu_add_2() to provide per cpu atomic operations for 2 byte
+ * sized RMW actions. If arch code does not provide operations for
+ * a scalar size then the fallback in the generic code will be
+ * used.
+ */
+
+#define _this_cpu_generic_read(pcp) \
+({ typeof(pcp) ret__; \
+ preempt_disable(); \
+ ret__ = *this_cpu_ptr(&(pcp)); \
+ preempt_enable(); \
+ ret__; \
+})
+
+#ifndef this_cpu_read
+# ifndef this_cpu_read_1
+# define this_cpu_read_1(pcp) _this_cpu_generic_read(pcp)
+# endif
+# ifndef this_cpu_read_2
+# define this_cpu_read_2(pcp) _this_cpu_generic_read(pcp)
+# endif
+# ifndef this_cpu_read_4
+# define this_cpu_read_4(pcp) _this_cpu_generic_read(pcp)
+# endif
+# ifndef this_cpu_read_8
+# define this_cpu_read_8(pcp) _this_cpu_generic_read(pcp)
+# endif
+# define this_cpu_read(pcp) __pcpu_size_call_return(this_cpu_read_, (pcp))
+#endif
+
+#define _this_cpu_generic_to_op(pcp, val, op) \
+do { \
+ preempt_disable(); \
+ *__this_cpu_ptr(&pcp) op val; \
+ preempt_enable(); \
+} while (0)
+
+#ifndef this_cpu_write
+# ifndef this_cpu_write_1
+# define this_cpu_write_1(pcp, val) _this_cpu_generic_to_op((pcp), (val), =)
+# endif
+# ifndef this_cpu_write_2
+# define this_cpu_write_2(pcp, val) _this_cpu_generic_to_op((pcp), (val), =)
+# endif
+# ifndef this_cpu_write_4
+# define this_cpu_write_4(pcp, val) _this_cpu_generic_to_op((pcp), (val), =)
+# endif
+# ifndef this_cpu_write_8
+# define this_cpu_write_8(pcp, val) _this_cpu_generic_to_op((pcp), (val), =)
+# endif
+# define this_cpu_write(pcp, val) __pcpu_size_call(this_cpu_write_, (pcp), (val))
+#endif
+
+#ifndef this_cpu_add
+# ifndef this_cpu_add_1
+# define this_cpu_add_1(pcp, val) _this_cpu_generic_to_op((pcp), (val), +=)
+# endif
+# ifndef this_cpu_add_2
+# define this_cpu_add_2(pcp, val) _this_cpu_generic_to_op((pcp), (val), +=)
+# endif
+# ifndef this_cpu_add_4
+# define this_cpu_add_4(pcp, val) _this_cpu_generic_to_op((pcp), (val), +=)
+# endif
+# ifndef this_cpu_add_8
+# define this_cpu_add_8(pcp, val) _this_cpu_generic_to_op((pcp), (val), +=)
+# endif
+# define this_cpu_add(pcp, val) __pcpu_size_call(this_cpu_add_, (pcp), (val))
+#endif
+
+#ifndef this_cpu_sub
+# define this_cpu_sub(pcp, val) this_cpu_add((pcp), -(val))
+#endif
+
+#ifndef this_cpu_inc
+# define this_cpu_inc(pcp) this_cpu_add((pcp), 1)
+#endif
+
+#ifndef this_cpu_dec
+# define this_cpu_dec(pcp) this_cpu_sub((pcp), 1)
+#endif
+
+#ifndef this_cpu_and
+# ifndef this_cpu_and_1
+# define this_cpu_and_1(pcp, val) _this_cpu_generic_to_op((pcp), (val), &=)
+# endif
+# ifndef this_cpu_and_2
+# define this_cpu_and_2(pcp, val) _this_cpu_generic_to_op((pcp), (val), &=)
+# endif
+# ifndef this_cpu_and_4
+# define this_cpu_and_4(pcp, val) _this_cpu_generic_to_op((pcp), (val), &=)
+# endif
+# ifndef this_cpu_and_8
+# define this_cpu_and_8(pcp, val) _this_cpu_generic_to_op((pcp), (val), &=)
+# endif
+# define this_cpu_and(pcp, val) __pcpu_size_call(this_cpu_and_, (pcp), (val))
+#endif
+
+#ifndef this_cpu_or
+# ifndef this_cpu_or_1
+# define this_cpu_or_1(pcp, val) _this_cpu_generic_to_op((pcp), (val), |=)
+# endif
+# ifndef this_cpu_or_2
+# define this_cpu_or_2(pcp, val) _this_cpu_generic_to_op((pcp), (val), |=)
+# endif
+# ifndef this_cpu_or_4
+# define this_cpu_or_4(pcp, val) _this_cpu_generic_to_op((pcp), (val), |=)
+# endif
+# ifndef this_cpu_or_8
+# define this_cpu_or_8(pcp, val) _this_cpu_generic_to_op((pcp), (val), |=)
+# endif
+# define this_cpu_or(pcp, val) __pcpu_size_call(this_cpu_or_, (pcp), (val))
+#endif
+
+#ifndef this_cpu_xor
+# ifndef this_cpu_xor_1
+# define this_cpu_xor_1(pcp, val) _this_cpu_generic_to_op((pcp), (val), ^=)
+# endif
+# ifndef this_cpu_xor_2
+# define this_cpu_xor_2(pcp, val) _this_cpu_generic_to_op((pcp), (val), ^=)
+# endif
+# ifndef this_cpu_xor_4
+# define this_cpu_xor_4(pcp, val) _this_cpu_generic_to_op((pcp), (val), ^=)
+# endif
+# ifndef this_cpu_xor_8
+# define this_cpu_xor_8(pcp, val) _this_cpu_generic_to_op((pcp), (val), ^=)
+# endif
+# define this_cpu_xor(pcp, val)	__pcpu_size_call(this_cpu_xor_, (pcp), (val))
+#endif
+
+/*
+ * Generic percpu operations that do not require preemption handling.
+ * Either we do not care about races or the caller has the
+ * responsibility of handling preemption issues. Arch code can still
+ * override these instructions since the arch per cpu code may be more
+ * efficient and may actually get race freeness for free (that is the
+ * case for x86 for example).
+ *
+ * If there is no other protection through preempt disable and/or
+ * disabling interrupts then one of these RMW operations can show unexpected
+ * behavior because the execution thread was rescheduled on another processor
+ * or an interrupt occurred and the same percpu variable was modified from
+ * the interrupt context.
+ */
+#ifndef __this_cpu_read
+# ifndef __this_cpu_read_1
+# define __this_cpu_read_1(pcp) (*__this_cpu_ptr(&(pcp)))
+# endif
+# ifndef __this_cpu_read_2
+# define __this_cpu_read_2(pcp) (*__this_cpu_ptr(&(pcp)))
+# endif
+# ifndef __this_cpu_read_4
+# define __this_cpu_read_4(pcp) (*__this_cpu_ptr(&(pcp)))
+# endif
+# ifndef __this_cpu_read_8
+# define __this_cpu_read_8(pcp) (*__this_cpu_ptr(&(pcp)))
+# endif
+# define __this_cpu_read(pcp) __pcpu_size_call_return(__this_cpu_read_, (pcp))
+#endif
+
+#define __this_cpu_generic_to_op(pcp, val, op) \
+do { \
+ *__this_cpu_ptr(&(pcp)) op val; \
+} while (0)
+
+#ifndef __this_cpu_write
+# ifndef __this_cpu_write_1
+# define __this_cpu_write_1(pcp, val) __this_cpu_generic_to_op((pcp), (val), =)
+# endif
+# ifndef __this_cpu_write_2
+# define __this_cpu_write_2(pcp, val) __this_cpu_generic_to_op((pcp), (val), =)
+# endif
+# ifndef __this_cpu_write_4
+# define __this_cpu_write_4(pcp, val) __this_cpu_generic_to_op((pcp), (val), =)
+# endif
+# ifndef __this_cpu_write_8
+# define __this_cpu_write_8(pcp, val) __this_cpu_generic_to_op((pcp), (val), =)
+# endif
+# define __this_cpu_write(pcp, val) __pcpu_size_call(__this_cpu_write_, (pcp), (val))
+#endif
+
+#ifndef __this_cpu_add
+# ifndef __this_cpu_add_1
+# define __this_cpu_add_1(pcp, val) __this_cpu_generic_to_op((pcp), (val), +=)
+# endif
+# ifndef __this_cpu_add_2
+# define __this_cpu_add_2(pcp, val) __this_cpu_generic_to_op((pcp), (val), +=)
+# endif
+# ifndef __this_cpu_add_4
+# define __this_cpu_add_4(pcp, val) __this_cpu_generic_to_op((pcp), (val), +=)
+# endif
+# ifndef __this_cpu_add_8
+# define __this_cpu_add_8(pcp, val) __this_cpu_generic_to_op((pcp), (val), +=)
+# endif
+# define __this_cpu_add(pcp, val) __pcpu_size_call(__this_cpu_add_, (pcp), (val))
+#endif
+
+#ifndef __this_cpu_sub
+# define __this_cpu_sub(pcp, val) __this_cpu_add((pcp), -(val))
+#endif
+
+#ifndef __this_cpu_inc
+# define __this_cpu_inc(pcp) __this_cpu_add((pcp), 1)
+#endif
+
+#ifndef __this_cpu_dec
+# define __this_cpu_dec(pcp) __this_cpu_sub((pcp), 1)
+#endif
+
+#ifndef __this_cpu_and
+# ifndef __this_cpu_and_1
+# define __this_cpu_and_1(pcp, val) __this_cpu_generic_to_op((pcp), (val), &=)
+# endif
+# ifndef __this_cpu_and_2
+# define __this_cpu_and_2(pcp, val) __this_cpu_generic_to_op((pcp), (val), &=)
+# endif
+# ifndef __this_cpu_and_4
+# define __this_cpu_and_4(pcp, val) __this_cpu_generic_to_op((pcp), (val), &=)
+# endif
+# ifndef __this_cpu_and_8
+# define __this_cpu_and_8(pcp, val) __this_cpu_generic_to_op((pcp), (val), &=)
+# endif
+# define __this_cpu_and(pcp, val) __pcpu_size_call(__this_cpu_and_, (pcp), (val))
+#endif
+
+#ifndef __this_cpu_or
+# ifndef __this_cpu_or_1
+# define __this_cpu_or_1(pcp, val) __this_cpu_generic_to_op((pcp), (val), |=)
+# endif
+# ifndef __this_cpu_or_2
+# define __this_cpu_or_2(pcp, val) __this_cpu_generic_to_op((pcp), (val), |=)
+# endif
+# ifndef __this_cpu_or_4
+# define __this_cpu_or_4(pcp, val) __this_cpu_generic_to_op((pcp), (val), |=)
+# endif
+# ifndef __this_cpu_or_8
+# define __this_cpu_or_8(pcp, val) __this_cpu_generic_to_op((pcp), (val), |=)
+# endif
+# define __this_cpu_or(pcp, val) __pcpu_size_call(__this_cpu_or_, (pcp), (val))
+#endif
+
+#ifndef __this_cpu_xor
+# ifndef __this_cpu_xor_1
+# define __this_cpu_xor_1(pcp, val) __this_cpu_generic_to_op((pcp), (val), ^=)
+# endif
+# ifndef __this_cpu_xor_2
+# define __this_cpu_xor_2(pcp, val) __this_cpu_generic_to_op((pcp), (val), ^=)
+# endif
+# ifndef __this_cpu_xor_4
+# define __this_cpu_xor_4(pcp, val) __this_cpu_generic_to_op((pcp), (val), ^=)
+# endif
+# ifndef __this_cpu_xor_8
+# define __this_cpu_xor_8(pcp, val) __this_cpu_generic_to_op((pcp), (val), ^=)
+# endif
+# define __this_cpu_xor(pcp, val) __pcpu_size_call(__this_cpu_xor_, (pcp), (val))
+#endif
+
+/*
+ * IRQ safe versions of the per cpu RMW operations. Note that these operations
+ * are *not* safe against modification of the same variable from another
+ * processor (which one gets when using regular atomic operations).
+ * They are guaranteed to be atomic vs. local interrupts and
+ * preemption only.
+ */
+#define irqsafe_cpu_generic_to_op(pcp, val, op) \
+do { \
+ unsigned long flags; \
+ local_irq_save(flags); \
+ *__this_cpu_ptr(&(pcp)) op val; \
+ local_irq_restore(flags); \
+} while (0)
+
+#ifndef irqsafe_cpu_add
+# ifndef irqsafe_cpu_add_1
+# define irqsafe_cpu_add_1(pcp, val) irqsafe_cpu_generic_to_op((pcp), (val), +=)
+# endif
+# ifndef irqsafe_cpu_add_2
+# define irqsafe_cpu_add_2(pcp, val) irqsafe_cpu_generic_to_op((pcp), (val), +=)
+# endif
+# ifndef irqsafe_cpu_add_4
+# define irqsafe_cpu_add_4(pcp, val) irqsafe_cpu_generic_to_op((pcp), (val), +=)
+# endif
+# ifndef irqsafe_cpu_add_8
+# define irqsafe_cpu_add_8(pcp, val) irqsafe_cpu_generic_to_op((pcp), (val), +=)
+# endif
+# define irqsafe_cpu_add(pcp, val) __pcpu_size_call(irqsafe_cpu_add_, (pcp), (val))
+#endif
+
+#ifndef irqsafe_cpu_sub
+# define irqsafe_cpu_sub(pcp, val) irqsafe_cpu_add((pcp), -(val))
+#endif
+
+#ifndef irqsafe_cpu_inc
+# define irqsafe_cpu_inc(pcp) irqsafe_cpu_add((pcp), 1)
+#endif
+
+#ifndef irqsafe_cpu_dec
+# define irqsafe_cpu_dec(pcp) irqsafe_cpu_sub((pcp), 1)
+#endif
+
+#ifndef irqsafe_cpu_and
+# ifndef irqsafe_cpu_and_1
+# define irqsafe_cpu_and_1(pcp, val) irqsafe_cpu_generic_to_op((pcp), (val), &=)
+# endif
+# ifndef irqsafe_cpu_and_2
+# define irqsafe_cpu_and_2(pcp, val) irqsafe_cpu_generic_to_op((pcp), (val), &=)
+# endif
+# ifndef irqsafe_cpu_and_4
+# define irqsafe_cpu_and_4(pcp, val) irqsafe_cpu_generic_to_op((pcp), (val), &=)
+# endif
+# ifndef irqsafe_cpu_and_8
+# define irqsafe_cpu_and_8(pcp, val) irqsafe_cpu_generic_to_op((pcp), (val), &=)
+# endif
+# define irqsafe_cpu_and(pcp, val) __pcpu_size_call(irqsafe_cpu_and_, (pcp), (val))
+#endif
+
+#ifndef irqsafe_cpu_or
+# ifndef irqsafe_cpu_or_1
+# define irqsafe_cpu_or_1(pcp, val) irqsafe_cpu_generic_to_op((pcp), (val), |=)
+# endif
+# ifndef irqsafe_cpu_or_2
+# define irqsafe_cpu_or_2(pcp, val) irqsafe_cpu_generic_to_op((pcp), (val), |=)
+# endif
+# ifndef irqsafe_cpu_or_4
+# define irqsafe_cpu_or_4(pcp, val) irqsafe_cpu_generic_to_op((pcp), (val), |=)
+# endif
+# ifndef irqsafe_cpu_or_8
+# define irqsafe_cpu_or_8(pcp, val) irqsafe_cpu_generic_to_op((pcp), (val), |=)
+# endif
+# define irqsafe_cpu_or(pcp, val) __pcpu_size_call(irqsafe_cpu_or_, (pcp), (val))
+#endif
+
+#ifndef irqsafe_cpu_xor
+# ifndef irqsafe_cpu_xor_1
+# define irqsafe_cpu_xor_1(pcp, val) irqsafe_cpu_generic_to_op((pcp), (val), ^=)
+# endif
+# ifndef irqsafe_cpu_xor_2
+# define irqsafe_cpu_xor_2(pcp, val) irqsafe_cpu_generic_to_op((pcp), (val), ^=)
+# endif
+# ifndef irqsafe_cpu_xor_4
+# define irqsafe_cpu_xor_4(pcp, val) irqsafe_cpu_generic_to_op((pcp), (val), ^=)
+# endif
+# ifndef irqsafe_cpu_xor_8
+# define irqsafe_cpu_xor_8(pcp, val) irqsafe_cpu_generic_to_op((pcp), (val), ^=)
+# endif
+# define irqsafe_cpu_xor(pcp, val) __pcpu_size_call(irqsafe_cpu_xor_, (pcp), (val))
+#endif
+
#endif /* __LINUX_PERCPU_H */
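
A brief sketch of how the three operation families added above differ, assuming a DEFINE_PER_CPU variable accessed through per_cpu_var() as the comment in this hunk suggests; the demo_* names are illustrative:

#include <linux/percpu.h>

static DEFINE_PER_CPU(unsigned long, demo_counter);

static void demo_count(void)
{
	/* Safe with preemption enabled: disables preemption around the RMW. */
	this_cpu_inc(per_cpu_var(demo_counter));
}

static void demo_count_pinned(void)
{
	/* Caller already runs with preemption disabled (or accepts races). */
	__this_cpu_add(per_cpu_var(demo_counter), 2);
}

static void demo_count_vs_irq(void)
{
	/* Additionally safe against an interrupt handler modifying the same
	 * variable on this CPU; still not atomic across CPUs. */
	irqsafe_cpu_inc(per_cpu_var(demo_counter));
}
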
diff --git a/include/linux/perf_event.h b/include/linux/perf_event.h
index 43adbd7f001..da7bdc23f27 100644
--- a/include/linux/perf_event.h
+++ b/include/linux/perf_event.h
@@ -18,10 +18,6 @@
#include <linux/ioctl.h>
#include <asm/byteorder.h>
-#ifdef CONFIG_HAVE_HW_BREAKPOINT
-#include <asm/hw_breakpoint.h>
-#endif
-
/*
* User-space ABI bits:
*/
@@ -215,12 +211,12 @@ struct perf_event_attr {
__u32 wakeup_watermark; /* bytes before wakeup */
};
- union {
- struct { /* Hardware breakpoint info */
- __u64 bp_addr;
- __u32 bp_type;
- __u32 bp_len;
- };
+ struct { /* Hardware breakpoint info */
+ __u64 bp_addr;
+ __u32 bp_type;
+ __u32 bp_len;
+ __u64 __bp_reserved_1;
+ __u64 __bp_reserved_2;
};
__u32 __reserved_2;
@@ -451,6 +447,10 @@ enum perf_callchain_context {
# include <asm/perf_event.h>
#endif
+#ifdef CONFIG_HAVE_HW_BREAKPOINT
+#include <asm/hw_breakpoint.h>
+#endif
+
#include <linux/list.h>
#include <linux/mutex.h>
#include <linux/rculist.h>
@@ -565,10 +565,12 @@ struct perf_pending_entry {
void (*func)(struct perf_pending_entry *);
};
-typedef void (*perf_callback_t)(struct perf_event *, void *);
-
struct perf_sample_data;
+typedef void (*perf_overflow_handler_t)(struct perf_event *, int,
+ struct perf_sample_data *,
+ struct pt_regs *regs);
+
/**
* struct perf_event - performance event kernel representation:
*/
@@ -660,18 +662,12 @@ struct perf_event {
struct pid_namespace *ns;
u64 id;
- void (*overflow_handler)(struct perf_event *event,
- int nmi, struct perf_sample_data *data,
- struct pt_regs *regs);
+ perf_overflow_handler_t overflow_handler;
#ifdef CONFIG_EVENT_PROFILE
struct event_filter *filter;
#endif
- perf_callback_t callback;
-
- perf_callback_t event_callback;
-
#endif /* CONFIG_PERF_EVENTS */
};
@@ -685,7 +681,7 @@ struct perf_event_context {
* Protect the states of the events in the list,
* nr_active, and the list:
*/
- spinlock_t lock;
+ raw_spinlock_t lock;
/*
* Protect the list of events. Locking either mutex or lock
* is sufficient to ensure the list doesn't change; to change
@@ -781,7 +777,7 @@ extern struct perf_event *
perf_event_create_kernel_counter(struct perf_event_attr *attr,
int cpu,
pid_t pid,
- perf_callback_t callback);
+ perf_overflow_handler_t callback);
extern u64 perf_event_read_value(struct perf_event *event,
u64 *enabled, u64 *running);
@@ -876,6 +872,8 @@ extern void perf_output_copy(struct perf_output_handle *handle,
const void *buf, unsigned int len);
extern int perf_swevent_get_recursion_context(void);
extern void perf_swevent_put_recursion_context(int rctx);
+extern void perf_event_enable(struct perf_event *event);
+extern void perf_event_disable(struct perf_event *event);
#else
static inline void
perf_event_task_sched_in(struct task_struct *task, int cpu) { }
@@ -906,7 +904,8 @@ static inline void perf_event_fork(struct task_struct *tsk) { }
static inline void perf_event_init(void) { }
static inline int perf_swevent_get_recursion_context(void) { return -1; }
static inline void perf_swevent_put_recursion_context(int rctx) { }
-
+static inline void perf_event_enable(struct perf_event *event) { }
+static inline void perf_event_disable(struct perf_event *event) { }
#endif
#define perf_output_put(handle, x) \
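
A hedged sketch of a callback matching the new perf_overflow_handler_t typedef and the updated perf_event_create_kernel_counter() signature; the demo_* names are illustrative, not kernel APIs:

#include <linux/perf_event.h>

static void demo_overflow(struct perf_event *event, int nmi,
			  struct perf_sample_data *data,
			  struct pt_regs *regs)
{
	/* Called when the counter overflows; nmi is nonzero in NMI context,
	 * so nothing here may sleep. */
}

static struct perf_event *demo_create(struct perf_event_attr *attr, int cpu)
{
	/* pid == -1 requests a CPU-bound counter rather than a per-task one. */
	return perf_event_create_kernel_counter(attr, cpu, -1, demo_overflow);
}
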
diff --git a/include/linux/plist.h b/include/linux/plist.h
index 45926d77d6a..8227f717c70 100644
--- a/include/linux/plist.h
+++ b/include/linux/plist.h
@@ -81,7 +81,8 @@ struct plist_head {
struct list_head prio_list;
struct list_head node_list;
#ifdef CONFIG_DEBUG_PI_LIST
- spinlock_t *lock;
+ raw_spinlock_t *rawlock;
+ spinlock_t *spinlock;
#endif
};
@@ -91,9 +92,11 @@ struct plist_node {
};
#ifdef CONFIG_DEBUG_PI_LIST
-# define PLIST_HEAD_LOCK_INIT(_lock) .lock = _lock
+# define PLIST_HEAD_LOCK_INIT(_lock) .spinlock = _lock
+# define PLIST_HEAD_LOCK_INIT_RAW(_lock) .rawlock = _lock
#else
# define PLIST_HEAD_LOCK_INIT(_lock)
+# define PLIST_HEAD_LOCK_INIT_RAW(_lock)
#endif
#define _PLIST_HEAD_INIT(head) \
@@ -107,11 +110,22 @@ struct plist_node {
*/
#define PLIST_HEAD_INIT(head, _lock) \
{ \
- _PLIST_HEAD_INIT(head), \
+ _PLIST_HEAD_INIT(head), \
PLIST_HEAD_LOCK_INIT(&(_lock)) \
}
/**
+ * PLIST_HEAD_INIT_RAW - static struct plist_head initializer
+ * @head: struct plist_head variable name
+ * @_lock: lock to initialize for this list
+ */
+#define PLIST_HEAD_INIT_RAW(head, _lock) \
+{ \
+ _PLIST_HEAD_INIT(head), \
+ PLIST_HEAD_LOCK_INIT_RAW(&(_lock)) \
+}
+
+/**
* PLIST_NODE_INIT - static struct plist_node initializer
* @node: struct plist_node variable name
* @__prio: initial node priority
@@ -119,13 +133,13 @@ struct plist_node {
#define PLIST_NODE_INIT(node, __prio) \
{ \
.prio = (__prio), \
- .plist = { _PLIST_HEAD_INIT((node).plist) }, \
+ .plist = { _PLIST_HEAD_INIT((node).plist) }, \
}
/**
* plist_head_init - dynamic struct plist_head initializer
* @head: &struct plist_head pointer
- * @lock: list spinlock, remembered for debugging
+ * @lock: spinlock protecting the list (debugging)
*/
static inline void
plist_head_init(struct plist_head *head, spinlock_t *lock)
@@ -133,7 +147,24 @@ plist_head_init(struct plist_head *head, spinlock_t *lock)
INIT_LIST_HEAD(&head->prio_list);
INIT_LIST_HEAD(&head->node_list);
#ifdef CONFIG_DEBUG_PI_LIST
- head->lock = lock;
+ head->spinlock = lock;
+ head->rawlock = NULL;
+#endif
+}
+
+/**
+ * plist_head_init_raw - dynamic struct plist_head initializer
+ * @head: &struct plist_head pointer
+ * @lock: raw_spinlock protecting the list (debugging)
+ */
+static inline void
+plist_head_init_raw(struct plist_head *head, raw_spinlock_t *lock)
+{
+ INIT_LIST_HEAD(&head->prio_list);
+ INIT_LIST_HEAD(&head->node_list);
+#ifdef CONFIG_DEBUG_PI_LIST
+ head->rawlock = lock;
+ head->spinlock = NULL;
#endif
}
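
A short sketch of the raw-lock variants added above, in both static and dynamic form, mirroring the rtmutex.h conversion further below; the demo_* names are illustrative:

#include <linux/plist.h>
#include <linux/spinlock.h>

static raw_spinlock_t demo_lock = __RAW_SPIN_LOCK_UNLOCKED(demo_lock);
static struct plist_head demo_head = PLIST_HEAD_INIT_RAW(demo_head, demo_lock);

static void demo_init(struct plist_head *head, raw_spinlock_t *lock)
{
	/* Dynamic initialization; with CONFIG_DEBUG_PI_LIST the raw lock is
	 * remembered and the spinlock pointer is cleared. */
	plist_head_init_raw(head, lock);
}
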
diff --git a/include/linux/pm.h b/include/linux/pm.h
index 0d65934246a..198b8f9fe05 100644
--- a/include/linux/pm.h
+++ b/include/linux/pm.h
@@ -219,7 +219,7 @@ struct dev_pm_ops {
* to RAM and hibernation.
*/
#define SIMPLE_DEV_PM_OPS(name, suspend_fn, resume_fn) \
-struct dev_pm_ops name = { \
+const struct dev_pm_ops name = { \
.suspend = suspend_fn, \
.resume = resume_fn, \
.freeze = suspend_fn, \
diff --git a/include/linux/ptrace.h b/include/linux/ptrace.h
index 7456d7d87a1..56f2d63a5cb 100644
--- a/include/linux/ptrace.h
+++ b/include/linux/ptrace.h
@@ -105,12 +105,7 @@ static inline int ptrace_reparented(struct task_struct *child)
{
return child->real_parent != child->parent;
}
-static inline void ptrace_link(struct task_struct *child,
- struct task_struct *new_parent)
-{
- if (unlikely(child->ptrace))
- __ptrace_link(child, new_parent);
-}
+
static inline void ptrace_unlink(struct task_struct *child)
{
if (unlikely(child->ptrace))
@@ -169,9 +164,9 @@ static inline void ptrace_init_task(struct task_struct *child, bool ptrace)
INIT_LIST_HEAD(&child->ptraced);
child->parent = child->real_parent;
child->ptrace = 0;
- if (unlikely(ptrace)) {
+ if (unlikely(ptrace) && (current->ptrace & PT_PTRACED)) {
child->ptrace = current->ptrace;
- ptrace_link(child, current->parent);
+ __ptrace_link(child, current->parent);
}
}
@@ -278,6 +273,18 @@ static inline void user_enable_block_step(struct task_struct *task)
}
#endif /* arch_has_block_step */
+#ifdef ARCH_HAS_USER_SINGLE_STEP_INFO
+extern void user_single_step_siginfo(struct task_struct *tsk,
+ struct pt_regs *regs, siginfo_t *info);
+#else
+static inline void user_single_step_siginfo(struct task_struct *tsk,
+ struct pt_regs *regs, siginfo_t *info)
+{
+ memset(info, 0, sizeof(*info));
+ info->si_signo = SIGTRAP;
+}
+#endif
+
#ifndef arch_ptrace_stop_needed
/**
* arch_ptrace_stop_needed - Decide whether arch_ptrace_stop() should be called
diff --git a/include/linux/raid/pq.h b/include/linux/raid/pq.h
index d92480f8285..1cbbd2c11aa 100644
--- a/include/linux/raid/pq.h
+++ b/include/linux/raid/pq.h
@@ -78,6 +78,25 @@ struct raid6_calls {
/* Selected algorithm */
extern struct raid6_calls raid6_call;
+/* Various routine sets */
+extern const struct raid6_calls raid6_intx1;
+extern const struct raid6_calls raid6_intx2;
+extern const struct raid6_calls raid6_intx4;
+extern const struct raid6_calls raid6_intx8;
+extern const struct raid6_calls raid6_intx16;
+extern const struct raid6_calls raid6_intx32;
+extern const struct raid6_calls raid6_mmxx1;
+extern const struct raid6_calls raid6_mmxx2;
+extern const struct raid6_calls raid6_sse1x1;
+extern const struct raid6_calls raid6_sse1x2;
+extern const struct raid6_calls raid6_sse2x1;
+extern const struct raid6_calls raid6_sse2x2;
+extern const struct raid6_calls raid6_sse2x4;
+extern const struct raid6_calls raid6_altivec1;
+extern const struct raid6_calls raid6_altivec2;
+extern const struct raid6_calls raid6_altivec4;
+extern const struct raid6_calls raid6_altivec8;
+
/* Algorithm list */
extern const struct raid6_calls * const raid6_algos[];
int raid6_select_algo(void);
diff --git a/include/linux/reiserfs_fs.h b/include/linux/reiserfs_fs.h
index a05b4a20768..c96c1858fe2 100644
--- a/include/linux/reiserfs_fs.h
+++ b/include/linux/reiserfs_fs.h
@@ -2051,25 +2051,12 @@ void set_de_name_and_namelen(struct reiserfs_dir_entry *de);
int search_by_entry_key(struct super_block *sb, const struct cpu_key *key,
struct treepath *path, struct reiserfs_dir_entry *de);
struct dentry *reiserfs_get_parent(struct dentry *);
-/* procfs.c */
-
-#if defined( CONFIG_PROC_FS ) && defined( CONFIG_REISERFS_PROC_INFO )
-#define REISERFS_PROC_INFO
-#else
-#undef REISERFS_PROC_INFO
-#endif
+#ifdef CONFIG_REISERFS_PROC_INFO
int reiserfs_proc_info_init(struct super_block *sb);
int reiserfs_proc_info_done(struct super_block *sb);
-struct proc_dir_entry *reiserfs_proc_register_global(char *name,
- read_proc_t * func);
-void reiserfs_proc_unregister_global(const char *name);
int reiserfs_proc_info_global_init(void);
int reiserfs_proc_info_global_done(void);
-int reiserfs_global_version_in_proc(char *buffer, char **start, off_t offset,
- int count, int *eof, void *data);
-
-#if defined( REISERFS_PROC_INFO )
#define PROC_EXP( e ) e
@@ -2084,6 +2071,26 @@ int reiserfs_global_version_in_proc(char *buffer, char **start, off_t offset,
PROC_INFO_ADD( sb, free_at[ ( level ) ], B_FREE_SPACE( bh ) ); \
PROC_INFO_ADD( sb, items_at[ ( level ) ], B_NR_ITEMS( bh ) )
#else
+static inline int reiserfs_proc_info_init(struct super_block *sb)
+{
+ return 0;
+}
+
+static inline int reiserfs_proc_info_done(struct super_block *sb)
+{
+ return 0;
+}
+
+static inline int reiserfs_proc_info_global_init(void)
+{
+ return 0;
+}
+
+static inline int reiserfs_proc_info_global_done(void)
+{
+ return 0;
+}
+
#define PROC_EXP( e )
#define VOID_V ( ( void ) 0 )
#define PROC_INFO_MAX( sb, field, value ) VOID_V
diff --git a/include/linux/rmap.h b/include/linux/rmap.h
index cb0ba703260..b019ae64e2a 100644
--- a/include/linux/rmap.h
+++ b/include/linux/rmap.h
@@ -26,6 +26,9 @@
*/
struct anon_vma {
spinlock_t lock; /* Serialize access to vma list */
+#ifdef CONFIG_KSM
+ atomic_t ksm_refcount;
+#endif
/*
* NOTE: the LSB of the head.next is set by
* mm_take_all_locks() _after_ taking the above lock. So the
@@ -38,6 +41,34 @@ struct anon_vma {
};
#ifdef CONFIG_MMU
+#ifdef CONFIG_KSM
+static inline void ksm_refcount_init(struct anon_vma *anon_vma)
+{
+ atomic_set(&anon_vma->ksm_refcount, 0);
+}
+
+static inline int ksm_refcount(struct anon_vma *anon_vma)
+{
+ return atomic_read(&anon_vma->ksm_refcount);
+}
+#else
+static inline void ksm_refcount_init(struct anon_vma *anon_vma)
+{
+}
+
+static inline int ksm_refcount(struct anon_vma *anon_vma)
+{
+ return 0;
+}
+#endif /* CONFIG_KSM */
+
+static inline struct anon_vma *page_anon_vma(struct page *page)
+{
+ if (((unsigned long)page->mapping & PAGE_MAPPING_FLAGS) !=
+ PAGE_MAPPING_ANON)
+ return NULL;
+ return page_rmapping(page);
+}
static inline void anon_vma_lock(struct vm_area_struct *vma)
{
@@ -62,6 +93,7 @@ void __anon_vma_merge(struct vm_area_struct *, struct vm_area_struct *);
void anon_vma_unlink(struct vm_area_struct *);
void anon_vma_link(struct vm_area_struct *);
void __anon_vma_link(struct vm_area_struct *);
+void anon_vma_free(struct anon_vma *);
/*
* rmap interfaces called when adding or removing pte of page
@@ -81,6 +113,9 @@ static inline void page_dup_rmap(struct page *page)
*/
int page_referenced(struct page *, int is_locked,
struct mem_cgroup *cnt, unsigned long *vm_flags);
+int page_referenced_one(struct page *, struct vm_area_struct *,
+ unsigned long address, unsigned int *mapcount, unsigned long *vm_flags);
+
enum ttu_flags {
TTU_UNMAP = 0, /* unmap mode */
TTU_MIGRATION = 1, /* migration mode */
@@ -94,6 +129,8 @@ enum ttu_flags {
#define TTU_ACTION(x) ((x) & TTU_ACTION_MASK)
int try_to_unmap(struct page *, enum ttu_flags flags);
+int try_to_unmap_one(struct page *, struct vm_area_struct *,
+ unsigned long address, enum ttu_flags flags);
/*
* Called from mm/filemap_xip.c to unmap empty zero page
@@ -127,6 +164,12 @@ struct anon_vma *page_lock_anon_vma(struct page *page);
void page_unlock_anon_vma(struct anon_vma *anon_vma);
int page_mapped_in_vma(struct page *page, struct vm_area_struct *vma);
+/*
+ * Called by migrate.c to remove migration ptes, but might be used more later.
+ */
+int rmap_walk(struct page *page, int (*rmap_one)(struct page *,
+ struct vm_area_struct *, unsigned long, void *), void *arg);
+
#else /* !CONFIG_MMU */
#define anon_vma_init() do {} while (0)
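
A minimal sketch of an rmap_walk() callback matching the prototype exported above; the demo_* names are illustrative, and returning SWAP_AGAIN (as the migration code does) keeps the walk going:

#include <linux/rmap.h>

static int demo_rmap_one(struct page *page, struct vm_area_struct *vma,
			 unsigned long address, void *arg)
{
	/* Invoked once per vma that maps the page. */
	return SWAP_AGAIN;
}

static void demo_walk(struct page *page)
{
	rmap_walk(page, demo_rmap_one, NULL);
}
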
diff --git a/include/linux/rtmutex.h b/include/linux/rtmutex.h
index f19b00b7d53..281d8fd775e 100644
--- a/include/linux/rtmutex.h
+++ b/include/linux/rtmutex.h
@@ -24,7 +24,7 @@
* @owner: the mutex owner
*/
struct rt_mutex {
- spinlock_t wait_lock;
+ raw_spinlock_t wait_lock;
struct plist_head wait_list;
struct task_struct *owner;
#ifdef CONFIG_DEBUG_RT_MUTEXES
@@ -63,8 +63,8 @@ struct hrtimer_sleeper;
#endif
#define __RT_MUTEX_INITIALIZER(mutexname) \
- { .wait_lock = __SPIN_LOCK_UNLOCKED(mutexname.wait_lock) \
- , .wait_list = PLIST_HEAD_INIT(mutexname.wait_list, mutexname.wait_lock) \
+ { .wait_lock = __RAW_SPIN_LOCK_UNLOCKED(mutexname.wait_lock) \
+ , .wait_list = PLIST_HEAD_INIT_RAW(mutexname.wait_list, mutexname.wait_lock) \
, .owner = NULL \
__DEBUG_RT_MUTEX_INITIALIZER(mutexname)}
diff --git a/include/linux/rwlock.h b/include/linux/rwlock.h
new file mode 100644
index 00000000000..71e0b00b6f2
--- /dev/null
+++ b/include/linux/rwlock.h
@@ -0,0 +1,125 @@
+#ifndef __LINUX_RWLOCK_H
+#define __LINUX_RWLOCK_H
+
+#ifndef __LINUX_SPINLOCK_H
+# error "please don't include this file directly"
+#endif
+
+/*
+ * rwlock related methods
+ *
+ * split out from spinlock.h
+ *
+ * portions Copyright 2005, Red Hat, Inc., Ingo Molnar
+ * Released under the General Public License (GPL).
+ */
+
+#ifdef CONFIG_DEBUG_SPINLOCK
+ extern void __rwlock_init(rwlock_t *lock, const char *name,
+ struct lock_class_key *key);
+# define rwlock_init(lock) \
+do { \
+ static struct lock_class_key __key; \
+ \
+ __rwlock_init((lock), #lock, &__key); \
+} while (0)
+#else
+# define rwlock_init(lock) \
+ do { *(lock) = __RW_LOCK_UNLOCKED(lock); } while (0)
+#endif
+
+#ifdef CONFIG_DEBUG_SPINLOCK
+ extern void do_raw_read_lock(rwlock_t *lock);
+#define do_raw_read_lock_flags(lock, flags) do_raw_read_lock(lock)
+ extern int do_raw_read_trylock(rwlock_t *lock);
+ extern void do_raw_read_unlock(rwlock_t *lock);
+ extern void do_raw_write_lock(rwlock_t *lock);
+#define do_raw_write_lock_flags(lock, flags) do_raw_write_lock(lock)
+ extern int do_raw_write_trylock(rwlock_t *lock);
+ extern void do_raw_write_unlock(rwlock_t *lock);
+#else
+# define do_raw_read_lock(rwlock) arch_read_lock(&(rwlock)->raw_lock)
+# define do_raw_read_lock_flags(lock, flags) \
+ arch_read_lock_flags(&(lock)->raw_lock, *(flags))
+# define do_raw_read_trylock(rwlock) arch_read_trylock(&(rwlock)->raw_lock)
+# define do_raw_read_unlock(rwlock) arch_read_unlock(&(rwlock)->raw_lock)
+# define do_raw_write_lock(rwlock) arch_write_lock(&(rwlock)->raw_lock)
+# define do_raw_write_lock_flags(lock, flags) \
+ arch_write_lock_flags(&(lock)->raw_lock, *(flags))
+# define do_raw_write_trylock(rwlock) arch_write_trylock(&(rwlock)->raw_lock)
+# define do_raw_write_unlock(rwlock) arch_write_unlock(&(rwlock)->raw_lock)
+#endif
+
+#define read_can_lock(rwlock) arch_read_can_lock(&(rwlock)->raw_lock)
+#define write_can_lock(rwlock) arch_write_can_lock(&(rwlock)->raw_lock)
+
+/*
+ * Define the various rw_lock methods. Note we define these
+ * regardless of whether CONFIG_SMP or CONFIG_PREEMPT are set. The various
+ * methods are defined as nops in the case they are not required.
+ */
+#define read_trylock(lock) __cond_lock(lock, _raw_read_trylock(lock))
+#define write_trylock(lock) __cond_lock(lock, _raw_write_trylock(lock))
+
+#define write_lock(lock) _raw_write_lock(lock)
+#define read_lock(lock) _raw_read_lock(lock)
+
+#if defined(CONFIG_SMP) || defined(CONFIG_DEBUG_SPINLOCK)
+
+#define read_lock_irqsave(lock, flags) \
+ do { \
+ typecheck(unsigned long, flags); \
+ flags = _raw_read_lock_irqsave(lock); \
+ } while (0)
+#define write_lock_irqsave(lock, flags) \
+ do { \
+ typecheck(unsigned long, flags); \
+ flags = _raw_write_lock_irqsave(lock); \
+ } while (0)
+
+#else
+
+#define read_lock_irqsave(lock, flags) \
+ do { \
+ typecheck(unsigned long, flags); \
+ _raw_read_lock_irqsave(lock, flags); \
+ } while (0)
+#define write_lock_irqsave(lock, flags) \
+ do { \
+ typecheck(unsigned long, flags); \
+ _raw_write_lock_irqsave(lock, flags); \
+ } while (0)
+
+#endif
+
+#define read_lock_irq(lock) _raw_read_lock_irq(lock)
+#define read_lock_bh(lock) _raw_read_lock_bh(lock)
+#define write_lock_irq(lock) _raw_write_lock_irq(lock)
+#define write_lock_bh(lock) _raw_write_lock_bh(lock)
+#define read_unlock(lock) _raw_read_unlock(lock)
+#define write_unlock(lock) _raw_write_unlock(lock)
+#define read_unlock_irq(lock) _raw_read_unlock_irq(lock)
+#define write_unlock_irq(lock) _raw_write_unlock_irq(lock)
+
+#define read_unlock_irqrestore(lock, flags) \
+ do { \
+ typecheck(unsigned long, flags); \
+ _raw_read_unlock_irqrestore(lock, flags); \
+ } while (0)
+#define read_unlock_bh(lock) _raw_read_unlock_bh(lock)
+
+#define write_unlock_irqrestore(lock, flags) \
+ do { \
+ typecheck(unsigned long, flags); \
+ _raw_write_unlock_irqrestore(lock, flags); \
+ } while (0)
+#define write_unlock_bh(lock) _raw_write_unlock_bh(lock)
+
+#define write_trylock_irqsave(lock, flags) \
+({ \
+ local_irq_save(flags); \
+ write_trylock(lock) ? \
+ 1 : ({ local_irq_restore(flags); 0; }); \
+})
+
+#endif /* __LINUX_RWLOCK_H */
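
A typical reader/writer usage sketch for the API split out above; note the header itself insists on being reached through <linux/spinlock.h>. The demo_* names are illustrative:

#include <linux/spinlock.h>

static DEFINE_RWLOCK(demo_rwlock);
static int demo_value;

static int demo_read(void)
{
	unsigned long flags;
	int v;

	read_lock_irqsave(&demo_rwlock, flags);
	v = demo_value;
	read_unlock_irqrestore(&demo_rwlock, flags);
	return v;
}

static void demo_write(int v)
{
	write_lock(&demo_rwlock);
	demo_value = v;
	write_unlock(&demo_rwlock);
}
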
diff --git a/include/linux/rwlock_api_smp.h b/include/linux/rwlock_api_smp.h
new file mode 100644
index 00000000000..9c9f0495d37
--- /dev/null
+++ b/include/linux/rwlock_api_smp.h
@@ -0,0 +1,282 @@
+#ifndef __LINUX_RWLOCK_API_SMP_H
+#define __LINUX_RWLOCK_API_SMP_H
+
+#ifndef __LINUX_SPINLOCK_API_SMP_H
+# error "please don't include this file directly"
+#endif
+
+/*
+ * include/linux/rwlock_api_smp.h
+ *
+ * spinlock API declarations on SMP (and debug)
+ * (implemented in kernel/spinlock.c)
+ *
+ * portions Copyright 2005, Red Hat, Inc., Ingo Molnar
+ * Released under the General Public License (GPL).
+ */
+
+void __lockfunc _raw_read_lock(rwlock_t *lock) __acquires(lock);
+void __lockfunc _raw_write_lock(rwlock_t *lock) __acquires(lock);
+void __lockfunc _raw_read_lock_bh(rwlock_t *lock) __acquires(lock);
+void __lockfunc _raw_write_lock_bh(rwlock_t *lock) __acquires(lock);
+void __lockfunc _raw_read_lock_irq(rwlock_t *lock) __acquires(lock);
+void __lockfunc _raw_write_lock_irq(rwlock_t *lock) __acquires(lock);
+unsigned long __lockfunc _raw_read_lock_irqsave(rwlock_t *lock)
+ __acquires(lock);
+unsigned long __lockfunc _raw_write_lock_irqsave(rwlock_t *lock)
+ __acquires(lock);
+int __lockfunc _raw_read_trylock(rwlock_t *lock);
+int __lockfunc _raw_write_trylock(rwlock_t *lock);
+void __lockfunc _raw_read_unlock(rwlock_t *lock) __releases(lock);
+void __lockfunc _raw_write_unlock(rwlock_t *lock) __releases(lock);
+void __lockfunc _raw_read_unlock_bh(rwlock_t *lock) __releases(lock);
+void __lockfunc _raw_write_unlock_bh(rwlock_t *lock) __releases(lock);
+void __lockfunc _raw_read_unlock_irq(rwlock_t *lock) __releases(lock);
+void __lockfunc _raw_write_unlock_irq(rwlock_t *lock) __releases(lock);
+void __lockfunc
+_raw_read_unlock_irqrestore(rwlock_t *lock, unsigned long flags)
+ __releases(lock);
+void __lockfunc
+_raw_write_unlock_irqrestore(rwlock_t *lock, unsigned long flags)
+ __releases(lock);
+
+#ifdef CONFIG_INLINE_READ_LOCK
+#define _raw_read_lock(lock) __raw_read_lock(lock)
+#endif
+
+#ifdef CONFIG_INLINE_WRITE_LOCK
+#define _raw_write_lock(lock) __raw_write_lock(lock)
+#endif
+
+#ifdef CONFIG_INLINE_READ_LOCK_BH
+#define _raw_read_lock_bh(lock) __raw_read_lock_bh(lock)
+#endif
+
+#ifdef CONFIG_INLINE_WRITE_LOCK_BH
+#define _raw_write_lock_bh(lock) __raw_write_lock_bh(lock)
+#endif
+
+#ifdef CONFIG_INLINE_READ_LOCK_IRQ
+#define _raw_read_lock_irq(lock) __raw_read_lock_irq(lock)
+#endif
+
+#ifdef CONFIG_INLINE_WRITE_LOCK_IRQ
+#define _raw_write_lock_irq(lock) __raw_write_lock_irq(lock)
+#endif
+
+#ifdef CONFIG_INLINE_READ_LOCK_IRQSAVE
+#define _raw_read_lock_irqsave(lock) __raw_read_lock_irqsave(lock)
+#endif
+
+#ifdef CONFIG_INLINE_WRITE_LOCK_IRQSAVE
+#define _raw_write_lock_irqsave(lock) __raw_write_lock_irqsave(lock)
+#endif
+
+#ifdef CONFIG_INLINE_READ_TRYLOCK
+#define _raw_read_trylock(lock) __raw_read_trylock(lock)
+#endif
+
+#ifdef CONFIG_INLINE_WRITE_TRYLOCK
+#define _raw_write_trylock(lock) __raw_write_trylock(lock)
+#endif
+
+#ifdef CONFIG_INLINE_READ_UNLOCK
+#define _raw_read_unlock(lock) __raw_read_unlock(lock)
+#endif
+
+#ifdef CONFIG_INLINE_WRITE_UNLOCK
+#define _raw_write_unlock(lock) __raw_write_unlock(lock)
+#endif
+
+#ifdef CONFIG_INLINE_READ_UNLOCK_BH
+#define _raw_read_unlock_bh(lock) __raw_read_unlock_bh(lock)
+#endif
+
+#ifdef CONFIG_INLINE_WRITE_UNLOCK_BH
+#define _raw_write_unlock_bh(lock) __raw_write_unlock_bh(lock)
+#endif
+
+#ifdef CONFIG_INLINE_READ_UNLOCK_IRQ
+#define _raw_read_unlock_irq(lock) __raw_read_unlock_irq(lock)
+#endif
+
+#ifdef CONFIG_INLINE_WRITE_UNLOCK_IRQ
+#define _raw_write_unlock_irq(lock) __raw_write_unlock_irq(lock)
+#endif
+
+#ifdef CONFIG_INLINE_READ_UNLOCK_IRQRESTORE
+#define _raw_read_unlock_irqrestore(lock, flags) \
+ __raw_read_unlock_irqrestore(lock, flags)
+#endif
+
+#ifdef CONFIG_INLINE_WRITE_UNLOCK_IRQRESTORE
+#define _raw_write_unlock_irqrestore(lock, flags) \
+ __raw_write_unlock_irqrestore(lock, flags)
+#endif
+
+static inline int __raw_read_trylock(rwlock_t *lock)
+{
+ preempt_disable();
+ if (do_raw_read_trylock(lock)) {
+ rwlock_acquire_read(&lock->dep_map, 0, 1, _RET_IP_);
+ return 1;
+ }
+ preempt_enable();
+ return 0;
+}
+
+static inline int __raw_write_trylock(rwlock_t *lock)
+{
+ preempt_disable();
+ if (do_raw_write_trylock(lock)) {
+ rwlock_acquire(&lock->dep_map, 0, 1, _RET_IP_);
+ return 1;
+ }
+ preempt_enable();
+ return 0;
+}
+
+/*
+ * If lockdep is enabled then we use the non-preemption spin-ops
+ * even on CONFIG_PREEMPT, because lockdep assumes that interrupts are
+ * not re-enabled during lock-acquire (which the preempt-spin-ops do):
+ */
+#if !defined(CONFIG_GENERIC_LOCKBREAK) || defined(CONFIG_DEBUG_LOCK_ALLOC)
+
+static inline void __raw_read_lock(rwlock_t *lock)
+{
+ preempt_disable();
+ rwlock_acquire_read(&lock->dep_map, 0, 0, _RET_IP_);
+ LOCK_CONTENDED(lock, do_raw_read_trylock, do_raw_read_lock);
+}
+
+static inline unsigned long __raw_read_lock_irqsave(rwlock_t *lock)
+{
+ unsigned long flags;
+
+ local_irq_save(flags);
+ preempt_disable();
+ rwlock_acquire_read(&lock->dep_map, 0, 0, _RET_IP_);
+ LOCK_CONTENDED_FLAGS(lock, do_raw_read_trylock, do_raw_read_lock,
+ do_raw_read_lock_flags, &flags);
+ return flags;
+}
+
+static inline void __raw_read_lock_irq(rwlock_t *lock)
+{
+ local_irq_disable();
+ preempt_disable();
+ rwlock_acquire_read(&lock->dep_map, 0, 0, _RET_IP_);
+ LOCK_CONTENDED(lock, do_raw_read_trylock, do_raw_read_lock);
+}
+
+static inline void __raw_read_lock_bh(rwlock_t *lock)
+{
+ local_bh_disable();
+ preempt_disable();
+ rwlock_acquire_read(&lock->dep_map, 0, 0, _RET_IP_);
+ LOCK_CONTENDED(lock, do_raw_read_trylock, do_raw_read_lock);
+}
+
+static inline unsigned long __raw_write_lock_irqsave(rwlock_t *lock)
+{
+ unsigned long flags;
+
+ local_irq_save(flags);
+ preempt_disable();
+ rwlock_acquire(&lock->dep_map, 0, 0, _RET_IP_);
+ LOCK_CONTENDED_FLAGS(lock, do_raw_write_trylock, do_raw_write_lock,
+ do_raw_write_lock_flags, &flags);
+ return flags;
+}
+
+static inline void __raw_write_lock_irq(rwlock_t *lock)
+{
+ local_irq_disable();
+ preempt_disable();
+ rwlock_acquire(&lock->dep_map, 0, 0, _RET_IP_);
+ LOCK_CONTENDED(lock, do_raw_write_trylock, do_raw_write_lock);
+}
+
+static inline void __raw_write_lock_bh(rwlock_t *lock)
+{
+ local_bh_disable();
+ preempt_disable();
+ rwlock_acquire(&lock->dep_map, 0, 0, _RET_IP_);
+ LOCK_CONTENDED(lock, do_raw_write_trylock, do_raw_write_lock);
+}
+
+static inline void __raw_write_lock(rwlock_t *lock)
+{
+ preempt_disable();
+ rwlock_acquire(&lock->dep_map, 0, 0, _RET_IP_);
+ LOCK_CONTENDED(lock, do_raw_write_trylock, do_raw_write_lock);
+}
+
+#endif /* !CONFIG_GENERIC_LOCKBREAK || CONFIG_DEBUG_LOCK_ALLOC */
+
+static inline void __raw_write_unlock(rwlock_t *lock)
+{
+ rwlock_release(&lock->dep_map, 1, _RET_IP_);
+ do_raw_write_unlock(lock);
+ preempt_enable();
+}
+
+static inline void __raw_read_unlock(rwlock_t *lock)
+{
+ rwlock_release(&lock->dep_map, 1, _RET_IP_);
+ do_raw_read_unlock(lock);
+ preempt_enable();
+}
+
+static inline void
+__raw_read_unlock_irqrestore(rwlock_t *lock, unsigned long flags)
+{
+ rwlock_release(&lock->dep_map, 1, _RET_IP_);
+ do_raw_read_unlock(lock);
+ local_irq_restore(flags);
+ preempt_enable();
+}
+
+static inline void __raw_read_unlock_irq(rwlock_t *lock)
+{
+ rwlock_release(&lock->dep_map, 1, _RET_IP_);
+ do_raw_read_unlock(lock);
+ local_irq_enable();
+ preempt_enable();
+}
+
+static inline void __raw_read_unlock_bh(rwlock_t *lock)
+{
+ rwlock_release(&lock->dep_map, 1, _RET_IP_);
+ do_raw_read_unlock(lock);
+ preempt_enable_no_resched();
+ local_bh_enable_ip((unsigned long)__builtin_return_address(0));
+}
+
+static inline void __raw_write_unlock_irqrestore(rwlock_t *lock,
+ unsigned long flags)
+{
+ rwlock_release(&lock->dep_map, 1, _RET_IP_);
+ do_raw_write_unlock(lock);
+ local_irq_restore(flags);
+ preempt_enable();
+}
+
+static inline void __raw_write_unlock_irq(rwlock_t *lock)
+{
+ rwlock_release(&lock->dep_map, 1, _RET_IP_);
+ do_raw_write_unlock(lock);
+ local_irq_enable();
+ preempt_enable();
+}
+
+static inline void __raw_write_unlock_bh(rwlock_t *lock)
+{
+ rwlock_release(&lock->dep_map, 1, _RET_IP_);
+ do_raw_write_unlock(lock);
+ preempt_enable_no_resched();
+ local_bh_enable_ip((unsigned long)__builtin_return_address(0));
+}
+
+#endif /* __LINUX_RWLOCK_API_SMP_H */
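The __raw_*_trylock() helpers above back the non-blocking entry points; a short sketch of the trylock pattern they enable, with a hypothetical lock, where a contended caller backs off instead of spinning:

#include <linux/errno.h>
#include <linux/spinlock.h>

static DEFINE_RWLOCK(cache_lock);	/* hypothetical */

static int cache_try_update(void)
{
	if (!write_trylock(&cache_lock))
		return -EBUSY;		/* contended: let the caller retry later */
	/* ... update the cached data ... */
	write_unlock(&cache_lock);
	return 0;
}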
diff --git a/include/linux/rwlock_types.h b/include/linux/rwlock_types.h
new file mode 100644
index 00000000000..bd31808c7d8
--- /dev/null
+++ b/include/linux/rwlock_types.h
@@ -0,0 +1,56 @@
+#ifndef __LINUX_RWLOCK_TYPES_H
+#define __LINUX_RWLOCK_TYPES_H
+
+/*
+ * include/linux/rwlock_types.h - generic rwlock type definitions
+ * and initializers
+ *
+ * portions Copyright 2005, Red Hat, Inc., Ingo Molnar
+ * Released under the General Public License (GPL).
+ */
+typedef struct {
+ arch_rwlock_t raw_lock;
+#ifdef CONFIG_GENERIC_LOCKBREAK
+ unsigned int break_lock;
+#endif
+#ifdef CONFIG_DEBUG_SPINLOCK
+ unsigned int magic, owner_cpu;
+ void *owner;
+#endif
+#ifdef CONFIG_DEBUG_LOCK_ALLOC
+ struct lockdep_map dep_map;
+#endif
+} rwlock_t;
+
+#define RWLOCK_MAGIC 0xdeaf1eed
+
+#ifdef CONFIG_DEBUG_LOCK_ALLOC
+# define RW_DEP_MAP_INIT(lockname) .dep_map = { .name = #lockname }
+#else
+# define RW_DEP_MAP_INIT(lockname)
+#endif
+
+#ifdef CONFIG_DEBUG_SPINLOCK
+#define __RW_LOCK_UNLOCKED(lockname) \
+ (rwlock_t) { .raw_lock = __ARCH_RW_LOCK_UNLOCKED, \
+ .magic = RWLOCK_MAGIC, \
+ .owner = SPINLOCK_OWNER_INIT, \
+ .owner_cpu = -1, \
+ RW_DEP_MAP_INIT(lockname) }
+#else
+#define __RW_LOCK_UNLOCKED(lockname) \
+ (rwlock_t) { .raw_lock = __ARCH_RW_LOCK_UNLOCKED, \
+ RW_DEP_MAP_INIT(lockname) }
+#endif
+
+/*
+ * RW_LOCK_UNLOCKED defeats lockdep state tracking and is hence
+ * deprecated.
+ *
+ * Please use DEFINE_RWLOCK() or __RW_LOCK_UNLOCKED() as appropriate.
+ */
+#define RW_LOCK_UNLOCKED __RW_LOCK_UNLOCKED(old_style_rw_init)
+
+#define DEFINE_RWLOCK(x) rwlock_t x = __RW_LOCK_UNLOCKED(x)
+
+#endif /* __LINUX_RWLOCK_TYPES_H */
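As the deprecation note above says, static rwlocks should use DEFINE_RWLOCK() or __RW_LOCK_UNLOCKED(); a brief sketch with hypothetical lock names showing the two surviving initializers:

/* Both forms give the lock its own lockdep class, keyed by its name;
 * the deprecated RW_LOCK_UNLOCKED folds every user into one class. */
static DEFINE_RWLOCK(my_list_lock);				/* preferred */
static rwlock_t my_hash_lock = __RW_LOCK_UNLOCKED(my_hash_lock);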
diff --git a/include/linux/rwsem-spinlock.h b/include/linux/rwsem-spinlock.h
index 6c3c0f6c261..bdfcc252797 100644
--- a/include/linux/rwsem-spinlock.h
+++ b/include/linux/rwsem-spinlock.h
@@ -68,11 +68,7 @@ extern int __down_write_trylock(struct rw_semaphore *sem);
extern void __up_read(struct rw_semaphore *sem);
extern void __up_write(struct rw_semaphore *sem);
extern void __downgrade_write(struct rw_semaphore *sem);
-
-static inline int rwsem_is_locked(struct rw_semaphore *sem)
-{
- return (sem->activity != 0);
-}
+extern int rwsem_is_locked(struct rw_semaphore *sem);
#endif /* __KERNEL__ */
#endif /* _LINUX_RWSEM_SPINLOCK_H */
diff --git a/include/linux/sched.h b/include/linux/sched.h
index 89115ec7d43..244c287a5ac 100644
--- a/include/linux/sched.h
+++ b/include/linux/sched.h
@@ -1102,7 +1102,7 @@ struct sched_class {
void (*set_curr_task) (struct rq *rq);
void (*task_tick) (struct rq *rq, struct task_struct *p, int queued);
- void (*task_new) (struct rq *rq, struct task_struct *p);
+ void (*task_fork) (struct task_struct *p);
void (*switched_from) (struct rq *this_rq, struct task_struct *task,
int running);
@@ -1111,7 +1111,8 @@ struct sched_class {
void (*prio_changed) (struct rq *this_rq, struct task_struct *task,
int oldprio, int running);
- unsigned int (*get_rr_interval) (struct task_struct *task);
+ unsigned int (*get_rr_interval) (struct rq *rq,
+ struct task_struct *task);
#ifdef CONFIG_FAIR_GROUP_SCHED
void (*moved_group) (struct task_struct *p);
@@ -1151,8 +1152,6 @@ struct sched_entity {
u64 start_runtime;
u64 avg_wakeup;
- u64 avg_running;
-
#ifdef CONFIG_SCHEDSTATS
u64 wait_start;
u64 wait_max;
@@ -1175,7 +1174,6 @@ struct sched_entity {
u64 nr_failed_migrations_running;
u64 nr_failed_migrations_hot;
u64 nr_forced_migrations;
- u64 nr_forced2_migrations;
u64 nr_wakeups;
u64 nr_wakeups_sync;
@@ -1411,7 +1409,7 @@ struct task_struct {
#endif
/* Protection of the PI data structures: */
- spinlock_t pi_lock;
+ raw_spinlock_t pi_lock;
#ifdef CONFIG_RT_MUTEXES
/* PI waiters blocked on a rt_mutex held by this task */
@@ -1448,8 +1446,10 @@ struct task_struct {
gfp_t lockdep_reclaim_gfp;
#endif
+#ifdef CONFIG_FS_JOURNAL_INFO
/* journalling filesystem info */
void *journal_info;
+#endif
/* stacked block device info */
struct bio *bio_list, **bio_tail;
@@ -1544,6 +1544,14 @@ struct task_struct {
unsigned long trace_recursion;
#endif /* CONFIG_TRACING */
unsigned long stack_start;
+#ifdef CONFIG_CGROUP_MEM_RES_CTLR /* memcg uses this to do batch job */
+ struct memcg_batch_info {
+ int do_batch; /* incremented when batch uncharge started */
+ struct mem_cgroup *memcg; /* target memcg of uncharge */
+ unsigned long bytes; /* uncharged usage */
+ unsigned long memsw_bytes; /* uncharged mem+swap usage */
+ } memcg_batch;
+#endif
};
/* Future-safe accessor for struct task_struct's cpus_allowed. */
@@ -1840,7 +1848,8 @@ static inline int set_cpus_allowed(struct task_struct *p, cpumask_t new_mask)
extern int sched_clock_stable;
#endif
-extern unsigned long long sched_clock(void);
+/* ftrace calls sched_clock() directly */
+extern unsigned long long notrace sched_clock(void);
extern void sched_clock_init(void);
extern u64 sched_clock_cpu(int cpu);
@@ -1903,14 +1912,22 @@ extern unsigned int sysctl_sched_wakeup_granularity;
extern unsigned int sysctl_sched_shares_ratelimit;
extern unsigned int sysctl_sched_shares_thresh;
extern unsigned int sysctl_sched_child_runs_first;
+
+enum sched_tunable_scaling {
+ SCHED_TUNABLESCALING_NONE,
+ SCHED_TUNABLESCALING_LOG,
+ SCHED_TUNABLESCALING_LINEAR,
+ SCHED_TUNABLESCALING_END,
+};
+extern enum sched_tunable_scaling sysctl_sched_tunable_scaling;
+
#ifdef CONFIG_SCHED_DEBUG
-extern unsigned int sysctl_sched_features;
extern unsigned int sysctl_sched_migration_cost;
extern unsigned int sysctl_sched_nr_migrate;
extern unsigned int sysctl_sched_time_avg;
extern unsigned int sysctl_timer_migration;
-int sched_nr_latency_handler(struct ctl_table *table, int write,
+int sched_proc_update_handler(struct ctl_table *table, int write,
void __user *buffer, size_t *length,
loff_t *ppos);
#endif
@@ -2066,7 +2083,6 @@ extern int kill_proc_info(int, struct siginfo *, pid_t);
extern int do_notify_parent(struct task_struct *, int);
extern void __wake_up_parent(struct task_struct *p, struct task_struct *parent);
extern void force_sig(int, struct task_struct *);
-extern void force_sig_specific(int, struct task_struct *);
extern int send_sig(int, struct task_struct *, int);
extern void zap_other_threads(struct task_struct *p);
extern struct sigqueue *sigqueue_alloc(void);
@@ -2085,11 +2101,6 @@ static inline int kill_cad_pid(int sig, int priv)
#define SEND_SIG_PRIV ((struct siginfo *) 1)
#define SEND_SIG_FORCED ((struct siginfo *) 2)
-static inline int is_si_special(const struct siginfo *info)
-{
- return info <= SEND_SIG_FORCED;
-}
-
/*
* True if we are on the alternate signal stack.
*/
diff --git a/include/linux/sem.h b/include/linux/sem.h
index 1b191c176bc..8a4adbef8a0 100644
--- a/include/linux/sem.h
+++ b/include/linux/sem.h
@@ -86,6 +86,7 @@ struct task_struct;
struct sem {
int semval; /* current value */
int sempid; /* pid of last operation */
+ struct list_head sem_pending; /* pending single-sop operations */
};
/* One sem_array data structure for each set of semaphores in the system. */
@@ -96,11 +97,13 @@ struct sem_array {
struct sem *sem_base; /* ptr to first semaphore in array */
struct list_head sem_pending; /* pending operations to be processed */
struct list_head list_id; /* undo requests on this array */
- unsigned long sem_nsems; /* no. of semaphores in array */
+ int sem_nsems; /* no. of semaphores in array */
+ int complex_count; /* pending complex operations */
};
/* One queue for each sleeping process in the system. */
struct sem_queue {
+ struct list_head simple_list; /* queue of pending single-sop operations */
struct list_head list; /* queue of pending operations */
struct task_struct *sleeper; /* this process */
struct sem_undo *undo; /* undo structure */
diff --git a/include/linux/slab_def.h b/include/linux/slab_def.h
index 850d057500d..ca6b2b31799 100644
--- a/include/linux/slab_def.h
+++ b/include/linux/slab_def.h
@@ -110,7 +110,7 @@ extern struct cache_sizes malloc_sizes[];
void *kmem_cache_alloc(struct kmem_cache *, gfp_t);
void *__kmalloc(size_t size, gfp_t flags);
-#ifdef CONFIG_KMEMTRACE
+#ifdef CONFIG_TRACING
extern void *kmem_cache_alloc_notrace(struct kmem_cache *cachep, gfp_t flags);
extern size_t slab_buffer_size(struct kmem_cache *cachep);
#else
@@ -166,7 +166,7 @@ found:
extern void *__kmalloc_node(size_t size, gfp_t flags, int node);
extern void *kmem_cache_alloc_node(struct kmem_cache *, gfp_t flags, int node);
-#ifdef CONFIG_KMEMTRACE
+#ifdef CONFIG_TRACING
extern void *kmem_cache_alloc_node_notrace(struct kmem_cache *cachep,
gfp_t flags,
int nodeid);
diff --git a/include/linux/slub_def.h b/include/linux/slub_def.h
index 5ad70a60fd7..1e14beb23f9 100644
--- a/include/linux/slub_def.h
+++ b/include/linux/slub_def.h
@@ -217,7 +217,7 @@ static __always_inline struct kmem_cache *kmalloc_slab(size_t size)
void *kmem_cache_alloc(struct kmem_cache *, gfp_t);
void *__kmalloc(size_t size, gfp_t flags);
-#ifdef CONFIG_KMEMTRACE
+#ifdef CONFIG_TRACING
extern void *kmem_cache_alloc_notrace(struct kmem_cache *s, gfp_t gfpflags);
#else
static __always_inline void *
@@ -266,7 +266,7 @@ static __always_inline void *kmalloc(size_t size, gfp_t flags)
void *__kmalloc_node(size_t size, gfp_t flags, int node);
void *kmem_cache_alloc_node(struct kmem_cache *, gfp_t flags, int node);
-#ifdef CONFIG_KMEMTRACE
+#ifdef CONFIG_TRACING
extern void *kmem_cache_alloc_node_notrace(struct kmem_cache *s,
gfp_t gfpflags,
int node);
diff --git a/include/linux/sm501-regs.h b/include/linux/sm501-regs.h
index d53642d2d89..67ed2c54283 100644
--- a/include/linux/sm501-regs.h
+++ b/include/linux/sm501-regs.h
@@ -31,6 +31,8 @@
#define SM501_SYSCTRL_PCI_SUBSYS_LOCK (1<<11)
#define SM501_SYSCTRL_PCI_BURST_READ_EN (1<<15)
+#define SM501_SYSCTRL_2D_ENGINE_STATUS (1<<19)
+
/* miscellaneous control */
#define SM501_MISC_CONTROL (0x000004)
diff --git a/include/linux/spi/sh_msiof.h b/include/linux/spi/sh_msiof.h
new file mode 100644
index 00000000000..2e8db3d2d2e
--- /dev/null
+++ b/include/linux/spi/sh_msiof.h
@@ -0,0 +1,10 @@
+#ifndef __SPI_SH_MSIOF_H__
+#define __SPI_SH_MSIOF_H__
+
+struct sh_msiof_spi_info {
+ int tx_fifo_override;
+ int rx_fifo_override;
+ u16 num_chipselect;
+};
+
+#endif /* __SPI_SH_MSIOF_H__ */
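A hypothetical board-file sketch of the new sh_msiof_spi_info platform data; the field names come from the header above, while the values and the assumption that a zero override keeps the driver's default FIFO size are illustrative only:

#include <linux/spi/sh_msiof.h>

static struct sh_msiof_spi_info msiof0_info = {
	.tx_fifo_override = 0,	/* assumed: 0 keeps the controller default */
	.rx_fifo_override = 0,
	.num_chipselect   = 1,
};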
diff --git a/include/linux/spi/xilinx_spi.h b/include/linux/spi/xilinx_spi.h
new file mode 100644
index 00000000000..6f17278810b
--- /dev/null
+++ b/include/linux/spi/xilinx_spi.h
@@ -0,0 +1,20 @@
+#ifndef __LINUX_SPI_XILINX_SPI_H
+#define __LINUX_SPI_XILINX_SPI_H
+
+/**
+ * struct xspi_platform_data - Platform data of the Xilinx SPI driver
+ * @num_chipselect: Number of chip selects supported by the IP.
+ * @little_endian: Whether registers should be accessed little-endian.
+ * @bits_per_word: Number of bits per word.
+ * @devices: Devices to add when the driver is probed.
+ * @num_devices: Number of devices in the devices array.
+ */
+struct xspi_platform_data {
+ u16 num_chipselect;
+ bool little_endian;
+ u8 bits_per_word;
+ struct spi_board_info *devices;
+ u8 num_devices;
+};
+
+#endif /* __LINUX_SPI_XILINX_SPI_H */
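A hypothetical instantiation of the new xspi_platform_data, with a made-up child device, just to show how the fields fit together; none of the names below come from this patch:

#include <linux/kernel.h>
#include <linux/spi/spi.h>
#include <linux/spi/xilinx_spi.h>

static struct spi_board_info board_spi_devices[] = {
	{
		.modalias	= "m25p80",	/* example SPI flash */
		.max_speed_hz	= 10000000,
		.chip_select	= 0,
	},
};

static struct xspi_platform_data xspi_pdata = {
	.num_chipselect	= 1,
	.little_endian	= false,
	.bits_per_word	= 8,
	.devices	= board_spi_devices,
	.num_devices	= ARRAY_SIZE(board_spi_devices),
};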
diff --git a/include/linux/spinlock.h b/include/linux/spinlock.h
index 71dccfeb0d8..86088213334 100644
--- a/include/linux/spinlock.h
+++ b/include/linux/spinlock.h
@@ -8,13 +8,13 @@
*
* on SMP builds:
*
- * asm/spinlock_types.h: contains the raw_spinlock_t/raw_rwlock_t and the
+ * asm/spinlock_types.h: contains the arch_spinlock_t/arch_rwlock_t and the
* initializers
*
* linux/spinlock_types.h:
* defines the generic type and initializers
*
- * asm/spinlock.h: contains the __raw_spin_*()/etc. lowlevel
+ * asm/spinlock.h: contains the arch_spin_*()/etc. lowlevel
* implementations, mostly inline assembly code
*
* (also included on UP-debug builds:)
@@ -34,7 +34,7 @@
* defines the generic type and initializers
*
* linux/spinlock_up.h:
- * contains the __raw_spin_*()/etc. version of UP
+ * contains the arch_spin_*()/etc. version of UP
* builds. (which are NOPs on non-debug, non-preempt
* builds)
*
@@ -75,12 +75,12 @@
#define __lockfunc __attribute__((section(".spinlock.text")))
/*
- * Pull the raw_spinlock_t and raw_rwlock_t definitions:
+ * Pull the arch_spinlock_t and arch_rwlock_t definitions:
*/
#include <linux/spinlock_types.h>
/*
- * Pull the __raw*() functions/declarations (UP-nondebug doesnt need them):
+ * Pull the arch_spin*() functions/declarations (UP-nondebug doesn't need them):
*/
#ifdef CONFIG_SMP
# include <asm/spinlock.h>
@@ -89,45 +89,31 @@
#endif
#ifdef CONFIG_DEBUG_SPINLOCK
- extern void __spin_lock_init(spinlock_t *lock, const char *name,
- struct lock_class_key *key);
-# define spin_lock_init(lock) \
+ extern void __raw_spin_lock_init(raw_spinlock_t *lock, const char *name,
+ struct lock_class_key *key);
+# define raw_spin_lock_init(lock) \
do { \
static struct lock_class_key __key; \
\
- __spin_lock_init((lock), #lock, &__key); \
+ __raw_spin_lock_init((lock), #lock, &__key); \
} while (0)
#else
-# define spin_lock_init(lock) \
- do { *(lock) = __SPIN_LOCK_UNLOCKED(lock); } while (0)
+# define raw_spin_lock_init(lock) \
+ do { *(lock) = __RAW_SPIN_LOCK_UNLOCKED(lock); } while (0)
#endif
-#ifdef CONFIG_DEBUG_SPINLOCK
- extern void __rwlock_init(rwlock_t *lock, const char *name,
- struct lock_class_key *key);
-# define rwlock_init(lock) \
-do { \
- static struct lock_class_key __key; \
- \
- __rwlock_init((lock), #lock, &__key); \
-} while (0)
-#else
-# define rwlock_init(lock) \
- do { *(lock) = __RW_LOCK_UNLOCKED(lock); } while (0)
-#endif
-
-#define spin_is_locked(lock) __raw_spin_is_locked(&(lock)->raw_lock)
+#define raw_spin_is_locked(lock) arch_spin_is_locked(&(lock)->raw_lock)
#ifdef CONFIG_GENERIC_LOCKBREAK
-#define spin_is_contended(lock) ((lock)->break_lock)
+#define raw_spin_is_contended(lock) ((lock)->break_lock)
#else
-#ifdef __raw_spin_is_contended
-#define spin_is_contended(lock) __raw_spin_is_contended(&(lock)->raw_lock)
+#ifdef arch_spin_is_contended
+#define raw_spin_is_contended(lock) arch_spin_is_contended(&(lock)->raw_lock)
#else
-#define spin_is_contended(lock) (((void)(lock), 0))
-#endif /*__raw_spin_is_contended*/
+#define raw_spin_is_contended(lock) (((void)(lock), 0))
+#endif /*arch_spin_is_contended*/
#endif
/* The lock does not imply full memory barrier. */
@@ -136,182 +122,260 @@ static inline void smp_mb__after_lock(void) { smp_mb(); }
#endif
/**
- * spin_unlock_wait - wait until the spinlock gets unlocked
+ * raw_spin_unlock_wait - wait until the spinlock gets unlocked
* @lock: the spinlock in question.
*/
-#define spin_unlock_wait(lock) __raw_spin_unlock_wait(&(lock)->raw_lock)
+#define raw_spin_unlock_wait(lock) arch_spin_unlock_wait(&(lock)->raw_lock)
#ifdef CONFIG_DEBUG_SPINLOCK
- extern void _raw_spin_lock(spinlock_t *lock);
-#define _raw_spin_lock_flags(lock, flags) _raw_spin_lock(lock)
- extern int _raw_spin_trylock(spinlock_t *lock);
- extern void _raw_spin_unlock(spinlock_t *lock);
- extern void _raw_read_lock(rwlock_t *lock);
-#define _raw_read_lock_flags(lock, flags) _raw_read_lock(lock)
- extern int _raw_read_trylock(rwlock_t *lock);
- extern void _raw_read_unlock(rwlock_t *lock);
- extern void _raw_write_lock(rwlock_t *lock);
-#define _raw_write_lock_flags(lock, flags) _raw_write_lock(lock)
- extern int _raw_write_trylock(rwlock_t *lock);
- extern void _raw_write_unlock(rwlock_t *lock);
+ extern void do_raw_spin_lock(raw_spinlock_t *lock);
+#define do_raw_spin_lock_flags(lock, flags) do_raw_spin_lock(lock)
+ extern int do_raw_spin_trylock(raw_spinlock_t *lock);
+ extern void do_raw_spin_unlock(raw_spinlock_t *lock);
#else
-# define _raw_spin_lock(lock) __raw_spin_lock(&(lock)->raw_lock)
-# define _raw_spin_lock_flags(lock, flags) \
- __raw_spin_lock_flags(&(lock)->raw_lock, *(flags))
-# define _raw_spin_trylock(lock) __raw_spin_trylock(&(lock)->raw_lock)
-# define _raw_spin_unlock(lock) __raw_spin_unlock(&(lock)->raw_lock)
-# define _raw_read_lock(rwlock) __raw_read_lock(&(rwlock)->raw_lock)
-# define _raw_read_lock_flags(lock, flags) \
- __raw_read_lock_flags(&(lock)->raw_lock, *(flags))
-# define _raw_read_trylock(rwlock) __raw_read_trylock(&(rwlock)->raw_lock)
-# define _raw_read_unlock(rwlock) __raw_read_unlock(&(rwlock)->raw_lock)
-# define _raw_write_lock(rwlock) __raw_write_lock(&(rwlock)->raw_lock)
-# define _raw_write_lock_flags(lock, flags) \
- __raw_write_lock_flags(&(lock)->raw_lock, *(flags))
-# define _raw_write_trylock(rwlock) __raw_write_trylock(&(rwlock)->raw_lock)
-# define _raw_write_unlock(rwlock) __raw_write_unlock(&(rwlock)->raw_lock)
+static inline void do_raw_spin_lock(raw_spinlock_t *lock)
+{
+ arch_spin_lock(&lock->raw_lock);
+}
+
+static inline void
+do_raw_spin_lock_flags(raw_spinlock_t *lock, unsigned long *flags)
+{
+ arch_spin_lock_flags(&lock->raw_lock, *flags);
+}
+
+static inline int do_raw_spin_trylock(raw_spinlock_t *lock)
+{
+ return arch_spin_trylock(&(lock)->raw_lock);
+}
+
+static inline void do_raw_spin_unlock(raw_spinlock_t *lock)
+{
+ arch_spin_unlock(&lock->raw_lock);
+}
#endif
-#define read_can_lock(rwlock) __raw_read_can_lock(&(rwlock)->raw_lock)
-#define write_can_lock(rwlock) __raw_write_can_lock(&(rwlock)->raw_lock)
-
/*
- * Define the various spin_lock and rw_lock methods. Note we define these
- * regardless of whether CONFIG_SMP or CONFIG_PREEMPT are set. The various
- * methods are defined as nops in the case they are not required.
+ * Define the various spin_lock methods. Note we define these
+ * regardless of whether CONFIG_SMP or CONFIG_PREEMPT are set. The
+ * various methods are defined as nops when they are not
+ * required.
*/
-#define spin_trylock(lock) __cond_lock(lock, _spin_trylock(lock))
-#define read_trylock(lock) __cond_lock(lock, _read_trylock(lock))
-#define write_trylock(lock) __cond_lock(lock, _write_trylock(lock))
+#define raw_spin_trylock(lock) __cond_lock(lock, _raw_spin_trylock(lock))
-#define spin_lock(lock) _spin_lock(lock)
+#define raw_spin_lock(lock) _raw_spin_lock(lock)
#ifdef CONFIG_DEBUG_LOCK_ALLOC
-# define spin_lock_nested(lock, subclass) _spin_lock_nested(lock, subclass)
-# define spin_lock_nest_lock(lock, nest_lock) \
+# define raw_spin_lock_nested(lock, subclass) \
+ _raw_spin_lock_nested(lock, subclass)
+
+# define raw_spin_lock_nest_lock(lock, nest_lock) \
do { \
typecheck(struct lockdep_map *, &(nest_lock)->dep_map);\
- _spin_lock_nest_lock(lock, &(nest_lock)->dep_map); \
+ _raw_spin_lock_nest_lock(lock, &(nest_lock)->dep_map); \
} while (0)
#else
-# define spin_lock_nested(lock, subclass) _spin_lock(lock)
-# define spin_lock_nest_lock(lock, nest_lock) _spin_lock(lock)
+# define raw_spin_lock_nested(lock, subclass) _raw_spin_lock(lock)
+# define raw_spin_lock_nest_lock(lock, nest_lock) _raw_spin_lock(lock)
#endif
-#define write_lock(lock) _write_lock(lock)
-#define read_lock(lock) _read_lock(lock)
-
#if defined(CONFIG_SMP) || defined(CONFIG_DEBUG_SPINLOCK)
-#define spin_lock_irqsave(lock, flags) \
+#define raw_spin_lock_irqsave(lock, flags) \
do { \
typecheck(unsigned long, flags); \
- flags = _spin_lock_irqsave(lock); \
- } while (0)
-#define read_lock_irqsave(lock, flags) \
- do { \
- typecheck(unsigned long, flags); \
- flags = _read_lock_irqsave(lock); \
- } while (0)
-#define write_lock_irqsave(lock, flags) \
- do { \
- typecheck(unsigned long, flags); \
- flags = _write_lock_irqsave(lock); \
+ flags = _raw_spin_lock_irqsave(lock); \
} while (0)
#ifdef CONFIG_DEBUG_LOCK_ALLOC
-#define spin_lock_irqsave_nested(lock, flags, subclass) \
+#define raw_spin_lock_irqsave_nested(lock, flags, subclass) \
do { \
typecheck(unsigned long, flags); \
- flags = _spin_lock_irqsave_nested(lock, subclass); \
+ flags = _raw_spin_lock_irqsave_nested(lock, subclass); \
} while (0)
#else
-#define spin_lock_irqsave_nested(lock, flags, subclass) \
+#define raw_spin_lock_irqsave_nested(lock, flags, subclass) \
do { \
typecheck(unsigned long, flags); \
- flags = _spin_lock_irqsave(lock); \
+ flags = _raw_spin_lock_irqsave(lock); \
} while (0)
#endif
#else
-#define spin_lock_irqsave(lock, flags) \
- do { \
- typecheck(unsigned long, flags); \
- _spin_lock_irqsave(lock, flags); \
- } while (0)
-#define read_lock_irqsave(lock, flags) \
- do { \
- typecheck(unsigned long, flags); \
- _read_lock_irqsave(lock, flags); \
- } while (0)
-#define write_lock_irqsave(lock, flags) \
+#define raw_spin_lock_irqsave(lock, flags) \
do { \
typecheck(unsigned long, flags); \
- _write_lock_irqsave(lock, flags); \
+ _raw_spin_lock_irqsave(lock, flags); \
} while (0)
-#define spin_lock_irqsave_nested(lock, flags, subclass) \
- spin_lock_irqsave(lock, flags)
-#endif
+#define raw_spin_lock_irqsave_nested(lock, flags, subclass) \
+ raw_spin_lock_irqsave(lock, flags)
-#define spin_lock_irq(lock) _spin_lock_irq(lock)
-#define spin_lock_bh(lock) _spin_lock_bh(lock)
-#define read_lock_irq(lock) _read_lock_irq(lock)
-#define read_lock_bh(lock) _read_lock_bh(lock)
-#define write_lock_irq(lock) _write_lock_irq(lock)
-#define write_lock_bh(lock) _write_lock_bh(lock)
-#define spin_unlock(lock) _spin_unlock(lock)
-#define read_unlock(lock) _read_unlock(lock)
-#define write_unlock(lock) _write_unlock(lock)
-#define spin_unlock_irq(lock) _spin_unlock_irq(lock)
-#define read_unlock_irq(lock) _read_unlock_irq(lock)
-#define write_unlock_irq(lock) _write_unlock_irq(lock)
-
-#define spin_unlock_irqrestore(lock, flags) \
- do { \
- typecheck(unsigned long, flags); \
- _spin_unlock_irqrestore(lock, flags); \
- } while (0)
-#define spin_unlock_bh(lock) _spin_unlock_bh(lock)
+#endif
-#define read_unlock_irqrestore(lock, flags) \
- do { \
- typecheck(unsigned long, flags); \
- _read_unlock_irqrestore(lock, flags); \
- } while (0)
-#define read_unlock_bh(lock) _read_unlock_bh(lock)
+#define raw_spin_lock_irq(lock) _raw_spin_lock_irq(lock)
+#define raw_spin_lock_bh(lock) _raw_spin_lock_bh(lock)
+#define raw_spin_unlock(lock) _raw_spin_unlock(lock)
+#define raw_spin_unlock_irq(lock) _raw_spin_unlock_irq(lock)
-#define write_unlock_irqrestore(lock, flags) \
- do { \
- typecheck(unsigned long, flags); \
- _write_unlock_irqrestore(lock, flags); \
+#define raw_spin_unlock_irqrestore(lock, flags) \
+ do { \
+ typecheck(unsigned long, flags); \
+ _raw_spin_unlock_irqrestore(lock, flags); \
} while (0)
-#define write_unlock_bh(lock) _write_unlock_bh(lock)
+#define raw_spin_unlock_bh(lock) _raw_spin_unlock_bh(lock)
-#define spin_trylock_bh(lock) __cond_lock(lock, _spin_trylock_bh(lock))
+#define raw_spin_trylock_bh(lock) \
+ __cond_lock(lock, _raw_spin_trylock_bh(lock))
-#define spin_trylock_irq(lock) \
+#define raw_spin_trylock_irq(lock) \
({ \
local_irq_disable(); \
- spin_trylock(lock) ? \
+ raw_spin_trylock(lock) ? \
1 : ({ local_irq_enable(); 0; }); \
})
-#define spin_trylock_irqsave(lock, flags) \
+#define raw_spin_trylock_irqsave(lock, flags) \
({ \
local_irq_save(flags); \
- spin_trylock(lock) ? \
+ raw_spin_trylock(lock) ? \
1 : ({ local_irq_restore(flags); 0; }); \
})
-#define write_trylock_irqsave(lock, flags) \
-({ \
- local_irq_save(flags); \
- write_trylock(lock) ? \
- 1 : ({ local_irq_restore(flags); 0; }); \
+/**
+ * raw_spin_can_lock - would raw_spin_trylock() succeed?
+ * @lock: the spinlock in question.
+ */
+#define raw_spin_can_lock(lock) (!raw_spin_is_locked(lock))
+
+/* Include rwlock functions */
+#include <linux/rwlock.h>
+
+/*
+ * Pull the _spin_*()/_read_*()/_write_*() functions/declarations:
+ */
+#if defined(CONFIG_SMP) || defined(CONFIG_DEBUG_SPINLOCK)
+# include <linux/spinlock_api_smp.h>
+#else
+# include <linux/spinlock_api_up.h>
+#endif
+
+/*
+ * Map the spin_lock functions to the raw variants for PREEMPT_RT=n
+ */
+
+static inline raw_spinlock_t *spinlock_check(spinlock_t *lock)
+{
+ return &lock->rlock;
+}
+
+#define spin_lock_init(_lock) \
+do { \
+ spinlock_check(_lock); \
+ raw_spin_lock_init(&(_lock)->rlock); \
+} while (0)
+
+static inline void spin_lock(spinlock_t *lock)
+{
+ raw_spin_lock(&lock->rlock);
+}
+
+static inline void spin_lock_bh(spinlock_t *lock)
+{
+ raw_spin_lock_bh(&lock->rlock);
+}
+
+static inline int spin_trylock(spinlock_t *lock)
+{
+ return raw_spin_trylock(&lock->rlock);
+}
+
+#define spin_lock_nested(lock, subclass) \
+do { \
+ raw_spin_lock_nested(spinlock_check(lock), subclass); \
+} while (0)
+
+#define spin_lock_nest_lock(lock, nest_lock) \
+do { \
+ raw_spin_lock_nest_lock(spinlock_check(lock), nest_lock); \
+} while (0)
+
+static inline void spin_lock_irq(spinlock_t *lock)
+{
+ raw_spin_lock_irq(&lock->rlock);
+}
+
+#define spin_lock_irqsave(lock, flags) \
+do { \
+ raw_spin_lock_irqsave(spinlock_check(lock), flags); \
+} while (0)
+
+#define spin_lock_irqsave_nested(lock, flags, subclass) \
+do { \
+ raw_spin_lock_irqsave_nested(spinlock_check(lock), flags, subclass); \
+} while (0)
+
+static inline void spin_unlock(spinlock_t *lock)
+{
+ raw_spin_unlock(&lock->rlock);
+}
+
+static inline void spin_unlock_bh(spinlock_t *lock)
+{
+ raw_spin_unlock_bh(&lock->rlock);
+}
+
+static inline void spin_unlock_irq(spinlock_t *lock)
+{
+ raw_spin_unlock_irq(&lock->rlock);
+}
+
+static inline void spin_unlock_irqrestore(spinlock_t *lock, unsigned long flags)
+{
+ raw_spin_unlock_irqrestore(&lock->rlock, flags);
+}
+
+static inline int spin_trylock_bh(spinlock_t *lock)
+{
+ return raw_spin_trylock_bh(&lock->rlock);
+}
+
+static inline int spin_trylock_irq(spinlock_t *lock)
+{
+ return raw_spin_trylock_irq(&lock->rlock);
+}
+
+#define spin_trylock_irqsave(lock, flags) \
+({ \
+ raw_spin_trylock_irqsave(spinlock_check(lock), flags); \
})
+static inline void spin_unlock_wait(spinlock_t *lock)
+{
+ raw_spin_unlock_wait(&lock->rlock);
+}
+
+static inline int spin_is_locked(spinlock_t *lock)
+{
+ return raw_spin_is_locked(&lock->rlock);
+}
+
+static inline int spin_is_contended(spinlock_t *lock)
+{
+ return raw_spin_is_contended(&lock->rlock);
+}
+
+static inline int spin_can_lock(spinlock_t *lock)
+{
+ return raw_spin_can_lock(&lock->rlock);
+}
+
+static inline void assert_spin_locked(spinlock_t *lock)
+{
+ assert_raw_spin_locked(&lock->rlock);
+}
+
/*
* Pull the atomic_t declaration:
* (asm-mips/atomic.h needs above definitions)
@@ -329,19 +393,4 @@ extern int _atomic_dec_and_lock(atomic_t *atomic, spinlock_t *lock);
#define atomic_dec_and_lock(atomic, lock) \
__cond_lock(lock, _atomic_dec_and_lock(atomic, lock))
-/**
- * spin_can_lock - would spin_trylock() succeed?
- * @lock: the spinlock in question.
- */
-#define spin_can_lock(lock) (!spin_is_locked(lock))
-
-/*
- * Pull the _spin_*()/_read_*()/_write_*() functions/declarations:
- */
-#if defined(CONFIG_SMP) || defined(CONFIG_DEBUG_SPINLOCK)
-# include <linux/spinlock_api_smp.h>
-#else
-# include <linux/spinlock_api_up.h>
-#endif
-
#endif /* __LINUX_SPINLOCK_H */
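With spinlock_t now a wrapper that embeds a raw_spinlock_t in its rlock member, existing callers keep the familiar API; a minimal sketch with a hypothetical structure, showing spin_lock_irqsave() simply forwarding to the raw variant:

#include <linux/spinlock.h>

struct my_dev {			/* hypothetical device state */
	spinlock_t lock;
	unsigned int pending;
};

static void my_dev_init(struct my_dev *dev)
{
	spin_lock_init(&dev->lock);	/* type-checks, then inits dev->lock.rlock */
	dev->pending = 0;
}

static void my_dev_kick(struct my_dev *dev)
{
	unsigned long flags;

	spin_lock_irqsave(&dev->lock, flags);	/* raw_spin_lock_irqsave underneath */
	dev->pending++;
	spin_unlock_irqrestore(&dev->lock, flags);
}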
diff --git a/include/linux/spinlock_api_smp.h b/include/linux/spinlock_api_smp.h
index 8264a7f459b..e253ccd7a60 100644
--- a/include/linux/spinlock_api_smp.h
+++ b/include/linux/spinlock_api_smp.h
@@ -17,165 +17,76 @@
int in_lock_functions(unsigned long addr);
-#define assert_spin_locked(x) BUG_ON(!spin_is_locked(x))
-
-void __lockfunc _spin_lock(spinlock_t *lock) __acquires(lock);
-void __lockfunc _spin_lock_nested(spinlock_t *lock, int subclass)
- __acquires(lock);
-void __lockfunc _spin_lock_nest_lock(spinlock_t *lock, struct lockdep_map *map)
- __acquires(lock);
-void __lockfunc _read_lock(rwlock_t *lock) __acquires(lock);
-void __lockfunc _write_lock(rwlock_t *lock) __acquires(lock);
-void __lockfunc _spin_lock_bh(spinlock_t *lock) __acquires(lock);
-void __lockfunc _read_lock_bh(rwlock_t *lock) __acquires(lock);
-void __lockfunc _write_lock_bh(rwlock_t *lock) __acquires(lock);
-void __lockfunc _spin_lock_irq(spinlock_t *lock) __acquires(lock);
-void __lockfunc _read_lock_irq(rwlock_t *lock) __acquires(lock);
-void __lockfunc _write_lock_irq(rwlock_t *lock) __acquires(lock);
-unsigned long __lockfunc _spin_lock_irqsave(spinlock_t *lock)
- __acquires(lock);
-unsigned long __lockfunc _spin_lock_irqsave_nested(spinlock_t *lock, int subclass)
- __acquires(lock);
-unsigned long __lockfunc _read_lock_irqsave(rwlock_t *lock)
- __acquires(lock);
-unsigned long __lockfunc _write_lock_irqsave(rwlock_t *lock)
- __acquires(lock);
-int __lockfunc _spin_trylock(spinlock_t *lock);
-int __lockfunc _read_trylock(rwlock_t *lock);
-int __lockfunc _write_trylock(rwlock_t *lock);
-int __lockfunc _spin_trylock_bh(spinlock_t *lock);
-void __lockfunc _spin_unlock(spinlock_t *lock) __releases(lock);
-void __lockfunc _read_unlock(rwlock_t *lock) __releases(lock);
-void __lockfunc _write_unlock(rwlock_t *lock) __releases(lock);
-void __lockfunc _spin_unlock_bh(spinlock_t *lock) __releases(lock);
-void __lockfunc _read_unlock_bh(rwlock_t *lock) __releases(lock);
-void __lockfunc _write_unlock_bh(rwlock_t *lock) __releases(lock);
-void __lockfunc _spin_unlock_irq(spinlock_t *lock) __releases(lock);
-void __lockfunc _read_unlock_irq(rwlock_t *lock) __releases(lock);
-void __lockfunc _write_unlock_irq(rwlock_t *lock) __releases(lock);
-void __lockfunc _spin_unlock_irqrestore(spinlock_t *lock, unsigned long flags)
- __releases(lock);
-void __lockfunc _read_unlock_irqrestore(rwlock_t *lock, unsigned long flags)
- __releases(lock);
-void __lockfunc _write_unlock_irqrestore(rwlock_t *lock, unsigned long flags)
- __releases(lock);
+#define assert_raw_spin_locked(x) BUG_ON(!raw_spin_is_locked(x))
+
+void __lockfunc _raw_spin_lock(raw_spinlock_t *lock) __acquires(lock);
+void __lockfunc _raw_spin_lock_nested(raw_spinlock_t *lock, int subclass)
+ __acquires(lock);
+void __lockfunc
+_raw_spin_lock_nest_lock(raw_spinlock_t *lock, struct lockdep_map *map)
+ __acquires(lock);
+void __lockfunc _raw_spin_lock_bh(raw_spinlock_t *lock) __acquires(lock);
+void __lockfunc _raw_spin_lock_irq(raw_spinlock_t *lock)
+ __acquires(lock);
+
+unsigned long __lockfunc _raw_spin_lock_irqsave(raw_spinlock_t *lock)
+ __acquires(lock);
+unsigned long __lockfunc
+_raw_spin_lock_irqsave_nested(raw_spinlock_t *lock, int subclass)
+ __acquires(lock);
+int __lockfunc _raw_spin_trylock(raw_spinlock_t *lock);
+int __lockfunc _raw_spin_trylock_bh(raw_spinlock_t *lock);
+void __lockfunc _raw_spin_unlock(raw_spinlock_t *lock) __releases(lock);
+void __lockfunc _raw_spin_unlock_bh(raw_spinlock_t *lock) __releases(lock);
+void __lockfunc _raw_spin_unlock_irq(raw_spinlock_t *lock) __releases(lock);
+void __lockfunc
+_raw_spin_unlock_irqrestore(raw_spinlock_t *lock, unsigned long flags)
+ __releases(lock);
#ifdef CONFIG_INLINE_SPIN_LOCK
-#define _spin_lock(lock) __spin_lock(lock)
-#endif
-
-#ifdef CONFIG_INLINE_READ_LOCK
-#define _read_lock(lock) __read_lock(lock)
-#endif
-
-#ifdef CONFIG_INLINE_WRITE_LOCK
-#define _write_lock(lock) __write_lock(lock)
+#define _raw_spin_lock(lock) __raw_spin_lock(lock)
#endif
#ifdef CONFIG_INLINE_SPIN_LOCK_BH
-#define _spin_lock_bh(lock) __spin_lock_bh(lock)
-#endif
-
-#ifdef CONFIG_INLINE_READ_LOCK_BH
-#define _read_lock_bh(lock) __read_lock_bh(lock)
-#endif
-
-#ifdef CONFIG_INLINE_WRITE_LOCK_BH
-#define _write_lock_bh(lock) __write_lock_bh(lock)
+#define _raw_spin_lock_bh(lock) __raw_spin_lock_bh(lock)
#endif
#ifdef CONFIG_INLINE_SPIN_LOCK_IRQ
-#define _spin_lock_irq(lock) __spin_lock_irq(lock)
-#endif
-
-#ifdef CONFIG_INLINE_READ_LOCK_IRQ
-#define _read_lock_irq(lock) __read_lock_irq(lock)
-#endif
-
-#ifdef CONFIG_INLINE_WRITE_LOCK_IRQ
-#define _write_lock_irq(lock) __write_lock_irq(lock)
+#define _raw_spin_lock_irq(lock) __raw_spin_lock_irq(lock)
#endif
#ifdef CONFIG_INLINE_SPIN_LOCK_IRQSAVE
-#define _spin_lock_irqsave(lock) __spin_lock_irqsave(lock)
-#endif
-
-#ifdef CONFIG_INLINE_READ_LOCK_IRQSAVE
-#define _read_lock_irqsave(lock) __read_lock_irqsave(lock)
-#endif
-
-#ifdef CONFIG_INLINE_WRITE_LOCK_IRQSAVE
-#define _write_lock_irqsave(lock) __write_lock_irqsave(lock)
+#define _raw_spin_lock_irqsave(lock) __raw_spin_lock_irqsave(lock)
#endif
#ifdef CONFIG_INLINE_SPIN_TRYLOCK
-#define _spin_trylock(lock) __spin_trylock(lock)
-#endif
-
-#ifdef CONFIG_INLINE_READ_TRYLOCK
-#define _read_trylock(lock) __read_trylock(lock)
-#endif
-
-#ifdef CONFIG_INLINE_WRITE_TRYLOCK
-#define _write_trylock(lock) __write_trylock(lock)
+#define _raw_spin_trylock(lock) __raw_spin_trylock(lock)
#endif
#ifdef CONFIG_INLINE_SPIN_TRYLOCK_BH
-#define _spin_trylock_bh(lock) __spin_trylock_bh(lock)
+#define _raw_spin_trylock_bh(lock) __raw_spin_trylock_bh(lock)
#endif
#ifdef CONFIG_INLINE_SPIN_UNLOCK
-#define _spin_unlock(lock) __spin_unlock(lock)
-#endif
-
-#ifdef CONFIG_INLINE_READ_UNLOCK
-#define _read_unlock(lock) __read_unlock(lock)
-#endif
-
-#ifdef CONFIG_INLINE_WRITE_UNLOCK
-#define _write_unlock(lock) __write_unlock(lock)
+#define _raw_spin_unlock(lock) __raw_spin_unlock(lock)
#endif
#ifdef CONFIG_INLINE_SPIN_UNLOCK_BH
-#define _spin_unlock_bh(lock) __spin_unlock_bh(lock)
-#endif
-
-#ifdef CONFIG_INLINE_READ_UNLOCK_BH
-#define _read_unlock_bh(lock) __read_unlock_bh(lock)
-#endif
-
-#ifdef CONFIG_INLINE_WRITE_UNLOCK_BH
-#define _write_unlock_bh(lock) __write_unlock_bh(lock)
+#define _raw_spin_unlock_bh(lock) __raw_spin_unlock_bh(lock)
#endif
#ifdef CONFIG_INLINE_SPIN_UNLOCK_IRQ
-#define _spin_unlock_irq(lock) __spin_unlock_irq(lock)
-#endif
-
-#ifdef CONFIG_INLINE_READ_UNLOCK_IRQ
-#define _read_unlock_irq(lock) __read_unlock_irq(lock)
-#endif
-
-#ifdef CONFIG_INLINE_WRITE_UNLOCK_IRQ
-#define _write_unlock_irq(lock) __write_unlock_irq(lock)
+#define _raw_spin_unlock_irq(lock) __raw_spin_unlock_irq(lock)
#endif
#ifdef CONFIG_INLINE_SPIN_UNLOCK_IRQRESTORE
-#define _spin_unlock_irqrestore(lock, flags) __spin_unlock_irqrestore(lock, flags)
-#endif
-
-#ifdef CONFIG_INLINE_READ_UNLOCK_IRQRESTORE
-#define _read_unlock_irqrestore(lock, flags) __read_unlock_irqrestore(lock, flags)
-#endif
-
-#ifdef CONFIG_INLINE_WRITE_UNLOCK_IRQRESTORE
-#define _write_unlock_irqrestore(lock, flags) __write_unlock_irqrestore(lock, flags)
+#define _raw_spin_unlock_irqrestore(lock, flags) __raw_spin_unlock_irqrestore(lock, flags)
#endif
-static inline int __spin_trylock(spinlock_t *lock)
+static inline int __raw_spin_trylock(raw_spinlock_t *lock)
{
preempt_disable();
- if (_raw_spin_trylock(lock)) {
+ if (do_raw_spin_trylock(lock)) {
spin_acquire(&lock->dep_map, 0, 1, _RET_IP_);
return 1;
}
@@ -183,28 +94,6 @@ static inline int __spin_trylock(spinlock_t *lock)
return 0;
}
-static inline int __read_trylock(rwlock_t *lock)
-{
- preempt_disable();
- if (_raw_read_trylock(lock)) {
- rwlock_acquire_read(&lock->dep_map, 0, 1, _RET_IP_);
- return 1;
- }
- preempt_enable();
- return 0;
-}
-
-static inline int __write_trylock(rwlock_t *lock)
-{
- preempt_disable();
- if (_raw_write_trylock(lock)) {
- rwlock_acquire(&lock->dep_map, 0, 1, _RET_IP_);
- return 1;
- }
- preempt_enable();
- return 0;
-}
-
/*
* If lockdep is enabled then we use the non-preemption spin-ops
* even on CONFIG_PREEMPT, because lockdep assumes that interrupts are
@@ -212,14 +101,7 @@ static inline int __write_trylock(rwlock_t *lock)
*/
#if !defined(CONFIG_GENERIC_LOCKBREAK) || defined(CONFIG_DEBUG_LOCK_ALLOC)
-static inline void __read_lock(rwlock_t *lock)
-{
- preempt_disable();
- rwlock_acquire_read(&lock->dep_map, 0, 0, _RET_IP_);
- LOCK_CONTENDED(lock, _raw_read_trylock, _raw_read_lock);
-}
-
-static inline unsigned long __spin_lock_irqsave(spinlock_t *lock)
+static inline unsigned long __raw_spin_lock_irqsave(raw_spinlock_t *lock)
{
unsigned long flags;
@@ -228,205 +110,79 @@ static inline unsigned long __spin_lock_irqsave(spinlock_t *lock)
spin_acquire(&lock->dep_map, 0, 0, _RET_IP_);
/*
* On lockdep we dont want the hand-coded irq-enable of
- * _raw_spin_lock_flags() code, because lockdep assumes
+ * do_raw_spin_lock_flags() code, because lockdep assumes
* that interrupts are not re-enabled during lock-acquire:
*/
#ifdef CONFIG_LOCKDEP
- LOCK_CONTENDED(lock, _raw_spin_trylock, _raw_spin_lock);
+ LOCK_CONTENDED(lock, do_raw_spin_trylock, do_raw_spin_lock);
#else
- _raw_spin_lock_flags(lock, &flags);
+ do_raw_spin_lock_flags(lock, &flags);
#endif
return flags;
}
-static inline void __spin_lock_irq(spinlock_t *lock)
+static inline void __raw_spin_lock_irq(raw_spinlock_t *lock)
{
local_irq_disable();
preempt_disable();
spin_acquire(&lock->dep_map, 0, 0, _RET_IP_);
- LOCK_CONTENDED(lock, _raw_spin_trylock, _raw_spin_lock);
+ LOCK_CONTENDED(lock, do_raw_spin_trylock, do_raw_spin_lock);
}
-static inline void __spin_lock_bh(spinlock_t *lock)
+static inline void __raw_spin_lock_bh(raw_spinlock_t *lock)
{
local_bh_disable();
preempt_disable();
spin_acquire(&lock->dep_map, 0, 0, _RET_IP_);
- LOCK_CONTENDED(lock, _raw_spin_trylock, _raw_spin_lock);
-}
-
-static inline unsigned long __read_lock_irqsave(rwlock_t *lock)
-{
- unsigned long flags;
-
- local_irq_save(flags);
- preempt_disable();
- rwlock_acquire_read(&lock->dep_map, 0, 0, _RET_IP_);
- LOCK_CONTENDED_FLAGS(lock, _raw_read_trylock, _raw_read_lock,
- _raw_read_lock_flags, &flags);
- return flags;
-}
-
-static inline void __read_lock_irq(rwlock_t *lock)
-{
- local_irq_disable();
- preempt_disable();
- rwlock_acquire_read(&lock->dep_map, 0, 0, _RET_IP_);
- LOCK_CONTENDED(lock, _raw_read_trylock, _raw_read_lock);
-}
-
-static inline void __read_lock_bh(rwlock_t *lock)
-{
- local_bh_disable();
- preempt_disable();
- rwlock_acquire_read(&lock->dep_map, 0, 0, _RET_IP_);
- LOCK_CONTENDED(lock, _raw_read_trylock, _raw_read_lock);
-}
-
-static inline unsigned long __write_lock_irqsave(rwlock_t *lock)
-{
- unsigned long flags;
-
- local_irq_save(flags);
- preempt_disable();
- rwlock_acquire(&lock->dep_map, 0, 0, _RET_IP_);
- LOCK_CONTENDED_FLAGS(lock, _raw_write_trylock, _raw_write_lock,
- _raw_write_lock_flags, &flags);
- return flags;
-}
-
-static inline void __write_lock_irq(rwlock_t *lock)
-{
- local_irq_disable();
- preempt_disable();
- rwlock_acquire(&lock->dep_map, 0, 0, _RET_IP_);
- LOCK_CONTENDED(lock, _raw_write_trylock, _raw_write_lock);
+ LOCK_CONTENDED(lock, do_raw_spin_trylock, do_raw_spin_lock);
}
-static inline void __write_lock_bh(rwlock_t *lock)
-{
- local_bh_disable();
- preempt_disable();
- rwlock_acquire(&lock->dep_map, 0, 0, _RET_IP_);
- LOCK_CONTENDED(lock, _raw_write_trylock, _raw_write_lock);
-}
-
-static inline void __spin_lock(spinlock_t *lock)
+static inline void __raw_spin_lock(raw_spinlock_t *lock)
{
preempt_disable();
spin_acquire(&lock->dep_map, 0, 0, _RET_IP_);
- LOCK_CONTENDED(lock, _raw_spin_trylock, _raw_spin_lock);
-}
-
-static inline void __write_lock(rwlock_t *lock)
-{
- preempt_disable();
- rwlock_acquire(&lock->dep_map, 0, 0, _RET_IP_);
- LOCK_CONTENDED(lock, _raw_write_trylock, _raw_write_lock);
+ LOCK_CONTENDED(lock, do_raw_spin_trylock, do_raw_spin_lock);
}
#endif /* CONFIG_PREEMPT */
-static inline void __spin_unlock(spinlock_t *lock)
+static inline void __raw_spin_unlock(raw_spinlock_t *lock)
{
spin_release(&lock->dep_map, 1, _RET_IP_);
- _raw_spin_unlock(lock);
- preempt_enable();
-}
-
-static inline void __write_unlock(rwlock_t *lock)
-{
- rwlock_release(&lock->dep_map, 1, _RET_IP_);
- _raw_write_unlock(lock);
- preempt_enable();
-}
-
-static inline void __read_unlock(rwlock_t *lock)
-{
- rwlock_release(&lock->dep_map, 1, _RET_IP_);
- _raw_read_unlock(lock);
+ do_raw_spin_unlock(lock);
preempt_enable();
}
-static inline void __spin_unlock_irqrestore(spinlock_t *lock,
+static inline void __raw_spin_unlock_irqrestore(raw_spinlock_t *lock,
unsigned long flags)
{
spin_release(&lock->dep_map, 1, _RET_IP_);
- _raw_spin_unlock(lock);
+ do_raw_spin_unlock(lock);
local_irq_restore(flags);
preempt_enable();
}
-static inline void __spin_unlock_irq(spinlock_t *lock)
+static inline void __raw_spin_unlock_irq(raw_spinlock_t *lock)
{
spin_release(&lock->dep_map, 1, _RET_IP_);
- _raw_spin_unlock(lock);
+ do_raw_spin_unlock(lock);
local_irq_enable();
preempt_enable();
}
-static inline void __spin_unlock_bh(spinlock_t *lock)
+static inline void __raw_spin_unlock_bh(raw_spinlock_t *lock)
{
spin_release(&lock->dep_map, 1, _RET_IP_);
- _raw_spin_unlock(lock);
- preempt_enable_no_resched();
- local_bh_enable_ip((unsigned long)__builtin_return_address(0));
-}
-
-static inline void __read_unlock_irqrestore(rwlock_t *lock, unsigned long flags)
-{
- rwlock_release(&lock->dep_map, 1, _RET_IP_);
- _raw_read_unlock(lock);
- local_irq_restore(flags);
- preempt_enable();
-}
-
-static inline void __read_unlock_irq(rwlock_t *lock)
-{
- rwlock_release(&lock->dep_map, 1, _RET_IP_);
- _raw_read_unlock(lock);
- local_irq_enable();
- preempt_enable();
-}
-
-static inline void __read_unlock_bh(rwlock_t *lock)
-{
- rwlock_release(&lock->dep_map, 1, _RET_IP_);
- _raw_read_unlock(lock);
+ do_raw_spin_unlock(lock);
preempt_enable_no_resched();
local_bh_enable_ip((unsigned long)__builtin_return_address(0));
}
-static inline void __write_unlock_irqrestore(rwlock_t *lock,
- unsigned long flags)
-{
- rwlock_release(&lock->dep_map, 1, _RET_IP_);
- _raw_write_unlock(lock);
- local_irq_restore(flags);
- preempt_enable();
-}
-
-static inline void __write_unlock_irq(rwlock_t *lock)
-{
- rwlock_release(&lock->dep_map, 1, _RET_IP_);
- _raw_write_unlock(lock);
- local_irq_enable();
- preempt_enable();
-}
-
-static inline void __write_unlock_bh(rwlock_t *lock)
-{
- rwlock_release(&lock->dep_map, 1, _RET_IP_);
- _raw_write_unlock(lock);
- preempt_enable_no_resched();
- local_bh_enable_ip((unsigned long)__builtin_return_address(0));
-}
-
-static inline int __spin_trylock_bh(spinlock_t *lock)
+static inline int __raw_spin_trylock_bh(raw_spinlock_t *lock)
{
local_bh_disable();
preempt_disable();
- if (_raw_spin_trylock(lock)) {
+ if (do_raw_spin_trylock(lock)) {
spin_acquire(&lock->dep_map, 0, 1, _RET_IP_);
return 1;
}
@@ -435,4 +191,6 @@ static inline int __spin_trylock_bh(spinlock_t *lock)
return 0;
}
+#include <linux/rwlock_api_smp.h>
+
#endif /* __LINUX_SPINLOCK_API_SMP_H */
diff --git a/include/linux/spinlock_api_up.h b/include/linux/spinlock_api_up.h
index 04e1d316457..af1f47229e7 100644
--- a/include/linux/spinlock_api_up.h
+++ b/include/linux/spinlock_api_up.h
@@ -16,7 +16,7 @@
#define in_lock_functions(ADDR) 0
-#define assert_spin_locked(lock) do { (void)(lock); } while (0)
+#define assert_raw_spin_locked(lock) do { (void)(lock); } while (0)
/*
* In the UP-nondebug case there's no real locking going on, so the
@@ -40,7 +40,8 @@
do { preempt_enable(); __release(lock); (void)(lock); } while (0)
#define __UNLOCK_BH(lock) \
- do { preempt_enable_no_resched(); local_bh_enable(); __release(lock); (void)(lock); } while (0)
+ do { preempt_enable_no_resched(); local_bh_enable(); \
+ __release(lock); (void)(lock); } while (0)
#define __UNLOCK_IRQ(lock) \
do { local_irq_enable(); __UNLOCK(lock); } while (0)
@@ -48,34 +49,37 @@
#define __UNLOCK_IRQRESTORE(lock, flags) \
do { local_irq_restore(flags); __UNLOCK(lock); } while (0)
-#define _spin_lock(lock) __LOCK(lock)
-#define _spin_lock_nested(lock, subclass) __LOCK(lock)
-#define _read_lock(lock) __LOCK(lock)
-#define _write_lock(lock) __LOCK(lock)
-#define _spin_lock_bh(lock) __LOCK_BH(lock)
-#define _read_lock_bh(lock) __LOCK_BH(lock)
-#define _write_lock_bh(lock) __LOCK_BH(lock)
-#define _spin_lock_irq(lock) __LOCK_IRQ(lock)
-#define _read_lock_irq(lock) __LOCK_IRQ(lock)
-#define _write_lock_irq(lock) __LOCK_IRQ(lock)
-#define _spin_lock_irqsave(lock, flags) __LOCK_IRQSAVE(lock, flags)
-#define _read_lock_irqsave(lock, flags) __LOCK_IRQSAVE(lock, flags)
-#define _write_lock_irqsave(lock, flags) __LOCK_IRQSAVE(lock, flags)
-#define _spin_trylock(lock) ({ __LOCK(lock); 1; })
-#define _read_trylock(lock) ({ __LOCK(lock); 1; })
-#define _write_trylock(lock) ({ __LOCK(lock); 1; })
-#define _spin_trylock_bh(lock) ({ __LOCK_BH(lock); 1; })
-#define _spin_unlock(lock) __UNLOCK(lock)
-#define _read_unlock(lock) __UNLOCK(lock)
-#define _write_unlock(lock) __UNLOCK(lock)
-#define _spin_unlock_bh(lock) __UNLOCK_BH(lock)
-#define _write_unlock_bh(lock) __UNLOCK_BH(lock)
-#define _read_unlock_bh(lock) __UNLOCK_BH(lock)
-#define _spin_unlock_irq(lock) __UNLOCK_IRQ(lock)
-#define _read_unlock_irq(lock) __UNLOCK_IRQ(lock)
-#define _write_unlock_irq(lock) __UNLOCK_IRQ(lock)
-#define _spin_unlock_irqrestore(lock, flags) __UNLOCK_IRQRESTORE(lock, flags)
-#define _read_unlock_irqrestore(lock, flags) __UNLOCK_IRQRESTORE(lock, flags)
-#define _write_unlock_irqrestore(lock, flags) __UNLOCK_IRQRESTORE(lock, flags)
+#define _raw_spin_lock(lock) __LOCK(lock)
+#define _raw_spin_lock_nested(lock, subclass) __LOCK(lock)
+#define _raw_read_lock(lock) __LOCK(lock)
+#define _raw_write_lock(lock) __LOCK(lock)
+#define _raw_spin_lock_bh(lock) __LOCK_BH(lock)
+#define _raw_read_lock_bh(lock) __LOCK_BH(lock)
+#define _raw_write_lock_bh(lock) __LOCK_BH(lock)
+#define _raw_spin_lock_irq(lock) __LOCK_IRQ(lock)
+#define _raw_read_lock_irq(lock) __LOCK_IRQ(lock)
+#define _raw_write_lock_irq(lock) __LOCK_IRQ(lock)
+#define _raw_spin_lock_irqsave(lock, flags) __LOCK_IRQSAVE(lock, flags)
+#define _raw_read_lock_irqsave(lock, flags) __LOCK_IRQSAVE(lock, flags)
+#define _raw_write_lock_irqsave(lock, flags) __LOCK_IRQSAVE(lock, flags)
+#define _raw_spin_trylock(lock) ({ __LOCK(lock); 1; })
+#define _raw_read_trylock(lock) ({ __LOCK(lock); 1; })
+#define _raw_write_trylock(lock) ({ __LOCK(lock); 1; })
+#define _raw_spin_trylock_bh(lock) ({ __LOCK_BH(lock); 1; })
+#define _raw_spin_unlock(lock) __UNLOCK(lock)
+#define _raw_read_unlock(lock) __UNLOCK(lock)
+#define _raw_write_unlock(lock) __UNLOCK(lock)
+#define _raw_spin_unlock_bh(lock) __UNLOCK_BH(lock)
+#define _raw_write_unlock_bh(lock) __UNLOCK_BH(lock)
+#define _raw_read_unlock_bh(lock) __UNLOCK_BH(lock)
+#define _raw_spin_unlock_irq(lock) __UNLOCK_IRQ(lock)
+#define _raw_read_unlock_irq(lock) __UNLOCK_IRQ(lock)
+#define _raw_write_unlock_irq(lock) __UNLOCK_IRQ(lock)
+#define _raw_spin_unlock_irqrestore(lock, flags) \
+ __UNLOCK_IRQRESTORE(lock, flags)
+#define _raw_read_unlock_irqrestore(lock, flags) \
+ __UNLOCK_IRQRESTORE(lock, flags)
+#define _raw_write_unlock_irqrestore(lock, flags) \
+ __UNLOCK_IRQRESTORE(lock, flags)
#endif /* __LINUX_SPINLOCK_API_UP_H */
diff --git a/include/linux/spinlock_types.h b/include/linux/spinlock_types.h
index 68d88f71f1a..851b7783720 100644
--- a/include/linux/spinlock_types.h
+++ b/include/linux/spinlock_types.h
@@ -17,8 +17,8 @@
#include <linux/lockdep.h>
-typedef struct {
- raw_spinlock_t raw_lock;
+typedef struct raw_spinlock {
+ arch_spinlock_t raw_lock;
#ifdef CONFIG_GENERIC_LOCKBREAK
unsigned int break_lock;
#endif
@@ -29,26 +29,10 @@ typedef struct {
#ifdef CONFIG_DEBUG_LOCK_ALLOC
struct lockdep_map dep_map;
#endif
-} spinlock_t;
+} raw_spinlock_t;
#define SPINLOCK_MAGIC 0xdead4ead
-typedef struct {
- raw_rwlock_t raw_lock;
-#ifdef CONFIG_GENERIC_LOCKBREAK
- unsigned int break_lock;
-#endif
-#ifdef CONFIG_DEBUG_SPINLOCK
- unsigned int magic, owner_cpu;
- void *owner;
-#endif
-#ifdef CONFIG_DEBUG_LOCK_ALLOC
- struct lockdep_map dep_map;
-#endif
-} rwlock_t;
-
-#define RWLOCK_MAGIC 0xdeaf1eed
-
#define SPINLOCK_OWNER_INIT ((void *)-1L)
#ifdef CONFIG_DEBUG_LOCK_ALLOC
@@ -57,44 +41,56 @@ typedef struct {
# define SPIN_DEP_MAP_INIT(lockname)
#endif
-#ifdef CONFIG_DEBUG_LOCK_ALLOC
-# define RW_DEP_MAP_INIT(lockname) .dep_map = { .name = #lockname }
+#ifdef CONFIG_DEBUG_SPINLOCK
+# define SPIN_DEBUG_INIT(lockname) \
+ .magic = SPINLOCK_MAGIC, \
+ .owner_cpu = -1, \
+ .owner = SPINLOCK_OWNER_INIT,
#else
-# define RW_DEP_MAP_INIT(lockname)
+# define SPIN_DEBUG_INIT(lockname)
#endif
-#ifdef CONFIG_DEBUG_SPINLOCK
-# define __SPIN_LOCK_UNLOCKED(lockname) \
- (spinlock_t) { .raw_lock = __RAW_SPIN_LOCK_UNLOCKED, \
- .magic = SPINLOCK_MAGIC, \
- .owner = SPINLOCK_OWNER_INIT, \
- .owner_cpu = -1, \
- SPIN_DEP_MAP_INIT(lockname) }
-#define __RW_LOCK_UNLOCKED(lockname) \
- (rwlock_t) { .raw_lock = __RAW_RW_LOCK_UNLOCKED, \
- .magic = RWLOCK_MAGIC, \
- .owner = SPINLOCK_OWNER_INIT, \
- .owner_cpu = -1, \
- RW_DEP_MAP_INIT(lockname) }
-#else
-# define __SPIN_LOCK_UNLOCKED(lockname) \
- (spinlock_t) { .raw_lock = __RAW_SPIN_LOCK_UNLOCKED, \
- SPIN_DEP_MAP_INIT(lockname) }
-#define __RW_LOCK_UNLOCKED(lockname) \
- (rwlock_t) { .raw_lock = __RAW_RW_LOCK_UNLOCKED, \
- RW_DEP_MAP_INIT(lockname) }
+#define __RAW_SPIN_LOCK_INITIALIZER(lockname) \
+ { \
+ .raw_lock = __ARCH_SPIN_LOCK_UNLOCKED, \
+ SPIN_DEBUG_INIT(lockname) \
+ SPIN_DEP_MAP_INIT(lockname) }
+
+#define __RAW_SPIN_LOCK_UNLOCKED(lockname) \
+ (raw_spinlock_t) __RAW_SPIN_LOCK_INITIALIZER(lockname)
+
+#define DEFINE_RAW_SPINLOCK(x) raw_spinlock_t x = __RAW_SPIN_LOCK_UNLOCKED(x)
+
+typedef struct spinlock {
+ union {
+ struct raw_spinlock rlock;
+
+#ifdef CONFIG_DEBUG_LOCK_ALLOC
+# define LOCK_PADSIZE (offsetof(struct raw_spinlock, dep_map))
+ struct {
+ u8 __padding[LOCK_PADSIZE];
+ struct lockdep_map dep_map;
+ };
#endif
+ };
+} spinlock_t;
+
+#define __SPIN_LOCK_INITIALIZER(lockname) \
+ { { .rlock = __RAW_SPIN_LOCK_INITIALIZER(lockname) } }
+
+#define __SPIN_LOCK_UNLOCKED(lockname) \
+ (spinlock_t) __SPIN_LOCK_INITIALIZER(lockname)
/*
- * SPIN_LOCK_UNLOCKED and RW_LOCK_UNLOCKED defeat lockdep state tracking and
- * are hence deprecated.
- * Please use DEFINE_SPINLOCK()/DEFINE_RWLOCK() or
- * __SPIN_LOCK_UNLOCKED()/__RW_LOCK_UNLOCKED() as appropriate.
+ * SPIN_LOCK_UNLOCKED defeats lockdep state tracking and is hence
+ * deprecated.
+ * Please use DEFINE_SPINLOCK() or __SPIN_LOCK_UNLOCKED() as
+ * appropriate.
*/
#define SPIN_LOCK_UNLOCKED __SPIN_LOCK_UNLOCKED(old_style_spin_init)
-#define RW_LOCK_UNLOCKED __RW_LOCK_UNLOCKED(old_style_rw_init)
#define DEFINE_SPINLOCK(x) spinlock_t x = __SPIN_LOCK_UNLOCKED(x)
-#define DEFINE_RWLOCK(x) rwlock_t x = __RW_LOCK_UNLOCKED(x)
+
+#include <linux/rwlock_types.h>
#endif /* __LINUX_SPINLOCK_TYPES_H */
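The net effect of the hunk above: spinlock_t becomes a wrapper around the new raw_spinlock_t, which in turn carries the arch_spinlock_t. A minimal sketch of declaring and taking both kinds of lock after this change; it assumes the matching raw_spin_lock_*() operations added to <linux/spinlock.h> by the same rename series.

#include <linux/spinlock.h>

static DEFINE_RAW_SPINLOCK(example_raw_lock);	/* raw_spinlock_t, via the new macro above */
static DEFINE_SPINLOCK(example_lock);		/* spinlock_t wrapping a raw_spinlock */

static void example_locking(void)
{
	unsigned long flags;

	raw_spin_lock_irqsave(&example_raw_lock, flags);
	/* ... arch-level critical section ... */
	raw_spin_unlock_irqrestore(&example_raw_lock, flags);

	spin_lock(&example_lock);
	/* ... ordinary critical section ... */
	spin_unlock(&example_lock);
}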
diff --git a/include/linux/spinlock_types_up.h b/include/linux/spinlock_types_up.h
index 04135b0e198..c09b6407ae1 100644
--- a/include/linux/spinlock_types_up.h
+++ b/include/linux/spinlock_types_up.h
@@ -16,22 +16,22 @@
typedef struct {
volatile unsigned int slock;
-} raw_spinlock_t;
+} arch_spinlock_t;
-#define __RAW_SPIN_LOCK_UNLOCKED { 1 }
+#define __ARCH_SPIN_LOCK_UNLOCKED { 1 }
#else
-typedef struct { } raw_spinlock_t;
+typedef struct { } arch_spinlock_t;
-#define __RAW_SPIN_LOCK_UNLOCKED { }
+#define __ARCH_SPIN_LOCK_UNLOCKED { }
#endif
typedef struct {
/* no debug version on UP */
-} raw_rwlock_t;
+} arch_rwlock_t;
-#define __RAW_RW_LOCK_UNLOCKED { }
+#define __ARCH_RW_LOCK_UNLOCKED { }
#endif /* __LINUX_SPINLOCK_TYPES_UP_H */
diff --git a/include/linux/spinlock_up.h b/include/linux/spinlock_up.h
index d4841ed8215..b14f6a91e19 100644
--- a/include/linux/spinlock_up.h
+++ b/include/linux/spinlock_up.h
@@ -18,21 +18,21 @@
*/
#ifdef CONFIG_DEBUG_SPINLOCK
-#define __raw_spin_is_locked(x) ((x)->slock == 0)
+#define arch_spin_is_locked(x) ((x)->slock == 0)
-static inline void __raw_spin_lock(raw_spinlock_t *lock)
+static inline void arch_spin_lock(arch_spinlock_t *lock)
{
lock->slock = 0;
}
static inline void
-__raw_spin_lock_flags(raw_spinlock_t *lock, unsigned long flags)
+arch_spin_lock_flags(arch_spinlock_t *lock, unsigned long flags)
{
local_irq_save(flags);
lock->slock = 0;
}
-static inline int __raw_spin_trylock(raw_spinlock_t *lock)
+static inline int arch_spin_trylock(arch_spinlock_t *lock)
{
char oldval = lock->slock;
@@ -41,7 +41,7 @@ static inline int __raw_spin_trylock(raw_spinlock_t *lock)
return oldval > 0;
}
-static inline void __raw_spin_unlock(raw_spinlock_t *lock)
+static inline void arch_spin_unlock(arch_spinlock_t *lock)
{
lock->slock = 1;
}
@@ -49,28 +49,28 @@ static inline void __raw_spin_unlock(raw_spinlock_t *lock)
/*
* Read-write spinlocks. No debug version.
*/
-#define __raw_read_lock(lock) do { (void)(lock); } while (0)
-#define __raw_write_lock(lock) do { (void)(lock); } while (0)
-#define __raw_read_trylock(lock) ({ (void)(lock); 1; })
-#define __raw_write_trylock(lock) ({ (void)(lock); 1; })
-#define __raw_read_unlock(lock) do { (void)(lock); } while (0)
-#define __raw_write_unlock(lock) do { (void)(lock); } while (0)
+#define arch_read_lock(lock) do { (void)(lock); } while (0)
+#define arch_write_lock(lock) do { (void)(lock); } while (0)
+#define arch_read_trylock(lock) ({ (void)(lock); 1; })
+#define arch_write_trylock(lock) ({ (void)(lock); 1; })
+#define arch_read_unlock(lock) do { (void)(lock); } while (0)
+#define arch_write_unlock(lock) do { (void)(lock); } while (0)
#else /* DEBUG_SPINLOCK */
-#define __raw_spin_is_locked(lock) ((void)(lock), 0)
+#define arch_spin_is_locked(lock) ((void)(lock), 0)
/* for sched.c and kernel_lock.c: */
-# define __raw_spin_lock(lock) do { (void)(lock); } while (0)
-# define __raw_spin_lock_flags(lock, flags) do { (void)(lock); } while (0)
-# define __raw_spin_unlock(lock) do { (void)(lock); } while (0)
-# define __raw_spin_trylock(lock) ({ (void)(lock); 1; })
+# define arch_spin_lock(lock) do { (void)(lock); } while (0)
+# define arch_spin_lock_flags(lock, flags) do { (void)(lock); } while (0)
+# define arch_spin_unlock(lock) do { (void)(lock); } while (0)
+# define arch_spin_trylock(lock) ({ (void)(lock); 1; })
#endif /* DEBUG_SPINLOCK */
-#define __raw_spin_is_contended(lock) (((void)(lock), 0))
+#define arch_spin_is_contended(lock) (((void)(lock), 0))
-#define __raw_read_can_lock(lock) (((void)(lock), 1))
-#define __raw_write_can_lock(lock) (((void)(lock), 1))
+#define arch_read_can_lock(lock) (((void)(lock), 1))
+#define arch_write_can_lock(lock) (((void)(lock), 1))
-#define __raw_spin_unlock_wait(lock) \
- do { cpu_relax(); } while (__raw_spin_is_locked(lock))
+#define arch_spin_unlock_wait(lock) \
+ do { cpu_relax(); } while (arch_spin_is_locked(lock))
#endif /* __LINUX_SPINLOCK_UP_H */
diff --git a/include/linux/string.h b/include/linux/string.h
index b8508868d5a..651839a2a75 100644
--- a/include/linux/string.h
+++ b/include/linux/string.h
@@ -62,7 +62,15 @@ extern char * strnchr(const char *, size_t, int);
#ifndef __HAVE_ARCH_STRRCHR
extern char * strrchr(const char *,int);
#endif
-extern char * __must_check strstrip(char *);
+extern char * __must_check skip_spaces(const char *);
+
+extern char *strim(char *);
+
+static inline __must_check char *strstrip(char *str)
+{
+ return strim(str);
+}
+
#ifndef __HAVE_ARCH_STRSTR
extern char * strstr(const char *,const char *);
#endif
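With this hunk strstrip() becomes a thin inline wrapper and two new helpers are exported. A hedged usage sketch, with illustrative buffer contents:

#include <linux/string.h>

static void example_trim(void)
{
	char buf[] = "  hello world  ";
	char *s;

	s = strim(buf);			/* cuts trailing blanks, returns pointer past leading ones */
	/* s -> "hello world" */

	s = skip_spaces("   value");	/* only advances past leading whitespace */
	/* s -> "value" */
}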
diff --git a/include/linux/sunrpc/sched.h b/include/linux/sunrpc/sched.h
index 401097781fc..1906782ec86 100644
--- a/include/linux/sunrpc/sched.h
+++ b/include/linux/sunrpc/sched.h
@@ -130,12 +130,14 @@ struct rpc_task_setup {
#define RPC_TASK_DYNAMIC 0x0080 /* task was kmalloc'ed */
#define RPC_TASK_KILLED 0x0100 /* task was killed */
#define RPC_TASK_SOFT 0x0200 /* Use soft timeouts */
+#define RPC_TASK_SOFTCONN 0x0400 /* Fail if can't connect */
#define RPC_IS_ASYNC(t) ((t)->tk_flags & RPC_TASK_ASYNC)
#define RPC_IS_SWAPPER(t) ((t)->tk_flags & RPC_TASK_SWAPPER)
#define RPC_DO_ROOTOVERRIDE(t) ((t)->tk_flags & RPC_TASK_ROOTCREDS)
#define RPC_ASSASSINATED(t) ((t)->tk_flags & RPC_TASK_KILLED)
#define RPC_IS_SOFT(t) ((t)->tk_flags & RPC_TASK_SOFT)
+#define RPC_IS_SOFTCONN(t) ((t)->tk_flags & RPC_TASK_SOFTCONN)
#define RPC_TASK_RUNNING 0
#define RPC_TASK_QUEUED 1
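A small hedged sketch of how the new flag is meant to be consumed; struct rpc_task and tk_flags are the same ones the existing RPC_IS_*() macros test:

static int example_fail_fast(const struct rpc_task *task)
{
	/* With RPC_TASK_SOFTCONN set, a failed connect should error out
	 * rather than be retried. */
	return RPC_IS_SOFTCONN(task) != 0;
}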
diff --git a/include/linux/swap.h b/include/linux/swap.h
index 4ec90019c1a..a2602a8207a 100644
--- a/include/linux/swap.h
+++ b/include/linux/swap.h
@@ -145,38 +145,43 @@ enum {
SWP_DISCARDABLE = (1 << 2), /* blkdev supports discard */
SWP_DISCARDING = (1 << 3), /* now discarding a free cluster */
SWP_SOLIDSTATE = (1 << 4), /* blkdev seeks are cheap */
+ SWP_CONTINUED = (1 << 5), /* swap_map has count continuation */
/* add others here before... */
SWP_SCANNING = (1 << 8), /* refcount in scan_swap_map */
};
#define SWAP_CLUSTER_MAX 32
-#define SWAP_MAP_MAX 0x7ffe
-#define SWAP_MAP_BAD 0x7fff
-#define SWAP_HAS_CACHE 0x8000 /* There is a swap cache of entry. */
-#define SWAP_COUNT_MASK (~SWAP_HAS_CACHE)
+#define SWAP_MAP_MAX 0x3e /* Max duplication count, in first swap_map */
+#define SWAP_MAP_BAD 0x3f /* Note pageblock is bad, in first swap_map */
+#define SWAP_HAS_CACHE 0x40 /* Flag page is cached, in first swap_map */
+#define SWAP_CONT_MAX 0x7f /* Max count, in each swap_map continuation */
+#define COUNT_CONTINUED 0x80 /* See swap_map continuation for full count */
+#define SWAP_MAP_SHMEM 0xbf /* Owned by shmem/tmpfs, in first swap_map */
+
/*
* The in-memory structure used to track swap areas.
*/
struct swap_info_struct {
- unsigned long flags;
- int prio; /* swap priority */
- int next; /* next entry on swap list */
- struct file *swap_file;
- struct block_device *bdev;
- struct list_head extent_list;
- struct swap_extent *curr_swap_extent;
- unsigned short *swap_map;
- unsigned int lowest_bit;
- unsigned int highest_bit;
+ unsigned long flags; /* SWP_USED etc: see above */
+ signed short prio; /* swap priority of this type */
+ signed char type; /* strange name for an index */
+ signed char next; /* next type on the swap list */
+ unsigned int max; /* extent of the swap_map */
+ unsigned char *swap_map; /* vmalloc'ed array of usage counts */
+ unsigned int lowest_bit; /* index of first free in swap_map */
+ unsigned int highest_bit; /* index of last free in swap_map */
+ unsigned int pages; /* total of usable pages of swap */
+ unsigned int inuse_pages; /* number of those currently in use */
+ unsigned int cluster_next; /* likely index for next allocation */
+ unsigned int cluster_nr; /* countdown to next cluster search */
unsigned int lowest_alloc; /* while preparing discard cluster */
unsigned int highest_alloc; /* while preparing discard cluster */
- unsigned int cluster_next;
- unsigned int cluster_nr;
- unsigned int pages;
- unsigned int max;
- unsigned int inuse_pages;
- unsigned int old_block_size;
+ struct swap_extent *curr_swap_extent;
+ struct swap_extent first_swap_extent;
+ struct block_device *bdev; /* swap device or bdev of swap file */
+ struct file *swap_file; /* seldom referenced */
+ unsigned int old_block_size; /* seldom referenced */
};
struct swap_list_t {
@@ -273,6 +278,7 @@ extern int scan_unevictable_register_node(struct node *node);
extern void scan_unevictable_unregister_node(struct node *node);
extern int kswapd_run(int nid);
+extern void kswapd_stop(int nid);
#ifdef CONFIG_MMU
/* linux/mm/shmem.c */
@@ -309,17 +315,18 @@ extern long total_swap_pages;
extern void si_swapinfo(struct sysinfo *);
extern swp_entry_t get_swap_page(void);
extern swp_entry_t get_swap_page_of_type(int);
-extern void swap_duplicate(swp_entry_t);
-extern int swapcache_prepare(swp_entry_t);
extern int valid_swaphandles(swp_entry_t, unsigned long *);
+extern int add_swap_count_continuation(swp_entry_t, gfp_t);
+extern void swap_shmem_alloc(swp_entry_t);
+extern int swap_duplicate(swp_entry_t);
+extern int swapcache_prepare(swp_entry_t);
extern void swap_free(swp_entry_t);
extern void swapcache_free(swp_entry_t, struct page *page);
extern int free_swap_and_cache(swp_entry_t);
extern int swap_type_of(dev_t, sector_t, struct block_device **);
extern unsigned int count_swap_pages(int, int);
-extern sector_t map_swap_page(struct swap_info_struct *, pgoff_t);
+extern sector_t map_swap_page(struct page *, struct block_device **);
extern sector_t swapdev_block(int, pgoff_t);
-extern struct swap_info_struct *get_swap_info_struct(unsigned);
extern int reuse_swap_page(struct page *);
extern int try_to_free_swap(struct page *);
struct backing_dev_info;
@@ -384,8 +391,18 @@ static inline void show_swap_cache_info(void)
#define free_swap_and_cache(swp) is_migration_entry(swp)
#define swapcache_prepare(swp) is_migration_entry(swp)
-static inline void swap_duplicate(swp_entry_t swp)
+static inline int add_swap_count_continuation(swp_entry_t swp, gfp_t gfp_mask)
{
+ return 0;
+}
+
+static inline void swap_shmem_alloc(swp_entry_t swp)
+{
+}
+
+static inline int swap_duplicate(swp_entry_t swp)
+{
+ return 0;
}
static inline void swap_free(swp_entry_t swp)
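The old 16-bit swap_map entries shrink to a byte, packing the duplication count, the cache flag and the continuation flag as defined above. A hedged, illustrative decode (not a function from swapfile.c):

static inline int example_swap_count(unsigned char ent)
{
	if (ent == SWAP_MAP_BAD || ent == SWAP_MAP_SHMEM)
		return 0;	/* special markers, not ordinary counts */
	return ent & ~(COUNT_CONTINUED | SWAP_HAS_CACHE);
}

/* 0x01                                 -> one reference, no swap cache
 * SWAP_HAS_CACHE | 0x01        (0x41)  -> one reference plus the swap cache
 * COUNT_CONTINUED | SWAP_MAP_MAX (0xbe) -> 0x3e counted here, remainder kept
 *                                          in a swap_map continuation page
 */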
diff --git a/include/linux/timb_gpio.h b/include/linux/timb_gpio.h
new file mode 100644
index 00000000000..ce456eaae86
--- /dev/null
+++ b/include/linux/timb_gpio.h
@@ -0,0 +1,37 @@
+/*
+ * timb_gpio.h timberdale FPGA GPIO driver, platform data definition
+ * Copyright (c) 2009 Intel Corporation
+ *
+ * This program is free software; you can redistribute it and/or modify
+ * it under the terms of the GNU General Public License version 2 as
+ * published by the Free Software Foundation.
+ *
+ * This program is distributed in the hope that it will be useful,
+ * but WITHOUT ANY WARRANTY; without even the implied warranty of
+ * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
+ * GNU General Public License for more details.
+ *
+ * You should have received a copy of the GNU General Public License
+ * along with this program; if not, write to the Free Software
+ * Foundation, Inc., 675 Mass Ave, Cambridge, MA 02139, USA.
+ */
+
+#ifndef _LINUX_TIMB_GPIO_H
+#define _LINUX_TIMB_GPIO_H
+
+/**
+ * struct timbgpio_platform_data - Platform data of the Timberdale GPIO driver
+ * @gpio_base: The number of the first GPIO pin, set to -1 for
+ * dynamic number allocation.
+ * @nr_pins: Number of pins supported by the hardware (1-32)
+ * @irq_base: If IRQs are supported by the hardware, this is the base
+ * number of the IRQs. One IRQ per pin will be used. Set to
+ * -1 if IRQs are not supported.
+ */
+struct timbgpio_platform_data {
+ int gpio_base;
+ int nr_pins;
+ int irq_base;
+};
+
+#endif
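A hedged sketch of board-support code filling in this platform data; the values are assumptions for illustration:

static struct timbgpio_platform_data example_timbgpio_pdata = {
	.gpio_base	= -1,	/* let gpiolib assign the base number */
	.nr_pins	= 32,
	.irq_base	= -1,	/* no per-pin interrupts wired up */
};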
diff --git a/include/linux/trace_seq.h b/include/linux/trace_seq.h
index 09077f6ed12..5cf397ceb72 100644
--- a/include/linux/trace_seq.h
+++ b/include/linux/trace_seq.h
@@ -14,6 +14,7 @@ struct trace_seq {
unsigned char buffer[PAGE_SIZE];
unsigned int len;
unsigned int readpos;
+ int full;
};
static inline void
@@ -21,6 +22,7 @@ trace_seq_init(struct trace_seq *s)
{
s->len = 0;
s->readpos = 0;
+ s->full = 0;
}
/*
@@ -33,7 +35,7 @@ extern int trace_seq_vprintf(struct trace_seq *s, const char *fmt, va_list args)
__attribute__ ((format (printf, 2, 0)));
extern int
trace_seq_bprintf(struct trace_seq *s, const char *fmt, const u32 *binary);
-extern void trace_print_seq(struct seq_file *m, struct trace_seq *s);
+extern int trace_print_seq(struct seq_file *m, struct trace_seq *s);
extern ssize_t trace_seq_to_user(struct trace_seq *s, char __user *ubuf,
size_t cnt);
extern int trace_seq_puts(struct trace_seq *s, const char *str);
@@ -55,8 +57,9 @@ trace_seq_bprintf(struct trace_seq *s, const char *fmt, const u32 *binary)
return 0;
}
-static inline void trace_print_seq(struct seq_file *m, struct trace_seq *s)
+static inline int trace_print_seq(struct seq_file *m, struct trace_seq *s)
{
+ return 0;
}
static inline ssize_t trace_seq_to_user(struct trace_seq *s, char __user *ubuf,
size_t cnt)
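trace_print_seq() now reports back to its caller. A minimal hedged sketch of a show-style callback propagating the new return value; trace_seq_printf() is assumed to come from the same header:

static int example_show(struct seq_file *m, struct trace_seq *s)
{
	trace_seq_init(s);			/* also clears the new ->full flag */
	trace_seq_printf(s, "events: %d\n", 42);
	return trace_print_seq(m, s);		/* int now, was void */
}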
diff --git a/include/linux/tracehook.h b/include/linux/tracehook.h
index 1eb44a924e5..10db0102a89 100644
--- a/include/linux/tracehook.h
+++ b/include/linux/tracehook.h
@@ -134,6 +134,13 @@ static inline __must_check int tracehook_report_syscall_entry(
*/
static inline void tracehook_report_syscall_exit(struct pt_regs *regs, int step)
{
+ if (step) {
+ siginfo_t info;
+ user_single_step_siginfo(current, regs, &info);
+ force_sig_info(SIGTRAP, &info, current);
+ return;
+ }
+
ptrace_report_syscall(regs);
}
diff --git a/include/linux/tty.h b/include/linux/tty.h
index 405a9035fe4..ef3a2947b10 100644
--- a/include/linux/tty.h
+++ b/include/linux/tty.h
@@ -350,8 +350,6 @@ extern void tty_write_flush(struct tty_struct *);
extern struct ktermios tty_std_termios;
-extern int kmsg_redirect;
-
extern void console_init(void);
extern int vcs_init(void);
diff --git a/include/linux/usbdevice_fs.h b/include/linux/usbdevice_fs.h
index b2a7d8ba6ee..15591d2ea40 100644
--- a/include/linux/usbdevice_fs.h
+++ b/include/linux/usbdevice_fs.h
@@ -128,6 +128,29 @@ struct usbdevfs_hub_portinfo {
#ifdef __KERNEL__
#ifdef CONFIG_COMPAT
#include <linux/compat.h>
+
+struct usbdevfs_ctrltransfer32 {
+ u8 bRequestType;
+ u8 bRequest;
+ u16 wValue;
+ u16 wIndex;
+ u16 wLength;
+ u32 timeout; /* in milliseconds */
+ compat_caddr_t data;
+};
+
+struct usbdevfs_bulktransfer32 {
+ compat_uint_t ep;
+ compat_uint_t len;
+ compat_uint_t timeout; /* in milliseconds */
+ compat_caddr_t data;
+};
+
+struct usbdevfs_disconnectsignal32 {
+ compat_int_t signr;
+ compat_caddr_t context;
+};
+
struct usbdevfs_urb32 {
unsigned char type;
unsigned char endpoint;
@@ -153,7 +176,9 @@ struct usbdevfs_ioctl32 {
#endif /* __KERNEL__ */
#define USBDEVFS_CONTROL _IOWR('U', 0, struct usbdevfs_ctrltransfer)
+#define USBDEVFS_CONTROL32 _IOWR('U', 0, struct usbdevfs_ctrltransfer32)
#define USBDEVFS_BULK _IOWR('U', 2, struct usbdevfs_bulktransfer)
+#define USBDEVFS_BULK32 _IOWR('U', 2, struct usbdevfs_bulktransfer32)
#define USBDEVFS_RESETEP _IOR('U', 3, unsigned int)
#define USBDEVFS_SETINTERFACE _IOR('U', 4, struct usbdevfs_setinterface)
#define USBDEVFS_SETCONFIGURATION _IOR('U', 5, unsigned int)
@@ -166,6 +191,7 @@ struct usbdevfs_ioctl32 {
#define USBDEVFS_REAPURBNDELAY _IOW('U', 13, void *)
#define USBDEVFS_REAPURBNDELAY32 _IOW('U', 13, __u32)
#define USBDEVFS_DISCSIGNAL _IOR('U', 14, struct usbdevfs_disconnectsignal)
+#define USBDEVFS_DISCSIGNAL32 _IOR('U', 14, struct usbdevfs_disconnectsignal32)
#define USBDEVFS_CLAIMINTERFACE _IOR('U', 15, unsigned int)
#define USBDEVFS_RELEASEINTERFACE _IOR('U', 16, unsigned int)
#define USBDEVFS_CONNECTINFO _IOW('U', 17, struct usbdevfs_connectinfo)
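A hedged sketch (illustrative, not the actual devio.c handler) of how a 64-bit kernel could translate the new 32-bit bulk request into the native struct usbdevfs_bulktransfer; it assumes the native struct keeps its ep/len/timeout/data layout and that <linux/compat.h> and <linux/uaccess.h> are available:

static int example_get_bulk32(struct usbdevfs_bulktransfer *kbulk,
			      const struct usbdevfs_bulktransfer32 __user *ubulk)
{
	struct usbdevfs_bulktransfer32 b32;

	if (copy_from_user(&b32, ubulk, sizeof(b32)))
		return -EFAULT;

	kbulk->ep	= b32.ep;
	kbulk->len	= b32.len;
	kbulk->timeout	= b32.timeout;
	kbulk->data	= compat_ptr(b32.data);	/* widen the 32-bit user pointer */
	return 0;
}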
diff --git a/include/linux/videodev2.h b/include/linux/videodev2.h
index 32b92298fd7..d4962a782b8 100644
--- a/include/linux/videodev2.h
+++ b/include/linux/videodev2.h
@@ -294,6 +294,7 @@ struct v4l2_pix_format {
/* Grey formats */
#define V4L2_PIX_FMT_GREY v4l2_fourcc('G', 'R', 'E', 'Y') /* 8 Greyscale */
+#define V4L2_PIX_FMT_Y10 v4l2_fourcc('Y', '1', '0', ' ') /* 10 Greyscale */
#define V4L2_PIX_FMT_Y16 v4l2_fourcc('Y', '1', '6', ' ') /* 16 Greyscale */
/* Palette formats */
@@ -329,7 +330,11 @@ struct v4l2_pix_format {
#define V4L2_PIX_FMT_SBGGR8 v4l2_fourcc('B', 'A', '8', '1') /* 8 BGBG.. GRGR.. */
#define V4L2_PIX_FMT_SGBRG8 v4l2_fourcc('G', 'B', 'R', 'G') /* 8 GBGB.. RGRG.. */
#define V4L2_PIX_FMT_SGRBG8 v4l2_fourcc('G', 'R', 'B', 'G') /* 8 GRGR.. BGBG.. */
-#define V4L2_PIX_FMT_SGRBG10 v4l2_fourcc('B', 'A', '1', '0') /* 10bit raw bayer */
+#define V4L2_PIX_FMT_SRGGB8 v4l2_fourcc('R', 'G', 'G', 'B') /* 8 RGRG.. GBGB.. */
+#define V4L2_PIX_FMT_SBGGR10 v4l2_fourcc('B', 'G', '1', '0') /* 10 BGBG.. GRGR.. */
+#define V4L2_PIX_FMT_SGBRG10 v4l2_fourcc('G', 'B', '1', '0') /* 10 GBGB.. RGRG.. */
+#define V4L2_PIX_FMT_SGRBG10 v4l2_fourcc('B', 'A', '1', '0') /* 10 GRGR.. BGBG.. */
+#define V4L2_PIX_FMT_SRGGB10 v4l2_fourcc('R', 'G', '1', '0') /* 10 RGRG.. GBGB.. */
/* 10bit raw bayer DPCM compressed to 8 bits */
#define V4L2_PIX_FMT_SGRBG10DPCM8 v4l2_fourcc('B', 'D', '1', '0')
/*
@@ -732,6 +737,99 @@ struct v4l2_standard {
};
/*
+ * V I D E O T I M I N G S D V P R E S E T
+ */
+struct v4l2_dv_preset {
+ __u32 preset;
+ __u32 reserved[4];
+};
+
+/*
+ * D V P R E S E T S E N U M E R A T I O N
+ */
+struct v4l2_dv_enum_preset {
+ __u32 index;
+ __u32 preset;
+ __u8 name[32]; /* Name of the preset timing */
+ __u32 width;
+ __u32 height;
+ __u32 reserved[4];
+};
+
+/*
+ * D V P R E S E T V A L U E S
+ */
+#define V4L2_DV_INVALID 0
+#define V4L2_DV_480P59_94 1 /* BT.1362 */
+#define V4L2_DV_576P50 2 /* BT.1362 */
+#define V4L2_DV_720P24 3 /* SMPTE 296M */
+#define V4L2_DV_720P25 4 /* SMPTE 296M */
+#define V4L2_DV_720P30 5 /* SMPTE 296M */
+#define V4L2_DV_720P50 6 /* SMPTE 296M */
+#define V4L2_DV_720P59_94 7 /* SMPTE 274M */
+#define V4L2_DV_720P60 8 /* SMPTE 274M/296M */
+#define V4L2_DV_1080I29_97 9 /* BT.1120/ SMPTE 274M */
+#define V4L2_DV_1080I30 10 /* BT.1120/ SMPTE 274M */
+#define V4L2_DV_1080I25 11 /* BT.1120 */
+#define V4L2_DV_1080I50 12 /* SMPTE 296M */
+#define V4L2_DV_1080I60 13 /* SMPTE 296M */
+#define V4L2_DV_1080P24 14 /* SMPTE 296M */
+#define V4L2_DV_1080P25 15 /* SMPTE 296M */
+#define V4L2_DV_1080P30 16 /* SMPTE 296M */
+#define V4L2_DV_1080P50 17 /* BT.1120 */
+#define V4L2_DV_1080P60 18 /* BT.1120 */
+
+/*
+ * D V B T T I M I N G S
+ */
+
+/* BT.656/BT.1120 timing data */
+struct v4l2_bt_timings {
+ __u32 width; /* width in pixels */
+ __u32 height; /* height in lines */
+ __u32 interlaced; /* Interlaced or progressive */
+ __u32 polarities; /* Positive or negative polarity */
+ __u64 pixelclock; /* Pixel clock in Hz, e.g. 74.25MHz -> 74250000 */
+ __u32 hfrontporch; /* Horizontal front porch in pixels */
+ __u32 hsync; /* Horizontal Sync length in pixels */
+ __u32 hbackporch; /* Horizontal back porch in pixels */
+ __u32 vfrontporch; /* Vertical front porch in lines */
+ __u32 vsync; /* Vertical Sync length in lines */
+ __u32 vbackporch; /* Vertical back porch in lines */
+ __u32 il_vfrontporch; /* Vertical front porch for bottom field of
+ * interlaced field formats
+ */
+ __u32 il_vsync; /* Vertical sync length for bottom field of
+ * interlaced field formats
+ */
+ __u32 il_vbackporch; /* Vertical back porch for bottom field of
+ * interlaced field formats
+ */
+ __u32 reserved[16];
+} __attribute__ ((packed));
+
+/* Interlaced or progressive format */
+#define V4L2_DV_PROGRESSIVE 0
+#define V4L2_DV_INTERLACED 1
+
+/* Polarities. If bit is not set, it is assumed to be negative polarity */
+#define V4L2_DV_VSYNC_POS_POL 0x00000001
+#define V4L2_DV_HSYNC_POS_POL 0x00000002
+
+
+/* DV timings */
+struct v4l2_dv_timings {
+ __u32 type;
+ union {
+ struct v4l2_bt_timings bt;
+ __u32 reserved[32];
+ };
+} __attribute__ ((packed));
+
+/* Values for the type field */
+#define V4L2_DV_BT_656_1120 0 /* BT.656/1120 timing type */
+
+/*
* V I D E O I N P U T S
*/
struct v4l2_input {
@@ -742,7 +840,8 @@ struct v4l2_input {
__u32 tuner; /* Associated tuner */
v4l2_std_id std;
__u32 status;
- __u32 reserved[4];
+ __u32 capabilities;
+ __u32 reserved[3];
};
/* Values for the 'type' field */
@@ -773,6 +872,11 @@ struct v4l2_input {
#define V4L2_IN_ST_NO_ACCESS 0x02000000 /* Conditional access denied */
#define V4L2_IN_ST_VTR 0x04000000 /* VTR time constant */
+/* capabilities flags */
+#define V4L2_IN_CAP_PRESETS 0x00000001 /* Supports S_DV_PRESET */
+#define V4L2_IN_CAP_CUSTOM_TIMINGS 0x00000002 /* Supports S_DV_TIMINGS */
+#define V4L2_IN_CAP_STD 0x00000004 /* Supports S_STD */
+
/*
* V I D E O O U T P U T S
*/
@@ -783,13 +887,19 @@ struct v4l2_output {
__u32 audioset; /* Associated audios (bitfield) */
__u32 modulator; /* Associated modulator */
v4l2_std_id std;
- __u32 reserved[4];
+ __u32 capabilities;
+ __u32 reserved[3];
};
/* Values for the 'type' field */
#define V4L2_OUTPUT_TYPE_MODULATOR 1
#define V4L2_OUTPUT_TYPE_ANALOG 2
#define V4L2_OUTPUT_TYPE_ANALOGVGAOVERLAY 3
+/* capabilities flags */
+#define V4L2_OUT_CAP_PRESETS 0x00000001 /* Supports S_DV_PRESET */
+#define V4L2_OUT_CAP_CUSTOM_TIMINGS 0x00000002 /* Supports S_DV_TIMINGS */
+#define V4L2_OUT_CAP_STD 0x00000004 /* Supports S_STD */
+
/*
* C O N T R O L S
*/
@@ -1624,6 +1734,13 @@ struct v4l2_dbg_chip_ident {
#endif
#define VIDIOC_S_HW_FREQ_SEEK _IOW('V', 82, struct v4l2_hw_freq_seek)
+#define VIDIOC_ENUM_DV_PRESETS _IOWR('V', 83, struct v4l2_dv_enum_preset)
+#define VIDIOC_S_DV_PRESET _IOWR('V', 84, struct v4l2_dv_preset)
+#define VIDIOC_G_DV_PRESET _IOWR('V', 85, struct v4l2_dv_preset)
+#define VIDIOC_QUERY_DV_PRESET _IOR('V', 86, struct v4l2_dv_preset)
+#define VIDIOC_S_DV_TIMINGS _IOWR('V', 87, struct v4l2_dv_timings)
+#define VIDIOC_G_DV_TIMINGS _IOWR('V', 88, struct v4l2_dv_timings)
+
/* Reminder: when adding new ioctls please add support for them to
drivers/media/video/v4l2-compat-ioctl32.c as well! */
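A hedged userspace sketch exercising the new preset ioctls; the device node and chosen preset are illustrative:

#include <fcntl.h>
#include <string.h>
#include <unistd.h>
#include <sys/ioctl.h>
#include <linux/videodev2.h>

static int example_set_720p60(const char *devnode)
{
	struct v4l2_dv_preset preset;
	int fd = open(devnode, O_RDWR);

	if (fd < 0)
		return -1;

	memset(&preset, 0, sizeof(preset));
	preset.preset = V4L2_DV_720P60;

	if (ioctl(fd, VIDIOC_S_DV_PRESET, &preset) < 0) {
		close(fd);
		return -1;
	}
	close(fd);
	return 0;
}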
diff --git a/include/linux/vmstat.h b/include/linux/vmstat.h
index 2d0f222388a..ee03bba9c5d 100644
--- a/include/linux/vmstat.h
+++ b/include/linux/vmstat.h
@@ -40,6 +40,8 @@ enum vm_event_item { PGPGIN, PGPGOUT, PSWPIN, PSWPOUT,
PGSCAN_ZONE_RECLAIM_FAILED,
#endif
PGINODESTEAL, SLABS_SCANNED, KSWAPD_STEAL, KSWAPD_INODESTEAL,
+ KSWAPD_LOW_WMARK_HIT_QUICKLY, KSWAPD_HIGH_WMARK_HIT_QUICKLY,
+ KSWAPD_SKIP_CONGESTION_WAIT,
PAGEOUTRUN, ALLOCSTALL, PGROTATED,
#ifdef CONFIG_HUGETLB_PAGE
HTLB_BUDDY_PGALLOC, HTLB_BUDDY_PGALLOC_FAIL,
@@ -76,24 +78,22 @@ DECLARE_PER_CPU(struct vm_event_state, vm_event_states);
static inline void __count_vm_event(enum vm_event_item item)
{
- __get_cpu_var(vm_event_states).event[item]++;
+ __this_cpu_inc(per_cpu_var(vm_event_states).event[item]);
}
static inline void count_vm_event(enum vm_event_item item)
{
- get_cpu_var(vm_event_states).event[item]++;
- put_cpu();
+ this_cpu_inc(per_cpu_var(vm_event_states).event[item]);
}
static inline void __count_vm_events(enum vm_event_item item, long delta)
{
- __get_cpu_var(vm_event_states).event[item] += delta;
+ __this_cpu_add(per_cpu_var(vm_event_states).event[item], delta);
}
static inline void count_vm_events(enum vm_event_item item, long delta)
{
- get_cpu_var(vm_event_states).event[item] += delta;
- put_cpu();
+ this_cpu_add(per_cpu_var(vm_event_states).event[item], delta);
}
extern void all_vm_events(unsigned long *);
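The counters now use the preemption-safe this_cpu operations directly. A trivial hedged sketch of callers, using event items from the enum above:

static void example_vm_accounting(void)
{
	count_vm_event(PGROTATED);	/* single increment, no get/put_cpu dance */
	count_vm_events(PGPGIN, 8);	/* batched add */
}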
diff --git a/include/linux/vt.h b/include/linux/vt.h
index 7ffa11f0623..3fb9944e50a 100644
--- a/include/linux/vt.h
+++ b/include/linux/vt.h
@@ -84,4 +84,19 @@ struct vt_setactivate {
#define VT_SETACTIVATE 0x560F /* Activate and set the mode of a console */
+#ifdef CONFIG_VT_CONSOLE
+
+extern int vt_kmsg_redirect(int new);
+
+#else
+
+static inline int vt_kmsg_redirect(int new)
+{
+ return 0;
+}
+
+#endif
+
+#define vt_get_kmsg_redirect() vt_kmsg_redirect(-1)
+
#endif /* _LINUX_VT_H */
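A hedged sketch of the intended calling pattern; it assumes vt_kmsg_redirect() hands back the previous target, which the vt_get_kmsg_redirect() wrapper above relies on:

static void example_redirect_kmsg(int new_console)
{
	int old = vt_kmsg_redirect(new_console);	/* install new target, get old one */

	/* ... kernel messages now go to new_console ... */

	vt_kmsg_redirect(old);				/* restore */
}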