Diffstat (limited to 'include/linux')
-rw-r--r--include/linux/Kbuild4
-rw-r--r--include/linux/acpi.h56
-rw-r--r--include/linux/aer.h5
-rw-r--r--include/linux/aio.h14
-rw-r--r--include/linux/atm.h17
-rw-r--r--include/linux/atmdev.h15
-rw-r--r--include/linux/audit.h32
-rw-r--r--include/linux/auto_dev-ioctl.h157
-rw-r--r--include/linux/auto_fs4.h7
-rw-r--r--include/linux/backing-dev.h13
-rw-r--r--include/linux/bcd.h16
-rw-r--r--include/linux/binfmts.h21
-rw-r--r--include/linux/bio.h65
-rw-r--r--include/linux/bitmap.h2
-rw-r--r--include/linux/blkdev.h105
-rw-r--r--include/linux/blktrace_api.h146
-rw-r--r--include/linux/bottom_half.h1
-rw-r--r--include/linux/buffer_head.h3
-rw-r--r--include/linux/byteorder/Kbuild1
-rw-r--r--include/linux/byteorder/big_endian.h1
-rw-r--r--include/linux/byteorder/little_endian.h1
-rw-r--r--include/linux/c2port.h65
-rw-r--r--include/linux/can/core.h2
-rw-r--r--include/linux/capability.h25
-rw-r--r--include/linux/cdrom.h10
-rw-r--r--include/linux/cgroup.h35
-rw-r--r--include/linux/cgroup_subsys.h12
-rw-r--r--include/linux/clk.h4
-rw-r--r--include/linux/clocksource.h14
-rw-r--r--include/linux/cnt32_to_63.h22
-rw-r--r--include/linux/compat.h8
-rw-r--r--include/linux/compiler.h86
-rw-r--r--include/linux/console.h4
-rw-r--r--include/linux/cpumask.h559
-rw-r--r--include/linux/cpuset.h4
-rw-r--r--include/linux/crash_dump.h40
-rw-r--r--include/linux/crc32c.h6
-rw-r--r--include/linux/cred.h342
-rw-r--r--include/linux/crypto.h10
-rw-r--r--include/linux/dcache.h3
-rw-r--r--include/linux/dcbnl.h340
-rw-r--r--include/linux/dccp.h42
-rw-r--r--include/linux/debug_locks.h2
-rw-r--r--include/linux/device-mapper.h14
-rw-r--r--include/linux/device.h20
-rw-r--r--include/linux/dm-region-hash.h104
-rw-r--r--include/linux/dma_remapping.h156
-rw-r--r--include/linux/dmar.h1
-rw-r--r--include/linux/dmi.h5
-rw-r--r--include/linux/ds1286.h2
-rw-r--r--include/linux/dvb/frontend.h2
-rw-r--r--include/linux/dynamic_printk.h93
-rw-r--r--include/linux/efi.h4
-rw-r--r--include/linux/elevator.h8
-rw-r--r--include/linux/etherdevice.h46
-rw-r--r--include/linux/ethtool.h2
-rw-r--r--include/linux/ext2_fs.h2
-rw-r--r--include/linux/ext3_fs.h6
-rw-r--r--include/linux/ext3_jbd.h14
-rw-r--r--include/linux/fault-inject.h9
-rw-r--r--include/linux/fb.h3
-rw-r--r--include/linux/fddidevice.h1
-rw-r--r--include/linux/file.h4
-rw-r--r--include/linux/filter.h3
-rw-r--r--include/linux/firewire-cdev.h9
-rw-r--r--include/linux/freezer.h40
-rw-r--r--include/linux/fs.h96
-rw-r--r--include/linux/fsl_devices.h17
-rw-r--r--include/linux/fsnotify.h2
-rw-r--r--include/linux/ftrace.h374
-rw-r--r--include/linux/ftrace_irq.h13
-rw-r--r--include/linux/fuse.h12
-rw-r--r--include/linux/futex.h5
-rw-r--r--include/linux/gameport.h7
-rw-r--r--include/linux/genhd.h8
-rw-r--r--include/linux/gpio.h3
-rw-r--r--include/linux/hardirq.h28
-rw-r--r--include/linux/hdlc.h4
-rw-r--r--include/linux/hid.h7
-rw-r--r--include/linux/highmem.h2
-rw-r--r--include/linux/hippidevice.h4
-rw-r--r--include/linux/hrtimer.h152
-rw-r--r--include/linux/hugetlb.h6
-rw-r--r--include/linux/i2c-algo-pcf.h5
-rw-r--r--include/linux/i2c-id.h2
-rw-r--r--include/linux/i2c.h163
-rw-r--r--include/linux/i2c/twl4030.h343
-rw-r--r--include/linux/i2o.h292
-rw-r--r--include/linux/i7300_idle.h83
-rw-r--r--include/linux/icmpv6.h6
-rw-r--r--include/linux/ide.h84
-rw-r--r--include/linux/idr.h3
-rw-r--r--include/linux/ieee80211.h212
-rw-r--r--include/linux/if.h1
-rw-r--r--include/linux/if_arp.h3
-rw-r--r--include/linux/if_vlan.h7
-rw-r--r--include/linux/in.h4
-rw-r--r--include/linux/init.h12
-rw-r--r--include/linux/init_task.h15
-rw-r--r--include/linux/inotify.h11
-rw-r--r--include/linux/input.h19
-rw-r--r--include/linux/intel-iommu.h363
-rw-r--r--include/linux/interrupt.h29
-rw-r--r--include/linux/io-mapping.h125
-rw-r--r--include/linux/iommu-helper.h3
-rw-r--r--include/linux/ioport.h8
-rw-r--r--include/linux/iova.h52
-rw-r--r--include/linux/ipv6.h1
-rw-r--r--include/linux/irq.h127
-rw-r--r--include/linux/irqnr.h38
-rw-r--r--include/linux/jbd.h9
-rw-r--r--include/linux/jbd2.h13
-rw-r--r--include/linux/jiffies.h10
-rw-r--r--include/linux/journal-head.h2
-rw-r--r--include/linux/kallsyms.h8
-rw-r--r--include/linux/kernel.h134
-rw-r--r--include/linux/kernel_stat.h35
-rw-r--r--include/linux/kexec.h4
-rw-r--r--include/linux/key-ui.h66
-rw-r--r--include/linux/key.h32
-rw-r--r--include/linux/keyctl.h4
-rw-r--r--include/linux/kmod.h3
-rw-r--r--include/linux/kprobes.h5
-rw-r--r--include/linux/kvm.h78
-rw-r--r--include/linux/kvm_host.h87
-rw-r--r--include/linux/leds.h4
-rw-r--r--include/linux/lguest_launcher.h6
-rw-r--r--include/linux/libata.h78
-rw-r--r--include/linux/linkage.h10
-rw-r--r--include/linux/list_nulls.h94
-rw-r--r--include/linux/lockd/bind.h1
-rw-r--r--include/linux/lockd/lockd.h4
-rw-r--r--include/linux/lockdep.h50
-rw-r--r--include/linux/map_to_7segment.h187
-rw-r--r--include/linux/marker.h78
-rw-r--r--include/linux/mdio-gpio.h25
-rw-r--r--include/linux/memcontrol.h34
-rw-r--r--include/linux/memory.h2
-rw-r--r--include/linux/mfd/da903x.h201
-rw-r--r--include/linux/mfd/t7l66xb.h2
-rw-r--r--include/linux/mfd/tc6387xb.h3
-rw-r--r--include/linux/mfd/tc6393xb.h17
-rw-r--r--include/linux/mfd/tmio.h19
-rw-r--r--include/linux/mfd/wm8350/audio.h38
-rw-r--r--include/linux/mfd/wm8350/rtc.h2
-rw-r--r--include/linux/migrate.h3
-rw-r--r--include/linux/mii.h33
-rw-r--r--include/linux/mlx4/cmd.h9
-rw-r--r--include/linux/mlx4/device.h59
-rw-r--r--include/linux/mm.h30
-rw-r--r--include/linux/mm_inline.h98
-rw-r--r--include/linux/mm_types.h8
-rw-r--r--include/linux/mmc/card.h2
-rw-r--r--include/linux/mmc/host.h2
-rw-r--r--include/linux/mmc/sdio_func.h2
-rw-r--r--include/linux/mmiotrace.h20
-rw-r--r--include/linux/mmzone.h105
-rw-r--r--include/linux/mod_devicetable.h3
-rw-r--r--include/linux/module.h24
-rw-r--r--include/linux/moduleparam.h25
-rw-r--r--include/linux/mount.h2
-rw-r--r--include/linux/mroute6.h26
-rw-r--r--include/linux/msdos_fs.h281
-rw-r--r--include/linux/msi.h3
-rw-r--r--include/linux/mtd/cfi.h31
-rw-r--r--include/linux/mtd/flashchip.h4
-rw-r--r--include/linux/mtd/mtd.h4
-rw-r--r--include/linux/mtd/nand-gpio.h19
-rw-r--r--include/linux/mtd/nand.h1
-rw-r--r--include/linux/mtd/onenand_regs.h2
-rw-r--r--include/linux/mtd/partitions.h1
-rw-r--r--include/linux/mtd/sh_flctl.h125
-rw-r--r--include/linux/mutex.h2
-rw-r--r--include/linux/namei.h8
-rw-r--r--include/linux/net.h6
-rw-r--r--include/linux/netdevice.h406
-rw-r--r--include/linux/netfilter/nfnetlink.h3
-rw-r--r--include/linux/netfilter/nfnetlink_conntrack.h1
-rw-r--r--include/linux/netfilter/x_tables.h2
-rw-r--r--include/linux/netfilter_bridge/ebtables.h3
-rw-r--r--include/linux/netfilter_ipv4/ipt_policy.h2
-rw-r--r--include/linux/netfilter_ipv6/ip6t_policy.h2
-rw-r--r--include/linux/netlink.h3
-rw-r--r--include/linux/netpoll.h5
-rw-r--r--include/linux/nfs_fs.h44
-rw-r--r--include/linux/nfs_fs_sb.h7
-rw-r--r--include/linux/nfs_mount.h7
-rw-r--r--include/linux/nfs_xdr.h18
-rw-r--r--include/linux/nfsd/state.h2
-rw-r--r--include/linux/nl80211.h223
-rw-r--r--include/linux/nsproxy.h1
-rw-r--r--include/linux/of.h9
-rw-r--r--include/linux/of_gpio.h44
-rw-r--r--include/linux/of_platform.h3
-rw-r--r--include/linux/oprofile.h19
-rw-r--r--include/linux/page-flags.h55
-rw-r--r--include/linux/page_cgroup.h108
-rw-r--r--include/linux/pagemap.h44
-rw-r--r--include/linux/pagevec.h34
-rw-r--r--include/linux/parport.h2
-rw-r--r--include/linux/pci.h52
-rw-r--r--include/linux/pci_hotplug.h11
-rw-r--r--include/linux/pci_ids.h21
-rw-r--r--include/linux/pci_regs.h16
-rw-r--r--include/linux/pfn.h6
-rw-r--r--include/linux/phonet.h1
-rw-r--r--include/linux/phy.h2
-rw-r--r--include/linux/pid.h4
-rw-r--r--include/linux/pid_namespace.h6
-rw-r--r--include/linux/pkt_cls.h14
-rw-r--r--include/linux/pkt_sched.h16
-rw-r--r--include/linux/platform_device.h2
-rw-r--r--include/linux/pm.h2
-rw-r--r--include/linux/pnp.h16
-rw-r--r--include/linux/poll.h8
-rw-r--r--include/linux/posix-timers.h10
-rw-r--r--include/linux/power_supply.h6
-rw-r--r--include/linux/prctl.h7
-rw-r--r--include/linux/proc_fs.h6
-rw-r--r--include/linux/profile.h13
-rw-r--r--include/linux/ptrace.h23
-rw-r--r--include/linux/quota.h2
-rw-r--r--include/linux/quotaops.h3
-rw-r--r--include/linux/raid/linear.h10
-rw-r--r--include/linux/raid/md.h32
-rw-r--r--include/linux/raid/md_k.h8
-rw-r--r--include/linux/random.h51
-rw-r--r--include/linux/ratelimit.h7
-rw-r--r--include/linux/rcuclassic.h2
-rw-r--r--include/linux/rculist_nulls.h110
-rw-r--r--include/linux/rcupdate.h12
-rw-r--r--include/linux/rcutree.h329
-rw-r--r--include/linux/reiserfs_fs.h2
-rw-r--r--include/linux/reiserfs_fs_sb.h1
-rw-r--r--include/linux/resource.h4
-rw-r--r--include/linux/rfkill.h8
-rw-r--r--include/linux/ring_buffer.h140
-rw-r--r--include/linux/rio_drv.h4
-rw-r--r--include/linux/rmap.h29
-rw-r--r--include/linux/rtmutex.h2
-rw-r--r--include/linux/rtnetlink.h5
-rw-r--r--include/linux/sched.h230
-rw-r--r--include/linux/securebits.h2
-rw-r--r--include/linux/security.h348
-rw-r--r--include/linux/seq_file.h14
-rw-r--r--include/linux/serial_core.h5
-rw-r--r--include/linux/sh_intc.h91
-rw-r--r--include/linux/skbuff.h54
-rw-r--r--include/linux/slab.h43
-rw-r--r--include/linux/smc911x.h1
-rw-r--r--include/linux/smp.h15
-rw-r--r--include/linux/smsc911x.h47
-rw-r--r--include/linux/snmp.h3
-rw-r--r--include/linux/spi/orion_spi.h1
-rw-r--r--include/linux/spi/spi_bitbang.h3
-rw-r--r--include/linux/ssb/ssb.h42
-rw-r--r--include/linux/stacktrace.h8
-rw-r--r--include/linux/string.h2
-rw-r--r--include/linux/sunrpc/clnt.h2
-rw-r--r--include/linux/sunrpc/rpc_pipe_fs.h1
-rw-r--r--include/linux/sunrpc/svc_xprt.h8
-rw-r--r--include/linux/sunrpc/svcauth_gss.h1
-rw-r--r--include/linux/sunrpc/xdr.h15
-rw-r--r--include/linux/sunrpc/xprt.h3
-rw-r--r--include/linux/sunrpc/xprtrdma.h4
-rw-r--r--include/linux/swab.h10
-rw-r--r--include/linux/swap.h69
-rw-r--r--include/linux/swiotlb.h105
-rw-r--r--include/linux/syscalls.h3
-rw-r--r--include/linux/sysctl.h2
-rw-r--r--include/linux/sysfs.h41
-rw-r--r--include/linux/task_io_accounting.h2
-rw-r--r--include/linux/telephony.h6
-rw-r--r--include/linux/thread_info.h8
-rw-r--r--include/linux/tick.h7
-rw-r--r--include/linux/time.h11
-rw-r--r--include/linux/timer.h5
-rw-r--r--include/linux/timex.h82
-rw-r--r--include/linux/topology.h10
-rw-r--r--include/linux/tracepoint.h156
-rw-r--r--include/linux/tty.h6
-rw-r--r--include/linux/types.h20
-rw-r--r--include/linux/uaccess.h2
-rw-r--r--include/linux/usb.h9
-rw-r--r--include/linux/usb/Kbuild3
-rw-r--r--include/linux/usb/cdc.h9
-rw-r--r--include/linux/usb/ch9.h8
-rw-r--r--include/linux/usb/composite.h11
-rw-r--r--include/linux/usb/serial.h2
-rw-r--r--include/linux/usb/tmc.h43
-rw-r--r--include/linux/usb/vstusb.h71
-rw-r--r--include/linux/usb/wusb-wa.h271
-rw-r--r--include/linux/usb/wusb.h376
-rw-r--r--include/linux/user_namespace.h13
-rw-r--r--include/linux/uwb.h765
-rw-r--r--include/linux/uwb/debug-cmd.h57
-rw-r--r--include/linux/uwb/debug.h82
-rw-r--r--include/linux/uwb/spec.h727
-rw-r--r--include/linux/uwb/umc.h194
-rw-r--r--include/linux/uwb/whci.h117
-rw-r--r--include/linux/videodev2.h21
-rw-r--r--include/linux/virtio_balloon.h3
-rw-r--r--include/linux/virtio_console.h11
-rw-r--r--include/linux/virtio_net.h9
-rw-r--r--include/linux/virtio_pci.h8
-rw-r--r--include/linux/virtio_ring.h13
-rw-r--r--include/linux/vmalloc.h17
-rw-r--r--include/linux/vmstat.h24
-rw-r--r--include/linux/wait.h9
-rw-r--r--include/linux/wlp.h735
-rw-r--r--include/linux/workqueue.h26
-rw-r--r--include/linux/writeback.h10
-rw-r--r--include/linux/xfrm.h14
313 files changed, 12661 insertions, 2434 deletions
diff --git a/include/linux/Kbuild b/include/linux/Kbuild
index 282a504bd1d..95ac82340c3 100644
--- a/include/linux/Kbuild
+++ b/include/linux/Kbuild
@@ -107,6 +107,7 @@ header-y += keyctl.h
header-y += limits.h
header-y += magic.h
header-y += major.h
+header-y += map_to_7segment.h
header-y += matroxfb.h
header-y += meye.h
header-y += minix_fs.h
@@ -182,6 +183,7 @@ unifdef-y += auto_fs.h
unifdef-y += auxvec.h
unifdef-y += binfmts.h
unifdef-y += blktrace_api.h
+unifdef-y += byteorder.h
unifdef-y += capability.h
unifdef-y += capi.h
unifdef-y += cciss_ioctl.h
@@ -311,6 +313,7 @@ unifdef-y += ptrace.h
unifdef-y += qnx4_fs.h
unifdef-y += quota.h
unifdef-y += random.h
+unifdef-y += irqnr.h
unifdef-y += reboot.h
unifdef-y += reiserfs_fs.h
unifdef-y += reiserfs_xattr.h
@@ -339,6 +342,7 @@ unifdef-y += soundcard.h
unifdef-y += stat.h
unifdef-y += stddef.h
unifdef-y += string.h
+unifdef-y += swab.h
unifdef-y += synclink.h
unifdef-y += sysctl.h
unifdef-y += tcp.h
diff --git a/include/linux/acpi.h b/include/linux/acpi.h
index 702f79dad16..fba8051fb29 100644
--- a/include/linux/acpi.h
+++ b/include/linux/acpi.h
@@ -94,18 +94,10 @@ int acpi_parse_mcfg (struct acpi_table_header *header);
void acpi_table_print_madt_entry (struct acpi_subtable_header *madt);
/* the following four functions are architecture-dependent */
-#ifdef CONFIG_HAVE_ARCH_PARSE_SRAT
-#define NR_NODE_MEMBLKS MAX_NUMNODES
-#define acpi_numa_slit_init(slit) do {} while (0)
-#define acpi_numa_processor_affinity_init(pa) do {} while (0)
-#define acpi_numa_memory_affinity_init(ma) do {} while (0)
-#define acpi_numa_arch_fixup() do {} while (0)
-#else
void acpi_numa_slit_init (struct acpi_table_slit *slit);
void acpi_numa_processor_affinity_init (struct acpi_srat_cpu_affinity *pa);
void acpi_numa_memory_affinity_init (struct acpi_srat_mem_affinity *ma);
void acpi_numa_arch_fixup(void);
-#endif
#ifdef CONFIG_ACPI_HOTPLUG_CPU
/* Arch dependent functions for cpu hotplug support */
@@ -171,8 +163,6 @@ struct acpi_pci_driver {
int acpi_pci_register_driver(struct acpi_pci_driver *driver);
void acpi_pci_unregister_driver(struct acpi_pci_driver *driver);
-#ifdef CONFIG_ACPI_EC
-
extern int ec_read(u8 addr, u8 *val);
extern int ec_write(u8 addr, u8 val);
extern int ec_transaction(u8 command,
@@ -180,8 +170,6 @@ extern int ec_transaction(u8 command,
u8 *rdata, unsigned rdata_len,
int force_poll);
-#endif /*CONFIG_ACPI_EC*/
-
#if defined(CONFIG_ACPI_WMI) || defined(CONFIG_ACPI_WMI_MODULE)
typedef void (*wmi_notify_handler) (u32 value, void *context);
@@ -202,6 +190,50 @@ extern bool wmi_has_guid(const char *guid);
#endif /* CONFIG_ACPI_WMI */
+#define ACPI_VIDEO_OUTPUT_SWITCHING 0x0001
+#define ACPI_VIDEO_DEVICE_POSTING 0x0002
+#define ACPI_VIDEO_ROM_AVAILABLE 0x0004
+#define ACPI_VIDEO_BACKLIGHT 0x0008
+#define ACPI_VIDEO_BACKLIGHT_FORCE_VENDOR 0x0010
+#define ACPI_VIDEO_BACKLIGHT_FORCE_VIDEO 0x0020
+#define ACPI_VIDEO_OUTPUT_SWITCHING_FORCE_VENDOR 0x0040
+#define ACPI_VIDEO_OUTPUT_SWITCHING_FORCE_VIDEO 0x0080
+#define ACPI_VIDEO_BACKLIGHT_DMI_VENDOR 0x0100
+#define ACPI_VIDEO_BACKLIGHT_DMI_VIDEO 0x0200
+#define ACPI_VIDEO_OUTPUT_SWITCHING_DMI_VENDOR 0x0400
+#define ACPI_VIDEO_OUTPUT_SWITCHING_DMI_VIDEO 0x0800
+
+#if defined(CONFIG_ACPI_VIDEO) || defined(CONFIG_ACPI_VIDEO_MODULE)
+
+extern long acpi_video_get_capabilities(acpi_handle graphics_dev_handle);
+extern long acpi_is_video_device(struct acpi_device *device);
+extern int acpi_video_backlight_support(void);
+extern int acpi_video_display_switch_support(void);
+
+#else
+
+static inline long acpi_video_get_capabilities(acpi_handle graphics_dev_handle)
+{
+ return 0;
+}
+
+static inline long acpi_is_video_device(struct acpi_device *device)
+{
+ return 0;
+}
+
+static inline int acpi_video_backlight_support(void)
+{
+ return 0;
+}
+
+static inline int acpi_video_display_switch_support(void)
+{
+ return 0;
+}
+
+#endif /* defined(CONFIG_ACPI_VIDEO) || defined(CONFIG_ACPI_VIDEO_MODULE) */
+
extern int acpi_blacklisted(void);
#ifdef CONFIG_DMI
extern void acpi_dmi_osi_linux(int enable, const struct dmi_system_id *d);
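For illustration (not part of this patch): how a vendor laptop driver might consult the new ACPI video helper before registering its own backlight interface; the function name below is hypothetical.

	#include <linux/acpi.h>

	/* Hypothetical helper: defer to the generic ACPI video driver when it
	 * already provides backlight control on this platform. */
	static int example_setup_backlight(void)
	{
		if (acpi_video_backlight_support())
			return 0;	/* ACPI video handles it; do nothing */

		/* ... register a vendor-specific backlight device here ... */
		return 0;
	}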
diff --git a/include/linux/aer.h b/include/linux/aer.h
index f2518141de8..f7df1eefc10 100644
--- a/include/linux/aer.h
+++ b/include/linux/aer.h
@@ -10,7 +10,6 @@
#if defined(CONFIG_PCIEAER)
/* pci-e port driver needs this function to enable aer */
extern int pci_enable_pcie_error_reporting(struct pci_dev *dev);
-extern int pci_find_aer_capability(struct pci_dev *dev);
extern int pci_disable_pcie_error_reporting(struct pci_dev *dev);
extern int pci_cleanup_aer_uncorrect_error_status(struct pci_dev *dev);
#else
@@ -18,10 +17,6 @@ static inline int pci_enable_pcie_error_reporting(struct pci_dev *dev)
{
return -EINVAL;
}
-static inline int pci_find_aer_capability(struct pci_dev *dev)
-{
- return 0;
-}
static inline int pci_disable_pcie_error_reporting(struct pci_dev *dev)
{
return -EINVAL;
diff --git a/include/linux/aio.h b/include/linux/aio.h
index 09b276c3522..b16a957030f 100644
--- a/include/linux/aio.h
+++ b/include/linux/aio.h
@@ -5,6 +5,7 @@
#include <linux/workqueue.h>
#include <linux/aio_abi.h>
#include <linux/uio.h>
+#include <linux/rcupdate.h>
#include <asm/atomic.h>
@@ -183,7 +184,7 @@ struct kioctx {
/* This needs improving */
unsigned long user_id;
- struct kioctx *next;
+ struct hlist_node list;
wait_queue_head_t wait;
@@ -199,17 +200,28 @@ struct kioctx {
struct aio_ring_info ring_info;
struct delayed_work wq;
+
+ struct rcu_head rcu_head;
};
/* prototypes */
extern unsigned aio_max_size;
+#ifdef CONFIG_AIO
extern ssize_t wait_on_sync_kiocb(struct kiocb *iocb);
extern int aio_put_req(struct kiocb *iocb);
extern void kick_iocb(struct kiocb *iocb);
extern int aio_complete(struct kiocb *iocb, long res, long res2);
struct mm_struct;
extern void exit_aio(struct mm_struct *mm);
+#else
+static inline ssize_t wait_on_sync_kiocb(struct kiocb *iocb) { return 0; }
+static inline int aio_put_req(struct kiocb *iocb) { return 0; }
+static inline void kick_iocb(struct kiocb *iocb) { }
+static inline int aio_complete(struct kiocb *iocb, long res, long res2) { return 0; }
+struct mm_struct;
+static inline void exit_aio(struct mm_struct *mm) { }
+#endif /* CONFIG_AIO */
#define io_wait_to_kiocb(wait) container_of(wait, struct kiocb, ki_wait)
diff --git a/include/linux/atm.h b/include/linux/atm.h
index c791ddd9693..d3b292174ae 100644
--- a/include/linux/atm.h
+++ b/include/linux/atm.h
@@ -231,10 +231,21 @@ static __inline__ int atmpvc_addr_in_use(struct sockaddr_atmpvc addr)
*/
struct atmif_sioc {
- int number;
- int length;
- void __user *arg;
+ int number;
+ int length;
+ void __user *arg;
};
+#ifdef __KERNEL__
+#ifdef CONFIG_COMPAT
+#include <linux/compat.h>
+struct compat_atmif_sioc {
+ int number;
+ int length;
+ compat_uptr_t arg;
+};
+#endif
+#endif
+
typedef unsigned short atm_backend_t;
#endif
diff --git a/include/linux/atmdev.h b/include/linux/atmdev.h
index a3d07c29d16..086e5c362d3 100644
--- a/include/linux/atmdev.h
+++ b/include/linux/atmdev.h
@@ -100,6 +100,10 @@ struct atm_dev_stats {
/* use backend to make new if */
#define ATM_ADDPARTY _IOW('a', ATMIOC_SPECIAL+4,struct atm_iobuf)
/* add party to p2mp call */
+#ifdef CONFIG_COMPAT
+/* It actually takes struct sockaddr_atmsvc, not struct atm_iobuf */
+#define COMPAT_ATM_ADDPARTY _IOW('a', ATMIOC_SPECIAL+4,struct compat_atm_iobuf)
+#endif
#define ATM_DROPPARTY _IOW('a', ATMIOC_SPECIAL+5,int)
/* drop party from p2mp call */
@@ -224,6 +228,13 @@ struct atm_cirange {
extern struct proc_dir_entry *atm_proc_root;
#endif
+#ifdef CONFIG_COMPAT
+#include <linux/compat.h>
+struct compat_atm_iobuf {
+ int length;
+ compat_uptr_t buffer;
+};
+#endif
struct k_atm_aal_stats {
#define __HANDLE_ITEM(i) atomic_t i
@@ -379,6 +390,10 @@ struct atmdev_ops { /* only send is required */
int (*open)(struct atm_vcc *vcc);
void (*close)(struct atm_vcc *vcc);
int (*ioctl)(struct atm_dev *dev,unsigned int cmd,void __user *arg);
+#ifdef CONFIG_COMPAT
+ int (*compat_ioctl)(struct atm_dev *dev,unsigned int cmd,
+ void __user *arg);
+#endif
int (*getsockopt)(struct atm_vcc *vcc,int level,int optname,
void __user *optval,int optlen);
int (*setsockopt)(struct atm_vcc *vcc,int level,int optname,
diff --git a/include/linux/audit.h b/include/linux/audit.h
index 6272a395d43..26c4f6f65a4 100644
--- a/include/linux/audit.h
+++ b/include/linux/audit.h
@@ -99,6 +99,8 @@
#define AUDIT_OBJ_PID 1318 /* ptrace target */
#define AUDIT_TTY 1319 /* Input on an administrative TTY */
#define AUDIT_EOE 1320 /* End of multi-record event */
+#define AUDIT_BPRM_FCAPS 1321 /* Information about fcaps increasing perms */
+#define AUDIT_CAPSET 1322 /* Record showing argument to sys_capset */
#define AUDIT_AVC 1400 /* SE Linux avc denial or grant */
#define AUDIT_SELINUX_ERR 1401 /* Internal SE Linux Errors */
@@ -391,6 +393,7 @@ extern int audit_classify_arch(int arch);
#ifdef CONFIG_AUDITSYSCALL
/* These are defined in auditsc.c */
/* Public API */
+extern void audit_finish_fork(struct task_struct *child);
extern int audit_alloc(struct task_struct *task);
extern void audit_free(struct task_struct *task);
extern void audit_syscall_entry(int arch,
@@ -434,7 +437,7 @@ static inline void audit_ptrace(struct task_struct *t)
/* Private API (for audit.c only) */
extern unsigned int audit_serial(void);
-extern void auditsc_get_stamp(struct audit_context *ctx,
+extern int auditsc_get_stamp(struct audit_context *ctx,
struct timespec *t, unsigned int *serial);
extern int audit_set_loginuid(struct task_struct *task, uid_t loginuid);
#define audit_get_loginuid(t) ((t)->loginuid)
@@ -452,6 +455,10 @@ extern int __audit_mq_timedsend(mqd_t mqdes, size_t msg_len, unsigned int msg_pr
extern int __audit_mq_timedreceive(mqd_t mqdes, size_t msg_len, unsigned int __user *u_msg_prio, const struct timespec __user *u_abs_timeout);
extern int __audit_mq_notify(mqd_t mqdes, const struct sigevent __user *u_notification);
extern int __audit_mq_getsetattr(mqd_t mqdes, struct mq_attr *mqstat);
+extern int __audit_log_bprm_fcaps(struct linux_binprm *bprm,
+ const struct cred *new,
+ const struct cred *old);
+extern int __audit_log_capset(pid_t pid, const struct cred *new, const struct cred *old);
static inline int audit_ipc_obj(struct kern_ipc_perm *ipcp)
{
@@ -501,9 +508,28 @@ static inline int audit_mq_getsetattr(mqd_t mqdes, struct mq_attr *mqstat)
return __audit_mq_getsetattr(mqdes, mqstat);
return 0;
}
+
+static inline int audit_log_bprm_fcaps(struct linux_binprm *bprm,
+ const struct cred *new,
+ const struct cred *old)
+{
+ if (unlikely(!audit_dummy_context()))
+ return __audit_log_bprm_fcaps(bprm, new, old);
+ return 0;
+}
+
+static inline int audit_log_capset(pid_t pid, const struct cred *new,
+ const struct cred *old)
+{
+ if (unlikely(!audit_dummy_context()))
+ return __audit_log_capset(pid, new, old);
+ return 0;
+}
+
extern int audit_n_rules;
extern int audit_signals;
#else
+#define audit_finish_fork(t)
#define audit_alloc(t) ({ 0; })
#define audit_free(t) do { ; } while (0)
#define audit_syscall_entry(ta,a,b,c,d,e) do { ; } while (0)
@@ -516,7 +542,7 @@ extern int audit_signals;
#define audit_inode(n,d) do { ; } while (0)
#define audit_inode_child(d,i,p) do { ; } while (0)
#define audit_core_dumps(i) do { ; } while (0)
-#define auditsc_get_stamp(c,t,s) do { BUG(); } while (0)
+#define auditsc_get_stamp(c,t,s) (0)
#define audit_get_loginuid(t) (-1)
#define audit_get_sessionid(t) (-1)
#define audit_log_task_context(b) do { ; } while (0)
@@ -532,6 +558,8 @@ extern int audit_signals;
#define audit_mq_timedreceive(d,l,p,t) ({ 0; })
#define audit_mq_notify(d,n) ({ 0; })
#define audit_mq_getsetattr(d,s) ({ 0; })
+#define audit_log_bprm_fcaps(b, ncr, ocr) ({ 0; })
+#define audit_log_capset(pid, ncr, ocr) ({ 0; })
#define audit_ptrace(t) ((void)0)
#define audit_n_rules 0
#define audit_signals 0
diff --git a/include/linux/auto_dev-ioctl.h b/include/linux/auto_dev-ioctl.h
new file mode 100644
index 00000000000..f4d05ccd731
--- /dev/null
+++ b/include/linux/auto_dev-ioctl.h
@@ -0,0 +1,157 @@
+/*
+ * Copyright 2008 Red Hat, Inc. All rights reserved.
+ * Copyright 2008 Ian Kent <raven@themaw.net>
+ *
+ * This file is part of the Linux kernel and is made available under
+ * the terms of the GNU General Public License, version 2, or at your
+ * option, any later version, incorporated herein by reference.
+ */
+
+#ifndef _LINUX_AUTO_DEV_IOCTL_H
+#define _LINUX_AUTO_DEV_IOCTL_H
+
+#include <linux/types.h>
+
+#define AUTOFS_DEVICE_NAME "autofs"
+
+#define AUTOFS_DEV_IOCTL_VERSION_MAJOR 1
+#define AUTOFS_DEV_IOCTL_VERSION_MINOR 0
+
+#define AUTOFS_DEVID_LEN 16
+
+#define AUTOFS_DEV_IOCTL_SIZE sizeof(struct autofs_dev_ioctl)
+
+/*
+ * An ioctl interface for autofs mount point control.
+ */
+
+/*
+ * All the ioctls use this structure.
+ * When sending a path, size must account for the total length
+ * of the chunk of memory; otherwise it is the size of the
+ * structure.
+ */
+
+struct autofs_dev_ioctl {
+ __u32 ver_major;
+ __u32 ver_minor;
+ __u32 size; /* total size of data passed in
+ * including this struct */
+ __s32 ioctlfd; /* automount command fd */
+
+ __u32 arg1; /* Command parameters */
+ __u32 arg2;
+
+ char path[0];
+};
+
+static inline void init_autofs_dev_ioctl(struct autofs_dev_ioctl *in)
+{
+ in->ver_major = AUTOFS_DEV_IOCTL_VERSION_MAJOR;
+ in->ver_minor = AUTOFS_DEV_IOCTL_VERSION_MINOR;
+ in->size = sizeof(struct autofs_dev_ioctl);
+ in->ioctlfd = -1;
+ in->arg1 = 0;
+ in->arg2 = 0;
+ return;
+}
+
+/*
+ * If you change this make sure you make the corresponding change
+ * to autofs-dev-ioctl.c:lookup_ioctl()
+ */
+enum {
+ /* Get various version info */
+ AUTOFS_DEV_IOCTL_VERSION_CMD = 0x71,
+ AUTOFS_DEV_IOCTL_PROTOVER_CMD,
+ AUTOFS_DEV_IOCTL_PROTOSUBVER_CMD,
+
+ /* Open mount ioctl fd */
+ AUTOFS_DEV_IOCTL_OPENMOUNT_CMD,
+
+ /* Close mount ioctl fd */
+ AUTOFS_DEV_IOCTL_CLOSEMOUNT_CMD,
+
+ /* Mount/expire status returns */
+ AUTOFS_DEV_IOCTL_READY_CMD,
+ AUTOFS_DEV_IOCTL_FAIL_CMD,
+
+ /* Activate/deactivate autofs mount */
+ AUTOFS_DEV_IOCTL_SETPIPEFD_CMD,
+ AUTOFS_DEV_IOCTL_CATATONIC_CMD,
+
+ /* Expiry timeout */
+ AUTOFS_DEV_IOCTL_TIMEOUT_CMD,
+
+ /* Get mount last requesting uid and gid */
+ AUTOFS_DEV_IOCTL_REQUESTER_CMD,
+
+ /* Check for eligible expire candidates */
+ AUTOFS_DEV_IOCTL_EXPIRE_CMD,
+
+ /* Request busy status */
+ AUTOFS_DEV_IOCTL_ASKUMOUNT_CMD,
+
+ /* Check if path is a mountpoint */
+ AUTOFS_DEV_IOCTL_ISMOUNTPOINT_CMD,
+};
+
+#define AUTOFS_IOCTL 0x93
+
+#define AUTOFS_DEV_IOCTL_VERSION \
+ _IOWR(AUTOFS_IOCTL, \
+ AUTOFS_DEV_IOCTL_VERSION_CMD, struct autofs_dev_ioctl)
+
+#define AUTOFS_DEV_IOCTL_PROTOVER \
+ _IOWR(AUTOFS_IOCTL, \
+ AUTOFS_DEV_IOCTL_PROTOVER_CMD, struct autofs_dev_ioctl)
+
+#define AUTOFS_DEV_IOCTL_PROTOSUBVER \
+ _IOWR(AUTOFS_IOCTL, \
+ AUTOFS_DEV_IOCTL_PROTOSUBVER_CMD, struct autofs_dev_ioctl)
+
+#define AUTOFS_DEV_IOCTL_OPENMOUNT \
+ _IOWR(AUTOFS_IOCTL, \
+ AUTOFS_DEV_IOCTL_OPENMOUNT_CMD, struct autofs_dev_ioctl)
+
+#define AUTOFS_DEV_IOCTL_CLOSEMOUNT \
+ _IOWR(AUTOFS_IOCTL, \
+ AUTOFS_DEV_IOCTL_CLOSEMOUNT_CMD, struct autofs_dev_ioctl)
+
+#define AUTOFS_DEV_IOCTL_READY \
+ _IOWR(AUTOFS_IOCTL, \
+ AUTOFS_DEV_IOCTL_READY_CMD, struct autofs_dev_ioctl)
+
+#define AUTOFS_DEV_IOCTL_FAIL \
+ _IOWR(AUTOFS_IOCTL, \
+ AUTOFS_DEV_IOCTL_FAIL_CMD, struct autofs_dev_ioctl)
+
+#define AUTOFS_DEV_IOCTL_SETPIPEFD \
+ _IOWR(AUTOFS_IOCTL, \
+ AUTOFS_DEV_IOCTL_SETPIPEFD_CMD, struct autofs_dev_ioctl)
+
+#define AUTOFS_DEV_IOCTL_CATATONIC \
+ _IOWR(AUTOFS_IOCTL, \
+ AUTOFS_DEV_IOCTL_CATATONIC_CMD, struct autofs_dev_ioctl)
+
+#define AUTOFS_DEV_IOCTL_TIMEOUT \
+ _IOWR(AUTOFS_IOCTL, \
+ AUTOFS_DEV_IOCTL_TIMEOUT_CMD, struct autofs_dev_ioctl)
+
+#define AUTOFS_DEV_IOCTL_REQUESTER \
+ _IOWR(AUTOFS_IOCTL, \
+ AUTOFS_DEV_IOCTL_REQUESTER_CMD, struct autofs_dev_ioctl)
+
+#define AUTOFS_DEV_IOCTL_EXPIRE \
+ _IOWR(AUTOFS_IOCTL, \
+ AUTOFS_DEV_IOCTL_EXPIRE_CMD, struct autofs_dev_ioctl)
+
+#define AUTOFS_DEV_IOCTL_ASKUMOUNT \
+ _IOWR(AUTOFS_IOCTL, \
+ AUTOFS_DEV_IOCTL_ASKUMOUNT_CMD, struct autofs_dev_ioctl)
+
+#define AUTOFS_DEV_IOCTL_ISMOUNTPOINT \
+ _IOWR(AUTOFS_IOCTL, \
+ AUTOFS_DEV_IOCTL_ISMOUNTPOINT_CMD, struct autofs_dev_ioctl)
+
+#endif /* _LINUX_AUTO_DEV_IOCTL_H */
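A hedged userspace sketch, not from this patch: querying the interface version through the new device, assuming the control node is exposed as /dev/autofs and the header is usable from userspace. The version command carries no path payload, so size stays sizeof(struct autofs_dev_ioctl).

	#include <fcntl.h>
	#include <stdio.h>
	#include <sys/ioctl.h>
	#include <unistd.h>
	#include <linux/auto_dev-ioctl.h>

	int main(void)
	{
		struct autofs_dev_ioctl param;
		int fd = open("/dev/" AUTOFS_DEVICE_NAME, O_RDONLY);

		if (fd < 0) {
			perror("open");
			return 1;
		}

		init_autofs_dev_ioctl(&param);	/* sets size, version, ioctlfd = -1 */

		if (ioctl(fd, AUTOFS_DEV_IOCTL_VERSION, &param) < 0) {
			perror("AUTOFS_DEV_IOCTL_VERSION");
			close(fd);
			return 1;
		}

		printf("autofs device ioctl interface %u.%u\n",
		       param.ver_major, param.ver_minor);
		close(fd);
		return 0;
	}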
diff --git a/include/linux/auto_fs4.h b/include/linux/auto_fs4.h
index b785c6f8644..2253716d4b9 100644
--- a/include/linux/auto_fs4.h
+++ b/include/linux/auto_fs4.h
@@ -23,12 +23,17 @@
#define AUTOFS_MIN_PROTO_VERSION 3
#define AUTOFS_MAX_PROTO_VERSION 5
-#define AUTOFS_PROTO_SUBVERSION 0
+#define AUTOFS_PROTO_SUBVERSION 1
/* Mask for expire behaviour */
#define AUTOFS_EXP_IMMEDIATE 1
#define AUTOFS_EXP_LEAVES 2
+#define AUTOFS_TYPE_ANY 0x0000
+#define AUTOFS_TYPE_INDIRECT 0x0001
+#define AUTOFS_TYPE_DIRECT 0x0002
+#define AUTOFS_TYPE_OFFSET 0x0004
+
/* Daemon notification packet types */
enum autofs_notify {
NFY_NONE,
diff --git a/include/linux/backing-dev.h b/include/linux/backing-dev.h
index 0a24d5550eb..bee52abb8a4 100644
--- a/include/linux/backing-dev.h
+++ b/include/linux/backing-dev.h
@@ -175,6 +175,8 @@ int bdi_set_max_ratio(struct backing_dev_info *bdi, unsigned int max_ratio);
* BDI_CAP_READ_MAP: Can be mapped for reading
* BDI_CAP_WRITE_MAP: Can be mapped for writing
* BDI_CAP_EXEC_MAP: Can be mapped for execution
+ *
+ * BDI_CAP_SWAP_BACKED: Count shmem/tmpfs objects as swap-backed.
*/
#define BDI_CAP_NO_ACCT_DIRTY 0x00000001
#define BDI_CAP_NO_WRITEBACK 0x00000002
@@ -184,6 +186,7 @@ int bdi_set_max_ratio(struct backing_dev_info *bdi, unsigned int max_ratio);
#define BDI_CAP_WRITE_MAP 0x00000020
#define BDI_CAP_EXEC_MAP 0x00000040
#define BDI_CAP_NO_ACCT_WB 0x00000080
+#define BDI_CAP_SWAP_BACKED 0x00000100
#define BDI_CAP_VMFLAGS \
(BDI_CAP_READ_MAP | BDI_CAP_WRITE_MAP | BDI_CAP_EXEC_MAP)
@@ -248,6 +251,11 @@ static inline bool bdi_cap_account_writeback(struct backing_dev_info *bdi)
BDI_CAP_NO_WRITEBACK));
}
+static inline bool bdi_cap_swap_backed(struct backing_dev_info *bdi)
+{
+ return bdi->capabilities & BDI_CAP_SWAP_BACKED;
+}
+
static inline bool mapping_cap_writeback_dirty(struct address_space *mapping)
{
return bdi_cap_writeback_dirty(mapping->backing_dev_info);
@@ -258,4 +266,9 @@ static inline bool mapping_cap_account_dirty(struct address_space *mapping)
return bdi_cap_account_dirty(mapping->backing_dev_info);
}
+static inline bool mapping_cap_swap_backed(struct address_space *mapping)
+{
+ return bdi_cap_swap_backed(mapping->backing_dev_info);
+}
+
#endif /* _LINUX_BACKING_DEV_H */
diff --git a/include/linux/bcd.h b/include/linux/bcd.h
index 7ac518e3c15..22ea563ba3e 100644
--- a/include/linux/bcd.h
+++ b/include/linux/bcd.h
@@ -1,12 +1,3 @@
-/* Permission is hereby granted to copy, modify and redistribute this code
- * in terms of the GNU Library General Public License, Version 2 or later,
- * at your option.
- */
-
-/* macros to translate to/from binary and binary-coded decimal (frequently
- * found in RTC chips).
- */
-
#ifndef _BCD_H
#define _BCD_H
@@ -15,11 +6,4 @@
unsigned bcd2bin(unsigned char val) __attribute_const__;
unsigned char bin2bcd(unsigned val) __attribute_const__;
-#define BCD2BIN(val) bcd2bin(val)
-#define BIN2BCD(val) bin2bcd(val)
-
-/* backwards compat */
-#define BCD_TO_BIN(val) ((val)=BCD2BIN(val))
-#define BIN_TO_BCD(val) ((val)=BIN2BCD(val))
-
#endif /* _BCD_H */
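As an illustrative aside (not in the patch): the function helpers that replace the removed BCD2BIN/BIN2BCD macros, as an RTC driver would use them.

	#include <linux/bcd.h>

	static void example_rtc_conversion(void)
	{
		unsigned seconds = bcd2bin(0x59);	/* BCD 0x59 -> 59 */
		unsigned char reg = bin2bcd(23);	/* 23 -> BCD 0x23 */

		(void)seconds;
		(void)reg;
	}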
diff --git a/include/linux/binfmts.h b/include/linux/binfmts.h
index 826f6235080..6cbfbe29718 100644
--- a/include/linux/binfmts.h
+++ b/include/linux/binfmts.h
@@ -35,12 +35,20 @@ struct linux_binprm{
struct mm_struct *mm;
unsigned long p; /* current top of mem */
unsigned int sh_bang:1,
- misc_bang:1;
+ misc_bang:1,
+ cred_prepared:1,/* true if creds already prepared (multiple
+ * preps happen for interpreters) */
+ cap_effective:1;/* true if has elevated effective capabilities,
+ * false if not; except for init which inherits
+ * its parent's caps anyway */
+#ifdef __alpha__
+ unsigned int taso:1;
+#endif
+ unsigned int recursion_depth;
struct file * file;
- int e_uid, e_gid;
- kernel_cap_t cap_post_exec_permitted;
- bool cap_effective;
- void *security;
+ struct cred *cred; /* new credentials */
+ int unsafe; /* how unsafe this exec is (mask of LSM_UNSAFE_*) */
+ unsigned int per_clear; /* bits to clear in current->personality */
int argc, envc;
char * filename; /* Name of binary as seen by procps */
char * interp; /* Name of the binary really executed. Most
@@ -58,6 +66,7 @@ struct linux_binprm{
#define BINPRM_FLAGS_EXECFD_BIT 1
#define BINPRM_FLAGS_EXECFD (1 << BINPRM_FLAGS_EXECFD_BIT)
+#define BINPRM_MAX_RECURSION 4
/*
* This structure defines the functions that are used to load the binary formats that
@@ -96,7 +105,7 @@ extern int setup_arg_pages(struct linux_binprm * bprm,
int executable_stack);
extern int bprm_mm_init(struct linux_binprm *bprm);
extern int copy_strings_kernel(int argc,char ** argv,struct linux_binprm *bprm);
-extern void compute_creds(struct linux_binprm *binprm);
+extern void install_exec_creds(struct linux_binprm *bprm);
extern int do_coredump(long signr, int exit_code, struct pt_regs * regs);
extern int set_binfmt(struct linux_binfmt *new);
extern void free_bprm(struct linux_binprm *);
diff --git a/include/linux/bio.h b/include/linux/bio.h
index ff5b4cf9e2d..18462c5b8ff 100644
--- a/include/linux/bio.h
+++ b/include/linux/bio.h
@@ -79,14 +79,22 @@ struct bio {
unsigned int bi_size; /* residual I/O count */
+ /*
+ * To keep track of the max segment size, we account for the
+ * sizes of the first and last mergeable segments in this bio.
+ */
+ unsigned int bi_seg_front_size;
+ unsigned int bi_seg_back_size;
+
unsigned int bi_max_vecs; /* max bvl_vecs we can hold */
unsigned int bi_comp_cpu; /* completion CPU */
+ atomic_t bi_cnt; /* pin count */
+
struct bio_vec *bi_io_vec; /* the actual vec list */
bio_end_io_t *bi_end_io;
- atomic_t bi_cnt; /* pin count */
void *bi_private;
#if defined(CONFIG_BLK_DEV_INTEGRITY)
@@ -94,6 +102,13 @@ struct bio {
#endif
bio_destructor_t *bi_destructor; /* destructor */
+
+ /*
+ * We can inline a number of vecs at the end of the bio, to avoid
+ * double allocations for a small number of bio_vecs. This member
+ * MUST obviously be kept at the very end of the bio.
+ */
+ struct bio_vec bi_inline_vecs[0];
};
/*
@@ -110,6 +125,7 @@ struct bio {
#define BIO_CPU_AFFINE 8 /* complete bio on same CPU as submitted */
#define BIO_NULL_MAPPED 9 /* contains invalid user pages */
#define BIO_FS_INTEGRITY 10 /* fs owns integrity data, not block layer */
+#define BIO_QUIET 11 /* Make BIO Quiet */
#define bio_flagged(bio, flag) ((bio)->bi_flags & (1 << (flag)))
/*
@@ -129,25 +145,30 @@ struct bio {
* bit 2 -- barrier
* Insert a serialization point in the IO queue, forcing previously
* submitted IO to be completed before this one is issued.
- * bit 3 -- fail fast, don't want low level driver retries
- * bit 4 -- synchronous I/O hint: the block layer will unplug immediately
+ * bit 3 -- synchronous I/O hint: the block layer will unplug immediately
* Note that this does NOT indicate that the IO itself is sync, just
* that the block layer will not postpone issue of this IO by plugging.
- * bit 5 -- metadata request
+ * bit 4 -- metadata request
* Used for tracing to differentiate metadata and data IO. May also
* get some preferential treatment in the IO scheduler
- * bit 6 -- discard sectors
+ * bit 5 -- discard sectors
* Informs the lower level device that this range of sectors is no longer
* used by the file system and may thus be freed by the device. Used
* for flash based storage.
+ * bit 6 -- fail fast device errors
+ * bit 7 -- fail fast transport errors
+ * bit 8 -- fail fast driver errors
+ * Don't want driver retries for any fast fail whatever the reason.
*/
#define BIO_RW 0 /* Must match RW in req flags (blkdev.h) */
#define BIO_RW_AHEAD 1 /* Must match FAILFAST in req flags */
#define BIO_RW_BARRIER 2
-#define BIO_RW_FAILFAST 3
-#define BIO_RW_SYNC 4
-#define BIO_RW_META 5
-#define BIO_RW_DISCARD 6
+#define BIO_RW_SYNC 3
+#define BIO_RW_META 4
+#define BIO_RW_DISCARD 5
+#define BIO_RW_FAILFAST_DEV 6
+#define BIO_RW_FAILFAST_TRANSPORT 7
+#define BIO_RW_FAILFAST_DRIVER 8
/*
* upper 16 bits of bi_rw define the io priority of this bio
@@ -174,7 +195,10 @@ struct bio {
#define bio_sectors(bio) ((bio)->bi_size >> 9)
#define bio_barrier(bio) ((bio)->bi_rw & (1 << BIO_RW_BARRIER))
#define bio_sync(bio) ((bio)->bi_rw & (1 << BIO_RW_SYNC))
-#define bio_failfast(bio) ((bio)->bi_rw & (1 << BIO_RW_FAILFAST))
+#define bio_failfast_dev(bio) ((bio)->bi_rw & (1 << BIO_RW_FAILFAST_DEV))
+#define bio_failfast_transport(bio) \
+ ((bio)->bi_rw & (1 << BIO_RW_FAILFAST_TRANSPORT))
+#define bio_failfast_driver(bio) ((bio)->bi_rw & (1 << BIO_RW_FAILFAST_DRIVER))
#define bio_rw_ahead(bio) ((bio)->bi_rw & (1 << BIO_RW_AHEAD))
#define bio_rw_meta(bio) ((bio)->bi_rw & (1 << BIO_RW_META))
#define bio_discard(bio) ((bio)->bi_rw & (1 << BIO_RW_DISCARD))
@@ -196,6 +220,11 @@ static inline void *bio_data(struct bio *bio)
return NULL;
}
+static inline int bio_has_allocated_vec(struct bio *bio)
+{
+ return bio->bi_io_vec && bio->bi_io_vec != bio->bi_inline_vecs;
+}
+
/*
* will die
*/
@@ -221,12 +250,16 @@ static inline void *bio_data(struct bio *bio)
#define __BVEC_END(bio) bio_iovec_idx((bio), (bio)->bi_vcnt - 1)
#define __BVEC_START(bio) bio_iovec_idx((bio), (bio)->bi_idx)
+/* Default implementation of BIOVEC_PHYS_MERGEABLE */
+#define __BIOVEC_PHYS_MERGEABLE(vec1, vec2) \
+ ((bvec_to_phys((vec1)) + (vec1)->bv_len) == bvec_to_phys((vec2)))
+
/*
* allow arch override, for eg virtualized architectures (put in asm/io.h)
*/
#ifndef BIOVEC_PHYS_MERGEABLE
#define BIOVEC_PHYS_MERGEABLE(vec1, vec2) \
- ((bvec_to_phys((vec1)) + (vec1)->bv_len) == bvec_to_phys((vec2)))
+ __BIOVEC_PHYS_MERGEABLE(vec1, vec2)
#endif
#define __BIO_SEG_BOUNDARY(addr1, addr2, mask) \
@@ -313,7 +346,7 @@ struct bio_pair {
extern struct bio_pair *bio_split(struct bio *bi, int first_sectors);
extern void bio_pair_release(struct bio_pair *dbio);
-extern struct bio_set *bioset_create(int, int);
+extern struct bio_set *bioset_create(unsigned int, unsigned int);
extern void bioset_free(struct bio_set *);
extern struct bio *bio_alloc(gfp_t, int);
@@ -358,6 +391,7 @@ extern struct bio *bio_copy_user_iov(struct request_queue *,
extern int bio_uncopy_user(struct bio *);
void zero_fill_bio(struct bio *bio);
extern struct bio_vec *bvec_alloc_bs(gfp_t, int, unsigned long *, struct bio_set *);
+extern void bvec_free_bs(struct bio_set *, struct bio_vec *, unsigned int);
extern unsigned int bvec_nr_vecs(unsigned short idx);
/*
@@ -376,13 +410,17 @@ static inline void bio_set_completion_cpu(struct bio *bio, unsigned int cpu)
*/
#define BIO_POOL_SIZE 2
#define BIOVEC_NR_POOLS 6
+#define BIOVEC_MAX_IDX (BIOVEC_NR_POOLS - 1)
struct bio_set {
+ struct kmem_cache *bio_slab;
+ unsigned int front_pad;
+
mempool_t *bio_pool;
#if defined(CONFIG_BLK_DEV_INTEGRITY)
mempool_t *bio_integrity_pool;
#endif
- mempool_t *bvec_pools[BIOVEC_NR_POOLS];
+ mempool_t *bvec_pool;
};
struct biovec_slab {
@@ -392,6 +430,7 @@ struct biovec_slab {
};
extern struct bio_set *fs_bio_set;
+extern struct biovec_slab bvec_slabs[BIOVEC_NR_POOLS] __read_mostly;
/*
* a small number of entries is fine, not going to be performance critical.
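Illustrative only, not part of the patch: an error-handling sketch distinguishing the per-class failfast hints that replace the single BIO_RW_FAILFAST bit; the function and its caller are hypothetical.

	#include <linux/bio.h>

	/* Decide whether a failed bio may be retried, honouring the failfast
	 * hint that matches the class of error seen. */
	static int example_may_retry(struct bio *bio, int transport_error)
	{
		if (transport_error)
			return !bio_failfast_transport(bio);

		return !bio_failfast_dev(bio) && !bio_failfast_driver(bio);
	}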
diff --git a/include/linux/bitmap.h b/include/linux/bitmap.h
index 89781fd4885..a08c33a26ca 100644
--- a/include/linux/bitmap.h
+++ b/include/linux/bitmap.h
@@ -110,7 +110,6 @@ extern int __bitmap_weight(const unsigned long *bitmap, int bits);
extern int bitmap_scnprintf(char *buf, unsigned int len,
const unsigned long *src, int nbits);
-extern int bitmap_scnprintf_len(unsigned int nr_bits);
extern int __bitmap_parse(const char *buf, unsigned int buflen, int is_user,
unsigned long *dst, int nbits);
extern int bitmap_parse_user(const char __user *ubuf, unsigned int ulen,
@@ -130,6 +129,7 @@ extern void bitmap_fold(unsigned long *dst, const unsigned long *orig,
extern int bitmap_find_free_region(unsigned long *bitmap, int bits, int order);
extern void bitmap_release_region(unsigned long *bitmap, int pos, int order);
extern int bitmap_allocate_region(unsigned long *bitmap, int pos, int order);
+extern void bitmap_copy_le(void *dst, const unsigned long *src, int nbits);
#define BITMAP_LAST_WORD_MASK(nbits) \
( \
diff --git a/include/linux/blkdev.h b/include/linux/blkdev.h
index a92d9e4ea96..7035cec583b 100644
--- a/include/linux/blkdev.h
+++ b/include/linux/blkdev.h
@@ -26,7 +26,6 @@ struct scsi_ioctl_command;
struct request_queue;
struct elevator_queue;
-typedef struct elevator_queue elevator_t;
struct request_pm_state;
struct blk_trace;
struct request;
@@ -87,7 +86,9 @@ enum {
*/
enum rq_flag_bits {
__REQ_RW, /* not set, read. set, write */
- __REQ_FAILFAST, /* no low level driver retries */
+ __REQ_FAILFAST_DEV, /* no driver retries of device errors */
+ __REQ_FAILFAST_TRANSPORT, /* no driver retries of transport errors */
+ __REQ_FAILFAST_DRIVER, /* no driver retries of driver errors */
__REQ_DISCARD, /* request to discard sectors */
__REQ_SORTED, /* elevator knows about this request */
__REQ_SOFTBARRIER, /* may not be passed by ioscheduler */
@@ -111,8 +112,10 @@ enum rq_flag_bits {
};
#define REQ_RW (1 << __REQ_RW)
+#define REQ_FAILFAST_DEV (1 << __REQ_FAILFAST_DEV)
+#define REQ_FAILFAST_TRANSPORT (1 << __REQ_FAILFAST_TRANSPORT)
+#define REQ_FAILFAST_DRIVER (1 << __REQ_FAILFAST_DRIVER)
#define REQ_DISCARD (1 << __REQ_DISCARD)
-#define REQ_FAILFAST (1 << __REQ_FAILFAST)
#define REQ_SORTED (1 << __REQ_SORTED)
#define REQ_SOFTBARRIER (1 << __REQ_SOFTBARRIER)
#define REQ_HARDBARRIER (1 << __REQ_HARDBARRIER)
@@ -309,7 +312,7 @@ struct request_queue
*/
struct list_head queue_head;
struct request *last_merge;
- elevator_t *elevator;
+ struct elevator_queue *elevator;
/*
* the queue request freelist, one for reads and one for writes
@@ -445,6 +448,7 @@ struct request_queue
#define QUEUE_FLAG_FAIL_IO 12 /* fake timeout */
#define QUEUE_FLAG_STACKABLE 13 /* supports request stacking */
#define QUEUE_FLAG_NONROT 14 /* non-rotational device (SSD) */
+#define QUEUE_FLAG_VIRT QUEUE_FLAG_NONROT /* paravirt device */
static inline int queue_is_locked(struct request_queue *q)
{
@@ -518,22 +522,32 @@ enum {
* TAG_FLUSH : ordering by tag w/ pre and post flushes
* TAG_FUA : ordering by tag w/ pre flush and FUA write
*/
- QUEUE_ORDERED_NONE = 0x00,
- QUEUE_ORDERED_DRAIN = 0x01,
- QUEUE_ORDERED_TAG = 0x02,
-
- QUEUE_ORDERED_PREFLUSH = 0x10,
- QUEUE_ORDERED_POSTFLUSH = 0x20,
- QUEUE_ORDERED_FUA = 0x40,
-
- QUEUE_ORDERED_DRAIN_FLUSH = QUEUE_ORDERED_DRAIN |
- QUEUE_ORDERED_PREFLUSH | QUEUE_ORDERED_POSTFLUSH,
- QUEUE_ORDERED_DRAIN_FUA = QUEUE_ORDERED_DRAIN |
- QUEUE_ORDERED_PREFLUSH | QUEUE_ORDERED_FUA,
- QUEUE_ORDERED_TAG_FLUSH = QUEUE_ORDERED_TAG |
- QUEUE_ORDERED_PREFLUSH | QUEUE_ORDERED_POSTFLUSH,
- QUEUE_ORDERED_TAG_FUA = QUEUE_ORDERED_TAG |
- QUEUE_ORDERED_PREFLUSH | QUEUE_ORDERED_FUA,
+ QUEUE_ORDERED_BY_DRAIN = 0x01,
+ QUEUE_ORDERED_BY_TAG = 0x02,
+ QUEUE_ORDERED_DO_PREFLUSH = 0x10,
+ QUEUE_ORDERED_DO_BAR = 0x20,
+ QUEUE_ORDERED_DO_POSTFLUSH = 0x40,
+ QUEUE_ORDERED_DO_FUA = 0x80,
+
+ QUEUE_ORDERED_NONE = 0x00,
+
+ QUEUE_ORDERED_DRAIN = QUEUE_ORDERED_BY_DRAIN |
+ QUEUE_ORDERED_DO_BAR,
+ QUEUE_ORDERED_DRAIN_FLUSH = QUEUE_ORDERED_DRAIN |
+ QUEUE_ORDERED_DO_PREFLUSH |
+ QUEUE_ORDERED_DO_POSTFLUSH,
+ QUEUE_ORDERED_DRAIN_FUA = QUEUE_ORDERED_DRAIN |
+ QUEUE_ORDERED_DO_PREFLUSH |
+ QUEUE_ORDERED_DO_FUA,
+
+ QUEUE_ORDERED_TAG = QUEUE_ORDERED_BY_TAG |
+ QUEUE_ORDERED_DO_BAR,
+ QUEUE_ORDERED_TAG_FLUSH = QUEUE_ORDERED_TAG |
+ QUEUE_ORDERED_DO_PREFLUSH |
+ QUEUE_ORDERED_DO_POSTFLUSH,
+ QUEUE_ORDERED_TAG_FUA = QUEUE_ORDERED_TAG |
+ QUEUE_ORDERED_DO_PREFLUSH |
+ QUEUE_ORDERED_DO_FUA,
/*
* Ordered operation sequence
@@ -560,7 +574,12 @@ enum {
#define blk_special_request(rq) ((rq)->cmd_type == REQ_TYPE_SPECIAL)
#define blk_sense_request(rq) ((rq)->cmd_type == REQ_TYPE_SENSE)
-#define blk_noretry_request(rq) ((rq)->cmd_flags & REQ_FAILFAST)
+#define blk_failfast_dev(rq) ((rq)->cmd_flags & REQ_FAILFAST_DEV)
+#define blk_failfast_transport(rq) ((rq)->cmd_flags & REQ_FAILFAST_TRANSPORT)
+#define blk_failfast_driver(rq) ((rq)->cmd_flags & REQ_FAILFAST_DRIVER)
+#define blk_noretry_request(rq) (blk_failfast_dev(rq) || \
+ blk_failfast_transport(rq) || \
+ blk_failfast_driver(rq))
#define blk_rq_started(rq) ((rq)->cmd_flags & REQ_STARTED)
#define blk_account_rq(rq) (blk_rq_started(rq) && (blk_fs_request(rq) || blk_discard_rq(rq)))
@@ -576,7 +595,6 @@ enum {
#define blk_fua_rq(rq) ((rq)->cmd_flags & REQ_FUA)
#define blk_discard_rq(rq) ((rq)->cmd_flags & REQ_DISCARD)
#define blk_bidi_rq(rq) ((rq)->next_rq != NULL)
-#define blk_empty_barrier(rq) (blk_barrier_rq(rq) && blk_fs_request(rq) && !(rq)->hard_nr_sectors)
/* rq->queuelist of dequeued request must be list_empty() */
#define blk_queued_rq(rq) (!list_empty(&(rq)->queuelist))
@@ -653,6 +671,7 @@ extern unsigned long blk_max_low_pfn, blk_max_pfn;
* default timeout for SG_IO if none specified
*/
#define BLK_DEFAULT_SG_TIMEOUT (60 * HZ)
+#define BLK_MIN_SG_TIMEOUT (7 * HZ)
#ifdef CONFIG_BOUNCE
extern int init_emergency_isa_pool(void);
@@ -708,10 +727,10 @@ extern void blk_plug_device(struct request_queue *);
extern void blk_plug_device_unlocked(struct request_queue *);
extern int blk_remove_plug(struct request_queue *);
extern void blk_recount_segments(struct request_queue *, struct bio *);
-extern int scsi_cmd_ioctl(struct file *, struct request_queue *,
- struct gendisk *, unsigned int, void __user *);
-extern int sg_scsi_ioctl(struct file *, struct request_queue *,
- struct gendisk *, struct scsi_ioctl_command __user *);
+extern int scsi_cmd_ioctl(struct request_queue *, struct gendisk *, fmode_t,
+ unsigned int, void __user *);
+extern int sg_scsi_ioctl(struct request_queue *, struct gendisk *, fmode_t,
+ struct scsi_ioctl_command __user *);
/*
* Temporary export, until SCSI gets fixed up.
@@ -777,6 +796,8 @@ static inline void blk_run_address_space(struct address_space *mapping)
blk_run_backing_dev(mapping->backing_dev_info, NULL);
}
+extern void blkdev_dequeue_request(struct request *req);
+
/*
* blk_end_request() and friends.
* __blk_end_request() and end_request() must be called with
@@ -811,11 +832,6 @@ extern void blk_update_request(struct request *rq, int error,
extern unsigned int blk_rq_bytes(struct request *rq);
extern unsigned int blk_rq_cur_bytes(struct request *rq);
-static inline void blkdev_dequeue_request(struct request *req)
-{
- elv_dequeue_request(req->q, req);
-}
-
/*
* Access functions for manipulating queue properties
*/
@@ -848,15 +864,14 @@ extern void blk_queue_rq_timed_out(struct request_queue *, rq_timed_out_fn *);
extern void blk_queue_rq_timeout(struct request_queue *, unsigned int);
extern struct backing_dev_info *blk_get_backing_dev_info(struct block_device *bdev);
extern int blk_queue_ordered(struct request_queue *, unsigned, prepare_flush_fn *);
-extern int blk_do_ordered(struct request_queue *, struct request **);
+extern bool blk_do_ordered(struct request_queue *, struct request **);
extern unsigned blk_ordered_cur_seq(struct request_queue *);
extern unsigned blk_ordered_req_seq(struct request *);
-extern void blk_ordered_complete_seq(struct request_queue *, unsigned, int);
+extern bool blk_ordered_complete_seq(struct request_queue *, unsigned, int);
extern int blk_rq_map_sg(struct request_queue *, struct request *, struct scatterlist *);
extern void blk_dump_rq_flags(struct request *, char *);
extern void generic_unplug_device(struct request_queue *);
-extern void __generic_unplug_device(struct request_queue *);
extern long nr_blockdev_pages(void);
int blk_get_queue(struct request_queue *);
@@ -902,7 +917,8 @@ static inline int sb_issue_discard(struct super_block *sb,
* command filter functions
*/
extern int blk_verify_command(struct blk_cmd_filter *filter,
- unsigned char *cmd, int has_write_perm);
+ unsigned char *cmd, fmode_t has_write_perm);
+extern void blk_unregister_filter(struct gendisk *disk);
extern void blk_set_cmd_filter_defaults(struct blk_cmd_filter *filter);
#define MAX_PHYS_SEGMENTS 128
@@ -912,6 +928,8 @@ extern void blk_set_cmd_filter_defaults(struct blk_cmd_filter *filter);
#define MAX_SEGMENT_SIZE 65536
+#define BLK_SEG_BOUNDARY_MASK 0xFFFFFFFFUL
+
#define blkdev_entry_to_request(entry) list_entry((entry), struct request, queuelist)
static inline int queue_hardsect_size(struct request_queue *q)
@@ -968,7 +986,6 @@ static inline void put_dev_sector(Sector p)
struct work_struct;
int kblockd_schedule_work(struct request_queue *q, struct work_struct *work);
-void kblockd_flush_work(struct work_struct *work);
#define MODULE_ALIAS_BLOCKDEV(major,minor) \
MODULE_ALIAS("block-major-" __stringify(major) "-" __stringify(minor))
@@ -1048,6 +1065,22 @@ static inline int blk_integrity_rq(struct request *rq)
#endif /* CONFIG_BLK_DEV_INTEGRITY */
+struct block_device_operations {
+ int (*open) (struct block_device *, fmode_t);
+ int (*release) (struct gendisk *, fmode_t);
+ int (*locked_ioctl) (struct block_device *, fmode_t, unsigned, unsigned long);
+ int (*ioctl) (struct block_device *, fmode_t, unsigned, unsigned long);
+ int (*compat_ioctl) (struct block_device *, fmode_t, unsigned, unsigned long);
+ int (*direct_access) (struct block_device *, sector_t,
+ void **, unsigned long *);
+ int (*media_changed) (struct gendisk *);
+ int (*revalidate_disk) (struct gendisk *);
+ int (*getgeo)(struct block_device *, struct hd_geometry *);
+ struct module *owner;
+};
+
+extern int __blkdev_driver_ioctl(struct block_device *, fmode_t, unsigned int,
+ unsigned long);
#else /* CONFIG_BLOCK */
/*
* stubs for when the block layer is configured out
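For illustration (not part of this patch): a minimal driver ops table against the relocated block_device_operations and its fmode_t-based prototypes; the names are placeholders.

	#include <linux/module.h>
	#include <linux/blkdev.h>

	static int example_open(struct block_device *bdev, fmode_t mode)
	{
		return 0;
	}

	static int example_release(struct gendisk *disk, fmode_t mode)
	{
		return 0;
	}

	static struct block_device_operations example_fops = {
		.owner   = THIS_MODULE,
		.open    = example_open,
		.release = example_release,
	};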
diff --git a/include/linux/blktrace_api.h b/include/linux/blktrace_api.h
index 3a31eb50616..1dba3493d52 100644
--- a/include/linux/blktrace_api.h
+++ b/include/linux/blktrace_api.h
@@ -24,6 +24,7 @@ enum blktrace_cat {
BLK_TC_AHEAD = 1 << 11, /* readahead */
BLK_TC_META = 1 << 12, /* metadata */
BLK_TC_DISCARD = 1 << 13, /* discard requests */
+ BLK_TC_DRV_DATA = 1 << 14, /* binary per-driver data */
BLK_TC_END = 1 << 15, /* only 16-bits, reminder */
};
@@ -51,6 +52,7 @@ enum blktrace_act {
__BLK_TA_BOUNCE, /* bio was bounced */
__BLK_TA_REMAP, /* bio was remapped */
__BLK_TA_ABORT, /* request aborted */
+ __BLK_TA_DRV_DATA, /* driver-specific binary data */
};
/*
@@ -82,6 +84,7 @@ enum blktrace_notify {
#define BLK_TA_BOUNCE (__BLK_TA_BOUNCE)
#define BLK_TA_REMAP (__BLK_TA_REMAP | BLK_TC_ACT(BLK_TC_QUEUE))
#define BLK_TA_ABORT (__BLK_TA_ABORT | BLK_TC_ACT(BLK_TC_QUEUE))
+#define BLK_TA_DRV_DATA (__BLK_TA_DRV_DATA | BLK_TC_ACT(BLK_TC_DRV_DATA))
#define BLK_TN_PROCESS (__BLK_TN_PROCESS | BLK_TC_ACT(BLK_TC_NOTIFY))
#define BLK_TN_TIMESTAMP (__BLK_TN_TIMESTAMP | BLK_TC_ACT(BLK_TC_NOTIFY))
@@ -157,7 +160,6 @@ struct blk_trace {
extern int blk_trace_ioctl(struct block_device *, unsigned, char __user *);
extern void blk_trace_shutdown(struct request_queue *);
-extern void __blk_add_trace(struct blk_trace *, sector_t, int, int, u32, int, int, void *);
extern int do_blk_trace_setup(struct request_queue *q,
char *name, dev_t dev, struct blk_user_trace_setup *buts);
extern void __trace_note_message(struct blk_trace *, const char *fmt, ...);
@@ -183,140 +185,8 @@ extern void __trace_note_message(struct blk_trace *, const char *fmt, ...);
} while (0)
#define BLK_TN_MAX_MSG 128
-/**
- * blk_add_trace_rq - Add a trace for a request oriented action
- * @q: queue the io is for
- * @rq: the source request
- * @what: the action
- *
- * Description:
- * Records an action against a request. Will log the bio offset + size.
- *
- **/
-static inline void blk_add_trace_rq(struct request_queue *q, struct request *rq,
- u32 what)
-{
- struct blk_trace *bt = q->blk_trace;
- int rw = rq->cmd_flags & 0x03;
-
- if (likely(!bt))
- return;
-
- if (blk_discard_rq(rq))
- rw |= (1 << BIO_RW_DISCARD);
-
- if (blk_pc_request(rq)) {
- what |= BLK_TC_ACT(BLK_TC_PC);
- __blk_add_trace(bt, 0, rq->data_len, rw, what, rq->errors, sizeof(rq->cmd), rq->cmd);
- } else {
- what |= BLK_TC_ACT(BLK_TC_FS);
- __blk_add_trace(bt, rq->hard_sector, rq->hard_nr_sectors << 9, rw, what, rq->errors, 0, NULL);
- }
-}
-
-/**
- * blk_add_trace_bio - Add a trace for a bio oriented action
- * @q: queue the io is for
- * @bio: the source bio
- * @what: the action
- *
- * Description:
- * Records an action against a bio. Will log the bio offset + size.
- *
- **/
-static inline void blk_add_trace_bio(struct request_queue *q, struct bio *bio,
- u32 what)
-{
- struct blk_trace *bt = q->blk_trace;
-
- if (likely(!bt))
- return;
-
- __blk_add_trace(bt, bio->bi_sector, bio->bi_size, bio->bi_rw, what, !bio_flagged(bio, BIO_UPTODATE), 0, NULL);
-}
-
-/**
- * blk_add_trace_generic - Add a trace for a generic action
- * @q: queue the io is for
- * @bio: the source bio
- * @rw: the data direction
- * @what: the action
- *
- * Description:
- * Records a simple trace
- *
- **/
-static inline void blk_add_trace_generic(struct request_queue *q,
- struct bio *bio, int rw, u32 what)
-{
- struct blk_trace *bt = q->blk_trace;
-
- if (likely(!bt))
- return;
-
- if (bio)
- blk_add_trace_bio(q, bio, what);
- else
- __blk_add_trace(bt, 0, 0, rw, what, 0, 0, NULL);
-}
-
-/**
- * blk_add_trace_pdu_int - Add a trace for a bio with an integer payload
- * @q: queue the io is for
- * @what: the action
- * @bio: the source bio
- * @pdu: the integer payload
- *
- * Description:
- * Adds a trace with some integer payload. This might be an unplug
- * option given as the action, with the depth at unplug time given
- * as the payload
- *
- **/
-static inline void blk_add_trace_pdu_int(struct request_queue *q, u32 what,
- struct bio *bio, unsigned int pdu)
-{
- struct blk_trace *bt = q->blk_trace;
- __be64 rpdu = cpu_to_be64(pdu);
-
- if (likely(!bt))
- return;
-
- if (bio)
- __blk_add_trace(bt, bio->bi_sector, bio->bi_size, bio->bi_rw, what, !bio_flagged(bio, BIO_UPTODATE), sizeof(rpdu), &rpdu);
- else
- __blk_add_trace(bt, 0, 0, 0, what, 0, sizeof(rpdu), &rpdu);
-}
-
-/**
- * blk_add_trace_remap - Add a trace for a remap operation
- * @q: queue the io is for
- * @bio: the source bio
- * @dev: target device
- * @from: source sector
- * @to: target sector
- *
- * Description:
- * Device mapper or raid target sometimes need to split a bio because
- * it spans a stripe (or similar). Add a trace for that action.
- *
- **/
-static inline void blk_add_trace_remap(struct request_queue *q, struct bio *bio,
- dev_t dev, sector_t from, sector_t to)
-{
- struct blk_trace *bt = q->blk_trace;
- struct blk_io_trace_remap r;
-
- if (likely(!bt))
- return;
-
- r.device = cpu_to_be32(dev);
- r.device_from = cpu_to_be32(bio->bi_bdev->bd_dev);
- r.sector = cpu_to_be64(to);
-
- __blk_add_trace(bt, from, bio->bi_size, bio->bi_rw, BLK_TA_REMAP, !bio_flagged(bio, BIO_UPTODATE), sizeof(r), &r);
-}
-
+extern void blk_add_driver_data(struct request_queue *q, struct request *rq,
+ void *data, size_t len);
extern int blk_trace_setup(struct request_queue *q, char *name, dev_t dev,
char __user *arg);
extern int blk_trace_startstop(struct request_queue *q, int start);
@@ -325,12 +195,8 @@ extern int blk_trace_remove(struct request_queue *q);
#else /* !CONFIG_BLK_DEV_IO_TRACE */
#define blk_trace_ioctl(bdev, cmd, arg) (-ENOTTY)
#define blk_trace_shutdown(q) do { } while (0)
-#define blk_add_trace_rq(q, rq, what) do { } while (0)
-#define blk_add_trace_bio(q, rq, what) do { } while (0)
-#define blk_add_trace_generic(q, rq, rw, what) do { } while (0)
-#define blk_add_trace_pdu_int(q, what, bio, pdu) do { } while (0)
-#define blk_add_trace_remap(q, bio, dev, f, t) do {} while (0)
#define do_blk_trace_setup(q, name, dev, buts) (-ENOTTY)
+#define blk_add_driver_data(q, rq, data, len) do {} while (0)
#define blk_trace_setup(q, name, dev, arg) (-ENOTTY)
#define blk_trace_startstop(q, start) (-ENOTTY)
#define blk_trace_remove(q) (-ENOTTY)
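A hedged sketch, not from this patch: a driver attaching binary, driver-private data to the trace stream with the newly exported blk_add_driver_data(); the hardware status payload is made up for illustration.

	#include <linux/types.h>
	#include <linux/blkdev.h>
	#include <linux/blktrace_api.h>

	static void example_trace_completion(struct request_queue *q,
					     struct request *rq, u32 hw_status)
	{
		/* Emitted as a BLK_TA_DRV_DATA event when tracing is enabled. */
		blk_add_driver_data(q, rq, &hw_status, sizeof(hw_status));
	}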
diff --git a/include/linux/bottom_half.h b/include/linux/bottom_half.h
index 777dbf695d4..27b1bcffe40 100644
--- a/include/linux/bottom_half.h
+++ b/include/linux/bottom_half.h
@@ -2,7 +2,6 @@
#define _LINUX_BH_H
extern void local_bh_disable(void);
-extern void __local_bh_enable(void);
extern void _local_bh_enable(void);
extern void local_bh_enable(void);
extern void local_bh_enable_ip(unsigned long ip);
diff --git a/include/linux/buffer_head.h b/include/linux/buffer_head.h
index eadaab44015..8605f8a74df 100644
--- a/include/linux/buffer_head.h
+++ b/include/linux/buffer_head.h
@@ -35,6 +35,7 @@ enum bh_state_bits {
BH_Ordered, /* ordered write */
BH_Eopnotsupp, /* operation not supported (barrier) */
BH_Unwritten, /* Buffer is allocated on disk but not written */
+ BH_Quiet, /* Buffer error printks to be quiet */
BH_PrivateStart,/* not a state bit, but the first bit available
* for private allocation by other entities
@@ -322,7 +323,7 @@ static inline void wait_on_buffer(struct buffer_head *bh)
static inline int trylock_buffer(struct buffer_head *bh)
{
- return likely(!test_and_set_bit(BH_Lock, &bh->b_state));
+ return likely(!test_and_set_bit_lock(BH_Lock, &bh->b_state));
}
static inline void lock_buffer(struct buffer_head *bh)
diff --git a/include/linux/byteorder/Kbuild b/include/linux/byteorder/Kbuild
index 1133d5f9d81..fbaa7f9cee3 100644
--- a/include/linux/byteorder/Kbuild
+++ b/include/linux/byteorder/Kbuild
@@ -1,3 +1,4 @@
unifdef-y += big_endian.h
unifdef-y += little_endian.h
unifdef-y += swab.h
+unifdef-y += swabb.h
diff --git a/include/linux/byteorder/big_endian.h b/include/linux/byteorder/big_endian.h
index 44f95b92393..1cba3f3efe5 100644
--- a/include/linux/byteorder/big_endian.h
+++ b/include/linux/byteorder/big_endian.h
@@ -10,6 +10,7 @@
#include <linux/types.h>
#include <linux/byteorder/swab.h>
+#include <linux/byteorder/swabb.h>
#define __constant_htonl(x) ((__force __be32)(__u32)(x))
#define __constant_ntohl(x) ((__force __u32)(__be32)(x))
diff --git a/include/linux/byteorder/little_endian.h b/include/linux/byteorder/little_endian.h
index 4cc170a3176..cedc1b5a289 100644
--- a/include/linux/byteorder/little_endian.h
+++ b/include/linux/byteorder/little_endian.h
@@ -10,6 +10,7 @@
#include <linux/types.h>
#include <linux/byteorder/swab.h>
+#include <linux/byteorder/swabb.h>
#define __constant_htonl(x) ((__force __be32)___constant_swab32((x)))
#define __constant_ntohl(x) ___constant_swab32((__force __be32)(x))
diff --git a/include/linux/c2port.h b/include/linux/c2port.h
new file mode 100644
index 00000000000..7b5a2388ba6
--- /dev/null
+++ b/include/linux/c2port.h
@@ -0,0 +1,65 @@
+/*
+ * Silicon Labs C2 port Linux support
+ *
+ * Copyright (c) 2007 Rodolfo Giometti <giometti@linux.it>
+ * Copyright (c) 2007 Eurotech S.p.A. <info@eurotech.it>
+ *
+ * This program is free software; you can redistribute it and/or modify it
+ * under the terms of the GNU General Public License version 2 as published by
+ * the Free Software Foundation
+ */
+
+#include <linux/device.h>
+
+#define C2PORT_NAME_LEN 32
+
+/*
+ * C2 port basic structs
+ */
+
+/* Main struct */
+struct c2port_ops;
+struct c2port_device {
+ unsigned int access:1;
+ unsigned int flash_access:1;
+
+ int id;
+ char name[C2PORT_NAME_LEN];
+ struct c2port_ops *ops;
+ struct mutex mutex; /* prevent races during read/write */
+
+ struct device *dev;
+
+ void *private_data;
+};
+
+/* Basic operations */
+struct c2port_ops {
+ /* Flash layout */
+ unsigned short block_size; /* flash block size in bytes */
+ unsigned short blocks_num; /* flash blocks number */
+
+ /* Enable or disable the access to C2 port */
+ void (*access)(struct c2port_device *dev, int status);
+
+ /* Set C2D data line as input/output */
+ void (*c2d_dir)(struct c2port_device *dev, int dir);
+
+ /* Read/write C2D data line */
+ int (*c2d_get)(struct c2port_device *dev);
+ void (*c2d_set)(struct c2port_device *dev, int status);
+
+ /* Write C2CK clock line */
+ void (*c2ck_set)(struct c2port_device *dev, int status);
+};
+
+/*
+ * Exported functions
+ */
+
+#define to_class_dev(obj) container_of((obj), struct class_device, kobj)
+#define to_c2port_device(obj) container_of((obj), struct c2port_device, class)
+
+extern struct c2port_device *c2port_device_register(char *name,
+ struct c2port_ops *ops, void *devdata);
+extern void c2port_device_unregister(struct c2port_device *dev);
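
struct c2port_ops above is a plain ops table: the generic C2 port code drives the interface purely through these callbacks, so each board only supplies pin accessors before calling c2port_device_register(). A userspace sketch of that indirection, with stub callbacks in place of real GPIO helpers (everything below is illustrative and not part of the patch):

#include <stdio.h>

/* Function-pointer table in the style of struct c2port_ops. */
struct demo_ops {
	void (*c2d_dir)(int dir);	/* set data line direction */
	int  (*c2d_get)(void);		/* sample data line */
	void (*c2d_set)(int status);	/* drive data line */
	void (*c2ck_set)(int status);	/* drive clock line */
};

static int fake_pin;

static void fake_c2d_dir(int dir)     { printf("C2D direction: %d\n", dir); }
static int  fake_c2d_get(void)        { return fake_pin; }
static void fake_c2d_set(int status)  { fake_pin = status; }
static void fake_c2ck_set(int status) { printf("C2CK <- %d\n", status); }

static const struct demo_ops board_ops = {
	.c2d_dir  = fake_c2d_dir,
	.c2d_get  = fake_c2d_get,
	.c2d_set  = fake_c2d_set,
	.c2ck_set = fake_c2ck_set,
};

int main(void)
{
	/* The generic layer would clock a bit out roughly like this. */
	board_ops.c2d_dir(0);
	board_ops.c2d_set(1);
	board_ops.c2ck_set(0);
	board_ops.c2ck_set(1);
	printf("read back: %d\n", board_ops.c2d_get());
	return 0;
}
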
diff --git a/include/linux/can/core.h b/include/linux/can/core.h
index e9ca210ffa5..f50785ad478 100644
--- a/include/linux/can/core.h
+++ b/include/linux/can/core.h
@@ -19,7 +19,7 @@
#include <linux/skbuff.h>
#include <linux/netdevice.h>
-#define CAN_VERSION "20071116"
+#define CAN_VERSION "20081130"
/* increment this number each time you change some user-space interface */
#define CAN_ABI_VERSION "8"
diff --git a/include/linux/capability.h b/include/linux/capability.h
index 9d1fe30b6f6..e22f48c2a46 100644
--- a/include/linux/capability.h
+++ b/include/linux/capability.h
@@ -53,6 +53,7 @@ typedef struct __user_cap_data_struct {
#define XATTR_NAME_CAPS XATTR_SECURITY_PREFIX XATTR_CAPS_SUFFIX
#define VFS_CAP_REVISION_MASK 0xFF000000
+#define VFS_CAP_REVISION_SHIFT 24
#define VFS_CAP_FLAGS_MASK ~VFS_CAP_REVISION_MASK
#define VFS_CAP_FLAGS_EFFECTIVE 0x000001
@@ -68,6 +69,9 @@ typedef struct __user_cap_data_struct {
#define VFS_CAP_U32 VFS_CAP_U32_2
#define VFS_CAP_REVISION VFS_CAP_REVISION_2
+#ifdef CONFIG_SECURITY_FILE_CAPABILITIES
+extern int file_caps_enabled;
+#endif
struct vfs_cap_data {
__le32 magic_etc; /* Little endian */
@@ -96,6 +100,13 @@ typedef struct kernel_cap_struct {
__u32 cap[_KERNEL_CAPABILITY_U32S];
} kernel_cap_t;
+/* same as vfs_cap_data but in CPU byte order and always filled completely */
+struct cpu_vfs_cap_data {
+ __u32 magic_etc;
+ kernel_cap_t permitted;
+ kernel_cap_t inheritable;
+};
+
#define _USER_CAP_HEADER_SIZE (sizeof(struct __user_cap_header_struct))
#define _KERNEL_CAP_T_SIZE (sizeof(kernel_cap_t))
@@ -454,6 +465,13 @@ static inline int cap_isclear(const kernel_cap_t a)
return 1;
}
+/*
+ * Check if "a" is a subset of "set".
+ * return 1 if ALL of the capabilities in "a" are also in "set"
+ * cap_issubset(0101, 1111) will return 1
+ * return 0 if ANY of the capabilities in "a" are not in "set"
+ * cap_issubset(1111, 0101) will return 0
+ */
static inline int cap_issubset(const kernel_cap_t a, const kernel_cap_t set)
{
kernel_cap_t dest;
@@ -501,8 +519,6 @@ extern const kernel_cap_t __cap_empty_set;
extern const kernel_cap_t __cap_full_set;
extern const kernel_cap_t __cap_init_eff_set;
-kernel_cap_t cap_set_effective(const kernel_cap_t pE_new);
-
/**
* has_capability - Determine if a task has a superior capability available
* @t: The task in question
@@ -514,9 +530,14 @@ kernel_cap_t cap_set_effective(const kernel_cap_t pE_new);
* Note that this does not set PF_SUPERPRIV on the task.
*/
#define has_capability(t, cap) (security_capable((t), (cap)) == 0)
+#define has_capability_noaudit(t, cap) (security_capable_noaudit((t), (cap)) == 0)
extern int capable(int cap);
+/* audit system wants to get cap info from files as well */
+struct dentry;
+extern int get_vfs_caps_from_disk(const struct dentry *dentry, struct cpu_vfs_cap_data *cpu_caps);
+
#endif /* __KERNEL__ */
#endif /* !_LINUX_CAPABILITY_H */
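
The new cap_issubset() comment boils down to a simple mask identity: "a" is a subset of "set" exactly when a & ~set is empty. A tiny userspace illustration with plain bitmasks standing in for kernel_cap_t:

#include <stdio.h>

/* Subset test over bitmasks: every bit of 'a' must also be in 'set'. */
static int is_subset(unsigned int a, unsigned int set)
{
	return (a & ~set) == 0;
}

int main(void)
{
	printf("%d\n", is_subset(0x5, 0xF));	/* 0101 within 1111 -> 1 */
	printf("%d\n", is_subset(0xF, 0x5));	/* 1111 within 0101 -> 0 */
	return 0;
}
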
diff --git a/include/linux/cdrom.h b/include/linux/cdrom.h
index 5db265ea60f..0b49e08d3cb 100644
--- a/include/linux/cdrom.h
+++ b/include/linux/cdrom.h
@@ -987,11 +987,11 @@ struct cdrom_device_ops {
};
/* the general block_device operations structure: */
-extern int cdrom_open(struct cdrom_device_info *cdi, struct inode *ip,
- struct file *fp);
-extern int cdrom_release(struct cdrom_device_info *cdi, struct file *fp);
-extern int cdrom_ioctl(struct file *file, struct cdrom_device_info *cdi,
- struct inode *ip, unsigned int cmd, unsigned long arg);
+extern int cdrom_open(struct cdrom_device_info *cdi, struct block_device *bdev,
+ fmode_t mode);
+extern void cdrom_release(struct cdrom_device_info *cdi, fmode_t mode);
+extern int cdrom_ioctl(struct cdrom_device_info *cdi, struct block_device *bdev,
+ fmode_t mode, unsigned int cmd, unsigned long arg);
extern int cdrom_media_changed(struct cdrom_device_info *);
extern int register_cdrom(struct cdrom_device_info *cdi);
diff --git a/include/linux/cgroup.h b/include/linux/cgroup.h
index c98dd7cb707..1164963c3a8 100644
--- a/include/linux/cgroup.h
+++ b/include/linux/cgroup.h
@@ -9,12 +9,12 @@
*/
#include <linux/sched.h>
-#include <linux/kref.h>
#include <linux/cpumask.h>
#include <linux/nodemask.h>
#include <linux/rcupdate.h>
#include <linux/cgroupstats.h>
#include <linux/prio_heap.h>
+#include <linux/rwsem.h>
#ifdef CONFIG_CGROUPS
@@ -25,7 +25,6 @@ struct cgroup;
extern int cgroup_init_early(void);
extern int cgroup_init(void);
-extern void cgroup_init_smp(void);
extern void cgroup_lock(void);
extern bool cgroup_lock_live_group(struct cgroup *cgrp);
extern void cgroup_unlock(void);
@@ -137,6 +136,15 @@ struct cgroup {
* release_list_lock
*/
struct list_head release_list;
+
+ /* pids_mutex protects the fields below */
+ struct rw_semaphore pids_mutex;
+ /* Array of process ids in the cgroup */
+ pid_t *tasks_pids;
+ /* How many files are using the current tasks_pids array */
+ int pids_use_count;
+ /* Length of the current tasks_pids array */
+ int pids_length;
};
/* A css_set is a structure holding pointers to a set of
@@ -149,7 +157,7 @@ struct cgroup {
struct css_set {
/* Reference count */
- struct kref ref;
+ atomic_t refcount;
/*
* List running through all cgroup groups in the same hash
@@ -326,7 +334,8 @@ struct cgroup_subsys {
*/
void (*mm_owner_changed)(struct cgroup_subsys *ss,
struct cgroup *old,
- struct cgroup *new);
+ struct cgroup *new,
+ struct task_struct *p);
int subsys_id;
int active;
int disabled;
@@ -338,8 +347,6 @@ struct cgroup_subsys {
struct cgroupfs_root *root;
struct list_head sibling;
-
- void *private;
};
#define SUBSYS(_x) extern struct cgroup_subsys _x ## _subsys;
@@ -393,11 +400,13 @@ void cgroup_iter_end(struct cgroup *cgrp, struct cgroup_iter *it);
int cgroup_scan_tasks(struct cgroup_scanner *scan);
int cgroup_attach_task(struct cgroup *, struct task_struct *);
+void cgroup_mm_owner_callbacks(struct task_struct *old,
+ struct task_struct *new);
+
#else /* !CONFIG_CGROUPS */
static inline int cgroup_init_early(void) { return 0; }
static inline int cgroup_init(void) { return 0; }
-static inline void cgroup_init_smp(void) {}
static inline void cgroup_fork(struct task_struct *p) {}
static inline void cgroup_fork_callbacks(struct task_struct *p) {}
static inline void cgroup_post_fork(struct task_struct *p) {}
@@ -411,15 +420,9 @@ static inline int cgroupstats_build(struct cgroupstats *stats,
return -EINVAL;
}
+static inline void cgroup_mm_owner_callbacks(struct task_struct *old,
+ struct task_struct *new) {}
+
#endif /* !CONFIG_CGROUPS */
-#ifdef CONFIG_MM_OWNER
-extern void
-cgroup_mm_owner_callbacks(struct task_struct *old, struct task_struct *new);
-#else /* !CONFIG_MM_OWNER */
-static inline void
-cgroup_mm_owner_callbacks(struct task_struct *old, struct task_struct *new)
-{
-}
-#endif /* CONFIG_MM_OWNER */
#endif /* _LINUX_CGROUP_H */
diff --git a/include/linux/cgroup_subsys.h b/include/linux/cgroup_subsys.h
index e2877454ec8..9c8d31bacf4 100644
--- a/include/linux/cgroup_subsys.h
+++ b/include/linux/cgroup_subsys.h
@@ -48,3 +48,15 @@ SUBSYS(devices)
#endif
/* */
+
+#ifdef CONFIG_CGROUP_FREEZER
+SUBSYS(freezer)
+#endif
+
+/* */
+
+#ifdef CONFIG_NET_CLS_CGROUP
+SUBSYS(net_cls)
+#endif
+
+/* */
diff --git a/include/linux/clk.h b/include/linux/clk.h
index 5ca8c6fddb5..778777316ea 100644
--- a/include/linux/clk.h
+++ b/include/linux/clk.h
@@ -35,6 +35,8 @@ struct clk;
* clk_get may return different clock producers depending on @dev.)
*
* Drivers must assume that the clock source is not enabled.
+ *
+ * clk_get should not be called from within interrupt context.
*/
struct clk *clk_get(struct device *dev, const char *id);
@@ -76,6 +78,8 @@ unsigned long clk_get_rate(struct clk *clk);
* Note: drivers must ensure that all clk_enable calls made on this
* clock source are balanced by clk_disable calls prior to calling
* this function.
+ *
+ * clk_put should not be called from within interrupt context.
*/
void clk_put(struct clk *clk);
diff --git a/include/linux/clocksource.h b/include/linux/clocksource.h
index 55e434feec9..f88d32f8ff7 100644
--- a/include/linux/clocksource.h
+++ b/include/linux/clocksource.h
@@ -45,7 +45,8 @@ struct clocksource;
* @read: returns a cycle value
* @mask: bitmask for two's complement
* subtraction of non 64 bit counters
- * @mult: cycle to nanosecond multiplier
+ * @mult: cycle to nanosecond multiplier (adjusted by NTP)
+ * @mult_orig: cycle to nanosecond multiplier (unadjusted by NTP)
* @shift: cycle to nanosecond divisor (power of two)
* @flags: flags describing special properties
* @vread: vsyscall based read
@@ -63,6 +64,7 @@ struct clocksource {
cycle_t (*read)(void);
cycle_t mask;
u32 mult;
+ u32 mult_orig;
u32 shift;
unsigned long flags;
cycle_t (*vread)(void);
@@ -77,6 +79,7 @@ struct clocksource {
/* timekeeping specific data, ignore */
cycle_t cycle_interval;
u64 xtime_interval;
+ u32 raw_interval;
/*
* Second part is written at each timer interrupt
* Keep it in a different cache line to dirty no
@@ -85,6 +88,7 @@ struct clocksource {
cycle_t cycle_last ____cacheline_aligned_in_smp;
u64 xtime_nsec;
s64 error;
+ struct timespec raw_time;
#ifdef CONFIG_CLOCKSOURCE_WATCHDOG
/* Watchdog related data, used by the framework */
@@ -201,17 +205,19 @@ static inline void clocksource_calculate_interval(struct clocksource *c,
{
u64 tmp;
- /* XXX - All of this could use a whole lot of optimization */
+ /* Do the ns -> cycle conversion first, using original mult */
tmp = length_nsec;
tmp <<= c->shift;
- tmp += c->mult/2;
- do_div(tmp, c->mult);
+ tmp += c->mult_orig/2;
+ do_div(tmp, c->mult_orig);
c->cycle_interval = (cycle_t)tmp;
if (c->cycle_interval == 0)
c->cycle_interval = 1;
+ /* Go back from cycles -> shifted ns, this time use ntp adjusted mult */
c->xtime_interval = (u64)c->cycle_interval * c->mult;
+ c->raw_interval = ((u64)c->cycle_interval * c->mult_orig) >> c->shift;
}
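
clocksource_calculate_interval() above converts a tick length in nanoseconds to cycles and back through the mult/shift fixed-point pair. A small userspace sketch of that arithmetic, using invented values and a single multiplier where the kernel now distinguishes mult from mult_orig:

#include <stdio.h>
#include <stdint.h>

int main(void)
{
	/* Roughly a 1.5 GHz counter: ns = (cycles * mult) >> shift. */
	uint32_t mult  = 2796203;
	uint32_t shift = 22;
	uint64_t length_nsec = 10 * 1000 * 1000ULL;	/* 10 ms tick */

	/* ns -> cycles, rounding to nearest, as the helper above does */
	uint64_t tmp = (length_nsec << shift) + mult / 2;
	uint64_t cycle_interval = tmp / mult;

	/* cycles -> shifted ns, which is what xtime_interval stores */
	uint64_t xtime_interval = cycle_interval * mult;

	printf("cycles per tick: %llu\n",
	       (unsigned long long)cycle_interval);
	printf("ns per tick, back-converted: %llu\n",
	       (unsigned long long)(xtime_interval >> shift));
	return 0;
}
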
diff --git a/include/linux/cnt32_to_63.h b/include/linux/cnt32_to_63.h
index 8c0f9505b48..7605fdd1eb6 100644
--- a/include/linux/cnt32_to_63.h
+++ b/include/linux/cnt32_to_63.h
@@ -16,6 +16,7 @@
#include <linux/compiler.h>
#include <linux/types.h>
#include <asm/byteorder.h>
+#include <asm/system.h>
/* this is used only to give gcc a clue about good code generation */
union cnt32_to_63 {
@@ -53,11 +54,19 @@ union cnt32_to_63 {
* needed increment. And any race in updating the value in memory is harmless
* as the same value would simply be stored more than once.
*
- * The only restriction for the algorithm to work properly is that this
- * code must be executed at least once per each half period of the 32-bit
- * counter to properly update the state bit in memory. This is usually not a
- * problem in practice, but if it is then a kernel timer could be scheduled
- * to manage for this code to be executed often enough.
+ * The restrictions for the algorithm to work properly are:
+ *
+ * 1) this code must be called at least once per each half period of the
+ * 32-bit counter;
+ *
+ * 2) this code must not be preempted for a duration longer than the
+ * 32-bit counter half period minus the longest period between two
+ * calls to this code.
+ *
+ * Those requirements ensure proper update to the state bit in memory.
+ * This is usually not a problem in practice, but if it is then a kernel
+ * timer should be scheduled to ensure that this code gets executed often
+ * enough.
*
* Note that the top bit (bit 63) in the returned value should be considered
* as garbage. It is not cleared here because callers are likely to use a
@@ -68,9 +77,10 @@ union cnt32_to_63 {
*/
#define cnt32_to_63(cnt_lo) \
({ \
- static volatile u32 __m_cnt_hi; \
+ static u32 __m_cnt_hi; \
union cnt32_to_63 __x; \
__x.hi = __m_cnt_hi; \
+ smp_rmb(); \
__x.lo = (cnt_lo); \
if (unlikely((s32)(__x.hi ^ __x.lo) < 0)) \
__m_cnt_hi = __x.hi = (__x.hi ^ 0x80000000) + (__x.hi >> 31); \
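
The updated cnt32_to_63() comment describes extending a free-running 32-bit counter to 63 bits by tracking the expected state of bit 31 in software. A single-threaded userspace model of the same trick (no smp_rmb() is needed here, and every name is local to the sketch):

#include <stdio.h>
#include <stdint.h>

static uint32_t m_cnt_hi;	/* software copy of the high half */

static uint64_t cnt32_to_63_demo(uint32_t cnt_lo)
{
	uint32_t hi = m_cnt_hi;
	uint32_t lo = cnt_lo;

	/* Bit 31 of 'hi' predicts bit 31 of the counter; when they
	 * disagree, the counter has crossed a half period, so advance
	 * the high half exactly as the macro above does. */
	if ((int32_t)(hi ^ lo) < 0)
		m_cnt_hi = hi = (hi ^ 0x80000000u) + (hi >> 31);

	/* bit 63 is garbage by construction; mask it off for display */
	return (((uint64_t)hi << 32) | lo) & ~(1ULL << 63);
}

int main(void)
{
	/* Walk a fake counter across a 32-bit wrap-around. */
	uint32_t samples[] = { 0x7ffffff0u, 0x80000010u, 0xfffffff0u, 0x10u };

	for (unsigned int i = 0; i < sizeof(samples) / sizeof(samples[0]); i++)
		printf("lo=0x%08x -> %llu\n", samples[i],
		       (unsigned long long)cnt32_to_63_demo(samples[i]));
	return 0;
}
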
diff --git a/include/linux/compat.h b/include/linux/compat.h
index cf8d11cad5a..e88f3ecf38b 100644
--- a/include/linux/compat.h
+++ b/include/linux/compat.h
@@ -78,7 +78,6 @@ typedef struct {
compat_sigset_word sig[_COMPAT_NSIG_WORDS];
} compat_sigset_t;
-extern int cp_compat_stat(struct kstat *, struct compat_stat __user *);
extern int get_compat_timespec(struct timespec *, const struct compat_timespec __user *);
extern int put_compat_timespec(const struct timespec *, struct compat_timespec __user *);
@@ -235,6 +234,11 @@ extern int get_compat_itimerspec(struct itimerspec *dst,
extern int put_compat_itimerspec(struct compat_itimerspec __user *dst,
const struct itimerspec *src);
+asmlinkage long compat_sys_gettimeofday(struct compat_timeval __user *tv,
+ struct timezone __user *tz);
+asmlinkage long compat_sys_settimeofday(struct compat_timeval __user *tv,
+ struct timezone __user *tz);
+
asmlinkage long compat_sys_adjtimex(struct compat_timex __user *utp);
extern int compat_printk(const char *fmt, ...);
@@ -248,12 +252,10 @@ extern int compat_ptrace_request(struct task_struct *child,
compat_long_t request,
compat_ulong_t addr, compat_ulong_t data);
-#ifdef __ARCH_WANT_COMPAT_SYS_PTRACE
extern long compat_arch_ptrace(struct task_struct *child, compat_long_t request,
compat_ulong_t addr, compat_ulong_t data);
asmlinkage long compat_sys_ptrace(compat_long_t request, compat_long_t pid,
compat_long_t addr, compat_long_t data);
-#endif /* __ARCH_WANT_COMPAT_SYS_PTRACE */
/*
* epoll (fs/eventpoll.c) compat bits follow ...
diff --git a/include/linux/compiler.h b/include/linux/compiler.h
index 8322141ee48..ea7c6be354b 100644
--- a/include/linux/compiler.h
+++ b/include/linux/compiler.h
@@ -44,6 +44,8 @@ extern void __chk_io_ptr(const volatile void __iomem *);
# error Sorry, your compiler is too old/not recognized.
#endif
+#define notrace __attribute__((no_instrument_function))
+
/* Intel compiler defines __GNUC__. So we will overwrite implementations
* coming from above header files here
*/
@@ -57,8 +59,88 @@ extern void __chk_io_ptr(const volatile void __iomem *);
* specific implementations come from the above header files
*/
-#define likely(x) __builtin_expect(!!(x), 1)
-#define unlikely(x) __builtin_expect(!!(x), 0)
+struct ftrace_branch_data {
+ const char *func;
+ const char *file;
+ unsigned line;
+ union {
+ struct {
+ unsigned long correct;
+ unsigned long incorrect;
+ };
+ struct {
+ unsigned long miss;
+ unsigned long hit;
+ };
+ };
+};
+
+/*
+ * Note: DISABLE_BRANCH_PROFILING can be used by special lowlevel code
+ * to disable branch tracing on a per file basis.
+ */
+#if defined(CONFIG_TRACE_BRANCH_PROFILING) && !defined(DISABLE_BRANCH_PROFILING)
+void ftrace_likely_update(struct ftrace_branch_data *f, int val, int expect);
+
+#define likely_notrace(x) __builtin_expect(!!(x), 1)
+#define unlikely_notrace(x) __builtin_expect(!!(x), 0)
+
+#define __branch_check__(x, expect) ({ \
+ int ______r; \
+ static struct ftrace_branch_data \
+ __attribute__((__aligned__(4))) \
+ __attribute__((section("_ftrace_annotated_branch"))) \
+ ______f = { \
+ .func = __func__, \
+ .file = __FILE__, \
+ .line = __LINE__, \
+ }; \
+ ______r = likely_notrace(x); \
+ ftrace_likely_update(&______f, ______r, expect); \
+ ______r; \
+ })
+
+/*
+ * Using __builtin_constant_p(x) to ignore cases where the return
+ * value is always the same. This idea is taken from a similar patch
+ * written by Daniel Walker.
+ */
+# ifndef likely
+# define likely(x) (__builtin_constant_p(x) ? !!(x) : __branch_check__(x, 1))
+# endif
+# ifndef unlikely
+# define unlikely(x) (__builtin_constant_p(x) ? !!(x) : __branch_check__(x, 0))
+# endif
+
+#ifdef CONFIG_PROFILE_ALL_BRANCHES
+/*
+ * "Define 'is'", Bill Clinton
+ * "Define 'if'", Steven Rostedt
+ */
+#define if(cond) if (__builtin_constant_p((cond)) ? !!(cond) : \
+ ({ \
+ int ______r; \
+ static struct ftrace_branch_data \
+ __attribute__((__aligned__(4))) \
+ __attribute__((section("_ftrace_branch"))) \
+ ______f = { \
+ .func = __func__, \
+ .file = __FILE__, \
+ .line = __LINE__, \
+ }; \
+ ______r = !!(cond); \
+ if (______r) \
+ ______f.hit++; \
+ else \
+ ______f.miss++; \
+ ______r; \
+ }))
+#endif /* CONFIG_PROFILE_ALL_BRANCHES */
+
+#else
+# define likely(x) __builtin_expect(!!(x), 1)
+# define unlikely(x) __builtin_expect(!!(x), 0)
+#endif
/* Optimization barrier */
#ifndef barrier
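
The instrumented likely()/unlikely() above record, per call site, whether the hint matched the real outcome. A stripped-down userspace sketch of that idea, with a single pair of global counters in place of the per-site structs collected in a dedicated section (GCC statement expressions assumed, as in the original):

#include <stdio.h>

static unsigned long demo_correct, demo_incorrect;

/* Evaluate the condition once, count whether the 'likely' hint was
 * right, then still feed the hint to __builtin_expect(). */
#define demo_likely(x) ({					\
	int _r = !!(x);						\
	if (_r)							\
		demo_correct++;					\
	else							\
		demo_incorrect++;				\
	__builtin_expect(_r, 1);				\
})

static int mostly_small(int v)
{
	if (demo_likely(v < 1000))
		return 1;
	return 0;
}

int main(void)
{
	for (int i = 0; i < 1100; i++)
		mostly_small(i);
	printf("'likely' was right %lu times, wrong %lu times\n",
	       demo_correct, demo_incorrect);
	return 0;
}
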
diff --git a/include/linux/console.h b/include/linux/console.h
index 248e6e3b9b7..a67a90cf826 100644
--- a/include/linux/console.h
+++ b/include/linux/console.h
@@ -153,4 +153,8 @@ void vcs_remove_sysfs(struct tty_struct *tty);
#define VESA_HSYNC_SUSPEND 2
#define VESA_POWERDOWN 3
+#ifdef CONFIG_VGA_CONSOLE
+extern bool vgacon_text_force(void);
+#endif
+
#endif /* _LINUX_CONSOLE_H */
diff --git a/include/linux/cpumask.h b/include/linux/cpumask.h
index d3219d73f8e..21e1dd43e52 100644
--- a/include/linux/cpumask.h
+++ b/include/linux/cpumask.h
@@ -5,6 +5,9 @@
* Cpumasks provide a bitmap suitable for representing the
* set of CPU's in a system, one bit position per CPU number.
*
+ * The new cpumask_ ops take a "struct cpumask *"; the old ones
+ * use cpumask_t.
+ *
* See detailed comments in the file linux/bitmap.h describing the
* data type on which these cpumasks are based.
*
@@ -31,7 +34,7 @@
* will span the entire range of NR_CPUS.
* . . . . . . . . . . . . . . . . . . . . . . . . . . . . . . .
*
- * The available cpumask operations are:
+ * The obsolescent cpumask operations are:
*
* void cpu_set(cpu, mask) turn on bit 'cpu' in mask
* void cpu_clear(cpu, mask) turn off bit 'cpu' in mask
@@ -138,7 +141,7 @@
#include <linux/threads.h>
#include <linux/bitmap.h>
-typedef struct { DECLARE_BITMAP(bits, NR_CPUS); } cpumask_t;
+typedef struct cpumask { DECLARE_BITMAP(bits, NR_CPUS); } cpumask_t;
extern cpumask_t _unused_cpumask_arg_;
#define cpu_set(cpu, dst) __cpu_set((cpu), &(dst))
@@ -527,4 +530,556 @@ extern cpumask_t cpu_active_map;
#define for_each_online_cpu(cpu) for_each_cpu_mask_nr((cpu), cpu_online_map)
#define for_each_present_cpu(cpu) for_each_cpu_mask_nr((cpu), cpu_present_map)
+/* These are the new versions of the cpumask operators: passed by pointer.
+ * The older versions will be implemented in terms of these, then deleted. */
+#define cpumask_bits(maskp) ((maskp)->bits)
+
+#if NR_CPUS <= BITS_PER_LONG
+#define CPU_BITS_ALL \
+{ \
+ [BITS_TO_LONGS(NR_CPUS)-1] = CPU_MASK_LAST_WORD \
+}
+
+/* This produces more efficient code. */
+#define nr_cpumask_bits NR_CPUS
+
+#else /* NR_CPUS > BITS_PER_LONG */
+
+#define CPU_BITS_ALL \
+{ \
+ [0 ... BITS_TO_LONGS(NR_CPUS)-2] = ~0UL, \
+ [BITS_TO_LONGS(NR_CPUS)-1] = CPU_MASK_LAST_WORD \
+}
+
+#define nr_cpumask_bits nr_cpu_ids
+#endif /* NR_CPUS > BITS_PER_LONG */
+
+/* verify cpu argument to cpumask_* operators */
+static inline unsigned int cpumask_check(unsigned int cpu)
+{
+#ifdef CONFIG_DEBUG_PER_CPU_MAPS
+ WARN_ON_ONCE(cpu >= nr_cpumask_bits);
+#endif /* CONFIG_DEBUG_PER_CPU_MAPS */
+ return cpu;
+}
+
+#if NR_CPUS == 1
+/* Uniprocessor. Assume all masks are "1". */
+static inline unsigned int cpumask_first(const struct cpumask *srcp)
+{
+ return 0;
+}
+
+/* Valid inputs for n are -1 and 0. */
+static inline unsigned int cpumask_next(int n, const struct cpumask *srcp)
+{
+ return n+1;
+}
+
+static inline unsigned int cpumask_next_zero(int n, const struct cpumask *srcp)
+{
+ return n+1;
+}
+
+static inline unsigned int cpumask_next_and(int n,
+ const struct cpumask *srcp,
+ const struct cpumask *andp)
+{
+ return n+1;
+}
+
+/* cpu must be a valid cpu, ie 0, so there's no other choice. */
+static inline unsigned int cpumask_any_but(const struct cpumask *mask,
+ unsigned int cpu)
+{
+ return 1;
+}
+
+#define for_each_cpu(cpu, mask) \
+ for ((cpu) = 0; (cpu) < 1; (cpu)++, (void)mask)
+#define for_each_cpu_and(cpu, mask, and) \
+ for ((cpu) = 0; (cpu) < 1; (cpu)++, (void)mask, (void)and)
+#else
+/**
+ * cpumask_first - get the first cpu in a cpumask
+ * @srcp: the cpumask pointer
+ *
+ * Returns >= nr_cpu_ids if no cpus set.
+ */
+static inline unsigned int cpumask_first(const struct cpumask *srcp)
+{
+ return find_first_bit(cpumask_bits(srcp), nr_cpumask_bits);
+}
+
+/**
+ * cpumask_next - get the next cpu in a cpumask
+ * @n: the cpu prior to the place to search (ie. return will be > @n)
+ * @srcp: the cpumask pointer
+ *
+ * Returns >= nr_cpu_ids if no further cpus set.
+ */
+static inline unsigned int cpumask_next(int n, const struct cpumask *srcp)
+{
+ /* -1 is a legal arg here. */
+ if (n != -1)
+ cpumask_check(n);
+ return find_next_bit(cpumask_bits(srcp), nr_cpumask_bits, n+1);
+}
+
+/**
+ * cpumask_next_zero - get the next unset cpu in a cpumask
+ * @n: the cpu prior to the place to search (ie. return will be > @n)
+ * @srcp: the cpumask pointer
+ *
+ * Returns >= nr_cpu_ids if no further cpus unset.
+ */
+static inline unsigned int cpumask_next_zero(int n, const struct cpumask *srcp)
+{
+ /* -1 is a legal arg here. */
+ if (n != -1)
+ cpumask_check(n);
+ return find_next_zero_bit(cpumask_bits(srcp), nr_cpumask_bits, n+1);
+}
+
+int cpumask_next_and(int n, const struct cpumask *, const struct cpumask *);
+int cpumask_any_but(const struct cpumask *mask, unsigned int cpu);
+
+/**
+ * for_each_cpu - iterate over every cpu in a mask
+ * @cpu: the (optionally unsigned) integer iterator
+ * @mask: the cpumask pointer
+ *
+ * After the loop, cpu is >= nr_cpu_ids.
+ */
+#define for_each_cpu(cpu, mask) \
+ for ((cpu) = -1; \
+ (cpu) = cpumask_next((cpu), (mask)), \
+ (cpu) < nr_cpu_ids;)
+
+/**
+ * for_each_cpu_and - iterate over every cpu in both masks
+ * @cpu: the (optionally unsigned) integer iterator
+ * @mask: the first cpumask pointer
+ * @and: the second cpumask pointer
+ *
+ * This saves a temporary CPU mask in many places. It is equivalent to:
+ * struct cpumask tmp;
+ * cpumask_and(&tmp, &mask, &and);
+ * for_each_cpu(cpu, &tmp)
+ * ...
+ *
+ * After the loop, cpu is >= nr_cpu_ids.
+ */
+#define for_each_cpu_and(cpu, mask, and) \
+ for ((cpu) = -1; \
+ (cpu) = cpumask_next_and((cpu), (mask), (and)), \
+ (cpu) < nr_cpu_ids;)
+#endif /* SMP */
+
+#define CPU_BITS_NONE \
+{ \
+ [0 ... BITS_TO_LONGS(NR_CPUS)-1] = 0UL \
+}
+
+#define CPU_BITS_CPU0 \
+{ \
+ [0] = 1UL \
+}
+
+/**
+ * cpumask_set_cpu - set a cpu in a cpumask
+ * @cpu: cpu number (< nr_cpu_ids)
+ * @dstp: the cpumask pointer
+ */
+static inline void cpumask_set_cpu(unsigned int cpu, struct cpumask *dstp)
+{
+ set_bit(cpumask_check(cpu), cpumask_bits(dstp));
+}
+
+/**
+ * cpumask_clear_cpu - clear a cpu in a cpumask
+ * @cpu: cpu number (< nr_cpu_ids)
+ * @dstp: the cpumask pointer
+ */
+static inline void cpumask_clear_cpu(int cpu, struct cpumask *dstp)
+{
+ clear_bit(cpumask_check(cpu), cpumask_bits(dstp));
+}
+
+/**
+ * cpumask_test_cpu - test for a cpu in a cpumask
+ * @cpu: cpu number (< nr_cpu_ids)
+ * @cpumask: the cpumask pointer
+ *
+ * No static inline type checking - see Subtlety (1) above.
+ */
+#define cpumask_test_cpu(cpu, cpumask) \
+ test_bit(cpumask_check(cpu), (cpumask)->bits)
+
+/**
+ * cpumask_test_and_set_cpu - atomically test and set a cpu in a cpumask
+ * @cpu: cpu number (< nr_cpu_ids)
+ * @cpumask: the cpumask pointer
+ *
+ * test_and_set_bit wrapper for cpumasks.
+ */
+static inline int cpumask_test_and_set_cpu(int cpu, struct cpumask *cpumask)
+{
+ return test_and_set_bit(cpumask_check(cpu), cpumask_bits(cpumask));
+}
+
+/**
+ * cpumask_setall - set all cpus (< nr_cpu_ids) in a cpumask
+ * @dstp: the cpumask pointer
+ */
+static inline void cpumask_setall(struct cpumask *dstp)
+{
+ bitmap_fill(cpumask_bits(dstp), nr_cpumask_bits);
+}
+
+/**
+ * cpumask_clear - clear all cpus (< nr_cpu_ids) in a cpumask
+ * @dstp: the cpumask pointer
+ */
+static inline void cpumask_clear(struct cpumask *dstp)
+{
+ bitmap_zero(cpumask_bits(dstp), nr_cpumask_bits);
+}
+
+/**
+ * cpumask_and - *dstp = *src1p & *src2p
+ * @dstp: the cpumask result
+ * @src1p: the first input
+ * @src2p: the second input
+ */
+static inline void cpumask_and(struct cpumask *dstp,
+ const struct cpumask *src1p,
+ const struct cpumask *src2p)
+{
+ bitmap_and(cpumask_bits(dstp), cpumask_bits(src1p),
+ cpumask_bits(src2p), nr_cpumask_bits);
+}
+
+/**
+ * cpumask_or - *dstp = *src1p | *src2p
+ * @dstp: the cpumask result
+ * @src1p: the first input
+ * @src2p: the second input
+ */
+static inline void cpumask_or(struct cpumask *dstp, const struct cpumask *src1p,
+ const struct cpumask *src2p)
+{
+ bitmap_or(cpumask_bits(dstp), cpumask_bits(src1p),
+ cpumask_bits(src2p), nr_cpumask_bits);
+}
+
+/**
+ * cpumask_xor - *dstp = *src1p ^ *src2p
+ * @dstp: the cpumask result
+ * @src1p: the first input
+ * @src2p: the second input
+ */
+static inline void cpumask_xor(struct cpumask *dstp,
+ const struct cpumask *src1p,
+ const struct cpumask *src2p)
+{
+ bitmap_xor(cpumask_bits(dstp), cpumask_bits(src1p),
+ cpumask_bits(src2p), nr_cpumask_bits);
+}
+
+/**
+ * cpumask_andnot - *dstp = *src1p & ~*src2p
+ * @dstp: the cpumask result
+ * @src1p: the first input
+ * @src2p: the second input
+ */
+static inline void cpumask_andnot(struct cpumask *dstp,
+ const struct cpumask *src1p,
+ const struct cpumask *src2p)
+{
+ bitmap_andnot(cpumask_bits(dstp), cpumask_bits(src1p),
+ cpumask_bits(src2p), nr_cpumask_bits);
+}
+
+/**
+ * cpumask_complement - *dstp = ~*srcp
+ * @dstp: the cpumask result
+ * @srcp: the input to invert
+ */
+static inline void cpumask_complement(struct cpumask *dstp,
+ const struct cpumask *srcp)
+{
+ bitmap_complement(cpumask_bits(dstp), cpumask_bits(srcp),
+ nr_cpumask_bits);
+}
+
+/**
+ * cpumask_equal - *src1p == *src2p
+ * @src1p: the first input
+ * @src2p: the second input
+ */
+static inline bool cpumask_equal(const struct cpumask *src1p,
+ const struct cpumask *src2p)
+{
+ return bitmap_equal(cpumask_bits(src1p), cpumask_bits(src2p),
+ nr_cpumask_bits);
+}
+
+/**
+ * cpumask_intersects - (*src1p & *src2p) != 0
+ * @src1p: the first input
+ * @src2p: the second input
+ */
+static inline bool cpumask_intersects(const struct cpumask *src1p,
+ const struct cpumask *src2p)
+{
+ return bitmap_intersects(cpumask_bits(src1p), cpumask_bits(src2p),
+ nr_cpumask_bits);
+}
+
+/**
+ * cpumask_subset - (*src1p & ~*src2p) == 0
+ * @src1p: the first input
+ * @src2p: the second input
+ */
+static inline int cpumask_subset(const struct cpumask *src1p,
+ const struct cpumask *src2p)
+{
+ return bitmap_subset(cpumask_bits(src1p), cpumask_bits(src2p),
+ nr_cpumask_bits);
+}
+
+/**
+ * cpumask_empty - *srcp == 0
+ * @srcp: the cpumask to check for all cpus < nr_cpu_ids being clear.
+ */
+static inline bool cpumask_empty(const struct cpumask *srcp)
+{
+ return bitmap_empty(cpumask_bits(srcp), nr_cpumask_bits);
+}
+
+/**
+ * cpumask_full - *srcp == 0xFFFFFFFF...
+ * @srcp: the cpumask to check for all cpus < nr_cpu_ids being set.
+ */
+static inline bool cpumask_full(const struct cpumask *srcp)
+{
+ return bitmap_full(cpumask_bits(srcp), nr_cpumask_bits);
+}
+
+/**
+ * cpumask_weight - Count of bits in *srcp
+ * @srcp: the cpumask to count bits (< nr_cpu_ids) in.
+ */
+static inline unsigned int cpumask_weight(const struct cpumask *srcp)
+{
+ return bitmap_weight(cpumask_bits(srcp), nr_cpumask_bits);
+}
+
+/**
+ * cpumask_shift_right - *dstp = *srcp >> n
+ * @dstp: the cpumask result
+ * @srcp: the input to shift
+ * @n: the number of bits to shift by
+ */
+static inline void cpumask_shift_right(struct cpumask *dstp,
+ const struct cpumask *srcp, int n)
+{
+ bitmap_shift_right(cpumask_bits(dstp), cpumask_bits(srcp), n,
+ nr_cpumask_bits);
+}
+
+/**
+ * cpumask_shift_left - *dstp = *srcp << n
+ * @dstp: the cpumask result
+ * @srcp: the input to shift
+ * @n: the number of bits to shift by
+ */
+static inline void cpumask_shift_left(struct cpumask *dstp,
+ const struct cpumask *srcp, int n)
+{
+ bitmap_shift_left(cpumask_bits(dstp), cpumask_bits(srcp), n,
+ nr_cpumask_bits);
+}
+
+/**
+ * cpumask_copy - *dstp = *srcp
+ * @dstp: the result
+ * @srcp: the input cpumask
+ */
+static inline void cpumask_copy(struct cpumask *dstp,
+ const struct cpumask *srcp)
+{
+ bitmap_copy(cpumask_bits(dstp), cpumask_bits(srcp), nr_cpumask_bits);
+}
+
+/**
+ * cpumask_any - pick a "random" cpu from *srcp
+ * @srcp: the input cpumask
+ *
+ * Returns >= nr_cpu_ids if no cpus set.
+ */
+#define cpumask_any(srcp) cpumask_first(srcp)
+
+/**
+ * cpumask_first_and - return the first cpu from *src1p & *src2p
+ * @src1p: the first input
+ * @src2p: the second input
+ *
+ * Returns >= nr_cpu_ids if no cpus set in both. See also cpumask_next_and().
+ */
+#define cpumask_first_and(src1p, src2p) cpumask_next_and(-1, (src1p), (src2p))
+
+/**
+ * cpumask_any_and - pick a "random" cpu from *mask1 & *mask2
+ * @mask1: the first input cpumask
+ * @mask2: the second input cpumask
+ *
+ * Returns >= nr_cpu_ids if no cpus set.
+ */
+#define cpumask_any_and(mask1, mask2) cpumask_first_and((mask1), (mask2))
+
+/**
+ * cpumask_of - the cpumask containing just a given cpu
+ * @cpu: the cpu (< nr_cpu_ids)
+ */
+#define cpumask_of(cpu) (get_cpu_mask(cpu))
+
+/**
+ * to_cpumask - convert an NR_CPUS bitmap to a struct cpumask *
+ * @bitmap: the bitmap
+ *
+ * There are a few places where cpumask_var_t isn't appropriate and
+ * static cpumasks must be used (eg. very early boot), yet we don't
+ * expose the definition of 'struct cpumask'.
+ *
+ * This does the conversion, and can be used as a constant initializer.
+ */
+#define to_cpumask(bitmap) \
+ ((struct cpumask *)(1 ? (bitmap) \
+ : (void *)sizeof(__check_is_bitmap(bitmap))))
+
+static inline int __check_is_bitmap(const unsigned long *bitmap)
+{
+ return 1;
+}
+
+/**
+ * cpumask_size - size to allocate for a 'struct cpumask' in bytes
+ *
+ * This will eventually be a runtime variable, depending on nr_cpu_ids.
+ */
+static inline size_t cpumask_size(void)
+{
+ /* FIXME: Once all cpumask assignments are eliminated, this
+ * can be nr_cpumask_bits */
+ return BITS_TO_LONGS(NR_CPUS) * sizeof(long);
+}
+
+/*
+ * cpumask_var_t: struct cpumask for stack usage.
+ *
+ * Oh, the wicked games we play! In order to make kernel coding a
+ * little more difficult, we typedef cpumask_var_t to an array or a
+ * pointer: doing &mask on an array is a noop, so it still works.
+ *
+ * ie.
+ * cpumask_var_t tmpmask;
+ * if (!alloc_cpumask_var(&tmpmask, GFP_KERNEL))
+ * return -ENOMEM;
+ *
+ * ... use 'tmpmask' like a normal struct cpumask * ...
+ *
+ * free_cpumask_var(tmpmask);
+ */
+#ifdef CONFIG_CPUMASK_OFFSTACK
+typedef struct cpumask *cpumask_var_t;
+
+bool alloc_cpumask_var(cpumask_var_t *mask, gfp_t flags);
+void alloc_bootmem_cpumask_var(cpumask_var_t *mask);
+void free_cpumask_var(cpumask_var_t mask);
+void free_bootmem_cpumask_var(cpumask_var_t mask);
+
+#else
+typedef struct cpumask cpumask_var_t[1];
+
+static inline bool alloc_cpumask_var(cpumask_var_t *mask, gfp_t flags)
+{
+ return true;
+}
+
+static inline void alloc_bootmem_cpumask_var(cpumask_var_t *mask)
+{
+}
+
+static inline void free_cpumask_var(cpumask_var_t mask)
+{
+}
+
+static inline void free_bootmem_cpumask_var(cpumask_var_t mask)
+{
+}
+#endif /* CONFIG_CPUMASK_OFFSTACK */
+
+/* The pointer versions of the maps, these will become the primary versions. */
+#define cpu_possible_mask ((const struct cpumask *)&cpu_possible_map)
+#define cpu_online_mask ((const struct cpumask *)&cpu_online_map)
+#define cpu_present_mask ((const struct cpumask *)&cpu_present_map)
+#define cpu_active_mask ((const struct cpumask *)&cpu_active_map)
+
+/* It's common to want to use cpu_all_mask in struct member initializers,
+ * so it has to refer to an address rather than a pointer. */
+extern const DECLARE_BITMAP(cpu_all_bits, NR_CPUS);
+#define cpu_all_mask to_cpumask(cpu_all_bits)
+
+/* First bits of cpu_bit_bitmap are in fact unset. */
+#define cpu_none_mask to_cpumask(cpu_bit_bitmap[0])
+
+/* Wrappers for arch boot code to manipulate normally-constant masks */
+static inline void set_cpu_possible(unsigned int cpu, bool possible)
+{
+ if (possible)
+ cpumask_set_cpu(cpu, &cpu_possible_map);
+ else
+ cpumask_clear_cpu(cpu, &cpu_possible_map);
+}
+
+static inline void set_cpu_present(unsigned int cpu, bool present)
+{
+ if (present)
+ cpumask_set_cpu(cpu, &cpu_present_map);
+ else
+ cpumask_clear_cpu(cpu, &cpu_present_map);
+}
+
+static inline void set_cpu_online(unsigned int cpu, bool online)
+{
+ if (online)
+ cpumask_set_cpu(cpu, &cpu_online_map);
+ else
+ cpumask_clear_cpu(cpu, &cpu_online_map);
+}
+
+static inline void set_cpu_active(unsigned int cpu, bool active)
+{
+ if (active)
+ cpumask_set_cpu(cpu, &cpu_active_map);
+ else
+ cpumask_clear_cpu(cpu, &cpu_active_map);
+}
+
+static inline void init_cpu_present(const struct cpumask *src)
+{
+ cpumask_copy(&cpu_present_map, src);
+}
+
+static inline void init_cpu_possible(const struct cpumask *src)
+{
+ cpumask_copy(&cpu_possible_map, src);
+}
+
+static inline void init_cpu_online(const struct cpumask *src)
+{
+ cpumask_copy(&cpu_online_map, src);
+}
#endif /* __LINUX_CPUMASK_H */
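
The new pointer-based iterators such as for_each_cpu() walk set bits with cpumask_next() and stop once the value reaches nr_cpu_ids. A small userspace model of that contract, with one unsigned long standing in for struct cpumask (names are invented for the sketch):

#include <stdio.h>

#define DEMO_NR_CPUS 16

/* Return the next set bit strictly after 'n', or DEMO_NR_CPUS if none,
 * mirroring the ">= nr_cpu_ids means done" convention above. */
static int demo_next_cpu(int n, unsigned long mask)
{
	for (int cpu = n + 1; cpu < DEMO_NR_CPUS; cpu++)
		if (mask & (1UL << cpu))
			return cpu;
	return DEMO_NR_CPUS;
}

#define demo_for_each_cpu(cpu, mask)				\
	for ((cpu) = -1;					\
	     (cpu) = demo_next_cpu((cpu), (mask)),		\
	     (cpu) < DEMO_NR_CPUS;)

int main(void)
{
	unsigned long online = 0x000d;	/* cpus 0, 2 and 3 */
	int cpu;

	demo_for_each_cpu(cpu, online)
		printf("cpu %d is online\n", cpu);
	return 0;
}
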
diff --git a/include/linux/cpuset.h b/include/linux/cpuset.h
index 2691926fb50..8e540d32c9f 100644
--- a/include/linux/cpuset.h
+++ b/include/linux/cpuset.h
@@ -74,8 +74,6 @@ static inline int cpuset_do_slab_mem_spread(void)
return current->flags & PF_SPREAD_SLAB;
}
-extern void cpuset_track_online_nodes(void);
-
extern int current_cpuset_is_being_rebound(void);
extern void rebuild_sched_domains(void);
@@ -151,8 +149,6 @@ static inline int cpuset_do_slab_mem_spread(void)
return 0;
}
-static inline void cpuset_track_online_nodes(void) {}
-
static inline int current_cpuset_is_being_rebound(void)
{
return 0;
diff --git a/include/linux/crash_dump.h b/include/linux/crash_dump.h
index 025e4f57510..2dac064d835 100644
--- a/include/linux/crash_dump.h
+++ b/include/linux/crash_dump.h
@@ -8,17 +8,12 @@
#include <linux/proc_fs.h>
#define ELFCORE_ADDR_MAX (-1ULL)
+#define ELFCORE_ADDR_ERR (-2ULL)
-#ifdef CONFIG_PROC_VMCORE
extern unsigned long long elfcorehdr_addr;
-#else
-static const unsigned long long elfcorehdr_addr = ELFCORE_ADDR_MAX;
-#endif
extern ssize_t copy_oldmem_page(unsigned long, char *, size_t,
unsigned long, int);
-extern const struct file_operations proc_vmcore_operations;
-extern struct proc_dir_entry *proc_vmcore;
/* Architecture code defines this if there are other possible ELF
* machine types, e.g. on bi-arch capable hardware. */
@@ -28,10 +23,43 @@ extern struct proc_dir_entry *proc_vmcore;
#define vmcore_elf_check_arch(x) (elf_check_arch(x) || vmcore_elf_check_arch_cross(x))
+/*
+ * is_kdump_kernel() checks whether this kernel is booting after a panic of
+ * the previous kernel. This is determined by checking whether the previous
+ * kernel has passed the ELF core header address on the command line.
+ *
+ * This is not just a test of whether CONFIG_CRASH_DUMP is enabled. It
+ * returns 1 only if CONFIG_CRASH_DUMP=y and the kernel is booting after a
+ * panic of the previous kernel.
+ */
+
static inline int is_kdump_kernel(void)
{
return (elfcorehdr_addr != ELFCORE_ADDR_MAX) ? 1 : 0;
}
+
+/* is_vmcore_usable() checks if the kernel is booting after a panic and
+ * the vmcore region is usable.
+ *
+ * This makes use of the fact that due to alignment -2ULL is not
+ * a valid pointer, much in the vein of IS_ERR(), except
+ * dealing directly with an unsigned long long rather than a pointer.
+ */
+
+static inline int is_vmcore_usable(void)
+{
+ return is_kdump_kernel() && elfcorehdr_addr != ELFCORE_ADDR_ERR ? 1 : 0;
+}
+
+/* vmcore_unusable() marks the vmcore as unusable,
+ * without disturbing the logic of is_kdump_kernel()
+ */
+
+static inline void vmcore_unusable(void)
+{
+ if (is_kdump_kernel())
+ elfcorehdr_addr = ELFCORE_ADDR_ERR;
+}
#else /* !CONFIG_CRASH_DUMP */
static inline int is_kdump_kernel(void) { return 0; }
#endif /* CONFIG_CRASH_DUMP */
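
is_kdump_kernel(), is_vmcore_usable() and vmcore_unusable() above all hinge on two sentinel values stored in elfcorehdr_addr. A compact userspace sketch of that sentinel scheme (variable and function names are invented):

#include <stdio.h>

#define DEMO_ADDR_MAX (-1ULL)	/* no core header was passed */
#define DEMO_ADDR_ERR (-2ULL)	/* header passed but unusable */

static unsigned long long demo_elfcorehdr = DEMO_ADDR_MAX;

static int demo_is_kdump(void)
{
	return demo_elfcorehdr != DEMO_ADDR_MAX;
}

static int demo_vmcore_usable(void)
{
	return demo_is_kdump() && demo_elfcorehdr != DEMO_ADDR_ERR;
}

int main(void)
{
	printf("boot without header: kdump=%d usable=%d\n",
	       demo_is_kdump(), demo_vmcore_usable());

	demo_elfcorehdr = 0x1000000;	/* pretend a header was passed */
	printf("header passed:       kdump=%d usable=%d\n",
	       demo_is_kdump(), demo_vmcore_usable());

	demo_elfcorehdr = DEMO_ADDR_ERR;	/* mark it unusable */
	printf("header rejected:     kdump=%d usable=%d\n",
	       demo_is_kdump(), demo_vmcore_usable());
	return 0;
}
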
diff --git a/include/linux/crc32c.h b/include/linux/crc32c.h
index 508f512e5a2..bd8b44d96bd 100644
--- a/include/linux/crc32c.h
+++ b/include/linux/crc32c.h
@@ -3,9 +3,9 @@
#include <linux/types.h>
-extern u32 crc32c_le(u32 crc, unsigned char const *address, size_t length);
-extern u32 crc32c_be(u32 crc, unsigned char const *address, size_t length);
+extern u32 crc32c(u32 crc, const void *address, unsigned int length);
-#define crc32c(seed, data, length) crc32c_le(seed, (unsigned char const *)data, length)
+/* This macro exists for backwards-compatibility. */
+#define crc32c_le crc32c
#endif /* _LINUX_CRC32C_H */
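
The consolidated crc32c() interface above computes the Castagnoli CRC. For reference, a bit-at-a-time userspace implementation of that polynomial (reflected form 0x82F63B78); this only illustrates the checksum itself, not the kernel's implementation:

#include <stdio.h>
#include <stdint.h>
#include <string.h>

static uint32_t crc32c_demo(uint32_t crc, const void *data, size_t len)
{
	const uint8_t *p = data;

	while (len--) {
		crc ^= *p++;
		for (int i = 0; i < 8; i++)
			crc = (crc >> 1) ^ ((crc & 1) ? 0x82F63B78u : 0);
	}
	return crc;
}

int main(void)
{
	const char *msg = "123456789";
	/* Seed with ~0 and invert at the end, as CRC-32C users usually do;
	 * the standard check value for "123456789" is 0xe3069283. */
	uint32_t crc = crc32c_demo(0xFFFFFFFFu, msg, strlen(msg)) ^ 0xFFFFFFFFu;

	printf("crc32c(\"%s\") = 0x%08x\n", msg, crc);
	return 0;
}
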
diff --git a/include/linux/cred.h b/include/linux/cred.h
index b69222cc1fd..3282ee4318e 100644
--- a/include/linux/cred.h
+++ b/include/linux/cred.h
@@ -1,4 +1,4 @@
-/* Credentials management
+/* Credentials management - see Documentation/credentials.txt
*
* Copyright (C) 2008 Red Hat, Inc. All Rights Reserved.
* Written by David Howells (dhowells@redhat.com)
@@ -12,39 +12,335 @@
#ifndef _LINUX_CRED_H
#define _LINUX_CRED_H
-#define get_current_user() (get_uid(current->user))
+#include <linux/capability.h>
+#include <linux/key.h>
+#include <asm/atomic.h>
-#define task_uid(task) ((task)->uid)
-#define task_gid(task) ((task)->gid)
-#define task_euid(task) ((task)->euid)
-#define task_egid(task) ((task)->egid)
+struct user_struct;
+struct cred;
+struct inode;
-#define current_uid() (current->uid)
-#define current_gid() (current->gid)
-#define current_euid() (current->euid)
-#define current_egid() (current->egid)
-#define current_suid() (current->suid)
-#define current_sgid() (current->sgid)
-#define current_fsuid() (current->fsuid)
-#define current_fsgid() (current->fsgid)
-#define current_cap() (current->cap_effective)
+/*
+ * COW Supplementary groups list
+ */
+#define NGROUPS_SMALL 32
+#define NGROUPS_PER_BLOCK ((unsigned int)(PAGE_SIZE / sizeof(gid_t)))
+
+struct group_info {
+ atomic_t usage;
+ int ngroups;
+ int nblocks;
+ gid_t small_block[NGROUPS_SMALL];
+ gid_t *blocks[0];
+};
+
+/**
+ * get_group_info - Get a reference to a group info structure
+ * @group_info: The group info to reference
+ *
+ * This gets a reference to a set of supplementary groups.
+ *
+ * If the caller is accessing a task's credentials, they must hold the RCU read
+ * lock when reading.
+ */
+static inline struct group_info *get_group_info(struct group_info *gi)
+{
+ atomic_inc(&gi->usage);
+ return gi;
+}
+
+/**
+ * put_group_info - Release a reference to a group info structure
+ * @group_info: The group info to release
+ */
+#define put_group_info(group_info) \
+do { \
+ if (atomic_dec_and_test(&(group_info)->usage)) \
+ groups_free(group_info); \
+} while (0)
+
+extern struct group_info *groups_alloc(int);
+extern struct group_info init_groups;
+extern void groups_free(struct group_info *);
+extern int set_current_groups(struct group_info *);
+extern int set_groups(struct cred *, struct group_info *);
+extern int groups_search(const struct group_info *, gid_t);
+
+/* access the groups "array" with this macro */
+#define GROUP_AT(gi, i) \
+ ((gi)->blocks[(i) / NGROUPS_PER_BLOCK][(i) % NGROUPS_PER_BLOCK])
+
+extern int in_group_p(gid_t);
+extern int in_egroup_p(gid_t);
+
+/*
+ * The common credentials for a thread group
+ * - shared by CLONE_THREAD
+ */
+#ifdef CONFIG_KEYS
+struct thread_group_cred {
+ atomic_t usage;
+ pid_t tgid; /* thread group process ID */
+ spinlock_t lock;
+ struct key *session_keyring; /* keyring inherited over fork */
+ struct key *process_keyring; /* keyring private to this process */
+ struct rcu_head rcu; /* RCU deletion hook */
+};
+#endif
+
+/*
+ * The security context of a task
+ *
+ * The parts of the context break down into two categories:
+ *
+ * (1) The objective context of a task. These parts are used when some other
+ * task is attempting to affect this one.
+ *
+ * (2) The subjective context. These details are used when the task is acting
+ * upon another object, be that a file, a task, a key or whatever.
+ *
+ * Note that some members of this structure belong to both categories - the
+ * LSM security pointer for instance.
+ *
+ * A task has two security pointers. task->real_cred points to the objective
+ * context that defines that task's actual details. The objective part of this
+ * context is used whenever that task is acted upon.
+ *
+ * task->cred points to the subjective context that defines the details of how
+ * that task is going to act upon another object. This may be overridden
+ * temporarily to point to another security context, but normally points to the
+ * same context as task->real_cred.
+ */
+struct cred {
+ atomic_t usage;
+ uid_t uid; /* real UID of the task */
+ gid_t gid; /* real GID of the task */
+ uid_t suid; /* saved UID of the task */
+ gid_t sgid; /* saved GID of the task */
+ uid_t euid; /* effective UID of the task */
+ gid_t egid; /* effective GID of the task */
+ uid_t fsuid; /* UID for VFS ops */
+ gid_t fsgid; /* GID for VFS ops */
+ unsigned securebits; /* SUID-less security management */
+ kernel_cap_t cap_inheritable; /* caps our children can inherit */
+ kernel_cap_t cap_permitted; /* caps we're permitted */
+ kernel_cap_t cap_effective; /* caps we can actually use */
+ kernel_cap_t cap_bset; /* capability bounding set */
+#ifdef CONFIG_KEYS
+ unsigned char jit_keyring; /* default keyring to attach requested
+ * keys to */
+ struct key *thread_keyring; /* keyring private to this thread */
+ struct key *request_key_auth; /* assumed request_key authority */
+ struct thread_group_cred *tgcred; /* thread-group shared credentials */
+#endif
+#ifdef CONFIG_SECURITY
+ void *security; /* subjective LSM security */
+#endif
+ struct user_struct *user; /* real user ID subscription */
+ struct group_info *group_info; /* supplementary groups for euid/fsgid */
+ struct rcu_head rcu; /* RCU deletion hook */
+};
+
+extern void __put_cred(struct cred *);
+extern int copy_creds(struct task_struct *, unsigned long);
+extern struct cred *prepare_creds(void);
+extern struct cred *prepare_exec_creds(void);
+extern struct cred *prepare_usermodehelper_creds(void);
+extern int commit_creds(struct cred *);
+extern void abort_creds(struct cred *);
+extern const struct cred *override_creds(const struct cred *);
+extern void revert_creds(const struct cred *);
+extern struct cred *prepare_kernel_cred(struct task_struct *);
+extern int change_create_files_as(struct cred *, struct inode *);
+extern int set_security_override(struct cred *, u32);
+extern int set_security_override_from_ctx(struct cred *, const char *);
+extern int set_create_files_as(struct cred *, struct inode *);
+extern void __init cred_init(void);
+
+/**
+ * get_new_cred - Get a reference on a new set of credentials
+ * @cred: The new credentials to reference
+ *
+ * Get a reference on the specified set of new credentials. The caller must
+ * release the reference.
+ */
+static inline struct cred *get_new_cred(struct cred *cred)
+{
+ atomic_inc(&cred->usage);
+ return cred;
+}
+
+/**
+ * get_cred - Get a reference on a set of credentials
+ * @cred: The credentials to reference
+ *
+ * Get a reference on the specified set of credentials. The caller must
+ * release the reference.
+ *
+ * This is used to deal with a committed set of credentials. Although the
+ * pointer is const, this will temporarily discard the const and increment the
+ * usage count. The purpose of this is to attempt to catch at compile time the
+ * accidental alteration of a set of credentials that should be considered
+ * immutable.
+ */
+static inline const struct cred *get_cred(const struct cred *cred)
+{
+ return get_new_cred((struct cred *) cred);
+}
+
+/**
+ * put_cred - Release a reference to a set of credentials
+ * @cred: The credentials to release
+ *
+ * Release a reference to a set of credentials, deleting them when the last ref
+ * is released.
+ *
+ * This takes a const pointer to a set of credentials because the credentials
+ * on task_struct are attached by const pointers to prevent accidental
+ * alteration of otherwise immutable credential sets.
+ */
+static inline void put_cred(const struct cred *_cred)
+{
+ struct cred *cred = (struct cred *) _cred;
+
+ BUG_ON(atomic_read(&(cred)->usage) <= 0);
+ if (atomic_dec_and_test(&(cred)->usage))
+ __put_cred(cred);
+}
+
+/**
+ * current_cred - Access the current task's subjective credentials
+ *
+ * Access the subjective credentials of the current task.
+ */
+#define current_cred() \
+ (current->cred)
+
+/**
+ * __task_cred - Access a task's objective credentials
+ * @task: The task to query
+ *
+ * Access the objective credentials of a task. The caller must hold the RCU
+ * readlock.
+ *
+ * The caller must make sure task doesn't go away, either by holding a ref on
+ * task or by holding tasklist_lock to prevent it from being unlinked.
+ */
+#define __task_cred(task) \
+ ((const struct cred *)(rcu_dereference((task)->real_cred)))
+
+/**
+ * get_task_cred - Get another task's objective credentials
+ * @task: The task to query
+ *
+ * Get the objective credentials of a task, pinning them so that they can't go
+ * away. Accessing a task's credentials directly is not permitted.
+ *
+ * The caller must make sure task doesn't go away, either by holding a ref on
+ * task or by holding tasklist_lock to prevent it from being unlinked.
+ */
+#define get_task_cred(task) \
+({ \
+ struct cred *__cred; \
+ rcu_read_lock(); \
+ __cred = (struct cred *) __task_cred((task)); \
+ get_cred(__cred); \
+ rcu_read_unlock(); \
+ __cred; \
+})
+
+/**
+ * get_current_cred - Get the current task's subjective credentials
+ *
+ * Get the subjective credentials of the current task, pinning them so that
+ * they can't go away. Accessing the current task's credentials directly is
+ * not permitted.
+ */
+#define get_current_cred() \
+ (get_cred(current_cred()))
+
+/**
+ * get_current_user - Get the current task's user_struct
+ *
+ * Get the user record of the current task, pinning it so that it can't go
+ * away.
+ */
+#define get_current_user() \
+({ \
+ struct user_struct *__u; \
+ struct cred *__cred; \
+ __cred = (struct cred *) current_cred(); \
+ __u = get_uid(__cred->user); \
+ __u; \
+})
+
+/**
+ * get_current_groups - Get the current task's supplementary group list
+ *
+ * Get the supplementary group list of the current task, pinning it so that it
+ * can't go away.
+ */
+#define get_current_groups() \
+({ \
+ struct group_info *__groups; \
+ struct cred *__cred; \
+ __cred = (struct cred *) current_cred(); \
+ __groups = get_group_info(__cred->group_info); \
+ __groups; \
+})
+
+#define task_cred_xxx(task, xxx) \
+({ \
+ __typeof__(((struct cred *)NULL)->xxx) ___val; \
+ rcu_read_lock(); \
+ ___val = __task_cred((task))->xxx; \
+ rcu_read_unlock(); \
+ ___val; \
+})
+
+#define task_uid(task) (task_cred_xxx((task), uid))
+#define task_euid(task) (task_cred_xxx((task), euid))
+
+#define current_cred_xxx(xxx) \
+({ \
+ current->cred->xxx; \
+})
+
+#define current_uid() (current_cred_xxx(uid))
+#define current_gid() (current_cred_xxx(gid))
+#define current_euid() (current_cred_xxx(euid))
+#define current_egid() (current_cred_xxx(egid))
+#define current_suid() (current_cred_xxx(suid))
+#define current_sgid() (current_cred_xxx(sgid))
+#define current_fsuid() (current_cred_xxx(fsuid))
+#define current_fsgid() (current_cred_xxx(fsgid))
+#define current_cap() (current_cred_xxx(cap_effective))
+#define current_user() (current_cred_xxx(user))
+#define current_user_ns() (current_cred_xxx(user)->user_ns)
+#define current_security() (current_cred_xxx(security))
#define current_uid_gid(_uid, _gid) \
do { \
- *(_uid) = current->uid; \
- *(_gid) = current->gid; \
+ const struct cred *__cred; \
+ __cred = current_cred(); \
+ *(_uid) = __cred->uid; \
+ *(_gid) = __cred->gid; \
} while(0)
-#define current_euid_egid(_uid, _gid) \
+#define current_euid_egid(_euid, _egid) \
do { \
- *(_uid) = current->euid; \
- *(_gid) = current->egid; \
+ const struct cred *__cred; \
+ __cred = current_cred(); \
+ *(_euid) = __cred->euid; \
+ *(_egid) = __cred->egid; \
} while(0)
-#define current_fsuid_fsgid(_uid, _gid) \
+#define current_fsuid_fsgid(_fsuid, _fsgid) \
do { \
- *(_uid) = current->fsuid; \
- *(_gid) = current->fsgid; \
+ const struct cred *__cred; \
+ __cred = current_cred(); \
+ *(_fsuid) = __cred->fsuid; \
+ *(_fsgid) = __cred->fsgid; \
} while(0)
#endif /* _LINUX_CRED_H */
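
The get_cred()/put_cred() kernel-doc above describes a reference-counted, effectively immutable credential object that is freed when its last user drops it. A compact userspace sketch of that lifetime pattern, with C11 atomics standing in for atomic_t (all names below are made up):

#include <stdio.h>
#include <stdlib.h>
#include <stdatomic.h>

struct demo_cred {
	atomic_int usage;
	unsigned int uid, gid;
};

static struct demo_cred *demo_cred_new(unsigned int uid, unsigned int gid)
{
	struct demo_cred *c = malloc(sizeof(*c));

	if (!c)
		exit(1);
	atomic_init(&c->usage, 1);
	c->uid = uid;
	c->gid = gid;
	return c;
}

static struct demo_cred *demo_get_cred(struct demo_cred *c)
{
	atomic_fetch_add(&c->usage, 1);	/* like get_cred() */
	return c;
}

static void demo_put_cred(struct demo_cred *c)
{
	/* free on the transition to zero, like __put_cred() */
	if (atomic_fetch_sub(&c->usage, 1) == 1) {
		printf("last reference dropped, freeing uid=%u\n", c->uid);
		free(c);
	}
}

int main(void)
{
	struct demo_cred *cred = demo_cred_new(1000, 1000);
	struct demo_cred *ref = demo_get_cred(cred);	/* second user */

	demo_put_cred(cred);	/* first user done, one reference left */
	demo_put_cred(ref);	/* last user done, object freed here */
	return 0;
}
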
diff --git a/include/linux/crypto.h b/include/linux/crypto.h
index 3d2317e4af2..3bacd71509f 100644
--- a/include/linux/crypto.h
+++ b/include/linux/crypto.h
@@ -36,7 +36,8 @@
#define CRYPTO_ALG_TYPE_ABLKCIPHER 0x00000005
#define CRYPTO_ALG_TYPE_GIVCIPHER 0x00000006
#define CRYPTO_ALG_TYPE_DIGEST 0x00000008
-#define CRYPTO_ALG_TYPE_HASH 0x00000009
+#define CRYPTO_ALG_TYPE_HASH 0x00000008
+#define CRYPTO_ALG_TYPE_SHASH 0x00000009
#define CRYPTO_ALG_TYPE_AHASH 0x0000000a
#define CRYPTO_ALG_TYPE_RNG 0x0000000c
@@ -220,6 +221,7 @@ struct ablkcipher_alg {
struct ahash_alg {
int (*init)(struct ahash_request *req);
+ int (*reinit)(struct ahash_request *req);
int (*update)(struct ahash_request *req);
int (*final)(struct ahash_request *req);
int (*digest)(struct ahash_request *req);
@@ -480,6 +482,8 @@ struct crypto_tfm {
struct compress_tfm compress;
struct rng_tfm rng;
} crt_u;
+
+ void (*exit)(struct crypto_tfm *tfm);
struct crypto_alg *__crt_alg;
@@ -544,7 +548,9 @@ struct crypto_attr_u32 {
* Transform user interface.
*/
-struct crypto_tfm *crypto_alloc_tfm(const char *alg_name, u32 tfm_flags);
+struct crypto_tfm *crypto_alloc_tfm(const char *alg_name,
+ const struct crypto_type *frontend,
+ u32 type, u32 mask);
struct crypto_tfm *crypto_alloc_base(const char *alg_name, u32 type, u32 mask);
void crypto_free_tfm(struct crypto_tfm *tfm);
diff --git a/include/linux/dcache.h b/include/linux/dcache.h
index efba1de629a..a37359d0bad 100644
--- a/include/linux/dcache.h
+++ b/include/linux/dcache.h
@@ -228,9 +228,9 @@ extern void d_delete(struct dentry *);
/* allocate/de-allocate */
extern struct dentry * d_alloc(struct dentry *, const struct qstr *);
-extern struct dentry * d_alloc_anon(struct inode *);
extern struct dentry * d_splice_alias(struct inode *, struct dentry *);
extern struct dentry * d_add_ci(struct dentry *, struct inode *, struct qstr *);
+extern struct dentry * d_obtain_alias(struct inode *);
extern void shrink_dcache_sb(struct super_block *);
extern void shrink_dcache_parent(struct dentry *);
extern void shrink_dcache_for_umount(struct super_block *);
@@ -287,6 +287,7 @@ static inline struct dentry *d_add_unique(struct dentry *entry, struct inode *in
/* used for rename() and baskets */
extern void d_move(struct dentry *, struct dentry *);
+extern struct dentry *d_ancestor(struct dentry *, struct dentry *);
/* appendix may either be NULL or be used for transname suffixes */
extern struct dentry * d_lookup(struct dentry *, struct qstr *);
diff --git a/include/linux/dcbnl.h b/include/linux/dcbnl.h
new file mode 100644
index 00000000000..b0ef274e003
--- /dev/null
+++ b/include/linux/dcbnl.h
@@ -0,0 +1,340 @@
+/*
+ * Copyright (c) 2008, Intel Corporation.
+ *
+ * This program is free software; you can redistribute it and/or modify it
+ * under the terms and conditions of the GNU General Public License,
+ * version 2, as published by the Free Software Foundation.
+ *
+ * This program is distributed in the hope it will be useful, but WITHOUT
+ * ANY WARRANTY; without even the implied warranty of MERCHANTABILITY or
+ * FITNESS FOR A PARTICULAR PURPOSE. See the GNU General Public License for
+ * more details.
+ *
+ * You should have received a copy of the GNU General Public License along with
+ * this program; if not, write to the Free Software Foundation, Inc., 59 Temple
+ * Place - Suite 330, Boston, MA 02111-1307 USA.
+ *
+ * Author: Lucy Liu <lucy.liu@intel.com>
+ */
+
+#ifndef __LINUX_DCBNL_H__
+#define __LINUX_DCBNL_H__
+
+#define DCB_PROTO_VERSION 1
+
+struct dcbmsg {
+ unsigned char dcb_family;
+ __u8 cmd;
+ __u16 dcb_pad;
+};
+
+/**
+ * enum dcbnl_commands - supported DCB commands
+ *
+ * @DCB_CMD_UNDEFINED: unspecified command to catch errors
+ * @DCB_CMD_GSTATE: request the state of DCB in the device
+ * @DCB_CMD_SSTATE: set the state of DCB in the device
+ * @DCB_CMD_PGTX_GCFG: request the priority group configuration for Tx
+ * @DCB_CMD_PGTX_SCFG: set the priority group configuration for Tx
+ * @DCB_CMD_PGRX_GCFG: request the priority group configuration for Rx
+ * @DCB_CMD_PGRX_SCFG: set the priority group configuration for Rx
+ * @DCB_CMD_PFC_GCFG: request the priority flow control configuration
+ * @DCB_CMD_PFC_SCFG: set the priority flow control configuration
+ * @DCB_CMD_SET_ALL: apply all changes to the underlying device
+ * @DCB_CMD_GPERM_HWADDR: get the permanent MAC address of the underlying
+ * device. Only useful when using bonding.
+ * @DCB_CMD_GCAP: request the DCB capabilities of the device
+ * @DCB_CMD_GNUMTCS: get the number of traffic classes currently supported
+ * @DCB_CMD_SNUMTCS: set the number of traffic classes
+ * @DCB_CMD_BCN_GCFG: get backward congestion notification configuration
+ * @DCB_CMD_BCN_SCFG: set backward congestion notification configuration
+ */
+enum dcbnl_commands {
+ DCB_CMD_UNDEFINED,
+
+ DCB_CMD_GSTATE,
+ DCB_CMD_SSTATE,
+
+ DCB_CMD_PGTX_GCFG,
+ DCB_CMD_PGTX_SCFG,
+ DCB_CMD_PGRX_GCFG,
+ DCB_CMD_PGRX_SCFG,
+
+ DCB_CMD_PFC_GCFG,
+ DCB_CMD_PFC_SCFG,
+
+ DCB_CMD_SET_ALL,
+
+ DCB_CMD_GPERM_HWADDR,
+
+ DCB_CMD_GCAP,
+
+ DCB_CMD_GNUMTCS,
+ DCB_CMD_SNUMTCS,
+
+ DCB_CMD_PFC_GSTATE,
+ DCB_CMD_PFC_SSTATE,
+
+ DCB_CMD_BCN_GCFG,
+ DCB_CMD_BCN_SCFG,
+
+ __DCB_CMD_ENUM_MAX,
+ DCB_CMD_MAX = __DCB_CMD_ENUM_MAX - 1,
+};
+
+/**
+ * enum dcbnl_attrs - DCB top-level netlink attributes
+ *
+ * @DCB_ATTR_UNDEFINED: unspecified attribute to catch errors
+ * @DCB_ATTR_IFNAME: interface name of the underlying device (NLA_STRING)
+ * @DCB_ATTR_STATE: enable state of DCB in the device (NLA_U8)
+ * @DCB_ATTR_PFC_STATE: enable state of PFC in the device (NLA_U8)
+ * @DCB_ATTR_PFC_CFG: priority flow control configuration (NLA_NESTED)
+ * @DCB_ATTR_NUM_TC: number of traffic classes supported in the device (NLA_U8)
+ * @DCB_ATTR_PG_CFG: priority group configuration (NLA_NESTED)
+ * @DCB_ATTR_SET_ALL: bool to commit changes to hardware or not (NLA_U8)
+ * @DCB_ATTR_PERM_HWADDR: MAC address of the physical device (NLA_NESTED)
+ * @DCB_ATTR_CAP: DCB capabilities of the device (NLA_NESTED)
+ * @DCB_ATTR_NUMTCS: number of traffic classes supported (NLA_NESTED)
+ * @DCB_ATTR_BCN: backward congestion notification configuration (NLA_NESTED)
+ */
+enum dcbnl_attrs {
+ DCB_ATTR_UNDEFINED,
+
+ DCB_ATTR_IFNAME,
+ DCB_ATTR_STATE,
+ DCB_ATTR_PFC_STATE,
+ DCB_ATTR_PFC_CFG,
+ DCB_ATTR_NUM_TC,
+ DCB_ATTR_PG_CFG,
+ DCB_ATTR_SET_ALL,
+ DCB_ATTR_PERM_HWADDR,
+ DCB_ATTR_CAP,
+ DCB_ATTR_NUMTCS,
+ DCB_ATTR_BCN,
+
+ __DCB_ATTR_ENUM_MAX,
+ DCB_ATTR_MAX = __DCB_ATTR_ENUM_MAX - 1,
+};
+
+/**
+ * enum dcbnl_pfc_attrs - DCB Priority Flow Control user priority nested attrs
+ *
+ * @DCB_PFC_UP_ATTR_UNDEFINED: unspecified attribute to catch errors
+ * @DCB_PFC_UP_ATTR_0: Priority Flow Control value for User Priority 0 (NLA_U8)
+ * @DCB_PFC_UP_ATTR_1: Priority Flow Control value for User Priority 1 (NLA_U8)
+ * @DCB_PFC_UP_ATTR_2: Priority Flow Control value for User Priority 2 (NLA_U8)
+ * @DCB_PFC_UP_ATTR_3: Priority Flow Control value for User Priority 3 (NLA_U8)
+ * @DCB_PFC_UP_ATTR_4: Priority Flow Control value for User Priority 4 (NLA_U8)
+ * @DCB_PFC_UP_ATTR_5: Priority Flow Control value for User Priority 5 (NLA_U8)
+ * @DCB_PFC_UP_ATTR_6: Priority Flow Control value for User Priority 6 (NLA_U8)
+ * @DCB_PFC_UP_ATTR_7: Priority Flow Control value for User Priority 7 (NLA_U8)
+ * @DCB_PFC_UP_ATTR_MAX: highest attribute number currently defined
+ * @DCB_PFC_UP_ATTR_ALL: apply to all priority flow control attrs (NLA_FLAG)
+ *
+ */
+enum dcbnl_pfc_up_attrs {
+ DCB_PFC_UP_ATTR_UNDEFINED,
+
+ DCB_PFC_UP_ATTR_0,
+ DCB_PFC_UP_ATTR_1,
+ DCB_PFC_UP_ATTR_2,
+ DCB_PFC_UP_ATTR_3,
+ DCB_PFC_UP_ATTR_4,
+ DCB_PFC_UP_ATTR_5,
+ DCB_PFC_UP_ATTR_6,
+ DCB_PFC_UP_ATTR_7,
+ DCB_PFC_UP_ATTR_ALL,
+
+ __DCB_PFC_UP_ATTR_ENUM_MAX,
+ DCB_PFC_UP_ATTR_MAX = __DCB_PFC_UP_ATTR_ENUM_MAX - 1,
+};
+
+/**
+ * enum dcbnl_pg_attrs - DCB Priority Group attributes
+ *
+ * @DCB_PG_ATTR_UNDEFINED: unspecified attribute to catch errors
+ * @DCB_PG_ATTR_TC_0: Priority Group Traffic Class 0 configuration (NLA_NESTED)
+ * @DCB_PG_ATTR_TC_1: Priority Group Traffic Class 1 configuration (NLA_NESTED)
+ * @DCB_PG_ATTR_TC_2: Priority Group Traffic Class 2 configuration (NLA_NESTED)
+ * @DCB_PG_ATTR_TC_3: Priority Group Traffic Class 3 configuration (NLA_NESTED)
+ * @DCB_PG_ATTR_TC_4: Priority Group Traffic Class 4 configuration (NLA_NESTED)
+ * @DCB_PG_ATTR_TC_5: Priority Group Traffic Class 5 configuration (NLA_NESTED)
+ * @DCB_PG_ATTR_TC_6: Priority Group Traffic Class 6 configuration (NLA_NESTED)
+ * @DCB_PG_ATTR_TC_7: Priority Group Traffic Class 7 configuration (NLA_NESTED)
+ * @DCB_PG_ATTR_TC_MAX: highest attribute number currently defined
+ * @DCB_PG_ATTR_TC_ALL: apply to all traffic classes (NLA_NESTED)
+ * @DCB_PG_ATTR_BW_ID_0: Percent of link bandwidth for Priority Group 0 (NLA_U8)
+ * @DCB_PG_ATTR_BW_ID_1: Percent of link bandwidth for Priority Group 1 (NLA_U8)
+ * @DCB_PG_ATTR_BW_ID_2: Percent of link bandwidth for Priority Group 2 (NLA_U8)
+ * @DCB_PG_ATTR_BW_ID_3: Percent of link bandwidth for Priority Group 3 (NLA_U8)
+ * @DCB_PG_ATTR_BW_ID_4: Percent of link bandwidth for Priority Group 4 (NLA_U8)
+ * @DCB_PG_ATTR_BW_ID_5: Percent of link bandwidth for Priority Group 5 (NLA_U8)
+ * @DCB_PG_ATTR_BW_ID_6: Percent of link bandwidth for Priority Group 6 (NLA_U8)
+ * @DCB_PG_ATTR_BW_ID_7: Percent of link bandwidth for Priority Group 7 (NLA_U8)
+ * @DCB_PG_ATTR_BW_ID_MAX: highest attribute number currently defined
+ * @DCB_PG_ATTR_BW_ID_ALL: apply to all priority groups (NLA_FLAG)
+ *
+ */
+enum dcbnl_pg_attrs {
+ DCB_PG_ATTR_UNDEFINED,
+
+ DCB_PG_ATTR_TC_0,
+ DCB_PG_ATTR_TC_1,
+ DCB_PG_ATTR_TC_2,
+ DCB_PG_ATTR_TC_3,
+ DCB_PG_ATTR_TC_4,
+ DCB_PG_ATTR_TC_5,
+ DCB_PG_ATTR_TC_6,
+ DCB_PG_ATTR_TC_7,
+ DCB_PG_ATTR_TC_MAX,
+ DCB_PG_ATTR_TC_ALL,
+
+ DCB_PG_ATTR_BW_ID_0,
+ DCB_PG_ATTR_BW_ID_1,
+ DCB_PG_ATTR_BW_ID_2,
+ DCB_PG_ATTR_BW_ID_3,
+ DCB_PG_ATTR_BW_ID_4,
+ DCB_PG_ATTR_BW_ID_5,
+ DCB_PG_ATTR_BW_ID_6,
+ DCB_PG_ATTR_BW_ID_7,
+ DCB_PG_ATTR_BW_ID_MAX,
+ DCB_PG_ATTR_BW_ID_ALL,
+
+ __DCB_PG_ATTR_ENUM_MAX,
+ DCB_PG_ATTR_MAX = __DCB_PG_ATTR_ENUM_MAX - 1,
+};
+
+/**
+ * enum dcbnl_tc_attrs - DCB Traffic Class attributes
+ *
+ * @DCB_TC_ATTR_PARAM_UNDEFINED: unspecified attribute to catch errors
+ * @DCB_TC_ATTR_PARAM_PGID: (NLA_U8) Priority group the traffic class belongs to
+ * Valid values are: 0-7
+ * @DCB_TC_ATTR_PARAM_UP_MAPPING: (NLA_U8) Traffic class to user priority map
+ * Some devices may not support changing the
+ * user priority map of a TC.
+ * @DCB_TC_ATTR_PARAM_STRICT_PRIO: (NLA_U8) Strict priority setting
+ * 0 - none
+ * 1 - group strict
+ * 2 - link strict
+ * @DCB_TC_ATTR_PARAM_BW_PCT: optional - (NLA_U8) If supported by the device and
+ * not configured to use link strict priority,
+ * this is the percentage of bandwidth of the
+ * priority group this traffic class belongs to
+ * @DCB_TC_ATTR_PARAM_ALL: (NLA_FLAG) all traffic class parameters
+ *
+ */
+enum dcbnl_tc_attrs {
+ DCB_TC_ATTR_PARAM_UNDEFINED,
+
+ DCB_TC_ATTR_PARAM_PGID,
+ DCB_TC_ATTR_PARAM_UP_MAPPING,
+ DCB_TC_ATTR_PARAM_STRICT_PRIO,
+ DCB_TC_ATTR_PARAM_BW_PCT,
+ DCB_TC_ATTR_PARAM_ALL,
+
+ __DCB_TC_ATTR_PARAM_ENUM_MAX,
+ DCB_TC_ATTR_PARAM_MAX = __DCB_TC_ATTR_PARAM_ENUM_MAX - 1,
+};
+
+/**
+ * enum dcbnl_cap_attrs - DCB Capability attributes
+ *
+ * @DCB_CAP_ATTR_UNDEFINED: unspecified attribute to catch errors
+ * @DCB_CAP_ATTR_ALL: (NLA_FLAG) all capability parameters
+ * @DCB_CAP_ATTR_PG: (NLA_U8) device supports Priority Groups
+ * @DCB_CAP_ATTR_PFC: (NLA_U8) device supports Priority Flow Control
+ * @DCB_CAP_ATTR_UP2TC: (NLA_U8) device supports user priority to
+ * traffic class mapping
+ * @DCB_CAP_ATTR_PG_TCS: (NLA_U8) bitmap where each bit represents a
+ * number of traffic classes the device
+ * can be configured to use for Priority Groups
+ * @DCB_CAP_ATTR_PFC_TCS: (NLA_U8) bitmap where each bit represents a
+ * number of traffic classes the device can be
+ * configured to use for Priority Flow Control
+ * @DCB_CAP_ATTR_GSP: (NLA_U8) device supports group strict priority
+ * @DCB_CAP_ATTR_BCN: (NLA_U8) device supports Backwards Congestion
+ * Notification
+ */
+enum dcbnl_cap_attrs {
+ DCB_CAP_ATTR_UNDEFINED,
+ DCB_CAP_ATTR_ALL,
+ DCB_CAP_ATTR_PG,
+ DCB_CAP_ATTR_PFC,
+ DCB_CAP_ATTR_UP2TC,
+ DCB_CAP_ATTR_PG_TCS,
+ DCB_CAP_ATTR_PFC_TCS,
+ DCB_CAP_ATTR_GSP,
+ DCB_CAP_ATTR_BCN,
+
+ __DCB_CAP_ATTR_ENUM_MAX,
+ DCB_CAP_ATTR_MAX = __DCB_CAP_ATTR_ENUM_MAX - 1,
+};
+
+/**
+ * enum dcbnl_numtcs_attrs - number of traffic classes
+ *
+ * @DCB_NUMTCS_ATTR_UNDEFINED: unspecified attribute to catch errors
+ * @DCB_NUMTCS_ATTR_ALL: (NLA_FLAG) all traffic class attributes
+ * @DCB_NUMTCS_ATTR_PG: (NLA_U8) number of traffic classes used for
+ * priority groups
+ * @DCB_NUMTCS_ATTR_PFC: (NLA_U8) number of traffic classes which can
+ * support priority flow control
+ */
+enum dcbnl_numtcs_attrs {
+ DCB_NUMTCS_ATTR_UNDEFINED,
+ DCB_NUMTCS_ATTR_ALL,
+ DCB_NUMTCS_ATTR_PG,
+ DCB_NUMTCS_ATTR_PFC,
+
+ __DCB_NUMTCS_ATTR_ENUM_MAX,
+ DCB_NUMTCS_ATTR_MAX = __DCB_NUMTCS_ATTR_ENUM_MAX - 1,
+};
+
+enum dcbnl_bcn_attrs {
+ DCB_BCN_ATTR_UNDEFINED = 0,
+
+ DCB_BCN_ATTR_RP_0,
+ DCB_BCN_ATTR_RP_1,
+ DCB_BCN_ATTR_RP_2,
+ DCB_BCN_ATTR_RP_3,
+ DCB_BCN_ATTR_RP_4,
+ DCB_BCN_ATTR_RP_5,
+ DCB_BCN_ATTR_RP_6,
+ DCB_BCN_ATTR_RP_7,
+ DCB_BCN_ATTR_RP_ALL,
+
+ DCB_BCN_ATTR_BCNA_0,
+ DCB_BCN_ATTR_BCNA_1,
+ DCB_BCN_ATTR_ALPHA,
+ DCB_BCN_ATTR_BETA,
+ DCB_BCN_ATTR_GD,
+ DCB_BCN_ATTR_GI,
+ DCB_BCN_ATTR_TMAX,
+ DCB_BCN_ATTR_TD,
+ DCB_BCN_ATTR_RMIN,
+ DCB_BCN_ATTR_W,
+ DCB_BCN_ATTR_RD,
+ DCB_BCN_ATTR_RU,
+ DCB_BCN_ATTR_WRTT,
+ DCB_BCN_ATTR_RI,
+ DCB_BCN_ATTR_C,
+ DCB_BCN_ATTR_ALL,
+
+ __DCB_BCN_ATTR_ENUM_MAX,
+ DCB_BCN_ATTR_MAX = __DCB_BCN_ATTR_ENUM_MAX - 1,
+};
+
+/**
+ * enum dcb_general_attr_values - general DCB attribute values
+ *
+ * @DCB_ATTR_VALUE_UNDEFINED: value used to indicate an attribute is not supported
+ *
+ */
+enum dcb_general_attr_values {
+ DCB_ATTR_VALUE_UNDEFINED = 0xff
+};
+
+
+#endif /* __LINUX_DCBNL_H__ */
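The command and attribute enums above only define the netlink wire layout. As a rough illustration (not part of the patch), the sketch below packs a struct dcbmsg plus a DCB_ATTR_IFNAME string attribute into a buffer from userspace; the surrounding netlink socket handling, message header and buffer sizing are all assumed to exist elsewhere.

#include <string.h>
#include <sys/socket.h>        /* AF_UNSPEC */
#include <linux/netlink.h>     /* struct nlattr, NLA_HDRLEN, NLA_ALIGN() */
#include <linux/dcbnl.h>

/* Fill buf with a DCB_CMD_GSTATE request for ifname; returns bytes used.
 * buf is assumed large enough and becomes the payload of a netlink message. */
static int dcb_fill_gstate(void *buf, const char *ifname)
{
        struct dcbmsg *dcb = buf;
        struct nlattr *nla = (struct nlattr *)((char *)buf + NLA_ALIGN(sizeof(*dcb)));
        int len = strlen(ifname) + 1;

        dcb->dcb_family = AF_UNSPEC;
        dcb->cmd = DCB_CMD_GSTATE;
        dcb->dcb_pad = 0;

        /* DCB_ATTR_IFNAME is an NLA_STRING naming the device to query. */
        nla->nla_type = DCB_ATTR_IFNAME;
        nla->nla_len = NLA_HDRLEN + len;
        memcpy((char *)nla + NLA_HDRLEN, ifname, len);

        return NLA_ALIGN(sizeof(*dcb)) + NLA_ALIGN(nla->nla_len);
}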
diff --git a/include/linux/dccp.h b/include/linux/dccp.h
index 6080449fbec..61734e27abb 100644
--- a/include/linux/dccp.h
+++ b/include/linux/dccp.h
@@ -168,6 +168,8 @@ enum {
DCCPO_MIN_CCID_SPECIFIC = 128,
DCCPO_MAX_CCID_SPECIFIC = 255,
};
+/* maximum size of a single TLV-encoded DCCP option (sans type/len bytes) */
+#define DCCP_SINGLE_OPT_MAXLEN 253
/* DCCP CCIDS */
enum {
@@ -176,29 +178,23 @@ enum {
};
/* DCCP features (RFC 4340 section 6.4) */
-enum {
+enum dccp_feature_numbers {
DCCPF_RESERVED = 0,
DCCPF_CCID = 1,
- DCCPF_SHORT_SEQNOS = 2, /* XXX: not yet implemented */
+ DCCPF_SHORT_SEQNOS = 2,
DCCPF_SEQUENCE_WINDOW = 3,
- DCCPF_ECN_INCAPABLE = 4, /* XXX: not yet implemented */
+ DCCPF_ECN_INCAPABLE = 4,
DCCPF_ACK_RATIO = 5,
DCCPF_SEND_ACK_VECTOR = 6,
DCCPF_SEND_NDP_COUNT = 7,
DCCPF_MIN_CSUM_COVER = 8,
- DCCPF_DATA_CHECKSUM = 9, /* XXX: not yet implemented */
+ DCCPF_DATA_CHECKSUM = 9,
/* 10-127 reserved */
DCCPF_MIN_CCID_SPECIFIC = 128,
+ DCCPF_SEND_LEV_RATE = 192, /* RFC 4342, sec. 8.4 */
DCCPF_MAX_CCID_SPECIFIC = 255,
};
-/* this structure is argument to DCCP_SOCKOPT_CHANGE_X */
-struct dccp_so_feat {
- __u8 dccpsf_feat;
- __u8 __user *dccpsf_val;
- __u8 dccpsf_len;
-};
-
/* DCCP socket options */
#define DCCP_SOCKOPT_PACKET_SIZE 1 /* XXX deprecated, without effect */
#define DCCP_SOCKOPT_SERVICE 2
@@ -208,6 +204,10 @@ struct dccp_so_feat {
#define DCCP_SOCKOPT_SERVER_TIMEWAIT 6
#define DCCP_SOCKOPT_SEND_CSCOV 10
#define DCCP_SOCKOPT_RECV_CSCOV 11
+#define DCCP_SOCKOPT_AVAILABLE_CCIDS 12
+#define DCCP_SOCKOPT_CCID 13
+#define DCCP_SOCKOPT_TX_CCID 14
+#define DCCP_SOCKOPT_RX_CCID 15
#define DCCP_SOCKOPT_CCID_RX_INFO 128
#define DCCP_SOCKOPT_CCID_TX_INFO 192
@@ -360,7 +360,6 @@ static inline unsigned int dccp_hdr_len(const struct sk_buff *skb)
#define DCCPF_INITIAL_SEQUENCE_WINDOW 100
#define DCCPF_INITIAL_ACK_RATIO 2
#define DCCPF_INITIAL_CCID DCCPC_CCID2
-#define DCCPF_INITIAL_SEND_ACK_VECTOR 1
/* FIXME: for now we're default to 1 but it should really be 0 */
#define DCCPF_INITIAL_SEND_NDP_COUNT 1
@@ -370,20 +369,11 @@ static inline unsigned int dccp_hdr_len(const struct sk_buff *skb)
* Will be used to pass the state from dccp_request_sock to dccp_sock.
*
* @dccpms_sequence_window - Sequence Window Feature (section 7.5.2)
- * @dccpms_ccid - Congestion Control Id (CCID) (section 10)
- * @dccpms_send_ack_vector - Send Ack Vector Feature (section 11.5)
- * @dccpms_send_ndp_count - Send NDP Count Feature (7.7.2)
- * @dccpms_ack_ratio - Ack Ratio Feature (section 11.3)
* @dccpms_pending - List of features being negotiated
* @dccpms_conf -
*/
struct dccp_minisock {
__u64 dccpms_sequence_window;
- __u8 dccpms_rx_ccid;
- __u8 dccpms_tx_ccid;
- __u8 dccpms_send_ack_vector;
- __u8 dccpms_send_ndp_count;
- __u8 dccpms_ack_ratio;
struct list_head dccpms_pending;
struct list_head dccpms_conf;
};
@@ -411,6 +401,7 @@ extern void dccp_minisock_init(struct dccp_minisock *dmsk);
* @dreq_iss: initial sequence number sent on the Response (RFC 4340, 7.1)
* @dreq_isr: initial sequence number received on the Request
* @dreq_service: service code present on the Request (there is just one)
+ * @dreq_featneg: feature negotiation options for this connection
* The following two fields are analogous to the ones in dccp_sock:
* @dreq_timestamp_echo: last received timestamp to echo (13.1)
* @dreq_timestamp_echo: the time of receiving the last @dreq_timestamp_echo
@@ -420,6 +411,7 @@ struct dccp_request_sock {
__u64 dreq_iss;
__u64 dreq_isr;
__be32 dreq_service;
+ struct list_head dreq_featneg;
__u32 dreq_timestamp_echo;
__u32 dreq_timestamp_time;
};
@@ -493,10 +485,12 @@ struct dccp_ackvec;
* @dccps_r_ack_ratio - feature-remote Ack Ratio
* @dccps_pcslen - sender partial checksum coverage (via sockopt)
* @dccps_pcrlen - receiver partial checksum coverage (via sockopt)
+ * @dccps_send_ndp_count - local Send NDP Count feature (7.7.2)
* @dccps_ndp_count - number of Non Data Packets since last data packet
* @dccps_mss_cache - current value of MSS (path MTU minus header sizes)
* @dccps_rate_last - timestamp for rate-limiting DCCP-Sync (RFC 4340, 7.5.4)
* @dccps_minisock - associated minisock (accessed via dccp_msk)
+ * @dccps_featneg - tracks feature-negotiation state (mostly during handshake)
* @dccps_hc_rx_ackvec - rx half connection ack vector
* @dccps_hc_rx_ccid - CCID used for the receiver (or receiving half-connection)
* @dccps_hc_tx_ccid - CCID used for the sender (or sending half-connection)
@@ -529,11 +523,13 @@ struct dccp_sock {
__u32 dccps_timestamp_time;
__u16 dccps_l_ack_ratio;
__u16 dccps_r_ack_ratio;
- __u16 dccps_pcslen;
- __u16 dccps_pcrlen;
+ __u8 dccps_pcslen:4;
+ __u8 dccps_pcrlen:4;
+ __u8 dccps_send_ndp_count:1;
__u64 dccps_ndp_count:48;
unsigned long dccps_rate_last;
struct dccp_minisock dccps_minisock;
+ struct list_head dccps_featneg;
struct dccp_ackvec *dccps_hc_rx_ackvec;
struct ccid *dccps_hc_rx_ccid;
struct ccid *dccps_hc_tx_ccid;
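With the old struct dccp_so_feat interface removed, CCIDs are now selected with plain setsockopt() calls. A minimal userspace sketch, assuming SOL_DCCP, SOCK_DCCP and IPPROTO_DCCP come from the standard socket headers (they are not part of this hunk) and omitting error handling:

#include <sys/socket.h>
#include <netinet/in.h>
#include <linux/dccp.h>

/* Open a DCCP socket and request CCID-2 for both half-connections.
 * DCCP_SOCKOPT_TX_CCID / DCCP_SOCKOPT_RX_CCID could be used instead to
 * set the sender and receiver CCIDs separately. */
static int dccp_socket_with_ccid2(void)
{
        int fd = socket(AF_INET, SOCK_DCCP, IPPROTO_DCCP);
        unsigned char ccid = 2;                 /* DCCPC_CCID2 */

        if (fd >= 0)
                setsockopt(fd, SOL_DCCP, DCCP_SOCKOPT_CCID, &ccid, sizeof(ccid));
        return fd;
}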
diff --git a/include/linux/debug_locks.h b/include/linux/debug_locks.h
index 4aaa4afb1cb..096476f1fb3 100644
--- a/include/linux/debug_locks.h
+++ b/include/linux/debug_locks.h
@@ -17,7 +17,7 @@ extern int debug_locks_off(void);
({ \
int __ret = 0; \
\
- if (unlikely(c)) { \
+ if (!oops_in_progress && unlikely(c)) { \
if (debug_locks_off() && !debug_locks_silent) \
WARN_ON(1); \
__ret = 1; \
diff --git a/include/linux/device-mapper.h b/include/linux/device-mapper.h
index 08d783592b7..c17fd334e57 100644
--- a/include/linux/device-mapper.h
+++ b/include/linux/device-mapper.h
@@ -69,8 +69,7 @@ typedef int (*dm_status_fn) (struct dm_target *ti, status_type_t status_type,
typedef int (*dm_message_fn) (struct dm_target *ti, unsigned argc, char **argv);
-typedef int (*dm_ioctl_fn) (struct dm_target *ti, struct inode *inode,
- struct file *filp, unsigned int cmd,
+typedef int (*dm_ioctl_fn) (struct dm_target *ti, unsigned int cmd,
unsigned long arg);
typedef int (*dm_merge_fn) (struct dm_target *ti, struct bvec_merge_data *bvm,
@@ -85,7 +84,7 @@ void dm_set_device_limits(struct dm_target *ti, struct block_device *bdev);
struct dm_dev {
struct block_device *bdev;
- int mode;
+ fmode_t mode;
char name[16];
};
@@ -95,7 +94,7 @@ struct dm_dev {
* FIXME: too many arguments.
*/
int dm_get_device(struct dm_target *ti, const char *path, sector_t start,
- sector_t len, int mode, struct dm_dev **result);
+ sector_t len, fmode_t mode, struct dm_dev **result);
void dm_put_device(struct dm_target *ti, struct dm_dev *d);
/*
@@ -223,7 +222,7 @@ int dm_set_geometry(struct mapped_device *md, struct hd_geometry *geo);
/*
* First create an empty table.
*/
-int dm_table_create(struct dm_table **result, int mode,
+int dm_table_create(struct dm_table **result, fmode_t mode,
unsigned num_targets, struct mapped_device *md);
/*
@@ -254,7 +253,7 @@ void dm_table_put(struct dm_table *t);
*/
sector_t dm_table_get_size(struct dm_table *t);
unsigned int dm_table_get_num_targets(struct dm_table *t);
-int dm_table_get_mode(struct dm_table *t);
+fmode_t dm_table_get_mode(struct dm_table *t);
struct mapped_device *dm_table_get_md(struct dm_table *t);
/*
@@ -354,6 +353,9 @@ void *dm_vcalloc(unsigned long nmemb, unsigned long elem_size);
*/
#define dm_round_up(n, sz) (dm_div_up((n), (sz)) * (sz))
+#define dm_array_too_big(fixed, obj, num) \
+ ((num) > (UINT_MAX - (fixed)) / (obj))
+
static inline sector_t to_sector(unsigned long n)
{
return (n >> SECTOR_SHIFT);
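The new dm_array_too_big() macro is an overflow guard for targets that allocate a fixed header plus a per-object array. A hypothetical use (the struct names are invented for illustration):

#include <linux/device-mapper.h>

struct example_header { unsigned int nr_regions; };
struct example_region { unsigned long state; };

/* Refuse region counts whose total allocation size would overflow an
 * unsigned int, then allocate the per-region array with dm_vcalloc(). */
static void *example_alloc_regions(unsigned int nr_regions)
{
        if (dm_array_too_big(sizeof(struct example_header),
                             sizeof(struct example_region), nr_regions))
                return NULL;

        return dm_vcalloc(nr_regions, sizeof(struct example_region));
}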
diff --git a/include/linux/device.h b/include/linux/device.h
index 246937c9cbc..1a3686d15f9 100644
--- a/include/linux/device.h
+++ b/include/linux/device.h
@@ -90,6 +90,9 @@ int __must_check bus_for_each_drv(struct bus_type *bus,
struct device_driver *start, void *data,
int (*fn)(struct device_driver *, void *));
+void bus_sort_breadthfirst(struct bus_type *bus,
+ int (*compare)(const struct device *a,
+ const struct device *b));
/*
* Bus notifiers: Get notified of addition/removal of devices
* and binding/unbinding of drivers to devices.
@@ -447,7 +450,7 @@ static inline void set_dev_node(struct device *dev, int node)
}
#endif
-static inline void *dev_get_drvdata(struct device *dev)
+static inline void *dev_get_drvdata(const struct device *dev)
{
return dev->driver_data;
}
@@ -502,7 +505,6 @@ extern struct device *device_create(struct class *cls, struct device *parent,
dev_t devt, void *drvdata,
const char *fmt, ...)
__attribute__((format(printf, 5, 6)));
-#define device_create_drvdata device_create
extern void device_destroy(struct class *cls, dev_t devt);
/*
@@ -551,7 +553,11 @@ extern const char *dev_driver_string(const struct device *dev);
#define dev_info(dev, format, arg...) \
dev_printk(KERN_INFO , dev , format , ## arg)
-#ifdef DEBUG
+#if defined(CONFIG_DYNAMIC_PRINTK_DEBUG)
+#define dev_dbg(dev, format, ...) do { \
+ dynamic_dev_dbg(dev, format, ##__VA_ARGS__); \
+ } while (0)
+#elif defined(DEBUG)
#define dev_dbg(dev, format, arg...) \
dev_printk(KERN_DEBUG , dev , format , ## arg)
#else
@@ -567,6 +573,14 @@ extern const char *dev_driver_string(const struct device *dev);
({ if (0) dev_printk(KERN_DEBUG, dev, format, ##arg); 0; })
#endif
+/*
+ * dev_WARN() acts like dev_printk(), but with the key difference
+ * of using a WARN/WARN_ON to get the message out, including the
+ * file/line information and a backtrace.
+ */
+#define dev_WARN(dev, format, arg...) \
+ WARN(1, "Device: %s\n" format, dev_driver_string(dev), ## arg)
+
/* Create alias, so I can be autoloaded. */
#define MODULE_ALIAS_CHARDEV(major,minor) \
MODULE_ALIAS("char-major-" __stringify(major) "-" __stringify(minor))
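A hypothetical driver fragment showing the new dev_WARN() next to dev_dbg(); the probe routine and its sanity check are invented for illustration.

#include <linux/device.h>

static int example_probe(struct device *dev)
{
        dev_dbg(dev, "probing\n");      /* dynamic or compiled out, per config */

        if (!dev->parent) {
                /* dev_WARN() adds the driver/device prefix plus file/line
                 * information and a backtrace, unlike a bare dev_err(). */
                dev_WARN(dev, "probed without a parent device\n");
        }
        return 0;
}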
diff --git a/include/linux/dm-region-hash.h b/include/linux/dm-region-hash.h
new file mode 100644
index 00000000000..a9e652a4137
--- /dev/null
+++ b/include/linux/dm-region-hash.h
@@ -0,0 +1,104 @@
+/*
+ * Copyright (C) 2003 Sistina Software Limited.
+ * Copyright (C) 2004-2008 Red Hat, Inc. All rights reserved.
+ *
+ * Device-Mapper dirty region hash interface.
+ *
+ * This file is released under the GPL.
+ */
+
+#ifndef DM_REGION_HASH_H
+#define DM_REGION_HASH_H
+
+#include <linux/dm-dirty-log.h>
+
+/*-----------------------------------------------------------------
+ * Region hash
+ *----------------------------------------------------------------*/
+struct dm_region_hash;
+struct dm_region;
+
+/*
+ * States a region can have.
+ */
+enum dm_rh_region_states {
+ DM_RH_CLEAN = 0x01, /* No writes in flight. */
+ DM_RH_DIRTY = 0x02, /* Writes in flight. */
+ DM_RH_NOSYNC = 0x04, /* Out of sync. */
+ DM_RH_RECOVERING = 0x08, /* Under resynchronization. */
+};
+
+/*
+ * Region hash create/destroy.
+ */
+struct bio_list;
+struct dm_region_hash *dm_region_hash_create(
+ void *context, void (*dispatch_bios)(void *context,
+ struct bio_list *bios),
+ void (*wakeup_workers)(void *context),
+ void (*wakeup_all_recovery_waiters)(void *context),
+ sector_t target_begin, unsigned max_recovery,
+ struct dm_dirty_log *log, uint32_t region_size,
+ region_t nr_regions);
+void dm_region_hash_destroy(struct dm_region_hash *rh);
+
+struct dm_dirty_log *dm_rh_dirty_log(struct dm_region_hash *rh);
+
+/*
+ * Conversion functions.
+ */
+region_t dm_rh_bio_to_region(struct dm_region_hash *rh, struct bio *bio);
+sector_t dm_rh_region_to_sector(struct dm_region_hash *rh, region_t region);
+void *dm_rh_region_context(struct dm_region *reg);
+
+/*
+ * Get region size and key (ie. number of the region).
+ */
+sector_t dm_rh_get_region_size(struct dm_region_hash *rh);
+region_t dm_rh_get_region_key(struct dm_region *reg);
+
+/*
+ * Get/set/update region state (and dirty log).
+ *
+ */
+int dm_rh_get_state(struct dm_region_hash *rh, region_t region, int may_block);
+void dm_rh_set_state(struct dm_region_hash *rh, region_t region,
+ enum dm_rh_region_states state, int may_block);
+
+/* Non-zero errors_handled leaves the state of the region NOSYNC */
+void dm_rh_update_states(struct dm_region_hash *rh, int errors_handled);
+
+/* Flush the region hash and dirty log. */
+int dm_rh_flush(struct dm_region_hash *rh);
+
+/* Inc/dec pending count on regions. */
+void dm_rh_inc_pending(struct dm_region_hash *rh, struct bio_list *bios);
+void dm_rh_dec(struct dm_region_hash *rh, region_t region);
+
+/* Delay bios on regions. */
+void dm_rh_delay(struct dm_region_hash *rh, struct bio *bio);
+
+void dm_rh_mark_nosync(struct dm_region_hash *rh,
+ struct bio *bio, unsigned done, int error);
+
+/*
+ * Region recovery control.
+ */
+
+/* Prepare some regions for recovery by starting to quiesce them. */
+void dm_rh_recovery_prepare(struct dm_region_hash *rh);
+
+/* Try fetching a quiesced region for recovery. */
+struct dm_region *dm_rh_recovery_start(struct dm_region_hash *rh);
+
+/* Report recovery end on a region. */
+void dm_rh_recovery_end(struct dm_region *reg, int error);
+
+/* Returns number of regions with recovery work outstanding. */
+int dm_rh_recovery_in_flight(struct dm_region_hash *rh);
+
+/* Start/stop recovery. */
+void dm_rh_start_recovery(struct dm_region_hash *rh);
+void dm_rh_stop_recovery(struct dm_region_hash *rh);
+
+#endif /* DM_REGION_HASH_H */
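A rough sketch of how a mirror-style target might wire up the region hash; the callbacks and the context structure are placeholders, and error handling assumes an ERR_PTR-style return from dm_region_hash_create().

#include <linux/err.h>
#include <linux/dm-dirty-log.h>
#include <linux/dm-region-hash.h>

struct example_set {
        struct dm_region_hash *rh;
        /* ... bio lists, workqueue, recovery bookkeeping ... */
};

static void example_dispatch_bios(void *context, struct bio_list *bios)
{ /* queue the held bios for the worker */ }

static void example_wakeup_workers(void *context)
{ /* kick the target's workqueue */ }

static void example_wakeup_recovery(void *context)
{ /* wake anyone waiting for recovery slots */ }

static int example_rh_init(struct example_set *es, struct dm_dirty_log *log,
                           sector_t begin, uint32_t region_size,
                           region_t nr_regions)
{
        es->rh = dm_region_hash_create(es, example_dispatch_bios,
                                       example_wakeup_workers,
                                       example_wakeup_recovery,
                                       begin, 8 /* max parallel recoveries */,
                                       log, region_size, nr_regions);
        return IS_ERR(es->rh) ? PTR_ERR(es->rh) : 0;
}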
diff --git a/include/linux/dma_remapping.h b/include/linux/dma_remapping.h
new file mode 100644
index 00000000000..952df39c989
--- /dev/null
+++ b/include/linux/dma_remapping.h
@@ -0,0 +1,156 @@
+#ifndef _DMA_REMAPPING_H
+#define _DMA_REMAPPING_H
+
+/*
+ * VT-d hardware uses 4KiB page size regardless of host page size.
+ */
+#define VTD_PAGE_SHIFT (12)
+#define VTD_PAGE_SIZE (1UL << VTD_PAGE_SHIFT)
+#define VTD_PAGE_MASK (((u64)-1) << VTD_PAGE_SHIFT)
+#define VTD_PAGE_ALIGN(addr) (((addr) + VTD_PAGE_SIZE - 1) & VTD_PAGE_MASK)
+
+#define IOVA_PFN(addr) ((addr) >> PAGE_SHIFT)
+#define DMA_32BIT_PFN IOVA_PFN(DMA_32BIT_MASK)
+#define DMA_64BIT_PFN IOVA_PFN(DMA_64BIT_MASK)
+
+
+/*
+ * 0: Present
+ * 1-11: Reserved
+ * 12-63: Context Ptr (12 - (haw-1))
+ * 64-127: Reserved
+ */
+struct root_entry {
+ u64 val;
+ u64 rsvd1;
+};
+#define ROOT_ENTRY_NR (VTD_PAGE_SIZE/sizeof(struct root_entry))
+static inline bool root_present(struct root_entry *root)
+{
+ return (root->val & 1);
+}
+static inline void set_root_present(struct root_entry *root)
+{
+ root->val |= 1;
+}
+static inline void set_root_value(struct root_entry *root, unsigned long value)
+{
+ root->val |= value & VTD_PAGE_MASK;
+}
+
+struct context_entry;
+static inline struct context_entry *
+get_context_addr_from_root(struct root_entry *root)
+{
+ return root_present(root) ?
+ (struct context_entry *)phys_to_virt(root->val & VTD_PAGE_MASK) :
+ NULL;
+}
+
+/*
+ * low 64 bits:
+ * 0: present
+ * 1: fault processing disable
+ * 2-3: translation type
+ * 12-63: address space root
+ * high 64 bits:
+ * 0-2: address width
+ * 3-6: aval
+ * 8-23: domain id
+ */
+struct context_entry {
+ u64 lo;
+ u64 hi;
+};
+#define context_present(c) ((c).lo & 1)
+#define context_fault_disable(c) (((c).lo >> 1) & 1)
+#define context_translation_type(c) (((c).lo >> 2) & 3)
+#define context_address_root(c) ((c).lo & VTD_PAGE_MASK)
+#define context_address_width(c) ((c).hi & 7)
+#define context_domain_id(c) (((c).hi >> 8) & ((1 << 16) - 1))
+
+#define context_set_present(c) do {(c).lo |= 1;} while (0)
+#define context_set_fault_enable(c) \
+ do {(c).lo &= (((u64)-1) << 2) | 1;} while (0)
+#define context_set_translation_type(c, val) \
+ do { \
+ (c).lo &= (((u64)-1) << 4) | 3; \
+ (c).lo |= ((val) & 3) << 2; \
+ } while (0)
+#define CONTEXT_TT_MULTI_LEVEL 0
+#define context_set_address_root(c, val) \
+ do {(c).lo |= (val) & VTD_PAGE_MASK; } while (0)
+#define context_set_address_width(c, val) do {(c).hi |= (val) & 7;} while (0)
+#define context_set_domain_id(c, val) \
+ do {(c).hi |= ((val) & ((1 << 16) - 1)) << 8;} while (0)
+#define context_clear_entry(c) do {(c).lo = 0; (c).hi = 0;} while (0)
+
+/*
+ * 0: readable
+ * 1: writable
+ * 2-6: reserved
+ * 7: super page
+ * 8-11: available
+ * 12-63: Host physical address
+ */
+struct dma_pte {
+ u64 val;
+};
+#define dma_clear_pte(p) do {(p).val = 0;} while (0)
+
+#define DMA_PTE_READ (1)
+#define DMA_PTE_WRITE (2)
+
+#define dma_set_pte_readable(p) do {(p).val |= DMA_PTE_READ;} while (0)
+#define dma_set_pte_writable(p) do {(p).val |= DMA_PTE_WRITE;} while (0)
+#define dma_set_pte_prot(p, prot) \
+ do {(p).val = ((p).val & ~3) | ((prot) & 3); } while (0)
+#define dma_pte_addr(p) ((p).val & VTD_PAGE_MASK)
+#define dma_set_pte_addr(p, addr) do {\
+ (p).val |= ((addr) & VTD_PAGE_MASK); } while (0)
+#define dma_pte_present(p) (((p).val & 3) != 0)
+
+struct intel_iommu;
+
+struct dmar_domain {
+ int id; /* domain id */
+ struct intel_iommu *iommu; /* back pointer to owning iommu */
+
+ struct list_head devices; /* all devices' list */
+ struct iova_domain iovad; /* iova's that belong to this domain */
+
+ struct dma_pte *pgd; /* virtual address */
+ spinlock_t mapping_lock; /* page table lock */
+ int gaw; /* max guest address width */
+
+ /* adjusted guest address width, 0 is level 2 30-bit */
+ int agaw;
+
+#define DOMAIN_FLAG_MULTIPLE_DEVICES 1
+ int flags;
+};
+
+/* PCI domain-device relationship */
+struct device_domain_info {
+ struct list_head link; /* link to domain siblings */
+ struct list_head global; /* link to global list */
+ u8 bus; /* PCI bus number */
+ u8 devfn; /* PCI devfn number */
+ struct pci_dev *dev; /* it's NULL for PCIE-to-PCI bridge */
+ struct dmar_domain *domain; /* pointer to domain */
+};
+
+extern int init_dmars(void);
+extern void free_dmar_iommu(struct intel_iommu *iommu);
+
+extern int dmar_disabled;
+
+#ifndef CONFIG_DMAR_GFX_WA
+static inline void iommu_prepare_gfx_mapping(void)
+{
+ return;
+}
+#endif /* !CONFIG_DMAR_GFX_WA */
+
+#endif
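For illustration only, the context-entry helpers above compose as follows when a hypothetical caller programs an entry; the domain id, address width and page-table address values are arbitrary.

#include <linux/types.h>
#include <linux/dma_remapping.h>

static void example_fill_context(struct context_entry *ce, u64 pgd_phys,
                                 int domain_id, int agaw)
{
        context_clear_entry(*ce);
        context_set_domain_id(*ce, domain_id);
        context_set_address_width(*ce, agaw);
        context_set_address_root(*ce, pgd_phys & VTD_PAGE_MASK);
        context_set_translation_type(*ce, CONTEXT_TT_MULTI_LEVEL);
        context_set_present(*ce);       /* mark the entry valid last */
}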
diff --git a/include/linux/dmar.h b/include/linux/dmar.h
index c360c558e59..f1984fc3e06 100644
--- a/include/linux/dmar.h
+++ b/include/linux/dmar.h
@@ -45,7 +45,6 @@ extern struct list_head dmar_drhd_units;
list_for_each_entry(drhd, &dmar_drhd_units, list)
extern int dmar_table_init(void);
-extern int early_dmar_detect(void);
extern int dmar_dev_scope_init(void);
/* Intel IOMMU detection */
diff --git a/include/linux/dmi.h b/include/linux/dmi.h
index e5084eb5943..34161907b2f 100644
--- a/include/linux/dmi.h
+++ b/include/linux/dmi.h
@@ -44,8 +44,10 @@ extern const struct dmi_device * dmi_find_device(int type, const char *name,
extern void dmi_scan_machine(void);
extern int dmi_get_year(int field);
extern int dmi_name_in_vendors(const char *str);
+extern int dmi_name_in_serial(const char *str);
extern int dmi_available;
extern int dmi_walk(void (*decode)(const struct dmi_header *));
+extern bool dmi_match(enum dmi_field f, const char *str);
#else
@@ -56,9 +58,12 @@ static inline const struct dmi_device * dmi_find_device(int type, const char *na
static inline void dmi_scan_machine(void) { return; }
static inline int dmi_get_year(int year) { return 0; }
static inline int dmi_name_in_vendors(const char *s) { return 0; }
+static inline int dmi_name_in_serial(const char *s) { return 0; }
#define dmi_available 0
static inline int dmi_walk(void (*decode)(const struct dmi_header *))
{ return -1; }
+static inline bool dmi_match(enum dmi_field f, const char *str)
+ { return false; }
#endif
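As an illustration of the new helper (the vendor and board strings are invented), dmi_match() lets a driver do a one-off identity check without building a dmi_system_id table:

#include <linux/dmi.h>

static bool running_on_example_board(void)
{
        /* DMI_SYS_VENDOR / DMI_BOARD_NAME are existing dmi_field values. */
        return dmi_match(DMI_SYS_VENDOR, "Example Corp") &&
               dmi_match(DMI_BOARD_NAME, "EXAMPLE-1234");
}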
diff --git a/include/linux/ds1286.h b/include/linux/ds1286.h
index d8989860e4c..45ea0aa0aeb 100644
--- a/include/linux/ds1286.h
+++ b/include/linux/ds1286.h
@@ -8,8 +8,6 @@
#ifndef __LINUX_DS1286_H
#define __LINUX_DS1286_H
-#include <asm/ds1286.h>
-
/**********************************************************************
* register summary
**********************************************************************/
diff --git a/include/linux/dvb/frontend.h b/include/linux/dvb/frontend.h
index 6e4ace27027..79a8ed8e6a7 100644
--- a/include/linux/dvb/frontend.h
+++ b/include/linux/dvb/frontend.h
@@ -166,6 +166,7 @@ typedef enum fe_modulation {
VSB_16,
PSK_8,
APSK_16,
+ APSK_32,
DQPSK,
} fe_modulation_t;
@@ -295,6 +296,7 @@ typedef enum fe_delivery_system {
SYS_DVBC_ANNEX_AC,
SYS_DVBC_ANNEX_B,
SYS_DVBT,
+ SYS_DSS,
SYS_DVBS,
SYS_DVBS2,
SYS_DVBH,
diff --git a/include/linux/dynamic_printk.h b/include/linux/dynamic_printk.h
new file mode 100644
index 00000000000..2d528d00907
--- /dev/null
+++ b/include/linux/dynamic_printk.h
@@ -0,0 +1,93 @@
+#ifndef _DYNAMIC_PRINTK_H
+#define _DYNAMIC_PRINTK_H
+
+#define DYNAMIC_DEBUG_HASH_BITS 6
+#define DEBUG_HASH_TABLE_SIZE (1 << DYNAMIC_DEBUG_HASH_BITS)
+
+#define TYPE_BOOLEAN 1
+
+#define DYNAMIC_ENABLED_ALL 0
+#define DYNAMIC_ENABLED_NONE 1
+#define DYNAMIC_ENABLED_SOME 2
+
+extern int dynamic_enabled;
+
+/* dynamic_printk_enabled and dynamic_printk_enabled2 are bitmasks in which
+ * bit n is set to 1 if any modname hashes into bucket n, 0 otherwise. They
+ * use independent hash functions to reduce the chance of false positives.
+ */
+extern long long dynamic_printk_enabled;
+extern long long dynamic_printk_enabled2;
+
+struct mod_debug {
+ char *modname;
+ char *logical_modname;
+ char *flag_names;
+ int type;
+ int hash;
+ int hash2;
+} __attribute__((aligned(8)));
+
+int register_dynamic_debug_module(char *mod_name, int type, char *share_name,
+ char *flags, int hash, int hash2);
+
+#if defined(CONFIG_DYNAMIC_PRINTK_DEBUG)
+extern int unregister_dynamic_debug_module(char *mod_name);
+extern int __dynamic_dbg_enabled_helper(char *modname, int type,
+ int value, int hash);
+
+#define __dynamic_dbg_enabled(module, type, value, level, hash) ({ \
+ int __ret = 0; \
+ if (unlikely((dynamic_printk_enabled & (1LL << DEBUG_HASH)) && \
+ (dynamic_printk_enabled2 & (1LL << DEBUG_HASH2)))) \
+ __ret = __dynamic_dbg_enabled_helper(module, type, \
+ value, hash);\
+ __ret; })
+
+#define dynamic_pr_debug(fmt, ...) do { \
+ static char mod_name[] \
+ __attribute__((section("__verbose_strings"))) \
+ = KBUILD_MODNAME; \
+ static struct mod_debug descriptor \
+ __used \
+ __attribute__((section("__verbose"), aligned(8))) = \
+ { mod_name, mod_name, NULL, TYPE_BOOLEAN, DEBUG_HASH, DEBUG_HASH2 };\
+ if (__dynamic_dbg_enabled(KBUILD_MODNAME, TYPE_BOOLEAN, \
+ 0, 0, DEBUG_HASH)) \
+ printk(KERN_DEBUG KBUILD_MODNAME ":" fmt, \
+ ##__VA_ARGS__); \
+ } while (0)
+
+#define dynamic_dev_dbg(dev, format, ...) do { \
+ static char mod_name[] \
+ __attribute__((section("__verbose_strings"))) \
+ = KBUILD_MODNAME; \
+ static struct mod_debug descriptor \
+ __used \
+ __attribute__((section("__verbose"), aligned(8))) = \
+ { mod_name, mod_name, NULL, TYPE_BOOLEAN, DEBUG_HASH, DEBUG_HASH2 };\
+ if (__dynamic_dbg_enabled(KBUILD_MODNAME, TYPE_BOOLEAN, \
+ 0, 0, DEBUG_HASH)) \
+ dev_printk(KERN_DEBUG, dev, \
+ KBUILD_MODNAME ": " format, \
+ ##__VA_ARGS__); \
+ } while (0)
+
+#else
+
+static inline int unregister_dynamic_debug_module(const char *mod_name)
+{
+ return 0;
+}
+static inline int __dynamic_dbg_enabled_helper(char *modname, int type,
+ int value, int hash)
+{
+ return 0;
+}
+
+#define __dynamic_dbg_enabled(module, type, value, level, hash) ({ 0; })
+#define dynamic_pr_debug(fmt, ...) do { } while (0)
+#define dynamic_dev_dbg(dev, format, ...) do { } while (0)
+#endif
+
+#endif
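The double-hash test in __dynamic_dbg_enabled() can be hard to read out of context. Below is a stand-alone, userspace-style sketch of the same idea (the hash functions are stand-ins, not the kernel's): a module counts as possibly enabled only when its bucket bit is set in both masks, which keeps false positives rare even with just 64 buckets per mask; a per-module lookup then resolves any that remain.

#include <stdint.h>

#define EXAMPLE_HASH_TABLE_SIZE 64      /* mirrors DEBUG_HASH_TABLE_SIZE */

static unsigned example_hash1(const char *s)
{
        unsigned h = 0;
        while (*s)
                h = h * 31 + (unsigned char)*s++;
        return h % EXAMPLE_HASH_TABLE_SIZE;
}

static unsigned example_hash2(const char *s)
{
        unsigned h = 5381;
        while (*s)
                h = (h * 33) ^ (unsigned char)*s++;
        return h % EXAMPLE_HASH_TABLE_SIZE;
}

/* Debug output for mod is possible only if both independent hash buckets
 * are marked; otherwise the message is skipped with a cheap bit test. */
static int example_possibly_enabled(uint64_t mask1, uint64_t mask2,
                                    const char *mod)
{
        return (mask1 & (1ULL << example_hash1(mod))) &&
               (mask2 & (1ULL << example_hash2(mod)));
}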
diff --git a/include/linux/efi.h b/include/linux/efi.h
index 807373d467f..bb66feb164b 100644
--- a/include/linux/efi.h
+++ b/include/linux/efi.h
@@ -208,6 +208,9 @@ typedef efi_status_t efi_set_virtual_address_map_t (unsigned long memory_map_siz
#define EFI_GLOBAL_VARIABLE_GUID \
EFI_GUID( 0x8be4df61, 0x93ca, 0x11d2, 0xaa, 0x0d, 0x00, 0xe0, 0x98, 0x03, 0x2b, 0x8c )
+#define UV_SYSTEM_TABLE_GUID \
+ EFI_GUID( 0x3b13a7d4, 0x633e, 0x11dd, 0x93, 0xec, 0xda, 0x25, 0x56, 0xd8, 0x95, 0x93 )
+
typedef struct {
efi_guid_t guid;
unsigned long table;
@@ -255,6 +258,7 @@ extern struct efi {
unsigned long boot_info; /* boot info table */
unsigned long hcdp; /* HCDP table */
unsigned long uga; /* UGA table */
+ unsigned long uv_systab; /* UV system table */
efi_get_time_t *get_time;
efi_set_time_t *set_time;
efi_get_wakeup_time_t *get_wakeup_time;
diff --git a/include/linux/elevator.h b/include/linux/elevator.h
index 92f6f634e3e..7a204256b15 100644
--- a/include/linux/elevator.h
+++ b/include/linux/elevator.h
@@ -28,7 +28,7 @@ typedef void (elevator_activate_req_fn) (struct request_queue *, struct request
typedef void (elevator_deactivate_req_fn) (struct request_queue *, struct request *);
typedef void *(elevator_init_fn) (struct request_queue *);
-typedef void (elevator_exit_fn) (elevator_t *);
+typedef void (elevator_exit_fn) (struct elevator_queue *);
struct elevator_ops
{
@@ -62,8 +62,8 @@ struct elevator_ops
struct elv_fs_entry {
struct attribute attr;
- ssize_t (*show)(elevator_t *, char *);
- ssize_t (*store)(elevator_t *, const char *, size_t);
+ ssize_t (*show)(struct elevator_queue *, char *);
+ ssize_t (*store)(struct elevator_queue *, const char *, size_t);
};
/*
@@ -130,7 +130,7 @@ extern ssize_t elv_iosched_show(struct request_queue *, char *);
extern ssize_t elv_iosched_store(struct request_queue *, const char *, size_t);
extern int elevator_init(struct request_queue *, char *);
-extern void elevator_exit(elevator_t *);
+extern void elevator_exit(struct elevator_queue *);
extern int elv_rq_merge_ok(struct request *, struct bio *);
/*
diff --git a/include/linux/etherdevice.h b/include/linux/etherdevice.h
index 25d62e6e329..1cb0f0b9092 100644
--- a/include/linux/etherdevice.h
+++ b/include/linux/etherdevice.h
@@ -27,6 +27,7 @@
#include <linux/if_ether.h>
#include <linux/netdevice.h>
#include <linux/random.h>
+#include <asm/unaligned.h>
#ifdef __KERNEL__
extern __be16 eth_type_trans(struct sk_buff *skb, struct net_device *dev);
@@ -41,6 +42,10 @@ extern int eth_header_cache(const struct neighbour *neigh, struct hh_cache *hh);
extern void eth_header_cache_update(struct hh_cache *hh,
const struct net_device *dev,
const unsigned char *haddr);
+extern int eth_mac_addr(struct net_device *dev, void *p);
+extern int eth_change_mtu(struct net_device *dev, int new_mtu);
+extern int eth_validate_addr(struct net_device *dev);
+
extern struct net_device *alloc_etherdev_mq(int sizeof_priv, unsigned int queue_count);
@@ -136,6 +141,47 @@ static inline unsigned compare_ether_addr(const u8 *addr1, const u8 *addr2)
BUILD_BUG_ON(ETH_ALEN != 6);
return ((a[0] ^ b[0]) | (a[1] ^ b[1]) | (a[2] ^ b[2])) != 0;
}
+
+static inline unsigned long zap_last_2bytes(unsigned long value)
+{
+#ifdef __BIG_ENDIAN
+ return value >> 16;
+#else
+ return value << 16;
+#endif
+}
+
+/**
+ * compare_ether_addr_64bits - Compare two Ethernet addresses
+ * @addr1: Pointer to an array of 8 bytes
+ * @addr2: Pointer to an other array of 8 bytes
+ *
+ * Compare two Ethernet addresses; returns 0 if they are equal.
+ * Same result as "memcmp(addr1, addr2, ETH_ALEN)", but without conditional
+ * branches, and possibly long word memory accesses on CPUs that allow cheap
+ * unaligned memory reads.
+ * arrays = { byte1, byte2, byte3, byte4, byte5, byte6, pad1, pad2 }
+ *
+ * Please note that alignment of addr1 & addr2 is only guaranteed to be 16 bits.
+ */
+
+static inline unsigned compare_ether_addr_64bits(const u8 addr1[6+2],
+ const u8 addr2[6+2])
+{
+#ifdef CONFIG_HAVE_EFFICIENT_UNALIGNED_ACCESS
+ unsigned long fold = ((*(unsigned long *)addr1) ^
+ (*(unsigned long *)addr2));
+
+ if (sizeof(fold) == 8)
+ return zap_last_2bytes(fold) != 0;
+
+ fold |= zap_last_2bytes((*(unsigned long *)(addr1 + 4)) ^
+ (*(unsigned long *)(addr2 + 4)));
+ return fold != 0;
+#else
+ return compare_ether_addr(addr1, addr2);
+#endif
+}
#endif /* __KERNEL__ */
#endif /* _LINUX_ETHERDEVICE_H */
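A caller-side sketch (the function and its arguments are hypothetical): both operands must sit in 8-byte arrays, which is why reading the source address out of a full Ethernet header is safe and why a station-address table would pad each entry to ETH_ALEN + 2 bytes.

#include <linux/etherdevice.h>
#include <linux/if_ether.h>

static bool example_from_station(const struct ethhdr *eh,
                                 const u8 sta_addr[ETH_ALEN + 2])
{
        /* eh->h_source is followed by h_proto inside the 14-byte header,
         * so the 8-byte read stays within the structure. */
        return compare_ether_addr_64bits(eh->h_source, sta_addr) == 0;
}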
diff --git a/include/linux/ethtool.h b/include/linux/ethtool.h
index b4b038b89ee..27c67a54223 100644
--- a/include/linux/ethtool.h
+++ b/include/linux/ethtool.h
@@ -467,6 +467,8 @@ struct ethtool_ops {
#define ETHTOOL_GRXFH 0x00000029 /* Get RX flow hash configuration */
#define ETHTOOL_SRXFH 0x0000002a /* Set RX flow hash configuration */
+#define ETHTOOL_GGRO 0x0000002b /* Get GRO enable (ethtool_value) */
+#define ETHTOOL_SGRO 0x0000002c /* Set GRO enable (ethtool_value) */
/* compatibility with older code */
#define SPARC_ETH_GSET ETHTOOL_GSET
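For context, the new GRO commands ride on the existing SIOCETHTOOL / struct ethtool_value plumbing; a trimmed userspace sketch, assuming the caller already has an AF_INET socket and a valid interface name:

#include <string.h>
#include <sys/ioctl.h>
#include <net/if.h>
#include <linux/ethtool.h>
#include <linux/sockios.h>

/* Query GRO state for ifname; returns 1 if enabled, 0 if disabled, -1 on error. */
static int example_get_gro(int fd, const char *ifname)
{
        struct ethtool_value ev = { .cmd = ETHTOOL_GGRO };
        struct ifreq ifr;

        memset(&ifr, 0, sizeof(ifr));
        strncpy(ifr.ifr_name, ifname, IFNAMSIZ - 1);
        ifr.ifr_data = (char *)&ev;

        if (ioctl(fd, SIOCETHTOOL, &ifr) < 0)
                return -1;
        return ev.data != 0;
}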
diff --git a/include/linux/ext2_fs.h b/include/linux/ext2_fs.h
index 2efe7b863cf..78c775a83f7 100644
--- a/include/linux/ext2_fs.h
+++ b/include/linux/ext2_fs.h
@@ -47,7 +47,7 @@
#ifdef EXT2FS_DEBUG
# define ext2_debug(f, a...) { \
printk ("EXT2-fs DEBUG (%s, %d): %s:", \
- __FILE__, __LINE__, __FUNCTION__); \
+ __FILE__, __LINE__, __func__); \
printk (f, ## a); \
}
#else
diff --git a/include/linux/ext3_fs.h b/include/linux/ext3_fs.h
index 8120fa1bc23..d14f0291848 100644
--- a/include/linux/ext3_fs.h
+++ b/include/linux/ext3_fs.h
@@ -43,7 +43,7 @@
#define ext3_debug(f, a...) \
do { \
printk (KERN_DEBUG "EXT3-fs DEBUG (%s, %d): %s:", \
- __FILE__, __LINE__, __FUNCTION__); \
+ __FILE__, __LINE__, __func__); \
printk (KERN_DEBUG f, ## a); \
} while (0)
#else
@@ -380,6 +380,8 @@ struct ext3_inode {
#define EXT3_MOUNT_QUOTA 0x80000 /* Some quota option set */
#define EXT3_MOUNT_USRQUOTA 0x100000 /* "old" user quota */
#define EXT3_MOUNT_GRPQUOTA 0x200000 /* "old" group quota */
+#define EXT3_MOUNT_DATA_ERR_ABORT 0x400000 /* Abort on file data write
+ * error in ordered mode */
/* Compatibility, for having both ext2_fs.h and ext3_fs.h included at once */
#ifndef _LINUX_EXT2_FS_H
@@ -871,7 +873,7 @@ extern void ext3_update_dynamic_rev (struct super_block *sb);
#define ext3_std_error(sb, errno) \
do { \
if ((errno)) \
- __ext3_std_error((sb), __FUNCTION__, (errno)); \
+ __ext3_std_error((sb), __func__, (errno)); \
} while (0)
/*
diff --git a/include/linux/ext3_jbd.h b/include/linux/ext3_jbd.h
index 8c43b13a02f..cf82d519be4 100644
--- a/include/linux/ext3_jbd.h
+++ b/include/linux/ext3_jbd.h
@@ -137,17 +137,17 @@ int __ext3_journal_dirty_metadata(const char *where,
handle_t *handle, struct buffer_head *bh);
#define ext3_journal_get_undo_access(handle, bh) \
- __ext3_journal_get_undo_access(__FUNCTION__, (handle), (bh))
+ __ext3_journal_get_undo_access(__func__, (handle), (bh))
#define ext3_journal_get_write_access(handle, bh) \
- __ext3_journal_get_write_access(__FUNCTION__, (handle), (bh))
+ __ext3_journal_get_write_access(__func__, (handle), (bh))
#define ext3_journal_revoke(handle, blocknr, bh) \
- __ext3_journal_revoke(__FUNCTION__, (handle), (blocknr), (bh))
+ __ext3_journal_revoke(__func__, (handle), (blocknr), (bh))
#define ext3_journal_get_create_access(handle, bh) \
- __ext3_journal_get_create_access(__FUNCTION__, (handle), (bh))
+ __ext3_journal_get_create_access(__func__, (handle), (bh))
#define ext3_journal_dirty_metadata(handle, bh) \
- __ext3_journal_dirty_metadata(__FUNCTION__, (handle), (bh))
+ __ext3_journal_dirty_metadata(__func__, (handle), (bh))
#define ext3_journal_forget(handle, bh) \
- __ext3_journal_forget(__FUNCTION__, (handle), (bh))
+ __ext3_journal_forget(__func__, (handle), (bh))
int ext3_journal_dirty_data(handle_t *handle, struct buffer_head *bh);
@@ -160,7 +160,7 @@ static inline handle_t *ext3_journal_start(struct inode *inode, int nblocks)
}
#define ext3_journal_stop(handle) \
- __ext3_journal_stop(__FUNCTION__, (handle))
+ __ext3_journal_stop(__func__, (handle))
static inline handle_t *ext3_journal_current_handle(void)
{
diff --git a/include/linux/fault-inject.h b/include/linux/fault-inject.h
index 32368c4f032..06ca9b21dad 100644
--- a/include/linux/fault-inject.h
+++ b/include/linux/fault-inject.h
@@ -81,4 +81,13 @@ static inline void cleanup_fault_attr_dentries(struct fault_attr *attr)
#endif /* CONFIG_FAULT_INJECTION */
+#ifdef CONFIG_FAILSLAB
+extern bool should_failslab(size_t size, gfp_t gfpflags);
+#else
+static inline bool should_failslab(size_t size, gfp_t gfpflags)
+{
+ return false;
+}
+#endif /* CONFIG_FAILSLAB */
+
#endif /* _LINUX_FAULT_INJECT_H */
diff --git a/include/linux/fb.h b/include/linux/fb.h
index 531ccd5f596..1ee63df5be9 100644
--- a/include/linux/fb.h
+++ b/include/linux/fb.h
@@ -808,6 +808,7 @@ struct fb_tile_ops {
struct fb_info {
int node;
int flags;
+ struct mutex lock; /* Lock for open/release/ioctl funcs */
struct fb_var_screeninfo var; /* Current var */
struct fb_fix_screeninfo fix; /* Current fix */
struct fb_monspecs monspecs; /* Current Monitor specs */
@@ -887,7 +888,7 @@ struct fb_info {
#define fb_writeq sbus_writeq
#define fb_memset sbus_memset_io
-#elif defined(__i386__) || defined(__alpha__) || defined(__x86_64__) || defined(__hppa__) || (defined(__sh__) && !defined(__SH5__)) || defined(__powerpc__) || defined(__avr32__)
+#elif defined(__i386__) || defined(__alpha__) || defined(__x86_64__) || defined(__hppa__) || defined(__sh__) || defined(__powerpc__) || defined(__avr32__)
#define fb_readb __raw_readb
#define fb_readw __raw_readw
diff --git a/include/linux/fddidevice.h b/include/linux/fddidevice.h
index e61e42dfd31..155bafd9e88 100644
--- a/include/linux/fddidevice.h
+++ b/include/linux/fddidevice.h
@@ -27,6 +27,7 @@
#ifdef __KERNEL__
extern __be16 fddi_type_trans(struct sk_buff *skb,
struct net_device *dev);
+extern int fddi_change_mtu(struct net_device *dev, int new_mtu);
extern struct net_device *alloc_fddidev(int sizeof_priv);
#endif
diff --git a/include/linux/file.h b/include/linux/file.h
index a20259e248a..335a0a5c316 100644
--- a/include/linux/file.h
+++ b/include/linux/file.h
@@ -19,10 +19,10 @@ struct file_operations;
struct vfsmount;
struct dentry;
extern int init_file(struct file *, struct vfsmount *mnt,
- struct dentry *dentry, mode_t mode,
+ struct dentry *dentry, fmode_t mode,
const struct file_operations *fop);
extern struct file *alloc_file(struct vfsmount *, struct dentry *dentry,
- mode_t mode, const struct file_operations *fop);
+ fmode_t mode, const struct file_operations *fop);
static inline void fput_light(struct file *file, int fput_needed)
{
diff --git a/include/linux/filter.h b/include/linux/filter.h
index b6ea9aa9e85..1354aaf6abb 100644
--- a/include/linux/filter.h
+++ b/include/linux/filter.h
@@ -122,7 +122,8 @@ struct sock_fprog /* Required for SO_ATTACH_FILTER. */
#define SKF_AD_PKTTYPE 4
#define SKF_AD_IFINDEX 8
#define SKF_AD_NLATTR 12
-#define SKF_AD_MAX 16
+#define SKF_AD_NLATTR_NEST 16
+#define SKF_AD_MAX 20
#define SKF_NET_OFF (-0x100000)
#define SKF_LL_OFF (-0x200000)
diff --git a/include/linux/firewire-cdev.h b/include/linux/firewire-cdev.h
index 0f0e271f97f..4d078e99c01 100644
--- a/include/linux/firewire-cdev.h
+++ b/include/linux/firewire-cdev.h
@@ -154,8 +154,13 @@ struct fw_cdev_event_iso_interrupt {
* @request: Valid if @common.type == %FW_CDEV_EVENT_REQUEST
* @iso_interrupt: Valid if @common.type == %FW_CDEV_EVENT_ISO_INTERRUPT
*
- * Convenience union for userspace use. Events could be read(2) into a char
- * buffer and then cast to this union for further processing.
+ * Convenience union for userspace use. Events could be read(2) into an
+ * appropriately aligned char buffer and then cast to this union for further
+ * processing. Note that for a request, response or iso_interrupt event,
+ * the data[] or header[] may make the size of the full event larger than
+ * sizeof(union fw_cdev_event). Also note that if you attempt to read(2)
+ * an event into a buffer that is not large enough for it, the data that does
+ * not fit will be discarded so that the next read(2) will return a new event.
*/
union fw_cdev_event {
struct fw_cdev_event_common common;
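In the spirit of the updated comment, a userspace read loop might look like the sketch below (the 4 KiB buffer size and the selection of handled event types are arbitrary choices): the buffer is aligned like the union but larger than it, so trailing data[]/header[] payloads fit.

#include <unistd.h>
#include <linux/firewire-cdev.h>

static int example_read_event(int fd)
{
        union {
                union fw_cdev_event event;      /* provides the alignment */
                char buf[4096];                 /* room for trailing data */
        } u;

        if (read(fd, &u, sizeof(u)) < 0)
                return -1;

        switch (u.event.common.type) {
        case FW_CDEV_EVENT_BUS_RESET:
                /* u.event.bus_reset is now valid */
                break;
        case FW_CDEV_EVENT_RESPONSE:
                /* u.event.response.data holds response.length payload bytes */
                break;
        default:
                break;
        }
        return 0;
}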
diff --git a/include/linux/freezer.h b/include/linux/freezer.h
index deddeedf325..5a361f85cfe 100644
--- a/include/linux/freezer.h
+++ b/include/linux/freezer.h
@@ -6,7 +6,7 @@
#include <linux/sched.h>
#include <linux/wait.h>
-#ifdef CONFIG_PM_SLEEP
+#ifdef CONFIG_FREEZER
/*
* Check if a process has been frozen
*/
@@ -39,29 +39,14 @@ static inline void clear_freeze_flag(struct task_struct *p)
clear_tsk_thread_flag(p, TIF_FREEZE);
}
-/*
- * Wake up a frozen process
- *
- * task_lock() is taken to prevent the race with refrigerator() which may
- * occur if the freezing of tasks fails. Namely, without the lock, if the
- * freezing of tasks failed, thaw_tasks() might have run before a task in
- * refrigerator() could call frozen_process(), in which case the task would be
- * frozen and no one would thaw it.
- */
-static inline int thaw_process(struct task_struct *p)
+static inline bool should_send_signal(struct task_struct *p)
{
- task_lock(p);
- if (frozen(p)) {
- p->flags &= ~PF_FROZEN;
- task_unlock(p);
- wake_up_process(p);
- return 1;
- }
- clear_freeze_flag(p);
- task_unlock(p);
- return 0;
+ return !(p->flags & PF_FREEZER_NOSIG);
}
+/* Takes and releases task alloc lock using task_lock() */
+extern int thaw_process(struct task_struct *p);
+
extern void refrigerator(void);
extern int freeze_processes(void);
extern void thaw_processes(void);
@@ -75,6 +60,15 @@ static inline int try_to_freeze(void)
return 0;
}
+extern bool freeze_task(struct task_struct *p, bool sig_only);
+extern void cancel_freezing(struct task_struct *p);
+
+#ifdef CONFIG_CGROUP_FREEZER
+extern int cgroup_frozen(struct task_struct *task);
+#else /* !CONFIG_CGROUP_FREEZER */
+static inline int cgroup_frozen(struct task_struct *task) { return 0; }
+#endif /* !CONFIG_CGROUP_FREEZER */
+
/*
* The PF_FREEZER_SKIP flag should be set by a vfork parent right before it
* calls wait_for_completion(&vfork) and reset right after it returns from this
@@ -166,7 +160,7 @@ static inline void set_freezable_with_signal(void)
} while (try_to_freeze()); \
__retval; \
})
-#else /* !CONFIG_PM_SLEEP */
+#else /* !CONFIG_FREEZER */
static inline int frozen(struct task_struct *p) { return 0; }
static inline int freezing(struct task_struct *p) { return 0; }
static inline void set_freeze_flag(struct task_struct *p) {}
@@ -191,6 +185,6 @@ static inline void set_freezable_with_signal(void) {}
#define wait_event_freezable_timeout(wq, condition, timeout) \
wait_event_interruptible_timeout(wq, condition, timeout)
-#endif /* !CONFIG_PM_SLEEP */
+#endif /* !CONFIG_FREEZER */
#endif /* FREEZER_H_INCLUDED */
diff --git a/include/linux/fs.h b/include/linux/fs.h
index a6a625be13f..001ded4845b 100644
--- a/include/linux/fs.h
+++ b/include/linux/fs.h
@@ -63,18 +63,32 @@ extern int dir_notify_enable;
#define MAY_ACCESS 16
#define MAY_OPEN 32
-#define FMODE_READ 1
-#define FMODE_WRITE 2
+/* file is open for reading */
+#define FMODE_READ ((__force fmode_t)1)
+/* file is open for writing */
+#define FMODE_WRITE ((__force fmode_t)2)
+/* file is seekable */
+#define FMODE_LSEEK ((__force fmode_t)4)
+/* file can be accessed using pread/pwrite */
+#define FMODE_PREAD ((__force fmode_t)8)
+#define FMODE_PWRITE FMODE_PREAD /* These go hand in hand */
+/* File is opened for execution with sys_execve / sys_uselib */
+#define FMODE_EXEC ((__force fmode_t)16)
+/* File is opened with O_NDELAY (only set for block devices) */
+#define FMODE_NDELAY ((__force fmode_t)32)
+/* File is opened with O_EXCL (only set for block devices) */
+#define FMODE_EXCL ((__force fmode_t)64)
+/* File is opened using open(.., 3, ..) and is writable only through ioctls
+ (special hack for floppy.c) */
+#define FMODE_WRITE_IOCTL ((__force fmode_t)128)
-/* Internal kernel extensions */
-#define FMODE_LSEEK 4
-#define FMODE_PREAD 8
-#define FMODE_PWRITE FMODE_PREAD /* These go hand in hand */
-
-/* File is being opened for execution. Primary users of this flag are
- distributed filesystems that can use it to achieve correct ETXTBUSY
- behavior for cross-node execution/opening_for_writing of files */
-#define FMODE_EXEC 16
+/*
+ * Don't update ctime and mtime.
+ *
+ * Currently a special hack for the XFS open_by_handle ioctl, but we'll
+ * hopefully graduate it to a proper O_CMTIME flag supported by open(2) soon.
+ */
+#define FMODE_NOCMTIME ((__force fmode_t)2048)
#define RW_MASK 1
#define RWA_MASK 2
@@ -136,7 +150,7 @@ extern int dir_notify_enable;
/*
* Superblock flags that can be altered by MS_REMOUNT
*/
-#define MS_RMT_MASK (MS_RDONLY|MS_SYNCHRONOUS|MS_MANDLOCK)
+#define MS_RMT_MASK (MS_RDONLY|MS_SYNCHRONOUS|MS_MANDLOCK|MS_I_VERSION)
/*
* Old magic mount flag and mask
@@ -310,6 +324,7 @@ struct poll_table_struct;
struct kstatfs;
struct vm_area_struct;
struct vfsmount;
+struct cred;
extern void __init inode_init(void);
extern void __init inode_init_early(void);
@@ -484,13 +499,6 @@ struct address_space_operations {
int (*readpages)(struct file *filp, struct address_space *mapping,
struct list_head *pages, unsigned nr_pages);
- /*
- * ext3 requires that a successful prepare_write() call be followed
- * by a commit_write() call - they must be balanced
- */
- int (*prepare_write)(struct file *, struct page *, unsigned, unsigned);
- int (*commit_write)(struct file *, struct page *, unsigned, unsigned);
-
int (*write_begin)(struct file *, struct address_space *mapping,
loff_t pos, unsigned len, unsigned flags,
struct page **pagep, void **fsdata);
@@ -825,10 +833,10 @@ struct file {
const struct file_operations *f_op;
atomic_long_t f_count;
unsigned int f_flags;
- mode_t f_mode;
+ fmode_t f_mode;
loff_t f_pos;
struct fown_struct f_owner;
- unsigned int f_uid, f_gid;
+ const struct cred *f_cred;
struct file_ra_state f_ra;
u64 f_version;
@@ -1037,7 +1045,6 @@ extern int vfs_setlease(struct file *, long, struct file_lock **);
extern int lease_modify(struct file_lock **, int);
extern int lock_may_read(struct inode *, loff_t start, unsigned long count);
extern int lock_may_write(struct inode *, loff_t start, unsigned long count);
-extern struct seq_operations locks_seq_operations;
#else /* !CONFIG_FILE_LOCKING */
#define fcntl_getlk(a, b) ({ -EINVAL; })
#define fcntl_setlk(a, b, c, d) ({ -EACCES; })
@@ -1152,6 +1159,7 @@ struct super_block {
char s_id[32]; /* Informational name */
void *s_fs_info; /* Filesystem private info */
+ fmode_t s_mode;
/*
* The next field is for VFS *only*. No filesystems have any business
@@ -1195,7 +1203,7 @@ enum {
#define has_fs_excl() atomic_read(&current->fs_excl)
#define is_owner_or_cap(inode) \
- ((current->fsuid == (inode)->i_uid) || capable(CAP_FOWNER))
+ ((current_fsuid() == (inode)->i_uid) || capable(CAP_FOWNER))
/* not quite ready to be deprecated, but... */
extern void lock_super(struct super_block *);
@@ -1266,20 +1274,7 @@ int generic_osync_inode(struct inode *, struct address_space *, int);
* to have different dirent layouts depending on the binary type.
*/
typedef int (*filldir_t)(void *, const char *, int, loff_t, u64, unsigned);
-
-struct block_device_operations {
- int (*open) (struct inode *, struct file *);
- int (*release) (struct inode *, struct file *);
- int (*ioctl) (struct inode *, struct file *, unsigned, unsigned long);
- long (*unlocked_ioctl) (struct file *, unsigned, unsigned long);
- long (*compat_ioctl) (struct file *, unsigned, unsigned long);
- int (*direct_access) (struct block_device *, sector_t,
- void **, unsigned long *);
- int (*media_changed) (struct gendisk *);
- int (*revalidate_disk) (struct gendisk *);
- int (*getgeo)(struct block_device *, struct hd_geometry *);
- struct module *owner;
-};
+struct block_device_operations;
/* These macros are for out of kernel modules to test that
* the kernel supports the unlocked_ioctl and compat_ioctl
@@ -1593,7 +1588,6 @@ extern int get_sb_pseudo(struct file_system_type *, char *,
struct vfsmount *mnt);
extern int simple_set_mnt(struct vfsmount *mnt, struct super_block *sb);
int __put_super_and_need_restart(struct super_block *sb);
-void unnamed_dev_init(void);
/* Alas, no aliases. Too much hassle with bringing module.h everywhere */
#define fops_get(fops) \
@@ -1689,7 +1683,8 @@ extern int do_truncate(struct dentry *, loff_t start, unsigned int time_attrs,
extern long do_sys_open(int dfd, const char __user *filename, int flags,
int mode);
extern struct file *filp_open(const char *, int, int);
-extern struct file * dentry_open(struct dentry *, struct vfsmount *, int);
+extern struct file * dentry_open(struct dentry *, struct vfsmount *, int,
+ const struct cred *);
extern int filp_close(struct file *, fl_owner_t id);
extern char * getname(const char __user *);
@@ -1714,7 +1709,7 @@ extern struct block_device *bdget(dev_t);
extern void bd_set_size(struct block_device *, loff_t size);
extern void bd_forget(struct inode *inode);
extern void bdput(struct block_device *);
-extern struct block_device *open_by_devnum(dev_t, unsigned);
+extern struct block_device *open_by_devnum(dev_t, fmode_t);
#else
static inline void bd_forget(struct inode *inode) {}
#endif
@@ -1724,13 +1719,10 @@ extern const struct file_operations bad_sock_fops;
extern const struct file_operations def_fifo_fops;
#ifdef CONFIG_BLOCK
extern int ioctl_by_bdev(struct block_device *, unsigned, unsigned long);
-extern int blkdev_ioctl(struct inode *, struct file *, unsigned, unsigned long);
-extern int blkdev_driver_ioctl(struct inode *inode, struct file *file,
- struct gendisk *disk, unsigned cmd,
- unsigned long arg);
+extern int blkdev_ioctl(struct block_device *, fmode_t, unsigned, unsigned long);
extern long compat_blkdev_ioctl(struct file *, unsigned, unsigned long);
-extern int blkdev_get(struct block_device *, mode_t, unsigned);
-extern int blkdev_put(struct block_device *);
+extern int blkdev_get(struct block_device *, fmode_t);
+extern int blkdev_put(struct block_device *, fmode_t);
extern int bd_claim(struct block_device *, void *);
extern void bd_release(struct block_device *);
#ifdef CONFIG_SYSFS
@@ -1761,9 +1753,10 @@ extern void chrdev_show(struct seq_file *,off_t);
extern const char *__bdevname(dev_t, char *buffer);
extern const char *bdevname(struct block_device *bdev, char *buffer);
extern struct block_device *lookup_bdev(const char *);
-extern struct block_device *open_bdev_excl(const char *, int, void *);
-extern void close_bdev_excl(struct block_device *);
+extern struct block_device *open_bdev_exclusive(const char *, fmode_t, void *);
+extern void close_bdev_exclusive(struct block_device *, fmode_t);
extern void blkdev_show(struct seq_file *,off_t);
+
#else
#define BLKDEV_MAJOR_HASH_SIZE 0
#endif
@@ -1852,6 +1845,11 @@ extern int inode_permission(struct inode *, int);
extern int generic_permission(struct inode *, int,
int (*check_acl)(struct inode *, int));
+static inline bool execute_ok(struct inode *inode)
+{
+ return (inode->i_mode & S_IXUGO) || S_ISDIR(inode->i_mode);
+}
+
extern int get_write_access(struct inode *);
extern int deny_write_access(struct file *);
static inline void put_write_access(struct inode * inode)
@@ -1887,7 +1885,9 @@ extern loff_t default_llseek(struct file *file, loff_t offset, int origin);
extern loff_t vfs_llseek(struct file *file, loff_t offset, int origin);
+extern struct inode * inode_init_always(struct super_block *, struct inode *);
extern void inode_init_once(struct inode *);
+extern void inode_add_to_lists(struct super_block *, struct inode *);
extern void iput(struct inode *);
extern struct inode * igrab(struct inode *);
extern ino_t iunique(struct super_block *, ino_t);
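Because fmode_t is now a distinct bitwise type, callers test the FMODE_* bits directly rather than comparing against open(2) flags; two illustrative helpers (not part of the patch):

#include <linux/fs.h>

static bool example_writable(const struct file *filp)
{
        return (filp->f_mode & FMODE_WRITE) != 0;
}

static bool example_can_pread(const struct file *filp)
{
        /* pread() needs both a readable and a pread-capable descriptor. */
        return (filp->f_mode & (FMODE_READ | FMODE_PREAD)) ==
               (FMODE_READ | FMODE_PREAD);
}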
diff --git a/include/linux/fsl_devices.h b/include/linux/fsl_devices.h
index 4e625e0094c..d9051d717d2 100644
--- a/include/linux/fsl_devices.h
+++ b/include/linux/fsl_devices.h
@@ -47,11 +47,7 @@
struct gianfar_platform_data {
/* device specific information */
u32 device_flags;
- /* board specific information */
- u32 board_flags;
- char bus_id[MII_BUS_ID_SIZE];
- u32 phy_id;
- u8 mac_addr[6];
+ char bus_id[BUS_ID_SIZE];
phy_interface_t interface;
};
@@ -60,17 +56,6 @@ struct gianfar_mdio_data {
int irq[32];
};
-/* Flags related to gianfar device features */
-#define FSL_GIANFAR_DEV_HAS_GIGABIT 0x00000001
-#define FSL_GIANFAR_DEV_HAS_COALESCE 0x00000002
-#define FSL_GIANFAR_DEV_HAS_RMON 0x00000004
-#define FSL_GIANFAR_DEV_HAS_MULTI_INTR 0x00000008
-#define FSL_GIANFAR_DEV_HAS_CSUM 0x00000010
-#define FSL_GIANFAR_DEV_HAS_VLAN 0x00000020
-#define FSL_GIANFAR_DEV_HAS_EXTENDED_HASH 0x00000040
-#define FSL_GIANFAR_DEV_HAS_PADDING 0x00000080
-#define FSL_GIANFAR_DEV_HAS_MAGIC_PACKET 0x00000100
-
/* Flags in gianfar_platform_data */
#define FSL_GIANFAR_BRD_HAS_PHY_INTR 0x00000001 /* set or use a timer */
#define FSL_GIANFAR_BRD_IS_REDUCED 0x00000002 /* Set if RGMII, RMII */
diff --git a/include/linux/fsnotify.h b/include/linux/fsnotify.h
index a89513188ce..00fbd5b245c 100644
--- a/include/linux/fsnotify.h
+++ b/include/linux/fsnotify.h
@@ -188,7 +188,7 @@ static inline void fsnotify_close(struct file *file)
struct dentry *dentry = file->f_path.dentry;
struct inode *inode = dentry->d_inode;
const char *name = dentry->d_name.name;
- mode_t mode = file->f_mode;
+ fmode_t mode = file->f_mode;
u32 mask = (mode & FMODE_WRITE) ? IN_CLOSE_WRITE : IN_CLOSE_NOWRITE;
if (S_ISDIR(inode->i_mode))
diff --git a/include/linux/ftrace.h b/include/linux/ftrace.h
index bb384068272..677432b9cb7 100644
--- a/include/linux/ftrace.h
+++ b/include/linux/ftrace.h
@@ -1,10 +1,17 @@
#ifndef _LINUX_FTRACE_H
#define _LINUX_FTRACE_H
-#ifdef CONFIG_FTRACE
-
#include <linux/linkage.h>
#include <linux/fs.h>
+#include <linux/ktime.h>
+#include <linux/init.h>
+#include <linux/types.h>
+#include <linux/module.h>
+#include <linux/kallsyms.h>
+#include <linux/bitops.h>
+#include <linux/sched.h>
+
+#ifdef CONFIG_FUNCTION_TRACER
extern int ftrace_enabled;
extern int
@@ -19,6 +26,45 @@ struct ftrace_ops {
struct ftrace_ops *next;
};
+extern int function_trace_stop;
+
+/*
+ * Type of the current tracing.
+ */
+enum ftrace_tracing_type_t {
+ FTRACE_TYPE_ENTER = 0, /* Hook the call of the function */
+ FTRACE_TYPE_RETURN, /* Hook the return of the function */
+};
+
+/* Current tracing type, default is FTRACE_TYPE_ENTER */
+extern enum ftrace_tracing_type_t ftrace_tracing_type;
+
+/**
+ * ftrace_stop - stop function tracer.
+ *
+ * A quick way to stop the function tracer. Note this is an on/off switch;
+ * it is not something that is recursive like preempt_disable.
+ * This does not disable the calling of mcount; it only stops the
+ * calling of functions from mcount.
+ */
+static inline void ftrace_stop(void)
+{
+ function_trace_stop = 1;
+}
+
+/**
+ * ftrace_start - start the function tracer.
+ *
+ * This function is the inverse of ftrace_stop. This does not enable
+ * function tracing if the function tracer is disabled. This only
+ * sets the function tracer flag to continue calling the functions
+ * from mcount.
+ */
+static inline void ftrace_start(void)
+{
+ function_trace_stop = 0;
+}
+
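A usage sketch for the ftrace_stop()/ftrace_start() pair documented above; the region being protected is a hypothetical placeholder:

	ftrace_stop();
	do_fragile_transition();	/* placeholder, e.g. a late suspend step */
	ftrace_start();
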
/*
* The ftrace_ops must be a static and should also
* be read_mostly. These functions do modify read_mostly variables
@@ -32,15 +78,26 @@ void clear_ftrace_function(void);
extern void ftrace_stub(unsigned long a0, unsigned long a1);
-#else /* !CONFIG_FTRACE */
+#else /* !CONFIG_FUNCTION_TRACER */
# define register_ftrace_function(ops) do { } while (0)
# define unregister_ftrace_function(ops) do { } while (0)
# define clear_ftrace_function(ops) do { } while (0)
-#endif /* CONFIG_FTRACE */
+static inline void ftrace_kill(void) { }
+static inline void ftrace_stop(void) { }
+static inline void ftrace_start(void) { }
+#endif /* CONFIG_FUNCTION_TRACER */
+
+#ifdef CONFIG_STACK_TRACER
+extern int stack_tracer_enabled;
+int
+stack_trace_sysctl(struct ctl_table *table, int write,
+ struct file *file, void __user *buffer, size_t *lenp,
+ loff_t *ppos);
+#endif
#ifdef CONFIG_DYNAMIC_FTRACE
-# define FTRACE_HASHBITS 10
-# define FTRACE_HASHSIZE (1<<FTRACE_HASHBITS)
+/* asm/ftrace.h must be defined for archs supporting dynamic ftrace */
+#include <asm/ftrace.h>
enum {
FTRACE_FL_FREE = (1 << 0),
@@ -53,9 +110,10 @@ enum {
};
struct dyn_ftrace {
- struct hlist_node node;
- unsigned long ip; /* address of mcount call-site */
- unsigned long flags;
+ struct list_head list;
+ unsigned long ip; /* address of mcount call-site */
+ unsigned long flags;
+ struct dyn_arch_ftrace arch;
};
int ftrace_force_update(void);
@@ -63,47 +121,103 @@ void ftrace_set_filter(unsigned char *buf, int len, int reset);
/* defined in arch */
extern int ftrace_ip_converted(unsigned long ip);
-extern unsigned char *ftrace_nop_replace(void);
-extern unsigned char *ftrace_call_replace(unsigned long ip, unsigned long addr);
extern int ftrace_dyn_arch_init(void *data);
-extern int ftrace_mcount_set(unsigned long *data);
-extern int ftrace_modify_code(unsigned long ip, unsigned char *old_code,
- unsigned char *new_code);
extern int ftrace_update_ftrace_func(ftrace_func_t func);
extern void ftrace_caller(void);
extern void ftrace_call(void);
extern void mcount_call(void);
+#ifdef CONFIG_FUNCTION_GRAPH_TRACER
+extern void ftrace_graph_caller(void);
+extern int ftrace_enable_ftrace_graph_caller(void);
+extern int ftrace_disable_ftrace_graph_caller(void);
+#else
+static inline int ftrace_enable_ftrace_graph_caller(void) { return 0; }
+static inline int ftrace_disable_ftrace_graph_caller(void) { return 0; }
+#endif
+
+/**
+ * ftrace_make_nop - convert code into a nop
+ * @mod: module structure if called by module load initialization
+ * @rec: the mcount call site record
+ * @addr: the address that the call site should be calling
+ *
+ * This is a very sensitive operation and great care needs
+ * to be taken by the arch. The operation should carefully
+ * read the location, check to see if what is read is indeed
+ * what we expect it to be, and then on success of the compare,
+ * it should write to the location.
+ *
+ * The code segment at @rec->ip should be a call to @addr
+ *
+ * Return must be:
+ * 0 on success
+ * -EFAULT on error reading the location
+ * -EINVAL on a failed compare of the contents
+ * -EPERM on error writing to the location
+ * Any other value will be considered a failure.
+ */
+extern int ftrace_make_nop(struct module *mod,
+ struct dyn_ftrace *rec, unsigned long addr);
+
+/**
+ * ftrace_make_call - convert a nop call site into a call to addr
+ * @rec: the mcount call site record
+ * @addr: the address that the call site should call
+ *
+ * This is a very sensitive operation and great care needs
+ * to be taken by the arch. The operation should carefully
+ * read the location, check to see if what is read is indeed
+ * what we expect it to be, and then on success of the compare,
+ * it should write to the location.
+ *
+ * The code segment at @rec->ip should be a nop
+ *
+ * Return must be:
+ * 0 on success
+ * -EFAULT on error reading the location
+ * -EINVAL on a failed compare of the contents
+ * -EPERM on error writing to the location
+ * Any other value will be considered a failure.
+ */
+extern int ftrace_make_call(struct dyn_ftrace *rec, unsigned long addr);
+
+
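To make the documented return-code contract concrete, here is a hedged per-architecture sketch. The helpers arch_expected_call(), arch_insn_read(), arch_insn_write() and arch_nop_insn() are hypothetical stand-ins for whatever the architecture provides; MCOUNT_INSN_SIZE comes from asm/ftrace.h on architectures that define it:

/* Illustrative only: read the call site, compare against the expected
 * call instruction, and only then rewrite it to a nop. */
static int example_make_nop(struct dyn_ftrace *rec, unsigned long addr)
{
	unsigned char cur[MCOUNT_INSN_SIZE], expect[MCOUNT_INSN_SIZE];

	arch_expected_call(expect, rec->ip, addr);	/* hypothetical */
	if (arch_insn_read(cur, rec->ip))		/* hypothetical */
		return -EFAULT;		/* could not read the location */
	if (memcmp(cur, expect, MCOUNT_INSN_SIZE))
		return -EINVAL;		/* contents were not what we expected */
	if (arch_insn_write(rec->ip, arch_nop_insn()))	/* hypothetical */
		return -EPERM;		/* could not write the location */
	return 0;
}
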
+/* May be defined in arch */
+extern int ftrace_arch_read_dyn_info(char *buf, int size);
extern int skip_trace(unsigned long ip);
-void ftrace_disable_daemon(void);
-void ftrace_enable_daemon(void);
+extern void ftrace_release(void *start, unsigned long size);
+extern void ftrace_disable_daemon(void);
+extern void ftrace_enable_daemon(void);
#else
# define skip_trace(ip) ({ 0; })
# define ftrace_force_update() ({ 0; })
# define ftrace_set_filter(buf, len, reset) do { } while (0)
# define ftrace_disable_daemon() do { } while (0)
# define ftrace_enable_daemon() do { } while (0)
+static inline void ftrace_release(void *start, unsigned long size) { }
#endif /* CONFIG_DYNAMIC_FTRACE */
/* totally disable ftrace - can not re-enable after this */
void ftrace_kill(void);
-void ftrace_kill_atomic(void);
static inline void tracer_disable(void)
{
-#ifdef CONFIG_FTRACE
+#ifdef CONFIG_FUNCTION_TRACER
ftrace_enabled = 0;
#endif
}
-/* Ftrace disable/restore without lock. Some synchronization mechanism
+/*
+ * Ftrace disable/restore without lock. Some synchronization mechanism
 * must be used to prevent ftrace_enabled from being changed between
- * disable/restore. */
+ * disable/restore.
+ */
static inline int __ftrace_enabled_save(void)
{
-#ifdef CONFIG_FTRACE
+#ifdef CONFIG_FUNCTION_TRACER
int saved_ftrace_enabled = ftrace_enabled;
ftrace_enabled = 0;
return saved_ftrace_enabled;
@@ -114,7 +228,7 @@ static inline int __ftrace_enabled_save(void)
static inline void __ftrace_enabled_restore(int enabled)
{
-#ifdef CONFIG_FTRACE
+#ifdef CONFIG_FUNCTION_TRACER
ftrace_enabled = enabled;
#endif
}
@@ -155,11 +269,227 @@ static inline void __ftrace_enabled_restore(int enabled)
#endif
#ifdef CONFIG_TRACING
+extern int ftrace_dump_on_oops;
+
+extern void tracing_start(void);
+extern void tracing_stop(void);
+extern void ftrace_off_permanent(void);
+
extern void
ftrace_special(unsigned long arg1, unsigned long arg2, unsigned long arg3);
+
+/**
+ * ftrace_printk - printf formatting in the ftrace buffer
+ * @fmt: the printf format for printing
+ *
+ * Note: __ftrace_printk is an internal function for ftrace_printk and
+ * the @ip is passed in via the ftrace_printk macro.
+ *
+ * This function allows a kernel developer to debug fast path sections
+ * that printk is not appropriate for. By scattering in various
+ * printk like tracing in the code, a developer can quickly see
+ * where problems are occurring.
+ *
+ * This is intended as a debugging tool for the developer only.
+ * Please refrain from leaving ftrace_printks scattered around in
+ * your code.
+ */
+# define ftrace_printk(fmt...) __ftrace_printk(_THIS_IP_, fmt)
+extern int
+__ftrace_printk(unsigned long ip, const char *fmt, ...)
+ __attribute__ ((format (printf, 2, 3)));
+extern void ftrace_dump(void);
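A quick usage sketch for ftrace_printk(); the request pointer and timing variable are invented for illustration:

	/* Drop a marker into the ftrace ring buffer from a fast path
	 * where a regular printk() would be far too heavy. */
	ftrace_printk("submit: req=%p took %llu ns\n",
		      req, (unsigned long long)delta_ns);
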
#else
static inline void
ftrace_special(unsigned long arg1, unsigned long arg2, unsigned long arg3) { }
+static inline int
+ftrace_printk(const char *fmt, ...) __attribute__ ((format (printf, 1, 2)));
+
+static inline void tracing_start(void) { }
+static inline void tracing_stop(void) { }
+static inline void ftrace_off_permanent(void) { }
+static inline int
+ftrace_printk(const char *fmt, ...)
+{
+ return 0;
+}
+static inline void ftrace_dump(void) { }
+#endif
+
+#ifdef CONFIG_FTRACE_MCOUNT_RECORD
+extern void ftrace_init(void);
+extern void ftrace_init_module(struct module *mod,
+ unsigned long *start, unsigned long *end);
+#else
+static inline void ftrace_init(void) { }
+static inline void
+ftrace_init_module(struct module *mod,
+ unsigned long *start, unsigned long *end) { }
+#endif
+
+enum {
+ POWER_NONE = 0,
+ POWER_CSTATE = 1,
+ POWER_PSTATE = 2,
+};
+
+struct power_trace {
+#ifdef CONFIG_POWER_TRACER
+ ktime_t stamp;
+ ktime_t end;
+ int type;
+ int state;
+#endif
+};
+
+#ifdef CONFIG_POWER_TRACER
+extern void trace_power_start(struct power_trace *it, unsigned int type,
+ unsigned int state);
+extern void trace_power_mark(struct power_trace *it, unsigned int type,
+ unsigned int state);
+extern void trace_power_end(struct power_trace *it);
+#else
+static inline void trace_power_start(struct power_trace *it, unsigned int type,
+ unsigned int state) { }
+static inline void trace_power_mark(struct power_trace *it, unsigned int type,
+ unsigned int state) { }
+static inline void trace_power_end(struct power_trace *it) { }
+#endif
+
+
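A sketch of how the power tracer hooks above are meant to bracket a low-power transition; the C-state number is illustrative:

static void example_enter_idle(void)
{
	struct power_trace it;

	trace_power_start(&it, POWER_CSTATE, 1);
	/* ... architecture-specific idle entry would go here ... */
	trace_power_end(&it);
}
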
+/*
+ * Structure that defines an entry function trace.
+ */
+struct ftrace_graph_ent {
+ unsigned long func; /* Current function */
+ int depth;
+};
+
+/*
+ * Structure that defines a return function trace.
+ */
+struct ftrace_graph_ret {
+ unsigned long func; /* Current function */
+ unsigned long long calltime;
+ unsigned long long rettime;
+ /* Number of functions that overran the depth limit for current task */
+ unsigned long overrun;
+ int depth;
+};
+
+#ifdef CONFIG_FUNCTION_GRAPH_TRACER
+
+/*
+ * Sometimes we don't want to trace a function with the function
+ * graph tracer, but we still want it to be traced by the usual function
+ * tracer if the function graph tracer is not configured.
+ */
+#define __notrace_funcgraph notrace
+
+/*
+ * We want to know which function is an entrypoint of a hardirq.
+ * That will help us to put a signal on the output.
+ */
+#define __irq_entry __attribute__((__section__(".irqentry.text")))
+
+/* Limits of hardirq entrypoints */
+extern char __irqentry_text_start[];
+extern char __irqentry_text_end[];
+
+#define FTRACE_RETFUNC_DEPTH 50
+#define FTRACE_RETSTACK_ALLOC_SIZE 32
+/* Types of the callback handlers for tracing the function graph */
+typedef void (*trace_func_graph_ret_t)(struct ftrace_graph_ret *); /* return */
+typedef int (*trace_func_graph_ent_t)(struct ftrace_graph_ent *); /* entry */
+
+extern int register_ftrace_graph(trace_func_graph_ret_t retfunc,
+ trace_func_graph_ent_t entryfunc);
+
+extern void ftrace_graph_stop(void);
+
+/* The current handlers in use */
+extern trace_func_graph_ret_t ftrace_graph_return;
+extern trace_func_graph_ent_t ftrace_graph_entry;
+
+extern void unregister_ftrace_graph(void);
+
+extern void ftrace_graph_init_task(struct task_struct *t);
+extern void ftrace_graph_exit_task(struct task_struct *t);
+
+static inline int task_curr_ret_stack(struct task_struct *t)
+{
+ return t->curr_ret_stack;
+}
+
+static inline void pause_graph_tracing(void)
+{
+ atomic_inc(&current->tracing_graph_pause);
+}
+
+static inline void unpause_graph_tracing(void)
+{
+ atomic_dec(&current->tracing_graph_pause);
+}
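A short sketch of the pause/unpause helpers above, used to keep the graph tracer from recursing into code the tracer itself may call; the protected helper is a placeholder:

	pause_graph_tracing();
	do_tracer_unsafe_work();	/* placeholder for the protected region */
	unpause_graph_tracing();
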
+#else
+
+#define __notrace_funcgraph
+#define __irq_entry
+
+static inline void ftrace_graph_init_task(struct task_struct *t) { }
+static inline void ftrace_graph_exit_task(struct task_struct *t) { }
+
+static inline int task_curr_ret_stack(struct task_struct *tsk)
+{
+ return -1;
+}
+
+static inline void pause_graph_tracing(void) { }
+static inline void unpause_graph_tracing(void) { }
#endif
+#ifdef CONFIG_TRACING
+#include <linux/sched.h>
+
+/* flags for current->trace */
+enum {
+ TSK_TRACE_FL_TRACE_BIT = 0,
+ TSK_TRACE_FL_GRAPH_BIT = 1,
+};
+enum {
+ TSK_TRACE_FL_TRACE = 1 << TSK_TRACE_FL_TRACE_BIT,
+ TSK_TRACE_FL_GRAPH = 1 << TSK_TRACE_FL_GRAPH_BIT,
+};
+
+static inline void set_tsk_trace_trace(struct task_struct *tsk)
+{
+ set_bit(TSK_TRACE_FL_TRACE_BIT, &tsk->trace);
+}
+
+static inline void clear_tsk_trace_trace(struct task_struct *tsk)
+{
+ clear_bit(TSK_TRACE_FL_TRACE_BIT, &tsk->trace);
+}
+
+static inline int test_tsk_trace_trace(struct task_struct *tsk)
+{
+ return tsk->trace & TSK_TRACE_FL_TRACE;
+}
+
+static inline void set_tsk_trace_graph(struct task_struct *tsk)
+{
+ set_bit(TSK_TRACE_FL_GRAPH_BIT, &tsk->trace);
+}
+
+static inline void clear_tsk_trace_graph(struct task_struct *tsk)
+{
+ clear_bit(TSK_TRACE_FL_GRAPH_BIT, &tsk->trace);
+}
+
+static inline int test_tsk_trace_graph(struct task_struct *tsk)
+{
+ return tsk->trace & TSK_TRACE_FL_GRAPH;
+}
+
+#endif /* CONFIG_TRACING */
+
#endif /* _LINUX_FTRACE_H */
diff --git a/include/linux/ftrace_irq.h b/include/linux/ftrace_irq.h
new file mode 100644
index 00000000000..366a054d0b0
--- /dev/null
+++ b/include/linux/ftrace_irq.h
@@ -0,0 +1,13 @@
+#ifndef _LINUX_FTRACE_IRQ_H
+#define _LINUX_FTRACE_IRQ_H
+
+
+#if defined(CONFIG_DYNAMIC_FTRACE) || defined(CONFIG_FUNCTION_GRAPH_TRACER)
+extern void ftrace_nmi_enter(void);
+extern void ftrace_nmi_exit(void);
+#else
+static inline void ftrace_nmi_enter(void) { }
+static inline void ftrace_nmi_exit(void) { }
+#endif
+
+#endif /* _LINUX_FTRACE_IRQ_H */
diff --git a/include/linux/fuse.h b/include/linux/fuse.h
index 265635dc990..350fe9767bb 100644
--- a/include/linux/fuse.h
+++ b/include/linux/fuse.h
@@ -17,8 +17,14 @@
* - add lock_owner field to fuse_setattr_in, fuse_read_in and fuse_write_in
* - add blksize field to fuse_attr
* - add file flags field to fuse_read_in and fuse_write_in
+ *
+ * 7.10
+ * - add nonseekable open flag
*/
+#ifndef _LINUX_FUSE_H
+#define _LINUX_FUSE_H
+
#include <asm/types.h>
#include <linux/major.h>
@@ -26,7 +32,7 @@
#define FUSE_KERNEL_VERSION 7
/** Minor version number of this interface */
-#define FUSE_KERNEL_MINOR_VERSION 9
+#define FUSE_KERNEL_MINOR_VERSION 10
/** The node ID of the root inode */
#define FUSE_ROOT_ID 1
@@ -98,9 +104,11 @@ struct fuse_file_lock {
*
* FOPEN_DIRECT_IO: bypass page cache for this open file
* FOPEN_KEEP_CACHE: don't invalidate the data cache on open
+ * FOPEN_NONSEEKABLE: the file is not seekable
*/
#define FOPEN_DIRECT_IO (1 << 0)
#define FOPEN_KEEP_CACHE (1 << 1)
+#define FOPEN_NONSEEKABLE (1 << 2)
/**
* INIT request/reply flags
@@ -409,3 +417,5 @@ struct fuse_dirent {
#define FUSE_DIRENT_ALIGN(x) (((x) + sizeof(__u64) - 1) & ~(sizeof(__u64) - 1))
#define FUSE_DIRENT_SIZE(d) \
FUSE_DIRENT_ALIGN(FUSE_NAME_OFFSET + (d)->namelen)
+
+#endif /* _LINUX_FUSE_H */
diff --git a/include/linux/futex.h b/include/linux/futex.h
index 586ab56a3ec..3bf5bb5a34f 100644
--- a/include/linux/futex.h
+++ b/include/linux/futex.h
@@ -25,7 +25,8 @@ union ktime;
#define FUTEX_WAKE_BITSET 10
#define FUTEX_PRIVATE_FLAG 128
-#define FUTEX_CMD_MASK ~FUTEX_PRIVATE_FLAG
+#define FUTEX_CLOCK_REALTIME 256
+#define FUTEX_CMD_MASK ~(FUTEX_PRIVATE_FLAG | FUTEX_CLOCK_REALTIME)
#define FUTEX_WAIT_PRIVATE (FUTEX_WAIT | FUTEX_PRIVATE_FLAG)
#define FUTEX_WAKE_PRIVATE (FUTEX_WAKE | FUTEX_PRIVATE_FLAG)
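With FUTEX_CLOCK_REALTIME folded into the command mask, decoding a raw futex op word looks roughly like this (a sketch of the kernel-side split, with op being the value passed to the syscall):

	int cmd = op & FUTEX_CMD_MASK;			/* strips both flag bits */
	int clockrt = op & FUTEX_CLOCK_REALTIME;	/* nonzero: time is CLOCK_REALTIME */
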
@@ -164,6 +165,8 @@ union futex_key {
} both;
};
+#define FUTEX_KEY_INIT (union futex_key) { .both = { .ptr = NULL } }
+
#ifdef CONFIG_FUTEX
extern void exit_robust_list(struct task_struct *curr);
extern void exit_pi_state_list(struct task_struct *curr);
diff --git a/include/linux/gameport.h b/include/linux/gameport.h
index f64e29c0ef3..0cd825f7363 100644
--- a/include/linux/gameport.h
+++ b/include/linux/gameport.h
@@ -146,10 +146,11 @@ static inline void gameport_unpin_driver(struct gameport *gameport)
mutex_unlock(&gameport->drv_mutex);
}
-void __gameport_register_driver(struct gameport_driver *drv, struct module *owner);
-static inline void gameport_register_driver(struct gameport_driver *drv)
+int __gameport_register_driver(struct gameport_driver *drv,
+ struct module *owner, const char *mod_name);
+static inline int __must_check gameport_register_driver(struct gameport_driver *drv)
{
- __gameport_register_driver(drv, THIS_MODULE);
+ return __gameport_register_driver(drv, THIS_MODULE, KBUILD_MODNAME);
}
void gameport_unregister_driver(struct gameport_driver *drv);
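Since gameport_register_driver() now returns an int marked __must_check, callers are expected to propagate the error; a minimal sketch with a hypothetical driver structure:

static int __init example_joystick_init(void)
{
	return gameport_register_driver(&example_gameport_drv);
}
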
diff --git a/include/linux/genhd.h b/include/linux/genhd.h
index 206cdf96c3a..16948eaecae 100644
--- a/include/linux/genhd.h
+++ b/include/linux/genhd.h
@@ -25,9 +25,6 @@ extern struct device_type part_type;
extern struct kobject *block_depr;
extern struct class block_class;
-extern const struct seq_operations partitions_op;
-extern const struct seq_operations diskstats_op;
-
enum {
/* These three have identical behaviour; use the second one if DOS FDISK gets
confused about extended/logical partitions starting past cylinder 1023. */
@@ -129,6 +126,7 @@ struct blk_scsi_cmd_filter {
struct disk_part_tbl {
struct rcu_head rcu_head;
int len;
+ struct hd_struct *last_lookup;
struct hd_struct *part[];
};
@@ -525,7 +523,9 @@ extern char *disk_name (struct gendisk *hd, int partno, char *buf);
extern int disk_expand_part_tbl(struct gendisk *disk, int target);
extern int rescan_partitions(struct gendisk *disk, struct block_device *bdev);
-extern int __must_check add_partition(struct gendisk *, int, sector_t, sector_t, int);
+extern struct hd_struct * __must_check add_partition(struct gendisk *disk,
+ int partno, sector_t start,
+ sector_t len, int flags);
extern void delete_partition(struct gendisk *, int);
extern void printk_all_partitions(void);
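Because add_partition() now returns a struct hd_struct pointer rather than an int, callers check the result with IS_ERR(); a hedged sketch (the variables and the zero flags value are illustrative):

	struct hd_struct *part;

	part = add_partition(disk, partno, start, len, 0 /* no flags */);
	if (IS_ERR(part))
		return PTR_ERR(part);
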
diff --git a/include/linux/gpio.h b/include/linux/gpio.h
index 730a20b8357..e10c49a5b96 100644
--- a/include/linux/gpio.h
+++ b/include/linux/gpio.h
@@ -8,6 +8,7 @@
#else
+#include <linux/kernel.h>
#include <linux/types.h>
#include <linux/errno.h>
@@ -32,6 +33,8 @@ static inline int gpio_request(unsigned gpio, const char *label)
static inline void gpio_free(unsigned gpio)
{
+ might_sleep();
+
/* GPIO can never have been requested */
WARN_ON(1);
}
diff --git a/include/linux/hardirq.h b/include/linux/hardirq.h
index 181006cc94a..f83288347dd 100644
--- a/include/linux/hardirq.h
+++ b/include/linux/hardirq.h
@@ -4,6 +4,7 @@
#include <linux/preempt.h>
#include <linux/smp_lock.h>
#include <linux/lockdep.h>
+#include <linux/ftrace_irq.h>
#include <asm/hardirq.h>
#include <asm/system.h>
@@ -118,13 +119,17 @@ static inline void account_system_vtime(struct task_struct *tsk)
}
#endif
-#if defined(CONFIG_PREEMPT_RCU) && defined(CONFIG_NO_HZ)
+#if defined(CONFIG_NO_HZ) && !defined(CONFIG_CLASSIC_RCU)
extern void rcu_irq_enter(void);
extern void rcu_irq_exit(void);
+extern void rcu_nmi_enter(void);
+extern void rcu_nmi_exit(void);
#else
# define rcu_irq_enter() do { } while (0)
# define rcu_irq_exit() do { } while (0)
-#endif /* CONFIG_PREEMPT_RCU */
+# define rcu_nmi_enter() do { } while (0)
+# define rcu_nmi_exit() do { } while (0)
+#endif /* #if defined(CONFIG_NO_HZ) && !defined(CONFIG_CLASSIC_RCU) */
/*
* It is safe to do non-atomic ops on ->hardirq_context,
@@ -134,7 +139,6 @@ extern void rcu_irq_exit(void);
*/
#define __irq_enter() \
do { \
- rcu_irq_enter(); \
account_system_vtime(current); \
add_preempt_count(HARDIRQ_OFFSET); \
trace_hardirq_enter(); \
@@ -153,7 +157,6 @@ extern void irq_enter(void);
trace_hardirq_exit(); \
account_system_vtime(current); \
sub_preempt_count(HARDIRQ_OFFSET); \
- rcu_irq_exit(); \
} while (0)
/*
@@ -161,7 +164,20 @@ extern void irq_enter(void);
*/
extern void irq_exit(void);
-#define nmi_enter() do { lockdep_off(); __irq_enter(); } while (0)
-#define nmi_exit() do { __irq_exit(); lockdep_on(); } while (0)
+#define nmi_enter() \
+ do { \
+ ftrace_nmi_enter(); \
+ lockdep_off(); \
+ rcu_nmi_enter(); \
+ __irq_enter(); \
+ } while (0)
+
+#define nmi_exit() \
+ do { \
+ __irq_exit(); \
+ rcu_nmi_exit(); \
+ lockdep_on(); \
+ ftrace_nmi_exit(); \
+ } while (0)
#endif /* LINUX_HARDIRQ_H */
diff --git a/include/linux/hdlc.h b/include/linux/hdlc.h
index c59769693be..fd47a151665 100644
--- a/include/linux/hdlc.h
+++ b/include/linux/hdlc.h
@@ -43,7 +43,7 @@ struct hdlc_proto {
};
-/* Pointed to by dev->priv */
+/* Pointed to by netdev_priv(dev) */
typedef struct hdlc_device {
/* used by HDLC layer to take control over HDLC device from hw driver*/
int (*attach)(struct net_device *dev,
@@ -80,7 +80,7 @@ struct net_device *alloc_hdlcdev(void *priv);
static inline struct hdlc_device* dev_to_hdlc(struct net_device *dev)
{
- return dev->priv;
+ return netdev_priv(dev);
}
static __inline__ void debug_frame(const struct sk_buff *skb)
diff --git a/include/linux/hid.h b/include/linux/hid.h
index f13bca2dd53..e5780f8c934 100644
--- a/include/linux/hid.h
+++ b/include/linux/hid.h
@@ -410,6 +410,7 @@ struct hid_output_fifo {
#define HID_SUSPENDED 5
#define HID_CLEAR_HALT 6
#define HID_DISCONNECTED 7
+#define HID_STARTED 8
struct hid_input {
struct list_head list;
@@ -417,6 +418,11 @@ struct hid_input {
struct input_dev *input;
};
+enum hid_type {
+ HID_TYPE_OTHER = 0,
+ HID_TYPE_USBMOUSE
+};
+
struct hid_driver;
struct hid_ll_driver;
@@ -431,6 +437,7 @@ struct hid_device { /* device report descriptor */
__u32 vendor; /* Vendor ID */
__u32 product; /* Product ID */
__u32 version; /* HID version */
+ enum hid_type type; /* device type (mouse, kbd, ...) */
unsigned country; /* HID country */
struct hid_report_enum report_enum[HID_REPORT_TYPES];
diff --git a/include/linux/highmem.h b/include/linux/highmem.h
index 7dcbc82f3b7..13875ce9112 100644
--- a/include/linux/highmem.h
+++ b/include/linux/highmem.h
@@ -63,12 +63,14 @@ static inline void *kmap_atomic(struct page *page, enum km_type idx)
#endif /* CONFIG_HIGHMEM */
/* when CONFIG_HIGHMEM is not set these will be plain clear/copy_page */
+#ifndef clear_user_highpage
static inline void clear_user_highpage(struct page *page, unsigned long vaddr)
{
void *addr = kmap_atomic(page, KM_USER0);
clear_user_page(addr, vaddr, page);
kunmap_atomic(addr, KM_USER0);
}
+#endif
#ifndef __HAVE_ARCH_ALLOC_ZEROED_USER_HIGHPAGE
/**
diff --git a/include/linux/hippidevice.h b/include/linux/hippidevice.h
index bab303dafd6..f148e490841 100644
--- a/include/linux/hippidevice.h
+++ b/include/linux/hippidevice.h
@@ -32,7 +32,9 @@ struct hippi_cb {
};
extern __be16 hippi_type_trans(struct sk_buff *skb, struct net_device *dev);
-
+extern int hippi_change_mtu(struct net_device *dev, int new_mtu);
+extern int hippi_mac_addr(struct net_device *dev, void *p);
+extern int hippi_neigh_setup_dev(struct net_device *dev, struct neigh_parms *p);
extern struct net_device *alloc_hippi_dev(int sizeof_priv);
#endif
diff --git a/include/linux/hrtimer.h b/include/linux/hrtimer.h
index 2f245fe63bd..bd37078c2d7 100644
--- a/include/linux/hrtimer.h
+++ b/include/linux/hrtimer.h
@@ -20,6 +20,8 @@
#include <linux/init.h>
#include <linux/list.h>
#include <linux/wait.h>
+#include <linux/percpu.h>
+
struct hrtimer_clock_base;
struct hrtimer_cpu_base;
@@ -41,31 +43,6 @@ enum hrtimer_restart {
};
/*
- * hrtimer callback modes:
- *
- * HRTIMER_CB_SOFTIRQ: Callback must run in softirq context
- * HRTIMER_CB_IRQSAFE: Callback may run in hardirq context
- * HRTIMER_CB_IRQSAFE_NO_RESTART: Callback may run in hardirq context and
- * does not restart the timer
- * HRTIMER_CB_IRQSAFE_PERCPU: Callback must run in hardirq context
- * Special mode for tick emulation and
- * scheduler timer. Such timers are per
- * cpu and not allowed to be migrated on
- * cpu unplug.
- * HRTIMER_CB_IRQSAFE_UNLOCKED: Callback should run in hardirq context
- * with timer->base lock unlocked
- * used for timers which call wakeup to
- * avoid lock order problems with rq->lock
- */
-enum hrtimer_cb_mode {
- HRTIMER_CB_SOFTIRQ,
- HRTIMER_CB_IRQSAFE,
- HRTIMER_CB_IRQSAFE_NO_RESTART,
- HRTIMER_CB_IRQSAFE_PERCPU,
- HRTIMER_CB_IRQSAFE_UNLOCKED,
-};
-
-/*
* Values to track state of the timer
*
* Possible states:
@@ -73,7 +50,6 @@ enum hrtimer_cb_mode {
* 0x00 inactive
* 0x01 enqueued into rbtree
* 0x02 callback function running
- * 0x04 callback pending (high resolution mode)
*
* Special cases:
* 0x03 callback function running and enqueued
@@ -95,20 +71,22 @@ enum hrtimer_cb_mode {
#define HRTIMER_STATE_INACTIVE 0x00
#define HRTIMER_STATE_ENQUEUED 0x01
#define HRTIMER_STATE_CALLBACK 0x02
-#define HRTIMER_STATE_PENDING 0x04
-#define HRTIMER_STATE_MIGRATE 0x08
+#define HRTIMER_STATE_MIGRATE 0x04
/**
* struct hrtimer - the basic hrtimer structure
* @node: red black tree node for time ordered insertion
- * @expires: the absolute expiry time in the hrtimers internal
+ * @_expires: the absolute expiry time in the hrtimers internal
* representation. The time is related to the clock on
- * which the timer is based.
+ * which the timer is based. It is set up by adding
+ * slack to the _softexpires value. For non-range timers
+ * it is identical to _softexpires.
+ * @_softexpires: the absolute earliest expiry time of the hrtimer.
+ * The time which was given as expiry time when the timer
+ * was armed.
* @function: timer expiry callback function
* @base: pointer to the timer base (per cpu and per clock)
* @state: state information (See bit values above)
- * @cb_mode: high resolution timer feature to select the callback execution
- * mode
* @cb_entry: list head to enqueue an expired timer into the callback list
* @start_site: timer statistics field to store the site where the timer
* was started
@@ -121,16 +99,16 @@ enum hrtimer_cb_mode {
*/
struct hrtimer {
struct rb_node node;
- ktime_t expires;
+ ktime_t _expires;
+ ktime_t _softexpires;
enum hrtimer_restart (*function)(struct hrtimer *);
struct hrtimer_clock_base *base;
unsigned long state;
- enum hrtimer_cb_mode cb_mode;
struct list_head cb_entry;
#ifdef CONFIG_TIMER_STATS
+ int start_pid;
void *start_site;
char start_comm[16];
- int start_pid;
#endif
};
@@ -155,10 +133,8 @@ struct hrtimer_sleeper {
* @first: pointer to the timer node which expires first
* @resolution: the resolution of the clock, in nanoseconds
* @get_time: function to retrieve the current time of the clock
- * @get_softirq_time: function to retrieve the current time from the softirq
* @softirq_time: the time when running the hrtimer queue in the softirq
* @offset: offset of this clock to the monotonic base
- * @reprogram: function to reprogram the timer event
*/
struct hrtimer_clock_base {
struct hrtimer_cpu_base *cpu_base;
@@ -167,13 +143,9 @@ struct hrtimer_clock_base {
struct rb_node *first;
ktime_t resolution;
ktime_t (*get_time)(void);
- ktime_t (*get_softirq_time)(void);
ktime_t softirq_time;
#ifdef CONFIG_HIGH_RES_TIMERS
ktime_t offset;
- int (*reprogram)(struct hrtimer *t,
- struct hrtimer_clock_base *b,
- ktime_t n);
#endif
};
@@ -191,15 +163,11 @@ struct hrtimer_clock_base {
 * @check_clocks: Indicator, when set evaluate time source and clock
* event devices whether high resolution mode can be
* activated.
- * @cb_pending: Expired timers are moved from the rbtree to this
- * list in the timer interrupt. The list is processed
- * in the softirq.
* @nr_events: Total number of timer interrupt events
*/
struct hrtimer_cpu_base {
spinlock_t lock;
struct hrtimer_clock_base clock_base[HRTIMER_MAX_CLOCK_BASES];
- struct list_head cb_pending;
#ifdef CONFIG_HIGH_RES_TIMERS
ktime_t expires_next;
int hres_active;
@@ -207,6 +175,71 @@ struct hrtimer_cpu_base {
#endif
};
+static inline void hrtimer_set_expires(struct hrtimer *timer, ktime_t time)
+{
+ timer->_expires = time;
+ timer->_softexpires = time;
+}
+
+static inline void hrtimer_set_expires_range(struct hrtimer *timer, ktime_t time, ktime_t delta)
+{
+ timer->_softexpires = time;
+ timer->_expires = ktime_add_safe(time, delta);
+}
+
+static inline void hrtimer_set_expires_range_ns(struct hrtimer *timer, ktime_t time, unsigned long delta)
+{
+ timer->_softexpires = time;
+ timer->_expires = ktime_add_safe(time, ns_to_ktime(delta));
+}
+
+static inline void hrtimer_set_expires_tv64(struct hrtimer *timer, s64 tv64)
+{
+ timer->_expires.tv64 = tv64;
+ timer->_softexpires.tv64 = tv64;
+}
+
+static inline void hrtimer_add_expires(struct hrtimer *timer, ktime_t time)
+{
+ timer->_expires = ktime_add_safe(timer->_expires, time);
+ timer->_softexpires = ktime_add_safe(timer->_softexpires, time);
+}
+
+static inline void hrtimer_add_expires_ns(struct hrtimer *timer, u64 ns)
+{
+ timer->_expires = ktime_add_ns(timer->_expires, ns);
+ timer->_softexpires = ktime_add_ns(timer->_softexpires, ns);
+}
+
+static inline ktime_t hrtimer_get_expires(const struct hrtimer *timer)
+{
+ return timer->_expires;
+}
+
+static inline ktime_t hrtimer_get_softexpires(const struct hrtimer *timer)
+{
+ return timer->_softexpires;
+}
+
+static inline s64 hrtimer_get_expires_tv64(const struct hrtimer *timer)
+{
+ return timer->_expires.tv64;
+}
+static inline s64 hrtimer_get_softexpires_tv64(const struct hrtimer *timer)
+{
+ return timer->_softexpires.tv64;
+}
+
+static inline s64 hrtimer_get_expires_ns(const struct hrtimer *timer)
+{
+ return ktime_to_ns(timer->_expires);
+}
+
+static inline ktime_t hrtimer_expires_remaining(const struct hrtimer *timer)
+{
+ return ktime_sub(timer->_expires, timer->base->get_time());
+}
+
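A sketch of the new expiry accessors in use: give a timer a soft expiry plus slack instead of touching the renamed _expires/_softexpires fields directly (my_timer, expiry and the 100 us slack are illustrative):

	/* Allow the timer a 100 us window in which it may fire. */
	hrtimer_set_expires_range_ns(&my_timer, expiry, 100 * NSEC_PER_USEC);
	pr_debug("hard expiry at %lld\n",
		 (long long)hrtimer_get_expires_tv64(&my_timer));
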
#ifdef CONFIG_HIGH_RES_TIMERS
struct clock_event_device;
@@ -227,6 +260,8 @@ static inline int hrtimer_is_hres_active(struct hrtimer *timer)
return timer->base->cpu_base->hres_active;
}
+extern void hrtimer_peek_ahead_timers(void);
+
/*
* The resolution of the clocks. The resolution value is returned in
* the clock_getres() system call to give application programmers an
@@ -249,6 +284,7 @@ static inline int hrtimer_is_hres_active(struct hrtimer *timer)
* is expired in the next softirq when the clock was advanced.
*/
static inline void clock_was_set(void) { }
+static inline void hrtimer_peek_ahead_timers(void) { }
static inline void hres_timers_resume(void) { }
@@ -270,6 +306,10 @@ static inline int hrtimer_is_hres_active(struct hrtimer *timer)
extern ktime_t ktime_get(void);
extern ktime_t ktime_get_real(void);
+
+DECLARE_PER_CPU(struct tick_device, tick_cpu_device);
+
+
/* Exported timer functions: */
/* Initialize timers: */
@@ -294,12 +334,25 @@ static inline void destroy_hrtimer_on_stack(struct hrtimer *timer) { }
/* Basic timer operations: */
extern int hrtimer_start(struct hrtimer *timer, ktime_t tim,
const enum hrtimer_mode mode);
+extern int hrtimer_start_range_ns(struct hrtimer *timer, ktime_t tim,
+ unsigned long range_ns, const enum hrtimer_mode mode);
extern int hrtimer_cancel(struct hrtimer *timer);
extern int hrtimer_try_to_cancel(struct hrtimer *timer);
+static inline int hrtimer_start_expires(struct hrtimer *timer,
+ enum hrtimer_mode mode)
+{
+ unsigned long delta;
+ ktime_t soft, hard;
+ soft = hrtimer_get_softexpires(timer);
+ hard = hrtimer_get_expires(timer);
+ delta = ktime_to_ns(ktime_sub(hard, soft));
+ return hrtimer_start_range_ns(timer, soft, delta, mode);
+}
+
static inline int hrtimer_restart(struct hrtimer *timer)
{
- return hrtimer_start(timer, timer->expires, HRTIMER_MODE_ABS);
+ return hrtimer_start_expires(timer, HRTIMER_MODE_ABS);
}
/* Query timers: */
@@ -322,8 +375,7 @@ static inline int hrtimer_active(const struct hrtimer *timer)
*/
static inline int hrtimer_is_queued(struct hrtimer *timer)
{
- return timer->state &
- (HRTIMER_STATE_ENQUEUED | HRTIMER_STATE_PENDING);
+ return timer->state & HRTIMER_STATE_ENQUEUED;
}
/*
@@ -356,6 +408,10 @@ extern long hrtimer_nanosleep_restart(struct restart_block *restart_block);
extern void hrtimer_init_sleeper(struct hrtimer_sleeper *sl,
struct task_struct *tsk);
+extern int schedule_hrtimeout_range(ktime_t *expires, unsigned long delta,
+ const enum hrtimer_mode mode);
+extern int schedule_hrtimeout(ktime_t *expires, const enum hrtimer_mode mode);
+
/* Soft interrupt function to run the hrtimer queues: */
extern void hrtimer_run_queues(void);
extern void hrtimer_run_pending(void);
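A usage sketch for schedule_hrtimeout_range(): sleep until an absolute deadline while granting the scheduler some slack to coalesce wakeups (the deadline and 50 us slack are illustrative):

	ktime_t deadline = ktime_add_ns(ktime_get(), 500 * NSEC_PER_USEC);

	set_current_state(TASK_INTERRUPTIBLE);
	schedule_hrtimeout_range(&deadline, 50 * NSEC_PER_USEC, HRTIMER_MODE_ABS);
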
diff --git a/include/linux/hugetlb.h b/include/linux/hugetlb.h
index 32e0ef0f6e1..e1c8afc002c 100644
--- a/include/linux/hugetlb.h
+++ b/include/linux/hugetlb.h
@@ -27,7 +27,7 @@ void unmap_hugepage_range(struct vm_area_struct *,
void __unmap_hugepage_range(struct vm_area_struct *,
unsigned long, unsigned long, struct page *);
int hugetlb_prefault(struct address_space *, struct vm_area_struct *);
-int hugetlb_report_meminfo(char *);
+void hugetlb_report_meminfo(struct seq_file *);
int hugetlb_report_node_meminfo(int, char *);
unsigned long hugetlb_total_pages(void);
int hugetlb_fault(struct mm_struct *mm, struct vm_area_struct *vma,
@@ -79,7 +79,9 @@ static inline unsigned long hugetlb_total_pages(void)
#define copy_hugetlb_page_range(src, dst, vma) ({ BUG(); 0; })
#define hugetlb_prefault(mapping, vma) ({ BUG(); 0; })
#define unmap_hugepage_range(vma, start, end, page) BUG()
-#define hugetlb_report_meminfo(buf) 0
+static inline void hugetlb_report_meminfo(struct seq_file *m)
+{
+}
#define hugetlb_report_node_meminfo(n, buf) 0
#define follow_huge_pmd(mm, addr, pmd, write) NULL
#define follow_huge_pud(mm, addr, pud, write) NULL
diff --git a/include/linux/i2c-algo-pcf.h b/include/linux/i2c-algo-pcf.h
index 0177d280f73..0f91a957a69 100644
--- a/include/linux/i2c-algo-pcf.h
+++ b/include/linux/i2c-algo-pcf.h
@@ -31,7 +31,10 @@ struct i2c_algo_pcf_data {
int (*getpcf) (void *data, int ctl);
int (*getown) (void *data);
int (*getclock) (void *data);
- void (*waitforpin) (void);
+ void (*waitforpin) (void *data);
+
+ void (*xfer_begin) (void *data);
+ void (*xfer_end) (void *data);
/* Multi-master lost arbitration back-off delay (msecs)
* This should be set by the bus adapter or knowledgable client
diff --git a/include/linux/i2c-id.h b/include/linux/i2c-id.h
index 493435bcdbe..01d67ba9e98 100644
--- a/include/linux/i2c-id.h
+++ b/include/linux/i2c-id.h
@@ -60,7 +60,7 @@
#define I2C_DRIVERID_WM8775 69 /* wm8775 audio processor */
#define I2C_DRIVERID_CS53L32A 70 /* cs53l32a audio processor */
#define I2C_DRIVERID_CX25840 71 /* cx2584x video encoder */
-#define I2C_DRIVERID_SAA7127 72 /* saa7124 video encoder */
+#define I2C_DRIVERID_SAA7127 72 /* saa7127 video encoder */
#define I2C_DRIVERID_SAA711X 73 /* saa711x video encoders */
#define I2C_DRIVERID_AKITAIOEXP 74 /* IO Expander on Sharp SL-C1000 */
#define I2C_DRIVERID_INFRARED 75 /* I2C InfraRed on Video boards */
diff --git a/include/linux/i2c.h b/include/linux/i2c.h
index 06115128047..33a5992d493 100644
--- a/include/linux/i2c.h
+++ b/include/linux/i2c.h
@@ -53,45 +53,44 @@ struct i2c_board_info;
* transmit one message at a time, a more complex version can be used to
* transmit an arbitrary number of messages without interruption.
*/
-extern int i2c_master_send(struct i2c_client *,const char* ,int);
-extern int i2c_master_recv(struct i2c_client *,char* ,int);
+extern int i2c_master_send(struct i2c_client *client, const char *buf,
+ int count);
+extern int i2c_master_recv(struct i2c_client *client, char *buf, int count);
/* Transfer num messages.
*/
-extern int i2c_transfer(struct i2c_adapter *adap, struct i2c_msg *msgs, int num);
-
+extern int i2c_transfer(struct i2c_adapter *adap, struct i2c_msg *msgs,
+ int num);
/* This is the very generalized SMBus access routine. You probably do not
want to use this, though; one of the functions below may be much easier,
and probably just as fast.
Note that we use i2c_adapter here, because you do not need a specific
smbus adapter to call this function. */
-extern s32 i2c_smbus_xfer (struct i2c_adapter * adapter, u16 addr,
- unsigned short flags,
- char read_write, u8 command, int size,
- union i2c_smbus_data * data);
+extern s32 i2c_smbus_xfer(struct i2c_adapter *adapter, u16 addr,
+ unsigned short flags, char read_write, u8 command,
+ int size, union i2c_smbus_data *data);
/* Now follow the 'nice' access routines. These also document the calling
conventions of i2c_smbus_xfer. */
-extern s32 i2c_smbus_read_byte(struct i2c_client * client);
-extern s32 i2c_smbus_write_byte(struct i2c_client * client, u8 value);
-extern s32 i2c_smbus_read_byte_data(struct i2c_client * client, u8 command);
-extern s32 i2c_smbus_write_byte_data(struct i2c_client * client,
- u8 command, u8 value);
-extern s32 i2c_smbus_read_word_data(struct i2c_client * client, u8 command);
-extern s32 i2c_smbus_write_word_data(struct i2c_client * client,
- u8 command, u16 value);
+extern s32 i2c_smbus_read_byte(struct i2c_client *client);
+extern s32 i2c_smbus_write_byte(struct i2c_client *client, u8 value);
+extern s32 i2c_smbus_read_byte_data(struct i2c_client *client, u8 command);
+extern s32 i2c_smbus_write_byte_data(struct i2c_client *client,
+ u8 command, u8 value);
+extern s32 i2c_smbus_read_word_data(struct i2c_client *client, u8 command);
+extern s32 i2c_smbus_write_word_data(struct i2c_client *client,
+ u8 command, u16 value);
/* Returns the number of read bytes */
extern s32 i2c_smbus_read_block_data(struct i2c_client *client,
u8 command, u8 *values);
-extern s32 i2c_smbus_write_block_data(struct i2c_client * client,
- u8 command, u8 length,
- const u8 *values);
+extern s32 i2c_smbus_write_block_data(struct i2c_client *client,
+ u8 command, u8 length, const u8 *values);
/* Returns the number of read bytes */
-extern s32 i2c_smbus_read_i2c_block_data(struct i2c_client * client,
+extern s32 i2c_smbus_read_i2c_block_data(struct i2c_client *client,
u8 command, u8 length, u8 *values);
-extern s32 i2c_smbus_write_i2c_block_data(struct i2c_client * client,
+extern s32 i2c_smbus_write_i2c_block_data(struct i2c_client *client,
u8 command, u8 length,
const u8 *values);
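A small sketch of the SMBus convenience routines whose prototypes were cleaned up above; the register number is hypothetical and a negative return value signals a bus error:

	s32 id = i2c_smbus_read_byte_data(client, 0x00 /* hypothetical ID reg */);
	if (id < 0)
		return id;	/* propagate the bus error */
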
@@ -169,7 +168,7 @@ struct i2c_driver {
/* a ioctl like command that can be used to perform specific functions
* with the device.
*/
- int (*command)(struct i2c_client *client,unsigned int cmd, void *arg);
+ int (*command)(struct i2c_client *client, unsigned int cmd, void *arg);
struct device_driver driver;
const struct i2c_device_id *id_table;
@@ -224,14 +223,14 @@ static inline struct i2c_client *kobj_to_i2c_client(struct kobject *kobj)
return to_i2c_client(dev);
}
-static inline void *i2c_get_clientdata (struct i2c_client *dev)
+static inline void *i2c_get_clientdata(const struct i2c_client *dev)
{
- return dev_get_drvdata (&dev->dev);
+ return dev_get_drvdata(&dev->dev);
}
-static inline void i2c_set_clientdata (struct i2c_client *dev, void *data)
+static inline void i2c_set_clientdata(struct i2c_client *dev, void *data)
{
- dev_set_drvdata (&dev->dev, data);
+ dev_set_drvdata(&dev->dev, data);
}
/**
@@ -240,6 +239,7 @@ static inline void i2c_set_clientdata (struct i2c_client *dev, void *data)
* @flags: to initialize i2c_client.flags
* @addr: stored in i2c_client.addr
* @platform_data: stored in i2c_client.dev.platform_data
+ * @archdata: copied into i2c_client.dev.archdata
* @irq: stored in i2c_client.irq
*
* I2C doesn't actually support hardware probing, although controllers and
@@ -259,6 +259,7 @@ struct i2c_board_info {
unsigned short flags;
unsigned short addr;
void *platform_data;
+ struct dev_archdata *archdata;
int irq;
};
@@ -272,7 +273,7 @@ struct i2c_board_info {
* fields (such as associated irq, or device-specific platform_data)
* are provided using conventional syntax.
*/
-#define I2C_BOARD_INFO(dev_type,dev_addr) \
+#define I2C_BOARD_INFO(dev_type, dev_addr) \
.type = (dev_type), .addr = (dev_addr)
@@ -306,10 +307,12 @@ extern void i2c_unregister_device(struct i2c_client *);
*/
#ifdef CONFIG_I2C_BOARDINFO
extern int
-i2c_register_board_info(int busnum, struct i2c_board_info const *info, unsigned n);
+i2c_register_board_info(int busnum, struct i2c_board_info const *info,
+ unsigned n);
#else
static inline int
-i2c_register_board_info(int busnum, struct i2c_board_info const *info, unsigned n)
+i2c_register_board_info(int busnum, struct i2c_board_info const *info,
+ unsigned n)
{
return 0;
}
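A sketch of how board code typically pairs I2C_BOARD_INFO() with i2c_register_board_info(); the device type string, address and bus number are illustrative:

static struct i2c_board_info example_i2c_devices[] __initdata = {
	{ I2C_BOARD_INFO("eeprom", 0x50), },
};

	/* in the board's init code: */
	i2c_register_board_info(0, example_i2c_devices,
				ARRAY_SIZE(example_i2c_devices));
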
@@ -328,11 +331,11 @@ struct i2c_algorithm {
using common I2C messages */
/* master_xfer should return the number of messages successfully
processed, or a negative value on error */
- int (*master_xfer)(struct i2c_adapter *adap,struct i2c_msg *msgs,
- int num);
+ int (*master_xfer)(struct i2c_adapter *adap, struct i2c_msg *msgs,
+ int num);
int (*smbus_xfer) (struct i2c_adapter *adap, u16 addr,
- unsigned short flags, char read_write,
- u8 command, int size, union i2c_smbus_data * data);
+ unsigned short flags, char read_write,
+ u8 command, int size, union i2c_smbus_data *data);
/* To determine what the adapter supports */
u32 (*functionality) (struct i2c_adapter *);
@@ -345,7 +348,7 @@ struct i2c_algorithm {
struct i2c_adapter {
struct module *owner;
unsigned int id;
- unsigned int class;
+ unsigned int class; /* classes to allow probing for */
const struct i2c_algorithm *algo; /* the algorithm to access the bus */
void *algo_data;
@@ -369,14 +372,14 @@ struct i2c_adapter {
};
#define to_i2c_adapter(d) container_of(d, struct i2c_adapter, dev)
-static inline void *i2c_get_adapdata (struct i2c_adapter *dev)
+static inline void *i2c_get_adapdata(const struct i2c_adapter *dev)
{
- return dev_get_drvdata (&dev->dev);
+ return dev_get_drvdata(&dev->dev);
}
-static inline void i2c_set_adapdata (struct i2c_adapter *dev, void *data)
+static inline void i2c_set_adapdata(struct i2c_adapter *dev, void *data)
{
- dev_set_drvdata (&dev->dev, data);
+ dev_set_drvdata(&dev->dev, data);
}
/*flags for the client struct: */
@@ -449,7 +452,7 @@ extern int i2c_probe(struct i2c_adapter *adapter,
const struct i2c_client_address_data *address_data,
int (*found_proc) (struct i2c_adapter *, int, int));
-extern struct i2c_adapter* i2c_get_adapter(int id);
+extern struct i2c_adapter *i2c_get_adapter(int id);
extern void i2c_put_adapter(struct i2c_adapter *adap);
@@ -465,7 +468,7 @@ static inline int i2c_check_functionality(struct i2c_adapter *adap, u32 func)
return (func & i2c_get_functionality(adap)) == func;
}
-/* Return id number for a specific adapter */
+/* Return the adapter number for a specific adapter */
static inline int i2c_adapter_id(struct i2c_adapter *adap)
{
return adap->nr;
@@ -526,7 +529,7 @@ struct i2c_msg {
#define I2C_FUNC_I2C 0x00000001
#define I2C_FUNC_10BIT_ADDR 0x00000002
-#define I2C_FUNC_PROTOCOL_MANGLING 0x00000004 /* I2C_M_{REV_DIR_ADDR,NOSTART,..} */
+#define I2C_FUNC_PROTOCOL_MANGLING 0x00000004 /* I2C_M_NOSTART etc. */
#define I2C_FUNC_SMBUS_PEC 0x00000008
#define I2C_FUNC_SMBUS_BLOCK_PROC_CALL 0x00008000 /* SMBus 2.0 */
#define I2C_FUNC_SMBUS_QUICK 0x00010000
@@ -541,30 +544,26 @@ struct i2c_msg {
#define I2C_FUNC_SMBUS_WRITE_BLOCK_DATA 0x02000000
#define I2C_FUNC_SMBUS_READ_I2C_BLOCK 0x04000000 /* I2C-like block xfer */
#define I2C_FUNC_SMBUS_WRITE_I2C_BLOCK 0x08000000 /* w/ 1-byte reg. addr. */
-#define I2C_FUNC_SMBUS_READ_I2C_BLOCK_2 0x10000000 /* I2C-like block xfer */
-#define I2C_FUNC_SMBUS_WRITE_I2C_BLOCK_2 0x20000000 /* w/ 2-byte reg. addr. */
-
-#define I2C_FUNC_SMBUS_BYTE (I2C_FUNC_SMBUS_READ_BYTE | \
- I2C_FUNC_SMBUS_WRITE_BYTE)
-#define I2C_FUNC_SMBUS_BYTE_DATA (I2C_FUNC_SMBUS_READ_BYTE_DATA | \
- I2C_FUNC_SMBUS_WRITE_BYTE_DATA)
-#define I2C_FUNC_SMBUS_WORD_DATA (I2C_FUNC_SMBUS_READ_WORD_DATA | \
- I2C_FUNC_SMBUS_WRITE_WORD_DATA)
-#define I2C_FUNC_SMBUS_BLOCK_DATA (I2C_FUNC_SMBUS_READ_BLOCK_DATA | \
- I2C_FUNC_SMBUS_WRITE_BLOCK_DATA)
-#define I2C_FUNC_SMBUS_I2C_BLOCK (I2C_FUNC_SMBUS_READ_I2C_BLOCK | \
- I2C_FUNC_SMBUS_WRITE_I2C_BLOCK)
-#define I2C_FUNC_SMBUS_I2C_BLOCK_2 (I2C_FUNC_SMBUS_READ_I2C_BLOCK_2 | \
- I2C_FUNC_SMBUS_WRITE_I2C_BLOCK_2)
-
-#define I2C_FUNC_SMBUS_EMUL (I2C_FUNC_SMBUS_QUICK | \
- I2C_FUNC_SMBUS_BYTE | \
- I2C_FUNC_SMBUS_BYTE_DATA | \
- I2C_FUNC_SMBUS_WORD_DATA | \
- I2C_FUNC_SMBUS_PROC_CALL | \
- I2C_FUNC_SMBUS_WRITE_BLOCK_DATA | \
- I2C_FUNC_SMBUS_I2C_BLOCK | \
- I2C_FUNC_SMBUS_PEC)
+
+#define I2C_FUNC_SMBUS_BYTE (I2C_FUNC_SMBUS_READ_BYTE | \
+ I2C_FUNC_SMBUS_WRITE_BYTE)
+#define I2C_FUNC_SMBUS_BYTE_DATA (I2C_FUNC_SMBUS_READ_BYTE_DATA | \
+ I2C_FUNC_SMBUS_WRITE_BYTE_DATA)
+#define I2C_FUNC_SMBUS_WORD_DATA (I2C_FUNC_SMBUS_READ_WORD_DATA | \
+ I2C_FUNC_SMBUS_WRITE_WORD_DATA)
+#define I2C_FUNC_SMBUS_BLOCK_DATA (I2C_FUNC_SMBUS_READ_BLOCK_DATA | \
+ I2C_FUNC_SMBUS_WRITE_BLOCK_DATA)
+#define I2C_FUNC_SMBUS_I2C_BLOCK (I2C_FUNC_SMBUS_READ_I2C_BLOCK | \
+ I2C_FUNC_SMBUS_WRITE_I2C_BLOCK)
+
+#define I2C_FUNC_SMBUS_EMUL (I2C_FUNC_SMBUS_QUICK | \
+ I2C_FUNC_SMBUS_BYTE | \
+ I2C_FUNC_SMBUS_BYTE_DATA | \
+ I2C_FUNC_SMBUS_WORD_DATA | \
+ I2C_FUNC_SMBUS_PROC_CALL | \
+ I2C_FUNC_SMBUS_WRITE_BLOCK_DATA | \
+ I2C_FUNC_SMBUS_I2C_BLOCK | \
+ I2C_FUNC_SMBUS_PEC)
/*
* Data for SMBus Messages
@@ -574,7 +573,7 @@ union i2c_smbus_data {
__u8 byte;
__u16 word;
__u8 block[I2C_SMBUS_BLOCK_MAX + 2]; /* block[0] is used for length */
- /* and one more for user-space compatibility */
+ /* and one more for user-space compatibility */
};
/* i2c_smbus_xfer read or write markers */
@@ -602,21 +601,21 @@ union i2c_smbus_data {
/* Default fill of many variables */
#define I2C_CLIENT_DEFAULTS {I2C_CLIENT_END, I2C_CLIENT_END, I2C_CLIENT_END, \
- I2C_CLIENT_END, I2C_CLIENT_END, I2C_CLIENT_END, \
- I2C_CLIENT_END, I2C_CLIENT_END, I2C_CLIENT_END, \
- I2C_CLIENT_END, I2C_CLIENT_END, I2C_CLIENT_END, \
- I2C_CLIENT_END, I2C_CLIENT_END, I2C_CLIENT_END, \
- I2C_CLIENT_END, I2C_CLIENT_END, I2C_CLIENT_END, \
- I2C_CLIENT_END, I2C_CLIENT_END, I2C_CLIENT_END, \
- I2C_CLIENT_END, I2C_CLIENT_END, I2C_CLIENT_END, \
- I2C_CLIENT_END, I2C_CLIENT_END, I2C_CLIENT_END, \
- I2C_CLIENT_END, I2C_CLIENT_END, I2C_CLIENT_END, \
- I2C_CLIENT_END, I2C_CLIENT_END, I2C_CLIENT_END, \
- I2C_CLIENT_END, I2C_CLIENT_END, I2C_CLIENT_END, \
- I2C_CLIENT_END, I2C_CLIENT_END, I2C_CLIENT_END, \
- I2C_CLIENT_END, I2C_CLIENT_END, I2C_CLIENT_END, \
- I2C_CLIENT_END, I2C_CLIENT_END, I2C_CLIENT_END, \
- I2C_CLIENT_END, I2C_CLIENT_END, I2C_CLIENT_END}
+ I2C_CLIENT_END, I2C_CLIENT_END, I2C_CLIENT_END, \
+ I2C_CLIENT_END, I2C_CLIENT_END, I2C_CLIENT_END, \
+ I2C_CLIENT_END, I2C_CLIENT_END, I2C_CLIENT_END, \
+ I2C_CLIENT_END, I2C_CLIENT_END, I2C_CLIENT_END, \
+ I2C_CLIENT_END, I2C_CLIENT_END, I2C_CLIENT_END, \
+ I2C_CLIENT_END, I2C_CLIENT_END, I2C_CLIENT_END, \
+ I2C_CLIENT_END, I2C_CLIENT_END, I2C_CLIENT_END, \
+ I2C_CLIENT_END, I2C_CLIENT_END, I2C_CLIENT_END, \
+ I2C_CLIENT_END, I2C_CLIENT_END, I2C_CLIENT_END, \
+ I2C_CLIENT_END, I2C_CLIENT_END, I2C_CLIENT_END, \
+ I2C_CLIENT_END, I2C_CLIENT_END, I2C_CLIENT_END, \
+ I2C_CLIENT_END, I2C_CLIENT_END, I2C_CLIENT_END, \
+ I2C_CLIENT_END, I2C_CLIENT_END, I2C_CLIENT_END, \
+ I2C_CLIENT_END, I2C_CLIENT_END, I2C_CLIENT_END, \
+ I2C_CLIENT_END, I2C_CLIENT_END, I2C_CLIENT_END}
/* I2C_CLIENT_MODULE_PARM creates a module parameter, and puts it in the
module header */
@@ -625,7 +624,7 @@ union i2c_smbus_data {
static unsigned short var[I2C_CLIENT_MAX_OPTS] = I2C_CLIENT_DEFAULTS; \
static unsigned int var##_num; \
module_param_array(var, short, &var##_num, 0); \
- MODULE_PARM_DESC(var,desc)
+ MODULE_PARM_DESC(var, desc)
#define I2C_CLIENT_MODULE_PARM_FORCE(name) \
I2C_CLIENT_MODULE_PARM(force_##name, \
diff --git a/include/linux/i2c/twl4030.h b/include/linux/i2c/twl4030.h
new file mode 100644
index 00000000000..fb604dcd38f
--- /dev/null
+++ b/include/linux/i2c/twl4030.h
@@ -0,0 +1,343 @@
+/*
+ * twl4030.h - header for TWL4030 PM and audio CODEC device
+ *
+ * Copyright (C) 2005-2006 Texas Instruments, Inc.
+ *
+ * Based on tlv320aic23.c:
+ * Copyright (c) by Kai Svahn <kai.svahn@nokia.com>
+ *
+ * This program is free software; you can redistribute it and/or modify
+ * it under the terms of the GNU General Public License as published by
+ * the Free Software Foundation; either version 2 of the License, or
+ * (at your option) any later version.
+ *
+ * This program is distributed in the hope that it will be useful,
+ * but WITHOUT ANY WARRANTY; without even the implied warranty of
+ * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
+ * GNU General Public License for more details.
+ *
+ * You should have received a copy of the GNU General Public License
+ * along with this program; if not, write to the Free Software
+ * Foundation, Inc., 59 Temple Place, Suite 330, Boston, MA 02111-1307 USA
+ *
+ */
+
+#ifndef __TWL4030_H_
+#define __TWL4030_H_
+
+/*
+ * Using the twl4030 core we address registers using a pair
+ * { module id, relative register offset }
+ * which that core then maps to the relevant
+ * { i2c slave, absolute register address }
+ *
+ * The module IDs are meaningful only to the twl4030 core code,
+ * which uses them as array indices to look up the first register
+ * address each module uses within a given i2c slave.
+ */
+
+/* Slave 0 (i2c address 0x48) */
+#define TWL4030_MODULE_USB 0x00
+
+/* Slave 1 (i2c address 0x49) */
+#define TWL4030_MODULE_AUDIO_VOICE 0x01
+#define TWL4030_MODULE_GPIO 0x02
+#define TWL4030_MODULE_INTBR 0x03
+#define TWL4030_MODULE_PIH 0x04
+#define TWL4030_MODULE_TEST 0x05
+
+/* Slave 2 (i2c address 0x4a) */
+#define TWL4030_MODULE_KEYPAD 0x06
+#define TWL4030_MODULE_MADC 0x07
+#define TWL4030_MODULE_INTERRUPTS 0x08
+#define TWL4030_MODULE_LED 0x09
+#define TWL4030_MODULE_MAIN_CHARGE 0x0A
+#define TWL4030_MODULE_PRECHARGE 0x0B
+#define TWL4030_MODULE_PWM0 0x0C
+#define TWL4030_MODULE_PWM1 0x0D
+#define TWL4030_MODULE_PWMA 0x0E
+#define TWL4030_MODULE_PWMB 0x0F
+
+/* Slave 3 (i2c address 0x4b) */
+#define TWL4030_MODULE_BACKUP 0x10
+#define TWL4030_MODULE_INT 0x11
+#define TWL4030_MODULE_PM_MASTER 0x12
+#define TWL4030_MODULE_PM_RECEIVER 0x13
+#define TWL4030_MODULE_RTC 0x14
+#define TWL4030_MODULE_SECURED_REG 0x15
+
+/*
+ * Read and write single 8-bit registers
+ */
+int twl4030_i2c_write_u8(u8 mod_no, u8 val, u8 reg);
+int twl4030_i2c_read_u8(u8 mod_no, u8 *val, u8 reg);
+
+/*
+ * Read and write several 8-bit registers at once.
+ *
+ * IMPORTANT: For twl4030_i2c_write(), allocate num_bytes + 1
+ * for the value, and populate your data starting at offset 1.
+ */
+int twl4030_i2c_write(u8 mod_no, u8 *value, u8 reg, u8 num_bytes);
+int twl4030_i2c_read(u8 mod_no, u8 *value, u8 reg, u8 num_bytes);
+
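To make the num_bytes + 1 rule above concrete, a hedged sketch of a three-byte write; the module, register and payload values are illustrative:

	/* Byte 0 of the buffer is reserved for the core; the payload
	 * starts at offset 1 and num_bytes counts only the payload. */
	u8 buf[4];

	buf[1] = 0x01;
	buf[2] = 0x02;
	buf[3] = 0x03;
	twl4030_i2c_write(TWL4030_MODULE_GPIO, buf, REG_GPIODATAOUT1, 3);
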
+/*----------------------------------------------------------------------*/
+
+/*
+ * NOTE: at up to 1024 registers, this is a big chip.
+ *
+ * Avoid putting register declarations in this file; put them into a
+ * driver-private file instead, unless some of the registers in a block
+ * need to be shared with other drivers. One example is blocks that
+ * have Secondary IRQ Handler (SIH) registers.
+ */
+
+#define TWL4030_SIH_CTRL_EXCLEN_MASK BIT(0)
+#define TWL4030_SIH_CTRL_PENDDIS_MASK BIT(1)
+#define TWL4030_SIH_CTRL_COR_MASK BIT(2)
+
+/*----------------------------------------------------------------------*/
+
+/*
+ * GPIO Block Register offsets (use TWL4030_MODULE_GPIO)
+ */
+
+#define REG_GPIODATAIN1 0x0
+#define REG_GPIODATAIN2 0x1
+#define REG_GPIODATAIN3 0x2
+#define REG_GPIODATADIR1 0x3
+#define REG_GPIODATADIR2 0x4
+#define REG_GPIODATADIR3 0x5
+#define REG_GPIODATAOUT1 0x6
+#define REG_GPIODATAOUT2 0x7
+#define REG_GPIODATAOUT3 0x8
+#define REG_CLEARGPIODATAOUT1 0x9
+#define REG_CLEARGPIODATAOUT2 0xA
+#define REG_CLEARGPIODATAOUT3 0xB
+#define REG_SETGPIODATAOUT1 0xC
+#define REG_SETGPIODATAOUT2 0xD
+#define REG_SETGPIODATAOUT3 0xE
+#define REG_GPIO_DEBEN1 0xF
+#define REG_GPIO_DEBEN2 0x10
+#define REG_GPIO_DEBEN3 0x11
+#define REG_GPIO_CTRL 0x12
+#define REG_GPIOPUPDCTR1 0x13
+#define REG_GPIOPUPDCTR2 0x14
+#define REG_GPIOPUPDCTR3 0x15
+#define REG_GPIOPUPDCTR4 0x16
+#define REG_GPIOPUPDCTR5 0x17
+#define REG_GPIO_ISR1A 0x19
+#define REG_GPIO_ISR2A 0x1A
+#define REG_GPIO_ISR3A 0x1B
+#define REG_GPIO_IMR1A 0x1C
+#define REG_GPIO_IMR2A 0x1D
+#define REG_GPIO_IMR3A 0x1E
+#define REG_GPIO_ISR1B 0x1F
+#define REG_GPIO_ISR2B 0x20
+#define REG_GPIO_ISR3B 0x21
+#define REG_GPIO_IMR1B 0x22
+#define REG_GPIO_IMR2B 0x23
+#define REG_GPIO_IMR3B 0x24
+#define REG_GPIO_EDR1 0x28
+#define REG_GPIO_EDR2 0x29
+#define REG_GPIO_EDR3 0x2A
+#define REG_GPIO_EDR4 0x2B
+#define REG_GPIO_EDR5 0x2C
+#define REG_GPIO_SIH_CTRL 0x2D
+
+/* Up to 18 signals are available as GPIOs, when their
+ * pins are not assigned to another use (such as ULPI/USB).
+ */
+#define TWL4030_GPIO_MAX 18
+
+/*----------------------------------------------------------------------*/
+
+/*
+ * Keypad register offsets (use TWL4030_MODULE_KEYPAD)
+ * ... SIH/interrupt only
+ */
+
+#define TWL4030_KEYPAD_KEYP_ISR1 0x11
+#define TWL4030_KEYPAD_KEYP_IMR1 0x12
+#define TWL4030_KEYPAD_KEYP_ISR2 0x13
+#define TWL4030_KEYPAD_KEYP_IMR2 0x14
+#define TWL4030_KEYPAD_KEYP_SIR 0x15 /* test register */
+#define TWL4030_KEYPAD_KEYP_EDR 0x16
+#define TWL4030_KEYPAD_KEYP_SIH_CTRL 0x17
+
+/*----------------------------------------------------------------------*/
+
+/*
+ * Multichannel ADC register offsets (use TWL4030_MODULE_MADC)
+ * ... SIH/interrupt only
+ */
+
+#define TWL4030_MADC_ISR1 0x61
+#define TWL4030_MADC_IMR1 0x62
+#define TWL4030_MADC_ISR2 0x63
+#define TWL4030_MADC_IMR2 0x64
+#define TWL4030_MADC_SIR 0x65 /* test register */
+#define TWL4030_MADC_EDR 0x66
+#define TWL4030_MADC_SIH_CTRL 0x67
+
+/*----------------------------------------------------------------------*/
+
+/*
+ * Battery charger register offsets (use TWL4030_MODULE_INTERRUPTS)
+ */
+
+#define TWL4030_INTERRUPTS_BCIISR1A 0x0
+#define TWL4030_INTERRUPTS_BCIISR2A 0x1
+#define TWL4030_INTERRUPTS_BCIIMR1A 0x2
+#define TWL4030_INTERRUPTS_BCIIMR2A 0x3
+#define TWL4030_INTERRUPTS_BCIISR1B 0x4
+#define TWL4030_INTERRUPTS_BCIISR2B 0x5
+#define TWL4030_INTERRUPTS_BCIIMR1B 0x6
+#define TWL4030_INTERRUPTS_BCIIMR2B 0x7
+#define TWL4030_INTERRUPTS_BCISIR1 0x8 /* test register */
+#define TWL4030_INTERRUPTS_BCISIR2 0x9 /* test register */
+#define TWL4030_INTERRUPTS_BCIEDR1 0xa
+#define TWL4030_INTERRUPTS_BCIEDR2 0xb
+#define TWL4030_INTERRUPTS_BCIEDR3 0xc
+#define TWL4030_INTERRUPTS_BCISIHCTRL 0xd
+
+/*----------------------------------------------------------------------*/
+
+/*
+ * Power Interrupt block register offsets (use TWL4030_MODULE_INT)
+ */
+
+#define TWL4030_INT_PWR_ISR1 0x0
+#define TWL4030_INT_PWR_IMR1 0x1
+#define TWL4030_INT_PWR_ISR2 0x2
+#define TWL4030_INT_PWR_IMR2 0x3
+#define TWL4030_INT_PWR_SIR 0x4 /* test register */
+#define TWL4030_INT_PWR_EDR1 0x5
+#define TWL4030_INT_PWR_EDR2 0x6
+#define TWL4030_INT_PWR_SIH_CTRL 0x7
+
+/*----------------------------------------------------------------------*/
+
+struct twl4030_bci_platform_data {
+ int *battery_tmp_tbl;
+ unsigned int tblsize;
+};
+
+/* TWL4030_GPIO_MAX (18) GPIOs, with interrupts */
+struct twl4030_gpio_platform_data {
+ int gpio_base;
+ unsigned irq_base, irq_end;
+
+ /* package the two LED signals as output-only GPIOs? */
+ bool use_leds;
+
+ /* gpio-n should control VMMC(n+1) if BIT(n) in mmc_cd is set */
+ u8 mmc_cd;
+
+ /* For gpio-N, bit (1 << N) in "pullups" is set if that pullup
+ * should be enabled. Else, if that bit is set in "pulldowns",
+ * that pulldown is enabled. Don't waste power by letting any
+ * digital inputs float...
+ */
+ u32 pullups;
+ u32 pulldowns;
+
+ int (*setup)(struct device *dev,
+ unsigned gpio, unsigned ngpio);
+ int (*teardown)(struct device *dev,
+ unsigned gpio, unsigned ngpio);
+};
+
+struct twl4030_madc_platform_data {
+ int irq_line;
+};
+
+struct twl4030_keypad_data {
+ int rows;
+ int cols;
+ int *keymap;
+ int irq;
+ unsigned int keymapsize;
+ unsigned int rep:1;
+};
+
+enum twl4030_usb_mode {
+ T2_USB_MODE_ULPI = 1,
+ T2_USB_MODE_CEA2011_3PIN = 2,
+};
+
+struct twl4030_usb_data {
+ enum twl4030_usb_mode usb_mode;
+};
+
+struct twl4030_platform_data {
+ unsigned irq_base, irq_end;
+ struct twl4030_bci_platform_data *bci;
+ struct twl4030_gpio_platform_data *gpio;
+ struct twl4030_madc_platform_data *madc;
+ struct twl4030_keypad_data *keypad;
+ struct twl4030_usb_data *usb;
+
+ /* REVISIT more to come ... _nothing_ should be hard-wired */
+};
+
+/*----------------------------------------------------------------------*/
+
+int twl4030_sih_setup(int module);
+
+/*
+ * FIXME completely stop using TWL4030_IRQ_BASE ... instead, pass the
+ * IRQ data to subsidiary devices using platform device resources.
+ */
+
+/* IRQ information - needs the IRQ base */
+#include <mach/irqs.h>
+/* TWL4030 interrupts */
+
+/* #define TWL4030_MODIRQ_GPIO (TWL4030_IRQ_BASE + 0) */
+#define TWL4030_MODIRQ_KEYPAD (TWL4030_IRQ_BASE + 1)
+#define TWL4030_MODIRQ_BCI (TWL4030_IRQ_BASE + 2)
+#define TWL4030_MODIRQ_MADC (TWL4030_IRQ_BASE + 3)
+/* #define TWL4030_MODIRQ_USB (TWL4030_IRQ_BASE + 4) */
+/* #define TWL4030_MODIRQ_PWR (TWL4030_IRQ_BASE + 5) */
+
+#define TWL4030_PWRIRQ_PWRBTN (TWL4030_PWR_IRQ_BASE + 0)
+/* #define TWL4030_PWRIRQ_CHG_PRES (TWL4030_PWR_IRQ_BASE + 1) */
+/* #define TWL4030_PWRIRQ_USB_PRES (TWL4030_PWR_IRQ_BASE + 2) */
+/* #define TWL4030_PWRIRQ_RTC (TWL4030_PWR_IRQ_BASE + 3) */
+/* #define TWL4030_PWRIRQ_HOT_DIE (TWL4030_PWR_IRQ_BASE + 4) */
+/* #define TWL4030_PWRIRQ_PWROK_TIMEOUT (TWL4030_PWR_IRQ_BASE + 5) */
+/* #define TWL4030_PWRIRQ_MBCHG (TWL4030_PWR_IRQ_BASE + 6) */
+/* #define TWL4030_PWRIRQ_SC_DETECT (TWL4030_PWR_IRQ_BASE + 7) */
+
+/* The rest are currently unused */
+
+/* Offsets to Power Registers */
+#define TWL4030_VDAC_DEV_GRP 0x3B
+#define TWL4030_VDAC_DEDICATED 0x3E
+#define TWL4030_VAUX1_DEV_GRP 0x17
+#define TWL4030_VAUX1_DEDICATED 0x1A
+#define TWL4030_VAUX2_DEV_GRP 0x1B
+#define TWL4030_VAUX2_DEDICATED 0x1E
+#define TWL4030_VAUX3_DEV_GRP 0x1F
+#define TWL4030_VAUX3_DEDICATED 0x22
+
+/* TWL4030 GPIO interrupt definitions */
+
+#define TWL4030_GPIO_IRQ_NO(n) (TWL4030_GPIO_IRQ_BASE + (n))
+
+/*
+ * Exported TWL4030 GPIO APIs
+ *
+ * WARNING -- use standard GPIO and IRQ calls instead; these will vanish.
+ */
+int twl4030_set_gpio_debounce(int gpio, int enable);
+
+#if defined(CONFIG_TWL4030_BCI_BATTERY) || \
+ defined(CONFIG_TWL4030_BCI_BATTERY_MODULE)
+ extern int twl4030charger_usb_en(int enable);
+#else
+ static inline int twl4030charger_usb_en(int enable) { return 0; }
+#endif
+
+#endif /* End of __TWL4030_H */
diff --git a/include/linux/i2o.h b/include/linux/i2o.h
index 75ae6d8aba4..4c4e57d1f19 100644
--- a/include/linux/i2o.h
+++ b/include/linux/i2o.h
@@ -570,7 +570,6 @@ struct i2o_controller {
#endif
spinlock_t lock; /* lock for controller
configuration */
-
void *driver_data[I2O_MAX_DRIVERS]; /* storage for drivers */
};
@@ -691,289 +690,22 @@ static inline u32 i2o_dma_high(dma_addr_t dma_addr)
};
#endif
-/**
- * i2o_sg_tablesize - Calculate the maximum number of elements in a SGL
- * @c: I2O controller for which the calculation should be done
- * @body_size: maximum body size used for message in 32-bit words.
- *
- * Return the maximum number of SG elements in a SG list.
- */
-static inline u16 i2o_sg_tablesize(struct i2o_controller *c, u16 body_size)
-{
- i2o_status_block *sb = c->status_block.virt;
- u16 sg_count =
- (sb->inbound_frame_size - sizeof(struct i2o_message) / 4) -
- body_size;
-
- if (c->pae_support) {
- /*
- * for 64-bit a SG attribute element must be added and each
- * SG element needs 12 bytes instead of 8.
- */
- sg_count -= 2;
- sg_count /= 3;
- } else
- sg_count /= 2;
-
- if (c->short_req && (sg_count > 8))
- sg_count = 8;
-
- return sg_count;
-};
-
-/**
- * i2o_dma_map_single - Map pointer to controller and fill in I2O message.
- * @c: I2O controller
- * @ptr: pointer to the data which should be mapped
- * @size: size of data in bytes
- * @direction: DMA_TO_DEVICE / DMA_FROM_DEVICE
- * @sg_ptr: pointer to the SG list inside the I2O message
- *
- * This function does all necessary DMA handling and also writes the I2O
- * SGL elements into the I2O message. For details on DMA handling see also
- * dma_map_single(). The pointer sg_ptr will only be set to the end of the
- * SG list if the allocation was successful.
- *
- * Returns DMA address which must be checked for failures using
- * dma_mapping_error().
- */
-static inline dma_addr_t i2o_dma_map_single(struct i2o_controller *c, void *ptr,
+extern u16 i2o_sg_tablesize(struct i2o_controller *c, u16 body_size);
+extern dma_addr_t i2o_dma_map_single(struct i2o_controller *c, void *ptr,
size_t size,
enum dma_data_direction direction,
- u32 ** sg_ptr)
-{
- u32 sg_flags;
- u32 *mptr = *sg_ptr;
- dma_addr_t dma_addr;
-
- switch (direction) {
- case DMA_TO_DEVICE:
- sg_flags = 0xd4000000;
- break;
- case DMA_FROM_DEVICE:
- sg_flags = 0xd0000000;
- break;
- default:
- return 0;
- }
-
- dma_addr = dma_map_single(&c->pdev->dev, ptr, size, direction);
- if (!dma_mapping_error(&c->pdev->dev, dma_addr)) {
-#ifdef CONFIG_I2O_EXT_ADAPTEC_DMA64
- if ((sizeof(dma_addr_t) > 4) && c->pae_support) {
- *mptr++ = cpu_to_le32(0x7C020002);
- *mptr++ = cpu_to_le32(PAGE_SIZE);
- }
-#endif
-
- *mptr++ = cpu_to_le32(sg_flags | size);
- *mptr++ = cpu_to_le32(i2o_dma_low(dma_addr));
-#ifdef CONFIG_I2O_EXT_ADAPTEC_DMA64
- if ((sizeof(dma_addr_t) > 4) && c->pae_support)
- *mptr++ = cpu_to_le32(i2o_dma_high(dma_addr));
-#endif
- *sg_ptr = mptr;
- }
- return dma_addr;
-};
-
-/**
- * i2o_dma_map_sg - Map a SG List to controller and fill in I2O message.
- * @c: I2O controller
- * @sg: SG list to be mapped
- * @sg_count: number of elements in the SG list
- * @direction: DMA_TO_DEVICE / DMA_FROM_DEVICE
- * @sg_ptr: pointer to the SG list inside the I2O message
- *
- * This function does all necessary DMA handling and also writes the I2O
- * SGL elements into the I2O message. For details on DMA handling see also
- * dma_map_sg(). The pointer sg_ptr will only be set to the end of the SG
- * list if the allocation was successful.
- *
- * Returns 0 on failure or 1 on success.
- */
-static inline int i2o_dma_map_sg(struct i2o_controller *c,
+ u32 ** sg_ptr);
+extern int i2o_dma_map_sg(struct i2o_controller *c,
struct scatterlist *sg, int sg_count,
enum dma_data_direction direction,
- u32 ** sg_ptr)
-{
- u32 sg_flags;
- u32 *mptr = *sg_ptr;
-
- switch (direction) {
- case DMA_TO_DEVICE:
- sg_flags = 0x14000000;
- break;
- case DMA_FROM_DEVICE:
- sg_flags = 0x10000000;
- break;
- default:
- return 0;
- }
-
- sg_count = dma_map_sg(&c->pdev->dev, sg, sg_count, direction);
- if (!sg_count)
- return 0;
-
-#ifdef CONFIG_I2O_EXT_ADAPTEC_DMA64
- if ((sizeof(dma_addr_t) > 4) && c->pae_support) {
- *mptr++ = cpu_to_le32(0x7C020002);
- *mptr++ = cpu_to_le32(PAGE_SIZE);
- }
-#endif
-
- while (sg_count-- > 0) {
- if (!sg_count)
- sg_flags |= 0xC0000000;
- *mptr++ = cpu_to_le32(sg_flags | sg_dma_len(sg));
- *mptr++ = cpu_to_le32(i2o_dma_low(sg_dma_address(sg)));
-#ifdef CONFIG_I2O_EXT_ADAPTEC_DMA64
- if ((sizeof(dma_addr_t) > 4) && c->pae_support)
- *mptr++ = cpu_to_le32(i2o_dma_high(sg_dma_address(sg)));
-#endif
- sg = sg_next(sg);
- }
- *sg_ptr = mptr;
-
- return 1;
-};
-
-/**
- * i2o_dma_alloc - Allocate DMA memory
- * @dev: struct device pointer to the PCI device of the I2O controller
- * @addr: i2o_dma struct which should get the DMA buffer
- * @len: length of the new DMA memory
- * @gfp_mask: GFP mask
- *
- * Allocate a coherent DMA memory and write the pointers into addr.
- *
- * Returns 0 on success or -ENOMEM on failure.
- */
-static inline int i2o_dma_alloc(struct device *dev, struct i2o_dma *addr,
- size_t len, gfp_t gfp_mask)
-{
- struct pci_dev *pdev = to_pci_dev(dev);
- int dma_64 = 0;
-
- if ((sizeof(dma_addr_t) > 4) && (pdev->dma_mask == DMA_64BIT_MASK)) {
- dma_64 = 1;
- if (pci_set_dma_mask(pdev, DMA_32BIT_MASK))
- return -ENOMEM;
- }
-
- addr->virt = dma_alloc_coherent(dev, len, &addr->phys, gfp_mask);
-
- if ((sizeof(dma_addr_t) > 4) && dma_64)
- if (pci_set_dma_mask(pdev, DMA_64BIT_MASK))
- printk(KERN_WARNING "i2o: unable to set 64-bit DMA");
-
- if (!addr->virt)
- return -ENOMEM;
-
- memset(addr->virt, 0, len);
- addr->len = len;
-
- return 0;
-};
-
-/**
- * i2o_dma_free - Free DMA memory
- * @dev: struct device pointer to the PCI device of the I2O controller
- * @addr: i2o_dma struct which contains the DMA buffer
- *
- * Free a coherent DMA memory and set virtual address of addr to NULL.
- */
-static inline void i2o_dma_free(struct device *dev, struct i2o_dma *addr)
-{
- if (addr->virt) {
- if (addr->phys)
- dma_free_coherent(dev, addr->len, addr->virt,
- addr->phys);
- else
- kfree(addr->virt);
- addr->virt = NULL;
- }
-};
-
-/**
- * i2o_dma_realloc - Realloc DMA memory
- * @dev: struct device pointer to the PCI device of the I2O controller
- * @addr: pointer to a i2o_dma struct DMA buffer
- * @len: new length of memory
- * @gfp_mask: GFP mask
- *
- * If there was something allocated in the addr, free it first. If len > 0
- * than try to allocate it and write the addresses back to the addr
- * structure. If len == 0 set the virtual address to NULL.
- *
- * Returns the 0 on success or negative error code on failure.
- */
-static inline int i2o_dma_realloc(struct device *dev, struct i2o_dma *addr,
- size_t len, gfp_t gfp_mask)
-{
- i2o_dma_free(dev, addr);
-
- if (len)
- return i2o_dma_alloc(dev, addr, len, gfp_mask);
-
- return 0;
-};
-
-/*
- * i2o_pool_alloc - Allocate an slab cache and mempool
- * @mempool: pointer to struct i2o_pool to write data into.
- * @name: name which is used to identify cache
- * @size: size of each object
- * @min_nr: minimum number of objects
- *
- * First allocates a slab cache with name and size. Then allocates a
- * mempool which uses the slab cache for allocation and freeing.
- *
- * Returns 0 on success or negative error code on failure.
- */
-static inline int i2o_pool_alloc(struct i2o_pool *pool, const char *name,
- size_t size, int min_nr)
-{
- pool->name = kmalloc(strlen(name) + 1, GFP_KERNEL);
- if (!pool->name)
- goto exit;
- strcpy(pool->name, name);
-
- pool->slab =
- kmem_cache_create(pool->name, size, 0, SLAB_HWCACHE_ALIGN, NULL);
- if (!pool->slab)
- goto free_name;
-
- pool->mempool = mempool_create_slab_pool(min_nr, pool->slab);
- if (!pool->mempool)
- goto free_slab;
-
- return 0;
-
- free_slab:
- kmem_cache_destroy(pool->slab);
-
- free_name:
- kfree(pool->name);
-
- exit:
- return -ENOMEM;
-};
-
-/*
- * i2o_pool_free - Free slab cache and mempool again
- * @mempool: pointer to struct i2o_pool which should be freed
- *
- * Note that you have to return all objects to the mempool again before
- * calling i2o_pool_free().
- */
-static inline void i2o_pool_free(struct i2o_pool *pool)
-{
- mempool_destroy(pool->mempool);
- kmem_cache_destroy(pool->slab);
- kfree(pool->name);
-};
-
+ u32 ** sg_ptr);
+extern int i2o_dma_alloc(struct device *dev, struct i2o_dma *addr, size_t len);
+extern void i2o_dma_free(struct device *dev, struct i2o_dma *addr);
+extern int i2o_dma_realloc(struct device *dev, struct i2o_dma *addr,
+ size_t len);
+extern int i2o_pool_alloc(struct i2o_pool *pool, const char *name,
+ size_t size, int min_nr);
+extern void i2o_pool_free(struct i2o_pool *pool);
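The kernel-doc removed above still describes the calling convention: i2o_dma_map_single() writes the SG elements into the message and advances the caller's cursor only on success, and the returned address must be checked with dma_mapping_error(). A hedged sketch of that usage (my_fill_sgl is a hypothetical helper):

static int my_fill_sgl(struct i2o_controller *c, void *buf, size_t len,
		       u32 **sg_ptr)
{
	dma_addr_t addr;

	addr = i2o_dma_map_single(c, buf, len, DMA_TO_DEVICE, sg_ptr);
	if (dma_mapping_error(&c->pdev->dev, addr))
		return -ENOMEM;

	/* *sg_ptr now points just past the SG elements written for buf */
	return 0;
}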
/* I2O driver (OSM) functions */
extern int i2o_driver_register(struct i2o_driver *);
extern void i2o_driver_unregister(struct i2o_driver *);
diff --git a/include/linux/i7300_idle.h b/include/linux/i7300_idle.h
new file mode 100644
index 00000000000..05a80c44513
--- /dev/null
+++ b/include/linux/i7300_idle.h
@@ -0,0 +1,83 @@
+
+#ifndef I7300_IDLE_H
+#define I7300_IDLE_H
+
+#include <linux/pci.h>
+
+/*
+ * I/O AT controls (PCI bus 0 device 8 function 0)
+ * DIMM controls (PCI bus 0 device 16 function 1)
+ */
+#define IOAT_BUS 0
+#define IOAT_DEVFN PCI_DEVFN(8, 0)
+#define MEMCTL_BUS 0
+#define MEMCTL_DEVFN PCI_DEVFN(16, 1)
+
+struct fbd_ioat {
+ unsigned int vendor;
+ unsigned int ioat_dev;
+};
+
+/*
+ * The i5000 chip-set has the same hooks as the i7300
+ * but support is disabled by default because this driver
+ * has not been validated on that platform.
+ */
+#define SUPPORT_I5000 0
+
+static const struct fbd_ioat fbd_ioat_list[] = {
+ {PCI_VENDOR_ID_INTEL, PCI_DEVICE_ID_INTEL_IOAT_CNB},
+#if SUPPORT_I5000
+ {PCI_VENDOR_ID_INTEL, PCI_DEVICE_ID_INTEL_IOAT},
+#endif
+ {0, 0}
+};
+
+/* table of devices that work with this driver */
+static const struct pci_device_id pci_tbl[] = {
+ { PCI_DEVICE(PCI_VENDOR_ID_INTEL, PCI_DEVICE_ID_INTEL_FBD_CNB) },
+#if SUPPORT_I5000
+ { PCI_DEVICE(PCI_VENDOR_ID_INTEL, PCI_DEVICE_ID_INTEL_5000_ERR) },
+#endif
+ { } /* Terminating entry */
+};
+
+/* Check for known platforms with I/O-AT */
+static inline int i7300_idle_platform_probe(struct pci_dev **fbd_dev,
+ struct pci_dev **ioat_dev)
+{
+ int i;
+ struct pci_dev *memdev, *dmadev;
+
+ memdev = pci_get_bus_and_slot(MEMCTL_BUS, MEMCTL_DEVFN);
+ if (!memdev)
+ return -ENODEV;
+
+ for (i = 0; pci_tbl[i].vendor != 0; i++) {
+ if (memdev->vendor == pci_tbl[i].vendor &&
+ memdev->device == pci_tbl[i].device) {
+ break;
+ }
+ }
+ if (pci_tbl[i].vendor == 0)
+ return -ENODEV;
+
+ dmadev = pci_get_bus_and_slot(IOAT_BUS, IOAT_DEVFN);
+ if (!dmadev)
+ return -ENODEV;
+
+ for (i = 0; fbd_ioat_list[i].vendor != 0; i++) {
+ if (dmadev->vendor == fbd_ioat_list[i].vendor &&
+ dmadev->device == fbd_ioat_list[i].ioat_dev) {
+ if (fbd_dev)
+ *fbd_dev = memdev;
+ if (ioat_dev)
+ *ioat_dev = dmadev;
+
+ return 0;
+ }
+ }
+ return -ENODEV;
+}
+
+#endif
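Callers are expected to probe once and keep the returned, already-pinned PCI devices around. A minimal sketch of such a caller (names are hypothetical):

static struct pci_dev *fbd_dev, *ioat_dev;

static int __init my_idle_init(void)
{
	if (i7300_idle_platform_probe(&fbd_dev, &ioat_dev))
		return -ENODEV;	/* not an i7300 (or enabled i5000) platform */

	/* both devices were obtained with pci_get_bus_and_slot() and stay pinned */
	return 0;
}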
diff --git a/include/linux/icmpv6.h b/include/linux/icmpv6.h
index 03067443198..a93a8dd3311 100644
--- a/include/linux/icmpv6.h
+++ b/include/linux/icmpv6.h
@@ -40,16 +40,18 @@ struct icmp6hdr {
struct icmpv6_nd_ra {
__u8 hop_limit;
#if defined(__LITTLE_ENDIAN_BITFIELD)
- __u8 reserved:4,
+ __u8 reserved:3,
router_pref:2,
+ home_agent:1,
other:1,
managed:1;
#elif defined(__BIG_ENDIAN_BITFIELD)
__u8 managed:1,
other:1,
+ home_agent:1,
router_pref:2,
- reserved:4;
+ reserved:3;
#else
#error "Please fix <asm/byteorder.h>"
#endif
diff --git a/include/linux/ide.h b/include/linux/ide.h
index c47e371554c..e99c56de7f5 100644
--- a/include/linux/ide.h
+++ b/include/linux/ide.h
@@ -122,8 +122,6 @@ struct ide_io_ports {
#define MAX_DRIVES 2 /* per interface; 2 assumed by lots of code */
#define SECTOR_SIZE 512
-#define IDE_LARGE_SEEK(b1,b2,t) (((b1) > (b2) + (t)) || ((b2) > (b1) + (t)))
-
/*
* Timeouts for various operations:
*/
@@ -172,9 +170,7 @@ typedef int (ide_ack_intr_t)(struct hwif_s *);
enum { ide_unknown, ide_generic, ide_pci,
ide_cmd640, ide_dtc2278, ide_ali14xx,
ide_qd65xx, ide_umc8672, ide_ht6560b,
- ide_rz1000, ide_trm290,
- ide_cmd646, ide_cy82c693, ide_4drives,
- ide_pmac, ide_acorn,
+ ide_4drives, ide_pmac, ide_acorn,
ide_au1xxx, ide_palm3710
};
@@ -461,12 +457,26 @@ struct ide_acpi_drive_link;
struct ide_acpi_hwif_link;
#endif
+struct ide_drive_s;
+
+struct ide_disk_ops {
+ int (*check)(struct ide_drive_s *, const char *);
+ int (*get_capacity)(struct ide_drive_s *);
+ void (*setup)(struct ide_drive_s *);
+ void (*flush)(struct ide_drive_s *);
+ int (*init_media)(struct ide_drive_s *, struct gendisk *);
+ int (*set_doorlock)(struct ide_drive_s *, struct gendisk *,
+ int);
+ ide_startstop_t (*do_request)(struct ide_drive_s *, struct request *,
+ sector_t);
+ int (*end_request)(struct ide_drive_s *, int, int);
+ int (*ioctl)(struct ide_drive_s *, struct block_device *,
+ fmode_t, unsigned int, unsigned long);
+};
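Media drivers are expected to fill one of these ops tables and attach it to the drive they manage. A hedged sketch with hypothetical callbacks:

static const struct ide_disk_ops my_disk_ops = {
	.check		= my_check_media,
	.get_capacity	= my_get_capacity,
	.setup		= my_setup,
	.do_request	= my_do_request,
	.end_request	= my_end_request,
	.ioctl		= my_ioctl,
};

/* in the driver's probe path: drive->disk_ops = &my_disk_ops; */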
+
/* ATAPI device flags */
enum {
IDE_AFLAG_DRQ_INTERRUPT = (1 << 0),
- IDE_AFLAG_MEDIA_CHANGED = (1 << 1),
- /* Drive cannot lock the door. */
- IDE_AFLAG_NO_DOORLOCK = (1 << 2),
/* ide-cd */
/* Drive cannot eject the disc. */
@@ -482,8 +492,6 @@ enum {
* when more than one interrupt is needed.
*/
IDE_AFLAG_LIMIT_NFRAMES = (1 << 7),
- /* Seeking in progress. */
- IDE_AFLAG_SEEKING = (1 << 8),
/* Saved TOC information is current. */
IDE_AFLAG_TOC_VALID = (1 << 9),
/* We think that the drive door is locked. */
@@ -498,14 +506,10 @@ enum {
IDE_AFLAG_LE_SPEED_FIELDS = (1 << 17),
/* ide-floppy */
- /* Format in progress */
- IDE_AFLAG_FORMAT_IN_PROGRESS = (1 << 18),
/* Avoid commands not supported in Clik drive */
IDE_AFLAG_CLIK_DRIVE = (1 << 19),
/* Requires BH algorithm for packets */
IDE_AFLAG_ZIP_DRIVE = (1 << 20),
- /* Write protect */
- IDE_AFLAG_WP = (1 << 21),
/* Supports format progress report */
IDE_AFLAG_SRFP = (1 << 22),
@@ -578,7 +582,11 @@ enum {
/* don't unload heads */
IDE_DFLAG_NO_UNLOAD = (1 << 27),
/* heads unloaded, please don't reset port */
- IDE_DFLAG_PARKED = (1 << 28)
+ IDE_DFLAG_PARKED = (1 << 28),
+ IDE_DFLAG_MEDIA_CHANGED = (1 << 29),
+ /* write protect */
+ IDE_DFLAG_WP = (1 << 30),
+ IDE_DFLAG_FORMAT_IN_PROGRESS = (1 << 31),
};
struct ide_drive_s {
@@ -597,6 +605,8 @@ struct ide_drive_s {
#endif
struct hwif_s *hwif; /* actually (ide_hwif_t *) */
+ const struct ide_disk_ops *disk_ops;
+
unsigned long dev_flags;
unsigned long sleep; /* sleep until this time */
@@ -829,8 +839,6 @@ typedef struct hwif_s {
unsigned extra_ports; /* number of extra dma ports */
unsigned present : 1; /* this interface exists */
- unsigned serialized : 1; /* serialized all channel operation */
- unsigned sharing_irq: 1; /* 1 = sharing irq with another hwif */
unsigned sg_mapped : 1; /* sg_table and sg_nents are ready */
struct device gendev;
@@ -893,6 +901,8 @@ typedef struct hwgroup_s {
int req_gen;
int req_gen_timer;
+
+ spinlock_t lock;
} ide_hwgroup_t;
typedef struct ide_driver_s ide_driver_t;
@@ -1106,6 +1116,14 @@ enum {
IDE_PM_COMPLETED,
};
+int generic_ide_suspend(struct device *, pm_message_t);
+int generic_ide_resume(struct device *);
+
+void ide_complete_power_step(ide_drive_t *, struct request *);
+ide_startstop_t ide_start_power_step(ide_drive_t *, struct request *);
+void ide_complete_pm_request(ide_drive_t *, struct request *);
+void ide_check_pm_state(ide_drive_t *, struct request *);
+
/*
* Subdrivers support.
*
@@ -1123,8 +1141,8 @@ struct ide_driver_s {
void (*resume)(ide_drive_t *);
void (*shutdown)(ide_drive_t *);
#ifdef CONFIG_IDE_PROC_FS
- ide_proc_entry_t *proc;
- const struct ide_proc_devset *settings;
+ ide_proc_entry_t * (*proc_entries)(ide_drive_t *);
+ const struct ide_proc_devset * (*proc_devsets)(ide_drive_t *);
#endif
};
@@ -1142,8 +1160,7 @@ struct ide_ioctl_devset {
int ide_setting_ioctl(ide_drive_t *, struct block_device *, unsigned int,
unsigned long, const struct ide_ioctl_devset *);
-int generic_ide_ioctl(ide_drive_t *, struct file *, struct block_device *,
- unsigned, unsigned long);
+int generic_ide_ioctl(ide_drive_t *, struct block_device *, unsigned, unsigned long);
extern int ide_vlb_clk;
extern int ide_pci_clk;
@@ -1281,6 +1298,13 @@ extern int __ide_pci_register_driver(struct pci_driver *driver, struct module *o
#define ide_pci_register_driver(d) pci_register_driver(d)
#endif
+static inline int ide_pci_is_in_compatibility_mode(struct pci_dev *dev)
+{
+ if ((dev->class >> 8) == PCI_CLASS_STORAGE_IDE && (dev->class & 5) != 5)
+ return 1;
+ return 0;
+}
+
void ide_pci_setup_ports(struct pci_dev *, const struct ide_port_info *, int,
hw_regs_t *, hw_regs_t **);
void ide_setup_pci_noise(struct pci_dev *, const struct ide_port_info *);
@@ -1354,12 +1378,13 @@ enum {
IDE_HFLAG_LEGACY_IRQS = (1 << 21),
/* force use of legacy IRQs */
IDE_HFLAG_FORCE_LEGACY_IRQS = (1 << 22),
- /* limit LBA48 requests to 256 sectors */
- IDE_HFLAG_RQSIZE_256 = (1 << 23),
+ /* host is TRM290 */
+ IDE_HFLAG_TRM290 = (1 << 23),
/* use 32-bit I/O ops */
IDE_HFLAG_IO_32BIT = (1 << 24),
/* unmask IRQs */
IDE_HFLAG_UNMASK_IRQS = (1 << 25),
+ IDE_HFLAG_BROKEN_ALTSTATUS = (1 << 26),
/* serialize ports if DMA is possible (for sl82c105) */
IDE_HFLAG_SERIALIZE_DMA = (1 << 27),
/* force host out of "simplex" mode */
@@ -1392,6 +1417,9 @@ struct ide_port_info {
ide_pci_enablebit_t enablebits[2];
hwif_chipset_t chipset;
+
+	u16	max_sectors; /* if smaller than the default */
+
u32 host_flags;
u8 pio_mask;
u8 swdma_mask;
@@ -1587,13 +1615,13 @@ extern struct mutex ide_cfg_mtx;
/*
* Structure locking:
*
- * ide_cfg_mtx and ide_lock together protect changes to
- * ide_hwif_t->{next,hwgroup}
+ * ide_cfg_mtx and hwgroup->lock together protect changes to
+ * ide_hwif_t->next
* ide_drive_t->next
*
- * ide_hwgroup_t->busy: ide_lock
- * ide_hwgroup_t->hwif: ide_lock
- * ide_hwif_t->mate: constant, no locking
+ * ide_hwgroup_t->busy: hwgroup->lock
+ * ide_hwgroup_t->hwif: hwgroup->lock
+ * ide_hwif_t->{hwgroup,mate}: constant, no locking
* ide_drive_t->hwif: constant, no locking
*/
diff --git a/include/linux/idr.h b/include/linux/idr.h
index fa035f96f2a..dd846df8cd3 100644
--- a/include/linux/idr.h
+++ b/include/linux/idr.h
@@ -52,13 +52,14 @@ struct idr_layer {
unsigned long bitmap; /* A zero bit means "space here" */
struct idr_layer *ary[1<<IDR_BITS];
int count; /* When zero, we can release it */
+ int layer; /* distance from leaf */
struct rcu_head rcu_head;
};
struct idr {
struct idr_layer *top;
struct idr_layer *id_free;
- int layers;
+ int layers; /* only valid without concurrent changes */
int id_free_cnt;
spinlock_t lock;
};
diff --git a/include/linux/ieee80211.h b/include/linux/ieee80211.h
index 14126bc3664..c4e6ca1a630 100644
--- a/include/linux/ieee80211.h
+++ b/include/linux/ieee80211.h
@@ -12,8 +12,8 @@
* published by the Free Software Foundation.
*/
-#ifndef IEEE80211_H
-#define IEEE80211_H
+#ifndef LINUX_IEEE80211_H
+#define LINUX_IEEE80211_H
#include <linux/types.h>
#include <asm/byteorder.h>
@@ -97,7 +97,10 @@
#define IEEE80211_MAX_FRAME_LEN 2352
#define IEEE80211_MAX_SSID_LEN 32
+
#define IEEE80211_MAX_MESH_ID_LEN 32
+#define IEEE80211_MESH_CONFIG_LEN 19
+
#define IEEE80211_QOS_CTL_LEN 2
#define IEEE80211_QOS_CTL_TID_MASK 0x000F
#define IEEE80211_QOS_CTL_TAG1D_MASK 0x0007
@@ -666,6 +669,13 @@ struct ieee80211_cts {
u8 ra[6];
} __attribute__ ((packed));
+struct ieee80211_pspoll {
+ __le16 frame_control;
+ __le16 aid;
+ u8 bssid[6];
+ u8 ta[6];
+} __attribute__ ((packed));
+
/**
* struct ieee80211_bar - HT Block Ack Request
*
@@ -685,28 +695,88 @@ struct ieee80211_bar {
#define IEEE80211_BAR_CTRL_ACK_POLICY_NORMAL 0x0000
#define IEEE80211_BAR_CTRL_CBMTID_COMPRESSED_BA 0x0004
+
+#define IEEE80211_HT_MCS_MASK_LEN 10
+
+/**
+ * struct ieee80211_mcs_info - MCS information
+ * @rx_mask: RX mask
+ * @rx_highest: highest supported RX rate
+ * @tx_params: TX parameters
+ */
+struct ieee80211_mcs_info {
+ u8 rx_mask[IEEE80211_HT_MCS_MASK_LEN];
+ __le16 rx_highest;
+ u8 tx_params;
+ u8 reserved[3];
+} __attribute__((packed));
+
+/* 802.11n HT capability MCS set */
+#define IEEE80211_HT_MCS_RX_HIGHEST_MASK 0x3ff
+#define IEEE80211_HT_MCS_TX_DEFINED 0x01
+#define IEEE80211_HT_MCS_TX_RX_DIFF 0x02
+/* value 0 == 1 stream etc */
+#define IEEE80211_HT_MCS_TX_MAX_STREAMS_MASK 0x0C
+#define IEEE80211_HT_MCS_TX_MAX_STREAMS_SHIFT 2
+#define IEEE80211_HT_MCS_TX_MAX_STREAMS 4
+#define IEEE80211_HT_MCS_TX_UNEQUAL_MODULATION 0x10
+
+/*
+ * 802.11n D5.0 20.3.5 / 20.6 says:
+ * - indices 0 to 7 and 32 are single spatial stream
+ * - 8 to 31 are multiple spatial streams using equal modulation
+ * [8..15 for two streams, 16..23 for three and 24..31 for four]
+ * - remainder are multiple spatial streams using unequal modulation
+ */
+#define IEEE80211_HT_MCS_UNEQUAL_MODULATION_START 33
+#define IEEE80211_HT_MCS_UNEQUAL_MODULATION_START_BYTE \
+ (IEEE80211_HT_MCS_UNEQUAL_MODULATION_START / 8)
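As a rough illustration of how the rx_mask bitmap is meant to be read (this helper is not part of the header):

static bool ht_mcs_rx_supported(const struct ieee80211_mcs_info *mcs, int idx)
{
	if (idx < 0 || idx >= IEEE80211_HT_MCS_MASK_LEN * 8)
		return false;

	/* bit N of the RX mask corresponds to MCS index N */
	return mcs->rx_mask[idx / 8] & (1 << (idx % 8));
}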
+
/**
* struct ieee80211_ht_cap - HT capabilities
*
- * This structure refers to "HT capabilities element" as
- * described in 802.11n draft section 7.3.2.52
+ * This structure is the "HT capabilities element" as
+ * described in 802.11n D5.0 7.3.2.57
*/
struct ieee80211_ht_cap {
__le16 cap_info;
u8 ampdu_params_info;
- u8 supp_mcs_set[16];
+
+ /* 16 bytes MCS information */
+ struct ieee80211_mcs_info mcs;
+
__le16 extended_ht_cap_info;
__le32 tx_BF_cap_info;
u8 antenna_selection_info;
} __attribute__ ((packed));
+/* 802.11n HT capabilities masks (for cap_info) */
+#define IEEE80211_HT_CAP_LDPC_CODING 0x0001
+#define IEEE80211_HT_CAP_SUP_WIDTH_20_40 0x0002
+#define IEEE80211_HT_CAP_SM_PS 0x000C
+#define IEEE80211_HT_CAP_GRN_FLD 0x0010
+#define IEEE80211_HT_CAP_SGI_20 0x0020
+#define IEEE80211_HT_CAP_SGI_40 0x0040
+#define IEEE80211_HT_CAP_TX_STBC 0x0080
+#define IEEE80211_HT_CAP_RX_STBC 0x0300
+#define IEEE80211_HT_CAP_DELAY_BA 0x0400
+#define IEEE80211_HT_CAP_MAX_AMSDU 0x0800
+#define IEEE80211_HT_CAP_DSSSCCK40 0x1000
+#define IEEE80211_HT_CAP_PSMP_SUPPORT 0x2000
+#define IEEE80211_HT_CAP_40MHZ_INTOLERANT 0x4000
+#define IEEE80211_HT_CAP_LSIG_TXOP_PROT 0x8000
+
+/* 802.11n HT capability AMPDU settings (for ampdu_params_info) */
+#define IEEE80211_HT_AMPDU_PARM_FACTOR 0x03
+#define IEEE80211_HT_AMPDU_PARM_DENSITY 0x1C
+
/**
- * struct ieee80211_ht_cap - HT additional information
+ * struct ieee80211_ht_info - HT information
*
- * This structure refers to "HT information element" as
- * described in 802.11n draft section 7.3.2.53
+ * This structure is the "HT information element" as
+ * described in 802.11n D5.0 7.3.2.58
*/
-struct ieee80211_ht_addt_info {
+struct ieee80211_ht_info {
u8 control_chan;
u8 ht_param;
__le16 operation_mode;
@@ -714,36 +784,33 @@ struct ieee80211_ht_addt_info {
u8 basic_set[16];
} __attribute__ ((packed));
-/* 802.11n HT capabilities masks */
-#define IEEE80211_HT_CAP_SUP_WIDTH 0x0002
-#define IEEE80211_HT_CAP_SM_PS 0x000C
-#define IEEE80211_HT_CAP_GRN_FLD 0x0010
-#define IEEE80211_HT_CAP_SGI_20 0x0020
-#define IEEE80211_HT_CAP_SGI_40 0x0040
-#define IEEE80211_HT_CAP_DELAY_BA 0x0400
-#define IEEE80211_HT_CAP_MAX_AMSDU 0x0800
-#define IEEE80211_HT_CAP_DSSSCCK40 0x1000
-/* 802.11n HT capability AMPDU settings */
-#define IEEE80211_HT_CAP_AMPDU_FACTOR 0x03
-#define IEEE80211_HT_CAP_AMPDU_DENSITY 0x1C
-/* 802.11n HT capability MSC set */
-#define IEEE80211_SUPP_MCS_SET_UEQM 4
-#define IEEE80211_HT_CAP_MAX_STREAMS 4
-#define IEEE80211_SUPP_MCS_SET_LEN 10
-/* maximum streams the spec allows */
-#define IEEE80211_HT_CAP_MCS_TX_DEFINED 0x01
-#define IEEE80211_HT_CAP_MCS_TX_RX_DIFF 0x02
-#define IEEE80211_HT_CAP_MCS_TX_STREAMS 0x0C
-#define IEEE80211_HT_CAP_MCS_TX_UEQM 0x10
-/* 802.11n HT IE masks */
-#define IEEE80211_HT_IE_CHA_SEC_OFFSET 0x03
-#define IEEE80211_HT_IE_CHA_SEC_NONE 0x00
-#define IEEE80211_HT_IE_CHA_SEC_ABOVE 0x01
-#define IEEE80211_HT_IE_CHA_SEC_BELOW 0x03
-#define IEEE80211_HT_IE_CHA_WIDTH 0x04
-#define IEEE80211_HT_IE_HT_PROTECTION 0x0003
-#define IEEE80211_HT_IE_NON_GF_STA_PRSNT 0x0004
-#define IEEE80211_HT_IE_NON_HT_STA_PRSNT 0x0010
+/* for ht_param */
+#define IEEE80211_HT_PARAM_CHA_SEC_OFFSET 0x03
+#define IEEE80211_HT_PARAM_CHA_SEC_NONE 0x00
+#define IEEE80211_HT_PARAM_CHA_SEC_ABOVE 0x01
+#define IEEE80211_HT_PARAM_CHA_SEC_BELOW 0x03
+#define IEEE80211_HT_PARAM_CHAN_WIDTH_ANY 0x04
+#define IEEE80211_HT_PARAM_RIFS_MODE 0x08
+#define IEEE80211_HT_PARAM_SPSMP_SUPPORT 0x10
+#define IEEE80211_HT_PARAM_SERV_INTERVAL_GRAN 0xE0
+
+/* for operation_mode */
+#define IEEE80211_HT_OP_MODE_PROTECTION 0x0003
+#define IEEE80211_HT_OP_MODE_PROTECTION_NONE 0
+#define IEEE80211_HT_OP_MODE_PROTECTION_NONMEMBER 1
+#define IEEE80211_HT_OP_MODE_PROTECTION_20MHZ 2
+#define IEEE80211_HT_OP_MODE_PROTECTION_NONHT_MIXED 3
+#define IEEE80211_HT_OP_MODE_NON_GF_STA_PRSNT 0x0004
+#define IEEE80211_HT_OP_MODE_NON_HT_STA_PRSNT 0x0010
+
+/* for stbc_param */
+#define IEEE80211_HT_STBC_PARAM_DUAL_BEACON 0x0040
+#define IEEE80211_HT_STBC_PARAM_DUAL_CTS_PROT 0x0080
+#define IEEE80211_HT_STBC_PARAM_STBC_BEACON 0x0100
+#define IEEE80211_HT_STBC_PARAM_LSIG_TXOP_FULLPROT 0x0200
+#define IEEE80211_HT_STBC_PARAM_PCO_ACTIVE 0x0400
+#define IEEE80211_HT_STBC_PARAM_PCO_PHASE 0x0800
+
/* block-ack parameters */
#define IEEE80211_ADDBA_PARAM_POLICY_MASK 0x0002
@@ -769,7 +836,6 @@ struct ieee80211_ht_addt_info {
/* Authentication algorithms */
#define WLAN_AUTH_OPEN 0
#define WLAN_AUTH_SHARED_KEY 1
-#define WLAN_AUTH_FAST_BSS_TRANSITION 2
#define WLAN_AUTH_LEAP 128
#define WLAN_AUTH_CHALLENGE_LEN 128
@@ -949,7 +1015,7 @@ enum ieee80211_eid {
WLAN_EID_EXT_SUPP_RATES = 50,
/* 802.11n */
WLAN_EID_HT_CAPABILITY = 45,
- WLAN_EID_HT_EXTRA_INFO = 61,
+ WLAN_EID_HT_INFORMATION = 61,
/* 802.11i */
WLAN_EID_RSN = 48,
WLAN_EID_WPA = 221,
@@ -976,6 +1042,68 @@ enum ieee80211_spectrum_mgmt_actioncode {
WLAN_ACTION_SPCT_CHL_SWITCH = 4,
};
+/*
+ * IEEE 802.11-2007 7.3.2.9 Country information element
+ *
+ * Minimum length is 8 octets, ie len must be evenly
+ * divisible by 2
+ */
+
+/* Although the spec says 8, I'm seeing 6 in practice */
+#define IEEE80211_COUNTRY_IE_MIN_LEN 6
+
+/*
+ * For regulatory extension stuff see IEEE 802.11-2007
+ * Annex I (page 1141) and Annex J (page 1147). Also
+ * review 7.3.2.9.
+ *
+ * When dot11RegulatoryClassesRequired is true and the
+ * first_channel/reg_extension_id is >= 201 then the IE
+ * is composed of the 'ext' struct represented below:
+ *
+ * - Regulatory extension ID - when generating IE this just needs
+ * to be monotonically increasing for each triplet passed in
+ * the IE
+ * - Regulatory class - index into set of rules
+ * - Coverage class - index into air propagation time (Table 7-27),
+ * in microseconds, you can compute the air propagation time from
+ * the index by multiplying by 3, so index 10 yields a propagation
+ * of 30 us. Valid values are 0-31, values 32-255 are not defined
+ * yet. A value of 0 indicates air propagation of <= 1 us.
+ *
+ * See also Table I.2 for Emission limit sets and table
+ * I.3 for Behavior limit sets. Table J.1 indicates how to map
+ * a reg_class to an emission limit set and behavior limit set.
+ */
+#define IEEE80211_COUNTRY_EXTENSION_ID 201
+
+/*
+ * Channel numbers in the IE must be monotonically increasing
+ * if dot11RegulatoryClassesRequired is not true.
+ *
+ * If dot11RegulatoryClassesRequired is true consecutive
+ * subband triplets following a regulatory triplet shall
+ * have monotonically increasing first_channel number fields.
+ *
+ * Channel numbers shall not overlap.
+ *
+ * Note that max_power is signed.
+ */
+struct ieee80211_country_ie_triplet {
+ union {
+ struct {
+ u8 first_channel;
+ u8 num_channels;
+ s8 max_power;
+ } __attribute__ ((packed)) chans;
+ struct {
+ u8 reg_extension_id;
+ u8 reg_class;
+ u8 coverage_class;
+ } __attribute__ ((packed)) ext;
+ };
+} __attribute__ ((packed));
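A hedged sketch of walking these triplets; it assumes the caller has already skipped the 3-octet country string and validated the element length:

static void walk_country_triplets(const u8 *pos, size_t len)
{
	const struct ieee80211_country_ie_triplet *t;

	for (; len >= 3; pos += 3, len -= 3) {
		t = (const struct ieee80211_country_ie_triplet *)pos;

		if (t->chans.first_channel >= IEEE80211_COUNTRY_EXTENSION_ID) {
			/* regulatory extension triplet: see t->ext.reg_class etc. */
			continue;
		}
		/* subband triplet: t->chans.num_channels channels starting at
		 * t->chans.first_channel, limited to t->chans.max_power dBm */
	}
}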
+
/* BACK action code */
enum ieee80211_back_actioncode {
WLAN_ACTION_ADDBA_REQ = 0,
@@ -1057,4 +1185,4 @@ static inline u8 *ieee80211_get_DA(struct ieee80211_hdr *hdr)
return hdr->addr1;
}
-#endif /* IEEE80211_H */
+#endif /* LINUX_IEEE80211_H */
diff --git a/include/linux/if.h b/include/linux/if.h
index 65246846c84..2a6e29620a9 100644
--- a/include/linux/if.h
+++ b/include/linux/if.h
@@ -65,6 +65,7 @@
#define IFF_BONDING 0x20 /* bonding master or slave */
#define IFF_SLAVE_NEEDARP 0x40 /* need ARPs for validation */
#define IFF_ISATAP 0x80 /* ISATAP interface (RFC4214) */
+#define IFF_MASTER_ARPMON 0x100 /* bonding master, ARP mon in use */
#define IF_GET_IFACE 0x0001 /* for querying only */
#define IF_GET_PROTO 0x0002
diff --git a/include/linux/if_arp.h b/include/linux/if_arp.h
index 4d3401812e6..5ff89809a58 100644
--- a/include/linux/if_arp.h
+++ b/include/linux/if_arp.h
@@ -87,6 +87,9 @@
#define ARPHRD_IEEE80211_PRISM 802 /* IEEE 802.11 + Prism2 header */
#define ARPHRD_IEEE80211_RADIOTAP 803 /* IEEE 802.11 + radiotap header */
+#define ARPHRD_PHONET 820 /* PhoNet media type */
+#define ARPHRD_PHONET_PIPE 821 /* PhoNet pipe header */
+
#define ARPHRD_VOID 0xFFFF /* Void type, nothing is known */
#define ARPHRD_NONE 0xFFFE /* zero header length */
diff --git a/include/linux/if_vlan.h b/include/linux/if_vlan.h
index 9e7b49b8062..a5cb0c3f6dc 100644
--- a/include/linux/if_vlan.h
+++ b/include/linux/if_vlan.h
@@ -114,6 +114,8 @@ extern u16 vlan_dev_vlan_id(const struct net_device *dev);
extern int __vlan_hwaccel_rx(struct sk_buff *skb, struct vlan_group *grp,
u16 vlan_tci, int polling);
+extern int vlan_hwaccel_do_receive(struct sk_buff *skb);
+
#else
static inline struct net_device *vlan_dev_real_dev(const struct net_device *dev)
{
@@ -133,6 +135,11 @@ static inline int __vlan_hwaccel_rx(struct sk_buff *skb, struct vlan_group *grp,
BUG();
return NET_XMIT_SUCCESS;
}
+
+static inline int vlan_hwaccel_do_receive(struct sk_buff *skb)
+{
+ return 0;
+}
#endif
/**
diff --git a/include/linux/in.h b/include/linux/in.h
index db458beef19..d60122a3a08 100644
--- a/include/linux/in.h
+++ b/include/linux/in.h
@@ -80,6 +80,10 @@ struct in_addr {
/* BSD compatibility */
#define IP_RECVRETOPTS IP_RETOPTS
+/* TProxy original addresses */
+#define IP_ORIGDSTADDR 20
+#define IP_RECVORIGDSTADDR IP_ORIGDSTADDR
+
/* IP_MTU_DISCOVER values */
#define IP_PMTUDISC_DONT 0 /* Never send DF frames */
#define IP_PMTUDISC_WANT 1 /* Use per route hints */
diff --git a/include/linux/init.h b/include/linux/init.h
index 93538b696e3..68cb0265d00 100644
--- a/include/linux/init.h
+++ b/include/linux/init.h
@@ -40,7 +40,7 @@
/* These are for everybody (although not all archs will actually
discard it in modules) */
-#define __init __section(.init.text) __cold
+#define __init __section(.init.text) __cold notrace
#define __initdata __section(.init.data)
#define __initconst __section(.init.rodata)
#define __exitdata __section(.exit.data)
@@ -112,21 +112,25 @@
#define __FINIT .previous
#define __INITDATA .section ".init.data","aw"
+#define __INITRODATA .section ".init.rodata","a"
#define __FINITDATA .previous
#define __DEVINIT .section ".devinit.text", "ax"
#define __DEVINITDATA .section ".devinit.data", "aw"
+#define __DEVINITRODATA .section ".devinit.rodata", "a"
#define __CPUINIT .section ".cpuinit.text", "ax"
#define __CPUINITDATA .section ".cpuinit.data", "aw"
+#define __CPUINITRODATA .section ".cpuinit.rodata", "a"
#define __MEMINIT .section ".meminit.text", "ax"
#define __MEMINITDATA .section ".meminit.data", "aw"
+#define __MEMINITRODATA .section ".meminit.rodata", "a"
/* silence warnings when references are OK */
#define __REF .section ".ref.text", "ax"
#define __REFDATA .section ".ref.data", "aw"
-#define __REFCONST .section ".ref.rodata", "aw"
+#define __REFCONST .section ".ref.rodata", "a"
#ifndef __ASSEMBLY__
/*
@@ -233,9 +237,6 @@ struct obs_kernel_param {
__attribute__((aligned((sizeof(long))))) \
= { __setup_str_##unique_id, fn, early }
-#define __setup_null_param(str, unique_id) \
- __setup_param(str, unique_id, NULL, 0)
-
#define __setup(str, fn) \
__setup_param(str, fn, fn, 0)
@@ -296,7 +297,6 @@ void __init parse_early_param(void);
void cleanup_module(void) __attribute__((alias(#exitfn)));
#define __setup_param(str, unique_id, fn) /* nothing */
-#define __setup_null_param(str, unique_id) /* nothing */
#define __setup(str, func) /* nothing */
#endif
diff --git a/include/linux/init_task.h b/include/linux/init_task.h
index 021d8e720c7..959f5522d10 100644
--- a/include/linux/init_task.h
+++ b/include/linux/init_task.h
@@ -57,7 +57,6 @@ extern struct nsproxy init_nsproxy;
.mnt_ns = NULL, \
INIT_NET_NS(net_ns) \
INIT_IPC_NS(ipc_ns) \
- .user_ns = &init_user_ns, \
}
#define INIT_SIGHAND(sighand) { \
@@ -113,6 +112,8 @@ extern struct group_info init_groups;
# define CAP_INIT_BSET CAP_INIT_EFF_SET
#endif
+extern struct cred init_cred;
+
/*
* INIT_TASK is used to set up the first task table, touch at
* your own risk!. Base=0, limit=0x1fffff (=2MB)
@@ -147,13 +148,10 @@ extern struct group_info init_groups;
.children = LIST_HEAD_INIT(tsk.children), \
.sibling = LIST_HEAD_INIT(tsk.sibling), \
.group_leader = &tsk, \
- .group_info = &init_groups, \
- .cap_effective = CAP_INIT_EFF_SET, \
- .cap_inheritable = CAP_INIT_INH_SET, \
- .cap_permitted = CAP_FULL_SET, \
- .cap_bset = CAP_INIT_BSET, \
- .securebits = SECUREBITS_DEFAULT, \
- .user = INIT_USER, \
+ .real_cred = &init_cred, \
+ .cred = &init_cred, \
+ .cred_exec_mutex = \
+ __MUTEX_INITIALIZER(tsk.cred_exec_mutex), \
.comm = "swapper", \
.thread = INIT_THREAD, \
.fs = &init_fs, \
@@ -170,6 +168,7 @@ extern struct group_info init_groups;
.cpu_timers = INIT_CPU_TIMERS(tsk.cpu_timers), \
.fs_excl = ATOMIC_INIT(0), \
.pi_lock = __SPIN_LOCK_UNLOCKED(tsk.pi_lock), \
+ .timer_slack_ns = 50000, /* 50 usec default slack */ \
.pids = { \
[PIDTYPE_PID] = INIT_PID_LINK(PIDTYPE_PID), \
[PIDTYPE_PGID] = INIT_PID_LINK(PIDTYPE_PGID), \
diff --git a/include/linux/inotify.h b/include/linux/inotify.h
index bd578578a8b..37ea2894b3c 100644
--- a/include/linux/inotify.h
+++ b/include/linux/inotify.h
@@ -134,6 +134,8 @@ extern void inotify_remove_watch_locked(struct inotify_handle *,
struct inotify_watch *);
extern void get_inotify_watch(struct inotify_watch *);
extern void put_inotify_watch(struct inotify_watch *);
+extern int pin_inotify_watch(struct inotify_watch *);
+extern void unpin_inotify_watch(struct inotify_watch *);
#else
@@ -228,6 +230,15 @@ static inline void put_inotify_watch(struct inotify_watch *watch)
{
}
+static inline int pin_inotify_watch(struct inotify_watch *watch)
+{
+ return 0;
+}
+
+static inline void unpin_inotify_watch(struct inotify_watch *watch)
+{
+}
+
#endif /* CONFIG_INOTIFY */
#endif /* __KERNEL __ */
diff --git a/include/linux/input.h b/include/linux/input.h
index a5802c9c81a..9a6355f74db 100644
--- a/include/linux/input.h
+++ b/include/linux/input.h
@@ -238,6 +238,7 @@ struct input_absinfo {
#define KEY_KPEQUAL 117
#define KEY_KPPLUSMINUS 118
#define KEY_PAUSE 119
+#define KEY_SCALE 120 /* AL Compiz Scale (Expose) */
#define KEY_KPCOMMA 121
#define KEY_HANGEUL 122
@@ -322,6 +323,7 @@ struct input_absinfo {
#define KEY_PAUSECD 201
#define KEY_PROG3 202
#define KEY_PROG4 203
+#define KEY_DASHBOARD 204 /* AL Dashboard */
#define KEY_SUSPEND 205
#define KEY_CLOSE 206 /* AC Close */
#define KEY_PLAY 207
@@ -577,9 +579,22 @@ struct input_absinfo {
#define KEY_BRL_DOT9 0x1f9
#define KEY_BRL_DOT10 0x1fa
+#define KEY_NUMERIC_0 0x200 /* used by phones, remote controls, */
+#define KEY_NUMERIC_1 0x201 /* and other keypads */
+#define KEY_NUMERIC_2 0x202
+#define KEY_NUMERIC_3 0x203
+#define KEY_NUMERIC_4 0x204
+#define KEY_NUMERIC_5 0x205
+#define KEY_NUMERIC_6 0x206
+#define KEY_NUMERIC_7 0x207
+#define KEY_NUMERIC_8 0x208
+#define KEY_NUMERIC_9 0x209
+#define KEY_NUMERIC_STAR 0x20a
+#define KEY_NUMERIC_POUND 0x20b
+
/* We avoid low common keys in module aliases so they don't get huge. */
#define KEY_MIN_INTERESTING KEY_MUTE
-#define KEY_MAX 0x1ff
+#define KEY_MAX 0x2ff
#define KEY_CNT (KEY_MAX+1)
/*
@@ -644,6 +659,8 @@ struct input_absinfo {
#define SW_RADIO SW_RFKILL_ALL /* deprecated */
#define SW_MICROPHONE_INSERT 0x04 /* set = inserted */
#define SW_DOCK 0x05 /* set = plugged into dock */
+#define SW_LINEOUT_INSERT 0x06 /* set = inserted */
+#define SW_JACK_PHYSICAL_INSERT 0x07 /* set = mechanical switch set */
#define SW_MAX 0x0f
#define SW_CNT (SW_MAX+1)
diff --git a/include/linux/intel-iommu.h b/include/linux/intel-iommu.h
new file mode 100644
index 00000000000..3d017cfd245
--- /dev/null
+++ b/include/linux/intel-iommu.h
@@ -0,0 +1,363 @@
+/*
+ * Copyright (c) 2006, Intel Corporation.
+ *
+ * This program is free software; you can redistribute it and/or modify it
+ * under the terms and conditions of the GNU General Public License,
+ * version 2, as published by the Free Software Foundation.
+ *
+ * This program is distributed in the hope it will be useful, but WITHOUT
+ * ANY WARRANTY; without even the implied warranty of MERCHANTABILITY or
+ * FITNESS FOR A PARTICULAR PURPOSE. See the GNU General Public License for
+ * more details.
+ *
+ * You should have received a copy of the GNU General Public License along with
+ * this program; if not, write to the Free Software Foundation, Inc., 59 Temple
+ * Place - Suite 330, Boston, MA 02111-1307 USA.
+ *
+ * Copyright (C) 2006-2008 Intel Corporation
+ * Author: Ashok Raj <ashok.raj@intel.com>
+ * Author: Anil S Keshavamurthy <anil.s.keshavamurthy@intel.com>
+ */
+
+#ifndef _INTEL_IOMMU_H_
+#define _INTEL_IOMMU_H_
+
+#include <linux/types.h>
+#include <linux/msi.h>
+#include <linux/sysdev.h>
+#include <linux/iova.h>
+#include <linux/io.h>
+#include <linux/dma_remapping.h>
+#include <asm/cacheflush.h>
+#include <asm/iommu.h>
+
+/*
+ * Intel IOMMU register specification per version 1.0 public spec.
+ */
+
+#define DMAR_VER_REG 0x0 /* Arch version supported by this IOMMU */
+#define DMAR_CAP_REG 0x8 /* Hardware supported capabilities */
+#define DMAR_ECAP_REG 0x10 /* Extended capabilities supported */
+#define DMAR_GCMD_REG 0x18 /* Global command register */
+#define DMAR_GSTS_REG 0x1c /* Global status register */
+#define DMAR_RTADDR_REG 0x20 /* Root entry table */
+#define DMAR_CCMD_REG 0x28 /* Context command reg */
+#define DMAR_FSTS_REG 0x34 /* Fault Status register */
+#define DMAR_FECTL_REG 0x38 /* Fault control register */
+#define DMAR_FEDATA_REG 0x3c /* Fault event interrupt data register */
+#define DMAR_FEADDR_REG 0x40 /* Fault event interrupt addr register */
+#define DMAR_FEUADDR_REG 0x44 /* Upper address register */
+#define DMAR_AFLOG_REG 0x58 /* Advanced Fault control */
+#define DMAR_PMEN_REG 0x64 /* Enable Protected Memory Region */
+#define DMAR_PLMBASE_REG 0x68 /* PMRR Low addr */
+#define DMAR_PLMLIMIT_REG 0x6c /* PMRR low limit */
+#define DMAR_PHMBASE_REG 0x70 /* pmrr high base addr */
+#define DMAR_PHMLIMIT_REG 0x78 /* pmrr high limit */
+#define DMAR_IQH_REG 0x80 /* Invalidation queue head register */
+#define DMAR_IQT_REG 0x88 /* Invalidation queue tail register */
+#define DMAR_IQA_REG 0x90 /* Invalidation queue addr register */
+#define DMAR_ICS_REG 0x98 /* Invalidation complete status register */
+#define DMAR_IRTA_REG 0xb8 /* Interrupt remapping table addr register */
+
+#define OFFSET_STRIDE (9)
+/*
+#define dmar_readl(dmar, reg) readl(dmar + reg)
+#define dmar_readq(dmar, reg) ({ \
+ u32 lo, hi; \
+ lo = readl(dmar + reg); \
+ hi = readl(dmar + reg + 4); \
+ (((u64) hi) << 32) + lo; })
+*/
+static inline u64 dmar_readq(void __iomem *addr)
+{
+ u32 lo, hi;
+ lo = readl(addr);
+ hi = readl(addr + 4);
+ return (((u64) hi) << 32) + lo;
+}
+
+static inline void dmar_writeq(void __iomem *addr, u64 val)
+{
+ writel((u32)val, addr);
+ writel((u32)(val >> 32), addr + 4);
+}
+
+#define DMAR_VER_MAJOR(v) (((v) & 0xf0) >> 4)
+#define DMAR_VER_MINOR(v) ((v) & 0x0f)
+
+/*
+ * Decoding Capability Register
+ */
+#define cap_read_drain(c) (((c) >> 55) & 1)
+#define cap_write_drain(c) (((c) >> 54) & 1)
+#define cap_max_amask_val(c) (((c) >> 48) & 0x3f)
+#define cap_num_fault_regs(c) ((((c) >> 40) & 0xff) + 1)
+#define cap_pgsel_inv(c) (((c) >> 39) & 1)
+
+#define cap_super_page_val(c) (((c) >> 34) & 0xf)
+#define cap_super_offset(c) (((find_first_bit(&cap_super_page_val(c), 4)) \
+ * OFFSET_STRIDE) + 21)
+
+#define cap_fault_reg_offset(c) ((((c) >> 24) & 0x3ff) * 16)
+#define cap_max_fault_reg_offset(c) \
+ (cap_fault_reg_offset(c) + cap_num_fault_regs(c) * 16)
+
+#define cap_zlr(c) (((c) >> 22) & 1)
+#define cap_isoch(c) (((c) >> 23) & 1)
+#define cap_mgaw(c) ((((c) >> 16) & 0x3f) + 1)
+#define cap_sagaw(c) (((c) >> 8) & 0x1f)
+#define cap_caching_mode(c) (((c) >> 7) & 1)
+#define cap_phmr(c) (((c) >> 6) & 1)
+#define cap_plmr(c) (((c) >> 5) & 1)
+#define cap_rwbf(c) (((c) >> 4) & 1)
+#define cap_afl(c) (((c) >> 3) & 1)
+#define cap_ndoms(c) (((unsigned long)1) << (4 + 2 * ((c) & 0x7)))
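A minimal sketch of how these accessors combine with dmar_readq() to decode a unit's capability register (report_dmar_caps is a hypothetical helper):

static void report_dmar_caps(void __iomem *reg)
{
	u64 cap = dmar_readq(reg + DMAR_CAP_REG);

	printk(KERN_INFO "DMAR: mgaw %d, sagaw 0x%lx, caching mode %d, %lu domains\n",
	       (int)cap_mgaw(cap), (unsigned long)cap_sagaw(cap),
	       (int)cap_caching_mode(cap), cap_ndoms(cap));
}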
+/*
+ * Extended Capability Register
+ */
+
+#define ecap_niotlb_iunits(e) ((((e) >> 24) & 0xff) + 1)
+#define ecap_iotlb_offset(e) ((((e) >> 8) & 0x3ff) * 16)
+#define ecap_max_iotlb_offset(e) \
+ (ecap_iotlb_offset(e) + ecap_niotlb_iunits(e) * 16)
+#define ecap_coherent(e) ((e) & 0x1)
+#define ecap_qis(e) ((e) & 0x2)
+#define ecap_eim_support(e) ((e >> 4) & 0x1)
+#define ecap_ir_support(e) ((e >> 3) & 0x1)
+#define ecap_max_handle_mask(e) ((e >> 20) & 0xf)
+
+
+/* IOTLB_REG */
+#define DMA_TLB_FLUSH_GRANU_OFFSET 60
+#define DMA_TLB_GLOBAL_FLUSH (((u64)1) << 60)
+#define DMA_TLB_DSI_FLUSH (((u64)2) << 60)
+#define DMA_TLB_PSI_FLUSH (((u64)3) << 60)
+#define DMA_TLB_IIRG(type) ((type >> 60) & 7)
+#define DMA_TLB_IAIG(val) (((val) >> 57) & 7)
+#define DMA_TLB_READ_DRAIN (((u64)1) << 49)
+#define DMA_TLB_WRITE_DRAIN (((u64)1) << 48)
+#define DMA_TLB_DID(id) (((u64)((id) & 0xffff)) << 32)
+#define DMA_TLB_IVT (((u64)1) << 63)
+#define DMA_TLB_IH_NONLEAF (((u64)1) << 6)
+#define DMA_TLB_MAX_SIZE (0x3f)
+
+/* INVALID_DESC */
+#define DMA_CCMD_INVL_GRANU_OFFSET 61
+#define DMA_ID_TLB_GLOBAL_FLUSH (((u64)1) << 3)
+#define DMA_ID_TLB_DSI_FLUSH (((u64)2) << 3)
+#define DMA_ID_TLB_PSI_FLUSH (((u64)3) << 3)
+#define DMA_ID_TLB_READ_DRAIN (((u64)1) << 7)
+#define DMA_ID_TLB_WRITE_DRAIN (((u64)1) << 6)
+#define DMA_ID_TLB_DID(id) (((u64)((id & 0xffff) << 16)))
+#define DMA_ID_TLB_IH_NONLEAF (((u64)1) << 6)
+#define DMA_ID_TLB_ADDR(addr) (addr)
+#define DMA_ID_TLB_ADDR_MASK(mask) (mask)
+
+/* PMEN_REG */
+#define DMA_PMEN_EPM (((u32)1)<<31)
+#define DMA_PMEN_PRS (((u32)1)<<0)
+
+/* GCMD_REG */
+#define DMA_GCMD_TE (((u32)1) << 31)
+#define DMA_GCMD_SRTP (((u32)1) << 30)
+#define DMA_GCMD_SFL (((u32)1) << 29)
+#define DMA_GCMD_EAFL (((u32)1) << 28)
+#define DMA_GCMD_WBF (((u32)1) << 27)
+#define DMA_GCMD_QIE (((u32)1) << 26)
+#define DMA_GCMD_SIRTP (((u32)1) << 24)
+#define DMA_GCMD_IRE (((u32) 1) << 25)
+
+/* GSTS_REG */
+#define DMA_GSTS_TES (((u32)1) << 31)
+#define DMA_GSTS_RTPS (((u32)1) << 30)
+#define DMA_GSTS_FLS (((u32)1) << 29)
+#define DMA_GSTS_AFLS (((u32)1) << 28)
+#define DMA_GSTS_WBFS (((u32)1) << 27)
+#define DMA_GSTS_QIES (((u32)1) << 26)
+#define DMA_GSTS_IRTPS (((u32)1) << 24)
+#define DMA_GSTS_IRES (((u32)1) << 25)
+
+/* CCMD_REG */
+#define DMA_CCMD_ICC (((u64)1) << 63)
+#define DMA_CCMD_GLOBAL_INVL (((u64)1) << 61)
+#define DMA_CCMD_DOMAIN_INVL (((u64)2) << 61)
+#define DMA_CCMD_DEVICE_INVL (((u64)3) << 61)
+#define DMA_CCMD_FM(m) (((u64)((m) & 0x3)) << 32)
+#define DMA_CCMD_MASK_NOBIT 0
+#define DMA_CCMD_MASK_1BIT 1
+#define DMA_CCMD_MASK_2BIT 2
+#define DMA_CCMD_MASK_3BIT 3
+#define DMA_CCMD_SID(s) (((u64)((s) & 0xffff)) << 16)
+#define DMA_CCMD_DID(d) ((u64)((d) & 0xffff))
+
+/* FECTL_REG */
+#define DMA_FECTL_IM (((u32)1) << 31)
+
+/* FSTS_REG */
+#define DMA_FSTS_PPF ((u32)2)
+#define DMA_FSTS_PFO ((u32)1)
+#define dma_fsts_fault_record_index(s) (((s) >> 8) & 0xff)
+
+/* FRCD_REG, 32 bits access */
+#define DMA_FRCD_F (((u32)1) << 31)
+#define dma_frcd_type(d) ((d >> 30) & 1)
+#define dma_frcd_fault_reason(c) (c & 0xff)
+#define dma_frcd_source_id(c) (c & 0xffff)
+/* low 64 bit */
+#define dma_frcd_page_addr(d) (d & (((u64)-1) << PAGE_SHIFT))
+
+#define IOMMU_WAIT_OP(iommu, offset, op, cond, sts) \
+do { \
+ cycles_t start_time = get_cycles(); \
+ while (1) { \
+ sts = op(iommu->reg + offset); \
+ if (cond) \
+ break; \
+ if (DMAR_OPERATION_TIMEOUT < (get_cycles() - start_time))\
+ panic("DMAR hardware is malfunctioning\n"); \
+ cpu_relax(); \
+ } \
+} while (0)
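The macro busy-waits on a status register until the caller's condition becomes true, panicking after DMAR_OPERATION_TIMEOUT cycles (that timeout constant is defined by the driver, not this header). A hedged sketch of the typical use, enabling translation and waiting for the TES status bit:

static void my_enable_translation(struct intel_iommu *iommu)
{
	u32 sts;
	unsigned long flags;

	spin_lock_irqsave(&iommu->register_lock, flags);
	writel(iommu->gcmd | DMA_GCMD_TE, iommu->reg + DMAR_GCMD_REG);

	/* spin until the hardware reports translation enabled */
	IOMMU_WAIT_OP(iommu, DMAR_GSTS_REG, readl, (sts & DMA_GSTS_TES), sts);
	spin_unlock_irqrestore(&iommu->register_lock, flags);
}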
+
+#define QI_LENGTH 256 /* queue length */
+
+enum {
+ QI_FREE,
+ QI_IN_USE,
+ QI_DONE
+};
+
+#define QI_CC_TYPE 0x1
+#define QI_IOTLB_TYPE 0x2
+#define QI_DIOTLB_TYPE 0x3
+#define QI_IEC_TYPE 0x4
+#define QI_IWD_TYPE 0x5
+
+#define QI_IEC_SELECTIVE (((u64)1) << 4)
+#define QI_IEC_IIDEX(idx) (((u64)(idx & 0xffff) << 32))
+#define QI_IEC_IM(m) (((u64)(m & 0x1f) << 27))
+
+#define QI_IWD_STATUS_DATA(d) (((u64)d) << 32)
+#define QI_IWD_STATUS_WRITE (((u64)1) << 5)
+
+#define QI_IOTLB_DID(did) (((u64)did) << 16)
+#define QI_IOTLB_DR(dr) (((u64)dr) << 7)
+#define QI_IOTLB_DW(dw) (((u64)dw) << 6)
+#define QI_IOTLB_GRAN(gran) (((u64)gran) >> (DMA_TLB_FLUSH_GRANU_OFFSET-4))
+#define QI_IOTLB_ADDR(addr) (((u64)addr) & VTD_PAGE_MASK)
+#define QI_IOTLB_IH(ih) (((u64)ih) << 6)
+#define QI_IOTLB_AM(am) (((u8)am))
+
+#define QI_CC_FM(fm) (((u64)fm) << 48)
+#define QI_CC_SID(sid) (((u64)sid) << 32)
+#define QI_CC_DID(did) (((u64)did) << 16)
+#define QI_CC_GRAN(gran) (((u64)gran) >> (DMA_CCMD_INVL_GRANU_OFFSET-4))
+
+struct qi_desc {
+ u64 low, high;
+};
+
+struct q_inval {
+ spinlock_t q_lock;
+ struct qi_desc *desc; /* invalidation queue */
+ int *desc_status; /* desc status */
+ int free_head; /* first free entry */
+ int free_tail; /* last free entry */
+ int free_cnt;
+};
+
+#ifdef CONFIG_INTR_REMAP
+/* 1MB - maximum possible interrupt remapping table size */
+#define INTR_REMAP_PAGE_ORDER 8
+#define INTR_REMAP_TABLE_REG_SIZE 0xf
+
+#define INTR_REMAP_TABLE_ENTRIES 65536
+
+struct ir_table {
+ struct irte *base;
+};
+#endif
+
+struct iommu_flush {
+ int (*flush_context)(struct intel_iommu *iommu, u16 did, u16 sid, u8 fm,
+ u64 type, int non_present_entry_flush);
+ int (*flush_iotlb)(struct intel_iommu *iommu, u16 did, u64 addr,
+ unsigned int size_order, u64 type, int non_present_entry_flush);
+};
+
+struct intel_iommu {
+ void __iomem *reg; /* Pointer to hardware regs, virtual addr */
+ u64 cap;
+ u64 ecap;
+ int seg;
+ u32 gcmd; /* Holds TE, EAFL. Don't need SRTP, SFL, WBF */
+ spinlock_t register_lock; /* protect register handling */
+ int seq_id; /* sequence id of the iommu */
+
+#ifdef CONFIG_DMAR
+ unsigned long *domain_ids; /* bitmap of domains */
+ struct dmar_domain **domains; /* ptr to domains */
+ spinlock_t lock; /* protect context, domain ids */
+ struct root_entry *root_entry; /* virtual address */
+
+ unsigned int irq;
+ unsigned char name[7]; /* Device Name */
+ struct msi_msg saved_msg;
+ struct sys_device sysdev;
+ struct iommu_flush flush;
+#endif
+ struct q_inval *qi; /* Queued invalidation info */
+#ifdef CONFIG_INTR_REMAP
+ struct ir_table *ir_table; /* Interrupt remapping info */
+#endif
+};
+
+static inline void __iommu_flush_cache(
+ struct intel_iommu *iommu, void *addr, int size)
+{
+ if (!ecap_coherent(iommu->ecap))
+ clflush_cache_range(addr, size);
+}
+
+extern struct dmar_drhd_unit * dmar_find_matched_drhd_unit(struct pci_dev *dev);
+
+extern int alloc_iommu(struct dmar_drhd_unit *drhd);
+extern void free_iommu(struct intel_iommu *iommu);
+extern int dmar_enable_qi(struct intel_iommu *iommu);
+extern void qi_global_iec(struct intel_iommu *iommu);
+
+extern int qi_flush_context(struct intel_iommu *iommu, u16 did, u16 sid,
+ u8 fm, u64 type, int non_present_entry_flush);
+extern int qi_flush_iotlb(struct intel_iommu *iommu, u16 did, u64 addr,
+ unsigned int size_order, u64 type,
+ int non_present_entry_flush);
+
+extern void qi_submit_sync(struct qi_desc *desc, struct intel_iommu *iommu);
+
+void intel_iommu_domain_exit(struct dmar_domain *domain);
+struct dmar_domain *intel_iommu_domain_alloc(struct pci_dev *pdev);
+int intel_iommu_context_mapping(struct dmar_domain *domain,
+ struct pci_dev *pdev);
+int intel_iommu_page_mapping(struct dmar_domain *domain, dma_addr_t iova,
+ u64 hpa, size_t size, int prot);
+void intel_iommu_detach_dev(struct dmar_domain *domain, u8 bus, u8 devfn);
+struct dmar_domain *intel_iommu_find_domain(struct pci_dev *pdev);
+u64 intel_iommu_iova_to_pfn(struct dmar_domain *domain, u64 iova);
+
+#ifdef CONFIG_DMAR
+int intel_iommu_found(void);
+#else /* CONFIG_DMAR */
+static inline int intel_iommu_found(void)
+{
+ return 0;
+}
+#endif /* CONFIG_DMAR */
+
+extern void *intel_alloc_coherent(struct device *, size_t, dma_addr_t *, gfp_t);
+extern void intel_free_coherent(struct device *, size_t, void *, dma_addr_t);
+extern dma_addr_t intel_map_single(struct device *, phys_addr_t, size_t, int);
+extern void intel_unmap_single(struct device *, dma_addr_t, size_t, int);
+extern int intel_map_sg(struct device *, struct scatterlist *, int, int);
+extern void intel_unmap_sg(struct device *, struct scatterlist *, int, int);
+
+#endif
diff --git a/include/linux/interrupt.h b/include/linux/interrupt.h
index 58ff4e74b2f..be3c484b524 100644
--- a/include/linux/interrupt.h
+++ b/include/linux/interrupt.h
@@ -8,9 +8,14 @@
#include <linux/preempt.h>
#include <linux/cpumask.h>
#include <linux/irqreturn.h>
+#include <linux/irqnr.h>
#include <linux/hardirq.h>
#include <linux/sched.h>
#include <linux/irqflags.h>
+#include <linux/smp.h>
+#include <linux/percpu.h>
+#include <linux/irqnr.h>
+
#include <asm/atomic.h>
#include <asm/ptrace.h>
#include <asm/system.h>
@@ -248,10 +253,9 @@ enum
BLOCK_SOFTIRQ,
TASKLET_SOFTIRQ,
SCHED_SOFTIRQ,
-#ifdef CONFIG_HIGH_RES_TIMERS
- HRTIMER_SOFTIRQ,
-#endif
RCU_SOFTIRQ, /* Preferable RCU should always be the last softirq */
+
+ NR_SOFTIRQS
};
/* softirq mask and active fields moved to irq_cpustat_t in
@@ -271,6 +275,25 @@ extern void softirq_init(void);
extern void raise_softirq_irqoff(unsigned int nr);
extern void raise_softirq(unsigned int nr);
+/* This is the worklist that queues up per-cpu softirq work.
+ *
+ * send_remote_softirq() adds work to these lists, and
+ * the softirq handler itself dequeues from them. The queues
+ * are protected by disabling local cpu interrupts and they must
+ * only be accessed by the local cpu that they are for.
+ */
+DECLARE_PER_CPU(struct list_head [NR_SOFTIRQS], softirq_work_list);
+
+/* Try to send a softirq to a remote cpu. If this cannot be done, the
+ * work will be queued to the local cpu.
+ */
+extern void send_remote_softirq(struct call_single_data *cp, int cpu, int softirq);
+
+/* Like send_remote_softirq(), but the caller must disable local cpu interrupts
+ * and compute the current cpu, passed in as 'this_cpu'.
+ */
+extern void __send_remote_softirq(struct call_single_data *cp, int cpu,
+ int this_cpu, int softirq);
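A rough, hypothetical sketch of the intended calling pattern; it assumes the work item embeds the struct call_single_data that the softirq infrastructure links onto the per-cpu lists, and uses BLOCK_SOFTIRQ purely for illustration:

struct my_work {
	struct call_single_data csd;
	/* ... payload ... */
};

static void my_queue_on_cpu(struct my_work *w, int cpu)
{
	/*
	 * Queue w for the chosen softirq on the remote CPU; if that CPU
	 * cannot be reached, the work is queued on the local CPU instead.
	 */
	send_remote_softirq(&w->csd, cpu, BLOCK_SOFTIRQ);
}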
/* Tasklets --- multithreaded analogue of BHs.
diff --git a/include/linux/io-mapping.h b/include/linux/io-mapping.h
new file mode 100644
index 00000000000..82df31726a5
--- /dev/null
+++ b/include/linux/io-mapping.h
@@ -0,0 +1,125 @@
+/*
+ * Copyright © 2008 Keith Packard <keithp@keithp.com>
+ *
+ * This file is free software; you can redistribute it and/or modify
+ * it under the terms of version 2 of the GNU General Public License
+ * as published by the Free Software Foundation.
+ *
+ * This program is distributed in the hope that it will be useful,
+ * but WITHOUT ANY WARRANTY; without even the implied warranty of
+ * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
+ * GNU General Public License for more details.
+ *
+ * You should have received a copy of the GNU General Public License
+ * along with this program; if not, write to the Free Software Foundation,
+ * Inc., 51 Franklin St, Fifth Floor, Boston, MA 02110-1301, USA.
+ */
+
+#ifndef _LINUX_IO_MAPPING_H
+#define _LINUX_IO_MAPPING_H
+
+#include <linux/types.h>
+#include <asm/io.h>
+#include <asm/page.h>
+#include <asm/iomap.h>
+
+/*
+ * The io_mapping mechanism provides an abstraction for mapping
+ * individual pages from an io device to the CPU in an efficient fashion.
+ *
+ * See Documentation/io_mapping.txt
+ */
+
+/* this struct isn't actually defined anywhere */
+struct io_mapping;
+
+#ifdef CONFIG_HAVE_ATOMIC_IOMAP
+
+/*
+ * For small address space machines, mapping large objects
+ * into the kernel virtual space isn't practical. Where
+ * available, use fixmap support to dynamically map pages
+ * of the object at run time.
+ */
+
+static inline struct io_mapping *
+io_mapping_create_wc(unsigned long base, unsigned long size)
+{
+ return (struct io_mapping *) base;
+}
+
+static inline void
+io_mapping_free(struct io_mapping *mapping)
+{
+}
+
+/* Atomic map/unmap */
+static inline void *
+io_mapping_map_atomic_wc(struct io_mapping *mapping, unsigned long offset)
+{
+ offset += (unsigned long) mapping;
+ return iomap_atomic_prot_pfn(offset >> PAGE_SHIFT, KM_USER0,
+ __pgprot(__PAGE_KERNEL_WC));
+}
+
+static inline void
+io_mapping_unmap_atomic(void *vaddr)
+{
+ iounmap_atomic(vaddr, KM_USER0);
+}
+
+static inline void *
+io_mapping_map_wc(struct io_mapping *mapping, unsigned long offset)
+{
+ offset += (unsigned long) mapping;
+ return ioremap_wc(offset, PAGE_SIZE);
+}
+
+static inline void
+io_mapping_unmap(void *vaddr)
+{
+ iounmap(vaddr);
+}
+
+#else
+
+/* Create the io_mapping object */
+static inline struct io_mapping *
+io_mapping_create_wc(unsigned long base, unsigned long size)
+{
+ return (struct io_mapping *) ioremap_wc(base, size);
+}
+
+static inline void
+io_mapping_free(struct io_mapping *mapping)
+{
+ iounmap(mapping);
+}
+
+/* Atomic map/unmap */
+static inline void *
+io_mapping_map_atomic_wc(struct io_mapping *mapping, unsigned long offset)
+{
+ return ((char *) mapping) + offset;
+}
+
+static inline void
+io_mapping_unmap_atomic(void *vaddr)
+{
+}
+
+/* Non-atomic map/unmap */
+static inline void *
+io_mapping_map_wc(struct io_mapping *mapping, unsigned long offset)
+{
+ return ((char *) mapping) + offset;
+}
+
+static inline void
+io_mapping_unmap(void *vaddr)
+{
+}
+
+#endif /* HAVE_ATOMIC_IOMAP */
+
+#endif /* _LINUX_IO_MAPPING_H */
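A minimal usage sketch, assuming a page-aligned offset into a device aperture whose bus address and size were obtained elsewhere (e.g. from a PCI BAR):

static void write_one_register(unsigned long aperture_base,
			       unsigned long aperture_size,
			       unsigned long page_offset, u32 value)
{
	struct io_mapping *map;
	void *vaddr;

	map = io_mapping_create_wc(aperture_base, aperture_size);
	if (!map)
		return;

	vaddr = io_mapping_map_atomic_wc(map, page_offset);
	writel(value, (void __iomem *)vaddr);
	io_mapping_unmap_atomic(vaddr);

	io_mapping_free(map);
}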
diff --git a/include/linux/iommu-helper.h b/include/linux/iommu-helper.h
index a6d0586e2bf..3b068e5b567 100644
--- a/include/linux/iommu-helper.h
+++ b/include/linux/iommu-helper.h
@@ -23,4 +23,7 @@ extern unsigned long iommu_area_alloc(unsigned long *map, unsigned long size,
extern void iommu_area_free(unsigned long *map, unsigned long start,
unsigned int nr);
+extern unsigned long iommu_num_pages(unsigned long addr, unsigned long len,
+ unsigned long io_page_size);
+
#endif
diff --git a/include/linux/ioport.h b/include/linux/ioport.h
index ee9bcc6f32b..041e95aac2b 100644
--- a/include/linux/ioport.h
+++ b/include/linux/ioport.h
@@ -34,7 +34,8 @@ struct resource_list {
*/
#define IORESOURCE_BITS 0x000000ff /* Bus-specific bits */
-#define IORESOURCE_IO 0x00000100 /* Resource type */
+#define IORESOURCE_TYPE_BITS 0x00000f00 /* Resource type */
+#define IORESOURCE_IO 0x00000100
#define IORESOURCE_MEM 0x00000200
#define IORESOURCE_IRQ 0x00000400
#define IORESOURCE_DMA 0x00000800
@@ -126,6 +127,10 @@ static inline resource_size_t resource_size(struct resource *res)
{
return res->end - res->start + 1;
}
+static inline unsigned long resource_type(struct resource *res)
+{
+ return res->flags & IORESOURCE_TYPE_BITS;
+}
/* Convenience shorthand with allocation */
#define request_region(start,n,name) __request_region(&ioport_resource, (start), (n), (name))
@@ -169,6 +174,7 @@ extern struct resource * __devm_request_region(struct device *dev,
extern void __devm_release_region(struct device *dev, struct resource *parent,
resource_size_t start, resource_size_t n);
+extern int iomem_map_sanity_check(resource_size_t addr, unsigned long size);
#endif /* __ASSEMBLY__ */
#endif /* _LINUX_IOPORT_H */
diff --git a/include/linux/iova.h b/include/linux/iova.h
new file mode 100644
index 00000000000..228f6c94b69
--- /dev/null
+++ b/include/linux/iova.h
@@ -0,0 +1,52 @@
+/*
+ * Copyright (c) 2006, Intel Corporation.
+ *
+ * This file is released under the GPLv2.
+ *
+ * Copyright (C) 2006-2008 Intel Corporation
+ * Author: Anil S Keshavamurthy <anil.s.keshavamurthy@intel.com>
+ *
+ */
+
+#ifndef _IOVA_H_
+#define _IOVA_H_
+
+#include <linux/types.h>
+#include <linux/kernel.h>
+#include <linux/rbtree.h>
+#include <linux/dma-mapping.h>
+
+/* IO virtual address start page frame number */
+#define IOVA_START_PFN (1)
+
+/* iova structure */
+struct iova {
+ struct rb_node node;
+ unsigned long pfn_hi; /* highest pfn dished out by the IOMMU */
+ unsigned long pfn_lo; /* lowest pfn dished out by the IOMMU */
+};
+
+/* holds all the iova translations for a domain */
+struct iova_domain {
+ spinlock_t iova_alloc_lock; /* Lock to protect iova allocation */
+ spinlock_t iova_rbtree_lock; /* Lock to protect update of rbtree */
+ struct rb_root rbroot; /* iova domain rbtree root */
+ struct rb_node *cached32_node; /* Save last alloced node */
+ unsigned long dma_32bit_pfn;
+};
+
+struct iova *alloc_iova_mem(void);
+void free_iova_mem(struct iova *iova);
+void free_iova(struct iova_domain *iovad, unsigned long pfn);
+void __free_iova(struct iova_domain *iovad, struct iova *iova);
+struct iova *alloc_iova(struct iova_domain *iovad, unsigned long size,
+ unsigned long limit_pfn,
+ bool size_aligned);
+struct iova *reserve_iova(struct iova_domain *iovad, unsigned long pfn_lo,
+ unsigned long pfn_hi);
+void copy_reserved_iova(struct iova_domain *from, struct iova_domain *to);
+void init_iova_domain(struct iova_domain *iovad, unsigned long pfn_32bit);
+struct iova *find_iova(struct iova_domain *iovad, unsigned long pfn);
+void put_iova_domain(struct iova_domain *iovad);
+
+#endif
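
A sketch of the iova allocator interface declared above; the 32-bit pfn limit derived from DMA_32BIT_MASK, the allocation size, and the example_ names are assumptions, not taken from this diff:

#include <linux/iova.h>

#define EXAMPLE_DMA32_PFN ((unsigned long)(DMA_32BIT_MASK >> PAGE_SHIFT))

static struct iova_domain example_domain;

static int example_iova_alloc(void)
{
	struct iova *iova;

	init_iova_domain(&example_domain, EXAMPLE_DMA32_PFN);

	/* 16 contiguous IO pages, size-aligned, below the 32-bit boundary */
	iova = alloc_iova(&example_domain, 16, EXAMPLE_DMA32_PFN, true);
	if (!iova)
		return -ENOMEM;

	/* iova->pfn_lo .. iova->pfn_hi is the allocated range */
	__free_iova(&example_domain, iova);
	put_iova_domain(&example_domain);
	return 0;
}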
diff --git a/include/linux/ipv6.h b/include/linux/ipv6.h
index 641e026eee8..0b816cae533 100644
--- a/include/linux/ipv6.h
+++ b/include/linux/ipv6.h
@@ -278,6 +278,7 @@ struct ipv6_pinfo {
struct in6_addr saddr;
struct in6_addr rcv_saddr;
struct in6_addr daddr;
+ struct in6_pktinfo sticky_pktinfo;
struct in6_addr *daddr_cache;
#ifdef CONFIG_IPV6_SUBTREES
struct in6_addr *saddr_cache;
diff --git a/include/linux/irq.h b/include/linux/irq.h
index 8d9411bc60f..98564dc6447 100644
--- a/include/linux/irq.h
+++ b/include/linux/irq.h
@@ -18,6 +18,7 @@
#include <linux/spinlock.h>
#include <linux/cpumask.h>
#include <linux/irqreturn.h>
+#include <linux/irqnr.h>
#include <linux/errno.h>
#include <asm/irq.h>
@@ -62,7 +63,8 @@ typedef void (*irq_flow_handler_t)(unsigned int irq,
#define IRQ_MOVE_PENDING 0x00200000 /* need to re-target IRQ destination */
#define IRQ_NO_BALANCING 0x00400000 /* IRQ is excluded from balancing */
#define IRQ_SPURIOUS_DISABLED 0x00800000 /* IRQ was disabled by the spurious trap */
-#define IRQ_MOVE_PCNTXT 0x01000000 /* IRQ migration from process context */
+#define IRQ_MOVE_PCNTXT 0x01000000 /* IRQ migration from process context */
+#define IRQ_AFFINITY_SET 0x02000000 /* IRQ affinity was set from userspace */
#ifdef CONFIG_IRQ_PER_CPU
# define CHECK_IRQ_PER_CPU(var) ((var) & IRQ_PER_CPU)
@@ -127,9 +129,14 @@ struct irq_chip {
const char *typename;
};
+struct timer_rand_state;
+struct irq_2_iommu;
/**
* struct irq_desc - interrupt descriptor
- *
+ * @irq: interrupt number for this descriptor
+ * @timer_rand_state: pointer to timer rand state struct
+ * @kstat_irqs: irq stats per cpu
+ * @irq_2_iommu: IOMMU remapping data associated with this irq
* @handle_irq: highlevel irq-events handler [if NULL, __do_IRQ()]
* @chip: low level interrupt hardware access
* @msi_desc: MSI descriptor
@@ -141,17 +148,24 @@ struct irq_chip {
* @depth: disable-depth, for nested irq_disable() calls
* @wake_depth: enable depth, for multiple set_irq_wake() callers
* @irq_count: stats field to detect stalled irqs
- * @irqs_unhandled: stats field for spurious unhandled interrupts
* @last_unhandled: aging timer for unhandled count
+ * @irqs_unhandled: stats field for spurious unhandled interrupts
* @lock: locking for SMP
* @affinity: IRQ affinity on SMP
* @cpu: cpu index useful for balancing
* @pending_mask: pending rebalanced interrupts
* @dir: /proc/irq/ procfs entry
- * @affinity_entry: /proc/irq/smp_affinity procfs entry on SMP
* @name: flow handler name for /proc/interrupts output
*/
struct irq_desc {
+ unsigned int irq;
+#ifdef CONFIG_SPARSE_IRQ
+ struct timer_rand_state *timer_rand_state;
+ unsigned int *kstat_irqs;
+# ifdef CONFIG_INTR_REMAP
+ struct irq_2_iommu *irq_2_iommu;
+# endif
+#endif
irq_flow_handler_t handle_irq;
struct irq_chip *chip;
struct msi_desc *msi_desc;
@@ -163,14 +177,14 @@ struct irq_desc {
unsigned int depth; /* nested irq disables */
unsigned int wake_depth; /* nested wake enables */
unsigned int irq_count; /* For detecting broken IRQs */
- unsigned int irqs_unhandled;
unsigned long last_unhandled; /* Aging timer for unhandled count */
+ unsigned int irqs_unhandled;
spinlock_t lock;
#ifdef CONFIG_SMP
cpumask_t affinity;
unsigned int cpu;
#endif
-#if defined(CONFIG_GENERIC_PENDING_IRQ) || defined(CONFIG_IRQBALANCE)
+#ifdef CONFIG_GENERIC_PENDING_IRQ
cpumask_t pending_mask;
#endif
#ifdef CONFIG_PROC_FS
@@ -179,8 +193,53 @@ struct irq_desc {
const char *name;
} ____cacheline_internodealigned_in_smp;
+extern void early_irq_init(void);
+extern void arch_early_irq_init(void);
+extern void arch_init_chip_data(struct irq_desc *desc, int cpu);
+extern void arch_init_copy_chip_data(struct irq_desc *old_desc,
+ struct irq_desc *desc, int cpu);
+extern void arch_free_chip_data(struct irq_desc *old_desc, struct irq_desc *desc);
+
+#ifndef CONFIG_SPARSE_IRQ
extern struct irq_desc irq_desc[NR_IRQS];
+static inline struct irq_desc *irq_to_desc(unsigned int irq)
+{
+ return (irq < NR_IRQS) ? irq_desc + irq : NULL;
+}
+static inline struct irq_desc *irq_to_desc_alloc_cpu(unsigned int irq, int cpu)
+{
+ return irq_to_desc(irq);
+}
+
+#else
+
+extern struct irq_desc *irq_to_desc(unsigned int irq);
+extern struct irq_desc *irq_to_desc_alloc_cpu(unsigned int irq, int cpu);
+extern struct irq_desc *move_irq_desc(struct irq_desc *old_desc, int cpu);
+
+# define for_each_irq_desc(irq, desc) \
+ for (irq = 0, desc = irq_to_desc(irq); irq < nr_irqs; irq++, desc = irq_to_desc(irq))
+# define for_each_irq_desc_reverse(irq, desc) \
+ for (irq = nr_irqs - 1, desc = irq_to_desc(irq); irq >= 0; irq--, desc = irq_to_desc(irq))
+
+#define kstat_irqs_this_cpu(DESC) \
+ ((DESC)->kstat_irqs[smp_processor_id()])
+#define kstat_incr_irqs_this_cpu(irqno, DESC) \
+ ((DESC)->kstat_irqs[smp_processor_id()]++)
+
+#endif
+
+static inline struct irq_desc *
+irq_remap_to_desc(unsigned int irq, struct irq_desc *desc)
+{
+#ifdef CONFIG_NUMA_MIGRATE_IRQ_DESC
+ return irq_to_desc(irq);
+#else
+ return desc;
+#endif
+}
+
/*
* Migration helpers for obsolete names, they will go away:
*/
@@ -198,19 +257,14 @@ extern int setup_irq(unsigned int irq, struct irqaction *new);
#ifdef CONFIG_GENERIC_HARDIRQS
-#ifndef handle_dynamic_tick
-# define handle_dynamic_tick(a) do { } while (0)
-#endif
-
#ifdef CONFIG_SMP
-#if defined(CONFIG_GENERIC_PENDING_IRQ) || defined(CONFIG_IRQBALANCE)
+#ifdef CONFIG_GENERIC_PENDING_IRQ
-void set_pending_irq(unsigned int irq, cpumask_t mask);
void move_native_irq(int irq);
void move_masked_irq(int irq);
-#else /* CONFIG_GENERIC_PENDING_IRQ || CONFIG_IRQBALANCE */
+#else /* CONFIG_GENERIC_PENDING_IRQ */
static inline void move_irq(int irq)
{
@@ -224,10 +278,6 @@ static inline void move_masked_irq(int irq)
{
}
-static inline void set_pending_irq(unsigned int irq, cpumask_t mask)
-{
-}
-
#endif /* CONFIG_GENERIC_PENDING_IRQ */
#else /* CONFIG_SMP */
@@ -237,19 +287,14 @@ static inline void set_pending_irq(unsigned int irq, cpumask_t mask)
#endif /* CONFIG_SMP */
-#ifdef CONFIG_IRQBALANCE
-extern void set_balance_irq_affinity(unsigned int irq, cpumask_t mask);
-#else
-static inline void set_balance_irq_affinity(unsigned int irq, cpumask_t mask)
-{
-}
-#endif
-
extern int no_irq_affinity;
static inline int irq_balancing_disabled(unsigned int irq)
{
- return irq_desc[irq].status & IRQ_NO_BALANCING_MASK;
+ struct irq_desc *desc;
+
+ desc = irq_to_desc(irq);
+ return desc->status & IRQ_NO_BALANCING_MASK;
}
/* Handle irq action chains: */
@@ -279,10 +324,8 @@ extern unsigned int __do_IRQ(unsigned int irq);
* irqchip-style controller then we call the ->handle_irq() handler,
* and it calls __do_IRQ() if it's attached to an irqtype-style controller.
*/
-static inline void generic_handle_irq(unsigned int irq)
+static inline void generic_handle_irq_desc(unsigned int irq, struct irq_desc *desc)
{
- struct irq_desc *desc = irq_desc + irq;
-
#ifdef CONFIG_GENERIC_HARDIRQS_NO__DO_IRQ
desc->handle_irq(irq, desc);
#else
@@ -293,6 +336,11 @@ static inline void generic_handle_irq(unsigned int irq)
#endif
}
+static inline void generic_handle_irq(unsigned int irq)
+{
+ generic_handle_irq_desc(irq, irq_to_desc(irq));
+}
+
/* Handling of unhandled and spurious interrupts: */
extern void note_interrupt(unsigned int irq, struct irq_desc *desc,
int action_ret);
@@ -325,7 +373,10 @@ __set_irq_handler(unsigned int irq, irq_flow_handler_t handle, int is_chained,
static inline void __set_irq_handler_unlocked(int irq,
irq_flow_handler_t handler)
{
- irq_desc[irq].handle_irq = handler;
+ struct irq_desc *desc;
+
+ desc = irq_to_desc(irq);
+ desc->handle_irq = handler;
}
/*
@@ -353,13 +404,14 @@ extern void set_irq_noprobe(unsigned int irq);
extern void set_irq_probe(unsigned int irq);
/* Handle dynamic irq creation and destruction */
+extern unsigned int create_irq_nr(unsigned int irq_want);
extern int create_irq(void);
extern void destroy_irq(unsigned int irq);
/* Test to see if a driver has successfully requested an irq */
static inline int irq_has_action(unsigned int irq)
{
- struct irq_desc *desc = irq_desc + irq;
+ struct irq_desc *desc = irq_to_desc(irq);
return desc->action != NULL;
}
@@ -374,10 +426,15 @@ extern int set_irq_chip_data(unsigned int irq, void *data);
extern int set_irq_type(unsigned int irq, unsigned int type);
extern int set_irq_msi(unsigned int irq, struct msi_desc *entry);
-#define get_irq_chip(irq) (irq_desc[irq].chip)
-#define get_irq_chip_data(irq) (irq_desc[irq].chip_data)
-#define get_irq_data(irq) (irq_desc[irq].handler_data)
-#define get_irq_msi(irq) (irq_desc[irq].msi_desc)
+#define get_irq_chip(irq) (irq_to_desc(irq)->chip)
+#define get_irq_chip_data(irq) (irq_to_desc(irq)->chip_data)
+#define get_irq_data(irq) (irq_to_desc(irq)->handler_data)
+#define get_irq_msi(irq) (irq_to_desc(irq)->msi_desc)
+
+#define get_irq_desc_chip(desc) ((desc)->chip)
+#define get_irq_desc_chip_data(desc) ((desc)->chip_data)
+#define get_irq_desc_data(desc) ((desc)->handler_data)
+#define get_irq_desc_msi(desc) ((desc)->msi_desc)
#endif /* CONFIG_GENERIC_HARDIRQS */
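
A sketch of the new descriptor accessors above, replacing direct irq_desc[] indexing; it assumes CONFIG_GENERIC_HARDIRQS and the example_ function is made up:

#include <linux/irq.h>
#include <linux/kernel.h>

static void example_dump_irqs(void)
{
	struct irq_desc *desc;
	unsigned int irq;

	for_each_irq_nr(irq) {
		desc = irq_to_desc(irq);
		if (!desc)			/* sparse irq space can have holes */
			continue;
		if (desc->action)		/* a driver has requested this irq */
			printk(KERN_INFO "irq %u: %s\n", irq, desc->name);
	}
}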
diff --git a/include/linux/irqnr.h b/include/linux/irqnr.h
new file mode 100644
index 00000000000..95d2b74641f
--- /dev/null
+++ b/include/linux/irqnr.h
@@ -0,0 +1,38 @@
+#ifndef _LINUX_IRQNR_H
+#define _LINUX_IRQNR_H
+
+/*
+ * Generic irq_desc iterators:
+ */
+#ifdef __KERNEL__
+
+#ifndef CONFIG_GENERIC_HARDIRQS
+#include <asm/irq.h>
+# define nr_irqs NR_IRQS
+
+# define for_each_irq_desc(irq, desc) \
+ for (irq = 0; irq < nr_irqs; irq++)
+
+# define for_each_irq_desc_reverse(irq, desc) \
+ for (irq = nr_irqs - 1; irq >= 0; irq--)
+#else
+
+extern int nr_irqs;
+
+#ifndef CONFIG_SPARSE_IRQ
+
+struct irq_desc;
+# define for_each_irq_desc(irq, desc) \
+ for (irq = 0, desc = irq_desc; irq < nr_irqs; irq++, desc++)
+# define for_each_irq_desc_reverse(irq, desc) \
+ for (irq = nr_irqs - 1, desc = irq_desc + (nr_irqs - 1); \
+ irq >= 0; irq--, desc--)
+#endif
+#endif
+
+#define for_each_irq_nr(irq) \
+ for (irq = 0; irq < nr_irqs; irq++)
+
+#endif /* __KERNEL__ */
+
+#endif
diff --git a/include/linux/jbd.h b/include/linux/jbd.h
index 07a9b52a265..346e2b80be7 100644
--- a/include/linux/jbd.h
+++ b/include/linux/jbd.h
@@ -61,7 +61,7 @@ extern u8 journal_enable_debug;
do { \
if ((n) <= journal_enable_debug) { \
printk (KERN_DEBUG "(%s, %d): %s: ", \
- __FILE__, __LINE__, __FUNCTION__); \
+ __FILE__, __LINE__, __func__); \
printk (f, ## a); \
} \
} while (0)
@@ -816,6 +816,9 @@ struct journal_s
#define JFS_FLUSHED 0x008 /* The journal superblock has been flushed */
#define JFS_LOADED 0x010 /* The journal superblock has been loaded */
#define JFS_BARRIER 0x020 /* Use IDE barriers */
+#define JFS_ABORT_ON_SYNCDATA_ERR 0x040 /* Abort the journal on file
+ * data write error in ordered
+ * mode */
/*
* Function declarations for the journaling transaction and buffer
@@ -908,7 +911,7 @@ extern int journal_set_features
(journal_t *, unsigned long, unsigned long, unsigned long);
extern int journal_create (journal_t *);
extern int journal_load (journal_t *journal);
-extern void journal_destroy (journal_t *);
+extern int journal_destroy (journal_t *);
extern int journal_recover (journal_t *journal);
extern int journal_wipe (journal_t *, int);
extern int journal_skip_recovery (journal_t *);
@@ -984,7 +987,7 @@ extern int cleanup_journal_tail(journal_t *);
#define jbd_ENOSYS() \
do { \
- printk (KERN_ERR "JBD unimplemented function %s\n", __FUNCTION__); \
+ printk (KERN_ERR "JBD unimplemented function %s\n", __func__); \
current->state = TASK_UNINTERRUPTIBLE; \
schedule(); \
} while (1)
diff --git a/include/linux/jbd2.h b/include/linux/jbd2.h
index d2e91ea998f..c7d106ef22e 100644
--- a/include/linux/jbd2.h
+++ b/include/linux/jbd2.h
@@ -61,7 +61,7 @@ extern u8 jbd2_journal_enable_debug;
do { \
if ((n) <= jbd2_journal_enable_debug) { \
printk (KERN_DEBUG "(%s, %d): %s: ", \
- __FILE__, __LINE__, __FUNCTION__); \
+ __FILE__, __LINE__, __func__); \
printk (f, ## a); \
} \
} while (0)
@@ -641,6 +641,11 @@ struct transaction_s
*/
int t_handle_count;
+ /*
+ * For use by the filesystem to store fs-specific data
+ * structures associated with the transaction
+ */
+ struct list_head t_private_list;
};
struct transaction_run_stats_s {
@@ -935,6 +940,10 @@ struct journal_s
pid_t j_last_sync_writer;
+ /* This function is called when a transaction is closed */
+ void (*j_commit_callback)(journal_t *,
+ transaction_t *);
+
/*
* Journal statistics
*/
@@ -1143,7 +1152,7 @@ extern int jbd2_cleanup_journal_tail(journal_t *);
#define jbd_ENOSYS() \
do { \
- printk (KERN_ERR "JBD unimplemented function %s\n", __FUNCTION__); \
+ printk (KERN_ERR "JBD unimplemented function %s\n", __func__); \
current->state = TASK_UNINTERRUPTIBLE; \
schedule(); \
} while (1)
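
A sketch of a filesystem wiring up the new per-transaction commit callback and private list shown above; the example_ names are invented:

#include <linux/jbd2.h>

static void example_commit_callback(journal_t *journal, transaction_t *txn)
{
	/* process whatever the filesystem queued on txn->t_private_list */
}

static void example_wire_up(journal_t *journal)
{
	journal->j_commit_callback = example_commit_callback;
}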
diff --git a/include/linux/jiffies.h b/include/linux/jiffies.h
index abb6ac639e8..1a9cf78bfce 100644
--- a/include/linux/jiffies.h
+++ b/include/linux/jiffies.h
@@ -115,10 +115,20 @@ static inline u64 get_jiffies_64(void)
((long)(a) - (long)(b) >= 0))
#define time_before_eq(a,b) time_after_eq(b,a)
+/*
+ * Calculate whether a is in the range of [b, c].
+ */
#define time_in_range(a,b,c) \
(time_after_eq(a,b) && \
time_before_eq(a,c))
+/*
+ * Calculate whether a is in the range of [b, c).
+ */
+#define time_in_range_open(a,b,c) \
+ (time_after_eq(a,b) && \
+ time_before(a,c))
+
/* Same as above, but does so with platform independent 64bit types.
* These must be used when utilizing jiffies_64 (i.e. return value of
* get_jiffies_64() */
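
A short sketch of the new half-open interval helper; the one-second window and example_ name are illustrative:

#include <linux/jiffies.h>

static int example_in_window(unsigned long start)
{
	unsigned long end = start + HZ;		/* one-second window */

	/* true while jiffies is in [start, end) */
	return time_in_range_open(jiffies, start, end);
}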
diff --git a/include/linux/journal-head.h b/include/linux/journal-head.h
index 8a62d1e84b9..bb70ebb6a2d 100644
--- a/include/linux/journal-head.h
+++ b/include/linux/journal-head.h
@@ -3,7 +3,7 @@
*
* buffer_head fields for JBD
*
- * 27 May 2001 Andrew Morton <akpm@digeo.com>
+ * 27 May 2001 Andrew Morton
* Created - pulled out of fs.h
*/
diff --git a/include/linux/kallsyms.h b/include/linux/kallsyms.h
index b9614488744..f3fe34391d8 100644
--- a/include/linux/kallsyms.h
+++ b/include/linux/kallsyms.h
@@ -93,12 +93,10 @@ static inline void print_symbol(const char *fmt, unsigned long addr)
}
/*
- * Pretty-print a function pointer.
- *
- * ia64 and ppc64 function pointers are really function descriptors,
- * which contain a pointer the real address.
+ * Pretty-print a function pointer. This function is deprecated.
+ * Please use the "%pF" vsprintf format instead.
*/
-static inline void print_fn_descriptor_symbol(const char *fmt, void *addr)
+static inline void __deprecated print_fn_descriptor_symbol(const char *fmt, void *addr)
{
#if defined(CONFIG_IA64) || defined(CONFIG_PPC64)
addr = *(void **)addr;
diff --git a/include/linux/kernel.h b/include/linux/kernel.h
index 75d81f157d2..ca9ff6411df 100644
--- a/include/linux/kernel.h
+++ b/include/linux/kernel.h
@@ -16,6 +16,7 @@
#include <linux/log2.h>
#include <linux/typecheck.h>
#include <linux/ratelimit.h>
+#include <linux/dynamic_printk.h>
#include <asm/byteorder.h>
#include <asm/bug.h>
@@ -115,6 +116,8 @@ extern int _cond_resched(void);
# define might_resched() do { } while (0)
#endif
+#ifdef CONFIG_DEBUG_SPINLOCK_SLEEP
+ void __might_sleep(char *file, int line);
/**
* might_sleep - annotation for functions that can sleep
*
@@ -125,8 +128,6 @@ extern int _cond_resched(void);
* be bitten later when the calling function happens to sleep when it is not
* supposed to.
*/
-#ifdef CONFIG_DEBUG_SPINLOCK_SLEEP
- void __might_sleep(char *file, int line);
# define might_sleep() \
do { __might_sleep(__FILE__, __LINE__); might_resched(); } while (0)
#else
@@ -140,6 +141,15 @@ extern int _cond_resched(void);
(__x < 0) ? -__x : __x; \
})
+#ifdef CONFIG_PROVE_LOCKING
+void might_fault(void);
+#else
+static inline void might_fault(void)
+{
+ might_sleep();
+}
+#endif
+
extern struct atomic_notifier_head panic_notifier_list;
extern long (*panic_blink)(long time);
NORET_TYPE void panic(const char * fmt, ...)
@@ -187,9 +197,35 @@ extern unsigned long long memparse(const char *ptr, char **retptr);
extern int core_kernel_text(unsigned long addr);
extern int __kernel_text_address(unsigned long addr);
extern int kernel_text_address(unsigned long addr);
+extern int func_ptr_is_kernel_text(void *ptr);
+
struct pid;
extern struct pid *session_of_pgrp(struct pid *pgrp);
+/*
+ * FW_BUG
+ * Add this to a message where you are sure the firmware is buggy or behaves
+ * really stupidly or out of spec. Be aware that the responsible BIOS developer
+ * should be able to fix this issue, or at least get a concrete idea of the
+ * problem, by reading your message without needing to look at the kernel
+ * code.
+ *
+ * Use it for definite and high priority BIOS bugs.
+ *
+ * FW_WARN
+ * Use it for less clear-cut cases (e.g. could the kernel have messed things
+ * up already?) and medium priority BIOS bugs.
+ *
+ * FW_INFO
+ * Use this one if you want to tell the user or vendor about something
+ * suspicious, but generally harmless, related to the firmware.
+ *
+ * Use it for information or very low priority BIOS bugs.
+ */
+#define FW_BUG "[Firmware Bug]: "
+#define FW_WARN "[Firmware Warn]: "
+#define FW_INFO "[Firmware Info]: "
+
#ifdef CONFIG_PRINTK
asmlinkage int vprintk(const char *fmt, va_list args)
__attribute__ ((format (printf, 1, 0)));
@@ -213,6 +249,9 @@ static inline bool printk_timed_ratelimit(unsigned long *caller_jiffies, \
{ return false; }
#endif
+extern int printk_needs_cpu(int cpu);
+extern void printk_tick(void);
+
extern void asmlinkage __attribute__((format(printf, 1, 2)))
early_printk(const char *fmt, ...);
@@ -235,9 +274,10 @@ extern int oops_in_progress; /* If set, an oops, panic(), BUG() or die() is in
extern int panic_timeout;
extern int panic_on_oops;
extern int panic_on_unrecovered_nmi;
-extern int tainted;
extern const char *print_tainted(void);
-extern void add_taint(unsigned);
+extern void add_taint(unsigned flag);
+extern int test_taint(unsigned flag);
+extern unsigned long get_taint(void);
extern int root_mountflags;
/* Values used for system_state */
@@ -250,16 +290,17 @@ extern enum system_states {
SYSTEM_SUSPEND_DISK,
} system_state;
-#define TAINT_PROPRIETARY_MODULE (1<<0)
-#define TAINT_FORCED_MODULE (1<<1)
-#define TAINT_UNSAFE_SMP (1<<2)
-#define TAINT_FORCED_RMMOD (1<<3)
-#define TAINT_MACHINE_CHECK (1<<4)
-#define TAINT_BAD_PAGE (1<<5)
-#define TAINT_USER (1<<6)
-#define TAINT_DIE (1<<7)
-#define TAINT_OVERRIDDEN_ACPI_TABLE (1<<8)
-#define TAINT_WARN (1<<9)
+#define TAINT_PROPRIETARY_MODULE 0
+#define TAINT_FORCED_MODULE 1
+#define TAINT_UNSAFE_SMP 2
+#define TAINT_FORCED_RMMOD 3
+#define TAINT_MACHINE_CHECK 4
+#define TAINT_BAD_PAGE 5
+#define TAINT_USER 6
+#define TAINT_DIE 7
+#define TAINT_OVERRIDDEN_ACPI_TABLE 8
+#define TAINT_WARN 9
+#define TAINT_CRAP 10
extern void dump_stack(void) __cold;
@@ -288,28 +329,36 @@ static inline char *pack_hex_byte(char *buf, u8 byte)
return buf;
}
-#define pr_emerg(fmt, arg...) \
- printk(KERN_EMERG fmt, ##arg)
-#define pr_alert(fmt, arg...) \
- printk(KERN_ALERT fmt, ##arg)
-#define pr_crit(fmt, arg...) \
- printk(KERN_CRIT fmt, ##arg)
-#define pr_err(fmt, arg...) \
- printk(KERN_ERR fmt, ##arg)
-#define pr_warning(fmt, arg...) \
- printk(KERN_WARNING fmt, ##arg)
-#define pr_notice(fmt, arg...) \
- printk(KERN_NOTICE fmt, ##arg)
-#define pr_info(fmt, arg...) \
- printk(KERN_INFO fmt, ##arg)
-
-#ifdef DEBUG
+#ifndef pr_fmt
+#define pr_fmt(fmt) fmt
+#endif
+
+#define pr_emerg(fmt, ...) \
+ printk(KERN_EMERG pr_fmt(fmt), ##__VA_ARGS__)
+#define pr_alert(fmt, ...) \
+ printk(KERN_ALERT pr_fmt(fmt), ##__VA_ARGS__)
+#define pr_crit(fmt, ...) \
+ printk(KERN_CRIT pr_fmt(fmt), ##__VA_ARGS__)
+#define pr_err(fmt, ...) \
+ printk(KERN_ERR pr_fmt(fmt), ##__VA_ARGS__)
+#define pr_warning(fmt, ...) \
+ printk(KERN_WARNING pr_fmt(fmt), ##__VA_ARGS__)
+#define pr_notice(fmt, ...) \
+ printk(KERN_NOTICE pr_fmt(fmt), ##__VA_ARGS__)
+#define pr_info(fmt, ...) \
+ printk(KERN_INFO pr_fmt(fmt), ##__VA_ARGS__)
+
/* If you are writing a driver, please use dev_dbg instead */
-#define pr_debug(fmt, arg...) \
- printk(KERN_DEBUG fmt, ##arg)
+#if defined(CONFIG_DYNAMIC_PRINTK_DEBUG)
+#define pr_debug(fmt, ...) do { \
+ dynamic_pr_debug(pr_fmt(fmt), ##__VA_ARGS__); \
+ } while (0)
+#elif defined(DEBUG)
+#define pr_debug(fmt, ...) \
+ printk(KERN_DEBUG pr_fmt(fmt), ##__VA_ARGS__)
#else
-#define pr_debug(fmt, arg...) \
- ({ if (0) printk(KERN_DEBUG fmt, ##arg); 0; })
+#define pr_debug(fmt, ...) \
+ ({ if (0) printk(KERN_DEBUG pr_fmt(fmt), ##__VA_ARGS__); 0; })
#endif
/*
@@ -323,18 +372,6 @@ static inline char *pack_hex_byte(char *buf, u8 byte)
((unsigned char *)&addr)[3]
#define NIPQUAD_FMT "%u.%u.%u.%u"
-#define NIP6(addr) \
- ntohs((addr).s6_addr16[0]), \
- ntohs((addr).s6_addr16[1]), \
- ntohs((addr).s6_addr16[2]), \
- ntohs((addr).s6_addr16[3]), \
- ntohs((addr).s6_addr16[4]), \
- ntohs((addr).s6_addr16[5]), \
- ntohs((addr).s6_addr16[6]), \
- ntohs((addr).s6_addr16[7])
-#define NIP6_FMT "%04x:%04x:%04x:%04x:%04x:%04x:%04x:%04x"
-#define NIP6_SEQFMT "%04x%04x%04x%04x%04x%04x%04x%04x"
-
#if defined(__LITTLE_ENDIAN)
#define HIPQUAD(addr) \
((unsigned char *)&addr)[3], \
@@ -486,4 +523,9 @@ struct sysinfo {
#define NUMA_BUILD 0
#endif
+/* Rebuild everything on CONFIG_FTRACE_MCOUNT_RECORD */
+#ifdef CONFIG_FTRACE_MCOUNT_RECORD
+# define REBUILD_DUE_TO_FTRACE_MCOUNT_RECORD
+#endif
+
#endif
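
A sketch of the reworked pr_* macros above, using pr_fmt() to prefix every message; the driver name and example_ function are made up:

/* pr_fmt() must be defined before <linux/kernel.h> is included */
#define pr_fmt(fmt) "exampledrv: " fmt

#include <linux/kernel.h>

static void example_announce(void)
{
	pr_info("probe ok\n");		/* emits "exampledrv: probe ok" */
	pr_debug("fast path\n");	/* routed through dynamic printk when enabled */
}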
diff --git a/include/linux/kernel_stat.h b/include/linux/kernel_stat.h
index cf9f40a91c9..4ee4b3d2316 100644
--- a/include/linux/kernel_stat.h
+++ b/include/linux/kernel_stat.h
@@ -28,7 +28,9 @@ struct cpu_usage_stat {
struct kernel_stat {
struct cpu_usage_stat cpustat;
- unsigned int irqs[NR_IRQS];
+#ifndef CONFIG_SPARSE_IRQ
+ unsigned int irqs[NR_IRQS];
+#endif
};
DECLARE_PER_CPU(struct kernel_stat, kstat);
@@ -39,19 +41,44 @@ DECLARE_PER_CPU(struct kernel_stat, kstat);
extern unsigned long long nr_context_switches(void);
+#ifndef CONFIG_SPARSE_IRQ
+#define kstat_irqs_this_cpu(irq) \
+ (kstat_this_cpu.irqs[irq])
+
+struct irq_desc;
+
+static inline void kstat_incr_irqs_this_cpu(unsigned int irq,
+ struct irq_desc *desc)
+{
+ kstat_this_cpu.irqs[irq]++;
+}
+#endif
+
+
+#ifndef CONFIG_SPARSE_IRQ
+static inline unsigned int kstat_irqs_cpu(unsigned int irq, int cpu)
+{
+ return kstat_cpu(cpu).irqs[irq];
+}
+#else
+extern unsigned int kstat_irqs_cpu(unsigned int irq, int cpu);
+#endif
+
/*
* Number of interrupts per specific IRQ source, since bootup
*/
-static inline int kstat_irqs(int irq)
+static inline unsigned int kstat_irqs(unsigned int irq)
{
- int cpu, sum = 0;
+ unsigned int sum = 0;
+ int cpu;
for_each_possible_cpu(cpu)
- sum += kstat_cpu(cpu).irqs[irq];
+ sum += kstat_irqs_cpu(irq, cpu);
return sum;
}
+extern unsigned long long task_delta_exec(struct task_struct *);
extern void account_user_time(struct task_struct *, cputime_t);
extern void account_user_time_scaled(struct task_struct *, cputime_t);
extern void account_system_time(struct task_struct *, int, cputime_t);
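
A sketch of the reworked per-IRQ statistics helpers above; the example_ function is hypothetical:

#include <linux/kernel_stat.h>
#include <linux/cpumask.h>

static void example_irq_stats(unsigned int irq)
{
	int cpu;

	for_each_possible_cpu(cpu)
		printk(KERN_INFO "irq %u, cpu %d: %u\n",
		       irq, cpu, kstat_irqs_cpu(irq, cpu));

	printk(KERN_INFO "irq %u total: %u\n", irq, kstat_irqs(irq));
}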
diff --git a/include/linux/kexec.h b/include/linux/kexec.h
index 17f76fc0517..adc34f2c6ef 100644
--- a/include/linux/kexec.h
+++ b/include/linux/kexec.h
@@ -100,6 +100,10 @@ struct kimage {
#define KEXEC_TYPE_DEFAULT 0
#define KEXEC_TYPE_CRASH 1
unsigned int preserve_context : 1;
+
+#ifdef ARCH_HAS_KIMAGE_ARCH
+ struct kimage_arch arch;
+#endif
};
diff --git a/include/linux/key-ui.h b/include/linux/key-ui.h
deleted file mode 100644
index e8b8a7a5c49..00000000000
--- a/include/linux/key-ui.h
+++ /dev/null
@@ -1,66 +0,0 @@
-/* key-ui.h: key userspace interface stuff
- *
- * Copyright (C) 2004 Red Hat, Inc. All Rights Reserved.
- * Written by David Howells (dhowells@redhat.com)
- *
- * This program is free software; you can redistribute it and/or
- * modify it under the terms of the GNU General Public License
- * as published by the Free Software Foundation; either version
- * 2 of the License, or (at your option) any later version.
- */
-
-#ifndef _LINUX_KEY_UI_H
-#define _LINUX_KEY_UI_H
-
-#include <linux/key.h>
-
-/* the key tree */
-extern struct rb_root key_serial_tree;
-extern spinlock_t key_serial_lock;
-
-/* required permissions */
-#define KEY_VIEW 0x01 /* require permission to view attributes */
-#define KEY_READ 0x02 /* require permission to read content */
-#define KEY_WRITE 0x04 /* require permission to update / modify */
-#define KEY_SEARCH 0x08 /* require permission to search (keyring) or find (key) */
-#define KEY_LINK 0x10 /* require permission to link */
-#define KEY_SETATTR 0x20 /* require permission to change attributes */
-#define KEY_ALL 0x3f /* all the above permissions */
-
-/*
- * the keyring payload contains a list of the keys to which the keyring is
- * subscribed
- */
-struct keyring_list {
- struct rcu_head rcu; /* RCU deletion hook */
- unsigned short maxkeys; /* max keys this list can hold */
- unsigned short nkeys; /* number of keys currently held */
- unsigned short delkey; /* key to be unlinked by RCU */
- struct key *keys[0];
-};
-
-/*
- * check to see whether permission is granted to use a key in the desired way
- */
-extern int key_task_permission(const key_ref_t key_ref,
- struct task_struct *context,
- key_perm_t perm);
-
-static inline int key_permission(const key_ref_t key_ref, key_perm_t perm)
-{
- return key_task_permission(key_ref, current, perm);
-}
-
-extern key_ref_t lookup_user_key(struct task_struct *context,
- key_serial_t id, int create, int partial,
- key_perm_t perm);
-
-extern long join_session_keyring(const char *name);
-
-extern struct key_type *key_type_lookup(const char *type);
-extern void key_type_put(struct key_type *ktype);
-
-#define key_negative_timeout 60 /* default timeout on a negative key's existence */
-
-
-#endif /* _LINUX_KEY_UI_H */
diff --git a/include/linux/key.h b/include/linux/key.h
index 1b70e35a71e..21d32a142c0 100644
--- a/include/linux/key.h
+++ b/include/linux/key.h
@@ -73,6 +73,7 @@ struct key;
struct seq_file;
struct user_struct;
struct signal_struct;
+struct cred;
struct key_type;
struct key_owner;
@@ -181,7 +182,7 @@ struct key {
extern struct key *key_alloc(struct key_type *type,
const char *desc,
uid_t uid, gid_t gid,
- struct task_struct *ctx,
+ const struct cred *cred,
key_perm_t perm,
unsigned long flags);
@@ -249,7 +250,7 @@ extern int key_unlink(struct key *keyring,
struct key *key);
extern struct key *keyring_alloc(const char *description, uid_t uid, gid_t gid,
- struct task_struct *ctx,
+ const struct cred *cred,
unsigned long flags,
struct key *dest);
@@ -276,24 +277,11 @@ extern ctl_table key_sysctls[];
/*
* the userspace interface
*/
-extern void switch_uid_keyring(struct user_struct *new_user);
-extern int copy_keys(unsigned long clone_flags, struct task_struct *tsk);
-extern int copy_thread_group_keys(struct task_struct *tsk);
-extern void exit_keys(struct task_struct *tsk);
-extern void exit_thread_group_keys(struct signal_struct *tg);
-extern int suid_keys(struct task_struct *tsk);
-extern int exec_keys(struct task_struct *tsk);
+extern int install_thread_keyring_to_cred(struct cred *cred);
extern void key_fsuid_changed(struct task_struct *tsk);
extern void key_fsgid_changed(struct task_struct *tsk);
extern void key_init(void);
-#define __install_session_keyring(tsk, keyring) \
-({ \
- struct key *old_session = tsk->signal->session_keyring; \
- tsk->signal->session_keyring = keyring; \
- old_session; \
-})
-
#else /* CONFIG_KEYS */
#define key_validate(k) 0
@@ -302,17 +290,9 @@ extern void key_init(void);
#define key_revoke(k) do { } while(0)
#define key_put(k) do { } while(0)
#define key_ref_put(k) do { } while(0)
-#define make_key_ref(k, p) ({ NULL; })
-#define key_ref_to_ptr(k) ({ NULL; })
+#define make_key_ref(k, p) NULL
+#define key_ref_to_ptr(k) NULL
#define is_key_possessed(k) 0
-#define switch_uid_keyring(u) do { } while(0)
-#define __install_session_keyring(t, k) ({ NULL; })
-#define copy_keys(f,t) 0
-#define copy_thread_group_keys(t) 0
-#define exit_keys(t) do { } while(0)
-#define exit_thread_group_keys(tg) do { } while(0)
-#define suid_keys(t) do { } while(0)
-#define exec_keys(t) do { } while(0)
#define key_fsuid_changed(t) do { } while(0)
#define key_fsgid_changed(t) do { } while(0)
#define key_init() do { } while(0)
diff --git a/include/linux/keyctl.h b/include/linux/keyctl.h
index 656ee6b77a4..c0688eb7209 100644
--- a/include/linux/keyctl.h
+++ b/include/linux/keyctl.h
@@ -1,6 +1,6 @@
/* keyctl.h: keyctl command IDs
*
- * Copyright (C) 2004 Red Hat, Inc. All Rights Reserved.
+ * Copyright (C) 2004, 2008 Red Hat, Inc. All Rights Reserved.
* Written by David Howells (dhowells@redhat.com)
*
* This program is free software; you can redistribute it and/or
@@ -20,6 +20,7 @@
#define KEY_SPEC_USER_SESSION_KEYRING -5 /* - key ID for UID-session keyring */
#define KEY_SPEC_GROUP_KEYRING -6 /* - key ID for GID-specific keyring */
#define KEY_SPEC_REQKEY_AUTH_KEY -7 /* - key ID for assumed request_key auth key */
+#define KEY_SPEC_REQUESTOR_KEYRING -8 /* - key ID for request_key() dest keyring */
/* request-key default keyrings */
#define KEY_REQKEY_DEFL_NO_CHANGE -1
@@ -30,6 +31,7 @@
#define KEY_REQKEY_DEFL_USER_KEYRING 4
#define KEY_REQKEY_DEFL_USER_SESSION_KEYRING 5
#define KEY_REQKEY_DEFL_GROUP_KEYRING 6
+#define KEY_REQKEY_DEFL_REQUESTOR_KEYRING 7
/* keyctl commands */
#define KEYCTL_GET_KEYRING_ID 0 /* ask for a keyring's ID */
diff --git a/include/linux/kmod.h b/include/linux/kmod.h
index a1a91577813..92213a9194e 100644
--- a/include/linux/kmod.h
+++ b/include/linux/kmod.h
@@ -99,4 +99,7 @@ struct file;
extern int call_usermodehelper_pipe(char *path, char *argv[], char *envp[],
struct file **filp);
+extern int usermodehelper_disable(void);
+extern void usermodehelper_enable(void);
+
#endif /* __LINUX_KMOD_H__ */
diff --git a/include/linux/kprobes.h b/include/linux/kprobes.h
index 0be7795655f..497b1d1f7a0 100644
--- a/include/linux/kprobes.h
+++ b/include/linux/kprobes.h
@@ -29,6 +29,7 @@
* <jkenisto@us.ibm.com> and Prasanna S Panchamukhi
* <prasanna@in.ibm.com> added function-return probes.
*/
+#include <linux/linkage.h>
#include <linux/list.h>
#include <linux/notifier.h>
#include <linux/smp.h>
@@ -47,7 +48,7 @@
#define KPROBE_HIT_SSDONE 0x00000008
/* Attach to insert probes on any functions which should be ignored*/
-#define __kprobes __attribute__((__section__(".kprobes.text")))
+#define __kprobes __attribute__((__section__(".kprobes.text"))) notrace
struct kprobe;
struct pt_regs;
@@ -256,7 +257,7 @@ void recycle_rp_inst(struct kretprobe_instance *ri, struct hlist_head *head);
#else /* CONFIG_KPROBES */
-#define __kprobes /**/
+#define __kprobes notrace
struct jprobe;
struct kretprobe;
diff --git a/include/linux/kvm.h b/include/linux/kvm.h
index 70a30651cd1..f18b86fa865 100644
--- a/include/linux/kvm.h
+++ b/include/linux/kvm.h
@@ -311,22 +311,33 @@ struct kvm_s390_interrupt {
/* This structure represents a single trace buffer record. */
struct kvm_trace_rec {
- __u32 event:28;
- __u32 extra_u32:3;
- __u32 cycle_in:1;
+ /* variable rec_val
+ * is split into:
+ * bits 0 - 27 -> event id
+ * bits 28 - 30 -> number of extra data args of size u32
+ * bit 31 -> flag for whether the tsc is in the record
+ */
+ __u32 rec_val;
__u32 pid;
__u32 vcpu_id;
union {
struct {
- __u64 cycle_u64;
+ __u64 timestamp;
__u32 extra_u32[KVM_TRC_EXTRA_MAX];
- } __attribute__((packed)) cycle;
+ } __attribute__((packed)) timestamp;
struct {
__u32 extra_u32[KVM_TRC_EXTRA_MAX];
- } nocycle;
+ } notimestamp;
} u;
};
+#define TRACE_REC_EVENT_ID(val) \
+ (0x0fffffff & (val))
+#define TRACE_REC_NUM_DATA_ARGS(val) \
+ (0x70000000 & ((val) << 28))
+#define TRACE_REC_TCS(val) \
+ (0x80000000 & ((val) << 31))
+
#define KVMIO 0xAE
/*
@@ -372,6 +383,10 @@ struct kvm_trace_rec {
#define KVM_CAP_MP_STATE 14
#define KVM_CAP_COALESCED_MMIO 15
#define KVM_CAP_SYNC_MMU 16 /* Changes to host mmap are reflected in guest */
+#if defined(CONFIG_X86) || defined(CONFIG_IA64)
+#define KVM_CAP_DEVICE_ASSIGNMENT 17
+#endif
+#define KVM_CAP_IOMMU 18
/*
* ioctls for VM fds
@@ -401,6 +416,10 @@ struct kvm_trace_rec {
_IOW(KVMIO, 0x67, struct kvm_coalesced_mmio_zone)
#define KVM_UNREGISTER_COALESCED_MMIO \
_IOW(KVMIO, 0x68, struct kvm_coalesced_mmio_zone)
+#define KVM_ASSIGN_PCI_DEVICE _IOR(KVMIO, 0x69, \
+ struct kvm_assigned_pci_dev)
+#define KVM_ASSIGN_IRQ _IOR(KVMIO, 0x70, \
+ struct kvm_assigned_irq)
/*
* ioctls for vcpu fds
@@ -440,4 +459,51 @@ struct kvm_trace_rec {
#define KVM_GET_MP_STATE _IOR(KVMIO, 0x98, struct kvm_mp_state)
#define KVM_SET_MP_STATE _IOW(KVMIO, 0x99, struct kvm_mp_state)
+#define KVM_TRC_INJ_VIRQ (KVM_TRC_HANDLER + 0x02)
+#define KVM_TRC_REDELIVER_EVT (KVM_TRC_HANDLER + 0x03)
+#define KVM_TRC_PEND_INTR (KVM_TRC_HANDLER + 0x04)
+#define KVM_TRC_IO_READ (KVM_TRC_HANDLER + 0x05)
+#define KVM_TRC_IO_WRITE (KVM_TRC_HANDLER + 0x06)
+#define KVM_TRC_CR_READ (KVM_TRC_HANDLER + 0x07)
+#define KVM_TRC_CR_WRITE (KVM_TRC_HANDLER + 0x08)
+#define KVM_TRC_DR_READ (KVM_TRC_HANDLER + 0x09)
+#define KVM_TRC_DR_WRITE (KVM_TRC_HANDLER + 0x0A)
+#define KVM_TRC_MSR_READ (KVM_TRC_HANDLER + 0x0B)
+#define KVM_TRC_MSR_WRITE (KVM_TRC_HANDLER + 0x0C)
+#define KVM_TRC_CPUID (KVM_TRC_HANDLER + 0x0D)
+#define KVM_TRC_INTR (KVM_TRC_HANDLER + 0x0E)
+#define KVM_TRC_NMI (KVM_TRC_HANDLER + 0x0F)
+#define KVM_TRC_VMMCALL (KVM_TRC_HANDLER + 0x10)
+#define KVM_TRC_HLT (KVM_TRC_HANDLER + 0x11)
+#define KVM_TRC_CLTS (KVM_TRC_HANDLER + 0x12)
+#define KVM_TRC_LMSW (KVM_TRC_HANDLER + 0x13)
+#define KVM_TRC_APIC_ACCESS (KVM_TRC_HANDLER + 0x14)
+#define KVM_TRC_TDP_FAULT (KVM_TRC_HANDLER + 0x15)
+#define KVM_TRC_GTLB_WRITE (KVM_TRC_HANDLER + 0x16)
+#define KVM_TRC_STLB_WRITE (KVM_TRC_HANDLER + 0x17)
+#define KVM_TRC_STLB_INVAL (KVM_TRC_HANDLER + 0x18)
+#define KVM_TRC_PPC_INSTR (KVM_TRC_HANDLER + 0x19)
+
+struct kvm_assigned_pci_dev {
+ __u32 assigned_dev_id;
+ __u32 busnr;
+ __u32 devfn;
+ __u32 flags;
+ union {
+ __u32 reserved[12];
+ };
+};
+
+struct kvm_assigned_irq {
+ __u32 assigned_dev_id;
+ __u32 host_irq;
+ __u32 guest_irq;
+ __u32 flags;
+ union {
+ __u32 reserved[12];
+ };
+};
+
+#define KVM_DEV_ASSIGN_ENABLE_IOMMU (1 << 0)
+
#endif
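
A userspace sketch of the new device-assignment ioctl above; the VM fd, bus/devfn values, and the example_ function are illustrative only:

#include <linux/kvm.h>
#include <sys/ioctl.h>
#include <string.h>

static int example_assign_pci_device(int vm_fd)
{
	struct kvm_assigned_pci_dev dev;

	memset(&dev, 0, sizeof(dev));
	dev.assigned_dev_id = 1;			/* arbitrary handle chosen by userspace */
	dev.busnr = 0x01;				/* host PCI bus, illustrative */
	dev.devfn = (0x00 << 3) | 0x0;			/* slot 00, function 0 */
	dev.flags = KVM_DEV_ASSIGN_ENABLE_IOMMU;	/* require IOMMU protection */

	return ioctl(vm_fd, KVM_ASSIGN_PCI_DEVICE, &dev);
}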
diff --git a/include/linux/kvm_host.h b/include/linux/kvm_host.h
index 8525afc5310..bb92be2153b 100644
--- a/include/linux/kvm_host.h
+++ b/include/linux/kvm_host.h
@@ -34,6 +34,10 @@
#define KVM_REQ_MMU_RELOAD 3
#define KVM_REQ_TRIPLE_FAULT 4
#define KVM_REQ_PENDING_TIMER 5
+#define KVM_REQ_UNHALT 6
+#define KVM_REQ_MMU_SYNC 7
+
+#define KVM_USERSPACE_IRQ_SOURCE_ID 0
struct kvm_vcpu;
extern struct kmem_cache *kvm_vcpu_cache;
@@ -279,12 +283,71 @@ void kvm_free_physmem(struct kvm *kvm);
struct kvm *kvm_arch_create_vm(void);
void kvm_arch_destroy_vm(struct kvm *kvm);
+void kvm_free_all_assigned_devices(struct kvm *kvm);
int kvm_cpu_get_interrupt(struct kvm_vcpu *v);
int kvm_cpu_has_interrupt(struct kvm_vcpu *v);
int kvm_cpu_has_pending_timer(struct kvm_vcpu *vcpu);
void kvm_vcpu_kick(struct kvm_vcpu *vcpu);
+int kvm_is_mmio_pfn(pfn_t pfn);
+
+struct kvm_irq_ack_notifier {
+ struct hlist_node link;
+ unsigned gsi;
+ void (*irq_acked)(struct kvm_irq_ack_notifier *kian);
+};
+
+struct kvm_assigned_dev_kernel {
+ struct kvm_irq_ack_notifier ack_notifier;
+ struct work_struct interrupt_work;
+ struct list_head list;
+ int assigned_dev_id;
+ int host_busnr;
+ int host_devfn;
+ int host_irq;
+ int guest_irq;
+ int irq_requested;
+ int irq_source_id;
+ struct pci_dev *dev;
+ struct kvm *kvm;
+};
+void kvm_set_irq(struct kvm *kvm, int irq_source_id, int irq, int level);
+void kvm_notify_acked_irq(struct kvm *kvm, unsigned gsi);
+void kvm_register_irq_ack_notifier(struct kvm *kvm,
+ struct kvm_irq_ack_notifier *kian);
+void kvm_unregister_irq_ack_notifier(struct kvm *kvm,
+ struct kvm_irq_ack_notifier *kian);
+int kvm_request_irq_source_id(struct kvm *kvm);
+void kvm_free_irq_source_id(struct kvm *kvm, int irq_source_id);
+
+#ifdef CONFIG_DMAR
+int kvm_iommu_map_pages(struct kvm *kvm, gfn_t base_gfn,
+ unsigned long npages);
+int kvm_iommu_map_guest(struct kvm *kvm,
+ struct kvm_assigned_dev_kernel *assigned_dev);
+int kvm_iommu_unmap_guest(struct kvm *kvm);
+#else /* CONFIG_DMAR */
+static inline int kvm_iommu_map_pages(struct kvm *kvm,
+ gfn_t base_gfn,
+ unsigned long npages)
+{
+ return 0;
+}
+
+static inline int kvm_iommu_map_guest(struct kvm *kvm,
+ struct kvm_assigned_dev_kernel
+ *assigned_dev)
+{
+ return -ENODEV;
+}
+
+static inline int kvm_iommu_unmap_guest(struct kvm *kvm)
+{
+ return 0;
+}
+#endif /* CONFIG_DMAR */
+
static inline void kvm_guest_enter(void)
{
account_system_vtime(current);
@@ -307,6 +370,11 @@ static inline gpa_t gfn_to_gpa(gfn_t gfn)
return (gpa_t)gfn << PAGE_SHIFT;
}
+static inline hpa_t pfn_to_hpa(pfn_t pfn)
+{
+ return (hpa_t)pfn << PAGE_SHIFT;
+}
+
static inline void kvm_migrate_timers(struct kvm_vcpu *vcpu)
{
set_bit(KVM_REQ_MIGRATE_TIMER, &vcpu->requests);
@@ -326,6 +394,25 @@ struct kvm_stats_debugfs_item {
extern struct kvm_stats_debugfs_item debugfs_entries[];
extern struct dentry *kvm_debugfs_dir;
+#define KVMTRACE_5D(evt, vcpu, d1, d2, d3, d4, d5, name) \
+ trace_mark(kvm_trace_##name, "%u %p %u %u %u %u %u %u", KVM_TRC_##evt, \
+ vcpu, 5, d1, d2, d3, d4, d5)
+#define KVMTRACE_4D(evt, vcpu, d1, d2, d3, d4, name) \
+ trace_mark(kvm_trace_##name, "%u %p %u %u %u %u %u %u", KVM_TRC_##evt, \
+ vcpu, 4, d1, d2, d3, d4, 0)
+#define KVMTRACE_3D(evt, vcpu, d1, d2, d3, name) \
+ trace_mark(kvm_trace_##name, "%u %p %u %u %u %u %u %u", KVM_TRC_##evt, \
+ vcpu, 3, d1, d2, d3, 0, 0)
+#define KVMTRACE_2D(evt, vcpu, d1, d2, name) \
+ trace_mark(kvm_trace_##name, "%u %p %u %u %u %u %u %u", KVM_TRC_##evt, \
+ vcpu, 2, d1, d2, 0, 0, 0)
+#define KVMTRACE_1D(evt, vcpu, d1, name) \
+ trace_mark(kvm_trace_##name, "%u %p %u %u %u %u %u %u", KVM_TRC_##evt, \
+ vcpu, 1, d1, 0, 0, 0, 0)
+#define KVMTRACE_0D(evt, vcpu, name) \
+ trace_mark(kvm_trace_##name, "%u %p %u %u %u %u %u %u", KVM_TRC_##evt, \
+ vcpu, 0, 0, 0, 0, 0, 0)
+
#ifdef CONFIG_KVM_TRACE
int kvm_trace_ioctl(unsigned int ioctl, unsigned long arg);
void kvm_trace_cleanup(void);
diff --git a/include/linux/leds.h b/include/linux/leds.h
index d41ccb56146..d3a73f5a48c 100644
--- a/include/linux/leds.h
+++ b/include/linux/leds.h
@@ -123,7 +123,7 @@ extern void ledtrig_ide_activity(void);
*/
struct led_info {
const char *name;
- char *default_trigger;
+ const char *default_trigger;
int flags;
};
@@ -135,7 +135,7 @@ struct led_platform_data {
/* For the leds-gpio driver */
struct gpio_led {
const char *name;
- char *default_trigger;
+ const char *default_trigger;
unsigned gpio;
u8 active_low;
};
diff --git a/include/linux/lguest_launcher.h b/include/linux/lguest_launcher.h
index e7217dc58f3..a53407a4165 100644
--- a/include/linux/lguest_launcher.h
+++ b/include/linux/lguest_launcher.h
@@ -54,9 +54,13 @@ struct lguest_vqconfig {
/* Write command first word is a request. */
enum lguest_req
{
- LHREQ_INITIALIZE, /* + base, pfnlimit, pgdir, start */
+ LHREQ_INITIALIZE, /* + base, pfnlimit, start */
LHREQ_GETDMA, /* No longer used */
LHREQ_IRQ, /* + irq */
LHREQ_BREAK, /* + on/off flag (on blocks until someone does off) */
};
+
+/* The alignment to use between consumer and producer parts of vring.
+ * x86 pagesize for historical reasons. */
+#define LGUEST_VRING_ALIGN 4096
#endif /* _LINUX_LGUEST_LAUNCHER */
diff --git a/include/linux/libata.h b/include/linux/libata.h
index 947cf84e555..3449de597ef 100644
--- a/include/linux/libata.h
+++ b/include/linux/libata.h
@@ -213,10 +213,11 @@ enum {
ATA_PFLAG_FROZEN = (1 << 2), /* port is frozen */
ATA_PFLAG_RECOVERED = (1 << 3), /* recovery action performed */
ATA_PFLAG_LOADING = (1 << 4), /* boot/loading probe */
- ATA_PFLAG_UNLOADING = (1 << 5), /* module is unloading */
ATA_PFLAG_SCSI_HOTPLUG = (1 << 6), /* SCSI hotplug scheduled */
ATA_PFLAG_INITIALIZING = (1 << 7), /* being initialized, don't touch */
ATA_PFLAG_RESETTING = (1 << 8), /* reset in progress */
+ ATA_PFLAG_UNLOADING = (1 << 9), /* driver is being unloaded */
+ ATA_PFLAG_UNLOADED = (1 << 10), /* driver is unloaded */
ATA_PFLAG_SUSPENDED = (1 << 17), /* port is suspended (power) */
ATA_PFLAG_PM_PENDING = (1 << 18), /* PM operation pending */
@@ -340,6 +341,9 @@ enum {
ATA_EHI_DID_RESET = ATA_EHI_DID_SOFTRESET | ATA_EHI_DID_HARDRESET,
+ /* mask of flags to transfer *to* the slave link */
+ ATA_EHI_TO_SLAVE_MASK = ATA_EHI_NO_AUTOPSY | ATA_EHI_QUIET,
+
/* max tries if error condition is still set after ->error_handler */
ATA_EH_MAX_TRIES = 5,
@@ -369,6 +373,10 @@ enum {
ATA_HORKAGE_IPM = (1 << 7), /* Link PM problems */
ATA_HORKAGE_IVB = (1 << 8), /* cbl det validity bit bugs */
ATA_HORKAGE_STUCK_ERR = (1 << 9), /* stuck ERR on next PACKET */
+ ATA_HORKAGE_BRIDGE_OK = (1 << 10), /* no bridge limits */
+ ATA_HORKAGE_ATAPI_MOD16_DMA = (1 << 11), /* use ATAPI DMA for commands
+ not multiple of 16 bytes */
+ ATA_HORKAGE_FIRMWARE_WARN = (1 << 12), /* firmware update warning */
/* DMA mask for user DMA control: User visible values; DO NOT
renumber */
@@ -1278,26 +1286,62 @@ static inline int ata_link_active(struct ata_link *link)
return ata_tag_valid(link->active_tag) || link->sactive;
}
-extern struct ata_link *__ata_port_next_link(struct ata_port *ap,
- struct ata_link *link,
- bool dev_only);
+/*
+ * Iterators
+ *
+ * ATA_LITER_* constants are used to select link iteration mode and
+ * ATA_DITER_* device iteration mode.
+ *
+ * For a custom iteration directly using ata_{link|dev}_next(), if
+ * @link or @dev, respectively, is NULL, the first element is
+ * returned. @dev and @link can be any valid device or link and the
+ * next element according to the iteration mode will be returned.
+ * After the last element, NULL is returned.
+ */
+enum ata_link_iter_mode {
+ ATA_LITER_EDGE, /* if present, PMP links only; otherwise,
+ * host link. no slave link */
+ ATA_LITER_HOST_FIRST, /* host link followed by PMP or slave links */
+ ATA_LITER_PMP_FIRST, /* PMP links followed by host link,
+ * slave link still comes after host link */
+};
+
+enum ata_dev_iter_mode {
+ ATA_DITER_ENABLED,
+ ATA_DITER_ENABLED_REVERSE,
+ ATA_DITER_ALL,
+ ATA_DITER_ALL_REVERSE,
+};
-#define __ata_port_for_each_link(link, ap) \
- for ((link) = __ata_port_next_link((ap), NULL, false); (link); \
- (link) = __ata_port_next_link((ap), (link), false))
+extern struct ata_link *ata_link_next(struct ata_link *link,
+ struct ata_port *ap,
+ enum ata_link_iter_mode mode);
-#define ata_port_for_each_link(link, ap) \
- for ((link) = __ata_port_next_link((ap), NULL, true); (link); \
- (link) = __ata_port_next_link((ap), (link), true))
+extern struct ata_device *ata_dev_next(struct ata_device *dev,
+ struct ata_link *link,
+ enum ata_dev_iter_mode mode);
-#define ata_link_for_each_dev(dev, link) \
- for ((dev) = (link)->device; \
- (dev) < (link)->device + ata_link_max_devices(link) || ((dev) = NULL); \
- (dev)++)
+/*
+ * Shortcut notation for iterations
+ *
+ * ata_for_each_link() iterates over each link of @ap according to
+ * @mode. @link points to the current link in the loop. @link is
+ * NULL after loop termination. ata_for_each_dev() works the same way
+ * except that it iterates over each device of @link.
+ *
+ * Note that the mode prefixes ATA_{L|D}ITER_ shouldn't need to be
+ * specified when using the following shorthand notations. Only the
+ * mode itself (EDGE, HOST_FIRST, ENABLED, etc...) should be
+ * specified. This not only increases brevity but also makes it
+ * impossible to use ATA_LITER_* for device iteration or vice-versa.
+ */
+#define ata_for_each_link(link, ap, mode) \
+ for ((link) = ata_link_next(NULL, (ap), ATA_LITER_##mode); (link); \
+ (link) = ata_link_next((link), (ap), ATA_LITER_##mode))
-#define ata_link_for_each_dev_reverse(dev, link) \
- for ((dev) = (link)->device + ata_link_max_devices(link) - 1; \
- (dev) >= (link)->device || ((dev) = NULL); (dev)--)
+#define ata_for_each_dev(dev, link, mode) \
+ for ((dev) = ata_dev_next(NULL, (link), ATA_DITER_##mode); (dev); \
+ (dev) = ata_dev_next((dev), (link), ATA_DITER_##mode))
/**
* ata_ncq_enabled - Test whether NCQ is enabled
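
A sketch of the new link/device iteration shorthands above, replacing the removed ata_port_for_each_link()/ata_link_for_each_dev(); the example_ function is hypothetical:

#include <linux/libata.h>

static void example_walk_port(struct ata_port *ap)
{
	struct ata_link *link;
	struct ata_device *dev;

	ata_for_each_link(link, ap, EDGE) {		/* PMP links, or the host link */
		ata_for_each_dev(dev, link, ENABLED) {
			/* dev is an enabled device on this link */
		}
	}
}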
diff --git a/include/linux/linkage.h b/include/linux/linkage.h
index 56ba3739465..fee9e59649c 100644
--- a/include/linux/linkage.h
+++ b/include/linux/linkage.h
@@ -4,8 +4,6 @@
#include <linux/compiler.h>
#include <asm/linkage.h>
-#define notrace __attribute__((no_instrument_function))
-
#ifdef __cplusplus
#define CPP_ASMLINKAGE extern "C"
#else
@@ -66,14 +64,6 @@
name:
#endif
-#define KPROBE_ENTRY(name) \
- .pushsection .kprobes.text, "ax"; \
- ENTRY(name)
-
-#define KPROBE_END(name) \
- END(name); \
- .popsection
-
#ifndef END
#define END(name) \
.size name, .-name
diff --git a/include/linux/list_nulls.h b/include/linux/list_nulls.h
new file mode 100644
index 00000000000..93150ecf3ea
--- /dev/null
+++ b/include/linux/list_nulls.h
@@ -0,0 +1,94 @@
+#ifndef _LINUX_LIST_NULLS_H
+#define _LINUX_LIST_NULLS_H
+
+/*
+ * Special version of lists, where end of list is not a NULL pointer,
+ * but a 'nulls' marker, which can have many different values.
+ * (up to 2^31 different values guaranteed on all platforms)
+ *
+ * In the standard hlist, termination of a list is the NULL pointer.
+ * In this special 'nulls' variant, we use the fact that objects stored in
+ * a list are aligned on a word (4 or 8 bytes alignment).
+ * We therefore use the least significant bit of 'ptr':
+ * Set to 1 : This is a 'nulls' end-of-list marker (ptr >> 1)
+ * Set to 0 : This is a pointer to some object (ptr)
+ */
+
+struct hlist_nulls_head {
+ struct hlist_nulls_node *first;
+};
+
+struct hlist_nulls_node {
+ struct hlist_nulls_node *next, **pprev;
+};
+#define INIT_HLIST_NULLS_HEAD(ptr, nulls) \
+ ((ptr)->first = (struct hlist_nulls_node *) (1UL | (((long)nulls) << 1)))
+
+#define hlist_nulls_entry(ptr, type, member) container_of(ptr,type,member)
+/**
+ * is_a_nulls - Test if a ptr is a 'nulls' end-of-list marker
+ * @ptr: ptr to be tested
+ *
+ */
+static inline int is_a_nulls(const struct hlist_nulls_node *ptr)
+{
+ return ((unsigned long)ptr & 1);
+}
+
+/**
+ * get_nulls_value - Get the 'nulls' value of the end of chain
+ * @ptr: end of chain
+ *
+ * Should be called only if is_a_nulls(ptr);
+ */
+static inline unsigned long get_nulls_value(const struct hlist_nulls_node *ptr)
+{
+ return ((unsigned long)ptr) >> 1;
+}
+
+static inline int hlist_nulls_unhashed(const struct hlist_nulls_node *h)
+{
+ return !h->pprev;
+}
+
+static inline int hlist_nulls_empty(const struct hlist_nulls_head *h)
+{
+ return is_a_nulls(h->first);
+}
+
+static inline void __hlist_nulls_del(struct hlist_nulls_node *n)
+{
+ struct hlist_nulls_node *next = n->next;
+ struct hlist_nulls_node **pprev = n->pprev;
+ *pprev = next;
+ if (!is_a_nulls(next))
+ next->pprev = pprev;
+}
+
+/**
+ * hlist_nulls_for_each_entry - iterate over list of given type
+ * @tpos: the type * to use as a loop cursor.
+ * @pos: the &struct hlist_node to use as a loop cursor.
+ * @head: the head for your list.
+ * @member: the name of the hlist_node within the struct.
+ *
+ */
+#define hlist_nulls_for_each_entry(tpos, pos, head, member) \
+ for (pos = (head)->first; \
+ (!is_a_nulls(pos)) && \
+ ({ tpos = hlist_nulls_entry(pos, typeof(*tpos), member); 1;}); \
+ pos = pos->next)
+
+/**
+ * hlist_nulls_for_each_entry_from - iterate over a hlist continuing from current point
+ * @tpos: the type * to use as a loop cursor.
+ * @pos: the &struct hlist_node to use as a loop cursor.
+ * @member: the name of the hlist_node within the struct.
+ *
+ */
+#define hlist_nulls_for_each_entry_from(tpos, pos, member) \
+ for (; (!is_a_nulls(pos)) && \
+ ({ tpos = hlist_nulls_entry(pos, typeof(*tpos), member); 1;}); \
+ pos = pos->next)
+
+#endif
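
A sketch of the nulls-list primitives above; the struct layout, the nulls value of 0, and the example_ names are assumptions for illustration:

#include <linux/list_nulls.h>

struct example_obj {
	int key;
	struct hlist_nulls_node node;
};

static struct hlist_nulls_head example_bucket;

static void example_init_and_walk(void)
{
	struct example_obj *obj;
	struct hlist_nulls_node *pos;

	INIT_HLIST_NULLS_HEAD(&example_bucket, 0);	/* chain terminated by nulls value 0 */

	hlist_nulls_for_each_entry(obj, pos, &example_bucket, node) {
		/* obj->key ... */
	}

	if (is_a_nulls(pos)) {
		/* get_nulls_value(pos) tells which chain the walk ended on */
	}
}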
diff --git a/include/linux/lockd/bind.h b/include/linux/lockd/bind.h
index e5872dc994c..fbc48f89852 100644
--- a/include/linux/lockd/bind.h
+++ b/include/linux/lockd/bind.h
@@ -41,6 +41,7 @@ struct nlmclnt_initdata {
size_t addrlen;
unsigned short protocol;
u32 nfs_version;
+ int noresvport;
};
/*
diff --git a/include/linux/lockd/lockd.h b/include/linux/lockd/lockd.h
index b56d5aa9b19..23da3fa69ef 100644
--- a/include/linux/lockd/lockd.h
+++ b/include/linux/lockd/lockd.h
@@ -49,6 +49,7 @@ struct nlm_host {
unsigned short h_proto; /* transport proto */
unsigned short h_reclaiming : 1,
h_server : 1, /* server side, not client side */
+ h_noresvport : 1,
h_inuse : 1;
wait_queue_head_t h_gracewait; /* wait while reclaiming */
struct rw_semaphore h_rwsem; /* Reboot recovery lock */
@@ -220,7 +221,8 @@ struct nlm_host *nlmclnt_lookup_host(const struct sockaddr *sap,
const size_t salen,
const unsigned short protocol,
const u32 version,
- const char *hostname);
+ const char *hostname,
+ int noresvport);
struct nlm_host *nlmsvc_lookup_host(const struct svc_rqst *rqstp,
const char *hostname,
const size_t hostname_len);
diff --git a/include/linux/lockdep.h b/include/linux/lockdep.h
index 331e5f1c2d8..23bf02fb124 100644
--- a/include/linux/lockdep.h
+++ b/include/linux/lockdep.h
@@ -73,6 +73,8 @@ struct lock_class_key {
struct lockdep_subclass_key subkeys[MAX_LOCKDEP_SUBCLASSES];
};
+#define LOCKSTAT_POINTS 4
+
/*
* The lock-class itself:
*/
@@ -119,7 +121,8 @@ struct lock_class {
int name_version;
#ifdef CONFIG_LOCK_STAT
- unsigned long contention_point[4];
+ unsigned long contention_point[LOCKSTAT_POINTS];
+ unsigned long contending_point[LOCKSTAT_POINTS];
#endif
};
@@ -144,6 +147,7 @@ enum bounce_type {
struct lock_class_stats {
unsigned long contention_point[4];
+ unsigned long contending_point[4];
struct lock_time read_waittime;
struct lock_time write_waittime;
struct lock_time read_holdtime;
@@ -165,6 +169,7 @@ struct lockdep_map {
const char *name;
#ifdef CONFIG_LOCK_STAT
int cpu;
+ unsigned long ip;
#endif
};
@@ -309,8 +314,15 @@ extern void lock_acquire(struct lockdep_map *lock, unsigned int subclass,
extern void lock_release(struct lockdep_map *lock, int nested,
unsigned long ip);
-extern void lock_set_subclass(struct lockdep_map *lock, unsigned int subclass,
- unsigned long ip);
+extern void lock_set_class(struct lockdep_map *lock, const char *name,
+ struct lock_class_key *key, unsigned int subclass,
+ unsigned long ip);
+
+static inline void lock_set_subclass(struct lockdep_map *lock,
+ unsigned int subclass, unsigned long ip)
+{
+ lock_set_class(lock, lock->name, lock->key, subclass, ip);
+}
# define INIT_LOCKDEP .lockdep_recursion = 0,
@@ -328,13 +340,15 @@ static inline void lockdep_on(void)
# define lock_acquire(l, s, t, r, c, n, i) do { } while (0)
# define lock_release(l, n, i) do { } while (0)
+# define lock_set_class(l, n, k, s, i) do { } while (0)
# define lock_set_subclass(l, s, i) do { } while (0)
# define lockdep_init() do { } while (0)
# define lockdep_info() do { } while (0)
-# define lockdep_init_map(lock, name, key, sub) do { (void)(key); } while (0)
+# define lockdep_init_map(lock, name, key, sub) \
+ do { (void)(name); (void)(key); } while (0)
# define lockdep_set_class(lock, key) do { (void)(key); } while (0)
# define lockdep_set_class_and_name(lock, key, name) \
- do { (void)(key); } while (0)
+ do { (void)(key); (void)(name); } while (0)
#define lockdep_set_class_and_subclass(lock, key, sub) \
do { (void)(key); } while (0)
#define lockdep_set_subclass(lock, sub) do { } while (0)
@@ -355,7 +369,7 @@ struct lock_class_key { };
#ifdef CONFIG_LOCK_STAT
extern void lock_contended(struct lockdep_map *lock, unsigned long ip);
-extern void lock_acquired(struct lockdep_map *lock);
+extern void lock_acquired(struct lockdep_map *lock, unsigned long ip);
#define LOCK_CONTENDED(_lock, try, lock) \
do { \
@@ -363,20 +377,20 @@ do { \
lock_contended(&(_lock)->dep_map, _RET_IP_); \
lock(_lock); \
} \
- lock_acquired(&(_lock)->dep_map); \
+ lock_acquired(&(_lock)->dep_map, _RET_IP_); \
} while (0)
#else /* CONFIG_LOCK_STAT */
#define lock_contended(lockdep_map, ip) do {} while (0)
-#define lock_acquired(lockdep_map) do {} while (0)
+#define lock_acquired(lockdep_map, ip) do {} while (0)
#define LOCK_CONTENDED(_lock, try, lock) \
lock(_lock)
#endif /* CONFIG_LOCK_STAT */
-#if defined(CONFIG_TRACE_IRQFLAGS) && defined(CONFIG_GENERIC_HARDIRQS)
+#ifdef CONFIG_GENERIC_HARDIRQS
extern void early_init_irq_lock_class(void);
#else
static inline void early_init_irq_lock_class(void)
@@ -480,4 +494,22 @@ static inline void print_irqtrace_events(struct task_struct *curr)
# define lock_map_release(l) do { } while (0)
#endif
+#ifdef CONFIG_PROVE_LOCKING
+# define might_lock(lock) \
+do { \
+ typecheck(struct lockdep_map *, &(lock)->dep_map); \
+ lock_acquire(&(lock)->dep_map, 0, 0, 0, 2, NULL, _THIS_IP_); \
+ lock_release(&(lock)->dep_map, 0, _THIS_IP_); \
+} while (0)
+# define might_lock_read(lock) \
+do { \
+ typecheck(struct lockdep_map *, &(lock)->dep_map); \
+ lock_acquire(&(lock)->dep_map, 0, 0, 1, 2, NULL, _THIS_IP_); \
+ lock_release(&(lock)->dep_map, 0, _THIS_IP_); \
+} while (0)
+#else
+# define might_lock(lock) do { } while (0)
+# define might_lock_read(lock) do { } while (0)
+#endif
+
#endif /* __LINUX_LOCKDEP_H */
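
A sketch of the new might_lock() annotation above, for a helper that only sometimes takes a lock; the example_ function is made up:

#include <linux/mutex.h>
#include <linux/lockdep.h>

static void example_conditional_lock(struct mutex *m, int really)
{
	might_lock(m);		/* teach lockdep the dependency on every call */

	if (really) {
		mutex_lock(m);
		/* ... */
		mutex_unlock(m);
	}
}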
diff --git a/include/linux/map_to_7segment.h b/include/linux/map_to_7segment.h
new file mode 100644
index 00000000000..7df8432c440
--- /dev/null
+++ b/include/linux/map_to_7segment.h
@@ -0,0 +1,187 @@
+/*
+ * Copyright (c) 2005 Henk Vergonet <Henk.Vergonet@gmail.com>
+ *
+ * This program is free software; you can redistribute it and/or
+ * modify it under the terms of the GNU General Public License as
+ * published by the Free Software Foundation; either version 2 of
+ * the License, or (at your option) any later version.
+ *
+ * This program is distributed in the hope that it will be useful,
+ * but WITHOUT ANY WARRANTY; without even the implied warranty of
+ * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
+ * GNU General Public License for more details.
+ *
+ * You should have received a copy of the GNU General Public License
+ * along with this program; if not, write to the Free Software
+ * Foundation, Inc., 59 Temple Place, Suite 330, Boston, MA 02111-1307 USA
+ */
+
+#ifndef MAP_TO_7SEGMENT_H
+#define MAP_TO_7SEGMENT_H
+
+/* This file provides translation primitives and tables for the conversion
+ * of (ASCII) characters to a 7-segment notation.
+ *
+ * The 7-segment notation from Wikipedia below is used as the standard.
+ * See: http://en.wikipedia.org/wiki/Seven_segment_display
+ *
+ * Notation: +-a-+
+ * f b
+ * +-g-+
+ * e c
+ * +-d-+
+ *
+ * Usage:
+ *
+ * Register a map variable, and fill it with a character set:
+ * static SEG7_DEFAULT_MAP(map_seg7);
+ *
+ *
+ * Then use for conversion:
+ * seg7 = map_to_seg7(&map_seg7, some_char);
+ * ...
+ *
+ * In device drivers it is recommended, if required, to make the char map
+ * accessible via the sysfs interface using the following scheme:
+ *
+ * static ssize_t show_map(struct device *dev, char *buf) {
+ * memcpy(buf, &map_seg7, sizeof(map_seg7));
+ * return sizeof(map_seg7);
+ * }
+ * static ssize_t store_map(struct device *dev, const char *buf, size_t cnt) {
+ * if(cnt != sizeof(map_seg7))
+ * return -EINVAL;
+ * memcpy(&map_seg7, buf, cnt);
+ * return cnt;
+ * }
+ * static DEVICE_ATTR(map_seg7, PERMS_RW, show_map, store_map);
+ *
+ * History:
+ * 2005-05-31 RFC linux-kernel@vger.kernel.org
+ */
+#include <linux/errno.h>
+
+
+#define BIT_SEG7_A 0
+#define BIT_SEG7_B 1
+#define BIT_SEG7_C 2
+#define BIT_SEG7_D 3
+#define BIT_SEG7_E 4
+#define BIT_SEG7_F 5
+#define BIT_SEG7_G 6
+#define BIT_SEG7_RESERVED 7
+
+struct seg7_conversion_map {
+ unsigned char table[128];
+};
+
+static inline int map_to_seg7(struct seg7_conversion_map *map, int c)
+{
+ return c >= 0 && c < sizeof(map->table) ? map->table[c] : -EINVAL;
+}
+
+#define SEG7_CONVERSION_MAP(_name, _map) \
+ struct seg7_conversion_map _name = { .table = { _map } }
+
+/*
+ * It is recommended to use a facility that allows user space to redefine
+ * custom character sets for LCD devices. Please use a sysfs interface
+ * as described above.
+ */
+#define MAP_TO_SEG7_SYSFS_FILE "map_seg7"
+
+/*******************************************************************************
+ * ASCII conversion table
+ ******************************************************************************/
+
+#define _SEG7(l,a,b,c,d,e,f,g) \
+ ( a<<BIT_SEG7_A | b<<BIT_SEG7_B | c<<BIT_SEG7_C | d<<BIT_SEG7_D | \
+ e<<BIT_SEG7_E | f<<BIT_SEG7_F | g<<BIT_SEG7_G )
+
+#define _MAP_0_32_ASCII_SEG7_NON_PRINTABLE \
+ 0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,
+
+#define _MAP_33_47_ASCII_SEG7_SYMBOL \
+ _SEG7('!',0,0,0,0,1,1,0), _SEG7('"',0,1,0,0,0,1,0), _SEG7('#',0,1,1,0,1,1,0),\
+ _SEG7('$',1,0,1,1,0,1,1), _SEG7('%',0,0,1,0,0,1,0), _SEG7('&',1,0,1,1,1,1,1),\
+ _SEG7('\'',0,0,0,0,0,1,0),_SEG7('(',1,0,0,1,1,1,0), _SEG7(')',1,1,1,1,0,0,0),\
+ _SEG7('*',0,1,1,0,1,1,1), _SEG7('+',0,1,1,0,0,0,1), _SEG7(',',0,0,0,0,1,0,0),\
+ _SEG7('-',0,0,0,0,0,0,1), _SEG7('.',0,0,0,0,1,0,0), _SEG7('/',0,1,0,0,1,0,1),
+
+#define _MAP_48_57_ASCII_SEG7_NUMERIC \
+ _SEG7('0',1,1,1,1,1,1,0), _SEG7('1',0,1,1,0,0,0,0), _SEG7('2',1,1,0,1,1,0,1),\
+ _SEG7('3',1,1,1,1,0,0,1), _SEG7('4',0,1,1,0,0,1,1), _SEG7('5',1,0,1,1,0,1,1),\
+ _SEG7('6',1,0,1,1,1,1,1), _SEG7('7',1,1,1,0,0,0,0), _SEG7('8',1,1,1,1,1,1,1),\
+ _SEG7('9',1,1,1,1,0,1,1),
+
+#define _MAP_58_64_ASCII_SEG7_SYMBOL \
+ _SEG7(':',0,0,0,1,0,0,1), _SEG7(';',0,0,0,1,0,0,1), _SEG7('<',1,0,0,0,0,1,1),\
+ _SEG7('=',0,0,0,1,0,0,1), _SEG7('>',1,1,0,0,0,0,1), _SEG7('?',1,1,1,0,0,1,0),\
+ _SEG7('@',1,1,0,1,1,1,1),
+
+#define _MAP_65_90_ASCII_SEG7_ALPHA_UPPR \
+ _SEG7('A',1,1,1,0,1,1,1), _SEG7('B',1,1,1,1,1,1,1), _SEG7('C',1,0,0,1,1,1,0),\
+ _SEG7('D',1,1,1,1,1,1,0), _SEG7('E',1,0,0,1,1,1,1), _SEG7('F',1,0,0,0,1,1,1),\
+ _SEG7('G',1,1,1,1,0,1,1), _SEG7('H',0,1,1,0,1,1,1), _SEG7('I',0,1,1,0,0,0,0),\
+ _SEG7('J',0,1,1,1,0,0,0), _SEG7('K',0,1,1,0,1,1,1), _SEG7('L',0,0,0,1,1,1,0),\
+ _SEG7('M',1,1,1,0,1,1,0), _SEG7('N',1,1,1,0,1,1,0), _SEG7('O',1,1,1,1,1,1,0),\
+ _SEG7('P',1,1,0,0,1,1,1), _SEG7('Q',1,1,1,1,1,1,0), _SEG7('R',1,1,1,0,1,1,1),\
+ _SEG7('S',1,0,1,1,0,1,1), _SEG7('T',0,0,0,1,1,1,1), _SEG7('U',0,1,1,1,1,1,0),\
+ _SEG7('V',0,1,1,1,1,1,0), _SEG7('W',0,1,1,1,1,1,1), _SEG7('X',0,1,1,0,1,1,1),\
+ _SEG7('Y',0,1,1,0,0,1,1), _SEG7('Z',1,1,0,1,1,0,1),
+
+#define _MAP_91_96_ASCII_SEG7_SYMBOL \
+ _SEG7('[',1,0,0,1,1,1,0), _SEG7('\\',0,0,1,0,0,1,1),_SEG7(']',1,1,1,1,0,0,0),\
+ _SEG7('^',1,1,0,0,0,1,0), _SEG7('_',0,0,0,1,0,0,0), _SEG7('`',0,1,0,0,0,0,0),
+
+#define _MAP_97_122_ASCII_SEG7_ALPHA_LOWER \
+ _SEG7('A',1,1,1,0,1,1,1), _SEG7('b',0,0,1,1,1,1,1), _SEG7('c',0,0,0,1,1,0,1),\
+ _SEG7('d',0,1,1,1,1,0,1), _SEG7('E',1,0,0,1,1,1,1), _SEG7('F',1,0,0,0,1,1,1),\
+ _SEG7('G',1,1,1,1,0,1,1), _SEG7('h',0,0,1,0,1,1,1), _SEG7('i',0,0,1,0,0,0,0),\
+ _SEG7('j',0,0,1,1,0,0,0), _SEG7('k',0,0,1,0,1,1,1), _SEG7('L',0,0,0,1,1,1,0),\
+ _SEG7('M',1,1,1,0,1,1,0), _SEG7('n',0,0,1,0,1,0,1), _SEG7('o',0,0,1,1,1,0,1),\
+ _SEG7('P',1,1,0,0,1,1,1), _SEG7('q',1,1,1,0,0,1,1), _SEG7('r',0,0,0,0,1,0,1),\
+ _SEG7('S',1,0,1,1,0,1,1), _SEG7('T',0,0,0,1,1,1,1), _SEG7('u',0,0,1,1,1,0,0),\
+ _SEG7('v',0,0,1,1,1,0,0), _SEG7('W',0,1,1,1,1,1,1), _SEG7('X',0,1,1,0,1,1,1),\
+ _SEG7('y',0,1,1,1,0,1,1), _SEG7('Z',1,1,0,1,1,0,1),
+
+#define _MAP_123_126_ASCII_SEG7_SYMBOL \
+ _SEG7('{',1,0,0,1,1,1,0), _SEG7('|',0,0,0,0,1,1,0), _SEG7('}',1,1,1,1,0,0,0),\
+ _SEG7('~',1,0,0,0,0,0,0),
+
+/* Maps */
+
+/* This set tries to map as closely as possible to the visible characteristics
+ * of the ASCII symbol; lowercase and uppercase letters may differ in
+ * presentation on the display.
+ */
+#define MAP_ASCII7SEG_ALPHANUM \
+ _MAP_0_32_ASCII_SEG7_NON_PRINTABLE \
+ _MAP_33_47_ASCII_SEG7_SYMBOL \
+ _MAP_48_57_ASCII_SEG7_NUMERIC \
+ _MAP_58_64_ASCII_SEG7_SYMBOL \
+ _MAP_65_90_ASCII_SEG7_ALPHA_UPPR \
+ _MAP_91_96_ASCII_SEG7_SYMBOL \
+ _MAP_97_122_ASCII_SEG7_ALPHA_LOWER \
+ _MAP_123_126_ASCII_SEG7_SYMBOL
+
+/* This set tries to map as closely as possible to the symbolic characteristics
+ * of the ASCII character, for maximum discrimination.
+ * For now this means all alphabetic characters use their lowercase
+ * representations.
+ * (This, for example, keeps hex numbers readable even with uppercase input.)
+ */
+#define MAP_ASCII7SEG_ALPHANUM_LC \
+ _MAP_0_32_ASCII_SEG7_NON_PRINTABLE \
+ _MAP_33_47_ASCII_SEG7_SYMBOL \
+ _MAP_48_57_ASCII_SEG7_NUMERIC \
+ _MAP_58_64_ASCII_SEG7_SYMBOL \
+ _MAP_97_122_ASCII_SEG7_ALPHA_LOWER \
+ _MAP_91_96_ASCII_SEG7_SYMBOL \
+ _MAP_97_122_ASCII_SEG7_ALPHA_LOWER \
+ _MAP_123_126_ASCII_SEG7_SYMBOL
+
+#define SEG7_DEFAULT_MAP(_name) \
+ SEG7_CONVERSION_MAP(_name,MAP_ASCII7SEG_ALPHANUM)
+
+#endif /* MAP_TO_7SEGMENT_H */
+
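A minimal sketch of the usage documented in the header above, assuming the header is included; my_display_write_segments() is a hypothetical hardware hook provided by the driver:

static SEG7_DEFAULT_MAP(map_seg7);	/* ASCII -> segment bitmap table */

static void show_char_on_display(char c)
{
	int seg7 = map_to_seg7(&map_seg7, c);

	if (seg7 >= 0)			/* -EINVAL for out-of-range input */
		my_display_write_segments(seg7);
}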
diff --git a/include/linux/marker.h b/include/linux/marker.h
index 1290653f924..b85e74ca782 100644
--- a/include/linux/marker.h
+++ b/include/linux/marker.h
@@ -12,6 +12,7 @@
* See the file COPYING for more details.
*/
+#include <stdarg.h>
#include <linux/types.h>
struct module;
@@ -48,10 +49,28 @@ struct marker {
void (*call)(const struct marker *mdata, void *call_private, ...);
struct marker_probe_closure single;
struct marker_probe_closure *multi;
+ const char *tp_name; /* Optional tracepoint name */
+ void *tp_cb; /* Optional tracepoint callback */
} __attribute__((aligned(8)));
#ifdef CONFIG_MARKERS
+#define _DEFINE_MARKER(name, tp_name_str, tp_cb, format) \
+ static const char __mstrtab_##name[] \
+ __attribute__((section("__markers_strings"))) \
+ = #name "\0" format; \
+ static struct marker __mark_##name \
+ __attribute__((section("__markers"), aligned(8))) = \
+ { __mstrtab_##name, &__mstrtab_##name[sizeof(#name)], \
+ 0, 0, marker_probe_cb, { __mark_empty_function, NULL},\
+ NULL, tp_name_str, tp_cb }
+
+#define DEFINE_MARKER(name, format) \
+ _DEFINE_MARKER(name, NULL, NULL, format)
+
+#define DEFINE_MARKER_TP(name, tp_name, tp_cb, format) \
+ _DEFINE_MARKER(name, #tp_name, tp_cb, format)
+
/*
* Note : the empty asm volatile with read constraint is used here instead of a
* "used" attribute to fix a gcc 4.1.x bug.
@@ -65,14 +84,7 @@ struct marker {
*/
#define __trace_mark(generic, name, call_private, format, args...) \
do { \
- static const char __mstrtab_##name[] \
- __attribute__((section("__markers_strings"))) \
- = #name "\0" format; \
- static struct marker __mark_##name \
- __attribute__((section("__markers"), aligned(8))) = \
- { __mstrtab_##name, &__mstrtab_##name[sizeof(#name)], \
- 0, 0, marker_probe_cb, \
- { __mark_empty_function, NULL}, NULL }; \
+ DEFINE_MARKER(name, format); \
__mark_check_format(format, ## args); \
if (unlikely(__mark_##name.state)) { \
(*__mark_##name.call) \
@@ -80,14 +92,39 @@ struct marker {
} \
} while (0)
+#define __trace_mark_tp(name, call_private, tp_name, tp_cb, format, args...) \
+ do { \
+ void __check_tp_type(void) \
+ { \
+ register_trace_##tp_name(tp_cb); \
+ } \
+ DEFINE_MARKER_TP(name, tp_name, tp_cb, format); \
+ __mark_check_format(format, ## args); \
+ (*__mark_##name.call)(&__mark_##name, call_private, \
+ ## args); \
+ } while (0)
+
extern void marker_update_probe_range(struct marker *begin,
struct marker *end);
+
+#define GET_MARKER(name) (__mark_##name)
+
#else /* !CONFIG_MARKERS */
+#define DEFINE_MARKER(name, tp_name, tp_cb, format)
#define __trace_mark(generic, name, call_private, format, args...) \
__mark_check_format(format, ## args)
+#define __trace_mark_tp(name, call_private, tp_name, tp_cb, format, args...) \
+ do { \
+ void __check_tp_type(void) \
+ { \
+ register_trace_##tp_name(tp_cb); \
+ } \
+ __mark_check_format(format, ## args); \
+ } while (0)
static inline void marker_update_probe_range(struct marker *begin,
struct marker *end)
{ }
+#define GET_MARKER(name)
#endif /* CONFIG_MARKERS */
/**
@@ -117,6 +154,20 @@ static inline void marker_update_probe_range(struct marker *begin,
__trace_mark(1, name, NULL, format, ## args)
/**
+ * trace_mark_tp - Marker in a tracepoint callback
+ * @name: marker name, not quoted.
+ * @tp_name: tracepoint name, not quoted.
+ * @tp_cb: tracepoint callback. Should have an associated global symbol so it
+ * is not optimized away by the compiler (should not be static).
+ * @format: format string
+ * @args...: variable argument list
+ *
+ * Places a marker in a tracepoint callback.
+ */
+#define trace_mark_tp(name, tp_name, tp_cb, format, args...) \
+ __trace_mark_tp(name, NULL, tp_name, tp_cb, format, ## args)
+
+/**
* MARK_NOARGS - Format string for a marker with no argument.
*/
#define MARK_NOARGS " "
@@ -136,8 +187,6 @@ extern marker_probe_func __mark_empty_function;
extern void marker_probe_cb(const struct marker *mdata,
void *call_private, ...);
-extern void marker_probe_cb_noarg(const struct marker *mdata,
- void *call_private, ...);
/*
* Connect a probe to a marker.
@@ -160,4 +209,13 @@ extern int marker_probe_unregister_private_data(marker_probe_func *probe,
extern void *marker_get_private_data(const char *name, marker_probe_func *probe,
int num);
+/*
+ * marker_synchronize_unregister must be called between the last marker probe
+ * unregistration and whichever comes first of
+ * - the end of the module exit function,
+ * - the freeing of any resource used by the probes,
+ * to ensure the code and data remain valid for any probe that may still be
+ * running.
+ */
+#define marker_synchronize_unregister() synchronize_sched()
+
#endif
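A sketch of the tear-down ordering described in the marker_synchronize_unregister() comment above; the marker name, probe function and state pointer are hypothetical, and the probe is assumed to have been registered earlier with marker_probe_register():

static void __exit my_module_exit(void)
{
	marker_probe_unregister("subsys_event", my_probe, NULL);

	/* Wait until no probe callback can still be executing... */
	marker_synchronize_unregister();

	/* ...only now is it safe to free what the probes were using. */
	kfree(my_probe_state);
}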
diff --git a/include/linux/mdio-gpio.h b/include/linux/mdio-gpio.h
new file mode 100644
index 00000000000..e9d3fdfe41d
--- /dev/null
+++ b/include/linux/mdio-gpio.h
@@ -0,0 +1,25 @@
+/*
+ * MDIO-GPIO bus platform data structures
+ *
+ * Copyright (C) 2008, Paulius Zaleckas <paulius.zaleckas@teltonika.lt>
+ *
+ * This file is licensed under the terms of the GNU General Public License
+ * version 2. This program is licensed "as is" without any warranty of any
+ * kind, whether express or implied.
+ */
+
+#ifndef __LINUX_MDIO_GPIO_H
+#define __LINUX_MDIO_GPIO_H
+
+#include <linux/mdio-bitbang.h>
+
+struct mdio_gpio_platform_data {
+ /* GPIO numbers for bus pins */
+ unsigned int mdc;
+ unsigned int mdio;
+
+ unsigned int phy_mask;
+ int irqs[PHY_MAX_ADDR];
+};
+
+#endif /* __LINUX_MDIO_GPIO_H */
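A hypothetical board-file sketch filling in the platform data above; the GPIO numbers are made up, and the platform device name is assumed to match the mdio-gpio driver:

static struct mdio_gpio_platform_data my_mdio_pdata = {
	.mdc		= 10,		/* GPIO wired to the MDC clock pin */
	.mdio		= 11,		/* GPIO wired to the MDIO data pin */
	.phy_mask	= ~(1 << 0),	/* probe only PHY address 0 */
};

static struct platform_device my_mdio_device = {
	.name	= "mdio-gpio",		/* assumed driver name */
	.id	= 0,
	.dev	= {
		.platform_data = &my_mdio_pdata,
	},
};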
diff --git a/include/linux/memcontrol.h b/include/linux/memcontrol.h
index fdf3967e139..1fbe14d3952 100644
--- a/include/linux/memcontrol.h
+++ b/include/linux/memcontrol.h
@@ -27,16 +27,13 @@ struct mm_struct;
#ifdef CONFIG_CGROUP_MEM_RES_CTLR
-#define page_reset_bad_cgroup(page) ((page)->page_cgroup = 0)
-
-extern struct page_cgroup *page_get_page_cgroup(struct page *page);
extern int mem_cgroup_charge(struct page *page, struct mm_struct *mm,
gfp_t gfp_mask);
extern int mem_cgroup_cache_charge(struct page *page, struct mm_struct *mm,
gfp_t gfp_mask);
+extern void mem_cgroup_move_lists(struct page *page, enum lru_list lru);
extern void mem_cgroup_uncharge_page(struct page *page);
extern void mem_cgroup_uncharge_cache_page(struct page *page);
-extern void mem_cgroup_move_lists(struct page *page, bool active);
extern int mem_cgroup_shrink_usage(struct mm_struct *mm, gfp_t gfp_mask);
extern unsigned long mem_cgroup_isolate_pages(unsigned long nr_to_scan,
@@ -44,7 +41,7 @@ extern unsigned long mem_cgroup_isolate_pages(unsigned long nr_to_scan,
unsigned long *scanned, int order,
int mode, struct zone *z,
struct mem_cgroup *mem_cont,
- int active);
+ int active, int file);
extern void mem_cgroup_out_of_memory(struct mem_cgroup *mem, gfp_t gfp_mask);
int task_in_mem_cgroup(struct task_struct *task, const struct mem_cgroup *mem);
@@ -69,21 +66,11 @@ extern void mem_cgroup_note_reclaim_priority(struct mem_cgroup *mem,
extern void mem_cgroup_record_reclaim_priority(struct mem_cgroup *mem,
int priority);
-extern long mem_cgroup_calc_reclaim_active(struct mem_cgroup *mem,
- struct zone *zone, int priority);
-extern long mem_cgroup_calc_reclaim_inactive(struct mem_cgroup *mem,
- struct zone *zone, int priority);
-
-#else /* CONFIG_CGROUP_MEM_RES_CTLR */
-static inline void page_reset_bad_cgroup(struct page *page)
-{
-}
+extern long mem_cgroup_calc_reclaim(struct mem_cgroup *mem, struct zone *zone,
+ int priority, enum lru_list lru);
-static inline struct page_cgroup *page_get_page_cgroup(struct page *page)
-{
- return NULL;
-}
+#else /* CONFIG_CGROUP_MEM_RES_CTLR */
static inline int mem_cgroup_charge(struct page *page,
struct mm_struct *mm, gfp_t gfp_mask)
{
@@ -159,14 +146,9 @@ static inline void mem_cgroup_record_reclaim_priority(struct mem_cgroup *mem,
{
}
-static inline long mem_cgroup_calc_reclaim_active(struct mem_cgroup *mem,
- struct zone *zone, int priority)
-{
- return 0;
-}
-
-static inline long mem_cgroup_calc_reclaim_inactive(struct mem_cgroup *mem,
- struct zone *zone, int priority)
+static inline long mem_cgroup_calc_reclaim(struct mem_cgroup *mem,
+ struct zone *zone, int priority,
+ enum lru_list lru)
{
return 0;
}
diff --git a/include/linux/memory.h b/include/linux/memory.h
index 2f5f8a5ef2a..36c82c9e6ea 100644
--- a/include/linux/memory.h
+++ b/include/linux/memory.h
@@ -91,7 +91,7 @@ extern int memory_notify(unsigned long val, void *v);
#ifdef CONFIG_MEMORY_HOTPLUG
#define hotplug_memory_notifier(fn, pri) { \
- static struct notifier_block fn##_mem_nb = \
+ static __meminitdata struct notifier_block fn##_mem_nb =\
{ .notifier_call = fn, .priority = pri }; \
register_memory_notifier(&fn##_mem_nb); \
}
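A short sketch of how the hotplug_memory_notifier() macro above is typically used from built-in init code; the callback body is hypothetical, and the module_init wiring is omitted:

static int __meminit my_mem_callback(struct notifier_block *self,
				     unsigned long action, void *arg)
{
	if (action == MEM_ONLINE)
		pr_info("memory section came online\n");
	return NOTIFY_OK;
}

static int __init my_subsys_init(void)
{
	hotplug_memory_notifier(my_mem_callback, 0);
	return 0;
}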
diff --git a/include/linux/mfd/da903x.h b/include/linux/mfd/da903x.h
new file mode 100644
index 00000000000..cad314c1243
--- /dev/null
+++ b/include/linux/mfd/da903x.h
@@ -0,0 +1,201 @@
+#ifndef __LINUX_PMIC_DA903X_H
+#define __LINUX_PMIC_DA903X_H
+
+/* Unified sub device IDs for DA9030/DA9034 */
+enum {
+ DA9030_ID_LED_1,
+ DA9030_ID_LED_2,
+ DA9030_ID_LED_3,
+ DA9030_ID_LED_4,
+ DA9030_ID_LED_PC,
+ DA9030_ID_VIBRA,
+ DA9030_ID_WLED,
+ DA9030_ID_BUCK1,
+ DA9030_ID_BUCK2,
+ DA9030_ID_LDO1,
+ DA9030_ID_LDO2,
+ DA9030_ID_LDO3,
+ DA9030_ID_LDO4,
+ DA9030_ID_LDO5,
+ DA9030_ID_LDO6,
+ DA9030_ID_LDO7,
+ DA9030_ID_LDO8,
+ DA9030_ID_LDO9,
+ DA9030_ID_LDO10,
+ DA9030_ID_LDO11,
+ DA9030_ID_LDO12,
+ DA9030_ID_LDO13,
+ DA9030_ID_LDO14,
+ DA9030_ID_LDO15,
+ DA9030_ID_LDO16,
+ DA9030_ID_LDO17,
+ DA9030_ID_LDO18,
+ DA9030_ID_LDO19,
+ DA9030_ID_LDO_INT, /* LDO Internal */
+
+ DA9034_ID_LED_1,
+ DA9034_ID_LED_2,
+ DA9034_ID_VIBRA,
+ DA9034_ID_WLED,
+ DA9034_ID_TOUCH,
+
+ DA9034_ID_BUCK1,
+ DA9034_ID_BUCK2,
+ DA9034_ID_LDO1,
+ DA9034_ID_LDO2,
+ DA9034_ID_LDO3,
+ DA9034_ID_LDO4,
+ DA9034_ID_LDO5,
+ DA9034_ID_LDO6,
+ DA9034_ID_LDO7,
+ DA9034_ID_LDO8,
+ DA9034_ID_LDO9,
+ DA9034_ID_LDO10,
+ DA9034_ID_LDO11,
+ DA9034_ID_LDO12,
+ DA9034_ID_LDO13,
+ DA9034_ID_LDO14,
+ DA9034_ID_LDO15,
+};
+
+/*
+ * The DA9030/DA9034 LED sub-devices use the generic "struct led_info"
+ * as their platform_data.
+ */
+
+/* DA9030 flags for "struct led_info"
+ */
+#define DA9030_LED_RATE_ON (0 << 5)
+#define DA9030_LED_RATE_052S (1 << 5)
+#define DA9030_LED_DUTY_1_16 (0 << 3)
+#define DA9030_LED_DUTY_1_8 (1 << 3)
+#define DA9030_LED_DUTY_1_4 (2 << 3)
+#define DA9030_LED_DUTY_1_2 (3 << 3)
+
+#define DA9030_VIBRA_MODE_1P3V (0 << 1)
+#define DA9030_VIBRA_MODE_2P7V (1 << 1)
+#define DA9030_VIBRA_FREQ_1HZ (0 << 2)
+#define DA9030_VIBRA_FREQ_2HZ (1 << 2)
+#define DA9030_VIBRA_FREQ_4HZ (2 << 2)
+#define DA9030_VIBRA_FREQ_8HZ (3 << 2)
+#define DA9030_VIBRA_DUTY_ON (0 << 4)
+#define DA9030_VIBRA_DUTY_75P (1 << 4)
+#define DA9030_VIBRA_DUTY_50P (2 << 4)
+#define DA9030_VIBRA_DUTY_25P (3 << 4)
+
+/* DA9034 flags for "struct led_info" */
+#define DA9034_LED_RAMP (1 << 7)
+
+/* DA9034 touch screen platform data */
+struct da9034_touch_pdata {
+ int interval_ms; /* sampling interval while pen down */
+ int x_inverted;
+ int y_inverted;
+};
+
+struct da903x_subdev_info {
+ int id;
+ const char *name;
+ void *platform_data;
+};
+
+struct da903x_platform_data {
+ int num_subdevs;
+ struct da903x_subdev_info *subdevs;
+};
+
+/* bit definitions for DA9030 events */
+#define DA9030_EVENT_ONKEY (1 << 0)
+#define DA9030_EVENT_PWREN (1 << 1)
+#define DA9030_EVENT_EXTON (1 << 2)
+#define DA9030_EVENT_CHDET (1 << 3)
+#define DA9030_EVENT_TBAT (1 << 4)
+#define DA9030_EVENT_VBATMON (1 << 5)
+#define DA9030_EVENT_VBATMON_TXON (1 << 6)
+#define DA9030_EVENT_CHIOVER (1 << 7)
+#define DA9030_EVENT_TCTO (1 << 8)
+#define DA9030_EVENT_CCTO (1 << 9)
+#define DA9030_EVENT_ADC_READY (1 << 10)
+#define DA9030_EVENT_VBUS_4P4 (1 << 11)
+#define DA9030_EVENT_VBUS_4P0 (1 << 12)
+#define DA9030_EVENT_SESS_VALID (1 << 13)
+#define DA9030_EVENT_SRP_DETECT (1 << 14)
+#define DA9030_EVENT_WATCHDOG (1 << 15)
+#define DA9030_EVENT_LDO15 (1 << 16)
+#define DA9030_EVENT_LDO16 (1 << 17)
+#define DA9030_EVENT_LDO17 (1 << 18)
+#define DA9030_EVENT_LDO18 (1 << 19)
+#define DA9030_EVENT_LDO19 (1 << 20)
+#define DA9030_EVENT_BUCK2 (1 << 21)
+
+/* bit definitions for DA9034 events */
+#define DA9034_EVENT_ONKEY (1 << 0)
+#define DA9034_EVENT_EXTON (1 << 2)
+#define DA9034_EVENT_CHDET (1 << 3)
+#define DA9034_EVENT_TBAT (1 << 4)
+#define DA9034_EVENT_VBATMON (1 << 5)
+#define DA9034_EVENT_REV_IOVER (1 << 6)
+#define DA9034_EVENT_CH_IOVER (1 << 7)
+#define DA9034_EVENT_CH_TCTO (1 << 8)
+#define DA9034_EVENT_CH_CCTO (1 << 9)
+#define DA9034_EVENT_USB_DEV (1 << 10)
+#define DA9034_EVENT_OTGCP_IOVER (1 << 11)
+#define DA9034_EVENT_VBUS_4P55 (1 << 12)
+#define DA9034_EVENT_VBUS_3P8 (1 << 13)
+#define DA9034_EVENT_SESS_1P8 (1 << 14)
+#define DA9034_EVENT_SRP_READY (1 << 15)
+#define DA9034_EVENT_ADC_MAN (1 << 16)
+#define DA9034_EVENT_ADC_AUTO4 (1 << 17)
+#define DA9034_EVENT_ADC_AUTO5 (1 << 18)
+#define DA9034_EVENT_ADC_AUTO6 (1 << 19)
+#define DA9034_EVENT_PEN_DOWN (1 << 20)
+#define DA9034_EVENT_TSI_READY (1 << 21)
+#define DA9034_EVENT_UART_TX (1 << 22)
+#define DA9034_EVENT_UART_RX (1 << 23)
+#define DA9034_EVENT_HEADSET (1 << 25)
+#define DA9034_EVENT_HOOKSWITCH (1 << 26)
+#define DA9034_EVENT_WATCHDOG (1 << 27)
+
+extern int da903x_register_notifier(struct device *dev,
+ struct notifier_block *nb, unsigned int events);
+extern int da903x_unregister_notifier(struct device *dev,
+ struct notifier_block *nb, unsigned int events);
+
+/* Status Query Interface */
+#define DA9030_STATUS_ONKEY (1 << 0)
+#define DA9030_STATUS_PWREN1 (1 << 1)
+#define DA9030_STATUS_EXTON (1 << 2)
+#define DA9030_STATUS_CHDET (1 << 3)
+#define DA9030_STATUS_TBAT (1 << 4)
+#define DA9030_STATUS_VBATMON (1 << 5)
+#define DA9030_STATUS_VBATMON_TXON (1 << 6)
+#define DA9030_STATUS_MCLKDET (1 << 7)
+
+#define DA9034_STATUS_ONKEY (1 << 0)
+#define DA9034_STATUS_EXTON (1 << 2)
+#define DA9034_STATUS_CHDET (1 << 3)
+#define DA9034_STATUS_TBAT (1 << 4)
+#define DA9034_STATUS_VBATMON (1 << 5)
+#define DA9034_STATUS_PEN_DOWN (1 << 6)
+#define DA9034_STATUS_MCLKDET (1 << 7)
+#define DA9034_STATUS_USB_DEV (1 << 8)
+#define DA9034_STATUS_HEADSET (1 << 9)
+#define DA9034_STATUS_HOOKSWITCH (1 << 10)
+#define DA9034_STATUS_REMCON (1 << 11)
+#define DA9034_STATUS_VBUS_VALID_4P55 (1 << 12)
+#define DA9034_STATUS_VBUS_VALID_3P8 (1 << 13)
+#define DA9034_STATUS_SESS_VALID_1P8 (1 << 14)
+#define DA9034_STATUS_SRP_READY (1 << 15)
+
+extern int da903x_query_status(struct device *dev, unsigned int status);
+
+
+/* NOTE: the functions below are not intended for use outside
+ * of the DA9034 sub-device drivers
+ */
+extern int da903x_write(struct device *dev, int reg, uint8_t val);
+extern int da903x_read(struct device *dev, int reg, uint8_t *val);
+extern int da903x_update(struct device *dev, int reg, uint8_t val, uint8_t mask);
+extern int da903x_set_bits(struct device *dev, int reg, uint8_t bit_mask);
+extern int da903x_clr_bits(struct device *dev, int reg, uint8_t bit_mask);
+#endif /* __LINUX_PMIC_DA903X_H */
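A hypothetical board sketch wiring one DA9034 LED sub-device into the platform data structures above; the LED name is made up, and the sub-device name is assumed to match the corresponding LED driver:

static struct led_info my_led = {
	.name	= "board:red:status",
	.flags	= DA9034_LED_RAMP,
};

static struct da903x_subdev_info my_da9034_subdevs[] = {
	{
		.id		= DA9034_ID_LED_1,
		.name		= "da903x-led",	/* assumed driver name */
		.platform_data	= &my_led,
	},
};

static struct da903x_platform_data my_da9034_pdata = {
	.num_subdevs	= ARRAY_SIZE(my_da9034_subdevs),
	.subdevs	= my_da9034_subdevs,
};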
diff --git a/include/linux/mfd/t7l66xb.h b/include/linux/mfd/t7l66xb.h
index e83c7f2036f..b4629818aea 100644
--- a/include/linux/mfd/t7l66xb.h
+++ b/include/linux/mfd/t7l66xb.h
@@ -15,8 +15,6 @@
#include <linux/mfd/tmio.h>
struct t7l66xb_platform_data {
- int (*enable_clk32k)(struct platform_device *dev);
- void (*disable_clk32k)(struct platform_device *dev);
int (*enable)(struct platform_device *dev);
int (*disable)(struct platform_device *dev);
int (*suspend)(struct platform_device *dev);
diff --git a/include/linux/mfd/tc6387xb.h b/include/linux/mfd/tc6387xb.h
index fa06e0610b8..b4888209494 100644
--- a/include/linux/mfd/tc6387xb.h
+++ b/include/linux/mfd/tc6387xb.h
@@ -11,9 +11,6 @@
#define MFD_TC6387XB_H
struct tc6387xb_platform_data {
- int (*enable_clk32k)(struct platform_device *dev);
- void (*disable_clk32k)(struct platform_device *dev);
-
int (*enable)(struct platform_device *dev);
int (*disable)(struct platform_device *dev);
int (*suspend)(struct platform_device *dev);
diff --git a/include/linux/mfd/tc6393xb.h b/include/linux/mfd/tc6393xb.h
index fec7b3f7a81..626e448205c 100644
--- a/include/linux/mfd/tc6393xb.h
+++ b/include/linux/mfd/tc6393xb.h
@@ -17,12 +17,12 @@
#ifndef MFD_TC6393XB_H
#define MFD_TC6393XB_H
+#include <linux/fb.h>
+
/* Also one should provide the CK3P6MI clock */
struct tc6393xb_platform_data {
u16 scr_pll2cr; /* PLL2 Control */
u16 scr_gper; /* GP Enable */
- u32 scr_gpo_doecr; /* GPO Data OE Control */
- u32 scr_gpo_dsr; /* GPO Data Set */
int (*enable)(struct platform_device *dev);
int (*disable)(struct platform_device *dev);
@@ -31,15 +31,28 @@ struct tc6393xb_platform_data {
int irq_base; /* base for subdevice irqs */
int gpio_base;
+ int (*setup)(struct platform_device *dev);
+ void (*teardown)(struct platform_device *dev);
struct tmio_nand_data *nand_data;
+ struct tmio_fb_data *fb_data;
+
+	unsigned		resume_restore : 1; /* take special actions
+ to preserve the state
+ on suspend/resume */
};
+extern int tc6393xb_lcd_mode(struct platform_device *fb,
+ const struct fb_videomode *mode);
+extern int tc6393xb_lcd_set_power(struct platform_device *fb, bool on);
+
/*
* Relative to irq_base
*/
#define IRQ_TC6393_NAND 0
#define IRQ_TC6393_MMC 1
+#define IRQ_TC6393_OHCI 2
+#define IRQ_TC6393_FB 4
#define TC6393XB_NR_IRQS 8
diff --git a/include/linux/mfd/tmio.h b/include/linux/mfd/tmio.h
index ec612e66391..516d955ab8a 100644
--- a/include/linux/mfd/tmio.h
+++ b/include/linux/mfd/tmio.h
@@ -1,6 +1,8 @@
#ifndef MFD_TMIO_H
#define MFD_TMIO_H
+#include <linux/fb.h>
+
#define tmio_ioread8(addr) readb(addr)
#define tmio_ioread16(addr) readw(addr)
#define tmio_ioread16_rep(r, b, l) readsw(r, b, l)
@@ -25,4 +27,21 @@ struct tmio_nand_data {
unsigned int num_partitions;
};
+#define FBIO_TMIO_ACC_WRITE 0x7C639300
+#define FBIO_TMIO_ACC_SYNC 0x7C639301
+
+struct tmio_fb_data {
+ int (*lcd_set_power)(struct platform_device *fb_dev,
+ bool on);
+ int (*lcd_mode)(struct platform_device *fb_dev,
+ const struct fb_videomode *mode);
+ int num_modes;
+ struct fb_videomode *modes;
+
+ /* in mm: size of screen */
+ int height;
+ int width;
+};
+
+
#endif
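A sketch showing how a board might wire the tc6393xb LCD helpers (declared in mfd/tc6393xb.h above) into the new tmio_fb_data; the mode line and panel dimensions are hypothetical:

static struct fb_videomode my_lcd_modes[] = {
	{ .xres = 480, .yres = 640, .pixclock = 39722, },
};

static struct tmio_fb_data my_fb_data = {
	.lcd_set_power	= tc6393xb_lcd_set_power,
	.lcd_mode	= tc6393xb_lcd_mode,
	.num_modes	= ARRAY_SIZE(my_lcd_modes),
	.modes		= my_lcd_modes,
	.height		= 82,	/* panel size in mm, made up */
	.width		= 60,
};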
diff --git a/include/linux/mfd/wm8350/audio.h b/include/linux/mfd/wm8350/audio.h
index 217bb22ebb8..af95a1d2f3a 100644
--- a/include/linux/mfd/wm8350/audio.h
+++ b/include/linux/mfd/wm8350/audio.h
@@ -1,7 +1,7 @@
/*
* audio.h -- Audio Driver for Wolfson WM8350 PMIC
*
- * Copyright 2007 Wolfson Microelectronics PLC
+ * Copyright 2007, 2008 Wolfson Microelectronics PLC
*
* This program is free software; you can redistribute it and/or modify it
* under the terms of the GNU General Public License as published by the
@@ -70,9 +70,9 @@
#define WM8350_CODEC_ISEL_0_5 3 /* x0.5 */
#define WM8350_VMID_OFF 0
-#define WM8350_VMID_500K 1
-#define WM8350_VMID_100K 2
-#define WM8350_VMID_10K 3
+#define WM8350_VMID_300K 1
+#define WM8350_VMID_50K 2
+#define WM8350_VMID_5K 3
/*
* R40 (0x28) - Clock Control 1
@@ -591,8 +591,38 @@
#define WM8350_IRQ_CODEC_MICSCD 41
#define WM8350_IRQ_CODEC_MICD 42
+/*
+ * WM8350 Platform data.
+ *
+ * This must be initialised per platform for best audio performance.
+ * Please see the WM8350 datasheet for more information.
+ */
+struct wm8350_audio_platform_data {
+ int vmid_discharge_msecs; /* VMID --> OFF discharge time */
+ int drain_msecs; /* OFF drain time */
+ int cap_discharge_msecs; /* Cap ON (from OFF) discharge time */
+ int vmid_charge_msecs; /* vmid power up time */
+ u32 vmid_s_curve:2; /* vmid enable s curve speed */
+ u32 dis_out4:2; /* out4 discharge speed */
+ u32 dis_out3:2; /* out3 discharge speed */
+ u32 dis_out2:2; /* out2 discharge speed */
+ u32 dis_out1:2; /* out1 discharge speed */
+ u32 vroi_out4:1; /* out4 tie off */
+ u32 vroi_out3:1; /* out3 tie off */
+ u32 vroi_out2:1; /* out2 tie off */
+ u32 vroi_out1:1; /* out1 tie off */
+ u32 vroi_enable:1; /* enable tie off */
+ u32 codec_current_on:2; /* current level ON */
+ u32 codec_current_standby:2; /* current level STANDBY */
+ u32 codec_current_charge:2; /* codec current @ vmid charge */
+};
+
+struct snd_soc_codec;
+
struct wm8350_codec {
struct platform_device *pdev;
+ struct snd_soc_codec *codec;
+ struct wm8350_audio_platform_data *platform_data;
};
#endif
diff --git a/include/linux/mfd/wm8350/rtc.h b/include/linux/mfd/wm8350/rtc.h
index dfda69e9f44..24add2bef6c 100644
--- a/include/linux/mfd/wm8350/rtc.h
+++ b/include/linux/mfd/wm8350/rtc.h
@@ -261,6 +261,8 @@
struct wm8350_rtc {
struct platform_device *pdev;
+ struct rtc_device *rtc;
+ int alarm_enabled; /* used over suspend/resume */
};
#endif
diff --git a/include/linux/migrate.h b/include/linux/migrate.h
index 03aea612d28..3f34005068d 100644
--- a/include/linux/migrate.h
+++ b/include/linux/migrate.h
@@ -7,7 +7,6 @@
typedef struct page *new_page_t(struct page *, unsigned long private, int **);
#ifdef CONFIG_MIGRATION
-extern int isolate_lru_page(struct page *p, struct list_head *pagelist);
extern int putback_lru_pages(struct list_head *l);
extern int migrate_page(struct address_space *,
struct page *, struct page *);
@@ -21,8 +20,6 @@ extern int migrate_vmas(struct mm_struct *mm,
const nodemask_t *from, const nodemask_t *to,
unsigned long flags);
#else
-static inline int isolate_lru_page(struct page *p, struct list_head *list)
- { return -ENOSYS; }
static inline int putback_lru_pages(struct list_head *l) { return 0; }
static inline int migrate_pages(struct list_head *l, new_page_t x,
unsigned long private) { return -ENOSYS; }
diff --git a/include/linux/mii.h b/include/linux/mii.h
index 151b7e0182c..ad748588faf 100644
--- a/include/linux/mii.h
+++ b/include/linux/mii.h
@@ -135,6 +135,10 @@
#define LPA_1000FULL 0x0800 /* Link partner 1000BASE-T full duplex */
#define LPA_1000HALF 0x0400 /* Link partner 1000BASE-T half duplex */
+/* Flow control flags */
+#define FLOW_CTRL_TX 0x01
+#define FLOW_CTRL_RX 0x02
+
/* This structure is used in all SIOCxMIIxxx ioctl calls */
struct mii_ioctl_data {
__u16 phy_id;
@@ -235,5 +239,34 @@ static inline unsigned int mii_duplex (unsigned int duplex_lock,
return 0;
}
+/**
+ * mii_resolve_flowctrl_fdx
+ * @lcladv: value of MII ADVERTISE register
+ * @rmtadv: value of MII LPA register
+ *
+ * Resolve full duplex flow control as per IEEE 802.3-2005 table 28B-3
+ */
+static inline u8 mii_resolve_flowctrl_fdx(u16 lcladv, u16 rmtadv)
+{
+ u8 cap = 0;
+
+ if (lcladv & ADVERTISE_PAUSE_CAP) {
+ if (lcladv & ADVERTISE_PAUSE_ASYM) {
+ if (rmtadv & LPA_PAUSE_CAP)
+ cap = FLOW_CTRL_TX | FLOW_CTRL_RX;
+ else if (rmtadv & LPA_PAUSE_ASYM)
+ cap = FLOW_CTRL_RX;
+ } else {
+ if (rmtadv & LPA_PAUSE_CAP)
+ cap = FLOW_CTRL_TX | FLOW_CTRL_RX;
+ }
+ } else if (lcladv & ADVERTISE_PAUSE_ASYM) {
+ if ((rmtadv & LPA_PAUSE_CAP) && (rmtadv & LPA_PAUSE_ASYM))
+ cap = FLOW_CTRL_TX;
+ }
+
+ return cap;
+}
+
#endif /* __KERNEL__ */
#endif /* __LINUX_MII_H__ */
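A sketch of how a MAC driver might use mii_resolve_flowctrl_fdx() once autonegotiation completes; my_hw_set_pause() is a hypothetical hardware hook, and the register reads go through the standard mdio_read hook of struct mii_if_info:

static void my_update_flowctrl(struct mii_if_info *mii)
{
	u16 lcladv = mii->mdio_read(mii->dev, mii->phy_id, MII_ADVERTISE);
	u16 rmtadv = mii->mdio_read(mii->dev, mii->phy_id, MII_LPA);
	u8 cap = mii_resolve_flowctrl_fdx(lcladv, rmtadv);

	/* Program the MAC's pause frame generation/honouring accordingly. */
	my_hw_set_pause(cap & FLOW_CTRL_TX, cap & FLOW_CTRL_RX);
}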
diff --git a/include/linux/mlx4/cmd.h b/include/linux/mlx4/cmd.h
index 77323a72dd3..cf9c679ab38 100644
--- a/include/linux/mlx4/cmd.h
+++ b/include/linux/mlx4/cmd.h
@@ -132,6 +132,15 @@ enum {
MLX4_MAILBOX_SIZE = 4096
};
+enum {
+ /* set port opcode modifiers */
+ MLX4_SET_PORT_GENERAL = 0x0,
+ MLX4_SET_PORT_RQP_CALC = 0x1,
+ MLX4_SET_PORT_MAC_TABLE = 0x2,
+ MLX4_SET_PORT_VLAN_TABLE = 0x3,
+ MLX4_SET_PORT_PRIO_MAP = 0x4,
+};
+
struct mlx4_dev;
struct mlx4_cmd_mailbox {
diff --git a/include/linux/mlx4/device.h b/include/linux/mlx4/device.h
index b2f94446831..8f659cc2996 100644
--- a/include/linux/mlx4/device.h
+++ b/include/linux/mlx4/device.h
@@ -60,6 +60,7 @@ enum {
MLX4_DEV_CAP_FLAG_IPOIB_CSUM = 1 << 7,
MLX4_DEV_CAP_FLAG_BAD_PKEY_CNTR = 1 << 8,
MLX4_DEV_CAP_FLAG_BAD_QKEY_CNTR = 1 << 9,
+ MLX4_DEV_CAP_FLAG_DPDP = 1 << 12,
MLX4_DEV_CAP_FLAG_MEM_WINDOW = 1 << 16,
MLX4_DEV_CAP_FLAG_APM = 1 << 17,
MLX4_DEV_CAP_FLAG_ATOMIC = 1 << 18,
@@ -145,6 +146,29 @@ enum {
MLX4_MTT_FLAG_PRESENT = 1
};
+enum mlx4_qp_region {
+ MLX4_QP_REGION_FW = 0,
+ MLX4_QP_REGION_ETH_ADDR,
+ MLX4_QP_REGION_FC_ADDR,
+ MLX4_QP_REGION_FC_EXCH,
+ MLX4_NUM_QP_REGION
+};
+
+enum mlx4_port_type {
+ MLX4_PORT_TYPE_IB = 1 << 0,
+ MLX4_PORT_TYPE_ETH = 1 << 1,
+};
+
+enum mlx4_special_vlan_idx {
+ MLX4_NO_VLAN_IDX = 0,
+ MLX4_VLAN_MISS_IDX,
+ MLX4_VLAN_REGULAR
+};
+
+enum {
+ MLX4_NUM_FEXCH = 64 * 1024,
+};
+
static inline u64 mlx4_fw_ver(u64 major, u64 minor, u64 subminor)
{
return (major << 32) | (minor << 16) | subminor;
@@ -154,7 +178,10 @@ struct mlx4_caps {
u64 fw_ver;
int num_ports;
int vl_cap[MLX4_MAX_PORTS + 1];
- int mtu_cap[MLX4_MAX_PORTS + 1];
+ int ib_mtu_cap[MLX4_MAX_PORTS + 1];
+ __be32 ib_port_def_cap[MLX4_MAX_PORTS + 1];
+ u64 def_mac[MLX4_MAX_PORTS + 1];
+ int eth_mtu_cap[MLX4_MAX_PORTS + 1];
int gid_table_len[MLX4_MAX_PORTS + 1];
int pkey_table_len[MLX4_MAX_PORTS + 1];
int local_ca_ack_delay;
@@ -169,7 +196,6 @@ struct mlx4_caps {
int max_rq_desc_sz;
int max_qp_init_rdma;
int max_qp_dest_rdma;
- int reserved_qps;
int sqp_start;
int num_srqs;
int max_srq_wqes;
@@ -180,6 +206,7 @@ struct mlx4_caps {
int reserved_cqs;
int num_eqs;
int reserved_eqs;
+ int num_comp_vectors;
int num_mpts;
int num_mtt_segs;
int fmr_reserved_mtts;
@@ -201,6 +228,15 @@ struct mlx4_caps {
u16 stat_rate_support;
u8 port_width_cap[MLX4_MAX_PORTS + 1];
int max_gso_sz;
+ int reserved_qps_cnt[MLX4_NUM_QP_REGION];
+ int reserved_qps;
+ int reserved_qps_base[MLX4_NUM_QP_REGION];
+ int log_num_macs;
+ int log_num_vlans;
+ int log_num_prios;
+ enum mlx4_port_type port_type[MLX4_MAX_PORTS + 1];
+ u8 supported_type[MLX4_MAX_PORTS + 1];
+ u32 port_mask;
};
struct mlx4_buf_list {
@@ -293,6 +329,7 @@ struct mlx4_cq {
int arm_sn;
int cqn;
+ unsigned vector;
atomic_t refcount;
struct completion free;
@@ -355,6 +392,11 @@ struct mlx4_init_port_param {
u64 si_guid;
};
+#define mlx4_foreach_port(port, dev, type) \
+ for ((port) = 1; (port) <= (dev)->caps.num_ports; (port)++) \
+ if (((type) == MLX4_PORT_TYPE_IB ? (dev)->caps.port_mask : \
+ ~(dev)->caps.port_mask) & 1 << ((port) - 1))
+
int mlx4_buf_alloc(struct mlx4_dev *dev, int size, int max_direct,
struct mlx4_buf *buf);
void mlx4_buf_free(struct mlx4_dev *dev, int size, struct mlx4_buf *buf);
@@ -397,10 +439,13 @@ void mlx4_free_hwq_res(struct mlx4_dev *mdev, struct mlx4_hwq_resources *wqres,
int mlx4_cq_alloc(struct mlx4_dev *dev, int nent, struct mlx4_mtt *mtt,
struct mlx4_uar *uar, u64 db_rec, struct mlx4_cq *cq,
- int collapsed);
+ unsigned vector, int collapsed);
void mlx4_cq_free(struct mlx4_dev *dev, struct mlx4_cq *cq);
-int mlx4_qp_alloc(struct mlx4_dev *dev, int sqpn, struct mlx4_qp *qp);
+int mlx4_qp_reserve_range(struct mlx4_dev *dev, int cnt, int align, int *base);
+void mlx4_qp_release_range(struct mlx4_dev *dev, int base_qpn, int cnt);
+
+int mlx4_qp_alloc(struct mlx4_dev *dev, int qpn, struct mlx4_qp *qp);
void mlx4_qp_free(struct mlx4_dev *dev, struct mlx4_qp *qp);
int mlx4_srq_alloc(struct mlx4_dev *dev, u32 pdn, struct mlx4_mtt *mtt,
@@ -416,6 +461,12 @@ int mlx4_multicast_attach(struct mlx4_dev *dev, struct mlx4_qp *qp, u8 gid[16],
int block_mcast_loopback);
int mlx4_multicast_detach(struct mlx4_dev *dev, struct mlx4_qp *qp, u8 gid[16]);
+int mlx4_register_mac(struct mlx4_dev *dev, u8 port, u64 mac, int *index);
+void mlx4_unregister_mac(struct mlx4_dev *dev, u8 port, int index);
+
+int mlx4_register_vlan(struct mlx4_dev *dev, u8 port, u16 vlan, int *index);
+void mlx4_unregister_vlan(struct mlx4_dev *dev, u8 port, int index);
+
int mlx4_map_phys_fmr(struct mlx4_dev *dev, struct mlx4_fmr *fmr, u64 *page_list,
int npages, u64 iova, u32 *lkey, u32 *rkey);
int mlx4_fmr_alloc(struct mlx4_dev *dev, u32 pd, u32 access, int max_pages,
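A sketch using the mlx4_foreach_port() iterator added above to visit only the Ethernet ports of a device; my_init_eth_port() is a hypothetical per-port setup routine:

static void my_setup_eth_ports(struct mlx4_dev *dev)
{
	int port;

	/* Skips ports whose type (per caps.port_mask) is not Ethernet. */
	mlx4_foreach_port(port, dev, MLX4_PORT_TYPE_ETH)
		my_init_eth_port(dev, port);
}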
diff --git a/include/linux/mm.h b/include/linux/mm.h
index c61ba10768e..aaa8b843be2 100644
--- a/include/linux/mm.h
+++ b/include/linux/mm.h
@@ -132,6 +132,11 @@ extern unsigned int kobjsize(const void *objp);
#define VM_RandomReadHint(v) ((v)->vm_flags & VM_RAND_READ)
/*
+ * special vmas that are non-mergeable, non-mlock()able
+ */
+#define VM_SPECIAL (VM_IO | VM_DONTEXPAND | VM_RESERVED | VM_PFNMAP)
+
+/*
* mapping from the currently active vm_flags protection bits (the
* low four bits) to a page protection mask..
*/
@@ -140,6 +145,23 @@ extern pgprot_t protection_map[16];
#define FAULT_FLAG_WRITE 0x01 /* Fault was a write access */
#define FAULT_FLAG_NONLINEAR 0x02 /* Fault was via a nonlinear mapping */
+/*
+ * This interface is used by x86 PAT code to identify a pfn mapping that is
+ * linear over the entire vma. This is to optimize PAT code that deals with
+ * marking the physical region with a particular prot. This is not for generic
+ * mm use. Note also that this check will not work if the pfn mapping is
+ * linear for a vma starting at physical address 0, in which case the PAT code
+ * falls back to the slow path of reserving the physical range page by page.
+ */
+static inline int is_linear_pfn_mapping(struct vm_area_struct *vma)
+{
+ return ((vma->vm_flags & VM_PFNMAP) && vma->vm_pgoff);
+}
+
+static inline int is_pfn_mapping(struct vm_area_struct *vma)
+{
+ return (vma->vm_flags & VM_PFNMAP);
+}
/*
  * vm_fault is filled by the pagefault handler and passed to the vma's
@@ -700,10 +722,10 @@ static inline int page_mapped(struct page *page)
extern void show_free_areas(void);
#ifdef CONFIG_SHMEM
-int shmem_lock(struct file *file, int lock, struct user_struct *user);
+extern int shmem_lock(struct file *file, int lock, struct user_struct *user);
#else
static inline int shmem_lock(struct file *file, int lock,
- struct user_struct *user)
+ struct user_struct *user)
{
return 0;
}
@@ -776,6 +798,8 @@ int copy_page_range(struct mm_struct *dst, struct mm_struct *src,
struct vm_area_struct *vma);
void unmap_mapping_range(struct address_space *mapping,
loff_t const holebegin, loff_t const holelen, int even_cows);
+int follow_phys(struct vm_area_struct *vma, unsigned long address,
+ unsigned int flags, unsigned long *prot, resource_size_t *phys);
int generic_access_phys(struct vm_area_struct *vma, unsigned long addr,
void *buf, int len, int write);
@@ -1281,5 +1305,7 @@ int vmemmap_populate_basepages(struct page *start_page,
int vmemmap_populate(struct page *start_page, unsigned long pages, int node);
void vmemmap_populate_print_last(void);
+extern void *alloc_locked_buffer(size_t size);
+extern void free_locked_buffer(void *buffer, size_t size);
#endif /* __KERNEL__ */
#endif /* _LINUX_MM_H */
diff --git a/include/linux/mm_inline.h b/include/linux/mm_inline.h
index 895bc4e9303..c948350c378 100644
--- a/include/linux/mm_inline.h
+++ b/include/linux/mm_inline.h
@@ -1,40 +1,100 @@
-static inline void
-add_page_to_active_list(struct zone *zone, struct page *page)
-{
- list_add(&page->lru, &zone->active_list);
- __inc_zone_state(zone, NR_ACTIVE);
-}
+#ifndef LINUX_MM_INLINE_H
+#define LINUX_MM_INLINE_H
-static inline void
-add_page_to_inactive_list(struct zone *zone, struct page *page)
+/**
+ * page_is_file_cache - should the page be on a file LRU or anon LRU?
+ * @page: the page to test
+ *
+ * Returns LRU_FILE if @page is a page cache page backed by a regular
+ * filesystem, or 0 if @page is anonymous, tmpfs or otherwise RAM or swap
+ * backed.
+ * Used by functions that manipulate the LRU lists, to sort a page
+ * onto the right LRU list.
+ *
+ * We would like to get this info without a page flag, but the state
+ * needs to survive until the page is last deleted from the LRU, which
+ * could be as far down as __page_cache_release.
+ */
+static inline int page_is_file_cache(struct page *page)
{
- list_add(&page->lru, &zone->inactive_list);
- __inc_zone_state(zone, NR_INACTIVE);
+ if (PageSwapBacked(page))
+ return 0;
+
+ /* The page is page cache backed by a normal filesystem. */
+ return LRU_FILE;
}
static inline void
-del_page_from_active_list(struct zone *zone, struct page *page)
+add_page_to_lru_list(struct zone *zone, struct page *page, enum lru_list l)
{
- list_del(&page->lru);
- __dec_zone_state(zone, NR_ACTIVE);
+ list_add(&page->lru, &zone->lru[l].list);
+ __inc_zone_state(zone, NR_LRU_BASE + l);
}
static inline void
-del_page_from_inactive_list(struct zone *zone, struct page *page)
+del_page_from_lru_list(struct zone *zone, struct page *page, enum lru_list l)
{
list_del(&page->lru);
- __dec_zone_state(zone, NR_INACTIVE);
+ __dec_zone_state(zone, NR_LRU_BASE + l);
}
static inline void
del_page_from_lru(struct zone *zone, struct page *page)
{
+ enum lru_list l = LRU_BASE;
+
list_del(&page->lru);
- if (PageActive(page)) {
- __ClearPageActive(page);
- __dec_zone_state(zone, NR_ACTIVE);
+ if (PageUnevictable(page)) {
+ __ClearPageUnevictable(page);
+ l = LRU_UNEVICTABLE;
} else {
- __dec_zone_state(zone, NR_INACTIVE);
+ if (PageActive(page)) {
+ __ClearPageActive(page);
+ l += LRU_ACTIVE;
+ }
+ l += page_is_file_cache(page);
+ }
+ __dec_zone_state(zone, NR_LRU_BASE + l);
+}
+
+/**
+ * page_lru - which LRU list should a page be on?
+ * @page: the page to test
+ *
+ * Returns the LRU list a page should be on, as an index
+ * into the array of LRU lists.
+ */
+static inline enum lru_list page_lru(struct page *page)
+{
+ enum lru_list lru = LRU_BASE;
+
+ if (PageUnevictable(page))
+ lru = LRU_UNEVICTABLE;
+ else {
+ if (PageActive(page))
+ lru += LRU_ACTIVE;
+ lru += page_is_file_cache(page);
}
+
+ return lru;
}
+/**
+ * inactive_anon_is_low - check if anonymous pages need to be deactivated
+ * @zone: zone to check
+ *
+ * Returns true if the zone does not have enough inactive anon pages,
+ * meaning some active anon pages need to be deactivated.
+ */
+static inline int inactive_anon_is_low(struct zone *zone)
+{
+ unsigned long active, inactive;
+
+ active = zone_page_state(zone, NR_ACTIVE_ANON);
+ inactive = zone_page_state(zone, NR_INACTIVE_ANON);
+
+ if (inactive * zone->inactive_ratio < active)
+ return 1;
+
+ return 0;
+}
+#endif
diff --git a/include/linux/mm_types.h b/include/linux/mm_types.h
index 9d49fa36bbe..9cfc9b627fd 100644
--- a/include/linux/mm_types.h
+++ b/include/linux/mm_types.h
@@ -94,9 +94,6 @@ struct page {
void *virtual; /* Kernel virtual address (NULL if
not kmapped, ie. highmem) */
#endif /* WANT_PAGE_VIRTUAL */
-#ifdef CONFIG_CGROUP_MEM_RES_CTLR
- unsigned long page_cgroup;
-#endif
};
/*
@@ -235,8 +232,9 @@ struct mm_struct {
struct core_state *core_state; /* coredumping support */
/* aio bits */
- rwlock_t ioctx_list_lock; /* aio lock */
- struct kioctx *ioctx_list;
+ spinlock_t ioctx_lock;
+ struct hlist_head ioctx_list;
+
#ifdef CONFIG_MM_OWNER
/*
* "owner" points to a task that is regarded as the canonical
diff --git a/include/linux/mmc/card.h b/include/linux/mmc/card.h
index ee6e822d599..403aa505f27 100644
--- a/include/linux/mmc/card.h
+++ b/include/linux/mmc/card.h
@@ -130,7 +130,7 @@ struct mmc_card {
#define mmc_card_set_blockaddr(c) ((c)->state |= MMC_STATE_BLOCKADDR)
#define mmc_card_name(c) ((c)->cid.prod_name)
-#define mmc_card_id(c) ((c)->dev.bus_id)
+#define mmc_card_id(c) (dev_name(&(c)->dev))
#define mmc_list_to_card(l) container_of(l, struct mmc_card, node)
#define mmc_get_drvdata(c) dev_get_drvdata(&(c)->dev)
diff --git a/include/linux/mmc/host.h b/include/linux/mmc/host.h
index bde891f6459..f842f234e44 100644
--- a/include/linux/mmc/host.h
+++ b/include/linux/mmc/host.h
@@ -176,7 +176,7 @@ static inline void *mmc_priv(struct mmc_host *host)
#define mmc_dev(x) ((x)->parent)
#define mmc_classdev(x) (&(x)->class_dev)
-#define mmc_hostname(x) ((x)->class_dev.bus_id)
+#define mmc_hostname(x) (dev_name(&(x)->class_dev))
extern int mmc_suspend_host(struct mmc_host *, pm_message_t);
extern int mmc_resume_host(struct mmc_host *);
diff --git a/include/linux/mmc/sdio_func.h b/include/linux/mmc/sdio_func.h
index 07bee4a0d45..451bdfc8583 100644
--- a/include/linux/mmc/sdio_func.h
+++ b/include/linux/mmc/sdio_func.h
@@ -63,7 +63,7 @@ struct sdio_func {
#define sdio_func_set_present(f) ((f)->state |= SDIO_STATE_PRESENT)
-#define sdio_func_id(f) ((f)->dev.bus_id)
+#define sdio_func_id(f) (dev_name(&(f)->dev))
#define sdio_get_drvdata(f) dev_get_drvdata(&(f)->dev)
#define sdio_set_drvdata(f,d) dev_set_drvdata(&(f)->dev, d)
diff --git a/include/linux/mmiotrace.h b/include/linux/mmiotrace.h
index 61d19e1b7a0..139d7c88d9c 100644
--- a/include/linux/mmiotrace.h
+++ b/include/linux/mmiotrace.h
@@ -34,11 +34,15 @@ extern void unregister_kmmio_probe(struct kmmio_probe *p);
/* Called from page fault handler. */
extern int kmmio_handler(struct pt_regs *regs, unsigned long addr);
-/* Called from ioremap.c */
#ifdef CONFIG_MMIOTRACE
+/* Called from ioremap.c */
extern void mmiotrace_ioremap(resource_size_t offset, unsigned long size,
void __iomem *addr);
extern void mmiotrace_iounmap(volatile void __iomem *addr);
+
+/* For anyone to insert markers. Remember trailing newline. */
+extern int mmiotrace_printk(const char *fmt, ...)
+ __attribute__ ((format (printf, 1, 2)));
#else
static inline void mmiotrace_ioremap(resource_size_t offset,
unsigned long size, void __iomem *addr)
@@ -48,15 +52,22 @@ static inline void mmiotrace_ioremap(resource_size_t offset,
static inline void mmiotrace_iounmap(volatile void __iomem *addr)
{
}
-#endif /* CONFIG_MMIOTRACE_HOOKS */
+
+static inline int mmiotrace_printk(const char *fmt, ...)
+ __attribute__ ((format (printf, 1, 0)));
+
+static inline int mmiotrace_printk(const char *fmt, ...)
+{
+ return 0;
+}
+#endif /* CONFIG_MMIOTRACE */
enum mm_io_opcode {
MMIO_READ = 0x1, /* struct mmiotrace_rw */
MMIO_WRITE = 0x2, /* struct mmiotrace_rw */
MMIO_PROBE = 0x3, /* struct mmiotrace_map */
MMIO_UNPROBE = 0x4, /* struct mmiotrace_map */
- MMIO_MARKER = 0x5, /* raw char data */
- MMIO_UNKNOWN_OP = 0x6, /* struct mmiotrace_rw */
+ MMIO_UNKNOWN_OP = 0x5, /* struct mmiotrace_rw */
};
struct mmiotrace_rw {
@@ -81,5 +92,6 @@ extern void enable_mmiotrace(void);
extern void disable_mmiotrace(void);
extern void mmio_trace_rw(struct mmiotrace_rw *rw);
extern void mmio_trace_mapping(struct mmiotrace_map *map);
+extern int mmio_trace_printk(const char *fmt, va_list args);
#endif /* MMIOTRACE_H */
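A sketch of dropping a marker into the MMIO trace around a register access, as the mmiotrace_printk() comment above suggests; the device name and register offset are hypothetical:

static void my_reset_device(void __iomem *base)
{
	mmiotrace_printk("mydev: writing RESET register\n"); /* note the \n */
	iowrite32(0x1, base + 0x10);
}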
diff --git a/include/linux/mmzone.h b/include/linux/mmzone.h
index 428328a05fa..35a7b5e1946 100644
--- a/include/linux/mmzone.h
+++ b/include/linux/mmzone.h
@@ -81,21 +81,31 @@ struct zone_padding {
enum zone_stat_item {
/* First 128 byte cacheline (assuming 64 bit words) */
NR_FREE_PAGES,
- NR_INACTIVE,
- NR_ACTIVE,
+ NR_LRU_BASE,
+ NR_INACTIVE_ANON = NR_LRU_BASE, /* must match order of LRU_[IN]ACTIVE */
+ NR_ACTIVE_ANON, /* " " " " " */
+ NR_INACTIVE_FILE, /* " " " " " */
+ NR_ACTIVE_FILE, /* " " " " " */
+#ifdef CONFIG_UNEVICTABLE_LRU
+ NR_UNEVICTABLE, /* " " " " " */
+ NR_MLOCK, /* mlock()ed pages found and moved off LRU */
+#else
+ NR_UNEVICTABLE = NR_ACTIVE_FILE, /* avoid compiler errors in dead code */
+ NR_MLOCK = NR_ACTIVE_FILE,
+#endif
NR_ANON_PAGES, /* Mapped anonymous pages */
NR_FILE_MAPPED, /* pagecache pages mapped into pagetables.
only modified from process context */
NR_FILE_PAGES,
NR_FILE_DIRTY,
NR_WRITEBACK,
- /* Second 128 byte cacheline */
NR_SLAB_RECLAIMABLE,
NR_SLAB_UNRECLAIMABLE,
NR_PAGETABLE, /* used for pagetables */
NR_UNSTABLE_NFS, /* NFS unstable pages */
NR_BOUNCE,
NR_VMSCAN_WRITE,
+ /* Second 128 byte cacheline */
NR_WRITEBACK_TEMP, /* Writeback using temporary buffers */
#ifdef CONFIG_NUMA
NUMA_HIT, /* allocated in intended node */
@@ -107,6 +117,55 @@ enum zone_stat_item {
#endif
NR_VM_ZONE_STAT_ITEMS };
+/*
+ * We do arithmetic on the LRU lists in various places in the code,
+ * so it is important to keep the active lists LRU_ACTIVE higher in
+ * the array than the corresponding inactive lists, and to keep
+ * the *_FILE lists LRU_FILE higher than the corresponding _ANON lists.
+ *
+ * This has to be kept in sync with the statistics in zone_stat_item
+ * above and the descriptions in vmstat_text in mm/vmstat.c
+ */
+#define LRU_BASE 0
+#define LRU_ACTIVE 1
+#define LRU_FILE 2
+
+enum lru_list {
+ LRU_INACTIVE_ANON = LRU_BASE,
+ LRU_ACTIVE_ANON = LRU_BASE + LRU_ACTIVE,
+ LRU_INACTIVE_FILE = LRU_BASE + LRU_FILE,
+ LRU_ACTIVE_FILE = LRU_BASE + LRU_FILE + LRU_ACTIVE,
+#ifdef CONFIG_UNEVICTABLE_LRU
+ LRU_UNEVICTABLE,
+#else
+ LRU_UNEVICTABLE = LRU_ACTIVE_FILE, /* avoid compiler errors in dead code */
+#endif
+ NR_LRU_LISTS
+};
+
+#define for_each_lru(l) for (l = 0; l < NR_LRU_LISTS; l++)
+
+#define for_each_evictable_lru(l) for (l = 0; l <= LRU_ACTIVE_FILE; l++)
+
+static inline int is_file_lru(enum lru_list l)
+{
+ return (l == LRU_INACTIVE_FILE || l == LRU_ACTIVE_FILE);
+}
+
+static inline int is_active_lru(enum lru_list l)
+{
+ return (l == LRU_ACTIVE_ANON || l == LRU_ACTIVE_FILE);
+}
+
+static inline int is_unevictable_lru(enum lru_list l)
+{
+#ifdef CONFIG_UNEVICTABLE_LRU
+ return (l == LRU_UNEVICTABLE);
+#else
+ return 0;
+#endif
+}
+
struct per_cpu_pages {
int count; /* number of pages in the list */
int high; /* high watermark, emptying needed */
@@ -251,10 +310,22 @@ struct zone {
/* Fields commonly accessed by the page reclaim scanner */
spinlock_t lru_lock;
- struct list_head active_list;
- struct list_head inactive_list;
- unsigned long nr_scan_active;
- unsigned long nr_scan_inactive;
+ struct {
+ struct list_head list;
+ unsigned long nr_scan;
+ } lru[NR_LRU_LISTS];
+
+ /*
+ * The pageout code in vmscan.c keeps track of how many of the
+	 * mem/swap backed and file backed pages are referenced.
+ * The higher the rotated/scanned ratio, the more valuable
+ * that cache is.
+ *
+ * The anon LRU stats live in [0], file LRU stats in [1]
+ */
+ unsigned long recent_rotated[2];
+ unsigned long recent_scanned[2];
+
unsigned long pages_scanned; /* since last reclaim */
unsigned long flags; /* zone flags, see below */
@@ -276,6 +347,12 @@ struct zone {
*/
int prev_priority;
+ /*
+ * The target ratio of ACTIVE_ANON to INACTIVE_ANON pages on
+ * this zone's LRU. Maintained by the pageout code.
+ */
+ unsigned int inactive_ratio;
+
ZONE_PADDING(_pad2_)
/* Rarely used or read-mostly fields */
@@ -524,8 +601,11 @@ typedef struct pglist_data {
struct zone node_zones[MAX_NR_ZONES];
struct zonelist node_zonelists[MAX_ZONELISTS];
int nr_zones;
-#ifdef CONFIG_FLAT_NODE_MEM_MAP
+#ifdef CONFIG_FLAT_NODE_MEM_MAP /* means !SPARSEMEM */
struct page *node_mem_map;
+#ifdef CONFIG_CGROUP_MEM_RES_CTLR
+ struct page_cgroup *node_page_cgroup;
+#endif
#endif
struct bootmem_data *bdata;
#ifdef CONFIG_MEMORY_HOTPLUG
@@ -854,6 +934,7 @@ static inline unsigned long early_pfn_to_nid(unsigned long pfn)
#endif
struct page;
+struct page_cgroup;
struct mem_section {
/*
* This is, logically, a pointer to an array of struct
@@ -871,6 +952,14 @@ struct mem_section {
/* See declaration of similar field in struct zone */
unsigned long *pageblock_flags;
+#ifdef CONFIG_CGROUP_MEM_RES_CTLR
+ /*
+	 * With SPARSEMEM, pgdat does not have a page_cgroup pointer, so we
+	 * keep it in the section instead. (See memcontrol.h/page_cgroup.h.)
+ */
+ struct page_cgroup *page_cgroup;
+ unsigned long pad;
+#endif
};
#ifdef CONFIG_SPARSEMEM_EXTREME
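A sketch combining the helpers above: a page goes on the list that page_lru() (mm_inline.h) derives from its flags, under the zone's lru_lock. This is only a minimal illustration of the LRU index arithmetic described in the comment above, not a complete recipe for LRU manipulation:

static void my_place_page(struct zone *zone, struct page *page)
{
	spin_lock_irq(&zone->lru_lock);
	/* page_lru() adds LRU_ACTIVE and/or LRU_FILE to LRU_BASE, so an
	 * active, file-backed page lands on the LRU_ACTIVE_FILE list. */
	add_page_to_lru_list(zone, page, page_lru(page));
	spin_unlock_irq(&zone->lru_lock);
}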
diff --git a/include/linux/mod_devicetable.h b/include/linux/mod_devicetable.h
index d6a3f47e95c..97b91d1abb4 100644
--- a/include/linux/mod_devicetable.h
+++ b/include/linux/mod_devicetable.h
@@ -135,6 +135,7 @@ struct usb_device_id {
struct hid_device_id {
__u16 bus;
+ __u16 pad1;
__u32 vendor;
__u32 product;
kernel_ulong_t driver_data
@@ -284,7 +285,7 @@ struct pcmcia_device_id {
/* Input */
#define INPUT_DEVICE_ID_EV_MAX 0x1f
#define INPUT_DEVICE_ID_KEY_MIN_INTERESTING 0x71
-#define INPUT_DEVICE_ID_KEY_MAX 0x1ff
+#define INPUT_DEVICE_ID_KEY_MAX 0x2ff
#define INPUT_DEVICE_ID_REL_MAX 0x0f
#define INPUT_DEVICE_ID_ABS_MAX 0x3f
#define INPUT_DEVICE_ID_MSC_MAX 0x07
diff --git a/include/linux/module.h b/include/linux/module.h
index 68e09557c95..3bfed013350 100644
--- a/include/linux/module.h
+++ b/include/linux/module.h
@@ -16,6 +16,7 @@
#include <linux/kobject.h>
#include <linux/moduleparam.h>
#include <linux/marker.h>
+#include <linux/tracepoint.h>
#include <asm/local.h>
#include <asm/module.h>
@@ -28,7 +29,7 @@
#define MODULE_SYMBOL_PREFIX ""
#endif
-#define MODULE_NAME_LEN (64 - sizeof(unsigned long))
+#define MODULE_NAME_LEN MAX_PARAM_PREFIX_LEN
struct kernel_symbol
{
@@ -59,6 +60,7 @@ struct module_kobject
struct kobject kobj;
struct module *mod;
struct kobject *drivers_dir;
+ struct module_param_attrs *mp;
};
/* These are either module local, or the kernel's dummy ones. */
@@ -241,7 +243,6 @@ struct module
/* Sysfs stuff. */
struct module_kobject mkobj;
- struct module_param_attrs *param_attrs;
struct module_attribute *modinfo_attrs;
const char *version;
const char *srcversion;
@@ -276,7 +277,7 @@ struct module
/* Exception table */
unsigned int num_exentries;
- const struct exception_table_entry *extable;
+ struct exception_table_entry *extable;
/* Startup function. */
int (*init)(void);
@@ -331,6 +332,10 @@ struct module
struct marker *markers;
unsigned int num_markers;
#endif
+#ifdef CONFIG_TRACEPOINTS
+ struct tracepoint *tracepoints;
+ unsigned int num_tracepoints;
+#endif
#ifdef CONFIG_MODULE_UNLOAD
/* What modules depend on me? */
@@ -345,7 +350,6 @@ struct module
/* Reference counts */
struct module_ref ref[NR_CPUS];
#endif
-
};
#ifndef MODULE_ARCH_INIT
#define MODULE_ARCH_INIT {}
@@ -454,6 +458,9 @@ extern void print_modules(void);
extern void module_update_markers(void);
+extern void module_update_tracepoints(void);
+extern int module_get_iter_tracepoints(struct tracepoint_iter *iter);
+
#else /* !CONFIG_MODULES... */
#define EXPORT_SYMBOL(sym)
#define EXPORT_SYMBOL_GPL(sym)
@@ -558,6 +565,15 @@ static inline void module_update_markers(void)
{
}
+static inline void module_update_tracepoints(void)
+{
+}
+
+static inline int module_get_iter_tracepoints(struct tracepoint_iter *iter)
+{
+ return 0;
+}
+
#endif /* CONFIG_MODULES */
struct device_driver;
diff --git a/include/linux/moduleparam.h b/include/linux/moduleparam.h
index ec624381c84..e4af3399ef4 100644
--- a/include/linux/moduleparam.h
+++ b/include/linux/moduleparam.h
@@ -13,6 +13,9 @@
#define MODULE_PARAM_PREFIX KBUILD_MODNAME "."
#endif
+/* Chosen so that structs with an unsigned long line up. */
+#define MAX_PARAM_PREFIX_LEN (64 - sizeof(unsigned long))
+
#ifdef MODULE
#define ___module_cat(a,b) __mod_ ## a ## b
#define __module_cat(a,b) ___module_cat(a,b)
@@ -79,7 +82,8 @@ struct kparam_array
#define __module_param_call(prefix, name, set, get, arg, perm) \
/* Default value instead of permissions? */ \
static int __param_perm_check_##name __attribute__((unused)) = \
- BUILD_BUG_ON_ZERO((perm) < 0 || (perm) > 0777 || ((perm) & 2)); \
+ BUILD_BUG_ON_ZERO((perm) < 0 || (perm) > 0777 || ((perm) & 2)) \
+ + BUILD_BUG_ON_ZERO(sizeof(""prefix) > MAX_PARAM_PREFIX_LEN); \
static const char __param_str_##name[] = prefix #name; \
static struct kernel_param __moduleparam_const __param_##name \
__used \
@@ -100,6 +104,25 @@ struct kparam_array
#define module_param(name, type, perm) \
module_param_named(name, name, type, perm)
+#ifndef MODULE
+/**
+ * core_param - define a historical core kernel parameter.
+ * @name: the name of the cmdline and sysfs parameter (often the same as var)
+ * @var: the variable
+ * @type: the type (for param_set_##type and param_get_##type)
+ * @perm: visibility in sysfs
+ *
+ * core_param is just like module_param(), but cannot be modular and
+ * doesn't add a prefix (such as "printk."). This is for compatibility
+ * with __setup(), and it makes sense as truly core parameters aren't
+ * tied to the particular file they're in.
+ */
+#define core_param(name, var, type, perm) \
+ param_check_##type(name, &(var)); \
+ __module_param_call("", name, param_set_##type, param_get_##type, \
+ &var, perm)
+#endif /* !MODULE */
+
/* Actually copy string: maxlen param is usually sizeof(string). */
#define module_param_string(name, string, len, perm) \
static const struct kparam_string __param_string_##name \
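A sketch of core_param() as documented above; the variable and parameter name are hypothetical, and this applies only to built-in code (the macro is not defined for modules):

static int my_core_level = 3;
core_param(my_core_level, my_core_level, int, 0644);

The parameter can then be set as "my_core_level=5" on the kernel command line, with no module-name prefix, and read back through sysfs with the given permissions.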
diff --git a/include/linux/mount.h b/include/linux/mount.h
index 30a1d63b6fb..cab2a85e2ee 100644
--- a/include/linux/mount.h
+++ b/include/linux/mount.h
@@ -5,8 +5,6 @@
*
* Author: Marco van Wieringen <mvw@planets.elm.net>
*
- * Version: $Id: mount.h,v 2.0 1996/11/17 16:48:14 mvw Exp mvw $
- *
*/
#ifndef _LINUX_MOUNT_H
#define _LINUX_MOUNT_H
diff --git a/include/linux/mroute6.h b/include/linux/mroute6.h
index 6f4c180179e..5375faca1f7 100644
--- a/include/linux/mroute6.h
+++ b/include/linux/mroute6.h
@@ -117,6 +117,7 @@ struct sioc_mif_req6
#include <linux/pim.h>
#include <linux/skbuff.h> /* for struct sk_buff_head */
+#include <net/net_namespace.h>
#ifdef CONFIG_IPV6_MROUTE
static inline int ip6_mroute_opt(int opt)
@@ -187,6 +188,9 @@ struct mif_device
struct mfc6_cache
{
struct mfc6_cache *next; /* Next entry on cache line */
+#ifdef CONFIG_NET_NS
+ struct net *mfc6_net;
+#endif
struct in6_addr mf6c_mcastgrp; /* Group the entry belongs to */
struct in6_addr mf6c_origin; /* Source of packet */
mifi_t mf6c_parent; /* Source interface */
@@ -209,6 +213,18 @@ struct mfc6_cache
} mfc_un;
};
+static inline
+struct net *mfc6_net(const struct mfc6_cache *mfc)
+{
+ return read_pnet(&mfc->mfc6_net);
+}
+
+static inline
+void mfc6_net_set(struct mfc6_cache *mfc, struct net *net)
+{
+ write_pnet(&mfc->mfc6_net, hold_net(net));
+}
+
#define MFC_STATIC 1
#define MFC_NOTIFY 2
@@ -229,13 +245,17 @@ struct mfc6_cache
#ifdef __KERNEL__
struct rtmsg;
-extern int ip6mr_get_route(struct sk_buff *skb, struct rtmsg *rtm, int nowait);
+extern int ip6mr_get_route(struct net *net, struct sk_buff *skb,
+ struct rtmsg *rtm, int nowait);
#ifdef CONFIG_IPV6_MROUTE
-extern struct sock *mroute6_socket;
+static inline struct sock *mroute6_socket(struct net *net)
+{
+ return net->ipv6.mroute6_sk;
+}
extern int ip6mr_sk_done(struct sock *sk);
#else
-#define mroute6_socket NULL
+static inline struct sock *mroute6_socket(struct net *net) { return NULL; }
static inline int ip6mr_sk_done(struct sock *sk) { return 0; }
#endif
#endif
diff --git a/include/linux/msdos_fs.h b/include/linux/msdos_fs.h
index ba63858056c..e0a9b207920 100644
--- a/include/linux/msdos_fs.h
+++ b/include/linux/msdos_fs.h
@@ -46,11 +46,6 @@
#define DELETED_FLAG 0xe5 /* marks file as deleted when in name[0] */
#define IS_FREE(n) (!*(n) || *(n) == DELETED_FLAG)
-/* valid file mode bits */
-#define MSDOS_VALID_MODE (S_IFREG | S_IFDIR | S_IRWXU | S_IRWXG | S_IRWXO)
-/* Convert attribute bits and a mask to the UNIX mode. */
-#define MSDOS_MKMODE(a, m) (m & (a & ATTR_RO ? S_IRUGO|S_IXUGO : S_IRWXUGO))
-
#define MSDOS_NAME 11 /* maximum name length */
#define MSDOS_LONGNAME 256 /* maximum name length */
#define MSDOS_SLOTS 21 /* max # of slots for short and long names */
@@ -167,282 +162,10 @@ struct msdos_dir_slot {
};
#ifdef __KERNEL__
-
-#include <linux/buffer_head.h>
-#include <linux/string.h>
-#include <linux/nls.h>
-#include <linux/fs.h>
-#include <linux/mutex.h>
-
-/*
- * vfat shortname flags
- */
-#define VFAT_SFN_DISPLAY_LOWER 0x0001 /* convert to lowercase for display */
-#define VFAT_SFN_DISPLAY_WIN95 0x0002 /* emulate win95 rule for display */
-#define VFAT_SFN_DISPLAY_WINNT 0x0004 /* emulate winnt rule for display */
-#define VFAT_SFN_CREATE_WIN95 0x0100 /* emulate win95 rule for create */
-#define VFAT_SFN_CREATE_WINNT 0x0200 /* emulate winnt rule for create */
-
-struct fat_mount_options {
- uid_t fs_uid;
- gid_t fs_gid;
- unsigned short fs_fmask;
- unsigned short fs_dmask;
- unsigned short codepage; /* Codepage for shortname conversions */
- char *iocharset; /* Charset used for filename input/display */
- unsigned short shortname; /* flags for shortname display/create rule */
- unsigned char name_check; /* r = relaxed, n = normal, s = strict */
- unsigned short allow_utime;/* permission for setting the [am]time */
- unsigned quiet:1, /* set = fake successful chmods and chowns */
- showexec:1, /* set = only set x bit for com/exe/bat */
- sys_immutable:1, /* set = system files are immutable */
- dotsOK:1, /* set = hidden and system files are named '.filename' */
- isvfat:1, /* 0=no vfat long filename support, 1=vfat support */
- utf8:1, /* Use of UTF-8 character set (Default) */
- unicode_xlate:1, /* create escape sequences for unhandled Unicode */
- numtail:1, /* Does first alias have a numeric '~1' type tail? */
- flush:1, /* write things quickly */
- nocase:1, /* Does this need case conversion? 0=need case conversion*/
- usefree:1, /* Use free_clusters for FAT32 */
- tz_utc:1; /* Filesystem timestamps are in UTC */
-};
-
-#define FAT_HASH_BITS 8
-#define FAT_HASH_SIZE (1UL << FAT_HASH_BITS)
-#define FAT_HASH_MASK (FAT_HASH_SIZE-1)
-
-/*
- * MS-DOS file system in-core superblock data
- */
-struct msdos_sb_info {
- unsigned short sec_per_clus; /* sectors/cluster */
- unsigned short cluster_bits; /* log2(cluster_size) */
- unsigned int cluster_size; /* cluster size */
- unsigned char fats,fat_bits; /* number of FATs, FAT bits (12 or 16) */
- unsigned short fat_start;
- unsigned long fat_length; /* FAT start & length (sec.) */
- unsigned long dir_start;
- unsigned short dir_entries; /* root dir start & entries */
- unsigned long data_start; /* first data sector */
- unsigned long max_cluster; /* maximum cluster number */
- unsigned long root_cluster; /* first cluster of the root directory */
- unsigned long fsinfo_sector; /* sector number of FAT32 fsinfo */
- struct mutex fat_lock;
- unsigned int prev_free; /* previously allocated cluster number */
- unsigned int free_clusters; /* -1 if undefined */
- unsigned int free_clus_valid; /* is free_clusters valid? */
- struct fat_mount_options options;
- struct nls_table *nls_disk; /* Codepage used on disk */
- struct nls_table *nls_io; /* Charset used for input and display */
- const void *dir_ops; /* Opaque; default directory operations */
- int dir_per_block; /* dir entries per block */
- int dir_per_block_bits; /* log2(dir_per_block) */
-
- int fatent_shift;
- struct fatent_operations *fatent_ops;
-
- spinlock_t inode_hash_lock;
- struct hlist_head inode_hashtable[FAT_HASH_SIZE];
-};
-
-#define FAT_CACHE_VALID 0 /* special case for valid cache */
-
-/*
- * MS-DOS file system inode data in memory
- */
-struct msdos_inode_info {
- spinlock_t cache_lru_lock;
- struct list_head cache_lru;
- int nr_caches;
- /* for avoiding the race between fat_free() and fat_get_cluster() */
- unsigned int cache_valid_id;
-
- loff_t mmu_private;
- int i_start; /* first cluster or 0 */
- int i_logstart; /* logical first cluster */
- int i_attrs; /* unused attribute bits */
- loff_t i_pos; /* on-disk position of directory entry or 0 */
- struct hlist_node i_fat_hash; /* hash by i_location */
- struct inode vfs_inode;
-};
-
-struct fat_slot_info {
- loff_t i_pos; /* on-disk position of directory entry */
- loff_t slot_off; /* offset for slot or de start */
- int nr_slots; /* number of slots + 1(de) in filename */
- struct msdos_dir_entry *de;
- struct buffer_head *bh;
-};
-
-static inline struct msdos_sb_info *MSDOS_SB(struct super_block *sb)
-{
- return sb->s_fs_info;
-}
-
-static inline struct msdos_inode_info *MSDOS_I(struct inode *inode)
-{
- return container_of(inode, struct msdos_inode_info, vfs_inode);
-}
-
-/* Return the FAT attribute byte for this inode */
-static inline u8 fat_attr(struct inode *inode)
-{
- return ((inode->i_mode & S_IWUGO) ? ATTR_NONE : ATTR_RO) |
- (S_ISDIR(inode->i_mode) ? ATTR_DIR : ATTR_NONE) |
- MSDOS_I(inode)->i_attrs;
-}
-
-static inline unsigned char fat_checksum(const __u8 *name)
-{
- unsigned char s = name[0];
- s = (s<<7) + (s>>1) + name[1]; s = (s<<7) + (s>>1) + name[2];
- s = (s<<7) + (s>>1) + name[3]; s = (s<<7) + (s>>1) + name[4];
- s = (s<<7) + (s>>1) + name[5]; s = (s<<7) + (s>>1) + name[6];
- s = (s<<7) + (s>>1) + name[7]; s = (s<<7) + (s>>1) + name[8];
- s = (s<<7) + (s>>1) + name[9]; s = (s<<7) + (s>>1) + name[10];
- return s;
-}
-
-static inline sector_t fat_clus_to_blknr(struct msdos_sb_info *sbi, int clus)
-{
- return ((sector_t)clus - FAT_START_ENT) * sbi->sec_per_clus
- + sbi->data_start;
-}
-
-static inline void fat16_towchar(wchar_t *dst, const __u8 *src, size_t len)
-{
-#ifdef __BIG_ENDIAN
- while (len--) {
- *dst++ = src[0] | (src[1] << 8);
- src += 2;
- }
-#else
- memcpy(dst, src, len * 2);
-#endif
-}
-
-static inline void fatwchar_to16(__u8 *dst, const wchar_t *src, size_t len)
-{
-#ifdef __BIG_ENDIAN
- while (len--) {
- dst[0] = *src & 0x00FF;
- dst[1] = (*src & 0xFF00) >> 8;
- dst += 2;
- src++;
- }
-#else
- memcpy(dst, src, len * 2);
-#endif
-}
-
/* media of boot sector */
static inline int fat_valid_media(u8 media)
{
return 0xf8 <= media || media == 0xf0;
}
-
-/* fat/cache.c */
-extern void fat_cache_inval_inode(struct inode *inode);
-extern int fat_get_cluster(struct inode *inode, int cluster,
- int *fclus, int *dclus);
-extern int fat_bmap(struct inode *inode, sector_t sector, sector_t *phys,
- unsigned long *mapped_blocks);
-
-/* fat/dir.c */
-extern const struct file_operations fat_dir_operations;
-extern int fat_search_long(struct inode *inode, const unsigned char *name,
- int name_len, struct fat_slot_info *sinfo);
-extern int fat_dir_empty(struct inode *dir);
-extern int fat_subdirs(struct inode *dir);
-extern int fat_scan(struct inode *dir, const unsigned char *name,
- struct fat_slot_info *sinfo);
-extern int fat_get_dotdot_entry(struct inode *dir, struct buffer_head **bh,
- struct msdos_dir_entry **de, loff_t *i_pos);
-extern int fat_alloc_new_dir(struct inode *dir, struct timespec *ts);
-extern int fat_add_entries(struct inode *dir, void *slots, int nr_slots,
- struct fat_slot_info *sinfo);
-extern int fat_remove_entries(struct inode *dir, struct fat_slot_info *sinfo);
-
-/* fat/fatent.c */
-struct fat_entry {
- int entry;
- union {
- u8 *ent12_p[2];
- __le16 *ent16_p;
- __le32 *ent32_p;
- } u;
- int nr_bhs;
- struct buffer_head *bhs[2];
-};
-
-static inline void fatent_init(struct fat_entry *fatent)
-{
- fatent->nr_bhs = 0;
- fatent->entry = 0;
- fatent->u.ent32_p = NULL;
- fatent->bhs[0] = fatent->bhs[1] = NULL;
-}
-
-static inline void fatent_set_entry(struct fat_entry *fatent, int entry)
-{
- fatent->entry = entry;
- fatent->u.ent32_p = NULL;
-}
-
-static inline void fatent_brelse(struct fat_entry *fatent)
-{
- int i;
- fatent->u.ent32_p = NULL;
- for (i = 0; i < fatent->nr_bhs; i++)
- brelse(fatent->bhs[i]);
- fatent->nr_bhs = 0;
- fatent->bhs[0] = fatent->bhs[1] = NULL;
-}
-
-extern void fat_ent_access_init(struct super_block *sb);
-extern int fat_ent_read(struct inode *inode, struct fat_entry *fatent,
- int entry);
-extern int fat_ent_write(struct inode *inode, struct fat_entry *fatent,
- int new, int wait);
-extern int fat_alloc_clusters(struct inode *inode, int *cluster,
- int nr_cluster);
-extern int fat_free_clusters(struct inode *inode, int cluster);
-extern int fat_count_free_clusters(struct super_block *sb);
-
-/* fat/file.c */
-extern int fat_generic_ioctl(struct inode *inode, struct file *filp,
- unsigned int cmd, unsigned long arg);
-extern const struct file_operations fat_file_operations;
-extern const struct inode_operations fat_file_inode_operations;
-extern int fat_setattr(struct dentry * dentry, struct iattr * attr);
-extern void fat_truncate(struct inode *inode);
-extern int fat_getattr(struct vfsmount *mnt, struct dentry *dentry,
- struct kstat *stat);
-
-/* fat/inode.c */
-extern void fat_attach(struct inode *inode, loff_t i_pos);
-extern void fat_detach(struct inode *inode);
-extern struct inode *fat_iget(struct super_block *sb, loff_t i_pos);
-extern struct inode *fat_build_inode(struct super_block *sb,
- struct msdos_dir_entry *de, loff_t i_pos);
-extern int fat_sync_inode(struct inode *inode);
-extern int fat_fill_super(struct super_block *sb, void *data, int silent,
- const struct inode_operations *fs_dir_inode_ops, int isvfat);
-
-extern int fat_flush_inodes(struct super_block *sb, struct inode *i1,
- struct inode *i2);
-/* fat/misc.c */
-extern void fat_fs_panic(struct super_block *s, const char *fmt, ...);
-extern void fat_clusters_flush(struct super_block *sb);
-extern int fat_chain_add(struct inode *inode, int new_dclus, int nr_cluster);
-extern int date_dos2unix(unsigned short time, unsigned short date, int tz_utc);
-extern void fat_date_unix2dos(int unix_date, __le16 *time, __le16 *date,
- int tz_utc);
-extern int fat_sync_bhs(struct buffer_head **bhs, int nr_bhs);
-
-int fat_cache_init(void);
-void fat_cache_destroy(void);
-
-#endif /* __KERNEL__ */
-
-#endif
+#endif /* !__KERNEL__ */
+#endif /* !_LINUX_MSDOS_FS_H */
diff --git a/include/linux/msi.h b/include/linux/msi.h
index 8f293922720..d2b8a1e8ca1 100644
--- a/include/linux/msi.h
+++ b/include/linux/msi.h
@@ -10,8 +10,11 @@ struct msi_msg {
};
/* Helper functions */
+struct irq_desc;
extern void mask_msi_irq(unsigned int irq);
extern void unmask_msi_irq(unsigned int irq);
+extern void read_msi_msg_desc(struct irq_desc *desc, struct msi_msg *msg);
+extern void write_msi_msg_desc(struct irq_desc *desc, struct msi_msg *msg);
extern void read_msi_msg(unsigned int irq, struct msi_msg *msg);
extern void write_msi_msg(unsigned int irq, struct msi_msg *msg);
diff --git a/include/linux/mtd/cfi.h b/include/linux/mtd/cfi.h
index d6fb115f5a0..00e2b575021 100644
--- a/include/linux/mtd/cfi.h
+++ b/include/linux/mtd/cfi.h
@@ -12,6 +12,7 @@
#include <linux/mtd/flashchip.h>
#include <linux/mtd/map.h>
#include <linux/mtd/cfi_endian.h>
+#include <linux/mtd/xip.h>
#ifdef CONFIG_MTD_CFI_I1
#define cfi_interleave(cfi) 1
@@ -281,9 +282,25 @@ struct cfi_private {
/*
* Returns the command address according to the given geometry.
*/
-static inline uint32_t cfi_build_cmd_addr(uint32_t cmd_ofs, int interleave, int type)
+static inline uint32_t cfi_build_cmd_addr(uint32_t cmd_ofs,
+ struct map_info *map, struct cfi_private *cfi)
{
- return (cmd_ofs * type) * interleave;
+ unsigned bankwidth = map_bankwidth(map);
+ unsigned interleave = cfi_interleave(cfi);
+ unsigned type = cfi->device_type;
+ uint32_t addr;
+
+ addr = (cmd_ofs * type) * interleave;
+
+ /* Modify the unlock address if we are in compatibility mode.
+ * For 16-bit devices on 8-bit busses and 32-bit devices on
+ * 16-bit busses, set the low bit of the alternating bit
+ * sequence of the address.
+ */
+ if (((type * interleave) > bankwidth) && ((uint8_t)cmd_ofs == 0xaa))
+ addr |= (type >> 1)*interleave;
+
+ return addr;
}
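A worked example of the new address computation, under an assumed geometry (a single x16 chip wired to an 8-bit bus, so type = 2, interleave = 1, bankwidth = 1):

	/*
	 * cmd_ofs = 0x555:  addr = 0x555 * 2 * 1 = 0xAAA
	 *                   (low byte is 0x55, so no fixup)
	 * cmd_ofs = 0x2AA:  addr = 0x2AA * 2 * 1 = 0x554,
	 *                   then addr |= (2 >> 1) * 1  ->  0x555
	 *
	 * which are the byte-mode unlock addresses such a chip expects.
	 */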
/*
@@ -429,8 +446,7 @@ static inline uint32_t cfi_send_gen_cmd(u_char cmd, uint32_t cmd_addr, uint32_t
int type, map_word *prev_val)
{
map_word val;
- uint32_t addr = base + cfi_build_cmd_addr(cmd_addr, cfi_interleave(cfi), type);
-
+ uint32_t addr = base + cfi_build_cmd_addr(cmd_addr, map, cfi);
val = cfi_build_cmd(cmd, map, cfi);
if (prev_val)
@@ -483,6 +499,13 @@ static inline void cfi_udelay(int us)
}
}
+int __xipram cfi_qry_present(struct map_info *map, __u32 base,
+ struct cfi_private *cfi);
+int __xipram cfi_qry_mode_on(uint32_t base, struct map_info *map,
+ struct cfi_private *cfi);
+void __xipram cfi_qry_mode_off(uint32_t base, struct map_info *map,
+ struct cfi_private *cfi);
+
struct cfi_extquery *cfi_read_pri(struct map_info *map, uint16_t adr, uint16_t size,
const char* name);
struct cfi_fixup {
diff --git a/include/linux/mtd/flashchip.h b/include/linux/mtd/flashchip.h
index 08dd131301c..d4f38c5fd44 100644
--- a/include/linux/mtd/flashchip.h
+++ b/include/linux/mtd/flashchip.h
@@ -73,6 +73,10 @@ struct flchip {
int buffer_write_time;
int erase_time;
+ int word_write_time_max;
+ int buffer_write_time_max;
+ int erase_time_max;
+
void *priv;
};
diff --git a/include/linux/mtd/mtd.h b/include/linux/mtd/mtd.h
index 92263654855..eae26bb6430 100644
--- a/include/linux/mtd/mtd.h
+++ b/include/linux/mtd/mtd.h
@@ -25,8 +25,10 @@
#define MTD_ERASE_DONE 0x08
#define MTD_ERASE_FAILED 0x10
+#define MTD_FAIL_ADDR_UNKNOWN 0xffffffff
+
/* If the erase fails, fail_addr might indicate exactly which block failed. If
- fail_addr = 0xffffffff, the failure was not at the device level or was not
+ fail_addr = MTD_FAIL_ADDR_UNKNOWN, the failure was not at the device level or was not
specific to any particular block. */
struct erase_info {
struct mtd_info *mtd;
diff --git a/include/linux/mtd/nand-gpio.h b/include/linux/mtd/nand-gpio.h
new file mode 100644
index 00000000000..51534e50f7f
--- /dev/null
+++ b/include/linux/mtd/nand-gpio.h
@@ -0,0 +1,19 @@
+#ifndef __LINUX_MTD_NAND_GPIO_H
+#define __LINUX_MTD_NAND_GPIO_H
+
+#include <linux/mtd/nand.h>
+
+struct gpio_nand_platdata {
+ int gpio_nce;
+ int gpio_nwp;
+ int gpio_cle;
+ int gpio_ale;
+ int gpio_rdy;
+ void (*adjust_parts)(struct gpio_nand_platdata *, size_t);
+ struct mtd_partition *parts;
+ unsigned int num_parts;
+ unsigned int options;
+ int chip_delay;
+};
+
+#endif
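A sketch of how board code might fill in this platform data; the GPIO numbers, partition layout and chip delay below are purely illustrative:

static struct mtd_partition foo_nand_parts[] = {
	{ .name = "boot",   .offset = 0,                  .size = 1024 * 1024 },
	{ .name = "rootfs", .offset = MTDPART_OFS_APPEND, .size = MTDPART_SIZ_FULL },
};

static struct gpio_nand_platdata foo_nand_pdata = {
	.gpio_nce   = 11,
	.gpio_nwp   = 12,
	.gpio_cle   = 13,
	.gpio_ale   = 14,
	.gpio_rdy   = 15,
	.parts      = foo_nand_parts,
	.num_parts  = ARRAY_SIZE(foo_nand_parts),
	.chip_delay = 25,
};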
diff --git a/include/linux/mtd/nand.h b/include/linux/mtd/nand.h
index 81774e5facf..733d3f3b4eb 100644
--- a/include/linux/mtd/nand.h
+++ b/include/linux/mtd/nand.h
@@ -248,6 +248,7 @@ struct nand_hw_control {
* @read_page_raw: function to read a raw page without ECC
* @write_page_raw: function to write a raw page without ECC
* @read_page: function to read a page according to the ecc generator requirements
+ * @read_subpage: function to read parts of the page covered by ECC.
* @write_page: function to write a page according to the ecc generator requirements
* @read_oob: function to read chip OOB data
* @write_oob: function to write chip OOB data
diff --git a/include/linux/mtd/onenand_regs.h b/include/linux/mtd/onenand_regs.h
index d1b310c92eb..0c6bbe28f38 100644
--- a/include/linux/mtd/onenand_regs.h
+++ b/include/linux/mtd/onenand_regs.h
@@ -152,6 +152,8 @@
#define ONENAND_SYS_CFG1_INT (1 << 6)
#define ONENAND_SYS_CFG1_IOBE (1 << 5)
#define ONENAND_SYS_CFG1_RDY_CONF (1 << 4)
+#define ONENAND_SYS_CFG1_HF (1 << 2)
+#define ONENAND_SYS_CFG1_SYNC_WRITE (1 << 1)
/*
* Controller Status Register F240h (R)
diff --git a/include/linux/mtd/partitions.h b/include/linux/mtd/partitions.h
index 5014f7a9f5d..c92b4d43960 100644
--- a/include/linux/mtd/partitions.h
+++ b/include/linux/mtd/partitions.h
@@ -73,7 +73,6 @@ struct device;
struct device_node;
int __devinit of_mtd_parse_partitions(struct device *dev,
- struct mtd_info *mtd,
struct device_node *node,
struct mtd_partition **pparts);
diff --git a/include/linux/mtd/sh_flctl.h b/include/linux/mtd/sh_flctl.h
new file mode 100644
index 00000000000..e77c1cea404
--- /dev/null
+++ b/include/linux/mtd/sh_flctl.h
@@ -0,0 +1,125 @@
+/*
+ * SuperH FLCTL nand controller
+ *
+ * Copyright © 2008 Renesas Solutions Corp.
+ *
+ * This program is free software; you can redistribute it and/or modify
+ * it under the terms of the GNU General Public License as published by
+ * the Free Software Foundation; version 2 of the License.
+ *
+ * This program is distributed in the hope that it will be useful,
+ * but WITHOUT ANY WARRANTY; without even the implied warranty of
+ * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
+ * GNU General Public License for more details.
+ *
+ * You should have received a copy of the GNU General Public License
+ * along with this program; if not, write to the Free Software
+ * Foundation, Inc., 51 Franklin St, Fifth Floor, Boston, MA 02110-1301 USA
+ */
+
+#ifndef __SH_FLCTL_H__
+#define __SH_FLCTL_H__
+
+#include <linux/mtd/mtd.h>
+#include <linux/mtd/nand.h>
+#include <linux/mtd/partitions.h>
+
+/* FLCTL registers */
+#define FLCMNCR(f) (f->reg + 0x0)
+#define FLCMDCR(f) (f->reg + 0x4)
+#define FLCMCDR(f) (f->reg + 0x8)
+#define FLADR(f) (f->reg + 0xC)
+#define FLADR2(f) (f->reg + 0x3C)
+#define FLDATAR(f) (f->reg + 0x10)
+#define FLDTCNTR(f) (f->reg + 0x14)
+#define FLINTDMACR(f) (f->reg + 0x18)
+#define FLBSYTMR(f) (f->reg + 0x1C)
+#define FLBSYCNT(f) (f->reg + 0x20)
+#define FLDTFIFO(f) (f->reg + 0x24)
+#define FLECFIFO(f) (f->reg + 0x28)
+#define FLTRCR(f) (f->reg + 0x2C)
+#define FL4ECCRESULT0(f) (f->reg + 0x80)
+#define FL4ECCRESULT1(f) (f->reg + 0x84)
+#define FL4ECCRESULT2(f) (f->reg + 0x88)
+#define FL4ECCRESULT3(f) (f->reg + 0x8C)
+#define FL4ECCCR(f) (f->reg + 0x90)
+#define FL4ECCCNT(f) (f->reg + 0x94)
+#define FLERRADR(f) (f->reg + 0x98)
+
+/* FLCMNCR control bits */
+#define ECCPOS2 (0x1 << 25)
+#define _4ECCCNTEN (0x1 << 24)
+#define _4ECCEN (0x1 << 23)
+#define _4ECCCORRECT (0x1 << 22)
+#define SNAND_E (0x1 << 18) /* SNAND (0=512 1=2048)*/
+#define QTSEL_E (0x1 << 17)
+#define ENDIAN (0x1 << 16) /* 1 = little endian */
+#define FCKSEL_E (0x1 << 15)
+#define ECCPOS_00 (0x00 << 12)
+#define ECCPOS_01 (0x01 << 12)
+#define ECCPOS_02 (0x02 << 12)
+#define ACM_SACCES_MODE (0x01 << 10)
+#define NANWF_E (0x1 << 9)
+#define SE_D (0x1 << 8) /* Spare area disable */
+#define CE1_ENABLE (0x1 << 4) /* Chip Enable 1 */
+#define CE0_ENABLE (0x1 << 3) /* Chip Enable 0 */
+#define TYPESEL_SET (0x1 << 0)
+
+/* FLCMDCR control bits */
+#define ADRCNT2_E (0x1 << 31) /* 5byte address enable */
+#define ADRMD_E (0x1 << 26) /* Sector address access */
+#define CDSRC_E (0x1 << 25) /* Data buffer selection */
+#define DOSR_E (0x1 << 24) /* Status read check */
+#define SELRW (0x1 << 21) /* 0:read 1:write */
+#define DOADR_E (0x1 << 20) /* Address stage execute */
+#define ADRCNT_1 (0x00 << 18) /* Address data bytes: 1byte */
+#define ADRCNT_2 (0x01 << 18) /* Address data bytes: 2byte */
+#define ADRCNT_3 (0x02 << 18) /* Address data bytes: 3byte */
+#define ADRCNT_4 (0x03 << 18) /* Address data bytes: 4byte */
+#define DOCMD2_E (0x1 << 17) /* 2nd cmd stage execute */
+#define DOCMD1_E (0x1 << 16) /* 1st cmd stage execute */
+
+/* FLTRCR control bits */
+#define TRSTRT (0x1 << 0) /* translation start */
+#define TREND (0x1 << 1) /* translation end */
+
+/* FL4ECCCR control bits */
+#define _4ECCFA (0x1 << 2) /* 4 symbols correct fault */
+#define _4ECCEND (0x1 << 1) /* 4 symbols end */
+#define _4ECCEXST (0x1 << 0) /* 4 symbols exist */
+
+#define INIT_FL4ECCRESULT_VAL 0x03FF03FF
+#define LOOP_TIMEOUT_MAX 0x00010000
+
+#define mtd_to_flctl(mtd) container_of(mtd, struct sh_flctl, mtd)
+
+struct sh_flctl {
+ struct mtd_info mtd;
+ struct nand_chip chip;
+ void __iomem *reg;
+
+ uint8_t done_buff[2048 + 64]; /* max size 2048 + 64 */
+ int read_bytes;
+ int index;
+ int seqin_column; /* column in SEQIN cmd */
+ int seqin_page_addr; /* page_addr in SEQIN cmd */
+ uint32_t seqin_read_cmd; /* read cmd in SEQIN cmd */
+ int erase1_page_addr; /* page_addr in ERASE1 cmd */
+ uint32_t erase_ADRCNT; /* bits of FLCMDCR in ERASE1 cmd */
+ uint32_t rw_ADRCNT; /* bits of FLCMDCR in READ WRITE cmd */
+
+ int hwecc_cant_correct[4];
+
+ unsigned page_size:1; /* NAND page size (0 = 512, 1 = 2048) */
+ unsigned hwecc:1; /* Hardware ECC (0 = disabled, 1 = enabled) */
+};
+
+struct sh_flctl_platform_data {
+ struct mtd_partition *parts;
+ int nr_parts;
+ unsigned long flcmncr_val;
+
+ unsigned has_hwecc:1;
+};
+
+#endif /* __SH_FLCTL_H__ */
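A sketch of the matching board-side platform data; the partition table and the FLCMNCR bits chosen here are illustrative only:

static struct mtd_partition flctl_parts[] = {
	{ .name = "kernel", .offset = 0,                  .size = 2 * 1024 * 1024 },
	{ .name = "data",   .offset = MTDPART_OFS_APPEND, .size = MTDPART_SIZ_FULL },
};

static struct sh_flctl_platform_data flctl_pdata = {
	.parts       = flctl_parts,
	.nr_parts    = ARRAY_SIZE(flctl_parts),
	.flcmncr_val = FCKSEL_E | TYPESEL_SET | NANWF_E,
	.has_hwecc   = 1,
};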
diff --git a/include/linux/mutex.h b/include/linux/mutex.h
index bc6da10ceee..7a0e5c4f807 100644
--- a/include/linux/mutex.h
+++ b/include/linux/mutex.h
@@ -144,6 +144,8 @@ extern int __must_check mutex_lock_killable(struct mutex *lock);
/*
* NOTE: mutex_trylock() follows the spin_trylock() convention,
* not the down_trylock() convention!
+ *
+ * Returns 1 if the mutex has been acquired successfully, and 0 on contention.
*/
extern int mutex_trylock(struct mutex *lock);
extern void mutex_unlock(struct mutex *lock);
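The added note matters because the return convention is the opposite of down_trylock(); a minimal usage sketch (my_mutex and do_work() are hypothetical):

	if (mutex_trylock(&my_mutex)) {
		/* got the lock: 1 means success here */
		do_work();
		mutex_unlock(&my_mutex);
	} else {
		/* 0 means contention: fall back or retry later */
	}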
diff --git a/include/linux/namei.h b/include/linux/namei.h
index 68f8c3203c8..99eb80306dc 100644
--- a/include/linux/namei.h
+++ b/include/linux/namei.h
@@ -51,8 +51,10 @@ enum {LAST_NORM, LAST_ROOT, LAST_DOT, LAST_DOTDOT, LAST_BIND};
/*
* Intent data
*/
-#define LOOKUP_OPEN (0x0100)
-#define LOOKUP_CREATE (0x0200)
+#define LOOKUP_OPEN 0x0100
+#define LOOKUP_CREATE 0x0200
+#define LOOKUP_EXCL 0x0400
+#define LOOKUP_RENAME_TARGET 0x0800
extern int user_path_at(int, const char __user *, unsigned, struct path *);
@@ -61,6 +63,8 @@ extern int user_path_at(int, const char __user *, unsigned, struct path *);
#define user_path_dir(name, path) \
user_path_at(AT_FDCWD, name, LOOKUP_FOLLOW | LOOKUP_DIRECTORY, path)
+extern int kern_path(const char *, unsigned, struct path *);
+
extern int path_lookup(const char *, unsigned, struct nameidata *);
extern int vfs_path_lookup(struct dentry *, struct vfsmount *,
const char *, unsigned int, struct nameidata *);
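kern_path() gives in-kernel callers a path lookup without a nameidata; a sketch with an illustrative pathname, releasing the result with path_put():

	struct path path;
	int err;

	err = kern_path("/etc/example.conf", LOOKUP_FOLLOW, &path);
	if (!err) {
		/* use path.mnt / path.dentry ... */
		path_put(&path);
	}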
diff --git a/include/linux/net.h b/include/linux/net.h
index 6dc14a24004..4515efae4c3 100644
--- a/include/linux/net.h
+++ b/include/linux/net.h
@@ -40,7 +40,7 @@
#define SYS_GETSOCKOPT 15 /* sys_getsockopt(2) */
#define SYS_SENDMSG 16 /* sys_sendmsg(2) */
#define SYS_RECVMSG 17 /* sys_recvmsg(2) */
-#define SYS_PACCEPT 18 /* sys_paccept(2) */
+#define SYS_ACCEPT4 18 /* sys_accept4(2) */
typedef enum {
SS_FREE = 0, /* not allocated */
@@ -100,7 +100,7 @@ enum sock_type {
* remaining bits are used as flags. */
#define SOCK_TYPE_MASK 0xf
-/* Flags for socket, socketpair, paccept */
+/* Flags for socket, socketpair, accept4 */
#define SOCK_CLOEXEC O_CLOEXEC
#ifndef SOCK_NONBLOCK
#define SOCK_NONBLOCK O_NONBLOCK
@@ -223,8 +223,6 @@ extern int sock_map_fd(struct socket *sock, int flags);
extern struct socket *sockfd_lookup(int fd, int *err);
#define sockfd_put(sock) fput(sock->file)
extern int net_ratelimit(void);
-extern long do_accept(int fd, struct sockaddr __user *upeer_sockaddr,
- int __user *upeer_addrlen, int flags);
#define net_random() random32()
#define net_srandom(seed) srandom32((__force u32)seed)
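From user space the replacement for the never-shipped paccept() looks like this (listen_fd is hypothetical); the SOCK_* flags above are what the last argument accepts:

	int conn = accept4(listen_fd, NULL, NULL,
			   SOCK_CLOEXEC | SOCK_NONBLOCK);
	if (conn < 0)
		perror("accept4");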
diff --git a/include/linux/netdevice.h b/include/linux/netdevice.h
index 64875859d65..41e1224651c 100644
--- a/include/linux/netdevice.h
+++ b/include/linux/netdevice.h
@@ -43,6 +43,9 @@
#include <net/net_namespace.h>
#include <net/dsa.h>
+#ifdef CONFIG_DCB
+#include <net/dcbnl.h>
+#endif
struct vlan_group;
struct ethtool_ops;
@@ -311,14 +314,16 @@ struct napi_struct {
spinlock_t poll_lock;
int poll_owner;
struct net_device *dev;
- struct list_head dev_list;
#endif
+ struct list_head dev_list;
+ struct sk_buff *gro_list;
};
enum
{
NAPI_STATE_SCHED, /* Poll is scheduled */
NAPI_STATE_DISABLE, /* Disable pending */
+ NAPI_STATE_NPSVC, /* Netpoll - don't dequeue from poll_list */
};
extern void __napi_schedule(struct napi_struct *n);
@@ -372,22 +377,8 @@ static inline int napi_reschedule(struct napi_struct *napi)
*
* Mark NAPI processing as complete.
*/
-static inline void __napi_complete(struct napi_struct *n)
-{
- BUG_ON(!test_bit(NAPI_STATE_SCHED, &n->state));
- list_del(&n->poll_list);
- smp_mb__before_clear_bit();
- clear_bit(NAPI_STATE_SCHED, &n->state);
-}
-
-static inline void napi_complete(struct napi_struct *n)
-{
- unsigned long flags;
-
- local_irq_save(flags);
- __napi_complete(n);
- local_irq_restore(flags);
-}
+extern void __napi_complete(struct napi_struct *n);
+extern void napi_complete(struct napi_struct *n);
/**
* napi_disable - prevent NAPI from scheduling
@@ -451,6 +442,147 @@ struct netdev_queue {
struct Qdisc *qdisc_sleeping;
} ____cacheline_aligned_in_smp;
+
+/*
+ * This structure defines the management hooks for network devices.
+ * The following hooks can be defined; unless noted otherwise, they are
+ * optional and can be filled with a null pointer.
+ *
+ * int (*ndo_init)(struct net_device *dev);
+ * This function is called once when a network device is registered.
+ * The network device can use this for any late stage initialization
+ * or semantic validation. It can fail with an error code which will
+ * be propagated back to register_netdev.
+ *
+ * void (*ndo_uninit)(struct net_device *dev);
+ * This function is called when device is unregistered or when registration
+ * fails. It is not called if init fails.
+ *
+ * int (*ndo_open)(struct net_device *dev);
+ * This function is called when the network device transitions to the up
+ * state.
+ *
+ * int (*ndo_stop)(struct net_device *dev);
+ * This function is called when the network device transitions to the down
+ * state.
+ *
+ * int (*ndo_start_xmit)(struct sk_buff *skb, struct net_device *dev);
+ * Called when a packet needs to be transmitted.
+ * Must return NETDEV_TX_OK, NETDEV_TX_BUSY, or NETDEV_TX_LOCKED.
+ * Required; cannot be NULL.
+ *
+ * u16 (*ndo_select_queue)(struct net_device *dev, struct sk_buff *skb);
+ * Called to decide which queue to use when the device supports multiple
+ * transmit queues.
+ *
+ * void (*ndo_change_rx_flags)(struct net_device *dev, int flags);
+ * This function is called to allow the device receiver to make
+ * changes to its configuration when multicast or promiscuous mode is enabled.
+ *
+ * void (*ndo_set_rx_mode)(struct net_device *dev);
+ * This function is called when the device changes its address list filtering.
+ *
+ * void (*ndo_set_multicast_list)(struct net_device *dev);
+ * This function is called when the multicast address list changes.
+ *
+ * int (*ndo_set_mac_address)(struct net_device *dev, void *addr);
+ * This function is called when the Media Access Control address
+ * needs to be changed. If this interface is not defined, the
+ * MAC address cannot be changed.
+ *
+ * int (*ndo_validate_addr)(struct net_device *dev);
+ * Test if Media Access Control address is valid for the device.
+ *
+ * int (*ndo_do_ioctl)(struct net_device *dev, struct ifreq *ifr, int cmd);
+ * Called when a user requests an ioctl which can't be handled by
+ * the generic interface code. If not defined, ioctls return a
+ * "not supported" error code.
+ *
+ * int (*ndo_set_config)(struct net_device *dev, struct ifmap *map);
+ * Used to set network device bus interface parameters. This interface
+ * is retained for legacy reasons; new devices should use the bus
+ * interface (PCI) for low level management.
+ *
+ * int (*ndo_change_mtu)(struct net_device *dev, int new_mtu);
+ * Called when a user wants to change the Maximum Transfer Unit
+ * of a device. If not defined, any request to change the MTU
+ * will return an error.
+ *
+ * void (*ndo_tx_timeout)(struct net_device *dev);
+ * Callback used when the transmitter has not made any progress
+ * for dev->watchdog ticks.
+ *
+ * struct net_device_stats* (*ndo_get_stats)(struct net_device *dev);
+ * Called when a user wants to get the network device usage
+ * statistics. If not defined, the counters in dev->stats will
+ * be used.
+ *
+ * void (*ndo_vlan_rx_register)(struct net_device *dev, struct vlan_group *grp);
+ * If the device supports VLAN receive acceleration
+ * (i.e. dev->features & NETIF_F_HW_VLAN_RX), then this function is called
+ * when the vlan groups for the device change. Note: grp is NULL
+ * if no vlan groups are being used.
+ *
+ * void (*ndo_vlan_rx_add_vid)(struct net_device *dev, unsigned short vid);
+ * If the device supports VLAN filtering (dev->features & NETIF_F_HW_VLAN_FILTER)
+ * this function is called when a VLAN id is registered.
+ *
+ * void (*ndo_vlan_rx_kill_vid)(struct net_device *dev, unsigned short vid);
+ * If the device supports VLAN filtering (dev->features & NETIF_F_HW_VLAN_FILTER)
+ * this function is called when a VLAN id is unregistered.
+ *
+ * void (*ndo_poll_controller)(struct net_device *dev);
+ */
+#define HAVE_NET_DEVICE_OPS
+struct net_device_ops {
+ int (*ndo_init)(struct net_device *dev);
+ void (*ndo_uninit)(struct net_device *dev);
+ int (*ndo_open)(struct net_device *dev);
+ int (*ndo_stop)(struct net_device *dev);
+ int (*ndo_start_xmit) (struct sk_buff *skb,
+ struct net_device *dev);
+ u16 (*ndo_select_queue)(struct net_device *dev,
+ struct sk_buff *skb);
+#define HAVE_CHANGE_RX_FLAGS
+ void (*ndo_change_rx_flags)(struct net_device *dev,
+ int flags);
+#define HAVE_SET_RX_MODE
+ void (*ndo_set_rx_mode)(struct net_device *dev);
+#define HAVE_MULTICAST
+ void (*ndo_set_multicast_list)(struct net_device *dev);
+#define HAVE_SET_MAC_ADDR
+ int (*ndo_set_mac_address)(struct net_device *dev,
+ void *addr);
+#define HAVE_VALIDATE_ADDR
+ int (*ndo_validate_addr)(struct net_device *dev);
+#define HAVE_PRIVATE_IOCTL
+ int (*ndo_do_ioctl)(struct net_device *dev,
+ struct ifreq *ifr, int cmd);
+#define HAVE_SET_CONFIG
+ int (*ndo_set_config)(struct net_device *dev,
+ struct ifmap *map);
+#define HAVE_CHANGE_MTU
+ int (*ndo_change_mtu)(struct net_device *dev,
+ int new_mtu);
+ int (*ndo_neigh_setup)(struct net_device *dev,
+ struct neigh_parms *);
+#define HAVE_TX_TIMEOUT
+ void (*ndo_tx_timeout) (struct net_device *dev);
+
+ struct net_device_stats* (*ndo_get_stats)(struct net_device *dev);
+
+ void (*ndo_vlan_rx_register)(struct net_device *dev,
+ struct vlan_group *grp);
+ void (*ndo_vlan_rx_add_vid)(struct net_device *dev,
+ unsigned short vid);
+ void (*ndo_vlan_rx_kill_vid)(struct net_device *dev,
+ unsigned short vid);
+#ifdef CONFIG_NET_POLL_CONTROLLER
+#define HAVE_NETDEV_POLL
+ void (*ndo_poll_controller)(struct net_device *dev);
+#endif
+};
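A sketch of how a driver is expected to hook into the new ops table; all foo_* handlers are hypothetical, and unconverted drivers are carried by the CONFIG_COMPAT_NET_DEV_OPS block added further down:

static const struct net_device_ops foo_netdev_ops = {
	.ndo_open		= foo_open,
	.ndo_stop		= foo_stop,
	.ndo_start_xmit		= foo_start_xmit,
	.ndo_set_mac_address	= foo_set_mac_address,
	.ndo_tx_timeout		= foo_tx_timeout,
};

/* in the probe/setup path: */
	dev->netdev_ops = &foo_netdev_ops;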
+
/*
* The DEVICE structure.
* Actually, this whole structure is a big mistake. It mixes I/O
@@ -495,14 +627,7 @@ struct net_device
unsigned long state;
struct list_head dev_list;
-#ifdef CONFIG_NETPOLL
struct list_head napi_list;
-#endif
-
- /* The device initialization function. Called only once. */
- int (*init)(struct net_device *dev);
-
- /* ------- Fields preinitialized in Space.c finish here ------- */
/* Net device features */
unsigned long features;
@@ -521,6 +646,7 @@ struct net_device
#define NETIF_F_LLTX 4096 /* LockLess TX - deprecated. Please */
/* do not use LLTX in new drivers */
#define NETIF_F_NETNS_LOCAL 8192 /* Does not change network namespaces */
+#define NETIF_F_GRO 16384 /* Generic receive offload */
#define NETIF_F_LRO 32768 /* large receive offload */
/* Segmentation offload features */
@@ -541,12 +667,18 @@ struct net_device
#define NETIF_F_V6_CSUM (NETIF_F_GEN_CSUM | NETIF_F_IPV6_CSUM)
#define NETIF_F_ALL_CSUM (NETIF_F_V4_CSUM | NETIF_F_V6_CSUM)
+ /*
+ * If one device supports one of these features, then enable them
+ * for all in netdev_increment_features.
+ */
+#define NETIF_F_ONE_FOR_ALL (NETIF_F_GSO_SOFTWARE | NETIF_F_GSO_ROBUST | \
+ NETIF_F_SG | NETIF_F_HIGHDMA | \
+ NETIF_F_FRAGLIST)
+
/* Interface index. Unique device identifier */
int ifindex;
int iflink;
-
- struct net_device_stats* (*get_stats)(struct net_device *dev);
struct net_device_stats stats;
#ifdef CONFIG_WIRELESS_EXT
@@ -556,18 +688,13 @@ struct net_device
/* Instance data managed by the core of Wireless Extensions. */
struct iw_public_data * wireless_data;
#endif
+ /* Management operations */
+ const struct net_device_ops *netdev_ops;
const struct ethtool_ops *ethtool_ops;
/* Hardware header description */
const struct header_ops *header_ops;
- /*
- * This marks the end of the "visible" part of the structure. All
- * fields hereafter are internal to the system, and may change at
- * will (read: may be cleaned up at will).
- */
-
-
unsigned int flags; /* interface flags (a la BSD) */
unsigned short gflags;
unsigned short priv_flags; /* Like 'flags' but invisible to userspace. */
@@ -626,7 +753,7 @@ struct net_device
unsigned long last_rx; /* Time of last Rx */
/* Interface address info used in eth_type_trans() */
unsigned char dev_addr[MAX_ADDR_LEN]; /* hw address, (before bcast
- because most packets are unicast) */
+ because most packets are unicast) */
unsigned char broadcast[MAX_ADDR_LEN]; /* hw bcast add */
@@ -645,18 +772,12 @@ struct net_device
/*
* One part is mostly used on xmit path (device)
*/
- void *priv; /* pointer to private data */
- int (*hard_start_xmit) (struct sk_buff *skb,
- struct net_device *dev);
/* These may be needed for future network-power-down code. */
unsigned long trans_start; /* Time (in jiffies) of last Tx */
int watchdog_timeo; /* used by dev_watchdog() */
struct timer_list watchdog_timer;
-/*
- * refcnt is a very hot point, so align it on SMP
- */
/* Number of references to this device */
atomic_t refcnt ____cacheline_aligned_in_smp;
@@ -675,56 +796,12 @@ struct net_device
NETREG_RELEASED, /* called free_netdev */
} reg_state;
- /* Called after device is detached from network. */
- void (*uninit)(struct net_device *dev);
- /* Called after last user reference disappears. */
- void (*destructor)(struct net_device *dev);
-
- /* Pointers to interface service routines. */
- int (*open)(struct net_device *dev);
- int (*stop)(struct net_device *dev);
-#define HAVE_NETDEV_POLL
-#define HAVE_CHANGE_RX_FLAGS
- void (*change_rx_flags)(struct net_device *dev,
- int flags);
-#define HAVE_SET_RX_MODE
- void (*set_rx_mode)(struct net_device *dev);
-#define HAVE_MULTICAST
- void (*set_multicast_list)(struct net_device *dev);
-#define HAVE_SET_MAC_ADDR
- int (*set_mac_address)(struct net_device *dev,
- void *addr);
-#define HAVE_VALIDATE_ADDR
- int (*validate_addr)(struct net_device *dev);
-#define HAVE_PRIVATE_IOCTL
- int (*do_ioctl)(struct net_device *dev,
- struct ifreq *ifr, int cmd);
-#define HAVE_SET_CONFIG
- int (*set_config)(struct net_device *dev,
- struct ifmap *map);
-#define HAVE_CHANGE_MTU
- int (*change_mtu)(struct net_device *dev, int new_mtu);
+ /* Called from unregister, can be used to call free_netdev */
+ void (*destructor)(struct net_device *dev);
-#define HAVE_TX_TIMEOUT
- void (*tx_timeout) (struct net_device *dev);
-
- void (*vlan_rx_register)(struct net_device *dev,
- struct vlan_group *grp);
- void (*vlan_rx_add_vid)(struct net_device *dev,
- unsigned short vid);
- void (*vlan_rx_kill_vid)(struct net_device *dev,
- unsigned short vid);
-
- int (*neigh_setup)(struct net_device *dev, struct neigh_parms *);
#ifdef CONFIG_NETPOLL
struct netpoll_info *npinfo;
#endif
-#ifdef CONFIG_NET_POLL_CONTROLLER
- void (*poll_controller)(struct net_device *dev);
-#endif
-
- u16 (*select_queue)(struct net_device *dev,
- struct sk_buff *skb);
#ifdef CONFIG_NET_NS
/* Network namespace this network device is inside */
@@ -755,6 +832,49 @@ struct net_device
/* for setting kernel sock attribute on TCP connection setup */
#define GSO_MAX_SIZE 65536
unsigned int gso_max_size;
+
+#ifdef CONFIG_DCB
+ /* Data Center Bridging netlink ops */
+ struct dcbnl_rtnl_ops *dcbnl_ops;
+#endif
+
+#ifdef CONFIG_COMPAT_NET_DEV_OPS
+ struct {
+ int (*init)(struct net_device *dev);
+ void (*uninit)(struct net_device *dev);
+ int (*open)(struct net_device *dev);
+ int (*stop)(struct net_device *dev);
+ int (*hard_start_xmit) (struct sk_buff *skb,
+ struct net_device *dev);
+ u16 (*select_queue)(struct net_device *dev,
+ struct sk_buff *skb);
+ void (*change_rx_flags)(struct net_device *dev,
+ int flags);
+ void (*set_rx_mode)(struct net_device *dev);
+ void (*set_multicast_list)(struct net_device *dev);
+ int (*set_mac_address)(struct net_device *dev,
+ void *addr);
+ int (*validate_addr)(struct net_device *dev);
+ int (*do_ioctl)(struct net_device *dev,
+ struct ifreq *ifr, int cmd);
+ int (*set_config)(struct net_device *dev,
+ struct ifmap *map);
+ int (*change_mtu)(struct net_device *dev, int new_mtu);
+ int (*neigh_setup)(struct net_device *dev,
+ struct neigh_parms *);
+ void (*tx_timeout) (struct net_device *dev);
+ struct net_device_stats* (*get_stats)(struct net_device *dev);
+ void (*vlan_rx_register)(struct net_device *dev,
+ struct vlan_group *grp);
+ void (*vlan_rx_add_vid)(struct net_device *dev,
+ unsigned short vid);
+ void (*vlan_rx_kill_vid)(struct net_device *dev,
+ unsigned short vid);
+#ifdef CONFIG_NET_POLL_CONTROLLER
+ void (*poll_controller)(struct net_device *dev);
+#endif
+ };
+#endif
};
#define to_net_dev(d) container_of(d, struct net_device, dev)
@@ -850,22 +970,8 @@ static inline void *netdev_priv(const struct net_device *dev)
* netif_napi_add() must be used to initialize a napi context prior to calling
* *any* of the other napi related functions.
*/
-static inline void netif_napi_add(struct net_device *dev,
- struct napi_struct *napi,
- int (*poll)(struct napi_struct *, int),
- int weight)
-{
- INIT_LIST_HEAD(&napi->poll_list);
- napi->poll = poll;
- napi->weight = weight;
-#ifdef CONFIG_NETPOLL
- napi->dev = dev;
- list_add(&napi->dev_list, &dev->napi_list);
- spin_lock_init(&napi->poll_lock);
- napi->poll_owner = -1;
-#endif
- set_bit(NAPI_STATE_SCHED, &napi->state);
-}
+void netif_napi_add(struct net_device *dev, struct napi_struct *napi,
+ int (*poll)(struct napi_struct *, int), int weight);
/**
* netif_napi_del - remove a napi context
@@ -873,12 +979,20 @@ static inline void netif_napi_add(struct net_device *dev,
*
* netif_napi_del() removes a napi context from the network device napi list
*/
-static inline void netif_napi_del(struct napi_struct *napi)
-{
-#ifdef CONFIG_NETPOLL
- list_del(&napi->dev_list);
-#endif
-}
+void netif_napi_del(struct napi_struct *napi);
+
+struct napi_gro_cb {
+ /* This is non-zero if the packet may be of the same flow. */
+ int same_flow;
+
+ /* This is non-zero if the packet cannot be merged with the new skb. */
+ int flush;
+
+ /* Number of segments aggregated. */
+ int count;
+};
+
+#define NAPI_GRO_CB(skb) ((struct napi_gro_cb *)(skb)->cb)
struct packet_type {
__be16 type; /* This is really htons(ether_type). */
@@ -890,6 +1004,9 @@ struct packet_type {
struct sk_buff *(*gso_segment)(struct sk_buff *skb,
int features);
int (*gso_send_check)(struct sk_buff *skb);
+ struct sk_buff **(*gro_receive)(struct sk_buff **head,
+ struct sk_buff *skb);
+ int (*gro_complete)(struct sk_buff *skb);
void *af_packet_priv;
struct list_head list;
};
@@ -1243,6 +1360,9 @@ extern int netif_rx(struct sk_buff *skb);
extern int netif_rx_ni(struct sk_buff *skb);
#define HAVE_NETIF_RECEIVE_SKB 1
extern int netif_receive_skb(struct sk_buff *skb);
+extern void napi_gro_flush(struct napi_struct *napi);
+extern int napi_gro_receive(struct napi_struct *napi,
+ struct sk_buff *skb);
extern void netif_nit_deliver(struct sk_buff *skb);
extern int dev_valid_name(const char *name);
extern int dev_ioctl(struct net *net, unsigned int cmd, void __user *);
@@ -1435,8 +1555,7 @@ static inline u32 netif_msg_init(int debug_value, int default_msg_enable_bits)
}
/* Test if receive needs to be scheduled but only if up */
-static inline int netif_rx_schedule_prep(struct net_device *dev,
- struct napi_struct *napi)
+static inline int netif_rx_schedule_prep(struct napi_struct *napi)
{
return napi_schedule_prep(napi);
}
@@ -1444,27 +1563,24 @@ static inline int netif_rx_schedule_prep(struct net_device *dev,
/* Add interface to tail of rx poll list. This assumes that _prep has
* already been called and returned 1.
*/
-static inline void __netif_rx_schedule(struct net_device *dev,
- struct napi_struct *napi)
+static inline void __netif_rx_schedule(struct napi_struct *napi)
{
__napi_schedule(napi);
}
/* Try to reschedule poll. Called by irq handler. */
-static inline void netif_rx_schedule(struct net_device *dev,
- struct napi_struct *napi)
+static inline void netif_rx_schedule(struct napi_struct *napi)
{
- if (netif_rx_schedule_prep(dev, napi))
- __netif_rx_schedule(dev, napi);
+ if (netif_rx_schedule_prep(napi))
+ __netif_rx_schedule(napi);
}
/* Try to reschedule poll. Called by dev->poll() after netif_rx_complete(). */
-static inline int netif_rx_reschedule(struct net_device *dev,
- struct napi_struct *napi)
+static inline int netif_rx_reschedule(struct napi_struct *napi)
{
if (napi_schedule_prep(napi)) {
- __netif_rx_schedule(dev, napi);
+ __netif_rx_schedule(napi);
return 1;
}
return 0;
@@ -1473,8 +1589,7 @@ static inline int netif_rx_reschedule(struct net_device *dev,
/* same as netif_rx_complete, except that local_irq_save(flags)
* has already been issued
*/
-static inline void __netif_rx_complete(struct net_device *dev,
- struct napi_struct *napi)
+static inline void __netif_rx_complete(struct napi_struct *napi)
{
__napi_complete(napi);
}
@@ -1484,14 +1599,9 @@ static inline void __netif_rx_complete(struct net_device *dev,
* it completes the work. The device cannot be out of poll list at this
* moment, it is BUG().
*/
-static inline void netif_rx_complete(struct net_device *dev,
- struct napi_struct *napi)
+static inline void netif_rx_complete(struct napi_struct *napi)
{
- unsigned long flags;
-
- local_irq_save(flags);
- __netif_rx_complete(dev, napi);
- local_irq_restore(flags);
+ napi_complete(napi);
}
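Putting the reworked helpers together, a driver poll routine now takes only the napi_struct and can feed received packets through GRO; foo_rx_next() and the surrounding driver are hypothetical:

static int foo_poll(struct napi_struct *napi, int budget)
{
	int work_done = 0;
	struct sk_buff *skb;

	while (work_done < budget && (skb = foo_rx_next(napi)) != NULL) {
		napi_gro_receive(napi, skb);	/* or netif_receive_skb(skb) */
		work_done++;
	}

	if (work_done < budget) {
		netif_rx_complete(napi);
		/* re-enable device interrupts here */
	}
	return work_done;
}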
static inline void __netif_tx_lock(struct netdev_queue *txq, int cpu)
@@ -1529,7 +1639,6 @@ static inline void __netif_tx_unlock_bh(struct netdev_queue *txq)
/**
* netif_tx_lock - grab network device transmit lock
* @dev: network device
- * @cpu: cpu number of lock owner
*
* Get network device transmit lock
*/
@@ -1669,6 +1778,8 @@ extern void netdev_features_change(struct net_device *dev);
/* Load a device via the kmod */
extern void dev_load(struct net *net, const char *name);
extern void dev_mcast_init(void);
+extern const struct net_device_stats *dev_get_stats(struct net_device *dev);
+
extern int netdev_max_backlog;
extern int weight_p;
extern int netdev_set_master(struct net_device *dev, struct net_device *master);
@@ -1698,7 +1809,9 @@ extern char *netdev_drivername(const struct net_device *dev, char *buffer, int l
extern void linkwatch_run_queue(void);
-extern int netdev_compute_features(unsigned long all, unsigned long one);
+unsigned long netdev_increment_features(unsigned long all, unsigned long one,
+ unsigned long mask);
+unsigned long netdev_fix_features(unsigned long features, const char *name);
static inline int net_gso_ok(int features, int gso_type)
{
@@ -1715,6 +1828,8 @@ static inline int netif_needs_gso(struct net_device *dev, struct sk_buff *skb)
{
return skb_is_gso(skb) &&
(!skb_gso_ok(skb, dev->features) ||
+ (skb_shinfo(skb)->frag_list &&
+ !(dev->features & NETIF_F_FRAGLIST)) ||
unlikely(skb->ip_summed != CHECKSUM_PARTIAL));
}
@@ -1733,26 +1848,31 @@ static inline int skb_bond_should_drop(struct sk_buff *skb)
struct net_device *dev = skb->dev;
struct net_device *master = dev->master;
- if (master &&
- (dev->priv_flags & IFF_SLAVE_INACTIVE)) {
- if ((dev->priv_flags & IFF_SLAVE_NEEDARP) &&
- skb->protocol == __constant_htons(ETH_P_ARP))
- return 0;
+ if (master) {
+ if (master->priv_flags & IFF_MASTER_ARPMON)
+ dev->last_rx = jiffies;
- if (master->priv_flags & IFF_MASTER_ALB) {
- if (skb->pkt_type != PACKET_BROADCAST &&
- skb->pkt_type != PACKET_MULTICAST)
+ if (dev->priv_flags & IFF_SLAVE_INACTIVE) {
+ if ((dev->priv_flags & IFF_SLAVE_NEEDARP) &&
+ skb->protocol == __constant_htons(ETH_P_ARP))
return 0;
- }
- if (master->priv_flags & IFF_MASTER_8023AD &&
- skb->protocol == __constant_htons(ETH_P_SLOW))
- return 0;
- return 1;
+ if (master->priv_flags & IFF_MASTER_ALB) {
+ if (skb->pkt_type != PACKET_BROADCAST &&
+ skb->pkt_type != PACKET_MULTICAST)
+ return 0;
+ }
+ if (master->priv_flags & IFF_MASTER_8023AD &&
+ skb->protocol == __constant_htons(ETH_P_SLOW))
+ return 0;
+
+ return 1;
+ }
}
return 0;
}
+extern struct pernet_operations __net_initdata loopback_net_ops;
#endif /* __KERNEL__ */
#endif /* _LINUX_DEV_H */
diff --git a/include/linux/netfilter/nfnetlink.h b/include/linux/netfilter/nfnetlink.h
index 0d8424f7689..7d8e0455cca 100644
--- a/include/linux/netfilter/nfnetlink.h
+++ b/include/linux/netfilter/nfnetlink.h
@@ -78,6 +78,9 @@ extern int nfnetlink_send(struct sk_buff *skb, u32 pid, unsigned group,
int echo);
extern int nfnetlink_unicast(struct sk_buff *skb, u_int32_t pid, int flags);
+extern void nfnl_lock(void);
+extern void nfnl_unlock(void);
+
#define MODULE_ALIAS_NFNL_SUBSYS(subsys) \
MODULE_ALIAS("nfnetlink-subsys-" __stringify(subsys))
diff --git a/include/linux/netfilter/nfnetlink_conntrack.h b/include/linux/netfilter/nfnetlink_conntrack.h
index c19595c8930..29fe9ea1d34 100644
--- a/include/linux/netfilter/nfnetlink_conntrack.h
+++ b/include/linux/netfilter/nfnetlink_conntrack.h
@@ -141,6 +141,7 @@ enum ctattr_protonat {
#define CTA_PROTONAT_MAX (__CTA_PROTONAT_MAX - 1)
enum ctattr_natseq {
+ CTA_NAT_SEQ_UNSPEC,
CTA_NAT_SEQ_CORRECTION_POS,
CTA_NAT_SEQ_OFFSET_BEFORE,
CTA_NAT_SEQ_OFFSET_AFTER,
diff --git a/include/linux/netfilter/x_tables.h b/include/linux/netfilter/x_tables.h
index be41b609c88..e52ce475d19 100644
--- a/include/linux/netfilter/x_tables.h
+++ b/include/linux/netfilter/x_tables.h
@@ -251,7 +251,7 @@ struct xt_target_param {
*/
struct xt_tgchk_param {
const char *table;
- void *entryinfo;
+ const void *entryinfo;
const struct xt_target *target;
void *targinfo;
unsigned int hook_mask;
diff --git a/include/linux/netfilter_bridge/ebtables.h b/include/linux/netfilter_bridge/ebtables.h
index d45e29cd1cf..e40ddb94b1a 100644
--- a/include/linux/netfilter_bridge/ebtables.h
+++ b/include/linux/netfilter_bridge/ebtables.h
@@ -300,7 +300,8 @@ struct ebt_table
#define EBT_ALIGN(s) (((s) + (__alignof__(struct ebt_replace)-1)) & \
~(__alignof__(struct ebt_replace)-1))
-extern int ebt_register_table(struct ebt_table *table);
+extern struct ebt_table *ebt_register_table(struct net *net,
+ struct ebt_table *table);
extern void ebt_unregister_table(struct ebt_table *table);
extern unsigned int ebt_do_table(unsigned int hook, struct sk_buff *skb,
const struct net_device *in, const struct net_device *out,
diff --git a/include/linux/netfilter_ipv4/ipt_policy.h b/include/linux/netfilter_ipv4/ipt_policy.h
index b9478a25530..1037fb2cd20 100644
--- a/include/linux/netfilter_ipv4/ipt_policy.h
+++ b/include/linux/netfilter_ipv4/ipt_policy.h
@@ -1,6 +1,8 @@
#ifndef _IPT_POLICY_H
#define _IPT_POLICY_H
+#include <linux/netfilter/xt_policy.h>
+
#define IPT_POLICY_MAX_ELEM XT_POLICY_MAX_ELEM
/* ipt_policy_flags */
diff --git a/include/linux/netfilter_ipv6/ip6t_policy.h b/include/linux/netfilter_ipv6/ip6t_policy.h
index 6bab3163d2f..b1c449d7ec8 100644
--- a/include/linux/netfilter_ipv6/ip6t_policy.h
+++ b/include/linux/netfilter_ipv6/ip6t_policy.h
@@ -1,6 +1,8 @@
#ifndef _IP6T_POLICY_H
#define _IP6T_POLICY_H
+#include <linux/netfilter/xt_policy.h>
+
#define IP6T_POLICY_MAX_ELEM XT_POLICY_MAX_ELEM
/* ip6t_policy_flags */
diff --git a/include/linux/netlink.h b/include/linux/netlink.h
index 9ff1b54908f..51b09a1f46c 100644
--- a/include/linux/netlink.h
+++ b/include/linux/netlink.h
@@ -242,7 +242,8 @@ __nlmsg_put(struct sk_buff *skb, u32 pid, u32 seq, int type, int len, int flags)
nlh->nlmsg_flags = flags;
nlh->nlmsg_pid = pid;
nlh->nlmsg_seq = seq;
- memset(NLMSG_DATA(nlh) + len, 0, NLMSG_ALIGN(size) - size);
+ if (!__builtin_constant_p(size) || NLMSG_ALIGN(size) - size != 0)
+ memset(NLMSG_DATA(nlh) + len, 0, NLMSG_ALIGN(size) - size);
return nlh;
}
diff --git a/include/linux/netpoll.h b/include/linux/netpoll.h
index e3d79593fb3..e38d3c9dccd 100644
--- a/include/linux/netpoll.h
+++ b/include/linux/netpoll.h
@@ -94,11 +94,6 @@ static inline void netpoll_poll_unlock(void *have)
rcu_read_unlock();
}
-static inline void netpoll_netdev_init(struct net_device *dev)
-{
- INIT_LIST_HEAD(&dev->napi_list);
-}
-
#else
static inline int netpoll_rx(struct sk_buff *skb)
{
diff --git a/include/linux/nfs_fs.h b/include/linux/nfs_fs.h
index 78a5922a2f1..db867b04ac3 100644
--- a/include/linux/nfs_fs.h
+++ b/include/linux/nfs_fs.h
@@ -83,7 +83,7 @@ struct nfs_open_context {
struct rpc_cred *cred;
struct nfs4_state *state;
fl_owner_t lockowner;
- int mode;
+ fmode_t mode;
unsigned long flags;
#define NFS_CONTEXT_ERROR_WRITE (0)
@@ -130,14 +130,17 @@ struct nfs_inode {
*
* We need to revalidate the cached attrs for this inode if
*
- * jiffies - read_cache_jiffies > attrtimeo
+ * jiffies - read_cache_jiffies >= attrtimeo
+ *
+ * Please note the comparison is greater than or equal
+ * so that zero timeout values can be specified.
*/
unsigned long read_cache_jiffies;
unsigned long attrtimeo;
unsigned long attrtimeo_timestamp;
__u64 change_attr; /* v4 only */
- unsigned long last_updated;
+ unsigned long attr_gencount;
/* "Generation counter" for the attribute cache. This is
* bumped whenever we update the metadata on the
* server.
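The ">=" comparison documented above can be sketched with the usual jiffies helpers (the real client code is structured differently; this only illustrates why a zero attrtimeo forces immediate revalidation, and revalidate_attributes() is hypothetical):

	/* true as soon as the cache is attrtimeo ticks old;
	 * with attrtimeo == 0 it is true immediately */
	if (time_after_eq(jiffies, nfsi->read_cache_jiffies + nfsi->attrtimeo))
		revalidate_attributes(inode);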
@@ -180,7 +183,7 @@ struct nfs_inode {
/* NFSv4 state */
struct list_head open_states;
struct nfs_delegation *delegation;
- int delegation_state;
+ fmode_t delegation_state;
struct rw_semaphore rwsem;
#endif /* CONFIG_NFS_V4*/
struct inode vfs_inode;
@@ -200,11 +203,10 @@ struct nfs_inode {
/*
* Bit offsets in flags field
*/
-#define NFS_INO_REVALIDATING (0) /* revalidating attrs */
-#define NFS_INO_ADVISE_RDPLUS (1) /* advise readdirplus */
-#define NFS_INO_STALE (2) /* possible stale inode */
-#define NFS_INO_ACL_LRU_SET (3) /* Inode is on the LRU list */
-#define NFS_INO_MOUNTPOINT (4) /* inode is remote mountpoint */
+#define NFS_INO_ADVISE_RDPLUS (0) /* advise readdirplus */
+#define NFS_INO_STALE (1) /* possible stale inode */
+#define NFS_INO_ACL_LRU_SET (2) /* Inode is on the LRU list */
+#define NFS_INO_MOUNTPOINT (3) /* inode is remote mountpoint */
static inline struct nfs_inode *NFS_I(const struct inode *inode)
{
@@ -343,17 +345,13 @@ extern int nfs_setattr(struct dentry *, struct iattr *);
extern void nfs_setattr_update_inode(struct inode *inode, struct iattr *attr);
extern struct nfs_open_context *get_nfs_open_context(struct nfs_open_context *ctx);
extern void put_nfs_open_context(struct nfs_open_context *ctx);
-extern struct nfs_open_context *nfs_find_open_context(struct inode *inode, struct rpc_cred *cred, int mode);
+extern struct nfs_open_context *nfs_find_open_context(struct inode *inode, struct rpc_cred *cred, fmode_t mode);
extern u64 nfs_compat_user_ino64(u64 fileid);
+extern void nfs_fattr_init(struct nfs_fattr *fattr);
/* linux/net/ipv4/ipconfig.c: trims ip addr off front of name, too. */
extern __be32 root_nfs_parse_addr(char *name); /*__init*/
-
-static inline void nfs_fattr_init(struct nfs_fattr *fattr)
-{
- fattr->valid = 0;
- fattr->time_start = jiffies;
-}
+extern unsigned long nfs_inc_attr_generation_counter(void);
/*
* linux/fs/nfs/file.c
@@ -372,8 +370,12 @@ static inline struct nfs_open_context *nfs_file_open_context(struct file *filp)
static inline struct rpc_cred *nfs_file_cred(struct file *file)
{
- if (file != NULL)
- return nfs_file_open_context(file)->cred;
+ if (file != NULL) {
+ struct nfs_open_context *ctx =
+ nfs_file_open_context(file);
+ if (ctx)
+ return ctx->cred;
+ }
return NULL;
}
@@ -534,12 +536,6 @@ static inline void nfs3_forget_cached_acls(struct inode *inode)
#endif /* CONFIG_NFS_V3_ACL */
/*
- * linux/fs/mount_clnt.c
- */
-extern int nfs_mount(struct sockaddr *, size_t, char *, char *,
- int, int, struct nfs_fh *);
-
-/*
* inline functions
*/
diff --git a/include/linux/nfs_fs_sb.h b/include/linux/nfs_fs_sb.h
index c9beacd16c0..9bb81aec91c 100644
--- a/include/linux/nfs_fs_sb.h
+++ b/include/linux/nfs_fs_sb.h
@@ -42,12 +42,6 @@ struct nfs_client {
struct rb_root cl_openowner_id;
struct rb_root cl_lockowner_id;
- /*
- * The following rwsem ensures exclusive access to the server
- * while we recover the state following a lease expiration.
- */
- struct rw_semaphore cl_sem;
-
struct list_head cl_delegations;
struct rb_root cl_state_owners;
spinlock_t cl_lock;
@@ -119,7 +113,6 @@ struct nfs_server {
void (*destroy)(struct nfs_server *);
atomic_t active; /* Keep trace of any activity to this server */
- wait_queue_head_t active_wq; /* Wait for any activity to stop */
/* mountd-related mount options */
struct sockaddr_storage mountd_address;
diff --git a/include/linux/nfs_mount.h b/include/linux/nfs_mount.h
index df7c6b7a7eb..4499016e6d0 100644
--- a/include/linux/nfs_mount.h
+++ b/include/linux/nfs_mount.h
@@ -45,7 +45,7 @@ struct nfs_mount_data {
char context[NFS_MAX_CONTEXT_LEN + 1]; /* 6 */
};
-/* bits in the flags field */
+/* bits in the flags field visible to user space */
#define NFS_MOUNT_SOFT 0x0001 /* 1 */
#define NFS_MOUNT_INTR 0x0002 /* 1 */ /* now unused, but ABI */
@@ -65,4 +65,9 @@ struct nfs_mount_data {
#define NFS_MOUNT_UNSHARED 0x8000 /* 5 */
#define NFS_MOUNT_FLAGMASK 0xFFFF
+/* The following are for internal use only */
+#define NFS_MOUNT_LOOKUP_CACHE_NONEG 0x10000
+#define NFS_MOUNT_LOOKUP_CACHE_NONE 0x20000
+#define NFS_MOUNT_NORESVPORT 0x40000
+
#endif
diff --git a/include/linux/nfs_xdr.h b/include/linux/nfs_xdr.h
index 8c77c11224d..a550b528319 100644
--- a/include/linux/nfs_xdr.h
+++ b/include/linux/nfs_xdr.h
@@ -36,6 +36,7 @@ struct nfs_fattr {
__u32 nlink;
__u32 uid;
__u32 gid;
+ dev_t rdev;
__u64 size;
union {
struct {
@@ -46,7 +47,6 @@ struct nfs_fattr {
__u64 used;
} nfs3;
} du;
- dev_t rdev;
struct nfs_fsid fsid;
__u64 fileid;
struct timespec atime;
@@ -56,6 +56,7 @@ struct nfs_fattr {
__u64 change_attr; /* NFSv4 change attribute */
__u64 pre_change_attr;/* pre-op NFSv4 change attribute */
unsigned long time_start;
+ unsigned long gencount;
};
#define NFS_ATTR_WCC 0x0001 /* pre-op WCC data */
@@ -119,13 +120,14 @@ struct nfs_openargs {
const struct nfs_fh * fh;
struct nfs_seqid * seqid;
int open_flags;
+ fmode_t fmode;
__u64 clientid;
__u64 id;
union {
struct iattr * attrs; /* UNCHECKED, GUARDED */
nfs4_verifier verifier; /* EXCLUSIVE */
nfs4_stateid delegation; /* CLAIM_DELEGATE_CUR */
- int delegation_type; /* CLAIM_PREVIOUS */
+ fmode_t delegation_type; /* CLAIM_PREVIOUS */
} u;
const struct qstr * name;
const struct nfs_server *server; /* Needed for ID mapping */
@@ -142,7 +144,7 @@ struct nfs_openres {
struct nfs_fattr * dir_attr;
struct nfs_seqid * seqid;
const struct nfs_server *server;
- int delegation_type;
+ fmode_t delegation_type;
nfs4_stateid delegation;
__u32 do_recall;
__u64 maxsize;
@@ -170,7 +172,7 @@ struct nfs_closeargs {
struct nfs_fh * fh;
nfs4_stateid * stateid;
struct nfs_seqid * seqid;
- int open_flags;
+ fmode_t fmode;
const u32 * bitmask;
};
@@ -672,16 +674,16 @@ struct nfs4_rename_res {
struct nfs_fattr * new_fattr;
};
-#define NFS4_SETCLIENTID_NAMELEN (56)
+#define NFS4_SETCLIENTID_NAMELEN (127)
struct nfs4_setclientid {
const nfs4_verifier * sc_verifier;
unsigned int sc_name_len;
- char sc_name[NFS4_SETCLIENTID_NAMELEN];
+ char sc_name[NFS4_SETCLIENTID_NAMELEN + 1];
u32 sc_prog;
unsigned int sc_netid_len;
- char sc_netid[RPCBIND_MAXNETIDLEN];
+ char sc_netid[RPCBIND_MAXNETIDLEN + 1];
unsigned int sc_uaddr_len;
- char sc_uaddr[RPCBIND_MAXUADDRLEN];
+ char sc_uaddr[RPCBIND_MAXUADDRLEN + 1];
u32 sc_cb_ident;
};
diff --git a/include/linux/nfsd/state.h b/include/linux/nfsd/state.h
index d0fe2e37845..128298c0362 100644
--- a/include/linux/nfsd/state.h
+++ b/include/linux/nfsd/state.h
@@ -124,6 +124,8 @@ struct nfs4_client {
nfs4_verifier cl_verifier; /* generated by client */
time_t cl_time; /* time of last lease renewal */
__be32 cl_addr; /* client ipaddress */
+ u32 cl_flavor; /* setclientid pseudoflavor */
+ char *cl_principal; /* setclientid principal name */
struct svc_cred cl_cred; /* setclientid principal */
clientid_t cl_clientid; /* generated by server */
nfs4_verifier cl_confirm; /* generated by server */
diff --git a/include/linux/nl80211.h b/include/linux/nl80211.h
index 9bad65400fb..e86ed59f9ad 100644
--- a/include/linux/nl80211.h
+++ b/include/linux/nl80211.h
@@ -3,7 +3,26 @@
/*
* 802.11 netlink interface public header
*
- * Copyright 2006, 2007 Johannes Berg <johannes@sipsolutions.net>
+ * Copyright 2006, 2007, 2008 Johannes Berg <johannes@sipsolutions.net>
+ * Copyright 2008 Michael Wu <flamingice@sourmilk.net>
+ * Copyright 2008 Luis Carlos Cobo <luisca@cozybit.com>
+ * Copyright 2008 Michael Buesch <mb@bu3sch.de>
+ * Copyright 2008 Luis R. Rodriguez <lrodriguez@atheros.com>
+ * Copyright 2008 Jouni Malinen <jouni.malinen@atheros.com>
+ * Copyright 2008 Colin McCabe <colin@cozybit.com>
+ *
+ * Permission to use, copy, modify, and/or distribute this software for any
+ * purpose with or without fee is hereby granted, provided that the above
+ * copyright notice and this permission notice appear in all copies.
+ *
+ * THE SOFTWARE IS PROVIDED "AS IS" AND THE AUTHOR DISCLAIMS ALL WARRANTIES
+ * WITH REGARD TO THIS SOFTWARE INCLUDING ALL IMPLIED WARRANTIES OF
+ * MERCHANTABILITY AND FITNESS. IN NO EVENT SHALL THE AUTHOR BE LIABLE FOR
+ * ANY SPECIAL, DIRECT, INDIRECT, OR CONSEQUENTIAL DAMAGES OR ANY DAMAGES
+ * WHATSOEVER RESULTING FROM LOSS OF USE, DATA OR PROFITS, WHETHER IN AN
+ * ACTION OF CONTRACT, NEGLIGENCE OR OTHER TORTIOUS ACTION, ARISING OUT OF
+ * OR IN CONNECTION WITH THE USE OR PERFORMANCE OF THIS SOFTWARE.
+ *
*/
/**
@@ -25,8 +44,10 @@
*
* @NL80211_CMD_GET_WIPHY: request information about a wiphy or dump request
* to get a list of all present wiphys.
- * @NL80211_CMD_SET_WIPHY: set wiphy name, needs %NL80211_ATTR_WIPHY and
- * %NL80211_ATTR_WIPHY_NAME.
+ * @NL80211_CMD_SET_WIPHY: set wiphy parameters, needs %NL80211_ATTR_WIPHY or
+ * %NL80211_ATTR_IFINDEX; can be used to set %NL80211_ATTR_WIPHY_NAME,
+ * %NL80211_ATTR_WIPHY_TXQ_PARAMS, %NL80211_ATTR_WIPHY_FREQ, and/or
+ * %NL80211_ATTR_WIPHY_SEC_CHAN_OFFSET.
* @NL80211_CMD_NEW_WIPHY: Newly created wiphy, response to get request
* or rename notification. Has attributes %NL80211_ATTR_WIPHY and
* %NL80211_ATTR_WIPHY_NAME.
@@ -106,6 +127,12 @@
* to the specified ISO/IEC 3166-1 alpha2 country code. The core will
* store this as a valid request and then query userspace for it.
*
+ * @NL80211_CMD_GET_MESH_PARAMS: Get mesh networking properties for the
+ * interface identified by %NL80211_ATTR_IFINDEX
+ *
+ * @NL80211_CMD_SET_MESH_PARAMS: Set mesh networking properties for the
+ * interface identified by %NL80211_ATTR_IFINDEX
+ *
* @NL80211_CMD_MAX: highest used command number
* @__NL80211_CMD_AFTER_LAST: internal use
*/
@@ -148,6 +175,9 @@ enum nl80211_commands {
NL80211_CMD_SET_REG,
NL80211_CMD_REQ_SET_REG,
+ NL80211_CMD_GET_MESH_PARAMS,
+ NL80211_CMD_SET_MESH_PARAMS,
+
/* add new commands above here */
/* used to define NL80211_CMD_MAX below */
@@ -169,6 +199,15 @@ enum nl80211_commands {
* @NL80211_ATTR_WIPHY: index of wiphy to operate on, cf.
* /sys/class/ieee80211/<phyname>/index
* @NL80211_ATTR_WIPHY_NAME: wiphy name (used for renaming)
+ * @NL80211_ATTR_WIPHY_TXQ_PARAMS: a nested array of TX queue parameters
+ * @NL80211_ATTR_WIPHY_FREQ: frequency of the selected channel in MHz
+ * @NL80211_ATTR_WIPHY_CHANNEL_TYPE: included with NL80211_ATTR_WIPHY_FREQ
+ * if HT20 or HT40 are allowed (i.e., 802.11n disabled if not included):
+ * NL80211_CHAN_NO_HT = HT not allowed (i.e., same as not including
+ * this attribute)
+ * NL80211_CHAN_HT20 = HT20 only
+ * NL80211_CHAN_HT40MINUS = secondary channel is below the primary channel
+ * NL80211_CHAN_HT40PLUS = secondary channel is above the primary channel
*
* @NL80211_ATTR_IFINDEX: network interface index of the device to operate on
* @NL80211_ATTR_IFNAME: network interface name
@@ -234,6 +273,9 @@ enum nl80211_commands {
* (u8, 0 or 1)
* @NL80211_ATTR_BSS_SHORT_SLOT_TIME: whether short slot time enabled
* (u8, 0 or 1)
+ * @NL80211_ATTR_BSS_BASIC_RATES: basic rates, array of basic
+ * rates in format defined by IEEE 802.11 7.3.2.2 but without the length
+ * restriction (at most %NL80211_MAX_SUPP_RATES).
*
* @NL80211_ATTR_HT_CAPABILITY: HT Capability information element (from
* association request when used with NL80211_CMD_NEW_STATION)
@@ -296,6 +338,14 @@ enum nl80211_attrs {
NL80211_ATTR_REG_ALPHA2,
NL80211_ATTR_REG_RULES,
+ NL80211_ATTR_MESH_PARAMS,
+
+ NL80211_ATTR_BSS_BASIC_RATES,
+
+ NL80211_ATTR_WIPHY_TXQ_PARAMS,
+ NL80211_ATTR_WIPHY_FREQ,
+ NL80211_ATTR_WIPHY_CHANNEL_TYPE,
+
/* add attributes here, update the policy in nl80211.c */
__NL80211_ATTR_AFTER_LAST,
@@ -307,6 +357,10 @@ enum nl80211_attrs {
* here
*/
#define NL80211_ATTR_HT_CAPABILITY NL80211_ATTR_HT_CAPABILITY
+#define NL80211_ATTR_BSS_BASIC_RATES NL80211_ATTR_BSS_BASIC_RATES
+#define NL80211_ATTR_WIPHY_TXQ_PARAMS NL80211_ATTR_WIPHY_TXQ_PARAMS
+#define NL80211_ATTR_WIPHY_FREQ NL80211_ATTR_WIPHY_FREQ
+#define NL80211_ATTR_WIPHY_CHANNEL_TYPE NL80211_ATTR_WIPHY_CHANNEL_TYPE
#define NL80211_MAX_SUPP_RATES 32
#define NL80211_MAX_SUPP_REG_RULES 32
@@ -371,6 +425,32 @@ enum nl80211_sta_flags {
};
/**
+ * enum nl80211_rate_info - bitrate information
+ *
+ * These attribute types are used with %NL80211_STA_INFO_TX_BITRATE
+ * when getting information about the bitrate of a station.
+ *
+ * @__NL80211_RATE_INFO_INVALID: attribute number 0 is reserved
+ * @NL80211_RATE_INFO_BITRATE: total bitrate (u16, 100kbit/s)
+ * @NL80211_RATE_INFO_MCS: mcs index for 802.11n (u8)
+ * @NL80211_RATE_INFO_40_MHZ_WIDTH: 40 MHz dual-channel bitrate
+ * @NL80211_RATE_INFO_SHORT_GI: 400ns guard interval
+ * @NL80211_RATE_INFO_MAX: highest rate_info number currently defined
+ * @__NL80211_RATE_INFO_AFTER_LAST: internal use
+ */
+enum nl80211_rate_info {
+ __NL80211_RATE_INFO_INVALID,
+ NL80211_RATE_INFO_BITRATE,
+ NL80211_RATE_INFO_MCS,
+ NL80211_RATE_INFO_40_MHZ_WIDTH,
+ NL80211_RATE_INFO_SHORT_GI,
+
+ /* keep last */
+ __NL80211_RATE_INFO_AFTER_LAST,
+ NL80211_RATE_INFO_MAX = __NL80211_RATE_INFO_AFTER_LAST - 1
+};
+
+/**
* enum nl80211_sta_info - station information
*
* These attribute types are used with %NL80211_ATTR_STA_INFO
@@ -382,6 +462,9 @@ enum nl80211_sta_flags {
* @NL80211_STA_INFO_TX_BYTES: total transmitted bytes (u32, to this station)
* @__NL80211_STA_INFO_AFTER_LAST: internal
* @NL80211_STA_INFO_MAX: highest possible station info attribute
+ * @NL80211_STA_INFO_SIGNAL: signal strength of last received PPDU (u8, dBm)
+ * @NL80211_STA_INFO_TX_BITRATE: current unicast tx rate, nested attribute
+ * containing rate information, see &enum nl80211_rate_info.
*/
enum nl80211_sta_info {
__NL80211_STA_INFO_INVALID,
@@ -391,6 +474,8 @@ enum nl80211_sta_info {
NL80211_STA_INFO_LLID,
NL80211_STA_INFO_PLID,
NL80211_STA_INFO_PLINK_STATE,
+ NL80211_STA_INFO_SIGNAL,
+ NL80211_STA_INFO_TX_BITRATE,
/* keep last */
__NL80211_STA_INFO_AFTER_LAST,
@@ -452,17 +537,29 @@ enum nl80211_mpath_info {
* an array of nested frequency attributes
* @NL80211_BAND_ATTR_RATES: supported bitrates in this band,
* an array of nested bitrate attributes
+ * @NL80211_BAND_ATTR_HT_MCS_SET: 16-byte attribute containing the MCS set as
+ * defined in 802.11n
+ * @NL80211_BAND_ATTR_HT_CAPA: HT capabilities, as in the HT information IE
+ * @NL80211_BAND_ATTR_HT_AMPDU_FACTOR: A-MPDU factor, as in 11n
+ * @NL80211_BAND_ATTR_HT_AMPDU_DENSITY: A-MPDU density, as in 11n
*/
enum nl80211_band_attr {
__NL80211_BAND_ATTR_INVALID,
NL80211_BAND_ATTR_FREQS,
NL80211_BAND_ATTR_RATES,
+ NL80211_BAND_ATTR_HT_MCS_SET,
+ NL80211_BAND_ATTR_HT_CAPA,
+ NL80211_BAND_ATTR_HT_AMPDU_FACTOR,
+ NL80211_BAND_ATTR_HT_AMPDU_DENSITY,
+
/* keep last */
__NL80211_BAND_ATTR_AFTER_LAST,
NL80211_BAND_ATTR_MAX = __NL80211_BAND_ATTR_AFTER_LAST - 1
};
+#define NL80211_BAND_ATTR_HT_CAPA NL80211_BAND_ATTR_HT_CAPA
+
/**
* enum nl80211_frequency_attr - frequency attributes
* @NL80211_FREQUENCY_ATTR_FREQ: Frequency in MHz
@@ -474,6 +571,8 @@ enum nl80211_band_attr {
* on this channel in current regulatory domain.
* @NL80211_FREQUENCY_ATTR_RADAR: Radar detection is mandatory
* on this channel in current regulatory domain.
+ * @NL80211_FREQUENCY_ATTR_MAX_TX_POWER: Maximum transmission power in mBm
+ * (100 * dBm).
*/
enum nl80211_frequency_attr {
__NL80211_FREQUENCY_ATTR_INVALID,
@@ -482,12 +581,15 @@ enum nl80211_frequency_attr {
NL80211_FREQUENCY_ATTR_PASSIVE_SCAN,
NL80211_FREQUENCY_ATTR_NO_IBSS,
NL80211_FREQUENCY_ATTR_RADAR,
+ NL80211_FREQUENCY_ATTR_MAX_TX_POWER,
/* keep last */
__NL80211_FREQUENCY_ATTR_AFTER_LAST,
NL80211_FREQUENCY_ATTR_MAX = __NL80211_FREQUENCY_ATTR_AFTER_LAST - 1
};
+#define NL80211_FREQUENCY_ATTR_MAX_TX_POWER NL80211_FREQUENCY_ATTR_MAX_TX_POWER
+
/**
* enum nl80211_bitrate_attr - bitrate attributes
* @NL80211_BITRATE_ATTR_RATE: Bitrate in units of 100 kbps
@@ -594,4 +696,119 @@ enum nl80211_mntr_flags {
NL80211_MNTR_FLAG_MAX = __NL80211_MNTR_FLAG_AFTER_LAST - 1
};
+/**
+ * enum nl80211_meshconf_params - mesh configuration parameters
+ *
+ * Mesh configuration parameters
+ *
+ * @__NL80211_MESHCONF_INVALID: internal use
+ *
+ * @NL80211_MESHCONF_RETRY_TIMEOUT: specifies the initial retry timeout in
+ * millisecond units, used by the Peer Link Open message
+ *
+ * @NL80211_MESHCONF_CONFIRM_TIMEOUT: specifies the initial confirm timeout, in
+ * millisecond units, used by the peer link management to close a peer link
+ *
+ * @NL80211_MESHCONF_HOLDING_TIMEOUT: specifies the holding timeout, in
+ * millisecond units
+ *
+ * @NL80211_MESHCONF_MAX_PEER_LINKS: maximum number of peer links allowed
+ * on this mesh interface
+ *
+ * @NL80211_MESHCONF_MAX_RETRIES: specifies the maximum number of peer link
+ * open retries that can be sent to establish a new peer link instance in a
+ * mesh
+ *
+ * @NL80211_MESHCONF_TTL: specifies the value of TTL field set at a source mesh
+ * point.
+ *
+ * @NL80211_MESHCONF_AUTO_OPEN_PLINKS: whether we should automatically
+ * open peer links when we detect compatible mesh peers.
+ *
+ * @NL80211_MESHCONF_HWMP_MAX_PREQ_RETRIES: the number of action frames
+ * containing a PREQ that an MP can send to a particular destination (path
+ * target)
+ *
+ * @NL80211_MESHCONF_PATH_REFRESH_TIME: how frequently to refresh mesh paths
+ * (in milliseconds)
+ *
+ * @NL80211_MESHCONF_MIN_DISCOVERY_TIMEOUT: minimum length of time to wait
+ * until giving up on a path discovery (in milliseconds)
+ *
+ * @NL80211_MESHCONF_HWMP_ACTIVE_PATH_TIMEOUT: The time (in TUs) for which mesh
+ * points receiving a PREQ shall consider the forwarding information from the
+ * root to be valid. (TU = time unit)
+ *
+ * @NL80211_MESHCONF_HWMP_PREQ_MIN_INTERVAL: The minimum interval of time (in
+ * TUs) during which an MP can send only one action frame containing a PREQ
+ * reference element
+ *
+ * @NL80211_MESHCONF_HWMP_NET_DIAM_TRVS_TIME: The interval of time (in TUs)
+ * that it takes for an HWMP information element to propagate across the mesh
+ *
+ * @NL80211_MESHCONF_ATTR_MAX: highest possible mesh configuration attribute
+ *
+ * @__NL80211_MESHCONF_ATTR_AFTER_LAST: internal use
+ */
+enum nl80211_meshconf_params {
+ __NL80211_MESHCONF_INVALID,
+ NL80211_MESHCONF_RETRY_TIMEOUT,
+ NL80211_MESHCONF_CONFIRM_TIMEOUT,
+ NL80211_MESHCONF_HOLDING_TIMEOUT,
+ NL80211_MESHCONF_MAX_PEER_LINKS,
+ NL80211_MESHCONF_MAX_RETRIES,
+ NL80211_MESHCONF_TTL,
+ NL80211_MESHCONF_AUTO_OPEN_PLINKS,
+ NL80211_MESHCONF_HWMP_MAX_PREQ_RETRIES,
+ NL80211_MESHCONF_PATH_REFRESH_TIME,
+ NL80211_MESHCONF_MIN_DISCOVERY_TIMEOUT,
+ NL80211_MESHCONF_HWMP_ACTIVE_PATH_TIMEOUT,
+ NL80211_MESHCONF_HWMP_PREQ_MIN_INTERVAL,
+ NL80211_MESHCONF_HWMP_NET_DIAM_TRVS_TIME,
+
+ /* keep last */
+ __NL80211_MESHCONF_ATTR_AFTER_LAST,
+ NL80211_MESHCONF_ATTR_MAX = __NL80211_MESHCONF_ATTR_AFTER_LAST - 1
+};
+
+/**
+ * enum nl80211_txq_attr - TX queue parameter attributes
+ * @__NL80211_TXQ_ATTR_INVALID: Attribute number 0 is reserved
+ * @NL80211_TXQ_ATTR_QUEUE: TX queue identifier (NL80211_TXQ_Q_*)
+ * @NL80211_TXQ_ATTR_TXOP: Maximum burst time in units of 32 usecs, 0 meaning
+ * disabled
+ * @NL80211_TXQ_ATTR_CWMIN: Minimum contention window [a value of the form
+ * 2^n-1 in the range 1..32767]
+ * @NL80211_TXQ_ATTR_CWMAX: Maximum contention window [a value of the form
+ * 2^n-1 in the range 1..32767]
+ * @NL80211_TXQ_ATTR_AIFS: Arbitration interframe space [0..255]
+ * @__NL80211_TXQ_ATTR_AFTER_LAST: Internal
+ * @NL80211_TXQ_ATTR_MAX: Maximum TXQ attribute number
+ */
+enum nl80211_txq_attr {
+ __NL80211_TXQ_ATTR_INVALID,
+ NL80211_TXQ_ATTR_QUEUE,
+ NL80211_TXQ_ATTR_TXOP,
+ NL80211_TXQ_ATTR_CWMIN,
+ NL80211_TXQ_ATTR_CWMAX,
+ NL80211_TXQ_ATTR_AIFS,
+
+ /* keep last */
+ __NL80211_TXQ_ATTR_AFTER_LAST,
+ NL80211_TXQ_ATTR_MAX = __NL80211_TXQ_ATTR_AFTER_LAST - 1
+};
+
+enum nl80211_txq_q {
+ NL80211_TXQ_Q_VO,
+ NL80211_TXQ_Q_VI,
+ NL80211_TXQ_Q_BE,
+ NL80211_TXQ_Q_BK
+};
+
+enum nl80211_channel_type {
+ NL80211_CHAN_NO_HT,
+ NL80211_CHAN_HT20,
+ NL80211_CHAN_HT40MINUS,
+ NL80211_CHAN_HT40PLUS
+};
#endif /* __LINUX_NL80211_H */
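For illustration only (not part of the patch), a minimal userspace sketch of how the new channel-type and rate-info values might be presented once this header is installed; channel_type_name() and print_bitrate() are hypothetical helpers, and the numbers passed to them would normally come from parsed nested netlink attributes:

	#include <stdio.h>
	#include <linux/nl80211.h>

	/* Map the new enum nl80211_channel_type values to readable names. */
	static const char *channel_type_name(enum nl80211_channel_type t)
	{
		switch (t) {
		case NL80211_CHAN_NO_HT:	return "no HT";
		case NL80211_CHAN_HT20:		return "HT20";
		case NL80211_CHAN_HT40MINUS:	return "HT40-";
		case NL80211_CHAN_HT40PLUS:	return "HT40+";
		}
		return "unknown";
	}

	/* NL80211_RATE_INFO_BITRATE is in units of 100 kbit/s. */
	static void print_bitrate(unsigned int rate, int mcs, int is_40mhz, int sgi)
	{
		printf("%u.%u MBit/s", rate / 10, rate % 10);
		if (mcs >= 0)
			printf(" (MCS %d%s%s)", mcs,
			       is_40mhz ? ", 40 MHz" : "",
			       sgi ? ", short GI" : "");
		printf("\n");
	}

	int main(void)
	{
		printf("channel: %s\n", channel_type_name(NL80211_CHAN_HT40PLUS));
		print_bitrate(3000, 15, 1, 1);	/* 300.0 MBit/s, MCS 15 */
		return 0;
	}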
diff --git a/include/linux/nsproxy.h b/include/linux/nsproxy.h
index c8a768e5964..afad7dec1b3 100644
--- a/include/linux/nsproxy.h
+++ b/include/linux/nsproxy.h
@@ -27,7 +27,6 @@ struct nsproxy {
struct ipc_namespace *ipc_ns;
struct mnt_namespace *mnt_ns;
struct pid_namespace *pid_ns;
- struct user_namespace *user_ns;
struct net *net_ns;
};
extern struct nsproxy init_nsproxy;
diff --git a/include/linux/of.h b/include/linux/of.h
index 79886ade070..6a7efa242f5 100644
--- a/include/linux/of.h
+++ b/include/linux/of.h
@@ -57,6 +57,12 @@ extern struct device_node *of_get_next_child(const struct device_node *node,
for (child = of_get_next_child(parent, NULL); child != NULL; \
child = of_get_next_child(parent, child))
+extern struct device_node *of_find_node_with_property(
+ struct device_node *from, const char *prop_name);
+#define for_each_node_with_property(dn, prop_name) \
+ for (dn = of_find_node_with_property(NULL, prop_name); dn; \
+ dn = of_find_node_with_property(dn, prop_name))
+
extern struct property *of_find_property(const struct device_node *np,
const char *name,
int *lenp);
@@ -71,5 +77,8 @@ extern int of_n_size_cells(struct device_node *np);
extern const struct of_device_id *of_match_node(
const struct of_device_id *matches, const struct device_node *node);
extern int of_modalias_node(struct device_node *node, char *modalias, int len);
+extern int of_parse_phandles_with_args(struct device_node *np,
+ const char *list_name, const char *cells_name, int index,
+ struct device_node **out_node, const void **out_args);
#endif /* _LINUX_OF_H */
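As a usage illustration (not part of the patch), the new iterator can be used as below; the "gpios" property name is only an example:

	#include <linux/init.h>
	#include <linux/kernel.h>
	#include <linux/of.h>

	/* Walk every device tree node that carries a given property. */
	static void __init list_nodes_with_gpios(void)
	{
		struct device_node *dn;

		for_each_node_with_property(dn, "gpios")
			pr_info("node %s has a 'gpios' property\n",
				dn->full_name);
	}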
diff --git a/include/linux/of_gpio.h b/include/linux/of_gpio.h
index 67db101d0eb..fc2472c3c25 100644
--- a/include/linux/of_gpio.h
+++ b/include/linux/of_gpio.h
@@ -14,9 +14,22 @@
#ifndef __LINUX_OF_GPIO_H
#define __LINUX_OF_GPIO_H
+#include <linux/compiler.h>
+#include <linux/kernel.h>
#include <linux/errno.h>
#include <linux/gpio.h>
+struct device_node;
+
+/*
+ * These are Linux-specific flags. By default the controller's and Linux'
+ * mappings match, but GPIO controllers are free to translate their own flags
+ * to the Linux-specific ones in their .xlate callback. A 1:1 mapping is
+ * recommended, though.
+ */
+enum of_gpio_flags {
+ OF_GPIO_ACTIVE_LOW = 0x1,
+};
+
#ifdef CONFIG_OF_GPIO
/*
@@ -26,7 +39,7 @@ struct of_gpio_chip {
struct gpio_chip gc;
int gpio_cells;
int (*xlate)(struct of_gpio_chip *of_gc, struct device_node *np,
- const void *gpio_spec);
+ const void *gpio_spec, enum of_gpio_flags *flags);
};
static inline struct of_gpio_chip *to_of_gpio_chip(struct gpio_chip *gc)
@@ -50,20 +63,43 @@ static inline struct of_mm_gpio_chip *to_of_mm_gpio_chip(struct gpio_chip *gc)
return container_of(of_gc, struct of_mm_gpio_chip, of_gc);
}
-extern int of_get_gpio(struct device_node *np, int index);
+extern int of_get_gpio_flags(struct device_node *np, int index,
+ enum of_gpio_flags *flags);
+extern unsigned int of_gpio_count(struct device_node *np);
+
extern int of_mm_gpiochip_add(struct device_node *np,
struct of_mm_gpio_chip *mm_gc);
extern int of_gpio_simple_xlate(struct of_gpio_chip *of_gc,
struct device_node *np,
- const void *gpio_spec);
+ const void *gpio_spec,
+ enum of_gpio_flags *flags);
#else
/* Drivers may not strictly depend on the GPIO support, so let them link. */
-static inline int of_get_gpio(struct device_node *np, int index)
+static inline int of_get_gpio_flags(struct device_node *np, int index,
+ enum of_gpio_flags *flags)
{
return -ENOSYS;
}
+static inline unsigned int of_gpio_count(struct device_node *np)
+{
+ return 0;
+}
+
#endif /* CONFIG_OF_GPIO */
+/**
+ * of_get_gpio - Get a GPIO number to use with GPIO API
+ * @np: device node to get GPIO from
+ * @index: index of the GPIO
+ *
+ * Returns the GPIO number to use with the Linux generic GPIO API, or a
+ * negative errno value on error.
+ */
+static inline int of_get_gpio(struct device_node *np, int index)
+{
+ return of_get_gpio_flags(np, index, NULL);
+}
+
#endif /* __LINUX_OF_GPIO_H */
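A hedged driver sketch (not part of the patch) of how the flags-aware call and the new OF_GPIO_ACTIVE_LOW flag might be used; the GPIO index, label and "asserted" convention are placeholders:

	#include <linux/gpio.h>
	#include <linux/of_gpio.h>

	/* Request GPIO 0 of @np and drive it to its asserted level. */
	static int example_assert_reset(struct device_node *np)
	{
		enum of_gpio_flags flags;
		int gpio, err, level = 1;

		gpio = of_get_gpio_flags(np, 0, &flags);
		if (gpio < 0)
			return gpio;

		err = gpio_request(gpio, "example-reset");
		if (err)
			return err;

		if (flags & OF_GPIO_ACTIVE_LOW)
			level = 0;	/* asserted means driving low */

		return gpio_direction_output(gpio, level);
	}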
diff --git a/include/linux/of_platform.h b/include/linux/of_platform.h
index a8efcfeea73..3d327b67d7e 100644
--- a/include/linux/of_platform.h
+++ b/include/linux/of_platform.h
@@ -26,8 +26,7 @@ extern struct bus_type of_platform_bus_type;
/*
* An of_platform_driver driver is attached to a basic of_device on
- * the "platform bus" (of_platform_bus_type) (or ISA, EBUS and SBUS
- * busses on sparc).
+ * the "platform bus" (of_platform_bus_type).
*/
struct of_platform_driver
{
diff --git a/include/linux/oprofile.h b/include/linux/oprofile.h
index bcb8f725427..1ce9fe572e5 100644
--- a/include/linux/oprofile.h
+++ b/include/linux/oprofile.h
@@ -86,15 +86,7 @@ int oprofile_arch_init(struct oprofile_operations * ops);
void oprofile_arch_exit(void);
/**
- * Add data to the event buffer.
- * The data passed is free-form, but typically consists of
- * file offsets, dcookies, context information, and ESCAPE codes.
- */
-void add_event_entry(unsigned long data);
-
-/**
- * Add a sample. This may be called from any context. Pass
- * smp_processor_id() as cpu.
+ * Add a sample. This may be called from any context.
*/
void oprofile_add_sample(struct pt_regs * const regs, unsigned long event);
@@ -162,5 +154,14 @@ int oprofilefs_ulong_from_user(unsigned long * val, char const __user * buf, siz
/** lock for read/write safety */
extern spinlock_t oprofilefs_lock;
+
+/**
+ * Add the contents of a circular buffer to the event buffer.
+ */
+void oprofile_put_buff(unsigned long *buf, unsigned int start,
+ unsigned int stop, unsigned int max);
+
+unsigned long oprofile_get_cpu_buffer_size(void);
+void oprofile_cpu_buffer_inc_smpl_lost(void);
#endif /* OPROFILE_H */
diff --git a/include/linux/page-flags.h b/include/linux/page-flags.h
index c74d3e87531..b12f93a3c34 100644
--- a/include/linux/page-flags.h
+++ b/include/linux/page-flags.h
@@ -93,6 +93,11 @@ enum pageflags {
PG_mappedtodisk, /* Has blocks allocated on-disk */
PG_reclaim, /* To be reclaimed asap */
PG_buddy, /* Page is free, on buddy lists */
+ PG_swapbacked, /* Page is backed by RAM/swap */
+#ifdef CONFIG_UNEVICTABLE_LRU
+ PG_unevictable, /* Page is "unevictable" */
+ PG_mlocked, /* Page is vma mlocked */
+#endif
#ifdef CONFIG_IA64_UNCACHED_ALLOCATOR
PG_uncached, /* Page has been mapped as uncached */
#endif
@@ -161,6 +166,18 @@ static inline int Page##uname(struct page *page) \
#define TESTSCFLAG(uname, lname) \
TESTSETFLAG(uname, lname) TESTCLEARFLAG(uname, lname)
+#define SETPAGEFLAG_NOOP(uname) \
+static inline void SetPage##uname(struct page *page) { }
+
+#define CLEARPAGEFLAG_NOOP(uname) \
+static inline void ClearPage##uname(struct page *page) { }
+
+#define __CLEARPAGEFLAG_NOOP(uname) \
+static inline void __ClearPage##uname(struct page *page) { }
+
+#define TESTCLEARFLAG_FALSE(uname) \
+static inline int TestClearPage##uname(struct page *page) { return 0; }
+
struct page; /* forward declaration */
TESTPAGEFLAG(Locked, locked)
@@ -169,6 +186,7 @@ PAGEFLAG(Referenced, referenced) TESTCLEARFLAG(Referenced, referenced)
PAGEFLAG(Dirty, dirty) TESTSCFLAG(Dirty, dirty) __CLEARPAGEFLAG(Dirty, dirty)
PAGEFLAG(LRU, lru) __CLEARPAGEFLAG(LRU, lru)
PAGEFLAG(Active, active) __CLEARPAGEFLAG(Active, active)
+ TESTCLEARFLAG(Active, active)
__PAGEFLAG(Slab, slab)
PAGEFLAG(Checked, checked) /* Used by some filesystems */
PAGEFLAG(Pinned, pinned) TESTSCFLAG(Pinned, pinned) /* Xen */
@@ -176,6 +194,7 @@ PAGEFLAG(SavePinned, savepinned); /* Xen */
PAGEFLAG(Reserved, reserved) __CLEARPAGEFLAG(Reserved, reserved)
PAGEFLAG(Private, private) __CLEARPAGEFLAG(Private, private)
__SETPAGEFLAG(Private, private)
+PAGEFLAG(SwapBacked, swapbacked) __CLEARPAGEFLAG(SwapBacked, swapbacked)
__PAGEFLAG(SlobPage, slob_page)
__PAGEFLAG(SlobFree, slob_free)
@@ -211,6 +230,25 @@ PAGEFLAG(SwapCache, swapcache)
PAGEFLAG_FALSE(SwapCache)
#endif
+#ifdef CONFIG_UNEVICTABLE_LRU
+PAGEFLAG(Unevictable, unevictable) __CLEARPAGEFLAG(Unevictable, unevictable)
+ TESTCLEARFLAG(Unevictable, unevictable)
+
+#define MLOCK_PAGES 1
+PAGEFLAG(Mlocked, mlocked) __CLEARPAGEFLAG(Mlocked, mlocked)
+ TESTSCFLAG(Mlocked, mlocked)
+
+#else
+
+#define MLOCK_PAGES 0
+PAGEFLAG_FALSE(Mlocked)
+ SETPAGEFLAG_NOOP(Mlocked) TESTCLEARFLAG_FALSE(Mlocked)
+
+PAGEFLAG_FALSE(Unevictable) TESTCLEARFLAG_FALSE(Unevictable)
+ SETPAGEFLAG_NOOP(Unevictable) CLEARPAGEFLAG_NOOP(Unevictable)
+ __CLEARPAGEFLAG_NOOP(Unevictable)
+#endif
+
#ifdef CONFIG_IA64_UNCACHED_ALLOCATOR
PAGEFLAG(Uncached, uncached)
#else
@@ -326,15 +364,25 @@ static inline void __ClearPageTail(struct page *page)
#endif /* !PAGEFLAGS_EXTENDED */
+#ifdef CONFIG_UNEVICTABLE_LRU
+#define __PG_UNEVICTABLE (1 << PG_unevictable)
+#define __PG_MLOCKED (1 << PG_mlocked)
+#else
+#define __PG_UNEVICTABLE 0
+#define __PG_MLOCKED 0
+#endif
+
#define PAGE_FLAGS (1 << PG_lru | 1 << PG_private | 1 << PG_locked | \
1 << PG_buddy | 1 << PG_writeback | \
- 1 << PG_slab | 1 << PG_swapcache | 1 << PG_active)
+ 1 << PG_slab | 1 << PG_swapcache | 1 << PG_active | \
+ __PG_UNEVICTABLE | __PG_MLOCKED)
/*
* Flags checked in bad_page(). Pages on the free list should not have
 * these flags set. If they are, there is a problem.
*/
-#define PAGE_FLAGS_CLEAR_WHEN_BAD (PAGE_FLAGS | 1 << PG_reclaim | 1 << PG_dirty)
+#define PAGE_FLAGS_CLEAR_WHEN_BAD (PAGE_FLAGS | \
+ 1 << PG_reclaim | 1 << PG_dirty | 1 << PG_swapbacked)
/*
* Flags checked when a page is freed. Pages being freed should not have
@@ -347,7 +395,8 @@ static inline void __ClearPageTail(struct page *page)
 * Pages being prepped should not have these flags set. If they are, there
* is a problem.
*/
-#define PAGE_FLAGS_CHECK_AT_PREP (PAGE_FLAGS | 1 << PG_reserved | 1 << PG_dirty)
+#define PAGE_FLAGS_CHECK_AT_PREP (PAGE_FLAGS | \
+ 1 << PG_reserved | 1 << PG_dirty | 1 << PG_swapbacked)
#endif /* !__GENERATING_BOUNDS_H */
#endif /* PAGE_FLAGS_H */
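For readers following the macro changes, this is roughly what PAGEFLAG(SwapBacked, swapbacked) and the new *_NOOP stubs generate (an illustrative expansion, not literal preprocessor output):

	/* PAGEFLAG(SwapBacked, swapbacked) expands to, approximately: */
	static inline int PageSwapBacked(struct page *page)
		{ return test_bit(PG_swapbacked, &page->flags); }
	static inline void SetPageSwapBacked(struct page *page)
		{ set_bit(PG_swapbacked, &page->flags); }
	static inline void ClearPageSwapBacked(struct page *page)
		{ clear_bit(PG_swapbacked, &page->flags); }

	/*
	 * SETPAGEFLAG_NOOP(Mlocked), used when CONFIG_UNEVICTABLE_LRU is not
	 * set, emits an empty stub so callers need no #ifdefs of their own:
	 */
	static inline void SetPageMlocked(struct page *page) { }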
diff --git a/include/linux/page_cgroup.h b/include/linux/page_cgroup.h
new file mode 100644
index 00000000000..1e6d34bfa09
--- /dev/null
+++ b/include/linux/page_cgroup.h
@@ -0,0 +1,108 @@
+#ifndef __LINUX_PAGE_CGROUP_H
+#define __LINUX_PAGE_CGROUP_H
+
+#ifdef CONFIG_CGROUP_MEM_RES_CTLR
+#include <linux/bit_spinlock.h>
+/*
+ * Page Cgroup can be considered as an extended mem_map.
+ * A page_cgroup page is associated with every page descriptor. The
+ * page_cgroup helps us identify information about the cgroup
+ * All page cgroups are allocated at boot or memory hotplug event,
+ * then the page cgroup for pfn always exists.
+ */
+struct page_cgroup {
+ unsigned long flags;
+ struct mem_cgroup *mem_cgroup;
+ struct page *page;
+ struct list_head lru; /* per cgroup LRU list */
+};
+
+void __meminit pgdat_page_cgroup_init(struct pglist_data *pgdat);
+void __init page_cgroup_init(void);
+struct page_cgroup *lookup_page_cgroup(struct page *page);
+
+enum {
+ /* flags for mem_cgroup */
+ PCG_LOCK, /* page cgroup is locked */
+ PCG_CACHE, /* charged as cache */
+ PCG_USED, /* this object is in use. */
+ /* flags for LRU placement */
+ PCG_ACTIVE, /* page is active in this cgroup */
+ PCG_FILE, /* page is file system backed */
+	PCG_UNEVICTABLE, /* page is unevictable */
+};
+
+#define TESTPCGFLAG(uname, lname) \
+static inline int PageCgroup##uname(struct page_cgroup *pc) \
+ { return test_bit(PCG_##lname, &pc->flags); }
+
+#define SETPCGFLAG(uname, lname) \
+static inline void SetPageCgroup##uname(struct page_cgroup *pc)\
+ { set_bit(PCG_##lname, &pc->flags); }
+
+#define CLEARPCGFLAG(uname, lname) \
+static inline void ClearPageCgroup##uname(struct page_cgroup *pc) \
+ { clear_bit(PCG_##lname, &pc->flags); }
+
+/* Cache flag is set only once (at allocation) */
+TESTPCGFLAG(Cache, CACHE)
+
+TESTPCGFLAG(Used, USED)
+CLEARPCGFLAG(Used, USED)
+
+/* LRU management flags (from global-lru definition) */
+TESTPCGFLAG(File, FILE)
+SETPCGFLAG(File, FILE)
+CLEARPCGFLAG(File, FILE)
+
+TESTPCGFLAG(Active, ACTIVE)
+SETPCGFLAG(Active, ACTIVE)
+CLEARPCGFLAG(Active, ACTIVE)
+
+TESTPCGFLAG(Unevictable, UNEVICTABLE)
+SETPCGFLAG(Unevictable, UNEVICTABLE)
+CLEARPCGFLAG(Unevictable, UNEVICTABLE)
+
+static inline int page_cgroup_nid(struct page_cgroup *pc)
+{
+ return page_to_nid(pc->page);
+}
+
+static inline enum zone_type page_cgroup_zid(struct page_cgroup *pc)
+{
+ return page_zonenum(pc->page);
+}
+
+static inline void lock_page_cgroup(struct page_cgroup *pc)
+{
+ bit_spin_lock(PCG_LOCK, &pc->flags);
+}
+
+static inline int trylock_page_cgroup(struct page_cgroup *pc)
+{
+ return bit_spin_trylock(PCG_LOCK, &pc->flags);
+}
+
+static inline void unlock_page_cgroup(struct page_cgroup *pc)
+{
+ bit_spin_unlock(PCG_LOCK, &pc->flags);
+}
+
+#else /* CONFIG_CGROUP_MEM_RES_CTLR */
+struct page_cgroup;
+
+static inline void __meminit pgdat_page_cgroup_init(struct pglist_data *pgdat)
+{
+}
+
+static inline struct page_cgroup *lookup_page_cgroup(struct page *page)
+{
+ return NULL;
+}
+
+static inline void page_cgroup_init(void)
+{
+}
+
+#endif
+#endif
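A small sketch (not part of the patch) of the lock/test/modify pattern the bit-spinlock helpers above are meant for; the real charge/uncharge logic lives in mm/memcontrol.c and is more involved:

	#include <linux/mm.h>
	#include <linux/page_cgroup.h>

	/* Promote a used, cache-charged page's cgroup state under the lock. */
	static bool example_mark_active(struct page *page)
	{
		struct page_cgroup *pc = lookup_page_cgroup(page);
		bool done = false;

		if (!pc)
			return false;

		lock_page_cgroup(pc);
		if (PageCgroupUsed(pc) && PageCgroupCache(pc)) {
			SetPageCgroupActive(pc);
			done = true;
		}
		unlock_page_cgroup(pc);
		return done;
	}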
diff --git a/include/linux/pagemap.h b/include/linux/pagemap.h
index 5da31c12101..709742be02f 100644
--- a/include/linux/pagemap.h
+++ b/include/linux/pagemap.h
@@ -32,6 +32,34 @@ static inline void mapping_set_error(struct address_space *mapping, int error)
}
}
+#ifdef CONFIG_UNEVICTABLE_LRU
+#define AS_UNEVICTABLE (__GFP_BITS_SHIFT + 2) /* e.g., ramdisk, SHM_LOCK */
+
+static inline void mapping_set_unevictable(struct address_space *mapping)
+{
+ set_bit(AS_UNEVICTABLE, &mapping->flags);
+}
+
+static inline void mapping_clear_unevictable(struct address_space *mapping)
+{
+ clear_bit(AS_UNEVICTABLE, &mapping->flags);
+}
+
+static inline int mapping_unevictable(struct address_space *mapping)
+{
+ if (likely(mapping))
+ return test_bit(AS_UNEVICTABLE, &mapping->flags);
+ return !!mapping;
+}
+#else
+static inline void mapping_set_unevictable(struct address_space *mapping) { }
+static inline void mapping_clear_unevictable(struct address_space *mapping) { }
+static inline int mapping_unevictable(struct address_space *mapping)
+{
+ return 0;
+}
+#endif
+
static inline gfp_t mapping_gfp_mask(struct address_space * mapping)
{
return (__force gfp_t)mapping->flags & __GFP_BITS_MASK;
@@ -271,19 +299,19 @@ extern int __lock_page_killable(struct page *page);
extern void __lock_page_nosync(struct page *page);
extern void unlock_page(struct page *page);
-static inline void set_page_locked(struct page *page)
+static inline void __set_page_locked(struct page *page)
{
- set_bit(PG_locked, &page->flags);
+ __set_bit(PG_locked, &page->flags);
}
-static inline void clear_page_locked(struct page *page)
+static inline void __clear_page_locked(struct page *page)
{
- clear_bit(PG_locked, &page->flags);
+ __clear_bit(PG_locked, &page->flags);
}
static inline int trylock_page(struct page *page)
{
- return !test_and_set_bit(PG_locked, &page->flags);
+ return (likely(!test_and_set_bit_lock(PG_locked, &page->flags)));
}
/*
@@ -410,17 +438,17 @@ extern void __remove_from_page_cache(struct page *page);
/*
* Like add_to_page_cache_locked, but used to add newly allocated pages:
- * the page is new, so we can just run set_page_locked() against it.
+ * the page is new, so we can just run __set_page_locked() against it.
*/
static inline int add_to_page_cache(struct page *page,
struct address_space *mapping, pgoff_t offset, gfp_t gfp_mask)
{
int error;
- set_page_locked(page);
+ __set_page_locked(page);
error = add_to_page_cache_locked(page, mapping, offset, gfp_mask);
if (unlikely(error))
- clear_page_locked(page);
+ __clear_page_locked(page);
return error;
}
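As a usage note (not part of the patch), the mapping helpers target the ramdisk/SHM_LOCK style of pinning named in the AS_UNEVICTABLE comment; a minimal sketch, with the inode supplied by the caller:

	#include <linux/fs.h>
	#include <linux/pagemap.h>

	/*
	 * Mark an address_space whose pages must never be reclaimed; vmscan
	 * will keep them on the unevictable LRU instead of scanning them.
	 */
	static void example_pin_mapping(struct inode *inode)
	{
		mapping_set_unevictable(inode->i_mapping);
	}

	static void example_unpin_mapping(struct inode *inode)
	{
		mapping_clear_unevictable(inode->i_mapping);
	}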
diff --git a/include/linux/pagevec.h b/include/linux/pagevec.h
index 8eb7fa76c1d..e90a2cb0291 100644
--- a/include/linux/pagevec.h
+++ b/include/linux/pagevec.h
@@ -23,9 +23,9 @@ struct pagevec {
void __pagevec_release(struct pagevec *pvec);
void __pagevec_release_nonlru(struct pagevec *pvec);
void __pagevec_free(struct pagevec *pvec);
-void __pagevec_lru_add(struct pagevec *pvec);
-void __pagevec_lru_add_active(struct pagevec *pvec);
+void ____pagevec_lru_add(struct pagevec *pvec, enum lru_list lru);
void pagevec_strip(struct pagevec *pvec);
+void pagevec_swap_free(struct pagevec *pvec);
unsigned pagevec_lookup(struct pagevec *pvec, struct address_space *mapping,
pgoff_t start, unsigned nr_pages);
unsigned pagevec_lookup_tag(struct pagevec *pvec,
@@ -81,10 +81,36 @@ static inline void pagevec_free(struct pagevec *pvec)
__pagevec_free(pvec);
}
-static inline void pagevec_lru_add(struct pagevec *pvec)
+static inline void __pagevec_lru_add_anon(struct pagevec *pvec)
+{
+ ____pagevec_lru_add(pvec, LRU_INACTIVE_ANON);
+}
+
+static inline void __pagevec_lru_add_active_anon(struct pagevec *pvec)
+{
+ ____pagevec_lru_add(pvec, LRU_ACTIVE_ANON);
+}
+
+static inline void __pagevec_lru_add_file(struct pagevec *pvec)
+{
+ ____pagevec_lru_add(pvec, LRU_INACTIVE_FILE);
+}
+
+static inline void __pagevec_lru_add_active_file(struct pagevec *pvec)
+{
+ ____pagevec_lru_add(pvec, LRU_ACTIVE_FILE);
+}
+
+static inline void pagevec_lru_add_file(struct pagevec *pvec)
+{
+ if (pagevec_count(pvec))
+ __pagevec_lru_add_file(pvec);
+}
+
+static inline void pagevec_lru_add_anon(struct pagevec *pvec)
{
if (pagevec_count(pvec))
- __pagevec_lru_add(pvec);
+ __pagevec_lru_add_anon(pvec);
}
#endif /* _LINUX_PAGEVEC_H */
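A sketch (not part of the patch) of the usual batching pattern with the now file-specific entry points; the page array is hypothetical and the reference handling mirrors what lru_cache_add-style callers do:

	#include <linux/pagemap.h>
	#include <linux/pagevec.h>

	/* Batch pages onto the inactive-file LRU, flushing when the vec fills. */
	static void example_add_to_file_lru(struct page **pages, int nr)
	{
		struct pagevec pvec;
		int i;

		pagevec_init(&pvec, 0);
		for (i = 0; i < nr; i++) {
			page_cache_get(pages[i]);	/* LRU holds a reference */
			if (!pagevec_add(&pvec, pages[i]))
				__pagevec_lru_add_file(&pvec);
		}
		pagevec_lru_add_file(&pvec);		/* flush the remainder */
	}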
diff --git a/include/linux/parport.h b/include/linux/parport.h
index 6a0d7cdb577..e1f83c5065c 100644
--- a/include/linux/parport.h
+++ b/include/linux/parport.h
@@ -1,5 +1,3 @@
-/* $Id: parport.h,v 1.1 1998/05/17 10:57:52 andrea Exp andrea $ */
-
/*
* Any part of this program may be used in documents licensed under
* the GNU Free Documentation License, Version 1.1 or any later version
diff --git a/include/linux/pci.h b/include/linux/pci.h
index 98dc6243a70..03b0b8c3c81 100644
--- a/include/linux/pci.h
+++ b/include/linux/pci.h
@@ -51,6 +51,7 @@
#include <linux/kobject.h>
#include <asm/atomic.h>
#include <linux/device.h>
+#include <linux/io.h>
/* Include the ID list */
#include <linux/pci_ids.h>
@@ -64,6 +65,11 @@ struct pci_slot {
struct kobject kobj;
};
+static inline const char *pci_slot_name(const struct pci_slot *slot)
+{
+ return kobject_name(&slot->kobj);
+}
+
/* File state for mmap()s on /proc/bus/pci/X/Y */
enum pci_mmap_state {
pci_mmap_io,
@@ -128,6 +134,11 @@ enum pci_dev_flags {
PCI_DEV_FLAGS_NO_D3 = (__force pci_dev_flags_t) 2,
};
+enum pci_irq_reroute_variant {
+ INTEL_IRQ_REROUTE_VARIANT = 1,
+ MAX_IRQ_REROUTE_VARIANTS = 3
+};
+
typedef unsigned short __bitwise pci_bus_flags_t;
enum pci_bus_flags {
PCI_BUS_FLAGS_NO_MSI = (__force pci_bus_flags_t) 1,
@@ -212,8 +223,10 @@ struct pci_dev {
unsigned int no_msi:1; /* device may not use msi */
unsigned int block_ucfg_access:1; /* userspace config space access is blocked */
unsigned int broken_parity_status:1; /* Device generates false positive parity */
+ unsigned int irq_reroute_variant:2; /* device needs IRQ rerouting variant */
unsigned int msi_enabled:1;
unsigned int msix_enabled:1;
+ unsigned int ari_enabled:1; /* ARI forwarding */
unsigned int is_managed:1;
unsigned int is_pcie:1;
pci_dev_flags_t dev_flags;
@@ -347,7 +360,6 @@ struct pci_bus_region {
struct pci_dynids {
spinlock_t lock; /* protects list, index */
struct list_head list; /* for IDs added at runtime */
- unsigned int use_driver_data:1; /* pci_device_id->driver_data is used */
};
/* ---------------------------------------------------------------- */
@@ -456,8 +468,8 @@ struct pci_driver {
/**
* PCI_VDEVICE - macro used to describe a specific pci device in short form
- * @vend: the vendor name
- * @dev: the 16 bit PCI Device ID
+ * @vendor: the vendor name
+ * @device: the 16 bit PCI Device ID
*
* This macro is used to create a struct pci_device_id that matches a
* specific PCI device. The subvendor, and subdevice fields will be set
@@ -509,9 +521,10 @@ struct pci_bus *pci_create_bus(struct device *parent, int bus,
struct pci_bus *pci_add_new_bus(struct pci_bus *parent, struct pci_dev *dev,
int busnr);
struct pci_slot *pci_create_slot(struct pci_bus *parent, int slot_nr,
- const char *name);
+ const char *name,
+ struct hotplug_slot *hotplug);
void pci_destroy_slot(struct pci_slot *slot);
-void pci_update_slot_number(struct pci_slot *slot, int slot_nr);
+void pci_renumber_slot(struct pci_slot *slot, int slot_nr);
int pci_scan_slot(struct pci_bus *bus, int devfn);
struct pci_dev *pci_scan_single_device(struct pci_bus *bus, int devfn);
void pci_device_add(struct pci_dev *dev, struct pci_bus *bus);
@@ -539,6 +552,13 @@ struct pci_dev __deprecated *pci_find_slot(unsigned int bus,
unsigned int devfn);
#endif /* CONFIG_PCI_LEGACY */
+enum pci_lost_interrupt_reason {
+ PCI_LOST_IRQ_NO_INFORMATION = 0,
+ PCI_LOST_IRQ_DISABLE_MSI,
+ PCI_LOST_IRQ_DISABLE_MSIX,
+ PCI_LOST_IRQ_DISABLE_ACPI,
+};
+enum pci_lost_interrupt_reason pci_lost_interrupt(struct pci_dev *dev);
int pci_find_capability(struct pci_dev *dev, int cap);
int pci_find_next_capability(struct pci_dev *dev, u8 pos, int cap);
int pci_find_ext_capability(struct pci_dev *dev, int cap);
@@ -626,11 +646,15 @@ int pcix_get_mmrbc(struct pci_dev *dev);
int pcix_set_mmrbc(struct pci_dev *dev, int mmrbc);
int pcie_get_readrq(struct pci_dev *dev);
int pcie_set_readrq(struct pci_dev *dev, int rq);
+int pci_reset_function(struct pci_dev *dev);
+int pci_execute_reset_function(struct pci_dev *dev);
void pci_update_resource(struct pci_dev *dev, struct resource *res, int resno);
int __must_check pci_assign_resource(struct pci_dev *dev, int i);
int pci_select_bars(struct pci_dev *dev, unsigned long flags);
/* ROM control related routines */
+int pci_enable_rom(struct pci_dev *pdev);
+void pci_disable_rom(struct pci_dev *pdev);
void __iomem __must_check *pci_map_rom(struct pci_dev *pdev, size_t *size);
void pci_unmap_rom(struct pci_dev *pdev, void __iomem *rom);
size_t pci_get_rom_size(void __iomem *rom, size_t size);
@@ -643,6 +667,7 @@ pci_power_t pci_choose_state(struct pci_dev *dev, pm_message_t state);
bool pci_pme_capable(struct pci_dev *dev, pci_power_t state);
void pci_pme_active(struct pci_dev *dev, bool enable);
int pci_enable_wake(struct pci_dev *dev, pci_power_t state, int enable);
+int pci_wake_from_d3(struct pci_dev *dev, bool enable);
pci_power_t pci_target_state(struct pci_dev *dev);
int pci_prepare_to_sleep(struct pci_dev *dev);
int pci_back_from_sleep(struct pci_dev *dev);
@@ -723,7 +748,7 @@ enum pci_dma_burst_strategy {
};
struct msix_entry {
- u16 vector; /* kernel uses to write allocated vector */
+ u32 vector; /* kernel uses to write allocated vector */
u16 entry; /* driver uses to specify entry, OS writes */
};
@@ -1116,5 +1141,20 @@ static inline void pci_mmcfg_early_init(void) { }
static inline void pci_mmcfg_late_init(void) { }
#endif
+#ifdef CONFIG_HAS_IOMEM
+static inline void __iomem *pci_ioremap_bar(struct pci_dev *pdev, int bar)
+{
+ /*
+ * Make sure the BAR is actually a memory resource, not an IO resource
+ */
+ if (!(pci_resource_flags(pdev, bar) & IORESOURCE_MEM)) {
+ WARN_ON(1);
+ return NULL;
+ }
+ return ioremap_nocache(pci_resource_start(pdev, bar),
+ pci_resource_len(pdev, bar));
+}
+#endif
+
#endif /* __KERNEL__ */
#endif /* LINUX_PCI_H */
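A hedged probe-path sketch (not part of the patch) of the new pci_ioremap_bar() helper; the BAR index and the single register read are placeholders:

	#include <linux/errno.h>
	#include <linux/io.h>
	#include <linux/pci.h>

	/* Map BAR 0 instead of open-coding pci_resource_start()/ioremap_nocache(). */
	static int example_probe(struct pci_dev *pdev)
	{
		void __iomem *regs;
		int err;

		err = pci_enable_device(pdev);
		if (err)
			return err;

		regs = pci_ioremap_bar(pdev, 0);	/* NULL if BAR 0 is not MEM */
		if (!regs) {
			pci_disable_device(pdev);
			return -ENOMEM;
		}

		(void)ioread32(regs);			/* placeholder access */

		iounmap(regs);
		pci_disable_device(pdev);
		return 0;
	}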
diff --git a/include/linux/pci_hotplug.h b/include/linux/pci_hotplug.h
index a08cd06b541..a00bd1a0f15 100644
--- a/include/linux/pci_hotplug.h
+++ b/include/linux/pci_hotplug.h
@@ -142,8 +142,6 @@ struct hotplug_slot_info {
/**
* struct hotplug_slot - used to register a physical slot with the hotplug pci core
- * @name: the name of the slot being registered. This string must
- * be unique amoung slots registered on this system.
* @ops: pointer to the &struct hotplug_slot_ops to be used for this slot
* @info: pointer to the &struct hotplug_slot_info for the initial values for
* this slot.
@@ -153,7 +151,6 @@ struct hotplug_slot_info {
* needs.
*/
struct hotplug_slot {
- char *name;
struct hotplug_slot_ops *ops;
struct hotplug_slot_info *info;
void (*release) (struct hotplug_slot *slot);
@@ -165,7 +162,13 @@ struct hotplug_slot {
};
#define to_hotplug_slot(n) container_of(n, struct hotplug_slot, kobj)
-extern int pci_hp_register(struct hotplug_slot *, struct pci_bus *, int nr);
+static inline const char *hotplug_slot_name(const struct hotplug_slot *slot)
+{
+ return pci_slot_name(slot->pci_slot);
+}
+
+extern int pci_hp_register(struct hotplug_slot *, struct pci_bus *, int nr,
+ const char *name);
extern int pci_hp_deregister(struct hotplug_slot *slot);
extern int __must_check pci_hp_change_slot_info (struct hotplug_slot *slot,
struct hotplug_slot_info *info);
diff --git a/include/linux/pci_ids.h b/include/linux/pci_ids.h
index 1176f1f177e..b6e69445428 100644
--- a/include/linux/pci_ids.h
+++ b/include/linux/pci_ids.h
@@ -587,6 +587,7 @@
#define PCI_DEVICE_ID_MATROX_G200_PCI 0x0520
#define PCI_DEVICE_ID_MATROX_G200_AGP 0x0521
#define PCI_DEVICE_ID_MATROX_G400 0x0525
+#define PCI_DEVICE_ID_MATROX_G200EV_PCI 0x0530
#define PCI_DEVICE_ID_MATROX_G550 0x2527
#define PCI_DEVICE_ID_MATROX_VIA 0x4536
@@ -1943,6 +1944,14 @@
#define PCI_VENDOR_ID_OXSEMI 0x1415
#define PCI_DEVICE_ID_OXSEMI_12PCI840 0x8403
+#define PCI_DEVICE_ID_OXSEMI_PCIe840 0xC000
+#define PCI_DEVICE_ID_OXSEMI_PCIe840_G 0xC004
+#define PCI_DEVICE_ID_OXSEMI_PCIe952_0 0xC100
+#define PCI_DEVICE_ID_OXSEMI_PCIe952_0_G 0xC104
+#define PCI_DEVICE_ID_OXSEMI_PCIe952_1 0xC110
+#define PCI_DEVICE_ID_OXSEMI_PCIe952_1_G 0xC114
+#define PCI_DEVICE_ID_OXSEMI_PCIe952_1_U 0xC118
+#define PCI_DEVICE_ID_OXSEMI_PCIe952_1_GU 0xC11C
#define PCI_DEVICE_ID_OXSEMI_16PCI954 0x9501
#define PCI_DEVICE_ID_OXSEMI_16PCI95N 0x9511
#define PCI_DEVICE_ID_OXSEMI_16PCI954PP 0x9513
@@ -2295,6 +2304,10 @@
#define PCI_DEVICE_ID_INTEL_PXH_0 0x0329
#define PCI_DEVICE_ID_INTEL_PXH_1 0x032A
#define PCI_DEVICE_ID_INTEL_PXHV 0x032C
+#define PCI_DEVICE_ID_INTEL_80332_0 0x0330
+#define PCI_DEVICE_ID_INTEL_80332_1 0x0332
+#define PCI_DEVICE_ID_INTEL_80333_0 0x0370
+#define PCI_DEVICE_ID_INTEL_80333_1 0x0372
#define PCI_DEVICE_ID_INTEL_82375 0x0482
#define PCI_DEVICE_ID_INTEL_82424 0x0483
#define PCI_DEVICE_ID_INTEL_82378 0x0484
@@ -2367,6 +2380,7 @@
#define PCI_DEVICE_ID_INTEL_ESB_4 0x25a4
#define PCI_DEVICE_ID_INTEL_ESB_5 0x25a6
#define PCI_DEVICE_ID_INTEL_ESB_9 0x25ab
+#define PCI_DEVICE_ID_INTEL_ESB_10 0x25ac
#define PCI_DEVICE_ID_INTEL_82820_HB 0x2500
#define PCI_DEVICE_ID_INTEL_82820_UP_HB 0x2501
#define PCI_DEVICE_ID_INTEL_82850_HB 0x2530
@@ -2447,15 +2461,16 @@
#define PCI_DEVICE_ID_INTEL_MCH_PC1 0x359a
#define PCI_DEVICE_ID_INTEL_E7525_MCH 0x359e
#define PCI_DEVICE_ID_INTEL_IOAT_CNB 0x360b
+#define PCI_DEVICE_ID_INTEL_FBD_CNB 0x360c
#define PCI_DEVICE_ID_INTEL_ICH10_0 0x3a14
#define PCI_DEVICE_ID_INTEL_ICH10_1 0x3a16
#define PCI_DEVICE_ID_INTEL_ICH10_2 0x3a18
#define PCI_DEVICE_ID_INTEL_ICH10_3 0x3a1a
#define PCI_DEVICE_ID_INTEL_ICH10_4 0x3a30
#define PCI_DEVICE_ID_INTEL_ICH10_5 0x3a60
-#define PCI_DEVICE_ID_INTEL_PCH_0 0x3b10
-#define PCI_DEVICE_ID_INTEL_PCH_1 0x3b11
-#define PCI_DEVICE_ID_INTEL_PCH_2 0x3b30
+#define PCI_DEVICE_ID_INTEL_PCH_LPC_MIN 0x3b00
+#define PCI_DEVICE_ID_INTEL_PCH_LPC_MAX 0x3b1f
+#define PCI_DEVICE_ID_INTEL_PCH_SMBUS 0x3b30
#define PCI_DEVICE_ID_INTEL_IOAT_SNB 0x402f
#define PCI_DEVICE_ID_INTEL_5100_16 0x65f0
#define PCI_DEVICE_ID_INTEL_5100_21 0x65f5
diff --git a/include/linux/pci_regs.h b/include/linux/pci_regs.h
index 450684f7eaa..e5effd47ed7 100644
--- a/include/linux/pci_regs.h
+++ b/include/linux/pci_regs.h
@@ -377,6 +377,7 @@
#define PCI_EXP_DEVCAP_RBER 0x8000 /* Role-Based Error Reporting */
#define PCI_EXP_DEVCAP_PWR_VAL 0x3fc0000 /* Slot Power Limit Value */
#define PCI_EXP_DEVCAP_PWR_SCL 0xc000000 /* Slot Power Limit Scale */
+#define PCI_EXP_DEVCAP_FLR 0x10000000 /* Function Level Reset */
#define PCI_EXP_DEVCTL 8 /* Device Control */
#define PCI_EXP_DEVCTL_CERE 0x0001 /* Correctable Error Reporting En. */
#define PCI_EXP_DEVCTL_NFERE 0x0002 /* Non-Fatal Error Reporting Enable */
@@ -389,6 +390,7 @@
#define PCI_EXP_DEVCTL_AUX_PME 0x0400 /* Auxiliary Power PM Enable */
#define PCI_EXP_DEVCTL_NOSNOOP_EN 0x0800 /* Enable No Snoop */
#define PCI_EXP_DEVCTL_READRQ 0x7000 /* Max_Read_Request_Size */
+#define PCI_EXP_DEVCTL_BCR_FLR 0x8000 /* Bridge Configuration Retry / FLR */
#define PCI_EXP_DEVSTA 10 /* Device Status */
#define PCI_EXP_DEVSTA_CED 0x01 /* Correctable Error Detected */
#define PCI_EXP_DEVSTA_NFED 0x02 /* Non-Fatal Error Detected */
@@ -419,6 +421,10 @@
#define PCI_EXP_RTCTL_CRSSVE 0x10 /* CRS Software Visibility Enable */
#define PCI_EXP_RTCAP 30 /* Root Capabilities */
#define PCI_EXP_RTSTA 32 /* Root Status */
+#define PCI_EXP_DEVCAP2 36 /* Device Capabilities 2 */
+#define PCI_EXP_DEVCAP2_ARI 0x20 /* Alternative Routing-ID */
+#define PCI_EXP_DEVCTL2 40 /* Device Control 2 */
+#define PCI_EXP_DEVCTL2_ARI 0x20 /* Alternative Routing-ID */
/* Extended Capabilities (PCI-X 2.0 and Express) */
#define PCI_EXT_CAP_ID(header) (header & 0x0000ffff)
@@ -429,6 +435,7 @@
#define PCI_EXT_CAP_ID_VC 2
#define PCI_EXT_CAP_ID_DSN 3
#define PCI_EXT_CAP_ID_PWR 4
+#define PCI_EXT_CAP_ID_ARI 14
/* Advanced Error Reporting */
#define PCI_ERR_UNCOR_STATUS 4 /* Uncorrectable Error Status */
@@ -536,5 +543,14 @@
#define HT_CAPTYPE_GEN3 0xD0 /* Generation 3 hypertransport configuration */
#define HT_CAPTYPE_PM 0xE0 /* Hypertransport powermanagement configuration */
+/* Alternative Routing-ID Interpretation */
+#define PCI_ARI_CAP 0x04 /* ARI Capability Register */
+#define PCI_ARI_CAP_MFVC 0x0001 /* MFVC Function Groups Capability */
+#define PCI_ARI_CAP_ACS 0x0002 /* ACS Function Groups Capability */
+#define PCI_ARI_CAP_NFN(x) (((x) >> 8) & 0xff) /* Next Function Number */
+#define PCI_ARI_CTRL 0x06 /* ARI Control Register */
+#define PCI_ARI_CTRL_MFVC 0x0001 /* MFVC Function Groups Enable */
+#define PCI_ARI_CTRL_ACS 0x0002 /* ACS Function Groups Enable */
+#define PCI_ARI_CTRL_FG(x) (((x) >> 4) & 7) /* Function Group */
#endif /* LINUX_PCI_REGS_H */
diff --git a/include/linux/pfn.h b/include/linux/pfn.h
index bb01f8b92b5..7646637221f 100644
--- a/include/linux/pfn.h
+++ b/include/linux/pfn.h
@@ -1,9 +1,13 @@
#ifndef _LINUX_PFN_H_
#define _LINUX_PFN_H_
+#ifndef __ASSEMBLY__
+#include <linux/types.h>
+#endif
+
#define PFN_ALIGN(x) (((unsigned long)(x) + (PAGE_SIZE - 1)) & PAGE_MASK)
#define PFN_UP(x) (((x) + PAGE_SIZE-1) >> PAGE_SHIFT)
#define PFN_DOWN(x) ((x) >> PAGE_SHIFT)
-#define PFN_PHYS(x) ((x) << PAGE_SHIFT)
+#define PFN_PHYS(x) ((phys_addr_t)(x) << PAGE_SHIFT)
#endif
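The new cast matters on 32-bit kernels where phys_addr_t is wider than unsigned long (e.g. PAE): without it the shift is performed in 32 bits and truncates addresses at or above 4 GiB. A small illustration with 4 KiB pages (the PFN value is only an example):

	#include <linux/pfn.h>
	#include <linux/types.h>

	/*
	 * PFN 0x100001, PAGE_SHIFT 12:
	 *   old macro: (0x100001UL << 12) truncates to 0x00001000 in a 32-bit long
	 *   new macro: ((phys_addr_t)0x100001 << 12) == 0x100001000
	 */
	static inline phys_addr_t example_pfn_to_phys(unsigned long pfn)
	{
		return PFN_PHYS(pfn);
	}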
diff --git a/include/linux/phonet.h b/include/linux/phonet.h
index c9609f9aeda..4157faa857b 100644
--- a/include/linux/phonet.h
+++ b/include/linux/phonet.h
@@ -72,6 +72,7 @@ struct phonetmsg {
} pn_msg_u;
};
#define PN_COMMON_MESSAGE 0xF0
+#define PN_COMMGR 0x10
#define PN_PREFIX 0xE0 /* resource for extended messages */
#define pn_submsg_id pn_msg_u.base.pn_submsg_id
#define pn_e_submsg_id pn_msg_u.ext.pn_e_submsg_id
diff --git a/include/linux/phy.h b/include/linux/phy.h
index 77c4ed60b98..d7e54d98869 100644
--- a/include/linux/phy.h
+++ b/include/linux/phy.h
@@ -467,6 +467,8 @@ int genphy_restart_aneg(struct phy_device *phydev);
int genphy_config_aneg(struct phy_device *phydev);
int genphy_update_link(struct phy_device *phydev);
int genphy_read_status(struct phy_device *phydev);
+int genphy_suspend(struct phy_device *phydev);
+int genphy_resume(struct phy_device *phydev);
void phy_driver_unregister(struct phy_driver *drv);
int phy_driver_register(struct phy_driver *new_driver);
void phy_prepare_link(struct phy_device *phydev,
diff --git a/include/linux/pid.h b/include/linux/pid.h
index d7e98ff8021..bb206c56d1f 100644
--- a/include/linux/pid.h
+++ b/include/linux/pid.h
@@ -147,9 +147,9 @@ pid_t pid_vnr(struct pid *pid);
#define do_each_pid_task(pid, type, task) \
do { \
struct hlist_node *pos___; \
- if (pid != NULL) \
+ if ((pid) != NULL) \
hlist_for_each_entry_rcu((task), pos___, \
- &pid->tasks[type], pids[type].node) {
+ &(pid)->tasks[type], pids[type].node) {
/*
* Both old and new leaders may be attached to
diff --git a/include/linux/pid_namespace.h b/include/linux/pid_namespace.h
index 1af82c4e17d..d82fe825d62 100644
--- a/include/linux/pid_namespace.h
+++ b/include/linux/pid_namespace.h
@@ -84,12 +84,6 @@ static inline struct pid_namespace *task_active_pid_ns(struct task_struct *tsk)
return tsk->nsproxy->pid_ns;
}
-static inline struct task_struct *task_child_reaper(struct task_struct *tsk)
-{
- BUG_ON(tsk != current);
- return tsk->nsproxy->pid_ns->child_reaper;
-}
-
void pidhash_init(void);
void pidmap_init(void);
diff --git a/include/linux/pkt_cls.h b/include/linux/pkt_cls.h
index 7cf7824df77..e6aa8482ad7 100644
--- a/include/linux/pkt_cls.h
+++ b/include/linux/pkt_cls.h
@@ -394,6 +394,20 @@ enum
#define TCA_BASIC_MAX (__TCA_BASIC_MAX - 1)
+
+/* Cgroup classifier */
+
+enum
+{
+ TCA_CGROUP_UNSPEC,
+ TCA_CGROUP_ACT,
+ TCA_CGROUP_POLICE,
+ TCA_CGROUP_EMATCHES,
+ __TCA_CGROUP_MAX,
+};
+
+#define TCA_CGROUP_MAX (__TCA_CGROUP_MAX - 1)
+
/* Extended Matches */
struct tcf_ematch_tree_hdr
diff --git a/include/linux/pkt_sched.h b/include/linux/pkt_sched.h
index 5d921fa91a5..e3f133adba7 100644
--- a/include/linux/pkt_sched.h
+++ b/include/linux/pkt_sched.h
@@ -500,4 +500,20 @@ struct tc_netem_corrupt
#define NETEM_DIST_SCALE 8192
+/* DRR */
+
+enum
+{
+ TCA_DRR_UNSPEC,
+ TCA_DRR_QUANTUM,
+ __TCA_DRR_MAX
+};
+
+#define TCA_DRR_MAX (__TCA_DRR_MAX - 1)
+
+struct tc_drr_stats
+{
+ u32 deficit;
+};
+
#endif
diff --git a/include/linux/platform_device.h b/include/linux/platform_device.h
index 95ac21ab3a0..4b8cc6a3247 100644
--- a/include/linux/platform_device.h
+++ b/include/linux/platform_device.h
@@ -37,6 +37,8 @@ extern int platform_add_devices(struct platform_device **, int);
extern struct platform_device *platform_device_register_simple(const char *, int id,
struct resource *, unsigned int);
+extern struct platform_device *platform_device_register_data(struct device *,
+ const char *, int, const void *, size_t);
extern struct platform_device *platform_device_alloc(const char *name, int id);
extern int platform_device_add_resources(struct platform_device *pdev, struct resource *res, unsigned int num);
diff --git a/include/linux/pm.h b/include/linux/pm.h
index 4dcce54b6d7..42de4003c4e 100644
--- a/include/linux/pm.h
+++ b/include/linux/pm.h
@@ -419,7 +419,7 @@ extern void __suspend_report_result(const char *function, void *fn, int ret);
#define suspend_report_result(fn, ret) \
do { \
- __suspend_report_result(__FUNCTION__, fn, ret); \
+ __suspend_report_result(__func__, fn, ret); \
} while (0)
#else /* !CONFIG_PM_SLEEP */
diff --git a/include/linux/pnp.h b/include/linux/pnp.h
index be764e514e3..ca3c8877302 100644
--- a/include/linux/pnp.h
+++ b/include/linux/pnp.h
@@ -22,9 +22,11 @@ struct pnp_dev;
* Resource Management
*/
#ifdef CONFIG_PNP
-struct resource *pnp_get_resource(struct pnp_dev *, unsigned int, unsigned int);
+struct resource *pnp_get_resource(struct pnp_dev *dev, unsigned long type,
+ unsigned int num);
#else
-static inline struct resource *pnp_get_resource(struct pnp_dev *dev, unsigned int type, unsigned int num)
+static inline struct resource *pnp_get_resource(struct pnp_dev *dev,
+ unsigned long type, unsigned int num)
{
return NULL;
}
@@ -483,14 +485,4 @@ static inline void pnp_unregister_driver(struct pnp_driver *drv) { }
#endif /* CONFIG_PNP */
-#define pnp_err(format, arg...) printk(KERN_ERR "pnp: " format "\n" , ## arg)
-#define pnp_info(format, arg...) printk(KERN_INFO "pnp: " format "\n" , ## arg)
-#define pnp_warn(format, arg...) printk(KERN_WARNING "pnp: " format "\n" , ## arg)
-
-#ifdef CONFIG_PNP_DEBUG
-#define pnp_dbg(format, arg...) printk(KERN_DEBUG "pnp: " format "\n" , ## arg)
-#else
-#define pnp_dbg(format, arg...) do {} while (0)
-#endif
-
#endif /* _LINUX_PNP_H */
diff --git a/include/linux/poll.h b/include/linux/poll.h
index ef453828877..badd98ab06f 100644
--- a/include/linux/poll.h
+++ b/include/linux/poll.h
@@ -114,11 +114,13 @@ void zero_fd_set(unsigned long nr, unsigned long *fdset)
#define MAX_INT64_SECONDS (((s64)(~((u64)0)>>1)/HZ)-1)
-extern int do_select(int n, fd_set_bits *fds, s64 *timeout);
+extern int do_select(int n, fd_set_bits *fds, struct timespec *end_time);
extern int do_sys_poll(struct pollfd __user * ufds, unsigned int nfds,
- s64 *timeout);
+ struct timespec *end_time);
extern int core_sys_select(int n, fd_set __user *inp, fd_set __user *outp,
- fd_set __user *exp, s64 *timeout);
+ fd_set __user *exp, struct timespec *end_time);
+
+extern int poll_select_set_timeout(struct timespec *to, long sec, long nsec);
#endif /* KERNEL */
diff --git a/include/linux/posix-timers.h b/include/linux/posix-timers.h
index a7dd38f30ad..4f71bf4e628 100644
--- a/include/linux/posix-timers.h
+++ b/include/linux/posix-timers.h
@@ -45,9 +45,11 @@ struct k_itimer {
int it_requeue_pending; /* waiting to requeue this timer */
#define REQUEUE_PENDING 1
int it_sigev_notify; /* notify word of sigevent struct */
- int it_sigev_signo; /* signo word of sigevent struct */
- sigval_t it_sigev_value; /* value word of sigevent struct */
- struct task_struct *it_process; /* process to send signal to */
+ struct signal_struct *it_signal;
+ union {
+ struct pid *it_pid; /* pid of process to send signal to */
+ struct task_struct *it_process; /* for clock_nanosleep */
+ };
struct sigqueue *sigq; /* signal queue entry. */
union {
struct {
@@ -115,4 +117,6 @@ void set_process_cpu_timer(struct task_struct *task, unsigned int clock_idx,
long clock_nanosleep_restart(struct restart_block *restart_block);
+void update_rlimit_cpu(unsigned long rlim_new);
+
#endif
diff --git a/include/linux/power_supply.h b/include/linux/power_supply.h
index ea96ead1d39..f9348cba6dc 100644
--- a/include/linux/power_supply.h
+++ b/include/linux/power_supply.h
@@ -165,6 +165,12 @@ struct power_supply_info {
extern void power_supply_changed(struct power_supply *psy);
extern int power_supply_am_i_supplied(struct power_supply *psy);
+#if defined(CONFIG_POWER_SUPPLY) || defined(CONFIG_POWER_SUPPLY_MODULE)
+extern int power_supply_is_system_supplied(void);
+#else
+static inline int power_supply_is_system_supplied(void) { return -ENOSYS; }
+#endif
+
extern int power_supply_register(struct device *parent,
struct power_supply *psy);
extern void power_supply_unregister(struct power_supply *psy);
diff --git a/include/linux/prctl.h b/include/linux/prctl.h
index 5ad79198d6f..48d887e3c6e 100644
--- a/include/linux/prctl.h
+++ b/include/linux/prctl.h
@@ -78,4 +78,11 @@
#define PR_GET_SECUREBITS 27
#define PR_SET_SECUREBITS 28
+/*
+ * Get/set the timer slack used by poll/select/nanosleep.
+ * A value of 0 means "use default".
+ */
+#define PR_SET_TIMERSLACK 29
+#define PR_GET_TIMERSLACK 30
+
#endif /* _LINUX_PRCTL_H */
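A userspace sketch (not part of the patch) of the new pair; the 50 µs value is only an example, the slack is given in nanoseconds, and PR_GET_TIMERSLACK returns the current value:

	#include <stdio.h>
	#include <sys/prctl.h>
	#include <linux/prctl.h>

	int main(void)
	{
		/* Ask for 50 microseconds of timer slack for this task. */
		if (prctl(PR_SET_TIMERSLACK, 50000UL, 0, 0, 0) != 0)
			perror("PR_SET_TIMERSLACK");

		printf("current timer slack: %d ns\n",
		       prctl(PR_GET_TIMERSLACK, 0, 0, 0, 0));
		return 0;
	}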
diff --git a/include/linux/proc_fs.h b/include/linux/proc_fs.h
index fb61850d1cf..b8bdb96eff7 100644
--- a/include/linux/proc_fs.h
+++ b/include/linux/proc_fs.h
@@ -97,12 +97,9 @@ struct vmcore {
#ifdef CONFIG_PROC_FS
-extern struct proc_dir_entry *proc_root_kcore;
-
extern spinlock_t proc_subdir_lock;
extern void proc_root_init(void);
-extern void proc_misc_init(void);
void proc_flush_task(struct task_struct *task);
struct dentry *proc_pid_lookup(struct inode *dir, struct dentry * dentry, struct nameidata *);
@@ -138,9 +135,6 @@ extern struct inode *proc_get_inode(struct super_block *, unsigned int, struct p
extern int proc_readdir(struct file *, void *, filldir_t);
extern struct dentry *proc_lookup(struct inode *, struct dentry *, struct nameidata *);
-extern const struct file_operations proc_kcore_operations;
-extern const struct file_operations ppc_htab_operations;
-
extern int pid_ns_prepare_proc(struct pid_namespace *ns);
extern void pid_ns_release_proc(struct pid_namespace *ns);
diff --git a/include/linux/profile.h b/include/linux/profile.h
index 7e7087239af..a0fc32279fc 100644
--- a/include/linux/profile.h
+++ b/include/linux/profile.h
@@ -19,10 +19,16 @@ struct notifier_block;
#if defined(CONFIG_PROFILING) && defined(CONFIG_PROC_FS)
void create_prof_cpu_mask(struct proc_dir_entry *de);
+int create_proc_profile(void);
#else
static inline void create_prof_cpu_mask(struct proc_dir_entry *de)
{
}
+
+static inline int create_proc_profile(void)
+{
+ return 0;
+}
#endif
enum profile_type {
@@ -35,7 +41,8 @@ enum profile_type {
extern int prof_on __read_mostly;
/* init basic kernel profiler */
-void __init profile_init(void);
+int profile_init(void);
+int profile_setup(char *str);
void profile_tick(int type);
/*
@@ -84,9 +91,9 @@ struct pt_regs;
#define prof_on 0
-static inline void profile_init(void)
+static inline int profile_init(void)
{
- return;
+ return 0;
}
static inline void profile_tick(int type)
diff --git a/include/linux/ptrace.h b/include/linux/ptrace.h
index ea7416c901d..98b93ca4db0 100644
--- a/include/linux/ptrace.h
+++ b/include/linux/ptrace.h
@@ -94,7 +94,7 @@ extern void ptrace_notify(int exit_code);
extern void __ptrace_link(struct task_struct *child,
struct task_struct *new_parent);
extern void __ptrace_unlink(struct task_struct *child);
-extern void ptrace_untrace(struct task_struct *child);
+extern void ptrace_fork(struct task_struct *task, unsigned long clone_flags);
#define PTRACE_MODE_READ 1
#define PTRACE_MODE_ATTACH 2
/* Returns 0 on success, -errno on denial. */
@@ -314,6 +314,27 @@ static inline void user_enable_block_step(struct task_struct *task)
#define arch_ptrace_stop(code, info) do { } while (0)
#endif
+#ifndef arch_ptrace_untrace
+/*
+ * Do machine-specific work before untracing child.
+ *
+ * This is called for a normal detach as well as from ptrace_exit()
+ * when the tracing task dies.
+ *
+ * Called with write_lock(&tasklist_lock) held.
+ */
+#define arch_ptrace_untrace(task) do { } while (0)
+#endif
+
+#ifndef arch_ptrace_fork
+/*
+ * Do machine-specific work to initialize a new task.
+ *
+ * This is called from copy_process().
+ */
+#define arch_ptrace_fork(child, clone_flags) do { } while (0)
+#endif
+
extern int task_current_syscall(struct task_struct *target, long *callno,
unsigned long args[6], unsigned int maxargs,
unsigned long *sp, unsigned long *pc);
diff --git a/include/linux/quota.h b/include/linux/quota.h
index 376a05048bc..40401b55448 100644
--- a/include/linux/quota.h
+++ b/include/linux/quota.h
@@ -28,8 +28,6 @@
* LIABILITY, OR TORT (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY
* OUT OF THE USE OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF
* SUCH DAMAGE.
- *
- * Version: $Id: quota.h,v 2.0 1996/11/17 16:48:14 mvw Exp mvw $
*/
#ifndef _LINUX_QUOTA_
diff --git a/include/linux/quotaops.h b/include/linux/quotaops.h
index ca6b9b5c8d5..a558a4c1d35 100644
--- a/include/linux/quotaops.h
+++ b/include/linux/quotaops.h
@@ -3,9 +3,6 @@
* macros expand to the right source-code.
*
* Author: Marco van Wieringen <mvw@planets.elm.net>
- *
- * Version: $Id: quotaops.h,v 1.2 1998/01/15 16:22:26 ecd Exp $
- *
*/
#ifndef _LINUX_QUOTAOPS_
#define _LINUX_QUOTAOPS_
diff --git a/include/linux/raid/linear.h b/include/linux/raid/linear.h
index 7e375111d00..f38b9c586af 100644
--- a/include/linux/raid/linear.h
+++ b/include/linux/raid/linear.h
@@ -5,8 +5,8 @@
struct dev_info {
mdk_rdev_t *rdev;
- sector_t size;
- sector_t offset;
+ sector_t num_sectors;
+ sector_t start_sector;
};
typedef struct dev_info dev_info_t;
@@ -15,9 +15,11 @@ struct linear_private_data
{
struct linear_private_data *prev; /* earlier version */
dev_info_t **hash_table;
- sector_t hash_spacing;
+ sector_t spacing;
sector_t array_sectors;
- int preshift; /* shift before dividing by hash_spacing */
+ int sector_shift; /* shift before dividing
+ * by spacing
+ */
dev_info_t disks[0];
};
diff --git a/include/linux/raid/md.h b/include/linux/raid/md.h
index dc0e3fcb9f2..82bea14cae1 100644
--- a/include/linux/raid/md.h
+++ b/include/linux/raid/md.h
@@ -19,27 +19,7 @@
#define _MD_H
#include <linux/blkdev.h>
-#include <linux/major.h>
-#include <linux/ioctl.h>
-#include <linux/types.h>
-#include <linux/bitops.h>
-#include <linux/module.h>
-#include <linux/hdreg.h>
-#include <linux/proc_fs.h>
#include <linux/seq_file.h>
-#include <linux/smp_lock.h>
-#include <linux/delay.h>
-#include <net/checksum.h>
-#include <linux/random.h>
-#include <linux/kernel_stat.h>
-#include <asm/io.h>
-#include <linux/completion.h>
-#include <linux/mempool.h>
-#include <linux/list.h>
-#include <linux/reboot.h>
-#include <linux/vmalloc.h>
-#include <linux/blkpg.h>
-#include <linux/bio.h>
/*
* 'md_p.h' holds the 'physical' layout of RAID devices
@@ -74,19 +54,17 @@
extern int mdp_major;
-extern int register_md_personality (struct mdk_personality *p);
-extern int unregister_md_personality (struct mdk_personality *p);
-extern mdk_thread_t * md_register_thread (void (*run) (mddev_t *mddev),
+extern int register_md_personality(struct mdk_personality *p);
+extern int unregister_md_personality(struct mdk_personality *p);
+extern mdk_thread_t * md_register_thread(void (*run) (mddev_t *mddev),
mddev_t *mddev, const char *name);
-extern void md_unregister_thread (mdk_thread_t *thread);
+extern void md_unregister_thread(mdk_thread_t *thread);
extern void md_wakeup_thread(mdk_thread_t *thread);
extern void md_check_recovery(mddev_t *mddev);
extern void md_write_start(mddev_t *mddev, struct bio *bi);
extern void md_write_end(mddev_t *mddev);
-extern void md_handle_safemode(mddev_t *mddev);
extern void md_done_sync(mddev_t *mddev, int blocks, int ok);
-extern void md_error (mddev_t *mddev, mdk_rdev_t *rdev);
-extern void md_unplug_mddev(mddev_t *mddev);
+extern void md_error(mddev_t *mddev, mdk_rdev_t *rdev);
extern void md_super_write(mddev_t *mddev, mdk_rdev_t *rdev,
sector_t sector, int size, struct page *page);
diff --git a/include/linux/raid/md_k.h b/include/linux/raid/md_k.h
index c200b9a34af..8fc909ef678 100644
--- a/include/linux/raid/md_k.h
+++ b/include/linux/raid/md_k.h
@@ -115,6 +115,9 @@ struct mdk_rdev_s
* in superblock.
*/
struct work_struct del_work; /* used for delayed sysfs removal */
+
+ struct sysfs_dirent *sysfs_state; /* handle for 'state'
+ * sysfs entry */
};
struct mddev_s
@@ -128,7 +131,6 @@ struct mddev_s
#define MD_CHANGE_DEVS 0 /* Some device status has changed */
#define MD_CHANGE_CLEAN 1 /* transition to or from 'clean' */
#define MD_CHANGE_PENDING 2 /* superblock update in progress */
-#define MD_NOTIFY_ARRAY_STATE 3 /* atomic context wants to notify userspace */
int ro;
@@ -239,6 +241,10 @@ struct mddev_s
sector_t resync_max; /* resync should pause
* when it gets here */
+ struct sysfs_dirent *sysfs_state; /* handle for 'array_state'
+ * file in sysfs.
+ */
+
spinlock_t write_lock;
wait_queue_head_t sb_wait; /* for waiting on superblock updates */
atomic_t pending_writes; /* number of active superblock writes */
diff --git a/include/linux/random.h b/include/linux/random.h
index 36f125c0c60..adbf3bd3c6b 100644
--- a/include/linux/random.h
+++ b/include/linux/random.h
@@ -8,6 +8,7 @@
#define _LINUX_RANDOM_H
#include <linux/ioctl.h>
+#include <linux/irqnr.h>
/* ioctl()'s for the random number generator */
@@ -44,6 +45,56 @@ struct rand_pool_info {
extern void rand_initialize_irq(int irq);
+struct timer_rand_state;
+#ifndef CONFIG_SPARSE_IRQ
+
+extern struct timer_rand_state *irq_timer_state[];
+
+static inline struct timer_rand_state *get_timer_rand_state(unsigned int irq)
+{
+ if (irq >= nr_irqs)
+ return NULL;
+
+ return irq_timer_state[irq];
+}
+
+static inline void set_timer_rand_state(unsigned int irq, struct timer_rand_state *state)
+{
+ if (irq >= nr_irqs)
+ return;
+
+ irq_timer_state[irq] = state;
+}
+
+#else
+
+#include <linux/irq.h>
+static inline struct timer_rand_state *get_timer_rand_state(unsigned int irq)
+{
+ struct irq_desc *desc;
+
+ desc = irq_to_desc(irq);
+
+ if (!desc)
+ return NULL;
+
+ return desc->timer_rand_state;
+}
+
+static inline void set_timer_rand_state(unsigned int irq, struct timer_rand_state *state)
+{
+ struct irq_desc *desc;
+
+ desc = irq_to_desc(irq);
+
+ if (!desc)
+ return;
+
+ desc->timer_rand_state = state;
+}
+#endif
+
+
extern void add_input_randomness(unsigned int type, unsigned int code,
unsigned int value);
extern void add_interrupt_randomness(int irq);
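
For illustration (not part of this hunk): the accessors above hide where per-IRQ entropy state lives, so a caller can be written once for both the flat irq_timer_state[] array and the CONFIG_SPARSE_IRQ irq_desc case. The function below is invented; struct timer_rand_state stays opaque here and is only defined in drivers/char/random.c.

	#include <linux/random.h>

	/* Sketch: hand one IRQ's entropy-collection state over to another IRQ. */
	static void example_move_irq_entropy(unsigned int from, unsigned int to)
	{
		struct timer_rand_state *state = get_timer_rand_state(from);

		if (!state)
			return;
		set_timer_rand_state(from, NULL);
		set_timer_rand_state(to, state);
	}
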
diff --git a/include/linux/ratelimit.h b/include/linux/ratelimit.h
index 18a5b9ba9d4..00044b85645 100644
--- a/include/linux/ratelimit.h
+++ b/include/linux/ratelimit.h
@@ -17,11 +17,4 @@ struct ratelimit_state {
struct ratelimit_state name = {interval, burst,}
extern int __ratelimit(struct ratelimit_state *rs);
-
-static inline int ratelimit(void)
-{
- static DEFINE_RATELIMIT_STATE(rs, DEFAULT_RATELIMIT_INTERVAL,
- DEFAULT_RATELIMIT_BURST);
- return __ratelimit(&rs);
-}
#endif
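
For illustration (not part of this patch): with the generic ratelimit() helper removed, each call site is expected to carry its own state and call __ratelimit() directly. A minimal sketch of the replacement pattern; the function name and message are invented.

	#include <linux/kernel.h>
	#include <linux/ratelimit.h>

	static void example_report_overrun(void)
	{
		/* One rate-limit state per call site, replacing the old global ratelimit(). */
		static DEFINE_RATELIMIT_STATE(rs, DEFAULT_RATELIMIT_INTERVAL,
					      DEFAULT_RATELIMIT_BURST);

		if (__ratelimit(&rs))
			printk(KERN_WARNING "example: queue overrun\n");
	}
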
diff --git a/include/linux/rcuclassic.h b/include/linux/rcuclassic.h
index 5f89b62e698..301dda829e3 100644
--- a/include/linux/rcuclassic.h
+++ b/include/linux/rcuclassic.h
@@ -41,7 +41,7 @@
#include <linux/seqlock.h>
#ifdef CONFIG_RCU_CPU_STALL_DETECTOR
-#define RCU_SECONDS_TILL_STALL_CHECK ( 3 * HZ) /* for rcp->jiffies_stall */
+#define RCU_SECONDS_TILL_STALL_CHECK (10 * HZ) /* for rcp->jiffies_stall */
#define RCU_SECONDS_TILL_STALL_RECHECK (30 * HZ) /* for rcp->jiffies_stall */
#endif /* #ifdef CONFIG_RCU_CPU_STALL_DETECTOR */
diff --git a/include/linux/rculist_nulls.h b/include/linux/rculist_nulls.h
new file mode 100644
index 00000000000..f9ddd03961a
--- /dev/null
+++ b/include/linux/rculist_nulls.h
@@ -0,0 +1,110 @@
+#ifndef _LINUX_RCULIST_NULLS_H
+#define _LINUX_RCULIST_NULLS_H
+
+#ifdef __KERNEL__
+
+/*
+ * RCU-protected list version
+ */
+#include <linux/list_nulls.h>
+#include <linux/rcupdate.h>
+
+/**
+ * hlist_nulls_del_init_rcu - deletes entry from hash list with re-initialization
+ * @n: the element to delete from the hash list.
+ *
+ * Note: hlist_nulls_unhashed() on the node returns true after this. It is
+ * useful for RCU based read lockfree traversal if the writer side
+ * must know if the list entry is still hashed or already unhashed.
+ *
+ * In particular, it means that we can not poison the forward pointers
+ * that may still be used for walking the hash list and we can only
+ * zero the pprev pointer so list_unhashed() will return true after
+ * this.
+ *
+ * The caller must take whatever precautions are necessary (such as
+ * holding appropriate locks) to avoid racing with another
+ * list-mutation primitive, such as hlist_nulls_add_head_rcu() or
+ * hlist_nulls_del_rcu(), running on this same list. However, it is
+ * perfectly legal to run concurrently with the _rcu list-traversal
+ * primitives, such as hlist_nulls_for_each_entry_rcu().
+ */
+static inline void hlist_nulls_del_init_rcu(struct hlist_nulls_node *n)
+{
+ if (!hlist_nulls_unhashed(n)) {
+ __hlist_nulls_del(n);
+ n->pprev = NULL;
+ }
+}
+
+/**
+ * hlist_nulls_del_rcu - deletes entry from hash list without re-initialization
+ * @n: the element to delete from the hash list.
+ *
+ * Note: hlist_nulls_unhashed() on the entry does not return true after this;
+ * the entry is in an undefined state. It is useful for RCU based
+ * lockfree traversal.
+ *
+ * In particular, it means that we can not poison the forward
+ * pointers that may still be used for walking the hash list.
+ *
+ * The caller must take whatever precautions are necessary
+ * (such as holding appropriate locks) to avoid racing
+ * with another list-mutation primitive, such as hlist_nulls_add_head_rcu()
+ * or hlist_nulls_del_rcu(), running on this same list.
+ * However, it is perfectly legal to run concurrently with
+ * the _rcu list-traversal primitives, such as
+ * hlist_nulls_for_each_entry().
+ */
+static inline void hlist_nulls_del_rcu(struct hlist_nulls_node *n)
+{
+ __hlist_nulls_del(n);
+ n->pprev = LIST_POISON2;
+}
+
+/**
+ * hlist_nulls_add_head_rcu
+ * @n: the element to add to the hash list.
+ * @h: the list to add to.
+ *
+ * Description:
+ * Adds the specified element to the specified hlist_nulls,
+ * while permitting racing traversals.
+ *
+ * The caller must take whatever precautions are necessary
+ * (such as holding appropriate locks) to avoid racing
+ * with another list-mutation primitive, such as hlist_nulls_add_head_rcu()
+ * or hlist_nulls_del_rcu(), running on this same list.
+ * However, it is perfectly legal to run concurrently with
+ * the _rcu list-traversal primitives, such as
+ * hlist_nulls_for_each_entry_rcu(), used to prevent memory-consistency
+ * problems on Alpha CPUs. Regardless of the type of CPU, the
+ * list-traversal primitive must be guarded by rcu_read_lock().
+ */
+static inline void hlist_nulls_add_head_rcu(struct hlist_nulls_node *n,
+ struct hlist_nulls_head *h)
+{
+ struct hlist_nulls_node *first = h->first;
+
+ n->next = first;
+ n->pprev = &h->first;
+ rcu_assign_pointer(h->first, n);
+ if (!is_a_nulls(first))
+ first->pprev = &n->next;
+}
+/**
+ * hlist_nulls_for_each_entry_rcu - iterate over rcu list of given type
+ * @tpos: the type * to use as a loop cursor.
+ * @pos: the &struct hlist_nulls_node to use as a loop cursor.
+ * @head: the head for your list.
+ * @member: the name of the hlist_nulls_node within the struct.
+ *
+ */
+#define hlist_nulls_for_each_entry_rcu(tpos, pos, head, member) \
+ for (pos = rcu_dereference((head)->first); \
+ (!is_a_nulls(pos)) && \
+ ({ tpos = hlist_nulls_entry(pos, typeof(*tpos), member); 1; }); \
+ pos = rcu_dereference(pos->next))
+
+#endif
+#endif
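
For illustration (not part of this patch): a "nulls" chain ends in an encoded marker rather than NULL, so a lockless reader can tell when an object it was walking has been moved to another chain (the usual SLAB_DESTROY_BY_RCU situation) and restart. The lookup below is a sketch; struct item, its key field, the function name and the slot parameter are invented, while the iterator, is_a_nulls() and get_nulls_value() come from rculist_nulls.h/list_nulls.h.

	#include <linux/rculist_nulls.h>

	struct item {
		struct hlist_nulls_node node;
		unsigned int key;
	};

	/* Assumes each chain head was set up with INIT_HLIST_NULLS_HEAD(head, slot). */
	static struct item *example_lookup(struct hlist_nulls_head *head,
					   unsigned int key, unsigned long slot)
	{
		struct item *it;
		struct hlist_nulls_node *pos;

		rcu_read_lock();
	begin:
		hlist_nulls_for_each_entry_rcu(it, pos, head, node) {
			if (it->key == key) {
				rcu_read_unlock();
				return it;	/* caller still revalidates/refcounts */
			}
		}
		/*
		 * We fell off the chain.  If the nulls marker does not encode
		 * this slot, the object being traversed was freed and re-linked
		 * onto another chain under us: restart the walk.
		 */
		if (get_nulls_value(pos) != slot)
			goto begin;
		rcu_read_unlock();
		return NULL;
	}
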
diff --git a/include/linux/rcupdate.h b/include/linux/rcupdate.h
index 86f1f5e43e3..1168fbcea8d 100644
--- a/include/linux/rcupdate.h
+++ b/include/linux/rcupdate.h
@@ -52,11 +52,15 @@ struct rcu_head {
void (*func)(struct rcu_head *head);
};
-#ifdef CONFIG_CLASSIC_RCU
+#if defined(CONFIG_CLASSIC_RCU)
#include <linux/rcuclassic.h>
-#else /* #ifdef CONFIG_CLASSIC_RCU */
+#elif defined(CONFIG_TREE_RCU)
+#include <linux/rcutree.h>
+#elif defined(CONFIG_PREEMPT_RCU)
#include <linux/rcupreempt.h>
-#endif /* #else #ifdef CONFIG_CLASSIC_RCU */
+#else
+#error "Unknown RCU implementation specified to kernel configuration"
+#endif /* #else #if defined(CONFIG_CLASSIC_RCU) */
#define RCU_HEAD_INIT { .next = NULL, .func = NULL }
#define RCU_HEAD(head) struct rcu_head head = RCU_HEAD_INIT
@@ -142,6 +146,7 @@ struct rcu_head {
* on the write-side to insure proper synchronization.
*/
#define rcu_read_lock_sched() preempt_disable()
+#define rcu_read_lock_sched_notrace() preempt_disable_notrace()
/*
* rcu_read_unlock_sched - marks the end of a RCU-classic critical section
@@ -149,6 +154,7 @@ struct rcu_head {
* See rcu_read_lock_sched for more information.
*/
#define rcu_read_unlock_sched() preempt_enable()
+#define rcu_read_unlock_sched_notrace() preempt_enable_notrace()
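
For illustration (not part of this patch): the _notrace variants pair exactly like rcu_read_lock_sched()/rcu_read_unlock_sched() but use the untraced preempt primitives, presumably so code running inside the tracer can take an RCU-sched read side without recursing. The reader below is invented.

	static int example_read_shared(int *shared)
	{
		int val;

		rcu_read_lock_sched_notrace();	/* matched against synchronize_sched() writers */
		val = *shared;
		rcu_read_unlock_sched_notrace();
		return val;
	}
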
diff --git a/include/linux/rcutree.h b/include/linux/rcutree.h
new file mode 100644
index 00000000000..d4368b7975c
--- /dev/null
+++ b/include/linux/rcutree.h
@@ -0,0 +1,329 @@
+/*
+ * Read-Copy Update mechanism for mutual exclusion (tree-based version)
+ *
+ * This program is free software; you can redistribute it and/or modify
+ * it under the terms of the GNU General Public License as published by
+ * the Free Software Foundation; either version 2 of the License, or
+ * (at your option) any later version.
+ *
+ * This program is distributed in the hope that it will be useful,
+ * but WITHOUT ANY WARRANTY; without even the implied warranty of
+ * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
+ * GNU General Public License for more details.
+ *
+ * You should have received a copy of the GNU General Public License
+ * along with this program; if not, write to the Free Software
+ * Foundation, Inc., 59 Temple Place - Suite 330, Boston, MA 02111-1307, USA.
+ *
+ * Copyright IBM Corporation, 2008
+ *
+ * Author: Dipankar Sarma <dipankar@in.ibm.com>
+ * Paul E. McKenney <paulmck@linux.vnet.ibm.com> Hierarchical algorithm
+ *
+ * Based on the original work by Paul McKenney <paulmck@us.ibm.com>
+ * and inputs from Rusty Russell, Andrea Arcangeli and Andi Kleen.
+ *
+ * For detailed explanation of Read-Copy Update mechanism see -
+ * Documentation/RCU
+ */
+
+#ifndef __LINUX_RCUTREE_H
+#define __LINUX_RCUTREE_H
+
+#include <linux/cache.h>
+#include <linux/spinlock.h>
+#include <linux/threads.h>
+#include <linux/percpu.h>
+#include <linux/cpumask.h>
+#include <linux/seqlock.h>
+
+/*
+ * Define shape of hierarchy based on NR_CPUS and CONFIG_RCU_FANOUT.
+ * In theory, it should be possible to add more levels straightforwardly.
+ * In practice, this has not been tested, so there is probably some
+ * bug somewhere.
+ */
+#define MAX_RCU_LVLS 3
+#define RCU_FANOUT (CONFIG_RCU_FANOUT)
+#define RCU_FANOUT_SQ (RCU_FANOUT * RCU_FANOUT)
+#define RCU_FANOUT_CUBE (RCU_FANOUT_SQ * RCU_FANOUT)
+
+#if NR_CPUS <= RCU_FANOUT
+# define NUM_RCU_LVLS 1
+# define NUM_RCU_LVL_0 1
+# define NUM_RCU_LVL_1 (NR_CPUS)
+# define NUM_RCU_LVL_2 0
+# define NUM_RCU_LVL_3 0
+#elif NR_CPUS <= RCU_FANOUT_SQ
+# define NUM_RCU_LVLS 2
+# define NUM_RCU_LVL_0 1
+# define NUM_RCU_LVL_1 (((NR_CPUS) + RCU_FANOUT - 1) / RCU_FANOUT)
+# define NUM_RCU_LVL_2 (NR_CPUS)
+# define NUM_RCU_LVL_3 0
+#elif NR_CPUS <= RCU_FANOUT_CUBE
+# define NUM_RCU_LVLS 3
+# define NUM_RCU_LVL_0 1
+# define NUM_RCU_LVL_1 (((NR_CPUS) + RCU_FANOUT_SQ - 1) / RCU_FANOUT_SQ)
+# define NUM_RCU_LVL_2 (((NR_CPUS) + (RCU_FANOUT) - 1) / (RCU_FANOUT))
+# define NUM_RCU_LVL_3 NR_CPUS
+#else
+# error "CONFIG_RCU_FANOUT insufficient for NR_CPUS"
+#endif /* #if (NR_CPUS) <= RCU_FANOUT */
+
+#define RCU_SUM (NUM_RCU_LVL_0 + NUM_RCU_LVL_1 + NUM_RCU_LVL_2 + NUM_RCU_LVL_3)
+#define NUM_RCU_NODES (RCU_SUM - NR_CPUS)
+
+/*
+ * Dynticks per-CPU state.
+ */
+struct rcu_dynticks {
+ int dynticks_nesting; /* Track nesting level, sort of. */
+ int dynticks; /* Even value for dynticks-idle, else odd. */
+ int dynticks_nmi; /* Even value for either dynticks-idle or */
+ /* not in nmi handler, else odd. So this */
+ /* remains even for nmi from irq handler. */
+};
+
+/*
+ * Definition for node within the RCU grace-period-detection hierarchy.
+ */
+struct rcu_node {
+ spinlock_t lock;
+ unsigned long qsmask; /* CPUs or groups that need to switch in */
+ /* order for current grace period to proceed.*/
+ unsigned long qsmaskinit;
+ /* Per-GP initialization for qsmask. */
+ unsigned long grpmask; /* Mask to apply to parent qsmask. */
+ int grplo; /* lowest-numbered CPU or group here. */
+ int grphi; /* highest-numbered CPU or group here. */
+ u8 grpnum; /* CPU/group number for next level up. */
+ u8 level; /* root is at level 0. */
+ struct rcu_node *parent;
+} ____cacheline_internodealigned_in_smp;
+
+/* Index values for nxttail array in struct rcu_data. */
+#define RCU_DONE_TAIL 0 /* Also RCU_WAIT head. */
+#define RCU_WAIT_TAIL 1 /* Also RCU_NEXT_READY head. */
+#define RCU_NEXT_READY_TAIL 2 /* Also RCU_NEXT head. */
+#define RCU_NEXT_TAIL 3
+#define RCU_NEXT_SIZE 4
+
+/* Per-CPU data for read-copy update. */
+struct rcu_data {
+ /* 1) quiescent-state and grace-period handling : */
+ long completed; /* Track rsp->completed gp number */
+ /* in order to detect GP end. */
+ long gpnum; /* Highest gp number that this CPU */
+ /* is aware of having started. */
+ long passed_quiesc_completed;
+ /* Value of completed at time of qs. */
+ bool passed_quiesc; /* User-mode/idle loop etc. */
+ bool qs_pending; /* Core waits for quiesc state. */
+ bool beenonline; /* CPU online at least once. */
+ struct rcu_node *mynode; /* This CPU's leaf of hierarchy */
+ unsigned long grpmask; /* Mask to apply to leaf qsmask. */
+
+ /* 2) batch handling */
+ /*
+ * If nxtlist is not NULL, it is partitioned as follows.
+ * Any of the partitions might be empty, in which case the
+ * pointer to that partition will be equal to the pointer for
+ * the following partition. When the list is empty, all of
+ * the nxttail elements point to nxtlist, which is NULL.
+ *
+ * [*nxttail[RCU_NEXT_READY_TAIL], NULL = *nxttail[RCU_NEXT_TAIL]):
+ * Entries that might have arrived after current GP ended
+ * [*nxttail[RCU_WAIT_TAIL], *nxttail[RCU_NEXT_READY_TAIL]):
+ * Entries known to have arrived before current GP ended
+ * [*nxttail[RCU_DONE_TAIL], *nxttail[RCU_WAIT_TAIL]):
+ * Entries that batch # <= ->completed - 1: waiting for current GP
+ * [nxtlist, *nxttail[RCU_DONE_TAIL]):
+ * Entries that batch # <= ->completed
+ * The grace period for these entries has completed, and
+ * the other grace-period-completed entries may be moved
+ * here temporarily in rcu_process_callbacks().
+ */
+ struct rcu_head *nxtlist;
+ struct rcu_head **nxttail[RCU_NEXT_SIZE];
+ long qlen; /* # of queued callbacks */
+ long blimit; /* Upper limit on a processed batch */
+
+#ifdef CONFIG_NO_HZ
+ /* 3) dynticks interface. */
+ struct rcu_dynticks *dynticks; /* Shared per-CPU dynticks state. */
+ int dynticks_snap; /* Per-GP tracking for dynticks. */
+ int dynticks_nmi_snap; /* Per-GP tracking for dynticks_nmi. */
+#endif /* #ifdef CONFIG_NO_HZ */
+
+ /* 4) reasons this CPU needed to be kicked by force_quiescent_state */
+#ifdef CONFIG_NO_HZ
+ unsigned long dynticks_fqs; /* Kicked due to dynticks idle. */
+#endif /* #ifdef CONFIG_NO_HZ */
+ unsigned long offline_fqs; /* Kicked due to being offline. */
+ unsigned long resched_ipi; /* Sent a resched IPI. */
+
+ /* 5) state to allow this CPU to force_quiescent_state on others */
+ long n_rcu_pending; /* rcu_pending() calls since boot. */
+ long n_rcu_pending_force_qs; /* when to force quiescent states. */
+
+ int cpu;
+};
+
+/* Values for signaled field in struct rcu_state. */
+#define RCU_GP_INIT 0 /* Grace period being initialized. */
+#define RCU_SAVE_DYNTICK 1 /* Need to scan dyntick state. */
+#define RCU_FORCE_QS 2 /* Need to force quiescent state. */
+#ifdef CONFIG_NO_HZ
+#define RCU_SIGNAL_INIT RCU_SAVE_DYNTICK
+#else /* #ifdef CONFIG_NO_HZ */
+#define RCU_SIGNAL_INIT RCU_FORCE_QS
+#endif /* #else #ifdef CONFIG_NO_HZ */
+
+#define RCU_JIFFIES_TILL_FORCE_QS 3 /* for rsp->jiffies_force_qs */
+#ifdef CONFIG_RCU_CPU_STALL_DETECTOR
+#define RCU_SECONDS_TILL_STALL_CHECK (10 * HZ) /* for rsp->jiffies_stall */
+#define RCU_SECONDS_TILL_STALL_RECHECK (30 * HZ) /* for rsp->jiffies_stall */
+#define RCU_STALL_RAT_DELAY 2 /* Allow other CPUs time */
+ /* to take at least one */
+ /* scheduling clock irq */
+ /* before ratting on them. */
+
+#endif /* #ifdef CONFIG_RCU_CPU_STALL_DETECTOR */
+
+/*
+ * RCU global state, including node hierarchy. This hierarchy is
+ * represented in "heap" form in a dense array. The root (first level)
+ * of the hierarchy is in ->node[0] (referenced by ->level[0]), the second
+ * level in ->node[1] through ->node[m] (->node[1] referenced by ->level[1]),
+ * and the third level in ->node[m+1] and following (->node[m+1] referenced
+ * by ->level[2]). The number of levels is determined by the number of
+ * CPUs and by CONFIG_RCU_FANOUT. Small systems will have a "hierarchy"
+ * consisting of a single rcu_node.
+ */
+struct rcu_state {
+ struct rcu_node node[NUM_RCU_NODES]; /* Hierarchy. */
+ struct rcu_node *level[NUM_RCU_LVLS]; /* Hierarchy levels. */
+ u32 levelcnt[MAX_RCU_LVLS + 1]; /* # nodes in each level. */
+ u8 levelspread[NUM_RCU_LVLS]; /* kids/node in each level. */
+ struct rcu_data *rda[NR_CPUS]; /* array of rdp pointers. */
+
+ /* The following fields are guarded by the root rcu_node's lock. */
+
+ u8 signaled ____cacheline_internodealigned_in_smp;
+ /* Force QS state. */
+ long gpnum; /* Current gp number. */
+ long completed; /* # of last completed gp. */
+ spinlock_t onofflock; /* exclude on/offline and */
+ /* starting new GP. */
+ spinlock_t fqslock; /* Only one task forcing */
+ /* quiescent states. */
+ unsigned long jiffies_force_qs; /* Time at which to invoke */
+ /* force_quiescent_state(). */
+ unsigned long n_force_qs; /* Number of calls to */
+ /* force_quiescent_state(). */
+ unsigned long n_force_qs_lh; /* ~Number of calls leaving */
+ /* due to lock unavailable. */
+ unsigned long n_force_qs_ngp; /* Number of calls leaving */
+ /* due to no GP active. */
+#ifdef CONFIG_RCU_CPU_STALL_DETECTOR
+ unsigned long gp_start; /* Time at which GP started, */
+ /* but in jiffies. */
+ unsigned long jiffies_stall; /* Time at which to check */
+ /* for CPU stalls. */
+#endif /* #ifdef CONFIG_RCU_CPU_STALL_DETECTOR */
+#ifdef CONFIG_NO_HZ
+ long dynticks_completed; /* Value of completed @ snap. */
+#endif /* #ifdef CONFIG_NO_HZ */
+};
+
+extern struct rcu_state rcu_state;
+DECLARE_PER_CPU(struct rcu_data, rcu_data);
+
+extern struct rcu_state rcu_bh_state;
+DECLARE_PER_CPU(struct rcu_data, rcu_bh_data);
+
+/*
+ * Increment the quiescent state counter.
+ * The counter is a bit degenerated: We do not need to know
+ * how many quiescent states passed, just if there was at least
+ * one since the start of the grace period. Thus just a flag.
+ */
+static inline void rcu_qsctr_inc(int cpu)
+{
+ struct rcu_data *rdp = &per_cpu(rcu_data, cpu);
+ rdp->passed_quiesc = 1;
+ rdp->passed_quiesc_completed = rdp->completed;
+}
+static inline void rcu_bh_qsctr_inc(int cpu)
+{
+ struct rcu_data *rdp = &per_cpu(rcu_bh_data, cpu);
+ rdp->passed_quiesc = 1;
+ rdp->passed_quiesc_completed = rdp->completed;
+}
+
+extern int rcu_pending(int cpu);
+extern int rcu_needs_cpu(int cpu);
+
+#ifdef CONFIG_DEBUG_LOCK_ALLOC
+extern struct lockdep_map rcu_lock_map;
+# define rcu_read_acquire() \
+ lock_acquire(&rcu_lock_map, 0, 0, 2, 1, NULL, _THIS_IP_)
+# define rcu_read_release() lock_release(&rcu_lock_map, 1, _THIS_IP_)
+#else
+# define rcu_read_acquire() do { } while (0)
+# define rcu_read_release() do { } while (0)
+#endif
+
+static inline void __rcu_read_lock(void)
+{
+ preempt_disable();
+ __acquire(RCU);
+ rcu_read_acquire();
+}
+static inline void __rcu_read_unlock(void)
+{
+ rcu_read_release();
+ __release(RCU);
+ preempt_enable();
+}
+static inline void __rcu_read_lock_bh(void)
+{
+ local_bh_disable();
+ __acquire(RCU_BH);
+ rcu_read_acquire();
+}
+static inline void __rcu_read_unlock_bh(void)
+{
+ rcu_read_release();
+ __release(RCU_BH);
+ local_bh_enable();
+}
+
+#define __synchronize_sched() synchronize_rcu()
+
+#define call_rcu_sched(head, func) call_rcu(head, func)
+
+static inline void rcu_init_sched(void)
+{
+}
+
+extern void __rcu_init(void);
+extern void rcu_check_callbacks(int cpu, int user);
+extern void rcu_restart_cpu(int cpu);
+
+extern long rcu_batches_completed(void);
+extern long rcu_batches_completed_bh(void);
+
+#ifdef CONFIG_NO_HZ
+void rcu_enter_nohz(void);
+void rcu_exit_nohz(void);
+#else /* CONFIG_NO_HZ */
+static inline void rcu_enter_nohz(void)
+{
+}
+static inline void rcu_exit_nohz(void)
+{
+}
+#endif /* CONFIG_NO_HZ */
+
+#endif /* __LINUX_RCUTREE_H */
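
To make the shape macros concrete, a worked example with illustrative values (not taken from any particular config):

	/*
	 * Example: CONFIG_RCU_FANOUT = 64, NR_CPUS = 128.
	 * 128 > RCU_FANOUT (64) but <= RCU_FANOUT_SQ (4096), so the second
	 * branch above applies:
	 *   NUM_RCU_LVLS  = 2
	 *   NUM_RCU_LVL_0 = 1                    root rcu_node
	 *   NUM_RCU_LVL_1 = (128 + 63) / 64 = 2  leaf rcu_nodes
	 *   NUM_RCU_LVL_2 = 128                  per-CPU rcu_data, not rcu_node
	 *   RCU_SUM       = 1 + 2 + 128 = 131
	 *   NUM_RCU_NODES = 131 - 128 = 3        one root plus two leaves
	 */
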
diff --git a/include/linux/reiserfs_fs.h b/include/linux/reiserfs_fs.h
index e9963af16cd..bc5114d35e9 100644
--- a/include/linux/reiserfs_fs.h
+++ b/include/linux/reiserfs_fs.h
@@ -87,7 +87,7 @@ void reiserfs_warning(struct super_block *s, const char *fmt, ...);
if( !( cond ) ) \
reiserfs_panic( NULL, "reiserfs[%i]: assertion " scond " failed at " \
__FILE__ ":%i:%s: " format "\n", \
- in_interrupt() ? -1 : task_pid_nr(current), __LINE__ , __FUNCTION__ , ##args )
+ in_interrupt() ? -1 : task_pid_nr(current), __LINE__ , __func__ , ##args )
#define RASSERT(cond, format, args...) __RASSERT(cond, #cond, format, ##args)
diff --git a/include/linux/reiserfs_fs_sb.h b/include/linux/reiserfs_fs_sb.h
index 315517e8bfa..bda6b562a1e 100644
--- a/include/linux/reiserfs_fs_sb.h
+++ b/include/linux/reiserfs_fs_sb.h
@@ -178,6 +178,7 @@ struct reiserfs_journal {
struct reiserfs_journal_cnode *j_first; /* oldest journal block. start here for traverse */
struct block_device *j_dev_bd;
+ fmode_t j_dev_mode;
int j_1st_reserved_block; /* first block on s_dev of reserved area journal */
unsigned long j_state;
diff --git a/include/linux/resource.h b/include/linux/resource.h
index aaa423a6f3d..40fc7e62608 100644
--- a/include/linux/resource.h
+++ b/include/linux/resource.h
@@ -59,10 +59,10 @@ struct rlimit {
#define _STK_LIM (8*1024*1024)
/*
- * GPG wants 32kB of mlocked memory, to make sure pass phrases
+ * GPG2 wants 64kB of mlocked memory, to make sure pass phrases
* and other sensitive information are never written to disk.
*/
-#define MLOCK_LIMIT (8 * PAGE_SIZE)
+#define MLOCK_LIMIT ((PAGE_SIZE > 64*1024) ? PAGE_SIZE : 64*1024)
/*
* Due to binary compatibility, the actual resource numbers
diff --git a/include/linux/rfkill.h b/include/linux/rfkill.h
index 4cd64b0d982..164332cbb77 100644
--- a/include/linux/rfkill.h
+++ b/include/linux/rfkill.h
@@ -108,6 +108,7 @@ struct rfkill {
struct device dev;
struct list_head node;
+ enum rfkill_state state_for_resume;
};
#define to_rfkill(d) container_of(d, struct rfkill, dev)
@@ -148,11 +149,4 @@ static inline char *rfkill_get_led_name(struct rfkill *rfkill)
#endif
}
-/* rfkill notification chain */
-#define RFKILL_STATE_CHANGED 0x0001 /* state of a normal rfkill
- switch has changed */
-
-int register_rfkill_notifier(struct notifier_block *nb);
-int unregister_rfkill_notifier(struct notifier_block *nb);
-
#endif /* RFKILL_H */
diff --git a/include/linux/ring_buffer.h b/include/linux/ring_buffer.h
new file mode 100644
index 00000000000..b3b35966008
--- /dev/null
+++ b/include/linux/ring_buffer.h
@@ -0,0 +1,140 @@
+#ifndef _LINUX_RING_BUFFER_H
+#define _LINUX_RING_BUFFER_H
+
+#include <linux/mm.h>
+#include <linux/seq_file.h>
+
+struct ring_buffer;
+struct ring_buffer_iter;
+
+/*
+ * Don't reference this struct directly, use functions below.
+ */
+struct ring_buffer_event {
+ u32 type:2, len:3, time_delta:27;
+ u32 array[];
+};
+
+/**
+ * enum ring_buffer_type - internal ring buffer types
+ *
+ * @RINGBUF_TYPE_PADDING: Left over page padding
+ * array is ignored
+ * size is variable depending on how much
+ * padding is needed
+ *
+ * @RINGBUF_TYPE_TIME_EXTEND: Extend the time delta
+ * array[0] = time delta (28 .. 59)
+ * size = 8 bytes
+ *
+ * @RINGBUF_TYPE_TIME_STAMP: Sync time stamp with external clock
+ * array[0] = tv_nsec
+ * array[1..2] = tv_sec
+ * size = 16 bytes
+ *
+ * @RINGBUF_TYPE_DATA: Data record
+ * If len is zero:
+ * array[0] holds the actual length
+ * array[1..(length+3)/4] holds data
+ * size = 4 + 4 + length (bytes)
+ * else
+ * length = len << 2
+ * array[0..(length+3)/4-1] holds data
+ * size = 4 + length (bytes)
+ */
+enum ring_buffer_type {
+ RINGBUF_TYPE_PADDING,
+ RINGBUF_TYPE_TIME_EXTEND,
+ /* FIXME: RINGBUF_TYPE_TIME_STAMP not implemented */
+ RINGBUF_TYPE_TIME_STAMP,
+ RINGBUF_TYPE_DATA,
+};
+
+unsigned ring_buffer_event_length(struct ring_buffer_event *event);
+void *ring_buffer_event_data(struct ring_buffer_event *event);
+
+/**
+ * ring_buffer_event_time_delta - return the delta timestamp of the event
+ * @event: the event to get the delta timestamp of
+ *
+ * The delta timestamp is the 27 bit timestamp since the last event.
+ */
+static inline unsigned
+ring_buffer_event_time_delta(struct ring_buffer_event *event)
+{
+ return event->time_delta;
+}
+
+/*
+ * size is in bytes for each per CPU buffer.
+ */
+struct ring_buffer *
+ring_buffer_alloc(unsigned long size, unsigned flags);
+void ring_buffer_free(struct ring_buffer *buffer);
+
+int ring_buffer_resize(struct ring_buffer *buffer, unsigned long size);
+
+struct ring_buffer_event *
+ring_buffer_lock_reserve(struct ring_buffer *buffer,
+ unsigned long length,
+ unsigned long *flags);
+int ring_buffer_unlock_commit(struct ring_buffer *buffer,
+ struct ring_buffer_event *event,
+ unsigned long flags);
+int ring_buffer_write(struct ring_buffer *buffer,
+ unsigned long length, void *data);
+
+struct ring_buffer_event *
+ring_buffer_peek(struct ring_buffer *buffer, int cpu, u64 *ts);
+struct ring_buffer_event *
+ring_buffer_consume(struct ring_buffer *buffer, int cpu, u64 *ts);
+
+struct ring_buffer_iter *
+ring_buffer_read_start(struct ring_buffer *buffer, int cpu);
+void ring_buffer_read_finish(struct ring_buffer_iter *iter);
+
+struct ring_buffer_event *
+ring_buffer_iter_peek(struct ring_buffer_iter *iter, u64 *ts);
+struct ring_buffer_event *
+ring_buffer_read(struct ring_buffer_iter *iter, u64 *ts);
+void ring_buffer_iter_reset(struct ring_buffer_iter *iter);
+int ring_buffer_iter_empty(struct ring_buffer_iter *iter);
+
+unsigned long ring_buffer_size(struct ring_buffer *buffer);
+
+void ring_buffer_reset_cpu(struct ring_buffer *buffer, int cpu);
+void ring_buffer_reset(struct ring_buffer *buffer);
+
+int ring_buffer_swap_cpu(struct ring_buffer *buffer_a,
+ struct ring_buffer *buffer_b, int cpu);
+
+int ring_buffer_empty(struct ring_buffer *buffer);
+int ring_buffer_empty_cpu(struct ring_buffer *buffer, int cpu);
+
+void ring_buffer_record_disable(struct ring_buffer *buffer);
+void ring_buffer_record_enable(struct ring_buffer *buffer);
+void ring_buffer_record_disable_cpu(struct ring_buffer *buffer, int cpu);
+void ring_buffer_record_enable_cpu(struct ring_buffer *buffer, int cpu);
+
+unsigned long ring_buffer_entries(struct ring_buffer *buffer);
+unsigned long ring_buffer_overruns(struct ring_buffer *buffer);
+unsigned long ring_buffer_entries_cpu(struct ring_buffer *buffer, int cpu);
+unsigned long ring_buffer_overrun_cpu(struct ring_buffer *buffer, int cpu);
+
+u64 ring_buffer_time_stamp(int cpu);
+void ring_buffer_normalize_time_stamp(int cpu, u64 *ts);
+
+void tracing_on(void);
+void tracing_off(void);
+void tracing_off_permanent(void);
+
+void *ring_buffer_alloc_read_page(struct ring_buffer *buffer);
+void ring_buffer_free_read_page(struct ring_buffer *buffer, void *data);
+int ring_buffer_read_page(struct ring_buffer *buffer,
+ void **data_page, int cpu, int full);
+
+enum ring_buffer_flags {
+ RB_FL_OVERWRITE = 1 << 0,
+};
+
+#endif /* _LINUX_RING_BUFFER_H */
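
For illustration (not part of this patch): one reserve/commit write and one consuming read against the interface declared above. The payload struct, sizes and function name are invented; the ring buffer calls themselves are the ones prototyped in this header.

	#include <linux/errno.h>
	#include <linux/kernel.h>
	#include <linux/ring_buffer.h>

	struct example_payload {
		unsigned long ip;
		unsigned long parent_ip;
	};

	static int example_use_ring_buffer(void)
	{
		struct ring_buffer *rb;
		struct ring_buffer_event *event;
		struct example_payload *entry;
		unsigned long flags;
		u64 ts;

		rb = ring_buffer_alloc(4096, RB_FL_OVERWRITE);	/* 4096 bytes per CPU */
		if (!rb)
			return -ENOMEM;

		/* Producer side: reserve space, fill it in, commit. */
		event = ring_buffer_lock_reserve(rb, sizeof(*entry), &flags);
		if (event) {
			entry = ring_buffer_event_data(event);
			entry->ip = 0;
			entry->parent_ip = 0;
			ring_buffer_unlock_commit(rb, event, flags);
		}

		/* Consumer side: pop the oldest event on CPU 0, if there is one. */
		event = ring_buffer_consume(rb, 0, &ts);
		if (event)
			printk(KERN_INFO "consumed %u byte event\n",
			       ring_buffer_event_length(event));

		ring_buffer_free(rb);
		return 0;
	}
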
diff --git a/include/linux/rio_drv.h b/include/linux/rio_drv.h
index 90987b7bcc1..32c0547ffaf 100644
--- a/include/linux/rio_drv.h
+++ b/include/linux/rio_drv.h
@@ -427,9 +427,9 @@ void rio_dev_put(struct rio_dev *);
* Get the unique RIO device identifier. Returns the device
* identifier string.
*/
-static inline char *rio_name(struct rio_dev *rdev)
+static inline const char *rio_name(struct rio_dev *rdev)
{
- return rdev->dev.bus_id;
+ return dev_name(&rdev->dev);
}
/**
diff --git a/include/linux/rmap.h b/include/linux/rmap.h
index fed6f5e0b41..89f0564b10c 100644
--- a/include/linux/rmap.h
+++ b/include/linux/rmap.h
@@ -39,18 +39,6 @@ struct anon_vma {
#ifdef CONFIG_MMU
-extern struct kmem_cache *anon_vma_cachep;
-
-static inline struct anon_vma *anon_vma_alloc(void)
-{
- return kmem_cache_alloc(anon_vma_cachep, GFP_KERNEL);
-}
-
-static inline void anon_vma_free(struct anon_vma *anon_vma)
-{
- kmem_cache_free(anon_vma_cachep, anon_vma);
-}
-
static inline void anon_vma_lock(struct vm_area_struct *vma)
{
struct anon_vma *anon_vma = vma->anon_vma;
@@ -75,6 +63,9 @@ void anon_vma_unlink(struct vm_area_struct *);
void anon_vma_link(struct vm_area_struct *);
void __anon_vma_link(struct vm_area_struct *);
+extern struct anon_vma *page_lock_anon_vma(struct page *page);
+extern void page_unlock_anon_vma(struct anon_vma *anon_vma);
+
/*
* rmap interfaces called when adding or removing pte of page
*/
@@ -117,6 +108,19 @@ unsigned long page_address_in_vma(struct page *, struct vm_area_struct *);
*/
int page_mkclean(struct page *);
+#ifdef CONFIG_UNEVICTABLE_LRU
+/*
+ * called in munlock()/munmap() path to check for other vmas holding
+ * the page mlocked.
+ */
+int try_to_munlock(struct page *);
+#else
+static inline int try_to_munlock(struct page *page)
+{
+ return 0; /* a.k.a. SWAP_SUCCESS */
+}
+#endif
+
#else /* !CONFIG_MMU */
#define anon_vma_init() do {} while (0)
@@ -140,5 +144,6 @@ static inline int page_mkclean(struct page *page)
#define SWAP_SUCCESS 0
#define SWAP_AGAIN 1
#define SWAP_FAIL 2
+#define SWAP_MLOCK 3
#endif /* _LINUX_RMAP_H */
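
For illustration (not part of this patch): a sketch of how a munlock-path caller might interpret the new SWAP_MLOCK return code; the function name and surrounding logic are invented, only try_to_munlock() and the SWAP_* codes come from this header.

	#include <linux/rmap.h>

	static void example_munlock_page(struct page *page)
	{
		if (try_to_munlock(page) == SWAP_MLOCK)
			return;		/* some other VMA still holds the page mlocked */

		/* No VM_LOCKED mapping remains: the page may become evictable again. */
	}
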
diff --git a/include/linux/rtmutex.h b/include/linux/rtmutex.h
index 382bb795116..f19b00b7d53 100644
--- a/include/linux/rtmutex.h
+++ b/include/linux/rtmutex.h
@@ -54,7 +54,7 @@ struct hrtimer_sleeper;
#ifdef CONFIG_DEBUG_RT_MUTEXES
# define __DEBUG_RT_MUTEX_INITIALIZER(mutexname) \
, .name = #mutexname, .file = __FILE__, .line = __LINE__
-# define rt_mutex_init(mutex) __rt_mutex_init(mutex, __FUNCTION__)
+# define rt_mutex_init(mutex) __rt_mutex_init(mutex, __func__)
extern void rt_mutex_debug_task_free(struct task_struct *tsk);
#else
# define __DEBUG_RT_MUTEX_INITIALIZER(mutexname)
diff --git a/include/linux/rtnetlink.h b/include/linux/rtnetlink.h
index 2b3d51c6ec9..e88f7058b3a 100644
--- a/include/linux/rtnetlink.h
+++ b/include/linux/rtnetlink.h
@@ -107,6 +107,11 @@ enum {
RTM_GETADDRLABEL,
#define RTM_GETADDRLABEL RTM_GETADDRLABEL
+ RTM_GETDCB = 78,
+#define RTM_GETDCB RTM_GETDCB
+ RTM_SETDCB,
+#define RTM_SETDCB RTM_SETDCB
+
__RTM_MAX,
#define RTM_MAX (((__RTM_MAX + 3) & ~3) - 1)
};
diff --git a/include/linux/sched.h b/include/linux/sched.h
index 1a7e8461db5..bd5ff78798c 100644
--- a/include/linux/sched.h
+++ b/include/linux/sched.h
@@ -96,6 +96,7 @@ struct exec_domain;
struct futex_pi_state;
struct robust_list_head;
struct bio;
+struct bts_tracer;
/*
* List of flags we want to share for kernel threads,
@@ -247,6 +248,7 @@ extern void init_idle(struct task_struct *idle, int cpu);
extern void init_idle_bootup_task(struct task_struct *idle);
extern int runqueue_is_locked(void);
+extern void task_rq_unlock_wait(struct task_struct *p);
extern cpumask_t nohz_cpu_mask;
#if defined(CONFIG_SMP) && defined(CONFIG_NO_HZ)
@@ -258,8 +260,6 @@ static inline int select_nohz_load_balancer(int cpu)
}
#endif
-extern unsigned long rt_needs_cpu(int cpu);
-
/*
* Only dump TASK_* tasks. (0 for all tasks)
*/
@@ -287,7 +287,6 @@ extern void trap_init(void);
extern void account_process_tick(struct task_struct *task, int user);
extern void update_process_times(int user);
extern void scheduler_tick(void);
-extern void hrtick_resched(void);
extern void sched_show_task(struct task_struct *p);
@@ -403,12 +402,21 @@ extern int get_dumpable(struct mm_struct *mm);
#define MMF_DUMP_MAPPED_PRIVATE 4
#define MMF_DUMP_MAPPED_SHARED 5
#define MMF_DUMP_ELF_HEADERS 6
+#define MMF_DUMP_HUGETLB_PRIVATE 7
+#define MMF_DUMP_HUGETLB_SHARED 8
#define MMF_DUMP_FILTER_SHIFT MMF_DUMPABLE_BITS
-#define MMF_DUMP_FILTER_BITS 5
+#define MMF_DUMP_FILTER_BITS 7
#define MMF_DUMP_FILTER_MASK \
(((1 << MMF_DUMP_FILTER_BITS) - 1) << MMF_DUMP_FILTER_SHIFT)
#define MMF_DUMP_FILTER_DEFAULT \
- ((1 << MMF_DUMP_ANON_PRIVATE) | (1 << MMF_DUMP_ANON_SHARED))
+ ((1 << MMF_DUMP_ANON_PRIVATE) | (1 << MMF_DUMP_ANON_SHARED) |\
+ (1 << MMF_DUMP_HUGETLB_PRIVATE) | MMF_DUMP_MASK_DEFAULT_ELF)
+
+#ifdef CONFIG_CORE_DUMP_DEFAULT_ELF_HEADERS
+# define MMF_DUMP_MASK_DEFAULT_ELF (1 << MMF_DUMP_ELF_HEADERS)
+#else
+# define MMF_DUMP_MASK_DEFAULT_ELF 0
+#endif
struct sighand_struct {
atomic_t count;
@@ -425,6 +433,39 @@ struct pacct_struct {
unsigned long ac_minflt, ac_majflt;
};
+/**
+ * struct task_cputime - collected CPU time counts
+ * @utime: time spent in user mode, in &cputime_t units
+ * @stime: time spent in kernel mode, in &cputime_t units
+ * @sum_exec_runtime: total time spent on the CPU, in nanoseconds
+ *
+ * This structure groups together three kinds of CPU time that are
+ * tracked for threads and thread groups. Most things considering
+ * CPU time want to group these counts together and treat all three
+ * of them in parallel.
+ */
+struct task_cputime {
+ cputime_t utime;
+ cputime_t stime;
+ unsigned long long sum_exec_runtime;
+};
+/* Alternate field names when used to cache expirations. */
+#define prof_exp stime
+#define virt_exp utime
+#define sched_exp sum_exec_runtime
+
+/**
+ * struct thread_group_cputime - thread group interval timer counts
+ * @totals: thread group interval timers; substructure for
+ * uniprocessor kernel, per-cpu for SMP kernel.
+ *
+ * This structure contains the version of task_cputime, above, that is
+ * used for thread group CPU clock calculations.
+ */
+struct thread_group_cputime {
+ struct task_cputime *totals;
+};
+
/*
* NOTE! "signal_struct" does not have it's own
* locking, because a shared signal_struct always
@@ -470,6 +511,17 @@ struct signal_struct {
cputime_t it_prof_expires, it_virt_expires;
cputime_t it_prof_incr, it_virt_incr;
+ /*
+ * Thread group totals for process CPU clocks.
+ * See thread_group_cputime(), et al, for details.
+ */
+ struct thread_group_cputime cputime;
+
+ /* Earliest-expiration cache. */
+ struct task_cputime cputime_expires;
+
+ struct list_head cpu_timers[3];
+
/* job control IDs */
/*
@@ -500,7 +552,7 @@ struct signal_struct {
* Live threads maintain their own counters and add to these
* in __exit_signal, except for the group leader.
*/
- cputime_t utime, stime, cutime, cstime;
+ cputime_t cutime, cstime;
cputime_t gtime;
cputime_t cgtime;
unsigned long nvcsw, nivcsw, cnvcsw, cnivcsw;
@@ -509,14 +561,6 @@ struct signal_struct {
struct task_io_accounting ioac;
/*
- * Cumulative ns of scheduled CPU time for dead threads in the
- * group, not including a zombie group leader. (This only differs
- * from jiffies_to_ns(utime + stime) if sched_clock uses something
- * other than jiffies.)
- */
- unsigned long long sum_sched_runtime;
-
- /*
* We don't bother to synchronize most readers of this at all,
* because there is no reader checking a limit that actually needs
* to get both rlim_cur and rlim_max atomically, and either one
@@ -527,14 +571,6 @@ struct signal_struct {
*/
struct rlimit rlim[RLIM_NLIMITS];
- struct list_head cpu_timers[3];
-
- /* keep the process-shared keyrings here so that they do the right
- * thing in threads created with CLONE_THREAD */
-#ifdef CONFIG_KEYS
- struct key *session_keyring; /* keyring inherited over fork */
- struct key *process_keyring; /* keyring private to this process */
-#endif
#ifdef CONFIG_BSD_PROCESS_ACCT
struct pacct_struct pacct; /* per-process accounting information */
#endif
@@ -587,6 +623,10 @@ struct user_struct {
atomic_t inotify_watches; /* How many inotify watches does this user have? */
atomic_t inotify_devs; /* How many inotify devs does this user have opened? */
#endif
+#ifdef CONFIG_EPOLL
+ atomic_t epoll_devs; /* The number of epoll descriptors currently open */
+ atomic_t epoll_watches; /* The number of file descriptors currently watched */
+#endif
#ifdef CONFIG_POSIX_MQUEUE
/* protected by mq_lock */
unsigned long mq_bytes; /* How many bytes can be allocated to mqueue? */
@@ -601,6 +641,7 @@ struct user_struct {
/* Hash table maintenance information */
struct hlist_node uidhash_node;
uid_t uid;
+ struct user_namespace *user_ns;
#ifdef CONFIG_USER_SCHED
struct task_group *tg;
@@ -618,6 +659,7 @@ extern struct user_struct *find_user(uid_t);
extern struct user_struct root_user;
#define INIT_USER (&root_user)
+
struct backing_dev_info;
struct reclaim_state;
@@ -625,8 +667,7 @@ struct reclaim_state;
struct sched_info {
/* cumulative counters */
unsigned long pcount; /* # of times run on this cpu */
- unsigned long long cpu_time, /* time spent on the cpu */
- run_delay; /* time spent waiting on a runqueue */
+ unsigned long long run_delay; /* time spent waiting on a runqueue */
/* timestamps */
unsigned long long last_arrival,/* when we last ran on a cpu */
@@ -638,10 +679,6 @@ struct sched_info {
};
#endif /* defined(CONFIG_SCHEDSTATS) || defined(CONFIG_TASK_DELAY_ACCT) */
-#ifdef CONFIG_SCHEDSTATS
-extern const struct file_operations proc_schedstat_operations;
-#endif /* CONFIG_SCHEDSTATS */
-
#ifdef CONFIG_TASK_DELAY_ACCT
struct task_delay_info {
spinlock_t lock;
@@ -845,38 +882,7 @@ partition_sched_domains(int ndoms_new, cpumask_t *doms_new,
#endif /* !CONFIG_SMP */
struct io_context; /* See blkdev.h */
-#define NGROUPS_SMALL 32
-#define NGROUPS_PER_BLOCK ((unsigned int)(PAGE_SIZE / sizeof(gid_t)))
-struct group_info {
- int ngroups;
- atomic_t usage;
- gid_t small_block[NGROUPS_SMALL];
- int nblocks;
- gid_t *blocks[0];
-};
-
-/*
- * get_group_info() must be called with the owning task locked (via task_lock())
- * when task != current. The reason being that the vast majority of callers are
- * looking at current->group_info, which can not be changed except by the
- * current task. Changing current->group_info requires the task lock, too.
- */
-#define get_group_info(group_info) do { \
- atomic_inc(&(group_info)->usage); \
-} while (0)
-#define put_group_info(group_info) do { \
- if (atomic_dec_and_test(&(group_info)->usage)) \
- groups_free(group_info); \
-} while (0)
-
-extern struct group_info *groups_alloc(int gidsetsize);
-extern void groups_free(struct group_info *group_info);
-extern int set_current_groups(struct group_info *group_info);
-extern int groups_search(struct group_info *group_info, gid_t grp);
-/* access the groups "array" with this macro */
-#define GROUP_AT(gi, i) \
- ((gi)->blocks[(i)/NGROUPS_PER_BLOCK][(i)%NGROUPS_PER_BLOCK])
#ifdef ARCH_HAS_PREFETCH_SWITCH_STACK
extern void prefetch_stack(struct task_struct *t);
@@ -898,7 +904,6 @@ struct sched_class {
void (*enqueue_task) (struct rq *rq, struct task_struct *p, int wakeup);
void (*dequeue_task) (struct rq *rq, struct task_struct *p, int sleep);
void (*yield_task) (struct rq *rq);
- int (*select_task_rq)(struct task_struct *p, int sync);
void (*check_preempt_curr) (struct rq *rq, struct task_struct *p, int sync);
@@ -906,6 +911,8 @@ struct sched_class {
void (*put_prev_task) (struct rq *rq, struct task_struct *p);
#ifdef CONFIG_SMP
+ int (*select_task_rq)(struct task_struct *p, int sync);
+
unsigned long (*load_balance) (struct rq *this_rq, int this_cpu,
struct rq *busiest, unsigned long max_load_move,
struct sched_domain *sd, enum cpu_idle_type idle,
@@ -917,16 +924,17 @@ struct sched_class {
void (*pre_schedule) (struct rq *this_rq, struct task_struct *task);
void (*post_schedule) (struct rq *this_rq);
void (*task_wake_up) (struct rq *this_rq, struct task_struct *task);
-#endif
- void (*set_curr_task) (struct rq *rq);
- void (*task_tick) (struct rq *rq, struct task_struct *p, int queued);
- void (*task_new) (struct rq *rq, struct task_struct *p);
void (*set_cpus_allowed)(struct task_struct *p,
const cpumask_t *newmask);
void (*rq_online)(struct rq *rq);
void (*rq_offline)(struct rq *rq);
+#endif
+
+ void (*set_curr_task) (struct rq *rq);
+ void (*task_tick) (struct rq *rq, struct task_struct *p, int queued);
+ void (*task_new) (struct rq *rq, struct task_struct *p);
void (*switched_from) (struct rq *this_rq, struct task_struct *task,
int running);
@@ -1119,6 +1127,19 @@ struct task_struct {
struct list_head ptraced;
struct list_head ptrace_entry;
+#ifdef CONFIG_X86_PTRACE_BTS
+ /*
+ * This is the tracer handle for the ptrace BTS extension.
+ * This field actually belongs to the ptracer task.
+ */
+ struct bts_tracer *bts;
+ /*
+ * The buffer to hold the BTS data.
+ */
+ void *bts_buffer;
+ size_t bts_size;
+#endif /* CONFIG_X86_PTRACE_BTS */
+
/* PID/PID hash table linkage. */
struct pid_link pids[PIDTYPE_MAX];
struct list_head thread_group;
@@ -1136,22 +1157,16 @@ struct task_struct {
/* mm fault and swap info: this can arguably be seen as either mm-specific or thread-specific */
unsigned long min_flt, maj_flt;
- cputime_t it_prof_expires, it_virt_expires;
- unsigned long long it_sched_expires;
+ struct task_cputime cputime_expires;
struct list_head cpu_timers[3];
/* process credentials */
- uid_t uid,euid,suid,fsuid;
- gid_t gid,egid,sgid,fsgid;
- struct group_info *group_info;
- kernel_cap_t cap_effective, cap_inheritable, cap_permitted, cap_bset;
- struct user_struct *user;
- unsigned securebits;
-#ifdef CONFIG_KEYS
- unsigned char jit_keyring; /* default keyring to attach requested keys to */
- struct key *request_key_auth; /* assumed request_key authority */
- struct key *thread_keyring; /* keyring private to this thread */
-#endif
+ const struct cred *real_cred; /* objective and real subjective task
+ * credentials (COW) */
+ const struct cred *cred; /* effective (overridable) subjective task
+ * credentials (COW) */
+ struct mutex cred_exec_mutex; /* execve vs ptrace cred calculation mutex */
+
char comm[TASK_COMM_LEN]; /* executable name excluding path
- access with [gs]et_task_comm (which lock
it with task_lock())
@@ -1188,9 +1203,6 @@ struct task_struct {
int (*notifier)(void *priv);
void *notifier_data;
sigset_t *notifier_mask;
-#ifdef CONFIG_SECURITY
- void *security;
-#endif
struct audit_context *audit_context;
#ifdef CONFIG_AUDITSYSCALL
uid_t loginuid;
@@ -1303,6 +1315,31 @@ struct task_struct {
int latency_record_count;
struct latency_record latency_record[LT_SAVECOUNT];
#endif
+ /*
+ * time slack values; these are used to round up poll() and
+ * select() etc timeout values. These are in nanoseconds.
+ */
+ unsigned long timer_slack_ns;
+ unsigned long default_timer_slack_ns;
+
+ struct list_head *scm_work_list;
+#ifdef CONFIG_FUNCTION_GRAPH_TRACER
+ /* Index of the currently stored address in ret_stack */
+ int curr_ret_stack;
+ /* Stack of return addresses for return function tracing */
+ struct ftrace_ret_stack *ret_stack;
+ /*
+ * Number of functions that haven't been traced
+ * because of depth overrun.
+ */
+ atomic_t trace_overrun;
+ /* Pause for the tracing */
+ atomic_t tracing_graph_pause;
+#endif
+#ifdef CONFIG_TRACING
+ /* state flags for use by tracers */
+ unsigned long trace;
+#endif
};
/*
@@ -1587,6 +1624,7 @@ extern unsigned long long cpu_clock(int cpu);
extern unsigned long long
task_sched_runtime(struct task_struct *task);
+extern unsigned long long thread_group_sched_runtime(struct task_struct *task);
/* sched_exec is called by processes performing an exec */
#ifdef CONFIG_SMP
@@ -1621,6 +1659,7 @@ extern unsigned int sysctl_sched_features;
extern unsigned int sysctl_sched_migration_cost;
extern unsigned int sysctl_sched_nr_migrate;
extern unsigned int sysctl_sched_shares_ratelimit;
+extern unsigned int sysctl_sched_shares_thresh;
int sched_nr_latency_handler(struct ctl_table *table, int write,
struct file *file, void __user *buffer, size_t *length,
@@ -1720,7 +1759,6 @@ static inline struct user_struct *get_uid(struct user_struct *u)
return u;
}
extern void free_uid(struct user_struct *);
-extern void switch_uid(struct user_struct *);
extern void release_uids(struct user_namespace *ns);
#include <asm/current.h>
@@ -1739,9 +1777,6 @@ extern void wake_up_new_task(struct task_struct *tsk,
extern void sched_fork(struct task_struct *p, int clone_flags);
extern void sched_dead(struct task_struct *p);
-extern int in_group_p(gid_t);
-extern int in_egroup_p(gid_t);
-
extern void proc_caches_init(void);
extern void flush_signals(struct task_struct *);
extern void ignore_signals(struct task_struct *);
@@ -1873,6 +1908,8 @@ static inline unsigned long wait_task_inactive(struct task_struct *p,
#define for_each_process(p) \
for (p = &init_task ; (p = next_task(p)) != &init_task ; )
+extern bool is_single_threaded(struct task_struct *);
+
/*
* Careful: do_each_thread/while_each_thread is a double loop so
* 'break' will not work as expected - use goto instead.
@@ -2097,6 +2134,30 @@ static inline int spin_needbreak(spinlock_t *lock)
}
/*
+ * Thread group CPU time accounting.
+ */
+
+extern int thread_group_cputime_alloc(struct task_struct *);
+extern void thread_group_cputime(struct task_struct *, struct task_cputime *);
+
+static inline void thread_group_cputime_init(struct signal_struct *sig)
+{
+ sig->cputime.totals = NULL;
+}
+
+static inline int thread_group_cputime_clone_thread(struct task_struct *curr)
+{
+ if (curr->signal->cputime.totals)
+ return 0;
+ return thread_group_cputime_alloc(curr);
+}
+
+static inline void thread_group_cputime_free(struct signal_struct *sig)
+{
+ free_percpu(sig->cputime.totals);
+}
+
+/*
* Reevaluate whether the task has signals pending delivery.
* Wake the task if so.
* This is required every time the blocked sigset_t changes.
@@ -2158,6 +2219,7 @@ extern void normalize_rt_tasks(void);
extern struct task_group init_task_group;
#ifdef CONFIG_USER_SCHED
extern struct task_group root_task_group;
+extern void set_tg_uid(struct user_struct *user);
#endif
extern struct task_group *sched_create_group(struct task_group *parent);
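
For illustration (not part of this patch): reading the aggregated thread-group times through the accessor added above. The reporting function is invented; thread_group_cputime() and struct task_cputime are the ones introduced in this hunk, and cputime_to_clock_t() is the usual asm/cputime.h conversion.

	#include <linux/sched.h>

	static void example_report_group_time(struct task_struct *tsk)
	{
		struct task_cputime times;

		thread_group_cputime(tsk, &times);	/* fills utime, stime, sum_exec_runtime */

		printk(KERN_DEBUG "%s: utime=%lu stime=%lu total=%llu ns\n",
		       tsk->comm,
		       (unsigned long)cputime_to_clock_t(times.utime),
		       (unsigned long)cputime_to_clock_t(times.stime),
		       times.sum_exec_runtime);
	}
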
diff --git a/include/linux/securebits.h b/include/linux/securebits.h
index 92f09bdf117..d2c5ed845bc 100644
--- a/include/linux/securebits.h
+++ b/include/linux/securebits.h
@@ -32,7 +32,7 @@
setting is locked or not. A setting which is locked cannot be
changed from user-level. */
#define issecure_mask(X) (1 << (X))
-#define issecure(X) (issecure_mask(X) & current->securebits)
+#define issecure(X) (issecure_mask(X) & current_cred_xxx(securebits))
#define SECURE_ALL_BITS (issecure_mask(SECURE_NOROOT) | \
issecure_mask(SECURE_NO_SETUID_FIXUP) | \
diff --git a/include/linux/security.h b/include/linux/security.h
index f5c4a51eb42..3416cb85e77 100644
--- a/include/linux/security.h
+++ b/include/linux/security.h
@@ -37,6 +37,10 @@
/* Maximum number of letters for an LSM name string */
#define SECURITY_NAME_MAX 10
+/* If capable should audit the security request */
+#define SECURITY_CAP_NOAUDIT 0
+#define SECURITY_CAP_AUDIT 1
+
struct ctl_table;
struct audit_krule;
@@ -44,25 +48,25 @@ struct audit_krule;
* These functions are in security/capability.c and are used
* as the default capabilities functions
*/
-extern int cap_capable(struct task_struct *tsk, int cap);
+extern int cap_capable(struct task_struct *tsk, int cap, int audit);
extern int cap_settime(struct timespec *ts, struct timezone *tz);
extern int cap_ptrace_may_access(struct task_struct *child, unsigned int mode);
extern int cap_ptrace_traceme(struct task_struct *parent);
extern int cap_capget(struct task_struct *target, kernel_cap_t *effective, kernel_cap_t *inheritable, kernel_cap_t *permitted);
-extern int cap_capset_check(struct task_struct *target, kernel_cap_t *effective, kernel_cap_t *inheritable, kernel_cap_t *permitted);
-extern void cap_capset_set(struct task_struct *target, kernel_cap_t *effective, kernel_cap_t *inheritable, kernel_cap_t *permitted);
-extern int cap_bprm_set_security(struct linux_binprm *bprm);
-extern void cap_bprm_apply_creds(struct linux_binprm *bprm, int unsafe);
+extern int cap_capset(struct cred *new, const struct cred *old,
+ const kernel_cap_t *effective,
+ const kernel_cap_t *inheritable,
+ const kernel_cap_t *permitted);
+extern int cap_bprm_set_creds(struct linux_binprm *bprm);
extern int cap_bprm_secureexec(struct linux_binprm *bprm);
extern int cap_inode_setxattr(struct dentry *dentry, const char *name,
const void *value, size_t size, int flags);
extern int cap_inode_removexattr(struct dentry *dentry, const char *name);
extern int cap_inode_need_killpriv(struct dentry *dentry);
extern int cap_inode_killpriv(struct dentry *dentry);
-extern int cap_task_post_setuid(uid_t old_ruid, uid_t old_euid, uid_t old_suid, int flags);
-extern void cap_task_reparent_to_init(struct task_struct *p);
+extern int cap_task_fix_setuid(struct cred *new, const struct cred *old, int flags);
extern int cap_task_prctl(int option, unsigned long arg2, unsigned long arg3,
- unsigned long arg4, unsigned long arg5, long *rc_p);
+ unsigned long arg4, unsigned long arg5);
extern int cap_task_setscheduler(struct task_struct *p, int policy, struct sched_param *lp);
extern int cap_task_setioprio(struct task_struct *p, int ioprio);
extern int cap_task_setnice(struct task_struct *p, int nice);
@@ -105,7 +109,7 @@ extern unsigned long mmap_min_addr;
struct sched_param;
struct request_sock;
-/* bprm_apply_creds unsafe reasons */
+/* bprm->unsafe reasons */
#define LSM_UNSAFE_SHARE 1
#define LSM_UNSAFE_PTRACE 2
#define LSM_UNSAFE_PTRACE_CAP 4
@@ -149,36 +153,7 @@ static inline void security_free_mnt_opts(struct security_mnt_opts *opts)
*
* Security hooks for program execution operations.
*
- * @bprm_alloc_security:
- * Allocate and attach a security structure to the @bprm->security field.
- * The security field is initialized to NULL when the bprm structure is
- * allocated.
- * @bprm contains the linux_binprm structure to be modified.
- * Return 0 if operation was successful.
- * @bprm_free_security:
- * @bprm contains the linux_binprm structure to be modified.
- * Deallocate and clear the @bprm->security field.
- * @bprm_apply_creds:
- * Compute and set the security attributes of a process being transformed
- * by an execve operation based on the old attributes (current->security)
- * and the information saved in @bprm->security by the set_security hook.
- * Since this hook function (and its caller) are void, this hook can not
- * return an error. However, it can leave the security attributes of the
- * process unchanged if an access failure occurs at this point.
- * bprm_apply_creds is called under task_lock. @unsafe indicates various
- * reasons why it may be unsafe to change security state.
- * @bprm contains the linux_binprm structure.
- * @bprm_post_apply_creds:
- * Runs after bprm_apply_creds with the task_lock dropped, so that
- * functions which cannot be called safely under the task_lock can
- * be used. This hook is a good place to perform state changes on
- * the process such as closing open file descriptors to which access
- * is no longer granted if the attributes were changed.
- * Note that a security module might need to save state between
- * bprm_apply_creds and bprm_post_apply_creds to store the decision
- * on whether the process may proceed.
- * @bprm contains the linux_binprm structure.
- * @bprm_set_security:
+ * @bprm_set_creds:
* Save security information in the bprm->security field, typically based
* on information about the bprm->file, for later use by the apply_creds
* hook. This hook may also optionally check permissions (e.g. for
@@ -191,15 +166,30 @@ static inline void security_free_mnt_opts(struct security_mnt_opts *opts)
* @bprm contains the linux_binprm structure.
* Return 0 if the hook is successful and permission is granted.
* @bprm_check_security:
- * This hook mediates the point when a search for a binary handler will
- * begin. It allows a check the @bprm->security value which is set in
- * the preceding set_security call. The primary difference from
- * set_security is that the argv list and envp list are reliably
- * available in @bprm. This hook may be called multiple times
- * during a single execve; and in each pass set_security is called
- * first.
+ * This hook mediates the point when a search for a binary handler will
+ * begin. It allows a check of the @bprm->security value which is set in the
+ * preceding set_creds call. The primary difference from set_creds is
+ * that the argv list and envp list are reliably available in @bprm. This
+ * hook may be called multiple times during a single execve; and in each
+ * pass set_creds is called first.
* @bprm contains the linux_binprm structure.
* Return 0 if the hook is successful and permission is granted.
+ * @bprm_committing_creds:
+ * Prepare to install the new security attributes of a process being
+ * transformed by an execve operation, based on the old credentials
+ * pointed to by @current->cred and the information set in @bprm->cred by
+ * the bprm_set_creds hook. @bprm points to the linux_binprm structure.
+ * This hook is a good place to perform state changes on the process such
+ * as closing open file descriptors to which access will no longer be
+ * granted when the attributes are changed. This is called immediately
+ * before commit_creds().
+ * @bprm_committed_creds:
+ * Tidy up after the installation of the new security attributes of a
+ * process being transformed by an execve operation. The new credentials
+ * have, by this point, been set to @current->cred. @bprm points to the
+ * linux_binprm structure. This hook is a good place to perform state
+ * changes on the process such as clearing out non-inheritable signal
+ * state. This is called immediately after commit_creds().
* @bprm_secureexec:
* Return a boolean value (0 or 1) indicating whether a "secure exec"
* is required. The flag is passed in the auxiliary table
@@ -585,15 +575,31 @@ static inline void security_free_mnt_opts(struct security_mnt_opts *opts)
* manual page for definitions of the @clone_flags.
* @clone_flags contains the flags indicating what should be shared.
* Return 0 if permission is granted.
- * @task_alloc_security:
- * @p contains the task_struct for child process.
- * Allocate and attach a security structure to the p->security field. The
- * security field is initialized to NULL when the task structure is
- * allocated.
- * Return 0 if operation was successful.
- * @task_free_security:
- * @p contains the task_struct for process.
- * Deallocate and clear the p->security field.
+ * @cred_free:
+ * @cred points to the credentials.
+ * Deallocate and clear the cred->security field in a set of credentials.
+ * @cred_prepare:
+ * @new points to the new credentials.
+ * @old points to the original credentials.
+ * @gfp indicates the atomicity of any memory allocations.
+ * Prepare a new set of credentials by copying the data from the old set.
+ * @cred_commit:
+ * @new points to the new credentials.
+ * @old points to the original credentials.
+ * Install a new set of credentials.
+ * @kernel_act_as:
+ * Set the credentials for a kernel service to act as (subjective context).
+ * @new points to the credentials to be modified.
+ * @secid specifies the security ID to be set
+ * The current task must be the one that nominated @secid.
+ * Return 0 if successful.
+ * @kernel_create_files_as:
+ * Set the file creation context in a set of credentials to be the same as
+ * the objective context of the specified inode.
+ * @new points to the credentials to be modified.
+ * @inode points to the inode to use as a reference.
+ * The current task must be the one that nominated @inode.
+ * Return 0 if successful.
* @task_setuid:
* Check permission before setting one or more of the user identity
* attributes of the current process. The @flags parameter indicates
@@ -606,15 +612,13 @@ static inline void security_free_mnt_opts(struct security_mnt_opts *opts)
* @id2 contains a uid.
* @flags contains one of the LSM_SETID_* values.
* Return 0 if permission is granted.
- * @task_post_setuid:
+ * @task_fix_setuid:
* Update the module's state after setting one or more of the user
* identity attributes of the current process. The @flags parameter
* indicates which of the set*uid system calls invoked this hook. If
- * @flags is LSM_SETID_FS, then @old_ruid is the old fs uid and the other
- * parameters are not used.
- * @old_ruid contains the old real uid (or fs uid if LSM_SETID_FS).
- * @old_euid contains the old effective uid (or -1 if LSM_SETID_FS).
- * @old_suid contains the old saved uid (or -1 if LSM_SETID_FS).
+ * @new is the set of credentials that will be installed. Modifications
+ * should be made to this rather than to @current->cred.
+ * @old is the set of credentials that are being replaced
* @flags contains one of the LSM_SETID_* values.
* Return 0 on success.
* @task_setgid:
@@ -717,13 +721,8 @@ static inline void security_free_mnt_opts(struct security_mnt_opts *opts)
* @arg3 contains an argument.
* @arg4 contains an argument.
* @arg5 contains an argument.
- * @rc_p contains a pointer to communicate back the forced return code
- * Return 0 if permission is granted, and non-zero if the security module
- * has taken responsibility (setting *rc_p) for the prctl call.
- * @task_reparent_to_init:
- * Set the security attributes in @p->security for a kernel thread that
- * is being reparented to the init task.
- * @p contains the task_struct for the kernel thread.
+ * Return -ENOSYS if no-one wanted to handle this op, any other value to
+ * cause prctl() to return immediately with that value.
* @task_to_inode:
* Set the security attributes for an inode based on an associated task's
* security attributes, e.g. for /proc/pid inodes.
@@ -1000,7 +999,7 @@ static inline void security_free_mnt_opts(struct security_mnt_opts *opts)
* See whether a specific operational right is granted to a process on a
* key.
* @key_ref refers to the key (key pointer + possession attribute bit).
- * @context points to the process to provide the context against which to
+ * @cred points to the credentials to provide the context against which to
* evaluate the security data on the key.
* @perm describes the combination of permissions required of this key.
* Return 1 if permission granted, 0 if permission denied and -ve if the
@@ -1162,6 +1161,7 @@ static inline void security_free_mnt_opts(struct security_mnt_opts *opts)
* @child process.
* Security modules may also want to perform a process tracing check
* during an execve in the set_security or apply_creds hooks of
+ * tracing check during an execve in the bprm_set_creds hook of
* binprm_security_ops if the process is being traced and its security
* attributes would be changed by the execve.
* @child contains the task_struct structure for the target process.
@@ -1185,29 +1185,15 @@ static inline void security_free_mnt_opts(struct security_mnt_opts *opts)
* @inheritable contains the inheritable capability set.
* @permitted contains the permitted capability set.
* Return 0 if the capability sets were successfully obtained.
- * @capset_check:
- * Check permission before setting the @effective, @inheritable, and
- * @permitted capability sets for the @target process.
- * Caveat: @target is also set to current if a set of processes is
- * specified (i.e. all processes other than current and init or a
- * particular process group). Hence, the capset_set hook may need to
- * revalidate permission to the actual target process.
- * @target contains the task_struct structure for target process.
- * @effective contains the effective capability set.
- * @inheritable contains the inheritable capability set.
- * @permitted contains the permitted capability set.
- * Return 0 if permission is granted.
- * @capset_set:
+ * @capset:
* Set the @effective, @inheritable, and @permitted capability sets for
- * the @target process. Since capset_check cannot always check permission
- * to the real @target process, this hook may also perform permission
- * checking to determine if the current process is allowed to set the
- * capability sets of the @target process. However, this hook has no way
- * of returning an error due to the structure of the sys_capset code.
- * @target contains the task_struct structure for target process.
+ * the current process.
+ * @new contains the new credentials structure for the target process.
+ * @old contains the current credentials structure for the target process.
* @effective contains the effective capability set.
* @inheritable contains the inheritable capability set.
* @permitted contains the permitted capability set.
+ * Return 0 and update @new if permission is granted.
* @capable:
* Check whether the @tsk process has the @cap capability.
* @tsk contains the task_struct for the process.
@@ -1299,15 +1285,12 @@ struct security_operations {
int (*capget) (struct task_struct *target,
kernel_cap_t *effective,
kernel_cap_t *inheritable, kernel_cap_t *permitted);
- int (*capset_check) (struct task_struct *target,
- kernel_cap_t *effective,
- kernel_cap_t *inheritable,
- kernel_cap_t *permitted);
- void (*capset_set) (struct task_struct *target,
- kernel_cap_t *effective,
- kernel_cap_t *inheritable,
- kernel_cap_t *permitted);
- int (*capable) (struct task_struct *tsk, int cap);
+ int (*capset) (struct cred *new,
+ const struct cred *old,
+ const kernel_cap_t *effective,
+ const kernel_cap_t *inheritable,
+ const kernel_cap_t *permitted);
+ int (*capable) (struct task_struct *tsk, int cap, int audit);
int (*acct) (struct file *file);
int (*sysctl) (struct ctl_table *table, int op);
int (*quotactl) (int cmds, int type, int id, struct super_block *sb);
@@ -1316,18 +1299,16 @@ struct security_operations {
int (*settime) (struct timespec *ts, struct timezone *tz);
int (*vm_enough_memory) (struct mm_struct *mm, long pages);
- int (*bprm_alloc_security) (struct linux_binprm *bprm);
- void (*bprm_free_security) (struct linux_binprm *bprm);
- void (*bprm_apply_creds) (struct linux_binprm *bprm, int unsafe);
- void (*bprm_post_apply_creds) (struct linux_binprm *bprm);
- int (*bprm_set_security) (struct linux_binprm *bprm);
+ int (*bprm_set_creds) (struct linux_binprm *bprm);
int (*bprm_check_security) (struct linux_binprm *bprm);
int (*bprm_secureexec) (struct linux_binprm *bprm);
+ void (*bprm_committing_creds) (struct linux_binprm *bprm);
+ void (*bprm_committed_creds) (struct linux_binprm *bprm);
int (*sb_alloc_security) (struct super_block *sb);
void (*sb_free_security) (struct super_block *sb);
int (*sb_copy_data) (char *orig, char *copy);
- int (*sb_kern_mount) (struct super_block *sb, void *data);
+ int (*sb_kern_mount) (struct super_block *sb, int flags, void *data);
int (*sb_show_options) (struct seq_file *m, struct super_block *sb);
int (*sb_statfs) (struct dentry *dentry);
int (*sb_mount) (char *dev_name, struct path *path,
@@ -1406,14 +1387,18 @@ struct security_operations {
int (*file_send_sigiotask) (struct task_struct *tsk,
struct fown_struct *fown, int sig);
int (*file_receive) (struct file *file);
- int (*dentry_open) (struct file *file);
+ int (*dentry_open) (struct file *file, const struct cred *cred);
int (*task_create) (unsigned long clone_flags);
- int (*task_alloc_security) (struct task_struct *p);
- void (*task_free_security) (struct task_struct *p);
+ void (*cred_free) (struct cred *cred);
+ int (*cred_prepare)(struct cred *new, const struct cred *old,
+ gfp_t gfp);
+ void (*cred_commit)(struct cred *new, const struct cred *old);
+ int (*kernel_act_as)(struct cred *new, u32 secid);
+ int (*kernel_create_files_as)(struct cred *new, struct inode *inode);
int (*task_setuid) (uid_t id0, uid_t id1, uid_t id2, int flags);
- int (*task_post_setuid) (uid_t old_ruid /* or fsuid */ ,
- uid_t old_euid, uid_t old_suid, int flags);
+ int (*task_fix_setuid) (struct cred *new, const struct cred *old,
+ int flags);
int (*task_setgid) (gid_t id0, gid_t id1, gid_t id2, int flags);
int (*task_setpgid) (struct task_struct *p, pid_t pgid);
int (*task_getpgid) (struct task_struct *p);
@@ -1433,8 +1418,7 @@ struct security_operations {
int (*task_wait) (struct task_struct *p);
int (*task_prctl) (int option, unsigned long arg2,
unsigned long arg3, unsigned long arg4,
- unsigned long arg5, long *rc_p);
- void (*task_reparent_to_init) (struct task_struct *p);
+ unsigned long arg5);
void (*task_to_inode) (struct task_struct *p, struct inode *inode);
int (*ipc_permission) (struct kern_ipc_perm *ipcp, short flag);
@@ -1539,10 +1523,10 @@ struct security_operations {
/* key management security hooks */
#ifdef CONFIG_KEYS
- int (*key_alloc) (struct key *key, struct task_struct *tsk, unsigned long flags);
+ int (*key_alloc) (struct key *key, const struct cred *cred, unsigned long flags);
void (*key_free) (struct key *key);
int (*key_permission) (key_ref_t key_ref,
- struct task_struct *context,
+ const struct cred *cred,
key_perm_t perm);
int (*key_getsecurity)(struct key *key, char **_buffer);
#endif /* CONFIG_KEYS */
@@ -1568,15 +1552,12 @@ int security_capget(struct task_struct *target,
kernel_cap_t *effective,
kernel_cap_t *inheritable,
kernel_cap_t *permitted);
-int security_capset_check(struct task_struct *target,
- kernel_cap_t *effective,
- kernel_cap_t *inheritable,
- kernel_cap_t *permitted);
-void security_capset_set(struct task_struct *target,
- kernel_cap_t *effective,
- kernel_cap_t *inheritable,
- kernel_cap_t *permitted);
+int security_capset(struct cred *new, const struct cred *old,
+ const kernel_cap_t *effective,
+ const kernel_cap_t *inheritable,
+ const kernel_cap_t *permitted);
int security_capable(struct task_struct *tsk, int cap);
+int security_capable_noaudit(struct task_struct *tsk, int cap);
int security_acct(struct file *file);
int security_sysctl(struct ctl_table *table, int op);
int security_quotactl(int cmds, int type, int id, struct super_block *sb);
@@ -1585,17 +1566,16 @@ int security_syslog(int type);
int security_settime(struct timespec *ts, struct timezone *tz);
int security_vm_enough_memory(long pages);
int security_vm_enough_memory_mm(struct mm_struct *mm, long pages);
-int security_bprm_alloc(struct linux_binprm *bprm);
-void security_bprm_free(struct linux_binprm *bprm);
-void security_bprm_apply_creds(struct linux_binprm *bprm, int unsafe);
-void security_bprm_post_apply_creds(struct linux_binprm *bprm);
-int security_bprm_set(struct linux_binprm *bprm);
+int security_vm_enough_memory_kern(long pages);
+int security_bprm_set_creds(struct linux_binprm *bprm);
int security_bprm_check(struct linux_binprm *bprm);
+void security_bprm_committing_creds(struct linux_binprm *bprm);
+void security_bprm_committed_creds(struct linux_binprm *bprm);
int security_bprm_secureexec(struct linux_binprm *bprm);
int security_sb_alloc(struct super_block *sb);
void security_sb_free(struct super_block *sb);
int security_sb_copy_data(char *orig, char *copy);
-int security_sb_kern_mount(struct super_block *sb, void *data);
+int security_sb_kern_mount(struct super_block *sb, int flags, void *data);
int security_sb_show_options(struct seq_file *m, struct super_block *sb);
int security_sb_statfs(struct dentry *dentry);
int security_sb_mount(char *dev_name, struct path *path,
@@ -1662,13 +1642,16 @@ int security_file_set_fowner(struct file *file);
int security_file_send_sigiotask(struct task_struct *tsk,
struct fown_struct *fown, int sig);
int security_file_receive(struct file *file);
-int security_dentry_open(struct file *file);
+int security_dentry_open(struct file *file, const struct cred *cred);
int security_task_create(unsigned long clone_flags);
-int security_task_alloc(struct task_struct *p);
-void security_task_free(struct task_struct *p);
+void security_cred_free(struct cred *cred);
+int security_prepare_creds(struct cred *new, const struct cred *old, gfp_t gfp);
+void security_commit_creds(struct cred *new, const struct cred *old);
+int security_kernel_act_as(struct cred *new, u32 secid);
+int security_kernel_create_files_as(struct cred *new, struct inode *inode);
int security_task_setuid(uid_t id0, uid_t id1, uid_t id2, int flags);
-int security_task_post_setuid(uid_t old_ruid, uid_t old_euid,
- uid_t old_suid, int flags);
+int security_task_fix_setuid(struct cred *new, const struct cred *old,
+ int flags);
int security_task_setgid(gid_t id0, gid_t id1, gid_t id2, int flags);
int security_task_setpgid(struct task_struct *p, pid_t pgid);
int security_task_getpgid(struct task_struct *p);
@@ -1687,8 +1670,7 @@ int security_task_kill(struct task_struct *p, struct siginfo *info,
int sig, u32 secid);
int security_task_wait(struct task_struct *p);
int security_task_prctl(int option, unsigned long arg2, unsigned long arg3,
- unsigned long arg4, unsigned long arg5, long *rc_p);
-void security_task_reparent_to_init(struct task_struct *p);
+ unsigned long arg4, unsigned long arg5);
void security_task_to_inode(struct task_struct *p, struct inode *inode);
int security_ipc_permission(struct kern_ipc_perm *ipcp, short flag);
void security_ipc_getsecid(struct kern_ipc_perm *ipcp, u32 *secid);
@@ -1763,25 +1745,23 @@ static inline int security_capget(struct task_struct *target,
return cap_capget(target, effective, inheritable, permitted);
}
-static inline int security_capset_check(struct task_struct *target,
- kernel_cap_t *effective,
- kernel_cap_t *inheritable,
- kernel_cap_t *permitted)
+static inline int security_capset(struct cred *new,
+ const struct cred *old,
+ const kernel_cap_t *effective,
+ const kernel_cap_t *inheritable,
+ const kernel_cap_t *permitted)
{
- return cap_capset_check(target, effective, inheritable, permitted);
+ return cap_capset(new, old, effective, inheritable, permitted);
}
-static inline void security_capset_set(struct task_struct *target,
- kernel_cap_t *effective,
- kernel_cap_t *inheritable,
- kernel_cap_t *permitted)
+static inline int security_capable(struct task_struct *tsk, int cap)
{
- cap_capset_set(target, effective, inheritable, permitted);
+ return cap_capable(tsk, cap, SECURITY_CAP_AUDIT);
}
-static inline int security_capable(struct task_struct *tsk, int cap)
+static inline int security_capable_noaudit(struct task_struct *tsk, int cap)
{
- return cap_capable(tsk, cap);
+ return cap_capable(tsk, cap, SECURITY_CAP_NOAUDIT);
}
static inline int security_acct(struct file *file)
@@ -1817,40 +1797,39 @@ static inline int security_settime(struct timespec *ts, struct timezone *tz)
static inline int security_vm_enough_memory(long pages)
{
+ WARN_ON(current->mm == NULL);
return cap_vm_enough_memory(current->mm, pages);
}
static inline int security_vm_enough_memory_mm(struct mm_struct *mm, long pages)
{
+ WARN_ON(mm == NULL);
return cap_vm_enough_memory(mm, pages);
}
-static inline int security_bprm_alloc(struct linux_binprm *bprm)
+static inline int security_vm_enough_memory_kern(long pages)
{
- return 0;
+ /* If current is a kernel thread then current->mm is NULL, so we
+ pass NULL here; for this specific case that is fine */
+ return cap_vm_enough_memory(current->mm, pages);
}
-static inline void security_bprm_free(struct linux_binprm *bprm)
-{ }
-
-static inline void security_bprm_apply_creds(struct linux_binprm *bprm, int unsafe)
+static inline int security_bprm_set_creds(struct linux_binprm *bprm)
{
- cap_bprm_apply_creds(bprm, unsafe);
+ return cap_bprm_set_creds(bprm);
}
-static inline void security_bprm_post_apply_creds(struct linux_binprm *bprm)
+static inline int security_bprm_check(struct linux_binprm *bprm)
{
- return;
+ return 0;
}
-static inline int security_bprm_set(struct linux_binprm *bprm)
+static inline void security_bprm_committing_creds(struct linux_binprm *bprm)
{
- return cap_bprm_set_security(bprm);
}
-static inline int security_bprm_check(struct linux_binprm *bprm)
+static inline void security_bprm_committed_creds(struct linux_binprm *bprm)
{
- return 0;
}
static inline int security_bprm_secureexec(struct linux_binprm *bprm)
@@ -1871,7 +1850,7 @@ static inline int security_sb_copy_data(char *orig, char *copy)
return 0;
}
-static inline int security_sb_kern_mount(struct super_block *sb, void *data)
+static inline int security_sb_kern_mount(struct super_block *sb, int flags, void *data)
{
return 0;
}
@@ -2167,7 +2146,8 @@ static inline int security_file_receive(struct file *file)
return 0;
}
-static inline int security_dentry_open(struct file *file)
+static inline int security_dentry_open(struct file *file,
+ const struct cred *cred)
{
return 0;
}
@@ -2177,13 +2157,31 @@ static inline int security_task_create(unsigned long clone_flags)
return 0;
}
-static inline int security_task_alloc(struct task_struct *p)
+static inline void security_cred_free(struct cred *cred)
+{ }
+
+static inline int security_prepare_creds(struct cred *new,
+ const struct cred *old,
+ gfp_t gfp)
{
return 0;
}
-static inline void security_task_free(struct task_struct *p)
-{ }
+static inline void security_commit_creds(struct cred *new,
+ const struct cred *old)
+{
+}
+
+static inline int security_kernel_act_as(struct cred *cred, u32 secid)
+{
+ return 0;
+}
+
+static inline int security_kernel_create_files_as(struct cred *cred,
+ struct inode *inode)
+{
+ return 0;
+}
static inline int security_task_setuid(uid_t id0, uid_t id1, uid_t id2,
int flags)
@@ -2191,10 +2189,11 @@ static inline int security_task_setuid(uid_t id0, uid_t id1, uid_t id2,
return 0;
}
-static inline int security_task_post_setuid(uid_t old_ruid, uid_t old_euid,
- uid_t old_suid, int flags)
+static inline int security_task_fix_setuid(struct cred *new,
+ const struct cred *old,
+ int flags)
{
- return cap_task_post_setuid(old_ruid, old_euid, old_suid, flags);
+ return cap_task_fix_setuid(new, old, flags);
}
static inline int security_task_setgid(gid_t id0, gid_t id1, gid_t id2,
@@ -2281,14 +2280,9 @@ static inline int security_task_wait(struct task_struct *p)
static inline int security_task_prctl(int option, unsigned long arg2,
unsigned long arg3,
unsigned long arg4,
- unsigned long arg5, long *rc_p)
-{
- return cap_task_prctl(option, arg2, arg3, arg3, arg5, rc_p);
-}
-
-static inline void security_task_reparent_to_init(struct task_struct *p)
+ unsigned long arg5)
{
- cap_task_reparent_to_init(p);
+ return cap_task_prctl(option, arg2, arg3, arg4, arg5);
}
static inline void security_task_to_inode(struct task_struct *p, struct inode *inode)
@@ -2714,16 +2708,16 @@ static inline void security_skb_classify_flow(struct sk_buff *skb, struct flowi
#ifdef CONFIG_KEYS
#ifdef CONFIG_SECURITY
-int security_key_alloc(struct key *key, struct task_struct *tsk, unsigned long flags);
+int security_key_alloc(struct key *key, const struct cred *cred, unsigned long flags);
void security_key_free(struct key *key);
int security_key_permission(key_ref_t key_ref,
- struct task_struct *context, key_perm_t perm);
+ const struct cred *cred, key_perm_t perm);
int security_key_getsecurity(struct key *key, char **_buffer);
#else
static inline int security_key_alloc(struct key *key,
- struct task_struct *tsk,
+ const struct cred *cred,
unsigned long flags)
{
return 0;
@@ -2734,7 +2728,7 @@ static inline void security_key_free(struct key *key)
}
static inline int security_key_permission(key_ref_t key_ref,
- struct task_struct *context,
+ const struct cred *cred,
key_perm_t perm)
{
return 0;
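
Illustrative sketch (not part of the patch): with the hooks above now operating on an explicit @new/@old credential pair, a security module fills in the new struct security_operations members and only ever modifies the prospective @new credentials. The demo_* names below are hypothetical; cap_capset(), cap_task_fix_setuid() and register_security() are the helpers declared in this header.

#include <linux/security.h>
#include <linux/cred.h>
#include <linux/init.h>

/* Hypothetical module: delegates to the commoncap helpers shown above. */
static int demo_capset(struct cred *new, const struct cred *old,
		       const kernel_cap_t *effective,
		       const kernel_cap_t *inheritable,
		       const kernel_cap_t *permitted)
{
	/* Vet and apply the capability sets; changes go into @new,
	 * never into current->cred. */
	return cap_capset(new, old, effective, inheritable, permitted);
}

static int demo_task_fix_setuid(struct cred *new, const struct cred *old,
				int flags)
{
	/* Adjust capabilities in @new to follow the uid change. */
	return cap_task_fix_setuid(new, old, flags);
}

static struct security_operations demo_ops = {
	.capset		 = demo_capset,
	.task_fix_setuid = demo_task_fix_setuid,
};

static int __init demo_security_init(void)
{
	return register_security(&demo_ops);
}
security_initcall(demo_security_init);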
diff --git a/include/linux/seq_file.h b/include/linux/seq_file.h
index a1783b229ef..b3dfa72f13b 100644
--- a/include/linux/seq_file.h
+++ b/include/linux/seq_file.h
@@ -34,6 +34,7 @@ struct seq_operations {
#define SEQ_SKIP 1
+char *mangle_path(char *s, char *p, char *esc);
int seq_open(struct file *, const struct seq_operations *);
ssize_t seq_read(struct file *, char __user *, size_t, loff_t *);
loff_t seq_lseek(struct file *, loff_t, int);
@@ -60,6 +61,19 @@ static inline int seq_nodemask(struct seq_file *m, nodemask_t *mask)
return seq_bitmap(m, mask->bits, MAX_NUMNODES);
}
+int seq_bitmap_list(struct seq_file *m, unsigned long *bits,
+ unsigned int nr_bits);
+
+static inline int seq_cpumask_list(struct seq_file *m, cpumask_t *mask)
+{
+ return seq_bitmap_list(m, mask->bits, NR_CPUS);
+}
+
+static inline int seq_nodemask_list(struct seq_file *m, nodemask_t *mask)
+{
+ return seq_bitmap_list(m, mask->bits, MAX_NUMNODES);
+}
+
int single_open(struct file *, int (*)(struct seq_file *, void *), void *);
int single_release(struct inode *, struct file *);
void *__seq_open_private(struct file *, const struct seq_operations *, int);
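
Illustrative sketch (not part of the patch): the new seq_bitmap_list()/seq_cpumask_list() helpers print a mask in list form (e.g. "0-3,8") rather than as a hex bitmap. A show routine might use them as below; demo_cpus and demo_show are hypothetical names.

#include <linux/seq_file.h>
#include <linux/cpumask.h>

static cpumask_t demo_cpus;

static int demo_show(struct seq_file *m, void *v)
{
	seq_printf(m, "cpus: ");
	seq_cpumask_list(m, &demo_cpus);	/* emits e.g. "0-3,8" */
	seq_printf(m, "\n");
	return 0;
}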
diff --git a/include/linux/serial_core.h b/include/linux/serial_core.h
index e27f216361f..feb3b939ec4 100644
--- a/include/linux/serial_core.h
+++ b/include/linux/serial_core.h
@@ -155,6 +155,11 @@
#define PORT_SC26XX 82
+/* SH-SCI */
+#define PORT_SCIFA 83
+
+#define PORT_S3C6400 84
+
#ifdef __KERNEL__
#include <linux/compiler.h>
diff --git a/include/linux/sh_intc.h b/include/linux/sh_intc.h
new file mode 100644
index 00000000000..68e212ff9dd
--- /dev/null
+++ b/include/linux/sh_intc.h
@@ -0,0 +1,91 @@
+#ifndef __SH_INTC_H
+#define __SH_INTC_H
+
+typedef unsigned char intc_enum;
+
+struct intc_vect {
+ intc_enum enum_id;
+ unsigned short vect;
+};
+
+#define INTC_VECT(enum_id, vect) { enum_id, vect }
+#define INTC_IRQ(enum_id, irq) INTC_VECT(enum_id, irq2evt(irq))
+
+struct intc_group {
+ intc_enum enum_id;
+ intc_enum enum_ids[32];
+};
+
+#define INTC_GROUP(enum_id, ids...) { enum_id, { ids } }
+
+struct intc_mask_reg {
+ unsigned long set_reg, clr_reg, reg_width;
+ intc_enum enum_ids[32];
+#ifdef CONFIG_SMP
+ unsigned long smp;
+#endif
+};
+
+struct intc_prio_reg {
+ unsigned long set_reg, clr_reg, reg_width, field_width;
+ intc_enum enum_ids[16];
+#ifdef CONFIG_SMP
+ unsigned long smp;
+#endif
+};
+
+struct intc_sense_reg {
+ unsigned long reg, reg_width, field_width;
+ intc_enum enum_ids[16];
+};
+
+#ifdef CONFIG_SMP
+#define INTC_SMP(stride, nr) .smp = (stride) | ((nr) << 8)
+#else
+#define INTC_SMP(stride, nr)
+#endif
+
+struct intc_desc {
+ struct intc_vect *vectors;
+ unsigned int nr_vectors;
+ struct intc_group *groups;
+ unsigned int nr_groups;
+ struct intc_mask_reg *mask_regs;
+ unsigned int nr_mask_regs;
+ struct intc_prio_reg *prio_regs;
+ unsigned int nr_prio_regs;
+ struct intc_sense_reg *sense_regs;
+ unsigned int nr_sense_regs;
+ char *name;
+#if defined(CONFIG_CPU_SH3) || defined(CONFIG_CPU_SH4A)
+ struct intc_mask_reg *ack_regs;
+ unsigned int nr_ack_regs;
+#endif
+};
+
+#define _INTC_ARRAY(a) a, sizeof(a)/sizeof(*a)
+#define DECLARE_INTC_DESC(symbol, chipname, vectors, groups, \
+ mask_regs, prio_regs, sense_regs) \
+struct intc_desc symbol __initdata = { \
+ _INTC_ARRAY(vectors), _INTC_ARRAY(groups), \
+ _INTC_ARRAY(mask_regs), _INTC_ARRAY(prio_regs), \
+ _INTC_ARRAY(sense_regs), \
+ chipname, \
+}
+
+#if defined(CONFIG_CPU_SH3) || defined(CONFIG_CPU_SH4A)
+#define DECLARE_INTC_DESC_ACK(symbol, chipname, vectors, groups, \
+ mask_regs, prio_regs, sense_regs, ack_regs) \
+struct intc_desc symbol __initdata = { \
+ _INTC_ARRAY(vectors), _INTC_ARRAY(groups), \
+ _INTC_ARRAY(mask_regs), _INTC_ARRAY(prio_regs), \
+ _INTC_ARRAY(sense_regs), \
+ chipname, \
+ _INTC_ARRAY(ack_regs), \
+}
+#endif
+
+void __init register_intc_controller(struct intc_desc *desc);
+int intc_set_priority(unsigned int irq, unsigned int prio);
+
+#endif /* __SH_INTC_H */
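
Illustrative sketch (not part of the patch): a CPU setup file describes its controller with the structures above and hands the result to register_intc_controller(). Every enum value, vector number and register address below is invented purely for illustration.

#include <linux/sh_intc.h>
#include <linux/init.h>

enum { UNUSED = 0, IRQ_TMU0, IRQ_TMU1, TMU_GROUP };

static struct intc_vect demo_vectors[] __initdata = {
	INTC_VECT(IRQ_TMU0, 0x400),
	INTC_VECT(IRQ_TMU1, 0x420),
};

static struct intc_group demo_groups[] __initdata = {
	INTC_GROUP(TMU_GROUP, IRQ_TMU0, IRQ_TMU1),
};

static struct intc_mask_reg demo_mask_regs[] __initdata = {
	{ 0xffd00044, 0xffd00048, 8,	/* set, clear, width */
	  { IRQ_TMU0, IRQ_TMU1 } },
};

static struct intc_prio_reg demo_prio_regs[] __initdata = {
	{ 0xffd00004, 0, 16, 4, { TMU_GROUP } },
};

static struct intc_sense_reg demo_sense_regs[] __initdata = {
	{ 0xffd0001c, 16, 2, { IRQ_TMU0, IRQ_TMU1 } },
};

static DECLARE_INTC_DESC(demo_intc_desc, "demo", demo_vectors, demo_groups,
			 demo_mask_regs, demo_prio_regs, demo_sense_regs);

static void __init demo_irq_setup(void)
{
	register_intc_controller(&demo_intc_desc);
}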
diff --git a/include/linux/skbuff.h b/include/linux/skbuff.h
index 2725f4e5a9b..cf2cb50f77d 100644
--- a/include/linux/skbuff.h
+++ b/include/linux/skbuff.h
@@ -250,6 +250,9 @@ typedef unsigned char *sk_buff_data_t;
* @tc_verd: traffic control verdict
* @ndisc_nodetype: router type (from link layer)
* @do_not_encrypt: set to prevent encryption of this frame
+ * @requeue: set to indicate that the wireless core should attempt
+ * a software retry on this frame if we failed to
+ * receive an ACK for it
* @dma_cookie: a cookie to one of several possible DMA operations
* done by skb DMA functions
* @secmark: security marking
@@ -269,8 +272,9 @@ struct sk_buff {
struct dst_entry *dst;
struct rtable *rtable;
};
+#ifdef CONFIG_XFRM
struct sec_path *sp;
-
+#endif
/*
* This is the control buffer. It is free to use for every
* layer. Please put your private variables there. If you
@@ -325,6 +329,7 @@ struct sk_buff {
#endif
#if defined(CONFIG_MAC80211) || defined(CONFIG_MAC80211_MODULE)
__u8 do_not_encrypt:1;
+ __u8 requeue:1;
#endif
/* 0/13/14 bit hole */
@@ -488,6 +493,19 @@ static inline bool skb_queue_is_last(const struct sk_buff_head *list,
}
/**
+ * skb_queue_is_first - check if skb is the first entry in the queue
+ * @list: queue head
+ * @skb: buffer
+ *
+ * Returns true if @skb is the first buffer on the list.
+ */
+static inline bool skb_queue_is_first(const struct sk_buff_head *list,
+ const struct sk_buff *skb)
+{
+ return (skb->prev == (struct sk_buff *) list);
+}
+
+/**
* skb_queue_next - return the next packet in the queue
* @list: queue head
* @skb: current buffer
@@ -506,6 +524,24 @@ static inline struct sk_buff *skb_queue_next(const struct sk_buff_head *list,
}
/**
+ * skb_queue_prev - return the prev packet in the queue
+ * @list: queue head
+ * @skb: current buffer
+ *
+ * Return the prev packet in @list before @skb. It is only valid to
+ * call this if skb_queue_is_first() evaluates to false.
+ */
+static inline struct sk_buff *skb_queue_prev(const struct sk_buff_head *list,
+ const struct sk_buff *skb)
+{
+ /* This BUG_ON may seem severe, but if we just return then we
+ * are going to dereference garbage.
+ */
+ BUG_ON(skb_queue_is_first(list, skb));
+ return skb->prev;
+}
+
+/**
* skb_get - reference buffer
* @skb: buffer to reference
*
@@ -1647,8 +1683,12 @@ extern int skb_splice_bits(struct sk_buff *skb,
extern void skb_copy_and_csum_dev(const struct sk_buff *skb, u8 *to);
extern void skb_split(struct sk_buff *skb,
struct sk_buff *skb1, const u32 len);
+extern int skb_shift(struct sk_buff *tgt, struct sk_buff *skb,
+ int shiftlen);
extern struct sk_buff *skb_segment(struct sk_buff *skb, int features);
+extern int skb_gro_receive(struct sk_buff **head,
+ struct sk_buff *skb);
static inline void *skb_header_pointer(const struct sk_buff *skb, int offset,
int len, void *buffer)
@@ -1864,6 +1904,18 @@ static inline void skb_copy_queue_mapping(struct sk_buff *to, const struct sk_bu
to->queue_mapping = from->queue_mapping;
}
+#ifdef CONFIG_XFRM
+static inline struct sec_path *skb_sec_path(struct sk_buff *skb)
+{
+ return skb->sp;
+}
+#else
+static inline struct sec_path *skb_sec_path(struct sk_buff *skb)
+{
+ return NULL;
+}
+#endif
+
static inline int skb_is_gso(const struct sk_buff *skb)
{
return skb_shinfo(skb)->gso_size;
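
Illustrative sketch (not part of the patch): skb_queue_prev() pairs with skb_queue_is_first() the same way skb_queue_next() pairs with skb_queue_is_last(), so a queue can now be walked from the tail. demo_walk_backwards is hypothetical and assumes the caller already holds the queue lock.

#include <linux/skbuff.h>

static void demo_walk_backwards(struct sk_buff_head *list)
{
	struct sk_buff *skb = skb_peek_tail(list);

	while (skb) {
		/* ... inspect skb ... */
		if (skb_queue_is_first(list, skb))
			break;
		skb = skb_queue_prev(list, skb);
	}
}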
diff --git a/include/linux/slab.h b/include/linux/slab.h
index 5ff9676c1e2..f96d13c281e 100644
--- a/include/linux/slab.h
+++ b/include/linux/slab.h
@@ -23,6 +23,34 @@
#define SLAB_CACHE_DMA 0x00004000UL /* Use GFP_DMA memory */
#define SLAB_STORE_USER 0x00010000UL /* DEBUG: Store the last owner for bug hunting */
#define SLAB_PANIC 0x00040000UL /* Panic if kmem_cache_create() fails */
+/*
+ * SLAB_DESTROY_BY_RCU - **WARNING** READ THIS!
+ *
+ * This delays freeing the SLAB page by a grace period; it does _NOT_
+ * delay object freeing. This means that if you do kmem_cache_free()
+ * that memory location is free to be reused at any time. Thus it may
+ * be possible to see another object there in the same RCU grace period.
+ *
+ * This feature only ensures the memory location backing the object
+ * stays valid; the trick to using this is relying on an independent
+ * object validation pass. Something like:
+ *
+ * rcu_read_lock()
+ * again:
+ * obj = lockless_lookup(key);
+ * if (obj) {
+ * if (!try_get_ref(obj)) // might fail for free objects
+ * goto again;
+ *
+ * if (obj->key != key) { // not the object we expected
+ * put_ref(obj);
+ * goto again;
+ * }
+ * }
+ * rcu_read_unlock();
+ *
+ * See also the comment on struct slab_rcu in mm/slab.c.
+ */
#define SLAB_DESTROY_BY_RCU 0x00080000UL /* Defer freeing slabs to RCU */
#define SLAB_MEM_SPREAD 0x00100000UL /* Spread some memory over cpuset */
#define SLAB_TRACE 0x00200000UL /* Trace allocations and frees */
@@ -225,9 +253,9 @@ static inline void *kmem_cache_alloc_node(struct kmem_cache *cachep,
* request comes from.
*/
#if defined(CONFIG_DEBUG_SLAB) || defined(CONFIG_SLUB)
-extern void *__kmalloc_track_caller(size_t, gfp_t, void*);
+extern void *__kmalloc_track_caller(size_t, gfp_t, unsigned long);
#define kmalloc_track_caller(size, flags) \
- __kmalloc_track_caller(size, flags, __builtin_return_address(0))
+ __kmalloc_track_caller(size, flags, _RET_IP_)
#else
#define kmalloc_track_caller(size, flags) \
__kmalloc(size, flags)
@@ -243,10 +271,10 @@ extern void *__kmalloc_track_caller(size_t, gfp_t, void*);
* allocation request comes from.
*/
#if defined(CONFIG_DEBUG_SLAB) || defined(CONFIG_SLUB)
-extern void *__kmalloc_node_track_caller(size_t, gfp_t, int, void *);
+extern void *__kmalloc_node_track_caller(size_t, gfp_t, int, unsigned long);
#define kmalloc_node_track_caller(size, flags, node) \
__kmalloc_node_track_caller(size, flags, node, \
- __builtin_return_address(0))
+ _RET_IP_)
#else
#define kmalloc_node_track_caller(size, flags, node) \
__kmalloc_node(size, flags, node)
@@ -257,7 +285,7 @@ extern void *__kmalloc_node_track_caller(size_t, gfp_t, int, void *);
#define kmalloc_node_track_caller(size, flags, node) \
kmalloc_track_caller(size, flags)
-#endif /* DEBUG_SLAB */
+#endif /* CONFIG_NUMA */
/*
* Shortcuts
@@ -288,9 +316,4 @@ static inline void *kzalloc_node(size_t size, gfp_t flags, int node)
return kmalloc_node(size, flags | __GFP_ZERO, node);
}
-#ifdef CONFIG_SLABINFO
-extern const struct seq_operations slabinfo_op;
-ssize_t slabinfo_write(struct file *, const char __user *, size_t, loff_t *);
-#endif
-
#endif /* _LINUX_SLAB_H */
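
Illustrative sketch (not part of the patch): kmalloc_track_caller() now records _RET_IP_, i.e. the return address of the function invoking the macro, so a thin wrapper allocator still attributes its allocations to the real call site in the slab debug output. demo_zalloc is a hypothetical name.

#include <linux/slab.h>
#include <linux/string.h>

static void *demo_zalloc(size_t size, gfp_t flags)
{
	/* Debug builds credit the caller of demo_zalloc(), not
	 * demo_zalloc() itself, thanks to _RET_IP_. */
	void *p = kmalloc_track_caller(size, flags);

	if (p)
		memset(p, 0, size);
	return p;
}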
diff --git a/include/linux/smc911x.h b/include/linux/smc911x.h
index b58f54c2418..521f37143fa 100644
--- a/include/linux/smc911x.h
+++ b/include/linux/smc911x.h
@@ -7,6 +7,7 @@
struct smc911x_platdata {
unsigned long flags;
unsigned long irq_flags; /* IRQF_... */
+ int irq_polarity;
};
#endif /* __SMC911X_H__ */
diff --git a/include/linux/smp.h b/include/linux/smp.h
index 66484d4a845..6e7ba16ff45 100644
--- a/include/linux/smp.h
+++ b/include/linux/smp.h
@@ -7,6 +7,7 @@
*/
#include <linux/errno.h>
+#include <linux/types.h>
#include <linux/list.h>
#include <linux/cpumask.h>
@@ -16,7 +17,8 @@ struct call_single_data {
struct list_head list;
void (*func) (void *info);
void *info;
- unsigned int flags;
+ u16 flags;
+ u16 priv;
};
#ifdef CONFIG_SMP
@@ -62,8 +64,17 @@ extern void smp_cpus_done(unsigned int max_cpus);
* Call a function on all other processors
*/
int smp_call_function(void(*func)(void *info), void *info, int wait);
+/* Deprecated: use smp_call_function_many() which uses a cpumask ptr. */
int smp_call_function_mask(cpumask_t mask, void(*func)(void *info), void *info,
int wait);
+
+static inline void smp_call_function_many(const struct cpumask *mask,
+ void (*func)(void *info), void *info,
+ int wait)
+{
+ smp_call_function_mask(*mask, func, info, wait);
+}
+
int smp_call_function_single(int cpuid, void (*func) (void *info), void *info,
int wait);
void __smp_call_function_single(int cpuid, struct call_single_data *data);
@@ -135,6 +146,8 @@ static inline void smp_send_reschedule(int cpu) { }
})
#define smp_call_function_mask(mask, func, info, wait) \
(up_smp_call_function(func, info))
+#define smp_call_function_many(mask, func, info, wait) \
+ (up_smp_call_function(func, info))
static inline void init_call_single_data(void)
{
}
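
Illustrative sketch (not part of the patch): the transitional smp_call_function_many() wrapper takes a cpumask pointer, so callers can convert ahead of smp_call_function_mask() going away. The demo_* names are hypothetical.

#include <linux/smp.h>
#include <linux/cpumask.h>

static void demo_flush_local(void *info)
{
	/* runs on every CPU selected in the mask */
}

static void demo_flush_others(void)
{
	cpumask_t mask;
	int cpu;

	cpu = get_cpu();		/* keep the mask stable */
	mask = cpu_online_map;
	cpu_clear(cpu, mask);
	smp_call_function_many(&mask, demo_flush_local, NULL, 1);
	put_cpu();
}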
diff --git a/include/linux/smsc911x.h b/include/linux/smsc911x.h
new file mode 100644
index 00000000000..1cbf0313add
--- /dev/null
+++ b/include/linux/smsc911x.h
@@ -0,0 +1,47 @@
+/***************************************************************************
+ *
+ * Copyright (C) 2004-2008 SMSC
+ * Copyright (C) 2005-2008 ARM
+ *
+ * This program is free software; you can redistribute it and/or
+ * modify it under the terms of the GNU General Public License
+ * as published by the Free Software Foundation; either version 2
+ * of the License, or (at your option) any later version.
+ *
+ * This program is distributed in the hope that it will be useful,
+ * but WITHOUT ANY WARRANTY; without even the implied warranty of
+ * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
+ * GNU General Public License for more details.
+ *
+ * You should have received a copy of the GNU General Public License
+ * along with this program; if not, write to the Free Software
+ * Foundation, Inc., 59 Temple Place - Suite 330, Boston, MA 02111-1307, USA.
+ *
+ ***************************************************************************/
+#ifndef __LINUX_SMSC911X_H__
+#define __LINUX_SMSC911X_H__
+
+#include <linux/phy.h>
+
+/* platform_device configuration data, should be assigned to
+ * the platform_device's dev.platform_data */
+struct smsc911x_platform_config {
+ unsigned int irq_polarity;
+ unsigned int irq_type;
+ unsigned int flags;
+ phy_interface_t phy_interface;
+};
+
+/* Constants for platform_device irq polarity configuration */
+#define SMSC911X_IRQ_POLARITY_ACTIVE_LOW 0
+#define SMSC911X_IRQ_POLARITY_ACTIVE_HIGH 1
+
+/* Constants for platform_device irq type configuration */
+#define SMSC911X_IRQ_TYPE_OPEN_DRAIN 0
+#define SMSC911X_IRQ_TYPE_PUSH_PULL 1
+
+/* Constants for flags */
+#define SMSC911X_USE_16BIT (BIT(0))
+#define SMSC911X_USE_32BIT (BIT(1))
+
+#endif /* __LINUX_SMSC911X_H__ */
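
Illustrative sketch (not part of the patch): board code hands the configuration above to the driver through platform_data. The register window, IRQ number and PHY interface below are placeholders.

#include <linux/kernel.h>
#include <linux/ioport.h>
#include <linux/platform_device.h>
#include <linux/smsc911x.h>

static struct smsc911x_platform_config demo_smsc911x_config = {
	.irq_polarity	= SMSC911X_IRQ_POLARITY_ACTIVE_LOW,
	.irq_type	= SMSC911X_IRQ_TYPE_OPEN_DRAIN,
	.flags		= SMSC911X_USE_32BIT,
	.phy_interface	= PHY_INTERFACE_MODE_MII,
};

static struct resource demo_smsc911x_resources[] = {
	{
		.start	= 0x4e000000,	/* placeholder register window */
		.end	= 0x4e0000ff,
		.flags	= IORESOURCE_MEM,
	}, {
		.start	= 42,		/* placeholder board IRQ */
		.end	= 42,
		.flags	= IORESOURCE_IRQ,
	},
};

static struct platform_device demo_smsc911x_device = {
	.name		= "smsc911x",
	.id		= -1,
	.resource	= demo_smsc911x_resources,
	.num_resources	= ARRAY_SIZE(demo_smsc911x_resources),
	.dev		= {
		.platform_data = &demo_smsc911x_config,
	},
};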
diff --git a/include/linux/snmp.h b/include/linux/snmp.h
index 7a6e6bba4a7..aee3f1e1d1c 100644
--- a/include/linux/snmp.h
+++ b/include/linux/snmp.h
@@ -216,6 +216,9 @@ enum
LINUX_MIB_TCPSPURIOUSRTOS, /* TCPSpuriousRTOs */
LINUX_MIB_TCPMD5NOTFOUND, /* TCPMD5NotFound */
LINUX_MIB_TCPMD5UNEXPECTED, /* TCPMD5Unexpected */
+ LINUX_MIB_SACKSHIFTED,
+ LINUX_MIB_SACKMERGED,
+ LINUX_MIB_SACKSHIFTFALLBACK,
__LINUX_MIB_MAX
};
diff --git a/include/linux/spi/orion_spi.h b/include/linux/spi/orion_spi.h
index b4d9fa6f797..decf6d8c77b 100644
--- a/include/linux/spi/orion_spi.h
+++ b/include/linux/spi/orion_spi.h
@@ -11,6 +11,7 @@
struct orion_spi_info {
u32 tclk; /* no <linux/clk.h> support yet */
+ u32 enable_clock_fix;
};
diff --git a/include/linux/spi/spi_bitbang.h b/include/linux/spi/spi_bitbang.h
index b8db32cea1d..bf8de281b4e 100644
--- a/include/linux/spi/spi_bitbang.h
+++ b/include/linux/spi/spi_bitbang.h
@@ -18,6 +18,9 @@
* duplex (MicroWire) controllers. Provide chipselect() and txrx_bufs(),
* and custom setup()/cleanup() methods.
*/
+
+#include <linux/workqueue.h>
+
struct spi_bitbang {
struct workqueue_struct *workqueue;
struct work_struct work;
diff --git a/include/linux/ssb/ssb.h b/include/linux/ssb/ssb.h
index e530026eedf..17d9b58f637 100644
--- a/include/linux/ssb/ssb.h
+++ b/include/linux/ssb/ssb.h
@@ -427,12 +427,16 @@ static inline int ssb_dma_mapping_error(struct ssb_device *dev, dma_addr_t addr)
{
switch (dev->bus->bustype) {
case SSB_BUSTYPE_PCI:
+#ifdef CONFIG_SSB_PCIHOST
return pci_dma_mapping_error(dev->bus->host_pci, addr);
+#endif
+ break;
case SSB_BUSTYPE_SSB:
return dma_mapping_error(dev->dev, addr);
default:
- __ssb_dma_not_implemented(dev);
+ break;
}
+ __ssb_dma_not_implemented(dev);
return -ENOSYS;
}
@@ -441,12 +445,16 @@ static inline dma_addr_t ssb_dma_map_single(struct ssb_device *dev, void *p,
{
switch (dev->bus->bustype) {
case SSB_BUSTYPE_PCI:
+#ifdef CONFIG_SSB_PCIHOST
return pci_map_single(dev->bus->host_pci, p, size, dir);
+#endif
+ break;
case SSB_BUSTYPE_SSB:
return dma_map_single(dev->dev, p, size, dir);
default:
- __ssb_dma_not_implemented(dev);
+ break;
}
+ __ssb_dma_not_implemented(dev);
return 0;
}
@@ -455,14 +463,18 @@ static inline void ssb_dma_unmap_single(struct ssb_device *dev, dma_addr_t dma_a
{
switch (dev->bus->bustype) {
case SSB_BUSTYPE_PCI:
+#ifdef CONFIG_SSB_PCIHOST
pci_unmap_single(dev->bus->host_pci, dma_addr, size, dir);
return;
+#endif
+ break;
case SSB_BUSTYPE_SSB:
dma_unmap_single(dev->dev, dma_addr, size, dir);
return;
default:
- __ssb_dma_not_implemented(dev);
+ break;
}
+ __ssb_dma_not_implemented(dev);
}
static inline void ssb_dma_sync_single_for_cpu(struct ssb_device *dev,
@@ -472,15 +484,19 @@ static inline void ssb_dma_sync_single_for_cpu(struct ssb_device *dev,
{
switch (dev->bus->bustype) {
case SSB_BUSTYPE_PCI:
+#ifdef CONFIG_SSB_PCIHOST
pci_dma_sync_single_for_cpu(dev->bus->host_pci, dma_addr,
size, dir);
return;
+#endif
+ break;
case SSB_BUSTYPE_SSB:
dma_sync_single_for_cpu(dev->dev, dma_addr, size, dir);
return;
default:
- __ssb_dma_not_implemented(dev);
+ break;
}
+ __ssb_dma_not_implemented(dev);
}
static inline void ssb_dma_sync_single_for_device(struct ssb_device *dev,
@@ -490,15 +506,19 @@ static inline void ssb_dma_sync_single_for_device(struct ssb_device *dev,
{
switch (dev->bus->bustype) {
case SSB_BUSTYPE_PCI:
+#ifdef CONFIG_SSB_PCIHOST
pci_dma_sync_single_for_device(dev->bus->host_pci, dma_addr,
size, dir);
return;
+#endif
+ break;
case SSB_BUSTYPE_SSB:
dma_sync_single_for_device(dev->dev, dma_addr, size, dir);
return;
default:
- __ssb_dma_not_implemented(dev);
+ break;
}
+ __ssb_dma_not_implemented(dev);
}
static inline void ssb_dma_sync_single_range_for_cpu(struct ssb_device *dev,
@@ -509,17 +529,21 @@ static inline void ssb_dma_sync_single_range_for_cpu(struct ssb_device *dev,
{
switch (dev->bus->bustype) {
case SSB_BUSTYPE_PCI:
+#ifdef CONFIG_SSB_PCIHOST
/* Just sync everything. That's all the PCI API can do. */
pci_dma_sync_single_for_cpu(dev->bus->host_pci, dma_addr,
offset + size, dir);
return;
+#endif
+ break;
case SSB_BUSTYPE_SSB:
dma_sync_single_range_for_cpu(dev->dev, dma_addr, offset,
size, dir);
return;
default:
- __ssb_dma_not_implemented(dev);
+ break;
}
+ __ssb_dma_not_implemented(dev);
}
static inline void ssb_dma_sync_single_range_for_device(struct ssb_device *dev,
@@ -530,17 +554,21 @@ static inline void ssb_dma_sync_single_range_for_device(struct ssb_device *dev,
{
switch (dev->bus->bustype) {
case SSB_BUSTYPE_PCI:
+#ifdef CONFIG_SSB_PCIHOST
/* Just sync everything. That's all the PCI API can do. */
pci_dma_sync_single_for_device(dev->bus->host_pci, dma_addr,
offset + size, dir);
return;
+#endif
+ break;
case SSB_BUSTYPE_SSB:
dma_sync_single_range_for_device(dev->dev, dma_addr, offset,
size, dir);
return;
default:
- __ssb_dma_not_implemented(dev);
+ break;
}
+ __ssb_dma_not_implemented(dev);
}
diff --git a/include/linux/stacktrace.h b/include/linux/stacktrace.h
index b106fd8e0d5..1a8cecc4f38 100644
--- a/include/linux/stacktrace.h
+++ b/include/linux/stacktrace.h
@@ -15,9 +15,17 @@ extern void save_stack_trace_tsk(struct task_struct *tsk,
struct stack_trace *trace);
extern void print_stack_trace(struct stack_trace *trace, int spaces);
+
+#ifdef CONFIG_USER_STACKTRACE_SUPPORT
+extern void save_stack_trace_user(struct stack_trace *trace);
+#else
+# define save_stack_trace_user(trace) do { } while (0)
+#endif
+
#else
# define save_stack_trace(trace) do { } while (0)
# define save_stack_trace_tsk(tsk, trace) do { } while (0)
+# define save_stack_trace_user(trace) do { } while (0)
# define print_stack_trace(trace, spaces) do { } while (0)
#endif
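
Illustrative sketch (not part of the patch): with CONFIG_STACKTRACE and CONFIG_USER_STACKTRACE_SUPPORT enabled, save_stack_trace_user() fills a struct stack_trace just like the kernel-side variants (and quietly does nothing otherwise). The buffer size and demo_* name are arbitrary.

#include <linux/stacktrace.h>

static void demo_snapshot_user_stack(void)
{
	unsigned long entries[16];
	struct stack_trace trace = {
		.max_entries	= 16,
		.entries	= entries,
	};

	save_stack_trace_user(&trace);
	print_stack_trace(&trace, 0);
}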
diff --git a/include/linux/string.h b/include/linux/string.h
index 810d80df0a1..d18fc198aa2 100644
--- a/include/linux/string.h
+++ b/include/linux/string.h
@@ -1,7 +1,7 @@
#ifndef _LINUX_STRING_H_
#define _LINUX_STRING_H_
-/* We don't want strings.h stuff being user by user stuff by accident */
+/* We don't want strings.h stuff being used by user stuff by accident */
#ifndef __KERNEL__
#include <string.h>
diff --git a/include/linux/sunrpc/clnt.h b/include/linux/sunrpc/clnt.h
index 6f0ee1b84a4..c39a21040dc 100644
--- a/include/linux/sunrpc/clnt.h
+++ b/include/linux/sunrpc/clnt.h
@@ -58,6 +58,7 @@ struct rpc_clnt {
struct rpc_timeout cl_timeout_default;
struct rpc_program * cl_program;
char cl_inline_name[32];
+ char *cl_principal; /* target to authenticate to */
};
/*
@@ -108,6 +109,7 @@ struct rpc_create_args {
u32 version;
rpc_authflavor_t authflavor;
unsigned long flags;
+ char *client_name;
};
/* Values for "flags" field */
diff --git a/include/linux/sunrpc/rpc_pipe_fs.h b/include/linux/sunrpc/rpc_pipe_fs.h
index 51b977a4ca2..cea764c2359 100644
--- a/include/linux/sunrpc/rpc_pipe_fs.h
+++ b/include/linux/sunrpc/rpc_pipe_fs.h
@@ -15,6 +15,7 @@ struct rpc_pipe_ops {
ssize_t (*upcall)(struct file *, struct rpc_pipe_msg *, char __user *, size_t);
ssize_t (*downcall)(struct file *, const char __user *, size_t);
void (*release_pipe)(struct inode *);
+ int (*open_pipe)(struct inode *);
void (*destroy_msg)(struct rpc_pipe_msg *);
};
diff --git a/include/linux/sunrpc/svc_xprt.h b/include/linux/sunrpc/svc_xprt.h
index 6fd7b016517..0127daca435 100644
--- a/include/linux/sunrpc/svc_xprt.h
+++ b/include/linux/sunrpc/svc_xprt.h
@@ -139,14 +139,14 @@ static inline char *__svc_print_addr(struct sockaddr *addr,
{
switch (addr->sa_family) {
case AF_INET:
- snprintf(buf, len, "%u.%u.%u.%u, port=%u",
- NIPQUAD(((struct sockaddr_in *) addr)->sin_addr),
+ snprintf(buf, len, "%pI4, port=%u",
+ &((struct sockaddr_in *)addr)->sin_addr,
ntohs(((struct sockaddr_in *) addr)->sin_port));
break;
case AF_INET6:
- snprintf(buf, len, "%x:%x:%x:%x:%x:%x:%x:%x, port=%u",
- NIP6(((struct sockaddr_in6 *) addr)->sin6_addr),
+ snprintf(buf, len, "%pI6, port=%u",
+ &((struct sockaddr_in6 *)addr)->sin6_addr,
ntohs(((struct sockaddr_in6 *) addr)->sin6_port));
break;
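
Illustrative sketch (not part of the patch): the %pI4/%pI6 printk extensions used above take a pointer to the raw address, which is what replaces the old NIPQUAD/NIP6 argument lists. demo_log_peer is a hypothetical helper.

#include <linux/kernel.h>
#include <linux/in.h>

static void demo_log_peer(const struct sockaddr_in *sin)
{
	printk(KERN_INFO "peer %pI4\n", &sin->sin_addr);
}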
diff --git a/include/linux/sunrpc/svcauth_gss.h b/include/linux/sunrpc/svcauth_gss.h
index c9165d9771a..ca7d725861f 100644
--- a/include/linux/sunrpc/svcauth_gss.h
+++ b/include/linux/sunrpc/svcauth_gss.h
@@ -20,6 +20,7 @@ int gss_svc_init(void);
void gss_svc_shutdown(void);
int svcauth_gss_register_pseudoflavor(u32 pseudoflavor, char * name);
u32 svcauth_gss_flavor(struct auth_domain *dom);
+char *svc_gss_principal(struct svc_rqst *);
#endif /* __KERNEL__ */
#endif /* _LINUX_SUNRPC_SVCAUTH_GSS_H */
diff --git a/include/linux/sunrpc/xdr.h b/include/linux/sunrpc/xdr.h
index e4057d729f0..49e1eb45446 100644
--- a/include/linux/sunrpc/xdr.h
+++ b/include/linux/sunrpc/xdr.h
@@ -37,21 +37,6 @@ struct xdr_netobj {
typedef int (*kxdrproc_t)(void *rqstp, __be32 *data, void *obj);
/*
- * We're still requiring the BKL in the xdr code until it's been
- * more carefully audited, at which point this wrapper will become
- * unnecessary.
- */
-static inline int rpc_call_xdrproc(kxdrproc_t xdrproc, void *rqstp, __be32 *data, void *obj)
-{
- int ret;
-
- lock_kernel();
- ret = xdrproc(rqstp, data, obj);
- unlock_kernel();
- return ret;
-}
-
-/*
* Basic structure for transmission/reception of a client XDR message.
* Features a header (for a linear buffer containing RPC headers
* and the data payload for short messages), and then an array of
diff --git a/include/linux/sunrpc/xprt.h b/include/linux/sunrpc/xprt.h
index 4d80a118d53..11fc71d50c1 100644
--- a/include/linux/sunrpc/xprt.h
+++ b/include/linux/sunrpc/xprt.h
@@ -76,8 +76,7 @@ struct rpc_rqst {
struct list_head rq_list;
__u32 * rq_buffer; /* XDR encode buffer */
- size_t rq_bufsize,
- rq_callsize,
+ size_t rq_callsize,
rq_rcvsize;
struct xdr_buf rq_private_buf; /* The receive buffer
diff --git a/include/linux/sunrpc/xprtrdma.h b/include/linux/sunrpc/xprtrdma.h
index 4de56b1d372..54a379c9e8e 100644
--- a/include/linux/sunrpc/xprtrdma.h
+++ b/include/linux/sunrpc/xprtrdma.h
@@ -66,9 +66,6 @@
#define RPCRDMA_INLINE_PAD_THRESH (512)/* payload threshold to pad (bytes) */
-#define RDMA_RESOLVE_TIMEOUT (5*HZ) /* TBD 5 seconds */
-#define RDMA_CONNECT_RETRY_MAX (2) /* retries if no listener backlog */
-
/* memory registration strategies */
#define RPCRDMA_PERSISTENT_REGISTRATION (1)
@@ -78,6 +75,7 @@ enum rpcrdma_memreg {
RPCRDMA_MEMWINDOWS,
RPCRDMA_MEMWINDOWS_ASYNC,
RPCRDMA_MTHCAFMR,
+ RPCRDMA_FRMR,
RPCRDMA_ALLPHYSICAL,
RPCRDMA_LAST
};
diff --git a/include/linux/swab.h b/include/linux/swab.h
index 270d5c208a8..bbed279f3b3 100644
--- a/include/linux/swab.h
+++ b/include/linux/swab.h
@@ -47,8 +47,6 @@ static inline __attribute_const__ __u16 ___swab16(__u16 val)
{
#ifdef __arch_swab16
return __arch_swab16(val);
-#elif defined(__arch_swab16p)
- return __arch_swab16p(&val);
#else
return __const_swab16(val);
#endif
@@ -58,8 +56,6 @@ static inline __attribute_const__ __u32 ___swab32(__u32 val)
{
#ifdef __arch_swab32
return __arch_swab32(val);
-#elif defined(__arch_swab32p)
- return __arch_swab32p(&val);
#else
return __const_swab32(val);
#endif
@@ -69,8 +65,6 @@ static inline __attribute_const__ __u64 ___swab64(__u64 val)
{
#ifdef __arch_swab64
return __arch_swab64(val);
-#elif defined(__arch_swab64p)
- return __arch_swab64p(&val);
#elif defined(__SWAB_64_THRU_32__)
__u32 h = val >> 32;
__u32 l = val & ((1ULL << 32) - 1);
@@ -84,8 +78,6 @@ static inline __attribute_const__ __u32 ___swahw32(__u32 val)
{
#ifdef __arch_swahw32
return __arch_swahw32(val);
-#elif defined(__arch_swahw32p)
- return __arch_swahw32p(&val);
#else
return __const_swahw32(val);
#endif
@@ -95,8 +87,6 @@ static inline __attribute_const__ __u32 ___swahb32(__u32 val)
{
#ifdef __arch_swahb32
return __arch_swahb32(val);
-#elif defined(__arch_swahb32p)
- return __arch_swahb32p(&val);
#else
return __const_swahb32(val);
#endif
diff --git a/include/linux/swap.h b/include/linux/swap.h
index de40f169a4e..a3af95b2cb6 100644
--- a/include/linux/swap.h
+++ b/include/linux/swap.h
@@ -7,6 +7,7 @@
#include <linux/list.h>
#include <linux/memcontrol.h>
#include <linux/sched.h>
+#include <linux/node.h>
#include <asm/atomic.h>
#include <asm/page.h>
@@ -171,8 +172,10 @@ extern unsigned int nr_free_pagecache_pages(void);
/* linux/mm/swap.c */
-extern void lru_cache_add(struct page *);
-extern void lru_cache_add_active(struct page *);
+extern void __lru_cache_add(struct page *, enum lru_list lru);
+extern void lru_cache_add_lru(struct page *, enum lru_list lru);
+extern void lru_cache_add_active_or_unevictable(struct page *,
+ struct vm_area_struct *);
extern void activate_page(struct page *);
extern void mark_page_accessed(struct page *);
extern void lru_add_drain(void);
@@ -180,12 +183,38 @@ extern int lru_add_drain_all(void);
extern void rotate_reclaimable_page(struct page *page);
extern void swap_setup(void);
+extern void add_page_to_unevictable_list(struct page *page);
+
+/**
+ * lru_cache_add_anon - add a page to the inactive anon page list
+ * @page: the page to add
+ */
+static inline void lru_cache_add_anon(struct page *page)
+{
+ __lru_cache_add(page, LRU_INACTIVE_ANON);
+}
+
+static inline void lru_cache_add_active_anon(struct page *page)
+{
+ __lru_cache_add(page, LRU_ACTIVE_ANON);
+}
+
+static inline void lru_cache_add_file(struct page *page)
+{
+ __lru_cache_add(page, LRU_INACTIVE_FILE);
+}
+
+static inline void lru_cache_add_active_file(struct page *page)
+{
+ __lru_cache_add(page, LRU_ACTIVE_FILE);
+}
+
/* linux/mm/vmscan.c */
extern unsigned long try_to_free_pages(struct zonelist *zonelist, int order,
gfp_t gfp_mask);
extern unsigned long try_to_free_mem_cgroup_pages(struct mem_cgroup *mem,
gfp_t gfp_mask);
-extern int __isolate_lru_page(struct page *page, int mode);
+extern int __isolate_lru_page(struct page *page, int mode, int file);
extern unsigned long shrink_all_memory(unsigned long nr_pages);
extern int vm_swappiness;
extern int remove_mapping(struct address_space *mapping, struct page *page);
@@ -204,6 +233,34 @@ static inline int zone_reclaim(struct zone *z, gfp_t mask, unsigned int order)
}
#endif
+#ifdef CONFIG_UNEVICTABLE_LRU
+extern int page_evictable(struct page *page, struct vm_area_struct *vma);
+extern void scan_mapping_unevictable_pages(struct address_space *);
+
+extern unsigned long scan_unevictable_pages;
+extern int scan_unevictable_handler(struct ctl_table *, int, struct file *,
+ void __user *, size_t *, loff_t *);
+extern int scan_unevictable_register_node(struct node *node);
+extern void scan_unevictable_unregister_node(struct node *node);
+#else
+static inline int page_evictable(struct page *page,
+ struct vm_area_struct *vma)
+{
+ return 1;
+}
+
+static inline void scan_mapping_unevictable_pages(struct address_space *mapping)
+{
+}
+
+static inline int scan_unevictable_register_node(struct node *node)
+{
+ return 0;
+}
+
+static inline void scan_unevictable_unregister_node(struct node *node) { }
+#endif
+
extern int kswapd_run(int nid);
#ifdef CONFIG_MMU
@@ -251,6 +308,7 @@ extern sector_t swapdev_block(int, pgoff_t);
extern struct swap_info_struct *get_swap_info_struct(unsigned);
extern int can_share_swap_page(struct page *);
extern int remove_exclusive_swap_page(struct page *);
+extern int remove_exclusive_swap_page_ref(struct page *);
struct backing_dev_info;
/* linux/mm/thrash.c */
@@ -339,6 +397,11 @@ static inline int remove_exclusive_swap_page(struct page *p)
return 0;
}
+static inline int remove_exclusive_swap_page_ref(struct page *page)
+{
+ return 0;
+}
+
static inline swp_entry_t get_swap_page(void)
{
swp_entry_t entry;
diff --git a/include/linux/swiotlb.h b/include/linux/swiotlb.h
new file mode 100644
index 00000000000..325af1de035
--- /dev/null
+++ b/include/linux/swiotlb.h
@@ -0,0 +1,105 @@
+#ifndef __LINUX_SWIOTLB_H
+#define __LINUX_SWIOTLB_H
+
+#include <linux/types.h>
+
+struct device;
+struct dma_attrs;
+struct scatterlist;
+
+/*
+ * Maximum allowable number of contiguous slabs to map,
+ * must be a power of 2. What is the appropriate value ?
+ * The complexity of {map,unmap}_single is linearly dependent on this value.
+ */
+#define IO_TLB_SEGSIZE 128
+
+
+/*
+ * log of the size of each IO TLB slab. The number of slabs is command line
+ * controllable.
+ */
+#define IO_TLB_SHIFT 11
+
+extern void
+swiotlb_init(void);
+
+extern void *swiotlb_alloc_boot(size_t bytes, unsigned long nslabs);
+extern void *swiotlb_alloc(unsigned order, unsigned long nslabs);
+
+extern dma_addr_t swiotlb_phys_to_bus(phys_addr_t address);
+extern phys_addr_t swiotlb_bus_to_phys(dma_addr_t address);
+
+extern int swiotlb_arch_range_needs_mapping(void *ptr, size_t size);
+
+extern void
+*swiotlb_alloc_coherent(struct device *hwdev, size_t size,
+ dma_addr_t *dma_handle, gfp_t flags);
+
+extern void
+swiotlb_free_coherent(struct device *hwdev, size_t size,
+ void *vaddr, dma_addr_t dma_handle);
+
+extern dma_addr_t
+swiotlb_map_single(struct device *hwdev, void *ptr, size_t size, int dir);
+
+extern void
+swiotlb_unmap_single(struct device *hwdev, dma_addr_t dev_addr,
+ size_t size, int dir);
+
+extern dma_addr_t
+swiotlb_map_single_attrs(struct device *hwdev, void *ptr, size_t size,
+ int dir, struct dma_attrs *attrs);
+
+extern void
+swiotlb_unmap_single_attrs(struct device *hwdev, dma_addr_t dev_addr,
+ size_t size, int dir, struct dma_attrs *attrs);
+
+extern int
+swiotlb_map_sg(struct device *hwdev, struct scatterlist *sg, int nents,
+ int direction);
+
+extern void
+swiotlb_unmap_sg(struct device *hwdev, struct scatterlist *sg, int nents,
+ int direction);
+
+extern int
+swiotlb_map_sg_attrs(struct device *hwdev, struct scatterlist *sgl, int nelems,
+ int dir, struct dma_attrs *attrs);
+
+extern void
+swiotlb_unmap_sg_attrs(struct device *hwdev, struct scatterlist *sgl,
+ int nelems, int dir, struct dma_attrs *attrs);
+
+extern void
+swiotlb_sync_single_for_cpu(struct device *hwdev, dma_addr_t dev_addr,
+ size_t size, int dir);
+
+extern void
+swiotlb_sync_sg_for_cpu(struct device *hwdev, struct scatterlist *sg,
+ int nelems, int dir);
+
+extern void
+swiotlb_sync_single_for_device(struct device *hwdev, dma_addr_t dev_addr,
+ size_t size, int dir);
+
+extern void
+swiotlb_sync_sg_for_device(struct device *hwdev, struct scatterlist *sg,
+ int nelems, int dir);
+
+extern void
+swiotlb_sync_single_range_for_cpu(struct device *hwdev, dma_addr_t dev_addr,
+ unsigned long offset, size_t size, int dir);
+
+extern void
+swiotlb_sync_single_range_for_device(struct device *hwdev, dma_addr_t dev_addr,
+ unsigned long offset, size_t size,
+ int dir);
+
+extern int
+swiotlb_dma_mapping_error(struct device *hwdev, dma_addr_t dma_addr);
+
+extern int
+swiotlb_dma_supported(struct device *hwdev, u64 mask);
+
+#endif /* __LINUX_SWIOTLB_H */
diff --git a/include/linux/syscalls.h b/include/linux/syscalls.h
index d6ff145919c..04fb47bfb92 100644
--- a/include/linux/syscalls.h
+++ b/include/linux/syscalls.h
@@ -410,8 +410,7 @@ asmlinkage long sys_getsockopt(int fd, int level, int optname,
asmlinkage long sys_bind(int, struct sockaddr __user *, int);
asmlinkage long sys_connect(int, struct sockaddr __user *, int);
asmlinkage long sys_accept(int, struct sockaddr __user *, int __user *);
-asmlinkage long sys_paccept(int, struct sockaddr __user *, int __user *,
- const __user sigset_t *, size_t, int);
+asmlinkage long sys_accept4(int, struct sockaddr __user *, int __user *, int);
asmlinkage long sys_getsockname(int, struct sockaddr __user *, int __user *);
asmlinkage long sys_getpeername(int, struct sockaddr __user *, int __user *);
asmlinkage long sys_send(int, void __user *, size_t, unsigned);
diff --git a/include/linux/sysctl.h b/include/linux/sysctl.h
index d0437f36921..39d471d1163 100644
--- a/include/linux/sysctl.h
+++ b/include/linux/sysctl.h
@@ -972,7 +972,7 @@ extern int sysctl_perm(struct ctl_table_root *root,
typedef struct ctl_table ctl_table;
-typedef int ctl_handler (struct ctl_table *table, int __user *name, int nlen,
+typedef int ctl_handler (struct ctl_table *table,
void __user *oldval, size_t __user *oldlenp,
void __user *newval, size_t newlen);
diff --git a/include/linux/sysfs.h b/include/linux/sysfs.h
index 37fa24152bd..9d68fed50f1 100644
--- a/include/linux/sysfs.h
+++ b/include/linux/sysfs.h
@@ -21,8 +21,9 @@ struct kobject;
struct module;
/* FIXME
- * The *owner field is no longer used, but leave around
- * until the tree gets cleaned up fully.
+ * The *owner field is no longer used.
+ * x86 tree has been cleaned up. The owner
+ * attribute is still left for other arches.
*/
struct attribute {
const char *name;
@@ -78,6 +79,8 @@ struct sysfs_ops {
ssize_t (*store)(struct kobject *,struct attribute *,const char *, size_t);
};
+struct sysfs_dirent;
+
#ifdef CONFIG_SYSFS
int sysfs_schedule_callback(struct kobject *kobj, void (*func)(void *),
@@ -117,9 +120,14 @@ int sysfs_add_file_to_group(struct kobject *kobj,
void sysfs_remove_file_from_group(struct kobject *kobj,
const struct attribute *attr, const char *group);
-void sysfs_notify(struct kobject *kobj, char *dir, char *attr);
-
-extern int __must_check sysfs_init(void);
+void sysfs_notify(struct kobject *kobj, const char *dir, const char *attr);
+void sysfs_notify_dirent(struct sysfs_dirent *sd);
+struct sysfs_dirent *sysfs_get_dirent(struct sysfs_dirent *parent_sd,
+ const unsigned char *name);
+struct sysfs_dirent *sysfs_get(struct sysfs_dirent *sd);
+void sysfs_put(struct sysfs_dirent *sd);
+void sysfs_printk_last_file(void);
+int __must_check sysfs_init(void);
#else /* CONFIG_SYSFS */
@@ -222,7 +230,24 @@ static inline void sysfs_remove_file_from_group(struct kobject *kobj,
{
}
-static inline void sysfs_notify(struct kobject *kobj, char *dir, char *attr)
+static inline void sysfs_notify(struct kobject *kobj, const char *dir,
+ const char *attr)
+{
+}
+static inline void sysfs_notify_dirent(struct sysfs_dirent *sd)
+{
+}
+static inline
+struct sysfs_dirent *sysfs_get_dirent(struct sysfs_dirent *parent_sd,
+ const unsigned char *name)
+{
+ return NULL;
+}
+static inline struct sysfs_dirent *sysfs_get(struct sysfs_dirent *sd)
+{
+ return NULL;
+}
+static inline void sysfs_put(struct sysfs_dirent *sd)
{
}
@@ -231,6 +256,10 @@ static inline int __must_check sysfs_init(void)
return 0;
}
+static inline void sysfs_printk_last_file(void)
+{
+}
+
#endif /* CONFIG_SYSFS */
#endif /* _SYSFS_H_ */
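
Illustrative sketch (not part of the patch): sysfs_get_dirent()/sysfs_notify_dirent() let a hot path wake pollers of an attribute without the name lookup done by sysfs_notify(). The demo_* names and the "state" attribute are hypothetical.

#include <linux/sysfs.h>
#include <linux/kobject.h>
#include <linux/errno.h>

static struct sysfs_dirent *demo_state_sd;

static int demo_setup_notify(struct kobject *kobj)
{
	/* Look the attribute up once and keep a reference. */
	demo_state_sd = sysfs_get_dirent(kobj->sd, "state");
	return demo_state_sd ? 0 : -ENOENT;
}

static void demo_state_changed(void)
{
	if (demo_state_sd)
		sysfs_notify_dirent(demo_state_sd);
}

static void demo_teardown_notify(void)
{
	sysfs_put(demo_state_sd);
}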
diff --git a/include/linux/task_io_accounting.h b/include/linux/task_io_accounting.h
index 5e88afc9a2f..bdf855c2856 100644
--- a/include/linux/task_io_accounting.h
+++ b/include/linux/task_io_accounting.h
@@ -5,7 +5,7 @@
* Don't include this header file directly - it is designed to be dragged in via
* sched.h.
*
- * Blame akpm@osdl.org for all this.
+ * Blame Andrew Morton for all this.
*/
struct task_io_accounting {
diff --git a/include/linux/telephony.h b/include/linux/telephony.h
index 0d0cf2a1e7b..f63afe330ad 100644
--- a/include/linux/telephony.h
+++ b/include/linux/telephony.h
@@ -14,7 +14,7 @@
* Authors: Ed Okerson, <eokerson@quicknet.net>
* Greg Herlein, <gherlein@quicknet.net>
*
- * Contributors: Alan Cox, <alan@redhat.com>
+ * Contributors: Alan Cox, <alan@lxorguk.ukuu.org.uk>
* David W. Erhart, <derhart@quicknet.net>
*
* IN NO EVENT SHALL QUICKNET TECHNOLOGIES, INC. BE LIABLE TO ANY PARTY FOR
@@ -28,10 +28,6 @@
* ON AN "AS IS" BASIS, AND QUICKNET TECHNOLOGIES, INC. HAS NO OBLIGATION
* TO PROVIDE MAINTENANCE, SUPPORT, UPDATES, ENHANCEMENTS, OR MODIFICATIONS.
*
- * Version: $Revision: 4.2 $
- *
- * $Id: telephony.h,v 4.2 2001/08/06 07:09:43 craigs Exp $
- *
*****************************************************************************/
#ifndef TELEPHONY_H
diff --git a/include/linux/thread_info.h b/include/linux/thread_info.h
index 38a56477f27..e6b820f8b56 100644
--- a/include/linux/thread_info.h
+++ b/include/linux/thread_info.h
@@ -38,6 +38,14 @@ struct restart_block {
#endif
u64 expires;
} nanosleep;
+ /* For poll */
+ struct {
+ struct pollfd __user *ufds;
+ int nfds;
+ int has_timeout;
+ unsigned long tv_sec;
+ unsigned long tv_nsec;
+ } poll;
};
};
diff --git a/include/linux/tick.h b/include/linux/tick.h
index 98921a3e1aa..b6ec8189ac0 100644
--- a/include/linux/tick.h
+++ b/include/linux/tick.h
@@ -96,9 +96,11 @@ extern cpumask_t *tick_get_broadcast_oneshot_mask(void);
extern void tick_clock_notify(void);
extern int tick_check_oneshot_change(int allow_nohz);
extern struct tick_sched *tick_get_tick_sched(int cpu);
+extern void tick_check_idle(int cpu);
# else
static inline void tick_clock_notify(void) { }
static inline int tick_check_oneshot_change(int allow_nohz) { return 0; }
+static inline void tick_check_idle(int cpu) { }
# endif
#else /* CONFIG_GENERIC_CLOCKEVENTS */
@@ -106,26 +108,23 @@ static inline void tick_init(void) { }
static inline void tick_cancel_sched_timer(int cpu) { }
static inline void tick_clock_notify(void) { }
static inline int tick_check_oneshot_change(int allow_nohz) { return 0; }
+static inline void tick_check_idle(int cpu) { }
#endif /* !CONFIG_GENERIC_CLOCKEVENTS */
# ifdef CONFIG_NO_HZ
extern void tick_nohz_stop_sched_tick(int inidle);
extern void tick_nohz_restart_sched_tick(void);
-extern void tick_nohz_update_jiffies(void);
extern ktime_t tick_nohz_get_sleep_length(void);
-extern void tick_nohz_stop_idle(int cpu);
extern u64 get_cpu_idle_time_us(int cpu, u64 *last_update_time);
# else
static inline void tick_nohz_stop_sched_tick(int inidle) { }
static inline void tick_nohz_restart_sched_tick(void) { }
-static inline void tick_nohz_update_jiffies(void) { }
static inline ktime_t tick_nohz_get_sleep_length(void)
{
ktime_t len = { .tv64 = NSEC_PER_SEC/HZ };
return len;
}
-static inline void tick_nohz_stop_idle(int cpu) { }
static inline u64 get_cpu_idle_time_us(int cpu, u64 *unused) { return -1; }
# endif /* !NO_HZ */
diff --git a/include/linux/time.h b/include/linux/time.h
index e15206a7e82..ce321ac5c8f 100644
--- a/include/linux/time.h
+++ b/include/linux/time.h
@@ -29,6 +29,8 @@ struct timezone {
#ifdef __KERNEL__
+extern struct timezone sys_tz;
+
/* Parameters used to convert the timespec values: */
#define MSEC_PER_SEC 1000L
#define USEC_PER_MSEC 1000L
@@ -38,6 +40,8 @@ struct timezone {
#define NSEC_PER_SEC 1000000000L
#define FSEC_PER_SEC 1000000000000000L
+#define TIME_T_MAX (time_t)((1UL << ((sizeof(time_t) << 3) - 1)) - 1)
+
static inline int timespec_equal(const struct timespec *a,
const struct timespec *b)
{
@@ -72,6 +76,8 @@ extern unsigned long mktime(const unsigned int year, const unsigned int mon,
const unsigned int min, const unsigned int sec);
extern void set_normalized_timespec(struct timespec *ts, time_t sec, long nsec);
+extern struct timespec timespec_add_safe(const struct timespec lhs,
+ const struct timespec rhs);
/*
* sub = lhs - rhs, in normalized form
@@ -117,6 +123,7 @@ extern int do_setitimer(int which, struct itimerval *value,
extern unsigned int alarm_setitimer(unsigned int seconds);
extern int do_getitimer(int which, struct itimerval *value);
extern void getnstimeofday(struct timespec *tv);
+extern void getrawmonotonic(struct timespec *ts);
extern void getboottime(struct timespec *ts);
extern void monotonic_to_bootbased(struct timespec *ts);
@@ -125,6 +132,9 @@ extern int timekeeping_valid_for_hres(void);
extern void update_wall_time(void);
extern void update_xtime_cache(u64 nsec);
+struct tms;
+extern void do_sys_times(struct tms *);
+
/**
* timespec_to_ns - Convert timespec to nanoseconds
* @ts: pointer to the timespec variable to be converted
@@ -214,6 +224,7 @@ struct itimerval {
#define CLOCK_MONOTONIC 1
#define CLOCK_PROCESS_CPUTIME_ID 2
#define CLOCK_THREAD_CPUTIME_ID 3
+#define CLOCK_MONOTONIC_RAW 4
/*
* The IDs of various hardware clocks:
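timespec_add_safe() is declared alongside TIME_T_MAX so that timeout arithmetic (e.g. computing a poll/select deadline) saturates instead of wrapping. One plausible implementation consistent with the declarations above:

    #include <linux/time.h>

    struct timespec timespec_add_safe(const struct timespec lhs,
                                      const struct timespec rhs)
    {
            struct timespec res;

            set_normalized_timespec(&res, lhs.tv_sec + rhs.tv_sec,
                                    lhs.tv_nsec + rhs.tv_nsec);

            /* Saturate at TIME_T_MAX rather than wrapping on overflow. */
            if (res.tv_sec < lhs.tv_sec || res.tv_sec < rhs.tv_sec)
                    res.tv_sec = TIME_T_MAX;

            return res;
    }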
diff --git a/include/linux/timer.h b/include/linux/timer.h
index d4ba79248a2..daf9685b861 100644
--- a/include/linux/timer.h
+++ b/include/linux/timer.h
@@ -186,4 +186,9 @@ unsigned long __round_jiffies_relative(unsigned long j, int cpu);
unsigned long round_jiffies(unsigned long j);
unsigned long round_jiffies_relative(unsigned long j);
+unsigned long __round_jiffies_up(unsigned long j, int cpu);
+unsigned long __round_jiffies_up_relative(unsigned long j, int cpu);
+unsigned long round_jiffies_up(unsigned long j);
+unsigned long round_jiffies_up_relative(unsigned long j);
+
#endif
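The _up variants round to a whole second like round_jiffies(), but never to a value earlier than the requested expiry, which is what timeout timers want: they may fire a little late, never early. A small usage sketch (the timer and the 5 second interval are invented):

    #include <linux/jiffies.h>
    #include <linux/timer.h>

    static void arm_io_timeout(struct timer_list *timeout_timer)
    {
            /* Batch wakeups on second boundaries without ever firing
             * before the full 5 seconds have elapsed. */
            mod_timer(timeout_timer, round_jiffies_up(jiffies + 5 * HZ));
    }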
diff --git a/include/linux/timex.h b/include/linux/timex.h
index fc6035d29d5..998a55d80ac 100644
--- a/include/linux/timex.h
+++ b/include/linux/timex.h
@@ -53,47 +53,11 @@
#ifndef _LINUX_TIMEX_H
#define _LINUX_TIMEX_H
-#include <linux/compiler.h>
#include <linux/time.h>
-#include <asm/param.h>
-
#define NTP_API 4 /* NTP API version */
/*
- * SHIFT_KG and SHIFT_KF establish the damping of the PLL and are chosen
- * for a slightly underdamped convergence characteristic. SHIFT_KH
- * establishes the damping of the FLL and is chosen by wisdom and black
- * art.
- *
- * MAXTC establishes the maximum time constant of the PLL. With the
- * SHIFT_KG and SHIFT_KF values given and a time constant range from
- * zero to MAXTC, the PLL will converge in 15 minutes to 16 hours,
- * respectively.
- */
-#define SHIFT_PLL 4 /* PLL frequency factor (shift) */
-#define SHIFT_FLL 2 /* FLL frequency factor (shift) */
-#define MAXTC 10 /* maximum time constant (shift) */
-
-/*
- * SHIFT_USEC defines the scaling (shift) of the time_freq and
- * time_tolerance variables, which represent the current frequency
- * offset and maximum frequency tolerance.
- */
-#define SHIFT_USEC 16 /* frequency offset scale (shift) */
-#define PPM_SCALE (NSEC_PER_USEC << (NTP_SCALE_SHIFT - SHIFT_USEC))
-#define PPM_SCALE_INV_SHIFT 20
-#define PPM_SCALE_INV ((1ll << (PPM_SCALE_INV_SHIFT + NTP_SCALE_SHIFT)) / \
- PPM_SCALE + 1)
-
-#define MAXPHASE 500000000l /* max phase error (ns) */
-#define MAXFREQ 500000 /* max frequency error (ns/s) */
-#define MAXFREQ_SCALED ((s64)MAXFREQ << NTP_SCALE_SHIFT)
-#define MINSEC 256 /* min interval between updates (s) */
-#define MAXSEC 2048 /* max interval between updates (s) */
-#define NTP_PHASE_LIMIT ((MAXPHASE / NSEC_PER_USEC) << 5) /* beyond max. dispersion */
-
-/*
* syscall interface - used (mainly by NTP daemon)
* to discipline kernel clock oscillator
*/
@@ -141,8 +105,15 @@ struct timex {
#define ADJ_MICRO 0x1000 /* select microsecond resolution */
#define ADJ_NANO 0x2000 /* select nanosecond resolution */
#define ADJ_TICK 0x4000 /* tick value */
+
+#ifdef __KERNEL__
+#define ADJ_ADJTIME 0x8000 /* switch between adjtime/adjtimex modes */
+#define ADJ_OFFSET_SINGLESHOT 0x0001 /* old-fashioned adjtime */
+#define ADJ_OFFSET_READONLY 0x2000 /* read-only adjtime */
+#else
#define ADJ_OFFSET_SINGLESHOT 0x8001 /* old-fashioned adjtime */
-#define ADJ_OFFSET_SS_READ 0xa001 /* read-only adjtime */
+#define ADJ_OFFSET_SS_READ 0xa001 /* read-only adjtime */
+#endif
/* xntp 3.4 compatibility names */
#define MOD_OFFSET ADJ_OFFSET
@@ -192,9 +163,46 @@ struct timex {
#define TIME_BAD TIME_ERROR /* bw compat */
#ifdef __KERNEL__
+#include <linux/compiler.h>
+#include <linux/types.h>
+#include <linux/param.h>
+
#include <asm/timex.h>
/*
+ * SHIFT_KG and SHIFT_KF establish the damping of the PLL and are chosen
+ * for a slightly underdamped convergence characteristic. SHIFT_KH
+ * establishes the damping of the FLL and is chosen by wisdom and black
+ * art.
+ *
+ * MAXTC establishes the maximum time constant of the PLL. With the
+ * SHIFT_KG and SHIFT_KF values given and a time constant range from
+ * zero to MAXTC, the PLL will converge in 15 minutes to 16 hours,
+ * respectively.
+ */
+#define SHIFT_PLL 4 /* PLL frequency factor (shift) */
+#define SHIFT_FLL 2 /* FLL frequency factor (shift) */
+#define MAXTC 10 /* maximum time constant (shift) */
+
+/*
+ * SHIFT_USEC defines the scaling (shift) of the time_freq and
+ * time_tolerance variables, which represent the current frequency
+ * offset and maximum frequency tolerance.
+ */
+#define SHIFT_USEC 16 /* frequency offset scale (shift) */
+#define PPM_SCALE (NSEC_PER_USEC << (NTP_SCALE_SHIFT - SHIFT_USEC))
+#define PPM_SCALE_INV_SHIFT 19
+#define PPM_SCALE_INV ((1ll << (PPM_SCALE_INV_SHIFT + NTP_SCALE_SHIFT)) / \
+ PPM_SCALE + 1)
+
+#define MAXPHASE 500000000l /* max phase error (ns) */
+#define MAXFREQ 500000 /* max frequency error (ns/s) */
+#define MAXFREQ_SCALED ((s64)MAXFREQ << NTP_SCALE_SHIFT)
+#define MINSEC 256 /* min interval between updates (s) */
+#define MAXSEC 2048 /* max interval between updates (s) */
+#define NTP_PHASE_LIMIT ((MAXPHASE / NSEC_PER_USEC) << 5) /* beyond max. dispersion */
+
+/*
* kernel variables
* Note: maximum error = NTP synch distance = dispersion + delay / 2;
* estimated error = NTP dispersion.
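With the split above, the kernel sees the old adjtime() requests decomposed into individual bits: 0x8001 == ADJ_ADJTIME | ADJ_OFFSET_SINGLESHOT and 0xa001 == ADJ_ADJTIME | ADJ_OFFSET_READONLY | ADJ_OFFSET_SINGLESHOT, so a handler can branch on a single ADJ_ADJTIME flag. A sketch under those assumptions (function name and slewing details invented):

    #include <linux/errno.h>
    #include <linux/timex.h>

    static int handle_adjtime_modes(struct timex *txc)
    {
            if (txc->modes & ADJ_ADJTIME) {
                    /* Old-fashioned adjtime(): only the offset matters. */
                    if (!(txc->modes & ADJ_OFFSET_SINGLESHOT))
                            return -EINVAL;
                    if (!(txc->modes & ADJ_OFFSET_READONLY)) {
                            /* ... apply txc->offset as a gradual slew ... */
                    }
                    return 0;
            }

            /* Full adjtimex() path: interpret the remaining ADJ_* bits. */
            return 0;
    }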
diff --git a/include/linux/topology.h b/include/linux/topology.h
index 2158fc0d5a5..0c5b5ac36d8 100644
--- a/include/linux/topology.h
+++ b/include/linux/topology.h
@@ -49,7 +49,7 @@
for_each_online_node(node) \
if (nr_cpus_node(node))
-void arch_update_cpu_topology(void);
+int arch_update_cpu_topology(void);
/* Conform to ACPI 2.0 SLIT distance definitions */
#define LOCAL_DISTANCE 10
@@ -99,7 +99,7 @@ void arch_update_cpu_topology(void);
| SD_BALANCE_FORK \
| SD_BALANCE_EXEC \
| SD_WAKE_AFFINE \
- | SD_WAKE_IDLE \
+ | SD_WAKE_BALANCE \
| SD_SHARE_CPUPOWER, \
.last_balance = jiffies, \
.balance_interval = 1, \
@@ -120,10 +120,10 @@ void arch_update_cpu_topology(void);
.wake_idx = 1, \
.forkexec_idx = 1, \
.flags = SD_LOAD_BALANCE \
- | SD_BALANCE_NEWIDLE \
| SD_BALANCE_FORK \
| SD_BALANCE_EXEC \
| SD_WAKE_AFFINE \
+ | SD_WAKE_BALANCE \
| SD_SHARE_PKG_RESOURCES\
| BALANCE_FOR_MC_POWER, \
.last_balance = jiffies, \
@@ -146,10 +146,10 @@ void arch_update_cpu_topology(void);
.wake_idx = 1, \
.forkexec_idx = 1, \
.flags = SD_LOAD_BALANCE \
- | SD_BALANCE_NEWIDLE \
- | SD_BALANCE_FORK \
| SD_BALANCE_EXEC \
+ | SD_BALANCE_FORK \
| SD_WAKE_AFFINE \
+ | SD_WAKE_BALANCE \
| BALANCE_FOR_PKG_POWER,\
.last_balance = jiffies, \
.balance_interval = 1, \
diff --git a/include/linux/tracepoint.h b/include/linux/tracepoint.h
new file mode 100644
index 00000000000..75700545836
--- /dev/null
+++ b/include/linux/tracepoint.h
@@ -0,0 +1,156 @@
+#ifndef _LINUX_TRACEPOINT_H
+#define _LINUX_TRACEPOINT_H
+
+/*
+ * Kernel Tracepoint API.
+ *
+ * See Documentation/tracepoint.txt.
+ *
+ * (C) Copyright 2008 Mathieu Desnoyers <mathieu.desnoyers@polymtl.ca>
+ *
+ * Heavily inspired from the Linux Kernel Markers.
+ *
+ * This file is released under the GPLv2.
+ * See the file COPYING for more details.
+ */
+
+#include <linux/types.h>
+#include <linux/rcupdate.h>
+
+struct module;
+struct tracepoint;
+
+struct tracepoint {
+ const char *name; /* Tracepoint name */
+ int state; /* State. */
+ void **funcs;
+} __attribute__((aligned(32))); /*
+ * Aligned on 32 bytes because it is
+ * globally visible and gcc happily
+ * aligns these on the structure size.
+ * Keep in sync with vmlinux.lds.h.
+ */
+
+#define TPPROTO(args...) args
+#define TPARGS(args...) args
+
+#ifdef CONFIG_TRACEPOINTS
+
+/*
+ * it_func[0] is never NULL because there is at least one element in the array
+ * when the array itself is non-NULL.
+ */
+#define __DO_TRACE(tp, proto, args) \
+ do { \
+ void **it_func; \
+ \
+ rcu_read_lock_sched_notrace(); \
+ it_func = rcu_dereference((tp)->funcs); \
+ if (it_func) { \
+ do { \
+ ((void(*)(proto))(*it_func))(args); \
+ } while (*(++it_func)); \
+ } \
+ rcu_read_unlock_sched_notrace(); \
+ } while (0)
+
+/*
+ * Make sure the alignment of the structure in the __tracepoints section will
+ * not add unwanted padding between the beginning of the section and the
+ * structure. Force alignment to the same alignment as the section start.
+ */
+#define DECLARE_TRACE(name, proto, args) \
+ extern struct tracepoint __tracepoint_##name; \
+ static inline void trace_##name(proto) \
+ { \
+ if (unlikely(__tracepoint_##name.state)) \
+ __DO_TRACE(&__tracepoint_##name, \
+ TPPROTO(proto), TPARGS(args)); \
+ } \
+ static inline int register_trace_##name(void (*probe)(proto)) \
+ { \
+ return tracepoint_probe_register(#name, (void *)probe); \
+ } \
+ static inline int unregister_trace_##name(void (*probe)(proto)) \
+ { \
+ return tracepoint_probe_unregister(#name, (void *)probe);\
+ }
+
+#define DEFINE_TRACE(name) \
+ static const char __tpstrtab_##name[] \
+ __attribute__((section("__tracepoints_strings"))) = #name; \
+ struct tracepoint __tracepoint_##name \
+ __attribute__((section("__tracepoints"), aligned(32))) = \
+ { __tpstrtab_##name, 0, NULL }
+
+#define EXPORT_TRACEPOINT_SYMBOL_GPL(name) \
+ EXPORT_SYMBOL_GPL(__tracepoint_##name)
+#define EXPORT_TRACEPOINT_SYMBOL(name) \
+ EXPORT_SYMBOL(__tracepoint_##name)
+
+extern void tracepoint_update_probe_range(struct tracepoint *begin,
+ struct tracepoint *end);
+
+#else /* !CONFIG_TRACEPOINTS */
+#define DECLARE_TRACE(name, proto, args) \
+ static inline void _do_trace_##name(struct tracepoint *tp, proto) \
+ { } \
+ static inline void trace_##name(proto) \
+ { } \
+ static inline int register_trace_##name(void (*probe)(proto)) \
+ { \
+ return -ENOSYS; \
+ } \
+ static inline int unregister_trace_##name(void (*probe)(proto)) \
+ { \
+ return -ENOSYS; \
+ }
+
+#define DEFINE_TRACE(name)
+#define EXPORT_TRACEPOINT_SYMBOL_GPL(name)
+#define EXPORT_TRACEPOINT_SYMBOL(name)
+
+static inline void tracepoint_update_probe_range(struct tracepoint *begin,
+ struct tracepoint *end)
+{ }
+#endif /* CONFIG_TRACEPOINTS */
+
+/*
+ * Connect a probe to a tracepoint.
+ * Internal API, should not be used directly.
+ */
+extern int tracepoint_probe_register(const char *name, void *probe);
+
+/*
+ * Disconnect a probe from a tracepoint.
+ * Internal API, should not be used directly.
+ */
+extern int tracepoint_probe_unregister(const char *name, void *probe);
+
+extern int tracepoint_probe_register_noupdate(const char *name, void *probe);
+extern int tracepoint_probe_unregister_noupdate(const char *name, void *probe);
+extern void tracepoint_probe_update_all(void);
+
+struct tracepoint_iter {
+ struct module *module;
+ struct tracepoint *tracepoint;
+};
+
+extern void tracepoint_iter_start(struct tracepoint_iter *iter);
+extern void tracepoint_iter_next(struct tracepoint_iter *iter);
+extern void tracepoint_iter_stop(struct tracepoint_iter *iter);
+extern void tracepoint_iter_reset(struct tracepoint_iter *iter);
+extern int tracepoint_get_iter_range(struct tracepoint **tracepoint,
+ struct tracepoint *begin, struct tracepoint *end);
+
+/*
+ * tracepoint_synchronize_unregister must be called between the last tracepoint
+ * probe unregistration and the end of module exit to make sure there is no
+ * caller executing a probe when it is freed.
+ */
+static inline void tracepoint_synchronize_unregister(void)
+{
+ synchronize_sched();
+}
+
+#endif
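For reference, intended usage of the API declared above, sketched with an invented tracepoint name (subsystem, probe and call site are illustrative only):

    #include <linux/module.h>
    #include <linux/tracepoint.h>

    /* In a header: declare the tracepoint and its typed helpers. */
    DECLARE_TRACE(sample_event,
            TPPROTO(int rw, unsigned long sector),
            TPARGS(rw, sector));

    /* In exactly one .c file: emit the tracepoint itself. */
    DEFINE_TRACE(sample_event);

    /* Instrumented call site: a predicted-not-taken branch when unused. */
    void sample_path(int rw, unsigned long sector)
    {
            trace_sample_event(rw, sector);
    }

    /* A probe module attaches and detaches at runtime. */
    static void sample_probe(int rw, unsigned long sector)
    {
            /* runs in the caller's context */
    }

    static int __init sample_init(void)
    {
            return register_trace_sample_event(sample_probe);
    }

    static void __exit sample_exit(void)
    {
            unregister_trace_sample_event(sample_probe);
            /* no probe may still be running when the module goes away */
            tracepoint_synchronize_unregister();
    }
    module_init(sample_init);
    module_exit(sample_exit);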
diff --git a/include/linux/tty.h b/include/linux/tty.h
index 3b8121d4e36..3f4954c55e5 100644
--- a/include/linux/tty.h
+++ b/include/linux/tty.h
@@ -325,7 +325,7 @@ extern struct class *tty_class;
* go away
*/
-extern inline struct tty_struct *tty_kref_get(struct tty_struct *tty)
+static inline struct tty_struct *tty_kref_get(struct tty_struct *tty)
{
if (tty)
kref_get(&tty->kref);
@@ -442,6 +442,7 @@ extern void tty_audit_add_data(struct tty_struct *tty, unsigned char *data,
size_t size);
extern void tty_audit_exit(void);
extern void tty_audit_fork(struct signal_struct *sig);
+extern void tty_audit_tiocsti(struct tty_struct *tty, char ch);
extern void tty_audit_push(struct tty_struct *tty);
extern void tty_audit_push_task(struct task_struct *tsk,
uid_t loginuid, u32 sessionid);
@@ -450,6 +451,9 @@ static inline void tty_audit_add_data(struct tty_struct *tty,
unsigned char *data, size_t size)
{
}
+static inline void tty_audit_tiocsti(struct tty_struct *tty, char ch)
+{
+}
static inline void tty_audit_exit(void)
{
}
diff --git a/include/linux/types.h b/include/linux/types.h
index d4a9ce6e276..121f349cb7e 100644
--- a/include/linux/types.h
+++ b/include/linux/types.h
@@ -135,19 +135,14 @@ typedef __s64 int64_t;
*
* Linux always considers sectors to be 512 bytes long independently
* of the device's real block size.
+ *
+ * blkcnt_t is the type of the inode's block count.
*/
#ifdef CONFIG_LBD
typedef u64 sector_t;
-#else
-typedef unsigned long sector_t;
-#endif
-
-/*
- * The type of the inode's block count.
- */
-#ifdef CONFIG_LSF
typedef u64 blkcnt_t;
#else
+typedef unsigned long sector_t;
typedef unsigned long blkcnt_t;
#endif
@@ -190,13 +185,16 @@ typedef __u32 __bitwise __wsum;
#ifdef __KERNEL__
typedef unsigned __bitwise__ gfp_t;
+typedef unsigned __bitwise__ fmode_t;
-#ifdef CONFIG_RESOURCES_64BIT
-typedef u64 resource_size_t;
+#ifdef CONFIG_PHYS_ADDR_T_64BIT
+typedef u64 phys_addr_t;
#else
-typedef u32 resource_size_t;
+typedef u32 phys_addr_t;
#endif
+typedef phys_addr_t resource_size_t;
+
struct ustat {
__kernel_daddr_t f_tfree;
__kernel_ino_t f_tinode;
diff --git a/include/linux/uaccess.h b/include/linux/uaccess.h
index fec6decfb98..6b58367d145 100644
--- a/include/linux/uaccess.h
+++ b/include/linux/uaccess.h
@@ -78,7 +78,7 @@ static inline unsigned long __copy_from_user_nocache(void *to,
\
set_fs(KERNEL_DS); \
pagefault_disable(); \
- ret = __get_user(retval, (__force typeof(retval) __user *)(addr)); \
+ ret = __copy_from_user_inatomic(&(retval), (__force typeof(retval) __user *)(addr), sizeof(retval)); \
pagefault_enable(); \
set_fs(old_fs); \
ret; \
diff --git a/include/linux/usb.h b/include/linux/usb.h
index 94ac74aba6b..f72aa51f7bc 100644
--- a/include/linux/usb.h
+++ b/include/linux/usb.h
@@ -108,6 +108,7 @@ enum usb_interface_condition {
* (in probe()), bound to a driver, or unbinding (in disconnect())
* @is_active: flag set when the interface is bound and not suspended.
* @sysfs_files_created: sysfs attributes exist
+ * @unregistering: flag set when the interface is being unregistered
* @needs_remote_wakeup: flag set when the driver requires remote-wakeup
* capability during autosuspend.
* @needs_altsetting0: flag set when a set-interface request for altsetting 0
@@ -163,6 +164,7 @@ struct usb_interface {
enum usb_interface_condition condition; /* state of binding */
unsigned is_active:1; /* the interface is not suspended */
unsigned sysfs_files_created:1; /* the sysfs attributes exist */
+ unsigned unregistering:1; /* unregistration is in progress */
unsigned needs_remote_wakeup:1; /* driver requires remote wakeup */
unsigned needs_altsetting0:1; /* switch to altsetting 0 is pending */
unsigned needs_binding:1; /* needs delayed unbind/rebind */
@@ -1135,6 +1137,7 @@ struct usb_anchor {
struct list_head urb_list;
wait_queue_head_t wait;
spinlock_t lock;
+ unsigned int poisoned:1;
};
static inline void init_usb_anchor(struct usb_anchor *anchor)
@@ -1459,12 +1462,18 @@ extern struct urb *usb_get_urb(struct urb *urb);
extern int usb_submit_urb(struct urb *urb, gfp_t mem_flags);
extern int usb_unlink_urb(struct urb *urb);
extern void usb_kill_urb(struct urb *urb);
+extern void usb_poison_urb(struct urb *urb);
+extern void usb_unpoison_urb(struct urb *urb);
extern void usb_kill_anchored_urbs(struct usb_anchor *anchor);
+extern void usb_poison_anchored_urbs(struct usb_anchor *anchor);
extern void usb_unlink_anchored_urbs(struct usb_anchor *anchor);
extern void usb_anchor_urb(struct urb *urb, struct usb_anchor *anchor);
extern void usb_unanchor_urb(struct urb *urb);
extern int usb_wait_anchor_empty_timeout(struct usb_anchor *anchor,
unsigned int timeout);
+extern struct urb *usb_get_from_anchor(struct usb_anchor *anchor);
+extern void usb_scuttle_anchored_urbs(struct usb_anchor *anchor);
+extern int usb_anchor_empty(struct usb_anchor *anchor);
/**
* usb_urb_dir_in - check if an URB describes an IN transfer
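The new anchor/poison helpers are meant to make disconnect-time teardown race-free: anchored URBs can be poisoned in one pass, and a poisoned URB refuses resubmission until usb_unpoison_urb(). A hedged sketch with an invented driver structure (init_usb_anchor() would be called in probe()):

    #include <linux/usb.h>

    struct sample_dev {                      /* invented driver state */
            struct usb_anchor submitted;
    };

    static int sample_submit(struct sample_dev *dev, struct urb *urb)
    {
            int rc;

            usb_anchor_urb(urb, &dev->submitted);
            rc = usb_submit_urb(urb, GFP_KERNEL);
            if (rc)
                    usb_unanchor_urb(urb);   /* keep the anchor list accurate */
            return rc;
    }

    static void sample_disconnect_io(struct sample_dev *dev)
    {
            /* Kill outstanding URBs and mark them so completion handlers
             * cannot successfully resubmit them during teardown. */
            usb_poison_anchored_urbs(&dev->submitted);
    }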
diff --git a/include/linux/usb/Kbuild b/include/linux/usb/Kbuild
index 42e84fc315e..54c446309a2 100644
--- a/include/linux/usb/Kbuild
+++ b/include/linux/usb/Kbuild
@@ -4,4 +4,5 @@ header-y += ch9.h
header-y += gadgetfs.h
header-y += midi.h
header-y += g_printer.h
-
+header-y += tmc.h
+header-y += vstusb.h
diff --git a/include/linux/usb/cdc.h b/include/linux/usb/cdc.h
index ca228bb9421..18a729343ff 100644
--- a/include/linux/usb/cdc.h
+++ b/include/linux/usb/cdc.h
@@ -160,6 +160,15 @@ struct usb_cdc_mdlm_detail_desc {
__u8 bDetailData[0];
} __attribute__ ((packed));
+/* "OBEX Control Model Functional Descriptor" */
+struct usb_cdc_obex_desc {
+ __u8 bLength;
+ __u8 bDescriptorType;
+ __u8 bDescriptorSubType;
+
+ __le16 bcdVersion;
+} __attribute__ ((packed));
+
/*-------------------------------------------------------------------------*/
/*
diff --git a/include/linux/usb/ch9.h b/include/linux/usb/ch9.h
index 73a2f4eb1f7..9b42baed390 100644
--- a/include/linux/usb/ch9.h
+++ b/include/linux/usb/ch9.h
@@ -158,8 +158,12 @@ struct usb_ctrlrequest {
* (rarely) accepted by SET_DESCRIPTOR.
*
* Note that all multi-byte values here are encoded in little endian
- * byte order "on the wire". But when exposed through Linux-USB APIs,
- * they've been converted to cpu byte order.
+ * byte order "on the wire". Within the kernel and when exposed
+ * through the Linux-USB APIs, they are not converted to cpu byte
+ * order; it is the responsibility of the client code to do this.
+ * The single exception is when device and configuration descriptors (but
+ * not other descriptors) are read from usbfs (i.e. /proc/bus/usb/BBB/DDD);
+ * in this case the fields are converted to host endianness by the kernel.
*/
/*
diff --git a/include/linux/usb/composite.h b/include/linux/usb/composite.h
index c932390c6da..935c380ffe4 100644
--- a/include/linux/usb/composite.h
+++ b/include/linux/usb/composite.h
@@ -130,6 +130,9 @@ struct usb_function {
int usb_add_function(struct usb_configuration *, struct usb_function *);
+int usb_function_deactivate(struct usb_function *);
+int usb_function_activate(struct usb_function *);
+
int usb_interface_id(struct usb_configuration *, struct usb_function *);
/**
@@ -316,9 +319,13 @@ struct usb_composite_dev {
struct usb_composite_driver *driver;
u8 next_string_id;
- spinlock_t lock;
+ /* the gadget driver won't enable the data pullup
+ * while the deactivation count is nonzero.
+ */
+ unsigned deactivations;
- /* REVISIT use and existence of lock ... */
+ /* protects at least deactivation count */
+ spinlock_t lock;
};
extern int usb_string_id(struct usb_composite_dev *c);
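usb_function_deactivate()/usb_function_activate() and the deactivations counter let a function keep the pullup disabled until it is actually able to serve the host. Sketch of the intended pattern (the function driver and its readiness event are assumptions):

    #include <linux/usb/composite.h>

    /* In the function's bind(): block enumeration until userspace attaches. */
    static int sample_func_bind(struct usb_configuration *c,
                                struct usb_function *f)
    {
            /* ... allocate interface id, endpoints, descriptors ... */
            return usb_function_deactivate(f);      /* deactivations++ */
    }

    /* Later, when the backing service signals it is ready: */
    static void sample_func_ready(struct usb_function *f)
    {
            usb_function_activate(f);   /* pullup re-enabled when count hits 0 */
    }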
diff --git a/include/linux/usb/serial.h b/include/linux/usb/serial.h
index 655341d0f53..0b8617a9176 100644
--- a/include/linux/usb/serial.h
+++ b/include/linux/usb/serial.h
@@ -192,7 +192,7 @@ static inline void usb_set_serial_data(struct usb_serial *serial, void *data)
* The driver.owner field should be set to the module owner of this driver.
* The driver.name field should be set to the name of this driver (remember
* it will show up in sysfs, so it needs to be short and to the point.
- * Useing the module name is a good idea.)
+ * Using the module name is a good idea.)
*/
struct usb_serial_driver {
const char *description;
diff --git a/include/linux/usb/tmc.h b/include/linux/usb/tmc.h
new file mode 100644
index 00000000000..c045ae12556
--- /dev/null
+++ b/include/linux/usb/tmc.h
@@ -0,0 +1,43 @@
+/*
+ * Copyright (C) 2007 Stefan Kopp, Gechingen, Germany
+ * Copyright (C) 2008 Novell, Inc.
+ * Copyright (C) 2008 Greg Kroah-Hartman <gregkh@suse.de>
+ *
+ * This file holds USB constants defined by the USB Device Class
+ * Definition for Test and Measurement devices published by the USB-IF.
+ *
+ * It also has the ioctl definitions for the usbtmc kernel driver that
+ * userspace needs to know about.
+ */
+
+#ifndef __LINUX_USB_TMC_H
+#define __LINUX_USB_TMC_H
+
+/* USB TMC status values */
+#define USBTMC_STATUS_SUCCESS 0x01
+#define USBTMC_STATUS_PENDING 0x02
+#define USBTMC_STATUS_FAILED 0x80
+#define USBTMC_STATUS_TRANSFER_NOT_IN_PROGRESS 0x81
+#define USBTMC_STATUS_SPLIT_NOT_IN_PROGRESS 0x82
+#define USBTMC_STATUS_SPLIT_IN_PROGRESS 0x83
+
+/* USB TMC request values */
+#define USBTMC_REQUEST_INITIATE_ABORT_BULK_OUT 1
+#define USBTMC_REQUEST_CHECK_ABORT_BULK_OUT_STATUS 2
+#define USBTMC_REQUEST_INITIATE_ABORT_BULK_IN 3
+#define USBTMC_REQUEST_CHECK_ABORT_BULK_IN_STATUS 4
+#define USBTMC_REQUEST_INITIATE_CLEAR 5
+#define USBTMC_REQUEST_CHECK_CLEAR_STATUS 6
+#define USBTMC_REQUEST_GET_CAPABILITIES 7
+#define USBTMC_REQUEST_INDICATOR_PULSE 64
+
+/* Request values for USBTMC driver's ioctl entry point */
+#define USBTMC_IOC_NR 91
+#define USBTMC_IOCTL_INDICATOR_PULSE _IO(USBTMC_IOC_NR, 1)
+#define USBTMC_IOCTL_CLEAR _IO(USBTMC_IOC_NR, 2)
+#define USBTMC_IOCTL_ABORT_BULK_OUT _IO(USBTMC_IOC_NR, 3)
+#define USBTMC_IOCTL_ABORT_BULK_IN _IO(USBTMC_IOC_NR, 4)
+#define USBTMC_IOCTL_CLEAR_OUT_HALT _IO(USBTMC_IOC_NR, 6)
+#define USBTMC_IOCTL_CLEAR_IN_HALT _IO(USBTMC_IOC_NR, 7)
+
+#endif
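From userspace these ioctls are issued on the usbtmc character device; a short sketch (the /dev/usbtmc0 node name is an assumption):

    #include <fcntl.h>
    #include <unistd.h>
    #include <sys/ioctl.h>
    #include <linux/usb/tmc.h>

    /* Issue a USBTMC "device clear" to recover a stuck instrument. */
    int usbtmc_device_clear(const char *node)
    {
            int fd = open(node, O_RDWR);
            int rc;

            if (fd < 0)
                    return -1;
            rc = ioctl(fd, USBTMC_IOCTL_CLEAR);
            close(fd);
            return rc;
    }

e.g. usbtmc_device_clear("/dev/usbtmc0").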
diff --git a/include/linux/usb/vstusb.h b/include/linux/usb/vstusb.h
new file mode 100644
index 00000000000..1cfac67191f
--- /dev/null
+++ b/include/linux/usb/vstusb.h
@@ -0,0 +1,71 @@
+/*****************************************************************************
+ * File: drivers/usb/misc/vstusb.h
+ *
+ * Purpose: Support for the bulk USB Vernier Spectrophotometers
+ *
+ * Author: EQware Engineering, Inc.
+ * Oregon City, OR, USA 97045
+ *
+ * Copyright: 2007, 2008
+ * Vernier Software & Technology
+ * Beaverton, OR, USA 97005
+ *
+ * Web: www.vernier.com
+ *
+ * This program is free software; you can redistribute it and/or modify
+ * it under the terms of the GNU General Public License version 2 as
+ * published by the Free Software Foundation.
+ *
+ *****************************************************************************/
+/*****************************************************************************
+ *
+ * The vstusb module is a standard usb 'client' driver running on top of the
+ * standard usb host controller stack.
+ *
+ * In general, vstusb supports standard bulk usb pipes. It supports multiple
+ * devices and multiple pipes per device.
+ *
+ * The vstusb driver supports two interfaces:
+ * 1 - ioctl SEND_PIPE/RECV_PIPE - a general bulk write/read msg
+ * interface to any pipe with timeout support;
+ * 2 - standard read/write with ioctl config - offers standard read/write
+ * interface with ioctl configured pipes and timeouts.
+ *
+ * Both interfaces can be interrupted by a signal from another process,
+ * which will abort the i/o operation in progress.
+ *
+ * A timeout of 0 means NO timeout. The user can still terminate the read via
+ * signal.
+ *
+ * If using multiple threads with this driver, the user should ensure that
+ * any reads, writes, or ioctls are complete before closing the device.
+ * Changing read/write timeouts or pipes takes effect on next read/write.
+ *
+ *****************************************************************************/
+
+struct vstusb_args {
+ union {
+ /* this struct is used for IOCTL_VSTUSB_SEND_PIPE, *
+ * IOCTL_VSTUSB_RECV_PIPE, and read()/write() fops */
+ struct {
+ void __user *buffer;
+ size_t count;
+ unsigned int timeout_ms;
+ int pipe;
+ };
+
+ /* this one is used for IOCTL_VSTUSB_CONFIG_RW */
+ struct {
+ int rd_pipe;
+ int rd_timeout_ms;
+ int wr_pipe;
+ int wr_timeout_ms;
+ };
+ };
+};
+
+#define VST_IOC_MAGIC 'L'
+#define VST_IOC_FIRST 0x20
+#define IOCTL_VSTUSB_SEND_PIPE _IO(VST_IOC_MAGIC, VST_IOC_FIRST)
+#define IOCTL_VSTUSB_RECV_PIPE _IO(VST_IOC_MAGIC, VST_IOC_FIRST + 1)
+#define IOCTL_VSTUSB_CONFIG_RW _IO(VST_IOC_MAGIC, VST_IOC_FIRST + 2)
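A userspace sketch of the SEND_PIPE interface described in the comment block above (pipe number and timeout are placeholders; the open() of the device node is omitted):

    #include <string.h>
    #include <sys/ioctl.h>
    #include <linux/usb/vstusb.h>

    static int vstusb_send(int fd, int pipe, void *buf, size_t len)
    {
            struct vstusb_args args;

            memset(&args, 0, sizeof(args));
            args.buffer     = buf;
            args.count      = len;
            args.timeout_ms = 1000;          /* 0 would mean "no timeout" */
            args.pipe       = pipe;

            return ioctl(fd, IOCTL_VSTUSB_SEND_PIPE, &args);
    }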
diff --git a/include/linux/usb/wusb-wa.h b/include/linux/usb/wusb-wa.h
new file mode 100644
index 00000000000..a102561e702
--- /dev/null
+++ b/include/linux/usb/wusb-wa.h
@@ -0,0 +1,271 @@
+/*
+ * Wireless USB Wire Adapter constants and structures.
+ *
+ * Copyright (C) 2005-2006 Intel Corporation.
+ * Inaky Perez-Gonzalez <inaky.perez-gonzalez@intel.com>
+ *
+ * This program is free software; you can redistribute it and/or
+ * modify it under the terms of the GNU General Public License version
+ * 2 as published by the Free Software Foundation.
+ *
+ * This program is distributed in the hope that it will be useful,
+ * but WITHOUT ANY WARRANTY; without even the implied warranty of
+ * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
+ * GNU General Public License for more details.
+ *
+ * You should have received a copy of the GNU General Public License
+ * along with this program; if not, write to the Free Software
+ * Foundation, Inc., 51 Franklin Street, Fifth Floor, Boston, MA
+ * 02110-1301, USA.
+ *
+ *
+ * FIXME: docs
+ * FIXME: organize properly, group logically
+ *
+ * All the event structures are defined in uwb/spec.h, as they are
+ * common to the WHCI and WUSB radio control interfaces.
+ *
+ * References:
+ * [WUSB] Wireless Universal Serial Bus Specification, revision 1.0, ch8
+ */
+#ifndef __LINUX_USB_WUSB_WA_H
+#define __LINUX_USB_WUSB_WA_H
+
+/**
+ * Radio Command Request for the Radio Control Interface
+ *
+ * Radio Control Interface command and event codes are the same as
+ * WHCI, and listed in include/linux/uwb.h:UWB_RC_{CMD,EVT}_*
+ */
+enum {
+ WA_EXEC_RC_CMD = 40, /* Radio Control command Request */
+};
+
+/* Wireless Adapter Requests ([WUSB] table 8-51) */
+enum {
+ WUSB_REQ_ADD_MMC_IE = 20,
+ WUSB_REQ_REMOVE_MMC_IE = 21,
+ WUSB_REQ_SET_NUM_DNTS = 22,
+ WUSB_REQ_SET_CLUSTER_ID = 23,
+ WUSB_REQ_SET_DEV_INFO = 24,
+ WUSB_REQ_GET_TIME = 25,
+ WUSB_REQ_SET_STREAM_IDX = 26,
+ WUSB_REQ_SET_WUSB_MAS = 27,
+};
+
+
+/* Wireless Adapter WUSB Channel Time types ([WUSB] table 8-52) */
+enum {
+ WUSB_TIME_ADJ = 0,
+ WUSB_TIME_BPST = 1,
+ WUSB_TIME_WUSB = 2,
+};
+
+enum {
+ WA_ENABLE = 0x01,
+ WA_RESET = 0x02,
+ RPIPE_PAUSE = 0x1,
+};
+
+/* Responses from Get Status request ([WUSB] section 8.3.1.6) */
+enum {
+ WA_STATUS_ENABLED = 0x01,
+ WA_STATUS_RESETTING = 0x02
+};
+
+enum rpipe_crs {
+ RPIPE_CRS_CTL = 0x01,
+ RPIPE_CRS_ISO = 0x02,
+ RPIPE_CRS_BULK = 0x04,
+ RPIPE_CRS_INTR = 0x08
+};
+
+/**
+ * RPipe descriptor ([WUSB] section 8.5.2.11)
+ *
+ * FIXME: explain rpipes
+ */
+struct usb_rpipe_descriptor {
+ u8 bLength;
+ u8 bDescriptorType;
+ __le16 wRPipeIndex;
+ __le16 wRequests;
+ __le16 wBlocks; /* rw if 0 */
+ __le16 wMaxPacketSize; /* rw? */
+ u8 bHSHubAddress; /* reserved: 0 */
+ u8 bHSHubPort; /* ??? FIXME ??? */
+ u8 bSpeed; /* rw: xfer rate 'enum uwb_phy_rate' */
+ u8 bDeviceAddress; /* rw: Target device address */
+ u8 bEndpointAddress; /* rw: Target EP address */
+ u8 bDataSequence; /* ro: Current Data sequence */
+ __le32 dwCurrentWindow; /* ro */
+ u8 bMaxDataSequence; /* ro?: max supported seq */
+ u8 bInterval; /* rw: */
+ u8 bOverTheAirInterval; /* rw: */
+ u8 bmAttribute; /* ro? */
+ u8 bmCharacteristics; /* ro? enum rpipe_attr, supported xsactions */
+ u8 bmRetryOptions; /* rw? */
+ __le16 wNumTransactionErrors; /* rw */
+} __attribute__ ((packed));
+
+/**
+ * Wire Adapter Notification types ([WUSB] sections 8.4.5 & 8.5.4)
+ *
+ * These are the notifications coming on the notification endpoint of
+ * an HWA and a DWA.
+ */
+enum wa_notif_type {
+ DWA_NOTIF_RWAKE = 0x91,
+ DWA_NOTIF_PORTSTATUS = 0x92,
+ WA_NOTIF_TRANSFER = 0x93,
+ HWA_NOTIF_BPST_ADJ = 0x94,
+ HWA_NOTIF_DN = 0x95,
+};
+
+/**
+ * Wire Adapter notification header
+ *
+ * Notifications coming from a wire adapter use a common header
+ * defined in [WUSB] sections 8.4.5 & 8.5.4.
+ */
+struct wa_notif_hdr {
+ u8 bLength;
+ u8 bNotifyType; /* enum wa_notif_type */
+} __attribute__((packed));
+
+/**
+ * HWA DN Received notification ([WUSB] section 8.5.4.2)
+ *
+ * The DNData is specified in WUSB1.0[7.6]. For each device
+ * notification we received, we just need to dispatch it.
+ *
+ * @dndata: this is really an array of notifications, but all start
+ * with the same header.
+ */
+struct hwa_notif_dn {
+ struct wa_notif_hdr hdr;
+ u8 bSourceDeviceAddr; /* from errata 2005/07 */
+ u8 bmAttributes;
+ struct wusb_dn_hdr dndata[];
+} __attribute__((packed));
+
+/* [WUSB] section 8.3.3 */
+enum wa_xfer_type {
+ WA_XFER_TYPE_CTL = 0x80,
+ WA_XFER_TYPE_BI = 0x81, /* bulk/interrupt */
+ WA_XFER_TYPE_ISO = 0x82,
+ WA_XFER_RESULT = 0x83,
+ WA_XFER_ABORT = 0x84,
+};
+
+/* [WUSB] section 8.3.3 */
+struct wa_xfer_hdr {
+ u8 bLength; /* 0x18 */
+ u8 bRequestType; /* 0x80 WA_REQUEST_TYPE_CTL */
+ __le16 wRPipe; /* RPipe index */
+ __le32 dwTransferID; /* Host-assigned ID */
+ __le32 dwTransferLength; /* Length of data to xfer */
+ u8 bTransferSegment;
+} __attribute__((packed));
+
+struct wa_xfer_ctl {
+ struct wa_xfer_hdr hdr;
+ u8 bmAttribute;
+ __le16 wReserved;
+ struct usb_ctrlrequest baSetupData;
+} __attribute__((packed));
+
+struct wa_xfer_bi {
+ struct wa_xfer_hdr hdr;
+ u8 bReserved;
+ __le16 wReserved;
+} __attribute__((packed));
+
+struct wa_xfer_hwaiso {
+ struct wa_xfer_hdr hdr;
+ u8 bReserved;
+ __le16 wPresentationTime;
+ __le32 dwNumOfPackets;
+ /* FIXME: u8 pktdata[]? */
+} __attribute__((packed));
+
+/* [WUSB] section 8.3.3.5 */
+struct wa_xfer_abort {
+ u8 bLength;
+ u8 bRequestType;
+ __le16 wRPipe; /* RPipe index */
+ __le32 dwTransferID; /* Host-assigned ID */
+} __attribute__((packed));
+
+/**
+ * WA Transfer Complete notification ([WUSB] section 8.3.3.3)
+ *
+ */
+struct wa_notif_xfer {
+ struct wa_notif_hdr hdr;
+ u8 bEndpoint;
+ u8 Reserved;
+} __attribute__((packed));
+
+/** Transfer result basic codes [WUSB] table 8-15 */
+enum {
+ WA_XFER_STATUS_SUCCESS,
+ WA_XFER_STATUS_HALTED,
+ WA_XFER_STATUS_DATA_BUFFER_ERROR,
+ WA_XFER_STATUS_BABBLE,
+ WA_XFER_RESERVED,
+ WA_XFER_STATUS_NOT_FOUND,
+ WA_XFER_STATUS_INSUFFICIENT_RESOURCE,
+ WA_XFER_STATUS_TRANSACTION_ERROR,
+ WA_XFER_STATUS_ABORTED,
+ WA_XFER_STATUS_RPIPE_NOT_READY,
+ WA_XFER_INVALID_FORMAT,
+ WA_XFER_UNEXPECTED_SEGMENT_NUMBER,
+ WA_XFER_STATUS_RPIPE_TYPE_MISMATCH,
+};
+
+/** [WUSB] section 8.3.3.4 */
+struct wa_xfer_result {
+ struct wa_notif_hdr hdr;
+ __le32 dwTransferID;
+ __le32 dwTransferLength;
+ u8 bTransferSegment;
+ u8 bTransferStatus;
+ __le32 dwNumOfPackets;
+} __attribute__((packed));
+
+/**
+ * Wire Adapter Class Descriptor ([WUSB] section 8.5.2.7).
+ *
+ * NOTE: u16 fields are read Little Endian from the hardware.
+ *
+ * @bNumPorts is the original max number of devices that the host can
+ * connect; we might chop this so the stack can handle
+ * it. In case you need to access it, use wusbhc->ports_max
+ * if it is a Wireless USB WA.
+ */
+struct usb_wa_descriptor {
+ u8 bLength;
+ u8 bDescriptorType;
+ u16 bcdWAVersion;
+ u8 bNumPorts; /* don't use!! */
+ u8 bmAttributes; /* Reserved == 0 */
+ u16 wNumRPipes;
+ u16 wRPipeMaxBlock;
+ u8 bRPipeBlockSize;
+ u8 bPwrOn2PwrGood;
+ u8 bNumMMCIEs;
+ u8 DeviceRemovable; /* FIXME: in DWA this is up to 16 bytes */
+} __attribute__((packed));
+
+/**
+ * HWA Device Information Buffer (WUSB1.0[T8.54])
+ */
+struct hwa_dev_info {
+ u8 bmDeviceAvailability[32]; /* FIXME: ignored for now */
+ u8 bDeviceAddress;
+ __le16 wPHYRates;
+ u8 bmDeviceAttribute;
+} __attribute__((packed));
+
+#endif /* #ifndef __LINUX_USB_WUSB_WA_H */
diff --git a/include/linux/usb/wusb.h b/include/linux/usb/wusb.h
new file mode 100644
index 00000000000..5f401b644ed
--- /dev/null
+++ b/include/linux/usb/wusb.h
@@ -0,0 +1,376 @@
+/*
+ * Wireless USB Standard Definitions
+ * Event Size Tables
+ *
+ * Copyright (C) 2005-2006 Intel Corporation
+ * Inaky Perez-Gonzalez <inaky.perez-gonzalez@intel.com>
+ *
+ * This program is free software; you can redistribute it and/or
+ * modify it under the terms of the GNU General Public License version
+ * 2 as published by the Free Software Foundation.
+ *
+ * This program is distributed in the hope that it will be useful,
+ * but WITHOUT ANY WARRANTY; without even the implied warranty of
+ * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
+ * GNU General Public License for more details.
+ *
+ * You should have received a copy of the GNU General Public License
+ * along with this program; if not, write to the Free Software
+ * Foundation, Inc., 51 Franklin Street, Fifth Floor, Boston, MA
+ * 02110-1301, USA.
+ *
+ *
+ * FIXME: docs
+ * FIXME: organize properly, group logically
+ *
+ * All the event structures are defined in uwb/spec.h, as they are
+ * common to the WHCI and WUSB radio control interfaces.
+ */
+
+#ifndef __WUSB_H__
+#define __WUSB_H__
+
+#include <linux/types.h>
+#include <linux/kernel.h>
+#include <linux/uwb/spec.h>
+#include <linux/usb/ch9.h>
+#include <linux/param.h>
+
+/**
+ * WUSB Information Element header
+ *
+ * I don't know why they decided to make it different from the MBOA MAC
+ * IE Header; beats me.
+ */
+struct wuie_hdr {
+ u8 bLength;
+ u8 bIEIdentifier;
+} __attribute__((packed));
+
+enum {
+ WUIE_ID_WCTA = 0x80,
+ WUIE_ID_CONNECTACK,
+ WUIE_ID_HOST_INFO,
+ WUIE_ID_CHANGE_ANNOUNCE,
+ WUIE_ID_DEVICE_DISCONNECT,
+ WUIE_ID_HOST_DISCONNECT,
+ WUIE_ID_KEEP_ALIVE = 0x89,
+ WUIE_ID_ISOCH_DISCARD,
+ WUIE_ID_RESET_DEVICE,
+};
+
+/**
+ * Maximum number of array elements in a WUSB IE.
+ *
+ * WUSB1.0[7.5, before table 7-38] says that WUSB IEs that are
+ * "arrays" have to be limited to 4 elements. So we define it
+ * like that to ease up and submit only the needed size.
+ */
+#define WUIE_ELT_MAX 4
+
+/**
+ * Wrapper for the data that defines a CHID, a CDID or a CK
+ *
+ * WUSB defines that CHIDs, CDIDs and CKs are a 16 byte string of
+ * data. In order to avoid confusion and enforce types, we wrap it.
+ *
+ * Make it packed, as we use it in some hw definitions.
+ */
+struct wusb_ckhdid {
+ u8 data[16];
+} __attribute__((packed));
+
+static const
+struct wusb_ckhdid wusb_ckhdid_zero = { .data = { 0 } };
+
+#define WUSB_CKHDID_STRSIZE (3 * sizeof(struct wusb_ckhdid) + 1)
+
+/**
+ * WUSB IE: Host Information (WUSB1.0[7.5.2])
+ *
+ * Used to provide information about the host to the Wireless USB
+ * devices in range (CHID can be used as an ASCII string).
+ */
+struct wuie_host_info {
+ struct wuie_hdr hdr;
+ __le16 attributes;
+ struct wusb_ckhdid CHID;
+} __attribute__((packed));
+
+/**
+ * WUSB IE: Connect Ack (WUSB1.0[7.5.1])
+ *
+ * Used to acknowledge device connect requests. See note for
+ * WUIE_ELT_MAX.
+ */
+struct wuie_connect_ack {
+ struct wuie_hdr hdr;
+ struct {
+ struct wusb_ckhdid CDID;
+ u8 bDeviceAddress; /* 0 means unused */
+ u8 bReserved;
+ } blk[WUIE_ELT_MAX];
+} __attribute__((packed));
+
+/**
+ * WUSB IE Host Information Element, Connect Availability
+ *
+ * WUSB1.0[7.5.2], bmAttributes description
+ */
+enum {
+ WUIE_HI_CAP_RECONNECT = 0,
+ WUIE_HI_CAP_LIMITED,
+ WUIE_HI_CAP_RESERVED,
+ WUIE_HI_CAP_ALL,
+};
+
+/**
+ * WUSB IE: Channel Stop (WUSB1.0[7.5.8])
+ *
+ * Tells devices the host is going to stop sending MMCs and will disappear.
+ */
+struct wuie_channel_stop {
+ struct wuie_hdr hdr;
+ u8 attributes;
+ u8 timestamp[3];
+} __attribute__((packed));
+
+/**
+ * WUSB IE: Keepalive (WUSB1.0[7.5.9])
+ *
+ * Ask device(s) to send keepalives.
+ */
+struct wuie_keep_alive {
+ struct wuie_hdr hdr;
+ u8 bDeviceAddress[WUIE_ELT_MAX];
+} __attribute__((packed));
+
+/**
+ * WUSB IE: Reset device (WUSB1.0[7.5.11])
+ *
+ * Tell device to reset; in all truth, we can fit 4 CDIDs, but we only
+ * use it for one at a time...
+ *
+ * In any case, this request is a wee bit silly: why don't they target
+ * by address??
+ */
+struct wuie_reset {
+ struct wuie_hdr hdr;
+ struct wusb_ckhdid CDID;
+} __attribute__((packed));
+
+/**
+ * WUSB IE: Disconnect device (WUSB1.0[7.5.11])
+ *
+ * Tell device to disconnect; we can fit 4 addresses, but we only use
+ * it for one at a time...
+ */
+struct wuie_disconnect {
+ struct wuie_hdr hdr;
+ u8 bDeviceAddress;
+ u8 padding;
+} __attribute__((packed));
+
+/**
+ * WUSB IE: Host disconnect ([WUSB] section 7.5.5)
+ *
+ * Tells all connected devices to disconnect.
+ */
+struct wuie_host_disconnect {
+ struct wuie_hdr hdr;
+} __attribute__((packed));
+
+/**
+ * WUSB Device Notification header (WUSB1.0[7.6])
+ */
+struct wusb_dn_hdr {
+ u8 bType;
+ u8 notifdata[];
+} __attribute__((packed));
+
+/** Device Notification codes (WUSB1.0[Table 7-54]) */
+enum WUSB_DN {
+ WUSB_DN_CONNECT = 0x01,
+ WUSB_DN_DISCONNECT = 0x02,
+ WUSB_DN_EPRDY = 0x03,
+ WUSB_DN_MASAVAILCHANGED = 0x04,
+ WUSB_DN_RWAKE = 0x05,
+ WUSB_DN_SLEEP = 0x06,
+ WUSB_DN_ALIVE = 0x07,
+};
+
+/** WUSB Device Notification Connect */
+struct wusb_dn_connect {
+ struct wusb_dn_hdr hdr;
+ __le16 attributes;
+ struct wusb_ckhdid CDID;
+} __attribute__((packed));
+
+static inline int wusb_dn_connect_prev_dev_addr(const struct wusb_dn_connect *dn)
+{
+ return le16_to_cpu(dn->attributes) & 0xff;
+}
+
+static inline int wusb_dn_connect_new_connection(const struct wusb_dn_connect *dn)
+{
+ return (le16_to_cpu(dn->attributes) >> 8) & 0x1;
+}
+
+static inline int wusb_dn_connect_beacon_behavior(const struct wusb_dn_connect *dn)
+{
+ return (le16_to_cpu(dn->attributes) >> 9) & 0x03;
+}
+
+/** Device is alive (aka: pong) (WUSB1.0[7.6.7]) */
+struct wusb_dn_alive {
+ struct wusb_dn_hdr hdr;
+} __attribute__((packed));
+
+/** Device is disconnecting (WUSB1.0[7.6.2]) */
+struct wusb_dn_disconnect {
+ struct wusb_dn_hdr hdr;
+} __attribute__((packed));
+
+/* General constants */
+enum {
+ WUSB_TRUST_TIMEOUT_MS = 4000, /* [WUSB] section 4.15.1 */
+};
+
+static inline size_t ckhdid_printf(char *pr_ckhdid, size_t size,
+ const struct wusb_ckhdid *ckhdid)
+{
+ return scnprintf(pr_ckhdid, size,
+ "%02hx %02hx %02hx %02hx %02hx %02hx %02hx %02hx "
+ "%02hx %02hx %02hx %02hx %02hx %02hx %02hx %02hx",
+ ckhdid->data[0], ckhdid->data[1],
+ ckhdid->data[2], ckhdid->data[3],
+ ckhdid->data[4], ckhdid->data[5],
+ ckhdid->data[6], ckhdid->data[7],
+ ckhdid->data[8], ckhdid->data[9],
+ ckhdid->data[10], ckhdid->data[11],
+ ckhdid->data[12], ckhdid->data[13],
+ ckhdid->data[14], ckhdid->data[15]);
+}
+
+/*
+ * WUSB Crypto stuff (WUSB1.0[6])
+ */
+
+extern const char *wusb_et_name(u8);
+
+/**
+ * WUSB key index WUSB1.0[7.3.2.4], for usage when setting keys for
+ * the host or the device.
+ */
+static inline u8 wusb_key_index(int index, int type, int originator)
+{
+ return (originator << 6) | (type << 4) | index;
+}
+
+#define WUSB_KEY_INDEX_TYPE_PTK 0 /* for HWA only */
+#define WUSB_KEY_INDEX_TYPE_ASSOC 1
+#define WUSB_KEY_INDEX_TYPE_GTK 2
+#define WUSB_KEY_INDEX_ORIGINATOR_HOST 0
+#define WUSB_KEY_INDEX_ORIGINATOR_DEVICE 1
+
+/* A CCM Nonce, defined in WUSB1.0[6.4.1] */
+struct aes_ccm_nonce {
+ u8 sfn[6]; /* Little Endian */
+ u8 tkid[3]; /* LE */
+ struct uwb_dev_addr dest_addr;
+ struct uwb_dev_addr src_addr;
+} __attribute__((packed));
+
+/* A CCM operation label, defined on WUSB1.0[6.5.x] */
+struct aes_ccm_label {
+ u8 data[14];
+} __attribute__((packed));
+
+/*
+ * Input to the key derivation sequence defined in
+ * WUSB1.0[6.5.1]. Rest of the data is in the CCM Nonce passed to the
+ * PRF function.
+ */
+struct wusb_keydvt_in {
+ u8 hnonce[16];
+ u8 dnonce[16];
+} __attribute__((packed));
+
+/*
+ * Output from the key derivation sequence defined in
+ * WUSB1.0[6.5.1].
+ */
+struct wusb_keydvt_out {
+ u8 kck[16];
+ u8 ptk[16];
+} __attribute__((packed));
+
+/* Pseudo Random Function WUSB1.0[6.5] */
+extern int wusb_crypto_init(void);
+extern void wusb_crypto_exit(void);
+extern ssize_t wusb_prf(void *out, size_t out_size,
+ const u8 key[16], const struct aes_ccm_nonce *_n,
+ const struct aes_ccm_label *a,
+ const void *b, size_t blen, size_t len);
+
+static inline int wusb_prf_64(void *out, size_t out_size, const u8 key[16],
+ const struct aes_ccm_nonce *n,
+ const struct aes_ccm_label *a,
+ const void *b, size_t blen)
+{
+ return wusb_prf(out, out_size, key, n, a, b, blen, 64);
+}
+
+static inline int wusb_prf_128(void *out, size_t out_size, const u8 key[16],
+ const struct aes_ccm_nonce *n,
+ const struct aes_ccm_label *a,
+ const void *b, size_t blen)
+{
+ return wusb_prf(out, out_size, key, n, a, b, blen, 128);
+}
+
+static inline int wusb_prf_256(void *out, size_t out_size, const u8 key[16],
+ const struct aes_ccm_nonce *n,
+ const struct aes_ccm_label *a,
+ const void *b, size_t blen)
+{
+ return wusb_prf(out, out_size, key, n, a, b, blen, 256);
+}
+
+/* Key derivation WUSB1.0[6.5.1] */
+static inline int wusb_key_derive(struct wusb_keydvt_out *keydvt_out,
+ const u8 key[16],
+ const struct aes_ccm_nonce *n,
+ const struct wusb_keydvt_in *keydvt_in)
+{
+ const struct aes_ccm_label a = { .data = "Pair-wise keys" };
+ return wusb_prf_256(keydvt_out, sizeof(*keydvt_out), key, n, &a,
+ keydvt_in, sizeof(*keydvt_in));
+}
+
+/*
+ * Out-of-band MIC Generation WUSB1.0[6.5.2]
+ *
+ * Compute the MIC over @key, @n and @hs and place it in @mic_out.
+ *
+ * @mic_out: Where to place the 8 byte MIC tag
+ * @key: KCK from the derivation process
+ * @n: CCM nonce, n->sfn == 0, TKID as established in the
+ * process.
+ * @hs: Handshake struct for phase 2 of the 4-way.
+ * hs->bStatus and hs->bReserved are zero.
+ * hs->bMessageNumber is 2 (WUSB1.0[7.3.2.5.2])
+ * hs->dest_addr is the device's USB address padded with 0
+ * hs->src_addr is the host's UWB device address
+ * hs->mic is ignored (as we compute that value).
+ */
+static inline int wusb_oob_mic(u8 mic_out[8], const u8 key[16],
+ const struct aes_ccm_nonce *n,
+ const struct usb_handshake *hs)
+{
+ const struct aes_ccm_label a = { .data = "out-of-bandMIC" };
+ return wusb_prf_64(mic_out, 8, key, n, &a,
+ hs, sizeof(*hs) - sizeof(hs->MIC));
+}
+
+#endif /* #ifndef __WUSB_H__ */
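A small sketch of how the key-derivation helpers above fit into the 4-way handshake; the connection key and the two nonces are placeholders obtained earlier in the association process:

    #include <linux/string.h>
    #include <linux/usb/wusb.h>

    static int sample_derive_pair_keys(const u8 ck[16],
                                       const struct aes_ccm_nonce *n,
                                       const u8 hnonce[16], const u8 dnonce[16],
                                       struct wusb_keydvt_out *out)
    {
            struct wusb_keydvt_in in;

            memcpy(in.hnonce, hnonce, sizeof(in.hnonce));
            memcpy(in.dnonce, dnonce, sizeof(in.dnonce));

            /* Fills out->kck (handshake MIC key) and out->ptk (session key). */
            return wusb_key_derive(out, ck, n, &in);
    }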
diff --git a/include/linux/user_namespace.h b/include/linux/user_namespace.h
index b5f41d4c2ee..315bcd37522 100644
--- a/include/linux/user_namespace.h
+++ b/include/linux/user_namespace.h
@@ -12,7 +12,7 @@
struct user_namespace {
struct kref kref;
struct hlist_head uidhash_table[UIDHASH_SZ];
- struct user_struct *root_user;
+ struct user_struct *creator;
};
extern struct user_namespace init_user_ns;
@@ -26,8 +26,7 @@ static inline struct user_namespace *get_user_ns(struct user_namespace *ns)
return ns;
}
-extern struct user_namespace *copy_user_ns(int flags,
- struct user_namespace *old_ns);
+extern int create_user_ns(struct cred *new);
extern void free_user_ns(struct kref *kref);
static inline void put_user_ns(struct user_namespace *ns)
@@ -43,13 +42,9 @@ static inline struct user_namespace *get_user_ns(struct user_namespace *ns)
return &init_user_ns;
}
-static inline struct user_namespace *copy_user_ns(int flags,
- struct user_namespace *old_ns)
+static inline int create_user_ns(struct cred *new)
{
- if (flags & CLONE_NEWUSER)
- return ERR_PTR(-EINVAL);
-
- return old_ns;
+ return -EINVAL;
}
static inline void put_user_ns(struct user_namespace *ns)
diff --git a/include/linux/uwb.h b/include/linux/uwb.h
new file mode 100644
index 00000000000..f9ccbd9a2ce
--- /dev/null
+++ b/include/linux/uwb.h
@@ -0,0 +1,765 @@
+/*
+ * Ultra Wide Band
+ * UWB API
+ *
+ * Copyright (C) 2005-2006 Intel Corporation
+ * Inaky Perez-Gonzalez <inaky.perez-gonzalez@intel.com>
+ *
+ * This program is free software; you can redistribute it and/or
+ * modify it under the terms of the GNU General Public License version
+ * 2 as published by the Free Software Foundation.
+ *
+ * This program is distributed in the hope that it will be useful,
+ * but WITHOUT ANY WARRANTY; without even the implied warranty of
+ * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
+ * GNU General Public License for more details.
+ *
+ * You should have received a copy of the GNU General Public License
+ * along with this program; if not, write to the Free Software
+ * Foundation, Inc., 51 Franklin Street, Fifth Floor, Boston, MA
+ * 02110-1301, USA.
+ *
+ *
+ * FIXME: doc: overview of the API, different parts and pointers
+ */
+
+#ifndef __LINUX__UWB_H__
+#define __LINUX__UWB_H__
+
+#include <linux/limits.h>
+#include <linux/device.h>
+#include <linux/mutex.h>
+#include <linux/timer.h>
+#include <linux/workqueue.h>
+#include <linux/uwb/spec.h>
+
+struct uwb_dev;
+struct uwb_beca_e;
+struct uwb_rc;
+struct uwb_rsv;
+struct uwb_dbg;
+
+/**
+ * struct uwb_dev - a UWB Device
+ * @rc: UWB Radio Controller that discovered the device (kind of its
+ * parent).
+ * @bce: a beacon cache entry for this device; or NULL if the device
+ * is a local radio controller.
+ * @mac_addr: the EUI-48 address of this device.
+ * @dev_addr: the current DevAddr used by this device.
+ * @beacon_slot: the slot number the beacon is using.
+ * @streams: bitmap of streams allocated to reservations targeted at
+ * this device. For an RC, this is the streams allocated for
+ * reservations targeted at DevAddrs.
+ *
+ * A UWB device may either be a neighbor or part of a local radio
+ * controller.
+ */
+struct uwb_dev {
+ struct mutex mutex;
+ struct list_head list_node;
+ struct device dev;
+ struct uwb_rc *rc; /* radio controller */
+ struct uwb_beca_e *bce; /* Beacon Cache Entry */
+
+ struct uwb_mac_addr mac_addr;
+ struct uwb_dev_addr dev_addr;
+ int beacon_slot;
+ DECLARE_BITMAP(streams, UWB_NUM_STREAMS);
+};
+#define to_uwb_dev(d) container_of(d, struct uwb_dev, dev)
+
+/**
+ * UWB HWA/WHCI Radio Control {Command|Event} Block context IDs
+ *
+ * RC[CE]Bs have a 'context ID' field that matches the command with
+ * the event received to confirm it.
+ *
+ * Maximum number of context IDs
+ */
+enum { UWB_RC_CTX_MAX = 256 };
+
+
+/** Notification chain head for UWB generated events to listeners */
+struct uwb_notifs_chain {
+ struct list_head list;
+ struct mutex mutex;
+};
+
+/**
+ * struct uwb_mas_bm - a bitmap of all MAS in a superframe
+ * @bm: a bitmap of length #UWB_NUM_MAS
+ */
+struct uwb_mas_bm {
+ DECLARE_BITMAP(bm, UWB_NUM_MAS);
+};
+
+/**
+ * uwb_rsv_state - UWB Reservation state.
+ *
+ * NONE - reservation is not active (no DRP IE being transmitted).
+ *
+ * Owner reservation states:
+ *
+ * INITIATED - owner has sent an initial DRP request.
+ * PENDING - target responded with pending Reason Code.
+ * MODIFIED - reservation manager is modifying an established
+ * reservation with a different MAS allocation.
+ * ESTABLISHED - the reservation has been successfully negotiated.
+ *
+ * Target reservation states:
+ *
+ * DENIED - request is denied.
+ * ACCEPTED - request is accepted.
+ * PENDING - PAL has yet to make a decision on whether to accept or
+ * deny.
+ *
+ * FIXME: further target states TBD.
+ */
+enum uwb_rsv_state {
+ UWB_RSV_STATE_NONE,
+ UWB_RSV_STATE_O_INITIATED,
+ UWB_RSV_STATE_O_PENDING,
+ UWB_RSV_STATE_O_MODIFIED,
+ UWB_RSV_STATE_O_ESTABLISHED,
+ UWB_RSV_STATE_T_ACCEPTED,
+ UWB_RSV_STATE_T_DENIED,
+ UWB_RSV_STATE_T_PENDING,
+
+ UWB_RSV_STATE_LAST,
+};
+
+enum uwb_rsv_target_type {
+ UWB_RSV_TARGET_DEV,
+ UWB_RSV_TARGET_DEVADDR,
+};
+
+/**
+ * struct uwb_rsv_target - the target of a reservation.
+ *
+ * Reservations unicast and targeted at a single device
+ * (UWB_RSV_TARGET_DEV); or (e.g., in the case of WUSB) targeted at a
+ * specific (private) DevAddr (UWB_RSV_TARGET_DEVADDR).
+ */
+struct uwb_rsv_target {
+ enum uwb_rsv_target_type type;
+ union {
+ struct uwb_dev *dev;
+ struct uwb_dev_addr devaddr;
+ };
+};
+
+/*
+ * Number of streams reserved for reservations targeted at DevAddrs.
+ */
+#define UWB_NUM_GLOBAL_STREAMS 1
+
+typedef void (*uwb_rsv_cb_f)(struct uwb_rsv *rsv);
+
+/**
+ * struct uwb_rsv - a DRP reservation
+ *
+ * Data structure management:
+ *
+ * @rc: the radio controller this reservation is for
+ * (as target or owner)
+ * @rc_node: a list node for the RC
+ * @pal_node: a list node for the PAL
+ *
+ * Owner and target parameters:
+ *
+ * @owner: the UWB device owning this reservation
+ * @target: the target UWB device
+ * @type: reservation type
+ *
+ * Owner parameters:
+ *
+ * @max_mas: maximum number of MAS
+ * @min_mas: minimum number of MAS
+ * @sparsity: owner selected sparsity
+ * @is_multicast: true iff multicast
+ *
+ * @callback: callback function when the reservation completes
+ * @pal_priv: private data for the PAL making the reservation
+ *
+ * Reservation status:
+ *
+ * @status: negotiation status
+ * @stream: stream index allocated for this reservation
+ * @mas: reserved MAS
+ * @drp_ie: the DRP IE
+ * @ie_valid: true iff the DRP IE matches the reservation parameters
+ *
+ * DRP reservations are uniquely identified by the owner, target and
+ * stream index. However, when using a DevAddr as a target (e.g., for
+ * a WUSB cluster reservation) the responses may be received from
+ * devices with different DevAddrs. In this case, reservations are
+ * uniquely identified by just the stream index. A number of stream
+ * indexes (UWB_NUM_GLOBAL_STREAMS) are reserved for this.
+ */
+struct uwb_rsv {
+ struct uwb_rc *rc;
+ struct list_head rc_node;
+ struct list_head pal_node;
+
+ struct uwb_dev *owner;
+ struct uwb_rsv_target target;
+ enum uwb_drp_type type;
+ int max_mas;
+ int min_mas;
+ int sparsity;
+ bool is_multicast;
+
+ uwb_rsv_cb_f callback;
+ void *pal_priv;
+
+ enum uwb_rsv_state state;
+ u8 stream;
+ struct uwb_mas_bm mas;
+ struct uwb_ie_drp *drp_ie;
+ bool ie_valid;
+ struct timer_list timer;
+ bool expired;
+};
+
+static const
+struct uwb_mas_bm uwb_mas_bm_zero = { .bm = { 0 } };
+
+static inline void uwb_mas_bm_copy_le(void *dst, const struct uwb_mas_bm *mas)
+{
+ bitmap_copy_le(dst, mas->bm, UWB_NUM_MAS);
+}
+
+/**
+ * struct uwb_drp_avail - a radio controller's view of MAS usage
+ * @global: MAS unused by neighbors (excluding reservations targeted
+ * or owned by the local radio controller) or the beacon period
+ * @local: MAS unused by local established reservations
+ * @pending: MAS unused by local pending reservations
+ * @ie: DRP Availability IE to be included in the beacon
+ * @ie_valid: true iff @ie is valid and does not need to be regenerated from
+ * @global and @local
+ *
+ * Each radio controller maintains a view of MAS usage or
+ * availability. MAS available for a new reservation are determined
+ * from the intersection of @global, @local, and @pending.
+ *
+ * The radio controller must transmit a DRP Availability IE that's the
+ * intersection of @global and @local.
+ *
+ * A set bit indicates the MAS is unused and available.
+ *
+ * rc->rsvs_mutex should be held before accessing this data structure.
+ *
+ * [ECMA-368] section 17.4.3.
+ */
+struct uwb_drp_avail {
+ DECLARE_BITMAP(global, UWB_NUM_MAS);
+ DECLARE_BITMAP(local, UWB_NUM_MAS);
+ DECLARE_BITMAP(pending, UWB_NUM_MAS);
+ struct uwb_ie_drp_avail ie;
+ bool ie_valid;
+};
+
+
+const char *uwb_rsv_state_str(enum uwb_rsv_state state);
+const char *uwb_rsv_type_str(enum uwb_drp_type type);
+
+struct uwb_rsv *uwb_rsv_create(struct uwb_rc *rc, uwb_rsv_cb_f cb,
+ void *pal_priv);
+void uwb_rsv_destroy(struct uwb_rsv *rsv);
+
+int uwb_rsv_establish(struct uwb_rsv *rsv);
+int uwb_rsv_modify(struct uwb_rsv *rsv,
+ int max_mas, int min_mas, int sparsity);
+void uwb_rsv_terminate(struct uwb_rsv *rsv);
+
+void uwb_rsv_accept(struct uwb_rsv *rsv, uwb_rsv_cb_f cb, void *pal_priv);
+
+/**
+ * Radio Control Interface instance
+ *
+ *
+ * Life cycle rules: those of the UWB Device.
+ *
+ * @index: an index number for this radio controller, as used in the
+ * device name.
+ * @version: version of protocol supported by this device
+ * @priv: Backend implementation; rw with uwb_dev.dev.sem taken.
+ * @cmd: Backend implementation to execute commands; rw and call
+ * only with uwb_dev.dev.sem taken.
+ * @reset: Hardware reset of radio controller and any PAL controllers.
+ * @filter: Backend implementation to manipulate data to and from device
+ * to be compliant to specification assumed by driver (WHCI
+ * 0.95).
+ *
+ * uwb_dev.dev.mutex is used to execute commands and update
+ * the corresponding structures; can't use a spinlock
+ * because rc->cmd() can sleep.
+ * @ies: This is a dynamically allocated array caching the
+ * IEs (settable by the host) that the beacon of this
+ * radio controller is currently sending.
+ *
+ * In reality, we store here the full command we set to
+ * the radio controller (which is basically a command
+ * prefix followed by all the IEs the beacon currently
+ * contains). This way we don't have to realloc and
+ * memcpy when setting it.
+ *
+ * We set this up in uwb_rc_ie_setup(), where we alloc
+ * this struct, call get_ie() [so we know which IEs are
+ * currently being sent, if any].
+ *
+ * @ies_capacity: Amount of space (in bytes) allocated in @ies. The
+ * amount used is given by sizeof(*ies) plus ies->wIELength
+ * (which is a little endian quantity all the time).
+ * @ies_mutex: protect the IE cache
+ * @dbg: information for the debug interface
+ */
+struct uwb_rc {
+ struct uwb_dev uwb_dev;
+ int index;
+ u16 version;
+
+ struct module *owner;
+ void *priv;
+ int (*start)(struct uwb_rc *rc);
+ void (*stop)(struct uwb_rc *rc);
+ int (*cmd)(struct uwb_rc *, const struct uwb_rccb *, size_t);
+ int (*reset)(struct uwb_rc *rc);
+ int (*filter_cmd)(struct uwb_rc *, struct uwb_rccb **, size_t *);
+ int (*filter_event)(struct uwb_rc *, struct uwb_rceb **, const size_t,
+ size_t *, size_t *);
+
+ spinlock_t neh_lock; /* protects neh_* and ctx_* */
+ struct list_head neh_list; /* Open NE handles */
+ unsigned long ctx_bm[UWB_RC_CTX_MAX / 8 / sizeof(unsigned long)];
+ u8 ctx_roll;
+
+ int beaconing; /* Beaconing state [channel number] */
+ int scanning;
+ enum uwb_scan_type scan_type:3;
+ unsigned ready:1;
+ struct uwb_notifs_chain notifs_chain;
+
+ struct uwb_drp_avail drp_avail;
+ struct list_head reservations;
+ struct mutex rsvs_mutex;
+ struct workqueue_struct *rsv_workq;
+ struct work_struct rsv_update_work;
+
+ struct mutex ies_mutex;
+ struct uwb_rc_cmd_set_ie *ies;
+ size_t ies_capacity;
+
+ spinlock_t pal_lock;
+ struct list_head pals;
+
+ struct uwb_dbg *dbg;
+};
+
+
+/**
+ * struct uwb_pal - a UWB PAL
+ * @name: descriptive name for this PAL (wushc, wlp, etc.).
+ * @device: a device for the PAL. Used to link the PAL and the radio
+ * controller in sysfs.
+ * @new_rsv: called when a peer requests a reservation (may be NULL if
+ * the PAL cannot accept reservation requests).
+ *
+ * A Protocol Adaptation Layer (PAL) is a user of the WiMedia UWB
+ * radio platform (e.g., WUSB, WLP or Bluetooth UWB AMP).
+ *
+ * The PALs using a radio controller must register themselves to
+ * permit the UWB stack to coordinate usage of the radio between the
+ * various PALs or to allow PALs to respond to certain requests from
+ * peers.
+ *
+ * A struct uwb_pal should be embedded in a containing structure
+ * belonging to the PAL and initialized with uwb_pal_init(). Fields
+ * should be set appropriately by the PAL before registering the PAL
+ * with uwb_pal_register().
+ */
+struct uwb_pal {
+ struct list_head node;
+ const char *name;
+ struct device *device;
+ void (*new_rsv)(struct uwb_rsv *rsv);
+};
+
+void uwb_pal_init(struct uwb_pal *pal);
+int uwb_pal_register(struct uwb_rc *rc, struct uwb_pal *pal);
+void uwb_pal_unregister(struct uwb_rc *rc, struct uwb_pal *pal);
+
+/*
+ * General public API
+ *
+ * This API can be used by UWB device drivers or by those implementing
+ * UWB Radio Controllers
+ */
+struct uwb_dev *uwb_dev_get_by_devaddr(struct uwb_rc *rc,
+ const struct uwb_dev_addr *devaddr);
+struct uwb_dev *uwb_dev_get_by_rc(struct uwb_dev *, struct uwb_rc *);
+static inline void uwb_dev_get(struct uwb_dev *uwb_dev)
+{
+ get_device(&uwb_dev->dev);
+}
+static inline void uwb_dev_put(struct uwb_dev *uwb_dev)
+{
+ put_device(&uwb_dev->dev);
+}
+struct uwb_dev *uwb_dev_try_get(struct uwb_rc *rc, struct uwb_dev *uwb_dev);
+
+/**
+ * Callback function for 'uwb_{dev,rc}_foreach()'.
+ *
+ * @dev: Linux device instance
+ * 'uwb_dev = container_of(dev, struct uwb_dev, dev)'
+ * @priv: Data passed by the caller to 'uwb_{dev,rc}_foreach()'.
+ *
+ * @returns: 0 to continue the iterations, any other val to stop
+ * iterating and return the value to the caller of
+ * _foreach().
+ */
+typedef int (*uwb_dev_for_each_f)(struct device *dev, void *priv);
+int uwb_dev_for_each(struct uwb_rc *rc, uwb_dev_for_each_f func, void *priv);
+
+struct uwb_rc *uwb_rc_alloc(void);
+struct uwb_rc *uwb_rc_get_by_dev(const struct uwb_dev_addr *);
+struct uwb_rc *uwb_rc_get_by_grandpa(const struct device *);
+void uwb_rc_put(struct uwb_rc *rc);
+
+typedef void (*uwb_rc_cmd_cb_f)(struct uwb_rc *rc, void *arg,
+ struct uwb_rceb *reply, ssize_t reply_size);
+
+int uwb_rc_cmd_async(struct uwb_rc *rc, const char *cmd_name,
+ struct uwb_rccb *cmd, size_t cmd_size,
+ u8 expected_type, u16 expected_event,
+ uwb_rc_cmd_cb_f cb, void *arg);
+ssize_t uwb_rc_cmd(struct uwb_rc *rc, const char *cmd_name,
+ struct uwb_rccb *cmd, size_t cmd_size,
+ struct uwb_rceb *reply, size_t reply_size);
+ssize_t uwb_rc_vcmd(struct uwb_rc *rc, const char *cmd_name,
+ struct uwb_rccb *cmd, size_t cmd_size,
+ u8 expected_type, u16 expected_event,
+ struct uwb_rceb **preply);
+ssize_t uwb_rc_get_ie(struct uwb_rc *, struct uwb_rc_evt_get_ie **);
+int uwb_bg_joined(struct uwb_rc *rc);
+
+size_t __uwb_addr_print(char *, size_t, const unsigned char *, int);
+
+int uwb_rc_dev_addr_set(struct uwb_rc *, const struct uwb_dev_addr *);
+int uwb_rc_dev_addr_get(struct uwb_rc *, struct uwb_dev_addr *);
+int uwb_rc_mac_addr_set(struct uwb_rc *, const struct uwb_mac_addr *);
+int uwb_rc_mac_addr_get(struct uwb_rc *, struct uwb_mac_addr *);
+int __uwb_mac_addr_assigned_check(struct device *, void *);
+int __uwb_dev_addr_assigned_check(struct device *, void *);
+
+/* Print in @buf a pretty repr of @addr */
+static inline size_t uwb_dev_addr_print(char *buf, size_t buf_size,
+ const struct uwb_dev_addr *addr)
+{
+ return __uwb_addr_print(buf, buf_size, addr->data, 0);
+}
+
+/* Print in @buf a pretty repr of @addr */
+static inline size_t uwb_mac_addr_print(char *buf, size_t buf_size,
+ const struct uwb_mac_addr *addr)
+{
+ return __uwb_addr_print(buf, buf_size, addr->data, 1);
+}
+
+/* @returns 0 if device addresses @addr2 and @addr1 are equal */
+static inline int uwb_dev_addr_cmp(const struct uwb_dev_addr *addr1,
+ const struct uwb_dev_addr *addr2)
+{
+ return memcmp(addr1, addr2, sizeof(*addr1));
+}
+
+/* @returns 0 if MAC addresses @addr2 and @addr1 are equal */
+static inline int uwb_mac_addr_cmp(const struct uwb_mac_addr *addr1,
+ const struct uwb_mac_addr *addr2)
+{
+ return memcmp(addr1, addr2, sizeof(*addr1));
+}
+
+/* @returns !0 if a MAC @addr is a broadcast address */
+static inline int uwb_mac_addr_bcast(const struct uwb_mac_addr *addr)
+{
+ struct uwb_mac_addr bcast = {
+ .data = { 0xff, 0xff, 0xff, 0xff, 0xff, 0xff }
+ };
+ return !uwb_mac_addr_cmp(addr, &bcast);
+}
+
+/* @returns !0 if a MAC @addr is all zeroes */
+static inline int uwb_mac_addr_unset(const struct uwb_mac_addr *addr)
+{
+ struct uwb_mac_addr unset = {
+ .data = { 0x00, 0x00, 0x00, 0x00, 0x00, 0x00 }
+ };
+ return !uwb_mac_addr_cmp(addr, &unset);
+}
+
+/* @returns !0 if the address is in use. */
+static inline unsigned __uwb_dev_addr_assigned(struct uwb_rc *rc,
+ struct uwb_dev_addr *addr)
+{
+ return uwb_dev_for_each(rc, __uwb_dev_addr_assigned_check, addr);
+}
+
+/*
+ * UWB Radio Controller API
+ *
+ * This API is used (in addition to the general API) to implement UWB
+ * Radio Controllers.
+ */
+void uwb_rc_init(struct uwb_rc *);
+int uwb_rc_add(struct uwb_rc *, struct device *dev, void *rc_priv);
+void uwb_rc_rm(struct uwb_rc *);
+void uwb_rc_neh_grok(struct uwb_rc *, void *, size_t);
+void uwb_rc_neh_error(struct uwb_rc *, int);
+void uwb_rc_reset_all(struct uwb_rc *rc);
+
+/**
+ * uwb_rsv_is_owner - is the owner of this reservation the RC?
+ * @rsv: the reservation
+ */
+static inline bool uwb_rsv_is_owner(struct uwb_rsv *rsv)
+{
+ return rsv->owner == &rsv->rc->uwb_dev;
+}
+
+/**
+ * Events generated by UWB that can be passed to any listeners
+ *
+ * Higher layers can register callback functions with the radio
+ * controller using uwb_notifs_register(). The radio controller
+ * maintains a list of all registered handlers and will notify all
+ * nodes when an event occurs.
+ */
+enum uwb_notifs {
+ UWB_NOTIF_BG_JOIN = 0, /* radio controller joined a beacon group */
+ UWB_NOTIF_BG_LEAVE = 1, /* radio controller left a beacon group */
+ UWB_NOTIF_ONAIR,
+ UWB_NOTIF_OFFAIR,
+};
+
+/* Callback function registered with UWB */
+struct uwb_notifs_handler {
+ struct list_head list_node;
+ void (*cb)(void *, struct uwb_dev *, enum uwb_notifs);
+ void *data;
+};
+
+int uwb_notifs_register(struct uwb_rc *, struct uwb_notifs_handler *);
+int uwb_notifs_deregister(struct uwb_rc *, struct uwb_notifs_handler *);
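
A hedged sketch of how a listener might hook into these notifications (the example_* names are illustrative):

    static void example_uwb_notif(void *data, struct uwb_dev *uwb_dev,
    				  enum uwb_notifs event)
    {
    	switch (event) {
    	case UWB_NOTIF_ONAIR:
    		/* a device has appeared in the beacon group */
    		break;
    	case UWB_NOTIF_OFFAIR:
    		/* a device has gone away */
    		break;
    	default:
    		break;
    	}
    }

    static struct uwb_notifs_handler example_handler = {
    	.cb	= example_uwb_notif,
    	.data	= NULL,
    };

    /* uwb_notifs_register(rc, &example_handler); and later
     * uwb_notifs_deregister(rc, &example_handler); */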
+
+
+/**
+ * UWB radio controller Event Size Entry (for creating entry tables)
+ *
+ * WUSB and WHCI define events and notifications, and they might have
+ * fixed or variable size.
+ *
+ * Each event/notification has a size which is not necessarily known
+ * in advance based on the event code. As well, vendor specific
+ * events/notifications will have a size impossible to determine
+ * unless we know about the device's specific details.
+ *
+ * The spec does not require a LENGTH field in the HEADER of each
+ * message, so a generic driver cannot simply skip over vendor
+ * specific events/notifications it does not understand. The
+ * transaction size cannot be counted on either, as the spec does not
+ * forbid packing more than one event in a single transaction.
+ *
+ * Thus, we guess sizes with tables (or for events, when you know the
+ * size ahead of time you can use uwb_rc_neh_extra_size*()). We
+ * register tables with the known events and their sizes, and then we
+ * traverse those tables. For those with variable length, we provide a
+ * way to lookup the size inside the event/notification's
+ * payload. This allows device-specific event size tables to be
+ * registered.
+ *
+ * @size: Size of the payload
+ *
+ * @offset: if != 0, at offset @offset-1 starts a field with a length
+ * that has to be added to @size. The format of the field is
+ * given by @type.
+ *
+ * @type: Type and length of the offset field. Most common is LE 16
+ * bits (that's why that is zero); others are there mostly to
+ * cover for bugs and weirdos.
+ */
+struct uwb_est_entry {
+ size_t size;
+ unsigned offset;
+ enum { UWB_EST_16 = 0, UWB_EST_8 = 1 } type;
+};
+
+int uwb_est_register(u8 type, u8 code_high, u16 vendor, u16 product,
+ const struct uwb_est_entry *, size_t entries);
+int uwb_est_unregister(u8 type, u8 code_high, u16 vendor, u16 product,
+ const struct uwb_est_entry *, size_t entries);
+ssize_t uwb_est_find_size(struct uwb_rc *rc, const struct uwb_rceb *rceb,
+ size_t len);
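
To make the table format above concrete, here is a sketch of an EST entry for the Get IE event defined later in this patch; the 0xffff vendor/product values are assumed to mean "any", so check the EST implementation before relying on that:

    static const struct uwb_est_entry example_est[] = {
    	/* fixed header plus a 16-bit IE length at offset 4 */
    	[UWB_RC_CMD_GET_IE] = {
    		.size	= sizeof(struct uwb_rc_evt_get_ie),
    		.offset	= 1 + offsetof(struct uwb_rc_evt_get_ie, wIELength),
    		.type	= UWB_EST_16,
    	},
    };

    /* uwb_est_register(UWB_RC_CET_GENERAL, 0, 0xffff, 0xffff,
     *		       example_est, ARRAY_SIZE(example_est)); */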
+
+/* -- Misc */
+
+enum {
+ EDC_MAX_ERRORS = 10,
+ EDC_ERROR_TIMEFRAME = HZ,
+};
+
+/* error density counter */
+struct edc {
+ unsigned long timestart;
+ u16 errorcount;
+};
+
+static inline
+void edc_init(struct edc *edc)
+{
+ edc->timestart = jiffies;
+}
+
+/* Called when an error occurred.
+ * This is a way to determine if the number of acceptable errors per time
+ * period has been exceeded. It is not accurate as there are cases in which
+ * this scheme will not work, for example if there are periodic occurrences
+ * of errors that straddle updates to the start time. This scheme is
+ * sufficient for our usage.
+ *
+ * @returns 1 if maximum acceptable errors per timeframe has been exceeded.
+ */
+static inline int edc_inc(struct edc *err_hist, u16 max_err, u16 timeframe)
+{
+ unsigned long now;
+
+ now = jiffies;
+ if (now - err_hist->timestart > timeframe) {
+ err_hist->errorcount = 1;
+ err_hist->timestart = now;
+ } else if (++err_hist->errorcount > max_err) {
+ err_hist->errorcount = 0;
+ err_hist->timestart = now;
+ return 1;
+ }
+ return 0;
+}
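
A sketch of typical use, assuming a driver wants to give up after too many receive errors within one timeframe (the example_* names are illustrative):

    static struct edc rx_errors;

    static void example_init(void)
    {
    	edc_init(&rx_errors);
    }

    static void example_error_path(void)
    {
    	if (edc_inc(&rx_errors, EDC_MAX_ERRORS, EDC_ERROR_TIMEFRAME)) {
    		/* more than EDC_MAX_ERRORS errors within EDC_ERROR_TIMEFRAME
    		 * jiffies: stop retrying and reset the hardware */
    	}
    }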
+
+
+/* Information Element handling */
+
+/* For representing the state of writing to a buffer when iterating */
+struct uwb_buf_ctx {
+ char *buf;
+ size_t bytes, size;
+};
+
+typedef int (*uwb_ie_f)(struct uwb_dev *, const struct uwb_ie_hdr *,
+ size_t, void *);
+struct uwb_ie_hdr *uwb_ie_next(void **ptr, size_t *len);
+ssize_t uwb_ie_for_each(struct uwb_dev *uwb_dev, uwb_ie_f fn, void *data,
+ const void *buf, size_t size);
+int uwb_ie_dump_hex(struct uwb_dev *, const struct uwb_ie_hdr *,
+ size_t, void *);
+int uwb_rc_set_ie(struct uwb_rc *, struct uwb_rc_cmd_set_ie *);
+
+
+/*
+ * Transmission statistics
+ *
+ * UWB uses LQI and RSSI (one byte values) for reporting radio signal
+ * strength and line quality indication. We do quick and dirty
+ * averages of those. They are signed values, btw.
+ *
+ * For 8 bit quantities, we keep the min, the max, an accumulator
+ * (@sigma) and a # of samples. When @samples gets to 255, we compute
+ * the average (@sigma / @samples), place it in @sigma and reset
+ * @samples to 1 (so we use it as the first sample).
+ *
+ * Now, statistically speaking, probably I am kicking the kidneys of
+ * some books I have in my shelves collecting dust, but I just want to
+ * get an approx, not the Nobel.
+ *
+ * LOCKING: there is no locking per se, but we try to keep a lockless
+ * schema. Only _add_samples() modifies the values--as long as you
+ * have other locking on top that makes sure that no two calls of
+ * _add_sample() happen at the same time, then we are fine. Now, for
+ * resetting the values we just set @samples to 0 and that makes the
+ * next _add_sample() to start with defaults. Reading the values in
+ * _show() currently can race, so you need to make sure the calls are
+ * under the same lock that protects calls to _add_sample(). FIXME:
+ * currently unlocked (It is not ultraprecise but does the trick. Bite
+ * me).
+ */
+struct stats {
+ s8 min, max;
+ s16 sigma;
+ atomic_t samples;
+};
+
+static inline
+void stats_init(struct stats *stats)
+{
+ atomic_set(&stats->samples, 0);
+ wmb();
+}
+
+static inline
+void stats_add_sample(struct stats *stats, s8 sample)
+{
+ s8 min, max;
+ s16 sigma;
+ unsigned samples = atomic_read(&stats->samples);
+ if (samples == 0) { /* it was zero before, so we initialize */
+ min = 127;
+ max = -128;
+ sigma = 0;
+ } else {
+ min = stats->min;
+ max = stats->max;
+ sigma = stats->sigma;
+ }
+
+ if (sample < min) /* compute new values */
+ min = sample;
+ else if (sample > max)
+ max = sample;
+ sigma += sample;
+
+ stats->min = min; /* commit */
+ stats->max = max;
+ stats->sigma = sigma;
+ if (atomic_add_return(1, &stats->samples) > 255) {
+ /* wrapped around! reset */
+ stats->sigma = sigma / 256;
+ atomic_set(&stats->samples, 1);
+ }
+}
+
+static inline ssize_t stats_show(struct stats *stats, char *buf)
+{
+ int min, max, avg;
+ int samples = atomic_read(&stats->samples);
+ if (samples == 0)
+ min = max = avg = 0;
+ else {
+ min = stats->min;
+ max = stats->max;
+ avg = stats->sigma / samples;
+ }
+ return scnprintf(buf, PAGE_SIZE, "%d %d %d\n", min, max, avg);
+}
+
+static inline ssize_t stats_store(struct stats *stats, const char *buf,
+ size_t size)
+{
+ stats_init(stats);
+ return size;
+}
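
A sketch of how these helpers chain together for, e.g., an LQI average exported through sysfs (lqi_stats and the example_* names are illustrative):

    static struct stats lqi_stats;

    static void example_setup(void)
    {
    	stats_init(&lqi_stats);
    }

    static void example_rx_beacon(s8 lqi)
    {
    	stats_add_sample(&lqi_stats, lqi);	/* per received beacon */
    }

    /* a sysfs show() handler would end with:
     *	return stats_show(&lqi_stats, buf);	which prints "min max avg\n"
     */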
+
+#endif /* #ifndef __LINUX__UWB_H__ */
diff --git a/include/linux/uwb/debug-cmd.h b/include/linux/uwb/debug-cmd.h
new file mode 100644
index 00000000000..1141f41bab5
--- /dev/null
+++ b/include/linux/uwb/debug-cmd.h
@@ -0,0 +1,57 @@
+/*
+ * Ultra Wide Band
+ * Debug interface commands
+ *
+ * Copyright (C) 2008 Cambridge Silicon Radio Ltd.
+ *
+ * This program is free software; you can redistribute it and/or
+ * modify it under the terms of the GNU General Public License version
+ * 2 as published by the Free Software Foundation.
+ *
+ * This program is distributed in the hope that it will be useful,
+ * but WITHOUT ANY WARRANTY; without even the implied warranty of
+ * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
+ * GNU General Public License for more details.
+ *
+ * You should have received a copy of the GNU General Public License
+ * along with this program. If not, see <http://www.gnu.org/licenses/>.
+ */
+#ifndef __LINUX__UWB__DEBUG_CMD_H__
+#define __LINUX__UWB__DEBUG_CMD_H__
+
+#include <linux/types.h>
+
+/*
+ * Debug interface commands
+ *
+ * UWB_DBG_CMD_RSV_ESTABLISH: Establish a new unicast reservation.
+ *
+ * UWB_DBG_CMD_RSV_TERMINATE: Terminate the Nth reservation.
+ */
+
+enum uwb_dbg_cmd_type {
+ UWB_DBG_CMD_RSV_ESTABLISH = 1,
+ UWB_DBG_CMD_RSV_TERMINATE = 2,
+};
+
+struct uwb_dbg_cmd_rsv_establish {
+ __u8 target[6];
+ __u8 type;
+ __u16 max_mas;
+ __u16 min_mas;
+ __u8 sparsity;
+};
+
+struct uwb_dbg_cmd_rsv_terminate {
+ int index;
+};
+
+struct uwb_dbg_cmd {
+ __u32 type;
+ union {
+ struct uwb_dbg_cmd_rsv_establish rsv_establish;
+ struct uwb_dbg_cmd_rsv_terminate rsv_terminate;
+ };
+};
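
A user space sketch of filling this structure; the file it is written to is a debugfs node created by the UWB debug support (path not shown here), and the field values are purely illustrative:

    #include <linux/uwb/debug-cmd.h>

    struct uwb_dbg_cmd cmd = {
    	.type = UWB_DBG_CMD_RSV_ESTABLISH,
    	.rsv_establish = {
    		.target	  = { 0x00, 0x14, 0xa5, 0x00, 0x00, 0x01 },
    		.type	  = 2,		/* assumed to be a DRP reservation type */
    		.max_mas  = 128,
    		.min_mas  = 16,
    		.sparsity = 4,
    	},
    };

    /* write(fd, &cmd, sizeof(cmd)); -- one blob per command */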
+
+#endif /* #ifndef __LINUX__UWB__DEBUG_CMD_H__ */
diff --git a/include/linux/uwb/debug.h b/include/linux/uwb/debug.h
new file mode 100644
index 00000000000..a86a73fe303
--- /dev/null
+++ b/include/linux/uwb/debug.h
@@ -0,0 +1,82 @@
+/*
+ * Ultra Wide Band
+ * Debug Support
+ *
+ * Copyright (C) 2005-2006 Intel Corporation
+ * Inaky Perez-Gonzalez <inaky.perez-gonzalez@intel.com>
+ *
+ * This program is free software; you can redistribute it and/or
+ * modify it under the terms of the GNU General Public License version
+ * 2 as published by the Free Software Foundation.
+ *
+ * This program is distributed in the hope that it will be useful,
+ * but WITHOUT ANY WARRANTY; without even the implied warranty of
+ * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
+ * GNU General Public License for more details.
+ *
+ * You should have received a copy of the GNU General Public License
+ * along with this program; if not, write to the Free Software
+ * Foundation, Inc., 51 Franklin Street, Fifth Floor, Boston, MA
+ * 02110-1301, USA.
+ *
+ *
+ * FIXME: doc
+ * Invoke like:
+ *
+ * #define D_LOCAL 4
+ * #include <linux/uwb/debug.h>
+ *
+ * At the end of your include files.
+ */
+#include <linux/types.h>
+
+struct device;
+extern void dump_bytes(struct device *dev, const void *_buf, size_t rsize);
+
+/* Master debug switch; !0 enables, 0 disables */
+#define D_MASTER (!0)
+
+/* Local (per-file) debug switch; #define before #including */
+#ifndef D_LOCAL
+#define D_LOCAL 0
+#endif
+
+#undef __d_printf
+#undef d_fnstart
+#undef d_fnend
+#undef d_printf
+#undef d_dump
+
+#define __d_printf(l, _tag, _dev, f, a...) \
+do { \
+ struct device *__dev = (_dev); \
+ if (D_MASTER && D_LOCAL >= (l)) { \
+ char __head[64] = ""; \
+ if (_dev != NULL) { \
+ if ((unsigned long)__dev < 4096) \
+ printk(KERN_ERR "E: Corrupt dev %p\n", \
+ __dev); \
+ else \
+ snprintf(__head, sizeof(__head), \
+ "%s %s: ", \
+ dev_driver_string(__dev), \
+ __dev->bus_id); \
+ } \
+ printk(KERN_ERR "%s%s" _tag ": " f, __head, \
+ __func__, ## a); \
+ } \
+} while (0 && _dev)
+
+#define d_fnstart(l, _dev, f, a...) \
+ __d_printf(l, " FNSTART", _dev, f, ## a)
+#define d_fnend(l, _dev, f, a...) \
+ __d_printf(l, " FNEND", _dev, f, ## a)
+#define d_printf(l, _dev, f, a...) \
+ __d_printf(l, "", _dev, f, ## a)
+#define d_dump(l, _dev, ptr, size) \
+do { \
+ struct device *__dev = _dev; \
+ if (D_MASTER && D_LOCAL >= (l)) \
+ dump_bytes(__dev, ptr, size); \
+} while (0 && _dev)
+#define d_test(l) (D_MASTER && D_LOCAL >= (l))
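
Following the "Invoke like" note in the header comment above, per-file usage might look like this (example() is an invented function):

    #define D_LOCAL 2			/* this file's verbosity */
    #include <linux/uwb/debug.h>

    static void example(struct device *dev, const void *buf, size_t size)
    {
    	d_fnstart(1, dev, "(dev %p)\n", dev);
    	d_printf(2, dev, "got %zu bytes\n", size);
    	d_dump(3, dev, buf, size);
    	d_fnend(1, dev, "(dev %p) = void\n", dev);
    }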
diff --git a/include/linux/uwb/spec.h b/include/linux/uwb/spec.h
new file mode 100644
index 00000000000..198c15f8e25
--- /dev/null
+++ b/include/linux/uwb/spec.h
@@ -0,0 +1,727 @@
+/*
+ * Ultra Wide Band
+ * UWB Standard definitions
+ *
+ * Copyright (C) 2005-2006 Intel Corporation
+ * Inaky Perez-Gonzalez <inaky.perez-gonzalez@intel.com>
+ *
+ * This program is free software; you can redistribute it and/or
+ * modify it under the terms of the GNU General Public License version
+ * 2 as published by the Free Software Foundation.
+ *
+ * This program is distributed in the hope that it will be useful,
+ * but WITHOUT ANY WARRANTY; without even the implied warranty of
+ * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
+ * GNU General Public License for more details.
+ *
+ * You should have received a copy of the GNU General Public License
+ * along with this program; if not, write to the Free Software
+ * Foundation, Inc., 51 Franklin Street, Fifth Floor, Boston, MA
+ * 02110-1301, USA.
+ *
+ *
+ * All these definitions are based on the ECMA-368 standard.
+ *
+ * Note all definitions are Little Endian in the wire, and we will
+ * convert them to host order before operating on the bitfields (that
+ * yes, we use extensively).
+ */
+
+#ifndef __LINUX__UWB_SPEC_H__
+#define __LINUX__UWB_SPEC_H__
+
+#include <linux/types.h>
+#include <linux/bitmap.h>
+
+#define i1480_FW 0x00000303
+/* #define i1480_FW 0x00000302 */
+
+/**
+ * Number of Medium Access Slots in a superframe.
+ *
+ * UWB divides time in SuperFrames, each one divided in 256 pieces, or
+ * Medium Access Slots. See MBOA MAC[5.4.5] for details. The MAS is the
+ * basic bandwidth allocation unit in UWB.
+ */
+enum { UWB_NUM_MAS = 256 };
+
+/**
+ * Number of Zones in superframe.
+ *
+ * UWB divides the superframe into zones with numbering starting from BPST.
+ * See MBOA MAC[16.8.6]
+ */
+enum { UWB_NUM_ZONES = 16 };
+
+/*
+ * Number of MAS in a zone.
+ */
+#define UWB_MAS_PER_ZONE (UWB_NUM_MAS / UWB_NUM_ZONES)
+
+/*
+ * Number of streams per DRP reservation between a pair of devices.
+ *
+ * [ECMA-368] section 16.8.6.
+ */
+enum { UWB_NUM_STREAMS = 8 };
+
+/*
+ * mMasLength
+ *
+ * The length of a MAS in microseconds.
+ *
+ * [ECMA-368] section 17.16.
+ */
+enum { UWB_MAS_LENGTH_US = 256 };
+
+/*
+ * mBeaconSlotLength
+ *
+ * The length of the beacon slot in microseconds.
+ *
+ * [ECMA-368] section 17.16
+ */
+enum { UWB_BEACON_SLOT_LENGTH_US = 85 };
+
+/*
+ * mMaxLostBeacons
+ *
+ * The number of beacons missing in consecutive superframes before a
+ * device can be considered as unreachable.
+ *
+ * [ECMA-368] section 17.16
+ */
+enum { UWB_MAX_LOST_BEACONS = 3 };
+
+/*
+ * Length of a superframe in microseconds.
+ */
+#define UWB_SUPERFRAME_LENGTH_US (UWB_MAS_LENGTH_US * UWB_NUM_MAS)
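
For reference, with UWB_NUM_MAS = 256 and UWB_MAS_LENGTH_US = 256 this works out to 256 * 256 = 65536 microseconds, i.e. a superframe of roughly 65.5 ms.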
+
+/**
+ * UWB MAC address
+ *
+ * It is *imperative* that this struct is exactly 6 packed bytes (as
+ * it is also used to define headers sent down and up the wire/radio).
+ */
+struct uwb_mac_addr {
+ u8 data[6];
+} __attribute__((packed));
+
+
+/**
+ * UWB device address
+ *
+ * It is *imperative* that this struct is exactly 2 packed bytes (as
+ * it is also used to define headers sent down and up the wire/radio).
+ */
+struct uwb_dev_addr {
+ u8 data[2];
+} __attribute__((packed));
+
+
+/**
+ * Types of UWB addresses
+ *
+ * Order matters (by size).
+ */
+enum uwb_addr_type {
+ UWB_ADDR_DEV = 0,
+ UWB_ADDR_MAC = 1,
+};
+
+
+/** Size of a char buffer for printing a MAC/device address */
+enum { UWB_ADDR_STRSIZE = 32 };
+
+
+/** UWB WiMedia protocol IDs. */
+enum uwb_prid {
+ UWB_PRID_WLP_RESERVED = 0x0000,
+ UWB_PRID_WLP = 0x0001,
+ UWB_PRID_WUSB_BOT = 0x0010,
+ UWB_PRID_WUSB = 0x0010,
+ UWB_PRID_WUSB_TOP = 0x001F,
+};
+
+
+/** PHY Rate (MBOA MAC[7.8.12, Table 61]) */
+enum uwb_phy_rate {
+ UWB_PHY_RATE_53 = 0,
+ UWB_PHY_RATE_80,
+ UWB_PHY_RATE_106,
+ UWB_PHY_RATE_160,
+ UWB_PHY_RATE_200,
+ UWB_PHY_RATE_320,
+ UWB_PHY_RATE_400,
+ UWB_PHY_RATE_480,
+ UWB_PHY_RATE_INVALID
+};
+
+
+/**
+ * Different ways to scan (MBOA MAC[6.2.2, Table 8], WUSB[Table 8-78])
+ */
+enum uwb_scan_type {
+ UWB_SCAN_ONLY = 0,
+ UWB_SCAN_OUTSIDE_BP,
+ UWB_SCAN_WHILE_INACTIVE,
+ UWB_SCAN_DISABLED,
+ UWB_SCAN_ONLY_STARTTIME,
+ UWB_SCAN_TOP
+};
+
+
+/** ACK Policy types (MBOA MAC[7.2.1.3]) */
+enum uwb_ack_pol {
+ UWB_ACK_NO = 0,
+ UWB_ACK_INM = 1,
+ UWB_ACK_B = 2,
+ UWB_ACK_B_REQ = 3,
+};
+
+
+/** DRP reservation types ([ECMA-368] table 106) */
+enum uwb_drp_type {
+ UWB_DRP_TYPE_ALIEN_BP = 0,
+ UWB_DRP_TYPE_HARD,
+ UWB_DRP_TYPE_SOFT,
+ UWB_DRP_TYPE_PRIVATE,
+ UWB_DRP_TYPE_PCA,
+};
+
+
+/** DRP Reason Codes ([ECMA-368] table 107) */
+enum uwb_drp_reason {
+ UWB_DRP_REASON_ACCEPTED = 0,
+ UWB_DRP_REASON_CONFLICT,
+ UWB_DRP_REASON_PENDING,
+ UWB_DRP_REASON_DENIED,
+ UWB_DRP_REASON_MODIFIED,
+};
+
+/**
+ * DRP Notification Reason Codes (WHCI 0.95 [3.1.4.9])
+ */
+enum uwb_drp_notif_reason {
+ UWB_DRP_NOTIF_DRP_IE_RCVD = 0,
+ UWB_DRP_NOTIF_CONFLICT,
+ UWB_DRP_NOTIF_TERMINATE,
+};
+
+
+/** Allocation of MAS slots in a DRP request MBOA MAC[7.8.7] */
+struct uwb_drp_alloc {
+ __le16 zone_bm;
+ __le16 mas_bm;
+} __attribute__((packed));
+
+
+/** General MAC Header format (ECMA-368[16.2]) */
+struct uwb_mac_frame_hdr {
+ __le16 Frame_Control;
+ struct uwb_dev_addr DestAddr;
+ struct uwb_dev_addr SrcAddr;
+ __le16 Sequence_Control;
+ __le16 Access_Information;
+} __attribute__((packed));
+
+
+/**
+ * uwb_beacon_frame - a beacon frame including MAC headers
+ *
+ * [ECMA] section 16.3.
+ */
+struct uwb_beacon_frame {
+ struct uwb_mac_frame_hdr hdr;
+ struct uwb_mac_addr Device_Identifier; /* may be a NULL EUI-48 */
+ u8 Beacon_Slot_Number;
+ u8 Device_Control;
+ u8 IEData[];
+} __attribute__((packed));
+
+
+/** Information Element codes (MBOA MAC[T54]) */
+enum uwb_ie {
+ UWB_PCA_AVAILABILITY = 2,
+ UWB_IE_DRP_AVAILABILITY = 8,
+ UWB_IE_DRP = 9,
+ UWB_BP_SWITCH_IE = 11,
+ UWB_MAC_CAPABILITIES_IE = 12,
+ UWB_PHY_CAPABILITIES_IE = 13,
+ UWB_APP_SPEC_PROBE_IE = 15,
+ UWB_IDENTIFICATION_IE = 19,
+ UWB_MASTER_KEY_ID_IE = 20,
+ UWB_IE_WLP = 250, /* WiMedia Logical Link Control Protocol WLP 0.99 */
+ UWB_APP_SPEC_IE = 255,
+};
+
+
+/**
+ * Header common to all Information Elements (IEs)
+ */
+struct uwb_ie_hdr {
+ u8 element_id; /* enum uwb_ie */
+ u8 length;
+} __attribute__((packed));
+
+
+/** Dynamic Reservation Protocol IE (MBOA MAC[7.8.6]) */
+struct uwb_ie_drp {
+ struct uwb_ie_hdr hdr;
+ __le16 drp_control;
+ struct uwb_dev_addr dev_addr;
+ struct uwb_drp_alloc allocs[];
+} __attribute__((packed));
+
+static inline int uwb_ie_drp_type(struct uwb_ie_drp *ie)
+{
+ return (le16_to_cpu(ie->drp_control) >> 0) & 0x7;
+}
+
+static inline int uwb_ie_drp_stream_index(struct uwb_ie_drp *ie)
+{
+ return (le16_to_cpu(ie->drp_control) >> 3) & 0x7;
+}
+
+static inline int uwb_ie_drp_reason_code(struct uwb_ie_drp *ie)
+{
+ return (le16_to_cpu(ie->drp_control) >> 6) & 0x7;
+}
+
+static inline int uwb_ie_drp_status(struct uwb_ie_drp *ie)
+{
+ return (le16_to_cpu(ie->drp_control) >> 9) & 0x1;
+}
+
+static inline int uwb_ie_drp_owner(struct uwb_ie_drp *ie)
+{
+ return (le16_to_cpu(ie->drp_control) >> 10) & 0x1;
+}
+
+static inline int uwb_ie_drp_tiebreaker(struct uwb_ie_drp *ie)
+{
+ return (le16_to_cpu(ie->drp_control) >> 11) & 0x1;
+}
+
+static inline int uwb_ie_drp_unsafe(struct uwb_ie_drp *ie)
+{
+ return (le16_to_cpu(ie->drp_control) >> 12) & 0x1;
+}
+
+static inline void uwb_ie_drp_set_type(struct uwb_ie_drp *ie, enum uwb_drp_type type)
+{
+ u16 drp_control = le16_to_cpu(ie->drp_control);
+ drp_control = (drp_control & ~(0x7 << 0)) | (type << 0);
+ ie->drp_control = cpu_to_le16(drp_control);
+}
+
+static inline void uwb_ie_drp_set_stream_index(struct uwb_ie_drp *ie, int stream_index)
+{
+ u16 drp_control = le16_to_cpu(ie->drp_control);
+ drp_control = (drp_control & ~(0x7 << 3)) | (stream_index << 3);
+ ie->drp_control = cpu_to_le16(drp_control);
+}
+
+static inline void uwb_ie_drp_set_reason_code(struct uwb_ie_drp *ie,
+ enum uwb_drp_reason reason_code)
+{
+ u16 drp_control = le16_to_cpu(ie->drp_control);
+	drp_control = (drp_control & ~(0x7 << 6)) | (reason_code << 6);
+ ie->drp_control = cpu_to_le16(drp_control);
+}
+
+static inline void uwb_ie_drp_set_status(struct uwb_ie_drp *ie, int status)
+{
+ u16 drp_control = le16_to_cpu(ie->drp_control);
+ drp_control = (drp_control & ~(0x1 << 9)) | (status << 9);
+ ie->drp_control = cpu_to_le16(drp_control);
+}
+
+static inline void uwb_ie_drp_set_owner(struct uwb_ie_drp *ie, int owner)
+{
+ u16 drp_control = le16_to_cpu(ie->drp_control);
+ drp_control = (drp_control & ~(0x1 << 10)) | (owner << 10);
+ ie->drp_control = cpu_to_le16(drp_control);
+}
+
+static inline void uwb_ie_drp_set_tiebreaker(struct uwb_ie_drp *ie, int tiebreaker)
+{
+ u16 drp_control = le16_to_cpu(ie->drp_control);
+ drp_control = (drp_control & ~(0x1 << 11)) | (tiebreaker << 11);
+ ie->drp_control = cpu_to_le16(drp_control);
+}
+
+static inline void uwb_ie_drp_set_unsafe(struct uwb_ie_drp *ie, int unsafe)
+{
+ u16 drp_control = le16_to_cpu(ie->drp_control);
+ drp_control = (drp_control & ~(0x1 << 12)) | (unsafe << 12);
+ ie->drp_control = cpu_to_le16(drp_control);
+}
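
The bit layout of drp_control implied by the accessors above is: bits 0-2 reservation type, 3-5 stream index, 6-8 reason code, 9 status, 10 owner, 11 tie-breaker and 12 unsafe. A sketch of composing an outgoing DRP IE with the setters (hdr.length and the MAS allocations still have to be filled in separately; example_fill_drp_ie is an invented name):

    static void example_fill_drp_ie(struct uwb_ie_drp *ie)
    {
    	ie->hdr.element_id = UWB_IE_DRP;
    	ie->drp_control = 0;
    	uwb_ie_drp_set_type(ie, UWB_DRP_TYPE_HARD);
    	uwb_ie_drp_set_stream_index(ie, 1);
    	uwb_ie_drp_set_reason_code(ie, UWB_DRP_REASON_ACCEPTED);
    	uwb_ie_drp_set_status(ie, 1);
    	uwb_ie_drp_set_owner(ie, 1);
    }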
+
+/** Dynamic Reservation Protocol IE (MBOA MAC[7.8.7]) */
+struct uwb_ie_drp_avail {
+ struct uwb_ie_hdr hdr;
+ DECLARE_BITMAP(bmp, UWB_NUM_MAS);
+} __attribute__((packed));
+
+/**
+ * The Vendor ID is set to an OUI that indicates the vendor of the device.
+ * ECMA-368 [16.8.10]
+ */
+struct uwb_vendor_id {
+ u8 data[3];
+} __attribute__((packed));
+
+/**
+ * The device type ID
+ * FIXME: clarify what this means
+ * ECMA-368 [16.8.10]
+ */
+struct uwb_device_type_id {
+ u8 data[3];
+} __attribute__((packed));
+
+
+/**
+ * UWB device information types
+ * ECMA-368 [16.8.10]
+ */
+enum uwb_dev_info_type {
+ UWB_DEV_INFO_VENDOR_ID = 0,
+ UWB_DEV_INFO_VENDOR_TYPE,
+ UWB_DEV_INFO_NAME,
+};
+
+/**
+ * UWB device information found in Identification IE
+ * ECMA-368 [16.8.10]
+ */
+struct uwb_dev_info {
+ u8 type; /* enum uwb_dev_info_type */
+ u8 length;
+ u8 data[];
+} __attribute__((packed));
+
+/**
+ * UWB Identification IE
+ * ECMA-368 [16.8.10]
+ */
+struct uwb_identification_ie {
+ struct uwb_ie_hdr hdr;
+ struct uwb_dev_info info[];
+} __attribute__((packed));
+
+/*
+ * UWB Radio Controller
+ *
+ * These definitions are common to the Radio Control layers as
+ * exported by the WUSB1.0 HWA and WHCI interfaces.
+ */
+
+/** Radio Control Command Block (WUSB1.0[Table 8-65] and WHCI 0.95) */
+struct uwb_rccb {
+ u8 bCommandType; /* enum hwa_cet */
+ __le16 wCommand; /* Command code */
+ u8 bCommandContext; /* Context ID */
+} __attribute__((packed));
+
+
+/** Radio Control Event Block (WUSB[table 8-66], WHCI 0.95) */
+struct uwb_rceb {
+ u8 bEventType; /* enum hwa_cet */
+ __le16 wEvent; /* Event code */
+ u8 bEventContext; /* Context ID */
+} __attribute__((packed));
+
+
+enum {
+ UWB_RC_CET_GENERAL = 0, /* General Command/Event type */
+ UWB_RC_CET_EX_TYPE_1 = 1, /* Extended Type 1 Command/Event type */
+};
+
+/* Commands to the radio controller */
+enum uwb_rc_cmd {
+ UWB_RC_CMD_CHANNEL_CHANGE = 16,
+ UWB_RC_CMD_DEV_ADDR_MGMT = 17, /* Device Address Management */
+ UWB_RC_CMD_GET_IE = 18, /* GET Information Elements */
+ UWB_RC_CMD_RESET = 19,
+ UWB_RC_CMD_SCAN = 20, /* Scan management */
+ UWB_RC_CMD_SET_BEACON_FILTER = 21,
+ UWB_RC_CMD_SET_DRP_IE = 22, /* Dynamic Reservation Protocol IEs */
+ UWB_RC_CMD_SET_IE = 23, /* Information Element management */
+ UWB_RC_CMD_SET_NOTIFICATION_FILTER = 24,
+ UWB_RC_CMD_SET_TX_POWER = 25,
+ UWB_RC_CMD_SLEEP = 26,
+ UWB_RC_CMD_START_BEACON = 27,
+ UWB_RC_CMD_STOP_BEACON = 28,
+ UWB_RC_CMD_BP_MERGE = 29,
+ UWB_RC_CMD_SEND_COMMAND_FRAME = 30,
+ UWB_RC_CMD_SET_ASIE_NOTIF = 31,
+};
+
+/* Notifications from the radio controller */
+enum uwb_rc_evt {
+ UWB_RC_EVT_IE_RCV = 0,
+ UWB_RC_EVT_BEACON = 1,
+ UWB_RC_EVT_BEACON_SIZE = 2,
+ UWB_RC_EVT_BPOIE_CHANGE = 3,
+ UWB_RC_EVT_BP_SLOT_CHANGE = 4,
+ UWB_RC_EVT_BP_SWITCH_IE_RCV = 5,
+ UWB_RC_EVT_DEV_ADDR_CONFLICT = 6,
+ UWB_RC_EVT_DRP_AVAIL = 7,
+ UWB_RC_EVT_DRP = 8,
+ UWB_RC_EVT_BP_SWITCH_STATUS = 9,
+ UWB_RC_EVT_CMD_FRAME_RCV = 10,
+ UWB_RC_EVT_CHANNEL_CHANGE_IE_RCV = 11,
+ /* Events (command responses) use the same code as the command */
+ UWB_RC_EVT_UNKNOWN_CMD_RCV = 65535,
+};
+
+enum uwb_rc_extended_type_1_cmd {
+ UWB_RC_SET_DAA_ENERGY_MASK = 32,
+ UWB_RC_SET_NOTIFICATION_FILTER_EX = 33,
+};
+
+enum uwb_rc_extended_type_1_evt {
+ UWB_RC_DAA_ENERGY_DETECTED = 0,
+};
+
+/* Radio Control Result Code. [WHCI] table 3-3. */
+enum {
+ UWB_RC_RES_SUCCESS = 0,
+ UWB_RC_RES_FAIL,
+ UWB_RC_RES_FAIL_HARDWARE,
+ UWB_RC_RES_FAIL_NO_SLOTS,
+ UWB_RC_RES_FAIL_BEACON_TOO_LARGE,
+ UWB_RC_RES_FAIL_INVALID_PARAMETER,
+ UWB_RC_RES_FAIL_UNSUPPORTED_PWR_LEVEL,
+ UWB_RC_RES_FAIL_INVALID_IE_DATA,
+ UWB_RC_RES_FAIL_BEACON_SIZE_EXCEEDED,
+ UWB_RC_RES_FAIL_CANCELLED,
+ UWB_RC_RES_FAIL_INVALID_STATE,
+ UWB_RC_RES_FAIL_INVALID_SIZE,
+ UWB_RC_RES_FAIL_ACK_NOT_RECEIVED,
+ UWB_RC_RES_FAIL_NO_MORE_ASIE_NOTIF,
+ UWB_RC_RES_FAIL_TIME_OUT = 255,
+};
+
+/* Confirm event. [WHCI] section 3.1.3.1 etc. */
+struct uwb_rc_evt_confirm {
+ struct uwb_rceb rceb;
+ u8 bResultCode;
+} __attribute__((packed));
+
+/* Device Address Management event. [WHCI] section 3.1.3.2. */
+struct uwb_rc_evt_dev_addr_mgmt {
+ struct uwb_rceb rceb;
+ u8 baAddr[6];
+ u8 bResultCode;
+} __attribute__((packed));
+
+
+/* Get IE Event. [WHCI] section 3.1.3.3. */
+struct uwb_rc_evt_get_ie {
+ struct uwb_rceb rceb;
+ __le16 wIELength;
+ u8 IEData[];
+} __attribute__((packed));
+
+/* Set DRP IE Event. [WHCI] section 3.1.3.7. */
+struct uwb_rc_evt_set_drp_ie {
+ struct uwb_rceb rceb;
+ __le16 wRemainingSpace;
+ u8 bResultCode;
+} __attribute__((packed));
+
+/* Set IE Event. [WHCI] section 3.1.3.8. */
+struct uwb_rc_evt_set_ie {
+ struct uwb_rceb rceb;
+ __le16 RemainingSpace;
+ u8 bResultCode;
+} __attribute__((packed));
+
+/* Scan command. [WHCI] 3.1.3.5. */
+struct uwb_rc_cmd_scan {
+ struct uwb_rccb rccb;
+ u8 bChannelNumber;
+ u8 bScanState;
+ __le16 wStartTime;
+} __attribute__((packed));
+
+/* Set DRP IE command. [WHCI] section 3.1.3.7. */
+struct uwb_rc_cmd_set_drp_ie {
+ struct uwb_rccb rccb;
+ __le16 wIELength;
+ struct uwb_ie_drp IEData[];
+} __attribute__((packed));
+
+/* Set IE command. [WHCI] section 3.1.3.8. */
+struct uwb_rc_cmd_set_ie {
+ struct uwb_rccb rccb;
+ __le16 wIELength;
+ u8 IEData[];
+} __attribute__((packed));
+
+/* Set DAA Energy Mask event. [WHCI 0.96] section 3.1.3.17. */
+struct uwb_rc_evt_set_daa_energy_mask {
+ struct uwb_rceb rceb;
+ __le16 wLength;
+ u8 result;
+} __attribute__((packed));
+
+/* Set Notification Filter Extended event. [WHCI 0.96] section 3.1.3.18. */
+struct uwb_rc_evt_set_notification_filter_ex {
+ struct uwb_rceb rceb;
+ __le16 wLength;
+ u8 result;
+} __attribute__((packed));
+
+/* IE Received notification. [WHCI] section 3.1.4.1. */
+struct uwb_rc_evt_ie_rcv {
+ struct uwb_rceb rceb;
+ struct uwb_dev_addr SrcAddr;
+ __le16 wIELength;
+ u8 IEData[];
+} __attribute__((packed));
+
+/* Type of the received beacon. [WHCI] section 3.1.4.2. */
+enum uwb_rc_beacon_type {
+ UWB_RC_BEACON_TYPE_SCAN = 0,
+ UWB_RC_BEACON_TYPE_NEIGHBOR,
+ UWB_RC_BEACON_TYPE_OL_ALIEN,
+ UWB_RC_BEACON_TYPE_NOL_ALIEN,
+};
+
+/* Beacon received notification. [WHCI] 3.1.4.2. */
+struct uwb_rc_evt_beacon {
+ struct uwb_rceb rceb;
+ u8 bChannelNumber;
+ u8 bBeaconType;
+ __le16 wBPSTOffset;
+ u8 bLQI;
+ u8 bRSSI;
+ __le16 wBeaconInfoLength;
+ u8 BeaconInfo[];
+} __attribute__((packed));
+
+
+/* Beacon Size Change notification. [WHCI] section 3.1.4.3 */
+struct uwb_rc_evt_beacon_size {
+ struct uwb_rceb rceb;
+ __le16 wNewBeaconSize;
+} __attribute__((packed));
+
+
+/* BPOIE Change notification. [WHCI] section 3.1.4.4. */
+struct uwb_rc_evt_bpoie_change {
+ struct uwb_rceb rceb;
+ __le16 wBPOIELength;
+ u8 BPOIE[];
+} __attribute__((packed));
+
+
+/* Beacon Slot Change notification. [WHCI] section 3.1.4.5. */
+struct uwb_rc_evt_bp_slot_change {
+ struct uwb_rceb rceb;
+ u8 slot_info;
+} __attribute__((packed));
+
+static inline int uwb_rc_evt_bp_slot_change_slot_num(
+ const struct uwb_rc_evt_bp_slot_change *evt)
+{
+ return evt->slot_info & 0x7f;
+}
+
+static inline int uwb_rc_evt_bp_slot_change_no_slot(
+ const struct uwb_rc_evt_bp_slot_change *evt)
+{
+ return (evt->slot_info & 0x80) >> 7;
+}
+
+/* BP Switch IE Received notification. [WHCI] section 3.1.4.6. */
+struct uwb_rc_evt_bp_switch_ie_rcv {
+ struct uwb_rceb rceb;
+ struct uwb_dev_addr wSrcAddr;
+ __le16 wIELength;
+ u8 IEData[];
+} __attribute__((packed));
+
+/* DevAddr Conflict notification. [WHCI] section 3.1.4.7. */
+struct uwb_rc_evt_dev_addr_conflict {
+ struct uwb_rceb rceb;
+} __attribute__((packed));
+
+/* DRP notification. [WHCI] section 3.1.4.9. */
+struct uwb_rc_evt_drp {
+ struct uwb_rceb rceb;
+ struct uwb_dev_addr src_addr;
+ u8 reason;
+ u8 beacon_slot_number;
+ __le16 ie_length;
+ u8 ie_data[];
+} __attribute__((packed));
+
+static inline enum uwb_drp_notif_reason uwb_rc_evt_drp_reason(struct uwb_rc_evt_drp *evt)
+{
+ return evt->reason & 0x0f;
+}
+
+
+/* DRP Availability Change notification. [WHCI] section 3.1.4.8. */
+struct uwb_rc_evt_drp_avail {
+ struct uwb_rceb rceb;
+ DECLARE_BITMAP(bmp, UWB_NUM_MAS);
+} __attribute__((packed));
+
+/* BP switch status notification. [WHCI] section 3.1.4.10. */
+struct uwb_rc_evt_bp_switch_status {
+ struct uwb_rceb rceb;
+ u8 status;
+ u8 slot_offset;
+ __le16 bpst_offset;
+ u8 move_countdown;
+} __attribute__((packed));
+
+/* Command Frame Received notification. [WHCI] section 3.1.4.11. */
+struct uwb_rc_evt_cmd_frame_rcv {
+ struct uwb_rceb rceb;
+ __le16 receive_time;
+ struct uwb_dev_addr wSrcAddr;
+ struct uwb_dev_addr wDstAddr;
+ __le16 control;
+ __le16 reserved;
+ __le16 dataLength;
+ u8 data[];
+} __attribute__((packed));
+
+/* Channel Change IE Received notification. [WHCI] section 3.1.4.12. */
+struct uwb_rc_evt_channel_change_ie_rcv {
+ struct uwb_rceb rceb;
+ struct uwb_dev_addr wSrcAddr;
+ __le16 wIELength;
+ u8 IEData[];
+} __attribute__((packed));
+
+/* DAA Energy Detected notification. [WHCI 0.96] section 3.1.4.14. */
+struct uwb_rc_evt_daa_energy_detected {
+ struct uwb_rceb rceb;
+ __le16 wLength;
+ u8 bandID;
+ u8 reserved;
+ u8 toneBmp[16];
+} __attribute__((packed));
+
+
+/**
+ * Radio Control Interface Class Descriptor
+ *
+ * WUSB 1.0 [8.6.1.2]
+ */
+struct uwb_rc_control_intf_class_desc {
+ u8 bLength;
+ u8 bDescriptorType;
+ __le16 bcdRCIVersion;
+} __attribute__((packed));
+
+#endif /* #ifndef __LINUX__UWB_SPEC_H__ */
diff --git a/include/linux/uwb/umc.h b/include/linux/uwb/umc.h
new file mode 100644
index 00000000000..36a39e34f8d
--- /dev/null
+++ b/include/linux/uwb/umc.h
@@ -0,0 +1,194 @@
+/*
+ * UWB Multi-interface Controller support.
+ *
+ * Copyright (C) 2007 Cambridge Silicon Radio Ltd.
+ *
+ * This file is released under the GPLv2
+ *
+ * UMC (UWB Multi-interface Controller) capabilities (e.g., radio
+ * controller, host controller) are presented as devices on the "umc"
+ * bus.
+ *
+ * The radio controller is not strictly a UMC capability but it's
+ * useful to present it as such.
+ *
+ * References:
+ *
+ * [WHCI] Wireless Host Controller Interface Specification for
+ * Certified Wireless Universal Serial Bus, revision 0.95.
+ *
+ * How this works is kind of convoluted but simple. The whci.ko driver
+ * loads when WHCI devices are detected. These WHCI devices expose
+ * several capabilities within the same PCI function, so for each
+ * capability exposed, whci creates a umc_dev [whci_probe() -> whci_add_cap()]
+ * with umc_device_create() and adds it to the bus with
+ * umc_device_register().
+ *
+ * umc_device_register() calls device_register() which will push the
+ * bus management code to load your UMC driver's something_probe()
+ * that you have registered for that capability code.
+ *
+ * Now when the WHCI device is removed, whci_remove() will go over
+ * each umc_dev assigned to each of the PCI function's capabilities
+ * and through whci_del_cap() call umc_device_unregister() each
+ * created umc_dev. Of course, if you are bound to the device, your
+ * driver's something_remove() will be called.
+ */
+
+#ifndef _LINUX_UWB_UMC_H_
+#define _LINUX_UWB_UMC_H_
+
+#include <linux/device.h>
+#include <linux/pci.h>
+
+/*
+ * UMC capability IDs.
+ *
+ * 0x00 is reserved, so it is used for the radio controller device.
+ *
+ * [WHCI] table 2-8
+ */
+#define UMC_CAP_ID_WHCI_RC 0x00 /* radio controller */
+#define UMC_CAP_ID_WHCI_WUSB_HC 0x01 /* WUSB host controller */
+
+/**
+ * struct umc_dev - UMC capability device
+ *
+ * @version: version of the specification this capability conforms to.
+ * @cap_id: capability ID.
+ * @bar: PCI Bar (64 bit) where the resource lies
+ * @resource: register space resource.
+ * @irq: interrupt line.
+ */
+struct umc_dev {
+ u16 version;
+ u8 cap_id;
+ u8 bar;
+ struct resource resource;
+ unsigned irq;
+ struct device dev;
+};
+
+#define to_umc_dev(d) container_of(d, struct umc_dev, dev)
+
+/**
+ * struct umc_driver - UMC capability driver
+ * @cap_id: supported capability ID.
+ * @match: driver specific capability matching function.
+ * @match_data: driver specific data for match() (e.g., a
+ * table of pci_device_id's if umc_match_pci_id() is used).
+ */
+struct umc_driver {
+ char *name;
+ u8 cap_id;
+ int (*match)(struct umc_driver *, struct umc_dev *);
+ const void *match_data;
+
+ int (*probe)(struct umc_dev *);
+ void (*remove)(struct umc_dev *);
+ int (*suspend)(struct umc_dev *, pm_message_t state);
+ int (*resume)(struct umc_dev *);
+
+ struct device_driver driver;
+};
+
+#define to_umc_driver(d) container_of(d, struct umc_driver, driver)
+
+extern struct bus_type umc_bus_type;
+
+struct umc_dev *umc_device_create(struct device *parent, int n);
+int __must_check umc_device_register(struct umc_dev *umc);
+void umc_device_unregister(struct umc_dev *umc);
+
+int __must_check __umc_driver_register(struct umc_driver *umc_drv,
+ struct module *mod,
+ const char *mod_name);
+
+/**
+ * umc_driver_register - register a UMC capability driver.
+ * @umc_drv: pointer to the driver.
+ */
+static inline int __must_check umc_driver_register(struct umc_driver *umc_drv)
+{
+ return __umc_driver_register(umc_drv, THIS_MODULE, KBUILD_MODNAME);
+}
+void umc_driver_unregister(struct umc_driver *umc_drv);
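
A rough sketch of a UMC capability driver registration, under the assumption that mapping umc->resource is how such a driver reaches its registers (the example_* names are invented):

    static int example_probe(struct umc_dev *umc)
    {
    	void __iomem *regs;

    	regs = ioremap(umc->resource.start,
    		       umc->resource.end - umc->resource.start + 1);
    	if (!regs)
    		return -ENOMEM;
    	umc_set_drvdata(umc, regs);
    	return 0;
    }

    static void example_remove(struct umc_dev *umc)
    {
    	iounmap(umc_get_drvdata(umc));
    }

    static struct umc_driver example_umc_driver = {
    	.name	= "example-umc",
    	.cap_id	= UMC_CAP_ID_WHCI_RC,	/* capability to bind to */
    	.probe	= example_probe,
    	.remove	= example_remove,
    };

    /* module init: umc_driver_register(&example_umc_driver);
     * module exit: umc_driver_unregister(&example_umc_driver); */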
+
+/*
+ * Utility function you can use to match (umc_driver->match) against a
+ * null-terminated array of 'struct pci_device_id' in
+ * umc_driver->match_data.
+ */
+int umc_match_pci_id(struct umc_driver *umc_drv, struct umc_dev *umc);
+
+/**
+ * umc_parent_pci_dev - return the UMC's parent PCI device or NULL if none
+ * @umc_dev: UMC device whose parent PCI device we are looking for
+ *
+ * DIRTY!!! DON'T RELY ON THIS
+ *
+ * FIXME: This is as dirty as it gets, but we need some way to check
+ * the correct type of umc_dev->parent (so that for example, we can
+ * cast to pci_dev). Casting to pci_dev is necessary because at some
+ * point we need to request resources from the device. Mapping is
+ * easily overcome (ioremap and stuff are bus agnostic), but hooking
+ * up to some error handlers (such as pci error handlers) might need
+ * this.
+ *
+ * THIS might (probably will) be removed in the future, so don't count
+ * on it.
+ */
+static inline struct pci_dev *umc_parent_pci_dev(struct umc_dev *umc_dev)
+{
+ struct pci_dev *pci_dev = NULL;
+ if (umc_dev->dev.parent->bus == &pci_bus_type)
+ pci_dev = to_pci_dev(umc_dev->dev.parent);
+ return pci_dev;
+}
+
+/**
+ * umc_dev_get() - reference a UMC device.
+ * @umc_dev: Pointer to UMC device.
+ *
+ * NOTE: we are assuming in this whole scheme that the parent device
+ * is referenced at _probe() time and unreferenced at _remove()
+ * time by the parent's subsystem.
+ */
+static inline struct umc_dev *umc_dev_get(struct umc_dev *umc_dev)
+{
+ get_device(&umc_dev->dev);
+ return umc_dev;
+}
+
+/**
+ * umc_dev_put() - unreference a UMC device.
+ * @umc_dev: Pointer to UMC device.
+ */
+static inline void umc_dev_put(struct umc_dev *umc_dev)
+{
+ put_device(&umc_dev->dev);
+}
+
+/**
+ * umc_set_drvdata - set UMC device's driver data.
+ * @umc_dev: Pointer to UMC device.
+ * @data: Data to set.
+ */
+static inline void umc_set_drvdata(struct umc_dev *umc_dev, void *data)
+{
+ dev_set_drvdata(&umc_dev->dev, data);
+}
+
+/**
+ * umc_get_drvdata - recover UMC device's driver data.
+ * @umc_dev: Pointer to UMC device.
+ */
+static inline void *umc_get_drvdata(struct umc_dev *umc_dev)
+{
+ return dev_get_drvdata(&umc_dev->dev);
+}
+
+int umc_controller_reset(struct umc_dev *umc);
+
+#endif /* #ifndef _LINUX_UWB_UMC_H_ */
diff --git a/include/linux/uwb/whci.h b/include/linux/uwb/whci.h
new file mode 100644
index 00000000000..915ec23042d
--- /dev/null
+++ b/include/linux/uwb/whci.h
@@ -0,0 +1,117 @@
+/*
+ * Wireless Host Controller Interface for Ultra-Wide-Band and Wireless USB
+ *
+ * Copyright (C) 2005-2006 Intel Corporation
+ * Inaky Perez-Gonzalez <inaky.perez-gonzalez@intel.com>
+ *
+ * This program is free software; you can redistribute it and/or
+ * modify it under the terms of the GNU General Public License version
+ * 2 as published by the Free Software Foundation.
+ *
+ * This program is distributed in the hope that it will be useful,
+ * but WITHOUT ANY WARRANTY; without even the implied warranty of
+ * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
+ * GNU General Public License for more details.
+ *
+ * You should have received a copy of the GNU General Public License
+ * along with this program; if not, write to the Free Software
+ * Foundation, Inc., 51 Franklin Street, Fifth Floor, Boston, MA
+ * 02110-1301, USA.
+ *
+ *
+ *
+ * References:
+ * [WHCI] Wireless Host Controller Interface Specification for
+ * Certified Wireless Universal Serial Bus, revision 0.95.
+ */
+#ifndef _LINUX_UWB_WHCI_H_
+#define _LINUX_UWB_WHCI_H_
+
+#include <linux/pci.h>
+
+/*
+ * UWB interface capability registers (offsets from UWBBASE)
+ *
+ * [WHCI] section 2.2
+ */
+#define UWBCAPINFO 0x00 /* == UWBCAPDATA(0) */
+# define UWBCAPINFO_TO_N_CAPS(c) (((c) >> 0) & 0xFull)
+#define UWBCAPDATA(n) (8*(n))
+# define UWBCAPDATA_TO_VERSION(c) (((c) >> 32) & 0xFFFFull)
+# define UWBCAPDATA_TO_OFFSET(c) (((c) >> 18) & 0x3FFFull)
+# define UWBCAPDATA_TO_BAR(c) (((c) >> 16) & 0x3ull)
+# define UWBCAPDATA_TO_SIZE(c) ((((c) >> 8) & 0xFFull) * sizeof(u32))
+# define UWBCAPDATA_TO_CAP_ID(c) (((c) >> 0) & 0xFFull)
+
+/* Size of the WHCI capability data (including the RC capability) for
+ a device with n capabilities. */
+#define UWBCAPDATA_SIZE(n) (8 + 8*(n))
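
A sketch of decoding one capability slot with these accessors; uwb_base and dev are assumed here to be the mapped UWBBASE register area and the owning struct device, and the le_readq() helper is the one defined further down in this file:

    static void example_dump_cap(struct device *dev, void __iomem *uwb_base)
    {
    	u64 capdata = le_readq(uwb_base + UWBCAPDATA(1));	/* slot 1 */

    	dev_info(dev, "cap 0x%02x: bar %u offset 0x%04x size %zu version 0x%04x\n",
    		 (unsigned int)UWBCAPDATA_TO_CAP_ID(capdata),
    		 (unsigned int)UWBCAPDATA_TO_BAR(capdata),
    		 (unsigned int)UWBCAPDATA_TO_OFFSET(capdata),
    		 (size_t)UWBCAPDATA_TO_SIZE(capdata),
    		 (unsigned int)UWBCAPDATA_TO_VERSION(capdata));
    }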
+
+
+/*
+ * URC registers (offsets from URCBASE)
+ *
+ * [WHCI] section 2.3
+ */
+#define URCCMD 0x00
+# define URCCMD_RESET (1 << 31) /* UMC Hardware reset */
+# define URCCMD_RS (1 << 30) /* Run/Stop */
+# define URCCMD_EARV (1 << 29) /* Event Address Register Valid */
+# define URCCMD_ACTIVE (1 << 15) /* Command is active */
+# define URCCMD_IWR (1 << 14) /* Interrupt When Ready */
+# define URCCMD_SIZE_MASK 0x00000fff /* Command size mask */
+#define URCSTS 0x04
+# define URCSTS_EPS (1 << 17) /* Event Processing Status */
+# define URCSTS_HALTED (1 << 16) /* RC halted */
+# define URCSTS_HSE (1 << 10) /* Host System Error...fried */
+# define URCSTS_ER (1 << 9) /* Event Ready */
+# define URCSTS_RCI (1 << 8) /* Ready for Command Interrupt */
+# define URCSTS_INT_MASK 0x00000700 /* URC interrupt sources */
+# define URCSTS_ISI 0x000000ff /* Interrupt Source Identification */
+#define URCINTR 0x08
+# define URCINTR_EN_ALL 0x000007ff /* Enable all interrupt sources */
+#define URCCMDADDR 0x10
+#define URCEVTADDR 0x18
+# define URCEVTADDR_OFFSET_MASK 0xfff /* Event pointer offset mask */
+
+
+/** Write 32 bit @value to little endian register at @addr */
+static inline
+void le_writel(u32 value, void __iomem *addr)
+{
+ iowrite32(value, addr);
+}
+
+
+/** Read from 32 bit little endian register at @addr */
+static inline
+u32 le_readl(void __iomem *addr)
+{
+ return ioread32(addr);
+}
+
+
+/** Write 64 bit @value to little endian register at @addr */
+static inline
+void le_writeq(u64 value, void __iomem *addr)
+{
+ iowrite32(value, addr);
+ iowrite32(value >> 32, addr + 4);
+}
+
+
+/** Read from 64 bit little endian register at @addr */
+static inline
+u64 le_readq(void __iomem *addr)
+{
+ u64 value;
+ value = ioread32(addr);
+ value |= (u64)ioread32(addr + 4) << 32;
+ return value;
+}
+
+extern int whci_wait_for(struct device *dev, u32 __iomem *reg,
+ u32 mask, u32 result,
+ unsigned long max_ms, const char *tag);
+
+#endif /* #ifndef _LINUX_UWB_WHCI_H_ */
diff --git a/include/linux/videodev2.h b/include/linux/videodev2.h
index d4b03034ee7..1f126e30766 100644
--- a/include/linux/videodev2.h
+++ b/include/linux/videodev2.h
@@ -293,6 +293,7 @@ struct v4l2_pix_format {
#define V4L2_PIX_FMT_YVU420 v4l2_fourcc('Y', 'V', '1', '2') /* 12 YVU 4:2:0 */
#define V4L2_PIX_FMT_YUYV v4l2_fourcc('Y', 'U', 'Y', 'V') /* 16 YUV 4:2:2 */
#define V4L2_PIX_FMT_UYVY v4l2_fourcc('U', 'Y', 'V', 'Y') /* 16 YUV 4:2:2 */
+#define V4L2_PIX_FMT_VYUY v4l2_fourcc('V', 'Y', 'U', 'Y') /* 16 YUV 4:2:2 */
#define V4L2_PIX_FMT_YUV422P v4l2_fourcc('4', '2', '2', 'P') /* 16 YVU422 planar */
#define V4L2_PIX_FMT_YUV411P v4l2_fourcc('4', '1', '1', 'P') /* 16 YVU411 planar */
#define V4L2_PIX_FMT_Y41P v4l2_fourcc('Y', '4', '1', 'P') /* 12 YUV 4:1:1 */
@@ -304,6 +305,8 @@ struct v4l2_pix_format {
/* two planes -- one Y, one Cr + Cb interleaved */
#define V4L2_PIX_FMT_NV12 v4l2_fourcc('N', 'V', '1', '2') /* 12 Y/CbCr 4:2:0 */
#define V4L2_PIX_FMT_NV21 v4l2_fourcc('N', 'V', '2', '1') /* 12 Y/CrCb 4:2:0 */
+#define V4L2_PIX_FMT_NV16 v4l2_fourcc('N', 'V', '1', '6') /* 16 Y/CbCr 4:2:2 */
+#define V4L2_PIX_FMT_NV61 v4l2_fourcc('N', 'V', '6', '1') /* 16 Y/CrCb 4:2:2 */
/* The following formats are not defined in the V4L2 specification */
#define V4L2_PIX_FMT_YUV410 v4l2_fourcc('Y', 'U', 'V', '9') /* 9 YUV 4:1:0 */
@@ -315,6 +318,13 @@ struct v4l2_pix_format {
/* see http://www.siliconimaging.com/RGB%20Bayer.htm */
#define V4L2_PIX_FMT_SBGGR8 v4l2_fourcc('B', 'A', '8', '1') /* 8 BGBG.. GRGR.. */
#define V4L2_PIX_FMT_SGBRG8 v4l2_fourcc('G', 'B', 'R', 'G') /* 8 GBGB.. RGRG.. */
+/*
+ * 10bit raw bayer, expanded to 16 bits
+ * xxxxrrrrrrrrrrxxxxgggggggggg xxxxggggggggggxxxxbbbbbbbbbb...
+ */
+#define V4L2_PIX_FMT_SGRBG10 v4l2_fourcc('B', 'A', '1', '0')
+/* 10bit raw bayer DPCM compressed to 8 bits */
+#define V4L2_PIX_FMT_SGRBG10DPCM8 v4l2_fourcc('B', 'D', '1', '0')
#define V4L2_PIX_FMT_SBGGR16 v4l2_fourcc('B', 'Y', 'R', '2') /* 16 BGBG.. GRGR.. */
/* compressed formats */
@@ -1043,7 +1053,7 @@ enum v4l2_mpeg_video_bitrate_mode {
#define V4L2_CID_MPEG_VIDEO_MUTE (V4L2_CID_MPEG_BASE+210)
#define V4L2_CID_MPEG_VIDEO_MUTE_YUV (V4L2_CID_MPEG_BASE+211)
-/* MPEG-class control IDs specific to the CX2584x driver as defined by V4L2 */
+/* MPEG-class control IDs specific to the CX2341x driver as defined by V4L2 */
#define V4L2_CID_MPEG_CX2341X_BASE (V4L2_CTRL_CLASS_MPEG | 0x1000)
#define V4L2_CID_MPEG_CX2341X_VIDEO_SPATIAL_FILTER_MODE (V4L2_CID_MPEG_CX2341X_BASE+0)
enum v4l2_mpeg_cx2341x_video_spatial_filter_mode {
@@ -1110,6 +1120,12 @@ enum v4l2_exposure_auto_type {
#define V4L2_CID_FOCUS_RELATIVE (V4L2_CID_CAMERA_CLASS_BASE+11)
#define V4L2_CID_FOCUS_AUTO (V4L2_CID_CAMERA_CLASS_BASE+12)
+#define V4L2_CID_ZOOM_ABSOLUTE (V4L2_CID_CAMERA_CLASS_BASE+13)
+#define V4L2_CID_ZOOM_RELATIVE (V4L2_CID_CAMERA_CLASS_BASE+14)
+#define V4L2_CID_ZOOM_CONTINUOUS (V4L2_CID_CAMERA_CLASS_BASE+15)
+
+#define V4L2_CID_PRIVACY (V4L2_CID_CAMERA_CLASS_BASE+16)
+
/*
* T U N I N G
*/
@@ -1362,6 +1378,7 @@ struct v4l2_streamparm {
#define V4L2_CHIP_MATCH_HOST 0 /* Match against chip ID on host (0 for the host) */
#define V4L2_CHIP_MATCH_I2C_DRIVER 1 /* Match against I2C driver ID */
#define V4L2_CHIP_MATCH_I2C_ADDR 2 /* Match against I2C 7-bit address */
+#define V4L2_CHIP_MATCH_AC97       3  /* Match against ancillary AC97 chip */
struct v4l2_register {
__u32 match_type; /* Match type */
@@ -1451,6 +1468,8 @@ struct v4l2_chip_ident {
#define VIDIOC_G_CHIP_IDENT _IOWR('V', 81, struct v4l2_chip_ident)
#endif
#define VIDIOC_S_HW_FREQ_SEEK _IOW('V', 82, struct v4l2_hw_freq_seek)
+/* Reminder: when adding new ioctls please add support for them to
+ drivers/media/video/v4l2-compat-ioctl32.c as well! */
#ifdef __OLD_VIDIOC_
/* for compatibility, will go away some day */
diff --git a/include/linux/virtio_balloon.h b/include/linux/virtio_balloon.h
index c30c7bfbf39..8726ff77763 100644
--- a/include/linux/virtio_balloon.h
+++ b/include/linux/virtio_balloon.h
@@ -10,6 +10,9 @@
/* The feature bitmap for virtio balloon */
#define VIRTIO_BALLOON_F_MUST_TELL_HOST 0 /* Tell before reclaiming pages */
+/* Size of a PFN in the balloon interface. */
+#define VIRTIO_BALLOON_PFN_SHIFT 12
+
struct virtio_balloon_config
{
/* Number of pages host wants Guest to give up. */
diff --git a/include/linux/virtio_console.h b/include/linux/virtio_console.h
index 19a0da0dba4..7615ffcdd55 100644
--- a/include/linux/virtio_console.h
+++ b/include/linux/virtio_console.h
@@ -7,6 +7,17 @@
/* The ID for virtio console */
#define VIRTIO_ID_CONSOLE 3
+/* Feature bits */
+#define VIRTIO_CONSOLE_F_SIZE 0 /* Does host provide console size? */
+
+struct virtio_console_config {
+	/* columns of the screen */
+	__u16 cols;
+	/* rows of the screen */
+ __u16 rows;
+} __attribute__((packed));
+
+
#ifdef __KERNEL__
int __init virtio_cons_early_init(int (*put_chars)(u32, const char *, int));
#endif /* __KERNEL__ */
diff --git a/include/linux/virtio_net.h b/include/linux/virtio_net.h
index 5e33761b9b8..5cdd0aa8bde 100644
--- a/include/linux/virtio_net.h
+++ b/include/linux/virtio_net.h
@@ -20,6 +20,7 @@
#define VIRTIO_NET_F_HOST_TSO6 12 /* Host can handle TSOv6 in. */
#define VIRTIO_NET_F_HOST_ECN 13 /* Host can handle TSO[6] w/ ECN in. */
#define VIRTIO_NET_F_HOST_UFO 14 /* Host can handle UFO in. */
+#define VIRTIO_NET_F_MRG_RXBUF 15 /* Host can merge receive buffers. */
struct virtio_net_config
{
@@ -44,4 +45,12 @@ struct virtio_net_hdr
__u16 csum_start; /* Position to start checksumming from */
__u16 csum_offset; /* Offset after that to place checksum */
};
+
+/* This is the version of the header to use when the MRG_RXBUF
+ * feature has been negotiated. */
+struct virtio_net_hdr_mrg_rxbuf {
+ struct virtio_net_hdr hdr;
+ __u16 num_buffers; /* Number of merged rx buffers */
+};
+
#endif /* _LINUX_VIRTIO_NET_H */
diff --git a/include/linux/virtio_pci.h b/include/linux/virtio_pci.h
index cdef3574293..cd0fd5d181a 100644
--- a/include/linux/virtio_pci.h
+++ b/include/linux/virtio_pci.h
@@ -53,4 +53,12 @@
/* Virtio ABI version, this must match exactly */
#define VIRTIO_PCI_ABI_VERSION 0
+
+/* How many bits to shift physical queue address written to QUEUE_PFN.
+ * 12 is historical, and due to x86 page size. */
+#define VIRTIO_PCI_QUEUE_ADDR_SHIFT 12
+
+/* The alignment to use between consumer and producer parts of vring.
+ * x86 pagesize again. */
+#define VIRTIO_PCI_VRING_ALIGN 4096
#endif
diff --git a/include/linux/virtio_ring.h b/include/linux/virtio_ring.h
index c4a598fb382..71e03722fb5 100644
--- a/include/linux/virtio_ring.h
+++ b/include/linux/virtio_ring.h
@@ -83,7 +83,7 @@ struct vring {
* __u16 avail_idx;
* __u16 available[num];
*
- * // Padding to the next page boundary.
+ * // Padding to the next align boundary.
* char pad[];
*
* // A ring of used descriptor heads with free-running index.
@@ -93,19 +93,19 @@ struct vring {
* };
*/
static inline void vring_init(struct vring *vr, unsigned int num, void *p,
- unsigned long pagesize)
+ unsigned long align)
{
vr->num = num;
vr->desc = p;
vr->avail = p + num*sizeof(struct vring_desc);
- vr->used = (void *)(((unsigned long)&vr->avail->ring[num] + pagesize-1)
- & ~(pagesize - 1));
+ vr->used = (void *)(((unsigned long)&vr->avail->ring[num] + align-1)
+ & ~(align - 1));
}
-static inline unsigned vring_size(unsigned int num, unsigned long pagesize)
+static inline unsigned vring_size(unsigned int num, unsigned long align)
{
return ((sizeof(struct vring_desc) * num + sizeof(__u16) * (2 + num)
- + pagesize - 1) & ~(pagesize - 1))
+ + align - 1) & ~(align - 1))
+ sizeof(__u16) * 2 + sizeof(struct vring_used_elem) * num;
}
@@ -115,6 +115,7 @@ struct virtio_device;
struct virtqueue;
struct virtqueue *vring_new_virtqueue(unsigned int num,
+ unsigned int vring_align,
struct virtio_device *vdev,
void *pages,
void (*notify)(struct virtqueue *vq),
diff --git a/include/linux/vmalloc.h b/include/linux/vmalloc.h
index 328eb402272..307b88577ea 100644
--- a/include/linux/vmalloc.h
+++ b/include/linux/vmalloc.h
@@ -2,6 +2,7 @@
#define _LINUX_VMALLOC_H
#include <linux/spinlock.h>
+#include <linux/init.h>
#include <asm/page.h> /* pgprot_t */
struct vm_area_struct; /* vma defining user mapping in mm_types.h */
@@ -23,7 +24,6 @@ struct vm_area_struct; /* vma defining user mapping in mm_types.h */
#endif
struct vm_struct {
- /* keep next,addr,size together to speedup lookups */
struct vm_struct *next;
void *addr;
unsigned long size;
@@ -37,6 +37,19 @@ struct vm_struct {
/*
* Highlevel APIs for driver use
*/
+extern void vm_unmap_ram(const void *mem, unsigned int count);
+extern void *vm_map_ram(struct page **pages, unsigned int count,
+ int node, pgprot_t prot);
+extern void vm_unmap_aliases(void);
+
+#ifdef CONFIG_MMU
+extern void __init vmalloc_init(void);
+#else
+static inline void vmalloc_init(void)
+{
+}
+#endif
+
extern void *vmalloc(unsigned long size);
extern void *vmalloc_user(unsigned long size);
extern void *vmalloc_node(unsigned long size, int node);
@@ -90,6 +103,4 @@ extern void free_vm_area(struct vm_struct *area);
extern rwlock_t vmlist_lock;
extern struct vm_struct *vmlist;
-extern const struct seq_operations vmalloc_op;
-
#endif /* _LINUX_VMALLOC_H */
diff --git a/include/linux/vmstat.h b/include/linux/vmstat.h
index 58334d43951..524cd1b28ec 100644
--- a/include/linux/vmstat.h
+++ b/include/linux/vmstat.h
@@ -41,13 +41,19 @@ enum vm_event_item { PGPGIN, PGPGOUT, PSWPIN, PSWPOUT,
#ifdef CONFIG_HUGETLB_PAGE
HTLB_BUDDY_PGALLOC, HTLB_BUDDY_PGALLOC_FAIL,
#endif
+#ifdef CONFIG_UNEVICTABLE_LRU
+ UNEVICTABLE_PGCULLED, /* culled to noreclaim list */
+ UNEVICTABLE_PGSCANNED, /* scanned for reclaimability */
+ UNEVICTABLE_PGRESCUED, /* rescued from noreclaim list */
+ UNEVICTABLE_PGMLOCKED,
+ UNEVICTABLE_PGMUNLOCKED,
+ UNEVICTABLE_PGCLEARED, /* on COW, page truncate */
+ UNEVICTABLE_PGSTRANDED, /* unable to isolate on unlock */
+ UNEVICTABLE_MLOCKFREED,
+#endif
NR_VM_EVENT_ITEMS
};
-extern const struct seq_operations fragmentation_op;
-extern const struct seq_operations pagetypeinfo_op;
-extern const struct seq_operations zoneinfo_op;
-extern const struct seq_operations vmstat_op;
extern int sysctl_stat_interval;
#ifdef CONFIG_VM_EVENT_COUNTERS
@@ -159,6 +165,16 @@ static inline unsigned long zone_page_state(struct zone *zone,
return x;
}
+extern unsigned long global_lru_pages(void);
+
+static inline unsigned long zone_lru_pages(struct zone *zone)
+{
+ return (zone_page_state(zone, NR_ACTIVE_ANON)
+ + zone_page_state(zone, NR_ACTIVE_FILE)
+ + zone_page_state(zone, NR_INACTIVE_ANON)
+ + zone_page_state(zone, NR_INACTIVE_FILE));
+}
+
#ifdef CONFIG_NUMA
/*
* Determine the per node value of a stat item. This function
diff --git a/include/linux/wait.h b/include/linux/wait.h
index 0081147a9fe..ef609f842fa 100644
--- a/include/linux/wait.h
+++ b/include/linux/wait.h
@@ -108,15 +108,6 @@ static inline int waitqueue_active(wait_queue_head_t *q)
return !list_empty(&q->task_list);
}
-/*
- * Used to distinguish between sync and async io wait context:
- * sync i/o typically specifies a NULL wait queue entry or a wait
- * queue entry bound to a task (current task) to wake up.
- * aio specifies a wait queue entry with an async notification
- * callback routine, not associated with any task.
- */
-#define is_sync_wait(wait) (!(wait) || ((wait)->private))
-
extern void add_wait_queue(wait_queue_head_t *q, wait_queue_t *wait);
extern void add_wait_queue_exclusive(wait_queue_head_t *q, wait_queue_t *wait);
extern void remove_wait_queue(wait_queue_head_t *q, wait_queue_t *wait);
diff --git a/include/linux/wlp.h b/include/linux/wlp.h
new file mode 100644
index 00000000000..033545e145c
--- /dev/null
+++ b/include/linux/wlp.h
@@ -0,0 +1,735 @@
+/*
+ * WiMedia Logical Link Control Protocol (WLP)
+ *
+ * Copyright (C) 2005-2006 Intel Corporation
+ * Reinette Chatre <reinette.chatre@intel.com>
+ *
+ * This program is free software; you can redistribute it and/or
+ * modify it under the terms of the GNU General Public License version
+ * 2 as published by the Free Software Foundation.
+ *
+ * This program is distributed in the hope that it will be useful,
+ * but WITHOUT ANY WARRANTY; without even the implied warranty of
+ * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
+ * GNU General Public License for more details.
+ *
+ * You should have received a copy of the GNU General Public License
+ * along with this program; if not, write to the Free Software
+ * Foundation, Inc., 51 Franklin Street, Fifth Floor, Boston, MA
+ * 02110-1301, USA.
+ *
+ *
+ * FIXME: docs
+ *
+ * - Does not (yet) include support for WLP control frames
+ * WLP Draft 0.99 [6.5].
+ *
+ * A visual representation of the data structures.
+ *
+ * wssidB wssidB
+ * ^ ^
+ * | |
+ * wssidA wssidA
+ * wlp interface { ^ ^
+ * ... | |
+ * ... ... wssid wssid ...
+ * wlp --- ... | |
+ * }; neighbors --> neighbA --> neighbB
+ * ...
+ * wss
+ * ...
+ * eda cache --> neighborA --> neighborB --> neighborC ...
+ */
+
+#ifndef __LINUX__WLP_H_
+#define __LINUX__WLP_H_
+
+#include <linux/netdevice.h>
+#include <linux/skbuff.h>
+#include <linux/list.h>
+#include <linux/uwb.h>
+
+/**
+ * WLP Protocol ID
+ * WLP Draft 0.99 [6.2]
+ *
+ * The MUX header for all WLP frames
+ */
+#define WLP_PROTOCOL_ID 0x0100
+
+/**
+ * WLP Version
+ * WLP version placed in the association frames (WLP 0.99 [6.6])
+ */
+#define WLP_VERSION 0x10
+
+/**
+ * Bytes needed to print UUID as string
+ */
+#define WLP_WSS_UUID_STRSIZE 48
+
+/**
+ * Bytes needed to print nonce as string
+ */
+#define WLP_WSS_NONCE_STRSIZE 48
+
+
+/**
+ * Size used for WLP name size
+ *
+ * The WSS name is set to 65 bytes, 1 byte larger than the maximum
+ * allowed by the WLP spec. This is to have a null terminated string
+ * for display to the user. A maximum of 64 bytes will still be used
+ * when placing the WSS name field in association frames.
+ */
+#define WLP_WSS_NAME_SIZE 65
+
+/**
+ * Number of bytes added by WLP to data frame
+ *
+ * A data frame transmitted from a host will be placed in a Standard or
+ * Abbreviated WLP frame. These have an extra 4 bytes of header (struct
+ * wlp_frame_std_abbrv_hdr).
+ * When the stack sends this data frame for transmission it needs to ensure
+ * there is enough headroom for this header.
+ */
+#define WLP_DATA_HLEN 4
+
+/**
+ * State of device regarding WLP Service Set
+ *
+ * WLP_WSS_STATE_NONE: the host does not participate in any WSS
+ * WLP_WSS_STATE_PART_ENROLLED: used as part of the enrollment sequence
+ *                          ("Partial Enroll"). This state marks the
+ *                          first, unsecured part of enrollment. If the
+ *                          WSS is unsecure the state promptly moves on
+ *                          to WLP_WSS_STATE_ENROLLED; if the WSS is
+ *                          secure the enrollment procedure takes a few
+ *                          more steps before we are enrolled.
+ * WLP_WSS_STATE_ENROLLED: the host is enrolled in a WSS
+ * WLP_WSS_STATE_ACTIVE: WSS is activated
+ * WLP_WSS_STATE_CONNECTED: host is connected to neighbor in WSS
+ *
+ */
+enum wlp_wss_state {
+ WLP_WSS_STATE_NONE = 0,
+ WLP_WSS_STATE_PART_ENROLLED,
+ WLP_WSS_STATE_ENROLLED,
+ WLP_WSS_STATE_ACTIVE,
+ WLP_WSS_STATE_CONNECTED,
+};
+
+/**
+ * WSS Secure status
+ * WLP 0.99 Table 6
+ *
+ * Set to one if the WSS is secure, zero if it is not secure
+ */
+enum wlp_wss_sec_status {
+ WLP_WSS_UNSECURE = 0,
+ WLP_WSS_SECURE,
+};
+
+/**
+ * WLP frame type
+ * WLP Draft 0.99 [6.2 Table 1]
+ */
+enum wlp_frame_type {
+ WLP_FRAME_STANDARD = 0,
+ WLP_FRAME_ABBREVIATED,
+ WLP_FRAME_CONTROL,
+ WLP_FRAME_ASSOCIATION,
+};
+
+/**
+ * WLP Association Message Type
+ * WLP Draft 0.99 [6.6.1.2 Table 8]
+ */
+enum wlp_assoc_type {
+ WLP_ASSOC_D1 = 2,
+ WLP_ASSOC_D2 = 3,
+ WLP_ASSOC_M1 = 4,
+ WLP_ASSOC_M2 = 5,
+ WLP_ASSOC_M3 = 7,
+ WLP_ASSOC_M4 = 8,
+ WLP_ASSOC_M5 = 9,
+ WLP_ASSOC_M6 = 10,
+ WLP_ASSOC_M7 = 11,
+ WLP_ASSOC_M8 = 12,
+ WLP_ASSOC_F0 = 14,
+ WLP_ASSOC_E1 = 32,
+ WLP_ASSOC_E2 = 33,
+ WLP_ASSOC_C1 = 34,
+ WLP_ASSOC_C2 = 35,
+ WLP_ASSOC_C3 = 36,
+ WLP_ASSOC_C4 = 37,
+};
+
+/**
+ * WLP Attribute Type
+ * WLP Draft 0.99 [6.6.1 Table 6]
+ */
+enum wlp_attr_type {
+ WLP_ATTR_AUTH = 0x1005, /* Authenticator */
+ WLP_ATTR_DEV_NAME = 0x1011, /* Device Name */
+ WLP_ATTR_DEV_PWD_ID = 0x1012, /* Device Password ID */
+ WLP_ATTR_E_HASH1 = 0x1014, /* E-Hash1 */
+ WLP_ATTR_E_HASH2 = 0x1015, /* E-Hash2 */
+ WLP_ATTR_E_SNONCE1 = 0x1016, /* E-SNonce1 */
+ WLP_ATTR_E_SNONCE2 = 0x1017, /* E-SNonce2 */
+ WLP_ATTR_ENCR_SET = 0x1018, /* Encrypted Settings */
+ WLP_ATTR_ENRL_NONCE = 0x101A, /* Enrollee Nonce */
+ WLP_ATTR_KEYWRAP_AUTH = 0x101E, /* Key Wrap Authenticator */
+ WLP_ATTR_MANUF = 0x1021, /* Manufacturer */
+ WLP_ATTR_MSG_TYPE = 0x1022, /* Message Type */
+ WLP_ATTR_MODEL_NAME = 0x1023, /* Model Name */
+ WLP_ATTR_MODEL_NR = 0x1024, /* Model Number */
+ WLP_ATTR_PUB_KEY = 0x1032, /* Public Key */
+ WLP_ATTR_REG_NONCE = 0x1039, /* Registrar Nonce */
+ WLP_ATTR_R_HASH1 = 0x103D, /* R-Hash1 */
+ WLP_ATTR_R_HASH2 = 0x103E, /* R-Hash2 */
+ WLP_ATTR_R_SNONCE1 = 0x103F, /* R-SNonce1 */
+ WLP_ATTR_R_SNONCE2 = 0x1040, /* R-SNonce2 */
+ WLP_ATTR_SERIAL = 0x1042, /* Serial number */
+ WLP_ATTR_UUID_E = 0x1047, /* UUID-E */
+ WLP_ATTR_UUID_R = 0x1048, /* UUID-R */
+ WLP_ATTR_PRI_DEV_TYPE = 0x1054, /* Primary Device Type */
+ WLP_ATTR_SEC_DEV_TYPE = 0x1055, /* Secondary Device Type */
+ WLP_ATTR_PORT_DEV = 0x1056, /* Portable Device */
+ WLP_ATTR_APP_EXT = 0x1058, /* Application Extension */
+ WLP_ATTR_WLP_VER = 0x2000, /* WLP Version */
+ WLP_ATTR_WSSID = 0x2001, /* WSSID */
+ WLP_ATTR_WSS_NAME = 0x2002, /* WSS Name */
+ WLP_ATTR_WSS_SEC_STAT = 0x2003, /* WSS Secure Status */
+ WLP_ATTR_WSS_BCAST = 0x2004, /* WSS Broadcast Address */
+ WLP_ATTR_WSS_M_KEY = 0x2005, /* WSS Master Key */
+ WLP_ATTR_ACC_ENRL = 0x2006, /* Accepting Enrollment */
+ WLP_ATTR_WSS_INFO = 0x2007, /* WSS Information */
+ WLP_ATTR_WSS_SEL_MTHD = 0x2008, /* WSS Selection Method */
+ WLP_ATTR_ASSC_MTHD_LIST = 0x2009, /* Association Methods List */
+ WLP_ATTR_SEL_ASSC_MTHD = 0x200A, /* Selected Association Method */
+ WLP_ATTR_ENRL_HASH_COMM = 0x200B, /* Enrollee Hash Commitment */
+ WLP_ATTR_WSS_TAG = 0x200C, /* WSS Tag */
+ WLP_ATTR_WSS_VIRT = 0x200D, /* WSS Virtual EUI-48 */
+ WLP_ATTR_WLP_ASSC_ERR = 0x200E, /* WLP Association Error */
+ WLP_ATTR_VNDR_EXT = 0x200F, /* Vendor Extension */
+};
+
+/**
+ * WLP Category ID of primary/secondary device
+ * WLP Draft 0.99 [6.6.1.8 Table 12]
+ */
+enum wlp_dev_category_id {
+ WLP_DEV_CAT_COMPUTER = 1,
+ WLP_DEV_CAT_INPUT,
+ WLP_DEV_CAT_PRINT_SCAN_FAX_COPIER,
+ WLP_DEV_CAT_CAMERA,
+ WLP_DEV_CAT_STORAGE,
+ WLP_DEV_CAT_INFRASTRUCTURE,
+ WLP_DEV_CAT_DISPLAY,
+ WLP_DEV_CAT_MULTIM,
+ WLP_DEV_CAT_GAMING,
+ WLP_DEV_CAT_TELEPHONE,
+ WLP_DEV_CAT_OTHER = 65535,
+};
+
+/**
+ * WLP WSS selection method
+ * WLP Draft 0.99 [6.6.1.6 Table 10]
+ */
+enum wlp_wss_sel_mthd {
+ WLP_WSS_ENRL_SELECT = 1, /* Enrollee selects */
+ WLP_WSS_REG_SELECT, /* Registrar selects */
+};
+
+/**
+ * WLP association error values
+ * WLP Draft 0.99 [6.6.1.5 Table 9]
+ */
+enum wlp_assc_error {
+ WLP_ASSOC_ERROR_NONE,
+ WLP_ASSOC_ERROR_AUTH, /* Authenticator Failure */
+ WLP_ASSOC_ERROR_ROGUE, /* Rogue activity suspected */
+ WLP_ASSOC_ERROR_BUSY, /* Device busy */
+ WLP_ASSOC_ERROR_LOCK, /* Setup Locked */
+ WLP_ASSOC_ERROR_NOT_READY, /* Registrar not ready */
+ WLP_ASSOC_ERROR_INV, /* Invalid WSS selection */
+ WLP_ASSOC_ERROR_MSG_TIME, /* Message timeout */
+ WLP_ASSOC_ERROR_ENR_TIME, /* Enrollment session timeout */
+ WLP_ASSOC_ERROR_PW, /* Device password invalid */
+ WLP_ASSOC_ERROR_VER, /* Unsupported version */
+ WLP_ASSOC_ERROR_INT, /* Internal error */
+ WLP_ASSOC_ERROR_UNDEF, /* Undefined error */
+ WLP_ASSOC_ERROR_NUM, /* Numeric comparison failure */
+ WLP_ASSOC_ERROR_WAIT, /* Waiting for user input */
+};
+
+/**
+ * WLP Parameters
+ * WLP 0.99 [7.7]
+ */
+enum wlp_parameters {
+ WLP_PER_MSG_TIMEOUT = 15, /* Seconds to wait for response to
+ association message. */
+};
+
+/**
+ * WLP IE
+ *
+ * The WLP IE should be included in beacons by all devices.
+ *
+ * The driver can set only a few of the fields in this information element;
+ * most fields are managed by the device itself. When the driver needs to
+ * set a field it will only provide values for the fields of interest and
+ * the rest will be filled with zeroes.
+ *
+ * Element ID
+ * Length
+ * Capabilities (only to include WSSID Hash list length)
+ * WSSID Hash List fields
+ *
+ * WLP 0.99 [6.7]
+ *
+ * Only the fields that will be used are detailed in this structure; the
+ * rest are either not detailed or marked as "notused".
+ */
+struct wlp_ie {
+ struct uwb_ie_hdr hdr;
+ __le16 capabilities;
+ __le16 cycle_param;
+ __le16 acw_anchor_addr;
+ u8 wssid_hash_list[];
+} __attribute__((packed));
+
+static inline int wlp_ie_hash_length(struct wlp_ie *ie)
+{
+ return (le16_to_cpu(ie->capabilities) >> 12) & 0xf;
+}
+
+static inline void wlp_ie_set_hash_length(struct wlp_ie *ie, int hash_length)
+{
+ u16 caps = le16_to_cpu(ie->capabilities);
+ caps = (caps & ~(0xf << 12)) | (hash_length << 12);
+ ie->capabilities = cpu_to_le16(caps);
+}
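
As a minimal sketch (editorial, not part of this patch), a driver might fill in the few IE fields it controls like the following. The uwb_ie_hdr field names and the UWB_IE_WLP element-id constant are assumed from linux/uwb.h, and the length convention (bytes following the IE header) is an assumption.

/*
 * Illustrative only: zero the IE (the device manages most fields), then
 * set the header, the hash-list length in the capabilities field and the
 * WSSID hash list itself.
 */
static void example_fill_wlp_ie(struct wlp_ie *ie, const u8 *hashes, int n)
{
	memset(ie, 0, sizeof(*ie));
	ie->hdr.element_id = UWB_IE_WLP;	/* assumed constant */
	ie->hdr.length = sizeof(*ie) - sizeof(struct uwb_ie_hdr) + n;
	wlp_ie_set_hash_length(ie, n);
	memcpy(ie->wssid_hash_list, hashes, n);
}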
+
+/**
+ * WLP nonce
+ * WLP Draft 0.99 [6.6.1 Table 6]
+ *
+ * A 128-bit random number often used (E-SNonce1, E-SNonce2, Enrollee
+ * Nonce, Registrar Nonce, R-SNonce1, R-SNonce2). It is passed to HW so
+ * it is packed.
+ */
+struct wlp_nonce {
+ u8 data[16];
+} __attribute__((packed));
+
+/**
+ * WLP UUID
+ * WLP Draft 0.99 [6.6.1 Table 6]
+ *
+ * Universally Unique Identifier (UUID) encoded as an octet string in the
+ * order the octets are shown in string representation in RFC4122. A UUID
+ * is often used (UUID-E, UUID-R, WSSID). It is passed to HW so it is packed.
+ */
+struct wlp_uuid {
+ u8 data[16];
+} __attribute__((packed));
+
+
+/**
+ * Primary and secondary device type attributes
+ * WLP Draft 0.99 [6.6.1.8]
+ */
+struct wlp_dev_type {
+ enum wlp_dev_category_id category:16;
+ u8 OUI[3];
+ u8 OUIsubdiv;
+ __le16 subID;
+} __attribute__((packed));
+
+/**
+ * WLP frame header
+ * WLP Draft 0.99 [6.2]
+ */
+struct wlp_frame_hdr {
+ __le16 mux_hdr; /* WLP_PROTOCOL_ID */
+ enum wlp_frame_type type:8;
+} __attribute__((packed));
+
+/**
+ * WLP attribute field header
+ * WLP Draft 0.99 [6.6.1]
+ *
+ * Header of each attribute found in an association frame
+ */
+struct wlp_attr_hdr {
+ __le16 type;
+ __le16 length;
+} __attribute__((packed));
+
+/**
+ * Device information commonly used together
+ *
+ * Each of these device information elements has a specified range in which
+ * it should fit (WLP 0.99 [Table 6]). The range given in the spec does not
+ * include the terminating null '\0' character (when used in the
+ * association protocol the attribute fields are accompanied by a "length"
+ * field, so the full range from the spec can be used for the value). We
+ * thus allocate an extra byte to be able to store a string of maximum
+ * length with a terminating '\0'.
+ */
+struct wlp_device_info {
+ char name[33];
+ char model_name[33];
+ char manufacturer[65];
+ char model_nr[33];
+ char serial[33];
+ struct wlp_dev_type prim_dev_type;
+};
+
+/**
+ * Macros for the WLP attributes
+ *
+ * There are quite a few attributes (43 in total). An attribute's layout
+ * falls into one of three categories: a single value, an array, or an
+ * enum forced to 8 bits. These macros help with their definitions.
+ */
+#define wlp_attr(type, name) \
+struct wlp_attr_##name { \
+ struct wlp_attr_hdr hdr; \
+ type name; \
+} __attribute__((packed));
+
+#define wlp_attr_array(type, name) \
+struct wlp_attr_##name { \
+ struct wlp_attr_hdr hdr; \
+ type name[]; \
+} __attribute__((packed));
+
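
For orientation (editorial note, not part of the header): wlp_attr(u8, msg_type) below expands to the packed structure shown here; wlp_attr_array(u8, wss_name) produces the same shape with a flexible array member in place of the single value.

struct wlp_attr_msg_type {
	struct wlp_attr_hdr hdr;
	u8 msg_type;
} __attribute__((packed));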
+/**
+ * WLP association attribute fields
+ * WLP Draft 0.99 [6.6.1 Table 6]
+ *
+ * Attributes appear in same order as the Table in the spec
+ * FIXME Does not define all attributes yet
+ */
+
+/* Device name: Friendly name of sending device */
+wlp_attr_array(u8, dev_name)
+
+/* Enrollee Nonce: Random number generated by enrollee for an enrollment
+ * session */
+wlp_attr(struct wlp_nonce, enonce)
+
+/* Manufacturer name: Name of manufacturer of the sending device */
+wlp_attr_array(u8, manufacturer)
+
+/* WLP Message Type */
+wlp_attr(u8, msg_type)
+
+/* WLP Model name: Model name of sending device */
+wlp_attr_array(u8, model_name)
+
+/* WLP Model number: Model number of sending device */
+wlp_attr_array(u8, model_nr)
+
+/* Registrar Nonce: Random number generated by registrar for an enrollment
+ * session */
+wlp_attr(struct wlp_nonce, rnonce)
+
+/* Serial number of device */
+wlp_attr_array(u8, serial)
+
+/* UUID of enrollee */
+wlp_attr(struct wlp_uuid, uuid_e)
+
+/* UUID of registrar */
+wlp_attr(struct wlp_uuid, uuid_r)
+
+/* WLP Primary device type */
+wlp_attr(struct wlp_dev_type, prim_dev_type)
+
+/* WLP Secondary device type */
+wlp_attr(struct wlp_dev_type, sec_dev_type)
+
+/* WLP protocol version */
+wlp_attr(u8, version)
+
+/* WLP service set identifier */
+wlp_attr(struct wlp_uuid, wssid)
+
+/* WLP WSS name */
+wlp_attr_array(u8, wss_name)
+
+/* WLP WSS Secure Status */
+wlp_attr(u8, wss_sec_status)
+
+/* WSS Broadcast Address */
+wlp_attr(struct uwb_mac_addr, wss_bcast)
+
+/* WLP Accepting Enrollment */
+wlp_attr(u8, accept_enrl)
+
+/**
+ * WSS information attributes
+ * WLP Draft 0.99 [6.6.3 Table 15]
+ */
+struct wlp_wss_info {
+ struct wlp_attr_wssid wssid;
+ struct wlp_attr_wss_name name;
+ struct wlp_attr_accept_enrl accept;
+ struct wlp_attr_wss_sec_status sec_stat;
+ struct wlp_attr_wss_bcast bcast;
+} __attribute__((packed));
+
+/* WLP WSS Information */
+wlp_attr_array(struct wlp_wss_info, wss_info)
+
+/* WLP WSS Selection method */
+wlp_attr(u8, wss_sel_mthd)
+
+/* WLP WSS tag */
+wlp_attr(u8, wss_tag)
+
+/* WSS Virtual Address */
+wlp_attr(struct uwb_mac_addr, wss_virt)
+
+/* WLP association error */
+wlp_attr(u8, wlp_assc_err)
+
+/**
+ * WLP standard and abbreviated frames
+ *
+ * WLP Draft 0.99 [6.3] and [6.4]
+ *
+ * The difference between the WLP standard frame and the WLP
+ * abbreviated frame is that the standard frame includes the src
+ * and dest addresses from the Ethernet header while the abbreviated
+ * frame does not.
+ * The src/dest (as well as the type/length and client data) are already
+ * defined as part of the Ethernet header, so we do not repeat them here.
+ * From this perspective the standard and abbreviated frames appear the
+ * same - they will nevertheless be treated differently.
+ *
+ * The size of this header is also captured in WLP_DATA_HLEN to enable
+ * interfaces to prepare their headroom.
+ */
+struct wlp_frame_std_abbrv_hdr {
+ struct wlp_frame_hdr hdr;
+ u8 tag;
+} __attribute__((packed));
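
To make the headroom requirement concrete, here is a minimal sketch (editorial, not part of this patch) of a transmit path that honours WLP_DATA_HLEN: guarantee writable headroom, then push the standard/abbreviated header. The function name and the tag source are hypothetical.

static int example_wlp_push_hdr(struct sk_buff *skb, u8 wss_tag)
{
	struct wlp_frame_std_abbrv_hdr *hdr;

	/* make sure WLP_DATA_HLEN bytes of writable headroom exist */
	if (skb_cow_head(skb, WLP_DATA_HLEN))
		return -ENOMEM;
	hdr = (struct wlp_frame_std_abbrv_hdr *)__skb_push(skb, WLP_DATA_HLEN);
	hdr->hdr.mux_hdr = cpu_to_le16(WLP_PROTOCOL_ID);
	hdr->hdr.type = WLP_FRAME_STANDARD;
	hdr->tag = wss_tag;		/* would normally come from the WSS */
	return 0;
}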
+
+/**
+ * WLP association frames
+ *
+ * WLP Draft 0.99 [6.6]
+ */
+struct wlp_frame_assoc {
+ struct wlp_frame_hdr hdr;
+ enum wlp_assoc_type type:8;
+ struct wlp_attr_version version;
+ struct wlp_attr_msg_type msg_type;
+ u8 attr[];
+} __attribute__((packed));
+
+/* Ethernet to dev address mapping */
+struct wlp_eda {
+ spinlock_t lock;
+ struct list_head cache; /* Eth<->Dev Addr cache */
+};
+
+/**
+ * WSS information temporary storage
+ *
+ * This information is only stored temporarily during discovery. It should
+ * not be stored unless the device is enrolled in the advertised WSS. This
+ * is done mainly because we follow the letter of the spec in this regard.
+ * See WLP 0.99 [7.2.3].
+ * When the device does become enrolled in a WSS the WSS information will
+ * be stored as part of the more comprehensive struct wlp_wss.
+ */
+struct wlp_wss_tmp_info {
+ char name[WLP_WSS_NAME_SIZE];
+ u8 accept_enroll;
+ u8 sec_status;
+ struct uwb_mac_addr bcast;
+};
+
+struct wlp_wssid_e {
+ struct list_head node;
+ struct wlp_uuid wssid;
+ struct wlp_wss_tmp_info *info;
+};
+
+/**
+ * A cache entry of WLP neighborhood
+ *
+ * @node: head of list is wlp->neighbors
+ * @wssid: list of wssids of this neighbor, element is wlp_wssid_e
+ * @info: temporary storage for information learned during discovery. This
+ * storage is used together with the wssid_e temporary storage
+ * during discovery.
+ */
+struct wlp_neighbor_e {
+ struct list_head node;
+ struct wlp_uuid uuid;
+ struct uwb_dev *uwb_dev;
+ struct list_head wssid; /* Elements are wlp_wssid_e */
+ struct wlp_device_info *info;
+};
+
+struct wlp;
+/**
+ * Information for an association session in progress.
+ *
+ * @exp_message: The type of the expected message. Both this message and
+ *               an F0 message (which can be sent in response to any
+ * association frame) will be accepted as a valid message for
+ * this session.
+ * @cb: The function that will be called upon receipt of this
+ * message.
+ * @cb_priv: Private data of callback
+ * @data: Data used in association process (always a sk_buff?)
+ * @neighbor_addr: Address of the neighbor with which the association
+ *           session is in progress.
+ */
+struct wlp_session {
+ enum wlp_assoc_type exp_message;
+ void (*cb)(struct wlp *);
+ void *cb_priv;
+ void *data;
+ struct uwb_dev_addr neighbor_addr;
+};
+
+/**
+ * WLP Service Set
+ *
+ * @mutex: used to protect entire WSS structure.
+ *
+ * @name: The WSS name is set to 65 bytes, 1 byte larger than the maximum
+ * allowed by the WLP spec. This is to have a null terminated string
+ * for display to the user. A maximum of 64 bytes will still be used
+ * when placing the WSS name field in association frames.
+ *
+ * @accept_enroll: Accepting enrollment: Set to one if registrar is
+ * accepting enrollment in WSS, or zero otherwise.
+ *
+ * Global and local information for each WSS in which we are enrolled.
+ * WLP 0.99 Section 7.2.1 and Section 7.2.2
+ */
+struct wlp_wss {
+ struct mutex mutex;
+ struct kobject kobj;
+ /* Global properties. */
+ struct wlp_uuid wssid;
+ u8 hash;
+ char name[WLP_WSS_NAME_SIZE];
+ struct uwb_mac_addr bcast;
+ u8 secure_status:1;
+ u8 master_key[16];
+ /* Local properties. */
+ u8 tag;
+ struct uwb_mac_addr virtual_addr;
+ /* Extra */
+ u8 accept_enroll:1;
+ enum wlp_wss_state state;
+};
+
+/**
+ * WLP main structure
+ * @mutex: protect changes to WLP structure. We only allow changes to the
+ * uuid, so currently this mutex only protects this field.
+ */
+struct wlp {
+ struct mutex mutex;
+ struct uwb_rc *rc; /* UWB radio controller */
+ struct uwb_pal pal;
+ struct wlp_eda eda;
+ struct wlp_uuid uuid;
+ struct wlp_session *session;
+ struct wlp_wss wss;
+ struct mutex nbmutex; /* Neighbor mutex protects neighbors list */
+ struct list_head neighbors; /* Elements are wlp_neighbor_e */
+ struct uwb_notifs_handler uwb_notifs_handler;
+ struct wlp_device_info *dev_info;
+ void (*fill_device_info)(struct wlp *wlp, struct wlp_device_info *info);
+ int (*xmit_frame)(struct wlp *, struct sk_buff *,
+ struct uwb_dev_addr *);
+ void (*stop_queue)(struct wlp *);
+ void (*start_queue)(struct wlp *);
+};
+
+/* sysfs */
+
+
+struct wlp_wss_attribute {
+ struct attribute attr;
+ ssize_t (*show)(struct wlp_wss *wss, char *buf);
+ ssize_t (*store)(struct wlp_wss *wss, const char *buf, size_t count);
+};
+
+#define WSS_ATTR(_name, _mode, _show, _store) \
+static struct wlp_wss_attribute wss_attr_##_name = __ATTR(_name, _mode, \
+ _show, _store)
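
For illustration only (the real attribute definitions live in the WLP driver, not in this header), a read-only sysfs attribute could be declared as below; the show helper is a hypothetical name.

static ssize_t example_wss_name_show(struct wlp_wss *wss, char *buf)
{
	return scnprintf(buf, PAGE_SIZE, "%s\n", wss->name);
}

WSS_ATTR(name, 0444, example_wss_name_show, NULL);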
+
+extern int wlp_setup(struct wlp *, struct uwb_rc *);
+extern void wlp_remove(struct wlp *);
+extern ssize_t wlp_neighborhood_show(struct wlp *, char *);
+extern int wlp_wss_setup(struct net_device *, struct wlp_wss *);
+extern void wlp_wss_remove(struct wlp_wss *);
+extern ssize_t wlp_wss_activate_show(struct wlp_wss *, char *);
+extern ssize_t wlp_wss_activate_store(struct wlp_wss *, const char *, size_t);
+extern ssize_t wlp_eda_show(struct wlp *, char *);
+extern ssize_t wlp_eda_store(struct wlp *, const char *, size_t);
+extern ssize_t wlp_uuid_show(struct wlp *, char *);
+extern ssize_t wlp_uuid_store(struct wlp *, const char *, size_t);
+extern ssize_t wlp_dev_name_show(struct wlp *, char *);
+extern ssize_t wlp_dev_name_store(struct wlp *, const char *, size_t);
+extern ssize_t wlp_dev_manufacturer_show(struct wlp *, char *);
+extern ssize_t wlp_dev_manufacturer_store(struct wlp *, const char *, size_t);
+extern ssize_t wlp_dev_model_name_show(struct wlp *, char *);
+extern ssize_t wlp_dev_model_name_store(struct wlp *, const char *, size_t);
+extern ssize_t wlp_dev_model_nr_show(struct wlp *, char *);
+extern ssize_t wlp_dev_model_nr_store(struct wlp *, const char *, size_t);
+extern ssize_t wlp_dev_serial_show(struct wlp *, char *);
+extern ssize_t wlp_dev_serial_store(struct wlp *, const char *, size_t);
+extern ssize_t wlp_dev_prim_category_show(struct wlp *, char *);
+extern ssize_t wlp_dev_prim_category_store(struct wlp *, const char *,
+ size_t);
+extern ssize_t wlp_dev_prim_OUI_show(struct wlp *, char *);
+extern ssize_t wlp_dev_prim_OUI_store(struct wlp *, const char *, size_t);
+extern ssize_t wlp_dev_prim_OUI_sub_show(struct wlp *, char *);
+extern ssize_t wlp_dev_prim_OUI_sub_store(struct wlp *, const char *,
+ size_t);
+extern ssize_t wlp_dev_prim_subcat_show(struct wlp *, char *);
+extern ssize_t wlp_dev_prim_subcat_store(struct wlp *, const char *,
+ size_t);
+extern int wlp_receive_frame(struct device *, struct wlp *, struct sk_buff *,
+ struct uwb_dev_addr *);
+extern int wlp_prepare_tx_frame(struct device *, struct wlp *,
+ struct sk_buff *, struct uwb_dev_addr *);
+void wlp_reset_all(struct wlp *wlp);
+
+/**
+ * Initialize WSS
+ */
+static inline
+void wlp_wss_init(struct wlp_wss *wss)
+{
+ mutex_init(&wss->mutex);
+}
+
+static inline
+void wlp_init(struct wlp *wlp)
+{
+ INIT_LIST_HEAD(&wlp->neighbors);
+ mutex_init(&wlp->mutex);
+ mutex_init(&wlp->nbmutex);
+ wlp_wss_init(&wlp->wss);
+}
+
+
+#endif /* #ifndef __LINUX__WLP_H_ */
diff --git a/include/linux/workqueue.h b/include/linux/workqueue.h
index 5c158c477ac..b36291130f2 100644
--- a/include/linux/workqueue.h
+++ b/include/linux/workqueue.h
@@ -149,11 +149,11 @@ struct execute_work {
extern struct workqueue_struct *
__create_workqueue_key(const char *name, int singlethread,
- int freezeable, struct lock_class_key *key,
+ int freezeable, int rt, struct lock_class_key *key,
const char *lock_name);
#ifdef CONFIG_LOCKDEP
-#define __create_workqueue(name, singlethread, freezeable) \
+#define __create_workqueue(name, singlethread, freezeable, rt) \
({ \
static struct lock_class_key __key; \
const char *__lock_name; \
@@ -164,17 +164,19 @@ __create_workqueue_key(const char *name, int singlethread,
__lock_name = #name; \
\
__create_workqueue_key((name), (singlethread), \
- (freezeable), &__key, \
+ (freezeable), (rt), &__key, \
__lock_name); \
})
#else
-#define __create_workqueue(name, singlethread, freezeable) \
- __create_workqueue_key((name), (singlethread), (freezeable), NULL, NULL)
+#define __create_workqueue(name, singlethread, freezeable, rt) \
+ __create_workqueue_key((name), (singlethread), (freezeable), (rt), \
+ NULL, NULL)
#endif
-#define create_workqueue(name) __create_workqueue((name), 0, 0)
-#define create_freezeable_workqueue(name) __create_workqueue((name), 1, 1)
-#define create_singlethread_workqueue(name) __create_workqueue((name), 1, 0)
+#define create_workqueue(name) __create_workqueue((name), 0, 0, 0)
+#define create_rt_workqueue(name) __create_workqueue((name), 0, 0, 1)
+#define create_freezeable_workqueue(name) __create_workqueue((name), 1, 1, 0)
+#define create_singlethread_workqueue(name) __create_workqueue((name), 1, 0, 0)
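
A brief usage sketch of the new variant (assumed caller, not part of this patch): create_rt_workqueue() creates a workqueue whose worker threads run with real-time priority, so queued work is not starved behind normal tasks.

static struct workqueue_struct *example_rt_wq;

static int example_init(void)
{
	example_rt_wq = create_rt_workqueue("example_rt");
	if (!example_rt_wq)
		return -ENOMEM;
	return 0;
}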
extern void destroy_workqueue(struct workqueue_struct *wq);
@@ -238,4 +240,12 @@ void cancel_rearming_delayed_work(struct delayed_work *work)
cancel_delayed_work_sync(work);
}
+#ifndef CONFIG_SMP
+static inline long work_on_cpu(unsigned int cpu, long (*fn)(void *), void *arg)
+{
+ return fn(arg);
+}
+#else
+long work_on_cpu(unsigned int cpu, long (*fn)(void *), void *arg);
+#endif /* CONFIG_SMP */
#endif
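
And a sketch of an assumed caller of work_on_cpu() (not part of this patch): on SMP builds the callback runs on the requested CPU; on uniprocessor builds it simply runs inline, as the stub above shows.

static long example_read_on_cpu(void *arg)
{
	/* stand-in for work that must run on a particular CPU */
	*(unsigned long *)arg = jiffies;
	return 0;
}

static long example_use(unsigned int cpu)
{
	unsigned long stamp;

	return work_on_cpu(cpu, example_read_on_cpu, &stamp);
}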
diff --git a/include/linux/writeback.h b/include/linux/writeback.h
index 12b15c561a1..e585657e983 100644
--- a/include/linux/writeback.h
+++ b/include/linux/writeback.h
@@ -63,7 +63,15 @@ struct writeback_control {
unsigned for_writepages:1; /* This is a writepages() call */
unsigned range_cyclic:1; /* range_start is cyclic */
unsigned more_io:1; /* more io to be dispatched */
- unsigned range_cont:1;
+	/*
+	 * write_cache_pages() won't update wbc->nr_to_write and
+	 * mapping->writeback_index if no_nrwrite_index_update is set.
+	 * write_cache_pages() may write more than we requested, and we
+	 * want nr_to_write and writeback_index to be updated in a
+	 * consistent manner, so we use a single control to update them.
+	 */
+ unsigned no_nrwrite_index_update:1;
};
/*
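
To make the new flag concrete, here is a sketch of an assumed filesystem ->writepages implementation (editorial, not part of this patch) that opts out of the automatic updates; the helper names are hypothetical and the writepage callback is a trivial pass-through.

static int example_writepage(struct page *page,
			     struct writeback_control *wbc, void *data)
{
	struct address_space *mapping = data;

	return mapping->a_ops->writepage(page, wbc);
}

static int example_writepages(struct address_space *mapping,
			      struct writeback_control *wbc)
{
	int ret;

	/* keep write_cache_pages() from touching the two fields */
	wbc->no_nrwrite_index_update = 1;
	ret = write_cache_pages(mapping, wbc, example_writepage, mapping);
	/*
	 * The caller is now responsible for updating wbc->nr_to_write and
	 * mapping->writeback_index itself, in one consistent step.
	 */
	return ret;
}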
diff --git a/include/linux/xfrm.h b/include/linux/xfrm.h
index 4bc1e6b86cb..52f3abd453a 100644
--- a/include/linux/xfrm.h
+++ b/include/linux/xfrm.h
@@ -199,6 +199,9 @@ enum {
#define XFRM_MSG_NEWSPDINFO XFRM_MSG_NEWSPDINFO
XFRM_MSG_GETSPDINFO,
#define XFRM_MSG_GETSPDINFO XFRM_MSG_GETSPDINFO
+
+ XFRM_MSG_MAPPING,
+#define XFRM_MSG_MAPPING XFRM_MSG_MAPPING
__XFRM_MSG_MAX
};
#define XFRM_MSG_MAX (__XFRM_MSG_MAX - 1)
@@ -438,6 +441,15 @@ struct xfrm_user_migrate {
__u16 new_family;
};
+struct xfrm_user_mapping {
+ struct xfrm_usersa_id id;
+ __u32 reqid;
+ xfrm_address_t old_saddr;
+ xfrm_address_t new_saddr;
+ __be16 old_sport;
+ __be16 new_sport;
+};
+
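
A short userspace-side sketch (editorial, not part of this patch) of consuming the new notification: a listener subscribed to XFRMNLGRP_MAPPING receives XFRM_MSG_MAPPING messages whose payload is struct xfrm_user_mapping, reporting that traffic for an SA now arrives from a new source address/port. The handler name is hypothetical; plain netlink macros are assumed, no library.

#include <stdio.h>
#include <arpa/inet.h>
#include <linux/netlink.h>
#include <linux/xfrm.h>

static void example_handle_mapping(struct nlmsghdr *nlh)
{
	struct xfrm_user_mapping *um;

	if (nlh->nlmsg_type != XFRM_MSG_MAPPING)
		return;
	um = NLMSG_DATA(nlh);
	printf("SA spi 0x%x: peer moved to port %u\n",
	       ntohl(um->id.spi), ntohs(um->new_sport));
}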
#ifndef __KERNEL__
/* backwards compatibility for userspace */
#define XFRMGRP_ACQUIRE 1
@@ -464,6 +476,8 @@ enum xfrm_nlgroups {
#define XFRMNLGRP_REPORT XFRMNLGRP_REPORT
XFRMNLGRP_MIGRATE,
#define XFRMNLGRP_MIGRATE XFRMNLGRP_MIGRATE
+ XFRMNLGRP_MAPPING,
+#define XFRMNLGRP_MAPPING XFRMNLGRP_MAPPING
__XFRMNLGRP_MAX
};
#define XFRMNLGRP_MAX (__XFRMNLGRP_MAX - 1)