Diffstat (limited to 'include/linux')
709 files changed, 25093 insertions, 9201 deletions
diff --git a/include/linux/Kbuild b/include/linux/Kbuild index 12e9a2957ca..334a3593cdf 100644 --- a/include/linux/Kbuild +++ b/include/linux/Kbuild @@ -41,6 +41,7 @@ header-y += baycom.h header-y += bfs_fs.h header-y += blkpg.h header-y += bpqether.h +header-y += bsg.h header-y += can.h header-y += cdk.h header-y += chio.h @@ -51,10 +52,12 @@ header-y += const.h header-y += cgroupstats.h header-y += cramfs_fs.h header-y += cycx_cfm.h +header-y += dcbnl.h header-y += dlmconstants.h header-y += dlm_device.h header-y += dlm_netlink.h header-y += dm-ioctl.h +header-y += dm-log-userspace.h header-y += dn.h header-y += dqblk_xfs.h header-y += efs_fs_sb.h @@ -65,6 +68,7 @@ header-y += falloc.h header-y += fd.h header-y += fdreg.h header-y += fib_rules.h +header-y += fiemap.h header-y += firewire-cdev.h header-y += firewire-constants.h header-y += fuse.h @@ -89,7 +93,6 @@ header-y += if_ppp.h header-y += if_slip.h header-y += if_strip.h header-y += if_tun.h -header-y += if_tunnel.h header-y += in_route.h header-y += ioctl.h header-y += ip6_tunnel.h @@ -114,6 +117,7 @@ header-y += mqueue.h header-y += mtio.h header-y += ncp_no.h header-y += neighbour.h +header-y += net_dropmon.h header-y += netfilter_arp.h header-y += netrom.h header-y += nfs2.h @@ -132,9 +136,11 @@ header-y += posix_types.h header-y += ppdev.h header-y += prctl.h header-y += qnxtypes.h +header-y += qnx4_fs.h header-y += radeonfb.h header-y += raw.h header-y += resource.h +header-y += romfs_fs.h header-y += rose.h header-y += serial_reg.h header-y += smbno.h @@ -156,8 +162,6 @@ header-y += ultrasound.h header-y += un.h header-y += utime.h header-y += veth.h -header-y += video_decoder.h -header-y += video_encoder.h header-y += videotext.h header-y += x25.h @@ -235,6 +239,7 @@ unifdef-y += if_phonet.h unifdef-y += if_pppol2tp.h unifdef-y += if_pppox.h unifdef-y += if_tr.h +unifdef-y += if_tunnel.h unifdef-y += if_vlan.h unifdef-y += igmp.h unifdef-y += inet_diag.h @@ -304,15 +309,15 @@ unifdef-y += pmu.h unifdef-y += poll.h unifdef-y += ppp_defs.h unifdef-y += ppp-comp.h +unifdef-y += pps.h unifdef-y += ptrace.h -unifdef-y += qnx4_fs.h unifdef-y += quota.h unifdef-y += random.h +unifdef-y += rfkill.h unifdef-y += irqnr.h unifdef-y += reboot.h unifdef-y += reiserfs_fs.h unifdef-y += reiserfs_xattr.h -unifdef-y += romfs_fs.h unifdef-y += route.h unifdef-y += rtc.h unifdef-y += rtnetlink.h diff --git a/include/linux/acpi.h b/include/linux/acpi.h index 6fce2fc2d12..34321cfffea 100644 --- a/include/linux/acpi.h +++ b/include/linux/acpi.h @@ -79,6 +79,7 @@ typedef int (*acpi_table_handler) (struct acpi_table_header *table); typedef int (*acpi_table_entry_handler) (struct acpi_subtable_header *header, const unsigned long end); char * __acpi_map_table (unsigned long phys_addr, unsigned long size); +void __acpi_unmap_table(char *map, unsigned long size); int early_acpi_boot_init(void); int acpi_boot_init (void); int acpi_boot_table_init (void); @@ -96,6 +97,7 @@ void acpi_table_print_madt_entry (struct acpi_subtable_header *madt); /* the following four functions are architecture-dependent */ void acpi_numa_slit_init (struct acpi_table_slit *slit); void acpi_numa_processor_affinity_init (struct acpi_srat_cpu_affinity *pa); +void acpi_numa_x2apic_affinity_init(struct acpi_srat_x2apic_cpu_affinity *pa); void acpi_numa_memory_affinity_init (struct acpi_srat_mem_affinity *ma); void acpi_numa_arch_fixup(void); @@ -109,14 +111,12 @@ int acpi_register_ioapic(acpi_handle handle, u64 phys_addr, u32 gsi_base); int acpi_unregister_ioapic(acpi_handle 
handle, u32 gsi_base); void acpi_irq_stats_init(void); extern u32 acpi_irq_handled; - -extern struct acpi_mcfg_allocation *pci_mmcfg_config; -extern int pci_mmcfg_config_num; +extern u32 acpi_irq_not_handled; extern int sbf_port; extern unsigned long acpi_realmode_flags; -int acpi_register_gsi (u32 gsi, int triggering, int polarity); +int acpi_register_gsi (struct device *dev, u32 gsi, int triggering, int polarity); int acpi_gsi_to_irq (u32 gsi, unsigned int *irq); #ifdef CONFIG_X86_IO_APIC @@ -256,7 +256,44 @@ void __init acpi_no_s4_hw_signature(void); void __init acpi_old_suspend_ordering(void); void __init acpi_s4_no_nvs(void); #endif /* CONFIG_PM_SLEEP */ + +#define OSC_QUERY_TYPE 0 +#define OSC_SUPPORT_TYPE 1 +#define OSC_CONTROL_TYPE 2 +#define OSC_SUPPORT_MASKS 0x1f + +/* _OSC DW0 Definition */ +#define OSC_QUERY_ENABLE 1 +#define OSC_REQUEST_ERROR 2 +#define OSC_INVALID_UUID_ERROR 4 +#define OSC_INVALID_REVISION_ERROR 8 +#define OSC_CAPABILITIES_MASK_ERROR 16 + +/* _OSC DW1 Definition (OS Support Fields) */ +#define OSC_EXT_PCI_CONFIG_SUPPORT 1 +#define OSC_ACTIVE_STATE_PWR_SUPPORT 2 +#define OSC_CLOCK_PWR_CAPABILITY_SUPPORT 4 +#define OSC_PCI_SEGMENT_GROUPS_SUPPORT 8 +#define OSC_MSI_SUPPORT 16 + +/* _OSC DW1 Definition (OS Control Fields) */ +#define OSC_PCI_EXPRESS_NATIVE_HP_CONTROL 1 +#define OSC_SHPC_NATIVE_HP_CONTROL 2 +#define OSC_PCI_EXPRESS_PME_CONTROL 4 +#define OSC_PCI_EXPRESS_AER_CONTROL 8 +#define OSC_PCI_EXPRESS_CAP_STRUCTURE_CONTROL 16 + +#define OSC_CONTROL_MASKS (OSC_PCI_EXPRESS_NATIVE_HP_CONTROL | \ + OSC_SHPC_NATIVE_HP_CONTROL | \ + OSC_PCI_EXPRESS_PME_CONTROL | \ + OSC_PCI_EXPRESS_AER_CONTROL | \ + OSC_PCI_EXPRESS_CAP_STRUCTURE_CONTROL) + +extern acpi_status acpi_pci_osc_control_set(acpi_handle handle, u32 flags); +extern void acpi_early_init(void); + #else /* CONFIG_ACPI */ +static inline void acpi_early_init(void) { } static inline int early_acpi_boot_init(void) { diff --git a/include/linux/adfs_fs.h b/include/linux/adfs_fs.h index ef788c2085a..b19801f7389 100644 --- a/include/linux/adfs_fs.h +++ b/include/linux/adfs_fs.h @@ -41,8 +41,6 @@ struct adfs_discrecord { #define ADFS_DR_SIZE_BITS (ADFS_DR_SIZE << 3) #ifdef __KERNEL__ -#include <linux/adfs_fs_i.h> -#include <linux/adfs_fs_sb.h> /* * Calculate the boot block checksum on an ADFS drive. 
Note that this will * appear to be correct if the sector contains all zeros, so also check that @@ -60,17 +58,6 @@ static inline int adfs_checkbblk(unsigned char *ptr) return (result & 0xff) != ptr[511]; } - -static inline struct adfs_sb_info *ADFS_SB(struct super_block *sb) -{ - return sb->s_fs_info; -} - -static inline struct adfs_inode_info *ADFS_I(struct inode *inode) -{ - return container_of(inode, struct adfs_inode_info, vfs_inode); -} - #endif #endif diff --git a/include/linux/adfs_fs_i.h b/include/linux/adfs_fs_i.h deleted file mode 100644 index cb543034e54..00000000000 --- a/include/linux/adfs_fs_i.h +++ /dev/null @@ -1,24 +0,0 @@ -/* - * linux/include/linux/adfs_fs_i.h - * - * Copyright (C) 1997 Russell King - */ - -#ifndef _ADFS_FS_I -#define _ADFS_FS_I - -/* - * adfs file system inode data in memory - */ -struct adfs_inode_info { - loff_t mmu_private; - unsigned long parent_id; /* object id of parent */ - __u32 loadaddr; /* RISC OS load address */ - __u32 execaddr; /* RISC OS exec address */ - unsigned int filetype; /* RISC OS file type */ - unsigned int attr; /* RISC OS permissions */ - unsigned int stamped:1; /* RISC OS file has date/time */ - struct inode vfs_inode; -}; - -#endif diff --git a/include/linux/adfs_fs_sb.h b/include/linux/adfs_fs_sb.h deleted file mode 100644 index d9bf05c02cc..00000000000 --- a/include/linux/adfs_fs_sb.h +++ /dev/null @@ -1,38 +0,0 @@ -/* - * linux/include/linux/adfs_fs_sb.h - * - * Copyright (C) 1997-1999 Russell King - */ - -#ifndef _ADFS_FS_SB -#define _ADFS_FS_SB - -/* - * Forward-declare this - */ -struct adfs_discmap; -struct adfs_dir_ops; - -/* - * ADFS file system superblock data in memory - */ -struct adfs_sb_info { - struct adfs_discmap *s_map; /* bh list containing map */ - struct adfs_dir_ops *s_dir; /* directory operations */ - - uid_t s_uid; /* owner uid */ - gid_t s_gid; /* owner gid */ - umode_t s_owner_mask; /* ADFS owner perm -> unix perm */ - umode_t s_other_mask; /* ADFS other perm -> unix perm */ - - __u32 s_ids_per_zone; /* max. no ids in one zone */ - __u32 s_idlen; /* length of ID in map */ - __u32 s_map_size; /* sector size of a map */ - unsigned long s_size; /* total size (in blocks) of this fs */ - signed int s_map2blk; /* shift left by this for map->sector */ - unsigned int s_log2sharesize;/* log2 share size */ - __le32 s_version; /* disc format version */ - unsigned int s_namelen; /* maximum number of characters in name */ -}; - -#endif diff --git a/include/linux/agp_backend.h b/include/linux/agp_backend.h index 2b8df8b420f..76fa794fdac 100644 --- a/include/linux/agp_backend.h +++ b/include/linux/agp_backend.h @@ -70,7 +70,7 @@ struct agp_memory { struct agp_memory *next; struct agp_memory *prev; struct agp_bridge_data *bridge; - unsigned long *memory; + struct page **pages; size_t page_count; int key; int num_scratch_pages; diff --git a/include/linux/agpgart.h b/include/linux/agpgart.h index 110c600c885..f6778eceb8f 100644 --- a/include/linux/agpgart.h +++ b/include/linux/agpgart.h @@ -77,20 +77,20 @@ typedef struct _agp_setup { * The "prot" down below needs still a "sleep" flag somehow ... 
*/ typedef struct _agp_segment { - off_t pg_start; /* starting page to populate */ - size_t pg_count; /* number of pages */ - int prot; /* prot flags for mmap */ + __kernel_off_t pg_start; /* starting page to populate */ + __kernel_size_t pg_count; /* number of pages */ + int prot; /* prot flags for mmap */ } agp_segment; typedef struct _agp_region { - pid_t pid; /* pid of process */ - size_t seg_count; /* number of segments */ + __kernel_pid_t pid; /* pid of process */ + __kernel_size_t seg_count; /* number of segments */ struct _agp_segment *seg_list; } agp_region; typedef struct _agp_allocate { int key; /* tag of allocation */ - size_t pg_count; /* number of pages */ + __kernel_size_t pg_count;/* number of pages */ __u32 type; /* 0 == normal, other devspec */ __u32 physical; /* device specific (some devices * need a phys address of the @@ -100,7 +100,7 @@ typedef struct _agp_allocate { typedef struct _agp_bind { int key; /* tag of allocation */ - off_t pg_start; /* starting page to populate */ + __kernel_off_t pg_start;/* starting page to populate */ } agp_bind; typedef struct _agp_unbind { diff --git a/include/linux/aio.h b/include/linux/aio.h index b16a957030f..47f7d932a01 100644 --- a/include/linux/aio.h +++ b/include/linux/aio.h @@ -121,9 +121,9 @@ struct kiocb { /* * If the aio_resfd field of the userspace iocb is not zero, - * this is the underlying file* to deliver event to. + * this is the underlying eventfd context to deliver events to. */ - struct file *ki_eventfd; + struct eventfd_ctx *ki_eventfd; }; #define is_sync_kiocb(iocb) ((iocb)->ki_key == KIOCB_SYNC_KEY) diff --git a/include/linux/aio_abi.h b/include/linux/aio_abi.h index 9e017293131..2c873166418 100644 --- a/include/linux/aio_abi.h +++ b/include/linux/aio_abi.h @@ -27,6 +27,7 @@ #ifndef __LINUX__AIO_ABI_H #define __LINUX__AIO_ABI_H +#include <linux/types.h> #include <asm/byteorder.h> typedef unsigned long aio_context_t; diff --git a/include/linux/amba/bus.h b/include/linux/amba/bus.h index 51e6e54b2aa..9b93cafa82a 100644 --- a/include/linux/amba/bus.h +++ b/include/linux/amba/bus.h @@ -28,7 +28,7 @@ struct amba_id { struct amba_driver { struct device_driver drv; - int (*probe)(struct amba_device *, void *); + int (*probe)(struct amba_device *, struct amba_id *); int (*remove)(struct amba_device *); void (*shutdown)(struct amba_device *); int (*suspend)(struct amba_device *, pm_message_t); diff --git a/include/linux/amba/pl022.h b/include/linux/amba/pl022.h new file mode 100644 index 00000000000..dcad0ffd175 --- /dev/null +++ b/include/linux/amba/pl022.h @@ -0,0 +1,264 @@ +/* + * include/linux/amba/pl022.h + * + * Copyright (C) 2008-2009 ST-Ericsson AB + * Copyright (C) 2006 STMicroelectronics Pvt. Ltd. + * + * Author: Linus Walleij <linus.walleij@stericsson.com> + * + * Initial version inspired by: + * linux-2.6.17-rc3-mm1/drivers/spi/pxa2xx_spi.c + * Initial adoption to PL022 by: + * Sachin Verma <sachin.verma@st.com> + * + * This program is free software; you can redistribute it and/or modify + * it under the terms of the GNU General Public License as published by + * the Free Software Foundation; either version 2 of the License, or + * (at your option) any later version. + * + * This program is distributed in the hope that it will be useful, + * but WITHOUT ANY WARRANTY; without even the implied warranty of + * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the + * GNU General Public License for more details. 
+ */ + +#ifndef _SSP_PL022_H +#define _SSP_PL022_H + +#include <linux/device.h> + +/** + * whether SSP is in loopback mode or not + */ +enum ssp_loopback { + LOOPBACK_DISABLED, + LOOPBACK_ENABLED +}; + +/** + * enum ssp_interface - interfaces allowed for this SSP Controller + * @SSP_INTERFACE_MOTOROLA_SPI: Motorola Interface + * @SSP_INTERFACE_TI_SYNC_SERIAL: Texas Instrument Synchronous Serial + * interface + * @SSP_INTERFACE_NATIONAL_MICROWIRE: National Semiconductor Microwire + * interface + * @SSP_INTERFACE_UNIDIRECTIONAL: Unidirectional interface (STn8810 + * &STn8815 only) + */ +enum ssp_interface { + SSP_INTERFACE_MOTOROLA_SPI, + SSP_INTERFACE_TI_SYNC_SERIAL, + SSP_INTERFACE_NATIONAL_MICROWIRE, + SSP_INTERFACE_UNIDIRECTIONAL +}; + +/** + * enum ssp_hierarchy - whether SSP is configured as Master or Slave + */ +enum ssp_hierarchy { + SSP_MASTER, + SSP_SLAVE +}; + +/** + * enum ssp_clock_params - clock parameters, to set SSP clock at a + * desired freq + */ +struct ssp_clock_params { + u8 cpsdvsr; /* value from 2 to 254 (even only!) */ + u8 scr; /* value from 0 to 255 */ +}; + +/** + * enum ssp_rx_endian - endianess of Rx FIFO Data + */ +enum ssp_rx_endian { + SSP_RX_MSB, + SSP_RX_LSB +}; + +/** + * enum ssp_tx_endian - endianess of Tx FIFO Data + */ +enum ssp_tx_endian { + SSP_TX_MSB, + SSP_TX_LSB +}; + +/** + * enum ssp_data_size - number of bits in one data element + */ +enum ssp_data_size { + SSP_DATA_BITS_4 = 0x03, SSP_DATA_BITS_5, SSP_DATA_BITS_6, + SSP_DATA_BITS_7, SSP_DATA_BITS_8, SSP_DATA_BITS_9, + SSP_DATA_BITS_10, SSP_DATA_BITS_11, SSP_DATA_BITS_12, + SSP_DATA_BITS_13, SSP_DATA_BITS_14, SSP_DATA_BITS_15, + SSP_DATA_BITS_16, SSP_DATA_BITS_17, SSP_DATA_BITS_18, + SSP_DATA_BITS_19, SSP_DATA_BITS_20, SSP_DATA_BITS_21, + SSP_DATA_BITS_22, SSP_DATA_BITS_23, SSP_DATA_BITS_24, + SSP_DATA_BITS_25, SSP_DATA_BITS_26, SSP_DATA_BITS_27, + SSP_DATA_BITS_28, SSP_DATA_BITS_29, SSP_DATA_BITS_30, + SSP_DATA_BITS_31, SSP_DATA_BITS_32 +}; + +/** + * enum ssp_mode - SSP mode of operation (Communication modes) + */ +enum ssp_mode { + INTERRUPT_TRANSFER, + POLLING_TRANSFER, + DMA_TRANSFER +}; + +/** + * enum ssp_rx_level_trig - receive FIFO watermark level which triggers + * IT: Interrupt fires when _N_ or more elements in RX FIFO. 
+ */ +enum ssp_rx_level_trig { + SSP_RX_1_OR_MORE_ELEM, + SSP_RX_4_OR_MORE_ELEM, + SSP_RX_8_OR_MORE_ELEM, + SSP_RX_16_OR_MORE_ELEM, + SSP_RX_32_OR_MORE_ELEM +}; + +/** + * Transmit FIFO watermark level which triggers (IT Interrupt fires + * when _N_ or more empty locations in TX FIFO) + */ +enum ssp_tx_level_trig { + SSP_TX_1_OR_MORE_EMPTY_LOC, + SSP_TX_4_OR_MORE_EMPTY_LOC, + SSP_TX_8_OR_MORE_EMPTY_LOC, + SSP_TX_16_OR_MORE_EMPTY_LOC, + SSP_TX_32_OR_MORE_EMPTY_LOC +}; + +/** + * enum SPI Clock Phase - clock phase (Motorola SPI interface only) + * @SSP_CLK_RISING_EDGE: Receive data on rising edge + * @SSP_CLK_FALLING_EDGE: Receive data on falling edge + */ +enum ssp_spi_clk_phase { + SSP_CLK_RISING_EDGE, + SSP_CLK_FALLING_EDGE +}; + +/** + * enum SPI Clock Polarity - clock polarity (Motorola SPI interface only) + * @SSP_CLK_POL_IDLE_LOW: Low inactive level + * @SSP_CLK_POL_IDLE_HIGH: High inactive level + */ +enum ssp_spi_clk_pol { + SSP_CLK_POL_IDLE_LOW, + SSP_CLK_POL_IDLE_HIGH +}; + +/** + * Microwire Conrol Lengths Command size in microwire format + */ +enum ssp_microwire_ctrl_len { + SSP_BITS_4 = 0x03, SSP_BITS_5, SSP_BITS_6, + SSP_BITS_7, SSP_BITS_8, SSP_BITS_9, + SSP_BITS_10, SSP_BITS_11, SSP_BITS_12, + SSP_BITS_13, SSP_BITS_14, SSP_BITS_15, + SSP_BITS_16, SSP_BITS_17, SSP_BITS_18, + SSP_BITS_19, SSP_BITS_20, SSP_BITS_21, + SSP_BITS_22, SSP_BITS_23, SSP_BITS_24, + SSP_BITS_25, SSP_BITS_26, SSP_BITS_27, + SSP_BITS_28, SSP_BITS_29, SSP_BITS_30, + SSP_BITS_31, SSP_BITS_32 +}; + +/** + * enum Microwire Wait State + * @SSP_MWIRE_WAIT_ZERO: No wait state inserted after last command bit + * @SSP_MWIRE_WAIT_ONE: One wait state inserted after last command bit + */ +enum ssp_microwire_wait_state { + SSP_MWIRE_WAIT_ZERO, + SSP_MWIRE_WAIT_ONE +}; + +/** + * enum Microwire - whether Full/Half Duplex + * @SSP_MICROWIRE_CHANNEL_FULL_DUPLEX: SSPTXD becomes bi-directional, + * SSPRXD not used + * @SSP_MICROWIRE_CHANNEL_HALF_DUPLEX: SSPTXD is an output, SSPRXD is + * an input. + */ +enum ssp_duplex { + SSP_MICROWIRE_CHANNEL_FULL_DUPLEX, + SSP_MICROWIRE_CHANNEL_HALF_DUPLEX +}; + +/** + * CHIP select/deselect commands + */ +enum ssp_chip_select { + SSP_CHIP_SELECT, + SSP_CHIP_DESELECT +}; + + +/** + * struct pl022_ssp_master - device.platform_data for SPI controller devices. + * @num_chipselect: chipselects are used to distinguish individual + * SPI slaves, and are numbered from zero to num_chipselects - 1. + * each slave has a chipselect signal, but it's common that not + * every chipselect is connected to a slave. + * @enable_dma: if true enables DMA driven transfers. + */ +struct pl022_ssp_controller { + u16 bus_id; + u8 num_chipselect; + u8 enable_dma:1; +}; + +/** + * struct ssp_config_chip - spi_board_info.controller_data for SPI + * slave devices, copied to spi_device.controller_data. 
+ * + * @lbm: used for test purpose to internally connect RX and TX + * @iface: Interface type(Motorola, TI, Microwire, Universal) + * @hierarchy: sets whether interface is master or slave + * @slave_tx_disable: SSPTXD is disconnected (in slave mode only) + * @clk_freq: Tune freq parameters of SSP(when in master mode) + * @endian_rx: Endianess of Data in Rx FIFO + * @endian_tx: Endianess of Data in Tx FIFO + * @data_size: Width of data element(4 to 32 bits) + * @com_mode: communication mode: polling, Interrupt or DMA + * @rx_lev_trig: Rx FIFO watermark level (for IT & DMA mode) + * @tx_lev_trig: Tx FIFO watermark level (for IT & DMA mode) + * @clk_phase: Motorola SPI interface Clock phase + * @clk_pol: Motorola SPI interface Clock polarity + * @ctrl_len: Microwire interface: Control length + * @wait_state: Microwire interface: Wait state + * @duplex: Microwire interface: Full/Half duplex + * @cs_control: function pointer to board-specific function to + * assert/deassert I/O port to control HW generation of devices chip-select. + * @dma_xfer_type: Type of DMA xfer (Mem-to-periph or Periph-to-Periph) + * @dma_config: DMA configuration for SSP controller and peripheral + */ +struct pl022_config_chip { + struct device *dev; + enum ssp_loopback lbm; + enum ssp_interface iface; + enum ssp_hierarchy hierarchy; + bool slave_tx_disable; + struct ssp_clock_params clk_freq; + enum ssp_rx_endian endian_rx; + enum ssp_tx_endian endian_tx; + enum ssp_data_size data_size; + enum ssp_mode com_mode; + enum ssp_rx_level_trig rx_lev_trig; + enum ssp_tx_level_trig tx_lev_trig; + enum ssp_spi_clk_phase clk_phase; + enum ssp_spi_clk_pol clk_pol; + enum ssp_microwire_ctrl_len ctrl_len; + enum ssp_microwire_wait_state wait_state; + enum ssp_duplex duplex; + void (*cs_control) (u32 control); +}; + +#endif /* _SSP_PL022_H */ diff --git a/include/linux/amba/pl061.h b/include/linux/amba/pl061.h new file mode 100644 index 00000000000..b4fbd986260 --- /dev/null +++ b/include/linux/amba/pl061.h @@ -0,0 +1,15 @@ +/* platform data for the PL061 GPIO driver */ + +struct pl061_platform_data { + /* number of the first GPIO */ + unsigned gpio_base; + + /* number of the first IRQ. + * If the IRQ functionality in not desired this must be set to + * (unsigned) -1. 
+ */ + unsigned irq_base; + + u8 directions; /* startup directions, 1: out, 0: in */ + u8 values; /* startup values */ +}; diff --git a/include/linux/amba/serial.h b/include/linux/amba/serial.h index 48ee32a18ac..5a5a7fd6249 100644 --- a/include/linux/amba/serial.h +++ b/include/linux/amba/serial.h @@ -114,6 +114,9 @@ #define UART011_IFLS_TX4_8 (2 << 0) #define UART011_IFLS_TX6_8 (3 << 0) #define UART011_IFLS_TX7_8 (4 << 0) +/* special values for ST vendor with deeper fifo */ +#define UART011_IFLS_RX_HALF (5 << 3) +#define UART011_IFLS_TX_HALF (5 << 0) #define UART011_OEIM (1 << 10) /* overrun error interrupt mask */ #define UART011_BEIM (1 << 9) /* break error interrupt mask */ @@ -159,6 +162,7 @@ #define UART01x_FR_MODEM_ANY (UART01x_FR_DCD|UART01x_FR_DSR|UART01x_FR_CTS) #ifndef __ASSEMBLY__ +struct amba_device; /* in uncompress this is included but amba/bus.h is not */ struct amba_pl010_data { void (*set_mctrl)(struct amba_device *dev, void __iomem *base, unsigned int mctrl); }; diff --git a/include/linux/arcdevice.h b/include/linux/arcdevice.h index a1916078fd0..7d650a0e3d8 100644 --- a/include/linux/arcdevice.h +++ b/include/linux/arcdevice.h @@ -235,8 +235,6 @@ struct Outgoing { struct arcnet_local { - struct net_device_stats stats; - uint8_t config, /* current value of CONFIG register */ timeout, /* Extended timeout for COM20020 */ backplane, /* Backplane flag for COM20020 */ @@ -335,7 +333,13 @@ void arcnet_dump_skb(struct net_device *dev, struct sk_buff *skb, char *desc); void arcnet_unregister_proto(struct ArcProto *proto); irqreturn_t arcnet_interrupt(int irq, void *dev_id); -struct net_device *alloc_arcdev(char *name); +struct net_device *alloc_arcdev(const char *name); + +int arcnet_open(struct net_device *dev); +int arcnet_close(struct net_device *dev); +netdev_tx_t arcnet_send_packet(struct sk_buff *skb, + struct net_device *dev); +void arcnet_timeout(struct net_device *dev); #endif /* __KERNEL__ */ #endif /* _LINUX_ARCDEVICE_H */ diff --git a/include/linux/async.h b/include/linux/async.h index c4ecacd0b32..68a9530196f 100644 --- a/include/linux/async.h +++ b/include/linux/async.h @@ -17,9 +17,11 @@ typedef u64 async_cookie_t; typedef void (async_func_ptr) (void *data, async_cookie_t cookie); extern async_cookie_t async_schedule(async_func_ptr *ptr, void *data); -extern async_cookie_t async_schedule_special(async_func_ptr *ptr, void *data, struct list_head *list); +extern async_cookie_t async_schedule_domain(async_func_ptr *ptr, void *data, + struct list_head *list); extern void async_synchronize_full(void); -extern void async_synchronize_full_special(struct list_head *list); +extern void async_synchronize_full_domain(struct list_head *list); extern void async_synchronize_cookie(async_cookie_t cookie); -extern void async_synchronize_cookie_special(async_cookie_t cookie, struct list_head *list); +extern void async_synchronize_cookie_domain(async_cookie_t cookie, + struct list_head *list); diff --git a/include/linux/async_tx.h b/include/linux/async_tx.h index 45f6297821b..5fc2ef8d97f 100644 --- a/include/linux/async_tx.h +++ b/include/linux/async_tx.h @@ -21,6 +21,15 @@ #include <linux/spinlock.h> #include <linux/interrupt.h> +/* on architectures without dma-mapping capabilities we need to ensure + * that the asynchronous path compiles away + */ +#ifdef CONFIG_HAS_DMA +#define __async_inline +#else +#define __async_inline __always_inline +#endif + /** * dma_chan_ref - object used to manage dma channels received from the * dmaengine core. 
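A minimal usage sketch of the renamed async "domain" API declared in the async.h hunk above (async_schedule_domain / async_synchronize_full_domain); this is not taken from the patch itself, and my_async_domain, my_late_init() and my_driver_init() are hypothetical names used only for illustration:

#include <linux/async.h>
#include <linux/list.h>

/* private synchronization domain, instead of the global async list */
static LIST_HEAD(my_async_domain);

static void my_late_init(void *data, async_cookie_t cookie)
{
	/* runs asynchronously; 'data' is whatever was passed when scheduling */
}

static void my_driver_init(void *ctx)
{
	/* queue work into the private domain... */
	async_schedule_domain(my_late_init, ctx, &my_async_domain);

	/* ...and wait only for work queued in that domain, not system-wide */
	async_synchronize_full_domain(&my_async_domain);
}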
diff --git a/include/linux/ata.h b/include/linux/ata.h index a53318b8cbd..6299a259ed1 100644 --- a/include/linux/ata.h +++ b/include/linux/ata.h @@ -29,6 +29,8 @@ #ifndef __LINUX_ATA_H__ #define __LINUX_ATA_H__ +#include <linux/kernel.h> +#include <linux/string.h> #include <linux/types.h> #include <asm/byteorder.h> @@ -89,6 +91,9 @@ enum { ATA_ID_DLF = 128, ATA_ID_CSFO = 129, ATA_ID_CFA_POWER = 160, + ATA_ID_CFA_KEY_MGMT = 162, + ATA_ID_CFA_MODES = 163, + ATA_ID_DATA_SET_MGMT = 169, ATA_ID_ROT_SPEED = 217, ATA_ID_PIO4 = (1 << 1), @@ -106,6 +111,8 @@ enum { ATA_PIO5 = ATA_PIO4 | (1 << 5), ATA_PIO6 = ATA_PIO5 | (1 << 6), + ATA_PIO4_ONLY = (1 << 4), + ATA_SWDMA0 = (1 << 0), ATA_SWDMA1 = ATA_SWDMA0 | (1 << 1), ATA_SWDMA2 = ATA_SWDMA1 | (1 << 2), @@ -115,6 +122,8 @@ enum { ATA_MWDMA0 = (1 << 0), ATA_MWDMA1 = ATA_MWDMA0 | (1 << 1), ATA_MWDMA2 = ATA_MWDMA1 | (1 << 2), + ATA_MWDMA3 = ATA_MWDMA2 | (1 << 3), + ATA_MWDMA4 = ATA_MWDMA3 | (1 << 4), ATA_MWDMA12_ONLY = (1 << 1) | (1 << 2), ATA_MWDMA2_ONLY = (1 << 2), @@ -129,6 +138,8 @@ enum { ATA_UDMA7 = ATA_UDMA6 | (1 << 7), /* ATA_UDMA7 is just for completeness... doesn't exist (yet?). */ + ATA_UDMA24_ONLY = (1 << 2) | (1 << 4), + ATA_UDMA_MASK_40C = ATA_UDMA2, /* udma0-2 */ /* DMA-related */ @@ -199,15 +210,25 @@ enum { ATA_CMD_STANDBY = 0xE2, /* place in standby power mode */ ATA_CMD_IDLE = 0xE3, /* place in idle power mode */ ATA_CMD_EDD = 0x90, /* execute device diagnostic */ + ATA_CMD_DOWNLOAD_MICRO = 0x92, + ATA_CMD_NOP = 0x00, ATA_CMD_FLUSH = 0xE7, ATA_CMD_FLUSH_EXT = 0xEA, ATA_CMD_ID_ATA = 0xEC, ATA_CMD_ID_ATAPI = 0xA1, + ATA_CMD_SERVICE = 0xA2, ATA_CMD_READ = 0xC8, ATA_CMD_READ_EXT = 0x25, + ATA_CMD_READ_QUEUED = 0x26, + ATA_CMD_READ_STREAM_EXT = 0x2B, + ATA_CMD_READ_STREAM_DMA_EXT = 0x2A, ATA_CMD_WRITE = 0xCA, ATA_CMD_WRITE_EXT = 0x35, + ATA_CMD_WRITE_QUEUED = 0x36, + ATA_CMD_WRITE_STREAM_EXT = 0x3B, + ATA_CMD_WRITE_STREAM_DMA_EXT = 0x3A, ATA_CMD_WRITE_FUA_EXT = 0x3D, + ATA_CMD_WRITE_QUEUED_FUA_EXT = 0x3E, ATA_CMD_FPDMA_READ = 0x60, ATA_CMD_FPDMA_WRITE = 0x61, ATA_CMD_PIO_READ = 0x20, @@ -224,6 +245,7 @@ enum { ATA_CMD_PACKET = 0xA0, ATA_CMD_VERIFY = 0x40, ATA_CMD_VERIFY_EXT = 0x42, + ATA_CMD_WRITE_UNCORR_EXT = 0x45, ATA_CMD_STANDBYNOW1 = 0xE0, ATA_CMD_IDLEIMMEDIATE = 0xE1, ATA_CMD_SLEEP = 0xE6, @@ -232,18 +254,36 @@ enum { ATA_CMD_READ_NATIVE_MAX_EXT = 0x27, ATA_CMD_SET_MAX = 0xF9, ATA_CMD_SET_MAX_EXT = 0x37, - ATA_CMD_READ_LOG_EXT = 0x2f, + ATA_CMD_READ_LOG_EXT = 0x2F, + ATA_CMD_WRITE_LOG_EXT = 0x3F, + ATA_CMD_READ_LOG_DMA_EXT = 0x47, + ATA_CMD_WRITE_LOG_DMA_EXT = 0x57, + ATA_CMD_TRUSTED_RCV = 0x5C, + ATA_CMD_TRUSTED_RCV_DMA = 0x5D, + ATA_CMD_TRUSTED_SND = 0x5E, + ATA_CMD_TRUSTED_SND_DMA = 0x5F, ATA_CMD_PMP_READ = 0xE4, ATA_CMD_PMP_WRITE = 0xE8, ATA_CMD_CONF_OVERLAY = 0xB1, + ATA_CMD_SEC_SET_PASS = 0xF1, + ATA_CMD_SEC_UNLOCK = 0xF2, + ATA_CMD_SEC_ERASE_PREP = 0xF3, + ATA_CMD_SEC_ERASE_UNIT = 0xF4, ATA_CMD_SEC_FREEZE_LOCK = 0xF5, + ATA_CMD_SEC_DISABLE_PASS = 0xF6, + ATA_CMD_CONFIG_STREAM = 0x51, ATA_CMD_SMART = 0xB0, ATA_CMD_MEDIA_LOCK = 0xDE, ATA_CMD_MEDIA_UNLOCK = 0xDF, + ATA_CMD_DSM = 0x06, + ATA_CMD_CHK_MED_CRD_TYP = 0xD1, + ATA_CMD_CFA_REQ_EXT_ERR = 0x03, + ATA_CMD_CFA_WRITE_NE = 0x38, + ATA_CMD_CFA_TRANS_SECT = 0x87, + ATA_CMD_CFA_ERASE = 0xC0, + ATA_CMD_CFA_WRITE_MULT_NE = 0xCD, /* marked obsolete in the ATA/ATAPI-7 spec */ ATA_CMD_RESTORE = 0x10, - /* EXABYTE specific */ - ATA_EXABYTE_ENABLE_NEST = 0xF0, /* READ_LOG_EXT pages */ ATA_LOG_SATA_NCQ = 0x10, @@ -296,6 +336,7 @@ enum { /* SETFEATURE Sector counts for SATA features */ 
SATA_AN = 0x05, /* Asynchronous Notification */ SATA_DIPM = 0x03, /* Device Initiated Power Management */ + SATA_FPDMA_AA = 0x02, /* DMA Setup FIS Auto-Activate */ /* feature values for SET_MAX */ ATA_SET_MAX_ADDR = 0x00, @@ -315,6 +356,9 @@ enum { ATA_SMART_READ_VALUES = 0xD0, ATA_SMART_READ_THRESHOLDS = 0xD1, + /* feature values for Data Set Management */ + ATA_DSM_TRIM = 0x01, + /* password used in LBA Mid / LBA High for executing SMART commands */ ATA_SMART_LBAM_PASS = 0x4F, ATA_SMART_LBAH_PASS = 0xC2, @@ -512,6 +556,9 @@ static inline int ata_is_data(u8 prot) #define ata_id_has_atapi_AN(id) \ ( (((id)[76] != 0x0000) && ((id)[76] != 0xffff)) && \ ((id)[78] & (1 << 5)) ) +#define ata_id_has_fpdma_aa(id) \ + ( (((id)[76] != 0x0000) && ((id)[76] != 0xffff)) && \ + ((id)[78] & (1 << 2)) ) #define ata_id_iordy_disable(id) ((id)[ATA_ID_CAPABILITY] & (1 << 10)) #define ata_id_has_iordy(id) ((id)[ATA_ID_CAPABILITY] & (1 << 11)) #define ata_id_u32(id,n) \ @@ -717,6 +764,42 @@ static inline int ata_id_has_unload(const u16 *id) return 0; } +static inline int ata_id_form_factor(const u16 *id) +{ + u16 val = id[168]; + + if (ata_id_major_version(id) < 7 || val == 0 || val == 0xffff) + return 0; + + val &= 0xf; + + if (val > 5) + return 0; + + return val; +} + +static inline int ata_id_rotation_rate(const u16 *id) +{ + u16 val = id[217]; + + if (ata_id_major_version(id) < 7 || val == 0 || val == 0xffff) + return 0; + + if (val > 1 && val < 0x401) + return 0; + + return val; +} + +static inline int ata_id_has_trim(const u16 *id) +{ + if (ata_id_major_version(id) >= 7 && + (id[ATA_ID_DATA_SET_MGMT] & 1)) + return 1; + return 0; +} + static inline int ata_id_current_chs_valid(const u16 *id) { /* For ATA-1 devices, if the INITIALIZE DEVICE PARAMETERS command @@ -731,12 +814,17 @@ static inline int ata_id_current_chs_valid(const u16 *id) static inline int ata_id_is_cfa(const u16 *id) { - if (id[ATA_ID_CONFIG] == 0x848A) /* Standard CF */ + if (id[ATA_ID_CONFIG] == 0x848A) /* Traditional CF */ return 1; - /* Could be CF hiding as standard ATA */ - if (ata_id_major_version(id) >= 3 && - id[ATA_ID_COMMAND_SET_1] != 0xFFFF && - (id[ATA_ID_COMMAND_SET_1] & (1 << 2))) + /* + * CF specs don't require specific value in the word 0 anymore and yet + * they forbid to report the ATA version in the word 80 and require the + * CFA feature set support to be indicated in the word 83 in this case. + * Unfortunately, some cards only follow either of this requirements, + * and while those that don't indicate CFA feature support need some + * sort of quirk list, it seems impractical for the ones that do... + */ + if ((id[ATA_ID_COMMAND_SET_2] & 0xC004) == 0x4004) return 1; return 0; } @@ -746,6 +834,20 @@ static inline int ata_id_is_ssd(const u16 *id) return id[ATA_ID_ROT_SPEED] == 0x01; } +static inline int ata_id_pio_need_iordy(const u16 *id, const u8 pio) +{ + /* CF spec. r4.1 Table 22 says no IORDY on PIO5 and PIO6. */ + if (pio > 4 && ata_id_is_cfa(id)) + return 0; + /* For PIO3 and higher it is mandatory. */ + if (pio > 2) + return 1; + /* Turn it on when possible. */ + if (ata_id_has_iordy(id)) + return 1; + return 0; +} + static inline int ata_drive_40wire(const u16 *dev_id) { if (ata_id_is_sata(dev_id)) @@ -852,6 +954,32 @@ static inline void ata_id_to_hd_driveid(u16 *id) #endif } +/* + * Write up to 'max' LBA Range Entries to the buffer that will cover the + * extent from sector to sector + count. This is used for TRIM and for + * ADD LBA(S) TO NV CACHE PINNED SET. 
+ */ +static inline unsigned ata_set_lba_range_entries(void *_buffer, unsigned max, + u64 sector, unsigned long count) +{ + __le64 *buffer = _buffer; + unsigned i = 0; + + while (i < max) { + u64 entry = sector | + ((u64)(count > 0xffff ? 0xffff : count) << 48); + buffer[i++] = __cpu_to_le64(entry); + if (count <= 0xffff) + break; + count -= 0xffff; + sector += 0xffff; + } + + max = ALIGN(i * 8, 512); + memset(buffer + i, 0, max - i * 8); + return max; +} + static inline int is_multi_taskfile(struct ata_taskfile *tf) { return (tf->command == ATA_CMD_READ_MULTI) || diff --git a/include/linux/atalk.h b/include/linux/atalk.h index e9ebac2e2ec..d34c187432e 100644 --- a/include/linux/atalk.h +++ b/include/linux/atalk.h @@ -1,6 +1,7 @@ #ifndef __LINUX_ATALK_H__ #define __LINUX_ATALK_H__ +#include <linux/types.h> #include <asm/byteorder.h> /* diff --git a/include/linux/ath9k_platform.h b/include/linux/ath9k_platform.h new file mode 100644 index 00000000000..b847fc7b93f --- /dev/null +++ b/include/linux/ath9k_platform.h @@ -0,0 +1,28 @@ +/* + * Copyright (c) 2008 Atheros Communications Inc. + * Copyright (c) 2009 Gabor Juhos <juhosg@openwrt.org> + * Copyright (c) 2009 Imre Kaloz <kaloz@openwrt.org> + * + * Permission to use, copy, modify, and/or distribute this software for any + * purpose with or without fee is hereby granted, provided that the above + * copyright notice and this permission notice appear in all copies. + * + * THE SOFTWARE IS PROVIDED "AS IS" AND THE AUTHOR DISCLAIMS ALL WARRANTIES + * WITH REGARD TO THIS SOFTWARE INCLUDING ALL IMPLIED WARRANTIES OF + * MERCHANTABILITY AND FITNESS. IN NO EVENT SHALL THE AUTHOR BE LIABLE FOR + * ANY SPECIAL, DIRECT, INDIRECT, OR CONSEQUENTIAL DAMAGES OR ANY DAMAGES + * WHATSOEVER RESULTING FROM LOSS OF USE, DATA OR PROFITS, WHETHER IN AN + * ACTION OF CONTRACT, NEGLIGENCE OR OTHER TORTIOUS ACTION, ARISING OUT OF + * OR IN CONNECTION WITH THE USE OR PERFORMANCE OF THIS SOFTWARE. + */ + +#ifndef _LINUX_ATH9K_PLATFORM_H +#define _LINUX_ATH9K_PLATFORM_H + +#define ATH9K_PLAT_EEP_MAX_WORDS 2048 + +struct ath9k_platform_data { + u16 eeprom_data[ATH9K_PLAT_EEP_MAX_WORDS]; +}; + +#endif /* _LINUX_ATH9K_PLATFORM_H */ diff --git a/include/linux/atmbr2684.h b/include/linux/atmbr2684.h index 52bf72affbb..fdb2629b618 100644 --- a/include/linux/atmbr2684.h +++ b/include/linux/atmbr2684.h @@ -1,6 +1,7 @@ #ifndef _LINUX_ATMBR2684_H #define _LINUX_ATMBR2684_H +#include <linux/types.h> #include <linux/atm.h> #include <linux/if.h> /* For IFNAMSIZ */ diff --git a/include/linux/atmel-mci.h b/include/linux/atmel-mci.h index 2f1f95737ac..57b1846a3c8 100644 --- a/include/linux/atmel-mci.h +++ b/include/linux/atmel-mci.h @@ -10,6 +10,7 @@ * @bus_width: Number of data lines wired up the slot * @detect_pin: GPIO pin wired to the card detect switch * @wp_pin: GPIO pin wired to the write protect sensor + * @detect_is_active_high: The state of the detect pin when it is active * * If a given slot is not present on the board, @bus_width should be * set to 0. The other fields are ignored in this case. 
@@ -24,6 +25,7 @@ struct mci_slot_pdata { unsigned int bus_width; int detect_pin; int wp_pin; + bool detect_is_active_high; }; /** diff --git a/include/linux/atmlec.h b/include/linux/atmlec.h index 6f5a1bab8f5..39c917fd1b9 100644 --- a/include/linux/atmlec.h +++ b/include/linux/atmlec.h @@ -11,6 +11,7 @@ #include <linux/atmioc.h> #include <linux/atm.h> #include <linux/if_ether.h> +#include <linux/types.h> /* ATM lec daemon control socket */ #define ATMLEC_CTRL _IO('a', ATMIOC_LANE) @@ -78,8 +79,8 @@ struct atmlec_msg { } normal; struct atmlec_config_msg config; struct { - uint16_t lec_id; /* requestor lec_id */ - uint32_t tran_id; /* transaction id */ + __u16 lec_id; /* requestor lec_id */ + __u32 tran_id; /* transaction id */ unsigned char mac_addr[ETH_ALEN]; /* dst mac addr */ unsigned char atm_addr[ATM_ESA_LEN]; /* reqestor ATM addr */ } proxy; /* diff --git a/include/linux/atmmpc.h b/include/linux/atmmpc.h index ea1650425a1..2aba5787fa6 100644 --- a/include/linux/atmmpc.h +++ b/include/linux/atmmpc.h @@ -4,6 +4,7 @@ #include <linux/atmapi.h> #include <linux/atmioc.h> #include <linux/atm.h> +#include <linux/types.h> #define ATMMPC_CTRL _IO('a', ATMIOC_MPOA) #define ATMMPC_DATA _IO('a', ATMIOC_MPOA+1) @@ -18,39 +19,39 @@ struct atmmpc_ioc { }; typedef struct in_ctrl_info { - uint8_t Last_NHRP_CIE_code; - uint8_t Last_Q2931_cause_value; - uint8_t eg_MPC_ATM_addr[ATM_ESA_LEN]; + __u8 Last_NHRP_CIE_code; + __u8 Last_Q2931_cause_value; + __u8 eg_MPC_ATM_addr[ATM_ESA_LEN]; __be32 tag; __be32 in_dst_ip; /* IP address this ingress MPC sends packets to */ - uint16_t holding_time; - uint32_t request_id; + __u16 holding_time; + __u32 request_id; } in_ctrl_info; typedef struct eg_ctrl_info { - uint8_t DLL_header[256]; - uint8_t DH_length; + __u8 DLL_header[256]; + __u8 DH_length; __be32 cache_id; __be32 tag; __be32 mps_ip; __be32 eg_dst_ip; /* IP address to which ingress MPC sends packets */ - uint8_t in_MPC_data_ATM_addr[ATM_ESA_LEN]; - uint16_t holding_time; + __u8 in_MPC_data_ATM_addr[ATM_ESA_LEN]; + __u16 holding_time; } eg_ctrl_info; struct mpc_parameters { - uint16_t mpc_p1; /* Shortcut-Setup Frame Count */ - uint16_t mpc_p2; /* Shortcut-Setup Frame Time */ - uint8_t mpc_p3[8]; /* Flow-detection Protocols */ - uint16_t mpc_p4; /* MPC Initial Retry Time */ - uint16_t mpc_p5; /* MPC Retry Time Maximum */ - uint16_t mpc_p6; /* Hold Down Time */ + __u16 mpc_p1; /* Shortcut-Setup Frame Count */ + __u16 mpc_p2; /* Shortcut-Setup Frame Time */ + __u8 mpc_p3[8]; /* Flow-detection Protocols */ + __u16 mpc_p4; /* MPC Initial Retry Time */ + __u16 mpc_p5; /* MPC Retry Time Maximum */ + __u16 mpc_p6; /* Hold Down Time */ } ; struct k_message { - uint16_t type; + __u16 type; __be32 ip_mask; - uint8_t MPS_ctrl[ATM_ESA_LEN]; + __u8 MPS_ctrl[ATM_ESA_LEN]; union { in_ctrl_info in_info; eg_ctrl_info eg_info; @@ -61,11 +62,11 @@ struct k_message { struct llc_snap_hdr { /* RFC 1483 LLC/SNAP encapsulation for routed IP PDUs */ - uint8_t dsap; /* Destination Service Access Point (0xAA) */ - uint8_t ssap; /* Source Service Access Point (0xAA) */ - uint8_t ui; /* Unnumbered Information (0x03) */ - uint8_t org[3]; /* Organizational identification (0x000000) */ - uint8_t type[2]; /* Ether type (for IP) (0x0800) */ + __u8 dsap; /* Destination Service Access Point (0xAA) */ + __u8 ssap; /* Source Service Access Point (0xAA) */ + __u8 ui; /* Unnumbered Information (0x03) */ + __u8 org[3]; /* Organizational identification (0x000000) */ + __u8 type[2]; /* Ether type (for IP) (0x0800) */ }; /* TLVs this MPC recognizes */ 
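For illustration only (not part of the patch): filling in the RFC 1483 LLC/SNAP header defined in the atmmpc.h hunk above for a routed IPv4 PDU, using the field values already given in the struct's own comments; fill_llc_snap_ip() is a hypothetical helper:

#include <linux/atmmpc.h>
#include <linux/string.h>

static void fill_llc_snap_ip(struct llc_snap_hdr *hdr)
{
	hdr->dsap = 0xAA;	/* Destination Service Access Point */
	hdr->ssap = 0xAA;	/* Source Service Access Point */
	hdr->ui   = 0x03;	/* Unnumbered Information */
	memset(hdr->org, 0x00, sizeof(hdr->org));	/* organizational id 0x000000 */
	hdr->type[0] = 0x08;	/* Ether type 0x0800 (IP) */
	hdr->type[1] = 0x00;
}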
diff --git a/include/linux/audit.h b/include/linux/audit.h index 67e5dbfc296..3c7a358241a 100644 --- a/include/linux/audit.h +++ b/include/linux/audit.h @@ -36,7 +36,8 @@ * 1500 - 1599 kernel LSPP events * 1600 - 1699 kernel crypto events * 1700 - 1799 kernel anomaly records - * 1800 - 1999 future kernel use (maybe integrity labels and related events) + * 1800 - 1899 kernel integrity events + * 1900 - 1999 future kernel use * 2000 is for otherwise unclassified kernel audit messages (legacy) * 2001 - 2099 unused (kernel) * 2100 - 2199 user space anomaly records @@ -125,6 +126,12 @@ #define AUDIT_LAST_KERN_ANOM_MSG 1799 #define AUDIT_ANOM_PROMISCUOUS 1700 /* Device changed promiscuous mode */ #define AUDIT_ANOM_ABEND 1701 /* Process ended abnormally */ +#define AUDIT_INTEGRITY_DATA 1800 /* Data integrity verification */ +#define AUDIT_INTEGRITY_METADATA 1801 /* Metadata integrity verification */ +#define AUDIT_INTEGRITY_STATUS 1802 /* Integrity enable status */ +#define AUDIT_INTEGRITY_HASH 1803 /* Integrity HASH type */ +#define AUDIT_INTEGRITY_PCR 1804 /* PCR invalidation msgs */ +#define AUDIT_INTEGRITY_RULE 1805 /* policy rule */ #define AUDIT_KERNEL 2000 /* Asynchronous audit record. NOT A REQUEST. */ @@ -592,6 +599,8 @@ extern void audit_log_untrustedstring(struct audit_buffer *ab, extern void audit_log_d_path(struct audit_buffer *ab, const char *prefix, struct path *path); +extern void audit_log_key(struct audit_buffer *ab, + char *key); extern void audit_log_lost(const char *message); extern int audit_update_lsm_rules(void); @@ -614,6 +623,7 @@ extern int audit_enabled; #define audit_log_n_untrustedstring(a,n,s) do { ; } while (0) #define audit_log_untrustedstring(a,s) do { ; } while (0) #define audit_log_d_path(b, p, d) do { ; } while (0) +#define audit_log_key(b, k) do { ; } while (0) #define audit_enabled 0 #endif #endif diff --git a/include/linux/auto_dev-ioctl.h b/include/linux/auto_dev-ioctl.h index 91a773993a5..850f39b33e7 100644 --- a/include/linux/auto_dev-ioctl.h +++ b/include/linux/auto_dev-ioctl.h @@ -10,8 +10,13 @@ #ifndef _LINUX_AUTO_DEV_IOCTL_H #define _LINUX_AUTO_DEV_IOCTL_H +#include <linux/auto_fs.h> + +#ifdef __KERNEL__ #include <linux/string.h> -#include <linux/types.h> +#else +#include <string.h> +#endif /* __KERNEL__ */ #define AUTOFS_DEVICE_NAME "autofs" diff --git a/include/linux/auto_fs.h b/include/linux/auto_fs.h index c21e5972a3e..7b09c8348fd 100644 --- a/include/linux/auto_fs.h +++ b/include/linux/auto_fs.h @@ -14,13 +14,14 @@ #ifndef _LINUX_AUTO_FS_H #define _LINUX_AUTO_FS_H +#include <linux/types.h> #ifdef __KERNEL__ #include <linux/fs.h> #include <linux/limits.h> -#include <asm/types.h> -#endif /* __KERNEL__ */ - #include <linux/ioctl.h> +#else +#include <sys/ioctl.h> +#endif /* __KERNEL__ */ /* This file describes autofs v3 */ #define AUTOFS_PROTO_VERSION 3 diff --git a/include/linux/auto_fs4.h b/include/linux/auto_fs4.h index 55fa478bd63..8b49ac48a5b 100644 --- a/include/linux/auto_fs4.h +++ b/include/linux/auto_fs4.h @@ -12,6 +12,7 @@ #define _LINUX_AUTO_FS4_H /* Include common v3 definitions */ +#include <linux/types.h> #include <linux/auto_fs.h> /* autofs v4 definitions */ diff --git a/include/linux/backing-dev.h b/include/linux/backing-dev.h index bee52abb8a4..f169bcb90b5 100644 --- a/include/linux/backing-dev.h +++ b/include/linux/backing-dev.h @@ -13,6 +13,8 @@ #include <linux/proportions.h> #include <linux/kernel.h> #include <linux/fs.h> +#include <linux/sched.h> +#include <linux/writeback.h> #include <asm/atomic.h> struct page; @@ -23,9 +25,11 
@@ struct dentry; * Bits in backing_dev_info.state */ enum bdi_state { - BDI_pdflush, /* A pdflush thread is working this device */ - BDI_write_congested, /* The write queue is getting full */ - BDI_read_congested, /* The read queue is getting full */ + BDI_pending, /* On its way to being activated */ + BDI_wb_alloc, /* Default embedded wb allocated */ + BDI_async_congested, /* The async (write) queue is getting full */ + BDI_sync_congested, /* The sync queue is getting full */ + BDI_registered, /* bdi_register() was done */ BDI_unused, /* Available bits start here */ }; @@ -39,7 +43,22 @@ enum bdi_stat_item { #define BDI_STAT_BATCH (8*(1+ilog2(nr_cpu_ids))) +struct bdi_writeback { + struct list_head list; /* hangs off the bdi */ + + struct backing_dev_info *bdi; /* our parent bdi */ + unsigned int nr; + + unsigned long last_old_flush; /* last old data flush */ + + struct task_struct *task; /* writeback task */ + struct list_head b_dirty; /* dirty inodes */ + struct list_head b_io; /* parked for writeback */ + struct list_head b_more_io; /* parked for more writeback */ +}; + struct backing_dev_info { + struct list_head bdi_list; unsigned long ra_pages; /* max readahead in PAGE_CACHE_SIZE units */ unsigned long state; /* Always use atomic bitops on this */ unsigned int capabilities; /* Device capabilities */ @@ -48,6 +67,8 @@ struct backing_dev_info { void (*unplug_io_fn)(struct backing_dev_info *, struct page *); void *unplug_io_data; + char *name; + struct percpu_counter bdi_stat[NR_BDI_STAT_ITEMS]; struct prop_local_percpu completions; @@ -56,6 +77,14 @@ struct backing_dev_info { unsigned int min_ratio; unsigned int max_ratio, max_prop_frac; + struct bdi_writeback wb; /* default writeback info for this bdi */ + spinlock_t wb_lock; /* protects update side of wb_list */ + struct list_head wb_list; /* the flusher threads hanging off this bdi */ + unsigned long wb_mask; /* bitmask of registered tasks */ + unsigned int wb_cnt; /* number of registered tasks */ + + struct list_head work_list; + struct device *dev; #ifdef CONFIG_DEBUG_FS @@ -71,6 +100,19 @@ int bdi_register(struct backing_dev_info *bdi, struct device *parent, const char *fmt, ...); int bdi_register_dev(struct backing_dev_info *bdi, dev_t dev); void bdi_unregister(struct backing_dev_info *bdi); +void bdi_start_writeback(struct writeback_control *wbc); +int bdi_writeback_task(struct bdi_writeback *wb); +int bdi_has_dirty_io(struct backing_dev_info *bdi); + +extern spinlock_t bdi_lock; +extern struct list_head bdi_list; + +static inline int wb_has_dirty_io(struct bdi_writeback *wb) +{ + return !list_empty(&wb->b_dirty) || + !list_empty(&wb->b_io) || + !list_empty(&wb->b_more_io); +} static inline void __add_bdi_stat(struct backing_dev_info *bdi, enum bdi_stat_item item, s64 amount) @@ -215,23 +257,28 @@ static inline int bdi_congested(struct backing_dev_info *bdi, int bdi_bits) static inline int bdi_read_congested(struct backing_dev_info *bdi) { - return bdi_congested(bdi, 1 << BDI_read_congested); + return bdi_congested(bdi, 1 << BDI_sync_congested); } static inline int bdi_write_congested(struct backing_dev_info *bdi) { - return bdi_congested(bdi, 1 << BDI_write_congested); + return bdi_congested(bdi, 1 << BDI_async_congested); } static inline int bdi_rw_congested(struct backing_dev_info *bdi) { - return bdi_congested(bdi, (1 << BDI_read_congested)| - (1 << BDI_write_congested)); + return bdi_congested(bdi, (1 << BDI_sync_congested) | + (1 << BDI_async_congested)); } -void clear_bdi_congested(struct backing_dev_info *bdi, int 
rw); -void set_bdi_congested(struct backing_dev_info *bdi, int rw); -long congestion_wait(int rw, long timeout); +enum { + BLK_RW_ASYNC = 0, + BLK_RW_SYNC = 1, +}; + +void clear_bdi_congested(struct backing_dev_info *bdi, int sync); +void set_bdi_congested(struct backing_dev_info *bdi, int sync); +long congestion_wait(int sync, long timeout); static inline bool bdi_cap_writeback_dirty(struct backing_dev_info *bdi) @@ -256,6 +303,11 @@ static inline bool bdi_cap_swap_backed(struct backing_dev_info *bdi) return bdi->capabilities & BDI_CAP_SWAP_BACKED; } +static inline bool bdi_cap_flush_forker(struct backing_dev_info *bdi) +{ + return bdi == &default_backing_dev_info; +} + static inline bool mapping_cap_writeback_dirty(struct address_space *mapping) { return bdi_cap_writeback_dirty(mapping->backing_dev_info); @@ -271,4 +323,10 @@ static inline bool mapping_cap_swap_backed(struct address_space *mapping) return bdi_cap_swap_backed(mapping->backing_dev_info); } +static inline int bdi_sched_wait(void *word) +{ + schedule(); + return 0; +} + #endif /* _LINUX_BACKING_DEV_H */ diff --git a/include/linux/bfs_fs.h b/include/linux/bfs_fs.h index 8ed6dfdcd78..1c0b355aa51 100644 --- a/include/linux/bfs_fs.h +++ b/include/linux/bfs_fs.h @@ -6,6 +6,8 @@ #ifndef _LINUX_BFS_FS_H #define _LINUX_BFS_FS_H +#include <linux/types.h> + #define BFS_BSIZE_BITS 9 #define BFS_BSIZE (1<<BFS_BSIZE_BITS) @@ -17,7 +19,6 @@ #define BFS_VDIR 2L #define BFS_VREG 1L - /* BFS inode layout on disk */ struct bfs_inode { __le16 i_ino; diff --git a/include/linux/binfmts.h b/include/linux/binfmts.h index 77b4a9e4600..2046b5b8af4 100644 --- a/include/linux/binfmts.h +++ b/include/linux/binfmts.h @@ -35,8 +35,7 @@ struct linux_binprm{ #endif struct mm_struct *mm; unsigned long p; /* current top of mem */ - unsigned int sh_bang:1, - misc_bang:1, + unsigned int cred_prepared:1,/* true if creds already prepared (multiple * preps happen for interpreters) */ cap_effective:1;/* true if has elevated effective capabilities, @@ -83,7 +82,19 @@ struct linux_binfmt { int hasvdso; }; -extern int register_binfmt(struct linux_binfmt *); +extern int __register_binfmt(struct linux_binfmt *fmt, int insert); + +/* Registration of default binfmt handlers */ +static inline int register_binfmt(struct linux_binfmt *fmt) +{ + return __register_binfmt(fmt, 0); +} +/* Same as above, but adds a new binfmt at the top of the list */ +static inline int insert_binfmt(struct linux_binfmt *fmt) +{ + return __register_binfmt(fmt, 1); +} + extern void unregister_binfmt(struct linux_binfmt *); extern int prepare_binprm(struct linux_binprm *); @@ -106,6 +117,7 @@ extern int setup_arg_pages(struct linux_binprm * bprm, int executable_stack); extern int bprm_mm_init(struct linux_binprm *bprm); extern int copy_strings_kernel(int argc,char ** argv,struct linux_binprm *bprm); +extern int prepare_bprm_creds(struct linux_binprm *bprm); extern void install_exec_creds(struct linux_binprm *bprm); extern void do_coredump(long signr, int exit_code, struct pt_regs *regs); extern int set_binfmt(struct linux_binfmt *new); diff --git a/include/linux/bio.h b/include/linux/bio.h index 18462c5b8ff..2892b710771 100644 --- a/include/linux/bio.h +++ b/include/linux/bio.h @@ -132,6 +132,7 @@ struct bio { * top 4 bits of bio flags indicate the pool this bio came from */ #define BIO_POOL_BITS (4) +#define BIO_POOL_NONE ((1UL << BIO_POOL_BITS) - 1) #define BIO_POOL_OFFSET (BITS_PER_LONG - BIO_POOL_BITS) #define BIO_POOL_MASK (1UL << BIO_POOL_OFFSET) #define BIO_POOL_IDX(bio) ((bio)->bi_flags >> 
BIO_POOL_OFFSET) @@ -144,31 +145,53 @@ struct bio { * bit 1 -- rw-ahead when set * bit 2 -- barrier * Insert a serialization point in the IO queue, forcing previously - * submitted IO to be completed before this oen is issued. - * bit 3 -- synchronous I/O hint: the block layer will unplug immediately - * Note that this does NOT indicate that the IO itself is sync, just - * that the block layer will not postpone issue of this IO by plugging. - * bit 4 -- metadata request + * submitted IO to be completed before this one is issued. + * bit 3 -- synchronous I/O hint. + * bit 4 -- Unplug the device immediately after submitting this bio. + * bit 5 -- metadata request * Used for tracing to differentiate metadata and data IO. May also * get some preferential treatment in the IO scheduler - * bit 5 -- discard sectors + * bit 6 -- discard sectors * Informs the lower level device that this range of sectors is no longer * used by the file system and may thus be freed by the device. Used * for flash based storage. - * bit 6 -- fail fast device errors - * bit 7 -- fail fast transport errors - * bit 8 -- fail fast driver errors + * bit 7 -- fail fast device errors + * bit 8 -- fail fast transport errors + * bit 9 -- fail fast driver errors * Don't want driver retries for any fast fail whatever the reason. + * bit 10 -- Tell the IO scheduler not to wait for more requests after this + one has been submitted, even if it is a SYNC request. */ #define BIO_RW 0 /* Must match RW in req flags (blkdev.h) */ #define BIO_RW_AHEAD 1 /* Must match FAILFAST in req flags */ #define BIO_RW_BARRIER 2 -#define BIO_RW_SYNC 3 -#define BIO_RW_META 4 -#define BIO_RW_DISCARD 5 -#define BIO_RW_FAILFAST_DEV 6 -#define BIO_RW_FAILFAST_TRANSPORT 7 -#define BIO_RW_FAILFAST_DRIVER 8 +#define BIO_RW_SYNCIO 3 +#define BIO_RW_UNPLUG 4 +#define BIO_RW_META 5 +#define BIO_RW_DISCARD 6 +#define BIO_RW_FAILFAST_DEV 7 +#define BIO_RW_FAILFAST_TRANSPORT 8 +#define BIO_RW_FAILFAST_DRIVER 9 +#define BIO_RW_NOIDLE 10 + +#define bio_rw_flagged(bio, flag) ((bio)->bi_rw & (1 << (flag))) + +/* + * Old defines, these should eventually be replaced by direct usage of + * bio_rw_flagged() + */ +#define bio_barrier(bio) bio_rw_flagged(bio, BIO_RW_BARRIER) +#define bio_sync(bio) bio_rw_flagged(bio, BIO_RW_SYNCIO) +#define bio_unplug(bio) bio_rw_flagged(bio, BIO_RW_UNPLUG) +#define bio_failfast_dev(bio) bio_rw_flagged(bio, BIO_RW_FAILFAST_DEV) +#define bio_failfast_transport(bio) \ + bio_rw_flagged(bio, BIO_RW_FAILFAST_TRANSPORT) +#define bio_failfast_driver(bio) \ + bio_rw_flagged(bio, BIO_RW_FAILFAST_DRIVER) +#define bio_rw_ahead(bio) bio_rw_flagged(bio, BIO_RW_AHEAD) +#define bio_rw_meta(bio) bio_rw_flagged(bio, BIO_RW_META) +#define bio_discard(bio) bio_rw_flagged(bio, BIO_RW_DISCARD) +#define bio_noidle(bio) bio_rw_flagged(bio, BIO_RW_NOIDLE) /* * upper 16 bits of bi_rw define the io priority of this bio @@ -193,23 +216,14 @@ struct bio { #define bio_offset(bio) bio_iovec((bio))->bv_offset #define bio_segments(bio) ((bio)->bi_vcnt - (bio)->bi_idx) #define bio_sectors(bio) ((bio)->bi_size >> 9) -#define bio_barrier(bio) ((bio)->bi_rw & (1 << BIO_RW_BARRIER)) -#define bio_sync(bio) ((bio)->bi_rw & (1 << BIO_RW_SYNC)) -#define bio_failfast_dev(bio) ((bio)->bi_rw & (1 << BIO_RW_FAILFAST_DEV)) -#define bio_failfast_transport(bio) \ - ((bio)->bi_rw & (1 << BIO_RW_FAILFAST_TRANSPORT)) -#define bio_failfast_driver(bio) ((bio)->bi_rw & (1 << BIO_RW_FAILFAST_DRIVER)) -#define bio_rw_ahead(bio) ((bio)->bi_rw & (1 << BIO_RW_AHEAD)) -#define bio_rw_meta(bio) 
((bio)->bi_rw & (1 << BIO_RW_META)) -#define bio_discard(bio) ((bio)->bi_rw & (1 << BIO_RW_DISCARD)) #define bio_empty_barrier(bio) (bio_barrier(bio) && !bio_has_data(bio) && !bio_discard(bio)) -static inline unsigned int bio_cur_sectors(struct bio *bio) +static inline unsigned int bio_cur_bytes(struct bio *bio) { if (bio->bi_vcnt) - return bio_iovec(bio)->bv_len >> 9; + return bio_iovec(bio)->bv_len; else /* dataless requests such as discard */ - return bio->bi_size >> 9; + return bio->bi_size; } static inline void *bio_data(struct bio *bio) @@ -265,7 +279,7 @@ static inline int bio_has_allocated_vec(struct bio *bio) #define __BIO_SEG_BOUNDARY(addr1, addr2, mask) \ (((addr1) | (mask)) == (((addr2) - 1) | (mask))) #define BIOVEC_SEG_BOUNDARY(q, b1, b2) \ - __BIO_SEG_BOUNDARY(bvec_to_phys((b1)), bvec_to_phys((b2)) + (b2)->bv_len, (q)->seg_boundary_mask) + __BIO_SEG_BOUNDARY(bvec_to_phys((b1)), bvec_to_phys((b2)) + (b2)->bv_len, queue_segment_boundary((q))) #define BIO_SEG_BOUNDARY(q, b1, b2) \ BIOVEC_SEG_BOUNDARY((q), __BVEC_END((b1)), __BVEC_START((b2))) @@ -305,21 +319,20 @@ static inline int bio_has_allocated_vec(struct bio *bio) */ struct bio_integrity_payload { struct bio *bip_bio; /* parent bio */ - struct bio_vec *bip_vec; /* integrity data vector */ sector_t bip_sector; /* virtual start sector */ void *bip_buf; /* generated integrity data */ bio_end_io_t *bip_end_io; /* saved I/O completion fn */ - int bip_error; /* saved I/O error */ unsigned int bip_size; - unsigned short bip_pool; /* pool the ivec came from */ + unsigned short bip_slab; /* slab the bip came from */ unsigned short bip_vcnt; /* # of integrity bio_vecs */ unsigned short bip_idx; /* current bip_vec index */ struct work_struct bip_work; /* I/O completion */ + struct bio_vec bip_vec[0]; /* embedded bvec array */ }; #endif /* CONFIG_BLK_DEV_INTEGRITY */ @@ -440,12 +453,13 @@ extern struct biovec_slab bvec_slabs[BIOVEC_NR_POOLS] __read_mostly; #ifdef CONFIG_HIGHMEM /* - * remember to add offset! and never ever reenable interrupts between a - * bvec_kmap_irq and bvec_kunmap_irq!! + * remember never ever reenable interrupts between a bvec_kmap_irq and + * bvec_kunmap_irq! * * This function MUST be inlined - it plays with the CPU interrupt flags. */ -static inline char *bvec_kmap_irq(struct bio_vec *bvec, unsigned long *flags) +static __always_inline char *bvec_kmap_irq(struct bio_vec *bvec, + unsigned long *flags) { unsigned long addr; @@ -461,7 +475,8 @@ static inline char *bvec_kmap_irq(struct bio_vec *bvec, unsigned long *flags) return (char *) addr + bvec->bv_offset; } -static inline void bvec_kunmap_irq(char *buffer, unsigned long *flags) +static __always_inline void bvec_kunmap_irq(char *buffer, + unsigned long *flags) { unsigned long ptr = (unsigned long) buffer & PAGE_MASK; @@ -493,6 +508,120 @@ static inline int bio_has_data(struct bio *bio) return bio && bio->bi_io_vec != NULL; } +/* + * BIO list management for use by remapping drivers (e.g. DM or MD) and loop. + * + * A bio_list anchors a singly-linked list of bios chained through the bi_next + * member of the bio. The bio_list also caches the last list member to allow + * fast access to the tail. 
+ */ +struct bio_list { + struct bio *head; + struct bio *tail; +}; + +static inline int bio_list_empty(const struct bio_list *bl) +{ + return bl->head == NULL; +} + +static inline void bio_list_init(struct bio_list *bl) +{ + bl->head = bl->tail = NULL; +} + +#define bio_list_for_each(bio, bl) \ + for (bio = (bl)->head; bio; bio = bio->bi_next) + +static inline unsigned bio_list_size(const struct bio_list *bl) +{ + unsigned sz = 0; + struct bio *bio; + + bio_list_for_each(bio, bl) + sz++; + + return sz; +} + +static inline void bio_list_add(struct bio_list *bl, struct bio *bio) +{ + bio->bi_next = NULL; + + if (bl->tail) + bl->tail->bi_next = bio; + else + bl->head = bio; + + bl->tail = bio; +} + +static inline void bio_list_add_head(struct bio_list *bl, struct bio *bio) +{ + bio->bi_next = bl->head; + + bl->head = bio; + + if (!bl->tail) + bl->tail = bio; +} + +static inline void bio_list_merge(struct bio_list *bl, struct bio_list *bl2) +{ + if (!bl2->head) + return; + + if (bl->tail) + bl->tail->bi_next = bl2->head; + else + bl->head = bl2->head; + + bl->tail = bl2->tail; +} + +static inline void bio_list_merge_head(struct bio_list *bl, + struct bio_list *bl2) +{ + if (!bl2->head) + return; + + if (bl->head) + bl2->tail->bi_next = bl->head; + else + bl->tail = bl2->tail; + + bl->head = bl2->head; +} + +static inline struct bio *bio_list_peek(struct bio_list *bl) +{ + return bl->head; +} + +static inline struct bio *bio_list_pop(struct bio_list *bl) +{ + struct bio *bio = bl->head; + + if (bio) { + bl->head = bl->head->bi_next; + if (!bl->head) + bl->tail = NULL; + + bio->bi_next = NULL; + } + + return bio; +} + +static inline struct bio *bio_list_get(struct bio_list *bl) +{ + struct bio *bio = bl->head; + + bl->head = bl->tail = NULL; + + return bio; +} + #if defined(CONFIG_BLK_DEV_INTEGRITY) #define bip_vec_idx(bip, idx) (&(bip->bip_vec[(idx)])) @@ -520,10 +649,10 @@ extern void bio_integrity_endio(struct bio *, int); extern void bio_integrity_advance(struct bio *, unsigned int); extern void bio_integrity_trim(struct bio *, unsigned int, unsigned int); extern void bio_integrity_split(struct bio *, struct bio_pair *, int); -extern int bio_integrity_clone(struct bio *, struct bio *, struct bio_set *); +extern int bio_integrity_clone(struct bio *, struct bio *, gfp_t, struct bio_set *); extern int bioset_integrity_create(struct bio_set *, int); extern void bioset_integrity_free(struct bio_set *); -extern void bio_integrity_init_slab(void); +extern void bio_integrity_init(void); #else /* CONFIG_BLK_DEV_INTEGRITY */ @@ -531,7 +660,7 @@ extern void bio_integrity_init_slab(void); #define bioset_integrity_create(a, b) (0) #define bio_integrity_prep(a) (0) #define bio_integrity_enabled(a) (0) -#define bio_integrity_clone(a, b, c) (0) +#define bio_integrity_clone(a, b, c, d) (0) #define bioset_integrity_free(a) do { } while (0) #define bio_integrity_free(a, b) do { } while (0) #define bio_integrity_endio(a, b) do { } while (0) @@ -540,7 +669,7 @@ extern void bio_integrity_init_slab(void); #define bio_integrity_split(a, b, c) do { } while (0) #define bio_integrity_set_tag(a, b, c) do { } while (0) #define bio_integrity_get_tag(a, b, c) do { } while (0) -#define bio_integrity_init_slab(a) do { } while (0) +#define bio_integrity_init(a) do { } while (0) #endif /* CONFIG_BLK_DEV_INTEGRITY */ diff --git a/include/linux/bitmap.h b/include/linux/bitmap.h index 2878811c613..756d78b8c1c 100644 --- a/include/linux/bitmap.h +++ b/include/linux/bitmap.h @@ -94,13 +94,13 @@ extern void 
__bitmap_shift_right(unsigned long *dst, const unsigned long *src, int shift, int bits); extern void __bitmap_shift_left(unsigned long *dst, const unsigned long *src, int shift, int bits); -extern void __bitmap_and(unsigned long *dst, const unsigned long *bitmap1, +extern int __bitmap_and(unsigned long *dst, const unsigned long *bitmap1, const unsigned long *bitmap2, int bits); extern void __bitmap_or(unsigned long *dst, const unsigned long *bitmap1, const unsigned long *bitmap2, int bits); extern void __bitmap_xor(unsigned long *dst, const unsigned long *bitmap1, const unsigned long *bitmap2, int bits); -extern void __bitmap_andnot(unsigned long *dst, const unsigned long *bitmap1, +extern int __bitmap_andnot(unsigned long *dst, const unsigned long *bitmap1, const unsigned long *bitmap2, int bits); extern int __bitmap_intersects(const unsigned long *bitmap1, const unsigned long *bitmap2, int bits); @@ -171,13 +171,12 @@ static inline void bitmap_copy(unsigned long *dst, const unsigned long *src, } } -static inline void bitmap_and(unsigned long *dst, const unsigned long *src1, +static inline int bitmap_and(unsigned long *dst, const unsigned long *src1, const unsigned long *src2, int nbits) { if (small_const_nbits(nbits)) - *dst = *src1 & *src2; - else - __bitmap_and(dst, src1, src2, nbits); + return (*dst = *src1 & *src2) != 0; + return __bitmap_and(dst, src1, src2, nbits); } static inline void bitmap_or(unsigned long *dst, const unsigned long *src1, @@ -198,13 +197,12 @@ static inline void bitmap_xor(unsigned long *dst, const unsigned long *src1, __bitmap_xor(dst, src1, src2, nbits); } -static inline void bitmap_andnot(unsigned long *dst, const unsigned long *src1, +static inline int bitmap_andnot(unsigned long *dst, const unsigned long *src1, const unsigned long *src2, int nbits) { if (small_const_nbits(nbits)) - *dst = *src1 & ~(*src2); - else - __bitmap_andnot(dst, src1, src2, nbits); + return (*dst = *src1 & ~(*src2)) != 0; + return __bitmap_andnot(dst, src1, src2, nbits); } static inline void bitmap_complement(unsigned long *dst, const unsigned long *src, diff --git a/include/linux/bitops.h b/include/linux/bitops.h index 61829139795..c05a29cb9bb 100644 --- a/include/linux/bitops.h +++ b/include/linux/bitops.h @@ -112,6 +112,25 @@ static inline unsigned fls_long(unsigned long l) return fls64(l); } +/** + * __ffs64 - find first set bit in a 64 bit word + * @word: The 64 bit word + * + * On 64 bit arches this is a synomyn for __ffs + * The result is not defined if no bits are set, so check that @word + * is non-zero before calling this. 
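The bitmap.h hunk above makes bitmap_and() and bitmap_andnot() return whether the resulting mask is non-empty. A minimal sketch of a caller exploiting that, assuming DECLARE_BITMAP() from the standard headers; the wrapper function and the 256-bit cap are illustrative only:

/*
 * Hedged sketch: test for a non-empty intersection using the new
 * truth-value return of bitmap_and(), with no separate bitmap_empty() call.
 */
static bool masks_intersect(const unsigned long *a, const unsigned long *b,
			    unsigned int nbits)
{
	DECLARE_BITMAP(tmp, 256);	/* assumes nbits <= 256 */

	return bitmap_and(tmp, a, b, nbits) != 0;
}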
+ */ +static inline unsigned long __ffs64(u64 word) +{ +#if BITS_PER_LONG == 32 + if (((u32)word) == 0UL) + return __ffs((u32)(word >> 32)) + 32; +#elif BITS_PER_LONG != 64 +#error BITS_PER_LONG not 32 or 64 +#endif + return __ffs((unsigned long)word); +} + #ifdef __KERNEL__ #ifdef CONFIG_GENERIC_FIND_FIRST_BIT diff --git a/include/linux/blkdev.h b/include/linux/blkdev.h index 044467ef7b1..69103e053c9 100644 --- a/include/linux/blkdev.h +++ b/include/linux/blkdev.h @@ -38,6 +38,10 @@ struct request; typedef void (rq_end_io_fn)(struct request *, int); struct request_list { + /* + * count[], starved[], and wait[] are indexed by + * BLK_RW_SYNC/BLK_RW_ASYNC + */ int count[2]; int starved[2]; int elvpriv; @@ -103,11 +107,13 @@ enum rq_flag_bits { __REQ_QUIET, /* don't worry about errors */ __REQ_PREEMPT, /* set for "ide_preempt" requests */ __REQ_ORDERED_COLOR, /* is before or after barrier */ - __REQ_RW_SYNC, /* request is sync (O_DIRECT) */ + __REQ_RW_SYNC, /* request is sync (sync write or read) */ __REQ_ALLOCED, /* request came from our alloc pool */ __REQ_RW_META, /* metadata io request */ __REQ_COPY_USER, /* contains copies of user pages */ __REQ_INTEGRITY, /* integrity metadata has been remapped */ + __REQ_NOIDLE, /* Don't anticipate more IO after this one */ + __REQ_IO_STAT, /* account I/O stat */ __REQ_NR_BITS, /* stops here */ }; @@ -134,6 +140,8 @@ enum rq_flag_bits { #define REQ_RW_META (1 << __REQ_RW_META) #define REQ_COPY_USER (1 << __REQ_COPY_USER) #define REQ_INTEGRITY (1 << __REQ_INTEGRITY) +#define REQ_NOIDLE (1 << __REQ_NOIDLE) +#define REQ_IO_STAT (1 << __REQ_IO_STAT) #define BLK_MAX_CDB 16 @@ -153,19 +161,9 @@ struct request { enum rq_cmd_type_bits cmd_type; unsigned long atomic_flags; - /* Maintain bio traversal state for part by part I/O submission. - * hard_* are block layer internals, no driver should touch them! - */ - - sector_t sector; /* next sector to submit */ - sector_t hard_sector; /* next sector to complete */ - unsigned long nr_sectors; /* no. of sectors left to submit */ - unsigned long hard_nr_sectors; /* no. of sectors left to complete */ - /* no. of sectors left to submit in the current segment */ - unsigned int current_nr_sectors; - - /* no. 
of sectors left to complete in the current segment */ - unsigned int hard_cur_sectors; + /* the following two fields are internal, NEVER access directly */ + sector_t __sector; /* sector cursor */ + unsigned int __data_len; /* total data len */ struct bio *bio; struct bio *biotail; @@ -198,8 +196,8 @@ struct request { unsigned short ioprio; - void *special; - char *buffer; + void *special; /* opaque pointer available for LLD use */ + char *buffer; /* kaddr of the current segment if available */ int tag; int errors; @@ -213,10 +211,9 @@ struct request { unsigned char __cmd[BLK_MAX_CDB]; unsigned char *cmd; - unsigned int data_len; unsigned int extra_len; /* length of alignment and padding */ unsigned int sense_len; - void *data; + unsigned int resid_len; /* residual count */ void *sense; unsigned long deadline; @@ -299,10 +296,24 @@ struct blk_queue_tag { #define BLK_SCSI_MAX_CMDS (256) #define BLK_SCSI_CMD_PER_LONG (BLK_SCSI_MAX_CMDS / (sizeof(long) * 8)) -struct blk_cmd_filter { - unsigned long read_ok[BLK_SCSI_CMD_PER_LONG]; - unsigned long write_ok[BLK_SCSI_CMD_PER_LONG]; - struct kobject kobj; +struct queue_limits { + unsigned long bounce_pfn; + unsigned long seg_boundary_mask; + + unsigned int max_hw_sectors; + unsigned int max_sectors; + unsigned int max_segment_size; + unsigned int physical_block_size; + unsigned int alignment_offset; + unsigned int io_min; + unsigned int io_opt; + + unsigned short logical_block_size; + unsigned short max_hw_segments; + unsigned short max_phys_segments; + + unsigned char misaligned; + unsigned char no_cluster; }; struct request_queue @@ -356,7 +367,6 @@ struct request_queue /* * queue needs bounce pages for pages above this limit */ - unsigned long bounce_pfn; gfp_t bounce_gfp; /* @@ -385,14 +395,6 @@ struct request_queue unsigned int nr_congestion_off; unsigned int nr_batching; - unsigned int max_sectors; - unsigned int max_hw_sectors; - unsigned short max_phys_segments; - unsigned short max_hw_segments; - unsigned short hardsect_size; - unsigned int max_segment_size; - - unsigned long seg_boundary_mask; void *dma_drain_buffer; unsigned int dma_drain_size; unsigned int dma_pad_mask; @@ -402,12 +404,14 @@ struct request_queue struct list_head tag_busy_list; unsigned int nr_sorted; - unsigned int in_flight; + unsigned int in_flight[2]; unsigned int rq_timeout; struct timer_list timeout; struct list_head timeout_list; + struct queue_limits limits; + /* * sg stuff */ @@ -430,14 +434,13 @@ struct request_queue #if defined(CONFIG_BLK_DEV_BSG) struct bsg_class_device bsg_dev; #endif - struct blk_cmd_filter cmd_filter; }; #define QUEUE_FLAG_CLUSTER 0 /* cluster several segments into 1 */ #define QUEUE_FLAG_QUEUED 1 /* uses generic tag queueing */ #define QUEUE_FLAG_STOPPED 2 /* queue is stopped */ -#define QUEUE_FLAG_READFULL 3 /* read queue has been filled */ -#define QUEUE_FLAG_WRITEFULL 4 /* write queue has been filled */ +#define QUEUE_FLAG_SYNCFULL 3 /* read queue has been filled */ +#define QUEUE_FLAG_ASYNCFULL 4 /* write queue has been filled */ #define QUEUE_FLAG_DEAD 5 /* queue being torn down */ #define QUEUE_FLAG_REENTER 6 /* Re-entrancy avoidance */ #define QUEUE_FLAG_PLUGGED 7 /* queue is plugged */ @@ -449,6 +452,11 @@ struct request_queue #define QUEUE_FLAG_STACKABLE 13 /* supports request stacking */ #define QUEUE_FLAG_NONROT 14 /* non-rotational device (SSD) */ #define QUEUE_FLAG_VIRT QUEUE_FLAG_NONROT /* paravirt device */ +#define QUEUE_FLAG_IO_STAT 15 /* do IO stats */ + +#define QUEUE_FLAG_DEFAULT ((1 << QUEUE_FLAG_IO_STAT) | \ + (1 << 
QUEUE_FLAG_CLUSTER) | \ + (1 << QUEUE_FLAG_STACKABLE)) static inline int queue_is_locked(struct request_queue *q) { @@ -504,6 +512,11 @@ static inline void queue_flag_clear_unlocked(unsigned int flag, __clear_bit(flag, &q->queue_flags); } +static inline int queue_in_flight(struct request_queue *q) +{ + return q->in_flight[0] + q->in_flight[1]; +} + static inline void queue_flag_clear(unsigned int flag, struct request_queue *q) { WARN_ON_ONCE(!queue_is_locked(q)); @@ -565,6 +578,7 @@ enum { #define blk_queue_stopped(q) test_bit(QUEUE_FLAG_STOPPED, &(q)->queue_flags) #define blk_queue_nomerges(q) test_bit(QUEUE_FLAG_NOMERGES, &(q)->queue_flags) #define blk_queue_nonrot(q) test_bit(QUEUE_FLAG_NONROT, &(q)->queue_flags) +#define blk_queue_io_stat(q) test_bit(QUEUE_FLAG_IO_STAT, &(q)->queue_flags) #define blk_queue_flushing(q) ((q)->ordseq) #define blk_queue_stackable(q) \ test_bit(QUEUE_FLAG_STACKABLE, &(q)->queue_flags) @@ -581,6 +595,8 @@ enum { blk_failfast_transport(rq) || \ blk_failfast_driver(rq)) #define blk_rq_started(rq) ((rq)->cmd_flags & REQ_STARTED) +#define blk_rq_io_stat(rq) ((rq)->cmd_flags & REQ_IO_STAT) +#define blk_rq_quiet(rq) ((rq)->cmd_flags & REQ_QUIET) #define blk_account_rq(rq) (blk_rq_started(rq) && (blk_fs_request(rq) || blk_discard_rq(rq))) @@ -603,32 +619,42 @@ enum { #define rq_data_dir(rq) ((rq)->cmd_flags & 1) /* - * We regard a request as sync, if it's a READ or a SYNC write. + * We regard a request as sync, if either a read or a sync write */ -#define rq_is_sync(rq) (rq_data_dir((rq)) == READ || (rq)->cmd_flags & REQ_RW_SYNC) +static inline bool rw_is_sync(unsigned int rw_flags) +{ + return !(rw_flags & REQ_RW) || (rw_flags & REQ_RW_SYNC); +} + +static inline bool rq_is_sync(struct request *rq) +{ + return rw_is_sync(rq->cmd_flags); +} + #define rq_is_meta(rq) ((rq)->cmd_flags & REQ_RW_META) +#define rq_noidle(rq) ((rq)->cmd_flags & REQ_NOIDLE) -static inline int blk_queue_full(struct request_queue *q, int rw) +static inline int blk_queue_full(struct request_queue *q, int sync) { - if (rw == READ) - return test_bit(QUEUE_FLAG_READFULL, &q->queue_flags); - return test_bit(QUEUE_FLAG_WRITEFULL, &q->queue_flags); + if (sync) + return test_bit(QUEUE_FLAG_SYNCFULL, &q->queue_flags); + return test_bit(QUEUE_FLAG_ASYNCFULL, &q->queue_flags); } -static inline void blk_set_queue_full(struct request_queue *q, int rw) +static inline void blk_set_queue_full(struct request_queue *q, int sync) { - if (rw == READ) - queue_flag_set(QUEUE_FLAG_READFULL, q); + if (sync) + queue_flag_set(QUEUE_FLAG_SYNCFULL, q); else - queue_flag_set(QUEUE_FLAG_WRITEFULL, q); + queue_flag_set(QUEUE_FLAG_ASYNCFULL, q); } -static inline void blk_clear_queue_full(struct request_queue *q, int rw) +static inline void blk_clear_queue_full(struct request_queue *q, int sync) { - if (rw == READ) - queue_flag_clear(QUEUE_FLAG_READFULL, q); + if (sync) + queue_flag_clear(QUEUE_FLAG_SYNCFULL, q); else - queue_flag_clear(QUEUE_FLAG_WRITEFULL, q); + queue_flag_clear(QUEUE_FLAG_ASYNCFULL, q); } @@ -692,6 +718,7 @@ struct rq_map_data { int nr_entries; unsigned long offset; int null_mapped; + int from_user; }; struct req_iterator { @@ -700,6 +727,8 @@ struct req_iterator { }; /* This should not be used directly - use rq_for_each_segment */ +#define for_each_bio(_bio) \ + for (; _bio; _bio = _bio->bi_next) #define __rq_for_each_bio(_bio, rq) \ if ((rq->bio)) \ for (_bio = (rq)->bio; _bio; _bio = _bio->bi_next) @@ -719,10 +748,17 @@ extern void blk_rq_init(struct request_queue *q, struct request *rq); extern void 
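With the READFULL/WRITEFULL flags renamed to SYNCFULL/ASYNCFULL and rw_is_sync() introduced above, the sync/async boolean doubles as the index into the per-list counters. A hedged sketch of how request-allocation code might use that; the function is hypothetical, and nr_congestion_on is assumed to be the long-standing request_queue field (it is not shown in this hunk):

/*
 * Hedged sketch: map a request's flags to the sync/async index used by
 * request_list.count[] (BLK_RW_SYNC/BLK_RW_ASYNC) and the *FULL flags.
 */
static bool my_queue_is_congested(struct request_queue *q,
				  struct request_list *rl,
				  unsigned int rw_flags)
{
	const int is_sync = rw_is_sync(rw_flags) != 0;	/* 1 == BLK_RW_SYNC */

	if (rl->count[is_sync] + 1 >= q->nr_congestion_on) {
		blk_set_queue_full(q, is_sync);
		return true;
	}
	return false;
}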
blk_put_request(struct request *); extern void __blk_put_request(struct request_queue *, struct request *); extern struct request *blk_get_request(struct request_queue *, int, gfp_t); +extern struct request *blk_make_request(struct request_queue *, struct bio *, + gfp_t); extern void blk_insert_request(struct request_queue *, struct request *, int, void *); extern void blk_requeue_request(struct request_queue *, struct request *); extern int blk_rq_check_limits(struct request_queue *q, struct request *rq); extern int blk_lld_busy(struct request_queue *q); +extern int blk_rq_prep_clone(struct request *rq, struct request *rq_src, + struct bio_set *bs, gfp_t gfp_mask, + int (*bio_ctr)(struct bio *, struct bio *, void *), + void *data); +extern void blk_rq_unprep_clone(struct request *rq); extern int blk_insert_cloned_request(struct request_queue *q, struct request *rq); extern void blk_plug_device(struct request_queue *); @@ -735,28 +771,22 @@ extern int sg_scsi_ioctl(struct request_queue *, struct gendisk *, fmode_t, struct scsi_ioctl_command __user *); /* - * Temporary export, until SCSI gets fixed up. - */ -extern int blk_rq_append_bio(struct request_queue *q, struct request *rq, - struct bio *bio); - -/* * A queue has just exitted congestion. Note this in the global counter of * congested queues, and wake up anyone who was waiting for requests to be * put back. */ -static inline void blk_clear_queue_congested(struct request_queue *q, int rw) +static inline void blk_clear_queue_congested(struct request_queue *q, int sync) { - clear_bdi_congested(&q->backing_dev_info, rw); + clear_bdi_congested(&q->backing_dev_info, sync); } /* * A queue has just entered congestion. Flag that in the queue's VM-visible * state flags and increment the global gounter of congested queues. */ -static inline void blk_set_queue_congested(struct request_queue *q, int rw) +static inline void blk_set_queue_congested(struct request_queue *q, int sync) { - set_bdi_congested(&q->backing_dev_info, rw); + set_bdi_congested(&q->backing_dev_info, sync); } extern void blk_start_queue(struct request_queue *q); @@ -765,7 +795,6 @@ extern void blk_sync_queue(struct request_queue *q); extern void __blk_stop_queue(struct request_queue *q); extern void __blk_run_queue(struct request_queue *); extern void blk_run_queue(struct request_queue *); -extern void blk_start_queueing(struct request_queue *); extern int blk_rq_map_user(struct request_queue *, struct request *, struct rq_map_data *, void __user *, unsigned long, gfp_t); @@ -798,41 +827,73 @@ static inline void blk_run_address_space(struct address_space *mapping) blk_run_backing_dev(mapping->backing_dev_info, NULL); } -extern void blkdev_dequeue_request(struct request *req); +/* + * blk_rq_pos() : the current sector + * blk_rq_bytes() : bytes left in the entire request + * blk_rq_cur_bytes() : bytes left in the current segment + * blk_rq_sectors() : sectors left in the entire request + * blk_rq_cur_sectors() : sectors left in the current segment + */ +static inline sector_t blk_rq_pos(const struct request *rq) +{ + return rq->__sector; +} + +static inline unsigned int blk_rq_bytes(const struct request *rq) +{ + return rq->__data_len; +} + +static inline int blk_rq_cur_bytes(const struct request *rq) +{ + return rq->bio ? 
bio_cur_bytes(rq->bio) : 0; +} + +static inline unsigned int blk_rq_sectors(const struct request *rq) +{ + return blk_rq_bytes(rq) >> 9; +} + +static inline unsigned int blk_rq_cur_sectors(const struct request *rq) +{ + return blk_rq_cur_bytes(rq) >> 9; +} + +/* + * Request issue related functions. + */ +extern struct request *blk_peek_request(struct request_queue *q); +extern void blk_start_request(struct request *rq); +extern struct request *blk_fetch_request(struct request_queue *q); /* - * blk_end_request() and friends. - * __blk_end_request() and end_request() must be called with - * the request queue spinlock acquired. + * Request completion related functions. + * + * blk_update_request() completes given number of bytes and updates + * the request without completing it. + * + * blk_end_request() and friends. __blk_end_request() must be called + * with the request queue spinlock acquired. * * Several drivers define their own end_request and call * blk_end_request() for parts of the original function. * This prevents code duplication in drivers. */ -extern int blk_end_request(struct request *rq, int error, - unsigned int nr_bytes); -extern int __blk_end_request(struct request *rq, int error, - unsigned int nr_bytes); -extern int blk_end_bidi_request(struct request *rq, int error, - unsigned int nr_bytes, unsigned int bidi_bytes); -extern void end_request(struct request *, int); -extern int blk_end_request_callback(struct request *rq, int error, - unsigned int nr_bytes, - int (drv_callback)(struct request *)); +extern bool blk_update_request(struct request *rq, int error, + unsigned int nr_bytes); +extern bool blk_end_request(struct request *rq, int error, + unsigned int nr_bytes); +extern void blk_end_request_all(struct request *rq, int error); +extern bool blk_end_request_cur(struct request *rq, int error); +extern bool __blk_end_request(struct request *rq, int error, + unsigned int nr_bytes); +extern void __blk_end_request_all(struct request *rq, int error); +extern bool __blk_end_request_cur(struct request *rq, int error); + extern void blk_complete_request(struct request *); extern void __blk_complete_request(struct request *); extern void blk_abort_request(struct request *); extern void blk_abort_queue(struct request_queue *); -extern void blk_update_request(struct request *rq, int error, - unsigned int nr_bytes); - -/* - * blk_end_request() takes bytes instead of sectors as a complete size. - * blk_rq_bytes() returns bytes left to complete in the entire request. - * blk_rq_cur_bytes() returns bytes left to complete in the current segment. 
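Together, the accessors and the blk_peek/start/fetch_request functions above replace direct use of rq->sector and the old end_request() helpers. A hedged sketch of a simple synchronous ->request_fn built on them; my_dev_transfer() is hypothetical, everything else is the API declared in this header, and the __ variants are used because the queue lock is held in a request_fn:

/*
 * Hedged sketch of a ramdisk-style request function using the new
 * blk_fetch_request()/__blk_end_request_all() pattern and the accessors.
 */
static void my_request_fn(struct request_queue *q)
{
	struct request *rq;

	while ((rq = blk_fetch_request(q)) != NULL) {
		if (!blk_fs_request(rq)) {
			__blk_end_request_all(rq, -EIO);
			continue;
		}

		/* position and size now come from accessors, not rq->sector */
		my_dev_transfer(q->queuedata, blk_rq_pos(rq),
				blk_rq_sectors(rq), rq->buffer,
				rq_data_dir(rq));

		__blk_end_request_all(rq, 0);
	}
}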
- */ -extern unsigned int blk_rq_bytes(struct request *rq); -extern unsigned int blk_rq_cur_bytes(struct request *rq); /* * Access functions for manipulating queue properties @@ -844,10 +905,22 @@ extern void blk_cleanup_queue(struct request_queue *); extern void blk_queue_make_request(struct request_queue *, make_request_fn *); extern void blk_queue_bounce_limit(struct request_queue *, u64); extern void blk_queue_max_sectors(struct request_queue *, unsigned int); +extern void blk_queue_max_hw_sectors(struct request_queue *, unsigned int); extern void blk_queue_max_phys_segments(struct request_queue *, unsigned short); extern void blk_queue_max_hw_segments(struct request_queue *, unsigned short); extern void blk_queue_max_segment_size(struct request_queue *, unsigned int); -extern void blk_queue_hardsect_size(struct request_queue *, unsigned short); +extern void blk_queue_logical_block_size(struct request_queue *, unsigned short); +extern void blk_queue_physical_block_size(struct request_queue *, unsigned short); +extern void blk_queue_alignment_offset(struct request_queue *q, + unsigned int alignment); +extern void blk_limits_io_min(struct queue_limits *limits, unsigned int min); +extern void blk_queue_io_min(struct request_queue *q, unsigned int min); +extern void blk_queue_io_opt(struct request_queue *q, unsigned int opt); +extern void blk_set_default_limits(struct queue_limits *lim); +extern int blk_stack_limits(struct queue_limits *t, struct queue_limits *b, + sector_t offset); +extern void disk_stack_limits(struct gendisk *disk, struct block_device *bdev, + sector_t offset); extern void blk_queue_stack_limits(struct request_queue *t, struct request_queue *b); extern void blk_queue_dma_pad(struct request_queue *, unsigned int); extern void blk_queue_update_dma_pad(struct request_queue *, unsigned int); @@ -915,13 +988,7 @@ static inline int sb_issue_discard(struct super_block *sb, return blkdev_issue_discard(sb->s_bdev, block, nr_blocks, GFP_KERNEL); } -/* -* command filter functions -*/ -extern int blk_verify_command(struct blk_cmd_filter *filter, - unsigned char *cmd, fmode_t has_write_perm); -extern void blk_unregister_filter(struct gendisk *disk); -extern void blk_set_cmd_filter_defaults(struct blk_cmd_filter *filter); +extern int blk_verify_command(unsigned char *cmd, fmode_t has_write_perm); #define MAX_PHYS_SEGMENTS 128 #define MAX_HW_SEGMENTS 128 @@ -934,19 +1001,87 @@ extern void blk_set_cmd_filter_defaults(struct blk_cmd_filter *filter); #define blkdev_entry_to_request(entry) list_entry((entry), struct request, queuelist) -static inline int queue_hardsect_size(struct request_queue *q) +static inline unsigned long queue_bounce_pfn(struct request_queue *q) +{ + return q->limits.bounce_pfn; +} + +static inline unsigned long queue_segment_boundary(struct request_queue *q) +{ + return q->limits.seg_boundary_mask; +} + +static inline unsigned int queue_max_sectors(struct request_queue *q) +{ + return q->limits.max_sectors; +} + +static inline unsigned int queue_max_hw_sectors(struct request_queue *q) +{ + return q->limits.max_hw_sectors; +} + +static inline unsigned short queue_max_hw_segments(struct request_queue *q) +{ + return q->limits.max_hw_segments; +} + +static inline unsigned short queue_max_phys_segments(struct request_queue *q) +{ + return q->limits.max_phys_segments; +} + +static inline unsigned int queue_max_segment_size(struct request_queue *q) +{ + return q->limits.max_segment_size; +} + +static inline unsigned short queue_logical_block_size(struct request_queue *q) 
{ int retval = 512; - if (q && q->hardsect_size) - retval = q->hardsect_size; + if (q && q->limits.logical_block_size) + retval = q->limits.logical_block_size; return retval; } -static inline int bdev_hardsect_size(struct block_device *bdev) +static inline unsigned short bdev_logical_block_size(struct block_device *bdev) +{ + return queue_logical_block_size(bdev_get_queue(bdev)); +} + +static inline unsigned int queue_physical_block_size(struct request_queue *q) +{ + return q->limits.physical_block_size; +} + +static inline unsigned int queue_io_min(struct request_queue *q) +{ + return q->limits.io_min; +} + +static inline unsigned int queue_io_opt(struct request_queue *q) +{ + return q->limits.io_opt; +} + +static inline int queue_alignment_offset(struct request_queue *q) +{ + if (q && q->limits.misaligned) + return -1; + + if (q && q->limits.alignment_offset) + return q->limits.alignment_offset; + + return 0; +} + +static inline int queue_sector_alignment_offset(struct request_queue *q, + sector_t sector) { - return queue_hardsect_size(bdev_get_queue(bdev)); + return ((sector << 9) - q->limits.alignment_offset) + & (q->limits.io_min - 1); } static inline int queue_dma_alignment(struct request_queue *q) @@ -1076,6 +1211,8 @@ struct block_device_operations { int (*direct_access) (struct block_device *, sector_t, void **, unsigned long *); int (*media_changed) (struct gendisk *); + unsigned long long (*set_capacity) (struct gendisk *, + unsigned long long); int (*revalidate_disk) (struct gendisk *); int (*getgeo)(struct block_device *, struct hd_geometry *); struct module *owner; diff --git a/include/linux/blktrace_api.h b/include/linux/blktrace_api.h index 1dba3493d52..7e4350ece0f 100644 --- a/include/linux/blktrace_api.h +++ b/include/linux/blktrace_api.h @@ -1,6 +1,7 @@ #ifndef BLKTRACE_H #define BLKTRACE_H +#include <linux/types.h> #ifdef __KERNEL__ #include <linux/blkdev.h> #include <linux/relay.h> @@ -14,6 +15,7 @@ enum blktrace_cat { BLK_TC_WRITE = 1 << 1, /* writes */ BLK_TC_BARRIER = 1 << 2, /* barrier */ BLK_TC_SYNC = 1 << 3, /* sync IO */ + BLK_TC_SYNCIO = BLK_TC_SYNC, BLK_TC_QUEUE = 1 << 4, /* queueing/merging */ BLK_TC_REQUEUE = 1 << 5, /* requeueing */ BLK_TC_ISSUE = 1 << 6, /* issue */ @@ -114,9 +116,9 @@ struct blk_io_trace { * The remap event */ struct blk_io_trace_remap { - __be32 device; __be32 device_from; - __be64 sector; + __be32 device_to; + __be64 sector_from; }; enum { @@ -142,6 +144,9 @@ struct blk_user_trace_setup { #ifdef __KERNEL__ #if defined(CONFIG_BLK_DEV_IO_TRACE) + +#include <linux/sysfs.h> + struct blk_trace { int trace_state; struct rchan *rchan; @@ -160,8 +165,9 @@ struct blk_trace { extern int blk_trace_ioctl(struct block_device *, unsigned, char __user *); extern void blk_trace_shutdown(struct request_queue *); -extern int do_blk_trace_setup(struct request_queue *q, - char *name, dev_t dev, struct blk_user_trace_setup *buts); +extern int do_blk_trace_setup(struct request_queue *q, char *name, + dev_t dev, struct block_device *bdev, + struct blk_user_trace_setup *buts); extern void __trace_note_message(struct blk_trace *, const char *fmt, ...); /** @@ -188,20 +194,42 @@ extern void __trace_note_message(struct blk_trace *, const char *fmt, ...); extern void blk_add_driver_data(struct request_queue *q, struct request *rq, void *data, size_t len); extern int blk_trace_setup(struct request_queue *q, char *name, dev_t dev, + struct block_device *bdev, char __user *arg); extern int blk_trace_startstop(struct request_queue *q, int start); extern int 
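The queue_sector_alignment_offset() helper above reduces to ((sector << 9) - alignment_offset) & (io_min - 1). A standalone user-space illustration of that arithmetic for a drive reporting io_min = 4096 and alignment_offset = 3584 bytes (the classic 63-sector legacy partition layout); the numbers are made up for the demo:

/* Standalone demo of the alignment arithmetic only; not kernel code. */
#include <stdio.h>

static unsigned long sector_alignment_offset(unsigned long long sector,
					     unsigned int alignment_offset,
					     unsigned int io_min)
{
	return ((sector << 9) - alignment_offset) & (io_min - 1);
}

int main(void)
{
	/* sector 63 starts exactly on a physical block boundary here ... */
	printf("sector 63 -> offset %lu\n",
	       sector_alignment_offset(63, 3584, 4096));
	/* ... while sector 64 is 512 bytes past one */
	printf("sector 64 -> offset %lu\n",
	       sector_alignment_offset(64, 3584, 4096));
	return 0;
}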
blk_trace_remove(struct request_queue *q); +extern int blk_trace_init_sysfs(struct device *dev); + +extern struct attribute_group blk_trace_attr_group; #else /* !CONFIG_BLK_DEV_IO_TRACE */ -#define blk_trace_ioctl(bdev, cmd, arg) (-ENOTTY) -#define blk_trace_shutdown(q) do { } while (0) -#define do_blk_trace_setup(q, name, dev, buts) (-ENOTTY) -#define blk_add_driver_data(q, rq, data, len) do {} while (0) -#define blk_trace_setup(q, name, dev, arg) (-ENOTTY) -#define blk_trace_startstop(q, start) (-ENOTTY) -#define blk_trace_remove(q) (-ENOTTY) -#define blk_add_trace_msg(q, fmt, ...) do { } while (0) +# define blk_trace_ioctl(bdev, cmd, arg) (-ENOTTY) +# define blk_trace_shutdown(q) do { } while (0) +# define do_blk_trace_setup(q, name, dev, bdev, buts) (-ENOTTY) +# define blk_add_driver_data(q, rq, data, len) do {} while (0) +# define blk_trace_setup(q, name, dev, bdev, arg) (-ENOTTY) +# define blk_trace_startstop(q, start) (-ENOTTY) +# define blk_trace_remove(q) (-ENOTTY) +# define blk_add_trace_msg(q, fmt, ...) do { } while (0) +static inline int blk_trace_init_sysfs(struct device *dev) +{ + return 0; +} #endif /* CONFIG_BLK_DEV_IO_TRACE */ + +#if defined(CONFIG_EVENT_TRACING) && defined(CONFIG_BLOCK) + +static inline int blk_cmd_buf_len(struct request *rq) +{ + return blk_pc_request(rq) ? rq->cmd_len * 3 : 1; +} + +extern void blk_dump_cmd(char *buf, struct request *rq); +extern void blk_fill_rwbs(char *rwbs, u32 rw, int bytes); +extern void blk_fill_rwbs_rq(char *rwbs, struct request *rq); + +#endif /* CONFIG_EVENT_TRACING && CONFIG_BLOCK */ + #endif /* __KERNEL__ */ #endif diff --git a/include/linux/bootmem.h b/include/linux/bootmem.h index 95837bfb525..bc3ab707369 100644 --- a/include/linux/bootmem.h +++ b/include/linux/bootmem.h @@ -65,23 +65,20 @@ extern void free_bootmem(unsigned long addr, unsigned long size); #define BOOTMEM_DEFAULT 0 #define BOOTMEM_EXCLUSIVE (1<<0) +extern int reserve_bootmem(unsigned long addr, + unsigned long size, + int flags); extern int reserve_bootmem_node(pg_data_t *pgdat, - unsigned long physaddr, - unsigned long size, - int flags); -#ifndef CONFIG_HAVE_ARCH_BOOTMEM_NODE -extern int reserve_bootmem(unsigned long addr, unsigned long size, int flags); -#endif + unsigned long physaddr, + unsigned long size, + int flags); -extern void *__alloc_bootmem_nopanic(unsigned long size, +extern void *__alloc_bootmem(unsigned long size, unsigned long align, unsigned long goal); -extern void *__alloc_bootmem(unsigned long size, +extern void *__alloc_bootmem_nopanic(unsigned long size, unsigned long align, unsigned long goal); -extern void *__alloc_bootmem_low(unsigned long size, - unsigned long align, - unsigned long goal); extern void *__alloc_bootmem_node(pg_data_t *pgdat, unsigned long size, unsigned long align, @@ -90,30 +87,35 @@ extern void *__alloc_bootmem_node_nopanic(pg_data_t *pgdat, unsigned long size, unsigned long align, unsigned long goal); +extern void *__alloc_bootmem_low(unsigned long size, + unsigned long align, + unsigned long goal); extern void *__alloc_bootmem_low_node(pg_data_t *pgdat, unsigned long size, unsigned long align, unsigned long goal); -#ifndef CONFIG_HAVE_ARCH_BOOTMEM_NODE + #define alloc_bootmem(x) \ __alloc_bootmem(x, SMP_CACHE_BYTES, __pa(MAX_DMA_ADDRESS)) #define alloc_bootmem_nopanic(x) \ __alloc_bootmem_nopanic(x, SMP_CACHE_BYTES, __pa(MAX_DMA_ADDRESS)) -#define alloc_bootmem_low(x) \ - __alloc_bootmem_low(x, SMP_CACHE_BYTES, 0) #define alloc_bootmem_pages(x) \ __alloc_bootmem(x, PAGE_SIZE, __pa(MAX_DMA_ADDRESS)) #define 
alloc_bootmem_pages_nopanic(x) \ __alloc_bootmem_nopanic(x, PAGE_SIZE, __pa(MAX_DMA_ADDRESS)) -#define alloc_bootmem_low_pages(x) \ - __alloc_bootmem_low(x, PAGE_SIZE, 0) #define alloc_bootmem_node(pgdat, x) \ __alloc_bootmem_node(pgdat, x, SMP_CACHE_BYTES, __pa(MAX_DMA_ADDRESS)) #define alloc_bootmem_pages_node(pgdat, x) \ __alloc_bootmem_node(pgdat, x, PAGE_SIZE, __pa(MAX_DMA_ADDRESS)) +#define alloc_bootmem_pages_node_nopanic(pgdat, x) \ + __alloc_bootmem_node_nopanic(pgdat, x, PAGE_SIZE, __pa(MAX_DMA_ADDRESS)) + +#define alloc_bootmem_low(x) \ + __alloc_bootmem_low(x, SMP_CACHE_BYTES, 0) +#define alloc_bootmem_low_pages(x) \ + __alloc_bootmem_low(x, PAGE_SIZE, 0) #define alloc_bootmem_low_pages_node(pgdat, x) \ __alloc_bootmem_low_node(pgdat, x, PAGE_SIZE, 0) -#endif /* !CONFIG_HAVE_ARCH_BOOTMEM_NODE */ extern int reserve_bootmem_generic(unsigned long addr, unsigned long size, int flags); @@ -144,10 +146,10 @@ extern void *alloc_large_system_hash(const char *tablename, #define HASH_EARLY 0x00000001 /* Allocating during early boot? */ -/* Only NUMA needs hash distribution. - * IA64 and x86_64 have sufficient vmalloc space. +/* Only NUMA needs hash distribution. 64bit NUMA architectures have + * sufficient vmalloc space. */ -#if defined(CONFIG_NUMA) && (defined(CONFIG_IA64) || defined(CONFIG_X86_64)) +#if defined(CONFIG_NUMA) && defined(CONFIG_64BIT) #define HASHDIST_DEFAULT 1 #else #define HASHDIST_DEFAULT 0 diff --git a/include/linux/bsg.h b/include/linux/bsg.h index cf0303a6061..ecb4730d086 100644 --- a/include/linux/bsg.h +++ b/include/linux/bsg.h @@ -1,12 +1,22 @@ #ifndef BSG_H #define BSG_H +#include <linux/types.h> + #define BSG_PROTOCOL_SCSI 0 #define BSG_SUB_PROTOCOL_SCSI_CMD 0 #define BSG_SUB_PROTOCOL_SCSI_TMF 1 #define BSG_SUB_PROTOCOL_SCSI_TRANSPORT 2 +/* + * For flags member below + * sg.h sg_io_hdr also has bits defined for it's flags member. However + * none of these bits are implemented/used by bsg. The bits below are + * allocated to not conflict with sg.h ones anyway. + */ +#define BSG_FLAG_Q_AT_TAIL 0x10 /* default, == 0 at this bit, is Q_AT_HEAD */ + struct sg_io_v4 { __s32 guard; /* [i] 'Q' to differentiate from v3 */ __u32 protocol; /* [i] 0 -> SCSI , .... 
*/ diff --git a/include/linux/buffer_head.h b/include/linux/buffer_head.h index bd7ac793be1..16ed0284d78 100644 --- a/include/linux/buffer_head.h +++ b/include/linux/buffer_head.h @@ -155,6 +155,7 @@ void create_empty_buffers(struct page *, unsigned long, unsigned long b_state); void end_buffer_read_sync(struct buffer_head *bh, int uptodate); void end_buffer_write_sync(struct buffer_head *bh, int uptodate); +void end_buffer_async_write(struct buffer_head *bh, int uptodate); /* Things to do with buffers at mapping->private_list */ void mark_buffer_dirty_inode(struct buffer_head *bh, struct inode *inode); @@ -165,15 +166,8 @@ int sync_mapping_buffers(struct address_space *mapping); void unmap_underlying_metadata(struct block_device *bdev, sector_t block); void mark_buffer_async_write(struct buffer_head *bh); -void invalidate_bdev(struct block_device *); -int sync_blockdev(struct block_device *bdev); void __wait_on_buffer(struct buffer_head *); wait_queue_head_t *bh_waitq_head(struct buffer_head *bh); -int fsync_bdev(struct block_device *); -struct super_block *freeze_bdev(struct block_device *); -int thaw_bdev(struct block_device *, struct super_block *); -int fsync_super(struct super_block *); -int fsync_no_super(struct block_device *); struct buffer_head *__find_get_block(struct block_device *bdev, sector_t block, unsigned size); struct buffer_head *__getblk(struct block_device *bdev, sector_t block, @@ -204,6 +198,8 @@ extern int buffer_heads_over_limit; void block_invalidatepage(struct page *page, unsigned long offset); int block_write_full_page(struct page *page, get_block_t *get_block, struct writeback_control *wbc); +int block_write_full_page_endio(struct page *page, get_block_t *get_block, + struct writeback_control *wbc, bh_end_io_t *handler); int block_read_full_page(struct page*, get_block_t*); int block_is_partially_uptodate(struct page *page, read_descriptor_t *desc, unsigned long from); @@ -223,7 +219,7 @@ int cont_write_begin(struct file *, struct address_space *, loff_t, get_block_t *, loff_t *); int generic_cont_expand_simple(struct inode *inode, loff_t size); int block_commit_write(struct page *page, unsigned from, unsigned to); -int block_page_mkwrite(struct vm_area_struct *vma, struct page *page, +int block_page_mkwrite(struct vm_area_struct *vma, struct vm_fault *vmf, get_block_t get_block); void block_sync_page(struct page *); sector_t generic_block_bmap(struct address_space *, sector_t, get_block_t *); @@ -339,22 +335,10 @@ extern int __set_page_dirty_buffers(struct page *page); static inline void buffer_init(void) {} static inline int try_to_free_buffers(struct page *page) { return 1; } -static inline int sync_blockdev(struct block_device *bdev) { return 0; } static inline int inode_has_buffers(struct inode *inode) { return 0; } static inline void invalidate_inode_buffers(struct inode *inode) {} static inline int remove_inode_buffers(struct inode *inode) { return 1; } static inline int sync_mapping_buffers(struct address_space *mapping) { return 0; } -static inline void invalidate_bdev(struct block_device *bdev) {} - -static inline struct super_block *freeze_bdev(struct block_device *sb) -{ - return NULL; -} - -static inline int thaw_bdev(struct block_device *bdev, struct super_block *sb) -{ - return 0; -} #endif /* CONFIG_BLOCK */ #endif /* _LINUX_BUFFER_HEAD_H */ diff --git a/include/linux/bug.h b/include/linux/bug.h index 54398d2c6d8..d276b5510c8 100644 --- a/include/linux/bug.h +++ b/include/linux/bug.h @@ -1,7 +1,6 @@ #ifndef _LINUX_BUG_H #define _LINUX_BUG_H 
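The buffer_head.h hunk above exports end_buffer_async_write() and adds block_write_full_page_endio(), which lets a filesystem supply its own per-buffer completion handler. A hedged sketch of how a ->writepage might use it; myfs_get_block() and the myfs_* names are hypothetical:

/*
 * Hedged sketch: custom buffer end_io layered on the stock completion.
 */
static void myfs_end_buffer_write(struct buffer_head *bh, int uptodate)
{
	/* filesystem-specific bookkeeping would go here ... */
	end_buffer_async_write(bh, uptodate);	/* then the stock completion */
}

static int myfs_writepage(struct page *page, struct writeback_control *wbc)
{
	return block_write_full_page_endio(page, myfs_get_block, wbc,
					   myfs_end_buffer_write);
}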
-#include <linux/module.h> #include <asm/bug.h> enum bug_trap_type { @@ -24,10 +23,6 @@ const struct bug_entry *find_bug(unsigned long bugaddr); enum bug_trap_type report_bug(unsigned long bug_addr, struct pt_regs *regs); -int module_bug_finalize(const Elf_Ehdr *, const Elf_Shdr *, - struct module *); -void module_bug_cleanup(struct module *); - /* These are defined by the architecture */ int is_valid_bugaddr(unsigned long addr); @@ -38,13 +33,6 @@ static inline enum bug_trap_type report_bug(unsigned long bug_addr, { return BUG_TRAP_TYPE_BUG; } -static inline int module_bug_finalize(const Elf_Ehdr *hdr, - const Elf_Shdr *sechdrs, - struct module *mod) -{ - return 0; -} -static inline void module_bug_cleanup(struct module *mod) {} #endif /* CONFIG_GENERIC_BUG */ #endif /* _LINUX_BUG_H */ diff --git a/include/linux/c2port.h b/include/linux/c2port.h index 7b5a2388ba6..2a5cd867c36 100644 --- a/include/linux/c2port.h +++ b/include/linux/c2port.h @@ -10,6 +10,7 @@ */ #include <linux/device.h> +#include <linux/kmemcheck.h> #define C2PORT_NAME_LEN 32 @@ -20,8 +21,10 @@ /* Main struct */ struct c2port_ops; struct c2port_device { + kmemcheck_bitfield_begin(flags); unsigned int access:1; unsigned int flash_access:1; + kmemcheck_bitfield_end(flags); int id; char name[C2PORT_NAME_LEN]; diff --git a/include/linux/can/Kbuild b/include/linux/can/Kbuild index eff898aac02..8cb05aae661 100644 --- a/include/linux/can/Kbuild +++ b/include/linux/can/Kbuild @@ -1,3 +1,4 @@ header-y += raw.h header-y += bcm.h header-y += error.h +header-y += netlink.h diff --git a/include/linux/can/bcm.h b/include/linux/can/bcm.h index 7f293273c44..1432b278c52 100644 --- a/include/linux/can/bcm.h +++ b/include/linux/can/bcm.h @@ -14,6 +14,8 @@ #ifndef CAN_BCM_H #define CAN_BCM_H +#include <linux/types.h> + /** * struct bcm_msg_head - head of messages to/from the broadcast manager * @opcode: opcode, see enum below. 
diff --git a/include/linux/can/dev.h b/include/linux/can/dev.h new file mode 100644 index 00000000000..5824b20b5fc --- /dev/null +++ b/include/linux/can/dev.h @@ -0,0 +1,71 @@ +/* + * linux/can/dev.h + * + * Definitions for the CAN network device driver interface + * + * Copyright (C) 2006 Andrey Volkov <avolkov@varma-el.com> + * Varma Electronics Oy + * + * Copyright (C) 2008 Wolfgang Grandegger <wg@grandegger.com> + * + * Send feedback to <socketcan-users@lists.berlios.de> + */ + +#ifndef CAN_DEV_H +#define CAN_DEV_H + +#include <linux/can/netlink.h> +#include <linux/can/error.h> + +/* + * CAN mode + */ +enum can_mode { + CAN_MODE_STOP = 0, + CAN_MODE_START, + CAN_MODE_SLEEP +}; + +/* + * CAN common private data + */ +#define CAN_ECHO_SKB_MAX 4 + +struct can_priv { + struct can_device_stats can_stats; + + struct can_bittiming bittiming; + struct can_bittiming_const *bittiming_const; + struct can_clock clock; + + enum can_state state; + u32 ctrlmode; + + int restart_ms; + struct timer_list restart_timer; + + struct sk_buff *echo_skb[CAN_ECHO_SKB_MAX]; + + int (*do_set_bittiming)(struct net_device *dev); + int (*do_set_mode)(struct net_device *dev, enum can_mode mode); + int (*do_get_state)(const struct net_device *dev, + enum can_state *state); +}; + +struct net_device *alloc_candev(int sizeof_priv); +void free_candev(struct net_device *dev); + +int open_candev(struct net_device *dev); +void close_candev(struct net_device *dev); + +int register_candev(struct net_device *dev); +void unregister_candev(struct net_device *dev); + +int can_restart_now(struct net_device *dev); +void can_bus_off(struct net_device *dev); + +void can_put_echo_skb(struct sk_buff *skb, struct net_device *dev, int idx); +void can_get_echo_skb(struct net_device *dev, int idx); +void can_free_echo_skb(struct net_device *dev, int idx); + +#endif /* CAN_DEV_H */ diff --git a/include/linux/can/netlink.h b/include/linux/can/netlink.h new file mode 100644 index 00000000000..9ecbb7871c0 --- /dev/null +++ b/include/linux/can/netlink.h @@ -0,0 +1,113 @@ +/* + * linux/can/netlink.h + * + * Definitions for the CAN netlink interface + * + * Copyright (c) 2009 Wolfgang Grandegger <wg@grandegger.com> + * + * Send feedback to <socketcan-users@lists.berlios.de> + * + */ + +#ifndef CAN_NETLINK_H +#define CAN_NETLINK_H + +#include <linux/types.h> + +/* + * CAN bit-timing parameters + * + * For futher information, please read chapter "8 BIT TIMING + * REQUIREMENTS" of the "Bosch CAN Specification version 2.0" + * at http://www.semiconductors.bosch.de/pdf/can2spec.pdf. 
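The new can/dev.h above defines the driver-side API (alloc_candev(), register_candev(), struct can_priv with bittiming callbacks). A hedged sketch of a minimal setup path; struct my_priv, my_bittiming_const and my_set_bittiming() are hypothetical, and placing struct can_priv first in the private area is the usual convention assumed here rather than something spelled out in the header:

/*
 * Hedged sketch: minimal CAN device allocation/registration flow.
 */
struct my_priv {
	struct can_priv can;		/* assumed to be the first member */
	void __iomem *regs;
};

static int my_can_setup(void)
{
	struct net_device *dev;
	struct my_priv *priv;
	int err;

	dev = alloc_candev(sizeof(struct my_priv));
	if (!dev)
		return -ENOMEM;

	priv = netdev_priv(dev);
	priv->can.bittiming_const = &my_bittiming_const;
	priv->can.do_set_bittiming = my_set_bittiming;

	err = register_candev(dev);
	if (err)
		free_candev(dev);
	return err;
}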
+ */ +struct can_bittiming { + __u32 bitrate; /* Bit-rate in bits/second */ + __u32 sample_point; /* Sample point in one-tenth of a percent */ + __u32 tq; /* Time quanta (TQ) in nanoseconds */ + __u32 prop_seg; /* Propagation segment in TQs */ + __u32 phase_seg1; /* Phase buffer segment 1 in TQs */ + __u32 phase_seg2; /* Phase buffer segment 2 in TQs */ + __u32 sjw; /* Synchronisation jump width in TQs */ + __u32 brp; /* Bit-rate prescaler */ +}; + +/* + * CAN harware-dependent bit-timing constant + * + * Used for calculating and checking bit-timing parameters + */ +struct can_bittiming_const { + char name[16]; /* Name of the CAN controller hardware */ + __u32 tseg1_min; /* Time segement 1 = prop_seg + phase_seg1 */ + __u32 tseg1_max; + __u32 tseg2_min; /* Time segement 2 = phase_seg2 */ + __u32 tseg2_max; + __u32 sjw_max; /* Synchronisation jump width */ + __u32 brp_min; /* Bit-rate prescaler */ + __u32 brp_max; + __u32 brp_inc; +}; + +/* + * CAN clock parameters + */ +struct can_clock { + __u32 freq; /* CAN system clock frequency in Hz */ +}; + +/* + * CAN operational and error states + */ +enum can_state { + CAN_STATE_ERROR_ACTIVE = 0, /* RX/TX error count < 96 */ + CAN_STATE_ERROR_WARNING, /* RX/TX error count < 128 */ + CAN_STATE_ERROR_PASSIVE, /* RX/TX error count < 256 */ + CAN_STATE_BUS_OFF, /* RX/TX error count >= 256 */ + CAN_STATE_STOPPED, /* Device is stopped */ + CAN_STATE_SLEEPING, /* Device is sleeping */ + CAN_STATE_MAX +}; + +/* + * CAN controller mode + */ +struct can_ctrlmode { + __u32 mask; + __u32 flags; +}; + +#define CAN_CTRLMODE_LOOPBACK 0x1 /* Loopback mode */ +#define CAN_CTRLMODE_LISTENONLY 0x2 /* Listen-only mode */ +#define CAN_CTRLMODE_3_SAMPLES 0x4 /* Triple sampling mode */ + +/* + * CAN device statistics + */ +struct can_device_stats { + __u32 bus_error; /* Bus errors */ + __u32 error_warning; /* Changes to error warning state */ + __u32 error_passive; /* Changes to error passive state */ + __u32 bus_off; /* Changes to bus off state */ + __u32 arbitration_lost; /* Arbitration lost errors */ + __u32 restarts; /* CAN controller re-starts */ +}; + +/* + * CAN netlink interface + */ +enum { + IFLA_CAN_UNSPEC, + IFLA_CAN_BITTIMING, + IFLA_CAN_BITTIMING_CONST, + IFLA_CAN_CLOCK, + IFLA_CAN_STATE, + IFLA_CAN_CTRLMODE, + IFLA_CAN_RESTART_MS, + IFLA_CAN_RESTART, + __IFLA_CAN_MAX +}; + +#define IFLA_CAN_MAX (__IFLA_CAN_MAX - 1) + +#endif /* CAN_NETLINK_H */ diff --git a/include/linux/can/platform/sja1000.h b/include/linux/can/platform/sja1000.h new file mode 100644 index 00000000000..01ee2aeb048 --- /dev/null +++ b/include/linux/can/platform/sja1000.h @@ -0,0 +1,35 @@ +#ifndef _CAN_PLATFORM_SJA1000_H_ +#define _CAN_PLATFORM_SJA1000_H_ + +/* clock divider register */ +#define CDR_CLKOUT_MASK 0x07 +#define CDR_CLK_OFF 0x08 /* Clock off (CLKOUT pin) */ +#define CDR_RXINPEN 0x20 /* TX1 output is RX irq output */ +#define CDR_CBP 0x40 /* CAN input comparator bypass */ +#define CDR_PELICAN 0x80 /* PeliCAN mode */ + +/* output control register */ +#define OCR_MODE_BIPHASE 0x00 +#define OCR_MODE_TEST 0x01 +#define OCR_MODE_NORMAL 0x02 +#define OCR_MODE_CLOCK 0x03 +#define OCR_MODE_MASK 0x07 +#define OCR_TX0_INVERT 0x04 +#define OCR_TX0_PULLDOWN 0x08 +#define OCR_TX0_PULLUP 0x10 +#define OCR_TX0_PUSHPULL 0x18 +#define OCR_TX1_INVERT 0x20 +#define OCR_TX1_PULLDOWN 0x40 +#define OCR_TX1_PULLUP 0x80 +#define OCR_TX1_PUSHPULL 0xc0 +#define OCR_TX_MASK 0xfc +#define OCR_TX_SHIFT 2 + +struct sja1000_platform_data { + u32 clock; /* CAN bus oscillator frequency in Hz */ + + u8 ocr; /* 
output control register */ + u8 cdr; /* clock divider register */ +}; + +#endif /* !_CAN_PLATFORM_SJA1000_H_ */ diff --git a/include/linux/capability.h b/include/linux/capability.h index 02bdb768d43..c3021105edc 100644 --- a/include/linux/capability.h +++ b/include/linux/capability.h @@ -69,10 +69,6 @@ typedef struct __user_cap_data_struct { #define VFS_CAP_U32 VFS_CAP_U32_2 #define VFS_CAP_REVISION VFS_CAP_REVISION_2 -#ifdef CONFIG_SECURITY_FILE_CAPABILITIES -extern int file_caps_enabled; -#endif - struct vfs_cap_data { __le32 magic_etc; /* Little endian */ struct { @@ -96,6 +92,10 @@ struct vfs_cap_data { #define _KERNEL_CAPABILITY_VERSION _LINUX_CAPABILITY_VERSION_3 #define _KERNEL_CAPABILITY_U32S _LINUX_CAPABILITY_U32S_3 +#ifdef CONFIG_SECURITY_FILE_CAPABILITIES +extern int file_caps_enabled; +#endif + typedef struct kernel_cap_struct { __u32 cap[_KERNEL_CAPABILITY_U32S]; } kernel_cap_t; @@ -377,7 +377,21 @@ struct cpu_vfs_cap_data { #define CAP_FOR_EACH_U32(__capi) \ for (__capi = 0; __capi < _KERNEL_CAPABILITY_U32S; ++__capi) +/* + * CAP_FS_MASK and CAP_NFSD_MASKS: + * + * The fs mask is all the privileges that fsuid==0 historically meant. + * At one time in the past, that included CAP_MKNOD and CAP_LINUX_IMMUTABLE. + * + * It has never meant setting security.* and trusted.* xattrs. + * + * We could also define fsmask as follows: + * 1. CAP_FS_MASK is the privilege to bypass all fs-related DAC permissions + * 2. The security.* and trusted.* xattrs are fs-related MAC permissions + */ + # define CAP_FS_MASK_B0 (CAP_TO_MASK(CAP_CHOWN) \ + | CAP_TO_MASK(CAP_MKNOD) \ | CAP_TO_MASK(CAP_DAC_OVERRIDE) \ | CAP_TO_MASK(CAP_DAC_READ_SEARCH) \ | CAP_TO_MASK(CAP_FOWNER) \ @@ -392,9 +406,12 @@ struct cpu_vfs_cap_data { # define CAP_EMPTY_SET ((kernel_cap_t){{ 0, 0 }}) # define CAP_FULL_SET ((kernel_cap_t){{ ~0, ~0 }}) # define CAP_INIT_EFF_SET ((kernel_cap_t){{ ~CAP_TO_MASK(CAP_SETPCAP), ~0 }}) -# define CAP_FS_SET ((kernel_cap_t){{ CAP_FS_MASK_B0, CAP_FS_MASK_B1 } }) -# define CAP_NFSD_SET ((kernel_cap_t){{ CAP_FS_MASK_B0|CAP_TO_MASK(CAP_SYS_RESOURCE), \ - CAP_FS_MASK_B1 } }) +# define CAP_FS_SET ((kernel_cap_t){{ CAP_FS_MASK_B0 \ + | CAP_TO_MASK(CAP_LINUX_IMMUTABLE), \ + CAP_FS_MASK_B1 } }) +# define CAP_NFSD_SET ((kernel_cap_t){{ CAP_FS_MASK_B0 \ + | CAP_TO_MASK(CAP_SYS_RESOURCE), \ + CAP_FS_MASK_B1 } }) #endif /* _KERNEL_CAPABILITY_U32S != 2 */ diff --git a/include/linux/cb710.h b/include/linux/cb710.h new file mode 100644 index 00000000000..8cc10411bab --- /dev/null +++ b/include/linux/cb710.h @@ -0,0 +1,208 @@ +/* + * cb710/cb710.h + * + * Copyright by MichaÅ‚ MirosÅ‚aw, 2008-2009 + * + * This program is free software; you can redistribute it and/or modify + * it under the terms of the GNU General Public License version 2 as + * published by the Free Software Foundation. 
+ */ +#ifndef LINUX_CB710_DRIVER_H +#define LINUX_CB710_DRIVER_H + +#include <linux/io.h> +#include <linux/interrupt.h> +#include <linux/spinlock.h> +#include <linux/pci.h> +#include <linux/platform_device.h> +#include <linux/mmc/host.h> + +struct cb710_slot; + +typedef int (*cb710_irq_handler_t)(struct cb710_slot *); + +/* per-virtual-slot structure */ +struct cb710_slot { + struct platform_device pdev; + void __iomem *iobase; + cb710_irq_handler_t irq_handler; +}; + +/* per-device structure */ +struct cb710_chip { + struct pci_dev *pdev; + void __iomem *iobase; + unsigned platform_id; +#ifdef CONFIG_CB710_DEBUG_ASSUMPTIONS + atomic_t slot_refs_count; +#endif + unsigned slot_mask; + unsigned slots; + spinlock_t irq_lock; + struct cb710_slot slot[0]; +}; + +/* NOTE: cb710_chip.slots is modified only during device init/exit and + * they are all serialized wrt themselves */ + +/* cb710_chip.slot_mask values */ +#define CB710_SLOT_MMC 1 +#define CB710_SLOT_MS 2 +#define CB710_SLOT_SM 4 + +/* slot port accessors - so the logic is more clear in the code */ +#define CB710_PORT_ACCESSORS(t) \ +static inline void cb710_write_port_##t(struct cb710_slot *slot, \ + unsigned port, u##t value) \ +{ \ + iowrite##t(value, slot->iobase + port); \ +} \ + \ +static inline u##t cb710_read_port_##t(struct cb710_slot *slot, \ + unsigned port) \ +{ \ + return ioread##t(slot->iobase + port); \ +} \ + \ +static inline void cb710_modify_port_##t(struct cb710_slot *slot, \ + unsigned port, u##t set, u##t clear) \ +{ \ + iowrite##t( \ + (ioread##t(slot->iobase + port) & ~clear)|set, \ + slot->iobase + port); \ +} + +CB710_PORT_ACCESSORS(8) +CB710_PORT_ACCESSORS(16) +CB710_PORT_ACCESSORS(32) + +void cb710_pci_update_config_reg(struct pci_dev *pdev, + int reg, uint32_t and, uint32_t xor); +void cb710_set_irq_handler(struct cb710_slot *slot, + cb710_irq_handler_t handler); + +/* some device struct walking */ + +static inline struct cb710_slot *cb710_pdev_to_slot( + struct platform_device *pdev) +{ + return container_of(pdev, struct cb710_slot, pdev); +} + +static inline struct cb710_chip *cb710_slot_to_chip(struct cb710_slot *slot) +{ + return dev_get_drvdata(slot->pdev.dev.parent); +} + +static inline struct device *cb710_slot_dev(struct cb710_slot *slot) +{ + return &slot->pdev.dev; +} + +static inline struct device *cb710_chip_dev(struct cb710_chip *chip) +{ + return &chip->pdev->dev; +} + +/* debugging aids */ + +#ifdef CONFIG_CB710_DEBUG +void cb710_dump_regs(struct cb710_chip *chip, unsigned dump); +#else +#define cb710_dump_regs(c, d) do {} while (0) +#endif + +#define CB710_DUMP_REGS_MMC 0x0F +#define CB710_DUMP_REGS_MS 0x30 +#define CB710_DUMP_REGS_SM 0xC0 +#define CB710_DUMP_REGS_ALL 0xFF +#define CB710_DUMP_REGS_MASK 0xFF + +#define CB710_DUMP_ACCESS_8 0x100 +#define CB710_DUMP_ACCESS_16 0x200 +#define CB710_DUMP_ACCESS_32 0x400 +#define CB710_DUMP_ACCESS_ALL 0x700 +#define CB710_DUMP_ACCESS_MASK 0x700 + +#endif /* LINUX_CB710_DRIVER_H */ +/* + * cb710/sgbuf2.h + * + * Copyright by MichaÅ‚ MirosÅ‚aw, 2008-2009 + * + * This program is free software; you can redistribute it and/or modify + * it under the terms of the GNU General Public License version 2 as + * published by the Free Software Foundation. + */ +#ifndef LINUX_CB710_SG_H +#define LINUX_CB710_SG_H + +#include <linux/highmem.h> +#include <linux/scatterlist.h> + +/* + * 32-bit PIO mapping sg iterator + * + * Hides scatterlist access issues - fragment boundaries, alignment, page + * mapping - for drivers using 32-bit-word-at-a-time-PIO (ie. 
PCI devices + * without DMA support). + * + * Best-case reading (transfer from device): + * sg_miter_start(, SG_MITER_TO_SG); + * cb710_sg_dwiter_write_from_io(); + * sg_miter_stop(); + * + * Best-case writing (transfer to device): + * sg_miter_start(, SG_MITER_FROM_SG); + * cb710_sg_dwiter_read_to_io(); + * sg_miter_stop(); + */ + +uint32_t cb710_sg_dwiter_read_next_block(struct sg_mapping_iter *miter); +void cb710_sg_dwiter_write_next_block(struct sg_mapping_iter *miter, uint32_t data); + +/** + * cb710_sg_dwiter_write_from_io - transfer data to mapped buffer from 32-bit IO port + * @miter: sg mapping iter + * @port: PIO port - IO or MMIO address + * @count: number of 32-bit words to transfer + * + * Description: + * Reads @count 32-bit words from register @port and stores it in + * buffer iterated by @miter. Data that would overflow the buffer + * is silently ignored. Iterator is advanced by 4*@count bytes + * or to the buffer's end whichever is closer. + * + * Context: + * IRQ disabled if the SG_MITER_ATOMIC is set. Don't care otherwise. + */ +static inline void cb710_sg_dwiter_write_from_io(struct sg_mapping_iter *miter, + void __iomem *port, size_t count) +{ + while (count-- > 0) + cb710_sg_dwiter_write_next_block(miter, ioread32(port)); +} + +/** + * cb710_sg_dwiter_read_to_io - transfer data to 32-bit IO port from mapped buffer + * @miter: sg mapping iter + * @port: PIO port - IO or MMIO address + * @count: number of 32-bit words to transfer + * + * Description: + * Writes @count 32-bit words to register @port from buffer iterated + * through @miter. If buffer ends before @count words are written + * missing data is replaced by zeroes. @miter is advanced by 4*@count + * bytes or to the buffer's end whichever is closer. + * + * Context: + * IRQ disabled if the SG_MITER_ATOMIC is set. Don't care otherwise. + */ +static inline void cb710_sg_dwiter_read_to_io(struct sg_mapping_iter *miter, + void __iomem *port, size_t count) +{ + while (count-- > 0) + iowrite32(cb710_sg_dwiter_read_next_block(miter), port); +} + +#endif /* LINUX_CB710_SG_H */ diff --git a/include/linux/cdev.h b/include/linux/cdev.h index fb4591977b0..f389e319a45 100644 --- a/include/linux/cdev.h +++ b/include/linux/cdev.h @@ -28,6 +28,8 @@ int cdev_add(struct cdev *, dev_t, unsigned); void cdev_del(struct cdev *); +int cdev_index(struct inode *inode); + void cd_forget(struct inode *); extern struct backing_dev_info directly_mappable_cdev_bdi; diff --git a/include/linux/cdrom.h b/include/linux/cdrom.h index 0b49e08d3cb..78e90479662 100644 --- a/include/linux/cdrom.h +++ b/include/linux/cdrom.h @@ -11,6 +11,7 @@ #ifndef _LINUX_CDROM_H #define _LINUX_CDROM_H +#include <linux/types.h> #include <asm/byteorder.h> /******************************************************* diff --git a/include/linux/cgroup.h b/include/linux/cgroup.h index e267e62827b..90bba9e6228 100644 --- a/include/linux/cgroup.h +++ b/include/linux/cgroup.h @@ -15,6 +15,7 @@ #include <linux/cgroupstats.h> #include <linux/prio_heap.h> #include <linux/rwsem.h> +#include <linux/idr.h> #ifdef CONFIG_CGROUPS @@ -22,6 +23,7 @@ struct cgroupfs_root; struct cgroup_subsys; struct inode; struct cgroup; +struct css_id; extern int cgroup_init_early(void); extern int cgroup_init(void); @@ -47,18 +49,24 @@ enum cgroup_subsys_id { /* Per-subsystem/per-cgroup state maintained by the system. */ struct cgroup_subsys_state { - /* The cgroup that this subsystem is attached to. Useful + /* + * The cgroup that this subsystem is attached to. 
Useful * for subsystems that want to know about the cgroup - * hierarchy structure */ + * hierarchy structure + */ struct cgroup *cgroup; - /* State maintained by the cgroup system to allow subsystems + /* + * State maintained by the cgroup system to allow subsystems * to be "busy". Should be accessed via css_get(), - * css_tryget() and and css_put(). */ + * css_tryget() and and css_put(). + */ atomic_t refcnt; unsigned long flags; + /* ID for this css, if possible */ + struct css_id *id; }; /* bits in struct cgroup_subsys_state flags field */ @@ -99,6 +107,7 @@ static inline bool css_tryget(struct cgroup_subsys_state *css) while (!atomic_inc_not_zero(&css->refcnt)) { if (test_bit(CSS_REMOVED, &css->flags)) return false; + cpu_relax(); } return true; } @@ -119,19 +128,26 @@ static inline void css_put(struct cgroup_subsys_state *css) enum { /* Control Group is dead */ CGRP_REMOVED, - /* Control Group has previously had a child cgroup or a task, - * but no longer (only if CGRP_NOTIFY_ON_RELEASE is set) */ + /* + * Control Group has previously had a child cgroup or a task, + * but no longer (only if CGRP_NOTIFY_ON_RELEASE is set) + */ CGRP_RELEASABLE, /* Control Group requires release notifications to userspace */ CGRP_NOTIFY_ON_RELEASE, + /* + * A thread in rmdir() is wating for this cgroup. + */ + CGRP_WAIT_ON_RMDIR, }; struct cgroup { unsigned long flags; /* "unsigned long" so bitops work */ - /* count users of this cgroup. >0 means busy, but doesn't - * necessarily indicate the number of tasks in the - * cgroup */ + /* + * count users of this cgroup. >0 means busy, but doesn't + * necessarily indicate the number of tasks in the cgroup + */ atomic_t count; /* @@ -141,7 +157,7 @@ struct cgroup { struct list_head sibling; /* my parent's children */ struct list_head children; /* my children */ - struct cgroup *parent; /* my parent */ + struct cgroup *parent; /* my parent */ struct dentry *dentry; /* cgroup fs entry, RCU protected */ /* Private pointers for each registered subsystem */ @@ -163,24 +179,22 @@ struct cgroup { */ struct list_head release_list; - /* pids_mutex protects the fields below */ + /* pids_mutex protects pids_list and cached pid arrays. */ struct rw_semaphore pids_mutex; - /* Array of process ids in the cgroup */ - pid_t *tasks_pids; - /* How many files are using the current tasks_pids array */ - int pids_use_count; - /* Length of the current tasks_pids array */ - int pids_length; + + /* Linked list of struct cgroup_pids */ + struct list_head pids_list; /* For RCU-protected deletion */ struct rcu_head rcu_head; }; -/* A css_set is a structure holding pointers to a set of +/* + * A css_set is a structure holding pointers to a set of * cgroup_subsys_state objects. This saves space in the task struct * object and speeds up fork()/exit(), since a single inc/dec and a - * list_add()/del() can bump the reference count on the entire - * cgroup set for a task. + * list_add()/del() can bump the reference count on the entire cgroup + * set for a task. */ struct css_set { @@ -225,13 +239,8 @@ struct cgroup_map_cb { void *state; }; -/* struct cftype: - * - * The files in the cgroup filesystem mostly have a very simple read/write - * handling, some common function will take care of it. Nevertheless some cases - * (read tasks) are special and therefore I define this structure for every - * kind of file. 
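The refcounting comments above describe the intended discipline for css references: pin with css_tryget(), which fails once the group is being removed, and drop with css_put(). A hedged sketch of a subsystem-side caller; my_use_css() is hypothetical:

/*
 * Hedged sketch: hold a css across a sleeping operation.
 */
static int my_use_css(struct cgroup_subsys_state *css)
{
	if (!css_tryget(css))
		return -ENOENT;		/* cgroup already on its way out */

	/* ... safe to dereference css and its cgroup here, may sleep ... */

	css_put(css);
	return 0;
}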
- * +/* + * struct cftype: handler definitions for cgroup control files * * When reading/writing to a file: * - the cgroup to use is file->f_dentry->d_parent->d_fsdata @@ -240,10 +249,17 @@ struct cgroup_map_cb { #define MAX_CFTYPE_NAME 64 struct cftype { - /* By convention, the name should begin with the name of the - * subsystem, followed by a period */ + /* + * By convention, the name should begin with the name of the + * subsystem, followed by a period + */ char name[MAX_CFTYPE_NAME]; int private; + /* + * If not 0, file mode is set to this value, otherwise it will + * be figured out automatically + */ + mode_t mode; /* * If non-zero, defines the maximum length of string that can @@ -318,15 +334,20 @@ struct cgroup_scanner { void (*process_task)(struct task_struct *p, struct cgroup_scanner *scan); struct ptr_heap *heap; + void *data; }; -/* Add a new file to the given cgroup directory. Should only be - * called by subsystems from within a populate() method */ +/* + * Add a new file to the given cgroup directory. Should only be + * called by subsystems from within a populate() method + */ int cgroup_add_file(struct cgroup *cgrp, struct cgroup_subsys *subsys, const struct cftype *cft); -/* Add a set of new files to the given cgroup directory. Should - * only be called by subsystems from within a populate() method */ +/* + * Add a set of new files to the given cgroup directory. Should + * only be called by subsystems from within a populate() method + */ int cgroup_add_files(struct cgroup *cgrp, struct cgroup_subsys *subsys, const struct cftype cft[], @@ -338,15 +359,35 @@ int cgroup_path(const struct cgroup *cgrp, char *buf, int buflen); int cgroup_task_count(const struct cgroup *cgrp); -/* Return true if the cgroup is a descendant of the current cgroup */ -int cgroup_is_descendant(const struct cgroup *cgrp); +/* Return true if cgrp is a descendant of the task's cgroup */ +int cgroup_is_descendant(const struct cgroup *cgrp, struct task_struct *task); + +/* + * When the subsys has to access css and may add permanent refcnt to css, + * it should take care of racy conditions with rmdir(). Following set of + * functions, is for stop/restart rmdir if necessary. + * Because these will call css_get/put, "css" should be alive css. + * + * cgroup_exclude_rmdir(); + * ...do some jobs which may access arbitrary empty cgroup + * cgroup_release_and_wakeup_rmdir(); + * + * When someone removes a cgroup while cgroup_exclude_rmdir() holds it, + * it sleeps and cgroup_release_and_wakeup_rmdir() will wake him up. + */ + +void cgroup_exclude_rmdir(struct cgroup_subsys_state *css); +void cgroup_release_and_wakeup_rmdir(struct cgroup_subsys_state *css); -/* Control Group subsystem type. See Documentation/cgroups.txt for details */ +/* + * Control Group subsystem type. + * See Documentation/cgroups/cgroups.txt for details + */ struct cgroup_subsys { struct cgroup_subsys_state *(*create)(struct cgroup_subsys *ss, struct cgroup *cgrp); - void (*pre_destroy)(struct cgroup_subsys *ss, struct cgroup *cgrp); + int (*pre_destroy)(struct cgroup_subsys *ss, struct cgroup *cgrp); void (*destroy)(struct cgroup_subsys *ss, struct cgroup *cgrp); int (*can_attach)(struct cgroup_subsys *ss, struct cgroup *cgrp, struct task_struct *tsk); @@ -363,6 +404,11 @@ struct cgroup_subsys { int active; int disabled; int early_init; + /* + * True if this subsys uses ID. ID is not available before cgroup_init() + * (not available in early_init time.) 
+ */ + bool use_id; #define MAX_CGROUP_TYPE_NAMELEN 32 const char *name; @@ -377,6 +423,7 @@ struct cgroup_subsys { * - initiating hotplug events */ struct mutex hierarchy_mutex; + struct lock_class_key subsys_key; /* * Link to parent, and list entry in parent's children. @@ -384,6 +431,9 @@ struct cgroup_subsys { */ struct cgroupfs_root *root; struct list_head sibling; + /* used when use_id == true */ + struct idr idr; + spinlock_t id_lock; }; #define SUBSYS(_x) extern struct cgroup_subsys _x ## _subsys; @@ -417,7 +467,8 @@ struct cgroup_iter { struct list_head *task; }; -/* To iterate across the tasks in a cgroup: +/* + * To iterate across the tasks in a cgroup: * * 1) call cgroup_iter_start to intialize an iterator * @@ -426,9 +477,10 @@ struct cgroup_iter { * * 3) call cgroup_iter_end() to destroy the iterator. * - * Or, call cgroup_scan_tasks() to iterate through every task in a cpuset. - * - cgroup_scan_tasks() holds the css_set_lock when calling the test_task() - * callback, but not while calling the process_task() callback. + * Or, call cgroup_scan_tasks() to iterate through every task in a + * cgroup - cgroup_scan_tasks() holds the css_set_lock when calling + * the test_task() callback, but not while calling the process_task() + * callback. */ void cgroup_iter_start(struct cgroup *cgrp, struct cgroup_iter *it); struct task_struct *cgroup_iter_next(struct cgroup *cgrp, @@ -437,6 +489,44 @@ void cgroup_iter_end(struct cgroup *cgrp, struct cgroup_iter *it); int cgroup_scan_tasks(struct cgroup_scanner *scan); int cgroup_attach_task(struct cgroup *, struct task_struct *); +/* + * CSS ID is ID for cgroup_subsys_state structs under subsys. This only works + * if cgroup_subsys.use_id == true. It can be used for looking up and scanning. + * CSS ID is assigned at cgroup allocation (create) automatically + * and removed when subsys calls free_css_id() function. This is because + * the lifetime of cgroup_subsys_state is subsys's matter. + * + * Looking up and scanning function should be called under rcu_read_lock(). + * Taking cgroup_mutex()/hierarchy_mutex() is not necessary for following calls. + * But the css returned by this routine can be "not populated yet" or "being + * destroyed". The caller should check css and cgroup's status. + */ + +/* + * Typically Called at ->destroy(), or somewhere the subsys frees + * cgroup_subsys_state. + */ +void free_css_id(struct cgroup_subsys *ss, struct cgroup_subsys_state *css); + +/* Find a cgroup_subsys_state which has given ID */ + +struct cgroup_subsys_state *css_lookup(struct cgroup_subsys *ss, int id); + +/* + * Get a cgroup whose id is greater than or equal to id under tree of root. + * Returning a cgroup_subsys_state or NULL. 
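Tying together the CSS ID interface described above and declared immediately below (css_get_next(), css_id(), css_depth()), a hypothetical subsystem with use_id set could walk a subtree like this; a sketch only, under the RCU locking rules quoted above:

#include <linux/kernel.h>
#include <linux/rcupdate.h>
#include <linux/cgroup.h>

static void example_scan_subtree(struct cgroup_subsys *ss,
				 struct cgroup_subsys_state *root)
{
	struct cgroup_subsys_state *css;
	int nextid = 1, found;

	rcu_read_lock();
	while ((css = css_get_next(ss, nextid, root, &found)) != NULL) {
		/* css may still be initializing or being destroyed;
		 * a real caller re-checks its state before use. */
		pr_debug("css id=%u depth=%u\n", css_id(css), css_depth(css));
		nextid = found + 1;
	}
	rcu_read_unlock();
}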
+ */ +struct cgroup_subsys_state *css_get_next(struct cgroup_subsys *ss, int id, + struct cgroup_subsys_state *root, int *foundid); + +/* Returns true if root is ancestor of cg */ +bool css_is_ancestor(struct cgroup_subsys_state *cg, + const struct cgroup_subsys_state *root); + +/* Get id and depth of css */ +unsigned short css_id(struct cgroup_subsys_state *css); +unsigned short css_depth(struct cgroup_subsys_state *css); + #else /* !CONFIG_CGROUPS */ static inline int cgroup_init_early(void) { return 0; } diff --git a/include/linux/cgroupstats.h b/include/linux/cgroupstats.h index 4f53abf6855..3753c33160d 100644 --- a/include/linux/cgroupstats.h +++ b/include/linux/cgroupstats.h @@ -15,6 +15,7 @@ #ifndef _LINUX_CGROUPSTATS_H #define _LINUX_CGROUPSTATS_H +#include <linux/types.h> #include <linux/taskstats.h> /* diff --git a/include/linux/clk.h b/include/linux/clk.h index 778777316ea..1d37f42ac29 100644 --- a/include/linux/clk.h +++ b/include/linux/clk.h @@ -125,4 +125,34 @@ int clk_set_parent(struct clk *clk, struct clk *parent); */ struct clk *clk_get_parent(struct clk *clk); +/** + * clk_get_sys - get a clock based upon the device name + * @dev_id: device name + * @con_id: connection ID + * + * Returns a struct clk corresponding to the clock producer, or + * valid IS_ERR() condition containing errno. The implementation + * uses @dev_id and @con_id to determine the clock consumer, and + * thereby the clock producer. In contrast to clk_get() this function + * takes the device name instead of the device itself for identification. + * + * Drivers must assume that the clock source is not enabled. + * + * clk_get_sys should not be called from within interrupt context. + */ +struct clk *clk_get_sys(const char *dev_id, const char *con_id); + +/** + * clk_add_alias - add a new clock alias + * @alias: name for clock alias + * @alias_dev_name: device name + * @id: platform specific clock name + * @dev: device + * + * Allows using generic clock names for drivers by adding a new alias. + * Assumes clkdev, see clkdev.h for more info. + */ +int clk_add_alias(const char *alias, const char *alias_dev_name, char *id, + struct device *dev); + #endif diff --git a/include/linux/clockchips.h b/include/linux/clockchips.h index cea153697ec..3a1dbba4d3a 100644 --- a/include/linux/clockchips.h +++ b/include/linux/clockchips.h @@ -36,6 +36,7 @@ enum clock_event_nofitiers { CLOCK_EVT_NOTIFY_BROADCAST_EXIT, CLOCK_EVT_NOTIFY_SUSPEND, CLOCK_EVT_NOTIFY_RESUME, + CLOCK_EVT_NOTIFY_CPU_DYING, CLOCK_EVT_NOTIFY_CPU_DEAD, }; diff --git a/include/linux/clocksource.h b/include/linux/clocksource.h index f88d32f8ff7..1219be4fb42 100644 --- a/include/linux/clocksource.h +++ b/include/linux/clocksource.h @@ -22,8 +22,109 @@ typedef u64 cycle_t; struct clocksource; /** + * struct cyclecounter - hardware abstraction for a free running counter + * Provides completely state-free accessors to the underlying hardware. + * Depending on which hardware it reads, the cycle counter may wrap + * around quickly. Locking rules (if necessary) have to be defined + * by the implementor and user of specific instances of this API. 
+ * + * @read: returns the current cycle value + * @mask: bitmask for two's complement + * subtraction of non 64 bit counters, + * see CLOCKSOURCE_MASK() helper macro + * @mult: cycle to nanosecond multiplier + * @shift: cycle to nanosecond divisor (power of two) + */ +struct cyclecounter { + cycle_t (*read)(const struct cyclecounter *cc); + cycle_t mask; + u32 mult; + u32 shift; +}; + +/** + * struct timecounter - layer above a %struct cyclecounter which counts nanoseconds + * Contains the state needed by timecounter_read() to detect + * cycle counter wrap around. Initialize with + * timecounter_init(). Also used to convert cycle counts into the + * corresponding nanosecond counts with timecounter_cyc2time(). Users + * of this code are responsible for initializing the underlying + * cycle counter hardware, locking issues and reading the time + * more often than the cycle counter wraps around. The nanosecond + * counter will only wrap around after ~585 years. + * + * @cc: the cycle counter used by this instance + * @cycle_last: most recent cycle counter value seen by + * timecounter_read() + * @nsec: continuously increasing count + */ +struct timecounter { + const struct cyclecounter *cc; + cycle_t cycle_last; + u64 nsec; +}; + +/** + * cyclecounter_cyc2ns - converts cycle counter cycles to nanoseconds + * @tc: Pointer to cycle counter. + * @cycles: Cycles + * + * XXX - This could use some mult_lxl_ll() asm optimization. Same code + * as in cyc2ns, but with unsigned result. + */ +static inline u64 cyclecounter_cyc2ns(const struct cyclecounter *cc, + cycle_t cycles) +{ + u64 ret = (u64)cycles; + ret = (ret * cc->mult) >> cc->shift; + return ret; +} + +/** + * timecounter_init - initialize a time counter + * @tc: Pointer to time counter which is to be initialized/reset + * @cc: A cycle counter, ready to be used. + * @start_tstamp: Arbitrary initial time stamp. + * + * After this call the current cycle register (roughly) corresponds to + * the initial time stamp. Every call to timecounter_read() increments + * the time stamp counter by the number of elapsed nanoseconds. + */ +extern void timecounter_init(struct timecounter *tc, + const struct cyclecounter *cc, + u64 start_tstamp); + +/** + * timecounter_read - return nanoseconds elapsed since timecounter_init() + * plus the initial time stamp + * @tc: Pointer to time counter. + * + * In other words, keeps track of time since the same epoch as + * the function which generated the initial time stamp. + */ +extern u64 timecounter_read(struct timecounter *tc); + +/** + * timecounter_cyc2time - convert a cycle counter to same + * time base as values returned by + * timecounter_read() + * @tc: Pointer to time counter. + * @cycle: a value returned by tc->cc->read() + * + * Cycle counts that are converted correctly as long as they + * fall into the interval [-1/2 max cycle count, +1/2 max cycle count], + * with "max cycle count" == cs->mask+1. + * + * This allows conversion of cycle counter values which were generated + * in the past. + */ +extern u64 timecounter_cyc2time(struct timecounter *tc, + cycle_t cycle_tstamp); + +/** * struct clocksource - hardware abstraction for a free running counter * Provides mostly state-free accessors to the underlying hardware. + * This is the structure used for system time. * * @name: ptr to clocksource name * @list: list head for registration @@ -42,7 +143,9 @@ struct clocksource; * 400-499: Perfect * The ideal clocksource. A must-use where * available. 
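To show how the new cyclecounter/timecounter pair is meant to be wired up, a driver-side sketch; example_read_hw_counter() stands in for a real register read and the mult/shift values are placeholders, none of which are part of this patch:

#include <linux/clocksource.h>

static u32 example_read_hw_counter(void)
{
	/* stands in for reading a real hardware counter register */
	return 0;
}

static cycle_t example_cc_read(const struct cyclecounter *cc)
{
	return example_read_hw_counter();
}

static const struct cyclecounter example_cc = {
	.read	= example_cc_read,
	.mask	= CLOCKSOURCE_MASK(32),
	.mult	= 1 << 10,	/* placeholder: ns = (cycles * mult) >> shift */
	.shift	= 10,
};

static struct timecounter example_tc;

static void example_timecounter_setup(u64 initial_ns)
{
	timecounter_init(&example_tc, &example_cc, initial_ns);
}

static u64 example_nanoseconds_since_setup(void)
{
	/* must be called more often than the 32-bit counter wraps */
	return timecounter_read(&example_tc);
}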
- * @read: returns a cycle value + * @read: returns a cycle value, passes clocksource as argument + * @enable: optional function to enable the clocksource + * @disable: optional function to disable the clocksource * @mask: bitmask for two's complement * subtraction of non 64 bit counters * @mult: cycle to nanosecond multiplier (adjusted by NTP) @@ -61,7 +164,9 @@ struct clocksource { char *name; struct list_head list; int rating; - cycle_t (*read)(void); + cycle_t (*read)(struct clocksource *cs); + int (*enable)(struct clocksource *cs); + void (*disable)(struct clocksource *cs); cycle_t mask; u32 mult; u32 mult_orig; @@ -170,7 +275,54 @@ static inline u32 clocksource_hz2mult(u32 hz, u32 shift_constant) */ static inline cycle_t clocksource_read(struct clocksource *cs) { - return cs->read(); + return cs->read(cs); +} + +/** + * clocksource_enable: - enable clocksource + * @cs: pointer to clocksource + * + * Enables the specified clocksource. The clocksource callback + * function should start up the hardware and setup mult and field + * members of struct clocksource to reflect hardware capabilities. + */ +static inline int clocksource_enable(struct clocksource *cs) +{ + int ret = 0; + + if (cs->enable) + ret = cs->enable(cs); + + /* + * The frequency may have changed while the clocksource + * was disabled. If so the code in ->enable() must update + * the mult value to reflect the new frequency. Make sure + * mult_orig follows this change. + */ + cs->mult_orig = cs->mult; + + return ret; +} + +/** + * clocksource_disable: - disable clocksource + * @cs: pointer to clocksource + * + * Disables the specified clocksource. The clocksource callback + * function should power down the now unused hardware block to + * save power. + */ +static inline void clocksource_disable(struct clocksource *cs) +{ + /* + * Save mult_orig in mult so clocksource_enable() can + * restore the value regardless if ->enable() updates + * the value of mult or not. + */ + cs->mult = cs->mult_orig; + + if (cs->disable) + cs->disable(cs); } /** diff --git a/include/linux/cm4000_cs.h b/include/linux/cm4000_cs.h index 605ebe24bb2..72bfefdbd76 100644 --- a/include/linux/cm4000_cs.h +++ b/include/linux/cm4000_cs.h @@ -1,6 +1,8 @@ #ifndef _CM4000_H_ #define _CM4000_H_ +#include <linux/types.h> + #define MAX_ATR 33 #define CM4000_MAX_DEV 4 @@ -10,9 +12,9 @@ * not to break compilation of userspace apps. -HW */ typedef struct atreq { - int32_t atr_len; + __s32 atr_len; unsigned char atr[64]; - int32_t power_act; + __s32 power_act; unsigned char bIFSD; unsigned char bIFSC; } atreq_t; @@ -22,13 +24,13 @@ typedef struct atreq { * member sizes. This leads to CONFIG_COMPAT breakage, since 32bit userspace * will lay out the structure members differently than the 64bit kernel. * - * I've changed "ptsreq.protocol" from "unsigned long" to "u_int32_t". + * I've changed "ptsreq.protocol" from "unsigned long" to "__u32". * On 32bit this will make no difference. With 64bit kernels, it will make * 32bit apps work, too. 
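Illustrating the reworked clocksource callbacks above (read() now receives the clocksource, plus optional enable()/disable()); the wrapping structure and the counter register are hypothetical:

#include <linux/kernel.h>
#include <linux/io.h>
#include <linux/clocksource.h>

struct example_clksrc {
	struct clocksource	cs;
	void __iomem		*counter;	/* hypothetical counter register */
};

static cycle_t example_clksrc_read(struct clocksource *cs)
{
	struct example_clksrc *ecs = container_of(cs, struct example_clksrc, cs);

	return (cycle_t)readl(ecs->counter);
}

static int example_clksrc_enable(struct clocksource *cs)
{
	/* power the counter up; update cs->mult here if the rate changed */
	return 0;
}

static void example_clksrc_disable(struct clocksource *cs)
{
	/* power the counter back down */
}

static struct example_clksrc example_clksrc_dev = {
	.cs = {
		.name		= "example",
		.rating		= 200,
		.read		= example_clksrc_read,
		.enable		= example_clksrc_enable,
		.disable	= example_clksrc_disable,
		.mask		= CLOCKSOURCE_MASK(32),
	},
};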
*/ typedef struct ptsreq { - u_int32_t protocol; /*T=0: 2^0, T=1: 2^1*/ + __u32 protocol; /*T=0: 2^0, T=1: 2^1*/ unsigned char flags; unsigned char pts1; unsigned char pts2; diff --git a/include/linux/cn_proc.h b/include/linux/cn_proc.h index 1c86d65bc4b..b8125b2eb66 100644 --- a/include/linux/cn_proc.h +++ b/include/linux/cn_proc.h @@ -65,20 +65,20 @@ struct proc_event { } ack; struct fork_proc_event { - pid_t parent_pid; - pid_t parent_tgid; - pid_t child_pid; - pid_t child_tgid; + __kernel_pid_t parent_pid; + __kernel_pid_t parent_tgid; + __kernel_pid_t child_pid; + __kernel_pid_t child_tgid; } fork; struct exec_proc_event { - pid_t process_pid; - pid_t process_tgid; + __kernel_pid_t process_pid; + __kernel_pid_t process_tgid; } exec; struct id_proc_event { - pid_t process_pid; - pid_t process_tgid; + __kernel_pid_t process_pid; + __kernel_pid_t process_tgid; union { __u32 ruid; /* task uid */ __u32 rgid; /* task gid */ @@ -90,8 +90,8 @@ struct proc_event { } id; struct exit_proc_event { - pid_t process_pid; - pid_t process_tgid; + __kernel_pid_t process_pid; + __kernel_pid_t process_tgid; __u32 exit_code, exit_signal; } exit; } event_data; diff --git a/include/linux/coda_psdev.h b/include/linux/coda_psdev.h index 07ae8f84605..5b5d4731f95 100644 --- a/include/linux/coda_psdev.h +++ b/include/linux/coda_psdev.h @@ -6,6 +6,7 @@ #define CODA_PSDEV_MAJOR 67 #define MAX_CODADEVS 5 /* how many do we allow */ +#ifdef __KERNEL__ struct kstatfs; /* communication pending/processing queues */ @@ -24,7 +25,6 @@ static inline struct venus_comm *coda_vcp(struct super_block *sb) return (struct venus_comm *)((sb)->s_fs_info); } - /* upcalls */ int venus_rootfid(struct super_block *sb, struct CodaFid *fidp); int venus_getattr(struct super_block *sb, struct CodaFid *fid, @@ -64,6 +64,12 @@ int coda_downcall(int opcode, union outputArgs *out, struct super_block *sb); int venus_fsync(struct super_block *sb, struct CodaFid *fid); int venus_statfs(struct dentry *dentry, struct kstatfs *sfs); +/* + * Statistics + */ + +extern struct venus_comm coda_comms[]; +#endif /* __KERNEL__ */ /* messages between coda filesystem in kernel and Venus */ struct upc_req { @@ -82,11 +88,4 @@ struct upc_req { #define REQ_WRITE 0x4 #define REQ_ABORT 0x8 - -/* - * Statistics - */ - -extern struct venus_comm coda_comms[]; - #endif diff --git a/include/linux/com20020.h b/include/linux/com20020.h index ac6d9a43e08..5dcfb944b6c 100644 --- a/include/linux/com20020.h +++ b/include/linux/com20020.h @@ -29,6 +29,7 @@ int com20020_check(struct net_device *dev); int com20020_found(struct net_device *dev, int shared); +extern const struct net_device_ops com20020_netdev_ops; /* The number of low I/O ports used by the card. 
*/ #define ARCNET_TOTAL_SIZE 8 diff --git a/include/linux/compat.h b/include/linux/compat.h index 3fd2194ff57..af931ee43dd 100644 --- a/include/linux/compat.h +++ b/include/linux/compat.h @@ -125,6 +125,13 @@ struct compat_dirent { char d_name[256]; }; +struct compat_ustat { + compat_daddr_t f_tfree; + compat_ino_t f_tinode; + char f_fname[6]; + char f_fpack[6]; +}; + typedef union compat_sigval { compat_int_t sival_int; compat_uptr_t sival_ptr; @@ -178,11 +185,18 @@ long compat_sys_semtimedop(int semid, struct sembuf __user *tsems, unsigned nsems, const struct compat_timespec __user *timeout); asmlinkage long compat_sys_keyctl(u32 option, u32 arg2, u32 arg3, u32 arg4, u32 arg5); +asmlinkage long compat_sys_ustat(unsigned dev, struct compat_ustat __user *u32); asmlinkage ssize_t compat_sys_readv(unsigned long fd, const struct compat_iovec __user *vec, unsigned long vlen); asmlinkage ssize_t compat_sys_writev(unsigned long fd, const struct compat_iovec __user *vec, unsigned long vlen); +asmlinkage ssize_t compat_sys_preadv(unsigned long fd, + const struct compat_iovec __user *vec, + unsigned long vlen, u32 pos_low, u32 pos_high); +asmlinkage ssize_t compat_sys_pwritev(unsigned long fd, + const struct compat_iovec __user *vec, + unsigned long vlen, u32 pos_low, u32 pos_high); int compat_do_execve(char * filename, compat_uptr_t __user *argv, compat_uptr_t __user *envp, struct pt_regs * regs); @@ -208,6 +222,8 @@ int copy_siginfo_from_user32(siginfo_t *to, struct compat_siginfo __user *from); int copy_siginfo_to_user32(struct compat_siginfo __user *to, siginfo_t *from); int get_compat_sigevent(struct sigevent *event, const struct compat_sigevent __user *u_event); +long compat_sys_rt_tgsigqueueinfo(compat_pid_t tgid, compat_pid_t pid, int sig, + struct compat_siginfo __user *uinfo); static inline int compat_timeval_compare(struct compat_timeval *lhs, struct compat_timeval *rhs) diff --git a/include/linux/compiler-gcc.h b/include/linux/compiler-gcc.h index 1514d534dee..a3ed7cb8ca3 100644 --- a/include/linux/compiler-gcc.h +++ b/include/linux/compiler-gcc.h @@ -52,7 +52,15 @@ #define __deprecated __attribute__((deprecated)) #define __packed __attribute__((packed)) #define __weak __attribute__((weak)) -#define __naked __attribute__((naked)) + +/* + * it doesn't make sense on ARM (currently the only user of __naked) to trace + * naked functions because then mcount is called without stack and frame pointer + * being set up and there is no chance to restore the lr register to the value + * before mcount was called. 
+ */ +#define __naked __attribute__((naked)) notrace + #define __noreturn __attribute__((noreturn)) /* diff --git a/include/linux/compiler-gcc3.h b/include/linux/compiler-gcc3.h index 8005effc04f..b721129e046 100644 --- a/include/linux/compiler-gcc3.h +++ b/include/linux/compiler-gcc3.h @@ -16,6 +16,12 @@ #define __must_check __attribute__((warn_unused_result)) #endif +#ifdef CONFIG_GCOV_KERNEL +# if __GNUC_MINOR__ < 4 +# error "GCOV profiling support for gcc versions below 3.4 not included" +# endif /* __GNUC_MINOR__ */ +#endif /* CONFIG_GCOV_KERNEL */ + /* * A trick to suppress uninitialized variable warning without generating any * code diff --git a/include/linux/compiler-gcc4.h b/include/linux/compiler-gcc4.h index 09992718f9e..450fa597c94 100644 --- a/include/linux/compiler-gcc4.h +++ b/include/linux/compiler-gcc4.h @@ -3,8 +3,10 @@ #endif /* GCC 4.1.[01] miscompiles __weak */ -#if __GNUC_MINOR__ == 1 && __GNUC_PATCHLEVEL__ <= 1 -# error Your version of gcc miscompiles the __weak directive +#ifdef __KERNEL__ +# if __GNUC_MINOR__ == 1 && __GNUC_PATCHLEVEL__ <= 1 +# error Your version of gcc miscompiles the __weak directive +# endif #endif #define __used __attribute__((__used__)) diff --git a/include/linux/compiler.h b/include/linux/compiler.h index d95da1020f1..04fb5135b4e 100644 --- a/include/linux/compiler.h +++ b/include/linux/compiler.h @@ -68,6 +68,7 @@ struct ftrace_branch_data { unsigned long miss; unsigned long hit; }; + unsigned long miss_hit[2]; }; }; @@ -75,7 +76,8 @@ struct ftrace_branch_data { * Note: DISABLE_BRANCH_PROFILING can be used by special lowlevel code * to disable branch tracing on a per file basis. */ -#if defined(CONFIG_TRACE_BRANCH_PROFILING) && !defined(DISABLE_BRANCH_PROFILING) +#if defined(CONFIG_TRACE_BRANCH_PROFILING) \ + && !defined(DISABLE_BRANCH_PROFILING) && !defined(__CHECKER__) void ftrace_likely_update(struct ftrace_branch_data *f, int val, int expect); #define likely_notrace(x) __builtin_expect(!!(x), 1) @@ -113,7 +115,9 @@ void ftrace_likely_update(struct ftrace_branch_data *f, int val, int expect); * "Define 'is'", Bill Clinton * "Define 'if'", Steven Rostedt */ -#define if(cond) if (__builtin_constant_p((cond)) ? !!(cond) : \ +#define if(cond, ...) __trace_if( (cond , ## __VA_ARGS__) ) +#define __trace_if(cond) \ + if (__builtin_constant_p((cond)) ? !!(cond) : \ ({ \ int ______r; \ static struct ftrace_branch_data \ @@ -125,10 +129,7 @@ void ftrace_likely_update(struct ftrace_branch_data *f, int val, int expect); .line = __LINE__, \ }; \ ______r = !!(cond); \ - if (______r) \ - ______f.hit++; \ - else \ - ______f.miss++; \ + ______f.miss_hit[______r]++; \ ______r; \ })) #endif /* CONFIG_PROFILE_ALL_BRANCHES */ @@ -260,6 +261,11 @@ void ftrace_likely_update(struct ftrace_branch_data *f, int val, int expect); # define __section(S) __attribute__ ((__section__(#S))) #endif +/* Are two types/vars the same type (ignoring qualifiers)? */ +#ifndef __same_type +# define __same_type(a, b) __builtin_types_compatible_p(typeof(a), typeof(b)) +#endif + /* * Prevent the compiler from merging or refetching accesses. 
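The new __same_type() helper above enables compile-time type checks; a sketch of the classic use, guarding an array-size macro against being handed a pointer (the macro name is illustrative):

#include <linux/compiler.h>
#include <linux/kernel.h>	/* BUILD_BUG_ON_ZERO() */

/*
 * Evaluates to the number of elements in a true array and breaks the
 * build if a pointer is passed instead.
 */
#define example_array_size(a)						\
	(sizeof(a) / sizeof((a)[0]) +					\
	 BUILD_BUG_ON_ZERO(__same_type((a), &(a)[0])))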
The compiler * is also forbidden from reordering successive instances of ACCESS_ONCE(), diff --git a/include/linux/connector.h b/include/linux/connector.h index 34f2789d9b9..47ebf416f51 100644 --- a/include/linux/connector.h +++ b/include/linux/connector.h @@ -39,8 +39,12 @@ #define CN_IDX_V86D 0x4 #define CN_VAL_V86D_UVESAFB 0x1 #define CN_IDX_BB 0x5 /* BlackBoard, from the TSP GPL sampling framework */ +#define CN_DST_IDX 0x6 +#define CN_DST_VAL 0x1 +#define CN_IDX_DM 0x7 /* Device Mapper */ +#define CN_VAL_DM_USERSPACE_LOG 0x1 -#define CN_NETLINK_USERS 6 +#define CN_NETLINK_USERS 8 /* * Maximum connector's message size. @@ -109,6 +113,12 @@ struct cn_queue_dev { unsigned char name[CN_CBQ_NAMELEN]; struct workqueue_struct *cn_queue; + /* Sent to kevent to create cn_queue only when needed */ + struct work_struct wq_creation; + /* Tell if the wq_creation job is pending/completed */ + atomic_t wq_requested; + /* Wait for cn_queue to be created */ + wait_queue_head_t wq_created; struct list_head queue_list; spinlock_t queue_lock; @@ -126,7 +136,7 @@ struct cn_callback_data { void *ddata; void *callback_priv; - void (*callback) (void *); + void (*callback) (struct cn_msg *); void *free; }; @@ -157,13 +167,15 @@ struct cn_dev { struct cn_queue_dev *cbdev; }; -int cn_add_callback(struct cb_id *, char *, void (*callback) (void *)); +int cn_add_callback(struct cb_id *, char *, void (*callback) (struct cn_msg *)); void cn_del_callback(struct cb_id *); int cn_netlink_send(struct cn_msg *, u32, gfp_t); -int cn_queue_add_callback(struct cn_queue_dev *dev, char *name, struct cb_id *id, void (*callback)(void *)); +int cn_queue_add_callback(struct cn_queue_dev *dev, char *name, struct cb_id *id, void (*callback)(struct cn_msg *)); void cn_queue_del_callback(struct cn_queue_dev *dev, struct cb_id *id); +int queue_cn_work(struct cn_callback_entry *cbq, struct work_struct *work); + struct cn_queue_dev *cn_queue_alloc_dev(char *name, struct sock *); void cn_queue_free_dev(struct cn_queue_dev *dev); diff --git a/include/linux/console.h b/include/linux/console.h index a67a90cf826..dcca5339ceb 100644 --- a/include/linux/console.h +++ b/include/linux/console.h @@ -137,8 +137,8 @@ extern void resume_console(void); int mda_console_init(void); void prom_con_init(void); -void vcs_make_sysfs(struct tty_struct *tty); -void vcs_remove_sysfs(struct tty_struct *tty); +void vcs_make_sysfs(int index); +void vcs_remove_sysfs(int index); /* Some debug stub to catch some of the obvious races in the VT code */ #if 1 diff --git a/include/linux/console_struct.h b/include/linux/console_struct.h index d71f7c0f931..38fe59dc89a 100644 --- a/include/linux/console_struct.h +++ b/include/linux/console_struct.h @@ -89,7 +89,6 @@ struct vc_data { unsigned int vc_need_wrap : 1; unsigned int vc_can_do_color : 1; unsigned int vc_report_mouse : 2; - unsigned int vc_kmalloced : 1; unsigned char vc_utf : 1; /* Unicode UTF-8 encoding */ unsigned char vc_utf_count; int vc_utf_char; diff --git a/include/linux/cpu.h b/include/linux/cpu.h index c2747ac2ae4..47536197ffd 100644 --- a/include/linux/cpu.h +++ b/include/linux/cpu.h @@ -23,7 +23,6 @@ #include <linux/node.h> #include <linux/compiler.h> #include <linux/cpumask.h> -#include <linux/mutex.h> struct cpu { int node_id; /* The node which contains the CPU */ @@ -49,6 +48,15 @@ struct notifier_block; #ifdef CONFIG_SMP /* Need to know about CPUs going up/down? 
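Connector callbacks now receive the struct cn_msg itself (see the cn_add_callback() prototype change above); a registration sketch, reusing the new CN_IDX_DM/CN_VAL_DM_USERSPACE_LOG ids purely as an illustration:

#include <linux/kernel.h>
#include <linux/init.h>
#include <linux/connector.h>

static void example_cn_callback(struct cn_msg *msg)
{
	pr_info("cn msg idx=%x val=%x seq=%u len=%u\n",
		msg->id.idx, msg->id.val, msg->seq, msg->len);
}

static int __init example_cn_register(void)
{
	struct cb_id id = { .idx = CN_IDX_DM, .val = CN_VAL_DM_USERSPACE_LOG };

	return cn_add_callback(&id, "example", example_cn_callback);
}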
*/ +#if defined(CONFIG_HOTPLUG_CPU) || !defined(MODULE) +#define cpu_notifier(fn, pri) { \ + static struct notifier_block fn##_nb __cpuinitdata = \ + { .notifier_call = fn, .priority = pri }; \ + register_cpu_notifier(&fn##_nb); \ +} +#else /* #if defined(CONFIG_HOTPLUG_CPU) || !defined(MODULE) */ +#define cpu_notifier(fn, pri) do { (void)(fn); } while (0) +#endif /* #else #if defined(CONFIG_HOTPLUG_CPU) || !defined(MODULE) */ #ifdef CONFIG_HOTPLUG_CPU extern int register_cpu_notifier(struct notifier_block *nb); extern void unregister_cpu_notifier(struct notifier_block *nb); @@ -70,12 +78,13 @@ static inline void unregister_cpu_notifier(struct notifier_block *nb) int cpu_up(unsigned int cpu); void notify_cpu_starting(unsigned int cpu); -extern void cpu_hotplug_init(void); extern void cpu_maps_update_begin(void); extern void cpu_maps_update_done(void); #else /* CONFIG_SMP */ +#define cpu_notifier(fn, pri) do { (void)(fn); } while (0) + static inline int register_cpu_notifier(struct notifier_block *nb) { return 0; @@ -85,10 +94,6 @@ static inline void unregister_cpu_notifier(struct notifier_block *nb) { } -static inline void cpu_hotplug_init(void) -{ -} - static inline void cpu_maps_update_begin(void) { } @@ -103,34 +108,15 @@ extern struct sysdev_class cpu_sysdev_class; #ifdef CONFIG_HOTPLUG_CPU /* Stop CPUs going up and down. */ -static inline void cpuhotplug_mutex_lock(struct mutex *cpu_hp_mutex) -{ - mutex_lock(cpu_hp_mutex); -} - -static inline void cpuhotplug_mutex_unlock(struct mutex *cpu_hp_mutex) -{ - mutex_unlock(cpu_hp_mutex); -} - extern void get_online_cpus(void); extern void put_online_cpus(void); -#define hotcpu_notifier(fn, pri) { \ - static struct notifier_block fn##_nb __cpuinitdata = \ - { .notifier_call = fn, .priority = pri }; \ - register_cpu_notifier(&fn##_nb); \ -} +#define hotcpu_notifier(fn, pri) cpu_notifier(fn, pri) #define register_hotcpu_notifier(nb) register_cpu_notifier(nb) #define unregister_hotcpu_notifier(nb) unregister_cpu_notifier(nb) int cpu_down(unsigned int cpu); #else /* CONFIG_HOTPLUG_CPU */ -static inline void cpuhotplug_mutex_lock(struct mutex *cpu_hp_mutex) -{ } -static inline void cpuhotplug_mutex_unlock(struct mutex *cpu_hp_mutex) -{ } - #define get_online_cpus() do { } while (0) #define put_online_cpus() do { } while (0) #define hotcpu_notifier(fn, pri) do { (void)(fn); } while (0) diff --git a/include/linux/cpufreq.h b/include/linux/cpufreq.h index 384b38d3e8e..161042746af 100644 --- a/include/linux/cpufreq.h +++ b/include/linux/cpufreq.h @@ -234,7 +234,6 @@ struct cpufreq_driver { int (*suspend) (struct cpufreq_policy *policy, pm_message_t pmsg); int (*resume) (struct cpufreq_policy *policy); struct freq_attr **attr; - bool hide_interface; }; /* flags */ diff --git a/include/linux/cpumask.h b/include/linux/cpumask.h index 9f315382610..796df12091b 100644 --- a/include/linux/cpumask.h +++ b/include/linux/cpumask.h @@ -43,10 +43,10 @@ * int cpu_isset(cpu, mask) true iff bit 'cpu' set in mask * int cpu_test_and_set(cpu, mask) test and set bit 'cpu' in mask * - * void cpus_and(dst, src1, src2) dst = src1 & src2 [intersection] + * int cpus_and(dst, src1, src2) dst = src1 & src2 [intersection] * void cpus_or(dst, src1, src2) dst = src1 | src2 [union] * void cpus_xor(dst, src1, src2) dst = src1 ^ src2 - * void cpus_andnot(dst, src1, src2) dst = src1 & ~src2 + * int cpus_andnot(dst, src1, src2) dst = src1 & ~src2 * void cpus_complement(dst, src) dst = ~src * * int cpus_equal(mask1, mask2) Does mask1 == mask2? 
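The new cpu_notifier() helper above compiles away in modules built without CPU hotplug; a usage sketch with a hypothetical callback:

#include <linux/kernel.h>
#include <linux/init.h>
#include <linux/cpu.h>
#include <linux/notifier.h>

static int __cpuinit example_cpu_callback(struct notifier_block *nb,
					  unsigned long action, void *hcpu)
{
	unsigned int cpu = (unsigned long)hcpu;

	if (action == CPU_ONLINE || action == CPU_ONLINE_FROZEN)
		pr_info("cpu %u is now online\n", cpu);
	return NOTIFY_OK;
}

static int __init example_register_cpu_notifier(void)
{
	cpu_notifier(example_cpu_callback, 0);
	return 0;
}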
@@ -179,10 +179,10 @@ static inline int __cpu_test_and_set(int cpu, cpumask_t *addr) } #define cpus_and(dst, src1, src2) __cpus_and(&(dst), &(src1), &(src2), NR_CPUS) -static inline void __cpus_and(cpumask_t *dstp, const cpumask_t *src1p, +static inline int __cpus_and(cpumask_t *dstp, const cpumask_t *src1p, const cpumask_t *src2p, int nbits) { - bitmap_and(dstp->bits, src1p->bits, src2p->bits, nbits); + return bitmap_and(dstp->bits, src1p->bits, src2p->bits, nbits); } #define cpus_or(dst, src1, src2) __cpus_or(&(dst), &(src1), &(src2), NR_CPUS) @@ -201,10 +201,10 @@ static inline void __cpus_xor(cpumask_t *dstp, const cpumask_t *src1p, #define cpus_andnot(dst, src1, src2) \ __cpus_andnot(&(dst), &(src1), &(src2), NR_CPUS) -static inline void __cpus_andnot(cpumask_t *dstp, const cpumask_t *src1p, +static inline int __cpus_andnot(cpumask_t *dstp, const cpumask_t *src1p, const cpumask_t *src2p, int nbits) { - bitmap_andnot(dstp->bits, src1p->bits, src2p->bits, nbits); + return bitmap_andnot(dstp->bits, src1p->bits, src2p->bits, nbits); } #define cpus_complement(dst, src) __cpus_complement(&(dst), &(src), NR_CPUS) @@ -738,11 +738,11 @@ static inline void cpumask_clear(struct cpumask *dstp) * @src1p: the first input * @src2p: the second input */ -static inline void cpumask_and(struct cpumask *dstp, +static inline int cpumask_and(struct cpumask *dstp, const struct cpumask *src1p, const struct cpumask *src2p) { - bitmap_and(cpumask_bits(dstp), cpumask_bits(src1p), + return bitmap_and(cpumask_bits(dstp), cpumask_bits(src1p), cpumask_bits(src2p), nr_cpumask_bits); } @@ -779,11 +779,11 @@ static inline void cpumask_xor(struct cpumask *dstp, * @src1p: the first input * @src2p: the second input */ -static inline void cpumask_andnot(struct cpumask *dstp, +static inline int cpumask_andnot(struct cpumask *dstp, const struct cpumask *src1p, const struct cpumask *src2p) { - bitmap_andnot(cpumask_bits(dstp), cpumask_bits(src1p), + return bitmap_andnot(cpumask_bits(dstp), cpumask_bits(src1p), cpumask_bits(src2p), nr_cpumask_bits); } @@ -1022,6 +1022,8 @@ typedef struct cpumask *cpumask_var_t; bool alloc_cpumask_var_node(cpumask_var_t *mask, gfp_t flags, int node); bool alloc_cpumask_var(cpumask_var_t *mask, gfp_t flags); +bool zalloc_cpumask_var_node(cpumask_var_t *mask, gfp_t flags, int node); +bool zalloc_cpumask_var(cpumask_var_t *mask, gfp_t flags); void alloc_bootmem_cpumask_var(cpumask_var_t *mask); void free_cpumask_var(cpumask_var_t mask); void free_bootmem_cpumask_var(cpumask_var_t mask); @@ -1040,6 +1042,19 @@ static inline bool alloc_cpumask_var_node(cpumask_var_t *mask, gfp_t flags, return true; } +static inline bool zalloc_cpumask_var(cpumask_var_t *mask, gfp_t flags) +{ + cpumask_clear(*mask); + return true; +} + +static inline bool zalloc_cpumask_var_node(cpumask_var_t *mask, gfp_t flags, + int node) +{ + cpumask_clear(*mask); + return true; +} + static inline void alloc_bootmem_cpumask_var(cpumask_var_t *mask) { } diff --git a/include/linux/cpuset.h b/include/linux/cpuset.h index 90c6074a36c..a5740fc4d04 100644 --- a/include/linux/cpuset.h +++ b/include/linux/cpuset.h @@ -12,12 +12,12 @@ #include <linux/cpumask.h> #include <linux/nodemask.h> #include <linux/cgroup.h> +#include <linux/mm.h> #ifdef CONFIG_CPUSETS extern int number_of_cpusets; /* How many cpusets are defined in system? 
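cpumask_and()/cpumask_andnot() now report whether the result is non-empty, and zalloc_cpumask_var() hands back a cleared mask; a sketch combining both (the function is hypothetical):

#include <linux/cpumask.h>
#include <linux/gfp.h>
#include <linux/errno.h>

static int example_restrict_to_online(const struct cpumask *allowed)
{
	cpumask_var_t tmp;
	int ret;

	if (!zalloc_cpumask_var(&tmp, GFP_KERNEL))
		return -ENOMEM;

	/* non-zero return means the intersection is not empty */
	ret = cpumask_and(tmp, allowed, cpu_online_mask) ? 0 : -EINVAL;

	free_cpumask_var(tmp);
	return ret;
}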
*/ -extern int cpuset_init_early(void); extern int cpuset_init(void); extern void cpuset_init_smp(void); extern void cpuset_cpus_allowed(struct task_struct *p, struct cpumask *mask); @@ -26,22 +26,31 @@ extern void cpuset_cpus_allowed_locked(struct task_struct *p, extern nodemask_t cpuset_mems_allowed(struct task_struct *p); #define cpuset_current_mems_allowed (current->mems_allowed) void cpuset_init_current_mems_allowed(void); -void cpuset_update_task_memory_state(void); int cpuset_nodemask_valid_mems_allowed(nodemask_t *nodemask); -extern int __cpuset_zone_allowed_softwall(struct zone *z, gfp_t gfp_mask); -extern int __cpuset_zone_allowed_hardwall(struct zone *z, gfp_t gfp_mask); +extern int __cpuset_node_allowed_softwall(int node, gfp_t gfp_mask); +extern int __cpuset_node_allowed_hardwall(int node, gfp_t gfp_mask); -static int inline cpuset_zone_allowed_softwall(struct zone *z, gfp_t gfp_mask) +static inline int cpuset_node_allowed_softwall(int node, gfp_t gfp_mask) { return number_of_cpusets <= 1 || - __cpuset_zone_allowed_softwall(z, gfp_mask); + __cpuset_node_allowed_softwall(node, gfp_mask); } -static int inline cpuset_zone_allowed_hardwall(struct zone *z, gfp_t gfp_mask) +static inline int cpuset_node_allowed_hardwall(int node, gfp_t gfp_mask) { return number_of_cpusets <= 1 || - __cpuset_zone_allowed_hardwall(z, gfp_mask); + __cpuset_node_allowed_hardwall(node, gfp_mask); +} + +static inline int cpuset_zone_allowed_softwall(struct zone *z, gfp_t gfp_mask) +{ + return cpuset_node_allowed_softwall(zone_to_nid(z), gfp_mask); +} + +static inline int cpuset_zone_allowed_hardwall(struct zone *z, gfp_t gfp_mask) +{ + return cpuset_node_allowed_hardwall(zone_to_nid(z), gfp_mask); } extern int cpuset_mems_allowed_intersects(const struct task_struct *tsk1, @@ -81,21 +90,25 @@ extern void rebuild_sched_domains(void); extern void cpuset_print_task_mems_allowed(struct task_struct *p); +static inline void set_mems_allowed(nodemask_t nodemask) +{ + current->mems_allowed = nodemask; +} + #else /* !CONFIG_CPUSETS */ -static inline int cpuset_init_early(void) { return 0; } static inline int cpuset_init(void) { return 0; } static inline void cpuset_init_smp(void) {} static inline void cpuset_cpus_allowed(struct task_struct *p, struct cpumask *mask) { - *mask = cpu_possible_map; + cpumask_copy(mask, cpu_possible_mask); } static inline void cpuset_cpus_allowed_locked(struct task_struct *p, struct cpumask *mask) { - *mask = cpu_possible_map; + cpumask_copy(mask, cpu_possible_mask); } static inline nodemask_t cpuset_mems_allowed(struct task_struct *p) @@ -105,13 +118,22 @@ static inline nodemask_t cpuset_mems_allowed(struct task_struct *p) #define cpuset_current_mems_allowed (node_states[N_HIGH_MEMORY]) static inline void cpuset_init_current_mems_allowed(void) {} -static inline void cpuset_update_task_memory_state(void) {} static inline int cpuset_nodemask_valid_mems_allowed(nodemask_t *nodemask) { return 1; } +static inline int cpuset_node_allowed_softwall(int node, gfp_t gfp_mask) +{ + return 1; +} + +static inline int cpuset_node_allowed_hardwall(int node, gfp_t gfp_mask) +{ + return 1; +} + static inline int cpuset_zone_allowed_softwall(struct zone *z, gfp_t gfp_mask) { return 1; @@ -167,6 +189,10 @@ static inline void cpuset_print_task_mems_allowed(struct task_struct *p) { } +static inline void set_mems_allowed(nodemask_t nodemask) +{ +} + #endif /* !CONFIG_CPUSETS */ #endif /* _LINUX_CPUSET_H */ diff --git a/include/linux/cramfs_fs.h b/include/linux/cramfs_fs.h index 3be4e5a27d8..6fc2bed368b 
100644 --- a/include/linux/cramfs_fs.h +++ b/include/linux/cramfs_fs.h @@ -2,9 +2,8 @@ #define __CRAMFS_H #include <linux/types.h> +#include <linux/magic.h> -#define CRAMFS_MAGIC 0x28cd3d45 /* some random number */ -#define CRAMFS_MAGIC_WEND 0x453dcd28 /* magic number with the wrong endianess */ #define CRAMFS_SIGNATURE "Compressed ROMFS" /* diff --git a/include/linux/crash_dump.h b/include/linux/crash_dump.h index 2dac064d835..0026f267da2 100644 --- a/include/linux/crash_dump.h +++ b/include/linux/crash_dump.h @@ -3,7 +3,6 @@ #ifdef CONFIG_CRASH_DUMP #include <linux/kexec.h> -#include <linux/smp_lock.h> #include <linux/device.h> #include <linux/proc_fs.h> diff --git a/include/linux/cred.h b/include/linux/cred.h index 3282ee4318e..24520a539c6 100644 --- a/include/linux/cred.h +++ b/include/linux/cred.h @@ -13,6 +13,7 @@ #define _LINUX_CRED_H #include <linux/capability.h> +#include <linux/init.h> #include <linux/key.h> #include <asm/atomic.h> @@ -113,6 +114,13 @@ struct thread_group_cred { */ struct cred { atomic_t usage; +#ifdef CONFIG_DEBUG_CREDENTIALS + atomic_t subscribers; /* number of processes subscribed */ + void *put_addr; + unsigned magic; +#define CRED_MAGIC 0x43736564 +#define CRED_MAGIC_DEAD 0x44656144 +#endif uid_t uid; /* real UID of the task */ gid_t gid; /* real GID of the task */ uid_t suid; /* saved UID of the task */ @@ -142,7 +150,9 @@ struct cred { }; extern void __put_cred(struct cred *); +extern void exit_creds(struct task_struct *); extern int copy_creds(struct task_struct *, unsigned long); +extern struct cred *cred_alloc_blank(void); extern struct cred *prepare_creds(void); extern struct cred *prepare_exec_creds(void); extern struct cred *prepare_usermodehelper_creds(void); @@ -157,6 +167,60 @@ extern int set_security_override_from_ctx(struct cred *, const char *); extern int set_create_files_as(struct cred *, struct inode *); extern void __init cred_init(void); +/* + * check for validity of credentials + */ +#ifdef CONFIG_DEBUG_CREDENTIALS +extern void __invalid_creds(const struct cred *, const char *, unsigned); +extern void __validate_process_creds(struct task_struct *, + const char *, unsigned); + +static inline bool creds_are_invalid(const struct cred *cred) +{ + if (cred->magic != CRED_MAGIC) + return true; + if (atomic_read(&cred->usage) < atomic_read(&cred->subscribers)) + return true; +#ifdef CONFIG_SECURITY_SELINUX + if ((unsigned long) cred->security < PAGE_SIZE) + return true; + if ((*(u32*)cred->security & 0xffffff00) == + (POISON_FREE << 24 | POISON_FREE << 16 | POISON_FREE << 8)) + return true; +#endif + return false; +} + +static inline void __validate_creds(const struct cred *cred, + const char *file, unsigned line) +{ + if (unlikely(creds_are_invalid(cred))) + __invalid_creds(cred, file, line); +} + +#define validate_creds(cred) \ +do { \ + __validate_creds((cred), __FILE__, __LINE__); \ +} while(0) + +#define validate_process_creds() \ +do { \ + __validate_process_creds(current, __FILE__, __LINE__); \ +} while(0) + +extern void validate_creds_for_do_exit(struct task_struct *); +#else +static inline void validate_creds(const struct cred *cred) +{ +} +static inline void validate_creds_for_do_exit(struct task_struct *tsk) +{ +} +static inline void validate_process_creds(void) +{ +} +#endif + /** * get_new_cred - Get a reference on a new set of credentials * @cred: The new credentials to reference @@ -185,7 +249,9 @@ static inline struct cred *get_new_cred(struct cred *cred) */ static inline const struct cred *get_cred(const struct cred *cred) { - 
return get_new_cred((struct cred *) cred); + struct cred *nonconst_cred = (struct cred *) cred; + validate_creds(cred); + return get_new_cred(nonconst_cred); } /** @@ -203,7 +269,7 @@ static inline void put_cred(const struct cred *_cred) { struct cred *cred = (struct cred *) _cred; - BUG_ON(atomic_read(&(cred)->usage) <= 0); + validate_creds(cred); if (atomic_dec_and_test(&(cred)->usage)) __put_cred(cred); } diff --git a/include/linux/crypto.h b/include/linux/crypto.h index 3bacd71509f..fd929889e8d 100644 --- a/include/linux/crypto.h +++ b/include/linux/crypto.h @@ -40,6 +40,7 @@ #define CRYPTO_ALG_TYPE_SHASH 0x00000009 #define CRYPTO_ALG_TYPE_AHASH 0x0000000a #define CRYPTO_ALG_TYPE_RNG 0x0000000c +#define CRYPTO_ALG_TYPE_PCOMPRESS 0x0000000f #define CRYPTO_ALG_TYPE_HASH_MASK 0x0000000e #define CRYPTO_ALG_TYPE_AHASH_MASK 0x0000000c @@ -114,7 +115,6 @@ struct crypto_async_request; struct crypto_aead; struct crypto_blkcipher; struct crypto_hash; -struct crypto_ahash; struct crypto_rng; struct crypto_tfm; struct crypto_type; @@ -145,16 +145,6 @@ struct ablkcipher_request { void *__ctx[] CRYPTO_MINALIGN_ATTR; }; -struct ahash_request { - struct crypto_async_request base; - - unsigned int nbytes; - struct scatterlist *src; - u8 *result; - - void *__ctx[] CRYPTO_MINALIGN_ATTR; -}; - /** * struct aead_request - AEAD request * @base: Common attributes for async crypto requests @@ -219,18 +209,6 @@ struct ablkcipher_alg { unsigned int ivsize; }; -struct ahash_alg { - int (*init)(struct ahash_request *req); - int (*reinit)(struct ahash_request *req); - int (*update)(struct ahash_request *req); - int (*final)(struct ahash_request *req); - int (*digest)(struct ahash_request *req); - int (*setkey)(struct crypto_ahash *tfm, const u8 *key, - unsigned int keylen); - - unsigned int digestsize; -}; - struct aead_alg { int (*setkey)(struct crypto_aead *tfm, const u8 *key, unsigned int keylen); @@ -317,7 +295,6 @@ struct rng_alg { #define cra_cipher cra_u.cipher #define cra_digest cra_u.digest #define cra_hash cra_u.hash -#define cra_ahash cra_u.ahash #define cra_compress cra_u.compress #define cra_rng cra_u.rng @@ -345,7 +322,6 @@ struct crypto_alg { struct cipher_alg cipher; struct digest_alg digest; struct hash_alg hash; - struct ahash_alg ahash; struct compress_alg compress; struct rng_alg rng; } cra_u; @@ -432,18 +408,6 @@ struct hash_tfm { unsigned int digestsize; }; -struct ahash_tfm { - int (*init)(struct ahash_request *req); - int (*update)(struct ahash_request *req); - int (*final)(struct ahash_request *req); - int (*digest)(struct ahash_request *req); - int (*setkey)(struct crypto_ahash *tfm, const u8 *key, - unsigned int keylen); - - unsigned int digestsize; - unsigned int reqsize; -}; - struct compress_tfm { int (*cot_compress)(struct crypto_tfm *tfm, const u8 *src, unsigned int slen, @@ -464,7 +428,6 @@ struct rng_tfm { #define crt_blkcipher crt_u.blkcipher #define crt_cipher crt_u.cipher #define crt_hash crt_u.hash -#define crt_ahash crt_u.ahash #define crt_compress crt_u.compress #define crt_rng crt_u.rng @@ -478,7 +441,6 @@ struct crypto_tfm { struct blkcipher_tfm blkcipher; struct cipher_tfm cipher; struct hash_tfm hash; - struct ahash_tfm ahash; struct compress_tfm compress; struct rng_tfm rng; } crt_u; @@ -548,11 +510,13 @@ struct crypto_attr_u32 { * Transform user interface. 
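With CONFIG_DEBUG_CREDENTIALS the validate_creds*() hooks above become active (and compile to no-ops otherwise); a sketch of the reference pattern they are meant to police, illustrative only:

#include <linux/cred.h>
#include <linux/sched.h>

static const struct cred *example_take_current_cred(void)
{
	const struct cred *cred = get_cred(current_cred());

	validate_creds(cred);	/* no-op unless CONFIG_DEBUG_CREDENTIALS */
	return cred;
}

static void example_drop_cred(const struct cred *cred)
{
	put_cred(cred);		/* now also validates before dropping the ref */
}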
*/ -struct crypto_tfm *crypto_alloc_tfm(const char *alg_name, - const struct crypto_type *frontend, - u32 type, u32 mask); struct crypto_tfm *crypto_alloc_base(const char *alg_name, u32 type, u32 mask); -void crypto_free_tfm(struct crypto_tfm *tfm); +void crypto_destroy_tfm(void *mem, struct crypto_tfm *tfm); + +static inline void crypto_free_tfm(struct crypto_tfm *tfm) +{ + return crypto_destroy_tfm(tfm, tfm); +} int alg_test(const char *driver, const char *alg, u32 type, u32 mask); @@ -767,7 +731,7 @@ static inline struct ablkcipher_request *ablkcipher_request_alloc( static inline void ablkcipher_request_free(struct ablkcipher_request *req) { - kfree(req); + kzfree(req); } static inline void ablkcipher_request_set_callback( @@ -898,7 +862,7 @@ static inline struct aead_request *aead_request_alloc(struct crypto_aead *tfm, static inline void aead_request_free(struct aead_request *req) { - kfree(req); + kzfree(req); } static inline void aead_request_set_callback(struct aead_request *req, diff --git a/include/linux/cyclades.h b/include/linux/cyclades.h index d06fbf28634..1fbdea4f08e 100644 --- a/include/linux/cyclades.h +++ b/include/linux/cyclades.h @@ -82,9 +82,9 @@ struct cyclades_monitor { * open) */ struct cyclades_idle_stats { - time_t in_use; /* Time device has been in use (secs) */ - time_t recv_idle; /* Time since last char received (secs) */ - time_t xmit_idle; /* Time since last char transmitted (secs) */ + __kernel_time_t in_use; /* Time device has been in use (secs) */ + __kernel_time_t recv_idle; /* Time since last char received (secs) */ + __kernel_time_t xmit_idle; /* Time since last char transmitted (secs) */ unsigned long recv_bytes; /* Bytes received */ unsigned long xmit_bytes; /* Bytes transmitted */ unsigned long overruns; /* Input overruns */ @@ -142,19 +142,6 @@ struct CYZ_BOOT_CTRL { #ifndef DP_WINDOW_SIZE -/* #include "cyclomz.h" */ -/****************** ****************** *******************/ -/* - * The data types defined below are used in all ZFIRM interface - * data structures. They accomodate differences between HW - * architectures and compilers. 
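crypto_free_tfm() is now a thin wrapper around crypto_destroy_tfm(); allocation is unchanged, so a round trip still looks like the sketch below ("cbc(aes)" is just an example algorithm name):

#include <linux/crypto.h>
#include <linux/err.h>

static int example_crypto_roundtrip(void)
{
	struct crypto_tfm *tfm = crypto_alloc_base("cbc(aes)", 0, 0);

	if (IS_ERR(tfm))
		return PTR_ERR(tfm);

	/* ... use the transform ... */

	crypto_free_tfm(tfm);	/* ends up in crypto_destroy_tfm(tfm, tfm) */
	return 0;
}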
- */ - -typedef __u64 ucdouble; /* 64 bits, unsigned */ -typedef __u32 uclong; /* 32 bits, unsigned */ -typedef __u16 ucshort; /* 16 bits, unsigned */ -typedef __u8 ucchar; /* 8 bits, unsigned */ - /* * Memory Window Sizes */ @@ -507,16 +494,20 @@ struct ZFW_CTRL { /* Per card data structure */ struct cyclades_card { - void __iomem *base_addr; - void __iomem *ctl_addr; - int irq; - unsigned int num_chips; /* 0 if card absent, -1 if Z/PCI, else Y */ - unsigned int first_line; /* minor number of first channel on card */ - unsigned int nports; /* Number of ports in the card */ - int bus_index; /* address shift - 0 for ISA, 1 for PCI */ - int intr_enabled; /* FW Interrupt flag - 0 disabled, 1 enabled */ - spinlock_t card_lock; - struct cyclades_port *ports; + void __iomem *base_addr; + union { + void __iomem *p9050; + struct RUNTIME_9060 __iomem *p9060; + } ctl_addr; + int irq; + unsigned int num_chips; /* 0 if card absent, -1 if Z/PCI, else Y */ + unsigned int first_line; /* minor number of first channel on card */ + unsigned int nports; /* Number of ports in the card */ + int bus_index; /* address shift - 0 for ISA, 1 for PCI */ + int intr_enabled; /* FW Interrupt flag - 0 disabled, 1 enabled */ + u32 hw_ver; + spinlock_t card_lock; + struct cyclades_port *ports; }; /*************************************** diff --git a/include/linux/dca.h b/include/linux/dca.h index b00a753eda5..9c20c7e87d0 100644 --- a/include/linux/dca.h +++ b/include/linux/dca.h @@ -1,3 +1,23 @@ +/* + * Copyright(c) 2007 - 2009 Intel Corporation. All rights reserved. + * + * This program is free software; you can redistribute it and/or modify it + * under the terms of the GNU General Public License as published by the Free + * Software Foundation; either version 2 of the License, or (at your option) + * any later version. + * + * This program is distributed in the hope that it will be useful, but WITHOUT + * ANY WARRANTY; without even the implied warranty of MERCHANTABILITY or + * FITNESS FOR A PARTICULAR PURPOSE. See the GNU General Public License for + * more details. + * + * You should have received a copy of the GNU General Public License along with + * this program; if not, write to the Free Software Foundation, Inc., 59 + * Temple Place - Suite 330, Boston, MA 02111-1307, USA. + * + * The full GNU General Public License is included in this distribution in the + * file called COPYING. + */ #ifndef DCA_H #define DCA_H /* DCA Provider API */ diff --git a/include/linux/dcache.h b/include/linux/dcache.h index c66d22487bf..30b93b2a01a 100644 --- a/include/linux/dcache.h +++ b/include/linux/dcache.h @@ -112,7 +112,7 @@ struct dentry { struct list_head d_subdirs; /* our children */ struct list_head d_alias; /* inode alias list */ unsigned long d_time; /* used by d_revalidate */ - struct dentry_operations *d_op; + const struct dentry_operations *d_op; struct super_block *d_sb; /* The root of the dentry tree */ void *d_fsdata; /* fs-specific data */ @@ -180,10 +180,12 @@ d_iput: no no no yes #define DCACHE_REFERENCED 0x0008 /* Recently used, don't discard. 
*/ #define DCACHE_UNHASHED 0x0010 -#define DCACHE_INOTIFY_PARENT_WATCHED 0x0020 /* Parent inode is watched */ +#define DCACHE_INOTIFY_PARENT_WATCHED 0x0020 /* Parent inode is watched by inotify */ #define DCACHE_COOKIE 0x0040 /* For use by dcookie subsystem */ +#define DCACHE_FSNOTIFY_PARENT_WATCHED 0x0080 /* Parent inode is watched by some fsnotify listener */ + extern spinlock_t dcache_lock; extern seqlock_t rename_lock; @@ -351,6 +353,11 @@ static inline int d_unhashed(struct dentry *dentry) return (dentry->d_flags & DCACHE_UNHASHED); } +static inline int d_unlinked(struct dentry *dentry) +{ + return d_unhashed(dentry) && !IS_ROOT(dentry); +} + static inline struct dentry *dget_parent(struct dentry *dentry) { struct dentry *ret; @@ -368,7 +375,7 @@ static inline int d_mountpoint(struct dentry *dentry) return dentry->d_mounted; } -extern struct vfsmount *lookup_mnt(struct vfsmount *, struct dentry *); +extern struct vfsmount *lookup_mnt(struct path *); extern struct dentry *lookup_create(struct nameidata *nd, int is_dir); extern int sysctl_vfs_cache_pressure; diff --git a/include/linux/dcbnl.h b/include/linux/dcbnl.h index b0ef274e003..b7cdbb4373d 100644 --- a/include/linux/dcbnl.h +++ b/include/linux/dcbnl.h @@ -20,10 +20,12 @@ #ifndef __LINUX_DCBNL_H__ #define __LINUX_DCBNL_H__ +#include <linux/types.h> + #define DCB_PROTO_VERSION 1 struct dcbmsg { - unsigned char dcb_family; + __u8 dcb_family; __u8 cmd; __u16 dcb_pad; }; @@ -48,6 +50,8 @@ struct dcbmsg { * @DCB_CMD_SNUMTCS: set the number of traffic classes * @DCB_CMD_GBCN: set backward congestion notification configuration * @DCB_CMD_SBCN: get backward congestion notification configration. + * @DCB_CMD_GAPP: get application protocol configuration + * @DCB_CMD_SAPP: set application protocol configuration */ enum dcbnl_commands { DCB_CMD_UNDEFINED, @@ -78,6 +82,9 @@ enum dcbnl_commands { DCB_CMD_BCN_GCFG, DCB_CMD_BCN_SCFG, + DCB_CMD_GAPP, + DCB_CMD_SAPP, + __DCB_CMD_ENUM_MAX, DCB_CMD_MAX = __DCB_CMD_ENUM_MAX - 1, }; @@ -112,6 +119,7 @@ enum dcbnl_attrs { DCB_ATTR_CAP, DCB_ATTR_NUMTCS, DCB_ATTR_BCN, + DCB_ATTR_APP, __DCB_ATTR_ENUM_MAX, DCB_ATTR_MAX = __DCB_ATTR_ENUM_MAX - 1, @@ -336,5 +344,17 @@ enum dcb_general_attr_values { DCB_ATTR_VALUE_UNDEFINED = 0xff }; +#define DCB_APP_IDTYPE_ETHTYPE 0x00 +#define DCB_APP_IDTYPE_PORTNUM 0x01 +enum dcbnl_app_attrs { + DCB_APP_ATTR_UNDEFINED, + + DCB_APP_ATTR_IDTYPE, + DCB_APP_ATTR_ID, + DCB_APP_ATTR_PRIORITY, + + __DCB_APP_ATTR_ENUM_MAX, + DCB_APP_ATTR_MAX = __DCB_APP_ATTR_ENUM_MAX - 1, +}; #endif /* __LINUX_DCBNL_H__ */ diff --git a/include/linux/dccp.h b/include/linux/dccp.h index 61734e27abb..7434a8353e2 100644 --- a/include/linux/dccp.h +++ b/include/linux/dccp.h @@ -355,46 +355,6 @@ static inline unsigned int dccp_hdr_len(const struct sk_buff *skb) return __dccp_hdr_len(dccp_hdr(skb)); } - -/* initial values for each feature */ -#define DCCPF_INITIAL_SEQUENCE_WINDOW 100 -#define DCCPF_INITIAL_ACK_RATIO 2 -#define DCCPF_INITIAL_CCID DCCPC_CCID2 -/* FIXME: for now we're default to 1 but it should really be 0 */ -#define DCCPF_INITIAL_SEND_NDP_COUNT 1 - -/** - * struct dccp_minisock - Minimal DCCP connection representation - * - * Will be used to pass the state from dccp_request_sock to dccp_sock. 
- * - * @dccpms_sequence_window - Sequence Window Feature (section 7.5.2) - * @dccpms_pending - List of features being negotiated - * @dccpms_conf - - */ -struct dccp_minisock { - __u64 dccpms_sequence_window; - struct list_head dccpms_pending; - struct list_head dccpms_conf; -}; - -struct dccp_opt_conf { - __u8 *dccpoc_val; - __u8 dccpoc_len; -}; - -struct dccp_opt_pend { - struct list_head dccpop_node; - __u8 dccpop_type; - __u8 dccpop_feat; - __u8 *dccpop_val; - __u8 dccpop_len; - int dccpop_conf; - struct dccp_opt_conf *dccpop_sc; -}; - -extern void dccp_minisock_init(struct dccp_minisock *dmsk); - /** * struct dccp_request_sock - represent DCCP-specific connection request * @dreq_inet_rsk: structure inherited from @@ -483,13 +443,14 @@ struct dccp_ackvec; * @dccps_timestamp_time - time of receiving latest @dccps_timestamp_echo * @dccps_l_ack_ratio - feature-local Ack Ratio * @dccps_r_ack_ratio - feature-remote Ack Ratio + * @dccps_l_seq_win - local Sequence Window (influences ack number validity) + * @dccps_r_seq_win - remote Sequence Window (influences seq number validity) * @dccps_pcslen - sender partial checksum coverage (via sockopt) * @dccps_pcrlen - receiver partial checksum coverage (via sockopt) * @dccps_send_ndp_count - local Send NDP Count feature (7.7.2) * @dccps_ndp_count - number of Non Data Packets since last data packet * @dccps_mss_cache - current value of MSS (path MTU minus header sizes) * @dccps_rate_last - timestamp for rate-limiting DCCP-Sync (RFC 4340, 7.5.4) - * @dccps_minisock - associated minisock (accessed via dccp_msk) * @dccps_featneg - tracks feature-negotiation state (mostly during handshake) * @dccps_hc_rx_ackvec - rx half connection ack vector * @dccps_hc_rx_ccid - CCID used for the receiver (or receiving half-connection) @@ -523,12 +484,13 @@ struct dccp_sock { __u32 dccps_timestamp_time; __u16 dccps_l_ack_ratio; __u16 dccps_r_ack_ratio; + __u64 dccps_l_seq_win:48; + __u64 dccps_r_seq_win:48; __u8 dccps_pcslen:4; __u8 dccps_pcrlen:4; __u8 dccps_send_ndp_count:1; __u64 dccps_ndp_count:48; unsigned long dccps_rate_last; - struct dccp_minisock dccps_minisock; struct list_head dccps_featneg; struct dccp_ackvec *dccps_hc_rx_ackvec; struct ccid *dccps_hc_rx_ccid; @@ -546,11 +508,6 @@ static inline struct dccp_sock *dccp_sk(const struct sock *sk) return (struct dccp_sock *)sk; } -static inline struct dccp_minisock *dccp_msk(const struct sock *sk) -{ - return (struct dccp_minisock *)&dccp_sk(sk)->dccps_minisock; -} - static inline const char *dccp_role(const struct sock *sk) { switch (dccp_sk(sk)->dccps_role) { diff --git a/include/linux/debug_locks.h b/include/linux/debug_locks.h index 096476f1fb3..29b3ce3f2a1 100644 --- a/include/linux/debug_locks.h +++ b/include/linux/debug_locks.h @@ -2,12 +2,20 @@ #define __LINUX_DEBUG_LOCKING_H #include <linux/kernel.h> +#include <asm/atomic.h> +#include <asm/system.h> struct task_struct; extern int debug_locks; extern int debug_locks_silent; + +static inline int __debug_locks_off(void) +{ + return xchg(&debug_locks, 0); +} + /* * Generic 'turn off all lock debugging' function: */ diff --git a/include/linux/debugfs.h b/include/linux/debugfs.h index 23936b16426..eb5c2ba2f81 100644 --- a/include/linux/debugfs.h +++ b/include/linux/debugfs.h @@ -71,6 +71,9 @@ struct dentry *debugfs_create_bool(const char *name, mode_t mode, struct dentry *debugfs_create_blob(const char *name, mode_t mode, struct dentry *parent, struct debugfs_blob_wrapper *blob); + +bool debugfs_initialized(void); + #else #include <linux/err.h> @@ -162,6 
+165,13 @@ static inline struct dentry *debugfs_create_x32(const char *name, mode_t mode, return ERR_PTR(-ENODEV); } +static inline struct dentry *debugfs_create_size_t(const char *name, mode_t mode, + struct dentry *parent, + size_t *value) +{ + return ERR_PTR(-ENODEV); +} + static inline struct dentry *debugfs_create_bool(const char *name, mode_t mode, struct dentry *parent, u32 *value) @@ -176,6 +186,11 @@ static inline struct dentry *debugfs_create_blob(const char *name, mode_t mode, return ERR_PTR(-ENODEV); } +static inline bool debugfs_initialized(void) +{ + return false; +} + #endif #endif diff --git a/include/linux/decompress/bunzip2.h b/include/linux/decompress/bunzip2.h new file mode 100644 index 00000000000..115272137a9 --- /dev/null +++ b/include/linux/decompress/bunzip2.h @@ -0,0 +1,10 @@ +#ifndef DECOMPRESS_BUNZIP2_H +#define DECOMPRESS_BUNZIP2_H + +int bunzip2(unsigned char *inbuf, int len, + int(*fill)(void*, unsigned int), + int(*flush)(void*, unsigned int), + unsigned char *output, + int *pos, + void(*error)(char *x)); +#endif diff --git a/include/linux/decompress/generic.h b/include/linux/decompress/generic.h new file mode 100644 index 00000000000..0c7111a55a1 --- /dev/null +++ b/include/linux/decompress/generic.h @@ -0,0 +1,39 @@ +#ifndef DECOMPRESS_GENERIC_H +#define DECOMPRESS_GENERIC_H + +typedef int (*decompress_fn) (unsigned char *inbuf, int len, + int(*fill)(void*, unsigned int), + int(*flush)(void*, unsigned int), + unsigned char *outbuf, + int *posp, + void(*error)(char *x)); + +/* inbuf - input buffer + *len - len of pre-read data in inbuf + *fill - function to fill inbuf when empty + *flush - function to write out outbuf + *outbuf - output buffer + *posp - if non-null, input position (number of bytes read) will be + * returned here + * + *If len != 0, inbuf should contain all the necessary input data, and fill + *should be NULL + *If len = 0, inbuf can be NULL, in which case the decompressor will allocate + *the input buffer. If inbuf != NULL it must be at least XXX_IOBUF_SIZE bytes. + *fill will be called (repeatedly...) to read data, at most XXX_IOBUF_SIZE + *bytes should be read per call. Replace XXX with the appropriate decompressor + *name, i.e. LZMA_IOBUF_SIZE. + * + *If flush = NULL, outbuf must be large enough to buffer all the expected + *output. If flush != NULL, the output buffer will be allocated by the + *decompressor (outbuf = NULL), and the flush function will be called to + *flush the output buffer at the appropriate time (decompressor and stream + *dependent). 
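The fill/flush conventions spelled out above apply to every decompressor with the decompress_fn signature; a sketch that feeds bunzip2() a fully pre-read buffer, so fill and flush are NULL and the output buffer must be large enough (names are illustrative):

#include <linux/kernel.h>
#include <linux/decompress/bunzip2.h>

static void example_decompress_error(char *msg)
{
	pr_err("decompress: %s\n", msg);
}

static int example_bunzip2_in_place(unsigned char *in, int in_len,
				    unsigned char *out)
{
	int pos = 0;

	/* len != 0 and fill == NULL: all input is already in 'in' */
	return bunzip2(in, in_len, NULL, NULL, out, &pos,
		       example_decompress_error);
}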
+ */ + + +/* Utility routine to detect the decompression method */ +decompress_fn decompress_method(const unsigned char *inbuf, int len, + const char **name); + +#endif diff --git a/include/linux/decompress/inflate.h b/include/linux/decompress/inflate.h new file mode 100644 index 00000000000..f9b06ccc3e5 --- /dev/null +++ b/include/linux/decompress/inflate.h @@ -0,0 +1,13 @@ +#ifndef INFLATE_H +#define INFLATE_H + +/* Other housekeeping constants */ +#define INBUFSIZ 4096 + +int gunzip(unsigned char *inbuf, int len, + int(*fill)(void*, unsigned int), + int(*flush)(void*, unsigned int), + unsigned char *output, + int *pos, + void(*error_fn)(char *x)); +#endif diff --git a/include/linux/decompress/mm.h b/include/linux/decompress/mm.h new file mode 100644 index 00000000000..12ff8c3f1d0 --- /dev/null +++ b/include/linux/decompress/mm.h @@ -0,0 +1,87 @@ +/* + * linux/compr_mm.h + * + * Memory management for pre-boot and ramdisk uncompressors + * + * Authors: Alain Knaff <alain@knaff.lu> + * + */ + +#ifndef DECOMPR_MM_H +#define DECOMPR_MM_H + +#ifdef STATIC + +/* Code active when included from pre-boot environment: */ + +/* A trivial malloc implementation, adapted from + * malloc by Hannu Savolainen 1993 and Matthias Urlichs 1994 + */ +static unsigned long malloc_ptr; +static int malloc_count; + +static void *malloc(int size) +{ + void *p; + + if (size < 0) + error("Malloc error"); + if (!malloc_ptr) + malloc_ptr = free_mem_ptr; + + malloc_ptr = (malloc_ptr + 3) & ~3; /* Align */ + + p = (void *)malloc_ptr; + malloc_ptr += size; + + if (free_mem_end_ptr && malloc_ptr >= free_mem_end_ptr) + error("Out of memory"); + + malloc_count++; + return p; +} + +static void free(void *where) +{ + malloc_count--; + if (!malloc_count) + malloc_ptr = free_mem_ptr; +} + +#define large_malloc(a) malloc(a) +#define large_free(a) free(a) + +#define set_error_fn(x) + +#define INIT + +#else /* STATIC */ + +/* Code active when compiled standalone for use when loading ramdisk: */ + +#include <linux/kernel.h> +#include <linux/fs.h> +#include <linux/string.h> +#include <linux/vmalloc.h> + +/* Use defines rather than static inline in order to avoid spurious + * warnings when not needed (indeed large_malloc / large_free are not + * needed by inflate */ + +#define malloc(a) kmalloc(a, GFP_KERNEL) +#define free(a) kfree(a) + +#define large_malloc(a) vmalloc(a) +#define large_free(a) vfree(a) + +static void(*error)(char *m); +#define set_error_fn(x) error = x; + +#define INIT __init +#define STATIC + +#include <linux/init.h> + +#endif /* STATIC */ + +#endif /* DECOMPR_MM_H */ diff --git a/include/linux/decompress/unlzma.h b/include/linux/decompress/unlzma.h new file mode 100644 index 00000000000..7796538f1bf --- /dev/null +++ b/include/linux/decompress/unlzma.h @@ -0,0 +1,12 @@ +#ifndef DECOMPRESS_UNLZMA_H +#define DECOMPRESS_UNLZMA_H + +int unlzma(unsigned char *, int, + int(*fill)(void*, unsigned int), + int(*flush)(void*, unsigned int), + unsigned char *output, + int *posp, + void(*error)(char *x) + ); + +#endif diff --git a/include/linux/device-mapper.h b/include/linux/device-mapper.h index 8209e08969f..df7607e6dce 100644 --- a/include/linux/device-mapper.h +++ b/include/linux/device-mapper.h @@ -11,6 +11,7 @@ #include <linux/bio.h> #include <linux/blkdev.h> +struct dm_dev; struct dm_target; struct dm_table; struct mapped_device; @@ -21,6 +22,7 @@ typedef enum { STATUSTYPE_INFO, STATUSTYPE_TABLE } status_type_t; union map_info { void *ptr; unsigned long long ll; + unsigned flush_request; }; /* @@ -80,6 +82,18 @@ typedef 
int (*dm_ioctl_fn) (struct dm_target *ti, unsigned int cmd, typedef int (*dm_merge_fn) (struct dm_target *ti, struct bvec_merge_data *bvm, struct bio_vec *biovec, int max_size); +typedef int (*iterate_devices_callout_fn) (struct dm_target *ti, + struct dm_dev *dev, + sector_t start, sector_t len, + void *data); + +typedef int (*dm_iterate_devices_fn) (struct dm_target *ti, + iterate_devices_callout_fn fn, + void *data); + +typedef void (*dm_io_hints_fn) (struct dm_target *ti, + struct queue_limits *limits); + /* * Returns: * 0: The target can handle the next I/O immediately. @@ -92,7 +106,8 @@ void dm_error(const char *message); /* * Combine device limits. */ -void dm_set_device_limits(struct dm_target *ti, struct block_device *bdev); +int dm_set_device_limits(struct dm_target *ti, struct dm_dev *dev, + sector_t start, sector_t len, void *data); struct dm_dev { struct block_device *bdev; @@ -116,7 +131,6 @@ void dm_put_device(struct dm_target *ti, struct dm_dev *d); /* * Target features */ -#define DM_TARGET_SUPPORTS_BARRIERS 0x00000001 struct target_type { uint64_t features; @@ -139,18 +153,11 @@ struct target_type { dm_ioctl_fn ioctl; dm_merge_fn merge; dm_busy_fn busy; -}; + dm_iterate_devices_fn iterate_devices; + dm_io_hints_fn io_hints; -struct io_restrictions { - unsigned long bounce_pfn; - unsigned long seg_boundary_mask; - unsigned max_hw_sectors; - unsigned max_sectors; - unsigned max_segment_size; - unsigned short hardsect_size; - unsigned short max_hw_segments; - unsigned short max_phys_segments; - unsigned char no_cluster; /* inverted so that 0 is default */ + /* For internal device-mapper use. */ + struct list_head list; }; struct dm_target { @@ -161,15 +168,18 @@ struct dm_target { sector_t begin; sector_t len; - /* FIXME: turn this into a mask, and merge with io_restrictions */ /* Always a power of 2 */ sector_t split_io; /* - * These are automatically filled in by - * dm_table_get_device. + * A number of zero-length barrier requests that will be submitted + * to the target for the purpose of flushing cache. + * + * The request number will be placed in union map_info->flush_request. + * It is a responsibility of the target driver to remap these requests + * to the real underlying devices. */ - struct io_restrictions limits; + unsigned num_flush_requests; /* target specific data */ void *private; @@ -228,6 +238,7 @@ struct gendisk *dm_disk(struct mapped_device *md); int dm_suspended(struct mapped_device *md); int dm_noflush_suspending(struct dm_target *ti); union map_info *dm_get_mapinfo(struct bio *bio); +union map_info *dm_get_rq_mapinfo(struct request *rq); /* * Geometry functions. 
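With io_restrictions gone, a target reports its underlying devices through the new iterate_devices hook and lets dm core derive the combined queue_limits (typically by passing dm_set_device_limits as the callout). A sketch of what that looks like for a single-device target; struct demo_target and the demo_* names are illustrative, not taken from an existing target:

#include <linux/device-mapper.h>

struct demo_target {
        struct dm_dev *dev;     /* acquired in .ctr via dm_get_device() */
        sector_t start;
};

static int demo_iterate_devices(struct dm_target *ti,
                                iterate_devices_callout_fn fn, void *data)
{
        struct demo_target *dt = ti->private;

        /* Report our one underlying device span back to dm core. */
        return fn(ti, dt->dev, dt->start, ti->len, data);
}

static struct target_type demo_target_type = {
        .name            = "demo",
        .iterate_devices = demo_iterate_devices,
        /* .module, .ctr, .dtr, .map, ... omitted */
};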
@@ -390,4 +401,12 @@ static inline unsigned long to_bytes(sector_t n) return (n << SECTOR_SHIFT); } +/*----------------------------------------------------------------- + * Helper for block layer and dm core operations + *---------------------------------------------------------------*/ +void dm_dispatch_request(struct request *rq); +void dm_requeue_unmapped_request(struct request *rq); +void dm_kill_unmapped_request(struct request *rq, int error); +int dm_underlying_device_busy(struct request_queue *q); + #endif /* _LINUX_DEVICE_MAPPER_H */ diff --git a/include/linux/device.h b/include/linux/device.h index 45e5b1921fb..aebb81036db 100644 --- a/include/linux/device.h +++ b/include/linux/device.h @@ -25,9 +25,8 @@ #include <asm/atomic.h> #include <asm/device.h> -#define BUS_ID_SIZE 20 - struct device; +struct device_private; struct device_driver; struct driver_private; struct class; @@ -61,8 +60,6 @@ struct bus_type { void (*shutdown)(struct device *dev); int (*suspend)(struct device *dev, pm_message_t state); - int (*suspend_late)(struct device *dev, pm_message_t state); - int (*resume_early)(struct device *dev); int (*resume)(struct device *dev); struct dev_pm_ops *pm; @@ -115,6 +112,8 @@ extern int bus_unregister_notifier(struct bus_type *bus, #define BUS_NOTIFY_BOUND_DRIVER 0x00000003 /* driver bound to device */ #define BUS_NOTIFY_UNBIND_DRIVER 0x00000004 /* driver about to be unbound */ +#define BUS_NOTIFY_UNBOUND_DRIVER 0x00000005 /* driver is unbound + from the device */ extern struct kset *bus_get_kset(struct bus_type *bus); extern struct klist *bus_get_device_klist(struct bus_type *bus); @@ -147,6 +146,8 @@ extern void put_driver(struct device_driver *drv); extern struct device_driver *driver_find(const char *name, struct bus_type *bus); extern int driver_probe_done(void); +extern void wait_for_device_probe(void); + /* sysfs interface for exporting driver attributes */ @@ -191,6 +192,7 @@ struct class { struct kobject *dev_kobj; int (*dev_uevent)(struct device *dev, struct kobj_uevent_env *env); + char *(*nodename)(struct device *dev); void (*class_release)(struct class *class); void (*dev_release)(struct device *dev); @@ -286,11 +288,9 @@ struct device_type { const char *name; struct attribute_group **groups; int (*uevent)(struct device *dev, struct kobj_uevent_env *env); + char *(*nodename)(struct device *dev); void (*release)(struct device *dev); - int (*suspend)(struct device *dev, pm_message_t state); - int (*resume)(struct device *dev); - struct dev_pm_ops *pm; }; @@ -365,15 +365,11 @@ struct device_dma_parameters { }; struct device { - struct klist klist_children; - struct klist_node knode_parent; /* node in sibling list */ - struct klist_node knode_driver; - struct klist_node knode_bus; struct device *parent; + struct device_private *p; + struct kobject kobj; - char bus_id[BUS_ID_SIZE]; /* position on parent bus */ - unsigned uevent_suppress:1; const char *init_name; /* initial name of the device */ struct device_type *type; @@ -425,8 +421,7 @@ struct device { static inline const char *dev_name(const struct device *dev) { - /* will be changed into kobject_name(&dev->kobj) in the near future */ - return dev->bus_id; + return kobject_name(&dev->kobj); } extern int dev_set_name(struct device *dev, const char *name, ...) 
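Since bus_id has been removed from struct device and dev_name() now reads the embedded kobject, drivers are expected to go through dev_set_name()/dev_name() rather than writing into a fixed-size buffer. A small sketch, assuming the device's bus/class/release hooks are set up elsewhere; demo_register and the "demo%d" pattern are illustrative:

#include <linux/device.h>

static int demo_register(struct device *dev, int index)
{
        int ret;

        /* Replaces the old snprintf(dev->bus_id, BUS_ID_SIZE, ...) idiom. */
        ret = dev_set_name(dev, "demo%d", index);
        if (ret)
                return ret;

        ret = device_register(dev);
        if (ret)
                return ret;

        dev_info(dev, "registered as %s\n", dev_name(dev));
        return 0;
}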
@@ -461,6 +456,16 @@ static inline void dev_set_drvdata(struct device *dev, void *data) dev->driver_data = data; } +static inline unsigned int dev_get_uevent_suppress(const struct device *dev) +{ + return dev->kobj.uevent_suppress; +} + +static inline void dev_set_uevent_suppress(struct device *dev, int val) +{ + dev->kobj.uevent_suppress = val; +} + static inline int device_is_registered(struct device *dev) { return dev->kobj.state_in_sysfs; @@ -481,7 +486,9 @@ extern int device_for_each_child(struct device *dev, void *data, extern struct device *device_find_child(struct device *dev, void *data, int (*match)(struct device *dev, void *data)); extern int device_rename(struct device *dev, char *new_name); -extern int device_move(struct device *dev, struct device *new_parent); +extern int device_move(struct device *dev, struct device *new_parent, + enum dpm_order dpm_order); +extern const char *device_get_nodename(struct device *dev, const char **tmp); /* * Root device objects for grouping under /sys/devices @@ -537,6 +544,7 @@ extern int (*platform_notify_remove)(struct device *dev); extern struct device *get_device(struct device *dev); extern void put_device(struct device *dev); +extern void wait_for_device_probe(void); /* drivers/base/power/shutdown.c */ extern void device_shutdown(void); @@ -568,7 +576,7 @@ extern const char *dev_driver_string(const struct device *dev); #if defined(DEBUG) #define dev_dbg(dev, format, arg...) \ dev_printk(KERN_DEBUG , dev , format , ## arg) -#elif defined(CONFIG_DYNAMIC_PRINTK_DEBUG) +#elif defined(CONFIG_DYNAMIC_DEBUG) #define dev_dbg(dev, format, ...) do { \ dynamic_dev_dbg(dev, format, ##__VA_ARGS__); \ } while (0) diff --git a/include/linux/dlm.h b/include/linux/dlm.h index b9cd38603fd..0b3518c4235 100644 --- a/include/linux/dlm.h +++ b/include/linux/dlm.h @@ -81,8 +81,8 @@ struct dlm_lksb { * the cluster, the calling node joins it. 
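The uevent_suppress flag now lives in the kobject and is reached through the accessors above; the usual pattern is to hold back the ADD uevent until a multi-step registration is complete and then emit it by hand. A hedged sketch (demo_add_device is illustrative; the device is assumed to be initialised by the caller):

#include <linux/device.h>
#include <linux/kobject.h>

static int demo_add_device(struct device *dev)
{
        int ret;

        dev_set_uevent_suppress(dev, 1);        /* hold back KOBJ_ADD */

        ret = device_add(dev);
        if (ret)
                return ret;

        /* ...create attributes, child devices, symlinks... */

        dev_set_uevent_suppress(dev, 0);
        kobject_uevent(&dev->kobj, KOBJ_ADD);   /* announce when complete */
        return 0;
}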
*/ -int dlm_new_lockspace(char *name, int namelen, dlm_lockspace_t **lockspace, - uint32_t flags, int lvblen); +int dlm_new_lockspace(const char *name, int namelen, + dlm_lockspace_t **lockspace, uint32_t flags, int lvblen); /* * dlm_release_lockspace diff --git a/include/linux/dlm_netlink.h b/include/linux/dlm_netlink.h index 19276332707..647c8ef2722 100644 --- a/include/linux/dlm_netlink.h +++ b/include/linux/dlm_netlink.h @@ -9,6 +9,8 @@ #ifndef _DLM_NETLINK_H #define _DLM_NETLINK_H +#include <linux/types.h> + enum { DLM_STATUS_WAITING = 1, DLM_STATUS_GRANTED = 2, @@ -18,16 +20,16 @@ enum { #define DLM_LOCK_DATA_VERSION 1 struct dlm_lock_data { - uint16_t version; - uint32_t lockspace_id; + __u16 version; + __u32 lockspace_id; int nodeid; int ownpid; - uint32_t id; - uint32_t remid; - uint64_t xid; - int8_t status; - int8_t grmode; - int8_t rqmode; + __u32 id; + __u32 remid; + __u64 xid; + __s8 status; + __s8 grmode; + __s8 rqmode; unsigned long timestamp; int resource_namelen; char resource_name[DLM_RESNAME_MAXLEN]; diff --git a/include/linux/dlm_plock.h b/include/linux/dlm_plock.h index 18d5fdbceb7..2dd21243104 100644 --- a/include/linux/dlm_plock.h +++ b/include/linux/dlm_plock.h @@ -9,6 +9,8 @@ #ifndef __DLM_PLOCK_DOT_H__ #define __DLM_PLOCK_DOT_H__ +#include <linux/types.h> + #define DLM_PLOCK_MISC_NAME "dlm_plock" #define DLM_PLOCK_VERSION_MAJOR 1 diff --git a/include/linux/dm-dirty-log.h b/include/linux/dm-dirty-log.h index 600c5fb2daa..5e8b11d88f6 100644 --- a/include/linux/dm-dirty-log.h +++ b/include/linux/dm-dirty-log.h @@ -28,6 +28,9 @@ struct dm_dirty_log_type { const char *name; struct module *module; + /* For internal device-mapper use */ + struct list_head list; + int (*ctr)(struct dm_dirty_log *log, struct dm_target *ti, unsigned argc, char **argv); void (*dtr)(struct dm_dirty_log *log); @@ -113,6 +116,16 @@ struct dm_dirty_log_type { */ int (*status)(struct dm_dirty_log *log, status_type_t status_type, char *result, unsigned maxlen); + + /* + * is_remote_recovering is necessary for cluster mirroring. It provides + * a way to detect recovery on another node, so we aren't writing + * concurrently. This function is likely to block (when a cluster log + * is used). + * + * Returns: 0, 1 + */ + int (*is_remote_recovering)(struct dm_dirty_log *log, region_t region); }; int dm_dirty_log_type_register(struct dm_dirty_log_type *type); diff --git a/include/linux/dm-ioctl.h b/include/linux/dm-ioctl.h index 28c2940eb30..2ab84c83c31 100644 --- a/include/linux/dm-ioctl.h +++ b/include/linux/dm-ioctl.h @@ -113,20 +113,30 @@ struct dm_ioctl { * return -ENOTTY) fill out this field, even if the * command failed. */ - uint32_t version[3]; /* in/out */ - uint32_t data_size; /* total size of data passed in + __u32 version[3]; /* in/out */ + __u32 data_size; /* total size of data passed in * including this struct */ - uint32_t data_start; /* offset to start of data + __u32 data_start; /* offset to start of data * relative to start of this struct */ - uint32_t target_count; /* in/out */ - int32_t open_count; /* out */ - uint32_t flags; /* in/out */ - uint32_t event_nr; /* in/out */ - uint32_t padding; + __u32 target_count; /* in/out */ + __s32 open_count; /* out */ + __u32 flags; /* in/out */ - uint64_t dev; /* in/out */ + /* + * event_nr holds either the event number (input and output) or the + * udev cookie value (input only). + * The DM_DEV_WAIT ioctl takes an event number as input. 
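The new is_remote_recovering hook lets a mirror's write path ask a cluster-aware log whether another node is recovering a region before writing to it. A sketch of how a caller might consult it; demo_write_blocked is illustrative, and the real mirror code would instead hold such writes on a queue:

#include <linux/types.h>
#include <linux/dm-dirty-log.h>

static bool demo_write_blocked(struct dm_dirty_log *log, region_t region)
{
        /* Optional hook: only cluster-aware log types provide it, and it
         * may block while the answer is fetched from another node. */
        if (log->type->is_remote_recovering &&
            log->type->is_remote_recovering(log, region))
                return true;

        return false;
}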
+ * The DM_SUSPEND, DM_DEV_REMOVE and DM_DEV_RENAME ioctls + * use the field as a cookie to return in the DM_COOKIE + * variable with the uevents they issue. + * For output, the ioctls return the event number, not the cookie. + */ + __u32 event_nr; /* in/out */ + __u32 padding; + + __u64 dev; /* in/out */ char name[DM_NAME_LEN]; /* device name */ char uuid[DM_UUID_LEN]; /* unique identifier for @@ -139,9 +149,9 @@ struct dm_ioctl { * dm_ioctl. */ struct dm_target_spec { - uint64_t sector_start; - uint64_t length; - int32_t status; /* used when reading from kernel only */ + __u64 sector_start; + __u64 length; + __s32 status; /* used when reading from kernel only */ /* * Location of the next dm_target_spec. @@ -153,7 +163,7 @@ struct dm_target_spec { * (that follows the dm_ioctl struct) to the start of the "next" * dm_target_spec. */ - uint32_t next; + __u32 next; char target_type[DM_MAX_TYPE_NAME]; @@ -168,17 +178,17 @@ struct dm_target_spec { * Used to retrieve the target dependencies. */ struct dm_target_deps { - uint32_t count; /* Array size */ - uint32_t padding; /* unused */ - uint64_t dev[0]; /* out */ + __u32 count; /* Array size */ + __u32 padding; /* unused */ + __u64 dev[0]; /* out */ }; /* * Used to get a list of all dm devices. */ struct dm_name_list { - uint64_t dev; - uint32_t next; /* offset to the next record from + __u64 dev; + __u32 next; /* offset to the next record from the _start_ of this */ char name[0]; }; @@ -187,8 +197,8 @@ struct dm_name_list { * Used to retrieve the target versions */ struct dm_target_versions { - uint32_t next; - uint32_t version[3]; + __u32 next; + __u32 version[3]; char name[0]; }; @@ -197,7 +207,7 @@ struct dm_target_versions { * Used to pass message to a target */ struct dm_target_msg { - uint64_t sector; /* Device sector */ + __u64 sector; /* Device sector */ char message[0]; }; @@ -256,9 +266,9 @@ enum { #define DM_DEV_SET_GEOMETRY _IOWR(DM_IOCTL, DM_DEV_SET_GEOMETRY_CMD, struct dm_ioctl) #define DM_VERSION_MAJOR 4 -#define DM_VERSION_MINOR 14 +#define DM_VERSION_MINOR 15 #define DM_VERSION_PATCHLEVEL 0 -#define DM_VERSION_EXTRA "-ioctl (2008-04-23)" +#define DM_VERSION_EXTRA "-ioctl (2009-04-01)" /* Status bits */ #define DM_READONLY_FLAG (1 << 0) /* In/Out */ diff --git a/include/linux/dm-log-userspace.h b/include/linux/dm-log-userspace.h new file mode 100644 index 00000000000..8a1f972c0fe --- /dev/null +++ b/include/linux/dm-log-userspace.h @@ -0,0 +1,397 @@ +/* + * Copyright (C) 2006-2009 Red Hat, Inc. + * + * This file is released under the LGPL. + */ + +#ifndef __DM_LOG_USERSPACE_H__ +#define __DM_LOG_USERSPACE_H__ + +#include <linux/dm-ioctl.h> /* For DM_UUID_LEN */ + +/* + * The device-mapper userspace log module consists of a kernel component and + * a user-space component. The kernel component implements the API defined + * in dm-dirty-log.h. Its purpose is simply to pass the parameters and + * return values of those API functions between kernel and user-space. + * + * Below are defined the 'request_types' - DM_ULOG_CTR, DM_ULOG_DTR, etc. + * These request types represent the different functions in the device-mapper + * dirty log API. Each of these is described in more detail below. + * + * The user-space program must listen for requests from the kernel (representing + * the various API functions) and process them. 
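The dm_ioctl header above (now spelled with __u32/__u64 types) is what userspace hands to the control device for every command; the version[] triple is filled in both directions so the two sides can negotiate. A minimal userspace sketch of a version query, with error handling trimmed; /dev/mapper/control as the control node is an assumption of this example:

#include <fcntl.h>
#include <stdio.h>
#include <string.h>
#include <sys/ioctl.h>
#include <linux/dm-ioctl.h>

int main(void)
{
        struct dm_ioctl dmi;
        int fd = open("/dev/mapper/control", O_RDWR);

        memset(&dmi, 0, sizeof(dmi));
        dmi.version[0] = DM_VERSION_MAJOR;      /* what we were built against */
        dmi.version[1] = DM_VERSION_MINOR;
        dmi.version[2] = DM_VERSION_PATCHLEVEL;
        dmi.data_size  = sizeof(dmi);
        dmi.data_start = sizeof(dmi);

        if (fd < 0 || ioctl(fd, DM_VERSION, &dmi) < 0)
                return 1;

        printf("dm-ioctl interface %u.%u.%u\n",
               dmi.version[0], dmi.version[1], dmi.version[2]);
        return 0;
}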
+ * + * User-space begins by setting up the communication link (error checking + * removed for clarity): + * fd = socket(PF_NETLINK, SOCK_DGRAM, NETLINK_CONNECTOR); + * addr.nl_family = AF_NETLINK; + * addr.nl_groups = CN_IDX_DM; + * addr.nl_pid = 0; + * r = bind(fd, (struct sockaddr *) &addr, sizeof(addr)); + * opt = addr.nl_groups; + * setsockopt(fd, SOL_NETLINK, NETLINK_ADD_MEMBERSHIP, &opt, sizeof(opt)); + * + * User-space will then wait to receive requests form the kernel, which it + * will process as described below. The requests are received in the form, + * ((struct dm_ulog_request) + (additional data)). Depending on the request + * type, there may or may not be 'additional data'. In the descriptions below, + * you will see 'Payload-to-userspace' and 'Payload-to-kernel'. The + * 'Payload-to-userspace' is what the kernel sends in 'additional data' as + * necessary parameters to complete the request. The 'Payload-to-kernel' is + * the 'additional data' returned to the kernel that contains the necessary + * results of the request. The 'data_size' field in the dm_ulog_request + * structure denotes the availability and amount of payload data. + */ + +/* + * DM_ULOG_CTR corresponds to (found in dm-dirty-log.h): + * int (*ctr)(struct dm_dirty_log *log, struct dm_target *ti, + * unsigned argc, char **argv); + * + * Payload-to-userspace: + * A single string containing all the argv arguments separated by ' 's + * Payload-to-kernel: + * None. ('data_size' in the dm_ulog_request struct should be 0.) + * + * The UUID contained in the dm_ulog_request structure is the reference that + * will be used by all request types to a specific log. The constructor must + * record this assotiation with instance created. + * + * When the request has been processed, user-space must return the + * dm_ulog_request to the kernel - setting the 'error' field and + * 'data_size' appropriately. + */ +#define DM_ULOG_CTR 1 + +/* + * DM_ULOG_DTR corresponds to (found in dm-dirty-log.h): + * void (*dtr)(struct dm_dirty_log *log); + * + * Payload-to-userspace: + * A single string containing all the argv arguments separated by ' 's + * Payload-to-kernel: + * None. ('data_size' in the dm_ulog_request struct should be 0.) + * + * The UUID contained in the dm_ulog_request structure is all that is + * necessary to identify the log instance being destroyed. There is no + * payload data. + * + * When the request has been processed, user-space must return the + * dm_ulog_request to the kernel - setting the 'error' field and clearing + * 'data_size' appropriately. + */ +#define DM_ULOG_DTR 2 + +/* + * DM_ULOG_PRESUSPEND corresponds to (found in dm-dirty-log.h): + * int (*presuspend)(struct dm_dirty_log *log); + * + * Payload-to-userspace: + * None. + * Payload-to-kernel: + * None. + * + * The UUID contained in the dm_ulog_request structure is all that is + * necessary to identify the log instance being presuspended. There is no + * payload data. + * + * When the request has been processed, user-space must return the + * dm_ulog_request to the kernel - setting the 'error' field and + * 'data_size' appropriately. + */ +#define DM_ULOG_PRESUSPEND 3 + +/* + * DM_ULOG_POSTSUSPEND corresponds to (found in dm-dirty-log.h): + * int (*postsuspend)(struct dm_dirty_log *log); + * + * Payload-to-userspace: + * None. + * Payload-to-kernel: + * None. + * + * The UUID contained in the dm_ulog_request structure is all that is + * necessary to identify the log instance being postsuspended. There is no + * payload data. 
+ * + * When the request has been processed, user-space must return the + * dm_ulog_request to the kernel - setting the 'error' field and + * 'data_size' appropriately. + */ +#define DM_ULOG_POSTSUSPEND 4 + +/* + * DM_ULOG_RESUME corresponds to (found in dm-dirty-log.h): + * int (*resume)(struct dm_dirty_log *log); + * + * Payload-to-userspace: + * None. + * Payload-to-kernel: + * None. + * + * The UUID contained in the dm_ulog_request structure is all that is + * necessary to identify the log instance being resumed. There is no + * payload data. + * + * When the request has been processed, user-space must return the + * dm_ulog_request to the kernel - setting the 'error' field and + * 'data_size' appropriately. + */ +#define DM_ULOG_RESUME 5 + +/* + * DM_ULOG_GET_REGION_SIZE corresponds to (found in dm-dirty-log.h): + * uint32_t (*get_region_size)(struct dm_dirty_log *log); + * + * Payload-to-userspace: + * None. + * Payload-to-kernel: + * uint64_t - contains the region size + * + * The region size is something that was determined at constructor time. + * It is returned in the payload area and 'data_size' is set to + * reflect this. + * + * When the request has been processed, user-space must return the + * dm_ulog_request to the kernel - setting the 'error' field appropriately. + */ +#define DM_ULOG_GET_REGION_SIZE 6 + +/* + * DM_ULOG_IS_CLEAN corresponds to (found in dm-dirty-log.h): + * int (*is_clean)(struct dm_dirty_log *log, region_t region); + * + * Payload-to-userspace: + * uint64_t - the region to get clean status on + * Payload-to-kernel: + * int64_t - 1 if clean, 0 otherwise + * + * Payload is sizeof(uint64_t) and contains the region for which the clean + * status is being made. + * + * When the request has been processed, user-space must return the + * dm_ulog_request to the kernel - filling the payload with 0 (not clean) or + * 1 (clean), setting 'data_size' and 'error' appropriately. + */ +#define DM_ULOG_IS_CLEAN 7 + +/* + * DM_ULOG_IN_SYNC corresponds to (found in dm-dirty-log.h): + * int (*in_sync)(struct dm_dirty_log *log, region_t region, + * int can_block); + * + * Payload-to-userspace: + * uint64_t - the region to get sync status on + * Payload-to-kernel: + * int64_t - 1 if in-sync, 0 otherwise + * + * Exactly the same as 'is_clean' above, except this time asking "has the + * region been recovered?" vs. "is the region not being modified?" + */ +#define DM_ULOG_IN_SYNC 8 + +/* + * DM_ULOG_FLUSH corresponds to (found in dm-dirty-log.h): + * int (*flush)(struct dm_dirty_log *log); + * + * Payload-to-userspace: + * None. + * Payload-to-kernel: + * None. + * + * No incoming or outgoing payload. Simply flush log state to disk. + * + * When the request has been processed, user-space must return the + * dm_ulog_request to the kernel - setting the 'error' field and clearing + * 'data_size' appropriately. + */ +#define DM_ULOG_FLUSH 9 + +/* + * DM_ULOG_MARK_REGION corresponds to (found in dm-dirty-log.h): + * void (*mark_region)(struct dm_dirty_log *log, region_t region); + * + * Payload-to-userspace: + * uint64_t [] - region(s) to mark + * Payload-to-kernel: + * None. + * + * Incoming payload contains the one or more regions to mark dirty. + * The number of regions contained in the payload can be determined from + * 'data_size/sizeof(uint64_t)'. + * + * When the request has been processed, user-space must return the + * dm_ulog_request to the kernel - setting the 'error' field and clearing + * 'data_size' appropriately. 
+ */ +#define DM_ULOG_MARK_REGION 10 + +/* + * DM_ULOG_CLEAR_REGION corresponds to (found in dm-dirty-log.h): + * void (*clear_region)(struct dm_dirty_log *log, region_t region); + * + * Payload-to-userspace: + * uint64_t [] - region(s) to clear + * Payload-to-kernel: + * None. + * + * Incoming payload contains the one or more regions to mark clean. + * The number of regions contained in the payload can be determined from + * 'data_size/sizeof(uint64_t)'. + * + * When the request has been processed, user-space must return the + * dm_ulog_request to the kernel - setting the 'error' field and clearing + * 'data_size' appropriately. + */ +#define DM_ULOG_CLEAR_REGION 11 + +/* + * DM_ULOG_GET_RESYNC_WORK corresponds to (found in dm-dirty-log.h): + * int (*get_resync_work)(struct dm_dirty_log *log, region_t *region); + * + * Payload-to-userspace: + * None. + * Payload-to-kernel: + * { + * int64_t i; -- 1 if recovery necessary, 0 otherwise + * uint64_t r; -- The region to recover if i=1 + * } + * 'data_size' should be set appropriately. + * + * When the request has been processed, user-space must return the + * dm_ulog_request to the kernel - setting the 'error' field appropriately. + */ +#define DM_ULOG_GET_RESYNC_WORK 12 + +/* + * DM_ULOG_SET_REGION_SYNC corresponds to (found in dm-dirty-log.h): + * void (*set_region_sync)(struct dm_dirty_log *log, + * region_t region, int in_sync); + * + * Payload-to-userspace: + * { + * uint64_t - region to set sync state on + * int64_t - 0 if not-in-sync, 1 if in-sync + * } + * Payload-to-kernel: + * None. + * + * When the request has been processed, user-space must return the + * dm_ulog_request to the kernel - setting the 'error' field and clearing + * 'data_size' appropriately. + */ +#define DM_ULOG_SET_REGION_SYNC 13 + +/* + * DM_ULOG_GET_SYNC_COUNT corresponds to (found in dm-dirty-log.h): + * region_t (*get_sync_count)(struct dm_dirty_log *log); + * + * Payload-to-userspace: + * None. + * Payload-to-kernel: + * uint64_t - the number of in-sync regions + * + * No incoming payload. Kernel-bound payload contains the number of + * regions that are in-sync (in a size_t). + * + * When the request has been processed, user-space must return the + * dm_ulog_request to the kernel - setting the 'error' field and + * 'data_size' appropriately. + */ +#define DM_ULOG_GET_SYNC_COUNT 14 + +/* + * DM_ULOG_STATUS_INFO corresponds to (found in dm-dirty-log.h): + * int (*status)(struct dm_dirty_log *log, STATUSTYPE_INFO, + * char *result, unsigned maxlen); + * + * Payload-to-userspace: + * None. + * Payload-to-kernel: + * Character string containing STATUSTYPE_INFO + * + * When the request has been processed, user-space must return the + * dm_ulog_request to the kernel - setting the 'error' field and + * 'data_size' appropriately. + */ +#define DM_ULOG_STATUS_INFO 15 + +/* + * DM_ULOG_STATUS_TABLE corresponds to (found in dm-dirty-log.h): + * int (*status)(struct dm_dirty_log *log, STATUSTYPE_TABLE, + * char *result, unsigned maxlen); + * + * Payload-to-userspace: + * None. + * Payload-to-kernel: + * Character string containing STATUSTYPE_TABLE + * + * When the request has been processed, user-space must return the + * dm_ulog_request to the kernel - setting the 'error' field and + * 'data_size' appropriately. 
+ */ +#define DM_ULOG_STATUS_TABLE 16 + +/* + * DM_ULOG_IS_REMOTE_RECOVERING corresponds to (found in dm-dirty-log.h): + * int (*is_remote_recovering)(struct dm_dirty_log *log, region_t region); + * + * Payload-to-userspace: + * uint64_t - region to determine recovery status on + * Payload-to-kernel: + * { + * int64_t is_recovering; -- 0 if no, 1 if yes + * uint64_t in_sync_hint; -- lowest region still needing resync + * } + * + * When the request has been processed, user-space must return the + * dm_ulog_request to the kernel - setting the 'error' field and + * 'data_size' appropriately. + */ +#define DM_ULOG_IS_REMOTE_RECOVERING 17 + +/* + * (DM_ULOG_REQUEST_MASK & request_type) to get the request type + * + * Payload-to-userspace: + * A single string containing all the argv arguments separated by ' 's + * Payload-to-kernel: + * None. ('data_size' in the dm_ulog_request struct should be 0.) + * + * We are reserving 8 bits of the 32-bit 'request_type' field for the + * various request types above. The remaining 24-bits are currently + * set to zero and are reserved for future use and compatibility concerns. + * + * User-space should always use DM_ULOG_REQUEST_TYPE to aquire the + * request type from the 'request_type' field to maintain forward compatibility. + */ +#define DM_ULOG_REQUEST_MASK 0xFF +#define DM_ULOG_REQUEST_TYPE(request_type) \ + (DM_ULOG_REQUEST_MASK & (request_type)) + +struct dm_ulog_request { + /* + * The local unique identifier (luid) and the universally unique + * identifier (uuid) are used to tie a request to a specific + * mirror log. A single machine log could probably make due with + * just the 'luid', but a cluster-aware log must use the 'uuid' and + * the 'luid'. The uuid is what is required for node to node + * communication concerning a particular log, but the 'luid' helps + * differentiate between logs that are being swapped and have the + * same 'uuid'. (Think "live" and "inactive" device-mapper tables.) + */ + uint64_t luid; + char uuid[DM_UUID_LEN]; + char padding[7]; /* Padding because DM_UUID_LEN = 129 */ + + int32_t error; /* Used to report back processing errors */ + + uint32_t seq; /* Sequence number for request */ + uint32_t request_type; /* DM_ULOG_* defined above */ + uint32_t data_size; /* How much data (not including this struct) */ + + char data[0]; +}; + +#endif /* __DM_LOG_USERSPACE_H__ */ diff --git a/include/linux/dma-debug.h b/include/linux/dma-debug.h new file mode 100644 index 00000000000..171ad8aedc8 --- /dev/null +++ b/include/linux/dma-debug.h @@ -0,0 +1,181 @@ +/* + * Copyright (C) 2008 Advanced Micro Devices, Inc. + * + * Author: Joerg Roedel <joerg.roedel@amd.com> + * + * This program is free software; you can redistribute it and/or modify it + * under the terms of the GNU General Public License version 2 as published + * by the Free Software Foundation. + * + * This program is distributed in the hope that it will be useful, + * but WITHOUT ANY WARRANTY; without even the implied warranty of + * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the + * GNU General Public License for more details. 
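Putting the pieces of this header together, a userspace log daemon ends up with a dispatch loop keyed on DM_ULOG_REQUEST_TYPE(), rewriting the received dm_ulog_request in place before returning it over the connector socket. A sketch of that dispatch step only; the netlink/cn_msg framing and the reply send are omitted, and the region-size value is an arbitrary example:

#include <stdint.h>
#include <string.h>
#include <linux/dm-log-userspace.h>

static void handle_request(struct dm_ulog_request *rq)
{
        switch (DM_ULOG_REQUEST_TYPE(rq->request_type)) {
        case DM_ULOG_GET_REGION_SIZE: {
                uint64_t region_size = 512 * 1024;      /* example value */

                /* Payload-to-kernel: a single uint64_t. */
                memcpy(rq->data, &region_size, sizeof(region_size));
                rq->data_size = sizeof(region_size);
                rq->error = 0;
                break;
        }
        case DM_ULOG_MARK_REGION: {
                /* Payload-to-userspace: data_size/sizeof(uint64_t) regions. */
                uint64_t *region = (uint64_t *)rq->data;
                unsigned int i, count = rq->data_size / sizeof(uint64_t);

                for (i = 0; i < count; i++) {
                        /* ...record region[i] as dirty for rq->uuid... */
                }
                rq->data_size = 0;
                rq->error = 0;
                break;
        }
        default:
                /* Reply with an error for anything not implemented here. */
                rq->data_size = 0;
                rq->error = -1;
                break;
        }
        /* The updated dm_ulog_request is then sent back to the kernel. */
}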
+ * + * You should have received a copy of the GNU General Public License + * along with this program; if not, write to the Free Software + * Foundation, Inc., 59 Temple Place, Suite 330, Boston, MA 02111-1307 USA + */ + +#ifndef __DMA_DEBUG_H +#define __DMA_DEBUG_H + +#include <linux/types.h> + +struct device; +struct scatterlist; +struct bus_type; + +#ifdef CONFIG_DMA_API_DEBUG + +extern void dma_debug_add_bus(struct bus_type *bus); + +extern void dma_debug_init(u32 num_entries); + +extern int dma_debug_resize_entries(u32 num_entries); + +extern void debug_dma_map_page(struct device *dev, struct page *page, + size_t offset, size_t size, + int direction, dma_addr_t dma_addr, + bool map_single); + +extern void debug_dma_unmap_page(struct device *dev, dma_addr_t addr, + size_t size, int direction, bool map_single); + +extern void debug_dma_map_sg(struct device *dev, struct scatterlist *sg, + int nents, int mapped_ents, int direction); + +extern void debug_dma_unmap_sg(struct device *dev, struct scatterlist *sglist, + int nelems, int dir); + +extern void debug_dma_alloc_coherent(struct device *dev, size_t size, + dma_addr_t dma_addr, void *virt); + +extern void debug_dma_free_coherent(struct device *dev, size_t size, + void *virt, dma_addr_t addr); + +extern void debug_dma_sync_single_for_cpu(struct device *dev, + dma_addr_t dma_handle, size_t size, + int direction); + +extern void debug_dma_sync_single_for_device(struct device *dev, + dma_addr_t dma_handle, + size_t size, int direction); + +extern void debug_dma_sync_single_range_for_cpu(struct device *dev, + dma_addr_t dma_handle, + unsigned long offset, + size_t size, + int direction); + +extern void debug_dma_sync_single_range_for_device(struct device *dev, + dma_addr_t dma_handle, + unsigned long offset, + size_t size, int direction); + +extern void debug_dma_sync_sg_for_cpu(struct device *dev, + struct scatterlist *sg, + int nelems, int direction); + +extern void debug_dma_sync_sg_for_device(struct device *dev, + struct scatterlist *sg, + int nelems, int direction); + +extern void debug_dma_dump_mappings(struct device *dev); + +#else /* CONFIG_DMA_API_DEBUG */ + +static inline void dma_debug_add_bus(struct bus_type *bus) +{ +} + +static inline void dma_debug_init(u32 num_entries) +{ +} + +static inline int dma_debug_resize_entries(u32 num_entries) +{ + return 0; +} + +static inline void debug_dma_map_page(struct device *dev, struct page *page, + size_t offset, size_t size, + int direction, dma_addr_t dma_addr, + bool map_single) +{ +} + +static inline void debug_dma_unmap_page(struct device *dev, dma_addr_t addr, + size_t size, int direction, + bool map_single) +{ +} + +static inline void debug_dma_map_sg(struct device *dev, struct scatterlist *sg, + int nents, int mapped_ents, int direction) +{ +} + +static inline void debug_dma_unmap_sg(struct device *dev, + struct scatterlist *sglist, + int nelems, int dir) +{ +} + +static inline void debug_dma_alloc_coherent(struct device *dev, size_t size, + dma_addr_t dma_addr, void *virt) +{ +} + +static inline void debug_dma_free_coherent(struct device *dev, size_t size, + void *virt, dma_addr_t addr) +{ +} + +static inline void debug_dma_sync_single_for_cpu(struct device *dev, + dma_addr_t dma_handle, + size_t size, int direction) +{ +} + +static inline void debug_dma_sync_single_for_device(struct device *dev, + dma_addr_t dma_handle, + size_t size, int direction) +{ +} + +static inline void debug_dma_sync_single_range_for_cpu(struct device *dev, + dma_addr_t dma_handle, + unsigned long 
offset, + size_t size, + int direction) +{ +} + +static inline void debug_dma_sync_single_range_for_device(struct device *dev, + dma_addr_t dma_handle, + unsigned long offset, + size_t size, + int direction) +{ +} + +static inline void debug_dma_sync_sg_for_cpu(struct device *dev, + struct scatterlist *sg, + int nelems, int direction) +{ +} + +static inline void debug_dma_sync_sg_for_device(struct device *dev, + struct scatterlist *sg, + int nelems, int direction) +{ +} + +static inline void debug_dma_dump_mappings(struct device *dev) +{ +} + +#endif /* CONFIG_DMA_API_DEBUG */ + +#endif /* __DMA_DEBUG_H */ diff --git a/include/linux/dma-mapping.h b/include/linux/dma-mapping.h index ba9114ec5d3..c0f6c3cd788 100644 --- a/include/linux/dma-mapping.h +++ b/include/linux/dma-mapping.h @@ -3,6 +3,8 @@ #include <linux/device.h> #include <linux/err.h> +#include <linux/dma-attrs.h> +#include <linux/scatterlist.h> /* These definitions mirror those in pci.h, so they can be used * interchangeably with their PCI_ counterparts */ @@ -13,26 +15,74 @@ enum dma_data_direction { DMA_NONE = 3, }; +struct dma_map_ops { + void* (*alloc_coherent)(struct device *dev, size_t size, + dma_addr_t *dma_handle, gfp_t gfp); + void (*free_coherent)(struct device *dev, size_t size, + void *vaddr, dma_addr_t dma_handle); + dma_addr_t (*map_page)(struct device *dev, struct page *page, + unsigned long offset, size_t size, + enum dma_data_direction dir, + struct dma_attrs *attrs); + void (*unmap_page)(struct device *dev, dma_addr_t dma_handle, + size_t size, enum dma_data_direction dir, + struct dma_attrs *attrs); + int (*map_sg)(struct device *dev, struct scatterlist *sg, + int nents, enum dma_data_direction dir, + struct dma_attrs *attrs); + void (*unmap_sg)(struct device *dev, + struct scatterlist *sg, int nents, + enum dma_data_direction dir, + struct dma_attrs *attrs); + void (*sync_single_for_cpu)(struct device *dev, + dma_addr_t dma_handle, size_t size, + enum dma_data_direction dir); + void (*sync_single_for_device)(struct device *dev, + dma_addr_t dma_handle, size_t size, + enum dma_data_direction dir); + void (*sync_single_range_for_cpu)(struct device *dev, + dma_addr_t dma_handle, + unsigned long offset, + size_t size, + enum dma_data_direction dir); + void (*sync_single_range_for_device)(struct device *dev, + dma_addr_t dma_handle, + unsigned long offset, + size_t size, + enum dma_data_direction dir); + void (*sync_sg_for_cpu)(struct device *dev, + struct scatterlist *sg, int nents, + enum dma_data_direction dir); + void (*sync_sg_for_device)(struct device *dev, + struct scatterlist *sg, int nents, + enum dma_data_direction dir); + int (*mapping_error)(struct device *dev, dma_addr_t dma_addr); + int (*dma_supported)(struct device *dev, u64 mask); + int is_phys; +}; + #define DMA_BIT_MASK(n) (((n) == 64) ? ~0ULL : ((1ULL<<(n))-1)) +typedef u64 DMA_nnBIT_MASK __deprecated; + /* * NOTE: do not use the below macros in new code and do not add new definitions * here. 
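The dma-debug hooks above are meant to shadow every mapping operation, and with dma_map_ops the generic mapping wrappers can call the chosen implementation and the debug hook in one place. An illustrative sketch of that pairing (not the actual asm-generic wrapper); demo_dma_map_page is made up:

#include <linux/dma-debug.h>
#include <linux/dma-mapping.h>

static inline dma_addr_t demo_dma_map_page(struct device *dev,
                                           struct dma_map_ops *ops,
                                           struct page *page,
                                           unsigned long offset, size_t size,
                                           enum dma_data_direction dir)
{
        dma_addr_t addr = ops->map_page(dev, page, offset, size, dir, NULL);

        /* Record the mapping so dma-debug can flag leaks, double unmaps
         * and direction mismatches later on. */
        debug_dma_map_page(dev, page, offset, size, dir, addr, false);
        return addr;
}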
* * Instead, just open-code DMA_BIT_MASK(n) within your driver */ -#define DMA_64BIT_MASK DMA_BIT_MASK(64) -#define DMA_48BIT_MASK DMA_BIT_MASK(48) -#define DMA_47BIT_MASK DMA_BIT_MASK(47) -#define DMA_40BIT_MASK DMA_BIT_MASK(40) -#define DMA_39BIT_MASK DMA_BIT_MASK(39) -#define DMA_35BIT_MASK DMA_BIT_MASK(35) -#define DMA_32BIT_MASK DMA_BIT_MASK(32) -#define DMA_31BIT_MASK DMA_BIT_MASK(31) -#define DMA_30BIT_MASK DMA_BIT_MASK(30) -#define DMA_29BIT_MASK DMA_BIT_MASK(29) -#define DMA_28BIT_MASK DMA_BIT_MASK(28) -#define DMA_24BIT_MASK DMA_BIT_MASK(24) +#define DMA_64BIT_MASK (DMA_nnBIT_MASK)DMA_BIT_MASK(64) +#define DMA_48BIT_MASK (DMA_nnBIT_MASK)DMA_BIT_MASK(48) +#define DMA_47BIT_MASK (DMA_nnBIT_MASK)DMA_BIT_MASK(47) +#define DMA_40BIT_MASK (DMA_nnBIT_MASK)DMA_BIT_MASK(40) +#define DMA_39BIT_MASK (DMA_nnBIT_MASK)DMA_BIT_MASK(39) +#define DMA_35BIT_MASK (DMA_nnBIT_MASK)DMA_BIT_MASK(35) +#define DMA_32BIT_MASK (DMA_nnBIT_MASK)DMA_BIT_MASK(32) +#define DMA_31BIT_MASK (DMA_nnBIT_MASK)DMA_BIT_MASK(31) +#define DMA_30BIT_MASK (DMA_nnBIT_MASK)DMA_BIT_MASK(30) +#define DMA_29BIT_MASK (DMA_nnBIT_MASK)DMA_BIT_MASK(29) +#define DMA_28BIT_MASK (DMA_nnBIT_MASK)DMA_BIT_MASK(28) +#define DMA_24BIT_MASK (DMA_nnBIT_MASK)DMA_BIT_MASK(24) #define DMA_MASK_NONE 0x0ULL @@ -48,26 +98,32 @@ static inline int is_device_dma_capable(struct device *dev) return dev->dma_mask != NULL && *dev->dma_mask != DMA_MASK_NONE; } -static inline int is_buffer_dma_capable(u64 mask, dma_addr_t addr, size_t size) -{ - return addr + size <= mask; -} - #ifdef CONFIG_HAS_DMA #include <asm/dma-mapping.h> #else #include <asm-generic/dma-mapping-broken.h> #endif -/* Backwards compat, remove in 2.7.x */ -#define dma_sync_single dma_sync_single_for_cpu -#define dma_sync_sg dma_sync_sg_for_cpu +/* for backwards compatibility, removed soon */ +static inline void __deprecated dma_sync_single(struct device *dev, + dma_addr_t addr, size_t size, + enum dma_data_direction dir) +{ + dma_sync_single_for_cpu(dev, addr, size, dir); +} + +static inline void __deprecated dma_sync_sg(struct device *dev, + struct scatterlist *sg, int nelems, + enum dma_data_direction dir) +{ + dma_sync_sg_for_cpu(dev, sg, nelems, dir); +} static inline u64 dma_get_mask(struct device *dev) { if (dev && dev->dma_mask && *dev->dma_mask) return *dev->dma_mask; - return DMA_32BIT_MASK; + return DMA_BIT_MASK(32); } extern u64 dma_get_required_mask(struct device *dev); diff --git a/include/linux/dma_remapping.h b/include/linux/dma_remapping.h index af1dab41674..5619f852273 100644 --- a/include/linux/dma_remapping.h +++ b/include/linux/dma_remapping.h @@ -11,6 +11,11 @@ #define DMA_PTE_READ (1) #define DMA_PTE_WRITE (2) +#define DMA_PTE_SNP (1 << 11) + +#define CONTEXT_TT_MULTI_LEVEL 0 +#define CONTEXT_TT_DEV_IOTLB 1 +#define CONTEXT_TT_PASS_THROUGH 2 struct intel_iommu; struct dmar_domain; @@ -20,11 +25,16 @@ extern void free_dmar_iommu(struct intel_iommu *iommu); #ifdef CONFIG_DMAR extern int iommu_calculate_agaw(struct intel_iommu *iommu); +extern int iommu_calculate_max_sagaw(struct intel_iommu *iommu); #else static inline int iommu_calculate_agaw(struct intel_iommu *iommu) { return 0; } +static inline int iommu_calculate_max_sagaw(struct intel_iommu *iommu) +{ + return 0; +} #endif extern int dmar_disabled; diff --git a/include/linux/dmaengine.h b/include/linux/dmaengine.h index c73f1e2b59b..ffefba81c81 100644 --- a/include/linux/dmaengine.h +++ b/include/linux/dmaengine.h @@ -23,9 +23,6 @@ #include <linux/device.h> #include <linux/uio.h> -#include <linux/kref.h> 
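With the DMA_*BIT_MASK constants deprecated, drivers open-code DMA_BIT_MASK(n) when setting their masks. A minimal sketch of the usual probe-time pattern (demo_probe and the 64-then-32-bit fallback policy are illustrative):

#include <linux/dma-mapping.h>
#include <linux/errno.h>

static int demo_probe(struct device *dev)
{
        /* Try 64-bit DMA first, fall back to 32-bit addressing. */
        if (dma_set_mask(dev, DMA_BIT_MASK(64)) &&
            dma_set_mask(dev, DMA_BIT_MASK(32)))
                return -EIO;

        return 0;
}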
-#include <linux/completion.h> -#include <linux/rcupdate.h> #include <linux/dma-mapping.h> /** @@ -81,12 +78,18 @@ enum dma_transaction_type { * dependency chains * @DMA_COMPL_SKIP_SRC_UNMAP - set to disable dma-unmapping the source buffer(s) * @DMA_COMPL_SKIP_DEST_UNMAP - set to disable dma-unmapping the destination(s) + * @DMA_COMPL_SRC_UNMAP_SINGLE - set to do the source dma-unmapping as single + * (if not set, do the source dma-unmapping as page) + * @DMA_COMPL_DEST_UNMAP_SINGLE - set to do the destination dma-unmapping as single + * (if not set, do the destination dma-unmapping as page) */ enum dma_ctrl_flags { DMA_PREP_INTERRUPT = (1 << 0), DMA_CTRL_ACK = (1 << 1), DMA_COMPL_SKIP_SRC_UNMAP = (1 << 2), DMA_COMPL_SKIP_DEST_UNMAP = (1 << 3), + DMA_COMPL_SRC_UNMAP_SINGLE = (1 << 4), + DMA_COMPL_DEST_UNMAP_SINGLE = (1 << 5), }; /** @@ -97,7 +100,6 @@ typedef struct { DECLARE_BITMAP(bits, DMA_TX_TYPE_END); } dma_cap_mask_t; /** * struct dma_chan_percpu - the per-CPU part of struct dma_chan - * @refcount: local_t used for open-coded "bigref" counting * @memcpy_count: transaction counter * @bytes_transferred: byte counter */ @@ -114,13 +116,11 @@ struct dma_chan_percpu { * @cookie: last cookie value returned to client * @chan_id: channel ID for sysfs * @dev: class device for sysfs - * @refcount: kref, used in "bigref" slow-mode - * @slow_ref: indicates that the DMA channel is free - * @rcu: the DMA channel's RCU head * @device_node: used to add this to the device chan list * @local: per-cpu pointer to a struct dma_chan_percpu * @client-count: how many clients are using this channel * @table_count: number of appearances in the mem-to-mem allocation table + * @private: private data for certain client-channel associations */ struct dma_chan { struct dma_device *device; @@ -134,6 +134,7 @@ struct dma_chan { struct dma_chan_percpu *local; int client_count; int table_count; + void *private; }; /** @@ -207,12 +208,11 @@ struct dma_async_tx_descriptor { /** * struct dma_device - info on the entity supplying DMA services * @chancnt: how many DMA channels are supported + * @privatecnt: how many DMA channels are requested by dma_request_channel * @channels: the list of struct dma_chan * @global_node: list_head for global dma_device_list * @cap_mask: one or more dma_capability flags * @max_xor: maximum number of xor sources, 0 if no capability - * @refcount: reference count - * @done: IO completion struct * @dev_id: unique device ID * @dev: struct device reference for dma mapping api * @device_alloc_chan_resources: allocate resources and return the @@ -225,11 +225,13 @@ struct dma_async_tx_descriptor { * @device_prep_dma_interrupt: prepares an end of chain interrupt operation * @device_prep_slave_sg: prepares a slave dma operation * @device_terminate_all: terminate all pending operations + * @device_is_tx_complete: poll for transaction completion * @device_issue_pending: push pending transactions to hardware */ struct dma_device { unsigned int chancnt; + unsigned int privatecnt; struct list_head channels; struct list_head global_node; dma_cap_mask_t cap_mask; @@ -282,6 +284,36 @@ static inline void dmaengine_put(void) } #endif +#ifdef CONFIG_NET_DMA +#define net_dmaengine_get() dmaengine_get() +#define net_dmaengine_put() dmaengine_put() +#else +static inline void net_dmaengine_get(void) +{ +} +static inline void net_dmaengine_put(void) +{ +} +#endif + +#ifdef CONFIG_ASYNC_TX_DMA +#define async_dmaengine_get() dmaengine_get() +#define async_dmaengine_put() dmaengine_put() +#define 
async_dma_find_channel(type) dma_find_channel(type) +#else +static inline void async_dmaengine_get(void) +{ +} +static inline void async_dmaengine_put(void) +{ +} +static inline struct dma_chan * +async_dma_find_channel(enum dma_transaction_type type) +{ + return NULL; +} +#endif + dma_cookie_t dma_async_memcpy_buf_to_buf(struct dma_chan *chan, void *dest, void *src, size_t len); dma_cookie_t dma_async_memcpy_buf_to_pg(struct dma_chan *chan, @@ -297,6 +329,11 @@ static inline void async_tx_ack(struct dma_async_tx_descriptor *tx) tx->flags |= DMA_CTRL_ACK; } +static inline void async_tx_clear_ack(struct dma_async_tx_descriptor *tx) +{ + tx->flags &= ~DMA_CTRL_ACK; +} + static inline bool async_tx_test_ack(struct dma_async_tx_descriptor *tx) { return (tx->flags & DMA_CTRL_ACK) == DMA_CTRL_ACK; @@ -323,6 +360,13 @@ __dma_cap_set(enum dma_transaction_type tx_type, dma_cap_mask_t *dstp) set_bit(tx_type, dstp->bits); } +#define dma_cap_clear(tx, mask) __dma_cap_clear((tx), &(mask)) +static inline void +__dma_cap_clear(enum dma_transaction_type tx_type, dma_cap_mask_t *dstp) +{ + clear_bit(tx_type, dstp->bits); +} + #define dma_cap_zero(mask) __dma_cap_zero(&(mask)) static inline void __dma_cap_zero(dma_cap_mask_t *dstp) { @@ -400,11 +444,16 @@ static inline enum dma_status dma_async_is_complete(dma_cookie_t cookie, enum dma_status dma_sync_wait(struct dma_chan *chan, dma_cookie_t cookie); #ifdef CONFIG_DMA_ENGINE enum dma_status dma_wait_for_async_tx(struct dma_async_tx_descriptor *tx); +void dma_issue_pending_all(void); #else static inline enum dma_status dma_wait_for_async_tx(struct dma_async_tx_descriptor *tx) { return DMA_SUCCESS; } +static inline void dma_issue_pending_all(void) +{ + do { } while (0); +} #endif /* --- DMA device --- */ @@ -413,7 +462,6 @@ int dma_async_device_register(struct dma_device *device); void dma_async_device_unregister(struct dma_device *device); void dma_run_dependencies(struct dma_async_tx_descriptor *tx); struct dma_chan *dma_find_channel(enum dma_transaction_type tx_type); -void dma_issue_pending_all(void); #define dma_request_channel(mask, x, y) __dma_request_channel(&(mask), x, y) struct dma_chan *__dma_request_channel(dma_cap_mask_t *mask, dma_filter_fn fn, void *fn_param); void dma_release_channel(struct dma_chan *chan); diff --git a/include/linux/dmar.h b/include/linux/dmar.h index f28440784cf..4a2b162c256 100644 --- a/include/linux/dmar.h +++ b/include/linux/dmar.h @@ -24,16 +24,17 @@ #include <linux/acpi.h> #include <linux/types.h> #include <linux/msi.h> +#include <linux/irqreturn.h> -#if defined(CONFIG_DMAR) || defined(CONFIG_INTR_REMAP) struct intel_iommu; - +#if defined(CONFIG_DMAR) || defined(CONFIG_INTR_REMAP) struct dmar_drhd_unit { struct list_head list; /* list of drhd units */ struct acpi_dmar_header *hdr; /* ACPI header */ u64 reg_base_addr; /* register base address*/ struct pci_dev **devices; /* target device array */ int devices_cnt; /* target device count */ + u16 segment; /* PCI domain */ u8 ignored:1; /* ignore drhd */ u8 include_all:1; struct intel_iommu *iommu; @@ -44,12 +45,20 @@ extern struct list_head dmar_drhd_units; #define for_each_drhd_unit(drhd) \ list_for_each_entry(drhd, &dmar_drhd_units, list) +#define for_each_active_iommu(i, drhd) \ + list_for_each_entry(drhd, &dmar_drhd_units, list) \ + if (i=drhd->iommu, drhd->ignored) {} else + +#define for_each_iommu(i, drhd) \ + list_for_each_entry(drhd, &dmar_drhd_units, list) \ + if (i=drhd->iommu, 0) {} else + extern int dmar_table_init(void); extern int dmar_dev_scope_init(void); /* 
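The capability-mask helpers and dma_request_channel() above are how a client grabs a private channel from dmaengine. A sketch, passing a NULL filter to accept any channel offering the capability; demo_get_memcpy_chan is illustrative, and the channel is later handed back with dma_release_channel():

#include <linux/dmaengine.h>

static struct dma_chan *demo_get_memcpy_chan(void)
{
        dma_cap_mask_t mask;

        dma_cap_zero(mask);
        dma_cap_set(DMA_MEMCPY, mask);

        /* May return NULL if no capable channel is free. */
        return dma_request_channel(mask, NULL, NULL);
}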
Intel IOMMU detection */ extern void detect_intel_iommu(void); - +extern int enable_drhd_fault_handling(void); extern int parse_ioapics_under_ir(void); extern int alloc_iommu(struct dmar_drhd_unit *); @@ -63,12 +72,12 @@ static inline int dmar_table_init(void) { return -ENODEV; } +static inline int enable_drhd_fault_handling(void) +{ + return -1; +} #endif /* !CONFIG_DMAR && !CONFIG_INTR_REMAP */ -#ifdef CONFIG_INTR_REMAP -extern int intr_remapping_enabled; -extern int enable_intr_remapping(int); - struct irte { union { struct { @@ -97,6 +106,13 @@ struct irte { __u64 high; }; }; +#ifdef CONFIG_INTR_REMAP +extern int intr_remapping_enabled; +extern int intr_remapping_supported(void); +extern int enable_intr_remapping(int); +extern void disable_intr_remapping(void); +extern int reenable_intr_remapping(int); + extern int get_irte(int irq, struct irte *entry); extern int modify_irte(int irq, struct irte *irte_modified); extern int alloc_irte(struct intel_iommu *iommu, int irq, u16 count); @@ -110,15 +126,54 @@ extern int free_irte(int irq); extern int irq_remapped(int irq); extern struct intel_iommu *map_dev_to_ir(struct pci_dev *dev); extern struct intel_iommu *map_ioapic_to_ir(int apic); +extern int set_ioapic_sid(struct irte *irte, int apic); +extern int set_msi_sid(struct irte *irte, struct pci_dev *dev); #else +static inline int alloc_irte(struct intel_iommu *iommu, int irq, u16 count) +{ + return -1; +} +static inline int modify_irte(int irq, struct irte *irte_modified) +{ + return -1; +} +static inline int free_irte(int irq) +{ + return -1; +} +static inline int map_irq_to_irte_handle(int irq, u16 *sub_handle) +{ + return -1; +} +static inline int set_irte_irq(int irq, struct intel_iommu *iommu, u16 index, + u16 sub_handle) +{ + return -1; +} +static inline struct intel_iommu *map_dev_to_ir(struct pci_dev *dev) +{ + return NULL; +} +static inline struct intel_iommu *map_ioapic_to_ir(int apic) +{ + return NULL; +} +static inline int set_ioapic_sid(struct irte *irte, int apic) +{ + return 0; +} +static inline int set_msi_sid(struct irte *irte, struct pci_dev *dev) +{ + return 0; +} + #define irq_remapped(irq) (0) #define enable_intr_remapping(mode) (-1) +#define disable_intr_remapping() (0) +#define reenable_intr_remapping(mode) (0) #define intr_remapping_enabled (0) #endif -#ifdef CONFIG_DMAR -extern const char *dmar_get_fault_reason(u8 fault_reason); - /* Can't use the common MSI interrupt functions * since DMAR is not a pci device */ @@ -127,8 +182,10 @@ extern void dmar_msi_mask(unsigned int irq); extern void dmar_msi_read(int irq, struct msi_msg *msg); extern void dmar_msi_write(int irq, struct msi_msg *msg); extern int dmar_set_interrupt(struct intel_iommu *iommu); +extern irqreturn_t dmar_fault(int irq, void *dev_id); extern int arch_setup_dmar_msi(unsigned int irq); +#ifdef CONFIG_DMAR extern int iommu_detected, no_iommu; extern struct list_head dmar_rmrr_units; struct dmar_rmrr_unit { @@ -142,6 +199,15 @@ struct dmar_rmrr_unit { #define for_each_rmrr_units(rmrr) \ list_for_each_entry(rmrr, &dmar_rmrr_units, list) + +struct dmar_atsr_unit { + struct list_head list; /* list of ATSR units */ + struct acpi_dmar_header *hdr; /* ACPI header */ + struct pci_dev **devices; /* target devices */ + int devices_cnt; /* target device count */ + u8 include_all:1; /* include all ports */ +}; + /* Intel DMAR initialization functions */ extern int intel_iommu_init(void); #else diff --git a/include/linux/dmi.h b/include/linux/dmi.h index 34161907b2f..a8a3e1ac281 100644 --- a/include/linux/dmi.h 
+++ b/include/linux/dmi.h @@ -38,15 +38,17 @@ struct dmi_device { #ifdef CONFIG_DMI extern int dmi_check_system(const struct dmi_system_id *list); +const struct dmi_system_id *dmi_first_match(const struct dmi_system_id *list); extern const char * dmi_get_system_info(int field); extern const struct dmi_device * dmi_find_device(int type, const char *name, const struct dmi_device *from); extern void dmi_scan_machine(void); -extern int dmi_get_year(int field); +extern bool dmi_get_date(int field, int *yearp, int *monthp, int *dayp); extern int dmi_name_in_vendors(const char *str); extern int dmi_name_in_serial(const char *str); extern int dmi_available; -extern int dmi_walk(void (*decode)(const struct dmi_header *)); +extern int dmi_walk(void (*decode)(const struct dmi_header *, void *), + void *private_data); extern bool dmi_match(enum dmi_field f, const char *str); #else @@ -56,14 +58,25 @@ static inline const char * dmi_get_system_info(int field) { return NULL; } static inline const struct dmi_device * dmi_find_device(int type, const char *name, const struct dmi_device *from) { return NULL; } static inline void dmi_scan_machine(void) { return; } -static inline int dmi_get_year(int year) { return 0; } +static inline bool dmi_get_date(int field, int *yearp, int *monthp, int *dayp) +{ + if (yearp) + *yearp = 0; + if (monthp) + *monthp = 0; + if (dayp) + *dayp = 0; + return false; +} static inline int dmi_name_in_vendors(const char *s) { return 0; } static inline int dmi_name_in_serial(const char *s) { return 0; } #define dmi_available 0 -static inline int dmi_walk(void (*decode)(const struct dmi_header *)) - { return -1; } +static inline int dmi_walk(void (*decode)(const struct dmi_header *, void *), + void *private_data) { return -1; } static inline bool dmi_match(enum dmi_field f, const char *str) { return false; } +static inline const struct dmi_system_id * + dmi_first_match(const struct dmi_system_id *list) { return NULL; } #endif diff --git a/include/linux/dn.h b/include/linux/dn.h index 02bba040fcf..fe999082319 100644 --- a/include/linux/dn.h +++ b/include/linux/dn.h @@ -1,6 +1,8 @@ #ifndef _LINUX_DN_H #define _LINUX_DN_H +#include <linux/types.h> + /* DECnet Data Structures and Constants diff --git a/include/linux/dnotify.h b/include/linux/dnotify.h index 102a902b439..ecc06286226 100644 --- a/include/linux/dnotify.h +++ b/include/linux/dnotify.h @@ -10,7 +10,7 @@ struct dnotify_struct { struct dnotify_struct * dn_next; - unsigned long dn_mask; + __u32 dn_mask; int dn_fd; struct file * dn_filp; fl_owner_t dn_owner; @@ -21,23 +21,18 @@ struct dnotify_struct { #ifdef CONFIG_DNOTIFY -extern void __inode_dir_notify(struct inode *, unsigned long); +#define DNOTIFY_ALL_EVENTS (FS_DELETE | FS_DELETE_CHILD |\ + FS_MODIFY | FS_MODIFY_CHILD |\ + FS_ACCESS | FS_ACCESS_CHILD |\ + FS_ATTRIB | FS_ATTRIB_CHILD |\ + FS_CREATE | FS_DN_RENAME |\ + FS_MOVED_FROM | FS_MOVED_TO) + extern void dnotify_flush(struct file *, fl_owner_t); extern int fcntl_dirnotify(int, struct file *, unsigned long); -extern void dnotify_parent(struct dentry *, unsigned long); - -static inline void inode_dir_notify(struct inode *inode, unsigned long event) -{ - if (inode->i_dnotify_mask & (event)) - __inode_dir_notify(inode, event); -} #else -static inline void __inode_dir_notify(struct inode *inode, unsigned long event) -{ -} - static inline void dnotify_flush(struct file *filp, fl_owner_t id) { } @@ -47,14 +42,6 @@ static inline int fcntl_dirnotify(int fd, struct file *filp, unsigned long arg) return -EINVAL; } -static inline 
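dmi_walk() now threads a caller-supplied pointer through to the decode callback, so walkers no longer need file-scope state. A small sketch counting DMI entries; the demo_* names are illustrative:

#include <linux/dmi.h>
#include <linux/errno.h>

struct demo_dmi_count {
        int entries;
};

static void demo_dmi_decode(const struct dmi_header *dh, void *priv)
{
        struct demo_dmi_count *count = priv;

        count->entries++;
}

static int demo_count_dmi_entries(void)
{
        struct demo_dmi_count count = { 0 };

        if (dmi_walk(demo_dmi_decode, &count) < 0)
                return -ENODEV;         /* DMI tables not available */

        return count.entries;
}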
void dnotify_parent(struct dentry *dentry, unsigned long event) -{ -} - -static inline void inode_dir_notify(struct inode *inode, unsigned long event) -{ -} - #endif /* CONFIG_DNOTIFY */ #endif /* __KERNEL __ */ diff --git a/include/linux/ds1wm.h b/include/linux/ds1wm.h deleted file mode 100644 index d3c65e48a2e..00000000000 --- a/include/linux/ds1wm.h +++ /dev/null @@ -1,12 +0,0 @@ -/* platform data for the DS1WM driver */ - -struct ds1wm_platform_data { - int bus_shift; /* number of shifts needed to calculate the - * offset between DS1WM registers; - * e.g. on h5xxx and h2200 this is 2 - * (registers aligned to 4-byte boundaries), - * while on hx4700 this is 1 */ - int active_high; - void (*enable)(struct platform_device *pdev); - void (*disable)(struct platform_device *pdev); -}; diff --git a/include/linux/dst.h b/include/linux/dst.h new file mode 100644 index 00000000000..e26fed84b1a --- /dev/null +++ b/include/linux/dst.h @@ -0,0 +1,587 @@ +/* + * 2007+ Copyright (c) Evgeniy Polyakov <johnpol@2ka.mipt.ru> + * All rights reserved. + * + * This program is free software; you can redistribute it and/or modify + * it under the terms of the GNU General Public License as published by + * the Free Software Foundation; either version 2 of the License, or + * (at your option) any later version. + * + * This program is distributed in the hope that it will be useful, + * but WITHOUT ANY WARRANTY; without even the implied warranty of + * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the + * GNU General Public License for more details. + */ + +#ifndef __DST_H +#define __DST_H + +#include <linux/types.h> +#include <linux/connector.h> + +#define DST_NAMELEN 32 +#define DST_NAME "dst" + +enum { + /* Remove node with given id from storage */ + DST_DEL_NODE = 0, + /* Add remote node with given id to the storage */ + DST_ADD_REMOTE, + /* Add local node with given id to the storage to be exported and used by remote peers */ + DST_ADD_EXPORT, + /* Crypto initialization command (hash/cipher used to protect the connection) */ + DST_CRYPTO, + /* Security attributes for given connection (permissions for example) */ + DST_SECURITY, + /* Register given node in the block layer subsystem */ + DST_START, + DST_CMD_MAX +}; + +struct dst_ctl +{ + /* Storage name */ + char name[DST_NAMELEN]; + /* Command flags */ + __u32 flags; + /* Command itself (see above) */ + __u32 cmd; + /* Maximum number of pages per single request in this device */ + __u32 max_pages; + /* Stale/error transaction scanning timeout in milliseconds */ + __u32 trans_scan_timeout; + /* Maximum number of retry sends before completing transaction as broken */ + __u32 trans_max_retries; + /* Storage size */ + __u64 size; +}; + +/* Reply command carries completion status */ +struct dst_ctl_ack +{ + struct cn_msg msg; + int error; + int unused[3]; +}; + +/* + * Unfortunaltely socket address structure is not exported to userspace + * and is redefined there. + */ +#define SADDR_MAX_DATA 128 + +struct saddr { + /* address family, AF_xxx */ + unsigned short sa_family; + /* 14 bytes of protocol address */ + char sa_data[SADDR_MAX_DATA]; + /* Number of bytes used in sa_data */ + unsigned short sa_data_len; +}; + +/* Address structure */ +struct dst_network_ctl +{ + /* Socket type: datagram, stream...*/ + unsigned int type; + /* Let me guess, is it a Jupiter diameter? 
*/ + unsigned int proto; + /* Peer's address */ + struct saddr addr; +}; + +struct dst_crypto_ctl +{ + /* Cipher and hash names */ + char cipher_algo[DST_NAMELEN]; + char hash_algo[DST_NAMELEN]; + + /* Key sizes. Can be zero for digest for example */ + unsigned int cipher_keysize, hash_keysize; + /* Alignment. Calculated by the DST itself. */ + unsigned int crypto_attached_size; + /* Number of threads to perform crypto operations */ + int thread_num; +}; + +/* Export security attributes have this bits checked in when client connects */ +#define DST_PERM_READ (1<<0) +#define DST_PERM_WRITE (1<<1) + +/* + * Right now it is simple model, where each remote address + * is assigned to set of permissions it is allowed to perform. + * In real world block device does not know anything but + * reading and writing, so it should be more than enough. + */ +struct dst_secure_user +{ + unsigned int permissions; + struct saddr addr; +}; + +/* + * Export control command: device to export and network address to accept + * clients to work with given device + */ +struct dst_export_ctl +{ + char device[DST_NAMELEN]; + struct dst_network_ctl ctl; +}; + +enum { + DST_CFG = 1, /* Request remote configuration */ + DST_IO, /* IO command */ + DST_IO_RESPONSE, /* IO response */ + DST_PING, /* Keepalive message */ + DST_NCMD_MAX, +}; + +struct dst_cmd +{ + /* Network command itself, see above */ + __u32 cmd; + /* + * Size of the attached data + * (in most cases, for READ command it means how many bytes were requested) + */ + __u32 size; + /* Crypto size: number of attached bytes with digest/hmac */ + __u32 csize; + /* Here we can carry secret data */ + __u32 reserved; + /* Read/write bits, see how they are encoded in bio structure */ + __u64 rw; + /* BIO flags */ + __u64 flags; + /* Unique command id (like transaction ID) */ + __u64 id; + /* Sector to start IO from */ + __u64 sector; + /* Hash data is placed after this header */ + __u8 hash[0]; +}; + +/* + * Convert command to/from network byte order. + * We do not use hton*() functions, since there is + * no 64-bit implementation. + */ +static inline void dst_convert_cmd(struct dst_cmd *c) +{ + c->cmd = __cpu_to_be32(c->cmd); + c->csize = __cpu_to_be32(c->csize); + c->size = __cpu_to_be32(c->size); + c->sector = __cpu_to_be64(c->sector); + c->id = __cpu_to_be64(c->id); + c->flags = __cpu_to_be64(c->flags); + c->rw = __cpu_to_be64(c->rw); +} + +/* Transaction id */ +typedef __u64 dst_gen_t; + +#ifdef __KERNEL__ + +#include <linux/blkdev.h> +#include <linux/bio.h> +#include <linux/device.h> +#include <linux/mempool.h> +#include <linux/net.h> +#include <linux/poll.h> +#include <linux/rbtree.h> + +#ifdef CONFIG_DST_DEBUG +#define dprintk(f, a...) printk(KERN_NOTICE f, ##a) +#else +static inline void __attribute__ ((format (printf, 1, 2))) + dprintk(const char *fmt, ...) {} +#endif + +struct dst_node; + +struct dst_trans +{ + /* DST node we are working with */ + struct dst_node *n; + + /* Entry inside transaction tree */ + struct rb_node trans_entry; + + /* Merlin kills this transaction when this memory cell equals zero */ + atomic_t refcnt; + + /* How this transaction should be processed by crypto engine */ + short enc; + /* How many times this transaction was resent */ + short retries; + /* Completion status */ + int error; + + /* When did we send it to the remote peer */ + long send_time; + + /* My name is... 
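dst_cmd travels over the wire in big-endian form, so a sender fills the structure in CPU byte order and runs dst_convert_cmd() just before transmission. A sketch for a read request; demo_fill_read_cmd and its parameters are illustrative:

#include <linux/string.h>
#include <linux/dst.h>

static void demo_fill_read_cmd(struct dst_cmd *c, __u64 trans_id,
                               __u64 sector, __u32 bytes)
{
        memset(c, 0, sizeof(*c));

        c->cmd    = DST_IO;     /* network IO command */
        c->id     = trans_id;   /* transaction id (dst_gen_t) */
        c->sector = sector;     /* starting sector on the remote device */
        c->size   = bytes;      /* how much data we expect back */
        c->rw     = 0;          /* no write bit set: this is a read */

        dst_convert_cmd(c);     /* CPU order -> big endian for the wire */
}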
+ * Well, computers does not speak, they have unique id instead */ + dst_gen_t gen; + + /* Block IO we are working with */ + struct bio *bio; + + /* Network command for above block IO request */ + struct dst_cmd cmd; +}; + +struct dst_crypto_engine +{ + /* What should we do with all block requests */ + struct crypto_hash *hash; + struct crypto_ablkcipher *cipher; + + /* Pool of pages used to encrypt data into before sending */ + int page_num; + struct page **pages; + + /* What to do with current request */ + int enc; + /* Who we are and where do we go */ + struct scatterlist *src, *dst; + + /* Maximum timeout waiting for encryption to be completed */ + long timeout; + /* IV is a 64-bit sequential counter */ + u64 iv; + + /* Secret data */ + void *private; + + /* Cached temporary data lives here */ + int size; + void *data; +}; + +struct dst_state +{ + /* The main state protection */ + struct mutex state_lock; + + /* Polling machinery for sockets */ + wait_queue_t wait; + wait_queue_head_t *whead; + /* Most of events are being waited here */ + wait_queue_head_t thread_wait; + + /* Who owns this? */ + struct dst_node *node; + + /* Network address for this state */ + struct dst_network_ctl ctl; + + /* Permissions to work with: read-only or rw connection */ + u32 permissions; + + /* Called when we need to clean private data */ + void (* cleanup)(struct dst_state *st); + + /* Used by the server: BIO completion queues BIOs here */ + struct list_head request_list; + spinlock_t request_lock; + + /* Guess what? No, it is not number of planets */ + atomic_t refcnt; + + /* This flags is set when connection should be dropped */ + int need_exit; + + /* + * Socket to work with. Second pointer is used for + * lockless check if socket was changed before performing + * next action (like working with cached polling result) + */ + struct socket *socket, *read_socket; + + /* Cached preallocated data */ + void *data; + unsigned int size; + + /* Currently processed command */ + struct dst_cmd cmd; +}; + +struct dst_info +{ + /* Device size */ + u64 size; + + /* Local device name for export devices */ + char local[DST_NAMELEN]; + + /* Network setup */ + struct dst_network_ctl net; + + /* Sysfs bits use this */ + struct device device; +}; + +struct dst_node +{ + struct list_head node_entry; + + /* Hi, my name is stored here */ + char name[DST_NAMELEN]; + /* My cache name is stored here */ + char cache_name[DST_NAMELEN]; + + /* Block device attached to given node. + * Only valid for exporting nodes */ + struct block_device *bdev; + /* Network state machine for given peer */ + struct dst_state *state; + + /* Block IO machinery */ + struct request_queue *queue; + struct gendisk *disk; + + /* Number of threads in processing pool */ + int thread_num; + /* Maximum number of pages in single IO */ + int max_pages; + + /* I'm that big in bytes */ + loff_t size; + + /* Exported to userspace node information */ + struct dst_info *info; + + /* + * Security attribute list. + * Used only by exporting node currently. + */ + struct list_head security_list; + struct mutex security_lock; + + /* + * When this unerflows below zero, university collapses. + * But this will not happen, since node will be freed, + * when reference counter reaches zero. + */ + atomic_t refcnt; + + /* How precisely should I be started? 
*/ + int (*start)(struct dst_node *); + + /* Crypto capabilities */ + struct dst_crypto_ctl crypto; + u8 *hash_key; + u8 *cipher_key; + + /* Pool of processing thread */ + struct thread_pool *pool; + + /* Transaction IDs live here */ + atomic_long_t gen; + + /* + * How frequently and how many times transaction + * tree should be scanned to drop stale objects. + */ + long trans_scan_timeout; + int trans_max_retries; + + /* Small gnomes live here */ + struct rb_root trans_root; + struct mutex trans_lock; + + /* + * Transaction cache/memory pool. + * It is big enough to contain not only transaction + * itself, but additional crypto data (digest/hmac). + */ + struct kmem_cache *trans_cache; + mempool_t *trans_pool; + + /* This entity scans transaction tree */ + struct delayed_work trans_work; + + wait_queue_head_t wait; +}; + +/* Kernel representation of the security attribute */ +struct dst_secure +{ + struct list_head sec_entry; + struct dst_secure_user sec; +}; + +int dst_process_bio(struct dst_node *n, struct bio *bio); + +int dst_node_init_connected(struct dst_node *n, struct dst_network_ctl *r); +int dst_node_init_listened(struct dst_node *n, struct dst_export_ctl *le); + +static inline struct dst_state *dst_state_get(struct dst_state *st) +{ + BUG_ON(atomic_read(&st->refcnt) == 0); + atomic_inc(&st->refcnt); + return st; +} + +void dst_state_put(struct dst_state *st); + +struct dst_state *dst_state_alloc(struct dst_node *n); +int dst_state_socket_create(struct dst_state *st); +void dst_state_socket_release(struct dst_state *st); + +void dst_state_exit_connected(struct dst_state *st); + +int dst_state_schedule_receiver(struct dst_state *st); + +void dst_dump_addr(struct socket *sk, struct sockaddr *sa, char *str); + +static inline void dst_state_lock(struct dst_state *st) +{ + mutex_lock(&st->state_lock); +} + +static inline void dst_state_unlock(struct dst_state *st) +{ + mutex_unlock(&st->state_lock); +} + +void dst_poll_exit(struct dst_state *st); +int dst_poll_init(struct dst_state *st); + +static inline unsigned int dst_state_poll(struct dst_state *st) +{ + unsigned int revents = POLLHUP | POLLERR; + + dst_state_lock(st); + if (st->socket) + revents = st->socket->ops->poll(NULL, st->socket, NULL); + dst_state_unlock(st); + + return revents; +} + +static inline int dst_thread_setup(void *private, void *data) +{ + return 0; +} + +void dst_node_put(struct dst_node *n); + +static inline struct dst_node *dst_node_get(struct dst_node *n) +{ + atomic_inc(&n->refcnt); + return n; +} + +int dst_data_recv(struct dst_state *st, void *data, unsigned int size); +int dst_recv_cdata(struct dst_state *st, void *cdata); +int dst_data_send_header(struct socket *sock, + void *data, unsigned int size, int more); + +int dst_send_bio(struct dst_state *st, struct dst_cmd *cmd, struct bio *bio); + +int dst_process_io(struct dst_state *st); +int dst_export_crypto(struct dst_node *n, struct bio *bio); +int dst_export_send_bio(struct bio *bio); +int dst_start_export(struct dst_node *n); + +int __init dst_export_init(void); +void dst_export_exit(void); + +/* Private structure for export block IO requests */ +struct dst_export_priv +{ + struct list_head request_entry; + struct dst_state *state; + struct bio *bio; + struct dst_cmd cmd; +}; + +static inline void dst_trans_get(struct dst_trans *t) +{ + atomic_inc(&t->refcnt); +} + +struct dst_trans *dst_trans_search(struct dst_node *node, dst_gen_t gen); +int dst_trans_remove(struct dst_trans *t); +int dst_trans_remove_nolock(struct dst_trans *t); +void 
dst_trans_put(struct dst_trans *t); + +/* + * Convert bio into network command. + */ +static inline void dst_bio_to_cmd(struct bio *bio, struct dst_cmd *cmd, + u32 command, u64 id) +{ + cmd->cmd = command; + cmd->flags = (bio->bi_flags << BIO_POOL_BITS) >> BIO_POOL_BITS; + cmd->rw = bio->bi_rw; + cmd->size = bio->bi_size; + cmd->csize = 0; + cmd->id = id; + cmd->sector = bio->bi_sector; +}; + +int dst_trans_send(struct dst_trans *t); +int dst_trans_crypto(struct dst_trans *t); + +int dst_node_crypto_init(struct dst_node *n, struct dst_crypto_ctl *ctl); +void dst_node_crypto_exit(struct dst_node *n); + +static inline int dst_need_crypto(struct dst_node *n) +{ + struct dst_crypto_ctl *c = &n->crypto; + /* + * Logical OR is appropriate here, but boolean one produces + * more optimal code, so it is used instead. + */ + return (c->hash_algo[0] | c->cipher_algo[0]); +} + +int dst_node_trans_init(struct dst_node *n, unsigned int size); +void dst_node_trans_exit(struct dst_node *n); + +/* + * Pool of threads. + * Ready list contains threads currently free to be used, + * active one contains threads with some work scheduled for them. + * Caller can wait in given queue when thread is ready. + */ +struct thread_pool +{ + int thread_num; + struct mutex thread_lock; + struct list_head ready_list, active_list; + + wait_queue_head_t wait; +}; + +void thread_pool_del_worker(struct thread_pool *p); +void thread_pool_del_worker_id(struct thread_pool *p, unsigned int id); +int thread_pool_add_worker(struct thread_pool *p, + char *name, + unsigned int id, + void *(* init)(void *data), + void (* cleanup)(void *data), + void *data); + +void thread_pool_destroy(struct thread_pool *p); +struct thread_pool *thread_pool_create(int num, char *name, + void *(* init)(void *data), + void (* cleanup)(void *data), + void *data); + +int thread_pool_schedule(struct thread_pool *p, + int (* setup)(void *stored_private, void *setup_data), + int (* action)(void *stored_private, void *setup_data), + void *setup_data, long timeout); +int thread_pool_schedule_private(struct thread_pool *p, + int (* setup)(void *private, void *data), + int (* action)(void *private, void *data), + void *data, long timeout, void *id); + +#endif /* __KERNEL__ */ +#endif /* __DST_H */ diff --git a/include/linux/dvb/audio.h b/include/linux/dvb/audio.h index 89412e18f57..fec66bd24f2 100644 --- a/include/linux/dvb/audio.h +++ b/include/linux/dvb/audio.h @@ -24,12 +24,7 @@ #ifndef _DVBAUDIO_H_ #define _DVBAUDIO_H_ -#ifdef __KERNEL__ #include <linux/types.h> -#else -#include <stdint.h> -#endif - typedef enum { AUDIO_SOURCE_DEMUX, /* Select the demux as the main source */ @@ -81,7 +76,7 @@ struct audio_karaoke{ /* if Vocal1 or Vocal2 are non-zero, they get mixed */ } audio_karaoke_t; /* into left and right */ -typedef uint16_t audio_attributes_t; +typedef __u16 audio_attributes_t; /* bits: descr. 
*/ /* 15-13 audio coding mode (0=ac3, 2=mpeg1, 3=mpeg2ext, 4=LPCM, 6=DTS, */ /* 12 multichannel extension */ diff --git a/include/linux/dvb/dmx.h b/include/linux/dvb/dmx.h index 402fb7a8d92..fef943738a2 100644 --- a/include/linux/dvb/dmx.h +++ b/include/linux/dvb/dmx.h @@ -24,7 +24,7 @@ #ifndef _DVBDMX_H_ #define _DVBDMX_H_ -#include <asm/types.h> +#include <linux/types.h> #ifdef __KERNEL__ #include <linux/time.h> #else diff --git a/include/linux/dvb/frontend.h b/include/linux/dvb/frontend.h index 55026b1a40b..51c8d2d49e4 100644 --- a/include/linux/dvb/frontend.h +++ b/include/linux/dvb/frontend.h @@ -26,8 +26,7 @@ #ifndef _DVBFRONTEND_H_ #define _DVBFRONTEND_H_ -#include <asm/types.h> - +#include <linux/types.h> typedef enum fe_type { FE_QPSK, diff --git a/include/linux/dvb/net.h b/include/linux/dvb/net.h index 5be474bf0d2..f451e7eb0b0 100644 --- a/include/linux/dvb/net.h +++ b/include/linux/dvb/net.h @@ -24,8 +24,7 @@ #ifndef _DVBNET_H_ #define _DVBNET_H_ -#include <asm/types.h> - +#include <linux/types.h> struct dvb_net_if { __u16 pid; diff --git a/include/linux/dvb/video.h b/include/linux/dvb/video.h index 50839fe9e39..1d750c0fd86 100644 --- a/include/linux/dvb/video.h +++ b/include/linux/dvb/video.h @@ -24,17 +24,14 @@ #ifndef _DVBVIDEO_H_ #define _DVBVIDEO_H_ -#include <linux/compiler.h> - -#ifdef __KERNEL__ #include <linux/types.h> +#ifdef __KERNEL__ +#include <linux/compiler.h> #else -#include <asm/types.h> #include <stdint.h> #include <time.h> #endif - typedef enum { VIDEO_FORMAT_4_3, /* Select 4:3 format */ VIDEO_FORMAT_16_9, /* Select 16:9 format. */ @@ -135,12 +132,12 @@ struct video_command { #define VIDEO_VSYNC_FIELD_PROGRESSIVE (3) struct video_event { - int32_t type; + __s32 type; #define VIDEO_EVENT_SIZE_CHANGED 1 #define VIDEO_EVENT_FRAME_RATE_CHANGED 2 #define VIDEO_EVENT_DECODER_STOPPED 3 #define VIDEO_EVENT_VSYNC 4 - time_t timestamp; + __kernel_time_t timestamp; union { video_size_t size; unsigned int frame_rate; /* in frames per 1000sec */ @@ -160,25 +157,25 @@ struct video_status { struct video_still_picture { char __user *iFrame; /* pointer to a single iframe in memory */ - int32_t size; + __s32 size; }; typedef struct video_highlight { int active; /* 1=show highlight, 0=hide highlight */ - uint8_t contrast1; /* 7- 4 Pattern pixel contrast */ + __u8 contrast1; /* 7- 4 Pattern pixel contrast */ /* 3- 0 Background pixel contrast */ - uint8_t contrast2; /* 7- 4 Emphasis pixel-2 contrast */ + __u8 contrast2; /* 7- 4 Emphasis pixel-2 contrast */ /* 3- 0 Emphasis pixel-1 contrast */ - uint8_t color1; /* 7- 4 Pattern pixel color */ + __u8 color1; /* 7- 4 Pattern pixel color */ /* 3- 0 Background pixel color */ - uint8_t color2; /* 7- 4 Emphasis pixel-2 color */ + __u8 color2; /* 7- 4 Emphasis pixel-2 color */ /* 3- 0 Emphasis pixel-1 color */ - uint32_t ypos; /* 23-22 auto action mode */ + __u32 ypos; /* 23-22 auto action mode */ /* 21-12 start y */ /* 9- 0 end y */ - uint32_t xpos; /* 23-22 button color number */ + __u32 xpos; /* 23-22 button color number */ /* 21-12 start x */ /* 9- 0 end x */ } video_highlight_t; @@ -192,17 +189,17 @@ typedef struct video_spu { typedef struct video_spu_palette { /* SPU Palette information */ int length; - uint8_t __user *palette; + __u8 __user *palette; } video_spu_palette_t; typedef struct video_navi_pack { int length; /* 0 ... 1024 */ - uint8_t data[1024]; + __u8 data[1024]; } video_navi_pack_t; -typedef uint16_t video_attributes_t; +typedef __u16 video_attributes_t; /* bits: descr. 
*/ /* 15-14 Video compression mode (0=MPEG-1, 1=MPEG-2) */ /* 13-12 TV system (0=525/60, 1=625/50) */ diff --git a/include/linux/dw_dmac.h b/include/linux/dw_dmac.h index d797dde247f..c8aad713a04 100644 --- a/include/linux/dw_dmac.h +++ b/include/linux/dw_dmac.h @@ -74,4 +74,23 @@ struct dw_dma_slave { #define DWC_CFGL_HS_DST_POL (1 << 18) /* dst handshake active low */ #define DWC_CFGL_HS_SRC_POL (1 << 19) /* src handshake active low */ +/* DMA API extensions */ +struct dw_cyclic_desc { + struct dw_desc **desc; + unsigned long periods; + void (*period_callback)(void *param); + void *period_callback_param; +}; + +struct dw_cyclic_desc *dw_dma_cyclic_prep(struct dma_chan *chan, + dma_addr_t buf_addr, size_t buf_len, size_t period_len, + enum dma_data_direction direction); +void dw_dma_cyclic_free(struct dma_chan *chan); +int dw_dma_cyclic_start(struct dma_chan *chan); +void dw_dma_cyclic_stop(struct dma_chan *chan); + +dma_addr_t dw_dma_get_src_addr(struct dma_chan *chan); + +dma_addr_t dw_dma_get_dst_addr(struct dma_chan *chan); + #endif /* DW_DMAC_H */ diff --git a/include/linux/dynamic_debug.h b/include/linux/dynamic_debug.h new file mode 100644 index 00000000000..a0d9422a156 --- /dev/null +++ b/include/linux/dynamic_debug.h @@ -0,0 +1,88 @@ +#ifndef _DYNAMIC_DEBUG_H +#define _DYNAMIC_DEBUG_H + +/* dynamic_printk_enabled, and dynamic_printk_enabled2 are bitmasks in which + * bit n is set to 1 if any modname hashes into the bucket n, 0 otherwise. They + * use independent hash functions, to reduce the chance of false positives. + */ +extern long long dynamic_debug_enabled; +extern long long dynamic_debug_enabled2; + +/* + * An instance of this structure is created in a special + * ELF section at every dynamic debug callsite. At runtime, + * the special section is treated as an array of these. + */ +struct _ddebug { + /* + * These fields are used to drive the user interface + * for selecting and displaying debug callsites. + */ + const char *modname; + const char *function; + const char *filename; + const char *format; + char primary_hash; + char secondary_hash; + unsigned int lineno:24; + /* + * The flags field controls the behaviour at the callsite. + * The bits here are changed dynamically when the user + * writes commands to <debugfs>/dynamic_debug/ddebug + */ +#define _DPRINTK_FLAGS_PRINT (1<<0) /* printk() a message using the format */ +#define _DPRINTK_FLAGS_DEFAULT 0 + unsigned int flags:8; +} __attribute__((aligned(8))); + + +int ddebug_add_module(struct _ddebug *tab, unsigned int n, + const char *modname); + +#if defined(CONFIG_DYNAMIC_DEBUG) +extern int ddebug_remove_module(char *mod_name); + +#define __dynamic_dbg_enabled(dd) ({ \ + int __ret = 0; \ + if (unlikely((dynamic_debug_enabled & (1LL << DEBUG_HASH)) && \ + (dynamic_debug_enabled2 & (1LL << DEBUG_HASH2)))) \ + if (unlikely(dd.flags)) \ + __ret = 1; \ + __ret; }) + +#define dynamic_pr_debug(fmt, ...) do { \ + static struct _ddebug descriptor \ + __used \ + __attribute__((section("__verbose"), aligned(8))) = \ + { KBUILD_MODNAME, __func__, __FILE__, fmt, DEBUG_HASH, \ + DEBUG_HASH2, __LINE__, _DPRINTK_FLAGS_DEFAULT }; \ + if (__dynamic_dbg_enabled(descriptor)) \ + printk(KERN_DEBUG KBUILD_MODNAME ":" pr_fmt(fmt), \ + ##__VA_ARGS__); \ + } while (0) + + +#define dynamic_dev_dbg(dev, fmt, ...) 
do { \ + static struct _ddebug descriptor \ + __used \ + __attribute__((section("__verbose"), aligned(8))) = \ + { KBUILD_MODNAME, __func__, __FILE__, fmt, DEBUG_HASH, \ + DEBUG_HASH2, __LINE__, _DPRINTK_FLAGS_DEFAULT }; \ + if (__dynamic_dbg_enabled(descriptor)) \ + dev_printk(KERN_DEBUG, dev, \ + KBUILD_MODNAME ": " fmt, \ + ##__VA_ARGS__); \ + } while (0) + +#else + +static inline int ddebug_remove_module(char *mod) +{ + return 0; +} + +#define dynamic_pr_debug(fmt, ...) do { } while (0) +#define dynamic_dev_dbg(dev, format, ...) do { } while (0) +#endif + +#endif diff --git a/include/linux/dynamic_printk.h b/include/linux/dynamic_printk.h deleted file mode 100644 index 2d528d00907..00000000000 --- a/include/linux/dynamic_printk.h +++ /dev/null @@ -1,93 +0,0 @@ -#ifndef _DYNAMIC_PRINTK_H -#define _DYNAMIC_PRINTK_H - -#define DYNAMIC_DEBUG_HASH_BITS 6 -#define DEBUG_HASH_TABLE_SIZE (1 << DYNAMIC_DEBUG_HASH_BITS) - -#define TYPE_BOOLEAN 1 - -#define DYNAMIC_ENABLED_ALL 0 -#define DYNAMIC_ENABLED_NONE 1 -#define DYNAMIC_ENABLED_SOME 2 - -extern int dynamic_enabled; - -/* dynamic_printk_enabled, and dynamic_printk_enabled2 are bitmasks in which - * bit n is set to 1 if any modname hashes into the bucket n, 0 otherwise. They - * use independent hash functions, to reduce the chance of false positives. - */ -extern long long dynamic_printk_enabled; -extern long long dynamic_printk_enabled2; - -struct mod_debug { - char *modname; - char *logical_modname; - char *flag_names; - int type; - int hash; - int hash2; -} __attribute__((aligned(8))); - -int register_dynamic_debug_module(char *mod_name, int type, char *share_name, - char *flags, int hash, int hash2); - -#if defined(CONFIG_DYNAMIC_PRINTK_DEBUG) -extern int unregister_dynamic_debug_module(char *mod_name); -extern int __dynamic_dbg_enabled_helper(char *modname, int type, - int value, int hash); - -#define __dynamic_dbg_enabled(module, type, value, level, hash) ({ \ - int __ret = 0; \ - if (unlikely((dynamic_printk_enabled & (1LL << DEBUG_HASH)) && \ - (dynamic_printk_enabled2 & (1LL << DEBUG_HASH2)))) \ - __ret = __dynamic_dbg_enabled_helper(module, type, \ - value, hash);\ - __ret; }) - -#define dynamic_pr_debug(fmt, ...) do { \ - static char mod_name[] \ - __attribute__((section("__verbose_strings"))) \ - = KBUILD_MODNAME; \ - static struct mod_debug descriptor \ - __used \ - __attribute__((section("__verbose"), aligned(8))) = \ - { mod_name, mod_name, NULL, TYPE_BOOLEAN, DEBUG_HASH, DEBUG_HASH2 };\ - if (__dynamic_dbg_enabled(KBUILD_MODNAME, TYPE_BOOLEAN, \ - 0, 0, DEBUG_HASH)) \ - printk(KERN_DEBUG KBUILD_MODNAME ":" fmt, \ - ##__VA_ARGS__); \ - } while (0) - -#define dynamic_dev_dbg(dev, format, ...) do { \ - static char mod_name[] \ - __attribute__((section("__verbose_strings"))) \ - = KBUILD_MODNAME; \ - static struct mod_debug descriptor \ - __used \ - __attribute__((section("__verbose"), aligned(8))) = \ - { mod_name, mod_name, NULL, TYPE_BOOLEAN, DEBUG_HASH, DEBUG_HASH2 };\ - if (__dynamic_dbg_enabled(KBUILD_MODNAME, TYPE_BOOLEAN, \ - 0, 0, DEBUG_HASH)) \ - dev_printk(KERN_DEBUG, dev, \ - KBUILD_MODNAME ": " format, \ - ##__VA_ARGS__); \ - } while (0) - -#else - -static inline int unregister_dynamic_debug_module(const char *mod_name) -{ - return 0; -} -static inline int __dynamic_dbg_enabled_helper(char *modname, int type, - int value, int hash) -{ - return 0; -} - -#define __dynamic_dbg_enabled(module, type, value, level, hash) ({ 0; }) -#define dynamic_pr_debug(fmt, ...) 
do { } while (0) -#define dynamic_dev_dbg(dev, format, ...) do { } while (0) -#endif - -#endif diff --git a/include/linux/edd.h b/include/linux/edd.h index 5d747c5cd0f..4cbd0fe9df0 100644 --- a/include/linux/edd.h +++ b/include/linux/edd.h @@ -30,6 +30,8 @@ #ifndef _LINUX_EDD_H #define _LINUX_EDD_H +#include <linux/types.h> + #define EDDNR 0x1e9 /* addr of number of edd_info structs at EDDBUF in boot_params - treat this as 1 byte */ #define EDDBUF 0xd00 /* addr of edd_info structs in boot_params */ diff --git a/include/linux/efi.h b/include/linux/efi.h index bb66feb164b..ce4581fbc08 100644 --- a/include/linux/efi.h +++ b/include/linux/efi.h @@ -101,7 +101,7 @@ typedef struct { u64 attribute; } efi_memory_desc_t; -typedef int (*efi_freemem_callback_t) (unsigned long start, unsigned long end, void *arg); +typedef int (*efi_freemem_callback_t) (u64 start, u64 end, void *arg); /* * Types and defines for Time Services diff --git a/include/linux/efs_fs_sb.h b/include/linux/efs_fs_sb.h index ff1945e3779..a01be90c58c 100644 --- a/include/linux/efs_fs_sb.h +++ b/include/linux/efs_fs_sb.h @@ -9,6 +9,7 @@ #ifndef __EFS_FS_SB_H__ #define __EFS_FS_SB_H__ +#include <linux/types.h> #include <linux/magic.h> /* EFS superblock magic numbers */ diff --git a/include/linux/eisa.h b/include/linux/eisa.h index e61c0be2a45..6925249a5ac 100644 --- a/include/linux/eisa.h +++ b/include/linux/eisa.h @@ -78,12 +78,12 @@ static inline void eisa_driver_unregister (struct eisa_driver *edrv) { } /* Mimics pci.h... */ static inline void *eisa_get_drvdata (struct eisa_device *edev) { - return edev->dev.driver_data; + return dev_get_drvdata(&edev->dev); } static inline void eisa_set_drvdata (struct eisa_device *edev, void *data) { - edev->dev.driver_data = data; + dev_set_drvdata(&edev->dev, data); } /* The EISA root device. 
There's rumours about machines with multiple diff --git a/include/linux/elevator.h b/include/linux/elevator.h index 7a204256b15..1cb3372e65d 100644 --- a/include/linux/elevator.h +++ b/include/linux/elevator.h @@ -103,10 +103,8 @@ extern int elv_merge(struct request_queue *, struct request **, struct bio *); extern void elv_merge_requests(struct request_queue *, struct request *, struct request *); extern void elv_merged_request(struct request_queue *, struct request *, int); -extern void elv_dequeue_request(struct request_queue *, struct request *); extern void elv_requeue_request(struct request_queue *, struct request *); extern int elv_queue_empty(struct request_queue *); -extern struct request *elv_next_request(struct request_queue *q); extern struct request *elv_former_request(struct request_queue *, struct request *); extern struct request *elv_latter_request(struct request_queue *, struct request *); extern int elv_register_queue(struct request_queue *q); @@ -116,6 +114,7 @@ extern void elv_abort_queue(struct request_queue *); extern void elv_completed_request(struct request_queue *, struct request *); extern int elv_set_request(struct request_queue *, struct request *, gfp_t); extern void elv_put_request(struct request_queue *, struct request *); +extern void elv_drain_elevator(struct request_queue *); /* * io scheduler registration @@ -170,7 +169,7 @@ enum { ELV_MQUEUE_MUST, }; -#define rq_end_sector(rq) ((rq)->sector + (rq)->nr_sectors) +#define rq_end_sector(rq) (blk_rq_pos(rq) + blk_rq_sectors(rq)) #define rb_entry_rq(node) rb_entry((node), struct request, rb_node) /* diff --git a/include/linux/elf-fdpic.h b/include/linux/elf-fdpic.h index 9f5b7456bff..7cd2e80cebc 100644 --- a/include/linux/elf-fdpic.h +++ b/include/linux/elf-fdpic.h @@ -58,11 +58,13 @@ struct elf_fdpic_params { #define ELF_FDPIC_FLAG_PRESENT 0x80000000 /* T if this object is present */ }; +#ifdef __KERNEL__ #ifdef CONFIG_MMU extern void elf_fdpic_arch_lay_out_mm(struct elf_fdpic_params *exec_params, struct elf_fdpic_params *interp_params, unsigned long *start_stack, unsigned long *start_brk); #endif +#endif /* __KERNEL__ */ #endif /* _LINUX_ELF_FDPIC_H */ diff --git a/include/linux/elf.h b/include/linux/elf.h index 0b61ca41a04..45a937be6d3 100644 --- a/include/linux/elf.h +++ b/include/linux/elf.h @@ -377,6 +377,7 @@ typedef struct elf64_note { Elf64_Word n_type; /* Content type */ } Elf64_Nhdr; +#ifdef __KERNEL__ #if ELF_CLASS == ELFCLASS32 extern Elf32_Dyn _DYNAMIC []; @@ -404,5 +405,5 @@ static inline int elf_coredump_extra_notes_write(struct file *file, extern int elf_coredump_extra_notes_size(void); extern int elf_coredump_extra_notes_write(struct file *file, loff_t *foffset); #endif - +#endif /* __KERNEL__ */ #endif /* _LINUX_ELF_H */ diff --git a/include/linux/elfcore.h b/include/linux/elfcore.h index 5ca54d77079..00d6a68d042 100644 --- a/include/linux/elfcore.h +++ b/include/linux/elfcore.h @@ -111,11 +111,21 @@ static inline void elf_core_copy_regs(elf_gregset_t *elfregs, struct pt_regs *re #endif } +static inline void elf_core_copy_kernel_regs(elf_gregset_t *elfregs, struct pt_regs *regs) +{ +#ifdef ELF_CORE_COPY_KERNEL_REGS + ELF_CORE_COPY_KERNEL_REGS((*elfregs), regs); +#else + elf_core_copy_regs(elfregs, regs); +#endif +} + static inline int elf_core_copy_task_regs(struct task_struct *t, elf_gregset_t* elfregs) { -#ifdef ELF_CORE_COPY_TASK_REGS - +#if defined (ELF_CORE_COPY_TASK_REGS) return ELF_CORE_COPY_TASK_REGS(t, elfregs); +#elif defined (task_pt_regs) + elf_core_copy_regs(elfregs, 
task_pt_regs(t)); #endif return 0; } diff --git a/include/linux/errqueue.h b/include/linux/errqueue.h index 92f8d4fab32..ec12cc74366 100644 --- a/include/linux/errqueue.h +++ b/include/linux/errqueue.h @@ -1,6 +1,8 @@ #ifndef _LINUX_ERRQUEUE_H #define _LINUX_ERRQUEUE_H 1 +#include <linux/types.h> + struct sock_extended_err { __u32 ee_errno; @@ -16,6 +18,7 @@ struct sock_extended_err #define SO_EE_ORIGIN_LOCAL 1 #define SO_EE_ORIGIN_ICMP 2 #define SO_EE_ORIGIN_ICMP6 3 +#define SO_EE_ORIGIN_TIMESTAMPING 4 #define SO_EE_OFFENDER(ee) ((struct sockaddr*)((ee)+1)) diff --git a/include/linux/etherdevice.h b/include/linux/etherdevice.h index 1cb0f0b9092..3d7a6687d24 100644 --- a/include/linux/etherdevice.h +++ b/include/linux/etherdevice.h @@ -182,6 +182,54 @@ static inline unsigned compare_ether_addr_64bits(const u8 addr1[6+2], return compare_ether_addr(addr1, addr2); #endif } + +/** + * is_etherdev_addr - Tell if given Ethernet address belongs to the device. + * @dev: Pointer to a device structure + * @addr: Pointer to a six-byte array containing the Ethernet address + * + * Compare passed address with all addresses of the device. Return true if the + * address if one of the device addresses. + * + * Note that this function calls compare_ether_addr_64bits() so take care of + * the right padding. + */ +static inline bool is_etherdev_addr(const struct net_device *dev, + const u8 addr[6 + 2]) +{ + struct netdev_hw_addr *ha; + int res = 1; + + rcu_read_lock(); + for_each_dev_addr(dev, ha) { + res = compare_ether_addr_64bits(addr, ha->addr); + if (!res) + break; + } + rcu_read_unlock(); + return !res; +} #endif /* __KERNEL__ */ +/** + * compare_ether_header - Compare two Ethernet headers + * @a: Pointer to Ethernet header + * @b: Pointer to Ethernet header + * + * Compare two ethernet headers, returns 0 if equal. + * This assumes that the network header (i.e., IP header) is 4-byte + * aligned OR the platform can handle unaligned access. This is the + * case for all packets coming into netif_receive_skb or similar + * entry points. + */ + +static inline int compare_ether_header(const void *a, const void *b) +{ + u32 *a32 = (u32 *)((u8 *)a + 2); + u32 *b32 = (u32 *)((u8 *)b + 2); + + return (*(u16 *)a ^ *(u16 *)b) | (a32[0] ^ b32[0]) | + (a32[1] ^ b32[1]) | (a32[2] ^ b32[2]); +} + #endif /* _LINUX_ETHERDEVICE_H */ diff --git a/include/linux/ethtool.h b/include/linux/ethtool.h index 27c67a54223..15e4eb71369 100644 --- a/include/linux/ethtool.h +++ b/include/linux/ethtool.h @@ -7,6 +7,7 @@ * Portions Copyright 2002 Intel (eli.kupermann@intel.com, * christopher.leech@intel.com, * scott.feldman@intel.com) + * Portions Copyright (C) Sun Microsystems 2008 */ #ifndef _LINUX_ETHTOOL_H @@ -25,11 +26,14 @@ struct ethtool_cmd { __u8 phy_address; __u8 transceiver; /* Which transceiver to use */ __u8 autoneg; /* Enable or disable autonegotiation */ + __u8 mdio_support; __u32 maxtxpkt; /* Tx pkts before generating tx int */ __u32 maxrxpkt; /* Rx pkts before generating rx int */ __u16 speed_hi; - __u16 reserved2; - __u32 reserved[3]; + __u8 eth_tp_mdix; + __u8 reserved2; + __u32 lp_advertising; /* Features the link partner advertises */ + __u32 reserved[2]; }; static inline void ethtool_cmd_speed_set(struct ethtool_cmd *ep, @@ -287,10 +291,87 @@ enum ethtool_flags { ETH_FLAG_LRO = (1 << 15), /* LRO is enabled */ }; -struct ethtool_rxnfc { - __u32 cmd; +/* The following structures are for supporting RX network flow + * classification configuration. 
Note, all multibyte fields, e.g., + * ip4src, ip4dst, psrc, pdst, spi, etc. are expected to be in network + * byte order. + */ +struct ethtool_tcpip4_spec { + __be32 ip4src; + __be32 ip4dst; + __be16 psrc; + __be16 pdst; + __u8 tos; +}; + +struct ethtool_ah_espip4_spec { + __be32 ip4src; + __be32 ip4dst; + __be32 spi; + __u8 tos; +}; + +struct ethtool_rawip4_spec { + __be32 ip4src; + __be32 ip4dst; + __u8 hdata[64]; +}; + +struct ethtool_ether_spec { + __be16 ether_type; + __u8 frame_size; + __u8 eframe[16]; +}; + +#define ETH_RX_NFC_IP4 1 +#define ETH_RX_NFC_IP6 2 + +struct ethtool_usrip4_spec { + __be32 ip4src; + __be32 ip4dst; + __be32 l4_4_bytes; + __u8 tos; + __u8 ip_ver; + __u8 proto; +}; + +struct ethtool_rx_flow_spec { __u32 flow_type; - __u64 data; + union { + struct ethtool_tcpip4_spec tcp_ip4_spec; + struct ethtool_tcpip4_spec udp_ip4_spec; + struct ethtool_tcpip4_spec sctp_ip4_spec; + struct ethtool_ah_espip4_spec ah_ip4_spec; + struct ethtool_ah_espip4_spec esp_ip4_spec; + struct ethtool_rawip4_spec raw_ip4_spec; + struct ethtool_ether_spec ether_spec; + struct ethtool_usrip4_spec usr_ip4_spec; + __u8 hdata[64]; + } h_u, m_u; /* entry, mask */ + __u64 ring_cookie; + __u32 location; +}; + +struct ethtool_rxnfc { + __u32 cmd; + __u32 flow_type; + /* The rx flow hash value or the rule DB size */ + __u64 data; + struct ethtool_rx_flow_spec fs; + __u32 rule_cnt; + __u32 rule_locs[0]; +}; + +#define ETHTOOL_FLASH_MAX_FILENAME 128 +enum ethtool_flash_op_type { + ETHTOOL_FLASH_ALL_REGIONS = 0, +}; + +/* for passing firmware flashing related parameters */ +struct ethtool_flash { + __u32 cmd; + __u32 region; + char data[ETHTOOL_FLASH_MAX_FILENAME]; }; #ifdef __KERNEL__ @@ -299,6 +380,7 @@ struct net_device; /* Some generic methods drivers may use in their ethtool_ops */ u32 ethtool_op_get_link(struct net_device *dev); +u32 ethtool_op_get_rx_csum(struct net_device *dev); u32 ethtool_op_get_tx_csum(struct net_device *dev); int ethtool_op_set_tx_csum(struct net_device *dev, u32 data); int ethtool_op_set_tx_hw_csum(struct net_device *dev, u32 data); @@ -417,8 +499,9 @@ struct ethtool_ops { /* the following hooks are obsolete */ int (*self_test_count)(struct net_device *);/* use get_sset_count */ int (*get_stats_count)(struct net_device *);/* use get_sset_count */ - int (*get_rxhash)(struct net_device *, struct ethtool_rxnfc *); - int (*set_rxhash)(struct net_device *, struct ethtool_rxnfc *); + int (*get_rxnfc)(struct net_device *, struct ethtool_rxnfc *, void *); + int (*set_rxnfc)(struct net_device *, struct ethtool_rxnfc *); + int (*flash_device)(struct net_device *, struct ethtool_flash *); }; #endif /* __KERNEL__ */ @@ -469,6 +552,13 @@ struct ethtool_ops { #define ETHTOOL_SRXFH 0x0000002a /* Set RX flow hash configuration */ #define ETHTOOL_GGRO 0x0000002b /* Get GRO enable (ethtool_value) */ #define ETHTOOL_SGRO 0x0000002c /* Set GRO enable (ethtool_value) */ +#define ETHTOOL_GRXRINGS 0x0000002d /* Get RX rings available for LB */ +#define ETHTOOL_GRXCLSRLCNT 0x0000002e /* Get RX class rule count */ +#define ETHTOOL_GRXCLSRULE 0x0000002f /* Get RX classification rule */ +#define ETHTOOL_GRXCLSRLALL 0x00000030 /* Get all RX classification rule */ +#define ETHTOOL_SRXCLSRLDEL 0x00000031 /* Delete RX classification rule */ +#define ETHTOOL_SRXCLSRLINS 0x00000032 /* Insert RX classification rule */ +#define ETHTOOL_FLASHDEV 0x00000033 /* Flash firmware to device */ /* compatibility with older code */ #define SPARC_ETH_GSET ETHTOOL_GSET @@ -491,6 +581,11 @@ struct ethtool_ops { #define 
SUPPORTED_Pause (1 << 13) #define SUPPORTED_Asym_Pause (1 << 14) #define SUPPORTED_2500baseX_Full (1 << 15) +#define SUPPORTED_Backplane (1 << 16) +#define SUPPORTED_1000baseKX_Full (1 << 17) +#define SUPPORTED_10000baseKX4_Full (1 << 18) +#define SUPPORTED_10000baseKR_Full (1 << 19) +#define SUPPORTED_10000baseR_FEC (1 << 20) /* Indicates what features are advertised by the interface. */ #define ADVERTISED_10baseT_Half (1 << 0) @@ -509,6 +604,11 @@ struct ethtool_ops { #define ADVERTISED_Pause (1 << 13) #define ADVERTISED_Asym_Pause (1 << 14) #define ADVERTISED_2500baseX_Full (1 << 15) +#define ADVERTISED_Backplane (1 << 16) +#define ADVERTISED_1000baseKX_Full (1 << 17) +#define ADVERTISED_10000baseKX4_Full (1 << 18) +#define ADVERTISED_10000baseKR_Full (1 << 19) +#define ADVERTISED_10000baseR_FEC (1 << 20) /* The following are all involved in forcing a particular link * mode for the device for setting things. When getting the @@ -533,6 +633,7 @@ struct ethtool_ops { #define PORT_MII 0x02 #define PORT_FIBRE 0x03 #define PORT_BNC 0x04 +#define PORT_OTHER 0xff /* Which transceiver to use. */ #define XCVR_INTERNAL 0x00 @@ -547,6 +648,11 @@ struct ethtool_ops { #define AUTONEG_DISABLE 0x00 #define AUTONEG_ENABLE 0x01 +/* Mode MDI or MDI-X */ +#define ETH_TP_MDI_INVALID 0x00 +#define ETH_TP_MDI 0x01 +#define ETH_TP_MDI_X 0x02 + /* Wake-On-Lan options. */ #define WAKE_PHY (1 << 0) #define WAKE_UCAST (1 << 1) @@ -565,9 +671,13 @@ struct ethtool_ops { #define UDP_V6_FLOW 0x06 #define SCTP_V6_FLOW 0x07 #define AH_ESP_V6_FLOW 0x08 +#define AH_V4_FLOW 0x09 +#define ESP_V4_FLOW 0x0a +#define AH_V6_FLOW 0x0b +#define ESP_V6_FLOW 0x0c +#define IP_USER_FLOW 0x0d /* L3-L4 network traffic flow hash options */ -#define RXH_DEV_PORT (1 << 0) #define RXH_L2DA (1 << 1) #define RXH_VLAN (1 << 2) #define RXH_L3_PROTO (1 << 3) @@ -577,5 +687,6 @@ struct ethtool_ops { #define RXH_L4_B_2_3 (1 << 7) /* dst port in case of TCP/UDP/SCTP */ #define RXH_DISCARD (1 << 31) +#define RX_CLS_FLOW_DISC 0xffffffffffffffffULL #endif /* _LINUX_ETHTOOL_H */ diff --git a/include/linux/eventfd.h b/include/linux/eventfd.h index a667637b54e..3b85ba6479f 100644 --- a/include/linux/eventfd.h +++ b/include/linux/eventfd.h @@ -8,25 +8,54 @@ #ifndef _LINUX_EVENTFD_H #define _LINUX_EVENTFD_H -#ifdef CONFIG_EVENTFD - -/* For O_CLOEXEC and O_NONBLOCK */ #include <linux/fcntl.h> +#include <linux/file.h> -/* Flags for eventfd2. */ +/* + * CAREFUL: Check include/asm-generic/fcntl.h when defining + * new flags, since they might collide with O_* ones. We want + * to re-use O_* flags that couldn't possibly have a meaning + * from eventfd, in order to leave a free define-space for + * shared O_* flags. 
+ */ +#define EFD_SEMAPHORE (1 << 0) #define EFD_CLOEXEC O_CLOEXEC #define EFD_NONBLOCK O_NONBLOCK +#define EFD_SHARED_FCNTL_FLAGS (O_CLOEXEC | O_NONBLOCK) +#define EFD_FLAGS_SET (EFD_SHARED_FCNTL_FLAGS | EFD_SEMAPHORE) + +#ifdef CONFIG_EVENTFD + +struct eventfd_ctx *eventfd_ctx_get(struct eventfd_ctx *ctx); +void eventfd_ctx_put(struct eventfd_ctx *ctx); struct file *eventfd_fget(int fd); -int eventfd_signal(struct file *file, int n); +struct eventfd_ctx *eventfd_ctx_fdget(int fd); +struct eventfd_ctx *eventfd_ctx_fileget(struct file *file); +int eventfd_signal(struct eventfd_ctx *ctx, int n); #else /* CONFIG_EVENTFD */ -#define eventfd_fget(fd) ERR_PTR(-ENOSYS) -static inline int eventfd_signal(struct file *file, int n) -{ return 0; } +/* + * Ugly ugly ugly error layer to support modules that uses eventfd but + * pretend to work in !CONFIG_EVENTFD configurations. Namely, AIO. + */ +static inline struct eventfd_ctx *eventfd_ctx_fdget(int fd) +{ + return ERR_PTR(-ENOSYS); +} + +static inline int eventfd_signal(struct eventfd_ctx *ctx, int n) +{ + return -ENOSYS; +} + +static inline void eventfd_ctx_put(struct eventfd_ctx *ctx) +{ + +} -#endif /* CONFIG_EVENTFD */ +#endif #endif /* _LINUX_EVENTFD_H */ diff --git a/include/linux/eventpoll.h b/include/linux/eventpoll.h index f1e1d3c4712..f6856a5a1d4 100644 --- a/include/linux/eventpoll.h +++ b/include/linux/eventpoll.h @@ -61,7 +61,6 @@ struct file; static inline void eventpoll_init_file(struct file *file) { INIT_LIST_HEAD(&file->f_ep_links); - spin_lock_init(&file->f_ep_lock); } diff --git a/include/linux/ext3_fs.h b/include/linux/ext3_fs.h index dd495b8c309..7499b366779 100644 --- a/include/linux/ext3_fs.h +++ b/include/linux/ext3_fs.h @@ -208,6 +208,7 @@ static inline __u32 ext3_mask_flags(umode_t mode, __u32 flags) #define EXT3_STATE_JDATA 0x00000001 /* journaled data exists */ #define EXT3_STATE_NEW 0x00000002 /* inode is newly created */ #define EXT3_STATE_XATTR 0x00000004 /* has in-inode xattrs */ +#define EXT3_STATE_FLUSH_ON_CLOSE 0x00000008 /* Used to pass group descriptor data when online resize is done */ struct ext3_new_group_input { @@ -873,7 +874,7 @@ struct buffer_head * ext3_getblk (handle_t *, struct inode *, long, int, int *); struct buffer_head * ext3_bread (handle_t *, struct inode *, int, int, int *); int ext3_get_blocks_handle(handle_t *handle, struct inode *inode, sector_t iblock, unsigned long maxblocks, struct buffer_head *bh_result, - int create, int extend_disksize); + int create); extern struct inode *ext3_iget(struct super_block *, unsigned long); extern int ext3_write_inode (struct inode *, int); @@ -893,9 +894,8 @@ extern int ext3_fiemap(struct inode *inode, struct fiemap_extent_info *fieinfo, u64 start, u64 len); /* ioctl.c */ -extern int ext3_ioctl (struct inode *, struct file *, unsigned int, - unsigned long); -extern long ext3_compat_ioctl (struct file *, unsigned int, unsigned long); +extern long ext3_ioctl(struct file *, unsigned int, unsigned long); +extern long ext3_compat_ioctl(struct file *, unsigned int, unsigned long); /* namei.c */ extern int ext3_orphan_add(handle_t *, struct inode *); diff --git a/include/linux/ext3_fs_i.h b/include/linux/ext3_fs_i.h index 7894dd0f3b7..ca1bfe90004 100644 --- a/include/linux/ext3_fs_i.h +++ b/include/linux/ext3_fs_i.h @@ -103,10 +103,6 @@ struct ext3_inode_info { */ struct rw_semaphore xattr_sem; #endif -#ifdef CONFIG_EXT3_FS_POSIX_ACL - struct posix_acl *i_acl; - struct posix_acl *i_default_acl; -#endif struct list_head i_orphan; /* unlinked but open inodes */ 
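[Editor's note -- an illustrative sketch, not part of the patch above.] The ethtool.h hunk earlier in this patch adds the RX network flow classification ABI: struct ethtool_rx_flow_spec embedded in an extended struct ethtool_rxnfc, driven by the new ETHTOOL_GRXRINGS, ETHTOOL_GRXCLSRLCNT and ETHTOOL_SRXCLSRLINS family of commands. A minimal userspace sketch of how the extended structure is handed to a driver follows; SIOCETHTOOL and struct ifreq come from linux/sockios.h and net/if.h rather than from this header, and the interface name "eth0" is a placeholder.

#include <stdio.h>
#include <string.h>
#include <sys/ioctl.h>
#include <sys/socket.h>
#include <net/if.h>
#include <linux/sockios.h>
#include <linux/ethtool.h>

int main(void)
{
	struct ethtool_rxnfc nfc;
	struct ifreq ifr;
	int fd = socket(AF_INET, SOCK_DGRAM, 0);

	if (fd < 0)
		return 1;

	/* Ask the driver how many RX rings classification rules may steer to */
	memset(&nfc, 0, sizeof(nfc));
	nfc.cmd = ETHTOOL_GRXRINGS;

	memset(&ifr, 0, sizeof(ifr));
	strncpy(ifr.ifr_name, "eth0", IFNAMSIZ - 1);	/* placeholder NIC name */
	ifr.ifr_data = (char *)&nfc;

	if (ioctl(fd, SIOCETHTOOL, &ifr) < 0) {
		perror("ETHTOOL_GRXRINGS");
		return 1;
	}

	/* For ETHTOOL_GRXRINGS the driver reports the ring count in .data */
	printf("%s: %llu RX rings\n", ifr.ifr_name, (unsigned long long)nfc.data);
	return 0;
}

Inserting an actual rule works the same way with cmd = ETHTOOL_SRXCLSRLINS and the match filled into nfc.fs (multibyte match fields in network byte order, per the comment in the hunk), with nfc.fs.ring_cookie selecting the target ring or RX_CLS_FLOW_DISC to drop the flow.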
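[Editor's note -- an illustrative sketch, not part of the patch above.] The eventfd.h hunk just above replaces the old file-based eventfd_signal() with a reference-counted struct eventfd_ctx. A minimal in-kernel usage pattern under the new API (the example_* names and the per-device pointer are hypothetical; error handling is kept to the minimum):

#include <linux/eventfd.h>
#include <linux/err.h>

static struct eventfd_ctx *example_trigger;	/* hypothetical per-device context */

/* Called with an eventfd file descriptor handed in by userspace */
static int example_set_trigger(int fd)
{
	struct eventfd_ctx *ctx;

	ctx = eventfd_ctx_fdget(fd);		/* takes a reference on the context */
	if (IS_ERR(ctx))
		return PTR_ERR(ctx);
	example_trigger = ctx;
	return 0;
}

/* Called from the driver when the event of interest happens */
static void example_notify(void)
{
	if (example_trigger)
		eventfd_signal(example_trigger, 1);	/* bump the counter, wake readers */
}

/* Called on teardown */
static void example_clear_trigger(void)
{
	if (example_trigger) {
		eventfd_ctx_put(example_trigger);	/* drop the reference taken above */
		example_trigger = NULL;
	}
}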
diff --git a/include/linux/falloc.h b/include/linux/falloc.h index 8e912ab6a07..3c155107d61 100644 --- a/include/linux/falloc.h +++ b/include/linux/falloc.h @@ -3,4 +3,25 @@ #define FALLOC_FL_KEEP_SIZE 0x01 /* default is extend size */ +#ifdef __KERNEL__ + +/* + * Space reservation ioctls and argument structure + * are designed to be compatible with the legacy XFS ioctls. + */ +struct space_resv { + __s16 l_type; + __s16 l_whence; + __s64 l_start; + __s64 l_len; /* len == 0 means until end of file */ + __s32 l_sysid; + __u32 l_pid; + __s32 l_pad[4]; /* reserved area */ +}; + +#define FS_IOC_RESVSP _IOW('X', 40, struct space_resv) +#define FS_IOC_RESVSP64 _IOW('X', 42, struct space_resv) + +#endif /* __KERNEL__ */ + #endif /* _FALLOC_H_ */ diff --git a/include/linux/fb.h b/include/linux/fb.h index 818fe21257e..f847df9e99b 100644 --- a/include/linux/fb.h +++ b/include/linux/fb.h @@ -123,6 +123,7 @@ struct dentry; #define FB_ACCEL_TRIDENT_3DIMAGE 51 /* Trident 3DImage */ #define FB_ACCEL_TRIDENT_BLADE3D 52 /* Trident Blade3D */ #define FB_ACCEL_TRIDENT_BLADEXP 53 /* Trident BladeXP */ +#define FB_ACCEL_CIRRUS_ALPINE 53 /* Cirrus Logic 543x/544x/5480 */ #define FB_ACCEL_NEOMAGIC_NM2070 90 /* NeoMagic NM2070 */ #define FB_ACCEL_NEOMAGIC_NM2090 91 /* NeoMagic NM2090 */ #define FB_ACCEL_NEOMAGIC_NM2093 92 /* NeoMagic NM2093 */ @@ -172,8 +173,12 @@ struct fb_fix_screeninfo { /* Interpretation of offset for color fields: All offsets are from the right, * inside a "pixel" value, which is exactly 'bits_per_pixel' wide (means: you * can use the offset as right argument to <<). A pixel afterwards is a bit - * stream and is written to video memory as that unmodified. This implies - * big-endian byte order if bits_per_pixel is greater than 8. + * stream and is written to video memory as that unmodified. + * + * For pseudocolor: offset and length should be the same for all color + * components. Offset specifies the position of the least significant bit + * of the pallette index in a pixel value. Length indicates the number + * of available palette entries (i.e. # of entries = 1 << length). */ struct fb_bitfield { __u32 offset; /* beginning of bitfield */ @@ -672,6 +677,9 @@ struct fb_ops { /* get capability given var */ void (*fb_get_caps)(struct fb_info *info, struct fb_blit_caps *caps, struct fb_var_screeninfo *var); + + /* teardown any resources to do with this framebuffer */ + void (*fb_destroy)(struct fb_info *info); }; #ifdef CONFIG_FB_TILEBLITTING @@ -781,6 +789,8 @@ struct fb_tile_ops { #define FBINFO_MISC_USEREVENT 0x10000 /* event request from userspace */ #define FBINFO_MISC_TILEBLITTING 0x20000 /* use tile blitting */ +#define FBINFO_MISC_FIRMWARE 0x40000 /* a replaceable firmware + inited framebuffer */ /* A driver may set this flag to indicate that it does want a set_par to be * called every time when fbcon_switch is executed. 
The advantage is that with @@ -809,6 +819,7 @@ struct fb_info { int node; int flags; struct mutex lock; /* Lock for open/release/ioctl funcs */ + struct mutex mm_lock; /* Lock for fb_mmap and smem_* fields */ struct fb_var_screeninfo var; /* Current var */ struct fb_fix_screeninfo fix; /* Current fix */ struct fb_monspecs monspecs; /* Current Monitor specs */ @@ -849,7 +860,12 @@ struct fb_info { u32 state; /* Hardware state i.e suspend */ void *fbcon_par; /* fbcon use-only private area */ /* From here on everything is device dependent */ - void *par; + void *par; + /* we need the PCI or similiar aperture base/size not + smem_start/size as smem_start may just be an object + allocated inside the aperture so may not actually overlap */ + resource_size_t aperture_base; + resource_size_t aperture_size; }; #ifdef MODULE @@ -888,7 +904,7 @@ struct fb_info { #define fb_writeq sbus_writeq #define fb_memset sbus_memset_io -#elif defined(__i386__) || defined(__alpha__) || defined(__x86_64__) || defined(__hppa__) || defined(__sh__) || defined(__powerpc__) || defined(__avr32__) +#elif defined(__i386__) || defined(__alpha__) || defined(__x86_64__) || defined(__hppa__) || defined(__sh__) || defined(__powerpc__) || defined(__avr32__) || defined(__bfin__) #define fb_readb __raw_readb #define fb_readw __raw_readw @@ -960,6 +976,13 @@ extern struct fb_info *registered_fb[FB_MAX]; extern int num_registered_fb; extern struct class *fb_class; +extern int lock_fb_info(struct fb_info *info); + +static inline void unlock_fb_info(struct fb_info *info) +{ + mutex_unlock(&info->lock); +} + static inline void __fb_pad_aligned_buffer(u8 *dst, u32 d_pitch, u8 *src, u32 s_pitch, u32 height) { diff --git a/include/linux/fdtable.h b/include/linux/fdtable.h index 09d6c5bbddd..a2ec74bc481 100644 --- a/include/linux/fdtable.h +++ b/include/linux/fdtable.h @@ -5,12 +5,14 @@ #ifndef __LINUX_FDTABLE_H #define __LINUX_FDTABLE_H -#include <asm/atomic.h> #include <linux/posix_types.h> #include <linux/compiler.h> #include <linux/spinlock.h> #include <linux/rcupdate.h> #include <linux/types.h> +#include <linux/init.h> + +#include <asm/atomic.h> /* * The default fd array needs to be at least BITS_PER_LONG, diff --git a/include/linux/fiemap.h b/include/linux/fiemap.h index 671decbd2ae..934e22d6580 100644 --- a/include/linux/fiemap.h +++ b/include/linux/fiemap.h @@ -11,6 +11,8 @@ #ifndef _LINUX_FIEMAP_H #define _LINUX_FIEMAP_H +#include <linux/types.h> + struct fiemap_extent { __u64 fe_logical; /* logical offset in bytes for the start of * the extent from the beginning of the file */ diff --git a/include/linux/fips.h b/include/linux/fips.h new file mode 100644 index 00000000000..f8fb07b0b6b --- /dev/null +++ b/include/linux/fips.h @@ -0,0 +1,10 @@ +#ifndef _FIPS_H +#define _FIPS_H + +#ifdef CONFIG_CRYPTO_FIPS +extern int fips_enabled; +#else +#define fips_enabled 0 +#endif + +#endif diff --git a/include/linux/firewire-cdev.h b/include/linux/firewire-cdev.h index 4d078e99c01..c6b3ca3af6d 100644 --- a/include/linux/firewire-cdev.h +++ b/include/linux/firewire-cdev.h @@ -25,10 +25,12 @@ #include <linux/types.h> #include <linux/firewire-constants.h> -#define FW_CDEV_EVENT_BUS_RESET 0x00 -#define FW_CDEV_EVENT_RESPONSE 0x01 -#define FW_CDEV_EVENT_REQUEST 0x02 -#define FW_CDEV_EVENT_ISO_INTERRUPT 0x03 +#define FW_CDEV_EVENT_BUS_RESET 0x00 +#define FW_CDEV_EVENT_RESPONSE 0x01 +#define FW_CDEV_EVENT_REQUEST 0x02 +#define FW_CDEV_EVENT_ISO_INTERRUPT 0x03 +#define FW_CDEV_EVENT_ISO_RESOURCE_ALLOCATED 0x04 +#define 
FW_CDEV_EVENT_ISO_RESOURCE_DEALLOCATED 0x05 /** * struct fw_cdev_event_common - Common part of all fw_cdev_event_ types @@ -136,7 +138,24 @@ struct fw_cdev_event_request { * This event is sent when the controller has completed an &fw_cdev_iso_packet * with the %FW_CDEV_ISO_INTERRUPT bit set. In the receive case, the headers * stripped of all packets up until and including the interrupt packet are - * returned in the @header field. + * returned in the @header field. The amount of header data per packet is as + * specified at iso context creation by &fw_cdev_create_iso_context.header_size. + * + * In version 1 of this ABI, header data consisted of the 1394 isochronous + * packet header, followed by quadlets from the packet payload if + * &fw_cdev_create_iso_context.header_size > 4. + * + * In version 2 of this ABI, header data consist of the 1394 isochronous + * packet header, followed by a timestamp quadlet if + * &fw_cdev_create_iso_context.header_size > 4, followed by quadlets from the + * packet payload if &fw_cdev_create_iso_context.header_size > 8. + * + * Behaviour of ver. 1 of this ABI is no longer available since ABI ver. 2. + * + * Format of 1394 iso packet header: 16 bits len, 2 bits tag, 6 bits channel, + * 4 bits tcode, 4 bits sy, in big endian byte order. Format of timestamp: + * 16 bits invalid, 3 bits cycleSeconds, 13 bits cycleCount, in big endian byte + * order. */ struct fw_cdev_event_iso_interrupt { __u64 closure; @@ -147,12 +166,44 @@ struct fw_cdev_event_iso_interrupt { }; /** + * struct fw_cdev_event_iso_resource - Iso resources were allocated or freed + * @closure: See &fw_cdev_event_common; + * set by %FW_CDEV_IOC_(DE)ALLOCATE_ISO_RESOURCE(_ONCE) ioctl + * @type: %FW_CDEV_EVENT_ISO_RESOURCE_ALLOCATED or + * %FW_CDEV_EVENT_ISO_RESOURCE_DEALLOCATED + * @handle: Reference by which an allocated resource can be deallocated + * @channel: Isochronous channel which was (de)allocated, if any + * @bandwidth: Bandwidth allocation units which were (de)allocated, if any + * + * An %FW_CDEV_EVENT_ISO_RESOURCE_ALLOCATED event is sent after an isochronous + * resource was allocated at the IRM. The client has to check @channel and + * @bandwidth for whether the allocation actually succeeded. + * + * An %FW_CDEV_EVENT_ISO_RESOURCE_DEALLOCATED event is sent after an isochronous + * resource was deallocated at the IRM. It is also sent when automatic + * reallocation after a bus reset failed. + * + * @channel is <0 if no channel was (de)allocated or if reallocation failed. + * @bandwidth is 0 if no bandwidth was (de)allocated or if reallocation failed. + */ +struct fw_cdev_event_iso_resource { + __u64 closure; + __u32 type; + __u32 handle; + __s32 channel; + __s32 bandwidth; +}; + +/** * union fw_cdev_event - Convenience union of fw_cdev_event_ types * @common: Valid for all types * @bus_reset: Valid if @common.type == %FW_CDEV_EVENT_BUS_RESET * @response: Valid if @common.type == %FW_CDEV_EVENT_RESPONSE * @request: Valid if @common.type == %FW_CDEV_EVENT_REQUEST * @iso_interrupt: Valid if @common.type == %FW_CDEV_EVENT_ISO_INTERRUPT + * @iso_resource: Valid if @common.type == + * %FW_CDEV_EVENT_ISO_RESOURCE_ALLOCATED or + * %FW_CDEV_EVENT_ISO_RESOURCE_DEALLOCATED * * Convenience union for userspace use. Events could be read(2) into an * appropriately aligned char buffer and then cast to this union for further @@ -163,33 +214,47 @@ struct fw_cdev_event_iso_interrupt { * not fit will be discarded so that the next read(2) will return a new event. 
*/ union fw_cdev_event { - struct fw_cdev_event_common common; - struct fw_cdev_event_bus_reset bus_reset; - struct fw_cdev_event_response response; - struct fw_cdev_event_request request; - struct fw_cdev_event_iso_interrupt iso_interrupt; + struct fw_cdev_event_common common; + struct fw_cdev_event_bus_reset bus_reset; + struct fw_cdev_event_response response; + struct fw_cdev_event_request request; + struct fw_cdev_event_iso_interrupt iso_interrupt; + struct fw_cdev_event_iso_resource iso_resource; }; -#define FW_CDEV_IOC_GET_INFO _IOWR('#', 0x00, struct fw_cdev_get_info) -#define FW_CDEV_IOC_SEND_REQUEST _IOW('#', 0x01, struct fw_cdev_send_request) -#define FW_CDEV_IOC_ALLOCATE _IOWR('#', 0x02, struct fw_cdev_allocate) -#define FW_CDEV_IOC_DEALLOCATE _IOW('#', 0x03, struct fw_cdev_deallocate) -#define FW_CDEV_IOC_SEND_RESPONSE _IOW('#', 0x04, struct fw_cdev_send_response) -#define FW_CDEV_IOC_INITIATE_BUS_RESET _IOW('#', 0x05, struct fw_cdev_initiate_bus_reset) -#define FW_CDEV_IOC_ADD_DESCRIPTOR _IOWR('#', 0x06, struct fw_cdev_add_descriptor) -#define FW_CDEV_IOC_REMOVE_DESCRIPTOR _IOW('#', 0x07, struct fw_cdev_remove_descriptor) +/* available since kernel version 2.6.22 */ +#define FW_CDEV_IOC_GET_INFO _IOWR('#', 0x00, struct fw_cdev_get_info) +#define FW_CDEV_IOC_SEND_REQUEST _IOW('#', 0x01, struct fw_cdev_send_request) +#define FW_CDEV_IOC_ALLOCATE _IOWR('#', 0x02, struct fw_cdev_allocate) +#define FW_CDEV_IOC_DEALLOCATE _IOW('#', 0x03, struct fw_cdev_deallocate) +#define FW_CDEV_IOC_SEND_RESPONSE _IOW('#', 0x04, struct fw_cdev_send_response) +#define FW_CDEV_IOC_INITIATE_BUS_RESET _IOW('#', 0x05, struct fw_cdev_initiate_bus_reset) +#define FW_CDEV_IOC_ADD_DESCRIPTOR _IOWR('#', 0x06, struct fw_cdev_add_descriptor) +#define FW_CDEV_IOC_REMOVE_DESCRIPTOR _IOW('#', 0x07, struct fw_cdev_remove_descriptor) +#define FW_CDEV_IOC_CREATE_ISO_CONTEXT _IOWR('#', 0x08, struct fw_cdev_create_iso_context) +#define FW_CDEV_IOC_QUEUE_ISO _IOWR('#', 0x09, struct fw_cdev_queue_iso) +#define FW_CDEV_IOC_START_ISO _IOW('#', 0x0a, struct fw_cdev_start_iso) +#define FW_CDEV_IOC_STOP_ISO _IOW('#', 0x0b, struct fw_cdev_stop_iso) -#define FW_CDEV_IOC_CREATE_ISO_CONTEXT _IOWR('#', 0x08, struct fw_cdev_create_iso_context) -#define FW_CDEV_IOC_QUEUE_ISO _IOWR('#', 0x09, struct fw_cdev_queue_iso) -#define FW_CDEV_IOC_START_ISO _IOW('#', 0x0a, struct fw_cdev_start_iso) -#define FW_CDEV_IOC_STOP_ISO _IOW('#', 0x0b, struct fw_cdev_stop_iso) -#define FW_CDEV_IOC_GET_CYCLE_TIMER _IOR('#', 0x0c, struct fw_cdev_get_cycle_timer) +/* available since kernel version 2.6.24 */ +#define FW_CDEV_IOC_GET_CYCLE_TIMER _IOR('#', 0x0c, struct fw_cdev_get_cycle_timer) -/* FW_CDEV_VERSION History - * - * 1 Feb 18, 2007: Initial version. 
+/* available since kernel version 2.6.30 */ +#define FW_CDEV_IOC_ALLOCATE_ISO_RESOURCE _IOWR('#', 0x0d, struct fw_cdev_allocate_iso_resource) +#define FW_CDEV_IOC_DEALLOCATE_ISO_RESOURCE _IOW('#', 0x0e, struct fw_cdev_deallocate) +#define FW_CDEV_IOC_ALLOCATE_ISO_RESOURCE_ONCE _IOW('#', 0x0f, struct fw_cdev_allocate_iso_resource) +#define FW_CDEV_IOC_DEALLOCATE_ISO_RESOURCE_ONCE _IOW('#', 0x10, struct fw_cdev_allocate_iso_resource) +#define FW_CDEV_IOC_GET_SPEED _IO('#', 0x11) /* returns speed code */ +#define FW_CDEV_IOC_SEND_BROADCAST_REQUEST _IOW('#', 0x12, struct fw_cdev_send_request) +#define FW_CDEV_IOC_SEND_STREAM_PACKET _IOW('#', 0x13, struct fw_cdev_send_stream_packet) + +/* + * FW_CDEV_VERSION History + * 1 (2.6.22) - initial version + * 2 (2.6.30) - changed &fw_cdev_event_iso_interrupt.header if + * &fw_cdev_create_iso_context.header_size is 8 or more */ -#define FW_CDEV_VERSION 1 +#define FW_CDEV_VERSION 2 /** * struct fw_cdev_get_info - General purpose information ioctl @@ -201,7 +266,7 @@ union fw_cdev_event { * case, @rom_length is updated with the actual length of the * configuration ROM. * @rom: If non-zero, address of a buffer to be filled by a copy of the - * local node's configuration ROM + * device's configuration ROM * @bus_reset: If non-zero, address of a buffer to be filled by a * &struct fw_cdev_event_bus_reset with the current state * of the bus. This does not cause a bus reset to happen. @@ -229,7 +294,7 @@ struct fw_cdev_get_info { * Send a request to the device. This ioctl implements all outgoing requests. * Both quadlet and block request specify the payload as a pointer to the data * in the @data field. Once the transaction completes, the kernel writes an - * &fw_cdev_event_request event back. The @closure field is passed back to + * &fw_cdev_event_response event back. The @closure field is passed back to * user space in the response event. */ struct fw_cdev_send_request { @@ -284,9 +349,9 @@ struct fw_cdev_allocate { }; /** - * struct fw_cdev_deallocate - Free an address range allocation - * @handle: Handle to the address range, as returned by the kernel when the - * range was allocated + * struct fw_cdev_deallocate - Free a CSR address range or isochronous resource + * @handle: Handle to the address range or iso resource, as returned by the + * kernel when the range or resource was allocated */ struct fw_cdev_deallocate { __u32 handle; @@ -329,6 +394,9 @@ struct fw_cdev_initiate_bus_reset { * If successful, the kernel adds the descriptor and writes back a handle to the * kernel-side object to be used for later removal of the descriptor block and * immediate key. + * + * This ioctl affects the configuration ROMs of all local nodes. + * The ioctl only succeeds on device files which represent a local node. */ struct fw_cdev_add_descriptor { __u32 immediate; @@ -344,7 +412,7 @@ struct fw_cdev_add_descriptor { * descriptor was added * * Remove a descriptor block and accompanying immediate key from the local - * node's configuration ROM. + * nodes' configuration ROMs. */ struct fw_cdev_remove_descriptor { __u32 handle; @@ -370,6 +438,9 @@ struct fw_cdev_remove_descriptor { * * If a context was successfully created, the kernel writes back a handle to the * context, which must be passed in for subsequent operations on that context. + * + * Note that the effect of a @header_size > 4 depends on + * &fw_cdev_get_info.version, as documented at &fw_cdev_event_iso_interrupt. 
*/ struct fw_cdev_create_iso_context { __u32 type; @@ -473,10 +544,91 @@ struct fw_cdev_stop_iso { * The %FW_CDEV_IOC_GET_CYCLE_TIMER ioctl reads the isochronous cycle timer * and also the system clock. This allows to express the receive time of an * isochronous packet as a system time with microsecond accuracy. + * + * @cycle_timer consists of 7 bits cycleSeconds, 13 bits cycleCount, and + * 12 bits cycleOffset, in host byte order. */ struct fw_cdev_get_cycle_timer { __u64 local_time; __u32 cycle_timer; }; +/** + * struct fw_cdev_allocate_iso_resource - (De)allocate a channel or bandwidth + * @closure: Passed back to userspace in correponding iso resource events + * @channels: Isochronous channels of which one is to be (de)allocated + * @bandwidth: Isochronous bandwidth units to be (de)allocated + * @handle: Handle to the allocation, written by the kernel (only valid in + * case of %FW_CDEV_IOC_ALLOCATE_ISO_RESOURCE ioctls) + * + * The %FW_CDEV_IOC_ALLOCATE_ISO_RESOURCE ioctl initiates allocation of an + * isochronous channel and/or of isochronous bandwidth at the isochronous + * resource manager (IRM). Only one of the channels specified in @channels is + * allocated. An %FW_CDEV_EVENT_ISO_RESOURCE_ALLOCATED is sent after + * communication with the IRM, indicating success or failure in the event data. + * The kernel will automatically reallocate the resources after bus resets. + * Should a reallocation fail, an %FW_CDEV_EVENT_ISO_RESOURCE_DEALLOCATED event + * will be sent. The kernel will also automatically deallocate the resources + * when the file descriptor is closed. + * + * The %FW_CDEV_IOC_DEALLOCATE_ISO_RESOURCE ioctl can be used to initiate + * deallocation of resources which were allocated as described above. + * An %FW_CDEV_EVENT_ISO_RESOURCE_DEALLOCATED event concludes this operation. + * + * The %FW_CDEV_IOC_ALLOCATE_ISO_RESOURCE_ONCE ioctl is a variant of allocation + * without automatic re- or deallocation. + * An %FW_CDEV_EVENT_ISO_RESOURCE_ALLOCATED event concludes this operation, + * indicating success or failure in its data. + * + * The %FW_CDEV_IOC_DEALLOCATE_ISO_RESOURCE_ONCE ioctl works like + * %FW_CDEV_IOC_ALLOCATE_ISO_RESOURCE_ONCE except that resources are freed + * instead of allocated. + * An %FW_CDEV_EVENT_ISO_RESOURCE_DEALLOCATED event concludes this operation. + * + * To summarize, %FW_CDEV_IOC_DEALLOCATE_ISO_RESOURCE allocates iso resources + * for the lifetime of the fd or handle. + * In contrast, %FW_CDEV_IOC_ALLOCATE_ISO_RESOURCE_ONCE allocates iso resources + * for the duration of a bus generation. + * + * @channels is a host-endian bitfield with the least significant bit + * representing channel 0 and the most significant bit representing channel 63: + * 1ULL << c for each channel c that is a candidate for (de)allocation. + * + * @bandwidth is expressed in bandwidth allocation units, i.e. the time to send + * one quadlet of data (payload or header data) at speed S1600. 
+ */ +struct fw_cdev_allocate_iso_resource { + __u64 closure; + __u64 channels; + __u32 bandwidth; + __u32 handle; +}; + +/** + * struct fw_cdev_send_stream_packet - send an asynchronous stream packet + * @length: Length of outgoing payload, in bytes + * @tag: Data format tag + * @channel: Isochronous channel to transmit to + * @sy: Synchronization code + * @closure: Passed back to userspace in the response event + * @data: Userspace pointer to payload + * @generation: The bus generation where packet is valid + * @speed: Speed to transmit at + * + * The %FW_CDEV_IOC_SEND_STREAM_PACKET ioctl sends an asynchronous stream packet + * to every device which is listening to the specified channel. The kernel + * writes an &fw_cdev_event_response event which indicates success or failure of + * the transmission. + */ +struct fw_cdev_send_stream_packet { + __u32 length; + __u32 tag; + __u32 channel; + __u32 sy; + __u64 closure; + __u64 data; + __u32 generation; + __u32 speed; +}; + #endif /* _LINUX_FIREWIRE_CDEV_H */ diff --git a/include/linux/firewire.h b/include/linux/firewire.h new file mode 100644 index 00000000000..192d1e43c43 --- /dev/null +++ b/include/linux/firewire.h @@ -0,0 +1,446 @@ +#ifndef _LINUX_FIREWIRE_H +#define _LINUX_FIREWIRE_H + +#include <linux/completion.h> +#include <linux/device.h> +#include <linux/dma-mapping.h> +#include <linux/kernel.h> +#include <linux/kref.h> +#include <linux/list.h> +#include <linux/mutex.h> +#include <linux/spinlock.h> +#include <linux/sysfs.h> +#include <linux/timer.h> +#include <linux/types.h> +#include <linux/workqueue.h> + +#include <asm/atomic.h> +#include <asm/byteorder.h> + +#define fw_notify(s, args...) printk(KERN_NOTICE KBUILD_MODNAME ": " s, ## args) +#define fw_error(s, args...) printk(KERN_ERR KBUILD_MODNAME ": " s, ## args) + +static inline void fw_memcpy_from_be32(void *_dst, void *_src, size_t size) +{ + u32 *dst = _dst; + __be32 *src = _src; + int i; + + for (i = 0; i < size / 4; i++) + dst[i] = be32_to_cpu(src[i]); +} + +static inline void fw_memcpy_to_be32(void *_dst, void *_src, size_t size) +{ + fw_memcpy_from_be32(_dst, _src, size); +} +#define CSR_REGISTER_BASE 0xfffff0000000ULL + +/* register offsets are relative to CSR_REGISTER_BASE */ +#define CSR_STATE_CLEAR 0x0 +#define CSR_STATE_SET 0x4 +#define CSR_NODE_IDS 0x8 +#define CSR_RESET_START 0xc +#define CSR_SPLIT_TIMEOUT_HI 0x18 +#define CSR_SPLIT_TIMEOUT_LO 0x1c +#define CSR_CYCLE_TIME 0x200 +#define CSR_BUS_TIME 0x204 +#define CSR_BUSY_TIMEOUT 0x210 +#define CSR_BUS_MANAGER_ID 0x21c +#define CSR_BANDWIDTH_AVAILABLE 0x220 +#define CSR_CHANNELS_AVAILABLE 0x224 +#define CSR_CHANNELS_AVAILABLE_HI 0x224 +#define CSR_CHANNELS_AVAILABLE_LO 0x228 +#define CSR_BROADCAST_CHANNEL 0x234 +#define CSR_CONFIG_ROM 0x400 +#define CSR_CONFIG_ROM_END 0x800 +#define CSR_FCP_COMMAND 0xB00 +#define CSR_FCP_RESPONSE 0xD00 +#define CSR_FCP_END 0xF00 +#define CSR_TOPOLOGY_MAP 0x1000 +#define CSR_TOPOLOGY_MAP_END 0x1400 +#define CSR_SPEED_MAP 0x2000 +#define CSR_SPEED_MAP_END 0x3000 + +#define CSR_OFFSET 0x40 +#define CSR_LEAF 0x80 +#define CSR_DIRECTORY 0xc0 + +#define CSR_DESCRIPTOR 0x01 +#define CSR_VENDOR 0x03 +#define CSR_HARDWARE_VERSION 0x04 +#define CSR_NODE_CAPABILITIES 0x0c +#define CSR_UNIT 0x11 +#define CSR_SPECIFIER_ID 0x12 +#define CSR_VERSION 0x13 +#define CSR_DEPENDENT_INFO 0x14 +#define CSR_MODEL 0x17 +#define CSR_INSTANCE 0x18 +#define CSR_DIRECTORY_ID 0x20 + +struct fw_csr_iterator { + u32 *p; + u32 *end; +}; + +void fw_csr_iterator_init(struct fw_csr_iterator *ci, u32 *p); +int 
fw_csr_iterator_next(struct fw_csr_iterator *ci, int *key, int *value); + +extern struct bus_type fw_bus_type; + +struct fw_card_driver; +struct fw_node; + +struct fw_card { + const struct fw_card_driver *driver; + struct device *device; + struct kref kref; + struct completion done; + + int node_id; + int generation; + int current_tlabel; + u64 tlabel_mask; + struct list_head transaction_list; + struct timer_list flush_timer; + unsigned long reset_jiffies; + + unsigned long long guid; + unsigned max_receive; + int link_speed; + int config_rom_generation; + + spinlock_t lock; /* Take this lock when handling the lists in + * this struct. */ + struct fw_node *local_node; + struct fw_node *root_node; + struct fw_node *irm_node; + u8 color; /* must be u8 to match the definition in struct fw_node */ + int gap_count; + bool beta_repeaters_present; + + int index; + + struct list_head link; + + /* Work struct for BM duties. */ + struct delayed_work work; + int bm_retries; + int bm_generation; + __be32 bm_transaction_data[2]; + + bool broadcast_channel_allocated; + u32 broadcast_channel; + u32 topology_map[(CSR_TOPOLOGY_MAP_END - CSR_TOPOLOGY_MAP) / 4]; +}; + +static inline struct fw_card *fw_card_get(struct fw_card *card) +{ + kref_get(&card->kref); + + return card; +} + +void fw_card_release(struct kref *kref); + +static inline void fw_card_put(struct fw_card *card) +{ + kref_put(&card->kref, fw_card_release); +} + +struct fw_attribute_group { + struct attribute_group *groups[2]; + struct attribute_group group; + struct attribute *attrs[12]; +}; + +enum fw_device_state { + FW_DEVICE_INITIALIZING, + FW_DEVICE_RUNNING, + FW_DEVICE_GONE, + FW_DEVICE_SHUTDOWN, +}; + +/* + * Note, fw_device.generation always has to be read before fw_device.node_id. + * Use SMP memory barriers to ensure this. Otherwise requests will be sent + * to an outdated node_id if the generation was updated in the meantime due + * to a bus reset. + * + * Likewise, fw-core will take care to update .node_id before .generation so + * that whenever fw_device.generation is current WRT the actual bus generation, + * fw_device.node_id is guaranteed to be current too. + * + * The same applies to fw_device.card->node_id vs. fw_device.generation. + * + * fw_device.config_rom and fw_device.config_rom_length may be accessed during + * the lifetime of any fw_unit belonging to the fw_device, before device_del() + * was called on the last fw_unit. Alternatively, they may be accessed while + * holding fw_device_rwsem. 
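A minimal sketch of the read ordering requested above (editor's illustration, not from the patch; the helper name is made up):

static void snapshot_node(struct fw_device *device, int *generation, int *node_id)
{
        *generation = device->generation;
        smp_rmb();      /* pairs with the core updating node_id before generation */
        *node_id = device->node_id;
}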
+ */ +struct fw_device { + atomic_t state; + struct fw_node *node; + int node_id; + int generation; + unsigned max_speed; + struct fw_card *card; + struct device device; + + struct mutex client_list_mutex; + struct list_head client_list; + + u32 *config_rom; + size_t config_rom_length; + int config_rom_retries; + unsigned is_local:1; + unsigned max_rec:4; + unsigned cmc:1; + unsigned irmc:1; + unsigned bc_implemented:2; + + struct delayed_work work; + struct fw_attribute_group attribute_group; +}; + +static inline struct fw_device *fw_device(struct device *dev) +{ + return container_of(dev, struct fw_device, device); +} + +static inline int fw_device_is_shutdown(struct fw_device *device) +{ + return atomic_read(&device->state) == FW_DEVICE_SHUTDOWN; +} + +static inline struct fw_device *fw_device_get(struct fw_device *device) +{ + get_device(&device->device); + + return device; +} + +static inline void fw_device_put(struct fw_device *device) +{ + put_device(&device->device); +} + +int fw_device_enable_phys_dma(struct fw_device *device); + +/* + * fw_unit.directory must not be accessed after device_del(&fw_unit.device). + */ +struct fw_unit { + struct device device; + u32 *directory; + struct fw_attribute_group attribute_group; +}; + +static inline struct fw_unit *fw_unit(struct device *dev) +{ + return container_of(dev, struct fw_unit, device); +} + +static inline struct fw_unit *fw_unit_get(struct fw_unit *unit) +{ + get_device(&unit->device); + + return unit; +} + +static inline void fw_unit_put(struct fw_unit *unit) +{ + put_device(&unit->device); +} + +static inline struct fw_device *fw_parent_device(struct fw_unit *unit) +{ + return fw_device(unit->device.parent); +} + +struct ieee1394_device_id; + +struct fw_driver { + struct device_driver driver; + /* Called when the parent device sits through a bus reset. */ + void (*update)(struct fw_unit *unit); + const struct ieee1394_device_id *id_table; +}; + +struct fw_packet; +struct fw_request; + +typedef void (*fw_packet_callback_t)(struct fw_packet *packet, + struct fw_card *card, int status); +typedef void (*fw_transaction_callback_t)(struct fw_card *card, int rcode, + void *data, size_t length, + void *callback_data); +/* + * Important note: The callback must guarantee that either fw_send_response() + * or kfree() is called on the @request. + */ +typedef void (*fw_address_callback_t)(struct fw_card *card, + struct fw_request *request, + int tcode, int destination, int source, + int generation, int speed, + unsigned long long offset, + void *data, size_t length, + void *callback_data); + +struct fw_packet { + int speed; + int generation; + u32 header[4]; + size_t header_length; + void *payload; + size_t payload_length; + dma_addr_t payload_bus; + u32 timestamp; + + /* + * This callback is called when the packet transmission has + * completed; for successful transmission, the status code is + * the ack received from the destination, otherwise it's a + * negative errno: ENOMEM, ESTALE, ETIMEDOUT, ENODEV, EIO. + * The callback can be called from tasklet context and thus + * must never block. + */ + fw_packet_callback_t callback; + int ack; + struct list_head link; + void *driver_data; +}; + +struct fw_transaction { + int node_id; /* The generation is implied; it is always the current. */ + int tlabel; + int timestamp; + struct list_head link; + + struct fw_packet packet; + + /* + * The data passed to the callback is valid only during the + * callback. 
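Because the response payload is only valid while the callback runs, a caller of fw_send_request() (declared a few lines below) would typically copy it out before signalling completion. An illustrative sketch, not from the patch; the names are hypothetical and TCODE_*/RCODE_* come from <linux/firewire-constants.h>:

struct demo_read {
        struct fw_transaction t;
        struct completion done;
        int rcode;
        __be32 value;
};

static void demo_read_done(struct fw_card *card, int rcode,
                           void *payload, size_t length, void *data)
{
        struct demo_read *r = data;

        if (rcode == RCODE_COMPLETE && length >= 4)
                memcpy(&r->value, payload, 4);  /* copy while it is still valid */
        r->rcode = rcode;
        complete(&r->done);
}

/*
 * caller, with node_id/generation/speed/offset already captured:
 *      init_completion(&r.done);
 *      fw_send_request(card, &r.t, TCODE_READ_QUADLET_REQUEST, node_id,
 *                      generation, speed, offset, NULL, 4,
 *                      demo_read_done, &r);
 *      wait_for_completion(&r.done);
 */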
+ */ + fw_transaction_callback_t callback; + void *callback_data; +}; + +struct fw_address_handler { + u64 offset; + size_t length; + fw_address_callback_t address_callback; + void *callback_data; + struct list_head link; +}; + +struct fw_address_region { + u64 start; + u64 end; +}; + +extern const struct fw_address_region fw_high_memory_region; + +int fw_core_add_address_handler(struct fw_address_handler *handler, + const struct fw_address_region *region); +void fw_core_remove_address_handler(struct fw_address_handler *handler); +void fw_send_response(struct fw_card *card, + struct fw_request *request, int rcode); +void fw_send_request(struct fw_card *card, struct fw_transaction *t, + int tcode, int destination_id, int generation, int speed, + unsigned long long offset, void *payload, size_t length, + fw_transaction_callback_t callback, void *callback_data); +int fw_cancel_transaction(struct fw_card *card, + struct fw_transaction *transaction); +int fw_run_transaction(struct fw_card *card, int tcode, int destination_id, + int generation, int speed, unsigned long long offset, + void *payload, size_t length); + +static inline int fw_stream_packet_destination_id(int tag, int channel, int sy) +{ + return tag << 14 | channel << 8 | sy; +} + +struct fw_descriptor { + struct list_head link; + size_t length; + u32 immediate; + u32 key; + const u32 *data; +}; + +int fw_core_add_descriptor(struct fw_descriptor *desc); +void fw_core_remove_descriptor(struct fw_descriptor *desc); + +/* + * The iso packet format allows for an immediate header/payload part + * stored in 'header' immediately after the packet info plus an + * indirect payload part that is pointed to by the 'payload' field. + * Applications can use one or the other or both to implement simple + * low-bandwidth streaming (e.g. audio) or more advanced + * scatter-gather streaming (e.g. assembling video frames automatically). + */ +struct fw_iso_packet { + u16 payload_length; /* Length of indirect payload. */ + u32 interrupt:1; /* Generate interrupt on this packet */ + u32 skip:1; /* Set to not send packet at all. */ + u32 tag:2; + u32 sy:4; + u32 header_length:8; /* Length of immediate header. */ + u32 header[0]; +}; + +#define FW_ISO_CONTEXT_TRANSMIT 0 +#define FW_ISO_CONTEXT_RECEIVE 1 + +#define FW_ISO_CONTEXT_MATCH_TAG0 1 +#define FW_ISO_CONTEXT_MATCH_TAG1 2 +#define FW_ISO_CONTEXT_MATCH_TAG2 4 +#define FW_ISO_CONTEXT_MATCH_TAG3 8 +#define FW_ISO_CONTEXT_MATCH_ALL_TAGS 15 + +/* + * An iso buffer is just a set of pages mapped for DMA in the + * specified direction. Since the pages are to be used for DMA, they + * are not mapped into the kernel virtual address space. We store the + * DMA address in the page private. The helper function + * fw_iso_buffer_map() will map the pages into a given vma.
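The buffer and context helpers declared just below are typically combined as in the following driver-side sketch (editor's illustration, not from the patch). Channel, speed, buffer sizes and the callback are arbitrary example values; error/teardown paths are trimmed, and in real code the buffer and context would live in the driver's private state rather than on the stack.

static void demo_iso_callback(struct fw_iso_context *context, u32 cycle,
                              size_t header_length, void *header, void *data)
{
        /* consume the received packet headers here */
}

static int demo_start_reception(struct fw_card *card)
{
        struct fw_iso_buffer buffer;
        struct fw_iso_context *ctx;
        int i;

        if (fw_iso_buffer_init(&buffer, card, 16, DMA_FROM_DEVICE) < 0)
                return -ENOMEM;

        ctx = fw_iso_context_create(card, FW_ISO_CONTEXT_RECEIVE,
                                    13 /* channel */, SCODE_400,
                                    4 /* header_size */,
                                    demo_iso_callback, NULL);
        if (IS_ERR(ctx))
                return PTR_ERR(ctx);

        for (i = 0; i < 16; i++) {
                struct fw_iso_packet p = {
                        .payload_length = PAGE_SIZE,
                        .interrupt      = 1,
                        .header_length  = 4,
                };

                fw_iso_context_queue(ctx, &p, &buffer, i * PAGE_SIZE);
        }
        return fw_iso_context_start(ctx, -1 /* next cycle */, 0,
                                    FW_ISO_CONTEXT_MATCH_ALL_TAGS);
}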
+ */ +struct fw_iso_buffer { + enum dma_data_direction direction; + struct page **pages; + int page_count; +}; + +int fw_iso_buffer_init(struct fw_iso_buffer *buffer, struct fw_card *card, + int page_count, enum dma_data_direction direction); +void fw_iso_buffer_destroy(struct fw_iso_buffer *buffer, struct fw_card *card); + +struct fw_iso_context; +typedef void (*fw_iso_callback_t)(struct fw_iso_context *context, + u32 cycle, size_t header_length, + void *header, void *data); +struct fw_iso_context { + struct fw_card *card; + int type; + int channel; + int speed; + size_t header_size; + fw_iso_callback_t callback; + void *callback_data; +}; + +struct fw_iso_context *fw_iso_context_create(struct fw_card *card, + int type, int channel, int speed, size_t header_size, + fw_iso_callback_t callback, void *callback_data); +int fw_iso_context_queue(struct fw_iso_context *ctx, + struct fw_iso_packet *packet, + struct fw_iso_buffer *buffer, + unsigned long payload); +int fw_iso_context_start(struct fw_iso_context *ctx, + int cycle, int sync, int tags); +int fw_iso_context_stop(struct fw_iso_context *ctx); +void fw_iso_context_destroy(struct fw_iso_context *ctx); + +#endif /* _LINUX_FIREWIRE_H */ diff --git a/include/linux/firmware-map.h b/include/linux/firmware-map.h index 6e199c8dfac..875451f1373 100644 --- a/include/linux/firmware-map.h +++ b/include/linux/firmware-map.h @@ -1,7 +1,7 @@ /* * include/linux/firmware-map.h: * Copyright (C) 2008 SUSE LINUX Products GmbH - * by Bernhard Walle <bwalle@suse.de> + * by Bernhard Walle <bernhard.walle@gmx.de> * * This program is free software; you can redistribute it and/or modify * it under the terms of the GNU General Public License v2.0 as published by @@ -24,21 +24,17 @@ */ #ifdef CONFIG_FIRMWARE_MEMMAP -int firmware_map_add(resource_size_t start, resource_size_t end, - const char *type); -int firmware_map_add_early(resource_size_t start, resource_size_t end, - const char *type); +int firmware_map_add(u64 start, u64 end, const char *type); +int firmware_map_add_early(u64 start, u64 end, const char *type); #else /* CONFIG_FIRMWARE_MEMMAP */ -static inline int firmware_map_add(resource_size_t start, resource_size_t end, - const char *type) +static inline int firmware_map_add(u64 start, u64 end, const char *type) { return 0; } -static inline int firmware_map_add_early(resource_size_t start, - resource_size_t end, const char *type) +static inline int firmware_map_add_early(u64 start, u64 end, const char *type) { return 0; } diff --git a/include/linux/firmware.h b/include/linux/firmware.h index c8ecf5b2a20..d3154462843 100644 --- a/include/linux/firmware.h +++ b/include/linux/firmware.h @@ -5,7 +5,6 @@ #include <linux/types.h> #include <linux/compiler.h> -#define FIRMWARE_NAME_MAX 30 #define FW_ACTION_NOHOTPLUG 0 #define FW_ACTION_HOTPLUG 1 diff --git a/include/linux/flex_array.h b/include/linux/flex_array.h new file mode 100644 index 00000000000..45ff1849151 --- /dev/null +++ b/include/linux/flex_array.h @@ -0,0 +1,49 @@ +#ifndef _FLEX_ARRAY_H +#define _FLEX_ARRAY_H + +#include <linux/types.h> +#include <asm/page.h> + +#define FLEX_ARRAY_PART_SIZE PAGE_SIZE +#define FLEX_ARRAY_BASE_SIZE PAGE_SIZE + +struct flex_array_part; + +/* + * This is meant to replace cases where an array-like + * structure has gotten too big to fit into kmalloc() + * and the developer is getting tempted to use + * vmalloc(). 
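The API declared just below might be used roughly like this (editor's sketch, not part of the patch; the element type, sizes and index are arbitrary, and the usual slab/gfp and printk headers are assumed):

static int demo_flex_array(void)
{
        struct flex_array *fa;
        int value = 42, *stored;

        /* room for 100000 ints; the backing parts are allocated on demand */
        fa = flex_array_alloc(sizeof(int), 100000, GFP_KERNEL);
        if (!fa)
                return -ENOMEM;

        /* flex_array_put() copies the element into the array */
        if (flex_array_put(fa, 1000, &value, GFP_KERNEL)) {
                flex_array_free(fa);
                return -ENOMEM;
        }

        stored = flex_array_get(fa, 1000);      /* pointer to the stored copy */
        pr_info("element 1000 = %d\n", stored ? *stored : -1);

        flex_array_free(fa);
        return 0;
}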
+ */ + +struct flex_array { + union { + struct { + int element_size; + int total_nr_elements; + struct flex_array_part *parts[]; + }; + /* + * This little trick makes sure that + * sizeof(flex_array) == PAGE_SIZE + */ + char padding[FLEX_ARRAY_BASE_SIZE]; + }; +}; + +#define FLEX_ARRAY_INIT(size, total) { { {\ + .element_size = (size), \ + .total_nr_elements = (total), \ +} } } + +struct flex_array *flex_array_alloc(int element_size, unsigned int total, + gfp_t flags); +int flex_array_prealloc(struct flex_array *fa, unsigned int start, + unsigned int end, gfp_t flags); +void flex_array_free(struct flex_array *fa); +void flex_array_free_parts(struct flex_array *fa); +int flex_array_put(struct flex_array *fa, unsigned int element_nr, void *src, + gfp_t flags); +void *flex_array_get(struct flex_array *fa, unsigned int element_nr); + +#endif /* _FLEX_ARRAY_H */ diff --git a/include/linux/fs.h b/include/linux/fs.h index 6022f44043f..37f53216998 100644 --- a/include/linux/fs.h +++ b/include/linux/fs.h @@ -54,24 +54,30 @@ struct inodes_stat_t { #define MAY_ACCESS 16 #define MAY_OPEN 32 +/* + * flags in file.f_mode. Note that FMODE_READ and FMODE_WRITE must correspond + * to O_WRONLY and O_RDWR via the strange trick in __dentry_open() + */ + /* file is open for reading */ #define FMODE_READ ((__force fmode_t)1) /* file is open for writing */ #define FMODE_WRITE ((__force fmode_t)2) /* file is seekable */ #define FMODE_LSEEK ((__force fmode_t)4) -/* file can be accessed using pread/pwrite */ +/* file can be accessed using pread */ #define FMODE_PREAD ((__force fmode_t)8) -#define FMODE_PWRITE FMODE_PREAD /* These go hand in hand */ +/* file can be accessed using pwrite */ +#define FMODE_PWRITE ((__force fmode_t)16) /* File is opened for execution with sys_execve / sys_uselib */ -#define FMODE_EXEC ((__force fmode_t)16) +#define FMODE_EXEC ((__force fmode_t)32) /* File is opened with O_NDELAY (only set for block devices) */ -#define FMODE_NDELAY ((__force fmode_t)32) +#define FMODE_NDELAY ((__force fmode_t)64) /* File is opened with O_EXCL (only set for block devices) */ -#define FMODE_EXCL ((__force fmode_t)64) +#define FMODE_EXCL ((__force fmode_t)128) /* File is opened using open(.., 3, ..) and is writeable only for ioctls (specialy hack for floppy.c) */ -#define FMODE_WRITE_IOCTL ((__force fmode_t)128) +#define FMODE_WRITE_IOCTL ((__force fmode_t)256) /* * Don't update ctime and mtime. @@ -81,17 +87,80 @@ struct inodes_stat_t { */ #define FMODE_NOCMTIME ((__force fmode_t)2048) +/* + * The below are the various read and write types that we support. Some of + * them include behavioral modifiers that send information down to the + * block layer and IO scheduler. Terminology: + * + * The block layer uses device plugging to defer IO a little bit, in + * the hope that we will see more IO very shortly. This increases + * coalescing of adjacent IO and thus reduces the number of IOs we + * have to send to the device. It also allows for better queuing, + * if the IO isn't mergeable. If the caller is going to be waiting + * for the IO, then he must ensure that the device is unplugged so + * that the IO is dispatched to the driver. + * + * All IO is handled async in Linux. This is fine for background + * writes, but for reads or writes that someone waits for completion + * on, we want to notify the block layer and IO scheduler so that they + * know about it. That allows them to make better scheduling + * decisions. So when the below references 'sync' and 'async', it + * is referencing this priority hint. 
+ * + * With that in mind, the available types are: + * + * READ A normal read operation. Device will be plugged. + * READ_SYNC A synchronous read. Device is not plugged, caller can + * immediately wait on this read without caring about + * unplugging. + * READA Used for read-ahead operations. Lower priority, and the + * block layer could (in theory) choose to ignore this + * request if it runs into resource problems. + * WRITE A normal async write. Device will be plugged. + * SWRITE Like WRITE, but a special case for ll_rw_block() that + * tells it to lock the buffer first. Normally a buffer + * must be locked before doing IO. + * WRITE_SYNC_PLUG Synchronous write. Identical to WRITE, but passes down + * the hint that someone will be waiting on this IO + * shortly. The device must still be unplugged explicitly, + * WRITE_SYNC_PLUG does not do this as we could be + * submitting more writes before we actually wait on any + * of them. + * WRITE_SYNC Like WRITE_SYNC_PLUG, but also unplugs the device + * immediately after submission. The write equivalent + * of READ_SYNC. + * WRITE_ODIRECT Special case write for O_DIRECT only. + * SWRITE_SYNC + * SWRITE_SYNC_PLUG Like WRITE_SYNC/WRITE_SYNC_PLUG, but locks the buffer. + * See SWRITE. + * WRITE_BARRIER Like WRITE, but tells the block layer that all + * previously submitted writes must be safely on storage + * before this one is started. Also guarantees that when + * this write is complete, it itself is also safely on + * storage. Prevents reordering of writes on both sides + * of this IO. + * + */ #define RW_MASK 1 #define RWA_MASK 2 #define READ 0 #define WRITE 1 #define READA 2 /* read-ahead - don't block if no resources */ #define SWRITE 3 /* for ll_rw_block() - wait for buffer lock */ -#define READ_SYNC (READ | (1 << BIO_RW_SYNC)) +#define READ_SYNC (READ | (1 << BIO_RW_SYNCIO) | (1 << BIO_RW_UNPLUG)) #define READ_META (READ | (1 << BIO_RW_META)) -#define WRITE_SYNC (WRITE | (1 << BIO_RW_SYNC)) -#define SWRITE_SYNC (SWRITE | (1 << BIO_RW_SYNC)) +#define WRITE_SYNC_PLUG (WRITE | (1 << BIO_RW_SYNCIO) | (1 << BIO_RW_NOIDLE)) +#define WRITE_SYNC (WRITE_SYNC_PLUG | (1 << BIO_RW_UNPLUG)) +#define WRITE_ODIRECT (WRITE | (1 << BIO_RW_SYNCIO) | (1 << BIO_RW_UNPLUG)) +#define SWRITE_SYNC_PLUG \ + (SWRITE | (1 << BIO_RW_SYNCIO) | (1 << BIO_RW_NOIDLE)) +#define SWRITE_SYNC (SWRITE_SYNC_PLUG | (1 << BIO_RW_UNPLUG)) #define WRITE_BARRIER (WRITE | (1 << BIO_RW_BARRIER)) + +/* + * These aren't really reads or writes, they pass down information about + * parts of device that are now unused by the file system. + */ #define DISCARD_NOBARRIER (1 << BIO_RW_DISCARD) #define DISCARD_BARRIER ((1 << BIO_RW_DISCARD) | (1 << BIO_RW_BARRIER)) @@ -135,6 +204,7 @@ struct inodes_stat_t { #define MS_RELATIME (1<<21) /* Update atime relative to mtime/ctime. 
*/ #define MS_KERNMOUNT (1<<22) /* this is a kern_mount call */ #define MS_I_VERSION (1<<23) /* Update inode I_version field */ +#define MS_STRICTATIME (1<<24) /* Always perform atime updates */ #define MS_ACTIVE (1<<30) #define MS_NOUSER (1<<31) @@ -640,9 +710,12 @@ static inline int mapping_writably_mapped(struct address_space *mapping) #define i_size_ordered_init(inode) do { } while (0) #endif +struct posix_acl; +#define ACL_NOT_CACHED ((void *)(-1)) + struct inode { struct hlist_node i_hash; - struct list_head i_list; + struct list_head i_list; /* backing dev IO list */ struct list_head i_sb_list; struct list_head i_dentry; unsigned long i_ino; @@ -659,8 +732,8 @@ struct inode { struct timespec i_atime; struct timespec i_mtime; struct timespec i_ctime; - unsigned int i_blkbits; blkcnt_t i_blocks; + unsigned int i_blkbits; unsigned short i_bytes; umode_t i_mode; spinlock_t i_lock; /* i_blocks, i_bytes, maybe i_size */ @@ -681,13 +754,12 @@ struct inode { struct block_device *i_bdev; struct cdev *i_cdev; }; - int i_cindex; __u32 i_generation; -#ifdef CONFIG_DNOTIFY - unsigned long i_dnotify_mask; /* Directory notify events */ - struct dnotify_struct *i_dnotify; /* for directory notifications */ +#ifdef CONFIG_FSNOTIFY + __u32 i_fsnotify_mask; /* all events this inode cares about */ + struct hlist_head i_fsnotify_mark_entries; /* fsnotify mark entries */ #endif #ifdef CONFIG_INOTIFY @@ -704,6 +776,10 @@ struct inode { #ifdef CONFIG_SECURITY void *i_security; #endif +#ifdef CONFIG_FS_POSIX_ACL + struct posix_acl *i_acl; + struct posix_acl *i_default_acl; +#endif void *i_private; /* fs or device private pointer */ }; @@ -727,9 +803,6 @@ enum inode_i_mutex_lock_class I_MUTEX_QUOTA }; -extern void inode_double_lock(struct inode *inode1, struct inode *inode2); -extern void inode_double_unlock(struct inode *inode1, struct inode *inode2); - /* * NOTE: in a 32bit arch with a preemptable kernel and * an UP compile the i_size_read/write must be atomic @@ -813,7 +886,7 @@ struct file_ra_state { there are only # of pages ahead */ unsigned int ra_pages; /* Maximum readahead window */ - int mmap_miss; /* Cache miss stat for mmap accesses */ + unsigned int mmap_miss; /* Cache miss stat for mmap accesses */ loff_t prev_pos; /* Cache last read() position */ }; @@ -842,6 +915,7 @@ struct file { #define f_dentry f_path.dentry #define f_vfsmnt f_path.mnt const struct file_operations *f_op; + spinlock_t f_lock; /* f_ep_links, f_flags, no IRQ */ atomic_long_t f_count; unsigned int f_flags; fmode_t f_mode; @@ -860,7 +934,6 @@ struct file { #ifdef CONFIG_EPOLL /* Used by fs/eventpoll.c to link all the hooks to this file */ struct list_head f_ep_links; - spinlock_t f_ep_lock; #endif /* #ifdef CONFIG_EPOLL */ struct address_space *f_mapping; #ifdef CONFIG_DEBUG_WRITECOUNT @@ -1041,6 +1114,7 @@ extern void locks_copy_lock(struct file_lock *, struct file_lock *); extern void __locks_copy_lock(struct file_lock *, const struct file_lock *); extern void locks_remove_posix(struct file *, fl_owner_t); extern void locks_remove_flock(struct file *); +extern void locks_release_private(struct file_lock *); extern void posix_test_lock(struct file *, struct file_lock *); extern int posix_lock_file(struct file *, struct file_lock *, struct file_lock *); extern int posix_lock_file_wait(struct file *, struct file_lock *); @@ -1057,34 +1131,147 @@ extern int lease_modify(struct file_lock **, int); extern int lock_may_read(struct inode *, loff_t start, unsigned long count); extern int lock_may_write(struct inode *, loff_t start, 
unsigned long count); #else /* !CONFIG_FILE_LOCKING */ -#define fcntl_getlk(a, b) ({ -EINVAL; }) -#define fcntl_setlk(a, b, c, d) ({ -EACCES; }) +static inline int fcntl_getlk(struct file *file, struct flock __user *user) +{ + return -EINVAL; +} + +static inline int fcntl_setlk(unsigned int fd, struct file *file, + unsigned int cmd, struct flock __user *user) +{ + return -EACCES; +} + #if BITS_PER_LONG == 32 -#define fcntl_getlk64(a, b) ({ -EINVAL; }) -#define fcntl_setlk64(a, b, c, d) ({ -EACCES; }) +static inline int fcntl_getlk64(struct file *file, struct flock64 __user *user) +{ + return -EINVAL; +} + +static inline int fcntl_setlk64(unsigned int fd, struct file *file, + unsigned int cmd, struct flock64 __user *user) +{ + return -EACCES; +} #endif -#define fcntl_setlease(a, b, c) ({ 0; }) -#define fcntl_getlease(a) ({ 0; }) -#define locks_init_lock(a) ({ }) -#define __locks_copy_lock(a, b) ({ }) -#define locks_copy_lock(a, b) ({ }) -#define locks_remove_posix(a, b) ({ }) -#define locks_remove_flock(a) ({ }) -#define posix_test_lock(a, b) ({ 0; }) -#define posix_lock_file(a, b, c) ({ -ENOLCK; }) -#define posix_lock_file_wait(a, b) ({ -ENOLCK; }) -#define posix_unblock_lock(a, b) (-ENOENT) -#define vfs_test_lock(a, b) ({ 0; }) -#define vfs_lock_file(a, b, c, d) (-ENOLCK) -#define vfs_cancel_lock(a, b) ({ 0; }) -#define flock_lock_file_wait(a, b) ({ -ENOLCK; }) -#define __break_lease(a, b) ({ 0; }) -#define lease_get_mtime(a, b) ({ }) -#define generic_setlease(a, b, c) ({ -EINVAL; }) -#define vfs_setlease(a, b, c) ({ -EINVAL; }) -#define lease_modify(a, b) ({ -EINVAL; }) -#define lock_may_read(a, b, c) ({ 1; }) -#define lock_may_write(a, b, c) ({ 1; }) +static inline int fcntl_setlease(unsigned int fd, struct file *filp, long arg) +{ + return 0; +} + +static inline int fcntl_getlease(struct file *filp) +{ + return 0; +} + +static inline void locks_init_lock(struct file_lock *fl) +{ + return; +} + +static inline void __locks_copy_lock(struct file_lock *new, struct file_lock *fl) +{ + return; +} + +static inline void locks_copy_lock(struct file_lock *new, struct file_lock *fl) +{ + return; +} + +static inline void locks_remove_posix(struct file *filp, fl_owner_t owner) +{ + return; +} + +static inline void locks_remove_flock(struct file *filp) +{ + return; +} + +static inline void posix_test_lock(struct file *filp, struct file_lock *fl) +{ + return; +} + +static inline int posix_lock_file(struct file *filp, struct file_lock *fl, + struct file_lock *conflock) +{ + return -ENOLCK; +} + +static inline int posix_lock_file_wait(struct file *filp, struct file_lock *fl) +{ + return -ENOLCK; +} + +static inline int posix_unblock_lock(struct file *filp, + struct file_lock *waiter) +{ + return -ENOENT; +} + +static inline int vfs_test_lock(struct file *filp, struct file_lock *fl) +{ + return 0; +} + +static inline int vfs_lock_file(struct file *filp, unsigned int cmd, + struct file_lock *fl, struct file_lock *conf) +{ + return -ENOLCK; +} + +static inline int vfs_cancel_lock(struct file *filp, struct file_lock *fl) +{ + return 0; +} + +static inline int flock_lock_file_wait(struct file *filp, + struct file_lock *request) +{ + return -ENOLCK; +} + +static inline int __break_lease(struct inode *inode, unsigned int mode) +{ + return 0; +} + +static inline void lease_get_mtime(struct inode *inode, struct timespec *time) +{ + return; +} + +static inline int generic_setlease(struct file *filp, long arg, + struct file_lock **flp) +{ + return -EINVAL; +} + +static inline int vfs_setlease(struct file *filp, 
long arg, + struct file_lock **lease) +{ + return -EINVAL; +} + +static inline int lease_modify(struct file_lock **before, int arg) +{ + return -EINVAL; +} + +static inline int lock_may_read(struct inode *inode, loff_t start, + unsigned long len) +{ + return 1; +} + +static inline int lock_may_write(struct inode *inode, loff_t start, + unsigned long len) +{ + return 1; +} + #endif /* !CONFIG_FILE_LOCKING */ @@ -1141,7 +1328,7 @@ struct super_block { struct rw_semaphore s_umount; struct mutex s_lock; int s_count; - int s_need_sync_fs; + int s_need_sync; atomic_t s_active; #ifdef CONFIG_SECURITY void *s_security; @@ -1149,9 +1336,6 @@ struct super_block { struct xattr_handler **s_xattr; struct list_head s_inodes; /* all inodes */ - struct list_head s_dirty; /* dirty inodes */ - struct list_head s_io; /* parked for writeback */ - struct list_head s_more_io; /* parked for more writeback */ struct hlist_head s_anon; /* anonymous dentries for (nfs) exporting */ struct list_head s_files; /* s_dentry_lru and s_nr_dentry_unused are protected by dcache_lock */ @@ -1192,11 +1376,6 @@ struct super_block { * generic_show_options() */ char *s_options; - - /* - * storage for asynchronous operations - */ - struct list_head s_async_list; }; extern struct timespec current_fs_time(struct super_block *sb); @@ -1276,11 +1455,6 @@ int fiemap_check_flags(struct fiemap_extent_info *fieinfo, u32 fs_flags); #define DT_SOCK 12 #define DT_WHT 14 -#define OSYNC_METADATA (1<<0) -#define OSYNC_DATA (1<<1) -#define OSYNC_INODE (1<<2) -int generic_osync_inode(struct inode *, struct address_space *, int); - /* * This is the "filldir" function type, used by readdir() to let * the kernel specify what kind of dirent layout it wants to have. @@ -1346,6 +1520,7 @@ struct inode_operations { void (*put_link) (struct dentry *, struct nameidata *, void *); void (*truncate) (struct inode *); int (*permission) (struct inode *, int); + int (*check_acl)(struct inode *, int); int (*setattr) (struct dentry *, struct iattr *); int (*getattr) (struct vfsmount *mnt, struct dentry *, struct kstat *); int (*setxattr) (struct dentry *, const char *,const void *,size_t,int); @@ -1575,6 +1750,9 @@ struct file_system_type { struct lock_class_key i_alloc_sem_key; }; +extern int get_sb_ns(struct file_system_type *fs_type, int flags, void *data, + int (*fill_super)(struct super_block *, void *, int), + struct vfsmount *mnt); extern int get_sb_bdev(struct file_system_type *fs_type, int flags, const char *dev_name, void *data, int (*fill_super)(struct super_block *, void *, int), @@ -1592,6 +1770,7 @@ void kill_block_super(struct super_block *sb); void kill_anon_super(struct super_block *sb); void kill_litter_super(struct super_block *sb); void deactivate_super(struct super_block *sb); +void deactivate_locked_super(struct super_block *sb); int set_anon_super(struct super_block *s, void *data); struct super_block *sget(struct file_system_type *type, int (*test)(struct super_block *,void *), @@ -1600,8 +1779,9 @@ struct super_block *sget(struct file_system_type *type, extern int get_sb_pseudo(struct file_system_type *, char *, const struct super_operations *ops, unsigned long, struct vfsmount *mnt); -extern int simple_set_mnt(struct vfsmount *mnt, struct super_block *sb); +extern void simple_set_mnt(struct vfsmount *mnt, struct super_block *sb); int __put_super_and_need_restart(struct super_block *sb); +void put_super(struct super_block *sb); /* Alas, no aliases. 
Too much hassle with bringing module.h everywhere */ #define fops_get(fops) \ @@ -1616,11 +1796,13 @@ extern struct vfsmount *kern_mount_data(struct file_system_type *, void *data); extern int may_umount_tree(struct vfsmount *); extern int may_umount(struct vfsmount *); extern long do_mount(char *, char *, char *, unsigned long, void *); -extern struct vfsmount *collect_mounts(struct vfsmount *, struct dentry *); +extern struct vfsmount *collect_mounts(struct path *); extern void drop_collected_mounts(struct vfsmount *); extern int vfs_statfs(struct dentry *, struct kstatfs *); +extern int current_umask(void); + /* /sys/fs */ extern struct kobject *fs_kobj; @@ -1681,19 +1863,52 @@ static inline int break_lease(struct inode *inode, unsigned int mode) return 0; } #else /* !CONFIG_FILE_LOCKING */ -#define locks_mandatory_locked(a) ({ 0; }) -#define locks_mandatory_area(a, b, c, d, e) ({ 0; }) -#define __mandatory_lock(a) ({ 0; }) -#define mandatory_lock(a) ({ 0; }) -#define locks_verify_locked(a) ({ 0; }) -#define locks_verify_truncate(a, b, c) ({ 0; }) -#define break_lease(a, b) ({ 0; }) +static inline int locks_mandatory_locked(struct inode *inode) +{ + return 0; +} + +static inline int locks_mandatory_area(int rw, struct inode *inode, + struct file *filp, loff_t offset, + size_t count) +{ + return 0; +} + +static inline int __mandatory_lock(struct inode *inode) +{ + return 0; +} + +static inline int mandatory_lock(struct inode *inode) +{ + return 0; +} + +static inline int locks_verify_locked(struct inode *inode) +{ + return 0; +} + +static inline int locks_verify_truncate(struct inode *inode, struct file *filp, + size_t size) +{ + return 0; +} + +static inline int break_lease(struct inode *inode, unsigned int mode) +{ + return 0; +} + #endif /* CONFIG_FILE_LOCKING */ /* fs/open.c */ extern int do_truncate(struct dentry *, loff_t start, unsigned int time_attrs, struct file *filp); +extern int do_fallocate(struct file *file, int mode, loff_t offset, + loff_t len); extern long do_sys_open(int dfd, const char __user *filename, int flags, int mode); extern struct file *filp_open(const char *, int, int); @@ -1702,14 +1917,19 @@ extern struct file * dentry_open(struct dentry *, struct vfsmount *, int, extern int filp_close(struct file *, fl_owner_t id); extern char * getname(const char __user *); +/* fs/ioctl.c */ + +extern int ioctl_preallocate(struct file *filp, void __user *argp); + /* fs/dcache.c */ extern void __init vfs_caches_init_early(void); extern void __init vfs_caches_init(unsigned long); extern struct kmem_cache *names_cachep; -#define __getname() kmem_cache_alloc(names_cachep, GFP_KERNEL) -#define __putname(name) kmem_cache_free(names_cachep, (void *)(name)) +#define __getname_gfp(gfp) kmem_cache_alloc(names_cachep, (gfp)) +#define __getname() __getname_gfp(GFP_KERNEL) +#define __putname(name) kmem_cache_free(names_cachep, (void *)(name)) #ifndef CONFIG_AUDITSYSCALL #define putname(name) __putname(name) #else @@ -1720,13 +1940,33 @@ extern void putname(const char *name); extern int register_blkdev(unsigned int, const char *); extern void unregister_blkdev(unsigned int, const char *); extern struct block_device *bdget(dev_t); +extern struct block_device *bdgrab(struct block_device *bdev); extern void bd_set_size(struct block_device *, loff_t size); extern void bd_forget(struct inode *inode); extern void bdput(struct block_device *); extern struct block_device *open_by_devnum(dev_t, fmode_t); +extern void invalidate_bdev(struct block_device *); +extern int sync_blockdev(struct 
block_device *bdev); +extern struct super_block *freeze_bdev(struct block_device *); +extern void emergency_thaw_all(void); +extern int thaw_bdev(struct block_device *bdev, struct super_block *sb); +extern int fsync_bdev(struct block_device *); #else static inline void bd_forget(struct inode *inode) {} +static inline int sync_blockdev(struct block_device *bdev) { return 0; } +static inline void invalidate_bdev(struct block_device *bdev) {} + +static inline struct super_block *freeze_bdev(struct block_device *sb) +{ + return NULL; +} + +static inline int thaw_bdev(struct block_device *bdev, struct super_block *sb) +{ + return 0; +} #endif +extern int sync_filesystem(struct super_block *); extern const struct file_operations def_blk_fops; extern const struct file_operations def_chr_fops; extern const struct file_operations bad_sock_fops; @@ -1752,12 +1992,25 @@ extern void bd_release_from_disk(struct block_device *, struct gendisk *); #define CHRDEV_MAJOR_HASH_SIZE 255 extern int alloc_chrdev_region(dev_t *, unsigned, unsigned, const char *); extern int register_chrdev_region(dev_t, unsigned, const char *); -extern int register_chrdev(unsigned int, const char *, - const struct file_operations *); -extern void unregister_chrdev(unsigned int, const char *); +extern int __register_chrdev(unsigned int major, unsigned int baseminor, + unsigned int count, const char *name, + const struct file_operations *fops); +extern void __unregister_chrdev(unsigned int major, unsigned int baseminor, + unsigned int count, const char *name); extern void unregister_chrdev_region(dev_t, unsigned); extern void chrdev_show(struct seq_file *,off_t); +static inline int register_chrdev(unsigned int major, const char *name, + const struct file_operations *fops) +{ + return __register_chrdev(major, 0, 256, name, fops); +} + +static inline void unregister_chrdev(unsigned int major, const char *name) +{ + __unregister_chrdev(major, 0, 256, name); +} + /* fs/block_dev.c */ #define BDEVNAME_SIZE 32 /* Largest string for a blockdev identifier */ #define BDEVT_SIZE 10 /* Largest string for MAJ:MIN for blkdev */ @@ -1806,9 +2059,6 @@ extern int __invalidate_device(struct block_device *); extern int invalidate_partition(struct gendisk *, int); #endif extern int invalidate_inodes(struct super_block *); -unsigned long __invalidate_mapping_pages(struct address_space *mapping, - pgoff_t start, pgoff_t end, - bool be_atomic); unsigned long invalidate_mapping_pages(struct address_space *mapping, pgoff_t start, pgoff_t end); @@ -1827,12 +2077,12 @@ static inline void invalidate_remote_inode(struct inode *inode) extern int invalidate_inode_pages2(struct address_space *mapping); extern int invalidate_inode_pages2_range(struct address_space *mapping, pgoff_t start, pgoff_t end); -extern void generic_sync_sb_inodes(struct super_block *sb, - struct writeback_control *wbc); extern int write_inode_now(struct inode *, int); extern int filemap_fdatawrite(struct address_space *); extern int filemap_flush(struct address_space *); extern int filemap_fdatawait(struct address_space *); +extern int filemap_fdatawait_range(struct address_space *, loff_t lstart, + loff_t lend); extern int filemap_write_and_wait(struct address_space *mapping); extern int filemap_write_and_wait_range(struct address_space *mapping, loff_t lstart, loff_t lend); @@ -1843,14 +2093,13 @@ extern int __filemap_fdatawrite_range(struct address_space *mapping, extern int filemap_fdatawrite_range(struct address_space *mapping, loff_t start, loff_t end); +extern int 
vfs_fsync_range(struct file *file, struct dentry *dentry, + loff_t start, loff_t end, int datasync); extern int vfs_fsync(struct file *file, struct dentry *dentry, int datasync); +extern int generic_write_sync(struct file *file, loff_t pos, loff_t count); extern void sync_supers(void); -extern void sync_filesystems(int wait); -extern void __fsync_super(struct super_block *sb); extern void emergency_sync(void); extern void emergency_remount(void); -extern int do_remount_sb(struct super_block *sb, int flags, - void *data, int force); #ifdef CONFIG_BLOCK extern sector_t bmap(struct inode *, sector_t); #endif @@ -1875,17 +2124,16 @@ static inline void allow_write_access(struct file *file) if (file) atomic_inc(&file->f_path.dentry->d_inode->i_writecount); } -extern int do_pipe(int *); extern int do_pipe_flags(int *, int); extern struct file *create_read_pipe(struct file *f, int flags); extern struct file *create_write_pipe(int flags); extern void free_write_pipe(struct file *); extern struct file *do_filp_open(int dfd, const char *pathname, - int open_flag, int mode); + int open_flag, int mode, int acc_mode); extern int may_open(struct path *, int, int); -extern int kernel_read(struct file *, unsigned long, char *, unsigned long); +extern int kernel_read(struct file *, loff_t, char *, unsigned long); extern struct file * open_exec(const char *); /* fs/dcache.c -- generic fs support functions */ @@ -1899,7 +2147,7 @@ extern loff_t default_llseek(struct file *file, loff_t offset, int origin); extern loff_t vfs_llseek(struct file *file, loff_t offset, int origin); -extern struct inode * inode_init_always(struct super_block *, struct inode *); +extern int inode_init_always(struct super_block *, struct inode *); extern void inode_init_once(struct inode *); extern void inode_add_to_lists(struct super_block *, struct inode *); extern void iput(struct inode *); @@ -1926,6 +2174,7 @@ extern void __iget(struct inode * inode); extern void iget_failed(struct inode *); extern void clear_inode(struct inode *); extern void destroy_inode(struct inode *); +extern void __destroy_inode(struct inode *); extern struct inode *new_inode(struct super_block *); extern int should_remove_suid(struct dentry *); extern int file_remove_suid(struct file *); @@ -1947,16 +2196,15 @@ extern int bdev_read_only(struct block_device *); extern int set_blocksize(struct block_device *, int); extern int sb_set_blocksize(struct super_block *, int); extern int sb_min_blocksize(struct super_block *, int); -extern int sb_has_dirty_inodes(struct super_block *); extern int generic_file_mmap(struct file *, struct vm_area_struct *); extern int generic_file_readonly_mmap(struct file *, struct vm_area_struct *); extern int file_read_actor(read_descriptor_t * desc, struct page *page, unsigned long offset, unsigned long size); int generic_write_checks(struct file *file, loff_t *pos, size_t *count, int isblk); extern ssize_t generic_file_aio_read(struct kiocb *, const struct iovec *, unsigned long, loff_t); +extern ssize_t __generic_file_aio_write(struct kiocb *, const struct iovec *, unsigned long, + loff_t *); extern ssize_t generic_file_aio_write(struct kiocb *, const struct iovec *, unsigned long, loff_t); -extern ssize_t generic_file_aio_write_nolock(struct kiocb *, const struct iovec *, - unsigned long, loff_t); extern ssize_t generic_file_direct_write(struct kiocb *, const struct iovec *, unsigned long *, loff_t, loff_t *, size_t, size_t); extern ssize_t generic_file_buffered_write(struct kiocb *, const struct iovec *, @@ -1966,13 +2214,17 
@@ extern ssize_t do_sync_write(struct file *filp, const char __user *buf, size_t l extern int generic_segment_checks(const struct iovec *iov, unsigned long *nr_segs, size_t *count, int access_flags); +/* fs/block_dev.c */ +extern ssize_t blkdev_aio_write(struct kiocb *iocb, const struct iovec *iov, + unsigned long nr_segs, loff_t pos); + /* fs/splice.c */ extern ssize_t generic_file_splice_read(struct file *, loff_t *, struct pipe_inode_info *, size_t, unsigned int); +extern ssize_t default_file_splice_read(struct file *, loff_t *, + struct pipe_inode_info *, size_t, unsigned int); extern ssize_t generic_file_splice_write(struct pipe_inode_info *, struct file *, loff_t *, size_t, unsigned int); -extern ssize_t generic_file_splice_write_nolock(struct pipe_inode_info *, - struct file *, loff_t *, size_t, unsigned int); extern ssize_t generic_splice_sendpage(struct pipe_inode_info *pipe, struct file *out, loff_t *, size_t len, unsigned int flags); extern long do_splice_direct(struct file *in, loff_t *ppos, struct file *out, @@ -2066,9 +2318,8 @@ extern int vfs_readdir(struct file *, filldir_t, void *); extern int vfs_stat(char __user *, struct kstat *); extern int vfs_lstat(char __user *, struct kstat *); -extern int vfs_stat_fd(int dfd, char __user *, struct kstat *); -extern int vfs_lstat_fd(int dfd, char __user *, struct kstat *); extern int vfs_fstat(unsigned int, struct kstat *); +extern int vfs_fstatat(int , char __user *, struct kstat *, int); extern int do_vfs_ioctl(struct file *filp, unsigned int fd, unsigned int cmd, unsigned long arg); @@ -2121,6 +2372,8 @@ extern void simple_release_fs(struct vfsmount **mount, int *count); extern ssize_t simple_read_from_buffer(void __user *to, size_t count, loff_t *ppos, const void *from, size_t available); +extern int simple_fsync(struct file *, struct dentry *, int); + #ifdef CONFIG_MIGRATION extern int buffer_migrate_page(struct address_space *, struct page *, struct page *); @@ -2135,6 +2388,7 @@ extern void file_update_time(struct file *file); extern int generic_show_options(struct seq_file *m, struct vfsmount *mnt); extern void save_mount_options(struct super_block *sb, char *options); +extern void replace_mount_options(struct super_block *sb, char *options); static inline ino_t parent_ino(struct dentry *dentry) { @@ -2165,19 +2419,7 @@ ssize_t simple_transaction_read(struct file *file, char __user *buf, size_t size, loff_t *pos); int simple_transaction_release(struct inode *inode, struct file *file); -static inline void simple_transaction_set(struct file *file, size_t n) -{ - struct simple_transaction_argresp *ar = file->private_data; - - BUG_ON(n > SIMPLE_TRANSACTION_LIMIT); - - /* - * The barrier ensures that ar->size will really remain zero until - * ar->data is ready for reading. 
- */ - smp_mb(); - ar->size = n; -} +void simple_transaction_set(struct file *file, size_t n); /* * simple attribute files @@ -2224,32 +2466,11 @@ ssize_t simple_attr_read(struct file *file, char __user *buf, ssize_t simple_attr_write(struct file *file, const char __user *buf, size_t len, loff_t *ppos); - -#ifdef CONFIG_SECURITY -static inline char *alloc_secdata(void) -{ - return (char *)get_zeroed_page(GFP_KERNEL); -} - -static inline void free_secdata(void *secdata) -{ - free_page((unsigned long)secdata); -} -#else -static inline char *alloc_secdata(void) -{ - return (char *)1; -} - -static inline void free_secdata(void *secdata) -{ } -#endif /* CONFIG_SECURITY */ - struct ctl_table; int proc_nr_files(struct ctl_table *table, int write, struct file *filp, void __user *buffer, size_t *lenp, loff_t *ppos); -int get_filesystem_list(char * buf); +int __init get_filesystem_list(char *buf); #endif /* __KERNEL__ */ #endif /* _LINUX_FS_H */ diff --git a/include/linux/fs_enet_pd.h b/include/linux/fs_enet_pd.h index 8300cab30f9..51b793466ff 100644 --- a/include/linux/fs_enet_pd.h +++ b/include/linux/fs_enet_pd.h @@ -17,6 +17,7 @@ #define FS_ENET_PD_H #include <linux/string.h> +#include <linux/of_mdio.h> #include <asm/types.h> #define FS_ENET_NAME "fs_enet" @@ -130,10 +131,7 @@ struct fs_platform_info { u32 device_flags; - int phy_addr; /* the phy address (-1 no phy) */ - char bus_id[16]; - int phy_irq; /* the phy irq (if it exists) */ - + struct device_node *phy_node; const struct fs_mii_bus_info *bus_info; int rx_ring, tx_ring; /* number of buffers on rx */ diff --git a/include/linux/fs_struct.h b/include/linux/fs_struct.h index a97c053d3a9..78a05bfcd8e 100644 --- a/include/linux/fs_struct.h +++ b/include/linux/fs_struct.h @@ -4,9 +4,10 @@ #include <linux/path.h> struct fs_struct { - atomic_t count; + int users; rwlock_t lock; int umask; + int in_exec; struct path root, pwd; }; @@ -16,6 +17,8 @@ extern void exit_fs(struct task_struct *); extern void set_fs_root(struct fs_struct *, struct path *); extern void set_fs_pwd(struct fs_struct *, struct path *); extern struct fs_struct *copy_fs_struct(struct fs_struct *); -extern void put_fs_struct(struct fs_struct *); +extern void free_fs_struct(struct fs_struct *); +extern void daemonize_fs_struct(void); +extern int unshare_fs_struct(void); #endif /* _LINUX_FS_STRUCT_H */ diff --git a/include/linux/fscache-cache.h b/include/linux/fscache-cache.h new file mode 100644 index 00000000000..84d3532dd3e --- /dev/null +++ b/include/linux/fscache-cache.h @@ -0,0 +1,505 @@ +/* General filesystem caching backing cache interface + * + * Copyright (C) 2004-2007 Red Hat, Inc. All Rights Reserved. + * Written by David Howells (dhowells@redhat.com) + * + * This program is free software; you can redistribute it and/or + * modify it under the terms of the GNU General Public License + * as published by the Free Software Foundation; either version + * 2 of the License, or (at your option) any later version. + * + * NOTE!!! See: + * + * Documentation/filesystems/caching/backend-api.txt + * + * for a description of the cache backend interface declared here. 
+ */ + +#ifndef _LINUX_FSCACHE_CACHE_H +#define _LINUX_FSCACHE_CACHE_H + +#include <linux/fscache.h> +#include <linux/sched.h> +#include <linux/slow-work.h> + +#define NR_MAXCACHES BITS_PER_LONG + +struct fscache_cache; +struct fscache_cache_ops; +struct fscache_object; +struct fscache_operation; + +/* + * cache tag definition + */ +struct fscache_cache_tag { + struct list_head link; + struct fscache_cache *cache; /* cache referred to by this tag */ + unsigned long flags; +#define FSCACHE_TAG_RESERVED 0 /* T if tag is reserved for a cache */ + atomic_t usage; + char name[0]; /* tag name */ +}; + +/* + * cache definition + */ +struct fscache_cache { + const struct fscache_cache_ops *ops; + struct fscache_cache_tag *tag; /* tag representing this cache */ + struct kobject *kobj; /* system representation of this cache */ + struct list_head link; /* link in list of caches */ + size_t max_index_size; /* maximum size of index data */ + char identifier[36]; /* cache label */ + + /* node management */ + struct work_struct op_gc; /* operation garbage collector */ + struct list_head object_list; /* list of data/index objects */ + struct list_head op_gc_list; /* list of ops to be deleted */ + spinlock_t object_list_lock; + spinlock_t op_gc_list_lock; + atomic_t object_count; /* no. of live objects in this cache */ + struct fscache_object *fsdef; /* object for the fsdef index */ + unsigned long flags; +#define FSCACHE_IOERROR 0 /* cache stopped on I/O error */ +#define FSCACHE_CACHE_WITHDRAWN 1 /* cache has been withdrawn */ +}; + +extern wait_queue_head_t fscache_cache_cleared_wq; + +/* + * operation to be applied to a cache object + * - retrieval initiation operations are done in the context of the process + * that issued them, and not in an async thread pool + */ +typedef void (*fscache_operation_release_t)(struct fscache_operation *op); +typedef void (*fscache_operation_processor_t)(struct fscache_operation *op); + +struct fscache_operation { + union { + struct work_struct fast_work; /* record for fast ops */ + struct slow_work slow_work; /* record for (very) slow ops */ + }; + struct list_head pend_link; /* link in object->pending_ops */ + struct fscache_object *object; /* object to be operated upon */ + + unsigned long flags; +#define FSCACHE_OP_TYPE 0x000f /* operation type */ +#define FSCACHE_OP_FAST 0x0001 /* - fast op, processor may not sleep for disk */ +#define FSCACHE_OP_SLOW 0x0002 /* - (very) slow op, processor may sleep for disk */ +#define FSCACHE_OP_MYTHREAD 0x0003 /* - processing is done by the issuing thread, not pool */ +#define FSCACHE_OP_WAITING 4 /* cleared when op is woken */ +#define FSCACHE_OP_EXCLUSIVE 5 /* exclusive op, other ops must wait */ +#define FSCACHE_OP_DEAD 6 /* op is now dead */ + + atomic_t usage; + unsigned debug_id; /* debugging ID */ + + /* operation processor callback + * - can be NULL if FSCACHE_OP_WAITING is going to be used to perform + * the op in a non-pool thread */ + fscache_operation_processor_t processor; + + /* operation releaser */ + fscache_operation_release_t release; +}; + +extern atomic_t fscache_op_debug_id; +extern const struct slow_work_ops fscache_op_slow_work_ops; + +extern void fscache_enqueue_operation(struct fscache_operation *); +extern void fscache_put_operation(struct fscache_operation *); + +/** + * fscache_operation_init - Do basic initialisation of an operation + * @op: The operation to initialise + * @release: The release function to assign + * + * Do basic initialisation of an operation.
The caller must still set flags, + * object, either fast_work or slow_work if necessary, and processor if needed. + */ +static inline void fscache_operation_init(struct fscache_operation *op, + fscache_operation_release_t release) +{ + atomic_set(&op->usage, 1); + op->debug_id = atomic_inc_return(&fscache_op_debug_id); + op->release = release; + INIT_LIST_HEAD(&op->pend_link); +} + +/** + * fscache_operation_init_slow - Do additional initialisation of a slow op + * @op: The operation to initialise + * @processor: The processor function to assign + * + * Do additional initialisation of an operation as required for slow work. + */ +static inline +void fscache_operation_init_slow(struct fscache_operation *op, + fscache_operation_processor_t processor) +{ + op->processor = processor; + slow_work_init(&op->slow_work, &fscache_op_slow_work_ops); +} + +/* + * data read operation + */ +struct fscache_retrieval { + struct fscache_operation op; + struct address_space *mapping; /* netfs pages */ + fscache_rw_complete_t end_io_func; /* function to call on I/O completion */ + void *context; /* netfs read context (pinned) */ + struct list_head to_do; /* list of things to be done by the backend */ + unsigned long start_time; /* time at which retrieval started */ +}; + +typedef int (*fscache_page_retrieval_func_t)(struct fscache_retrieval *op, + struct page *page, + gfp_t gfp); + +typedef int (*fscache_pages_retrieval_func_t)(struct fscache_retrieval *op, + struct list_head *pages, + unsigned *nr_pages, + gfp_t gfp); + +/** + * fscache_get_retrieval - Get an extra reference on a retrieval operation + * @op: The retrieval operation to get a reference on + * + * Get an extra reference on a retrieval operation. + */ +static inline +struct fscache_retrieval *fscache_get_retrieval(struct fscache_retrieval *op) +{ + atomic_inc(&op->op.usage); + return op; +} + +/** + * fscache_enqueue_retrieval - Enqueue a retrieval operation for processing + * @op: The retrieval operation affected + * + * Enqueue a retrieval operation for processing by the FS-Cache thread pool. + */ +static inline void fscache_enqueue_retrieval(struct fscache_retrieval *op) +{ + fscache_enqueue_operation(&op->op); +} + +/** + * fscache_put_retrieval - Drop a reference to a retrieval operation + * @op: The retrieval operation affected + * + * Drop a reference to a retrieval operation. 
+ */ +static inline void fscache_put_retrieval(struct fscache_retrieval *op) +{ + fscache_put_operation(&op->op); +} + +/* + * cached page storage work item + * - used to do three things: + * - batch writes to the cache + * - do cache writes asynchronously + * - defer writes until cache object lookup completion + */ +struct fscache_storage { + struct fscache_operation op; + pgoff_t store_limit; /* don't write more than this */ +}; + +/* + * cache operations + */ +struct fscache_cache_ops { + /* name of cache provider */ + const char *name; + + /* allocate an object record for a cookie */ + struct fscache_object *(*alloc_object)(struct fscache_cache *cache, + struct fscache_cookie *cookie); + + /* look up the object for a cookie */ + void (*lookup_object)(struct fscache_object *object); + + /* finished looking up */ + void (*lookup_complete)(struct fscache_object *object); + + /* increment the usage count on this object (may fail if unmounting) */ + struct fscache_object *(*grab_object)(struct fscache_object *object); + + /* pin an object in the cache */ + int (*pin_object)(struct fscache_object *object); + + /* unpin an object in the cache */ + void (*unpin_object)(struct fscache_object *object); + + /* store the updated auxiliary data on an object */ + void (*update_object)(struct fscache_object *object); + + /* discard the resources pinned by an object and effect retirement if + * necessary */ + void (*drop_object)(struct fscache_object *object); + + /* dispose of a reference to an object */ + void (*put_object)(struct fscache_object *object); + + /* sync a cache */ + void (*sync_cache)(struct fscache_cache *cache); + + /* notification that the attributes of a non-index object (such as + * i_size) have changed */ + int (*attr_changed)(struct fscache_object *object); + + /* reserve space for an object's data and associated metadata */ + int (*reserve_space)(struct fscache_object *object, loff_t i_size); + + /* request a backing block for a page be read or allocated in the + * cache */ + fscache_page_retrieval_func_t read_or_alloc_page; + + /* request backing blocks for a list of pages be read or allocated in + * the cache */ + fscache_pages_retrieval_func_t read_or_alloc_pages; + + /* request a backing block for a page be allocated in the cache so that + * it can be written directly */ + fscache_page_retrieval_func_t allocate_page; + + /* request backing blocks for pages be allocated in the cache so that + * they can be written directly */ + fscache_pages_retrieval_func_t allocate_pages; + + /* write a page to its backing block in the cache */ + int (*write_page)(struct fscache_storage *op, struct page *page); + + /* detach backing block from a page (optional) + * - must release the cookie lock before returning + * - may sleep + */ + void (*uncache_page)(struct fscache_object *object, + struct page *page); + + /* dissociate a cache from all the pages it was backing */ + void (*dissociate_pages)(struct fscache_cache *cache); +}; + +/* + * data file or index object cookie + * - a file will only appear in one cache + * - a request to cache a file may or may not be honoured, subject to + * constraints such as disk space + * - indices are created on disk just-in-time + */ +struct fscache_cookie { + atomic_t usage; /* number of users of this cookie */ + atomic_t n_children; /* number of children of this cookie */ + spinlock_t lock; + struct hlist_head backing_objects; /* object(s) backing this file/index */ + const struct fscache_cookie_def *def; /* definition */ + struct fscache_cookie
*parent; /* parent of this entry */ + void *netfs_data; /* back pointer to netfs */ + struct radix_tree_root stores; /* pages to be stored on this cookie */ +#define FSCACHE_COOKIE_PENDING_TAG 0 /* pages tag: pending write to cache */ + + unsigned long flags; +#define FSCACHE_COOKIE_LOOKING_UP 0 /* T if non-index cookie being looked up still */ +#define FSCACHE_COOKIE_CREATING 1 /* T if non-index object being created still */ +#define FSCACHE_COOKIE_NO_DATA_YET 2 /* T if new object with no cached data yet */ +#define FSCACHE_COOKIE_PENDING_FILL 3 /* T if pending initial fill on object */ +#define FSCACHE_COOKIE_FILLING 4 /* T if filling object incrementally */ +#define FSCACHE_COOKIE_UNAVAILABLE 5 /* T if cookie is unavailable (error, etc) */ +}; + +extern struct fscache_cookie fscache_fsdef_index; + +/* + * on-disk cache file or index handle + */ +struct fscache_object { + enum fscache_object_state { + FSCACHE_OBJECT_INIT, /* object in initial unbound state */ + FSCACHE_OBJECT_LOOKING_UP, /* looking up object */ + FSCACHE_OBJECT_CREATING, /* creating object */ + + /* active states */ + FSCACHE_OBJECT_AVAILABLE, /* cleaning up object after creation */ + FSCACHE_OBJECT_ACTIVE, /* object is usable */ + FSCACHE_OBJECT_UPDATING, /* object is updating */ + + /* terminal states */ + FSCACHE_OBJECT_DYING, /* object waiting for accessors to finish */ + FSCACHE_OBJECT_LC_DYING, /* object cleaning up after lookup/create */ + FSCACHE_OBJECT_ABORT_INIT, /* abort the init state */ + FSCACHE_OBJECT_RELEASING, /* releasing object */ + FSCACHE_OBJECT_RECYCLING, /* retiring object */ + FSCACHE_OBJECT_WITHDRAWING, /* withdrawing object */ + FSCACHE_OBJECT_DEAD, /* object is now dead */ + } state; + + int debug_id; /* debugging ID */ + int n_children; /* number of child objects */ + int n_ops; /* number of ops outstanding on object */ + int n_obj_ops; /* number of object ops outstanding on object */ + int n_in_progress; /* number of ops in progress */ + int n_exclusive; /* number of exclusive ops queued */ + spinlock_t lock; /* state and operations lock */ + + unsigned long lookup_jif; /* time at which lookup started */ + unsigned long event_mask; /* events this object is interested in */ + unsigned long events; /* events to be processed by this object + * (order is important - using fls) */ +#define FSCACHE_OBJECT_EV_REQUEUE 0 /* T if object should be requeued */ +#define FSCACHE_OBJECT_EV_UPDATE 1 /* T if object should be updated */ +#define FSCACHE_OBJECT_EV_CLEARED 2 /* T if accessors all gone */ +#define FSCACHE_OBJECT_EV_ERROR 3 /* T if fatal error occurred during processing */ +#define FSCACHE_OBJECT_EV_RELEASE 4 /* T if netfs requested object release */ +#define FSCACHE_OBJECT_EV_RETIRE 5 /* T if netfs requested object retirement */ +#define FSCACHE_OBJECT_EV_WITHDRAW 6 /* T if cache requested object withdrawal */ + + unsigned long flags; +#define FSCACHE_OBJECT_LOCK 0 /* T if object is busy being processed */ +#define FSCACHE_OBJECT_PENDING_WRITE 1 /* T if object has pending write */ +#define FSCACHE_OBJECT_WAITING 2 /* T if object is waiting on its parent */ + + struct list_head cache_link; /* link in cache->object_list */ + struct hlist_node cookie_link; /* link in cookie->backing_objects */ + struct fscache_cache *cache; /* cache that supplied this object */ + struct fscache_cookie *cookie; /* netfs's file/index object */ + struct fscache_object *parent; /* parent object */ + struct slow_work work; /* attention scheduling record */ + struct list_head dependents; /* FIFO of dependent objects */ + 
struct list_head dep_link; /* link in parent's dependents list */ + struct list_head pending_ops; /* unstarted operations on this object */ + pgoff_t store_limit; /* current storage limit */ +}; + +extern const char *fscache_object_states[]; + +#define fscache_object_is_active(obj) \ + (!test_bit(FSCACHE_IOERROR, &(obj)->cache->flags) && \ + (obj)->state >= FSCACHE_OBJECT_AVAILABLE && \ + (obj)->state < FSCACHE_OBJECT_DYING) + +extern const struct slow_work_ops fscache_object_slow_work_ops; + +/** + * fscache_object_init - Initialise a cache object description + * @object: Object description + * + * Initialise a cache object description to its basic values. + * + * See Documentation/filesystems/caching/backend-api.txt for a complete + * description. + */ +static inline +void fscache_object_init(struct fscache_object *object, + struct fscache_cookie *cookie, + struct fscache_cache *cache) +{ + atomic_inc(&cache->object_count); + + object->state = FSCACHE_OBJECT_INIT; + spin_lock_init(&object->lock); + INIT_LIST_HEAD(&object->cache_link); + INIT_HLIST_NODE(&object->cookie_link); + vslow_work_init(&object->work, &fscache_object_slow_work_ops); + INIT_LIST_HEAD(&object->dependents); + INIT_LIST_HEAD(&object->dep_link); + INIT_LIST_HEAD(&object->pending_ops); + object->n_children = 0; + object->n_ops = object->n_in_progress = object->n_exclusive = 0; + object->events = object->event_mask = 0; + object->flags = 0; + object->store_limit = 0; + object->cache = cache; + object->cookie = cookie; + object->parent = NULL; +} + +extern void fscache_object_lookup_negative(struct fscache_object *object); +extern void fscache_obtained_object(struct fscache_object *object); + +/** + * fscache_object_destroyed - Note destruction of an object in a cache + * @cache: The cache from which the object came + * + * Note the destruction and deallocation of an object record in a cache. + */ +static inline void fscache_object_destroyed(struct fscache_cache *cache) +{ + if (atomic_dec_and_test(&cache->object_count)) + wake_up_all(&fscache_cache_cleared_wq); +} + +/** + * fscache_object_lookup_error - Note an object encountered an error + * @object: The object on which the error was encountered + * + * Note that an object encountered a fatal error (usually an I/O error) and + * that it should be withdrawn as soon as possible. + */ +static inline void fscache_object_lookup_error(struct fscache_object *object) +{ + set_bit(FSCACHE_OBJECT_EV_ERROR, &object->events); +} + +/** + * fscache_set_store_limit - Set the maximum size to be stored in an object + * @object: The object to set the maximum on + * @i_size: The limit to set in bytes + * + * Set the maximum size an object is permitted to reach, implying the highest + * byte that may be written. Intended to be called by the attr_changed() op. + * + * See Documentation/filesystems/caching/backend-api.txt for a complete + * description. + */ +static inline +void fscache_set_store_limit(struct fscache_object *object, loff_t i_size) +{ + object->store_limit = i_size >> PAGE_SHIFT; + if (i_size & ~PAGE_MASK) + object->store_limit++; +} + +/** + * fscache_end_io - End a retrieval operation on a page + * @op: The FS-Cache operation covering the retrieval + * @page: The page that was to be fetched + * @error: The error code (0 if successful) + * + * Note the end of an operation to retrieve a page, as covered by a particular + * operation record. 
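
As a sketch of how a backend might use fscache_set_store_limit() from its attr_changed() op (the mycache_* name is hypothetical, and get_attr() is only supplied for data-object cookies, not indices):

#include <linux/fscache-cache.h>

static int mycache_attr_changed(struct fscache_object *object)
{
	uint64_t i_size = 0;

	/* ask the netfs for the current file size via its cookie definition */
	object->cookie->def->get_attr(object->cookie->netfs_data, &i_size);

	/* cap what may be written into the backing object */
	fscache_set_store_limit(object, (loff_t)i_size);
	return 0;
}
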
+ */ +static inline void fscache_end_io(struct fscache_retrieval *op, + struct page *page, int error) +{ + op->end_io_func(page, op->context, error); +} + +/* + * out-of-line cache backend functions + */ +extern void fscache_init_cache(struct fscache_cache *cache, + const struct fscache_cache_ops *ops, + const char *idfmt, + ...) __attribute__ ((format (printf, 3, 4))); + +extern int fscache_add_cache(struct fscache_cache *cache, + struct fscache_object *fsdef, + const char *tagname); +extern void fscache_withdraw_cache(struct fscache_cache *cache); + +extern void fscache_io_error(struct fscache_cache *cache); + +extern void fscache_mark_pages_cached(struct fscache_retrieval *op, + struct pagevec *pagevec); + +extern enum fscache_checkaux fscache_check_aux(struct fscache_object *object, + const void *data, + uint16_t datalen); + +#endif /* _LINUX_FSCACHE_CACHE_H */ diff --git a/include/linux/fscache.h b/include/linux/fscache.h new file mode 100644 index 00000000000..6d8ee466e0a --- /dev/null +++ b/include/linux/fscache.h @@ -0,0 +1,618 @@ +/* General filesystem caching interface + * + * Copyright (C) 2004-2007 Red Hat, Inc. All Rights Reserved. + * Written by David Howells (dhowells@redhat.com) + * + * This program is free software; you can redistribute it and/or + * modify it under the terms of the GNU General Public License + * as published by the Free Software Foundation; either version + * 2 of the License, or (at your option) any later version. + * + * NOTE!!! See: + * + * Documentation/filesystems/caching/netfs-api.txt + * + * for a description of the network filesystem interface declared here. + */ + +#ifndef _LINUX_FSCACHE_H +#define _LINUX_FSCACHE_H + +#include <linux/fs.h> +#include <linux/list.h> +#include <linux/pagemap.h> +#include <linux/pagevec.h> + +#if defined(CONFIG_FSCACHE) || defined(CONFIG_FSCACHE_MODULE) +#define fscache_available() (1) +#define fscache_cookie_valid(cookie) (cookie) +#else +#define fscache_available() (0) +#define fscache_cookie_valid(cookie) (0) +#endif + + +/* + * overload PG_private_2 to give us PG_fscache - this is used to indicate that + * a page is currently backed by a local disk cache + */ +#define PageFsCache(page) PagePrivate2((page)) +#define SetPageFsCache(page) SetPagePrivate2((page)) +#define ClearPageFsCache(page) ClearPagePrivate2((page)) +#define TestSetPageFsCache(page) TestSetPagePrivate2((page)) +#define TestClearPageFsCache(page) TestClearPagePrivate2((page)) + +/* pattern used to fill dead space in an index entry */ +#define FSCACHE_INDEX_DEADFILL_PATTERN 0x79 + +struct pagevec; +struct fscache_cache_tag; +struct fscache_cookie; +struct fscache_netfs; + +typedef void (*fscache_rw_complete_t)(struct page *page, + void *context, + int error); + +/* result of index entry consultation */ +enum fscache_checkaux { + FSCACHE_CHECKAUX_OKAY, /* entry okay as is */ + FSCACHE_CHECKAUX_NEEDS_UPDATE, /* entry requires update */ + FSCACHE_CHECKAUX_OBSOLETE, /* entry requires deletion */ +}; + +/* + * fscache cookie definition + */ +struct fscache_cookie_def { + /* name of cookie type */ + char name[16]; + + /* cookie type */ + uint8_t type; +#define FSCACHE_COOKIE_TYPE_INDEX 0 +#define FSCACHE_COOKIE_TYPE_DATAFILE 1 + + /* select the cache into which to insert an entry in this index + * - optional + * - should return a cache identifier or NULL to cause the cache to be + * inherited from the parent if possible or the first cache picked + * for a non-index file if not + */ + struct fscache_cache_tag *(*select_cache)( + const void 
*parent_netfs_data, + const void *cookie_netfs_data); + + /* get an index key + * - should store the key data in the buffer + * - should return the amount of amount stored + * - not permitted to return an error + * - the netfs data from the cookie being used as the source is + * presented + */ + uint16_t (*get_key)(const void *cookie_netfs_data, + void *buffer, + uint16_t bufmax); + + /* get certain file attributes from the netfs data + * - this function can be absent for an index + * - not permitted to return an error + * - the netfs data from the cookie being used as the source is + * presented + */ + void (*get_attr)(const void *cookie_netfs_data, uint64_t *size); + + /* get the auxilliary data from netfs data + * - this function can be absent if the index carries no state data + * - should store the auxilliary data in the buffer + * - should return the amount of amount stored + * - not permitted to return an error + * - the netfs data from the cookie being used as the source is + * presented + */ + uint16_t (*get_aux)(const void *cookie_netfs_data, + void *buffer, + uint16_t bufmax); + + /* consult the netfs about the state of an object + * - this function can be absent if the index carries no state data + * - the netfs data from the cookie being used as the target is + * presented, as is the auxilliary data + */ + enum fscache_checkaux (*check_aux)(void *cookie_netfs_data, + const void *data, + uint16_t datalen); + + /* get an extra reference on a read context + * - this function can be absent if the completion function doesn't + * require a context + */ + void (*get_context)(void *cookie_netfs_data, void *context); + + /* release an extra reference on a read context + * - this function can be absent if the completion function doesn't + * require a context + */ + void (*put_context)(void *cookie_netfs_data, void *context); + + /* indicate pages that now have cache metadata retained + * - this function should mark the specified pages as now being cached + * - the pages will have been marked with PG_fscache before this is + * called, so this is optional + */ + void (*mark_pages_cached)(void *cookie_netfs_data, + struct address_space *mapping, + struct pagevec *cached_pvec); + + /* indicate the cookie is no longer cached + * - this function is called when the backing store currently caching + * a cookie is removed + * - the netfs should use this to clean up any markers indicating + * cached pages + * - this is mandatory for any object that may have data + */ + void (*now_uncached)(void *cookie_netfs_data); +}; + +/* + * fscache cached network filesystem type + * - name, version and ops must be filled in before registration + * - all other fields will be set during registration + */ +struct fscache_netfs { + uint32_t version; /* indexing version */ + const char *name; /* filesystem name */ + struct fscache_cookie *primary_index; + struct list_head link; /* internal link */ +}; + +/* + * slow-path functions for when there is actually caching available, and the + * netfs does actually have a valid token + * - these are not to be called directly + * - these are undefined symbols when FS-Cache is not configured and the + * optimiser takes care of not using them + */ +extern int __fscache_register_netfs(struct fscache_netfs *); +extern void __fscache_unregister_netfs(struct fscache_netfs *); +extern struct fscache_cache_tag *__fscache_lookup_cache_tag(const char *); +extern void __fscache_release_cache_tag(struct fscache_cache_tag *); + +extern struct fscache_cookie *__fscache_acquire_cookie( 
+ struct fscache_cookie *, + const struct fscache_cookie_def *, + void *); +extern void __fscache_relinquish_cookie(struct fscache_cookie *, int); +extern void __fscache_update_cookie(struct fscache_cookie *); +extern int __fscache_attr_changed(struct fscache_cookie *); +extern int __fscache_read_or_alloc_page(struct fscache_cookie *, + struct page *, + fscache_rw_complete_t, + void *, + gfp_t); +extern int __fscache_read_or_alloc_pages(struct fscache_cookie *, + struct address_space *, + struct list_head *, + unsigned *, + fscache_rw_complete_t, + void *, + gfp_t); +extern int __fscache_alloc_page(struct fscache_cookie *, struct page *, gfp_t); +extern int __fscache_write_page(struct fscache_cookie *, struct page *, gfp_t); +extern void __fscache_uncache_page(struct fscache_cookie *, struct page *); +extern bool __fscache_check_page_write(struct fscache_cookie *, struct page *); +extern void __fscache_wait_on_page_write(struct fscache_cookie *, struct page *); + +/** + * fscache_register_netfs - Register a filesystem as desiring caching services + * @netfs: The description of the filesystem + * + * Register a filesystem as desiring caching services if they're available. + * + * See Documentation/filesystems/caching/netfs-api.txt for a complete + * description. + */ +static inline +int fscache_register_netfs(struct fscache_netfs *netfs) +{ + if (fscache_available()) + return __fscache_register_netfs(netfs); + else + return 0; +} + +/** + * fscache_unregister_netfs - Indicate that a filesystem no longer desires + * caching services + * @netfs: The description of the filesystem + * + * Indicate that a filesystem no longer desires caching services for the + * moment. + * + * See Documentation/filesystems/caching/netfs-api.txt for a complete + * description. + */ +static inline +void fscache_unregister_netfs(struct fscache_netfs *netfs) +{ + if (fscache_available()) + __fscache_unregister_netfs(netfs); +} + +/** + * fscache_lookup_cache_tag - Look up a cache tag + * @name: The name of the tag to search for + * + * Acquire a specific cache referral tag that can be used to select a specific + * cache in which to cache an index. + * + * See Documentation/filesystems/caching/netfs-api.txt for a complete + * description. + */ +static inline +struct fscache_cache_tag *fscache_lookup_cache_tag(const char *name) +{ + if (fscache_available()) + return __fscache_lookup_cache_tag(name); + else + return NULL; +} + +/** + * fscache_release_cache_tag - Release a cache tag + * @tag: The tag to release + * + * Release a reference to a cache referral tag previously looked up. + * + * See Documentation/filesystems/caching/netfs-api.txt for a complete + * description. + */ +static inline +void fscache_release_cache_tag(struct fscache_cache_tag *tag) +{ + if (fscache_available()) + __fscache_release_cache_tag(tag); +} + +/** + * fscache_acquire_cookie - Acquire a cookie to represent a cache object + * @parent: The cookie that's to be the parent of this one + * @def: A description of the cache object, including callback operations + * @netfs_data: An arbitrary piece of data to be kept in the cookie to + * represent the cache object to the netfs + * + * This function is used to inform FS-Cache about part of an index hierarchy + * that can be used to locate files. This is done by requesting a cookie for + * each index in the path to the file. + * + * See Documentation/filesystems/caching/netfs-api.txt for a complete + * description. 
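
A sketch of the registration and cookie-acquisition sequence from a hypothetical netfs ("myfs"); all myfs_* names are invented, only the declarations above are assumed.

#include <linux/fscache.h>
#include <linux/string.h>

/* Hypothetical index key: a NUL-terminated server name kept in netfs_data. */
static uint16_t myfs_server_get_key(const void *cookie_netfs_data,
				    void *buffer, uint16_t bufmax)
{
	const char *name = cookie_netfs_data;
	uint16_t len = strnlen(name, bufmax);

	memcpy(buffer, name, len);
	return len;
}

static struct fscache_netfs myfs_cache_netfs = {
	.name		= "myfs",
	.version	= 0,
};

static const struct fscache_cookie_def myfs_server_index_def = {
	.name		= "myfs.server",
	.type		= FSCACHE_COOKIE_TYPE_INDEX,
	.get_key	= myfs_server_get_key,
};

static struct fscache_cookie *myfs_server_cookie;

static int myfs_cache_init(const char *server_name)
{
	int ret;

	/* normally done once at module init */
	ret = fscache_register_netfs(&myfs_cache_netfs);
	if (ret < 0)
		return ret;

	/* hang a per-server index off the netfs's primary index */
	myfs_server_cookie =
		fscache_acquire_cookie(myfs_cache_netfs.primary_index,
				       &myfs_server_index_def,
				       (void *)server_name);
	return 0;
}
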
+ */ +static inline +struct fscache_cookie *fscache_acquire_cookie( + struct fscache_cookie *parent, + const struct fscache_cookie_def *def, + void *netfs_data) +{ + if (fscache_cookie_valid(parent)) + return __fscache_acquire_cookie(parent, def, netfs_data); + else + return NULL; +} + +/** + * fscache_relinquish_cookie - Return the cookie to the cache, maybe discarding + * it + * @cookie: The cookie being returned + * @retire: True if the cache object the cookie represents is to be discarded + * + * This function returns a cookie to the cache, forcibly discarding the + * associated cache object if retire is set to true. + * + * See Documentation/filesystems/caching/netfs-api.txt for a complete + * description. + */ +static inline +void fscache_relinquish_cookie(struct fscache_cookie *cookie, int retire) +{ + if (fscache_cookie_valid(cookie)) + __fscache_relinquish_cookie(cookie, retire); +} + +/** + * fscache_update_cookie - Request that a cache object be updated + * @cookie: The cookie representing the cache object + * + * Request an update of the index data for the cache object associated with the + * cookie. + * + * See Documentation/filesystems/caching/netfs-api.txt for a complete + * description. + */ +static inline +void fscache_update_cookie(struct fscache_cookie *cookie) +{ + if (fscache_cookie_valid(cookie)) + __fscache_update_cookie(cookie); +} + +/** + * fscache_pin_cookie - Pin a data-storage cache object in its cache + * @cookie: The cookie representing the cache object + * + * Permit data-storage cache objects to be pinned in the cache. + * + * See Documentation/filesystems/caching/netfs-api.txt for a complete + * description. + */ +static inline +int fscache_pin_cookie(struct fscache_cookie *cookie) +{ + return -ENOBUFS; +} + +/** + * fscache_pin_cookie - Unpin a data-storage cache object in its cache + * @cookie: The cookie representing the cache object + * + * Permit data-storage cache objects to be unpinned from the cache. + * + * See Documentation/filesystems/caching/netfs-api.txt for a complete + * description. + */ +static inline +void fscache_unpin_cookie(struct fscache_cookie *cookie) +{ +} + +/** + * fscache_attr_changed - Notify cache that an object's attributes changed + * @cookie: The cookie representing the cache object + * + * Send a notification to the cache indicating that an object's attributes have + * changed. This includes the data size. These attributes will be obtained + * through the get_attr() cookie definition op. + * + * See Documentation/filesystems/caching/netfs-api.txt for a complete + * description. + */ +static inline +int fscache_attr_changed(struct fscache_cookie *cookie) +{ + if (fscache_cookie_valid(cookie)) + return __fscache_attr_changed(cookie); + else + return -ENOBUFS; +} + +/** + * fscache_reserve_space - Reserve data space for a cached object + * @cookie: The cookie representing the cache object + * @i_size: The amount of space to be reserved + * + * Reserve an amount of space in the cache for the cache object attached to a + * cookie so that a write to that object within the space can always be + * honoured. + * + * See Documentation/filesystems/caching/netfs-api.txt for a complete + * description. 
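
A sketch of how a hypothetical netfs might drive the notification and relinquish calls above (myfs_* names invented):

#include <linux/fscache.h>

/* Tell the cache that the file size changed; the new size is fetched by
 * FS-Cache through the cookie's get_attr() op. */
static void myfs_cache_size_changed(struct fscache_cookie *cookie)
{
	if (fscache_attr_changed(cookie) == -ENOBUFS)
		pr_debug("myfs: cache has no space for resized object\n");
}

/* On file deletion, retire the backing object rather than just dropping
 * the cookie. */
static void myfs_cache_delete_file(struct fscache_cookie *cookie)
{
	fscache_relinquish_cookie(cookie, 1 /* retire */);
}
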
+ */ +static inline +int fscache_reserve_space(struct fscache_cookie *cookie, loff_t size) +{ + return -ENOBUFS; +} + +/** + * fscache_read_or_alloc_page - Read a page from the cache or allocate a block + * in which to store it + * @cookie: The cookie representing the cache object + * @page: The netfs page to fill if possible + * @end_io_func: The callback to invoke when and if the page is filled + * @context: An arbitrary piece of data to pass on to end_io_func() + * @gfp: The conditions under which memory allocation should be made + * + * Read a page from the cache, or if that's not possible make a potential + * one-block reservation in the cache into which the page may be stored once + * fetched from the server. + * + * If the page is not backed by the cache object, or if it there's some reason + * it can't be, -ENOBUFS will be returned and nothing more will be done for + * that page. + * + * Else, if that page is backed by the cache, a read will be initiated directly + * to the netfs's page and 0 will be returned by this function. The + * end_io_func() callback will be invoked when the operation terminates on a + * completion or failure. Note that the callback may be invoked before the + * return. + * + * Else, if the page is unbacked, -ENODATA is returned and a block may have + * been allocated in the cache. + * + * See Documentation/filesystems/caching/netfs-api.txt for a complete + * description. + */ +static inline +int fscache_read_or_alloc_page(struct fscache_cookie *cookie, + struct page *page, + fscache_rw_complete_t end_io_func, + void *context, + gfp_t gfp) +{ + if (fscache_cookie_valid(cookie)) + return __fscache_read_or_alloc_page(cookie, page, end_io_func, + context, gfp); + else + return -ENOBUFS; +} + +/** + * fscache_read_or_alloc_pages - Read pages from the cache and/or allocate + * blocks in which to store them + * @cookie: The cookie representing the cache object + * @mapping: The netfs inode mapping to which the pages will be attached + * @pages: A list of potential netfs pages to be filled + * @end_io_func: The callback to invoke when and if each page is filled + * @context: An arbitrary piece of data to pass on to end_io_func() + * @gfp: The conditions under which memory allocation should be made + * + * Read a set of pages from the cache, or if that's not possible, attempt to + * make a potential one-block reservation for each page in the cache into which + * that page may be stored once fetched from the server. + * + * If some pages are not backed by the cache object, or if it there's some + * reason they can't be, -ENOBUFS will be returned and nothing more will be + * done for that pages. + * + * Else, if some of the pages are backed by the cache, a read will be initiated + * directly to the netfs's page and 0 will be returned by this function. The + * end_io_func() callback will be invoked when the operation terminates on a + * completion or failure. Note that the callback may be invoked before the + * return. + * + * Else, if a page is unbacked, -ENODATA is returned and a block may have + * been allocated in the cache. + * + * Because the function may want to return all of -ENOBUFS, -ENODATA and 0 in + * regard to different pages, the return values are prioritised in that order. + * Any pages submitted for reading are removed from the pages list. + * + * See Documentation/filesystems/caching/netfs-api.txt for a complete + * description. 
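
To make the return-value convention above concrete, a hypothetical netfs ->readpage() path might look like the following (myfs_* names invented):

#include <linux/fscache.h>
#include <linux/pagemap.h>

/* Completion callback matching fscache_rw_complete_t. */
static void myfs_readpage_from_cache_complete(struct page *page,
					      void *context, int error)
{
	if (!error)
		SetPageUptodate(page);
	unlock_page(page);
}

/* Try the cache first; on -ENODATA or -ENOBUFS the caller falls back to
 * reading from the server (and, for -ENODATA, may later push the page back
 * with fscache_write_page()). */
static int myfs_readpage_from_cache(struct fscache_cookie *cookie,
				    struct page *page)
{
	int ret;

	ret = fscache_read_or_alloc_page(cookie, page,
					 myfs_readpage_from_cache_complete,
					 NULL, GFP_KERNEL);
	if (ret == 0)
		return 0;	/* read submitted; callback finishes the page */

	return ret;		/* -ENODATA or -ENOBUFS: read from the server */
}
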
+ */ +static inline +int fscache_read_or_alloc_pages(struct fscache_cookie *cookie, + struct address_space *mapping, + struct list_head *pages, + unsigned *nr_pages, + fscache_rw_complete_t end_io_func, + void *context, + gfp_t gfp) +{ + if (fscache_cookie_valid(cookie)) + return __fscache_read_or_alloc_pages(cookie, mapping, pages, + nr_pages, end_io_func, + context, gfp); + else + return -ENOBUFS; +} + +/** + * fscache_alloc_page - Allocate a block in which to store a page + * @cookie: The cookie representing the cache object + * @page: The netfs page to allocate a page for + * @gfp: The conditions under which memory allocation should be made + * + * Request Allocation a block in the cache in which to store a netfs page + * without retrieving any contents from the cache. + * + * If the page is not backed by a file then -ENOBUFS will be returned and + * nothing more will be done, and no reservation will be made. + * + * Else, a block will be allocated if one wasn't already, and 0 will be + * returned + * + * See Documentation/filesystems/caching/netfs-api.txt for a complete + * description. + */ +static inline +int fscache_alloc_page(struct fscache_cookie *cookie, + struct page *page, + gfp_t gfp) +{ + if (fscache_cookie_valid(cookie)) + return __fscache_alloc_page(cookie, page, gfp); + else + return -ENOBUFS; +} + +/** + * fscache_write_page - Request storage of a page in the cache + * @cookie: The cookie representing the cache object + * @page: The netfs page to store + * @gfp: The conditions under which memory allocation should be made + * + * Request the contents of the netfs page be written into the cache. This + * request may be ignored if no cache block is currently allocated, in which + * case it will return -ENOBUFS. + * + * If a cache block was already allocated, a write will be initiated and 0 will + * be returned. The PG_fscache_write page bit is set immediately and will then + * be cleared at the completion of the write to indicate the success or failure + * of the operation. Note that the completion may happen before the return. + * + * See Documentation/filesystems/caching/netfs-api.txt for a complete + * description. + */ +static inline +int fscache_write_page(struct fscache_cookie *cookie, + struct page *page, + gfp_t gfp) +{ + if (fscache_cookie_valid(cookie)) + return __fscache_write_page(cookie, page, gfp); + else + return -ENOBUFS; +} + +/** + * fscache_uncache_page - Indicate that caching is no longer required on a page + * @cookie: The cookie representing the cache object + * @page: The netfs page that was being cached. + * + * Tell the cache that we no longer want a page to be cached and that it should + * remove any knowledge of the netfs page it may have. + * + * Note that this cannot cancel any outstanding I/O operations between this + * page and the cache. + * + * See Documentation/filesystems/caching/netfs-api.txt for a complete + * description. + */ +static inline +void fscache_uncache_page(struct fscache_cookie *cookie, + struct page *page) +{ + if (fscache_cookie_valid(cookie)) + __fscache_uncache_page(cookie, page); +} + +/** + * fscache_check_page_write - Ask if a page is being writing to the cache + * @cookie: The cookie representing the cache object + * @page: The netfs page that is being cached. + * + * Ask the cache if a page is being written to the cache. + * + * See Documentation/filesystems/caching/netfs-api.txt for a complete + * description. 
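
A sketch of the corresponding store and release paths in a hypothetical netfs (myfs_* names invented), following the usual pattern of uncaching a page the cache refuses:

#include <linux/fscache.h>

/* Offer a freshly filled page to the cache; forget it if the cache cannot
 * take it. */
static void myfs_readpage_to_cache(struct fscache_cookie *cookie,
				   struct page *page)
{
	if (fscache_write_page(cookie, page, GFP_KERNEL) != 0) {
		fscache_uncache_page(cookie, page);
		ClearPageFsCache(page);
	}
}

/* A ->releasepage()-style helper must not let go of a page the cache is
 * still writing out. */
static int myfs_release_page_from_cache(struct fscache_cookie *cookie,
					struct page *page)
{
	if (fscache_check_page_write(cookie, page))
		return 0;	/* still being written to the cache */

	fscache_uncache_page(cookie, page);
	ClearPageFsCache(page);
	return 1;
}
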
+ */ +static inline +bool fscache_check_page_write(struct fscache_cookie *cookie, + struct page *page) +{ + if (fscache_cookie_valid(cookie)) + return __fscache_check_page_write(cookie, page); + return false; +} + +/** + * fscache_wait_on_page_write - Wait for a page to complete writing to the cache + * @cookie: The cookie representing the cache object + * @page: The netfs page that is being cached. + * + * Ask the cache to wake us up when a page is no longer being written to the + * cache. + * + * See Documentation/filesystems/caching/netfs-api.txt for a complete + * description. + */ +static inline +void fscache_wait_on_page_write(struct fscache_cookie *cookie, + struct page *page) +{ + if (fscache_cookie_valid(cookie)) + __fscache_wait_on_page_write(cookie, page); +} + +#endif /* _LINUX_FSCACHE_H */ diff --git a/include/linux/fsl_devices.h b/include/linux/fsl_devices.h index d9051d717d2..43fc95d822d 100644 --- a/include/linux/fsl_devices.h +++ b/include/linux/fsl_devices.h @@ -18,7 +18,6 @@ #define _FSL_DEVICE_H_ #include <linux/types.h> -#include <linux/phy.h> /* * Some conventions on how we handle peripherals on Freescale chips @@ -44,31 +43,6 @@ * */ -struct gianfar_platform_data { - /* device specific information */ - u32 device_flags; - char bus_id[BUS_ID_SIZE]; - phy_interface_t interface; -}; - -struct gianfar_mdio_data { - /* board specific information */ - int irq[32]; -}; - -/* Flags in gianfar_platform_data */ -#define FSL_GIANFAR_BRD_HAS_PHY_INTR 0x00000001 /* set or use a timer */ -#define FSL_GIANFAR_BRD_IS_REDUCED 0x00000002 /* Set if RGMII, RMII */ - -struct fsl_i2c_platform_data { - /* device specific information */ - u32 device_flags; -}; - -/* Flags related to I2C device features */ -#define FSL_I2C_DEV_SEPARATE_DFSRR 0x00000001 -#define FSL_I2C_DEV_CLOCK_5200 0x00000002 - enum fsl_usb2_operating_modes { FSL_USB2_MPH_HOST, FSL_USB2_DR_HOST, @@ -95,14 +69,15 @@ struct fsl_usb2_platform_data { #define FSL_USB2_PORT0_ENABLED 0x00000001 #define FSL_USB2_PORT1_ENABLED 0x00000002 +struct spi_device; + struct fsl_spi_platform_data { u32 initial_spmode; /* initial SPMODE value */ - u16 bus_num; + s16 bus_num; bool qe_mode; /* board specific information */ u16 max_chipselect; - void (*activate_cs)(u8 cs, u8 polarity); - void (*deactivate_cs)(u8 cs, u8 polarity); + void (*cs_control)(struct spi_device *spi, bool on); u32 sysclk; }; diff --git a/include/linux/fsnotify.h b/include/linux/fsnotify.h index 00fbd5b245c..936f9aa8bb9 100644 --- a/include/linux/fsnotify.h +++ b/include/linux/fsnotify.h @@ -13,6 +13,7 @@ #include <linux/dnotify.h> #include <linux/inotify.h> +#include <linux/fsnotify_backend.h> #include <linux/audit.h> /* @@ -22,19 +23,45 @@ static inline void fsnotify_d_instantiate(struct dentry *entry, struct inode *inode) { + __fsnotify_d_instantiate(entry, inode); + inotify_d_instantiate(entry, inode); } +/* Notify this dentry's parent about a child's events. */ +static inline void fsnotify_parent(struct dentry *dentry, __u32 mask) +{ + __fsnotify_parent(dentry, mask); + + inotify_dentry_parent_queue_event(dentry, mask, 0, dentry->d_name.name); +} + /* * fsnotify_d_move - entry has been moved * Called with dcache_lock and entry->d_lock held. */ static inline void fsnotify_d_move(struct dentry *entry) { + /* + * On move we need to update entry->d_flags to indicate if the new parent + * cares about events from this entry. 
+ */ + __fsnotify_update_dcache_flags(entry); + inotify_d_move(entry); } /* + * fsnotify_link_count - inode's link count changed + */ +static inline void fsnotify_link_count(struct inode *inode) +{ + inotify_inode_queue_event(inode, IN_ATTRIB, 0, NULL, NULL); + + fsnotify(inode, FS_ATTRIB, inode, FSNOTIFY_EVENT_INODE, NULL, 0); +} + +/* * fsnotify_move - file old_name at old_dir was moved to new_name at new_dir */ static inline void fsnotify_move(struct inode *old_dir, struct inode *new_dir, @@ -42,42 +69,62 @@ static inline void fsnotify_move(struct inode *old_dir, struct inode *new_dir, int isdir, struct inode *target, struct dentry *moved) { struct inode *source = moved->d_inode; - u32 cookie = inotify_get_cookie(); + u32 in_cookie = inotify_get_cookie(); + u32 fs_cookie = fsnotify_get_cookie(); + __u32 old_dir_mask = (FS_EVENT_ON_CHILD | FS_MOVED_FROM); + __u32 new_dir_mask = (FS_EVENT_ON_CHILD | FS_MOVED_TO); if (old_dir == new_dir) - inode_dir_notify(old_dir, DN_RENAME); - else { - inode_dir_notify(old_dir, DN_DELETE); - inode_dir_notify(new_dir, DN_CREATE); - } + old_dir_mask |= FS_DN_RENAME; - if (isdir) + if (isdir) { isdir = IN_ISDIR; - inotify_inode_queue_event(old_dir, IN_MOVED_FROM|isdir,cookie,old_name, + old_dir_mask |= FS_IN_ISDIR; + new_dir_mask |= FS_IN_ISDIR; + } + + inotify_inode_queue_event(old_dir, IN_MOVED_FROM|isdir, in_cookie, old_name, source); - inotify_inode_queue_event(new_dir, IN_MOVED_TO|isdir, cookie, new_name, + inotify_inode_queue_event(new_dir, IN_MOVED_TO|isdir, in_cookie, new_name, source); + fsnotify(old_dir, old_dir_mask, old_dir, FSNOTIFY_EVENT_INODE, old_name, fs_cookie); + fsnotify(new_dir, new_dir_mask, new_dir, FSNOTIFY_EVENT_INODE, new_name, fs_cookie); + if (target) { inotify_inode_queue_event(target, IN_DELETE_SELF, 0, NULL, NULL); inotify_inode_is_dead(target); + + /* this is really a link_count change not a removal */ + fsnotify_link_count(target); } if (source) { inotify_inode_queue_event(source, IN_MOVE_SELF, 0, NULL, NULL); + fsnotify(source, FS_MOVE_SELF, moved->d_inode, FSNOTIFY_EVENT_INODE, NULL, 0); } audit_inode_child(new_name, moved, new_dir); } /* + * fsnotify_inode_delete - and inode is being evicted from cache, clean up is needed + */ +static inline void fsnotify_inode_delete(struct inode *inode) +{ + __fsnotify_inode_delete(inode); +} + +/* * fsnotify_nameremove - a filename was removed from a directory */ static inline void fsnotify_nameremove(struct dentry *dentry, int isdir) { + __u32 mask = FS_DELETE; + if (isdir) - isdir = IN_ISDIR; - dnotify_parent(dentry, DN_DELETE); - inotify_dentry_parent_queue_event(dentry, IN_DELETE|isdir, 0, dentry->d_name.name); + mask |= FS_IN_ISDIR; + + fsnotify_parent(dentry, mask); } /* @@ -87,14 +134,9 @@ static inline void fsnotify_inoderemove(struct inode *inode) { inotify_inode_queue_event(inode, IN_DELETE_SELF, 0, NULL, NULL); inotify_inode_is_dead(inode); -} -/* - * fsnotify_link_count - inode's link count changed - */ -static inline void fsnotify_link_count(struct inode *inode) -{ - inotify_inode_queue_event(inode, IN_ATTRIB, 0, NULL, NULL); + fsnotify(inode, FS_DELETE_SELF, inode, FSNOTIFY_EVENT_INODE, NULL, 0); + __fsnotify_inode_delete(inode); } /* @@ -102,10 +144,11 @@ static inline void fsnotify_link_count(struct inode *inode) */ static inline void fsnotify_create(struct inode *inode, struct dentry *dentry) { - inode_dir_notify(inode, DN_CREATE); inotify_inode_queue_event(inode, IN_CREATE, 0, dentry->d_name.name, dentry->d_inode); audit_inode_child(dentry->d_name.name, dentry, 
inode); + + fsnotify(inode, FS_CREATE, dentry->d_inode, FSNOTIFY_EVENT_INODE, dentry->d_name.name, 0); } /* @@ -115,11 +158,12 @@ static inline void fsnotify_create(struct inode *inode, struct dentry *dentry) */ static inline void fsnotify_link(struct inode *dir, struct inode *inode, struct dentry *new_dentry) { - inode_dir_notify(dir, DN_CREATE); inotify_inode_queue_event(dir, IN_CREATE, 0, new_dentry->d_name.name, inode); fsnotify_link_count(inode); audit_inode_child(new_dentry->d_name.name, new_dentry, dir); + + fsnotify(dir, FS_CREATE, inode, FSNOTIFY_EVENT_INODE, new_dentry->d_name.name, 0); } /* @@ -127,10 +171,13 @@ static inline void fsnotify_link(struct inode *dir, struct inode *inode, struct */ static inline void fsnotify_mkdir(struct inode *inode, struct dentry *dentry) { - inode_dir_notify(inode, DN_CREATE); - inotify_inode_queue_event(inode, IN_CREATE | IN_ISDIR, 0, - dentry->d_name.name, dentry->d_inode); + __u32 mask = (FS_CREATE | FS_IN_ISDIR); + struct inode *d_inode = dentry->d_inode; + + inotify_inode_queue_event(inode, mask, 0, dentry->d_name.name, d_inode); audit_inode_child(dentry->d_name.name, dentry, inode); + + fsnotify(inode, mask, d_inode, FSNOTIFY_EVENT_INODE, dentry->d_name.name, 0); } /* @@ -139,14 +186,15 @@ static inline void fsnotify_mkdir(struct inode *inode, struct dentry *dentry) static inline void fsnotify_access(struct dentry *dentry) { struct inode *inode = dentry->d_inode; - u32 mask = IN_ACCESS; + __u32 mask = FS_ACCESS; if (S_ISDIR(inode->i_mode)) - mask |= IN_ISDIR; + mask |= FS_IN_ISDIR; - dnotify_parent(dentry, DN_ACCESS); - inotify_dentry_parent_queue_event(dentry, mask, 0, dentry->d_name.name); inotify_inode_queue_event(inode, mask, 0, NULL, NULL); + + fsnotify_parent(dentry, mask); + fsnotify(inode, mask, inode, FSNOTIFY_EVENT_INODE, NULL, 0); } /* @@ -155,14 +203,15 @@ static inline void fsnotify_access(struct dentry *dentry) static inline void fsnotify_modify(struct dentry *dentry) { struct inode *inode = dentry->d_inode; - u32 mask = IN_MODIFY; + __u32 mask = FS_MODIFY; if (S_ISDIR(inode->i_mode)) - mask |= IN_ISDIR; + mask |= FS_IN_ISDIR; - dnotify_parent(dentry, DN_MODIFY); - inotify_dentry_parent_queue_event(dentry, mask, 0, dentry->d_name.name); inotify_inode_queue_event(inode, mask, 0, NULL, NULL); + + fsnotify_parent(dentry, mask); + fsnotify(inode, mask, inode, FSNOTIFY_EVENT_INODE, NULL, 0); } /* @@ -171,13 +220,15 @@ static inline void fsnotify_modify(struct dentry *dentry) static inline void fsnotify_open(struct dentry *dentry) { struct inode *inode = dentry->d_inode; - u32 mask = IN_OPEN; + __u32 mask = FS_OPEN; if (S_ISDIR(inode->i_mode)) - mask |= IN_ISDIR; + mask |= FS_IN_ISDIR; - inotify_dentry_parent_queue_event(dentry, mask, 0, dentry->d_name.name); inotify_inode_queue_event(inode, mask, 0, NULL, NULL); + + fsnotify_parent(dentry, mask); + fsnotify(inode, mask, inode, FSNOTIFY_EVENT_INODE, NULL, 0); } /* @@ -187,15 +238,16 @@ static inline void fsnotify_close(struct file *file) { struct dentry *dentry = file->f_path.dentry; struct inode *inode = dentry->d_inode; - const char *name = dentry->d_name.name; fmode_t mode = file->f_mode; - u32 mask = (mode & FMODE_WRITE) ? IN_CLOSE_WRITE : IN_CLOSE_NOWRITE; + __u32 mask = (mode & FMODE_WRITE) ? 
FS_CLOSE_WRITE : FS_CLOSE_NOWRITE; if (S_ISDIR(inode->i_mode)) - mask |= IN_ISDIR; + mask |= FS_IN_ISDIR; - inotify_dentry_parent_queue_event(dentry, mask, 0, name); inotify_inode_queue_event(inode, mask, 0, NULL, NULL); + + fsnotify_parent(dentry, mask); + fsnotify(inode, mask, file, FSNOTIFY_EVENT_FILE, NULL, 0); } /* @@ -204,13 +256,15 @@ static inline void fsnotify_close(struct file *file) static inline void fsnotify_xattr(struct dentry *dentry) { struct inode *inode = dentry->d_inode; - u32 mask = IN_ATTRIB; + __u32 mask = FS_ATTRIB; if (S_ISDIR(inode->i_mode)) - mask |= IN_ISDIR; + mask |= FS_IN_ISDIR; - inotify_dentry_parent_queue_event(dentry, mask, 0, dentry->d_name.name); inotify_inode_queue_event(inode, mask, 0, NULL, NULL); + + fsnotify_parent(dentry, mask); + fsnotify(inode, mask, inode, FSNOTIFY_EVENT_INODE, NULL, 0); } /* @@ -220,50 +274,37 @@ static inline void fsnotify_xattr(struct dentry *dentry) static inline void fsnotify_change(struct dentry *dentry, unsigned int ia_valid) { struct inode *inode = dentry->d_inode; - int dn_mask = 0; - u32 in_mask = 0; + __u32 mask = 0; + + if (ia_valid & ATTR_UID) + mask |= FS_ATTRIB; + if (ia_valid & ATTR_GID) + mask |= FS_ATTRIB; + if (ia_valid & ATTR_SIZE) + mask |= FS_MODIFY; - if (ia_valid & ATTR_UID) { - in_mask |= IN_ATTRIB; - dn_mask |= DN_ATTRIB; - } - if (ia_valid & ATTR_GID) { - in_mask |= IN_ATTRIB; - dn_mask |= DN_ATTRIB; - } - if (ia_valid & ATTR_SIZE) { - in_mask |= IN_MODIFY; - dn_mask |= DN_MODIFY; - } /* both times implies a utime(s) call */ if ((ia_valid & (ATTR_ATIME | ATTR_MTIME)) == (ATTR_ATIME | ATTR_MTIME)) - { - in_mask |= IN_ATTRIB; - dn_mask |= DN_ATTRIB; - } else if (ia_valid & ATTR_ATIME) { - in_mask |= IN_ACCESS; - dn_mask |= DN_ACCESS; - } else if (ia_valid & ATTR_MTIME) { - in_mask |= IN_MODIFY; - dn_mask |= DN_MODIFY; - } - if (ia_valid & ATTR_MODE) { - in_mask |= IN_ATTRIB; - dn_mask |= DN_ATTRIB; - } + mask |= FS_ATTRIB; + else if (ia_valid & ATTR_ATIME) + mask |= FS_ACCESS; + else if (ia_valid & ATTR_MTIME) + mask |= FS_MODIFY; + + if (ia_valid & ATTR_MODE) + mask |= FS_ATTRIB; - if (dn_mask) - dnotify_parent(dentry, dn_mask); - if (in_mask) { + if (mask) { if (S_ISDIR(inode->i_mode)) - in_mask |= IN_ISDIR; - inotify_inode_queue_event(inode, in_mask, 0, NULL, NULL); - inotify_dentry_parent_queue_event(dentry, in_mask, 0, - dentry->d_name.name); + mask |= FS_IN_ISDIR; + inotify_inode_queue_event(inode, mask, 0, NULL, NULL); + + fsnotify_parent(dentry, mask); + fsnotify(inode, mask, inode, FSNOTIFY_EVENT_INODE, NULL, 0); } } -#ifdef CONFIG_INOTIFY /* inotify helpers */ +#if defined(CONFIG_INOTIFY) || defined(CONFIG_FSNOTIFY) /* notify helpers */ /* * fsnotify_oldname_init - save off the old filename before we change it @@ -281,7 +322,7 @@ static inline void fsnotify_oldname_free(const char *old_name) kfree(old_name); } -#else /* CONFIG_INOTIFY */ +#else /* CONFIG_INOTIFY || CONFIG_FSNOTIFY */ static inline const char *fsnotify_oldname_init(const char *name) { diff --git a/include/linux/fsnotify_backend.h b/include/linux/fsnotify_backend.h new file mode 100644 index 00000000000..4d6f47b5118 --- /dev/null +++ b/include/linux/fsnotify_backend.h @@ -0,0 +1,387 @@ +/* + * Filesystem access notification for Linux + * + * Copyright (C) 2008 Red Hat, Inc., Eric Paris <eparis@redhat.com> + */ + +#ifndef __LINUX_FSNOTIFY_BACKEND_H +#define __LINUX_FSNOTIFY_BACKEND_H + +#ifdef __KERNEL__ + +#include <linux/idr.h> /* inotify uses this */ +#include <linux/fs.h> /* struct inode */ +#include <linux/list.h> +#include 
<linux/path.h> /* struct path */ +#include <linux/spinlock.h> +#include <linux/types.h> + +#include <asm/atomic.h> + +/* + * IN_* from inotfy.h lines up EXACTLY with FS_*, this is so we can easily + * convert between them. dnotify only needs conversion at watch creation + * so no perf loss there. fanotify isn't defined yet, so it can use the + * wholes if it needs more events. + */ +#define FS_ACCESS 0x00000001 /* File was accessed */ +#define FS_MODIFY 0x00000002 /* File was modified */ +#define FS_ATTRIB 0x00000004 /* Metadata changed */ +#define FS_CLOSE_WRITE 0x00000008 /* Writtable file was closed */ +#define FS_CLOSE_NOWRITE 0x00000010 /* Unwrittable file closed */ +#define FS_OPEN 0x00000020 /* File was opened */ +#define FS_MOVED_FROM 0x00000040 /* File was moved from X */ +#define FS_MOVED_TO 0x00000080 /* File was moved to Y */ +#define FS_CREATE 0x00000100 /* Subfile was created */ +#define FS_DELETE 0x00000200 /* Subfile was deleted */ +#define FS_DELETE_SELF 0x00000400 /* Self was deleted */ +#define FS_MOVE_SELF 0x00000800 /* Self was moved */ + +#define FS_UNMOUNT 0x00002000 /* inode on umount fs */ +#define FS_Q_OVERFLOW 0x00004000 /* Event queued overflowed */ +#define FS_IN_IGNORED 0x00008000 /* last inotify event here */ + +#define FS_IN_ISDIR 0x40000000 /* event occurred against dir */ +#define FS_IN_ONESHOT 0x80000000 /* only send event once */ + +#define FS_DN_RENAME 0x10000000 /* file renamed */ +#define FS_DN_MULTISHOT 0x20000000 /* dnotify multishot */ + +/* This inode cares about things that happen to its children. Always set for + * dnotify and inotify. */ +#define FS_EVENT_ON_CHILD 0x08000000 + +/* This is a list of all events that may get sent to a parernt based on fs event + * happening to inodes inside that directory */ +#define FS_EVENTS_POSS_ON_CHILD (FS_ACCESS | FS_MODIFY | FS_ATTRIB |\ + FS_CLOSE_WRITE | FS_CLOSE_NOWRITE | FS_OPEN |\ + FS_MOVED_FROM | FS_MOVED_TO | FS_CREATE |\ + FS_DELETE) + +/* listeners that hard code group numbers near the top */ +#define DNOTIFY_GROUP_NUM UINT_MAX +#define INOTIFY_GROUP_NUM (DNOTIFY_GROUP_NUM-1) + +struct fsnotify_group; +struct fsnotify_event; +struct fsnotify_mark_entry; +struct fsnotify_event_private_data; + +/* + * Each group much define these ops. The fsnotify infrastructure will call + * these operations for each relevant group. + * + * should_send_event - given a group, inode, and mask this function determines + * if the group is interested in this event. + * handle_event - main call for a group to handle an fs event + * free_group_priv - called when a group refcnt hits 0 to clean up the private union + * freeing-mark - this means that a mark has been flagged to die when everything + * finishes using it. The function is supplied with what must be a + * valid group and inode to use to clean up. + */ +struct fsnotify_ops { + bool (*should_send_event)(struct fsnotify_group *group, struct inode *inode, __u32 mask); + int (*handle_event)(struct fsnotify_group *group, struct fsnotify_event *event); + void (*free_group_priv)(struct fsnotify_group *group); + void (*freeing_mark)(struct fsnotify_mark_entry *entry, struct fsnotify_group *group); + void (*free_event_priv)(struct fsnotify_event_private_data *priv); +}; + +/* + * A group is a "thing" that wants to receive notification about filesystem + * events. The mask holds the subset of event types this group cares about. + * refcnt on a group is up to the implementor and at any moment if it goes 0 + * everything will be cleaned up. 
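
For illustration, a minimal listener might supply fsnotify_ops like the following (hypothetical "mywatch" listener; real users such as dnotify and inotify also implement the private-data and mark callbacks):

#include <linux/fsnotify_backend.h>

static atomic_t mywatch_events = ATOMIC_INIT(0);

/* Only forward events this group asked for in its mask. */
static bool mywatch_should_send_event(struct fsnotify_group *group,
				      struct inode *inode, __u32 mask)
{
	return (group->mask & mask) != 0;
}

/* Trivial handler: just count the events delivered to this group. */
static int mywatch_handle_event(struct fsnotify_group *group,
				struct fsnotify_event *event)
{
	atomic_inc(&mywatch_events);
	return 0;
}

static const struct fsnotify_ops mywatch_ops = {
	.should_send_event	= mywatch_should_send_event,
	.handle_event		= mywatch_handle_event,
};
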
+ */ +struct fsnotify_group { + /* + * global list of all groups receiving events from fsnotify. + * anchored by fsnotify_groups and protected by either fsnotify_grp_mutex + * or fsnotify_grp_srcu depending on write vs read. + */ + struct list_head group_list; + + /* + * Defines all of the event types in which this group is interested. + * This mask is a bitwise OR of the FS_* events from above. Each time + * this mask changes for a group (if it changes) the correct functions + * must be called to update the global structures which indicate global + * interest in event types. + */ + __u32 mask; + + /* + * How the refcnt is used is up to each group. When the refcnt hits 0 + * fsnotify will clean up all of the resources associated with this group. + * As an example, the dnotify group will always have a refcnt=1 and that + * will never change. Inotify, on the other hand, has a group per + * inotify_init() and the refcnt will hit 0 only when that fd has been + * closed. + */ + atomic_t refcnt; /* things with interest in this group */ + unsigned int group_num; /* simply prevents accidental group collision */ + + const struct fsnotify_ops *ops; /* how this group handles things */ + + /* needed to send notification to userspace */ + struct mutex notification_mutex; /* protect the notification_list */ + struct list_head notification_list; /* list of event_holder this group needs to send to userspace */ + wait_queue_head_t notification_waitq; /* read() on the notification file blocks on this waitq */ + unsigned int q_len; /* events on the queue */ + unsigned int max_events; /* maximum events allowed on the list */ + + /* stores all fastapth entries assoc with this group so they can be cleaned on unregister */ + spinlock_t mark_lock; /* protect mark_entries list */ + atomic_t num_marks; /* 1 for each mark entry and 1 for not being + * past the point of no return when freeing + * a group */ + struct list_head mark_entries; /* all inode mark entries for this group */ + + /* prevents double list_del of group_list. protected by global fsnotify_grp_mutex */ + bool on_group_list; + + /* groups can define private fields here or use the void *private */ + union { + void *private; +#ifdef CONFIG_INOTIFY_USER + struct inotify_group_private_data { + spinlock_t idr_lock; + struct idr idr; + u32 last_wd; + struct fasync_struct *fa; /* async notification */ + struct user_struct *user; + } inotify_data; +#endif + }; +}; + +/* + * A single event can be queued in multiple group->notification_lists. + * + * each group->notification_list will point to an event_holder which in turns points + * to the actual event that needs to be sent to userspace. + * + * Seemed cheaper to create a refcnt'd event and a small holder for every group + * than create a different event for every group + * + */ +struct fsnotify_event_holder { + struct fsnotify_event *event; + struct list_head event_list; +}; + +/* + * Inotify needs to tack data onto an event. This struct lets us later find the + * correct private data of the correct group. + */ +struct fsnotify_event_private_data { + struct fsnotify_group *group; + struct list_head event_list; +}; + +/* + * all of the information about the original object we want to now send to + * a group. If you want to carry more info from the accessing task to the + * listener this structure is where you need to be adding fields. + */ +struct fsnotify_event { + /* + * If we create an event we are also likely going to need a holder + * to link to a group. So embed one holder in the event. 
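
A sketch of how a listener created with fsnotify_obtain_group() might drain its notification queue (hypothetical mywatch_* name; the queue helpers used here are declared further down in this header):

#include <linux/fsnotify_backend.h>
#include <linux/mutex.h>

static struct fsnotify_event *mywatch_get_one_event(struct fsnotify_group *group)
{
	struct fsnotify_event *event = NULL;

	mutex_lock(&group->notification_mutex);
	if (!fsnotify_notify_queue_is_empty(group))
		event = fsnotify_remove_notify_event(group);
	mutex_unlock(&group->notification_mutex);

	return event;	/* caller drops its reference with fsnotify_put_event() */
}
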
Means only + * one allocation for the common case where we only have one group + */ + struct fsnotify_event_holder holder; + spinlock_t lock; /* protection for the associated event_holder and private_list */ + /* to_tell may ONLY be dereferenced during handle_event(). */ + struct inode *to_tell; /* either the inode the event happened to or its parent */ + /* + * depending on the event type we should have either a path or inode + * We hold a reference on path, but NOT on inode. Since we have the ref on + * the path, it may be dereferenced at any point during this object's + * lifetime. That reference is dropped when this object's refcnt hits + * 0. If this event contains an inode instead of a path, the inode may + * ONLY be used during handle_event(). + */ + union { + struct path path; + struct inode *inode; + }; +/* when calling fsnotify tell it if the data is a path or inode */ +#define FSNOTIFY_EVENT_NONE 0 +#define FSNOTIFY_EVENT_PATH 1 +#define FSNOTIFY_EVENT_INODE 2 +#define FSNOTIFY_EVENT_FILE 3 + int data_type; /* which of the above union we have */ + atomic_t refcnt; /* how many groups still are using/need to send this event */ + __u32 mask; /* the type of access, bitwise OR for FS_* event types */ + + u32 sync_cookie; /* used to corrolate events, namely inotify mv events */ + char *file_name; + size_t name_len; + + struct list_head private_data_list; /* groups can store private data here */ +}; + +/* + * a mark is simply an entry attached to an in core inode which allows an + * fsnotify listener to indicate they are either no longer interested in events + * of a type matching mask or only interested in those events. + * + * these are flushed when an inode is evicted from core and may be flushed + * when the inode is modified (as seen by fsnotify_access). Some fsnotify users + * (such as dnotify) will flush these when the open fd is closed and not at + * inode eviction or modification. + */ +struct fsnotify_mark_entry { + __u32 mask; /* mask this mark entry is for */ + /* we hold ref for each i_list and g_list. also one ref for each 'thing' + * in kernel that found and may be using this mark. */ + atomic_t refcnt; /* active things looking at this mark */ + struct inode *inode; /* inode this entry is associated with */ + struct fsnotify_group *group; /* group this mark entry is for */ + struct hlist_node i_list; /* list of mark_entries by inode->i_fsnotify_mark_entries */ + struct list_head g_list; /* list of mark_entries by group->i_fsnotify_mark_entries */ + spinlock_t lock; /* protect group, inode, and killme */ + struct list_head free_i_list; /* tmp list used when freeing this mark */ + struct list_head free_g_list; /* tmp list used when freeing this mark */ + void (*free_mark)(struct fsnotify_mark_entry *entry); /* called on final put+free */ +}; + +#ifdef CONFIG_FSNOTIFY + +/* called from the vfs helpers */ + +/* main fsnotify call to send events */ +extern void fsnotify(struct inode *to_tell, __u32 mask, void *data, int data_is, + const char *name, u32 cookie); +extern void __fsnotify_parent(struct dentry *dentry, __u32 mask); +extern void __fsnotify_inode_delete(struct inode *inode); +extern u32 fsnotify_get_cookie(void); + +static inline int fsnotify_inode_watches_children(struct inode *inode) +{ + /* FS_EVENT_ON_CHILD is set if the inode may care */ + if (!(inode->i_fsnotify_mask & FS_EVENT_ON_CHILD)) + return 0; + /* this inode might care about child events, does it care about the + * specific set of events that can happen on a child? 
*/ + return inode->i_fsnotify_mask & FS_EVENTS_POSS_ON_CHILD; +} + +/* + * Update the dentry with a flag indicating the interest of its parent to receive + * filesystem events when those events happens to this dentry->d_inode. + */ +static inline void __fsnotify_update_dcache_flags(struct dentry *dentry) +{ + struct dentry *parent; + + assert_spin_locked(&dcache_lock); + assert_spin_locked(&dentry->d_lock); + + parent = dentry->d_parent; + if (parent->d_inode && fsnotify_inode_watches_children(parent->d_inode)) + dentry->d_flags |= DCACHE_FSNOTIFY_PARENT_WATCHED; + else + dentry->d_flags &= ~DCACHE_FSNOTIFY_PARENT_WATCHED; +} + +/* + * fsnotify_d_instantiate - instantiate a dentry for inode + * Called with dcache_lock held. + */ +static inline void __fsnotify_d_instantiate(struct dentry *dentry, struct inode *inode) +{ + if (!inode) + return; + + assert_spin_locked(&dcache_lock); + + spin_lock(&dentry->d_lock); + __fsnotify_update_dcache_flags(dentry); + spin_unlock(&dentry->d_lock); +} + +/* called from fsnotify listeners, such as fanotify or dnotify */ + +/* must call when a group changes its ->mask */ +extern void fsnotify_recalc_global_mask(void); +/* get a reference to an existing or create a new group */ +extern struct fsnotify_group *fsnotify_obtain_group(unsigned int group_num, + __u32 mask, + const struct fsnotify_ops *ops); +/* run all marks associated with this group and update group->mask */ +extern void fsnotify_recalc_group_mask(struct fsnotify_group *group); +/* drop reference on a group from fsnotify_obtain_group */ +extern void fsnotify_put_group(struct fsnotify_group *group); + +/* take a reference to an event */ +extern void fsnotify_get_event(struct fsnotify_event *event); +extern void fsnotify_put_event(struct fsnotify_event *event); +/* find private data previously attached to an event and unlink it */ +extern struct fsnotify_event_private_data *fsnotify_remove_priv_from_event(struct fsnotify_group *group, + struct fsnotify_event *event); + +/* attach the event to the group notification queue */ +extern int fsnotify_add_notify_event(struct fsnotify_group *group, struct fsnotify_event *event, + struct fsnotify_event_private_data *priv); +/* true if the group notification queue is empty */ +extern bool fsnotify_notify_queue_is_empty(struct fsnotify_group *group); +/* return, but do not dequeue the first event on the notification queue */ +extern struct fsnotify_event *fsnotify_peek_notify_event(struct fsnotify_group *group); +/* return AND dequeue the first event on the notification queue */ +extern struct fsnotify_event *fsnotify_remove_notify_event(struct fsnotify_group *group); + +/* functions used to manipulate the marks attached to inodes */ + +/* run all marks associated with an inode and update inode->i_fsnotify_mask */ +extern void fsnotify_recalc_inode_mask(struct inode *inode); +extern void fsnotify_init_mark(struct fsnotify_mark_entry *entry, void (*free_mark)(struct fsnotify_mark_entry *entry)); +/* find (and take a reference) to a mark associated with group and inode */ +extern struct fsnotify_mark_entry *fsnotify_find_mark_entry(struct fsnotify_group *group, struct inode *inode); +/* attach the mark to both the group and the inode */ +extern int fsnotify_add_mark(struct fsnotify_mark_entry *entry, struct fsnotify_group *group, struct inode *inode); +/* given a mark, flag it to be freed when all references are dropped */ +extern void fsnotify_destroy_mark_by_entry(struct fsnotify_mark_entry *entry); +/* run all the marks in a group, and flag them to be 
freed */ +extern void fsnotify_clear_marks_by_group(struct fsnotify_group *group); +extern void fsnotify_get_mark(struct fsnotify_mark_entry *entry); +extern void fsnotify_put_mark(struct fsnotify_mark_entry *entry); +extern void fsnotify_unmount_inodes(struct list_head *list); + +/* put here because inotify does some weird stuff when destroying watches */ +extern struct fsnotify_event *fsnotify_create_event(struct inode *to_tell, __u32 mask, + void *data, int data_is, const char *name, + u32 cookie, gfp_t gfp); + +#else + +static inline void fsnotify(struct inode *to_tell, __u32 mask, void *data, int data_is, + const char *name, u32 cookie) +{} + +static inline void __fsnotify_parent(struct dentry *dentry, __u32 mask) +{} + +static inline void __fsnotify_inode_delete(struct inode *inode) +{} + +static inline void __fsnotify_update_dcache_flags(struct dentry *dentry) +{} + +static inline void __fsnotify_d_instantiate(struct dentry *dentry, struct inode *inode) +{} + +static inline u32 fsnotify_get_cookie(void) +{ + return 0; +} + +static inline void fsnotify_unmount_inodes(struct list_head *list) +{} + +#endif /* CONFIG_FSNOTIFY */ + +#endif /* __KERNEL __ */ + +#endif /* __LINUX_FSNOTIFY_BACKEND_H */ diff --git a/include/linux/ftrace.h b/include/linux/ftrace.h index 677432b9cb7..dc3b1328aae 100644 --- a/include/linux/ftrace.h +++ b/include/linux/ftrace.h @@ -1,15 +1,18 @@ #ifndef _LINUX_FTRACE_H #define _LINUX_FTRACE_H -#include <linux/linkage.h> -#include <linux/fs.h> -#include <linux/ktime.h> -#include <linux/init.h> -#include <linux/types.h> -#include <linux/module.h> +#include <linux/trace_clock.h> #include <linux/kallsyms.h> +#include <linux/linkage.h> #include <linux/bitops.h> +#include <linux/module.h> +#include <linux/ktime.h> #include <linux/sched.h> +#include <linux/types.h> +#include <linux/init.h> +#include <linux/fs.h> + +#include <asm/ftrace.h> #ifdef CONFIG_FUNCTION_TRACER @@ -95,9 +98,41 @@ stack_trace_sysctl(struct ctl_table *table, int write, loff_t *ppos); #endif +struct ftrace_func_command { + struct list_head list; + char *name; + int (*func)(char *func, char *cmd, + char *params, int enable); +}; + #ifdef CONFIG_DYNAMIC_FTRACE -/* asm/ftrace.h must be defined for archs supporting dynamic ftrace */ -#include <asm/ftrace.h> + +int ftrace_arch_code_modify_prepare(void); +int ftrace_arch_code_modify_post_process(void); + +struct seq_file; + +struct ftrace_probe_ops { + void (*func)(unsigned long ip, + unsigned long parent_ip, + void **data); + int (*callback)(unsigned long ip, void **data); + void (*free)(void **data); + int (*print)(struct seq_file *m, + unsigned long ip, + struct ftrace_probe_ops *ops, + void *data); +}; + +extern int +register_ftrace_function_probe(char *glob, struct ftrace_probe_ops *ops, + void *data); +extern void +unregister_ftrace_function_probe(char *glob, struct ftrace_probe_ops *ops, + void *data); +extern void +unregister_ftrace_function_probe_func(char *glob, struct ftrace_probe_ops *ops); +extern void unregister_ftrace_function_probe_all(char *glob); enum { FTRACE_FL_FREE = (1 << 0), @@ -110,15 +145,23 @@ enum { }; struct dyn_ftrace { - struct list_head list; - unsigned long ip; /* address of mcount call-site */ - unsigned long flags; - struct dyn_arch_ftrace arch; + union { + unsigned long ip; /* address of mcount call-site */ + struct dyn_ftrace *freelist; + }; + union { + unsigned long flags; + struct dyn_ftrace *newlist; + }; + struct dyn_arch_ftrace arch; }; int ftrace_force_update(void); void ftrace_set_filter(unsigned char *buf, int 
len, int reset); +int register_ftrace_command(struct ftrace_func_command *cmd); +int unregister_ftrace_command(struct ftrace_func_command *cmd); + /* defined in arch */ extern int ftrace_ip_converted(unsigned long ip); extern int ftrace_dyn_arch_init(void *data); @@ -126,6 +169,10 @@ extern int ftrace_update_ftrace_func(ftrace_func_t func); extern void ftrace_caller(void); extern void ftrace_call(void); extern void mcount_call(void); + +#ifndef FTRACE_ADDR +#define FTRACE_ADDR ((unsigned long)ftrace_caller) +#endif #ifdef CONFIG_FUNCTION_GRAPH_TRACER extern void ftrace_graph_caller(void); extern int ftrace_enable_ftrace_graph_caller(void); @@ -136,7 +183,7 @@ static inline int ftrace_disable_ftrace_graph_caller(void) { return 0; } #endif /** - * ftrace_make_nop - convert code into top + * ftrace_make_nop - convert code into nop * @mod: module structure if called by module load initialization * @rec: the mcount call site record * @addr: the address that the call site should be calling @@ -181,14 +228,11 @@ extern int ftrace_make_nop(struct module *mod, */ extern int ftrace_make_call(struct dyn_ftrace *rec, unsigned long addr); - /* May be defined in arch */ extern int ftrace_arch_read_dyn_info(char *buf, int size); extern int skip_trace(unsigned long ip); -extern void ftrace_release(void *start, unsigned long size); - extern void ftrace_disable_daemon(void); extern void ftrace_enable_daemon(void); #else @@ -198,6 +242,14 @@ extern void ftrace_enable_daemon(void); # define ftrace_disable_daemon() do { } while (0) # define ftrace_enable_daemon() do { } while (0) static inline void ftrace_release(void *start, unsigned long size) { } +static inline int register_ftrace_command(struct ftrace_func_command *cmd) +{ + return -EINVAL; +} +static inline int unregister_ftrace_command(char *cmd_name) +{ + return -EINVAL; +} #endif /* CONFIG_DYNAMIC_FTRACE */ /* totally disable ftrace - can not re-enable after this */ @@ -233,24 +285,25 @@ static inline void __ftrace_enabled_restore(int enabled) #endif } -#ifdef CONFIG_FRAME_POINTER -/* TODO: need to fix this for ARM */ -# define CALLER_ADDR0 ((unsigned long)__builtin_return_address(0)) -# define CALLER_ADDR1 ((unsigned long)__builtin_return_address(1)) -# define CALLER_ADDR2 ((unsigned long)__builtin_return_address(2)) -# define CALLER_ADDR3 ((unsigned long)__builtin_return_address(3)) -# define CALLER_ADDR4 ((unsigned long)__builtin_return_address(4)) -# define CALLER_ADDR5 ((unsigned long)__builtin_return_address(5)) -# define CALLER_ADDR6 ((unsigned long)__builtin_return_address(6)) -#else -# define CALLER_ADDR0 ((unsigned long)__builtin_return_address(0)) -# define CALLER_ADDR1 0UL -# define CALLER_ADDR2 0UL -# define CALLER_ADDR3 0UL -# define CALLER_ADDR4 0UL -# define CALLER_ADDR5 0UL -# define CALLER_ADDR6 0UL -#endif +#ifndef HAVE_ARCH_CALLER_ADDR +# ifdef CONFIG_FRAME_POINTER +# define CALLER_ADDR0 ((unsigned long)__builtin_return_address(0)) +# define CALLER_ADDR1 ((unsigned long)__builtin_return_address(1)) +# define CALLER_ADDR2 ((unsigned long)__builtin_return_address(2)) +# define CALLER_ADDR3 ((unsigned long)__builtin_return_address(3)) +# define CALLER_ADDR4 ((unsigned long)__builtin_return_address(4)) +# define CALLER_ADDR5 ((unsigned long)__builtin_return_address(5)) +# define CALLER_ADDR6 ((unsigned long)__builtin_return_address(6)) +# else +# define CALLER_ADDR0 ((unsigned long)__builtin_return_address(0)) +# define CALLER_ADDR1 0UL +# define CALLER_ADDR2 0UL +# define CALLER_ADDR3 0UL +# define CALLER_ADDR4 0UL +# define 
CALLER_ADDR5 0UL +# define CALLER_ADDR6 0UL +# endif +#endif /* ifndef HAVE_ARCH_CALLER_ADDR */ #ifdef CONFIG_IRQSOFF_TRACER extern void time_hardirqs_on(unsigned long a0, unsigned long a1); @@ -268,94 +321,11 @@ static inline void __ftrace_enabled_restore(int enabled) # define trace_preempt_off(a0, a1) do { } while (0) #endif -#ifdef CONFIG_TRACING -extern int ftrace_dump_on_oops; - -extern void tracing_start(void); -extern void tracing_stop(void); -extern void ftrace_off_permanent(void); - -extern void -ftrace_special(unsigned long arg1, unsigned long arg2, unsigned long arg3); - -/** - * ftrace_printk - printf formatting in the ftrace buffer - * @fmt: the printf format for printing - * - * Note: __ftrace_printk is an internal function for ftrace_printk and - * the @ip is passed in via the ftrace_printk macro. - * - * This function allows a kernel developer to debug fast path sections - * that printk is not appropriate for. By scattering in various - * printk like tracing in the code, a developer can quickly see - * where problems are occurring. - * - * This is intended as a debugging tool for the developer only. - * Please refrain from leaving ftrace_printks scattered around in - * your code. - */ -# define ftrace_printk(fmt...) __ftrace_printk(_THIS_IP_, fmt) -extern int -__ftrace_printk(unsigned long ip, const char *fmt, ...) - __attribute__ ((format (printf, 2, 3))); -extern void ftrace_dump(void); -#else -static inline void -ftrace_special(unsigned long arg1, unsigned long arg2, unsigned long arg3) { } -static inline int -ftrace_printk(const char *fmt, ...) __attribute__ ((format (printf, 1, 2))); - -static inline void tracing_start(void) { } -static inline void tracing_stop(void) { } -static inline void ftrace_off_permanent(void) { } -static inline int -ftrace_printk(const char *fmt, ...) -{ - return 0; -} -static inline void ftrace_dump(void) { } -#endif - #ifdef CONFIG_FTRACE_MCOUNT_RECORD extern void ftrace_init(void); -extern void ftrace_init_module(struct module *mod, - unsigned long *start, unsigned long *end); #else static inline void ftrace_init(void) { } -static inline void -ftrace_init_module(struct module *mod, - unsigned long *start, unsigned long *end) { } -#endif - -enum { - POWER_NONE = 0, - POWER_CSTATE = 1, - POWER_PSTATE = 2, -}; - -struct power_trace { -#ifdef CONFIG_POWER_TRACER - ktime_t stamp; - ktime_t end; - int type; - int state; #endif -}; - -#ifdef CONFIG_POWER_TRACER -extern void trace_power_start(struct power_trace *it, unsigned int type, - unsigned int state); -extern void trace_power_mark(struct power_trace *it, unsigned int type, - unsigned int state); -extern void trace_power_end(struct power_trace *it); -#else -static inline void trace_power_start(struct power_trace *it, unsigned int type, - unsigned int state) { } -static inline void trace_power_mark(struct power_trace *it, unsigned int type, - unsigned int state) { } -static inline void trace_power_end(struct power_trace *it) { } -#endif - /* * Structure that defines an entry function trace. @@ -379,6 +349,33 @@ struct ftrace_graph_ret { #ifdef CONFIG_FUNCTION_GRAPH_TRACER +/* for init task */ +#define INIT_FTRACE_GRAPH .ret_stack = NULL, + +/* + * Stack of return addresses for functions + * of a thread. + * Used in struct thread_info + */ +struct ftrace_ret_stack { + unsigned long ret; + unsigned long func; + unsigned long long calltime; + unsigned long long subtime; + unsigned long fp; +}; + +/* + * Primary handler of a function return. + * It relays on ftrace_return_to_handler. 
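The ftrace_func_command interface added earlier in this ftrace.h hunk lets a tracer plug its own command into the set_ftrace_filter syntax. Below is a minimal module-shaped sketch, assuming CONFIG_DYNAMIC_FTRACE; the "mycmd" name, the echoed string and the module boilerplate are illustrative only (in-tree tracers call register_ftrace_command() directly, and the symbol is not necessarily exported to modules):

#include <linux/ftrace.h>
#include <linux/kernel.h>
#include <linux/module.h>
#include <linux/init.h>

/* invoked when something like "echo 'do_IRQ:mycmd:42' > set_ftrace_filter"
 * is parsed; func/cmd/params come from splitting that string on ':' */
static int mycmd_func(char *func, char *cmd, char *params, int enable)
{
	pr_info("mycmd: func=%s cmd=%s params=%s enable=%d\n",
		func, cmd, params ? params : "", enable);
	return 0;
}

static struct ftrace_func_command mycmd = {
	.name	= "mycmd",
	.func	= mycmd_func,
};

static int __init mycmd_init(void)
{
	return register_ftrace_command(&mycmd);
}

static void __exit mycmd_exit(void)
{
	unregister_ftrace_command(&mycmd);
}

module_init(mycmd_init);
module_exit(mycmd_exit);
MODULE_LICENSE("GPL");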
+ * Defined in entry_32/64.S + */ +extern void return_to_handler(void); + +extern int +ftrace_push_return_trace(unsigned long ret, unsigned long func, int *depth, + unsigned long frame_pointer); + /* * Sometimes we don't want to trace a function with the function * graph tracer but we want them to keep traced by the usual function @@ -430,10 +427,11 @@ static inline void unpause_graph_tracing(void) { atomic_dec(¤t->tracing_graph_pause); } -#else +#else /* !CONFIG_FUNCTION_GRAPH_TRACER */ #define __notrace_funcgraph #define __irq_entry +#define INIT_FTRACE_GRAPH static inline void ftrace_graph_init_task(struct task_struct *t) { } static inline void ftrace_graph_exit_task(struct task_struct *t) { } @@ -445,7 +443,7 @@ static inline int task_curr_ret_stack(struct task_struct *tsk) static inline void pause_graph_tracing(void) { } static inline void unpause_graph_tracing(void) { } -#endif +#endif /* CONFIG_FUNCTION_GRAPH_TRACER */ #ifdef CONFIG_TRACING #include <linux/sched.h> @@ -490,6 +488,28 @@ static inline int test_tsk_trace_graph(struct task_struct *tsk) return tsk->trace & TSK_TRACE_FL_GRAPH; } +extern int ftrace_dump_on_oops; + +#ifdef CONFIG_PREEMPT +#define INIT_TRACE_RECURSION .trace_recursion = 0, +#endif + #endif /* CONFIG_TRACING */ +#ifndef INIT_TRACE_RECURSION +#define INIT_TRACE_RECURSION +#endif + +#ifdef CONFIG_HW_BRANCH_TRACER + +void trace_hw_branch(u64 from, u64 to); +void trace_hw_branch_oops(void); + +#else /* CONFIG_HW_BRANCH_TRACER */ + +static inline void trace_hw_branch(u64 from, u64 to) {} +static inline void trace_hw_branch_oops(void) {} + +#endif /* CONFIG_HW_BRANCH_TRACER */ + #endif /* _LINUX_FTRACE_H */ diff --git a/include/linux/ftrace_event.h b/include/linux/ftrace_event.h new file mode 100644 index 00000000000..23f7179bf74 --- /dev/null +++ b/include/linux/ftrace_event.h @@ -0,0 +1,183 @@ +#ifndef _LINUX_FTRACE_EVENT_H +#define _LINUX_FTRACE_EVENT_H + +#include <linux/trace_seq.h> +#include <linux/ring_buffer.h> +#include <linux/percpu.h> + +struct trace_array; +struct tracer; +struct dentry; + +DECLARE_PER_CPU(struct trace_seq, ftrace_event_seq); + +struct trace_print_flags { + unsigned long mask; + const char *name; +}; + +const char *ftrace_print_flags_seq(struct trace_seq *p, const char *delim, + unsigned long flags, + const struct trace_print_flags *flag_array); + +const char *ftrace_print_symbols_seq(struct trace_seq *p, unsigned long val, + const struct trace_print_flags *symbol_array); + +/* + * The trace entry - the most basic unit of tracing. 
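ftrace_print_flags_seq(), declared just above, renders a bitmask as a delimiter-joined list of names taken from a trace_print_flags table. A small sketch with a made-up flag set; the REQ_F_* values, the table and the wrapper function are illustrative, and the terminating entry with a NULL name is an assumption based on how the in-tree users build such tables:

#include <linux/ftrace_event.h>

/* illustrative request flags; any bitmask works */
#define REQ_F_SYNC	0x1
#define REQ_F_META	0x2
#define REQ_F_BARRIER	0x4

static const struct trace_print_flags req_flag_names[] = {
	{ REQ_F_SYNC,		"SYNC"		},
	{ REQ_F_META,		"META"		},
	{ REQ_F_BARRIER,	"BARRIER"	},
	{ -1,			NULL		},	/* NULL name ends the table */
};

/* called from an output callback that owns a struct trace_seq *p;
 * e.g. flags == (REQ_F_SYNC | REQ_F_META) yields "SYNC|META" */
static const char *req_flags_str(struct trace_seq *p, unsigned long flags)
{
	return ftrace_print_flags_seq(p, "|", flags, req_flag_names);
}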
This is what + * is printed in the end as a single line in the trace output, such as: + * + * bash-15816 [01] 235.197585: idle_cpu <- irq_enter + */ +struct trace_entry { + unsigned short type; + unsigned char flags; + unsigned char preempt_count; + int pid; + int tgid; +}; + +#define FTRACE_MAX_EVENT \ + ((1 << (sizeof(((struct trace_entry *)0)->type) * 8)) - 1) + +/* + * Trace iterator - used by printout routines who present trace + * results to users and which routines might sleep, etc: + */ +struct trace_iterator { + struct trace_array *tr; + struct tracer *trace; + void *private; + int cpu_file; + struct mutex mutex; + struct ring_buffer_iter *buffer_iter[NR_CPUS]; + unsigned long iter_flags; + + /* The below is zeroed out in pipe_read */ + struct trace_seq seq; + struct trace_entry *ent; + int cpu; + u64 ts; + + loff_t pos; + long idx; + + cpumask_var_t started; +}; + + +typedef enum print_line_t (*trace_print_func)(struct trace_iterator *iter, + int flags); +struct trace_event { + struct hlist_node node; + struct list_head list; + int type; + trace_print_func trace; + trace_print_func raw; + trace_print_func hex; + trace_print_func binary; +}; + +extern int register_ftrace_event(struct trace_event *event); +extern int unregister_ftrace_event(struct trace_event *event); + +/* Return values for print_line callback */ +enum print_line_t { + TRACE_TYPE_PARTIAL_LINE = 0, /* Retry after flushing the seq */ + TRACE_TYPE_HANDLED = 1, + TRACE_TYPE_UNHANDLED = 2, /* Relay to other output functions */ + TRACE_TYPE_NO_CONSUME = 3 /* Handled but ask to not consume */ +}; + +void tracing_generic_entry_update(struct trace_entry *entry, + unsigned long flags, + int pc); +struct ring_buffer_event * +trace_current_buffer_lock_reserve(struct ring_buffer **current_buffer, + int type, unsigned long len, + unsigned long flags, int pc); +void trace_current_buffer_unlock_commit(struct ring_buffer *buffer, + struct ring_buffer_event *event, + unsigned long flags, int pc); +void trace_nowake_buffer_unlock_commit(struct ring_buffer *buffer, + struct ring_buffer_event *event, + unsigned long flags, int pc); +void trace_current_buffer_discard_commit(struct ring_buffer *buffer, + struct ring_buffer_event *event); + +void tracing_record_cmdline(struct task_struct *tsk); + +struct event_filter; + +struct ftrace_event_call { + struct list_head list; + char *name; + char *system; + struct dentry *dir; + struct trace_event *event; + int enabled; + int (*regfunc)(void *); + void (*unregfunc)(void *); + int id; + int (*raw_init)(void); + int (*show_format)(struct ftrace_event_call *call, + struct trace_seq *s); + int (*define_fields)(struct ftrace_event_call *); + struct list_head fields; + int filter_active; + struct event_filter *filter; + void *mod; + void *data; + + atomic_t profile_count; + int (*profile_enable)(struct ftrace_event_call *); + void (*profile_disable)(struct ftrace_event_call *); +}; + +#define MAX_FILTER_PRED 32 +#define MAX_FILTER_STR_VAL 128 + +extern void destroy_preds(struct ftrace_event_call *call); +extern int filter_match_preds(struct ftrace_event_call *call, void *rec); +extern int filter_current_check_discard(struct ring_buffer *buffer, + struct ftrace_event_call *call, + void *rec, + struct ring_buffer_event *event); + +enum { + FILTER_OTHER = 0, + FILTER_STATIC_STRING, + FILTER_DYN_STRING, + FILTER_PTR_STRING, +}; + +extern int trace_define_field(struct ftrace_event_call *call, + const char *type, const char *name, + int offset, int size, int is_signed, + int filter_type); +extern int 
trace_define_common_fields(struct ftrace_event_call *call); + +#define is_signed_type(type) (((type)(-1)) < 0) + +int trace_set_clr_event(const char *system, const char *event, int set); + +/* + * The double __builtin_constant_p is because gcc will give us an error + * if we try to allocate the static variable to fmt if it is not a + * constant. Even with the outer if statement optimizing out. + */ +#define event_trace_printk(ip, fmt, args...) \ +do { \ + __trace_printk_check_format(fmt, ##args); \ + tracing_record_cmdline(current); \ + if (__builtin_constant_p(fmt)) { \ + static const char *trace_printk_fmt \ + __attribute__((section("__trace_printk_fmt"))) = \ + __builtin_constant_p(fmt) ? fmt : NULL; \ + \ + __trace_bprintk(ip, trace_printk_fmt, ##args); \ + } else \ + __trace_printk(ip, fmt, ##args); \ +} while (0) + +#endif /* _LINUX_FTRACE_EVENT_H */ diff --git a/include/linux/ftrace_irq.h b/include/linux/ftrace_irq.h index 366a054d0b0..dca7bf8cffe 100644 --- a/include/linux/ftrace_irq.h +++ b/include/linux/ftrace_irq.h @@ -2,7 +2,7 @@ #define _LINUX_FTRACE_IRQ_H -#if defined(CONFIG_DYNAMIC_FTRACE) || defined(CONFIG_FUNCTION_GRAPH_TRACER) +#ifdef CONFIG_FTRACE_NMI_ENTER extern void ftrace_nmi_enter(void); extern void ftrace_nmi_exit(void); #else diff --git a/include/linux/fuse.h b/include/linux/fuse.h index 162e5defe68..cf593bf9fd3 100644 --- a/include/linux/fuse.h +++ b/include/linux/fuse.h @@ -25,6 +25,11 @@ * - add IOCTL message * - add unsolicited notification support * - add POLL message and NOTIFY_POLL notification + * + * 7.12 + * - add umask flag to input argument of open, mknod and mkdir + * - add notification messages for invalidation of inodes and + * directory entries */ #ifndef _LINUX_FUSE_H @@ -36,7 +41,7 @@ #define FUSE_KERNEL_VERSION 7 /** Minor version number of this interface */ -#define FUSE_KERNEL_MINOR_VERSION 11 +#define FUSE_KERNEL_MINOR_VERSION 12 /** The node ID of the root inode */ #define FUSE_ROOT_ID 1 @@ -112,6 +117,7 @@ struct fuse_file_lock { * INIT request/reply flags * * FUSE_EXPORT_SUPPORT: filesystem handles lookups of "." and ".." 
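The field-definition interface above (trace_define_field(), trace_define_common_fields(), is_signed_type() and the FILTER_* types) is what an event's define_fields callback uses to describe its on-buffer record to the filter code. A minimal sketch for a hypothetical event record; the struct and its field names are made up for illustration:

#include <linux/ftrace_event.h>
#include <linux/stddef.h>

/* hypothetical record: common trace header followed by two payload fields */
struct my_event_entry {
	struct trace_entry	ent;
	unsigned long		addr;
	int			retval;
};

static int my_event_define_fields(struct ftrace_event_call *call)
{
	int ret;

	ret = trace_define_common_fields(call);	/* pid, flags, etc. */
	if (ret)
		return ret;

	ret = trace_define_field(call, "unsigned long", "addr",
				 offsetof(struct my_event_entry, addr),
				 sizeof(unsigned long),
				 is_signed_type(unsigned long), FILTER_OTHER);
	if (ret)
		return ret;

	return trace_define_field(call, "int", "retval",
				  offsetof(struct my_event_entry, retval),
				  sizeof(int),
				  is_signed_type(int), FILTER_OTHER);
}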
+ * FUSE_DONT_MASK: don't apply umask to file mode on create operations */ #define FUSE_ASYNC_READ (1 << 0) #define FUSE_POSIX_LOCKS (1 << 1) @@ -119,6 +125,14 @@ struct fuse_file_lock { #define FUSE_ATOMIC_O_TRUNC (1 << 3) #define FUSE_EXPORT_SUPPORT (1 << 4) #define FUSE_BIG_WRITES (1 << 5) +#define FUSE_DONT_MASK (1 << 6) + +/** + * CUSE INIT request/reply flags + * + * CUSE_UNRESTRICTED_IOCTL: use unrestricted ioctl + */ +#define CUSE_UNRESTRICTED_IOCTL (1 << 0) /** * Release flags @@ -210,10 +224,15 @@ enum fuse_opcode { FUSE_DESTROY = 38, FUSE_IOCTL = 39, FUSE_POLL = 40, + + /* CUSE specific operations */ + CUSE_INIT = 4096, }; enum fuse_notify_code { FUSE_NOTIFY_POLL = 1, + FUSE_NOTIFY_INVAL_INODE = 2, + FUSE_NOTIFY_INVAL_ENTRY = 3, FUSE_NOTIFY_CODE_MAX, }; @@ -252,14 +271,18 @@ struct fuse_attr_out { struct fuse_attr attr; }; +#define FUSE_COMPAT_MKNOD_IN_SIZE 8 + struct fuse_mknod_in { __u32 mode; __u32 rdev; + __u32 umask; + __u32 padding; }; struct fuse_mkdir_in { __u32 mode; - __u32 padding; + __u32 umask; }; struct fuse_rename_in { @@ -291,7 +314,14 @@ struct fuse_setattr_in { struct fuse_open_in { __u32 flags; + __u32 unused; +}; + +struct fuse_create_in { + __u32 flags; __u32 mode; + __u32 umask; + __u32 padding; }; struct fuse_open_out { @@ -401,6 +431,27 @@ struct fuse_init_out { __u32 max_write; }; +#define CUSE_INIT_INFO_MAX 4096 + +struct cuse_init_in { + __u32 major; + __u32 minor; + __u32 unused; + __u32 flags; +}; + +struct cuse_init_out { + __u32 major; + __u32 minor; + __u32 unused; + __u32 flags; + __u32 max_read; + __u32 max_write; + __u32 dev_major; /* chardev major */ + __u32 dev_minor; /* chardev minor */ + __u32 spare[10]; +}; + struct fuse_interrupt_in { __u64 unique; }; @@ -477,4 +528,16 @@ struct fuse_dirent { #define FUSE_DIRENT_SIZE(d) \ FUSE_DIRENT_ALIGN(FUSE_NAME_OFFSET + (d)->namelen) +struct fuse_notify_inval_inode_out { + __u64 ino; + __s64 off; + __s64 len; +}; + +struct fuse_notify_inval_entry_out { + __u64 parent; + __u32 namelen; + __u32 padding; +}; + #endif /* _LINUX_FUSE_H */ diff --git a/include/linux/futex.h b/include/linux/futex.h index 3bf5bb5a34f..34956c8fdeb 100644 --- a/include/linux/futex.h +++ b/include/linux/futex.h @@ -23,6 +23,8 @@ union ktime; #define FUTEX_TRYLOCK_PI 8 #define FUTEX_WAIT_BITSET 9 #define FUTEX_WAKE_BITSET 10 +#define FUTEX_WAIT_REQUEUE_PI 11 +#define FUTEX_CMP_REQUEUE_PI 12 #define FUTEX_PRIVATE_FLAG 128 #define FUTEX_CLOCK_REALTIME 256 @@ -38,6 +40,10 @@ union ktime; #define FUTEX_TRYLOCK_PI_PRIVATE (FUTEX_TRYLOCK_PI | FUTEX_PRIVATE_FLAG) #define FUTEX_WAIT_BITSET_PRIVATE (FUTEX_WAIT_BITS | FUTEX_PRIVATE_FLAG) #define FUTEX_WAKE_BITSET_PRIVATE (FUTEX_WAKE_BITS | FUTEX_PRIVATE_FLAG) +#define FUTEX_WAIT_REQUEUE_PI_PRIVATE (FUTEX_WAIT_REQUEUE_PI | \ + FUTEX_PRIVATE_FLAG) +#define FUTEX_CMP_REQUEUE_PI_PRIVATE (FUTEX_CMP_REQUEUE_PI | \ + FUTEX_PRIVATE_FLAG) /* * Support for robust futexes: the kernel cleans up held futexes at diff --git a/include/linux/gameport.h b/include/linux/gameport.h index 0cd825f7363..1bc08541c2b 100644 --- a/include/linux/gameport.h +++ b/include/linux/gameport.h @@ -11,6 +11,7 @@ #ifdef __KERNEL__ #include <asm/io.h> +#include <linux/types.h> #include <linux/list.h> #include <linux/mutex.h> #include <linux/device.h> @@ -62,7 +63,7 @@ struct gameport_driver { struct device_driver driver; - unsigned int ignore; + bool ignore; }; #define to_gameport_driver(d) container_of(d, struct gameport_driver, driver) diff --git a/include/linux/gcd.h b/include/linux/gcd.h new file mode 100644 index 
00000000000..69f5e8a01ba --- /dev/null +++ b/include/linux/gcd.h @@ -0,0 +1,8 @@ +#ifndef _GCD_H +#define _GCD_H + +#include <linux/compiler.h> + +unsigned long gcd(unsigned long a, unsigned long b) __attribute_const__; + +#endif /* _GCD_H */ diff --git a/include/linux/gen_stats.h b/include/linux/gen_stats.h index 13f4e74609a..710e901085d 100644 --- a/include/linux/gen_stats.h +++ b/include/linux/gen_stats.h @@ -23,6 +23,11 @@ struct gnet_stats_basic __u64 bytes; __u32 packets; }; +struct gnet_stats_basic_packed +{ + __u64 bytes; + __u32 packets; +} __attribute__ ((packed)); /** * struct gnet_stats_rate_est - rate estimator diff --git a/include/linux/genetlink.h b/include/linux/genetlink.h index 7da02c93002..b834ef6d59f 100644 --- a/include/linux/genetlink.h +++ b/include/linux/genetlink.h @@ -1,6 +1,7 @@ #ifndef __LINUX_GENERIC_NETLINK_H #define __LINUX_GENERIC_NETLINK_H +#include <linux/types.h> #include <linux/netlink.h> #define GENL_NAMSIZ 16 /* length of family name */ diff --git a/include/linux/genhd.h b/include/linux/genhd.h index 16948eaecae..45fc320a53c 100644 --- a/include/linux/genhd.h +++ b/include/linux/genhd.h @@ -90,6 +90,7 @@ struct disk_stats { struct hd_struct { sector_t start_sect; sector_t nr_sects; + sector_t alignment_offset; struct device __dev; struct kobject *holder_dir; int policy, partno; @@ -113,6 +114,7 @@ struct hd_struct { #define GENHD_FL_UP 16 #define GENHD_FL_SUPPRESS_PARTITION_INFO 32 #define GENHD_FL_EXT_DEVT 64 /* allow extended devt */ +#define GENHD_FL_NATIVE_CAPACITY 128 #define BLK_SCSI_MAX_CMDS (256) #define BLK_SCSI_CMD_PER_LONG (BLK_SCSI_MAX_CMDS / (sizeof(long) * 8)) @@ -140,7 +142,7 @@ struct gendisk { * disks that can't be partitioned. */ char disk_name[DISK_NAME_LEN]; /* name of major driver */ - + char *(*nodename)(struct gendisk *gd); /* Array of pointers to partitions indexed by partno. * Protected with matching bdev lock but stat and other * non-critical accesses use RCU. 
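The new <linux/gcd.h> above exports a single Euclid's-algorithm helper. A small usage sketch, reducing a ratio to lowest terms; the sample rates are made-up numbers:

#include <linux/gcd.h>

/* e.g. 48000:44100 -> gcd(48000, 44100) == 300 -> 160:147 */
static void reduce_ratio(unsigned long *num, unsigned long *den)
{
	unsigned long g = gcd(*num, *den);

	*num /= g;
	*den /= g;
}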
Always access through @@ -214,6 +216,7 @@ static inline void disk_put_part(struct hd_struct *part) #define DISK_PITER_REVERSE (1 << 0) /* iterate in the reverse direction */ #define DISK_PITER_INCL_EMPTY (1 << 1) /* include 0-sized parts */ #define DISK_PITER_INCL_PART0 (1 << 2) /* include partition 0 */ +#define DISK_PITER_INCL_EMPTY_PART0 (1 << 3) /* include empty partition 0 */ struct disk_part_iter { struct gendisk *disk; @@ -333,11 +336,10 @@ static inline void part_dec_in_flight(struct hd_struct *part) part_to_disk(part)->part0.in_flight--; } -/* drivers/block/ll_rw_blk.c */ +/* block/blk-core.c */ extern void part_round_stats(int cpu, struct hd_struct *part); -/* drivers/block/genhd.c */ -extern int get_blkdev_list(char *, int); +/* block/genhd.c */ extern void add_disk(struct gendisk *disk); extern void del_gendisk(struct gendisk *gp); extern void unlink_gendisk(struct gendisk *gp); diff --git a/include/linux/gfp.h b/include/linux/gfp.h index dd20cd78faa..7c777a0da17 100644 --- a/include/linux/gfp.h +++ b/include/linux/gfp.h @@ -4,6 +4,8 @@ #include <linux/mmzone.h> #include <linux/stddef.h> #include <linux/linkage.h> +#include <linux/topology.h> +#include <linux/mmdebug.h> struct vm_area_struct; @@ -19,7 +21,8 @@ struct vm_area_struct; #define __GFP_DMA ((__force gfp_t)0x01u) #define __GFP_HIGHMEM ((__force gfp_t)0x02u) #define __GFP_DMA32 ((__force gfp_t)0x04u) - +#define __GFP_MOVABLE ((__force gfp_t)0x08u) /* Page is movable */ +#define GFP_ZONEMASK (__GFP_DMA|__GFP_HIGHMEM|__GFP_DMA32|__GFP_MOVABLE) /* * Action modifiers - doesn't change the zoning * @@ -49,9 +52,20 @@ struct vm_area_struct; #define __GFP_HARDWALL ((__force gfp_t)0x20000u) /* Enforce hardwall cpuset memory allocs */ #define __GFP_THISNODE ((__force gfp_t)0x40000u)/* No fallback, no policies */ #define __GFP_RECLAIMABLE ((__force gfp_t)0x80000u) /* Page is reclaimable */ -#define __GFP_MOVABLE ((__force gfp_t)0x100000u) /* Page is movable */ -#define __GFP_BITS_SHIFT 21 /* Room for 21 __GFP_FOO bits */ +#ifdef CONFIG_KMEMCHECK +#define __GFP_NOTRACK ((__force gfp_t)0x200000u) /* Don't track with kmemcheck */ +#else +#define __GFP_NOTRACK ((__force gfp_t)0) +#endif + +/* + * This may seem redundant, but it's a way of annotating false positives vs. + * allocations that simply cannot be supported (e.g. page tables). 
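__GFP_NOTRACK (and its FALSE_POSITIVE alias) above exists so kmemcheck can be told not to shadow-track an allocation; page-table pages are the canonical case named in the comment. A hedged sketch, with the helper name being illustrative:

#include <linux/gfp.h>

/* allocate a zeroed page that kmemcheck should not track; the caller
 * frees it with free_page().  __GFP_NOTRACK compiles to 0 when
 * CONFIG_KMEMCHECK is off, so it is always safe to pass. */
static unsigned long alloc_untracked_page(void)
{
	return __get_free_page(GFP_KERNEL | __GFP_ZERO | __GFP_NOTRACK);
}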
+ */ +#define __GFP_NOTRACK_FALSE_POSITIVE (__GFP_NOTRACK) + +#define __GFP_BITS_SHIFT 22 /* Room for 22 __GFP_FOO bits */ #define __GFP_BITS_MASK ((__force gfp_t)((1 << __GFP_BITS_SHIFT) - 1)) /* This equals 0, but use constants in case they ever change */ @@ -84,6 +98,9 @@ struct vm_area_struct; __GFP_NOWARN|__GFP_REPEAT|__GFP_NOFAIL|\ __GFP_NORETRY|__GFP_NOMEMALLOC) +/* Control slab gfp mask during early boot */ +#define GFP_BOOT_MASK __GFP_BITS_MASK & ~(__GFP_WAIT|__GFP_IO|__GFP_FS) + /* Control allocation constraints */ #define GFP_CONSTRAINT_MASK (__GFP_HARDWALL|__GFP_THISNODE) @@ -111,24 +128,105 @@ static inline int allocflags_to_migratetype(gfp_t gfp_flags) ((gfp_flags & __GFP_RECLAIMABLE) != 0); } -static inline enum zone_type gfp_zone(gfp_t flags) -{ +#ifdef CONFIG_HIGHMEM +#define OPT_ZONE_HIGHMEM ZONE_HIGHMEM +#else +#define OPT_ZONE_HIGHMEM ZONE_NORMAL +#endif + #ifdef CONFIG_ZONE_DMA - if (flags & __GFP_DMA) - return ZONE_DMA; +#define OPT_ZONE_DMA ZONE_DMA +#else +#define OPT_ZONE_DMA ZONE_NORMAL #endif + #ifdef CONFIG_ZONE_DMA32 - if (flags & __GFP_DMA32) - return ZONE_DMA32; +#define OPT_ZONE_DMA32 ZONE_DMA32 +#else +#define OPT_ZONE_DMA32 ZONE_NORMAL #endif - if ((flags & (__GFP_HIGHMEM | __GFP_MOVABLE)) == - (__GFP_HIGHMEM | __GFP_MOVABLE)) - return ZONE_MOVABLE; -#ifdef CONFIG_HIGHMEM - if (flags & __GFP_HIGHMEM) - return ZONE_HIGHMEM; + +/* + * GFP_ZONE_TABLE is a word size bitstring that is used for looking up the + * zone to use given the lowest 4 bits of gfp_t. Entries are ZONE_SHIFT long + * and there are 16 of them to cover all possible combinations of + * __GFP_DMA, __GFP_DMA32, __GFP_MOVABLE and __GFP_HIGHMEM + * + * The zone fallback order is MOVABLE=>HIGHMEM=>NORMAL=>DMA32=>DMA. + * But GFP_MOVABLE is not only a zone specifier but also an allocation + * policy. Therefore __GFP_MOVABLE plus another zone selector is valid. + * Only 1bit of the lowest 3 bit (DMA,DMA32,HIGHMEM) can be set to "1". + * + * bit result + * ================= + * 0x0 => NORMAL + * 0x1 => DMA or NORMAL + * 0x2 => HIGHMEM or NORMAL + * 0x3 => BAD (DMA+HIGHMEM) + * 0x4 => DMA32 or DMA or NORMAL + * 0x5 => BAD (DMA+DMA32) + * 0x6 => BAD (HIGHMEM+DMA32) + * 0x7 => BAD (HIGHMEM+DMA32+DMA) + * 0x8 => NORMAL (MOVABLE+0) + * 0x9 => DMA or NORMAL (MOVABLE+DMA) + * 0xa => MOVABLE (Movable is valid only if HIGHMEM is set too) + * 0xb => BAD (MOVABLE+HIGHMEM+DMA) + * 0xc => DMA32 (MOVABLE+HIGHMEM+DMA32) + * 0xd => BAD (MOVABLE+DMA32+DMA) + * 0xe => BAD (MOVABLE+DMA32+HIGHMEM) + * 0xf => BAD (MOVABLE+DMA32+HIGHMEM+DMA) + * + * ZONES_SHIFT must be <= 2 on 32 bit platforms. + */ + +#if 16 * ZONES_SHIFT > BITS_PER_LONG +#error ZONES_SHIFT too large to create GFP_ZONE_TABLE integer #endif - return ZONE_NORMAL; + +#define GFP_ZONE_TABLE ( \ + (ZONE_NORMAL << 0 * ZONES_SHIFT) \ + | (OPT_ZONE_DMA << __GFP_DMA * ZONES_SHIFT) \ + | (OPT_ZONE_HIGHMEM << __GFP_HIGHMEM * ZONES_SHIFT) \ + | (OPT_ZONE_DMA32 << __GFP_DMA32 * ZONES_SHIFT) \ + | (ZONE_NORMAL << __GFP_MOVABLE * ZONES_SHIFT) \ + | (OPT_ZONE_DMA << (__GFP_MOVABLE | __GFP_DMA) * ZONES_SHIFT) \ + | (ZONE_MOVABLE << (__GFP_MOVABLE | __GFP_HIGHMEM) * ZONES_SHIFT)\ + | (OPT_ZONE_DMA32 << (__GFP_MOVABLE | __GFP_DMA32) * ZONES_SHIFT)\ +) + +/* + * GFP_ZONE_BAD is a bitmap for all combination of __GFP_DMA, __GFP_DMA32 + * __GFP_HIGHMEM and __GFP_MOVABLE that are not permitted. One flag per + * entry starting with bit 0. Bit is set if the combination is not + * allowed. 
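To make the zone table above concrete, here is one worked lookup, assuming a 32-bit configuration with DMA, NORMAL, HIGHMEM and MOVABLE zones so that ZONES_SHIFT == 2 (the exact numbers depend on the kernel config):

#include <linux/gfp.h>

static inline enum zone_type example_zone(void)
{
	/* GFP_HIGHUSER_MOVABLE carries __GFP_HIGHMEM | __GFP_MOVABLE,
	 * i.e. 0x02 | 0x08 == 0x0a of GFP_ZONEMASK.  gfp_zone() then
	 * reads bits 20..21 of GFP_ZONE_TABLE (0x0a * ZONES_SHIFT == 20),
	 * which the table fills with ZONE_MOVABLE (the MOVABLE+HIGHMEM
	 * row); 0x0a is not one of the disallowed combinations, so the
	 * sanity checks stay silent. */
	return gfp_zone(GFP_HIGHUSER_MOVABLE);		/* == ZONE_MOVABLE */
}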
+ */ +#define GFP_ZONE_BAD ( \ + 1 << (__GFP_DMA | __GFP_HIGHMEM) \ + | 1 << (__GFP_DMA | __GFP_DMA32) \ + | 1 << (__GFP_DMA32 | __GFP_HIGHMEM) \ + | 1 << (__GFP_DMA | __GFP_DMA32 | __GFP_HIGHMEM) \ + | 1 << (__GFP_MOVABLE | __GFP_HIGHMEM | __GFP_DMA) \ + | 1 << (__GFP_MOVABLE | __GFP_DMA32 | __GFP_DMA) \ + | 1 << (__GFP_MOVABLE | __GFP_DMA32 | __GFP_HIGHMEM) \ + | 1 << (__GFP_MOVABLE | __GFP_DMA32 | __GFP_DMA | __GFP_HIGHMEM)\ +) + +static inline enum zone_type gfp_zone(gfp_t flags) +{ + enum zone_type z; + int bit = flags & GFP_ZONEMASK; + + z = (GFP_ZONE_TABLE >> (bit * ZONES_SHIFT)) & + ((1 << ZONES_SHIFT) - 1); + + if (__builtin_constant_p(bit)) + BUILD_BUG_ON((GFP_ZONE_BAD >> bit) & 1); + else { +#ifdef CONFIG_DEBUG_VM + BUG_ON((GFP_ZONE_BAD >> bit) & 1); +#endif + } + return z; } /* @@ -168,30 +266,19 @@ static inline void arch_alloc_page(struct page *page, int order) { } #endif struct page * -__alloc_pages_internal(gfp_t gfp_mask, unsigned int order, +__alloc_pages_nodemask(gfp_t gfp_mask, unsigned int order, struct zonelist *zonelist, nodemask_t *nodemask); static inline struct page * __alloc_pages(gfp_t gfp_mask, unsigned int order, struct zonelist *zonelist) { - return __alloc_pages_internal(gfp_mask, order, zonelist, NULL); + return __alloc_pages_nodemask(gfp_mask, order, zonelist, NULL); } -static inline struct page * -__alloc_pages_nodemask(gfp_t gfp_mask, unsigned int order, - struct zonelist *zonelist, nodemask_t *nodemask) -{ - return __alloc_pages_internal(gfp_mask, order, zonelist, nodemask); -} - - static inline struct page *alloc_pages_node(int nid, gfp_t gfp_mask, unsigned int order) { - if (unlikely(order >= MAX_ORDER)) - return NULL; - /* Unknown node is current node */ if (nid < 0) nid = numa_node_id(); @@ -199,15 +286,20 @@ static inline struct page *alloc_pages_node(int nid, gfp_t gfp_mask, return __alloc_pages(gfp_mask, order, node_zonelist(nid, gfp_mask)); } +static inline struct page *alloc_pages_exact_node(int nid, gfp_t gfp_mask, + unsigned int order) +{ + VM_BUG_ON(nid < 0 || nid >= MAX_NUMNODES); + + return __alloc_pages(gfp_mask, order, node_zonelist(nid, gfp_mask)); +} + #ifdef CONFIG_NUMA extern struct page *alloc_pages_current(gfp_t gfp_mask, unsigned order); static inline struct page * alloc_pages(gfp_t gfp_mask, unsigned int order) { - if (unlikely(order >= MAX_ORDER)) - return NULL; - return alloc_pages_current(gfp_mask, order); } extern struct page *alloc_page_vma(gfp_t gfp_mask, @@ -244,4 +336,23 @@ void drain_zone_pages(struct zone *zone, struct per_cpu_pages *pcp); void drain_all_pages(void); void drain_local_pages(void *dummy); +extern bool oom_killer_disabled; + +static inline void oom_killer_disable(void) +{ + oom_killer_disabled = true; +} + +static inline void oom_killer_enable(void) +{ + oom_killer_disabled = false; +} + +extern gfp_t gfp_allowed_mask; + +static inline void set_gfp_allowed_mask(gfp_t mask) +{ + gfp_allowed_mask = mask; +} + #endif /* __LINUX_GFP_H */ diff --git a/include/linux/gfs2_ondisk.h b/include/linux/gfs2_ondisk.h index 14d0df0b574..b80c88dedbb 100644 --- a/include/linux/gfs2_ondisk.h +++ b/include/linux/gfs2_ondisk.h @@ -10,6 +10,8 @@ #ifndef __GFS2_ONDISK_DOT_H__ #define __GFS2_ONDISK_DOT_H__ +#include <linux/types.h> + #define GFS2_MAGIC 0x01161970 #define GFS2_BASIC_BLOCK 512 #define GFS2_BASIC_BLOCK_SHIFT 9 @@ -331,6 +333,28 @@ struct gfs2_leaf { /* * Extended attribute header format + * + * This works in a similar way to dirents. 
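alloc_pages_exact_node(), added in the gfp.h hunk above, differs from alloc_pages_node() in that it rejects an out-of-range node id (VM_BUG_ON) instead of silently falling back to the current node. A minimal sketch, assuming the caller already holds a valid nid; the helper name is illustrative:

#include <linux/gfp.h>
#include <linux/mm.h>
#include <linux/errno.h>

/* allocate and release one zeroed page on a specific, known-valid node */
static int touch_node_page(int nid)
{
	struct page *page;

	page = alloc_pages_exact_node(nid, GFP_KERNEL | __GFP_ZERO, 0);
	if (!page)
		return -ENOMEM;
	__free_pages(page, 0);
	return 0;
}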
There is a fixed size header + * followed by a variable length section made up of the name and the + * associated data. In the case of a "stuffed" entry, the value is + * inline directly after the name, the ea_num_ptrs entry will be + * zero in that case. For non-"stuffed" entries, there will be + * a set of pointers (aligned to 8 byte boundary) to the block(s) + * containing the value. + * + * The blocks containing the values and the blocks containing the + * extended attribute headers themselves all start with the common + * metadata header. Each inode, if it has extended attributes, will + * have either a single block containing the extended attribute headers + * or a single indirect block pointing to blocks containing the + * extended attribure headers. + * + * The maximim size of the data part of an extended attribute is 64k + * so the number of blocks required depends upon block size. Since the + * block size also determines the number of pointers in an indirect + * block, its a fairly complicated calculation to work out the maximum + * number of blocks that an inode may have relating to extended attributes. + * */ #define GFS2_EA_MAX_NAME_LEN 255 diff --git a/include/linux/hardirq.h b/include/linux/hardirq.h index f83288347dd..6d527ee82b2 100644 --- a/include/linux/hardirq.h +++ b/include/linux/hardirq.h @@ -2,7 +2,9 @@ #define LINUX_HARDIRQ_H #include <linux/preempt.h> +#ifdef CONFIG_PREEMPT #include <linux/smp_lock.h> +#endif #include <linux/lockdep.h> #include <linux/ftrace_irq.h> #include <asm/hardirq.h> @@ -15,55 +17,67 @@ * - bits 0-7 are the preemption count (max preemption depth: 256) * - bits 8-15 are the softirq count (max # of softirqs: 256) * - * The hardirq count can be overridden per architecture, the default is: + * The hardirq count can in theory reach the same as NR_IRQS. + * In reality, the number of nested IRQS is limited to the stack + * size as well. For archs with over 1000 IRQS it is not practical + * to expect that they will all nest. We give a max of 10 bits for + * hardirq nesting. An arch may choose to give less than 10 bits. + * m68k expects it to be 8. * - * - bits 16-27 are the hardirq count (max # of hardirqs: 4096) - * - ( bit 28 is the PREEMPT_ACTIVE flag. ) + * - bits 16-25 are the hardirq count (max # of nested hardirqs: 1024) + * - bit 26 is the NMI_MASK + * - bit 28 is the PREEMPT_ACTIVE flag * * PREEMPT_MASK: 0x000000ff * SOFTIRQ_MASK: 0x0000ff00 - * HARDIRQ_MASK: 0x0fff0000 + * HARDIRQ_MASK: 0x03ff0000 + * NMI_MASK: 0x04000000 */ #define PREEMPT_BITS 8 #define SOFTIRQ_BITS 8 +#define NMI_BITS 1 -#ifndef HARDIRQ_BITS -#define HARDIRQ_BITS 12 +#define MAX_HARDIRQ_BITS 10 -#ifndef MAX_HARDIRQS_PER_CPU -#define MAX_HARDIRQS_PER_CPU NR_IRQS +#ifndef HARDIRQ_BITS +# define HARDIRQ_BITS MAX_HARDIRQ_BITS #endif -/* - * The hardirq mask has to be large enough to have space for potentially - * all IRQ sources in the system nesting on a single CPU. - */ -#if (1 << HARDIRQ_BITS) < MAX_HARDIRQS_PER_CPU -# error HARDIRQ_BITS is too low! -#endif +#if HARDIRQ_BITS > MAX_HARDIRQ_BITS +#error HARDIRQ_BITS too high! 
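Plugging the default widths above into the shift/mask definitions that follow gives exactly the layout promised by the comment. A hedged compile-time check, assuming no architecture override of HARDIRQ_BITS (m68k, for instance, uses 8 bits):

#include <linux/hardirq.h>
#include <linux/kernel.h>

static inline void check_preempt_count_layout(void)
{
	/* 8 preempt + 8 softirq + 10 hardirq + 1 NMI bit */
	BUILD_BUG_ON(PREEMPT_MASK != 0x000000ff);	/* bits  0..7  */
	BUILD_BUG_ON(SOFTIRQ_MASK != 0x0000ff00);	/* bits  8..15 */
	BUILD_BUG_ON(HARDIRQ_MASK != 0x03ff0000);	/* bits 16..25 */
	BUILD_BUG_ON(NMI_MASK     != 0x04000000);	/* bit 26, tested by in_nmi() */
}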
#endif #define PREEMPT_SHIFT 0 #define SOFTIRQ_SHIFT (PREEMPT_SHIFT + PREEMPT_BITS) #define HARDIRQ_SHIFT (SOFTIRQ_SHIFT + SOFTIRQ_BITS) +#define NMI_SHIFT (HARDIRQ_SHIFT + HARDIRQ_BITS) #define __IRQ_MASK(x) ((1UL << (x))-1) #define PREEMPT_MASK (__IRQ_MASK(PREEMPT_BITS) << PREEMPT_SHIFT) #define SOFTIRQ_MASK (__IRQ_MASK(SOFTIRQ_BITS) << SOFTIRQ_SHIFT) #define HARDIRQ_MASK (__IRQ_MASK(HARDIRQ_BITS) << HARDIRQ_SHIFT) +#define NMI_MASK (__IRQ_MASK(NMI_BITS) << NMI_SHIFT) #define PREEMPT_OFFSET (1UL << PREEMPT_SHIFT) #define SOFTIRQ_OFFSET (1UL << SOFTIRQ_SHIFT) #define HARDIRQ_OFFSET (1UL << HARDIRQ_SHIFT) +#define NMI_OFFSET (1UL << NMI_SHIFT) + +#ifndef PREEMPT_ACTIVE +#define PREEMPT_ACTIVE_BITS 1 +#define PREEMPT_ACTIVE_SHIFT (NMI_SHIFT + NMI_BITS) +#define PREEMPT_ACTIVE (__IRQ_MASK(PREEMPT_ACTIVE_BITS) << PREEMPT_ACTIVE_SHIFT) +#endif -#if PREEMPT_ACTIVE < (1 << (HARDIRQ_SHIFT + HARDIRQ_BITS)) +#if PREEMPT_ACTIVE < (1 << (NMI_SHIFT + NMI_BITS)) #error PREEMPT_ACTIVE is too low! #endif #define hardirq_count() (preempt_count() & HARDIRQ_MASK) #define softirq_count() (preempt_count() & SOFTIRQ_MASK) -#define irq_count() (preempt_count() & (HARDIRQ_MASK | SOFTIRQ_MASK)) +#define irq_count() (preempt_count() & (HARDIRQ_MASK | SOFTIRQ_MASK \ + | NMI_MASK)) /* * Are we doing bottom half or hardware interrupt processing? @@ -73,6 +87,11 @@ #define in_softirq() (softirq_count()) #define in_interrupt() (irq_count()) +/* + * Are we in NMI context? + */ +#define in_nmi() (preempt_count() & NMI_MASK) + #if defined(CONFIG_PREEMPT) # define PREEMPT_INATOMIC_BASE kernel_locked() # define PREEMPT_CHECK_OFFSET 1 @@ -105,7 +124,7 @@ # define IRQ_EXIT_OFFSET HARDIRQ_OFFSET #endif -#ifdef CONFIG_SMP +#if defined(CONFIG_SMP) || defined(CONFIG_GENERIC_HARDIRQS) extern void synchronize_irq(unsigned int irq); #else # define synchronize_irq(irq) barrier() @@ -119,7 +138,7 @@ static inline void account_system_vtime(struct task_struct *tsk) } #endif -#if defined(CONFIG_NO_HZ) && !defined(CONFIG_CLASSIC_RCU) +#if defined(CONFIG_NO_HZ) extern void rcu_irq_enter(void); extern void rcu_irq_exit(void); extern void rcu_nmi_enter(void); @@ -129,7 +148,7 @@ extern void rcu_nmi_exit(void); # define rcu_irq_exit() do { } while (0) # define rcu_nmi_enter() do { } while (0) # define rcu_nmi_exit() do { } while (0) -#endif /* #if defined(CONFIG_NO_HZ) && !defined(CONFIG_CLASSIC_RCU) */ +#endif /* #if defined(CONFIG_NO_HZ) */ /* * It is safe to do non-atomic ops on ->hardirq_context, @@ -164,20 +183,24 @@ extern void irq_enter(void); */ extern void irq_exit(void); -#define nmi_enter() \ - do { \ - ftrace_nmi_enter(); \ - lockdep_off(); \ - rcu_nmi_enter(); \ - __irq_enter(); \ +#define nmi_enter() \ + do { \ + ftrace_nmi_enter(); \ + BUG_ON(in_nmi()); \ + add_preempt_count(NMI_OFFSET + HARDIRQ_OFFSET); \ + lockdep_off(); \ + rcu_nmi_enter(); \ + trace_hardirq_enter(); \ } while (0) -#define nmi_exit() \ - do { \ - __irq_exit(); \ - rcu_nmi_exit(); \ - lockdep_on(); \ - ftrace_nmi_exit(); \ +#define nmi_exit() \ + do { \ + trace_hardirq_exit(); \ + rcu_nmi_exit(); \ + lockdep_on(); \ + BUG_ON(!in_nmi()); \ + sub_preempt_count(NMI_OFFSET + HARDIRQ_OFFSET); \ + ftrace_nmi_exit(); \ } while (0) #endif /* LINUX_HARDIRQ_H */ diff --git a/include/linux/hdlc.h b/include/linux/hdlc.h index fd47a151665..ee275c8b3df 100644 --- a/include/linux/hdlc.h +++ b/include/linux/hdlc.h @@ -38,6 +38,7 @@ struct hdlc_proto { int (*ioctl)(struct net_device *dev, struct ifreq *ifr); __be16 (*type_trans)(struct sk_buff *skb, struct net_device *dev); 
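With the switch of the HDLC xmit hooks to netdev_tx_t above, a generic HDLC hardware driver is expected to route transmission through the helpers declared a little further down (hdlc_start_xmit(), hdlc_change_mtu()). A hedged sketch of that wiring; the my_* callbacks stand in for driver code, hardware bring-up is elided, and the use of the pre-existing hdlc_ioctl() helper is an assumption not shown in this excerpt:

#include <linux/hdlc.h>
#include <linux/netdevice.h>

static int my_open(struct net_device *dev)
{
	/* hardware bring-up elided */
	return hdlc_open(dev);
}

static int my_close(struct net_device *dev)
{
	hdlc_close(dev);
	return 0;
}

static const struct net_device_ops my_hdlc_netdev_ops = {
	.ndo_open	= my_open,
	.ndo_stop	= my_close,
	.ndo_change_mtu	= hdlc_change_mtu,
	.ndo_start_xmit	= hdlc_start_xmit,	/* dispatches to proto->xmit or hw xmit */
	.ndo_do_ioctl	= hdlc_ioctl,
};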
int (*netif_rx)(struct sk_buff *skb); + netdev_tx_t (*xmit)(struct sk_buff *skb, struct net_device *dev); struct module *module; struct hdlc_proto *next; /* next protocol in the list */ }; @@ -50,7 +51,7 @@ typedef struct hdlc_device { unsigned short encoding, unsigned short parity); /* hardware driver must handle this instead of dev->hard_start_xmit */ - int (*xmit)(struct sk_buff *skb, struct net_device *dev); + netdev_tx_t (*xmit)(struct sk_buff *skb, struct net_device *dev); /* Things below are for HDLC layer internal use only */ const struct hdlc_proto *proto; @@ -59,7 +60,7 @@ typedef struct hdlc_device { spinlock_t state_lock; void *state; void *priv; -}hdlc_device; +} hdlc_device; @@ -102,6 +103,10 @@ static __inline__ void debug_frame(const struct sk_buff *skb) int hdlc_open(struct net_device *dev); /* Must be called by hardware driver when HDLC device is being closed */ void hdlc_close(struct net_device *dev); +/* May be used by hardware driver */ +int hdlc_change_mtu(struct net_device *dev, int new_mtu); +/* Must be pointed to by hw driver's dev->netdev_ops->ndo_start_xmit */ +netdev_tx_t hdlc_start_xmit(struct sk_buff *skb, struct net_device *dev); int attach_hdlc_protocol(struct net_device *dev, struct hdlc_proto *proto, size_t size); diff --git a/include/linux/hdlcdrv.h b/include/linux/hdlcdrv.h index bf6302f6b5f..c010b4a785b 100644 --- a/include/linux/hdlcdrv.h +++ b/include/linux/hdlcdrv.h @@ -215,7 +215,7 @@ struct hdlcdrv_state { struct hdlcdrv_hdlctx { struct hdlcdrv_hdlcbuffer hbuf; - long in_hdlc_tx; + unsigned long in_hdlc_tx; /* * 0 = send flags * 1 = send txtail (flags) @@ -241,7 +241,6 @@ struct hdlcdrv_state { struct hdlcdrv_bitbuffer bitbuf_hdlc; #endif /* HDLCDRV_DEBUG */ - struct net_device_stats stats; int ptt_keyed; /* queued skb for transmission */ diff --git a/include/linux/hdreg.h b/include/linux/hdreg.h index c37e9241fae..29ee2873f4a 100644 --- a/include/linux/hdreg.h +++ b/include/linux/hdreg.h @@ -1,68 +1,6 @@ #ifndef _LINUX_HDREG_H #define _LINUX_HDREG_H -#ifdef __KERNEL__ -#include <linux/ata.h> - -/* - * This file contains some defines for the AT-hd-controller. - * Various sources. - */ - -/* ide.c has its own port definitions in "ide.h" */ - -#define HD_IRQ 14 - -/* Hd controller regs. 
Ref: IBM AT Bios-listing */ -#define HD_DATA 0x1f0 /* _CTL when writing */ -#define HD_ERROR 0x1f1 /* see err-bits */ -#define HD_NSECTOR 0x1f2 /* nr of sectors to read/write */ -#define HD_SECTOR 0x1f3 /* starting sector */ -#define HD_LCYL 0x1f4 /* starting cylinder */ -#define HD_HCYL 0x1f5 /* high byte of starting cyl */ -#define HD_CURRENT 0x1f6 /* 101dhhhh , d=drive, hhhh=head */ -#define HD_STATUS 0x1f7 /* see status-bits */ -#define HD_FEATURE HD_ERROR /* same io address, read=error, write=feature */ -#define HD_PRECOMP HD_FEATURE /* obsolete use of this port - predates IDE */ -#define HD_COMMAND HD_STATUS /* same io address, read=status, write=cmd */ - -#define HD_CMD 0x3f6 /* used for resets */ -#define HD_ALTSTATUS 0x3f6 /* same as HD_STATUS but doesn't clear irq */ - -/* remainder is shared between hd.c, ide.c, ide-cd.c, and the hdparm utility */ - -/* Bits of HD_STATUS */ -#define ERR_STAT 0x01 -#define INDEX_STAT 0x02 -#define ECC_STAT 0x04 /* Corrected error */ -#define DRQ_STAT 0x08 -#define SEEK_STAT 0x10 -#define SRV_STAT 0x10 -#define WRERR_STAT 0x20 -#define READY_STAT 0x40 -#define BUSY_STAT 0x80 - -/* Bits for HD_ERROR */ -#define MARK_ERR 0x01 /* Bad address mark */ -#define ILI_ERR 0x01 /* Illegal Length Indication (ATAPI) */ -#define TRK0_ERR 0x02 /* couldn't find track 0 */ -#define EOM_ERR 0x02 /* End Of Media (ATAPI) */ -#define ABRT_ERR 0x04 /* Command aborted */ -#define MCR_ERR 0x08 /* media change request */ -#define ID_ERR 0x10 /* ID field not found */ -#define MC_ERR 0x20 /* media changed */ -#define ECC_ERR 0x40 /* Uncorrectable ECC error */ -#define BBD_ERR 0x80 /* pre-EIDE meaning: block marked bad */ -#define ICRC_ERR 0x80 /* new meaning: CRC error during transfer */ -#define LFS_ERR 0xf0 /* Last Failed Sense (ATAPI) */ - -/* Bits of HD_NSECTOR */ -#define CD 0x01 -#define IO 0x02 -#define REL 0x04 -#define TAG_MASK 0xf8 -#endif /* __KERNEL__ */ - #include <linux/types.h> /* @@ -191,6 +129,7 @@ typedef struct hd_drive_hob_hdr { #define TASKFILE_INVALID 0x7fff #endif +#ifndef __KERNEL__ /* ATA/ATAPI Commands pre T13 Spec */ #define WIN_NOP 0x00 /* @@ -379,6 +318,7 @@ typedef struct hd_drive_hob_hdr { #define SECURITY_ERASE_UNIT 0xBD #define SECURITY_FREEZE_LOCK 0xBE #define SECURITY_DISABLE_PASSWORD 0xBF +#endif /* __KERNEL__ */ struct hd_geometry { unsigned char heads; @@ -448,6 +388,7 @@ enum { #define __NEW_HD_DRIVE_ID +#ifndef __KERNEL__ /* * Structure returned by HDIO_GET_IDENTITY, as per ANSI NCITS ATA6 rev.1b spec. * @@ -511,7 +452,6 @@ struct hd_driveid { unsigned short words69_70[2]; /* reserved words 69-70 * future command overlap and queuing */ - /* HDIO_GET_IDENTITY currently returns only words 0 through 70 */ unsigned short words71_74[4]; /* reserved words 71-74 * for IDENTIFY PACKET DEVICE command */ @@ -700,6 +640,7 @@ struct hd_driveid { * 7:0 Signature */ }; +#endif /* __KERNEL__ */ /* * IDE "nice" flags. 
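The hdreg.h hunk above hides the pre-T13 command names and struct hd_driveid from kernel code (which should use <linux/ata.h>) while keeping them visible to userspace. A hedged userspace sketch of the ioctl that still consumes the struct; the device node is an example and error handling is trimmed:

/* userspace, not kernel code */
#include <linux/hdreg.h>
#include <sys/ioctl.h>
#include <fcntl.h>
#include <unistd.h>
#include <stdio.h>

int main(void)
{
	struct hd_driveid id;
	int fd = open("/dev/hda", O_RDONLY | O_NONBLOCK);

	if (fd >= 0 && ioctl(fd, HDIO_GET_IDENTITY, &id) == 0)
		printf("model: %.40s\n", (char *)id.model);
	if (fd >= 0)
		close(fd);
	return 0;
}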
These are used on a per drive basis to determine diff --git a/include/linux/hid.h b/include/linux/hid.h index 81aa84d60c6..53489fd4d70 100644 --- a/include/linux/hid.h +++ b/include/linux/hid.h @@ -238,6 +238,42 @@ struct hid_item { #define HID_GD_RIGHT 0x00010092 #define HID_GD_LEFT 0x00010093 +#define HID_DG_DIGITIZER 0x000d0001 +#define HID_DG_PEN 0x000d0002 +#define HID_DG_LIGHTPEN 0x000d0003 +#define HID_DG_TOUCHSCREEN 0x000d0004 +#define HID_DG_TOUCHPAD 0x000d0005 +#define HID_DG_STYLUS 0x000d0020 +#define HID_DG_PUCK 0x000d0021 +#define HID_DG_FINGER 0x000d0022 +#define HID_DG_TIPPRESSURE 0x000d0030 +#define HID_DG_BARRELPRESSURE 0x000d0031 +#define HID_DG_INRANGE 0x000d0032 +#define HID_DG_TOUCH 0x000d0033 +#define HID_DG_UNTOUCH 0x000d0034 +#define HID_DG_TAP 0x000d0035 +#define HID_DG_TABLETFUNCTIONKEY 0x000d0039 +#define HID_DG_PROGRAMCHANGEKEY 0x000d003a +#define HID_DG_INVERT 0x000d003c +#define HID_DG_TIPSWITCH 0x000d0042 +#define HID_DG_TIPSWITCH2 0x000d0043 +#define HID_DG_BARRELSWITCH 0x000d0044 +#define HID_DG_ERASER 0x000d0045 +#define HID_DG_TABLETPICK 0x000d0046 +/* + * as of May 20, 2009 the usages below are not yet in the official USB spec + * but are being pushed by Microsft as described in their paper "Digitizer + * Drivers for Windows Touch and Pen-Based Computers" + */ +#define HID_DG_CONFIDENCE 0x000d0047 +#define HID_DG_WIDTH 0x000d0048 +#define HID_DG_HEIGHT 0x000d0049 +#define HID_DG_CONTACTID 0x000d0051 +#define HID_DG_INPUTMODE 0x000d0052 +#define HID_DG_DEVICEINDEX 0x000d0053 +#define HID_DG_CONTACTCOUNT 0x000d0054 +#define HID_DG_CONTACTMAX 0x000d0055 + /* * HID report types --- Ouch! HID spec says 1 2 3! */ @@ -270,6 +306,7 @@ struct hid_item { #define HID_QUIRK_INVERT 0x00000001 #define HID_QUIRK_NOTOUCH 0x00000002 +#define HID_QUIRK_IGNORE 0x00000004 #define HID_QUIRK_NOGET 0x00000008 #define HID_QUIRK_BADPAD 0x00000020 #define HID_QUIRK_MULTI_INPUT 0x00000040 @@ -603,12 +640,17 @@ struct hid_ll_driver { int (*open)(struct hid_device *hdev); void (*close)(struct hid_device *hdev); + int (*power)(struct hid_device *hdev, int level); + int (*hidinput_input_event) (struct input_dev *idev, unsigned int type, unsigned int code, int value); int (*parse)(struct hid_device *hdev); }; +#define PM_HINT_FULLON 1<<5 +#define PM_HINT_NORMAL 1<<1 + /* Applications from HID Usage Tables 4/8/99 Version 1.1 */ /* We ignore a few input applications that are not widely used */ #define IS_INPUT_APPLICATION(a) (((a >= 0x00010000) && (a <= 0x00010008)) || (a == 0x00010080) || (a == 0x000c0001) || (a == 0x000d0002)) @@ -641,6 +683,7 @@ int hidinput_find_field(struct hid_device *hid, unsigned int type, unsigned int void hid_output_report(struct hid_report *report, __u8 *data); struct hid_device *hid_allocate_device(void); int hid_parse_report(struct hid_device *hid, __u8 *start, unsigned size); +int hid_check_keys_pressed(struct hid_device *hid); int hid_connect(struct hid_device *hid, unsigned int connect_mask); /** @@ -791,19 +834,5 @@ dbg_hid(const char *fmt, ...) 
__FILE__ , ## arg) #endif /* HID_FF */ -#ifdef CONFIG_HID_COMPAT -#define HID_COMPAT_LOAD_DRIVER(name) \ -/* prototype to avoid sparse warning */ \ -extern void hid_compat_##name(void); \ -void hid_compat_##name(void) { } \ -EXPORT_SYMBOL(hid_compat_##name) -#else -#define HID_COMPAT_LOAD_DRIVER(name) -#endif /* HID_COMPAT */ -#define HID_COMPAT_CALL_DRIVER(name) do { \ - extern void hid_compat_##name(void); \ - hid_compat_##name(); \ -} while (0) - #endif diff --git a/include/linux/hiddev.h b/include/linux/hiddev.h index c760ae0eb6a..bb6f58baf31 100644 --- a/include/linux/hiddev.h +++ b/include/linux/hiddev.h @@ -27,6 +27,8 @@ * Vojtech Pavlik, Ucitelska 1576, Prague 8, 182 00 Czech Republic */ +#include <linux/types.h> + /* * The event structure itself */ diff --git a/include/linux/highmem.h b/include/linux/highmem.h index 13875ce9112..211ff449726 100644 --- a/include/linux/highmem.h +++ b/include/linux/highmem.h @@ -19,8 +19,21 @@ static inline void flush_kernel_dcache_page(struct page *page) } #endif -#ifdef CONFIG_HIGHMEM +#include <asm/kmap_types.h> + +#if defined(CONFIG_DEBUG_HIGHMEM) && defined(CONFIG_TRACE_IRQFLAGS_SUPPORT) + +void debug_kmap_atomic(enum km_type type); + +#else + +static inline void debug_kmap_atomic(enum km_type type) +{ +} +#endif + +#ifdef CONFIG_HIGHMEM #include <asm/highmem.h> /* declarations for linux/mm/highmem.c */ @@ -42,9 +55,9 @@ static inline void *kmap(struct page *page) return page_address(page); } -#define kunmap(page) do { (void) (page); } while (0) - -#include <asm/kmap_types.h> +static inline void kunmap(struct page *page) +{ +} static inline void *kmap_atomic(struct page *page, enum km_type idx) { diff --git a/include/linux/hrtimer.h b/include/linux/hrtimer.h index bd37078c2d7..4759917adc7 100644 --- a/include/linux/hrtimer.h +++ b/include/linux/hrtimer.h @@ -21,6 +21,7 @@ #include <linux/list.h> #include <linux/wait.h> #include <linux/percpu.h> +#include <linux/timer.h> struct hrtimer_clock_base; @@ -30,8 +31,11 @@ struct hrtimer_cpu_base; * Mode arguments of xxx_hrtimer functions: */ enum hrtimer_mode { - HRTIMER_MODE_ABS, /* Time value is absolute */ - HRTIMER_MODE_REL, /* Time value is relative to now */ + HRTIMER_MODE_ABS = 0x0, /* Time value is absolute */ + HRTIMER_MODE_REL = 0x1, /* Time value is relative to now */ + HRTIMER_MODE_PINNED = 0x02, /* Timer is bound to CPU */ + HRTIMER_MODE_ABS_PINNED = 0x02, + HRTIMER_MODE_REL_PINNED = 0x03, }; /* @@ -336,6 +340,11 @@ extern int hrtimer_start(struct hrtimer *timer, ktime_t tim, const enum hrtimer_mode mode); extern int hrtimer_start_range_ns(struct hrtimer *timer, ktime_t tim, unsigned long range_ns, const enum hrtimer_mode mode); +extern int +__hrtimer_start_range_ns(struct hrtimer *timer, ktime_t tim, + unsigned long delta_ns, + const enum hrtimer_mode mode, int wakeup); + extern int hrtimer_cancel(struct hrtimer *timer); extern int hrtimer_try_to_cancel(struct hrtimer *timer); @@ -439,6 +448,8 @@ extern void timer_stats_update_stats(void *timer, pid_t pid, void *startf, static inline void timer_stats_account_hrtimer(struct hrtimer *timer) { + if (likely(!timer->start_site)) + return; timer_stats_update_stats(timer, timer->start_pid, timer->start_site, timer->function, timer->start_comm, 0); } @@ -448,6 +459,8 @@ extern void __timer_stats_hrtimer_set_start_info(struct hrtimer *timer, static inline void timer_stats_hrtimer_set_start_info(struct hrtimer *timer) { + if (likely(!timer_stats_active)) + return; __timer_stats_hrtimer_set_start_info(timer, __builtin_return_address(0)); } diff 
--git a/include/linux/hugetlb.h b/include/linux/hugetlb.h index f1d2fba19ea..5cbc620bdfe 100644 --- a/include/linux/hugetlb.h +++ b/include/linux/hugetlb.h @@ -10,6 +10,9 @@ #include <asm/tlbflush.h> struct ctl_table; +struct user_struct; + +int PageHuge(struct page *page); static inline int is_vm_hugetlb_page(struct vm_area_struct *vma) { @@ -31,9 +34,10 @@ void hugetlb_report_meminfo(struct seq_file *); int hugetlb_report_node_meminfo(int, char *); unsigned long hugetlb_total_pages(void); int hugetlb_fault(struct mm_struct *mm, struct vm_area_struct *vma, - unsigned long address, int write_access); + unsigned long address, unsigned int flags); int hugetlb_reserve_pages(struct inode *inode, long from, long to, - struct vm_area_struct *vma); + struct vm_area_struct *vma, + int acctflags); void hugetlb_unreserve_pages(struct inode *inode, long offset, long freed); extern unsigned long hugepages_treat_as_movable; @@ -60,6 +64,11 @@ void hugetlb_change_protection(struct vm_area_struct *vma, #else /* !CONFIG_HUGETLB_PAGE */ +static inline int PageHuge(struct page *page) +{ + return 0; +} + static inline int is_vm_hugetlb_page(struct vm_area_struct *vma) { return 0; @@ -90,7 +99,7 @@ static inline void hugetlb_report_meminfo(struct seq_file *m) #define pud_huge(x) 0 #define is_hugepage_only_range(mm, addr, len) 0 #define hugetlb_free_pgd_range(tlb, addr, end, floor, ceiling) ({BUG(); 0; }) -#define hugetlb_fault(mm, vma, addr, write) ({ BUG(); 0; }) +#define hugetlb_fault(mm, vma, addr, flags) ({ BUG(); 0; }) #define hugetlb_change_protection(vma, address, end, newprot) @@ -138,7 +147,8 @@ static inline struct hugetlbfs_sb_info *HUGETLBFS_SB(struct super_block *sb) extern const struct file_operations hugetlbfs_file_operations; extern struct vm_operations_struct hugetlb_vm_ops; -struct file *hugetlb_file_setup(const char *name, size_t); +struct file *hugetlb_file_setup(const char *name, size_t size, int acct, + struct user_struct **user); int hugetlb_get_quota(struct address_space *mapping, long delta); void hugetlb_put_quota(struct address_space *mapping, long delta); @@ -158,9 +168,9 @@ static inline void set_file_hugepages(struct file *file) } #else /* !CONFIG_HUGETLBFS */ -#define is_file_hugepages(file) 0 -#define set_file_hugepages(file) BUG() -#define hugetlb_file_setup(name,size) ERR_PTR(-ENOSYS) +#define is_file_hugepages(file) 0 +#define set_file_hugepages(file) BUG() +#define hugetlb_file_setup(name,size,acct,user) ERR_PTR(-ENOSYS) #endif /* !CONFIG_HUGETLBFS */ diff --git a/include/linux/i2c-algo-pca.h b/include/linux/i2c-algo-pca.h index adcb3dc7ac2..1364d62e2fb 100644 --- a/include/linux/i2c-algo-pca.h +++ b/include/linux/i2c-algo-pca.h @@ -1,7 +1,14 @@ #ifndef _LINUX_I2C_ALGO_PCA_H #define _LINUX_I2C_ALGO_PCA_H -/* Clock speeds for the bus */ +/* Chips known to the pca algo */ +#define I2C_PCA_CHIP_9564 0x00 +#define I2C_PCA_CHIP_9665 0x01 + +/* Internal period for PCA9665 oscilator */ +#define I2C_PCA_OSC_PER 3 /* e10-8s */ + +/* Clock speeds for the bus for PCA9564*/ #define I2C_PCA_CON_330kHz 0x00 #define I2C_PCA_CON_288kHz 0x01 #define I2C_PCA_CON_217kHz 0x02 @@ -18,6 +25,26 @@ #define I2C_PCA_ADR 0x02 /* OWN ADR Read/Write */ #define I2C_PCA_CON 0x03 /* CONTROL Read/Write */ +/* PCA9665 registers */ +#define I2C_PCA_INDPTR 0x00 /* INDIRECT Pointer Write Only */ +#define I2C_PCA_IND 0x02 /* INDIRECT Read/Write */ + +/* PCA9665 indirect registers */ +#define I2C_PCA_ICOUNT 0x00 /* Byte Count for buffered mode */ +#define I2C_PCA_IADR 0x01 /* OWN ADR */ +#define I2C_PCA_ISCLL 
0x02 /* SCL LOW period */ +#define I2C_PCA_ISCLH 0x03 /* SCL HIGH period */ +#define I2C_PCA_ITO 0x04 /* TIMEOUT */ +#define I2C_PCA_IPRESET 0x05 /* Parallel bus reset */ +#define I2C_PCA_IMODE 0x06 /* I2C Bus mode */ + +/* PCA9665 I2C bus mode */ +#define I2C_PCA_MODE_STD 0x00 /* Standard mode */ +#define I2C_PCA_MODE_FAST 0x01 /* Fast mode */ +#define I2C_PCA_MODE_FASTP 0x02 /* Fast Plus mode */ +#define I2C_PCA_MODE_TURBO 0x03 /* Turbo mode */ + + #define I2C_PCA_CON_AA 0x80 /* Assert Acknowledge */ #define I2C_PCA_CON_ENSIO 0x40 /* Enable */ #define I2C_PCA_CON_STA 0x20 /* Start */ @@ -31,7 +58,9 @@ struct i2c_algo_pca_data { int (*read_byte) (void *data, int reg); int (*wait_for_completion) (void *data); void (*reset_chip) (void *data); - /* i2c_clock values are defined in linux/i2c-algo-pca.h */ + /* For PCA9564, use one of the predefined frequencies: + * 330000, 288000, 217000, 146000, 88000, 59000, 44000, 36000 + * For PCA9665, use the frequency you want here. */ unsigned int i2c_clock; }; diff --git a/include/linux/i2c-algo-sgi.h b/include/linux/i2c-algo-sgi.h deleted file mode 100644 index 3b7715024e6..00000000000 --- a/include/linux/i2c-algo-sgi.h +++ /dev/null @@ -1,26 +0,0 @@ -/* - * This file is subject to the terms and conditions of the GNU General Public - * License version 2 as published by the Free Software Foundation. - * - * Copyright (C) 2003 Ladislav Michl <ladis@linux-mips.org> - */ - -#ifndef I2C_ALGO_SGI_H -#define I2C_ALGO_SGI_H 1 - -#include <linux/i2c.h> - -struct i2c_algo_sgi_data { - void *data; /* private data for lowlevel routines */ - unsigned (*getctrl)(void *data); - void (*setctrl)(void *data, unsigned val); - unsigned (*rdata)(void *data); - void (*wdata)(void *data, unsigned val); - - int xfer_timeout; - int ack_timeout; -}; - -int i2c_sgi_add_bus(struct i2c_adapter *); - -#endif /* I2C_ALGO_SGI_H */ diff --git a/include/linux/i2c-dev.h b/include/linux/i2c-dev.h index 311315b56b6..fd53bfd2647 100644 --- a/include/linux/i2c-dev.h +++ b/include/linux/i2c-dev.h @@ -33,7 +33,7 @@ */ #define I2C_RETRIES 0x0701 /* number of times a device address should be polled when not acknowledging */ -#define I2C_TIMEOUT 0x0702 /* set timeout in jiffies - call with int */ +#define I2C_TIMEOUT 0x0702 /* set timeout in units of 10 ms */ /* NOTE: Slave address is 7 or 10 bits, but 10-bit addresses * are NOT supported! 
(due to code brokenness) diff --git a/include/linux/i2c-id.h b/include/linux/i2c-id.h index 01d67ba9e98..c9087de5c6c 100644 --- a/include/linux/i2c-id.h +++ b/include/linux/i2c-id.h @@ -33,130 +33,35 @@ #define I2C_DRIVERID_MSP3400 1 #define I2C_DRIVERID_TUNER 2 -#define I2C_DRIVERID_TEA6420 5 /* audio matrix switch */ -#define I2C_DRIVERID_TEA6415C 6 /* video matrix switch */ -#define I2C_DRIVERID_TDA9840 7 /* stereo sound processor */ -#define I2C_DRIVERID_SAA7111A 8 /* video input processor */ -#define I2C_DRIVERID_SAA7185B 13 /* video encoder */ -#define I2C_DRIVERID_SAA7110 22 /* video decoder */ -#define I2C_DRIVERID_SAA5249 24 /* SAA5249 and compatibles */ -#define I2C_DRIVERID_PCF8583 25 /* real time clock */ #define I2C_DRIVERID_TDA7432 27 /* Stereo sound processor */ -#define I2C_DRIVERID_TVMIXER 28 /* Mixer driver for tv cards */ #define I2C_DRIVERID_TVAUDIO 29 /* Generic TV sound driver */ -#define I2C_DRIVERID_TDA9875 32 /* TV sound decoder chip */ -#define I2C_DRIVERID_BT819 40 /* video decoder */ -#define I2C_DRIVERID_BT856 41 /* video encoder */ -#define I2C_DRIVERID_VPX3220 42 /* video decoder+vbi/vtxt */ -#define I2C_DRIVERID_ADV7175 48 /* ADV 7175/7176 video encoder */ -#define I2C_DRIVERID_SAA7114 49 /* video decoder */ -#define I2C_DRIVERID_ADV7170 54 /* video encoder */ -#define I2C_DRIVERID_SAA7191 57 /* video decoder */ -#define I2C_DRIVERID_INDYCAM 58 /* SGI IndyCam */ -#define I2C_DRIVERID_OVCAMCHIP 61 /* OmniVision CMOS image sens. */ -#define I2C_DRIVERID_MAX6900 63 /* MAX6900 real-time clock */ -#define I2C_DRIVERID_SAA6752HS 67 /* MPEG2 encoder */ -#define I2C_DRIVERID_TVEEPROM 68 /* TV EEPROM */ -#define I2C_DRIVERID_WM8775 69 /* wm8775 audio processor */ -#define I2C_DRIVERID_CS53L32A 70 /* cs53l32a audio processor */ -#define I2C_DRIVERID_CX25840 71 /* cx2584x video encoder */ -#define I2C_DRIVERID_SAA7127 72 /* saa7127 video encoder */ #define I2C_DRIVERID_SAA711X 73 /* saa711x video encoders */ -#define I2C_DRIVERID_AKITAIOEXP 74 /* IO Expander on Sharp SL-C1000 */ #define I2C_DRIVERID_INFRARED 75 /* I2C InfraRed on Video boards */ -#define I2C_DRIVERID_TVP5150 76 /* TVP5150 video decoder */ -#define I2C_DRIVERID_WM8739 77 /* wm8739 audio processor */ -#define I2C_DRIVERID_UPD64083 78 /* upd64083 video processor */ -#define I2C_DRIVERID_UPD64031A 79 /* upd64031a video processor */ -#define I2C_DRIVERID_SAA717X 80 /* saa717x video encoder */ -#define I2C_DRIVERID_DS1672 81 /* Dallas/Maxim DS1672 RTC */ -#define I2C_DRIVERID_BT866 85 /* Conexant bt866 video encoder */ -#define I2C_DRIVERID_KS0127 86 /* Samsung ks0127 video decoder */ -#define I2C_DRIVERID_TLV320AIC23B 87 /* TI TLV320AIC23B audio codec */ -#define I2C_DRIVERID_WM8731 89 /* Wolfson WM8731 audio codec */ -#define I2C_DRIVERID_WM8750 90 /* Wolfson WM8750 audio codec */ -#define I2C_DRIVERID_WM8753 91 /* Wolfson WM8753 audio codec */ -#define I2C_DRIVERID_LM4857 92 /* LM4857 Audio Amplifier */ -#define I2C_DRIVERID_VP27SMPX 93 /* Panasonic VP27s tuner internal MPX */ -#define I2C_DRIVERID_CS4270 94 /* Cirrus Logic 4270 audio codec */ -#define I2C_DRIVERID_M52790 95 /* Mitsubishi M52790SP/FP AV switch */ -#define I2C_DRIVERID_CS5345 96 /* cs5345 audio processor */ - -#define I2C_DRIVERID_OV7670 1048 /* Omnivision 7670 camera */ /* * ---- Adapter types ---------------------------------------------------- */ /* --- Bit algorithm adapters */ -#define I2C_HW_B_LP 0x010000 /* Parallel port Philips style */ #define I2C_HW_B_BT848 0x010005 /* BT848 video boards */ -#define I2C_HW_B_VIA 0x010007 /* Via 
vt82c586b */ -#define I2C_HW_B_HYDRA 0x010008 /* Apple Hydra Mac I/O */ -#define I2C_HW_B_I810 0x01000a /* Intel I810 */ -#define I2C_HW_B_VOO 0x01000b /* 3dfx Voodoo 3 / Banshee */ -#define I2C_HW_B_SCX200 0x01000e /* Nat'l Semi SCx200 I2C */ #define I2C_HW_B_RIVA 0x010010 /* Riva based graphics cards */ -#define I2C_HW_B_IOC 0x010011 /* IOC bit-wiggling */ -#define I2C_HW_B_IXP2000 0x010016 /* GPIO on IXP2000 systems */ #define I2C_HW_B_ZR36067 0x010019 /* Zoran-36057/36067 based boards */ -#define I2C_HW_B_PCILYNX 0x01001a /* TI PCILynx I2C adapter */ #define I2C_HW_B_CX2388x 0x01001b /* connexant 2388x based tv cards */ -#define I2C_HW_B_NVIDIA 0x01001c /* nvidia framebuffer driver */ -#define I2C_HW_B_SAVAGE 0x01001d /* savage framebuffer driver */ -#define I2C_HW_B_RADEON 0x01001e /* radeon framebuffer driver */ #define I2C_HW_B_EM28XX 0x01001f /* em28xx video capture cards */ #define I2C_HW_B_CX2341X 0x010020 /* Conexant CX2341X MPEG encoder cards */ -#define I2C_HW_B_INTELFB 0x010021 /* intel framebuffer driver */ #define I2C_HW_B_CX23885 0x010022 /* conexant 23885 based tv cards (bus1) */ #define I2C_HW_B_AU0828 0x010023 /* auvitek au0828 usb bridge */ - -/* --- PCF 8584 based algorithms */ -#define I2C_HW_P_ELEK 0x020002 /* Elektor ISA Bus inteface card */ - -/* --- PCA 9564 based algorithms */ -#define I2C_HW_A_ISA 0x1a0000 /* generic ISA Bus interface card */ - -/* --- PowerPC on-chip adapters */ -#define I2C_HW_OCP 0x120000 /* IBM on-chip I2C adapter */ - -/* --- Broadcom SiByte adapters */ -#define I2C_HW_SIBYTE 0x150000 +#define I2C_HW_B_CX231XX 0x010024 /* Conexant CX231XX USB based cards */ +#define I2C_HW_B_HDPVR 0x010025 /* Hauppauge HD PVR */ /* --- SGI adapters */ #define I2C_HW_SGI_VINO 0x160000 -/* --- XSCALE on-chip adapters */ -#define I2C_HW_IOP3XX 0x140000 - -/* --- Au1550 PSC adapters adapters */ -#define I2C_HW_AU1550_PSC 0x1b0000 - /* --- SMBus only adapters */ -#define I2C_HW_SMBUS_PIIX4 0x040000 -#define I2C_HW_SMBUS_ALI15X3 0x040001 -#define I2C_HW_SMBUS_VIA2 0x040002 -#define I2C_HW_SMBUS_I801 0x040004 -#define I2C_HW_SMBUS_AMD756 0x040005 -#define I2C_HW_SMBUS_SIS5595 0x040006 -#define I2C_HW_SMBUS_ALI1535 0x040007 -#define I2C_HW_SMBUS_SIS630 0x040008 -#define I2C_HW_SMBUS_SIS96X 0x040009 -#define I2C_HW_SMBUS_AMD8111 0x04000a -#define I2C_HW_SMBUS_SCX200 0x04000b -#define I2C_HW_SMBUS_NFORCE2 0x04000c #define I2C_HW_SMBUS_W9968CF 0x04000d #define I2C_HW_SMBUS_OV511 0x04000e /* OV511(+) USB 1.1 webcam ICs */ #define I2C_HW_SMBUS_OV518 0x04000f /* OV518(+) USB 1.1 webcam ICs */ #define I2C_HW_SMBUS_CAFE 0x040012 /* Marvell 88ALP01 "CAFE" cam */ -#define I2C_HW_SMBUS_ALI1563 0x040013 - -/* --- MCP107 adapter */ -#define I2C_HW_MPC107 0x0d0000 - -/* --- Embedded adapters */ -#define I2C_HW_MV64XXX 0x190000 -#define I2C_HW_BLACKFIN 0x190001 /* ADI Blackfin I2C TWI driver */ /* --- Miscellaneous adapters */ #define I2C_HW_SAA7146 0x060000 /* SAA7146 video decoder bus */ diff --git a/include/linux/i2c-ocores.h b/include/linux/i2c-ocores.h index 8ed591b0887..4d5e57ff661 100644 --- a/include/linux/i2c-ocores.h +++ b/include/linux/i2c-ocores.h @@ -14,6 +14,8 @@ struct ocores_i2c_platform_data { u32 regstep; /* distance between registers */ u32 clock_khz; /* input clock in kHz */ + u8 num_devices; /* number of devices in the devices list */ + struct i2c_board_info const *devices; /* devices connected to the bus */ }; #endif /* _LINUX_I2C_OCORES_H */ diff --git a/include/linux/i2c-pca-platform.h b/include/linux/i2c-pca-platform.h index 3d191873f2d..aba33759dec 
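The new num_devices/devices members of ocores_i2c_platform_data let a board hand its I2C children to the bus driver declaratively; a hypothetical sketch (chip names and addresses are invented):

#include <linux/kernel.h>
#include <linux/i2c.h>
#include <linux/i2c-ocores.h>

static const struct i2c_board_info example_ocores_devices[] = {
        { I2C_BOARD_INFO("24c02", 0x50) },
        { I2C_BOARD_INFO("pcf8563", 0x51) },
};

static struct ocores_i2c_platform_data example_ocores_pdata = {
        .regstep     = 4,
        .clock_khz   = 50000,
        .num_devices = ARRAY_SIZE(example_ocores_devices),
        .devices     = example_ocores_devices,
};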
100644 --- a/include/linux/i2c-pca-platform.h +++ b/include/linux/i2c-pca-platform.h @@ -6,7 +6,7 @@ struct i2c_pca9564_pf_platform_data { * not supplied (negative value), but it * cannot exit some error conditions then */ int i2c_clock_speed; /* values are defined in linux/i2c-algo-pca.h */ - int timeout; /* timeout = this value * 10us */ + int timeout; /* timeout in jiffies */ }; #endif /* I2C_PCA9564_PLATFORM_H */ diff --git a/include/linux/i2c.h b/include/linux/i2c.h index 20873d40246..f4784c0fe97 100644 --- a/include/linux/i2c.h +++ b/include/linux/i2c.h @@ -47,6 +47,7 @@ struct i2c_driver; union i2c_smbus_data; struct i2c_board_info; +#if defined(CONFIG_I2C) || defined(CONFIG_I2C_MODULE) /* * The master routines are the ones normally used to transmit data to devices * on a bus (or read from them). Apart from two basic transfer functions to @@ -93,6 +94,7 @@ extern s32 i2c_smbus_read_i2c_block_data(struct i2c_client *client, extern s32 i2c_smbus_write_i2c_block_data(struct i2c_client *client, u8 command, u8 length, const u8 *values); +#endif /* I2C */ /** * struct i2c_driver - represent an I2C device driver @@ -100,9 +102,8 @@ extern s32 i2c_smbus_write_i2c_block_data(struct i2c_client *client, * @class: What kind of i2c device we instantiate (for detect) * @attach_adapter: Callback for bus addition (for legacy drivers) * @detach_adapter: Callback for bus removal (for legacy drivers) - * @detach_client: Callback for device removal (for legacy drivers) - * @probe: Callback for device binding (new-style drivers) - * @remove: Callback for device unbinding (new-style drivers) + * @probe: Callback for device binding + * @remove: Callback for device unbinding * @shutdown: Callback for device shutdown * @suspend: Callback for device suspend * @resume: Callback for device resume @@ -137,26 +138,14 @@ struct i2c_driver { int id; unsigned int class; - /* Notifies the driver that a new bus has appeared. This routine - * can be used by the driver to test if the bus meets its conditions - * & seek for the presence of the chip(s) it supports. If found, it - * registers the client(s) that are on the bus to the i2c admin. via - * i2c_attach_client. (LEGACY I2C DRIVERS ONLY) + /* Notifies the driver that a new bus has appeared or is about to be + * removed. You should avoid using this if you can, it will probably + * be removed in a near future. */ int (*attach_adapter)(struct i2c_adapter *); int (*detach_adapter)(struct i2c_adapter *); - /* tells the driver that a client is about to be deleted & gives it - * the chance to remove its private data. Also, if the client struct - * has been dynamically allocated by the driver in the function above, - * it must be freed here. (LEGACY I2C DRIVERS ONLY) - */ - int (*detach_client)(struct i2c_client *); - - /* Standard driver model interfaces, for "new style" i2c drivers. - * With the driver model, device enumeration is NEVER done by drivers; - * it's done by infrastructure. (NEW STYLE DRIVERS ONLY) - */ + /* Standard driver model interfaces */ int (*probe)(struct i2c_client *, const struct i2c_device_id *); int (*remove)(struct i2c_client *); @@ -191,9 +180,8 @@ struct i2c_driver { * @driver: device's driver, hence pointer to access routines * @dev: Driver model device node for the slave. 
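For reference, a bare-bones driver written against the probe()/remove() interface that the updated i2c_driver documentation above describes; everything prefixed example is hypothetical.

#include <linux/module.h>
#include <linux/i2c.h>

static int example_probe(struct i2c_client *client,
                         const struct i2c_device_id *id)
{
        /* bind driver state to the client with i2c_set_clientdata() */
        return 0;
}

static int example_remove(struct i2c_client *client)
{
        /* undo whatever example_probe() set up */
        return 0;
}

static const struct i2c_device_id example_id[] = {
        { "exampledev", 0 },
        { }
};

static struct i2c_driver example_driver = {
        .driver   = {
                .name = "exampledev",
        },
        .probe    = example_probe,
        .remove   = example_remove,
        .id_table = example_id,
};

static int __init example_init(void)
{
        return i2c_add_driver(&example_driver);
}
module_init(example_init);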
* @irq: indicates the IRQ generated by this device (if any) - * @list: list of active/busy clients (DEPRECATED) - * @detected: member of an i2c_driver.clients list - * @released: used to synchronize client releases & detaches and references + * @detected: member of an i2c_driver.clients list or i2c-core's + * userspace_devices list * * An i2c_client identifies a single device (i.e. chip) connected to an * i2c bus. The behaviour exposed to Linux is defined by the driver @@ -209,9 +197,7 @@ struct i2c_client { struct i2c_driver *driver; /* and our access routines */ struct device dev; /* the device structure */ int irq; /* irq issued by device */ - struct list_head list; /* DEPRECATED */ struct list_head detected; - struct completion released; }; #define to_i2c_client(d) container_of(d, struct i2c_client, dev) @@ -248,11 +234,10 @@ static inline void i2c_set_clientdata(struct i2c_client *dev, void *data) * that, such as chip type, configuration, associated IRQ, and so on. * * i2c_board_info is used to build tables of information listing I2C devices - * that are present. This information is used to grow the driver model tree - * for "new style" I2C drivers. For mainboards this is done statically using - * i2c_register_board_info(); bus numbers identify adapters that aren't - * yet available. For add-on boards, i2c_new_device() does this dynamically - * with the adapter already known. + * that are present. This information is used to grow the driver model tree. + * For mainboards this is done statically using i2c_register_board_info(); + * bus numbers identify adapters that aren't yet available. For add-on boards, + * i2c_new_device() does this dynamically with the adapter already known. */ struct i2c_board_info { char type[I2C_NAME_SIZE]; @@ -274,9 +259,10 @@ struct i2c_board_info { * are provided using conventional syntax. */ #define I2C_BOARD_INFO(dev_type, dev_addr) \ - .type = (dev_type), .addr = (dev_addr) + .type = dev_type, .addr = (dev_addr) +#if defined(CONFIG_I2C) || defined(CONFIG_I2C_MODULE) /* Add-on boards should register/unregister their devices; e.g. a board * with integrated I2C, a config eeprom, sensors, and a codec that's * used in conjunction with the primary hardware. @@ -300,6 +286,7 @@ extern struct i2c_client * i2c_new_dummy(struct i2c_adapter *adap, u16 address); extern void i2c_unregister_device(struct i2c_client *); +#endif /* I2C */ /* Mainboard arch_initcall() code should register all its I2C devices. * This is done at arch_initcall time, before declaring any i2c adapters. @@ -316,7 +303,7 @@ i2c_register_board_info(int busnum, struct i2c_board_info const *info, { return 0; } -#endif +#endif /* I2C_BOARDINFO */ /* * The following structs are for those who like to implement new bus drivers: @@ -352,21 +339,15 @@ struct i2c_adapter { const struct i2c_algorithm *algo; /* the algorithm to access the bus */ void *algo_data; - /* --- administration stuff. 
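A sketch of the two instantiation paths the i2c_board_info comment describes: a static table for a mainboard bus, and a direct i2c_new_device() call for an add-on board. Chip names, addresses, the IRQ and the bus number are all invented.

#include <linux/i2c.h>

static struct i2c_board_info example_bus0_devices[] = {
        { I2C_BOARD_INFO("exampledev", 0x2d), .irq = 17, },
        { I2C_BOARD_INFO("24c64", 0x50), },
};
/* arch setup: i2c_register_board_info(0, example_bus0_devices,
 *                                     ARRAY_SIZE(example_bus0_devices)); */

static struct i2c_board_info example_codec_info = {
        I2C_BOARD_INFO("examplecodec", 0x1a),
};
/* add-on board, adapter already known:
 *      client = i2c_new_device(adap, &example_codec_info); */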
*/ - int (*client_register)(struct i2c_client *); - int (*client_unregister)(struct i2c_client *); - /* data fields that are valid for all devices */ u8 level; /* nesting level for lockdep */ struct mutex bus_lock; - struct mutex clist_lock; - int timeout; + int timeout; /* in jiffies */ int retries; struct device dev; /* the adapter device */ int nr; - struct list_head clients; /* DEPRECATED */ char name[48]; struct completion dev_released; }; @@ -412,11 +393,16 @@ struct i2c_client_address_data { /* The numbers to use to set I2C bus address */ #define ANY_I2C_BUS 0xffff +/* Construct an I2C_CLIENT_END-terminated array of i2c addresses */ +#define I2C_ADDRS(addr, addrs...) \ + ((const unsigned short []){ addr, ## addrs, I2C_CLIENT_END }) + /* ----- functions exported by i2c.o */ /* administration... */ +#if defined(CONFIG_I2C) || defined(CONFIG_I2C_MODULE) extern int i2c_add_adapter(struct i2c_adapter *); extern int i2c_del_adapter(struct i2c_adapter *); extern int i2c_add_numbered_adapter(struct i2c_adapter *); @@ -429,9 +415,6 @@ static inline int i2c_add_driver(struct i2c_driver *driver) return i2c_register_driver(THIS_MODULE, driver); } -extern int i2c_attach_client(struct i2c_client *); -extern int i2c_detach_client(struct i2c_client *); - extern struct i2c_client *i2c_use_client(struct i2c_client *client); extern void i2c_release_client(struct i2c_client *client); @@ -440,14 +423,6 @@ extern void i2c_release_client(struct i2c_client *client); extern void i2c_clients_command(struct i2c_adapter *adap, unsigned int cmd, void *arg); -/* Detect function. It iterates over all possible addresses itself. - * It will only call found_proc if some client is connected at the - * specific address (unless a 'force' matched); - */ -extern int i2c_probe(struct i2c_adapter *adapter, - const struct i2c_client_address_data *address_data, - int (*found_proc) (struct i2c_adapter *, int, int)); - extern struct i2c_adapter *i2c_get_adapter(int id); extern void i2c_put_adapter(struct i2c_adapter *adap); @@ -469,6 +444,7 @@ static inline int i2c_adapter_id(struct i2c_adapter *adap) { return adap->nr; } +#endif /* I2C */ #endif /* __KERNEL__ */ /** diff --git a/include/linux/i2c/at24.h b/include/linux/i2c/at24.h index f6edd522a92..8ace93024d6 100644 --- a/include/linux/i2c/at24.h +++ b/include/linux/i2c/at24.h @@ -2,6 +2,7 @@ #define _LINUX_AT24_H #include <linux/types.h> +#include <linux/memory.h> /* * As seen through Linux I2C, differences between the most common types of I2C @@ -23,6 +24,9 @@ struct at24_platform_data { #define AT24_FLAG_READONLY 0x40 /* sysfs-entry will be read-only */ #define AT24_FLAG_IRUGO 0x20 /* sysfs-entry will be world-readable */ #define AT24_FLAG_TAKE8ADDR 0x10 /* take always 8 addresses (24c00) */ + + void (*setup)(struct memory_accessor *, void *context); + void *context; }; #endif /* _LINUX_AT24_H */ diff --git a/include/linux/i2c/lm8323.h b/include/linux/i2c/lm8323.h new file mode 100644 index 00000000000..478d668bc59 --- /dev/null +++ b/include/linux/i2c/lm8323.h @@ -0,0 +1,46 @@ +/* + * lm8323.h - Configuration for LM8323 keypad driver. + * + * This program is free software; you can redistribute it and/or modify + * it under the terms of the GNU General Public License as published by + * the Free Software Foundation (version 2 of the License only). + * + * This program is distributed in the hope that it will be useful, + * but WITHOUT ANY WARRANTY; without even the implied warranty of + * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. 
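The new I2C_ADDRS() helper simply builds an I2C_CLIENT_END-terminated array, so it can be assigned to anything expecting a probe/detect address list; a one-line sketch with made-up addresses:

#include <linux/i2c.h>

static const unsigned short *example_scan_addrs = I2C_ADDRS(0x4c, 0x4d, 0x4e);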
See the + * GNU General Public License for more details. + * + * You should have received a copy of the GNU General Public License + * along with this program; if not, write to the Free Software + * Foundation, Inc., 59 Temple Place, Suite 330, Boston, MA 02111-1307 USA + */ + +#ifndef __LINUX_LM8323_H +#define __LINUX_LM8323_H + +#include <linux/types.h> + +/* + * Largest keycode that the chip can send, plus one, + * so keys can be mapped directly at the index of the + * LM8323 keycode instead of subtracting one. + */ +#define LM8323_KEYMAP_SIZE (0x7f + 1) + +#define LM8323_NUM_PWMS 3 + +struct lm8323_platform_data { + int debounce_time; /* Time to watch for key bouncing, in ms. */ + int active_time; /* Idle time until sleep, in ms. */ + + int size_x; + int size_y; + bool repeat; + const unsigned short *keymap; + + const char *pwm_names[LM8323_NUM_PWMS]; + + const char *name; /* Device name. */ +}; + +#endif /* __LINUX_LM8323_H */ diff --git a/include/linux/i2c/pca953x.h b/include/linux/i2c/pca953x.h index 3c7361217df..81736d6a8db 100644 --- a/include/linux/i2c/pca953x.h +++ b/include/linux/i2c/pca953x.h @@ -15,4 +15,5 @@ struct pca953x_platform_data { int (*teardown)(struct i2c_client *client, unsigned gpio, unsigned ngpio, void *context); + char **names; }; diff --git a/include/linux/i2c/s6000.h b/include/linux/i2c/s6000.h new file mode 100644 index 00000000000..d9b34bfdae7 --- /dev/null +++ b/include/linux/i2c/s6000.h @@ -0,0 +1,10 @@ +#ifndef __LINUX_I2C_S6000_H +#define __LINUX_I2C_S6000_H + +struct s6_i2c_platform_data { + const char *clock; /* the clock to use */ + int bus_num; /* the bus number to register */ +}; + +#endif + diff --git a/include/linux/i2c/twl4030.h b/include/linux/i2c/twl4030.h index 8137f660a5c..0dc80ef2497 100644 --- a/include/linux/i2c/twl4030.h +++ b/include/linux/i2c/twl4030.h @@ -218,6 +218,53 @@ int twl4030_i2c_read(u8 mod_no, u8 *value, u8 reg, unsigned num_bytes); /*----------------------------------------------------------------------*/ +/* Power bus message definitions */ + +#define DEV_GRP_NULL 0x0 +#define DEV_GRP_P1 0x1 +#define DEV_GRP_P2 0x2 +#define DEV_GRP_P3 0x4 + +#define RES_GRP_RES 0x0 +#define RES_GRP_PP 0x1 +#define RES_GRP_RC 0x2 +#define RES_GRP_PP_RC 0x3 +#define RES_GRP_PR 0x4 +#define RES_GRP_PP_PR 0x5 +#define RES_GRP_RC_PR 0x6 +#define RES_GRP_ALL 0x7 + +#define RES_TYPE2_R0 0x0 + +#define RES_TYPE_ALL 0x7 + +#define RES_STATE_WRST 0xF +#define RES_STATE_ACTIVE 0xE +#define RES_STATE_SLEEP 0x8 +#define RES_STATE_OFF 0x0 + +/* + * Power Bus Message Format ... these can be sent individually by Linux, + * but are usually part of downloaded scripts that are run when various + * power events are triggered. 
+ * + * Broadcast Message (16 Bits): + * DEV_GRP[15:13] MT[12] RES_GRP[11:9] RES_TYPE2[8:7] RES_TYPE[6:4] + * RES_STATE[3:0] + * + * Singular Message (16 Bits): + * DEV_GRP[15:13] MT[12] RES_ID[11:4] RES_STATE[3:0] + */ + +#define MSG_BROADCAST(devgrp, grp, type, type2, state) \ + ( (devgrp) << 13 | 1 << 12 | (grp) << 9 | (type2) << 7 \ + | (type) << 4 | (state)) + +#define MSG_SINGULAR(devgrp, id, state) \ + ((devgrp) << 13 | 0 << 12 | (id) << 4 | (state)) + +/*----------------------------------------------------------------------*/ + struct twl4030_bci_platform_data { int *battery_tmp_tbl; unsigned int tblsize; diff --git a/include/linux/i7300_idle.h b/include/linux/i7300_idle.h index 05a80c44513..1587b7dec50 100644 --- a/include/linux/i7300_idle.h +++ b/include/linux/i7300_idle.h @@ -16,35 +16,33 @@ struct fbd_ioat { unsigned int vendor; unsigned int ioat_dev; + unsigned int enabled; }; /* * The i5000 chip-set has the same hooks as the i7300 - * but support is disabled by default because this driver - * has not been validated on that platform. + * but it is not enabled by default and must be manually + * manually enabled with "forceload=1" because it is + * only lightly validated. */ -#define SUPPORT_I5000 0 static const struct fbd_ioat fbd_ioat_list[] = { - {PCI_VENDOR_ID_INTEL, PCI_DEVICE_ID_INTEL_IOAT_CNB}, -#if SUPPORT_I5000 - {PCI_VENDOR_ID_INTEL, PCI_DEVICE_ID_INTEL_IOAT}, -#endif + {PCI_VENDOR_ID_INTEL, PCI_DEVICE_ID_INTEL_IOAT_CNB, 1}, + {PCI_VENDOR_ID_INTEL, PCI_DEVICE_ID_INTEL_IOAT, 0}, {0, 0} }; /* table of devices that work with this driver */ static const struct pci_device_id pci_tbl[] = { { PCI_DEVICE(PCI_VENDOR_ID_INTEL, PCI_DEVICE_ID_INTEL_FBD_CNB) }, -#if SUPPORT_I5000 { PCI_DEVICE(PCI_VENDOR_ID_INTEL, PCI_DEVICE_ID_INTEL_5000_ERR) }, -#endif { } /* Terminating entry */ }; /* Check for known platforms with I/O-AT */ static inline int i7300_idle_platform_probe(struct pci_dev **fbd_dev, - struct pci_dev **ioat_dev) + struct pci_dev **ioat_dev, + int enable_all) { int i; struct pci_dev *memdev, *dmadev; @@ -69,6 +67,8 @@ static inline int i7300_idle_platform_probe(struct pci_dev **fbd_dev, for (i = 0; fbd_ioat_list[i].vendor != 0; i++) { if (dmadev->vendor == fbd_ioat_list[i].vendor && dmadev->device == fbd_ioat_list[i].ioat_dev) { + if (!(fbd_ioat_list[i].enabled || enable_all)) + continue; if (fbd_dev) *fbd_dev = memdev; if (ioat_dev) diff --git a/include/linux/ibmtr.h b/include/linux/ibmtr.h index 1c7a0dd5536..06695b74d40 100644 --- a/include/linux/ibmtr.h +++ b/include/linux/ibmtr.h @@ -207,7 +207,7 @@ struct tok_info { unsigned short exsap_station_id; unsigned short global_int_enable; struct sk_buff *current_skb; - struct net_device_stats tr_stats; + unsigned char auto_speedsave; open_state open_status, sap_status; enum {MANUAL, AUTOMATIC} open_mode; diff --git a/include/linux/icmpv6.h b/include/linux/icmpv6.h index a93a8dd3311..c0d8357917e 100644 --- a/include/linux/icmpv6.h +++ b/include/linux/icmpv6.h @@ -1,6 +1,7 @@ #ifndef _LINUX_ICMPV6_H #define _LINUX_ICMPV6_H +#include <linux/types.h> #include <asm/byteorder.h> struct icmp6hdr { @@ -170,20 +171,18 @@ struct icmp6_filter { #ifdef __KERNEL__ #include <linux/netdevice.h> -#include <linux/skbuff.h> - extern void icmpv6_send(struct sk_buff *skb, - int type, int code, + u8 type, u8 code, __u32 info, struct net_device *dev); extern int icmpv6_init(void); -extern int icmpv6_err_convert(int type, int code, +extern int icmpv6_err_convert(u8 type, u8 code, int *err); extern void icmpv6_cleanup(void); extern void 
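Putting the new TWL4030 power-bus helpers together; the resource id 7 below is a made-up placeholder, the rest uses only constants from the hunk above.

#include <linux/types.h>
#include <linux/i2c/twl4030.h>

static const u16 example_sleep_all =
        MSG_BROADCAST(DEV_GRP_NULL, RES_GRP_ALL, RES_TYPE_ALL,
                      RES_TYPE2_R0, RES_STATE_SLEEP);
static const u16 example_wake_res7 =
        MSG_SINGULAR(DEV_GRP_P1, 7 /* hypothetical RES_ID */, RES_STATE_ACTIVE);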
icmpv6_param_prob(struct sk_buff *skb, - int code, int pos); + u8 code, int pos); struct flowi; struct in6_addr; diff --git a/include/linux/ide.h b/include/linux/ide.h index 194da5a4b0d..edc93a6d931 100644 --- a/include/linux/ide.h +++ b/include/linux/ide.h @@ -26,7 +26,10 @@ #include <asm/io.h> #include <asm/mutex.h> -#if defined(CONFIG_CRIS) || defined(CONFIG_FRV) +/* for request_sense */ +#include <linux/cdrom.h> + +#if defined(CONFIG_CRIS) || defined(CONFIG_FRV) || defined(CONFIG_MN10300) # define SUPPORT_VLB_SYNC 0 #else # define SUPPORT_VLB_SYNC 1 @@ -40,6 +43,13 @@ #define ERROR_RESET 3 /* Reset controller every 4th retry */ #define ERROR_RECAL 1 /* Recalibrate every 2nd retry */ +/* Error codes returned in rq->errors to the higher part of the driver. */ +enum { + IDE_DRV_ERROR_GENERAL = 101, + IDE_DRV_ERROR_FILEMARK = 102, + IDE_DRV_ERROR_EOD = 103, +}; + /* * Definitions for accessing IDE controller registers */ @@ -147,12 +157,6 @@ enum { #define REQ_UNPARK_HEADS 0x23 /* - * Check for an interrupt and acknowledge the interrupt status - */ -struct hwif_s; -typedef int (ide_ack_intr_t)(struct hwif_s *); - -/* * hwif_chipset_t is used to keep track of the specific hardware * chipset used by each IDE interface, if known. */ @@ -168,20 +172,18 @@ typedef u8 hwif_chipset_t; /* * Structure to hold all information about the location of this port */ -typedef struct hw_regs_s { +struct ide_hw { union { struct ide_io_ports io_ports; unsigned long io_ports_array[IDE_NR_PORTS]; }; int irq; /* our irq number */ - ide_ack_intr_t *ack_intr; /* acknowledge interrupt */ - hwif_chipset_t chipset; struct device *dev, *parent; unsigned long config; -} hw_regs_t; +}; -static inline void ide_std_init_ports(hw_regs_t *hw, +static inline void ide_std_init_ports(struct ide_hw *hw, unsigned long io_addr, unsigned long ctl_addr) { @@ -193,42 +195,8 @@ static inline void ide_std_init_ports(hw_regs_t *hw, hw->io_ports.ctl_addr = ctl_addr; } -/* for IDE PCI controllers in legacy mode, temporary */ -static inline int __ide_default_irq(unsigned long base) -{ - switch (base) { -#ifdef CONFIG_IA64 - case 0x1f0: return isa_irq_to_vector(14); - case 0x170: return isa_irq_to_vector(15); -#else - case 0x1f0: return 14; - case 0x170: return 15; -#endif - } - return 0; -} - -#if defined(CONFIG_ARM) || defined(CONFIG_FRV) || defined(CONFIG_M68K) || \ - defined(CONFIG_MIPS) || defined(CONFIG_MN10300) || defined(CONFIG_PARISC) \ - || defined(CONFIG_PPC) || defined(CONFIG_SPARC) || defined(CONFIG_SPARC64) -#include <asm/ide.h> -#else -#include <asm-generic/ide_iops.h> -#endif - #define MAX_HWIFS 10 -/* Currently only m68k, apus and m8xx need it */ -#ifndef IDE_ARCH_ACK_INTR -# define ide_ack_intr(hwif) (1) -#endif - -/* Currently only Atari needs it */ -#ifndef IDE_ARCH_LOCK -# define ide_release_lock() do {} while (0) -# define ide_get_lock(hdlr, data) do {} while (0) -#endif /* IDE_ARCH_LOCK */ - /* * Now for the data we need to maintain per-drive: ide_drive_t */ @@ -242,21 +210,12 @@ static inline int __ide_default_irq(unsigned long base) /* * Special Driver Flags - * - * set_geometry : respecify drive geometry - * recalibrate : seek to cyl 0 - * set_multmode : set multmode count - * reserved : unused */ -typedef union { - unsigned all : 8; - struct { - unsigned set_geometry : 1; - unsigned recalibrate : 1; - unsigned set_multmode : 1; - unsigned reserved : 5; - } b; -} special_t; +enum { + IDE_SFLAG_SET_GEOMETRY = (1 << 0), + IDE_SFLAG_RECALIBRATE = (1 << 1), + IDE_SFLAG_SET_MULTMODE = (1 << 2), +}; /* * Status returned 
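A sketch of describing one port with the renamed struct ide_hw and ide_std_init_ports(); the classic 0x1f0/0x3f6/IRQ 14 values are shown purely as an illustration.

#include <linux/string.h>
#include <linux/ide.h>

static void example_init_hw(struct ide_hw *hw, unsigned long base,
                            unsigned long ctl, int irq)
{
        memset(hw, 0, sizeof(*hw));
        ide_std_init_ports(hw, base, ctl);
        hw->irq = irq;
}
/* e.g. example_init_hw(&hw, 0x1f0, 0x3f6, 14) for a legacy primary channel */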
from various ide_ functions @@ -267,108 +226,91 @@ typedef enum { } ide_startstop_t; enum { + IDE_VALID_ERROR = (1 << 1), + IDE_VALID_FEATURE = IDE_VALID_ERROR, + IDE_VALID_NSECT = (1 << 2), + IDE_VALID_LBAL = (1 << 3), + IDE_VALID_LBAM = (1 << 4), + IDE_VALID_LBAH = (1 << 5), + IDE_VALID_DEVICE = (1 << 6), + IDE_VALID_LBA = IDE_VALID_LBAL | + IDE_VALID_LBAM | + IDE_VALID_LBAH, + IDE_VALID_OUT_TF = IDE_VALID_FEATURE | + IDE_VALID_NSECT | + IDE_VALID_LBA, + IDE_VALID_IN_TF = IDE_VALID_NSECT | + IDE_VALID_LBA, + IDE_VALID_OUT_HOB = IDE_VALID_OUT_TF, + IDE_VALID_IN_HOB = IDE_VALID_ERROR | + IDE_VALID_NSECT | + IDE_VALID_LBA, +}; + +enum { IDE_TFLAG_LBA48 = (1 << 0), - IDE_TFLAG_FLAGGED = (1 << 2), - IDE_TFLAG_OUT_DATA = (1 << 3), - IDE_TFLAG_OUT_HOB_FEATURE = (1 << 4), - IDE_TFLAG_OUT_HOB_NSECT = (1 << 5), - IDE_TFLAG_OUT_HOB_LBAL = (1 << 6), - IDE_TFLAG_OUT_HOB_LBAM = (1 << 7), - IDE_TFLAG_OUT_HOB_LBAH = (1 << 8), - IDE_TFLAG_OUT_HOB = IDE_TFLAG_OUT_HOB_FEATURE | - IDE_TFLAG_OUT_HOB_NSECT | - IDE_TFLAG_OUT_HOB_LBAL | - IDE_TFLAG_OUT_HOB_LBAM | - IDE_TFLAG_OUT_HOB_LBAH, - IDE_TFLAG_OUT_FEATURE = (1 << 9), - IDE_TFLAG_OUT_NSECT = (1 << 10), - IDE_TFLAG_OUT_LBAL = (1 << 11), - IDE_TFLAG_OUT_LBAM = (1 << 12), - IDE_TFLAG_OUT_LBAH = (1 << 13), - IDE_TFLAG_OUT_TF = IDE_TFLAG_OUT_FEATURE | - IDE_TFLAG_OUT_NSECT | - IDE_TFLAG_OUT_LBAL | - IDE_TFLAG_OUT_LBAM | - IDE_TFLAG_OUT_LBAH, - IDE_TFLAG_OUT_DEVICE = (1 << 14), - IDE_TFLAG_WRITE = (1 << 15), - IDE_TFLAG_FLAGGED_SET_IN_FLAGS = (1 << 16), - IDE_TFLAG_IN_DATA = (1 << 17), - IDE_TFLAG_CUSTOM_HANDLER = (1 << 18), - IDE_TFLAG_DMA_PIO_FALLBACK = (1 << 19), - IDE_TFLAG_IN_HOB_FEATURE = (1 << 20), - IDE_TFLAG_IN_HOB_NSECT = (1 << 21), - IDE_TFLAG_IN_HOB_LBAL = (1 << 22), - IDE_TFLAG_IN_HOB_LBAM = (1 << 23), - IDE_TFLAG_IN_HOB_LBAH = (1 << 24), - IDE_TFLAG_IN_HOB_LBA = IDE_TFLAG_IN_HOB_LBAL | - IDE_TFLAG_IN_HOB_LBAM | - IDE_TFLAG_IN_HOB_LBAH, - IDE_TFLAG_IN_HOB = IDE_TFLAG_IN_HOB_FEATURE | - IDE_TFLAG_IN_HOB_NSECT | - IDE_TFLAG_IN_HOB_LBA, - IDE_TFLAG_IN_FEATURE = (1 << 1), - IDE_TFLAG_IN_NSECT = (1 << 25), - IDE_TFLAG_IN_LBAL = (1 << 26), - IDE_TFLAG_IN_LBAM = (1 << 27), - IDE_TFLAG_IN_LBAH = (1 << 28), - IDE_TFLAG_IN_LBA = IDE_TFLAG_IN_LBAL | - IDE_TFLAG_IN_LBAM | - IDE_TFLAG_IN_LBAH, - IDE_TFLAG_IN_TF = IDE_TFLAG_IN_NSECT | - IDE_TFLAG_IN_LBA, - IDE_TFLAG_IN_DEVICE = (1 << 29), - IDE_TFLAG_HOB = IDE_TFLAG_OUT_HOB | - IDE_TFLAG_IN_HOB, - IDE_TFLAG_TF = IDE_TFLAG_OUT_TF | - IDE_TFLAG_IN_TF, - IDE_TFLAG_DEVICE = IDE_TFLAG_OUT_DEVICE | - IDE_TFLAG_IN_DEVICE, + IDE_TFLAG_WRITE = (1 << 1), + IDE_TFLAG_CUSTOM_HANDLER = (1 << 2), + IDE_TFLAG_DMA_PIO_FALLBACK = (1 << 3), /* force 16-bit I/O operations */ - IDE_TFLAG_IO_16BIT = (1 << 30), - /* ide_task_t was allocated using kmalloc() */ - IDE_TFLAG_DYN = (1 << 31), + IDE_TFLAG_IO_16BIT = (1 << 4), + /* struct ide_cmd was allocated using kmalloc() */ + IDE_TFLAG_DYN = (1 << 5), + IDE_TFLAG_FS = (1 << 6), + IDE_TFLAG_MULTI_PIO = (1 << 7), }; -struct ide_taskfile { - u8 hob_data; /* 0: high data byte (for TASKFILE IOCTL) */ +enum { + IDE_FTFLAG_FLAGGED = (1 << 0), + IDE_FTFLAG_SET_IN_FLAGS = (1 << 1), + IDE_FTFLAG_OUT_DATA = (1 << 2), + IDE_FTFLAG_IN_DATA = (1 << 3), +}; - u8 hob_feature; /* 1-5: additional data to support LBA48 */ - u8 hob_nsect; - u8 hob_lbal; - u8 hob_lbam; - u8 hob_lbah; +struct ide_taskfile { + u8 data; /* 0: data byte (for TASKFILE ioctl) */ + union { /* 1: */ + u8 error; /* read: error */ + u8 feature; /* write: feature */ + }; + u8 nsect; /* 2: number of sectors */ + u8 lbal; /* 3: LBA low 
*/ + u8 lbam; /* 4: LBA mid */ + u8 lbah; /* 5: LBA high */ + u8 device; /* 6: device select */ + union { /* 7: */ + u8 status; /* read: status */ + u8 command; /* write: command */ + }; +}; - u8 data; /* 6: low data byte (for TASKFILE IOCTL) */ +struct ide_cmd { + struct ide_taskfile tf; + struct ide_taskfile hob; + struct { + struct { + u8 tf; + u8 hob; + } out, in; + } valid; - union { /*  7: */ - u8 error; /* read: error */ - u8 feature; /* write: feature */ - }; + u8 tf_flags; + u8 ftf_flags; /* for TASKFILE ioctl */ + int protocol; - u8 nsect; /* 8: number of sectors */ - u8 lbal; /* 9: LBA low */ - u8 lbam; /* 10: LBA mid */ - u8 lbah; /* 11: LBA high */ + int sg_nents; /* number of sg entries */ + int orig_sg_nents; + int sg_dma_direction; /* DMA transfer direction */ - u8 device; /* 12: device select */ + unsigned int nbytes; + unsigned int nleft; + unsigned int last_xfer_len; - union { /* 13: */ - u8 status; /*  read: status  */ - u8 command; /* write: command */ - }; -}; + struct scatterlist *cursg; + unsigned int cursg_ofs; -typedef struct ide_task_s { - union { - struct ide_taskfile tf; - u8 tf_array[14]; - }; - u32 tf_flags; - int data_phase; struct request *rq; /* copy of request */ - void *special; /* valid_t generally */ -} ide_task_t; +}; /* ATAPI packet command flags */ enum { @@ -380,15 +322,8 @@ enum { PC_FLAG_DMA_IN_PROGRESS = (1 << 4), PC_FLAG_DMA_ERROR = (1 << 5), PC_FLAG_WRITING = (1 << 6), - /* command timed out */ - PC_FLAG_TIMEDOUT = (1 << 7), }; -/* - * With each packet command, we allocate a buffer of IDE_PC_BUFFER_SIZE bytes. - * This is used for several packet commands (not for READ/WRITE commands). - */ -#define IDE_PC_BUFFER_SIZE 256 #define ATAPI_WAIT_PC (60 * HZ) struct ide_atapi_pc { @@ -400,16 +335,6 @@ struct ide_atapi_pc { /* bytes to transfer */ int req_xfer; - /* bytes actually transferred */ - int xferred; - - /* data buffer */ - u8 *buf; - /* current buffer position */ - u8 *cur_pos; - int buf_size; - /* missing/available data on the current buffer */ - int b_count; /* the corresponding request */ struct request *rq; @@ -420,15 +345,6 @@ struct ide_atapi_pc { * those are more or less driver-specific and some of them are subject * to change/removal later. */ - u8 pc_buf[IDE_PC_BUFFER_SIZE]; - - /* idetape only */ - struct idetape_bh *bh; - char *b_data; - - struct scatterlist *sg; - unsigned int sg_cnt; - unsigned long timeout; }; @@ -445,6 +361,7 @@ struct ide_drive_s; struct ide_disk_ops { int (*check)(struct ide_drive_s *, const char *); int (*get_capacity)(struct ide_drive_s *); + u64 (*set_capacity)(struct ide_drive_s *, u64); void (*setup)(struct ide_drive_s *); void (*flush)(struct ide_drive_s *); int (*init_media)(struct ide_drive_s *, struct gendisk *); @@ -452,7 +369,6 @@ struct ide_disk_ops { int); ide_startstop_t (*do_request)(struct ide_drive_s *, struct request *, sector_t); - int (*end_request)(struct ide_drive_s *, int, int); int (*ioctl)(struct ide_drive_s *, struct block_device *, fmode_t, unsigned int, unsigned long); }; @@ -470,11 +386,6 @@ enum { IDE_AFLAG_TOCADDR_AS_BCD = (1 << 3), /* TOC track numbers are in BCD. */ IDE_AFLAG_TOCTRACKS_AS_BCD = (1 << 4), - /* - * Drive does not provide data in multiples of SECTOR_SIZE - * when more than one interrupt is needed. - */ - IDE_AFLAG_LIMIT_NFRAMES = (1 << 5), /* Saved TOC information is current. */ IDE_AFLAG_TOC_VALID = (1 << 6), /* We think that the drive door is locked. 
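One plausible way a converted driver could fill the new struct ide_cmd and its valid masks for a simple no-data command; the cache-flush choice is only an illustration, not taken from the patch.

#include <linux/string.h>
#include <linux/ata.h>
#include <linux/ide.h>

static int example_flush_cache(ide_drive_t *drive)
{
        struct ide_cmd cmd;

        memset(&cmd, 0, sizeof(cmd));
        cmd.tf.command   = ATA_CMD_FLUSH;
        cmd.valid.out.tf = IDE_VALID_OUT_TF | IDE_VALID_DEVICE;
        cmd.valid.in.tf  = IDE_VALID_IN_TF | IDE_VALID_DEVICE;

        return ide_no_data_taskfile(drive, &cmd);
}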
*/ @@ -528,8 +439,8 @@ enum { IDE_DFLAG_NICE1 = (1 << 5), /* device is physically present */ IDE_DFLAG_PRESENT = (1 << 6), - /* device ejected hint */ - IDE_DFLAG_DEAD = (1 << 7), + /* disable Host Protected Area */ + IDE_DFLAG_NOHPA = (1 << 7), /* id read from device (synthetic if not set) */ IDE_DFLAG_ID_READ = (1 << 8), IDE_DFLAG_NOPROBE = (1 << 9), @@ -568,6 +479,7 @@ enum { /* write protect */ IDE_DFLAG_WP = (1 << 29), IDE_DFLAG_FORMAT_IN_PROGRESS = (1 << 30), + IDE_DFLAG_NIEN_QUIRK = (1 << 31), }; struct ide_drive_s { @@ -592,14 +504,13 @@ struct ide_drive_s { unsigned long sleep; /* sleep until this time */ unsigned long timeout; /* max time to wait for irq */ - special_t special; /* special action flags */ + u8 special_flags; /* special action flags */ u8 select; /* basic drive/head select reg value */ u8 retry_pio; /* retrying dma capable host in pio */ u8 waiting_for_dma; /* dma currently in progress */ u8 dma; /* atapi dma flag */ - u8 quirk_list; /* considered quirky, set for a specific host */ u8 init_speed; /* transfer rate set at boot */ u8 current_speed; /* current transfer rate set */ u8 desired_speed; /* desired transfer rate set */ @@ -621,11 +532,10 @@ struct ide_drive_s { unsigned int bios_cyl; /* BIOS/fdisk/LILO number of cyls */ unsigned int cyl; /* "real" number of cyls */ - unsigned int drive_data; /* used by set_pio_mode/selectproc */ + void *drive_data; /* used by set_pio_mode/dev_select() */ unsigned int failures; /* current failure count */ unsigned int max_failures; /* maximum allowed failure count */ - u64 probed_capacity;/* initial reported media capacity (ide-cd only currently) */ - + u64 probed_capacity;/* initial/native media capacity */ u64 capacity64; /* total number of sectors */ int lun; /* logical unit */ @@ -643,19 +553,22 @@ struct ide_drive_s { /* current packet command */ struct ide_atapi_pc *pc; - /* callback for packet commands */ - void (*pc_callback)(struct ide_drive_s *, int); + /* last failed packet command */ + struct ide_atapi_pc *failed_pc; - void (*pc_update_buffers)(struct ide_drive_s *, struct ide_atapi_pc *); - int (*pc_io_buffers)(struct ide_drive_s *, struct ide_atapi_pc *, - unsigned int, int); + /* callback for packet commands */ + int (*pc_callback)(struct ide_drive_s *, int); ide_startstop_t (*irq_handler)(struct ide_drive_s *); unsigned long atapi_flags; struct ide_atapi_pc request_sense_pc; - struct request request_sense_rq; + + /* current sense rq and buffer */ + bool sense_rq_armed; + struct request sense_rq; + struct request_sense sense_data; }; typedef struct ide_drive_s ide_drive_t; @@ -663,7 +576,7 @@ typedef struct ide_drive_s ide_drive_t; #define to_ide_device(dev) container_of(dev, ide_drive_t, gendev) #define to_ide_drv(obj, cont_type) \ - container_of(obj, struct cont_type, kref) + container_of(obj, struct cont_type, dev) #define ide_drv_g(disk, cont_type) \ container_of((disk)->private_data, struct cont_type, driver) @@ -674,16 +587,16 @@ struct ide_tp_ops { void (*exec_command)(struct hwif_s *, u8); u8 (*read_status)(struct hwif_s *); u8 (*read_altstatus)(struct hwif_s *); + void (*write_devctl)(struct hwif_s *, u8); - void (*set_irq)(struct hwif_s *, int); - - void (*tf_load)(ide_drive_t *, struct ide_task_s *); - void (*tf_read)(ide_drive_t *, struct ide_task_s *); + void (*dev_select)(ide_drive_t *); + void (*tf_load)(ide_drive_t *, struct ide_taskfile *, u8); + void (*tf_read)(ide_drive_t *, struct ide_taskfile *, u8); - void (*input_data)(ide_drive_t *, struct request *, void *, - unsigned int); - void 
(*output_data)(ide_drive_t *, struct request *, void *, - unsigned int); + void (*input_data)(ide_drive_t *, struct ide_cmd *, + void *, unsigned int); + void (*output_data)(ide_drive_t *, struct ide_cmd *, + void *, unsigned int); }; extern const struct ide_tp_ops default_tp_ops; @@ -694,7 +607,6 @@ extern const struct ide_tp_ops default_tp_ops; * @init_dev: host specific initialization of a device * @set_pio_mode: routine to program host for PIO mode * @set_dma_mode: routine to program host for DMA mode - * @selectproc: tweaks hardware to select drive * @reset_poll: chipset polling based on hba specifics * @pre_reset: chipset specific changes to default for device-hba resets * @resetproc: routine to reset controller after a disk reset @@ -711,13 +623,13 @@ struct ide_port_ops { void (*init_dev)(ide_drive_t *); void (*set_pio_mode)(ide_drive_t *, const u8); void (*set_dma_mode)(ide_drive_t *, const u8); - void (*selectproc)(ide_drive_t *); int (*reset_poll)(ide_drive_t *); void (*pre_reset)(ide_drive_t *); void (*resetproc)(ide_drive_t *); void (*maskproc)(ide_drive_t *, int); void (*quirkproc)(ide_drive_t *); void (*clear_irq)(ide_drive_t *); + int (*test_irq)(struct hwif_s *); u8 (*mdma_filter)(ide_drive_t *); u8 (*udma_filter)(ide_drive_t *); @@ -727,13 +639,15 @@ struct ide_port_ops { struct ide_dma_ops { void (*dma_host_set)(struct ide_drive_s *, int); - int (*dma_setup)(struct ide_drive_s *); - void (*dma_exec_cmd)(struct ide_drive_s *, u8); + int (*dma_setup)(struct ide_drive_s *, struct ide_cmd *); void (*dma_start)(struct ide_drive_s *); int (*dma_end)(struct ide_drive_s *); int (*dma_test_irq)(struct ide_drive_s *); void (*dma_lost_irq)(struct ide_drive_s *); - void (*dma_timeout)(struct ide_drive_s *); + /* below ones are optional */ + int (*dma_check)(struct ide_drive_s *, struct ide_cmd *); + int (*dma_timer_expiry)(struct ide_drive_s *); + void (*dma_clear)(struct ide_drive_s *); /* * The following method is optional and only required to be * implemented for the SFF-8038i compatible controllers. @@ -741,6 +655,10 @@ struct ide_dma_ops { u8 (*dma_sff_read_status)(struct hwif_s *); }; +enum { + IDE_PFLAG_PROBING = (1 << 0), +}; + struct ide_host; typedef struct hwif_s { @@ -757,6 +675,8 @@ typedef struct hwif_s { ide_drive_t *devices[MAX_DRIVES + 1]; + unsigned long port_flags; + u8 major; /* our major number */ u8 index; /* 0 for ide0; 1 for ide1; ... 
*/ u8 channel; /* for dual-port chips: 0=primary, 1=secondary */ @@ -775,8 +695,6 @@ typedef struct hwif_s { struct device *dev; - ide_ack_intr_t *ack_intr; - void (*rw_disk)(ide_drive_t *, struct request *); const struct ide_tp_ops *tp_ops; @@ -796,18 +714,8 @@ typedef struct hwif_s { /* Scatter-gather list used to build the above */ struct scatterlist *sg_table; int sg_max_nents; /* Maximum number of entries in it */ - int sg_nents; /* Current number of entries in it */ - int sg_dma_direction; /* dma transfer direction */ - - /* data phase of the active command (currently only valid for PIO/DMA) */ - int data_phase; - - struct ide_task_s task; /* current command */ - unsigned int nsect; - unsigned int nleft; - struct scatterlist *cursg; - unsigned int cursg_ofs; + struct ide_cmd cmd; /* current command */ int rqsize; /* max sectors per request */ int irq; /* our irq number */ @@ -865,8 +773,18 @@ struct ide_host { ide_hwif_t *ports[MAX_HOST_PORTS + 1]; unsigned int n_ports; struct device *dev[2]; - unsigned int (*init_chipset)(struct pci_dev *); + + int (*init_chipset)(struct pci_dev *); + + void (*get_lock)(irq_handler_t, void *); + void (*release_lock)(void); + + irq_handler_t irq_handler; + unsigned long host_flags; + + int irq_flags; + void *host_priv; ide_hwif_t *cur_port; /* for hosts requiring serialization */ @@ -883,7 +801,7 @@ typedef ide_startstop_t (ide_handler_t)(ide_drive_t *); typedef int (ide_expiry_t)(ide_drive_t *); /* used by ide-cd, ide-floppy, etc. */ -typedef void (xfer_func_t)(ide_drive_t *, struct request *rq, void *, unsigned); +typedef void (xfer_func_t)(ide_drive_t *, struct ide_cmd *, void *, unsigned); extern struct mutex ide_setting_mtx; @@ -1059,10 +977,11 @@ enum { }; /* DRV_NAME has to be defined in the driver before using the macro below */ -#define __ide_debug_log(lvl, fmt, args...) \ -{ \ - if (unlikely(drive->debug_mask & lvl)) \ - printk(KERN_INFO DRV_NAME ": " fmt, ## args); \ +#define __ide_debug_log(lvl, fmt, args...) 
\ +{ \ + if (unlikely(drive->debug_mask & lvl)) \ + printk(KERN_INFO DRV_NAME ": %s: " fmt "\n", \ + __func__, ## args); \ } /* @@ -1101,7 +1020,7 @@ int generic_ide_resume(struct device *); void ide_complete_power_step(ide_drive_t *, struct request *); ide_startstop_t ide_start_power_step(ide_drive_t *, struct request *); -void ide_complete_pm_request(ide_drive_t *, struct request *); +void ide_complete_pm_rq(ide_drive_t *, struct request *); void ide_check_pm_state(ide_drive_t *, struct request *); /* @@ -1113,7 +1032,6 @@ void ide_check_pm_state(ide_drive_t *, struct request *); struct ide_driver { const char *version; ide_startstop_t (*do_request)(ide_drive_t *, struct request *, sector_t); - int (*end_request)(ide_drive_t *, int, int); struct device_driver gen_driver; int (*probe)(ide_drive_t *); void (*remove)(ide_drive_t *); @@ -1144,16 +1062,14 @@ int generic_ide_ioctl(ide_drive_t *, struct block_device *, unsigned, unsigned l extern int ide_vlb_clk; extern int ide_pci_clk; -extern int ide_end_request (ide_drive_t *drive, int uptodate, int nrsecs); -int ide_end_dequeued_request(ide_drive_t *drive, struct request *rq, - int uptodate, int nr_sectors); +int ide_end_rq(ide_drive_t *, struct request *, int, unsigned int); +void ide_kill_rq(ide_drive_t *, struct request *); -extern void ide_set_handler (ide_drive_t *drive, ide_handler_t *handler, unsigned int timeout, ide_expiry_t *expiry); +void __ide_set_handler(ide_drive_t *, ide_handler_t *, unsigned int); +void ide_set_handler(ide_drive_t *, ide_handler_t *, unsigned int); -void ide_execute_command(ide_drive_t *, u8, ide_handler_t *, unsigned int, - ide_expiry_t *); - -void ide_execute_pkt_cmd(ide_drive_t *); +void ide_execute_command(ide_drive_t *, struct ide_cmd *, ide_handler_t *, + unsigned int); void ide_pad_transfer(ide_drive_t *, int, int); @@ -1163,44 +1079,42 @@ void ide_fix_driveid(u16 *); extern void ide_fixstring(u8 *, const int, const int); -int ide_busy_sleep(ide_hwif_t *, unsigned long, int); +int ide_busy_sleep(ide_drive_t *, unsigned long, int); int ide_wait_stat(ide_startstop_t *, ide_drive_t *, u8, u8, unsigned long); +ide_startstop_t ide_do_park_unpark(ide_drive_t *, struct request *); +ide_startstop_t ide_do_devset(ide_drive_t *, struct request *); + extern ide_startstop_t ide_do_reset (ide_drive_t *); extern int ide_devset_execute(ide_drive_t *drive, const struct ide_devset *setting, int arg); -extern void ide_do_drive_cmd(ide_drive_t *, struct request *); - -extern void ide_end_drive_cmd(ide_drive_t *, u8, u8); +void ide_complete_cmd(ide_drive_t *, struct ide_cmd *, u8, u8); +int ide_complete_rq(ide_drive_t *, int, unsigned int); -void ide_tf_dump(const char *, struct ide_taskfile *); +void ide_tf_readback(ide_drive_t *drive, struct ide_cmd *cmd); +void ide_tf_dump(const char *, struct ide_cmd *); void ide_exec_command(ide_hwif_t *, u8); u8 ide_read_status(ide_hwif_t *); u8 ide_read_altstatus(ide_hwif_t *); +void ide_write_devctl(ide_hwif_t *, u8); -void ide_set_irq(ide_hwif_t *, int); +void ide_dev_select(ide_drive_t *); +void ide_tf_load(ide_drive_t *, struct ide_taskfile *, u8); +void ide_tf_read(ide_drive_t *, struct ide_taskfile *, u8); -void ide_tf_load(ide_drive_t *, ide_task_t *); -void ide_tf_read(ide_drive_t *, ide_task_t *); +void ide_input_data(ide_drive_t *, struct ide_cmd *, void *, unsigned int); +void ide_output_data(ide_drive_t *, struct ide_cmd *, void *, unsigned int); -void ide_input_data(ide_drive_t *, struct request *, void *, unsigned int); -void ide_output_data(ide_drive_t *, struct 
request *, void *, unsigned int); - -int ide_io_buffers(ide_drive_t *, struct ide_atapi_pc *, unsigned int, int); - -extern void SELECT_DRIVE(ide_drive_t *); void SELECT_MASK(ide_drive_t *, int); u8 ide_read_error(ide_drive_t *); void ide_read_bcount_and_ireason(ide_drive_t *, u16 *, u8 *); -extern int drive_is_ready(ide_drive_t *); - -void ide_pktcmd_tf_load(ide_drive_t *, u32, u16, u8); +int ide_check_ireason(ide_drive_t *, struct request *, int, int, int); int ide_check_atapi_device(ide_drive_t *, const char *); @@ -1226,28 +1140,36 @@ enum { REQ_IDETAPE_WRITE = (1 << 3), }; -int ide_queue_pc_tail(ide_drive_t *, struct gendisk *, struct ide_atapi_pc *); +int ide_queue_pc_tail(ide_drive_t *, struct gendisk *, struct ide_atapi_pc *, + void *, unsigned int); int ide_do_test_unit_ready(ide_drive_t *, struct gendisk *); int ide_do_start_stop(ide_drive_t *, struct gendisk *, int); int ide_set_media_lock(ide_drive_t *, struct gendisk *, int); void ide_create_request_sense_cmd(ide_drive_t *, struct ide_atapi_pc *); -void ide_retry_pc(ide_drive_t *, struct gendisk *); +void ide_retry_pc(ide_drive_t *drive); + +void ide_prep_sense(ide_drive_t *drive, struct request *rq); +int ide_queue_sense_rq(ide_drive_t *drive, void *special); int ide_cd_expiry(ide_drive_t *); int ide_cd_get_xferlen(struct request *); -ide_startstop_t ide_issue_pc(ide_drive_t *); +ide_startstop_t ide_issue_pc(ide_drive_t *, struct ide_cmd *); + +ide_startstop_t do_rw_taskfile(ide_drive_t *, struct ide_cmd *); + +void ide_pio_bytes(ide_drive_t *, struct ide_cmd *, unsigned int, unsigned int); -ide_startstop_t do_rw_taskfile(ide_drive_t *, ide_task_t *); +void ide_finish_cmd(ide_drive_t *, struct ide_cmd *, u8); -void task_end_request(ide_drive_t *, struct request *, u8); +int ide_raw_taskfile(ide_drive_t *, struct ide_cmd *, u8 *, u16); +int ide_no_data_taskfile(ide_drive_t *, struct ide_cmd *); -int ide_raw_taskfile(ide_drive_t *, ide_task_t *, u8 *, u16); -int ide_no_data_taskfile(ide_drive_t *, ide_task_t *); +int ide_taskfile_ioctl(ide_drive_t *, unsigned long); -int ide_taskfile_ioctl(ide_drive_t *, unsigned int, unsigned long); +int ide_dev_read_id(ide_drive_t *, u8, u16 *); extern int ide_driveid_update(ide_drive_t *); extern int ide_config_drive_speed(ide_drive_t *, u8); @@ -1278,8 +1200,8 @@ static inline int ide_pci_is_in_compatibility_mode(struct pci_dev *dev) return 0; } -void ide_pci_setup_ports(struct pci_dev *, const struct ide_port_info *, int, - hw_regs_t *, hw_regs_t **); +void ide_pci_setup_ports(struct pci_dev *, const struct ide_port_info *, + struct ide_hw *, struct ide_hw **); void ide_setup_pci_noise(struct pci_dev *, const struct ide_port_info *); #ifdef CONFIG_BLK_DEV_IDEDMA_PCI @@ -1347,10 +1269,10 @@ enum { IDE_HFLAG_ERROR_STOPS_FIFO = (1 << 19), /* serialize ports */ IDE_HFLAG_SERIALIZE = (1 << 20), - /* use legacy IRQs */ - IDE_HFLAG_LEGACY_IRQS = (1 << 21), - /* force use of legacy IRQs */ - IDE_HFLAG_FORCE_LEGACY_IRQS = (1 << 22), + /* host is DTC2278 */ + IDE_HFLAG_DTC2278 = (1 << 21), + /* 4 devices on a single set of I/O ports */ + IDE_HFLAG_4DRIVES = (1 << 22), /* host is TRM290 */ IDE_HFLAG_TRM290 = (1 << 23), /* use 32-bit I/O ops */ @@ -1378,7 +1300,12 @@ enum { struct ide_port_info { char *name; - unsigned int (*init_chipset)(struct pci_dev *); + + int (*init_chipset)(struct pci_dev *); + + void (*get_lock)(irq_handler_t, void *); + void (*release_lock)(void); + void (*init_iops)(ide_hwif_t *); void (*init_hwif)(ide_hwif_t *); int (*init_dma)(ide_hwif_t *, @@ -1395,6 +1322,9 @@ struct 
ide_port_info { u16 max_sectors; /* if < than the default one */ u32 host_flags; + + int irq_flags; + u8 pio_mask; u8 swdma_mask; u8 mwdma_mask; @@ -1414,8 +1344,8 @@ int ide_pci_resume(struct pci_dev *); #define ide_pci_resume NULL #endif -void ide_map_sg(ide_drive_t *, struct request *); -void ide_init_sg_cmd(ide_drive_t *, struct request *); +void ide_map_sg(ide_drive_t *, struct ide_cmd *); +void ide_init_sg_cmd(struct ide_cmd *, unsigned int); #define BAD_DMA_DRIVE 0 #define GOOD_DMA_DRIVE 1 @@ -1430,7 +1360,6 @@ int ide_in_drive_list(u16 *, const struct drive_list_entry *); #ifdef CONFIG_BLK_DEV_IDEDMA int ide_dma_good_drive(ide_drive_t *); int __ide_dma_bad_drive(ide_drive_t *); -int ide_id_dma_bug(ide_drive_t *); u8 ide_find_dma_mode(ide_drive_t *, u8); @@ -1449,18 +1378,18 @@ ide_startstop_t ide_dma_intr(ide_drive_t *); int ide_allocate_dma_engine(ide_hwif_t *); void ide_release_dma_engine(ide_hwif_t *); -int ide_build_sglist(ide_drive_t *, struct request *); -void ide_destroy_dmatable(ide_drive_t *); +int ide_dma_prepare(ide_drive_t *, struct ide_cmd *); +void ide_dma_unmap_sg(ide_drive_t *, struct ide_cmd *); #ifdef CONFIG_BLK_DEV_IDEDMA_SFF int config_drive_for_dma(ide_drive_t *); -extern int ide_build_dmatable(ide_drive_t *, struct request *); +int ide_build_dmatable(ide_drive_t *, struct ide_cmd *); void ide_dma_host_set(ide_drive_t *, int); -extern int ide_dma_setup(ide_drive_t *); -void ide_dma_exec_cmd(ide_drive_t *, u8); +int ide_dma_setup(ide_drive_t *, struct ide_cmd *); extern void ide_dma_start(ide_drive_t *); int ide_dma_end(ide_drive_t *); int ide_dma_test_irq(ide_drive_t *); +int ide_dma_sff_timer_expiry(ide_drive_t *); u8 ide_dma_sff_read_status(ide_hwif_t *); extern const struct ide_dma_ops sff_dma_ops; #else @@ -1468,10 +1397,9 @@ static inline int config_drive_for_dma(ide_drive_t *drive) { return 0; } #endif /* CONFIG_BLK_DEV_IDEDMA_SFF */ void ide_dma_lost_irq(ide_drive_t *); -void ide_dma_timeout(ide_drive_t *); +ide_startstop_t ide_dma_timeout_retry(ide_drive_t *, int); #else -static inline int ide_id_dma_bug(ide_drive_t *drive) { return 0; } static inline u8 ide_find_dma_mode(ide_drive_t *drive, u8 speed) { return 0; } static inline u8 ide_max_dma_mode(ide_drive_t *drive) { return 0; } static inline void ide_dma_off_quietly(ide_drive_t *drive) { ; } @@ -1480,21 +1408,31 @@ static inline void ide_dma_on(ide_drive_t *drive) { ; } static inline void ide_dma_verbose(ide_drive_t *drive) { ; } static inline int ide_set_dma(ide_drive_t *drive) { return 1; } static inline void ide_check_dma_crc(ide_drive_t *drive) { ; } +static inline ide_startstop_t ide_dma_intr(ide_drive_t *drive) { return ide_stopped; } +static inline ide_startstop_t ide_dma_timeout_retry(ide_drive_t *drive, int error) { return ide_stopped; } static inline void ide_release_dma_engine(ide_hwif_t *hwif) { ; } +static inline int ide_dma_prepare(ide_drive_t *drive, + struct ide_cmd *cmd) { return 1; } +static inline void ide_dma_unmap_sg(ide_drive_t *drive, + struct ide_cmd *cmd) { ; } #endif /* CONFIG_BLK_DEV_IDEDMA */ #ifdef CONFIG_BLK_DEV_IDEACPI +int ide_acpi_init(void); +bool ide_port_acpi(ide_hwif_t *hwif); extern int ide_acpi_exec_tfs(ide_drive_t *drive); extern void ide_acpi_get_timing(ide_hwif_t *hwif); extern void ide_acpi_push_timing(ide_hwif_t *hwif); -extern void ide_acpi_init(ide_hwif_t *hwif); +void ide_acpi_init_port(ide_hwif_t *); void ide_acpi_port_init_devices(ide_hwif_t *); extern void ide_acpi_set_state(ide_hwif_t *hwif, int on); #else +static inline int ide_acpi_init(void) { 
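A rough sketch of a host driver's ide_port_info under the reworked interface (init_chipset() now returns int, and irq_flags is new); all example_* names are placeholders and the flag choices are arbitrary.

#include <linux/pci.h>
#include <linux/interrupt.h>
#include <linux/ata.h>
#include <linux/ide.h>

static int example_init_chipset(struct pci_dev *dev)
{
        /* program the controller; a nonzero return now reports failure */
        return 0;
}

static const struct ide_port_info example_port_info = {
        .name         = "example-ide",
        .init_chipset = example_init_chipset,
        .host_flags   = IDE_HFLAG_SERIALIZE,
        .irq_flags    = IRQF_SHARED,
        .pio_mask     = ATA_PIO4,
};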
return 0; } +static inline bool ide_port_acpi(ide_hwif_t *hwif) { return 0; } static inline int ide_acpi_exec_tfs(ide_drive_t *drive) { return 0; } static inline void ide_acpi_get_timing(ide_hwif_t *hwif) { ; } static inline void ide_acpi_push_timing(ide_hwif_t *hwif) { ; } -static inline void ide_acpi_init(ide_hwif_t *hwif) { ; } +static inline void ide_acpi_init_port(ide_hwif_t *hwif) { ; } static inline void ide_acpi_port_init_devices(ide_hwif_t *hwif) { ; } static inline void ide_acpi_set_state(ide_hwif_t *hwif, int on) {} #endif @@ -1502,16 +1440,18 @@ static inline void ide_acpi_set_state(ide_hwif_t *hwif, int on) {} void ide_register_region(struct gendisk *); void ide_unregister_region(struct gendisk *); +void ide_check_nien_quirk_list(ide_drive_t *); void ide_undecoded_slave(ide_drive_t *); void ide_port_apply_params(ide_hwif_t *); int ide_sysfs_register_port(ide_hwif_t *); -struct ide_host *ide_host_alloc(const struct ide_port_info *, hw_regs_t **); +struct ide_host *ide_host_alloc(const struct ide_port_info *, struct ide_hw **, + unsigned int); void ide_host_free(struct ide_host *); int ide_host_register(struct ide_host *, const struct ide_port_info *, - hw_regs_t **); -int ide_host_add(const struct ide_port_info *, hw_regs_t **, + struct ide_hw **); +int ide_host_add(const struct ide_port_info *, struct ide_hw **, unsigned int, struct ide_host **); void ide_host_remove(struct ide_host *); int ide_legacy_device_add(const struct ide_port_info *, unsigned long); @@ -1528,11 +1468,9 @@ static inline void ide_set_hwifdata (ide_hwif_t * hwif, void *data) hwif->hwif_data = data; } -const char *ide_xfer_verbose(u8 mode); extern void ide_toggle_bounce(ide_drive_t *drive, int on); -extern int ide_set_xfer_rate(ide_drive_t *drive, u8 rate); -u64 ide_get_lba_addr(struct ide_taskfile *, int); +u64 ide_get_lba_addr(struct ide_cmd *, int); u8 ide_dump_status(ide_drive_t *, const char *, u8); struct ide_timing { @@ -1569,14 +1507,19 @@ void ide_timing_merge(struct ide_timing *, struct ide_timing *, struct ide_timing *, unsigned int); int ide_timing_compute(ide_drive_t *, u8, struct ide_timing *, int, int); +#ifdef CONFIG_IDE_XFER_MODE int ide_scan_pio_blacklist(char *); - +const char *ide_xfer_verbose(u8); u8 ide_get_best_pio_mode(ide_drive_t *, u8, u8); - +int ide_pio_need_iordy(ide_drive_t *, const u8); int ide_set_pio_mode(ide_drive_t *, u8); int ide_set_dma_mode(ide_drive_t *, u8); - void ide_set_pio(ide_drive_t *, u8); +int ide_set_xfer_rate(ide_drive_t *, u8); +#else +static inline void ide_set_pio(ide_drive_t *drive, u8 pio) { ; } +static inline int ide_set_xfer_rate(ide_drive_t *drive, u8 rate) { return -1; } +#endif static inline void ide_set_max_pio(ide_drive_t *drive) { @@ -1606,9 +1549,23 @@ static inline ide_drive_t *ide_get_pair_dev(ide_drive_t *drive) return (peer->dev_flags & IDE_DFLAG_PRESENT) ? 
peer : NULL; } +static inline void *ide_get_drivedata(ide_drive_t *drive) +{ + return drive->drive_data; +} + +static inline void ide_set_drivedata(ide_drive_t *drive, void *data) +{ + drive->drive_data = data; +} + #define ide_port_for_each_dev(i, dev, port) \ for ((i) = 0; ((dev) = (port)->devices[i]) || (i) < MAX_DRIVES; (i)++) +#define ide_port_for_each_present_dev(i, dev, port) \ + for ((i) = 0; ((dev) = (port)->devices[i]) || (i) < MAX_DRIVES; (i)++) \ + if ((dev)->dev_flags & IDE_DFLAG_PRESENT) + #define ide_host_for_each_port(i, port, host) \ for ((i) = 0; ((port) = (host)->ports[i]) || (i) < MAX_HOST_PORTS; (i)++) diff --git a/include/linux/idr.h b/include/linux/idr.h index dd846df8cd3..e968db71e33 100644 --- a/include/linux/idr.h +++ b/include/linux/idr.h @@ -106,6 +106,7 @@ int idr_get_new(struct idr *idp, void *ptr, int *id); int idr_get_new_above(struct idr *idp, void *ptr, int starting_id, int *id); int idr_for_each(struct idr *idp, int (*fn)(int id, void *p, void *data), void *data); +void *idr_get_next(struct idr *idp, int *nextid); void *idr_replace(struct idr *idp, void *ptr, int id); void idr_remove(struct idr *idp, int id); void idr_remove_all(struct idr *idp); diff --git a/include/linux/ieee80211.h b/include/linux/ieee80211.h index c4e6ca1a630..52e15e079c6 100644 --- a/include/linux/ieee80211.h +++ b/include/linux/ieee80211.h @@ -18,6 +18,22 @@ #include <linux/types.h> #include <asm/byteorder.h> +/* + * DS bit usage + * + * TA = transmitter address + * RA = receiver address + * DA = destination address + * SA = source address + * + * ToDS FromDS A1(RA) A2(TA) A3 A4 Use + * ----------------------------------------------------------------- + * 0 0 DA SA BSSID - IBSS/DLS + * 0 1 DA BSSID SA - AP -> STA + * 1 0 BSSID SA DA - AP <- STA + * 1 1 RA TA DA SA unspecified (WDS) + */ + #define FCS_LEN 4 #define IEEE80211_FCTL_VERS 0x0003 @@ -99,7 +115,7 @@ #define IEEE80211_MAX_SSID_LEN 32 #define IEEE80211_MAX_MESH_ID_LEN 32 -#define IEEE80211_MESH_CONFIG_LEN 19 +#define IEEE80211_MESH_CONFIG_LEN 24 #define IEEE80211_QOS_CTL_LEN 2 #define IEEE80211_QOS_CTL_TID_MASK 0x000F @@ -477,6 +493,7 @@ struct ieee80211s_hdr { /* Mesh flags */ #define MESH_FLAGS_AE_A4 0x1 #define MESH_FLAGS_AE_A5_A6 0x2 +#define MESH_FLAGS_AE 0x3 #define MESH_FLAGS_PS_DEEP 0x4 /** @@ -524,9 +541,11 @@ struct ieee80211_tim_ie { u8 dtim_period; u8 bitmap_ctrl; /* variable size: 1 - 251 bytes */ - u8 virtual_map[0]; + u8 virtual_map[1]; } __attribute__ ((packed)); +#define WLAN_SA_QUERY_TR_ID_LEN 2 + struct ieee80211_mgmt { __le16 frame_control; __le16 duration; @@ -646,6 +665,10 @@ struct ieee80211_mgmt { u8 action_code; u8 variable[0]; } __attribute__((packed)) mesh_action; + struct { + u8 action; + u8 trans_id[WLAN_SA_QUERY_TR_ID_LEN]; + } __attribute__ ((packed)) sa_query; } u; } __attribute__ ((packed)) action; } u; @@ -655,6 +678,15 @@ struct ieee80211_mgmt { #define IEEE80211_MIN_ACTION_SIZE offsetof(struct ieee80211_mgmt, u.action.u) +/* Management MIC information element (IEEE 802.11w) */ +struct ieee80211_mmie { + u8 element_id; + u8 length; + __le16 key_id; + u8 sequence_number[6]; + u8 mic[8]; +} __attribute__ ((packed)); + /* Control frames */ struct ieee80211_rts { __le16 frame_control; @@ -770,6 +802,31 @@ struct ieee80211_ht_cap { #define IEEE80211_HT_AMPDU_PARM_FACTOR 0x03 #define IEEE80211_HT_AMPDU_PARM_DENSITY 0x1C +/* + * Maximum length of AMPDU that the STA can receive. 
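The new ide_get_drivedata()/ide_set_drivedata() accessors and the ide_port_for_each_present_dev() iterator in use; struct example_state is a hypothetical per-drive bookkeeping structure.

#include <linux/ide.h>

struct example_state {
        int media_changed;      /* hypothetical per-drive flag */
};

static void example_clear_flags(ide_hwif_t *hwif)
{
        ide_drive_t *drive;
        int i;

        ide_port_for_each_present_dev(i, drive, hwif) {
                struct example_state *st = ide_get_drivedata(drive);

                if (st)
                        st->media_changed = 0;
        }
}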
+ * Length = 2 ^ (13 + max_ampdu_length_exp) - 1 (octets) + */ +enum ieee80211_max_ampdu_length_exp { + IEEE80211_HT_MAX_AMPDU_8K = 0, + IEEE80211_HT_MAX_AMPDU_16K = 1, + IEEE80211_HT_MAX_AMPDU_32K = 2, + IEEE80211_HT_MAX_AMPDU_64K = 3 +}; + +#define IEEE80211_HT_MAX_AMPDU_FACTOR 13 + +/* Minimum MPDU start spacing */ +enum ieee80211_min_mpdu_spacing { + IEEE80211_HT_MPDU_DENSITY_NONE = 0, /* No restriction */ + IEEE80211_HT_MPDU_DENSITY_0_25 = 1, /* 1/4 usec */ + IEEE80211_HT_MPDU_DENSITY_0_5 = 2, /* 1/2 usec */ + IEEE80211_HT_MPDU_DENSITY_1 = 3, /* 1 usec */ + IEEE80211_HT_MPDU_DENSITY_2 = 4, /* 2 usec */ + IEEE80211_HT_MPDU_DENSITY_4 = 5, /* 4 usec */ + IEEE80211_HT_MPDU_DENSITY_8 = 6, /* 8 usec */ + IEEE80211_HT_MPDU_DENSITY_16 = 7 /* 16 usec */ +}; + /** * struct ieee80211_ht_info - HT information * @@ -836,6 +893,7 @@ struct ieee80211_ht_info { /* Authentication algorithms */ #define WLAN_AUTH_OPEN 0 #define WLAN_AUTH_SHARED_KEY 1 +#define WLAN_AUTH_FT 2 #define WLAN_AUTH_LEAP 128 #define WLAN_AUTH_CHALLENGE_LEN 128 @@ -899,6 +957,9 @@ enum ieee80211_statuscode { /* 802.11g */ WLAN_STATUS_ASSOC_DENIED_NOSHORTTIME = 25, WLAN_STATUS_ASSOC_DENIED_NODSSSOFDM = 26, + /* 802.11w */ + WLAN_STATUS_ASSOC_REJECTED_TEMPORARILY = 30, + WLAN_STATUS_ROBUST_MGMT_FRAME_POLICY_VIOLATION = 31, /* 802.11i */ WLAN_STATUS_INVALID_IE = 40, WLAN_STATUS_INVALID_GROUP_CIPHER = 41, @@ -1018,6 +1079,8 @@ enum ieee80211_eid { WLAN_EID_HT_INFORMATION = 61, /* 802.11i */ WLAN_EID_RSN = 48, + WLAN_EID_TIMEOUT_INTERVAL = 56, + WLAN_EID_MMIE = 76 /* 802.11w */, WLAN_EID_WPA = 221, WLAN_EID_GENERIC = 221, WLAN_EID_VENDOR_SPECIFIC = 221, @@ -1030,7 +1093,13 @@ enum ieee80211_category { WLAN_CATEGORY_QOS = 1, WLAN_CATEGORY_DLS = 2, WLAN_CATEGORY_BACK = 3, + WLAN_CATEGORY_PUBLIC = 4, + WLAN_CATEGORY_HT = 7, + WLAN_CATEGORY_SA_QUERY = 8, + WLAN_CATEGORY_PROTECTED_DUAL_OF_ACTION = 9, WLAN_CATEGORY_WMM = 17, + WLAN_CATEGORY_VENDOR_SPECIFIC_PROTECTED = 126, + WLAN_CATEGORY_VENDOR_SPECIFIC = 127, }; /* SPECTRUM_MGMT action code */ @@ -1042,6 +1111,15 @@ enum ieee80211_spectrum_mgmt_actioncode { WLAN_ACTION_SPCT_CHL_SWITCH = 4, }; +/* Security key length */ +enum ieee80211_key_len { + WLAN_KEY_LEN_WEP40 = 5, + WLAN_KEY_LEN_WEP104 = 13, + WLAN_KEY_LEN_CCMP = 16, + WLAN_KEY_LEN_TKIP = 32, + WLAN_KEY_LEN_AES_CMAC = 16, +}; + /* * IEEE 802.11-2007 7.3.2.9 Country information element * @@ -1104,6 +1182,12 @@ struct ieee80211_country_ie_triplet { }; } __attribute__ ((packed)); +enum ieee80211_timeout_interval_type { + WLAN_TIMEOUT_REASSOC_DEADLINE = 1 /* 802.11r */, + WLAN_TIMEOUT_KEY_LIFETIME = 2 /* 802.11r */, + WLAN_TIMEOUT_ASSOC_COMEBACK = 3 /* 802.11w */, +}; + /* BACK action code */ enum ieee80211_back_actioncode { WLAN_ACTION_ADDBA_REQ = 0, @@ -1118,6 +1202,13 @@ enum ieee80211_back_parties { WLAN_BACK_TIMER = 2, }; +/* SA Query action */ +enum ieee80211_sa_query_action { + WLAN_ACTION_SA_QUERY_REQUEST = 0, + WLAN_ACTION_SA_QUERY_RESPONSE = 1, +}; + + /* A-MSDU 802.11n */ #define IEEE80211_QOS_CONTROL_A_MSDU_PRESENT 0x0080 @@ -1128,6 +1219,11 @@ enum ieee80211_back_parties { /* reserved: 0x000FAC03 */ #define WLAN_CIPHER_SUITE_CCMP 0x000FAC04 #define WLAN_CIPHER_SUITE_WEP104 0x000FAC05 +#define WLAN_CIPHER_SUITE_AES_CMAC 0x000FAC06 + +/* AKM suite selectors */ +#define WLAN_AKM_SUITE_8021X 0x000FAC01 +#define WLAN_AKM_SUITE_PSK 0x000FAC02 #define WLAN_MAX_KEY_LEN 32 @@ -1185,4 +1281,190 @@ static inline u8 *ieee80211_get_DA(struct ieee80211_hdr *hdr) return hdr->addr1; } +/** + * ieee80211_is_robust_mgmt_frame - check if 
frame is a robust management frame + * @hdr: the frame (buffer must include at least the first octet of payload) + */ +static inline bool ieee80211_is_robust_mgmt_frame(struct ieee80211_hdr *hdr) +{ + if (ieee80211_is_disassoc(hdr->frame_control) || + ieee80211_is_deauth(hdr->frame_control)) + return true; + + if (ieee80211_is_action(hdr->frame_control)) { + u8 *category; + + /* + * Action frames, excluding Public Action frames, are Robust + * Management Frames. However, if we are looking at a Protected + * frame, skip the check since the data may be encrypted and + * the frame has already been found to be a Robust Management + * Frame (by the other end). + */ + if (ieee80211_has_protected(hdr->frame_control)) + return true; + category = ((u8 *) hdr) + 24; + return *category != WLAN_CATEGORY_PUBLIC && + *category != WLAN_CATEGORY_HT && + *category != WLAN_CATEGORY_VENDOR_SPECIFIC; + } + + return false; +} + +/** + * ieee80211_fhss_chan_to_freq - get channel frequency + * @channel: the FHSS channel + * + * Convert IEEE802.11 FHSS channel to frequency (MHz) + * Ref IEEE 802.11-2007 section 14.6 + */ +static inline int ieee80211_fhss_chan_to_freq(int channel) +{ + if ((channel > 1) && (channel < 96)) + return channel + 2400; + else + return -1; +} + +/** + * ieee80211_freq_to_fhss_chan - get channel + * @freq: the channels frequency + * + * Convert frequency (MHz) to IEEE802.11 FHSS channel + * Ref IEEE 802.11-2007 section 14.6 + */ +static inline int ieee80211_freq_to_fhss_chan(int freq) +{ + if ((freq > 2401) && (freq < 2496)) + return freq - 2400; + else + return -1; +} + +/** + * ieee80211_dsss_chan_to_freq - get channel center frequency + * @channel: the DSSS channel + * + * Convert IEEE802.11 DSSS channel to the center frequency (MHz). + * Ref IEEE 802.11-2007 section 15.6 + */ +static inline int ieee80211_dsss_chan_to_freq(int channel) +{ + if ((channel > 0) && (channel < 14)) + return 2407 + (channel * 5); + else if (channel == 14) + return 2484; + else + return -1; +} + +/** + * ieee80211_freq_to_dsss_chan - get channel + * @freq: the frequency + * + * Convert frequency (MHz) to IEEE802.11 DSSS channel + * Ref IEEE 802.11-2007 section 15.6 + * + * This routine selects the channel with the closest center frequency. 
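/*
 * Illustrative sketch (not from the original patch): a few sample values
 * for ieee80211_dsss_chan_to_freq() as defined above.
 */
static inline void dsss_chan_to_freq_examples(void)
{
        /* channel 6 maps to 2407 + 6 * 5 = 2437 MHz */
        WARN_ON(ieee80211_dsss_chan_to_freq(6) != 2437);
        /* channel 14 is special-cased to 2484 MHz */
        WARN_ON(ieee80211_dsss_chan_to_freq(14) != 2484);
        /* anything outside 1..14 is rejected with -1 */
        WARN_ON(ieee80211_dsss_chan_to_freq(15) != -1);
}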
+ */ +static inline int ieee80211_freq_to_dsss_chan(int freq) +{ + if ((freq >= 2410) && (freq < 2475)) + return (freq - 2405) / 5; + else if ((freq >= 2482) && (freq < 2487)) + return 14; + else + return -1; +} + +/* Convert IEEE802.11 HR DSSS channel to frequency (MHz) and back + * Ref IEEE 802.11-2007 section 18.4.6.2 + * + * The channels and frequencies are the same as those defined for DSSS + */ +#define ieee80211_hr_chan_to_freq(chan) ieee80211_dsss_chan_to_freq(chan) +#define ieee80211_freq_to_hr_chan(freq) ieee80211_freq_to_dsss_chan(freq) + +/* Convert IEEE802.11 ERP channel to frequency (MHz) and back + * Ref IEEE 802.11-2007 section 19.4.2 + */ +#define ieee80211_erp_chan_to_freq(chan) ieee80211_hr_chan_to_freq(chan) +#define ieee80211_freq_to_erp_chan(freq) ieee80211_freq_to_hr_chan(freq) + +/** + * ieee80211_ofdm_chan_to_freq - get channel center frequency + * @s_freq: starting frequency == (dotChannelStartingFactor/2) MHz + * @channel: the OFDM channel + * + * Convert IEEE802.11 OFDM channel to center frequency (MHz) + * Ref IEEE 802.11-2007 section 17.3.8.3.2 + */ +static inline int ieee80211_ofdm_chan_to_freq(int s_freq, int channel) +{ + if ((channel > 0) && (channel <= 200) && + (s_freq >= 4000)) + return s_freq + (channel * 5); + else + return -1; +} + +/** + * ieee80211_freq_to_ofdm_channel - get channel + * @s_freq: starting frequency == (dotChannelStartingFactor/2) MHz + * @freq: the frequency + * + * Convert frequency (MHz) to IEEE802.11 OFDM channel + * Ref IEEE 802.11-2007 section 17.3.8.3.2 + * + * This routine selects the channel with the closest center frequency. + */ +static inline int ieee80211_freq_to_ofdm_chan(int s_freq, int freq) +{ + if ((freq > (s_freq + 2)) && (freq <= (s_freq + 1202)) && + (s_freq >= 4000)) + return (freq + 2 - s_freq) / 5; + else + return -1; +} + +/** + * ieee80211_tu_to_usec - convert time units (TU) to microseconds + * @tu: the TUs + */ +static inline unsigned long ieee80211_tu_to_usec(unsigned long tu) +{ + return 1024 * tu; +} + +/** + * ieee80211_check_tim - check if AID bit is set in TIM + * @tim: the TIM IE + * @tim_len: length of the TIM IE + * @aid: the AID to look for + */ +static inline bool ieee80211_check_tim(struct ieee80211_tim_ie *tim, + u8 tim_len, u16 aid) +{ + u8 mask; + u8 index, indexn1, indexn2; + + if (unlikely(!tim || tim_len < sizeof(*tim))) + return false; + + aid &= 0x3fff; + index = aid / 8; + mask = 1 << (aid & 7); + + indexn1 = tim->bitmap_ctrl & 0xfe; + indexn2 = tim_len + indexn1 - 4; + + if (index < indexn1 || index > indexn2) + return false; + + index -= indexn1; + + return !!(tim->virtual_map[index] & mask); +} + #endif /* LINUX_IEEE80211_H */ diff --git a/include/linux/if.h b/include/linux/if.h index 2a6e29620a9..b9a6229f3be 100644 --- a/include/linux/if.h +++ b/include/linux/if.h @@ -66,6 +66,10 @@ #define IFF_SLAVE_NEEDARP 0x40 /* need ARPs for validation */ #define IFF_ISATAP 0x80 /* ISATAP interface (RFC4214) */ #define IFF_MASTER_ARPMON 0x100 /* bonding master, ARP mon in use */ +#define IFF_WAN_HDLC 0x200 /* WAN HDLC device */ +#define IFF_XMIT_DST_RELEASE 0x400 /* dev_hard_start_xmit() is allowed to + * release skb->dst + */ #define IF_GET_IFACE 0x0001 /* for querying only */ #define IF_GET_PROTO 0x0002 diff --git a/include/linux/if_addr.h b/include/linux/if_addr.h index 43f3bedaafd..fd974046675 100644 --- a/include/linux/if_addr.h +++ b/include/linux/if_addr.h @@ -1,6 +1,7 @@ #ifndef __LINUX_IF_ADDR_H #define __LINUX_IF_ADDR_H +#include <linux/types.h> #include <linux/netlink.h> struct 
ifaddrmsg @@ -40,6 +41,7 @@ enum #define IFA_F_NODAD 0x02 #define IFA_F_OPTIMISTIC 0x04 +#define IFA_F_DADFAILED 0x08 #define IFA_F_HOMEADDRESS 0x10 #define IFA_F_DEPRECATED 0x20 #define IFA_F_TENTATIVE 0x40 diff --git a/include/linux/if_addrlabel.h b/include/linux/if_addrlabel.h index 9fe79c95dd2..89571f65d6d 100644 --- a/include/linux/if_addrlabel.h +++ b/include/linux/if_addrlabel.h @@ -10,6 +10,8 @@ #ifndef __LINUX_IF_ADDRLABEL_H #define __LINUX_IF_ADDRLABEL_H +#include <linux/types.h> + struct ifaddrlblmsg { __u8 ifal_family; /* Address family */ diff --git a/include/linux/if_arcnet.h b/include/linux/if_arcnet.h index 27ea2ac445a..0835debab11 100644 --- a/include/linux/if_arcnet.h +++ b/include/linux/if_arcnet.h @@ -16,6 +16,7 @@ #ifndef _LINUX_IF_ARCNET_H #define _LINUX_IF_ARCNET_H +#include <linux/types.h> #include <linux/if_ether.h> @@ -57,10 +58,10 @@ */ struct arc_rfc1201 { - uint8_t proto; /* protocol ID field - varies */ - uint8_t split_flag; /* for use with split packets */ + __u8 proto; /* protocol ID field - varies */ + __u8 split_flag; /* for use with split packets */ __be16 sequence; /* sequence number */ - uint8_t payload[0]; /* space remaining in packet (504 bytes)*/ + __u8 payload[0]; /* space remaining in packet (504 bytes)*/ }; #define RFC1201_HDR_SIZE 4 @@ -70,8 +71,8 @@ struct arc_rfc1201 */ struct arc_rfc1051 { - uint8_t proto; /* ARC_P_RFC1051_ARP/RFC1051_IP */ - uint8_t payload[0]; /* 507 bytes */ + __u8 proto; /* ARC_P_RFC1051_ARP/RFC1051_IP */ + __u8 payload[0]; /* 507 bytes */ }; #define RFC1051_HDR_SIZE 1 @@ -82,20 +83,20 @@ struct arc_rfc1051 */ struct arc_eth_encap { - uint8_t proto; /* Always ARC_P_ETHER */ + __u8 proto; /* Always ARC_P_ETHER */ struct ethhdr eth; /* standard ethernet header (yuck!) */ - uint8_t payload[0]; /* 493 bytes */ + __u8 payload[0]; /* 493 bytes */ }; #define ETH_ENCAP_HDR_SIZE 14 struct arc_cap { - uint8_t proto; - uint8_t cookie[sizeof(int)]; /* Actually NOT sent over the network */ + __u8 proto; + __u8 cookie[sizeof(int)]; /* Actually NOT sent over the network */ union { - uint8_t ack; - uint8_t raw[0]; /* 507 bytes */ + __u8 ack; + __u8 raw[0]; /* 507 bytes */ } mes; }; @@ -109,7 +110,7 @@ struct arc_cap */ struct arc_hardware { - uint8_t source, /* source ARCnet - filled in automagically */ + __u8 source, /* source ARCnet - filled in automagically */ dest, /* destination ARCnet - 0 for broadcast */ offset[2]; /* offset bytes (some weird semantics) */ }; @@ -130,7 +131,7 @@ struct archdr struct arc_rfc1051 rfc1051; struct arc_eth_encap eth_encap; struct arc_cap cap; - uint8_t raw[0]; /* 508 bytes */ + __u8 raw[0]; /* 508 bytes */ } soft; }; diff --git a/include/linux/if_arp.h b/include/linux/if_arp.h index 5ff89809a58..282eb37e2de 100644 --- a/include/linux/if_arp.h +++ b/include/linux/if_arp.h @@ -86,6 +86,7 @@ #define ARPHRD_IEEE80211 801 /* IEEE 802.11 */ #define ARPHRD_IEEE80211_PRISM 802 /* IEEE 802.11 + Prism2 header */ #define ARPHRD_IEEE80211_RADIOTAP 803 /* IEEE 802.11 + radiotap header */ +#define ARPHRD_IEEE802154 804 #define ARPHRD_PHONET 820 /* PhoNet media type */ #define ARPHRD_PHONET_PIPE 821 /* PhoNet pipe header */ diff --git a/include/linux/if_ether.h b/include/linux/if_ether.h index 7f3c735f422..580b6004d00 100644 --- a/include/linux/if_ether.h +++ b/include/linux/if_ether.h @@ -17,7 +17,7 @@ * as published by the Free Software Foundation; either version * 2 of the License, or (at your option) any later version. 
*/ - + #ifndef _LINUX_IF_ETHER_H #define _LINUX_IF_ETHER_H @@ -25,7 +25,7 @@ /* * IEEE 802.3 Ethernet magic constants. The frame sizes omit the preamble - * and FCS/CRC (frame check sequence). + * and FCS/CRC (frame check sequence). */ #define ETH_ALEN 6 /* Octets in one ethernet addr */ @@ -78,12 +78,15 @@ #define ETH_P_PAE 0x888E /* Port Access Entity (IEEE 802.1X) */ #define ETH_P_AOE 0x88A2 /* ATA over Ethernet */ #define ETH_P_TIPC 0x88CA /* TIPC */ +#define ETH_P_1588 0x88F7 /* IEEE 1588 Timesync */ +#define ETH_P_FCOE 0x8906 /* Fibre Channel over Ethernet */ +#define ETH_P_FIP 0x8914 /* FCoE Initialization Protocol */ #define ETH_P_EDSA 0xDADA /* Ethertype DSA [ NOT AN OFFICIALLY REGISTERED ID ] */ /* * Non DIX types. Won't clash for 1500 types. */ - + #define ETH_P_802_3 0x0001 /* Dummy type for 802.3 frames */ #define ETH_P_AX25 0x0002 /* Dummy protocol id for AX.25 */ #define ETH_P_ALL 0x0003 /* Every packet (be careful!!!) */ @@ -105,11 +108,12 @@ #define ETH_P_DSA 0x001B /* Distributed Switch Arch. */ #define ETH_P_TRAILER 0x001C /* Trailer switch tagging */ #define ETH_P_PHONET 0x00F5 /* Nokia Phonet frames */ +#define ETH_P_IEEE802154 0x00F6 /* IEEE802.15.4 frame */ /* * This is an Ethernet frame header. */ - + struct ethhdr { unsigned char h_dest[ETH_ALEN]; /* destination eth addr */ unsigned char h_source[ETH_ALEN]; /* source ether addr */ @@ -135,10 +139,10 @@ extern ssize_t sysfs_format_mac(char *buf, const unsigned char *addr, int len); /* * Display a 6 byte device address (MAC) in a readable format. */ -extern char *print_mac(char *buf, const unsigned char *addr); +extern char *print_mac(char *buf, const unsigned char *addr) __deprecated; #define MAC_FMT "%02x:%02x:%02x:%02x:%02x:%02x" #define MAC_BUF_SIZE 18 -#define DECLARE_MAC_BUF(var) char var[MAC_BUF_SIZE] __maybe_unused +#define DECLARE_MAC_BUF(var) char var[MAC_BUF_SIZE] #endif diff --git a/include/linux/if_fc.h b/include/linux/if_fc.h index 376a34ea472..6ed7f1bf35c 100644 --- a/include/linux/if_fc.h +++ b/include/linux/if_fc.h @@ -20,6 +20,7 @@ #ifndef _LINUX_IF_FC_H #define _LINUX_IF_FC_H +#include <linux/types.h> #define FC_ALEN 6 /* Octets in one ethernet addr */ #define FC_HLEN (sizeof(struct fch_hdr)+sizeof(struct fcllc)) diff --git a/include/linux/if_frad.h b/include/linux/if_frad.h index 5c34240de74..80b3a1056a5 100644 --- a/include/linux/if_frad.h +++ b/include/linux/if_frad.h @@ -26,8 +26,6 @@ #include <linux/if.h> -#if defined(CONFIG_DLCI) || defined(CONFIG_DLCI_MODULE) - /* Structures and constants associated with the DLCI device driver */ struct dlci_add @@ -71,11 +69,6 @@ struct dlci_conf { #define DLCI_VALID_FLAGS 0x000B -/* FRAD driver uses these to indicate what it did with packet */ -#define DLCI_RET_OK 0x00 -#define DLCI_RET_ERR 0x01 -#define DLCI_RET_DROP 0x02 - /* defines for the actual Frame Relay hardware */ #define FRAD_GET_CONF (SIOCDEVPRIVATE) #define FRAD_SET_CONF (SIOCDEVPRIVATE + 1) @@ -127,6 +120,8 @@ struct frad_conf #ifdef __KERNEL__ +#if defined(CONFIG_DLCI) || defined(CONFIG_DLCI_MODULE) + /* these are the fields of an RFC 1490 header */ struct frhdr { @@ -153,7 +148,6 @@ struct frhdr struct dlci_local { - struct net_device_stats stats; struct net_device *master; struct net_device *slave; struct dlci_conf config; @@ -190,12 +184,10 @@ struct frad_local int buffer; /* current buffer for S508 firmware */ }; -#endif /* __KERNEL__ */ - #endif /* CONFIG_DLCI || CONFIG_DLCI_MODULE */ -#ifdef __KERNEL__ extern void dlci_ioctl_set(int (*hook)(unsigned int, void __user *)); -#endif + 
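Since the if_ether.h hunk above marks print_mac() __deprecated, a brief sketch of the replacement idiom; this assumes the %pM printk extension available in kernels of this generation, and net_dev is an illustrative net_device pointer:

        /* deprecated style */
        DECLARE_MAC_BUF(mac);
        printk(KERN_INFO "%s: MAC %s\n",
               net_dev->name, print_mac(mac, net_dev->dev_addr));

        /* preferred style: printk formats the address directly */
        printk(KERN_INFO "%s: MAC %pM\n", net_dev->name, net_dev->dev_addr);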
+#endif /* __KERNEL__ */ #endif diff --git a/include/linux/if_hippi.h b/include/linux/if_hippi.h index f0f23516bb5..4a7c9940b08 100644 --- a/include/linux/if_hippi.h +++ b/include/linux/if_hippi.h @@ -22,6 +22,7 @@ #ifndef _LINUX_IF_HIPPI_H #define _LINUX_IF_HIPPI_H +#include <linux/types.h> #include <asm/byteorder.h> /* diff --git a/include/linux/if_link.h b/include/linux/if_link.h index f9032c88716..176c5182c51 100644 --- a/include/linux/if_link.h +++ b/include/linux/if_link.h @@ -1,6 +1,7 @@ #ifndef _LINUX_IF_LINK_H #define _LINUX_IF_LINK_H +#include <linux/types.h> #include <linux/netlink.h> /* The struct should be in sync with struct net_device_stats */ diff --git a/include/linux/if_packet.h b/include/linux/if_packet.h index 18db0668065..dea7d6b7cf9 100644 --- a/include/linux/if_packet.h +++ b/include/linux/if_packet.h @@ -46,6 +46,8 @@ struct sockaddr_ll #define PACKET_VERSION 10 #define PACKET_HDRLEN 11 #define PACKET_RESERVE 12 +#define PACKET_TX_RING 13 +#define PACKET_LOSS 14 struct tpacket_stats { @@ -63,14 +65,22 @@ struct tpacket_auxdata __u16 tp_vlan_tci; }; +/* Rx ring - header status */ +#define TP_STATUS_KERNEL 0x0 +#define TP_STATUS_USER 0x1 +#define TP_STATUS_COPY 0x2 +#define TP_STATUS_LOSING 0x4 +#define TP_STATUS_CSUMNOTREADY 0x8 + +/* Tx ring - header status */ +#define TP_STATUS_AVAILABLE 0x0 +#define TP_STATUS_SEND_REQUEST 0x1 +#define TP_STATUS_SENDING 0x2 +#define TP_STATUS_WRONG_FORMAT 0x4 + struct tpacket_hdr { unsigned long tp_status; -#define TP_STATUS_KERNEL 0 -#define TP_STATUS_USER 1 -#define TP_STATUS_COPY 2 -#define TP_STATUS_LOSING 4 -#define TP_STATUS_CSUMNOTREADY 8 unsigned int tp_len; unsigned int tp_snaplen; unsigned short tp_mac; @@ -135,5 +145,6 @@ struct packet_mreq #define PACKET_MR_MULTICAST 0 #define PACKET_MR_PROMISC 1 #define PACKET_MR_ALLMULTI 2 +#define PACKET_MR_UNICAST 3 #endif diff --git a/include/linux/if_ppp.h b/include/linux/if_ppp.h index c3b1f856270..fcef103aa3f 100644 --- a/include/linux/if_ppp.h +++ b/include/linux/if_ppp.h @@ -33,6 +33,7 @@ #ifndef _IF_PPP_H_ #define _IF_PPP_H_ +#include <linux/types.h> #include <linux/compiler.h> /* diff --git a/include/linux/if_pppol2tp.h b/include/linux/if_pppol2tp.h index c7a66882b6d..3a14b088c8e 100644 --- a/include/linux/if_pppol2tp.h +++ b/include/linux/if_pppol2tp.h @@ -26,7 +26,7 @@ */ struct pppol2tp_addr { - pid_t pid; /* pid that owns the fd. + __kernel_pid_t pid; /* pid that owns the fd. 
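/*
 * Illustrative sketch (not from the original patch) of the TX ring status
 * handshake implied by the new PACKET_TX_RING/TP_STATUS_* values above.
 * Ring setup via setsockopt(fd, SOL_PACKET, PACKET_TX_RING, ...) and the
 * next_tx_frame() helper are assumptions omitted here.
 */
struct tpacket_hdr *hdr = next_tx_frame();      /* frame in the mmap'ed ring */

if (hdr->tp_status == TP_STATUS_AVAILABLE) {
        /* ... copy the packet behind the header, set hdr->tp_len ... */
        hdr->tp_status = TP_STATUS_SEND_REQUEST;   /* hand the frame over */
        sendto(fd, NULL, 0, 0, NULL, 0);           /* kick transmission */
}
/*
 * While the frame is in flight the kernel marks it TP_STATUS_SENDING and
 * flips it back to TP_STATUS_AVAILABLE (or TP_STATUS_WRONG_FORMAT on a
 * malformed request) once it is done with it.
 */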
* 0 => current */ int fd; /* FD of UDP socket to use */ diff --git a/include/linux/if_pppox.h b/include/linux/if_pppox.h index 30c88b2245f..90b5fae5d71 100644 --- a/include/linux/if_pppox.h +++ b/include/linux/if_pppox.h @@ -95,16 +95,16 @@ struct pppoe_tag { } __attribute ((packed)); /* Tag identifiers */ -#define PTT_EOL __constant_htons(0x0000) -#define PTT_SRV_NAME __constant_htons(0x0101) -#define PTT_AC_NAME __constant_htons(0x0102) -#define PTT_HOST_UNIQ __constant_htons(0x0103) -#define PTT_AC_COOKIE __constant_htons(0x0104) -#define PTT_VENDOR __constant_htons(0x0105) -#define PTT_RELAY_SID __constant_htons(0x0110) -#define PTT_SRV_ERR __constant_htons(0x0201) -#define PTT_SYS_ERR __constant_htons(0x0202) -#define PTT_GEN_ERR __constant_htons(0x0203) +#define PTT_EOL __cpu_to_be16(0x0000) +#define PTT_SRV_NAME __cpu_to_be16(0x0101) +#define PTT_AC_NAME __cpu_to_be16(0x0102) +#define PTT_HOST_UNIQ __cpu_to_be16(0x0103) +#define PTT_AC_COOKIE __cpu_to_be16(0x0104) +#define PTT_VENDOR __cpu_to_be16(0x0105) +#define PTT_RELAY_SID __cpu_to_be16(0x0110) +#define PTT_SRV_ERR __cpu_to_be16(0x0201) +#define PTT_SYS_ERR __cpu_to_be16(0x0202) +#define PTT_GEN_ERR __cpu_to_be16(0x0203) struct pppoe_hdr { #if defined(__LITTLE_ENDIAN_BITFIELD) diff --git a/include/linux/if_strip.h b/include/linux/if_strip.h index fb5c5c98442..6526a623583 100644 --- a/include/linux/if_strip.h +++ b/include/linux/if_strip.h @@ -18,6 +18,8 @@ #ifndef __LINUX_STRIP_H #define __LINUX_STRIP_H +#include <linux/types.h> + typedef struct { __u8 c[6]; } MetricomAddress; diff --git a/include/linux/if_tr.h b/include/linux/if_tr.h index 5bcec8b2c5e..fc23aeb0f20 100644 --- a/include/linux/if_tr.h +++ b/include/linux/if_tr.h @@ -19,6 +19,7 @@ #ifndef _LINUX_IF_TR_H #define _LINUX_IF_TR_H +#include <linux/types.h> #include <asm/byteorder.h> /* For __be16 */ /* IEEE 802.5 Token-Ring magic constants. The frame sizes omit the preamble diff --git a/include/linux/if_tun.h b/include/linux/if_tun.h index 8529f57ba26..3f5fd523b49 100644 --- a/include/linux/if_tun.h +++ b/include/linux/if_tun.h @@ -46,6 +46,8 @@ #define TUNSETOFFLOAD _IOW('T', 208, unsigned int) #define TUNSETTXFILTER _IOW('T', 209, unsigned int) #define TUNGETIFF _IOR('T', 210, unsigned int) +#define TUNGETSNDBUF _IOR('T', 211, int) +#define TUNSETSNDBUF _IOW('T', 212, int) /* TUNSETIFF ifr flags */ #define IFF_TUN 0x0001 @@ -53,12 +55,14 @@ #define IFF_NO_PI 0x1000 #define IFF_ONE_QUEUE 0x2000 #define IFF_VNET_HDR 0x4000 +#define IFF_TUN_EXCL 0x8000 /* Features for GSO (TUNSETOFFLOAD). */ #define TUN_F_CSUM 0x01 /* You can hand me unchecksummed packets. */ #define TUN_F_TSO4 0x02 /* I can handle TSO for IPv4 packets */ #define TUN_F_TSO6 0x04 /* I can handle TSO for IPv6 packets */ #define TUN_F_TSO_ECN 0x08 /* I can handle TSO with ECN bits. 
*/ +#define TUN_F_UFO 0x10 /* I can handle UFO packets */ /* Protocol info prepended to the packets (when IFF_NO_PI is not set) */ #define TUN_PKT_STRIP 0x0001 diff --git a/include/linux/if_tunnel.h b/include/linux/if_tunnel.h index aeab2cb32a9..5eb9b0f857e 100644 --- a/include/linux/if_tunnel.h +++ b/include/linux/if_tunnel.h @@ -2,7 +2,10 @@ #define _IF_TUNNEL_H_ #include <linux/types.h> + +#ifdef __KERNEL__ #include <linux/ip.h> +#endif #define SIOCGETTUNNEL (SIOCDEVPRIVATE + 0) #define SIOCADDTUNNEL (SIOCDEVPRIVATE + 1) @@ -13,14 +16,14 @@ #define SIOCDELPRL (SIOCDEVPRIVATE + 6) #define SIOCCHGPRL (SIOCDEVPRIVATE + 7) -#define GRE_CSUM __constant_htons(0x8000) -#define GRE_ROUTING __constant_htons(0x4000) -#define GRE_KEY __constant_htons(0x2000) -#define GRE_SEQ __constant_htons(0x1000) -#define GRE_STRICT __constant_htons(0x0800) -#define GRE_REC __constant_htons(0x0700) -#define GRE_FLAGS __constant_htons(0x00F8) -#define GRE_VERSION __constant_htons(0x0007) +#define GRE_CSUM __cpu_to_be16(0x8000) +#define GRE_ROUTING __cpu_to_be16(0x4000) +#define GRE_KEY __cpu_to_be16(0x2000) +#define GRE_SEQ __cpu_to_be16(0x1000) +#define GRE_STRICT __cpu_to_be16(0x0800) +#define GRE_REC __cpu_to_be16(0x0700) +#define GRE_FLAGS __cpu_to_be16(0x00F8) +#define GRE_VERSION __cpu_to_be16(0x0007) struct ip_tunnel_parm { @@ -41,7 +44,7 @@ struct ip_tunnel_prl { __u16 flags; __u16 __reserved; __u32 datalen; - __u32 __reserved2; + __u32 rs_delay; /* data follows */ }; diff --git a/include/linux/if_vlan.h b/include/linux/if_vlan.h index f8ff918c208..7ff9af1d0f0 100644 --- a/include/linux/if_vlan.h +++ b/include/linux/if_vlan.h @@ -118,8 +118,7 @@ extern int vlan_hwaccel_do_receive(struct sk_buff *skb); extern int vlan_gro_receive(struct napi_struct *napi, struct vlan_group *grp, unsigned int vlan_tci, struct sk_buff *skb); extern int vlan_gro_frags(struct napi_struct *napi, struct vlan_group *grp, - unsigned int vlan_tci, - struct napi_gro_fraginfo *info); + unsigned int vlan_tci); #else static inline struct net_device *vlan_dev_real_dev(const struct net_device *dev) @@ -154,8 +153,7 @@ static inline int vlan_gro_receive(struct napi_struct *napi, } static inline int vlan_gro_frags(struct napi_struct *napi, - struct vlan_group *grp, unsigned int vlan_tci, - struct napi_gro_fraginfo *info) + struct vlan_group *grp, unsigned int vlan_tci) { return NET_RX_DROP; } @@ -210,6 +208,7 @@ static inline struct sk_buff *__vlan_put_tag(struct sk_buff *skb, u16 vlan_tci) /* Move the mac addresses to the beginning of the new header. 
*/ memmove(skb->data, skb->data + VLAN_HLEN, 2 * VLAN_ETH_ALEN); + skb->mac_header -= VLAN_HLEN; /* first, the ethernet type */ veth->h_vlan_proto = htons(ETH_P_8021Q); diff --git a/include/linux/igmp.h b/include/linux/igmp.h index f734a0ba069..fe158e0e20e 100644 --- a/include/linux/igmp.h +++ b/include/linux/igmp.h @@ -16,6 +16,7 @@ #ifndef _LINUX_IGMP_H #define _LINUX_IGMP_H +#include <linux/types.h> #include <asm/byteorder.h> /* @@ -232,6 +233,8 @@ extern void ip_mc_init_dev(struct in_device *); extern void ip_mc_destroy_dev(struct in_device *); extern void ip_mc_up(struct in_device *); extern void ip_mc_down(struct in_device *); +extern void ip_mc_unmap(struct in_device *); +extern void ip_mc_remap(struct in_device *); extern void ip_mc_dec_group(struct in_device *in_dev, __be32 addr); extern void ip_mc_inc_group(struct in_device *in_dev, __be32 addr); extern void ip_mc_rejoin_group(struct ip_mc_list *im); diff --git a/include/linux/ima.h b/include/linux/ima.h new file mode 100644 index 00000000000..0e3f2a4c25f --- /dev/null +++ b/include/linux/ima.h @@ -0,0 +1,70 @@ +/* + * Copyright (C) 2008 IBM Corporation + * Author: Mimi Zohar <zohar@us.ibm.com> + * + * This program is free software; you can redistribute it and/or modify + * it under the terms of the GNU General Public License as published by + * the Free Software Foundation, version 2 of the License. + */ + +#ifndef _LINUX_IMA_H +#define _LINUX_IMA_H + +#include <linux/fs.h> +struct linux_binprm; + +#define IMA_COUNT_UPDATE 1 +#define IMA_COUNT_LEAVE 0 + +#ifdef CONFIG_IMA +extern int ima_bprm_check(struct linux_binprm *bprm); +extern int ima_inode_alloc(struct inode *inode); +extern void ima_inode_free(struct inode *inode); +extern int ima_path_check(struct path *path, int mask, int update_counts); +extern void ima_file_free(struct file *file); +extern int ima_file_mmap(struct file *file, unsigned long prot); +extern void ima_counts_get(struct file *file); +extern void ima_counts_put(struct path *path, int mask); + +#else +static inline int ima_bprm_check(struct linux_binprm *bprm) +{ + return 0; +} + +static inline int ima_inode_alloc(struct inode *inode) +{ + return 0; +} + +static inline void ima_inode_free(struct inode *inode) +{ + return; +} + +static inline int ima_path_check(struct path *path, int mask, int update_counts) +{ + return 0; +} + +static inline void ima_file_free(struct file *file) +{ + return; +} + +static inline int ima_file_mmap(struct file *file, unsigned long prot) +{ + return 0; +} + +static inline void ima_counts_get(struct file *file) +{ + return; +} + +static inline void ima_counts_put(struct path *path, int mask) +{ + return; +} +#endif /* CONFIG_IMA_H */ +#endif /* _LINUX_IMA_H */ diff --git a/include/linux/in.h b/include/linux/in.h index d60122a3a08..cf196da04ec 100644 --- a/include/linux/in.h +++ b/include/linux/in.h @@ -107,6 +107,7 @@ struct in_addr { #define MCAST_JOIN_SOURCE_GROUP 46 #define MCAST_LEAVE_SOURCE_GROUP 47 #define MCAST_MSFILTER 48 +#define IP_MULTICAST_ALL 49 #define MCAST_EXCLUDE 0 #define MCAST_INCLUDE 1 diff --git a/include/linux/in6.h b/include/linux/in6.h index bc492048c34..718bf21c575 100644 --- a/include/linux/in6.h +++ b/include/linux/in6.h @@ -44,11 +44,11 @@ struct in6_addr * NOTE: Be aware the IN6ADDR_* constants and in6addr_* externals are defined * in network byte order, not in host byte order as are the IPv4 equivalents */ +#ifdef __KERNEL__ extern const struct in6_addr in6addr_any; #define IN6ADDR_ANY_INIT { { { 0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0 } } } extern const 
struct in6_addr in6addr_loopback; #define IN6ADDR_LOOPBACK_INIT { { { 0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,1 } } } -#ifdef __KERNEL__ extern const struct in6_addr in6addr_linklocal_allnodes; #define IN6ADDR_LINKLOCAL_ALLNODES_INIT \ { { { 0xff,2,0,0,0,0,0,0,0,0,0,0,0,0,0,1 } } } diff --git a/include/linux/inet_diag.h b/include/linux/inet_diag.h index 6e8bc548635..bc8c4902208 100644 --- a/include/linux/inet_diag.h +++ b/include/linux/inet_diag.h @@ -1,6 +1,8 @@ #ifndef _INET_DIAG_H_ #define _INET_DIAG_H_ 1 +#include <linux/types.h> + /* Just some random number */ #define TCPDIAG_GETSOCK 18 #define DCCPDIAG_GETSOCK 19 diff --git a/include/linux/inetdevice.h b/include/linux/inetdevice.h index 06fcdb45106..ad27c7da879 100644 --- a/include/linux/inetdevice.h +++ b/include/linux/inetdevice.h @@ -82,7 +82,7 @@ static inline void ipv4_devconf_setall(struct in_device *in_dev) #define IN_DEV_FORWARD(in_dev) IN_DEV_CONF_GET((in_dev), FORWARDING) #define IN_DEV_MFORWARD(in_dev) IN_DEV_ANDCONF((in_dev), MC_FORWARDING) -#define IN_DEV_RPFILTER(in_dev) IN_DEV_ANDCONF((in_dev), RP_FILTER) +#define IN_DEV_RPFILTER(in_dev) IN_DEV_MAXCONF((in_dev), RP_FILTER) #define IN_DEV_SOURCE_ROUTE(in_dev) IN_DEV_ANDCONF((in_dev), \ ACCEPT_SOURCE_ROUTE) #define IN_DEV_BOOTP_RELAY(in_dev) IN_DEV_ANDCONF((in_dev), BOOTP_RELAY) @@ -108,6 +108,7 @@ static inline void ipv4_devconf_setall(struct in_device *in_dev) #define IN_DEV_ARPFILTER(in_dev) IN_DEV_ORCONF((in_dev), ARPFILTER) #define IN_DEV_ARP_ANNOUNCE(in_dev) IN_DEV_MAXCONF((in_dev), ARP_ANNOUNCE) #define IN_DEV_ARP_IGNORE(in_dev) IN_DEV_MAXCONF((in_dev), ARP_IGNORE) +#define IN_DEV_ARP_NOTIFY(in_dev) IN_DEV_MAXCONF((in_dev), ARP_NOTIFY) struct in_ifaddr { diff --git a/include/linux/init.h b/include/linux/init.h index 68cb0265d00..400adbb4541 100644 --- a/include/linux/init.h +++ b/include/linux/init.h @@ -29,7 +29,7 @@ * sign followed by value, e.g.: * * static int init_variable __initdata = 0; - * static char linux_logo[] __initdata = { 0x32, 0x36, ... }; + * static const char linux_logo[] __initconst = { 0x32, 0x36, ... }; * * Don't forget to initialize data not at file scope, i.e. within a function, * as gcc otherwise puts the data into the bss section and not into the init @@ -60,14 +60,6 @@ #define __refdata __section(.ref.data) #define __refconst __section(.ref.rodata) -/* backward compatibility note - * A few places hardcode the old section names: - * .text.init.refok - * .data.init.refok - * .exit.text.refok - * They should be converted to use the defines from this file - */ - /* compatibility defines */ #define __init_refok __ref #define __initdata_refok __refdata @@ -111,8 +103,8 @@ #define __INIT .section ".init.text","ax" #define __FINIT .previous -#define __INITDATA .section ".init.data","aw" -#define __INITRODATA .section ".init.rodata","a" +#define __INITDATA .section ".init.data","aw",%progbits +#define __INITRODATA .section ".init.rodata","a",%progbits #define __FINITDATA .previous #define __DEVINIT .section ".devinit.text", "ax" @@ -142,6 +134,9 @@ typedef void (*exitcall_t)(void); extern initcall_t __con_initcall_start[], __con_initcall_end[]; extern initcall_t __security_initcall_start[], __security_initcall_end[]; +/* Used for contructor calls. */ +typedef void (*ctor_fn_t)(void); + /* Defined in init/main.c */ extern int do_one_initcall(initcall_t fn); extern char __initdata boot_command_line[]; @@ -231,7 +226,8 @@ struct obs_kernel_param { * obs_kernel_param "array" too far apart in .init.setup. 
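/*
 * Illustrative sketch (not from the original patch) of the __initdata and
 * __initconst annotations described in the init.h comment above, combined
 * with a __setup() boot parameter.  The option name and variables are
 * made up for the example.
 */
static bool use_foo __initdata;
static const char foo_banner[] __initconst = "foo support enabled at boot";

static int __init foo_setup(char *str)
{
        use_foo = true;
        pr_info("%s\n", foo_banner);
        return 1;               /* option consumed */
}
__setup("foo", foo_setup);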
*/ #define __setup_param(str, unique_id, fn, early) \ - static char __setup_str_##unique_id[] __initdata __aligned(1) = str; \ + static const char __setup_str_##unique_id[] __initconst \ + __aligned(1) = str; \ static struct obs_kernel_param __setup_##unique_id \ __used __section(.init.setup) \ __attribute__((aligned((sizeof(long))))) \ @@ -247,6 +243,7 @@ struct obs_kernel_param { /* Relies on boot_command_line being set */ void __init parse_early_param(void); +void __init parse_early_options(char *cmdline); #endif /* __ASSEMBLY__ */ /** @@ -308,9 +305,17 @@ void __init parse_early_param(void); #ifdef CONFIG_MODULES #define __init_or_module #define __initdata_or_module +#define __initconst_or_module +#define __INIT_OR_MODULE .text +#define __INITDATA_OR_MODULE .data +#define __INITRODATA_OR_MODULE .section ".rodata","a",%progbits #else #define __init_or_module __init #define __initdata_or_module __initdata +#define __initconst_or_module __initconst +#define __INIT_OR_MODULE __INIT +#define __INITDATA_OR_MODULE __INITDATA +#define __INITRODATA_OR_MODULE __INITRODATA #endif /*CONFIG_MODULES*/ /* Functions marked as __devexit may be discarded at kernel link time, depending diff --git a/include/linux/init_task.h b/include/linux/init_task.h index 2f3c2d4ef73..9e7f2e8fc66 100644 --- a/include/linux/init_task.h +++ b/include/linux/init_task.h @@ -5,6 +5,7 @@ #include <linux/irqflags.h> #include <linux/utsname.h> #include <linux/lockdep.h> +#include <linux/ftrace.h> #include <linux/ipc.h> #include <linux/pid_namespace.h> #include <linux/user_namespace.h> @@ -14,31 +15,6 @@ extern struct files_struct init_files; extern struct fs_struct init_fs; -#define INIT_KIOCTX(name, which_mm) \ -{ \ - .users = ATOMIC_INIT(1), \ - .dead = 0, \ - .mm = &which_mm, \ - .user_id = 0, \ - .next = NULL, \ - .wait = __WAIT_QUEUE_HEAD_INITIALIZER(name.wait), \ - .ctx_lock = __SPIN_LOCK_UNLOCKED(name.ctx_lock), \ - .reqs_active = 0U, \ - .max_reqs = ~0U, \ -} - -#define INIT_MM(name) \ -{ \ - .mm_rb = RB_ROOT, \ - .pgd = swapper_pg_dir, \ - .mm_users = ATOMIC_INIT(2), \ - .mm_count = ATOMIC_INIT(1), \ - .mmap_sem = __RWSEM_INITIALIZER(name.mmap_sem), \ - .page_table_lock = __SPIN_LOCK_UNLOCKED(name.page_table_lock), \ - .mmlist = LIST_HEAD_INIT(name.mmlist), \ - .cpu_vm_mask = CPU_MASK_ALL, \ -} - #define INIT_SIGNALS(sig) { \ .count = ATOMIC_INIT(1), \ .wait_chldexit = __WAIT_QUEUE_HEAD_INITIALIZER(sig.wait_chldexit),\ @@ -48,6 +24,11 @@ extern struct fs_struct init_fs; .posix_timers = LIST_HEAD_INIT(sig.posix_timers), \ .cpu_timers = INIT_CPU_TIMERS(sig.cpu_timers), \ .rlim = INIT_RLIMITS, \ + .cputimer = { \ + .cputime = INIT_CPUTIME, \ + .running = 0, \ + .lock = __SPIN_LOCK_UNLOCKED(sig.cputimer.lock), \ + }, \ } extern struct nsproxy init_nsproxy; @@ -113,8 +94,27 @@ extern struct group_info init_groups; # define CAP_INIT_BSET CAP_INIT_EFF_SET #endif +#ifdef CONFIG_TREE_PREEMPT_RCU +#define INIT_TASK_RCU_PREEMPT(tsk) \ + .rcu_read_lock_nesting = 0, \ + .rcu_read_unlock_special = 0, \ + .rcu_blocked_node = NULL, \ + .rcu_node_entry = LIST_HEAD_INIT(tsk.rcu_node_entry), +#else +#define INIT_TASK_RCU_PREEMPT(tsk) +#endif + extern struct cred init_cred; +#ifdef CONFIG_PERF_COUNTERS +# define INIT_PERF_COUNTERS(tsk) \ + .perf_counter_mutex = \ + __MUTEX_INITIALIZER(tsk.perf_counter_mutex), \ + .perf_counter_list = LIST_HEAD_INIT(tsk.perf_counter_list), +#else +# define INIT_PERF_COUNTERS(tsk) +#endif + /* * INIT_TASK is used to set up the first task table, touch at * your own risk!. 
Base=0, limit=0x1fffff (=2MB) @@ -142,6 +142,7 @@ extern struct cred init_cred; .nr_cpus_allowed = NR_CPUS, \ }, \ .tasks = LIST_HEAD_INIT(tsk.tasks), \ + .pushable_tasks = PLIST_NODE_INIT(tsk.pushable_tasks, MAX_PRIO), \ .ptraced = LIST_HEAD_INIT(tsk.ptraced), \ .ptrace_entry = LIST_HEAD_INIT(tsk.ptrace_entry), \ .real_parent = &tsk, \ @@ -151,8 +152,8 @@ extern struct cred init_cred; .group_leader = &tsk, \ .real_cred = &init_cred, \ .cred = &init_cred, \ - .cred_exec_mutex = \ - __MUTEX_INITIALIZER(tsk.cred_exec_mutex), \ + .cred_guard_mutex = \ + __MUTEX_INITIALIZER(tsk.cred_guard_mutex), \ .comm = "swapper", \ .thread = INIT_THREAD, \ .fs = &init_fs, \ @@ -177,8 +178,12 @@ extern struct cred init_cred; }, \ .dirties = INIT_PROP_LOCAL_SINGLE(dirties), \ INIT_IDS \ + INIT_PERF_COUNTERS(tsk) \ INIT_TRACE_IRQFLAGS \ INIT_LOCKDEP \ + INIT_FTRACE_GRAPH \ + INIT_TRACE_RECURSION \ + INIT_TASK_RCU_PREEMPT(tsk) \ } @@ -189,5 +194,8 @@ extern struct cred init_cred; LIST_HEAD_INIT(cpu_timers[2]), \ } +/* Attach to the init_task data structure for proper alignment */ +#define __init_task_data __attribute__((__section__(".data.init_task"))) + #endif diff --git a/include/linux/input.h b/include/linux/input.h index 1249a0c20a3..8b3bc3e0d14 100644 --- a/include/linux/input.h +++ b/include/linux/input.h @@ -53,6 +53,7 @@ struct input_absinfo { __s32 maximum; __s32 fuzz; __s32 flat; + __s32 resolution; }; #define EVIOCGVERSION _IOR('E', 0x01, int) /* get driver version */ @@ -106,6 +107,7 @@ struct input_absinfo { #define SYN_REPORT 0 #define SYN_CONFIG 1 +#define SYN_MT_REPORT 2 /* * Keys and buttons @@ -445,6 +447,7 @@ struct input_absinfo { #define BTN_STYLUS2 0x14c #define BTN_TOOL_DOUBLETAP 0x14d #define BTN_TOOL_TRIPLETAP 0x14e +#define BTN_TOOL_QUADTAP 0x14f /* Four fingers on trackpad */ #define BTN_WHEEL 0x150 #define BTN_GEAR_DOWN 0x150 @@ -644,6 +647,18 @@ struct input_absinfo { #define ABS_TOOL_WIDTH 0x1c #define ABS_VOLUME 0x20 #define ABS_MISC 0x28 + +#define ABS_MT_TOUCH_MAJOR 0x30 /* Major axis of touching ellipse */ +#define ABS_MT_TOUCH_MINOR 0x31 /* Minor axis (omit if circular) */ +#define ABS_MT_WIDTH_MAJOR 0x32 /* Major axis of approaching ellipse */ +#define ABS_MT_WIDTH_MINOR 0x33 /* Minor axis (omit if circular) */ +#define ABS_MT_ORIENTATION 0x34 /* Ellipse orientation */ +#define ABS_MT_POSITION_X 0x35 /* Center X ellipse position */ +#define ABS_MT_POSITION_Y 0x36 /* Center Y ellipse position */ +#define ABS_MT_TOOL_TYPE 0x37 /* Type of touching device */ +#define ABS_MT_BLOB_ID 0x38 /* Group a set of packets as a blob */ +#define ABS_MT_TRACKING_ID 0x39 /* Unique ID of initiated contact */ + #define ABS_MAX 0x3f #define ABS_CNT (ABS_MAX+1) @@ -661,6 +676,7 @@ struct input_absinfo { #define SW_DOCK 0x05 /* set = plugged into dock */ #define SW_LINEOUT_INSERT 0x06 /* set = inserted */ #define SW_JACK_PHYSICAL_INSERT 0x07 /* set = mechanical switch set */ +#define SW_VIDEOOUT_INSERT 0x08 /* set = inserted */ #define SW_MAX 0x0f #define SW_CNT (SW_MAX+1) @@ -742,6 +758,12 @@ struct input_absinfo { #define BUS_ATARI 0x1B /* + * MT_TOOL types + */ +#define MT_TOOL_FINGER 0 +#define MT_TOOL_PEN 1 + +/* * Values describing the status of a force-feedback effect */ #define FF_STATUS_STOPPED 0x00 @@ -1088,6 +1110,7 @@ struct input_dev { int absmin[ABS_MAX + 1]; int absfuzz[ABS_MAX + 1]; int absflat[ABS_MAX + 1]; + int absres[ABS_MAX + 1]; int (*open)(struct input_dev *dev); void (*close)(struct input_dev *dev); @@ -1310,6 +1333,11 @@ static inline void input_sync(struct input_dev *dev) 
input_event(dev, EV_SYN, SYN_REPORT, 0); } +static inline void input_mt_sync(struct input_dev *dev) +{ + input_event(dev, EV_SYN, SYN_MT_REPORT, 0); +} + void input_set_capability(struct input_dev *dev, unsigned int type, unsigned int code); static inline void input_set_abs_params(struct input_dev *dev, int axis, int min, int max, int fuzz, int flat) diff --git a/include/linux/input/matrix_keypad.h b/include/linux/input/matrix_keypad.h new file mode 100644 index 00000000000..15d5903af2d --- /dev/null +++ b/include/linux/input/matrix_keypad.h @@ -0,0 +1,66 @@ +#ifndef _MATRIX_KEYPAD_H +#define _MATRIX_KEYPAD_H + +#include <linux/types.h> +#include <linux/input.h> + +#define MATRIX_MAX_ROWS 16 +#define MATRIX_MAX_COLS 16 + +#define KEY(row, col, val) ((((row) & (MATRIX_MAX_ROWS - 1)) << 24) |\ + (((col) & (MATRIX_MAX_COLS - 1)) << 16) |\ + (val & 0xffff)) + +#define KEY_ROW(k) (((k) >> 24) & 0xff) +#define KEY_COL(k) (((k) >> 16) & 0xff) +#define KEY_VAL(k) ((k) & 0xffff) + +#define MATRIX_SCAN_CODE(row, col, row_shift) (((row) << (row_shift)) + (col)) + +/** + * struct matrix_keymap_data - keymap for matrix keyboards + * @keymap: pointer to array of uint32 values encoded with KEY() macro + * representing keymap + * @keymap_size: number of entries (initialized) in this keymap + * + * This structure is supposed to be used by platform code to supply + * keymaps to drivers that implement matrix-like keypads/keyboards. + */ +struct matrix_keymap_data { + const uint32_t *keymap; + unsigned int keymap_size; +}; + +/** + * struct matrix_keypad_platform_data - platform-dependent keypad data + * @keymap_data: pointer to &matrix_keymap_data + * @row_gpios: pointer to array of gpio numbers representing rows + * @col_gpios: pointer to array of gpio numbers reporesenting colums + * @num_row_gpios: actual number of row gpios used by device + * @num_col_gpios: actual number of col gpios used by device + * @col_scan_delay_us: delay, measured in microseconds, that is + * needed before we can keypad after activating column gpio + * @debounce_ms: debounce interval in milliseconds + * + * This structure represents platform-specific data that use used by + * matrix_keypad driver to perform proper initialization. 
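/*
 * Illustrative board-code sketch (not from the original patch) showing how
 * the KEY() encoding and struct matrix_keymap_data above are intended to
 * be filled in; the particular keys are made up.
 */
static const uint32_t board_keymap[] = {
        KEY(0, 0, KEY_LEFT),    /* row 0, column 0 */
        KEY(0, 1, KEY_RIGHT),
        KEY(1, 0, KEY_ENTER),
        KEY(1, 1, KEY_ESC),
};

static const struct matrix_keymap_data board_keymap_data = {
        .keymap         = board_keymap,
        .keymap_size    = ARRAY_SIZE(board_keymap),
};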
+ */ +struct matrix_keypad_platform_data { + const struct matrix_keymap_data *keymap_data; + + const unsigned int *row_gpios; + const unsigned int *col_gpios; + + unsigned int num_row_gpios; + unsigned int num_col_gpios; + + unsigned int col_scan_delay_us; + + /* key debounce interval in milli-second */ + unsigned int debounce_ms; + + bool active_low; + bool wakeup; +}; + +#endif /* _MATRIX_KEYPAD_H */ diff --git a/include/linux/intel-iommu.h b/include/linux/intel-iommu.h index c4f6c101dbc..482dc91fd53 100644 --- a/include/linux/intel-iommu.h +++ b/include/linux/intel-iommu.h @@ -53,6 +53,7 @@ #define DMAR_PHMLIMIT_REG 0x78 /* pmrr high limit */ #define DMAR_IQH_REG 0x80 /* Invalidation queue head register */ #define DMAR_IQT_REG 0x88 /* Invalidation queue tail register */ +#define DMAR_IQ_SHIFT 4 /* Invalidation queue head/tail shift */ #define DMAR_IQA_REG 0x90 /* Invalidation queue addr register */ #define DMAR_ICS_REG 0x98 /* Invalidation complete status register */ #define DMAR_IRTA_REG 0xb8 /* Interrupt remapping table addr register */ @@ -120,10 +121,12 @@ static inline void dmar_writeq(void __iomem *addr, u64 val) (ecap_iotlb_offset(e) + ecap_niotlb_iunits(e) * 16) #define ecap_coherent(e) ((e) & 0x1) #define ecap_qis(e) ((e) & 0x2) +#define ecap_pass_through(e) ((e >> 6) & 0x1) #define ecap_eim_support(e) ((e >> 4) & 0x1) #define ecap_ir_support(e) ((e >> 3) & 0x1) +#define ecap_dev_iotlb_support(e) (((e) >> 2) & 0x1) #define ecap_max_handle_mask(e) ((e >> 20) & 0xf) - +#define ecap_sc_support(e) ((e >> 7) & 0x1) /* Snooping Control */ /* IOTLB_REG */ #define DMA_TLB_FLUSH_GRANU_OFFSET 60 @@ -164,6 +167,7 @@ static inline void dmar_writeq(void __iomem *addr, u64 val) #define DMA_GCMD_QIE (((u32)1) << 26) #define DMA_GCMD_SIRTP (((u32)1) << 24) #define DMA_GCMD_IRE (((u32) 1) << 25) +#define DMA_GCMD_CFI (((u32) 1) << 23) /* GSTS_REG */ #define DMA_GSTS_TES (((u32)1) << 31) @@ -174,6 +178,7 @@ static inline void dmar_writeq(void __iomem *addr, u64 val) #define DMA_GSTS_QIES (((u32)1) << 26) #define DMA_GSTS_IRTPS (((u32)1) << 24) #define DMA_GSTS_IRES (((u32)1) << 25) +#define DMA_GSTS_CFIS (((u32)1) << 23) /* CCMD_REG */ #define DMA_CCMD_ICC (((u64)1) << 63) @@ -194,6 +199,9 @@ static inline void dmar_writeq(void __iomem *addr, u64 val) /* FSTS_REG */ #define DMA_FSTS_PPF ((u32)2) #define DMA_FSTS_PFO ((u32)1) +#define DMA_FSTS_IQE (1 << 4) +#define DMA_FSTS_ICE (1 << 5) +#define DMA_FSTS_ITE (1 << 6) #define dma_fsts_fault_record_index(s) (((s) >> 8) & 0xff) /* FRCD_REG, 32 bits access */ @@ -222,7 +230,8 @@ do { \ enum { QI_FREE, QI_IN_USE, - QI_DONE + QI_DONE, + QI_ABORT }; #define QI_CC_TYPE 0x1 @@ -251,6 +260,12 @@ enum { #define QI_CC_DID(did) (((u64)did) << 16) #define QI_CC_GRAN(gran) (((u64)gran) >> (DMA_CCMD_INVL_GRANU_OFFSET-4)) +#define QI_DEV_IOTLB_SID(sid) ((u64)((sid) & 0xffff) << 32) +#define QI_DEV_IOTLB_QDEP(qdep) (((qdep) & 0x1f) << 16) +#define QI_DEV_IOTLB_ADDR(addr) ((u64)(addr) & VTD_PAGE_MASK) +#define QI_DEV_IOTLB_SIZE 1 +#define QI_DEV_IOTLB_MAX_INVS 32 + struct qi_desc { u64 low, high; }; @@ -277,10 +292,18 @@ struct ir_table { #endif struct iommu_flush { - int (*flush_context)(struct intel_iommu *iommu, u16 did, u16 sid, u8 fm, - u64 type, int non_present_entry_flush); - int (*flush_iotlb)(struct intel_iommu *iommu, u16 did, u64 addr, - unsigned int size_order, u64 type, int non_present_entry_flush); + void (*flush_context)(struct intel_iommu *iommu, u16 did, u16 sid, + u8 fm, u64 type); + void (*flush_iotlb)(struct intel_iommu *iommu, u16 did, u64 
addr, + unsigned int size_order, u64 type); +}; + +enum { + SR_DMAR_FECTL_REG, + SR_DMAR_FEDATA_REG, + SR_DMAR_FEADDR_REG, + SR_DMAR_FEUADDR_REG, + MAX_SR_DMAR_REGS }; struct intel_iommu { @@ -291,6 +314,9 @@ struct intel_iommu { spinlock_t register_lock; /* protect register handling */ int seq_id; /* sequence id of the iommu */ int agaw; /* agaw of this iommu */ + int msagaw; /* max sagaw of this iommu */ + unsigned int irq; + unsigned char name[13]; /* Device Name */ #ifdef CONFIG_DMAR unsigned long *domain_ids; /* bitmap of domains */ @@ -298,11 +324,11 @@ struct intel_iommu { spinlock_t lock; /* protect context, domain ids */ struct root_entry *root_entry; /* virtual address */ - unsigned int irq; - unsigned char name[7]; /* Device Name */ struct iommu_flush flush; #endif struct q_inval *qi; /* Queued invalidation info */ + u32 *iommu_state; /* Store iommu states between suspend and resume.*/ + #ifdef CONFIG_INTR_REMAP struct ir_table *ir_table; /* Interrupt remapping info */ #endif @@ -316,25 +342,22 @@ static inline void __iommu_flush_cache( } extern struct dmar_drhd_unit * dmar_find_matched_drhd_unit(struct pci_dev *dev); +extern int dmar_find_matched_atsr_unit(struct pci_dev *dev); extern int alloc_iommu(struct dmar_drhd_unit *drhd); extern void free_iommu(struct intel_iommu *iommu); extern int dmar_enable_qi(struct intel_iommu *iommu); +extern void dmar_disable_qi(struct intel_iommu *iommu); +extern int dmar_reenable_qi(struct intel_iommu *iommu); extern void qi_global_iec(struct intel_iommu *iommu); -extern int qi_flush_context(struct intel_iommu *iommu, u16 did, u16 sid, - u8 fm, u64 type, int non_present_entry_flush); -extern int qi_flush_iotlb(struct intel_iommu *iommu, u16 did, u64 addr, - unsigned int size_order, u64 type, - int non_present_entry_flush); - -extern void qi_submit_sync(struct qi_desc *desc, struct intel_iommu *iommu); +extern void qi_flush_context(struct intel_iommu *iommu, u16 did, u16 sid, + u8 fm, u64 type); +extern void qi_flush_iotlb(struct intel_iommu *iommu, u16 did, u64 addr, + unsigned int size_order, u64 type); +extern void qi_flush_dev_iotlb(struct intel_iommu *iommu, u16 sid, u16 qdep, + u64 addr, unsigned mask); -extern void *intel_alloc_coherent(struct device *, size_t, dma_addr_t *, gfp_t); -extern void intel_free_coherent(struct device *, size_t, void *, dma_addr_t); -extern dma_addr_t intel_map_single(struct device *, phys_addr_t, size_t, int); -extern void intel_unmap_single(struct device *, dma_addr_t, size_t, int); -extern int intel_map_sg(struct device *, struct scatterlist *, int, int); -extern void intel_unmap_sg(struct device *, struct scatterlist *, int, int); +extern int qi_submit_sync(struct qi_desc *desc, struct intel_iommu *iommu); #endif diff --git a/include/linux/interrupt.h b/include/linux/interrupt.h index 9127f6b51a3..1ac57e522a1 100644 --- a/include/linux/interrupt.h +++ b/include/linux/interrupt.h @@ -14,6 +14,7 @@ #include <linux/irqflags.h> #include <linux/smp.h> #include <linux/percpu.h> +#include <linux/hrtimer.h> #include <asm/atomic.h> #include <asm/ptrace.h> @@ -49,6 +50,9 @@ * IRQF_IRQPOLL - Interrupt is used for polling (only the interrupt that is * registered first in an shared interrupt is considered for * performance reasons) + * IRQF_ONESHOT - Interrupt is not reenabled after the hardirq handler finished. + * Used by threaded interrupts which need to keep the + * irq line disabled until the threaded handler has been run. 
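/*
 * Illustrative driver sketch (not from the original patch) of the threaded
 * interrupt support added in this interrupt.h hunk: a fast hardirq check
 * plus a sleepable thread handler, with IRQF_ONESHOT keeping the line
 * masked until the thread is done.  struct my_chip and its helpers are
 * assumptions made up for the example.
 */
static irqreturn_t my_chip_quick_check(int irq, void *dev_id)
{
        struct my_chip *chip = dev_id;

        if (!my_chip_irq_pending(chip))
                return IRQ_NONE;                /* not ours on a shared line */
        return IRQ_WAKE_THREAD;                 /* run the thread handler */
}

static irqreturn_t my_chip_thread_fn(int irq, void *dev_id)
{
        struct my_chip *chip = dev_id;

        my_chip_handle_events(chip);            /* may sleep, e.g. I2C access */
        return IRQ_HANDLED;
}

/* in probe(): */
ret = request_threaded_irq(chip->irq, my_chip_quick_check, my_chip_thread_fn,
                           IRQF_ONESHOT, "my-chip", chip);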
*/ #define IRQF_DISABLED 0x00000020 #define IRQF_SAMPLE_RANDOM 0x00000040 @@ -58,9 +62,38 @@ #define IRQF_PERCPU 0x00000400 #define IRQF_NOBALANCING 0x00000800 #define IRQF_IRQPOLL 0x00001000 +#define IRQF_ONESHOT 0x00002000 + +/* + * Bits used by threaded handlers: + * IRQTF_RUNTHREAD - signals that the interrupt handler thread should run + * IRQTF_DIED - handler thread died + * IRQTF_WARNED - warning "IRQ_WAKE_THREAD w/o thread_fn" has been printed + * IRQTF_AFFINITY - irq thread is requested to adjust affinity + */ +enum { + IRQTF_RUNTHREAD, + IRQTF_DIED, + IRQTF_WARNED, + IRQTF_AFFINITY, +}; typedef irqreturn_t (*irq_handler_t)(int, void *); +/** + * struct irqaction - per interrupt action descriptor + * @handler: interrupt handler function + * @flags: flags (see IRQF_* above) + * @mask: no comment as it is useless and about to be removed + * @name: name of the device + * @dev_id: cookie to identify the device + * @next: pointer to the next irqaction for shared interrupts + * @irq: interrupt number + * @dir: pointer to the proc/irq/NN/name entry + * @thread_fn: interupt handler function for threaded interrupts + * @thread: thread pointer for threaded interrupts + * @thread_flags: flags related to @thread + */ struct irqaction { irq_handler_t handler; unsigned long flags; @@ -70,18 +103,68 @@ struct irqaction { struct irqaction *next; int irq; struct proc_dir_entry *dir; + irq_handler_t thread_fn; + struct task_struct *thread; + unsigned long thread_flags; }; extern irqreturn_t no_action(int cpl, void *dev_id); -extern int __must_check request_irq(unsigned int, irq_handler_t handler, - unsigned long, const char *, void *); + +#ifdef CONFIG_GENERIC_HARDIRQS +extern int __must_check +request_threaded_irq(unsigned int irq, irq_handler_t handler, + irq_handler_t thread_fn, + unsigned long flags, const char *name, void *dev); + +static inline int __must_check +request_irq(unsigned int irq, irq_handler_t handler, unsigned long flags, + const char *name, void *dev) +{ + return request_threaded_irq(irq, handler, NULL, flags, name, dev); +} + +extern void exit_irq_thread(void); +#else + +extern int __must_check +request_irq(unsigned int irq, irq_handler_t handler, unsigned long flags, + const char *name, void *dev); + +/* + * Special function to avoid ifdeffery in kernel/irq/devres.c which + * gets magically built by GENERIC_HARDIRQS=n architectures (sparc, + * m68k). 
I really love these $@%#!* obvious Makefile references: + * ../../../kernel/irq/devres.o + */ +static inline int __must_check +request_threaded_irq(unsigned int irq, irq_handler_t handler, + irq_handler_t thread_fn, + unsigned long flags, const char *name, void *dev) +{ + return request_irq(irq, handler, flags, name, dev); +} + +static inline void exit_irq_thread(void) { } +#endif + extern void free_irq(unsigned int, void *); struct device; -extern int __must_check devm_request_irq(struct device *dev, unsigned int irq, - irq_handler_t handler, unsigned long irqflags, - const char *devname, void *dev_id); +extern int __must_check +devm_request_threaded_irq(struct device *dev, unsigned int irq, + irq_handler_t handler, irq_handler_t thread_fn, + unsigned long irqflags, const char *devname, + void *dev_id); + +static inline int __must_check +devm_request_irq(struct device *dev, unsigned int irq, irq_handler_t handler, + unsigned long irqflags, const char *devname, void *dev_id) +{ + return devm_request_threaded_irq(dev, irq, handler, NULL, irqflags, + devname, dev_id); +} + extern void devm_free_irq(struct device *dev, unsigned int irq, void *dev_id); /* @@ -106,6 +189,21 @@ extern void disable_irq_nosync(unsigned int irq); extern void disable_irq(unsigned int irq); extern void enable_irq(unsigned int irq); +/* The following three functions are for the core kernel use only. */ +#ifdef CONFIG_GENERIC_HARDIRQS +extern void suspend_device_irqs(void); +extern void resume_device_irqs(void); +#ifdef CONFIG_PM_SLEEP +extern int check_wakeup_irqs(void); +#else +static inline int check_wakeup_irqs(void) { return 0; } +#endif +#else +static inline void suspend_device_irqs(void) { }; +static inline void resume_device_irqs(void) { }; +static inline int check_wakeup_irqs(void) { return 0; } +#endif + #if defined(CONFIG_SMP) && defined(CONFIG_GENERIC_HARDIRQS) extern cpumask_var_t irq_default_affinity; @@ -258,6 +356,11 @@ enum NR_SOFTIRQS }; +/* map softirq index to softirq name. update 'softirq_to_name' in + * kernel/softirq.c when adding a new softirq. + */ +extern char *softirq_to_name[NR_SOFTIRQS]; + /* softirq mask and active fields moved to irq_cpustat_t in * asm/hardirq.h to get better cache usage. KAO */ @@ -274,6 +377,7 @@ extern void softirq_init(void); #define __raise_softirq_irqoff(nr) do { or_softirq_pending(1UL << (nr)); } while (0) extern void raise_softirq_irqoff(unsigned int nr); extern void raise_softirq(unsigned int nr); +extern void wakeup_softirqd(void); /* This is the worklist that queues up per-cpu softirq work. * @@ -375,6 +479,20 @@ static inline void tasklet_hi_schedule(struct tasklet_struct *t) __tasklet_hi_schedule(t); } +extern void __tasklet_hi_schedule_first(struct tasklet_struct *t); + +/* + * This version avoids touching any other tasklets. Needed for kmemcheck + * in order not to take any page faults while enqueueing this tasklet; + * consider VERY carefully whether you really need this or + * tasklet_hi_schedule()... 
+ */ +static inline void tasklet_hi_schedule_first(struct tasklet_struct *t) +{ + if (!test_and_set_bit(TASKLET_STATE_SCHED, &t->state)) + __tasklet_hi_schedule_first(t); +} + static inline void tasklet_disable_nosync(struct tasklet_struct *t) { @@ -406,6 +524,31 @@ extern void tasklet_kill_immediate(struct tasklet_struct *t, unsigned int cpu); extern void tasklet_init(struct tasklet_struct *t, void (*func)(unsigned long), unsigned long data); +struct tasklet_hrtimer { + struct hrtimer timer; + struct tasklet_struct tasklet; + enum hrtimer_restart (*function)(struct hrtimer *); +}; + +extern void +tasklet_hrtimer_init(struct tasklet_hrtimer *ttimer, + enum hrtimer_restart (*function)(struct hrtimer *), + clockid_t which_clock, enum hrtimer_mode mode); + +static inline +int tasklet_hrtimer_start(struct tasklet_hrtimer *ttimer, ktime_t time, + const enum hrtimer_mode mode) +{ + return hrtimer_start(&ttimer->timer, time, mode); +} + +static inline +void tasklet_hrtimer_cancel(struct tasklet_hrtimer *ttimer) +{ + hrtimer_cancel(&ttimer->timer); + tasklet_kill(&ttimer->tasklet); +} + /* * Autoprobing for irqs: * @@ -462,12 +605,19 @@ static inline void init_irq_proc(void) } #endif +#if defined(CONFIG_GENERIC_HARDIRQS) && defined(CONFIG_DEBUG_SHIRQ) +extern void debug_poll_all_shared_irqs(void); +#else +static inline void debug_poll_all_shared_irqs(void) { } +#endif + int show_interrupts(struct seq_file *p, void *v); struct irq_desc; extern int early_irq_init(void); +extern int arch_probe_nr_irqs(void); extern int arch_early_irq_init(void); -extern int arch_init_chip_data(struct irq_desc *desc, int cpu); +extern int arch_init_chip_data(struct irq_desc *desc, int node); #endif diff --git a/include/linux/io-mapping.h b/include/linux/io-mapping.h index 82df31726a5..0adb0f91568 100644 --- a/include/linux/io-mapping.h +++ b/include/linux/io-mapping.h @@ -30,11 +30,14 @@ * See Documentation/io_mapping.txt */ -/* this struct isn't actually defined anywhere */ -struct io_mapping; - #ifdef CONFIG_HAVE_ATOMIC_IOMAP +struct io_mapping { + resource_size_t base; + unsigned long size; + pgprot_t prot; +}; + /* * For small address space machines, mapping large objects * into the kernel virtual space isn't practical. 
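/*
 * Illustrative sketch (not from the original patch) for the new
 * tasklet_hrtimer helpers in the interrupt.h hunk above; the callback and
 * the 2 ms timeout are made up.
 */
static struct tasklet_hrtimer my_ttimer;

static enum hrtimer_restart my_timeout(struct hrtimer *timer)
{
        /* runs from the embedded tasklet, i.e. in softirq context */
        return HRTIMER_NORESTART;
}

/* setup, arm, and teardown */
tasklet_hrtimer_init(&my_ttimer, my_timeout, CLOCK_MONOTONIC, HRTIMER_MODE_REL);
tasklet_hrtimer_start(&my_ttimer, ktime_set(0, 2 * NSEC_PER_MSEC),
                      HRTIMER_MODE_REL);
tasklet_hrtimer_cancel(&my_ttimer);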
Where @@ -43,23 +46,40 @@ struct io_mapping; */ static inline struct io_mapping * -io_mapping_create_wc(unsigned long base, unsigned long size) +io_mapping_create_wc(resource_size_t base, unsigned long size) { - return (struct io_mapping *) base; + struct io_mapping *iomap; + + if (!is_io_mapping_possible(base, size)) + return NULL; + + iomap = kmalloc(sizeof(*iomap), GFP_KERNEL); + if (!iomap) + return NULL; + + iomap->base = base; + iomap->size = size; + iomap->prot = pgprot_writecombine(__pgprot(__PAGE_KERNEL)); + return iomap; } static inline void io_mapping_free(struct io_mapping *mapping) { + kfree(mapping); } /* Atomic map/unmap */ static inline void * io_mapping_map_atomic_wc(struct io_mapping *mapping, unsigned long offset) { - offset += (unsigned long) mapping; - return iomap_atomic_prot_pfn(offset >> PAGE_SHIFT, KM_USER0, - __pgprot(__PAGE_KERNEL_WC)); + resource_size_t phys_addr; + unsigned long pfn; + + BUG_ON(offset >= mapping->size); + phys_addr = mapping->base + offset; + pfn = (unsigned long) (phys_addr >> PAGE_SHIFT); + return iomap_atomic_prot_pfn(pfn, KM_USER0, mapping->prot); } static inline void @@ -71,8 +91,12 @@ io_mapping_unmap_atomic(void *vaddr) static inline void * io_mapping_map_wc(struct io_mapping *mapping, unsigned long offset) { - offset += (unsigned long) mapping; - return ioremap_wc(offset, PAGE_SIZE); + resource_size_t phys_addr; + + BUG_ON(offset >= mapping->size); + phys_addr = mapping->base + offset; + + return ioremap_wc(phys_addr, PAGE_SIZE); } static inline void @@ -83,9 +107,12 @@ io_mapping_unmap(void *vaddr) #else +/* this struct isn't actually defined anywhere */ +struct io_mapping; + /* Create the io_mapping object*/ static inline struct io_mapping * -io_mapping_create_wc(unsigned long base, unsigned long size) +io_mapping_create_wc(resource_size_t base, unsigned long size) { return (struct io_mapping *) ioremap_wc(base, size); } diff --git a/include/linux/iocontext.h b/include/linux/iocontext.h index 08b987bccf8..4da4a75c3f1 100644 --- a/include/linux/iocontext.h +++ b/include/linux/iocontext.h @@ -64,7 +64,7 @@ struct cfq_io_context { * and kmalloc'ed. These could be shared between processes. */ struct io_context { - atomic_t refcount; + atomic_long_t refcount; atomic_t nr_tasks; /* all the fields below are protected by this lock */ @@ -91,7 +91,7 @@ static inline struct io_context *ioc_task_link(struct io_context *ioc) * if ref count is zero, don't allow sharing (ioc is going away, it's * a race). 
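/*
 * Illustrative sketch (not from the original patch) of the reworked
 * io_mapping interface shown in the io-mapping.h hunk above; the aperture
 * base/size and page index are made up.
 */
struct io_mapping *map;
void *vaddr;

map = io_mapping_create_wc(aperture_base, aperture_size);
if (!map)
        return -ENOMEM;

/* map a single page write-combined, CPU-local and atomic */
vaddr = io_mapping_map_atomic_wc(map, page_index << PAGE_SHIFT);
/* ... access that page of the aperture through vaddr ... */
io_mapping_unmap_atomic(vaddr);

io_mapping_free(map);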
*/ - if (ioc && atomic_inc_not_zero(&ioc->refcount)) { + if (ioc && atomic_long_inc_not_zero(&ioc->refcount)) { atomic_inc(&ioc->nr_tasks); return ioc; } diff --git a/include/linux/iommu.h b/include/linux/iommu.h index 8a7bfb1b6ca..3af4ffd591b 100644 --- a/include/linux/iommu.h +++ b/include/linux/iommu.h @@ -21,6 +21,7 @@ #define IOMMU_READ (1) #define IOMMU_WRITE (2) +#define IOMMU_CACHE (4) /* DMA cache coherency */ struct device; @@ -28,6 +29,8 @@ struct iommu_domain { void *priv; }; +#define IOMMU_CAP_CACHE_COHERENCY 0x1 + struct iommu_ops { int (*domain_init)(struct iommu_domain *domain); void (*domain_destroy)(struct iommu_domain *domain); @@ -39,6 +42,8 @@ struct iommu_ops { size_t size); phys_addr_t (*iova_to_phys)(struct iommu_domain *domain, unsigned long iova); + int (*domain_has_cap)(struct iommu_domain *domain, + unsigned long cap); }; #ifdef CONFIG_IOMMU_API @@ -57,6 +62,8 @@ extern void iommu_unmap_range(struct iommu_domain *domain, unsigned long iova, size_t size); extern phys_addr_t iommu_iova_to_phys(struct iommu_domain *domain, unsigned long iova); +extern int iommu_domain_has_cap(struct iommu_domain *domain, + unsigned long cap); #else /* CONFIG_IOMMU_API */ @@ -107,6 +114,12 @@ static inline phys_addr_t iommu_iova_to_phys(struct iommu_domain *domain, return 0; } +static inline int domain_has_cap(struct iommu_domain *domain, + unsigned long cap) +{ + return 0; +} + #endif /* CONFIG_IOMMU_API */ #endif /* __LINUX_IOMMU_H */ diff --git a/include/linux/ioport.h b/include/linux/ioport.h index 32e4b2f7229..786e7b8cece 100644 --- a/include/linux/ioport.h +++ b/include/linux/ioport.h @@ -49,6 +49,8 @@ struct resource_list { #define IORESOURCE_SIZEALIGN 0x00020000 /* size indicates alignment */ #define IORESOURCE_STARTALIGN 0x00040000 /* start field is alignment */ +#define IORESOURCE_MEM_64 0x00100000 + #define IORESOURCE_EXCLUSIVE 0x08000000 /* Userland may not map this resource */ #define IORESOURCE_DISABLED 0x10000000 #define IORESOURCE_UNSET 0x20000000 diff --git a/include/linux/ip6_tunnel.h b/include/linux/ip6_tunnel.h index 1e7cc4af40d..acb9ad684d6 100644 --- a/include/linux/ip6_tunnel.h +++ b/include/linux/ip6_tunnel.h @@ -1,6 +1,8 @@ #ifndef _IP6_TUNNEL_H #define _IP6_TUNNEL_H +#include <linux/types.h> + #define IPV6_TLV_TNL_ENCAP_LIMIT 4 #define IPV6_DEFAULT_TNL_ENCAP_LIMIT 4 diff --git a/include/linux/ip_vs.h b/include/linux/ip_vs.h index 0f434a28fb5..148265e63e8 100644 --- a/include/linux/ip_vs.h +++ b/include/linux/ip_vs.h @@ -96,10 +96,10 @@ */ struct ip_vs_service_user { /* virtual service addresses */ - u_int16_t protocol; + __u16 protocol; __be32 addr; /* virtual ip address */ __be16 port; - u_int32_t fwmark; /* firwall mark of service */ + __u32 fwmark; /* firwall mark of service */ /* virtual service options */ char sched_name[IP_VS_SCHEDNAME_MAXLEN]; @@ -119,8 +119,8 @@ struct ip_vs_dest_user { int weight; /* destination weight */ /* thresholds for active connections */ - u_int32_t u_threshold; /* upper threshold */ - u_int32_t l_threshold; /* lower threshold */ + __u32 u_threshold; /* upper threshold */ + __u32 l_threshold; /* lower threshold */ }; @@ -159,10 +159,10 @@ struct ip_vs_getinfo { /* The argument to IP_VS_SO_GET_SERVICE */ struct ip_vs_service_entry { /* which service: user fills in these */ - u_int16_t protocol; + __u16 protocol; __be32 addr; /* virtual address */ __be16 port; - u_int32_t fwmark; /* firwall mark of service */ + __u32 fwmark; /* firwall mark of service */ /* service options */ char sched_name[IP_VS_SCHEDNAME_MAXLEN]; @@ -184,12 
+184,12 @@ struct ip_vs_dest_entry { unsigned conn_flags; /* connection flags */ int weight; /* destination weight */ - u_int32_t u_threshold; /* upper threshold */ - u_int32_t l_threshold; /* lower threshold */ + __u32 u_threshold; /* upper threshold */ + __u32 l_threshold; /* lower threshold */ - u_int32_t activeconns; /* active connections */ - u_int32_t inactconns; /* inactive connections */ - u_int32_t persistconns; /* persistent connections */ + __u32 activeconns; /* active connections */ + __u32 inactconns; /* inactive connections */ + __u32 persistconns; /* persistent connections */ /* statistics */ struct ip_vs_stats_user stats; @@ -199,10 +199,10 @@ struct ip_vs_dest_entry { /* The argument to IP_VS_SO_GET_DESTS */ struct ip_vs_get_dests { /* which service: user fills in these */ - u_int16_t protocol; + __u16 protocol; __be32 addr; /* virtual address */ __be16 port; - u_int32_t fwmark; /* firwall mark of service */ + __u32 fwmark; /* firwall mark of service */ /* number of real servers */ unsigned int num_dests; diff --git a/include/linux/ipc_namespace.h b/include/linux/ipc_namespace.h index ea330f9e710..e408722a84c 100644 --- a/include/linux/ipc_namespace.h +++ b/include/linux/ipc_namespace.h @@ -25,7 +25,7 @@ struct ipc_ids { }; struct ipc_namespace { - struct kref kref; + atomic_t count; struct ipc_ids ids[3]; int sem_ctls[4]; @@ -44,42 +44,66 @@ struct ipc_namespace { int shm_tot; struct notifier_block ipcns_nb; + + /* The kern_mount of the mqueuefs sb. We take a ref on it */ + struct vfsmount *mq_mnt; + + /* # queues in this ns, protected by mq_lock */ + unsigned int mq_queues_count; + + /* next fields are set through sysctl */ + unsigned int mq_queues_max; /* initialized to DFLT_QUEUESMAX */ + unsigned int mq_msg_max; /* initialized to DFLT_MSGMAX */ + unsigned int mq_msgsize_max; /* initialized to DFLT_MSGSIZEMAX */ + }; extern struct ipc_namespace init_ipc_ns; extern atomic_t nr_ipc_ns; -#ifdef CONFIG_SYSVIPC +extern spinlock_t mq_lock; +#if defined(CONFIG_POSIX_MQUEUE) || defined(CONFIG_SYSVIPC) #define INIT_IPC_NS(ns) .ns = &init_ipc_ns, +#else +#define INIT_IPC_NS(ns) +#endif +#ifdef CONFIG_SYSVIPC extern int register_ipcns_notifier(struct ipc_namespace *); extern int cond_register_ipcns_notifier(struct ipc_namespace *); extern void unregister_ipcns_notifier(struct ipc_namespace *); extern int ipcns_notify(unsigned long); - #else /* CONFIG_SYSVIPC */ -#define INIT_IPC_NS(ns) +static inline int register_ipcns_notifier(struct ipc_namespace *ns) +{ return 0; } +static inline int cond_register_ipcns_notifier(struct ipc_namespace *ns) +{ return 0; } +static inline void unregister_ipcns_notifier(struct ipc_namespace *ns) { } +static inline int ipcns_notify(unsigned long l) { return 0; } #endif /* CONFIG_SYSVIPC */ -#if defined(CONFIG_SYSVIPC) && defined(CONFIG_IPC_NS) -extern void free_ipc_ns(struct kref *kref); +#ifdef CONFIG_POSIX_MQUEUE +extern int mq_init_ns(struct ipc_namespace *ns); +/* default values */ +#define DFLT_QUEUESMAX 256 /* max number of message queues */ +#define DFLT_MSGMAX 10 /* max number of messages in each queue */ +#define HARD_MSGMAX (131072/sizeof(void *)) +#define DFLT_MSGSIZEMAX 8192 /* max message size */ +#else +static inline int mq_init_ns(struct ipc_namespace *ns) { return 0; } +#endif + +#if defined(CONFIG_IPC_NS) extern struct ipc_namespace *copy_ipcs(unsigned long flags, struct ipc_namespace *ns); -extern void free_ipcs(struct ipc_namespace *ns, struct ipc_ids *ids, - void (*free)(struct ipc_namespace *, - struct kern_ipc_perm *)); - static 
inline struct ipc_namespace *get_ipc_ns(struct ipc_namespace *ns) { if (ns) - kref_get(&ns->kref); + atomic_inc(&ns->count); return ns; } -static inline void put_ipc_ns(struct ipc_namespace *ns) -{ - kref_put(&ns->kref, free_ipc_ns); -} +extern void put_ipc_ns(struct ipc_namespace *ns); #else static inline struct ipc_namespace *copy_ipcs(unsigned long flags, struct ipc_namespace *ns) @@ -99,4 +123,18 @@ static inline void put_ipc_ns(struct ipc_namespace *ns) { } #endif + +#ifdef CONFIG_POSIX_MQUEUE_SYSCTL + +struct ctl_table_header; +extern struct ctl_table_header *mq_register_sysctl_table(void); + +#else /* CONFIG_POSIX_MQUEUE_SYSCTL */ + +static inline struct ctl_table_header *mq_register_sysctl_table(void) +{ + return NULL; +} + +#endif /* CONFIG_POSIX_MQUEUE_SYSCTL */ #endif diff --git a/include/linux/ipmi.h b/include/linux/ipmi.h index 7ebdb4fb4e5..65aae34759d 100644 --- a/include/linux/ipmi.h +++ b/include/linux/ipmi.h @@ -198,6 +198,8 @@ struct kernel_ipmi_msg { response. When you send a response message, this will be returned. */ +#define IPMI_OEM_RECV_TYPE 5 /* The response for OEM Channels */ + /* Note that async events and received commands do not have a completion code as the first byte of the incoming data, unlike a response. */ diff --git a/include/linux/ipmi_msgdefs.h b/include/linux/ipmi_msgdefs.h index b56a158d587..df97e6e31e8 100644 --- a/include/linux/ipmi_msgdefs.h +++ b/include/linux/ipmi_msgdefs.h @@ -58,6 +58,12 @@ #define IPMI_READ_EVENT_MSG_BUFFER_CMD 0x35 #define IPMI_GET_CHANNEL_INFO_CMD 0x42 +/* Bit for BMC global enables. */ +#define IPMI_BMC_RCV_MSG_INTR 0x01 +#define IPMI_BMC_EVT_MSG_INTR 0x02 +#define IPMI_BMC_EVT_MSG_BUFF 0x04 +#define IPMI_BMC_SYS_LOG 0x08 + #define IPMI_NETFN_STORAGE_REQUEST 0x0a #define IPMI_NETFN_STORAGE_RESPONSE 0x0b #define IPMI_ADD_SEL_ENTRY_CMD 0x44 @@ -109,5 +115,7 @@ #define IPMI_CHANNEL_MEDIUM_USB1 10 #define IPMI_CHANNEL_MEDIUM_USB2 11 #define IPMI_CHANNEL_MEDIUM_SYSINTF 12 +#define IPMI_CHANNEL_MEDIUM_OEM_MIN 0x60 +#define IPMI_CHANNEL_MEDIUM_OEM_MAX 0x7f #endif /* __LINUX_IPMI_MSGDEFS_H */ diff --git a/include/linux/ipmi_smi.h b/include/linux/ipmi_smi.h index 62b73668b60..f7c9c75a277 100644 --- a/include/linux/ipmi_smi.h +++ b/include/linux/ipmi_smi.h @@ -230,6 +230,6 @@ static inline void ipmi_free_smi_msg(struct ipmi_smi_msg *msg) automatically be dstroyed when the interface is destroyed. 
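To illustrate the ipc_namespace.h change above, where the kref is replaced by a bare atomic_t count and put_ipc_ns() becomes an out-of-line function (presumably so the final put can also release the mqueue mount and per-namespace limits introduced here), a minimal sketch of the resulting get/put pattern; the caller is hypothetical and not part of the patch:

#include <linux/ipc_namespace.h>

/* Hypothetical caller: pin a namespace while reading one of the new
 * per-namespace mqueue limits.  get_ipc_ns() now just bumps ns->count;
 * the final put_ipc_ns() is responsible for tearing the namespace down. */
static unsigned int demo_peek_mq_limit(struct ipc_namespace *ns)
{
        unsigned int max;

        ns = get_ipc_ns(ns);
        max = ns->mq_queues_max;
        put_ipc_ns(ns);
        return max;
}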
*/ int ipmi_smi_add_proc_entry(ipmi_smi_t smi, char *name, read_proc_t *read_proc, - void *data, struct module *owner); + void *data); #endif /* __LINUX_IPMI_SMI_H */ diff --git a/include/linux/ipv6.h b/include/linux/ipv6.h index 0b816cae533..c662efa6828 100644 --- a/include/linux/ipv6.h +++ b/include/linux/ipv6.h @@ -1,6 +1,7 @@ #ifndef _IPV6_H #define _IPV6_H +#include <linux/types.h> #include <linux/in6.h> #include <asm/byteorder.h> @@ -168,6 +169,12 @@ struct ipv6_devconf { __s32 accept_dad; void *sysctl; }; + +struct ipv6_params { + __s32 disable_ipv6; + __s32 autoconf; +}; +extern struct ipv6_params ipv6_defaults; #endif /* index values for the variables in ipv6_devconf */ diff --git a/include/linux/ipv6_route.h b/include/linux/ipv6_route.h index b323ff57796..1e7d8af2def 100644 --- a/include/linux/ipv6_route.h +++ b/include/linux/ipv6_route.h @@ -13,6 +13,8 @@ #ifndef _LINUX_IPV6_ROUTE_H #define _LINUX_IPV6_ROUTE_H +#include <linux/types.h> + #define RTF_DEFAULT 0x00010000 /* default - learned via ND */ #define RTF_ALLONLINK 0x00020000 /* (deprecated and will be removed) fallback, no routers on link */ diff --git a/include/linux/ipx.h b/include/linux/ipx.h index eb19b4ea84f..aabb1d29402 100644 --- a/include/linux/ipx.h +++ b/include/linux/ipx.h @@ -1,5 +1,6 @@ #ifndef _IPX_H_ #define _IPX_H_ +#include <linux/types.h> #include <linux/sockios.h> #include <linux/socket.h> #define IPX_NODE_LEN 6 diff --git a/include/linux/irda.h b/include/linux/irda.h index 28f88ecba34..00bdad0e851 100644 --- a/include/linux/irda.h +++ b/include/linux/irda.h @@ -25,6 +25,8 @@ #ifndef KERNEL_IRDA_H #define KERNEL_IRDA_H +#include <linux/types.h> + /* Please do *not* add any #include in this file, this file is * included as-is in user space. * Please fix the calling file to properly included needed files before diff --git a/include/linux/irq.h b/include/linux/irq.h index f899b502f18..ae9653dbcd7 100644 --- a/include/linux/irq.h +++ b/include/linux/irq.h @@ -17,9 +17,12 @@ #include <linux/cache.h> #include <linux/spinlock.h> #include <linux/cpumask.h> +#include <linux/gfp.h> #include <linux/irqreturn.h> #include <linux/irqnr.h> #include <linux/errno.h> +#include <linux/topology.h> +#include <linux/wait.h> #include <asm/irq.h> #include <asm/ptrace.h> @@ -65,6 +68,9 @@ typedef void (*irq_flow_handler_t)(unsigned int irq, #define IRQ_SPURIOUS_DISABLED 0x00800000 /* IRQ was disabled by the spurious trap */ #define IRQ_MOVE_PCNTXT 0x01000000 /* IRQ migration from process context */ #define IRQ_AFFINITY_SET 0x02000000 /* IRQ affinity was set from userspace*/ +#define IRQ_SUSPENDED 0x04000000 /* IRQ has gone through suspend sequence */ +#define IRQ_ONESHOT 0x08000000 /* IRQ is not unmasked after hardirq */ +#define IRQ_NESTED_THREAD 0x10000000 /* IRQ is nested into another, no own handler thread */ #ifdef CONFIG_IRQ_PER_CPU # define CHECK_IRQ_PER_CPU(var) ((var) & IRQ_PER_CPU) @@ -96,6 +102,9 @@ struct msi_desc; * @set_type: set the flow type (IRQ_TYPE_LEVEL/etc.) 
of an IRQ * @set_wake: enable/disable power-management wake-on of an IRQ * + * @bus_lock: function to lock access to slow bus (i2c) chips + * @bus_sync_unlock: function to sync and unlock slow bus (i2c) chips + * * @release: release function solely used by UML * @typename: obsoleted by name, kept as migration helper */ @@ -113,12 +122,15 @@ struct irq_chip { void (*eoi)(unsigned int irq); void (*end)(unsigned int irq); - void (*set_affinity)(unsigned int irq, + int (*set_affinity)(unsigned int irq, const struct cpumask *dest); int (*retrigger)(unsigned int irq); int (*set_type)(unsigned int irq, unsigned int flow_type); int (*set_wake)(unsigned int irq, unsigned int on); + void (*bus_lock)(unsigned int irq); + void (*bus_sync_unlock)(unsigned int irq); + /* Currently used only by UML, might disappear one day.*/ #ifdef CONFIG_IRQ_RELEASE_METHOD void (*release)(unsigned int irq, void *dev_id); @@ -153,19 +165,19 @@ struct irq_2_iommu; * @irqs_unhandled: stats field for spurious unhandled interrupts * @lock: locking for SMP * @affinity: IRQ affinity on SMP - * @cpu: cpu index useful for balancing + * @node: node index useful for balancing * @pending_mask: pending rebalanced interrupts + * @threads_active: number of irqaction threads currently running + * @wait_for_threads: wait queue for sync_irq to wait for threaded handlers * @dir: /proc/irq/ procfs entry * @name: flow handler name for /proc/interrupts output */ struct irq_desc { unsigned int irq; -#ifdef CONFIG_SPARSE_IRQ struct timer_rand_state *timer_rand_state; unsigned int *kstat_irqs; -# ifdef CONFIG_INTR_REMAP +#ifdef CONFIG_INTR_REMAP struct irq_2_iommu *irq_2_iommu; -# endif #endif irq_flow_handler_t handle_irq; struct irq_chip *chip; @@ -182,12 +194,14 @@ struct irq_desc { unsigned int irqs_unhandled; spinlock_t lock; #ifdef CONFIG_SMP - cpumask_t affinity; - unsigned int cpu; -#endif + cpumask_var_t affinity; + unsigned int node; #ifdef CONFIG_GENERIC_PENDING_IRQ - cpumask_t pending_mask; + cpumask_var_t pending_mask; +#endif #endif + atomic_t threads_active; + wait_queue_head_t wait_for_threads; #ifdef CONFIG_PROC_FS struct proc_dir_entry *dir; #endif @@ -195,40 +209,23 @@ struct irq_desc { } ____cacheline_internodealigned_in_smp; extern void arch_init_copy_chip_data(struct irq_desc *old_desc, - struct irq_desc *desc, int cpu); + struct irq_desc *desc, int node); extern void arch_free_chip_data(struct irq_desc *old_desc, struct irq_desc *desc); #ifndef CONFIG_SPARSE_IRQ extern struct irq_desc irq_desc[NR_IRQS]; -#else /* CONFIG_SPARSE_IRQ */ -extern struct irq_desc *move_irq_desc(struct irq_desc *old_desc, int cpu); - -#define kstat_irqs_this_cpu(DESC) \ - ((DESC)->kstat_irqs[smp_processor_id()]) -#define kstat_incr_irqs_this_cpu(irqno, DESC) \ - ((DESC)->kstat_irqs[smp_processor_id()]++) - -#endif /* CONFIG_SPARSE_IRQ */ - -extern struct irq_desc *irq_to_desc_alloc_cpu(unsigned int irq, int cpu); +#endif -static inline struct irq_desc * -irq_remap_to_desc(unsigned int irq, struct irq_desc *desc) -{ -#ifdef CONFIG_NUMA_MIGRATE_IRQ_DESC - return irq_to_desc(irq); +#ifdef CONFIG_NUMA_IRQ_DESC +extern struct irq_desc *move_irq_desc(struct irq_desc *old_desc, int node); #else +static inline struct irq_desc *move_irq_desc(struct irq_desc *desc, int node) +{ return desc; -#endif } +#endif -/* - * Migration helpers for obsolete names, they will go away: - */ -#define hw_interrupt_type irq_chip -typedef struct irq_chip hw_irq_controller; -#define no_irq_type no_irq_chip -typedef struct irq_desc irq_desc_t; +extern struct irq_desc 
*irq_to_desc_alloc_node(unsigned int irq, int node); /* * Pick up the arch-dependent methods: @@ -236,6 +233,7 @@ typedef struct irq_desc irq_desc_t; #include <asm/hw_irq.h> extern int setup_irq(unsigned int irq, struct irqaction *new); +extern void remove_irq(unsigned int irq, struct irqaction *act); #ifdef CONFIG_GENERIC_HARDIRQS @@ -280,7 +278,7 @@ static inline int irq_balancing_disabled(unsigned int irq) } /* Handle irq action chains: */ -extern int handle_IRQ_event(unsigned int irq, struct irqaction *action); +extern irqreturn_t handle_IRQ_event(unsigned int irq, struct irqaction *action); /* * Built-in IRQ handlers for various IRQ types, @@ -292,6 +290,7 @@ extern void handle_edge_irq(unsigned int irq, struct irq_desc *desc); extern void handle_simple_irq(unsigned int irq, struct irq_desc *desc); extern void handle_percpu_irq(unsigned int irq, struct irq_desc *desc); extern void handle_bad_irq(unsigned int irq, struct irq_desc *desc); +extern void handle_nested_irq(unsigned int irq); /* * Monolithic do_IRQ implementation. @@ -325,7 +324,7 @@ static inline void generic_handle_irq(unsigned int irq) /* Handling of unhandled and spurious interrupts: */ extern void note_interrupt(unsigned int irq, struct irq_desc *desc, - int action_ret); + irqreturn_t action_ret); /* Resending of interrupts :*/ void check_irq_resend(struct irq_desc *desc, unsigned int irq); @@ -382,11 +381,13 @@ set_irq_chained_handler(unsigned int irq, __set_irq_handler(irq, handle, 1, NULL); } +extern void set_irq_nested_thread(unsigned int irq, int nest); + extern void set_irq_noprobe(unsigned int irq); extern void set_irq_probe(unsigned int irq); /* Handle dynamic irq creation and destruction */ -extern unsigned int create_irq_nr(unsigned int irq_want); +extern unsigned int create_irq_nr(unsigned int irq_want, int node); extern int create_irq(void); extern void destroy_irq(unsigned int irq); @@ -422,4 +423,99 @@ extern int set_irq_msi(unsigned int irq, struct msi_desc *entry); #endif /* !CONFIG_S390 */ +#ifdef CONFIG_SMP +/** + * alloc_desc_masks - allocate cpumasks for irq_desc + * @desc: pointer to irq_desc struct + * @node: node which will be handling the cpumasks + * @boot: true if need bootmem + * + * Allocates affinity and pending_mask cpumask if required. + * Returns true if successful (or not required). + */ +static inline bool alloc_desc_masks(struct irq_desc *desc, int node, + bool boot) +{ + gfp_t gfp = GFP_ATOMIC; + + if (boot) + gfp = GFP_NOWAIT; + +#ifdef CONFIG_CPUMASK_OFFSTACK + if (!alloc_cpumask_var_node(&desc->affinity, gfp, node)) + return false; + +#ifdef CONFIG_GENERIC_PENDING_IRQ + if (!alloc_cpumask_var_node(&desc->pending_mask, gfp, node)) { + free_cpumask_var(desc->affinity); + return false; + } +#endif +#endif + return true; +} + +static inline void init_desc_masks(struct irq_desc *desc) +{ + cpumask_setall(desc->affinity); +#ifdef CONFIG_GENERIC_PENDING_IRQ + cpumask_clear(desc->pending_mask); +#endif +} + +/** + * init_copy_desc_masks - copy cpumasks for irq_desc + * @old_desc: pointer to old irq_desc struct + * @new_desc: pointer to new irq_desc struct + * + * Insures affinity and pending_masks are copied to new irq_desc. + * If !CONFIG_CPUMASKS_OFFSTACK the cpumasks are embedded in the + * irq_desc struct so the copy is redundant. 
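The irq.h hunk above switches the per-descriptor affinity and pending_mask to cpumask_var_t and adds the alloc_desc_masks()/init_desc_masks()/init_copy_desc_masks()/free_desc_masks() helpers. A sketch of how a descriptor setup path might use the first two; demo_setup_desc() is a hypothetical helper, not code from the patch:

#include <linux/irq.h>

static bool demo_setup_desc(struct irq_desc *desc, unsigned int irq, int node)
{
        desc->irq = irq;

        /* With CONFIG_CPUMASK_OFFSTACK this allocates the cpumasks on
         * the given node; otherwise it is effectively a no-op. */
        if (!alloc_desc_masks(desc, node, false))
                return false;

        /* affinity defaults to all CPUs, pending_mask starts out clear */
        init_desc_masks(desc);
        return true;
}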
+ */ + +static inline void init_copy_desc_masks(struct irq_desc *old_desc, + struct irq_desc *new_desc) +{ +#ifdef CONFIG_CPUMASK_OFFSTACK + cpumask_copy(new_desc->affinity, old_desc->affinity); + +#ifdef CONFIG_GENERIC_PENDING_IRQ + cpumask_copy(new_desc->pending_mask, old_desc->pending_mask); +#endif +#endif +} + +static inline void free_desc_masks(struct irq_desc *old_desc, + struct irq_desc *new_desc) +{ + free_cpumask_var(old_desc->affinity); + +#ifdef CONFIG_GENERIC_PENDING_IRQ + free_cpumask_var(old_desc->pending_mask); +#endif +} + +#else /* !CONFIG_SMP */ + +static inline bool alloc_desc_masks(struct irq_desc *desc, int node, + bool boot) +{ + return true; +} + +static inline void init_desc_masks(struct irq_desc *desc) +{ +} + +static inline void init_copy_desc_masks(struct irq_desc *old_desc, + struct irq_desc *new_desc) +{ +} + +static inline void free_desc_masks(struct irq_desc *old_desc, + struct irq_desc *new_desc) +{ +} +#endif /* CONFIG_SMP */ + #endif /* _LINUX_IRQ_H */ diff --git a/include/linux/irqflags.h b/include/linux/irqflags.h index 74bde13224c..b02a3f1d46a 100644 --- a/include/linux/irqflags.h +++ b/include/linux/irqflags.h @@ -24,8 +24,8 @@ # define trace_softirqs_enabled(p) ((p)->softirqs_enabled) # define trace_hardirq_enter() do { current->hardirq_context++; } while (0) # define trace_hardirq_exit() do { current->hardirq_context--; } while (0) -# define trace_softirq_enter() do { current->softirq_context++; } while (0) -# define trace_softirq_exit() do { current->softirq_context--; } while (0) +# define lockdep_softirq_enter() do { current->softirq_context++; } while (0) +# define lockdep_softirq_exit() do { current->softirq_context--; } while (0) # define INIT_TRACE_IRQFLAGS .softirqs_enabled = 1, #else # define trace_hardirqs_on() do { } while (0) @@ -38,8 +38,8 @@ # define trace_softirqs_enabled(p) 0 # define trace_hardirq_enter() do { } while (0) # define trace_hardirq_exit() do { } while (0) -# define trace_softirq_enter() do { } while (0) -# define trace_softirq_exit() do { } while (0) +# define lockdep_softirq_enter() do { } while (0) +# define lockdep_softirq_exit() do { } while (0) # define INIT_TRACE_IRQFLAGS #endif diff --git a/include/linux/irqnr.h b/include/linux/irqnr.h index 86af92e9e84..7bf89bc8cbc 100644 --- a/include/linux/irqnr.h +++ b/include/linux/irqnr.h @@ -20,6 +20,7 @@ # define for_each_irq_desc_reverse(irq, desc) \ for (irq = nr_irqs - 1; irq >= 0; irq--) + #else /* CONFIG_GENERIC_HARDIRQS */ extern int nr_irqs; @@ -28,13 +29,23 @@ extern struct irq_desc *irq_to_desc(unsigned int irq); # define for_each_irq_desc(irq, desc) \ for (irq = 0, desc = irq_to_desc(irq); irq < nr_irqs; \ irq++, desc = irq_to_desc(irq)) \ - if (desc) + if (!desc) \ + ; \ + else # define for_each_irq_desc_reverse(irq, desc) \ for (irq = nr_irqs - 1, desc = irq_to_desc(irq); irq >= 0; \ irq--, desc = irq_to_desc(irq)) \ - if (desc) + if (!desc) \ + ; \ + else + +#ifdef CONFIG_SMP +#define irq_node(irq) (irq_to_desc(irq)->node) +#else +#define irq_node(irq) 0 +#endif #endif /* CONFIG_GENERIC_HARDIRQS */ diff --git a/include/linux/irqreturn.h b/include/linux/irqreturn.h index 881883c2009..819acaaac3f 100644 --- a/include/linux/irqreturn.h +++ b/include/linux/irqreturn.h @@ -1,25 +1,19 @@ -/* irqreturn.h */ #ifndef _LINUX_IRQRETURN_H #define _LINUX_IRQRETURN_H -/* - * For 2.4.x compatibility, 2.4.x can use - * - * typedef void irqreturn_t; - * #define IRQ_NONE - * #define IRQ_HANDLED - * #define IRQ_RETVAL(x) - * - * To mix old-style and new-style irq handler 
returns. - * - * IRQ_NONE means we didn't handle it. - * IRQ_HANDLED means that we did have a valid interrupt and handled it. - * IRQ_RETVAL(x) selects on the two depending on x being non-zero (for handled) +/** + * enum irqreturn + * @IRQ_NONE interrupt was not from this device + * @IRQ_HANDLED interrupt was handled by this device + * @IRQ_WAKE_THREAD handler requests to wake the handler thread */ -typedef int irqreturn_t; +enum irqreturn { + IRQ_NONE, + IRQ_HANDLED, + IRQ_WAKE_THREAD, +}; -#define IRQ_NONE (0) -#define IRQ_HANDLED (1) -#define IRQ_RETVAL(x) ((x) != 0) +typedef enum irqreturn irqreturn_t; +#define IRQ_RETVAL(x) ((x) != IRQ_NONE) #endif diff --git a/include/linux/isdn/capilli.h b/include/linux/isdn/capilli.h index 35e9b0fd014..7acb87a4487 100644 --- a/include/linux/isdn/capilli.h +++ b/include/linux/isdn/capilli.h @@ -79,7 +79,7 @@ int attach_capi_ctr(struct capi_ctr *); int detach_capi_ctr(struct capi_ctr *); void capi_ctr_ready(struct capi_ctr * card); -void capi_ctr_reseted(struct capi_ctr * card); +void capi_ctr_down(struct capi_ctr * card); void capi_ctr_suspend_output(struct capi_ctr * card); void capi_ctr_resume_output(struct capi_ctr * card); void capi_ctr_handle_message(struct capi_ctr * card, u16 appl, struct sk_buff *skb); diff --git a/include/linux/isdn/hdlc.h b/include/linux/isdn/hdlc.h new file mode 100644 index 00000000000..4b3ecc40889 --- /dev/null +++ b/include/linux/isdn/hdlc.h @@ -0,0 +1,82 @@ +/* + * hdlc.h -- General purpose ISDN HDLC decoder. + * + * Implementation of a HDLC decoder/encoder in software. + * Neccessary because some ISDN devices don't have HDLC + * controllers. + * + * Copyright (C) + * 2009 Karsten Keil <keil@b1-systems.de> + * 2002 Wolfgang Mües <wolfgang@iksw-muees.de> + * 2001 Frode Isaksen <fisaksen@bewan.com> + * 2001 Kai Germaschewski <kai.germaschewski@gmx.de> + * + * This program is free software; you can redistribute it and/or modify + * it under the terms of the GNU General Public License as published by + * the Free Software Foundation; either version 2 of the License, or + * (at your option) any later version. + * + * This program is distributed in the hope that it will be useful, + * but WITHOUT ANY WARRANTY; without even the implied warranty of + * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the + * GNU General Public License for more details. + * + * You should have received a copy of the GNU General Public License + * along with this program; if not, write to the Free Software + * Foundation, Inc., 675 Mass Ave, Cambridge, MA 02139, USA. 
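Returning to the irqreturn.h hunk above: irqreturn_t becomes a real enum and gains IRQ_WAKE_THREAD, the value a primary handler returns to defer work to its handler thread (the threads_active and wait_for_threads fields added to irq_desc support this). A minimal sketch of such a split handler, registered via the threaded variant of request_irq() declared in interrupt.h (not shown in this diff); the device, register layout and names below are made up for illustration:

#include <linux/interrupt.h>
#include <linux/io.h>

struct demo_dev {
        void __iomem *regs;             /* hypothetical device registers */
};

#define DEMO_STATUS     0x00            /* hypothetical status register */
#define DEMO_STATUS_IRQ 0x01

static irqreturn_t demo_quick_check(int irq, void *dev_id)
{
        struct demo_dev *dev = dev_id;

        if (!(readl(dev->regs + DEMO_STATUS) & DEMO_STATUS_IRQ))
                return IRQ_NONE;        /* not our interrupt */

        return IRQ_WAKE_THREAD;         /* run demo_thread_fn() in process context */
}

static irqreturn_t demo_thread_fn(int irq, void *dev_id)
{
        /* sleeping work (i2c transfers, mutexes, ...) is fine here */
        return IRQ_HANDLED;
}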
+ */ + +#ifndef __ISDNHDLC_H__ +#define __ISDNHDLC_H__ + +struct isdnhdlc_vars { + int bit_shift; + int hdlc_bits1; + int data_bits; + int ffbit_shift; /* encoding only */ + int state; + int dstpos; + + u16 crc; + + u8 cbin; + u8 shift_reg; + u8 ffvalue; + + /* set if transferring data */ + u32 data_received:1; + /* set if D channel (send idle instead of flags) */ + u32 dchannel:1; + /* set if 56K adaptation */ + u32 do_adapt56:1; + /* set if in closing phase (need to send CRC + flag) */ + u32 do_closing:1; + /* set if data is bitreverse */ + u32 do_bitreverse:1; +}; + +/* Feature Flags */ +#define HDLC_56KBIT 0x01 +#define HDLC_DCHANNEL 0x02 +#define HDLC_BITREVERSE 0x04 + +/* + The return value from isdnhdlc_decode is + the frame length, 0 if no complete frame was decoded, + or a negative error number +*/ +#define HDLC_FRAMING_ERROR 1 +#define HDLC_CRC_ERROR 2 +#define HDLC_LENGTH_ERROR 3 + +extern void isdnhdlc_rcv_init(struct isdnhdlc_vars *hdlc, u32 features); + +extern int isdnhdlc_decode(struct isdnhdlc_vars *hdlc, const u8 *src, + int slen, int *count, u8 *dst, int dsize); + +extern void isdnhdlc_out_init(struct isdnhdlc_vars *hdlc, u32 features); + +extern int isdnhdlc_encode(struct isdnhdlc_vars *hdlc, const u8 *src, + u16 slen, int *count, u8 *dst, int dsize); + +#endif /* __ISDNHDLC_H__ */ diff --git a/include/linux/ivtv.h b/include/linux/ivtv.h index f2720280b9e..062d20f7432 100644 --- a/include/linux/ivtv.h +++ b/include/linux/ivtv.h @@ -60,10 +60,10 @@ struct ivtv_dma_frame { #define IVTV_IOC_DMA_FRAME _IOW ('V', BASE_VIDIOC_PRIVATE+0, struct ivtv_dma_frame) -/* These are the VBI types as they appear in the embedded VBI private packets. */ -#define IVTV_SLICED_TYPE_TELETEXT_B (1) -#define IVTV_SLICED_TYPE_CAPTION_525 (4) -#define IVTV_SLICED_TYPE_WSS_625 (5) -#define IVTV_SLICED_TYPE_VPS (7) +/* Deprecated defines: applications should use the defines from videodev2.h */ +#define IVTV_SLICED_TYPE_TELETEXT_B V4L2_MPEG_VBI_IVTV_TELETEXT_B +#define IVTV_SLICED_TYPE_CAPTION_525 V4L2_MPEG_VBI_IVTV_CAPTION_525 +#define IVTV_SLICED_TYPE_WSS_625 V4L2_MPEG_VBI_IVTV_WSS_625 +#define IVTV_SLICED_TYPE_VPS V4L2_MPEG_VBI_IVTV_VPS #endif /* _LINUX_IVTV_H */ diff --git a/include/linux/ivtvfb.h b/include/linux/ivtvfb.h index e20af47b59a..9d88b29ddf5 100644 --- a/include/linux/ivtvfb.h +++ b/include/linux/ivtvfb.h @@ -33,6 +33,6 @@ struct ivtvfb_dma_frame { }; #define IVTVFB_IOC_DMA_FRAME _IOW('V', BASE_VIDIOC_PRIVATE+0, struct ivtvfb_dma_frame) -#define FBIO_WAITFORVSYNC _IOW('F', 0x20, u_int32_t) +#define FBIO_WAITFORVSYNC _IOW('F', 0x20, __u32) #endif diff --git a/include/linux/jbd.h b/include/linux/jbd.h index 64246dce566..c2049a04fa0 100644 --- a/include/linux/jbd.h +++ b/include/linux/jbd.h @@ -35,7 +35,7 @@ #define journal_oom_retry 1 /* - * Define JBD_PARANIOD_IOFAIL to cause a kernel BUG() if ext3 finds + * Define JBD_PARANOID_IOFAIL to cause a kernel BUG() if ext3 finds * certain classes of error which can occur due to failed IOs. Under * normal use we want ext3 to continue after such errors, because * hardware _can_ fail, but for debugging purposes when running tests on @@ -552,6 +552,11 @@ struct transaction_s */ int t_handle_count; + /* + * This transaction is being forced and some process is + * waiting for it to finish. 
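To make the new include/linux/isdn/hdlc.h API above concrete, a receive-side sketch. It assumes isdnhdlc_rcv_init() was called once on the state beforehand and that *count reports how many source bytes each isdnhdlc_decode() call consumed; neither the helper nor those assumptions come from the patch itself:

#include <linux/types.h>
#include <linux/isdn/hdlc.h>

static void demo_hdlc_rx(struct isdnhdlc_vars *hdlc, const u8 *src, int slen)
{
        u8 frame[2048];
        int count, len;

        while (slen > 0) {
                len = isdnhdlc_decode(hdlc, src, slen, &count,
                                      frame, sizeof(frame));
                if (len > 0) {
                        /* a complete HDLC frame of 'len' bytes is in frame[] */
                } else if (len < 0) {
                        /* -HDLC_FRAMING_ERROR, -HDLC_CRC_ERROR, ... */
                }
                src += count;
                slen -= count;
        }
}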
+ */ + int t_synchronous_commit:1; }; /** @@ -973,7 +978,8 @@ extern void journal_destroy_revoke(journal_t *); extern int journal_revoke (handle_t *, unsigned long, struct buffer_head *); extern int journal_cancel_revoke(handle_t *, struct journal_head *); -extern void journal_write_revoke_records(journal_t *, transaction_t *); +extern void journal_write_revoke_records(journal_t *, + transaction_t *, int); /* Recovery revoke support */ extern int journal_set_revoke(journal_t *, unsigned long, tid_t); diff --git a/include/linux/jbd2.h b/include/linux/jbd2.h index b45109c61fb..d97eb652d6c 100644 --- a/include/linux/jbd2.h +++ b/include/linux/jbd2.h @@ -308,7 +308,8 @@ void buffer_assertion_failure(struct buffer_head *bh); int val = (expr); \ if (!val) { \ printk(KERN_ERR \ - "EXT3-fs unexpected failure: %s;\n",# expr); \ + "JBD2 unexpected failure: %s: %s;\n", \ + __func__, #expr); \ printk(KERN_ERR why "\n"); \ } \ val; \ @@ -648,6 +649,12 @@ struct transaction_s int t_handle_count; /* + * This transaction is being forced and some process is + * waiting for it to finish. + */ + int t_synchronous_commit:1; + + /* * For use by the filesystem to store fs-specific data * structures associated with the transaction */ @@ -1149,7 +1156,8 @@ extern int jbd2_journal_clear_err (journal_t *); extern int jbd2_journal_bmap(journal_t *, unsigned long, unsigned long long *); extern int jbd2_journal_force_commit(journal_t *); extern int jbd2_journal_file_inode(handle_t *handle, struct jbd2_inode *inode); -extern int jbd2_journal_begin_ordered_truncate(struct jbd2_inode *inode, loff_t new_size); +extern int jbd2_journal_begin_ordered_truncate(journal_t *journal, + struct jbd2_inode *inode, loff_t new_size); extern void jbd2_journal_init_jbd_inode(struct jbd2_inode *jinode, struct inode *inode); extern void jbd2_journal_release_jbd_inode(journal_t *journal, struct jbd2_inode *jinode); @@ -1185,7 +1193,8 @@ extern int jbd2_journal_init_revoke_caches(void); extern void jbd2_journal_destroy_revoke(journal_t *); extern int jbd2_journal_revoke (handle_t *, unsigned long long, struct buffer_head *); extern int jbd2_journal_cancel_revoke(handle_t *, struct journal_head *); -extern void jbd2_journal_write_revoke_records(journal_t *, transaction_t *); +extern void jbd2_journal_write_revoke_records(journal_t *, + transaction_t *, int); /* Recovery revoke support */ extern int jbd2_journal_set_revoke(journal_t *, unsigned long long, tid_t); @@ -1306,6 +1315,12 @@ extern int jbd_blocks_per_page(struct inode *inode); #define BUFFER_TRACE2(bh, bh2, info) do {} while (0) #define JBUFFER_TRACE(jh, info) do {} while (0) +/* + * jbd2_dev_to_name is a utility function used by the jbd2 and ext4 + * tracing infrastructure to map a dev_t to a device name. + */ +extern const char *jbd2_dev_to_name(dev_t device); + #endif /* __KERNEL__ */ #endif /* _LINUX_JBD2_H */ diff --git a/include/linux/jffs2.h b/include/linux/jffs2.h index da720bc3eb1..2b32d638147 100644 --- a/include/linux/jffs2.h +++ b/include/linux/jffs2.h @@ -12,6 +12,7 @@ #ifndef __LINUX_JFFS2_H__ #define __LINUX_JFFS2_H__ +#include <linux/types.h> #include <linux/magic.h> /* You must include something which defines the C99 uintXX_t types. 
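The jffs2.h hunk above (continuing below) pulls in <linux/types.h> and moves the exported on-media structures from the C99 uintXX_t names to the kernel's __u8/__u16/__u32 types, which should let userspace include the header without arranging for <stdint.h> first. A tiny userspace sketch under that assumption:

#include <stdio.h>
#include <linux/jffs2.h>        /* no <stdint.h> needed beforehand */

int main(void)
{
        printf("jffs2_raw_dirent on-media header: %zu bytes\n",
               sizeof(struct jffs2_raw_dirent));
        return 0;
}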
@@ -91,15 +92,15 @@ byteswapping */ typedef struct { - uint32_t v32; + __u32 v32; } __attribute__((packed)) jint32_t; typedef struct { - uint32_t m; + __u32 m; } __attribute__((packed)) jmode_t; typedef struct { - uint16_t v16; + __u16 v16; } __attribute__((packed)) jint16_t; struct jffs2_unknown_node @@ -121,12 +122,12 @@ struct jffs2_raw_dirent jint32_t version; jint32_t ino; /* == zero for unlink */ jint32_t mctime; - uint8_t nsize; - uint8_t type; - uint8_t unused[2]; + __u8 nsize; + __u8 type; + __u8 unused[2]; jint32_t node_crc; jint32_t name_crc; - uint8_t name[0]; + __u8 name[0]; }; /* The JFFS2 raw inode structure: Used for storage on physical media. */ @@ -153,12 +154,12 @@ struct jffs2_raw_inode jint32_t offset; /* Where to begin to write. */ jint32_t csize; /* (Compressed) data size */ jint32_t dsize; /* Size of the node's data. (after decompression) */ - uint8_t compr; /* Compression algorithm used */ - uint8_t usercompr; /* Compression algorithm requested by the user */ + __u8 compr; /* Compression algorithm used */ + __u8 usercompr; /* Compression algorithm requested by the user */ jint16_t flags; /* See JFFS2_INO_FLAG_* */ jint32_t data_crc; /* CRC for the (compressed) data. */ jint32_t node_crc; /* CRC for the raw inode (excluding data) */ - uint8_t data[0]; + __u8 data[0]; }; struct jffs2_raw_xattr { @@ -168,12 +169,12 @@ struct jffs2_raw_xattr { jint32_t hdr_crc; jint32_t xid; /* XATTR identifier number */ jint32_t version; - uint8_t xprefix; - uint8_t name_len; + __u8 xprefix; + __u8 name_len; jint16_t value_len; jint32_t data_crc; jint32_t node_crc; - uint8_t data[0]; + __u8 data[0]; } __attribute__((packed)); struct jffs2_raw_xref diff --git a/include/linux/kallsyms.h b/include/linux/kallsyms.h index f3fe34391d8..792274269f2 100644 --- a/include/linux/kallsyms.h +++ b/include/linux/kallsyms.h @@ -13,10 +13,17 @@ #define KSYM_SYMBOL_LEN (sizeof("%s+%#lx/%#lx [%s]") + (KSYM_NAME_LEN - 1) + \ 2*(BITS_PER_LONG*3/10) + (MODULE_NAME_LEN - 1) + 1) +struct module; + #ifdef CONFIG_KALLSYMS /* Lookup the address for a symbol. Returns 0 if not found. 
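The kallsyms.h hunk continuing below adds kallsyms_on_each_symbol(), an iterator over the core kernel's symbol table that calls back with the symbol name, owning module and address. A sketch of a possible caller; the prefix-counting helper is mine, not part of the patch:

#include <linux/kallsyms.h>
#include <linux/string.h>

struct demo_prefix_count {
        const char *prefix;
        unsigned long hits;
};

static int demo_count_prefix(void *data, const char *name,
                             struct module *mod, unsigned long addr)
{
        struct demo_prefix_count *pc = data;

        if (!strncmp(name, pc->prefix, strlen(pc->prefix)))
                pc->hits++;
        return 0;               /* keep iterating over every symbol */
}

static unsigned long demo_symbols_with_prefix(const char *prefix)
{
        struct demo_prefix_count pc = { .prefix = prefix, .hits = 0 };

        kallsyms_on_each_symbol(demo_count_prefix, &pc);
        return pc.hits;
}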
*/ unsigned long kallsyms_lookup_name(const char *name); +/* Call a function on each kallsyms symbol in the core kernel */ +int kallsyms_on_each_symbol(int (*fn)(void *, const char *, struct module *, + unsigned long), + void *data); + extern int kallsyms_lookup_size_offset(unsigned long addr, unsigned long *symbolsize, unsigned long *offset); @@ -43,6 +50,14 @@ static inline unsigned long kallsyms_lookup_name(const char *name) return 0; } +static inline int kallsyms_on_each_symbol(int (*fn)(void *, const char *, + struct module *, + unsigned long), + void *data) +{ + return 0; +} + static inline int kallsyms_lookup_size_offset(unsigned long addr, unsigned long *symbolsize, unsigned long *offset) diff --git a/include/linux/kernel.h b/include/linux/kernel.h index 343df9ef241..2b5b1e0899a 100644 --- a/include/linux/kernel.h +++ b/include/linux/kernel.h @@ -16,7 +16,7 @@ #include <linux/log2.h> #include <linux/typecheck.h> #include <linux/ratelimit.h> -#include <linux/dynamic_printk.h> +#include <linux/dynamic_debug.h> #include <asm/byteorder.h> #include <asm/bug.h> @@ -58,7 +58,7 @@ extern const char linux_proc_banner[]; #define _RET_IP_ (unsigned long)__builtin_return_address(0) #define _THIS_IP_ ({ __label__ __here; __here: (unsigned long)&&__here; }) -#ifdef CONFIG_LBD +#ifdef CONFIG_LBDAF # include <asm/div64.h> # define sector_div(a, b) do_div(a, b) #else @@ -97,12 +97,14 @@ extern const char linux_proc_banner[]; #define KERN_INFO "<6>" /* informational */ #define KERN_DEBUG "<7>" /* debug-level messages */ +/* Use the default kernel loglevel */ +#define KERN_DEFAULT "<d>" /* * Annotation for a "continued" line of log printout (only done after a * line that had no enclosing \n). Only to be used by core/arch code * during early bootup (a continued line is not SMP-safe otherwise). */ -#define KERN_CONT "" +#define KERN_CONT "<c>" extern int console_printk[]; @@ -123,7 +125,7 @@ extern int _cond_resched(void); #endif #ifdef CONFIG_DEBUG_SPINLOCK_SLEEP - void __might_sleep(char *file, int line); + void __might_sleep(char *file, int line, int preempt_offset); /** * might_sleep - annotation for functions that can sleep * @@ -135,8 +137,9 @@ extern int _cond_resched(void); * supposed to. */ # define might_sleep() \ - do { __might_sleep(__FILE__, __LINE__); might_resched(); } while (0) + do { __might_sleep(__FILE__, __LINE__, 0); might_resched(); } while (0) #else + static inline void __might_sleep(char *file, int line, int preempt_offset) { } # define might_sleep() do { might_resched(); } while (0) #endif @@ -242,6 +245,20 @@ extern struct ratelimit_state printk_ratelimit_state; extern int printk_ratelimit(void); extern bool printk_timed_ratelimit(unsigned long *caller_jiffies, unsigned int interval_msec); + +/* + * Print a one-time message (analogous to WARN_ONCE() et al): + */ +#define printk_once(x...) ({ \ + static int __print_once = 1; \ + \ + if (__print_once) { \ + __print_once = 0; \ + printk(x); \ + } \ +}) + +void log_buf_kexec_setup(void); #else static inline int vprintk(const char *s, va_list args) __attribute__ ((format (printf, 1, 0))); @@ -253,6 +270,13 @@ static inline int printk_ratelimit(void) { return 0; } static inline bool printk_timed_ratelimit(unsigned long *caller_jiffies, \ unsigned int interval_msec) \ { return false; } + +/* No effect, but we still get type checking even in the !PRINTK case: */ +#define printk_once(x...) 
printk(x) + +static inline void log_buf_kexec_setup(void) +{ +} #endif extern int printk_needs_cpu(int cpu); @@ -280,6 +304,7 @@ extern int oops_in_progress; /* If set, an oops, panic(), BUG() or die() is in extern int panic_timeout; extern int panic_on_oops; extern int panic_on_unrecovered_nmi; +extern int panic_on_io_nmi; extern const char *print_tainted(void); extern void add_taint(unsigned flag); extern int test_taint(unsigned flag); @@ -353,14 +378,26 @@ static inline char *pack_hex_byte(char *buf, u8 byte) printk(KERN_NOTICE pr_fmt(fmt), ##__VA_ARGS__) #define pr_info(fmt, ...) \ printk(KERN_INFO pr_fmt(fmt), ##__VA_ARGS__) +#define pr_cont(fmt, ...) \ + printk(KERN_CONT fmt, ##__VA_ARGS__) + +/* pr_devel() should produce zero code unless DEBUG is defined */ +#ifdef DEBUG +#define pr_devel(fmt, ...) \ + printk(KERN_DEBUG pr_fmt(fmt), ##__VA_ARGS__) +#else +#define pr_devel(fmt, ...) \ + ({ if (0) printk(KERN_DEBUG pr_fmt(fmt), ##__VA_ARGS__); 0; }) +#endif /* If you are writing a driver, please use dev_dbg instead */ #if defined(DEBUG) #define pr_debug(fmt, ...) \ printk(KERN_DEBUG pr_fmt(fmt), ##__VA_ARGS__) -#elif defined(CONFIG_DYNAMIC_PRINTK_DEBUG) +#elif defined(CONFIG_DYNAMIC_DEBUG) +/* dynamic_pr_debug() uses pr_fmt() internally so we don't need it here */ #define pr_debug(fmt, ...) do { \ - dynamic_pr_debug(pr_fmt(fmt), ##__VA_ARGS__); \ + dynamic_pr_debug(fmt, ##__VA_ARGS__); \ } while (0) #else #define pr_debug(fmt, ...) \ @@ -368,6 +405,139 @@ static inline char *pack_hex_byte(char *buf, u8 byte) #endif /* + * General tracing related utility functions - trace_printk(), + * tracing_on/tracing_off and tracing_start()/tracing_stop + * + * Use tracing_on/tracing_off when you want to quickly turn on or off + * tracing. It simply enables or disables the recording of the trace events. + * This also corresponds to the user space /sys/kernel/debug/tracing/tracing_on + * file, which gives a means for the kernel and userspace to interact. + * Place a tracing_off() in the kernel where you want tracing to end. + * From user space, examine the trace, and then echo 1 > tracing_on + * to continue tracing. + * + * tracing_stop/tracing_start has slightly more overhead. It is used + * by things like suspend to ram where disabling the recording of the + * trace is not enough, but tracing must actually stop because things + * like calling smp_processor_id() may crash the system. + * + * Most likely, you want to use tracing_on/tracing_off. + */ +#ifdef CONFIG_RING_BUFFER +void tracing_on(void); +void tracing_off(void); +/* trace_off_permanent stops recording with no way to bring it back */ +void tracing_off_permanent(void); +int tracing_is_on(void); +#else +static inline void tracing_on(void) { } +static inline void tracing_off(void) { } +static inline void tracing_off_permanent(void) { } +static inline int tracing_is_on(void) { return 0; } +#endif +#ifdef CONFIG_TRACING +extern void tracing_start(void); +extern void tracing_stop(void); +extern void ftrace_off_permanent(void); + +extern void +ftrace_special(unsigned long arg1, unsigned long arg2, unsigned long arg3); + +static inline void __attribute__ ((format (printf, 1, 2))) +____trace_printk_check_format(const char *fmt, ...) +{ +} +#define __trace_printk_check_format(fmt, args...) 
\ +do { \ + if (0) \ + ____trace_printk_check_format(fmt, ##args); \ +} while (0) + +/** + * trace_printk - printf formatting in the ftrace buffer + * @fmt: the printf format for printing + * + * Note: __trace_printk is an internal function for trace_printk and + * the @ip is passed in via the trace_printk macro. + * + * This function allows a kernel developer to debug fast path sections + * that printk is not appropriate for. By scattering in various + * printk like tracing in the code, a developer can quickly see + * where problems are occurring. + * + * This is intended as a debugging tool for the developer only. + * Please refrain from leaving trace_printks scattered around in + * your code. + */ + +#define trace_printk(fmt, args...) \ +do { \ + __trace_printk_check_format(fmt, ##args); \ + if (__builtin_constant_p(fmt)) { \ + static const char *trace_printk_fmt \ + __attribute__((section("__trace_printk_fmt"))) = \ + __builtin_constant_p(fmt) ? fmt : NULL; \ + \ + __trace_bprintk(_THIS_IP_, trace_printk_fmt, ##args); \ + } else \ + __trace_printk(_THIS_IP_, fmt, ##args); \ +} while (0) + +extern int +__trace_bprintk(unsigned long ip, const char *fmt, ...) + __attribute__ ((format (printf, 2, 3))); + +extern int +__trace_printk(unsigned long ip, const char *fmt, ...) + __attribute__ ((format (printf, 2, 3))); + +/* + * The double __builtin_constant_p is because gcc will give us an error + * if we try to allocate the static variable to fmt if it is not a + * constant. Even with the outer if statement. + */ +#define ftrace_vprintk(fmt, vargs) \ +do { \ + if (__builtin_constant_p(fmt)) { \ + static const char *trace_printk_fmt \ + __attribute__((section("__trace_printk_fmt"))) = \ + __builtin_constant_p(fmt) ? fmt : NULL; \ + \ + __ftrace_vbprintk(_THIS_IP_, trace_printk_fmt, vargs); \ + } else \ + __ftrace_vprintk(_THIS_IP_, fmt, vargs); \ +} while (0) + +extern int +__ftrace_vbprintk(unsigned long ip, const char *fmt, va_list ap); + +extern int +__ftrace_vprintk(unsigned long ip, const char *fmt, va_list ap); + +extern void ftrace_dump(void); +#else +static inline void +ftrace_special(unsigned long arg1, unsigned long arg2, unsigned long arg3) { } +static inline int +trace_printk(const char *fmt, ...) __attribute__ ((format (printf, 1, 2))); + +static inline void tracing_start(void) { } +static inline void tracing_stop(void) { } +static inline void ftrace_off_permanent(void) { } +static inline int +trace_printk(const char *fmt, ...) +{ + return 0; +} +static inline int +ftrace_vprintk(const char *fmt, va_list ap) +{ + return 0; +} +static inline void ftrace_dump(void) { } +#endif /* CONFIG_TRACING */ + +/* * Display an IP address in readable format. */ @@ -378,18 +548,6 @@ static inline char *pack_hex_byte(char *buf, u8 byte) ((unsigned char *)&addr)[3] #define NIPQUAD_FMT "%u.%u.%u.%u" -#if defined(__LITTLE_ENDIAN) -#define HIPQUAD(addr) \ - ((unsigned char *)&addr)[3], \ - ((unsigned char *)&addr)[2], \ - ((unsigned char *)&addr)[1], \ - ((unsigned char *)&addr)[0] -#elif defined(__BIG_ENDIAN) -#define HIPQUAD NIPQUAD -#else -#error "Please fix asm/byteorder.h" -#endif /* __LITTLE_ENDIAN */ - /* * min()/max()/clamp() macros that also do * strict type-checking.. 
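To illustrate the printing helpers added to kernel.h above (printk_once(), pr_devel(), pr_cont() and the trace_printk() family), a small hypothetical driver function; none of this code comes from the patch:

#include <linux/kernel.h>
#include <linux/types.h>

static void demo_handle_status(u32 status)
{
        /* emitted at most once over the lifetime of the kernel */
        printk_once(KERN_INFO "demo: first status word %#x\n", status);

        /* compiled away (but still type-checked) unless DEBUG is defined */
        pr_devel("status=%#x\n", status);

        /* goes into the ftrace ring buffer rather than the printk log */
        trace_printk("fast path, status=%#x\n", status);
}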
See the @@ -480,7 +638,8 @@ static inline char *pack_hex_byte(char *buf, u8 byte) /* * swap - swap value of @a and @b */ -#define swap(a, b) ({ typeof(a) __tmp = (a); (a) = (b); (b) = __tmp; }) +#define swap(a, b) \ + do { typeof(a) __tmp = (a); (a) = (b); (b) = __tmp; } while (0) /** * container_of - cast a member of a structure out to the containing structure diff --git a/include/linux/kernel_stat.h b/include/linux/kernel_stat.h index 570d2041311..348fa8874b5 100644 --- a/include/linux/kernel_stat.h +++ b/include/linux/kernel_stat.h @@ -5,6 +5,7 @@ #include <linux/threads.h> #include <linux/percpu.h> #include <linux/cpumask.h> +#include <linux/interrupt.h> #include <asm/irq.h> #include <asm/cputime.h> @@ -28,9 +29,10 @@ struct cpu_usage_stat { struct kernel_stat { struct cpu_usage_stat cpustat; -#ifndef CONFIG_SPARSE_IRQ +#ifndef CONFIG_GENERIC_HARDIRQS unsigned int irqs[NR_IRQS]; #endif + unsigned int softirqs[NR_SOFTIRQS]; }; DECLARE_PER_CPU(struct kernel_stat, kstat); @@ -41,7 +43,7 @@ DECLARE_PER_CPU(struct kernel_stat, kstat); extern unsigned long long nr_context_switches(void); -#ifndef CONFIG_SPARSE_IRQ +#ifndef CONFIG_GENERIC_HARDIRQS #define kstat_irqs_this_cpu(irq) \ (kstat_this_cpu.irqs[irq]) @@ -52,18 +54,31 @@ static inline void kstat_incr_irqs_this_cpu(unsigned int irq, { kstat_this_cpu.irqs[irq]++; } -#endif - -#ifndef CONFIG_SPARSE_IRQ static inline unsigned int kstat_irqs_cpu(unsigned int irq, int cpu) { return kstat_cpu(cpu).irqs[irq]; } #else +#include <linux/irq.h> extern unsigned int kstat_irqs_cpu(unsigned int irq, int cpu); +#define kstat_irqs_this_cpu(DESC) \ + ((DESC)->kstat_irqs[smp_processor_id()]) +#define kstat_incr_irqs_this_cpu(irqno, DESC) \ + ((DESC)->kstat_irqs[smp_processor_id()]++) + #endif +static inline void kstat_incr_softirqs_this_cpu(unsigned int irq) +{ + kstat_this_cpu.softirqs[irq]++; +} + +static inline unsigned int kstat_softirqs_cpu(unsigned int irq, int cpu) +{ + return kstat_cpu(cpu).softirqs[irq]; +} + /* * Number of interrupts per specific IRQ source, since bootup */ @@ -78,7 +93,12 @@ static inline unsigned int kstat_irqs(unsigned int irq) return sum; } + +/* + * Lock/unlock the current runqueue - to extract task statistics: + */ extern unsigned long long task_delta_exec(struct task_struct *); + extern void account_user_time(struct task_struct *, cputime_t, cputime_t); extern void account_system_time(struct task_struct *, int, cputime_t, cputime_t); extern void account_steal_time(cputime_t); diff --git a/include/linux/key.h b/include/linux/key.h index 21d32a142c0..cd50dfa1d4c 100644 --- a/include/linux/key.h +++ b/include/linux/key.h @@ -20,6 +20,7 @@ #include <linux/rbtree.h> #include <linux/rcupdate.h> #include <linux/sysctl.h> +#include <linux/rwsem.h> #include <asm/atomic.h> #ifdef __KERNEL__ @@ -128,7 +129,10 @@ struct key { struct rw_semaphore sem; /* change vs change sem */ struct key_user *user; /* owner of this key */ void *security; /* security data for this key */ - time_t expiry; /* time at which key expires (or 0) */ + union { + time_t expiry; /* time at which key expires (or 0) */ + time_t revoked_at; /* time at which key was revoked */ + }; uid_t uid; gid_t gid; key_perm_t perm; /* access permissions */ @@ -274,6 +278,8 @@ static inline key_serial_t key_serial(struct key *key) extern ctl_table key_sysctls[]; #endif +extern void key_replace_session_keyring(void); + /* * the userspace interface */ @@ -296,6 +302,7 @@ extern void key_init(void); #define key_fsuid_changed(t) do { } while(0) #define key_fsgid_changed(t) do { } 
while(0) #define key_init() do { } while(0) +#define key_replace_session_keyring() do { } while(0) #endif /* CONFIG_KEYS */ #endif /* __KERNEL__ */ diff --git a/include/linux/keyboard.h b/include/linux/keyboard.h index a3c984d780f..33a63f62d57 100644 --- a/include/linux/keyboard.h +++ b/include/linux/keyboard.h @@ -56,6 +56,7 @@ extern int unregister_keyboard_notifier(struct notifier_block *nb); #define KT_ASCII 9 #define KT_LOCK 10 #define KT_SLOCK 12 +#define KT_DEAD2 13 #define KT_BRL 14 #define K(t,v) (((t)<<8)|(v)) diff --git a/include/linux/keyctl.h b/include/linux/keyctl.h index c0688eb7209..bd383f1944f 100644 --- a/include/linux/keyctl.h +++ b/include/linux/keyctl.h @@ -52,5 +52,6 @@ #define KEYCTL_SET_TIMEOUT 15 /* set key timeout */ #define KEYCTL_ASSUME_AUTHORITY 16 /* assume request_key() authorisation */ #define KEYCTL_GET_SECURITY 17 /* get key security label */ +#define KEYCTL_SESSION_TO_PARENT 18 /* apply session keyring to parent process */ #endif /* _LINUX_KEYCTL_H */ diff --git a/include/linux/klist.h b/include/linux/klist.h index d5a27af9dba..e91a4e59b77 100644 --- a/include/linux/klist.h +++ b/include/linux/klist.h @@ -22,7 +22,7 @@ struct klist { struct list_head k_list; void (*get)(struct klist_node *); void (*put)(struct klist_node *); -}; +} __attribute__ ((aligned (4))); #define KLIST_INIT(_name, _get, _put) \ { .k_lock = __SPIN_LOCK_UNLOCKED(_name.k_lock), \ diff --git a/include/linux/kmemcheck.h b/include/linux/kmemcheck.h new file mode 100644 index 00000000000..dc2fd545db0 --- /dev/null +++ b/include/linux/kmemcheck.h @@ -0,0 +1,160 @@ +#ifndef LINUX_KMEMCHECK_H +#define LINUX_KMEMCHECK_H + +#include <linux/mm_types.h> +#include <linux/types.h> + +#ifdef CONFIG_KMEMCHECK +extern int kmemcheck_enabled; + +/* The slab-related functions. 
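keyctl.h above gains KEYCTL_SESSION_TO_PARENT, with key.h growing the matching key_replace_session_keyring() hook. From userspace the operation appears to need no further arguments; a minimal sketch using the raw syscall, where the calling convention is my assumption rather than something spelled out in this diff:

#include <unistd.h>
#include <sys/syscall.h>
#include <linux/keyctl.h>

/* Ask the kernel to install our session keyring on our parent
 * (e.g. the shell that spawned this process).
 * Returns 0 on success, -1 with errno set on failure. */
static int demo_session_keyring_to_parent(void)
{
        return syscall(SYS_keyctl, KEYCTL_SESSION_TO_PARENT);
}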
*/ +void kmemcheck_alloc_shadow(struct page *page, int order, gfp_t flags, int node); +void kmemcheck_free_shadow(struct page *page, int order); +void kmemcheck_slab_alloc(struct kmem_cache *s, gfp_t gfpflags, void *object, + size_t size); +void kmemcheck_slab_free(struct kmem_cache *s, void *object, size_t size); + +void kmemcheck_pagealloc_alloc(struct page *p, unsigned int order, + gfp_t gfpflags); + +void kmemcheck_show_pages(struct page *p, unsigned int n); +void kmemcheck_hide_pages(struct page *p, unsigned int n); + +bool kmemcheck_page_is_tracked(struct page *p); + +void kmemcheck_mark_unallocated(void *address, unsigned int n); +void kmemcheck_mark_uninitialized(void *address, unsigned int n); +void kmemcheck_mark_initialized(void *address, unsigned int n); +void kmemcheck_mark_freed(void *address, unsigned int n); + +void kmemcheck_mark_unallocated_pages(struct page *p, unsigned int n); +void kmemcheck_mark_uninitialized_pages(struct page *p, unsigned int n); +void kmemcheck_mark_initialized_pages(struct page *p, unsigned int n); + +int kmemcheck_show_addr(unsigned long address); +int kmemcheck_hide_addr(unsigned long address); + +bool kmemcheck_is_obj_initialized(unsigned long addr, size_t size); + +#else +#define kmemcheck_enabled 0 + +static inline void +kmemcheck_alloc_shadow(struct page *page, int order, gfp_t flags, int node) +{ +} + +static inline void +kmemcheck_free_shadow(struct page *page, int order) +{ +} + +static inline void +kmemcheck_slab_alloc(struct kmem_cache *s, gfp_t gfpflags, void *object, + size_t size) +{ +} + +static inline void kmemcheck_slab_free(struct kmem_cache *s, void *object, + size_t size) +{ +} + +static inline void kmemcheck_pagealloc_alloc(struct page *p, + unsigned int order, gfp_t gfpflags) +{ +} + +static inline bool kmemcheck_page_is_tracked(struct page *p) +{ + return false; +} + +static inline void kmemcheck_mark_unallocated(void *address, unsigned int n) +{ +} + +static inline void kmemcheck_mark_uninitialized(void *address, unsigned int n) +{ +} + +static inline void kmemcheck_mark_initialized(void *address, unsigned int n) +{ +} + +static inline void kmemcheck_mark_freed(void *address, unsigned int n) +{ +} + +static inline void kmemcheck_mark_unallocated_pages(struct page *p, + unsigned int n) +{ +} + +static inline void kmemcheck_mark_uninitialized_pages(struct page *p, + unsigned int n) +{ +} + +static inline void kmemcheck_mark_initialized_pages(struct page *p, + unsigned int n) +{ +} + +static inline bool kmemcheck_is_obj_initialized(unsigned long addr, size_t size) +{ + return true; +} + +#endif /* CONFIG_KMEMCHECK */ + +/* + * Bitfield annotations + * + * How to use: If you have a struct using bitfields, for example + * + * struct a { + * int x:8, y:8; + * }; + * + * then this should be rewritten as + * + * struct a { + * kmemcheck_bitfield_begin(flags); + * int x:8, y:8; + * kmemcheck_bitfield_end(flags); + * }; + * + * Now the "flags_begin" and "flags_end" members may be used to refer to the + * beginning and end, respectively, of the bitfield (and things like + * &x.flags_begin is allowed). As soon as the struct is allocated, the bit- + * fields should be annotated: + * + * struct a *a = kmalloc(sizeof(struct a), GFP_KERNEL); + * kmemcheck_annotate_bitfield(a, flags); + * + * Note: We provide the same definitions for both kmemcheck and non- + * kmemcheck kernels. This makes it harder to introduce accidental errors. It + * is also allowed to pass NULL pointers to kmemcheck_annotate_bitfield(). 
+ */ +#define kmemcheck_bitfield_begin(name) \ + int name##_begin[0]; + +#define kmemcheck_bitfield_end(name) \ + int name##_end[0]; + +#define kmemcheck_annotate_bitfield(ptr, name) \ + do if (ptr) { \ + int _n = (long) &((ptr)->name##_end) \ + - (long) &((ptr)->name##_begin); \ + BUILD_BUG_ON(_n < 0); \ + \ + kmemcheck_mark_initialized(&((ptr)->name##_begin), _n); \ + } while (0) + +#define kmemcheck_annotate_variable(var) \ + do { \ + kmemcheck_mark_initialized(&(var), sizeof(var)); \ + } while (0) \ + +#endif /* LINUX_KMEMCHECK_H */ diff --git a/include/linux/kmemleak.h b/include/linux/kmemleak.h new file mode 100644 index 00000000000..3c7497d46ee --- /dev/null +++ b/include/linux/kmemleak.h @@ -0,0 +1,100 @@ +/* + * include/linux/kmemleak.h + * + * Copyright (C) 2008 ARM Limited + * Written by Catalin Marinas <catalin.marinas@arm.com> + * + * This program is free software; you can redistribute it and/or modify + * it under the terms of the GNU General Public License version 2 as + * published by the Free Software Foundation. + * + * This program is distributed in the hope that it will be useful, + * but WITHOUT ANY WARRANTY; without even the implied warranty of + * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the + * GNU General Public License for more details. + * + * You should have received a copy of the GNU General Public License + * along with this program; if not, write to the Free Software + * Foundation, Inc., 59 Temple Place, Suite 330, Boston, MA 02111-1307 USA + */ + +#ifndef __KMEMLEAK_H +#define __KMEMLEAK_H + +#ifdef CONFIG_DEBUG_KMEMLEAK + +extern void kmemleak_init(void) __ref; +extern void kmemleak_alloc(const void *ptr, size_t size, int min_count, + gfp_t gfp) __ref; +extern void kmemleak_free(const void *ptr) __ref; +extern void kmemleak_free_part(const void *ptr, size_t size) __ref; +extern void kmemleak_padding(const void *ptr, unsigned long offset, + size_t size) __ref; +extern void kmemleak_not_leak(const void *ptr) __ref; +extern void kmemleak_ignore(const void *ptr) __ref; +extern void kmemleak_scan_area(const void *ptr, unsigned long offset, + size_t length, gfp_t gfp) __ref; +extern void kmemleak_no_scan(const void *ptr) __ref; + +static inline void kmemleak_alloc_recursive(const void *ptr, size_t size, + int min_count, unsigned long flags, + gfp_t gfp) +{ + if (!(flags & SLAB_NOLEAKTRACE)) + kmemleak_alloc(ptr, size, min_count, gfp); +} + +static inline void kmemleak_free_recursive(const void *ptr, unsigned long flags) +{ + if (!(flags & SLAB_NOLEAKTRACE)) + kmemleak_free(ptr); +} + +static inline void kmemleak_erase(void **ptr) +{ + *ptr = NULL; +} + +#else + +static inline void kmemleak_init(void) +{ +} +static inline void kmemleak_alloc(const void *ptr, size_t size, int min_count, + gfp_t gfp) +{ +} +static inline void kmemleak_alloc_recursive(const void *ptr, size_t size, + int min_count, unsigned long flags, + gfp_t gfp) +{ +} +static inline void kmemleak_free(const void *ptr) +{ +} +static inline void kmemleak_free_part(const void *ptr, size_t size) +{ +} +static inline void kmemleak_free_recursive(const void *ptr, unsigned long flags) +{ +} +static inline void kmemleak_not_leak(const void *ptr) +{ +} +static inline void kmemleak_ignore(const void *ptr) +{ +} +static inline void kmemleak_scan_area(const void *ptr, unsigned long offset, + size_t length, gfp_t gfp) +{ +} +static inline void kmemleak_erase(void **ptr) +{ +} +static inline void kmemleak_no_scan(const void *ptr) +{ +} + +#endif /* CONFIG_DEBUG_KMEMLEAK */ + +#endif /* 
__KMEMLEAK_H */ diff --git a/include/linux/kmemtrace.h b/include/linux/kmemtrace.h new file mode 100644 index 00000000000..b616d3930c3 --- /dev/null +++ b/include/linux/kmemtrace.h @@ -0,0 +1,25 @@ +/* + * Copyright (C) 2008 Eduard - Gabriel Munteanu + * + * This file is released under GPL version 2. + */ + +#ifndef _LINUX_KMEMTRACE_H +#define _LINUX_KMEMTRACE_H + +#ifdef __KERNEL__ + +#include <trace/events/kmem.h> + +#ifdef CONFIG_KMEMTRACE +extern void kmemtrace_init(void); +#else +static inline void kmemtrace_init(void) +{ +} +#endif + +#endif /* __KERNEL__ */ + +#endif /* _LINUX_KMEMTRACE_H */ + diff --git a/include/linux/kmod.h b/include/linux/kmod.h index 92213a9194e..384ca8bbf1a 100644 --- a/include/linux/kmod.h +++ b/include/linux/kmod.h @@ -29,10 +29,15 @@ #ifdef CONFIG_MODULES /* modprobe exit status on success, -ve on error. Return value * usually useless though. */ -extern int request_module(const char * name, ...) __attribute__ ((format (printf, 1, 2))); -#define try_then_request_module(x, mod...) ((x) ?: (request_module(mod), (x))) +extern int __request_module(bool wait, const char *name, ...) \ + __attribute__((format(printf, 2, 3))); +#define request_module(mod...) __request_module(true, mod) +#define request_module_nowait(mod...) __request_module(false, mod) +#define try_then_request_module(x, mod...) \ + ((x) ?: (__request_module(true, mod), (x))) #else -static inline int request_module(const char * name, ...) { return -ENOSYS; } +static inline int request_module(const char *name, ...) { return -ENOSYS; } +static inline int request_module_nowait(const char *name, ...) { return -ENOSYS; } #define try_then_request_module(x, mod...) (x) #endif diff --git a/include/linux/kobject.h b/include/linux/kobject.h index 5437ac0276e..58ae8e00fcd 100644 --- a/include/linux/kobject.h +++ b/include/linux/kobject.h @@ -68,10 +68,13 @@ struct kobject { unsigned int state_in_sysfs:1; unsigned int state_add_uevent_sent:1; unsigned int state_remove_uevent_sent:1; + unsigned int uevent_suppress:1; }; extern int kobject_set_name(struct kobject *kobj, const char *name, ...) __attribute__((format(printf, 2, 3))); +extern int kobject_set_name_vargs(struct kobject *kobj, const char *fmt, + va_list vargs); static inline const char *kobject_name(const struct kobject *kobj) { diff --git a/include/linux/kprobes.h b/include/linux/kprobes.h index d6ea19e314b..bcd9c07848b 100644 --- a/include/linux/kprobes.h +++ b/include/linux/kprobes.h @@ -49,6 +49,13 @@ /* Attach to insert probes on any functions which should be ignored*/ #define __kprobes __attribute__((__section__(".kprobes.text"))) notrace +#else /* CONFIG_KPROBES */ +typedef int kprobe_opcode_t; +struct arch_specific_insn { + int dummy; +}; +#define __kprobes notrace +#endif /* CONFIG_KPROBES */ struct kprobe; struct pt_regs; @@ -87,12 +94,16 @@ struct kprobe { /* Called after addr is executed, unless... */ kprobe_post_handler_t post_handler; - /* ... called if executing addr causes a fault (eg. page fault). - * Return 1 if it handled fault, otherwise kernel will see it. */ + /* + * ... called if executing addr causes a fault (eg. page fault). + * Return 1 if it handled fault, otherwise kernel will see it. + */ kprobe_fault_handler_t fault_handler; - /* ... called if breakpoint trap occurs in probe handler. - * Return 1 if it handled break, otherwise kernel will see it. */ + /* + * ... called if breakpoint trap occurs in probe handler. + * Return 1 if it handled break, otherwise kernel will see it. 
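kmod.h above reworks module loading around __request_module(wait, ...): request_module() stays synchronous while the new request_module_nowait() fires off the usermode helper without waiting for it. Two hypothetical callers by way of illustration:

#include <linux/kmod.h>

static int demo_load_fs_module(const char *fstype)
{
        /* waits for modprobe; returns its exit status (usually best
         * ignored), or a negative errno on failure to launch it */
        return request_module("fs-%s", fstype);
}

static void demo_kick_optional_module(const char *name)
{
        /* does not wait for modprobe to complete */
        request_module_nowait("%s", name);
}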
+ */ kprobe_break_handler_t break_handler; /* Saved opcode (which has been replaced with breakpoint) */ @@ -101,18 +112,28 @@ struct kprobe { /* copy of the original instruction */ struct arch_specific_insn ainsn; - /* Indicates various status flags. Protected by kprobe_mutex. */ + /* + * Indicates various status flags. + * Protected by kprobe_mutex after this kprobe is registered. + */ u32 flags; }; /* Kprobe status flags */ #define KPROBE_FLAG_GONE 1 /* breakpoint has already gone */ +#define KPROBE_FLAG_DISABLED 2 /* probe is temporarily disabled */ +/* Has this kprobe gone ? */ static inline int kprobe_gone(struct kprobe *p) { return p->flags & KPROBE_FLAG_GONE; } +/* Is this kprobe disabled ? */ +static inline int kprobe_disabled(struct kprobe *p) +{ + return p->flags & (KPROBE_FLAG_DISABLED | KPROBE_FLAG_GONE); +} /* * Special probe type that uses setjmp-longjmp type tricks to resume * execution at a specified entry with a matching prototype corresponding @@ -131,23 +152,6 @@ struct jprobe { /* For backward compatibility with old code using JPROBE_ENTRY() */ #define JPROBE_ENTRY(handler) (handler) -DECLARE_PER_CPU(struct kprobe *, current_kprobe); -DECLARE_PER_CPU(struct kprobe_ctlblk, kprobe_ctlblk); - -#ifdef CONFIG_KRETPROBES -extern void arch_prepare_kretprobe(struct kretprobe_instance *ri, - struct pt_regs *regs); -extern int arch_trampoline_kprobe(struct kprobe *p); -#else /* CONFIG_KRETPROBES */ -static inline void arch_prepare_kretprobe(struct kretprobe *rp, - struct pt_regs *regs) -{ -} -static inline int arch_trampoline_kprobe(struct kprobe *p) -{ - return 0; -} -#endif /* CONFIG_KRETPROBES */ /* * Function-return probe - * Note: @@ -188,6 +192,33 @@ struct kprobe_blackpoint { unsigned long range; }; +#ifdef CONFIG_KPROBES +DECLARE_PER_CPU(struct kprobe *, current_kprobe); +DECLARE_PER_CPU(struct kprobe_ctlblk, kprobe_ctlblk); + +/* + * For #ifdef avoidance: + */ +static inline int kprobes_built_in(void) +{ + return 1; +} + +#ifdef CONFIG_KRETPROBES +extern void arch_prepare_kretprobe(struct kretprobe_instance *ri, + struct pt_regs *regs); +extern int arch_trampoline_kprobe(struct kprobe *p); +#else /* CONFIG_KRETPROBES */ +static inline void arch_prepare_kretprobe(struct kretprobe *rp, + struct pt_regs *regs) +{ +} +static inline int arch_trampoline_kprobe(struct kprobe *p) +{ + return 0; +} +#endif /* CONFIG_KRETPROBES */ + extern struct kretprobe_blackpoint kretprobe_blacklist[]; static inline void kretprobe_assert(struct kretprobe_instance *ri, @@ -262,12 +293,19 @@ void unregister_kretprobes(struct kretprobe **rps, int num); void kprobe_flush_task(struct task_struct *tk); void recycle_rp_inst(struct kretprobe_instance *ri, struct hlist_head *head); -#else /* CONFIG_KPROBES */ +int disable_kprobe(struct kprobe *kp); +int enable_kprobe(struct kprobe *kp); -#define __kprobes notrace -struct jprobe; -struct kretprobe; +#else /* !CONFIG_KPROBES: */ +static inline int kprobes_built_in(void) +{ + return 0; +} +static inline int kprobe_fault_handler(struct pt_regs *regs, int trapnr) +{ + return 0; +} static inline struct kprobe *get_kprobe(void *addr) { return NULL; @@ -324,5 +362,30 @@ static inline void unregister_kretprobes(struct kretprobe **rps, int num) static inline void kprobe_flush_task(struct task_struct *tk) { } -#endif /* CONFIG_KPROBES */ -#endif /* _LINUX_KPROBES_H */ +static inline int disable_kprobe(struct kprobe *kp) +{ + return -ENOSYS; +} +static inline int enable_kprobe(struct kprobe *kp) +{ + return -ENOSYS; +} +#endif /* CONFIG_KPROBES */ +static inline 
int disable_kretprobe(struct kretprobe *rp) +{ + return disable_kprobe(&rp->kp); +} +static inline int enable_kretprobe(struct kretprobe *rp) +{ + return enable_kprobe(&rp->kp); +} +static inline int disable_jprobe(struct jprobe *jp) +{ + return disable_kprobe(&jp->kp); +} +static inline int enable_jprobe(struct jprobe *jp) +{ + return enable_kprobe(&jp->kp); +} + +#endif /* _LINUX_KPROBES_H */ diff --git a/include/linux/kvm.h b/include/linux/kvm.h index 5715f190760..3db5d8d3748 100644 --- a/include/linux/kvm.h +++ b/include/linux/kvm.h @@ -48,7 +48,10 @@ struct kvm_irq_level { * For IA-64 (APIC model) IOAPIC0: irq 0-23; IOAPIC1: irq 24-47.. * For X86 (standard AT mode) PIC0/1: irq 0-15. IOAPIC0: 0-23.. */ - __u32 irq; + union { + __u32 irq; + __s32 status; + }; __u32 level; }; @@ -58,10 +61,10 @@ struct kvm_irqchip { __u32 pad; union { char dummy[512]; /* reserving space */ -#ifdef CONFIG_X86 +#ifdef __KVM_HAVE_PIT struct kvm_pic_state pic; #endif -#if defined(CONFIG_X86) || defined(CONFIG_IA64) +#ifdef __KVM_HAVE_IOAPIC struct kvm_ioapic_state ioapic; #endif } chip; @@ -116,7 +119,7 @@ struct kvm_run { __u32 error_code; } ex; /* KVM_EXIT_IO */ - struct kvm_io { + struct { #define KVM_EXIT_IO_IN 0 #define KVM_EXIT_IO_OUT 1 __u8 direction; @@ -126,6 +129,7 @@ struct kvm_run { __u64 data_offset; /* relative to kvm_run start */ } io; struct { + struct kvm_debug_exit_arch arch; } debug; /* KVM_EXIT_MMIO */ struct { @@ -217,28 +221,13 @@ struct kvm_interrupt { __u32 irq; }; -struct kvm_breakpoint { - __u32 enabled; - __u32 padding; - __u64 address; -}; - -/* for KVM_DEBUG_GUEST */ -struct kvm_debug_guest { - /* int */ - __u32 enabled; - __u32 pad; - struct kvm_breakpoint breakpoints[4]; - __u32 singlestep; -}; - /* for KVM_GET_DIRTY_LOG */ struct kvm_dirty_log { __u32 slot; - __u32 padding; + __u32 padding1; union { void __user *dirty_bitmap; /* one bit per page */ - __u64 padding; + __u64 padding2; }; }; @@ -292,6 +281,17 @@ struct kvm_s390_interrupt { __u64 parm64; }; +/* for KVM_SET_GUEST_DEBUG */ + +#define KVM_GUESTDBG_ENABLE 0x00000001 +#define KVM_GUESTDBG_SINGLESTEP 0x00000002 + +struct kvm_guest_debug { + __u32 control; + __u32 pad; + struct kvm_guest_debug_arch arch; +}; + #define KVM_TRC_SHIFT 16 /* * kvm trace categories @@ -384,18 +384,75 @@ struct kvm_trace_rec { #define KVM_CAP_MP_STATE 14 #define KVM_CAP_COALESCED_MMIO 15 #define KVM_CAP_SYNC_MMU 16 /* Changes to host mmap are reflected in guest */ -#if defined(CONFIG_X86)||defined(CONFIG_IA64) +#ifdef __KVM_HAVE_DEVICE_ASSIGNMENT #define KVM_CAP_DEVICE_ASSIGNMENT 17 #endif #define KVM_CAP_IOMMU 18 -#if defined(CONFIG_X86) +#ifdef __KVM_HAVE_MSI #define KVM_CAP_DEVICE_MSI 20 #endif /* Bug in KVM_SET_USER_MEMORY_REGION fixed: */ #define KVM_CAP_DESTROY_MEMORY_REGION_WORKS 21 -#if defined(CONFIG_X86) +#ifdef __KVM_HAVE_USER_NMI #define KVM_CAP_USER_NMI 22 #endif +#ifdef __KVM_HAVE_GUEST_DEBUG +#define KVM_CAP_SET_GUEST_DEBUG 23 +#endif +#ifdef __KVM_HAVE_PIT +#define KVM_CAP_REINJECT_CONTROL 24 +#endif +#ifdef __KVM_HAVE_IOAPIC +#define KVM_CAP_IRQ_ROUTING 25 +#endif +#define KVM_CAP_IRQ_INJECT_STATUS 26 +#ifdef __KVM_HAVE_DEVICE_ASSIGNMENT +#define KVM_CAP_DEVICE_DEASSIGNMENT 27 +#endif +#ifdef __KVM_HAVE_MSIX +#define KVM_CAP_DEVICE_MSIX 28 +#endif +#define KVM_CAP_ASSIGN_DEV_IRQ 29 +/* Another bug in KVM_SET_USER_MEMORY_REGION fixed: */ +#define KVM_CAP_JOIN_MEMORY_REGIONS_WORKS 30 + +#ifdef KVM_CAP_IRQ_ROUTING + +struct kvm_irq_routing_irqchip { + __u32 irqchip; + __u32 pin; +}; + +struct kvm_irq_routing_msi { + __u32 
address_lo; + __u32 address_hi; + __u32 data; + __u32 pad; +}; + +/* gsi routing entry types */ +#define KVM_IRQ_ROUTING_IRQCHIP 1 +#define KVM_IRQ_ROUTING_MSI 2 + +struct kvm_irq_routing_entry { + __u32 gsi; + __u32 type; + __u32 flags; + __u32 pad; + union { + struct kvm_irq_routing_irqchip irqchip; + struct kvm_irq_routing_msi msi; + __u32 pad[8]; + } u; +}; + +struct kvm_irq_routing { + __u32 nr; + __u32 flags; + struct kvm_irq_routing_entry entries[0]; +}; + +#endif /* * ioctls for VM fds @@ -421,14 +478,26 @@ struct kvm_trace_rec { #define KVM_CREATE_PIT _IO(KVMIO, 0x64) #define KVM_GET_PIT _IOWR(KVMIO, 0x65, struct kvm_pit_state) #define KVM_SET_PIT _IOR(KVMIO, 0x66, struct kvm_pit_state) +#define KVM_IRQ_LINE_STATUS _IOWR(KVMIO, 0x67, struct kvm_irq_level) #define KVM_REGISTER_COALESCED_MMIO \ _IOW(KVMIO, 0x67, struct kvm_coalesced_mmio_zone) #define KVM_UNREGISTER_COALESCED_MMIO \ _IOW(KVMIO, 0x68, struct kvm_coalesced_mmio_zone) #define KVM_ASSIGN_PCI_DEVICE _IOR(KVMIO, 0x69, \ struct kvm_assigned_pci_dev) +#define KVM_SET_GSI_ROUTING _IOW(KVMIO, 0x6a, struct kvm_irq_routing) +/* deprecated, replaced by KVM_ASSIGN_DEV_IRQ */ #define KVM_ASSIGN_IRQ _IOR(KVMIO, 0x70, \ struct kvm_assigned_irq) +#define KVM_ASSIGN_DEV_IRQ _IOW(KVMIO, 0x70, struct kvm_assigned_irq) +#define KVM_REINJECT_CONTROL _IO(KVMIO, 0x71) +#define KVM_DEASSIGN_PCI_DEVICE _IOW(KVMIO, 0x72, \ + struct kvm_assigned_pci_dev) +#define KVM_ASSIGN_SET_MSIX_NR \ + _IOW(KVMIO, 0x73, struct kvm_assigned_msix_nr) +#define KVM_ASSIGN_SET_MSIX_ENTRY \ + _IOW(KVMIO, 0x74, struct kvm_assigned_msix_entry) +#define KVM_DEASSIGN_DEV_IRQ _IOW(KVMIO, 0x75, struct kvm_assigned_irq) /* * ioctls for vcpu fds @@ -440,7 +509,8 @@ struct kvm_trace_rec { #define KVM_SET_SREGS _IOW(KVMIO, 0x84, struct kvm_sregs) #define KVM_TRANSLATE _IOWR(KVMIO, 0x85, struct kvm_translation) #define KVM_INTERRUPT _IOW(KVMIO, 0x86, struct kvm_interrupt) -#define KVM_DEBUG_GUEST _IOW(KVMIO, 0x87, struct kvm_debug_guest) +/* KVM_DEBUG_GUEST is no longer supported, use KVM_SET_GUEST_DEBUG instead */ +#define KVM_DEBUG_GUEST __KVM_DEPRECATED_DEBUG_GUEST #define KVM_GET_MSRS _IOWR(KVMIO, 0x88, struct kvm_msrs) #define KVM_SET_MSRS _IOW(KVMIO, 0x89, struct kvm_msrs) #define KVM_SET_CPUID _IOW(KVMIO, 0x8a, struct kvm_cpuid) @@ -469,6 +539,29 @@ struct kvm_trace_rec { #define KVM_SET_MP_STATE _IOW(KVMIO, 0x99, struct kvm_mp_state) /* Available with KVM_CAP_NMI */ #define KVM_NMI _IO(KVMIO, 0x9a) +/* Available with KVM_CAP_SET_GUEST_DEBUG */ +#define KVM_SET_GUEST_DEBUG _IOW(KVMIO, 0x9b, struct kvm_guest_debug) + +/* + * Deprecated interfaces + */ +struct kvm_breakpoint { + __u32 enabled; + __u32 padding; + __u64 address; +}; + +struct kvm_debug_guest { + __u32 enabled; + __u32 pad; + struct kvm_breakpoint breakpoints[4]; + __u32 singlestep; +}; + +#define __KVM_DEPRECATED_DEBUG_GUEST _IOW(KVMIO, 0x87, struct kvm_debug_guest) + +#define KVM_IA64_VCPU_GET_STACK _IOR(KVMIO, 0x9a, void *) +#define KVM_IA64_VCPU_SET_STACK _IOW(KVMIO, 0x9b, void *) #define KVM_TRC_INJ_VIRQ (KVM_TRC_HANDLER + 0x02) #define KVM_TRC_REDELIVER_EVT (KVM_TRC_HANDLER + 0x03) @@ -495,6 +588,8 @@ struct kvm_trace_rec { #define KVM_TRC_STLB_INVAL (KVM_TRC_HANDLER + 0x18) #define KVM_TRC_PPC_INSTR (KVM_TRC_HANDLER + 0x19) +#define KVM_DEV_ASSIGN_ENABLE_IOMMU (1 << 0) + struct kvm_assigned_pci_dev { __u32 assigned_dev_id; __u32 busnr; @@ -505,6 +600,17 @@ struct kvm_assigned_pci_dev { }; }; +#define KVM_DEV_IRQ_HOST_INTX (1 << 0) +#define KVM_DEV_IRQ_HOST_MSI (1 << 1) +#define 
KVM_DEV_IRQ_HOST_MSIX (1 << 2) + +#define KVM_DEV_IRQ_GUEST_INTX (1 << 8) +#define KVM_DEV_IRQ_GUEST_MSI (1 << 9) +#define KVM_DEV_IRQ_GUEST_MSIX (1 << 10) + +#define KVM_DEV_IRQ_HOST_MASK 0x00ff +#define KVM_DEV_IRQ_GUEST_MASK 0xff00 + struct kvm_assigned_irq { __u32 assigned_dev_id; __u32 host_irq; @@ -520,8 +626,19 @@ struct kvm_assigned_irq { }; }; -#define KVM_DEV_ASSIGN_ENABLE_IOMMU (1 << 0) -#define KVM_DEV_IRQ_ASSIGN_ENABLE_MSI (1 << 0) +struct kvm_assigned_msix_nr { + __u32 assigned_dev_id; + __u16 entry_nr; + __u16 padding; +}; + +#define KVM_MAX_MSIX_PER_DEV 512 +struct kvm_assigned_msix_entry { + __u32 assigned_dev_id; + __u32 gsi; + __u16 entry; /* The index of entry in the MSI-X table */ + __u16 padding[3]; +}; #endif diff --git a/include/linux/kvm_host.h b/include/linux/kvm_host.h index ec49d0be7f5..3060bdc35ff 100644 --- a/include/linux/kvm_host.h +++ b/include/linux/kvm_host.h @@ -37,6 +37,8 @@ #define KVM_REQ_PENDING_TIMER 5 #define KVM_REQ_UNHALT 6 #define KVM_REQ_MMU_SYNC 7 +#define KVM_REQ_KVMCLOCK_UPDATE 8 +#define KVM_REQ_KICK 9 #define KVM_USERSPACE_IRQ_SOURCE_ID 0 @@ -71,9 +73,8 @@ struct kvm_vcpu { struct mutex mutex; int cpu; struct kvm_run *run; - int guest_mode; unsigned long requests; - struct kvm_guest_debug guest_debug; + unsigned long guest_debug; int fpu_active; int guest_fpu_loaded; wait_queue_head_t wq; @@ -107,9 +108,25 @@ struct kvm_memory_slot { int user_alloc; }; +struct kvm_kernel_irq_routing_entry { + u32 gsi; + u32 type; + int (*set)(struct kvm_kernel_irq_routing_entry *e, + struct kvm *kvm, int level); + union { + struct { + unsigned irqchip; + unsigned pin; + } irqchip; + struct msi_msg msi; + }; + struct list_head link; +}; + struct kvm { struct mutex lock; /* protects the vcpus array and APIC accesses */ spinlock_t mmu_lock; + spinlock_t requests_lock; struct rw_semaphore slots_lock; struct mm_struct *mm; /* userspace tied to this vm */ int nmemslots; @@ -127,6 +144,11 @@ struct kvm { struct kvm_coalesced_mmio_ring *coalesced_mmio_ring; #endif +#ifdef CONFIG_HAVE_KVM_IRQCHIP + struct list_head irq_routing; /* of kvm_kernel_irq_routing_entry */ + struct hlist_head mask_notifier_list; +#endif + #ifdef KVM_ARCH_WANT_MMU_NOTIFIER struct mmu_notifier mmu_notifier; unsigned long mmu_notifier_seq; @@ -237,7 +259,6 @@ int kvm_vm_ioctl_set_memory_region(struct kvm *kvm, int user_alloc); long kvm_arch_vm_ioctl(struct file *filp, unsigned int ioctl, unsigned long arg); -void kvm_arch_destroy_vm(struct kvm *kvm); int kvm_arch_vcpu_ioctl_get_fpu(struct kvm_vcpu *vcpu, struct kvm_fpu *fpu); int kvm_arch_vcpu_ioctl_set_fpu(struct kvm_vcpu *vcpu, struct kvm_fpu *fpu); @@ -255,8 +276,8 @@ int kvm_arch_vcpu_ioctl_get_mpstate(struct kvm_vcpu *vcpu, struct kvm_mp_state *mp_state); int kvm_arch_vcpu_ioctl_set_mpstate(struct kvm_vcpu *vcpu, struct kvm_mp_state *mp_state); -int kvm_arch_vcpu_ioctl_debug_guest(struct kvm_vcpu *vcpu, - struct kvm_debug_guest *dbg); +int kvm_arch_vcpu_ioctl_set_guest_debug(struct kvm_vcpu *vcpu, + struct kvm_guest_debug *dbg); int kvm_arch_vcpu_ioctl_run(struct kvm_vcpu *vcpu, struct kvm_run *kvm_run); int kvm_arch_init(void *opaque); @@ -279,12 +300,14 @@ int kvm_arch_hardware_setup(void); void kvm_arch_hardware_unsetup(void); void kvm_arch_check_processor_compat(void *rtn); int kvm_arch_vcpu_runnable(struct kvm_vcpu *vcpu); +int kvm_arch_interrupt_allowed(struct kvm_vcpu *vcpu); void kvm_free_physmem(struct kvm *kvm); struct kvm *kvm_arch_create_vm(void); void kvm_arch_destroy_vm(struct kvm *kvm); void 
kvm_free_all_assigned_devices(struct kvm *kvm); +void kvm_arch_sync_events(struct kvm *kvm); int kvm_cpu_get_interrupt(struct kvm_vcpu *v); int kvm_cpu_has_interrupt(struct kvm_vcpu *v); @@ -299,6 +322,13 @@ struct kvm_irq_ack_notifier { void (*irq_acked)(struct kvm_irq_ack_notifier *kian); }; +#define KVM_ASSIGNED_MSIX_PENDING 0x1 +struct kvm_guest_msix_entry { + u32 vector; + u16 entry; + u16 flags; +}; + struct kvm_assigned_dev_kernel { struct kvm_irq_ack_notifier ack_notifier; struct work_struct interrupt_work; @@ -306,28 +336,43 @@ struct kvm_assigned_dev_kernel { int assigned_dev_id; int host_busnr; int host_devfn; + unsigned int entries_nr; int host_irq; bool host_irq_disabled; + struct msix_entry *host_msix_entries; int guest_irq; - struct msi_msg guest_msi; -#define KVM_ASSIGNED_DEV_GUEST_INTX (1 << 0) -#define KVM_ASSIGNED_DEV_GUEST_MSI (1 << 1) -#define KVM_ASSIGNED_DEV_HOST_INTX (1 << 8) -#define KVM_ASSIGNED_DEV_HOST_MSI (1 << 9) + struct kvm_guest_msix_entry *guest_msix_entries; unsigned long irq_requested_type; int irq_source_id; int flags; struct pci_dev *dev; struct kvm *kvm; + spinlock_t assigned_dev_lock; }; -void kvm_set_irq(struct kvm *kvm, int irq_source_id, int irq, int level); -void kvm_notify_acked_irq(struct kvm *kvm, unsigned gsi); + +struct kvm_irq_mask_notifier { + void (*func)(struct kvm_irq_mask_notifier *kimn, bool masked); + int irq; + struct hlist_node link; +}; + +void kvm_register_irq_mask_notifier(struct kvm *kvm, int irq, + struct kvm_irq_mask_notifier *kimn); +void kvm_unregister_irq_mask_notifier(struct kvm *kvm, int irq, + struct kvm_irq_mask_notifier *kimn); +void kvm_fire_mask_notifiers(struct kvm *kvm, int irq, bool mask); + +int kvm_set_irq(struct kvm *kvm, int irq_source_id, int irq, int level); +void kvm_notify_acked_irq(struct kvm *kvm, unsigned irqchip, unsigned pin); void kvm_register_irq_ack_notifier(struct kvm *kvm, struct kvm_irq_ack_notifier *kian); void kvm_unregister_irq_ack_notifier(struct kvm_irq_ack_notifier *kian); int kvm_request_irq_source_id(struct kvm *kvm); void kvm_free_irq_source_id(struct kvm *kvm, int irq_source_id); +/* For vcpu->arch.iommu_flags */ +#define KVM_IOMMU_CACHE_COHERENCY 0x1 + #ifdef CONFIG_IOMMU_API int kvm_iommu_map_pages(struct kvm *kvm, gfn_t base_gfn, unsigned long npages); @@ -463,4 +508,21 @@ static inline int mmu_notifier_retry(struct kvm_vcpu *vcpu, unsigned long mmu_se } #endif +#ifdef CONFIG_HAVE_KVM_IRQCHIP + +#define KVM_MAX_IRQ_ROUTES 1024 + +int kvm_setup_default_irq_routing(struct kvm *kvm); +int kvm_set_irq_routing(struct kvm *kvm, + const struct kvm_irq_routing_entry *entries, + unsigned nr, + unsigned flags); +void kvm_free_irq_routing(struct kvm *kvm); + +#else + +static inline void kvm_free_irq_routing(struct kvm *kvm) {} + +#endif + #endif diff --git a/include/linux/kvm_types.h b/include/linux/kvm_types.h index 9b6f395c962..fb46efbeabe 100644 --- a/include/linux/kvm_types.h +++ b/include/linux/kvm_types.h @@ -40,17 +40,31 @@ typedef unsigned long hfn_t; typedef hfn_t pfn_t; -struct kvm_pio_request { - unsigned long count; - int cur_count; - struct page *guest_pages[2]; - unsigned guest_page_offset; - int in; - int port; - int size; - int string; - int down; - int rep; +union kvm_ioapic_redirect_entry { + u64 bits; + struct { + u8 vector; + u8 delivery_mode:3; + u8 dest_mode:1; + u8 delivery_status:1; + u8 polarity:1; + u8 remote_irr:1; + u8 trig_mode:1; + u8 mask:1; + u8 reserve:7; + u8 reserved[4]; + u8 dest_id; + } fields; +}; + +struct kvm_lapic_irq { + u32 vector; + u32 
delivery_mode; + u32 dest_mode; + u32 level; + u32 trig_mode; + u32 shorthand; + u32 dest_id; }; #endif /* __KVM_TYPES_H__ */ diff --git a/include/linux/latencytop.h b/include/linux/latencytop.h index 901c2d6377a..b0e99898527 100644 --- a/include/linux/latencytop.h +++ b/include/linux/latencytop.h @@ -9,6 +9,7 @@ #ifndef _INCLUDE_GUARD_LATENCYTOP_H_ #define _INCLUDE_GUARD_LATENCYTOP_H_ +#include <linux/compiler.h> #ifdef CONFIG_LATENCYTOP #define LT_SAVECOUNT 32 @@ -24,7 +25,14 @@ struct latency_record { struct task_struct; -void account_scheduler_latency(struct task_struct *task, int usecs, int inter); +extern int latencytop_enabled; +void __account_scheduler_latency(struct task_struct *task, int usecs, int inter); +static inline void +account_scheduler_latency(struct task_struct *task, int usecs, int inter) +{ + if (unlikely(latencytop_enabled)) + __account_scheduler_latency(task, usecs, inter); +} void clear_all_latency_tracing(struct task_struct *p); diff --git a/include/linux/leds-bd2802.h b/include/linux/leds-bd2802.h new file mode 100644 index 00000000000..42f854a1a19 --- /dev/null +++ b/include/linux/leds-bd2802.h @@ -0,0 +1,26 @@ +/* + * leds-bd2802.h - RGB LED Driver + * + * Copyright (C) 2009 Samsung Electronics + * Kim Kyuwon <q1.kim@samsung.com> + * + * This program is free software; you can redistribute it and/or modify + * it under the terms of the GNU General Public License version 2 as + * published by the Free Software Foundation. + * + * Datasheet: http://www.rohm.com/products/databook/driver/pdf/bd2802gu-e.pdf + * + */ +#ifndef _LEDS_BD2802_H_ +#define _LEDS_BD2802_H_ + +struct bd2802_led_platform_data{ + int reset_gpio; + u8 rgb_time; +}; + +#define RGB_TIME(slopedown, slopeup, waveform) \ + ((slopedown) << 6 | (slopeup) << 4 | (waveform)) + +#endif /* _LEDS_BD2802_H_ */ + diff --git a/include/linux/leds-lp3944.h b/include/linux/leds-lp3944.h new file mode 100644 index 00000000000..afc9f9fd70f --- /dev/null +++ b/include/linux/leds-lp3944.h @@ -0,0 +1,53 @@ +/* + * leds-lp3944.h - platform data structure for lp3944 led controller + * + * Copyright (C) 2009 Antonio Ospite <ospite@studenti.unina.it> + * + * This program is free software; you can redistribute it and/or modify + * it under the terms of the GNU General Public License version 2 as + * published by the Free Software Foundation. 
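For the leds-bd2802 platform data introduced just above, a hedged sketch of board code handing it to the I2C driver; the GPIO number, timing values, bus number and slave address are all illustrative:

#include <linux/i2c.h>
#include <linux/leds-bd2802.h>

static struct bd2802_led_platform_data example_bd2802_data = {
	.reset_gpio	= 130,			/* hypothetical reset GPIO */
	.rgb_time	= RGB_TIME(3, 2, 1),	/* slopedown, slopeup, waveform */
};

static struct i2c_board_info example_i2c_devs[] __initdata = {
	{
		I2C_BOARD_INFO("BD2802", 0x1a),	/* illustrative slave address */
		.platform_data = &example_bd2802_data,
	},
};

/* typically registered from machine init code, e.g.:
 *	i2c_register_board_info(0, example_i2c_devs, ARRAY_SIZE(example_i2c_devs));
 */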
+ * + */ + +#ifndef __LINUX_LEDS_LP3944_H +#define __LINUX_LEDS_LP3944_H + +#include <linux/leds.h> +#include <linux/workqueue.h> + +#define LP3944_LED0 0 +#define LP3944_LED1 1 +#define LP3944_LED2 2 +#define LP3944_LED3 3 +#define LP3944_LED4 4 +#define LP3944_LED5 5 +#define LP3944_LED6 6 +#define LP3944_LED7 7 +#define LP3944_LEDS_MAX 8 + +#define LP3944_LED_STATUS_MASK 0x03 +enum lp3944_status { + LP3944_LED_STATUS_OFF = 0x0, + LP3944_LED_STATUS_ON = 0x1, + LP3944_LED_STATUS_DIM0 = 0x2, + LP3944_LED_STATUS_DIM1 = 0x3 +}; + +enum lp3944_type { + LP3944_LED_TYPE_NONE, + LP3944_LED_TYPE_LED, + LP3944_LED_TYPE_LED_INVERTED, +}; + +struct lp3944_led { + char *name; + enum lp3944_type type; + enum lp3944_status status; +}; + +struct lp3944_platform_data { + struct lp3944_led leds[LP3944_LEDS_MAX]; + u8 leds_size; +}; + +#endif /* __LINUX_LEDS_LP3944_H */ diff --git a/include/linux/leds.h b/include/linux/leds.h index 24489da701e..d8bf9665e70 100644 --- a/include/linux/leds.h +++ b/include/linux/leds.h @@ -30,6 +30,7 @@ enum led_brightness { struct led_classdev { const char *name; int brightness; + int max_brightness; int flags; /* Lower 16 bits reflect status */ @@ -44,7 +45,10 @@ struct led_classdev { /* Get LED brightness level */ enum led_brightness (*brightness_get)(struct led_classdev *led_cdev); - /* Activate hardware accelerated blink */ + /* Activate hardware accelerated blink, delays are in + * miliseconds and if none is provided then a sensible default + * should be chosen. The call can adjust the timings if it can't + * match the values specified exactly. */ int (*blink_set)(struct led_classdev *led_cdev, unsigned long *delay_on, unsigned long *delay_off); @@ -140,8 +144,14 @@ struct gpio_led { const char *name; const char *default_trigger; unsigned gpio; - u8 active_low; + unsigned active_low : 1; + unsigned retain_state_suspended : 1; + unsigned default_state : 2; + /* default_state should be one of LEDS_GPIO_DEFSTATE_(ON|OFF|KEEP) */ }; +#define LEDS_GPIO_DEFSTATE_OFF 0 +#define LEDS_GPIO_DEFSTATE_ON 1 +#define LEDS_GPIO_DEFSTATE_KEEP 2 struct gpio_led_platform_data { int num_leds; diff --git a/include/linux/leds_pwm.h b/include/linux/leds_pwm.h new file mode 100644 index 00000000000..33a07116748 --- /dev/null +++ b/include/linux/leds_pwm.h @@ -0,0 +1,21 @@ +/* + * PWM LED driver data - see drivers/leds/leds-pwm.c + */ +#ifndef __LINUX_LEDS_PWM_H +#define __LINUX_LEDS_PWM_H + +struct led_pwm { + const char *name; + const char *default_trigger; + unsigned pwm_id; + u8 active_low; + unsigned max_brightness; + unsigned pwm_period_ns; +}; + +struct led_pwm_platform_data { + int num_leds; + struct led_pwm *leds; +}; + +#endif diff --git a/include/linux/lguest.h b/include/linux/lguest.h index 175e63f4a8c..2fb1dcbcb5a 100644 --- a/include/linux/lguest.h +++ b/include/linux/lguest.h @@ -1,5 +1,7 @@ -/* Things the lguest guest needs to know. Note: like all lguest interfaces, - * this is subject to wild and random change between versions. */ +/* + * Things the lguest guest needs to know. Note: like all lguest interfaces, + * this is subject to wild and random change between versions. + */ #ifndef _LINUX_LGUEST_H #define _LINUX_LGUEST_H @@ -11,28 +13,41 @@ #define LG_CLOCK_MIN_DELTA 100UL #define LG_CLOCK_MAX_DELTA ULONG_MAX -/*G:032 The second method of communicating with the Host is to via "struct +/*G:031 + * The second method of communicating with the Host is to via "struct * lguest_data". 
Once the Guest's initialization hypercall tells the Host where - * this is, the Guest and Host both publish information in it. :*/ -struct lguest_data -{ - /* 512 == enabled (same as eflags in normal hardware). The Guest - * changes interrupts so often that a hypercall is too slow. */ + * this is, the Guest and Host both publish information in it. +:*/ +struct lguest_data { + /* + * 512 == enabled (same as eflags in normal hardware). The Guest + * changes interrupts so often that a hypercall is too slow. + */ unsigned int irq_enabled; /* Fine-grained interrupt disabling by the Guest */ DECLARE_BITMAP(blocked_interrupts, LGUEST_IRQS); - /* The Host writes the virtual address of the last page fault here, + /* + * The Host writes the virtual address of the last page fault here, * which saves the Guest a hypercall. CR2 is the native register where - * this address would normally be found. */ + * this address would normally be found. + */ unsigned long cr2; /* Wallclock time set by the Host. */ struct timespec time; - /* Async hypercall ring. Instead of directly making hypercalls, we can + /* + * Interrupt pending set by the Host. The Guest should do a hypercall + * if it re-enables interrupts and sees this set (to X86_EFLAGS_IF). + */ + int irq_pending; + + /* + * Async hypercall ring. Instead of directly making hypercalls, we can * place them in here for processing the next time the Host wants. - * This batching can be quite efficient. */ + * This batching can be quite efficient. + */ /* 0xFF == done (set by Host), 0 == pending (set by Guest). */ u8 hcall_status[LHCALL_RING_SIZE]; diff --git a/include/linux/lguest_launcher.h b/include/linux/lguest_launcher.h index a53407a4165..495203ff221 100644 --- a/include/linux/lguest_launcher.h +++ b/include/linux/lguest_launcher.h @@ -29,8 +29,10 @@ struct lguest_device_desc { __u8 type; /* The number of virtqueues (first in config array) */ __u8 num_vq; - /* The number of bytes of feature bits. Multiply by 2: one for host - * features and one for Guest acknowledgements. */ + /* + * The number of bytes of feature bits. Multiply by 2: one for host + * features and one for Guest acknowledgements. + */ __u8 feature_len; /* The number of bytes of the config array after virtqueues. */ __u8 config_len; @@ -39,8 +41,10 @@ struct lguest_device_desc { __u8 config[0]; }; -/*D:135 This is how we expect the device configuration field for a virtqueue - * to be laid out in config space. */ +/*D:135 + * This is how we expect the device configuration field for a virtqueue + * to be laid out in config space. + */ struct lguest_vqconfig { /* The number of entries in the virtio_ring */ __u16 num; @@ -57,10 +61,13 @@ enum lguest_req LHREQ_INITIALIZE, /* + base, pfnlimit, start */ LHREQ_GETDMA, /* No longer used */ LHREQ_IRQ, /* + irq */ - LHREQ_BREAK, /* + on/off flag (on blocks until someone does off) */ + LHREQ_BREAK, /* No longer used */ + LHREQ_EVENTFD, /* + address, fd. */ }; -/* The alignment to use between consumer and producer parts of vring. - * x86 pagesize for historical reasons. */ +/* + * The alignment to use between consumer and producer parts of vring. + * x86 pagesize for historical reasons. 
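Circling back to the lp3944 platform data added a little earlier, a hedged sketch of how a board might describe its LEDs; the names and polarity are illustrative:

#include <linux/leds-lp3944.h>

static struct lp3944_platform_data example_lp3944_data = {
	.leds = {
		[LP3944_LED0] = {
			.name	= "status:red",
			.type	= LP3944_LED_TYPE_LED,
			.status	= LP3944_LED_STATUS_OFF,
		},
		[LP3944_LED1] = {
			.name	= "status:green",
			.type	= LP3944_LED_TYPE_LED_INVERTED,
			.status	= LP3944_LED_STATUS_ON,
		},
	},
	.leds_size = 2,		/* only the first two outputs are wired up */
};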
+ */ #define LGUEST_VRING_ALIGN 4096 #endif /* _LINUX_LGUEST_LAUNCHER */ diff --git a/include/linux/libata.h b/include/linux/libata.h index 2c6bd66209f..76319bf03e3 100644 --- a/include/linux/libata.h +++ b/include/linux/libata.h @@ -143,7 +143,6 @@ enum { ATA_DFLAG_PIO = (1 << 12), /* device limited to PIO mode */ ATA_DFLAG_NCQ_OFF = (1 << 13), /* device limited to non-NCQ mode */ - ATA_DFLAG_SPUNDOWN = (1 << 14), /* XXX: for spindown_compat */ ATA_DFLAG_SLEEPING = (1 << 15), /* device is sleeping */ ATA_DFLAG_DUBIOUS_XFER = (1 << 16), /* data transfer not verified */ ATA_DFLAG_NO_UNLOAD = (1 << 17), /* device doesn't support unload */ @@ -187,7 +186,10 @@ enum { ATA_FLAG_PIO_POLLING = (1 << 9), /* use polling PIO if LLD * doesn't handle PIO interrupts */ ATA_FLAG_NCQ = (1 << 10), /* host supports NCQ */ + ATA_FLAG_NO_POWEROFF_SPINDOWN = (1 << 11), /* don't spindown before poweroff */ + ATA_FLAG_NO_HIBERNATE_SPINDOWN = (1 << 12), /* don't spindown before hibernation */ ATA_FLAG_DEBUGMSG = (1 << 13), + ATA_FLAG_FPDMA_AA = (1 << 14), /* driver supports Auto-Activate */ ATA_FLAG_IGN_SIMPLEX = (1 << 15), /* ignore SIMPLEX */ ATA_FLAG_NO_IORDY = (1 << 16), /* controller lacks iordy */ ATA_FLAG_ACPI_SATA = (1 << 17), /* need native SATA ACPI layout */ @@ -207,6 +209,7 @@ enum { /* bits 24:31 of ap->flags are reserved for LLD specific flags */ + /* struct ata_port pflags */ ATA_PFLAG_EH_PENDING = (1 << 0), /* EH pending */ ATA_PFLAG_EH_IN_PROGRESS = (1 << 1), /* EH in progress */ @@ -223,6 +226,9 @@ enum { ATA_PFLAG_PM_PENDING = (1 << 18), /* PM operation pending */ ATA_PFLAG_INIT_GTM_VALID = (1 << 19), /* initial gtm data valid */ + ATA_PFLAG_PIO32 = (1 << 20), /* 32bit PIO */ + ATA_PFLAG_PIO32CHANGE = (1 << 21), /* 32bit PIO can be turned on/off */ + /* struct ata_queued_cmd flags */ ATA_QCFLAG_ACTIVE = (1 << 0), /* cmd not yet ack'd to scsi lyer */ ATA_QCFLAG_DMAMAP = (1 << 1), /* SG table is DMA mapped */ @@ -273,7 +279,7 @@ enum { * advised to wait only for the following duration before * doing SRST. 
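One consumer-side view of the new libata port flags: a hedged sketch of a SATA LLD advertising NCQ Auto-Activate via ATA_FLAG_FPDMA_AA in its port info. The ops table and transfer-mode masks are illustrative, not taken from this patch:

#include <linux/libata.h>

static struct ata_port_operations example_ops = {
	.inherits	= &sata_port_ops,
};

static const struct ata_port_info example_port_info = {
	.flags		= ATA_FLAG_SATA | ATA_FLAG_NCQ | ATA_FLAG_FPDMA_AA,
	.pio_mask	= ATA_PIO4,
	.udma_mask	= ATA_UDMA6,
	.port_ops	= &example_ops,
};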
*/ - ATA_TMOUT_PMP_SRST_WAIT = 1000, + ATA_TMOUT_PMP_SRST_WAIT = 5000, /* ATA bus states */ BUS_UNKNOWN = 0, @@ -377,7 +383,10 @@ enum { ATA_HORKAGE_BRIDGE_OK = (1 << 10), /* no bridge limits */ ATA_HORKAGE_ATAPI_MOD16_DMA = (1 << 11), /* use ATAPI DMA for commands not multiple of 16 bytes */ - ATA_HORKAGE_FIRMWARE_WARN = (1 << 12), /* firwmare update warning */ + ATA_HORKAGE_FIRMWARE_WARN = (1 << 12), /* firmware update warning */ + ATA_HORKAGE_1_5_GBPS = (1 << 13), /* force 1.5 Gbps */ + ATA_HORKAGE_NOSETXFER = (1 << 14), /* skip SETXFER, SATA only */ + ATA_HORKAGE_BROKEN_FPDMA_AA = (1 << 15), /* skip AA */ /* DMA mask for user DMA control: User visible values; DO NOT renumber */ @@ -527,6 +536,7 @@ struct ata_queued_cmd { unsigned long flags; /* ATA_QCFLAG_xxx */ unsigned int tag; unsigned int n_elem; + unsigned int orig_n_elem; int dma_dir; @@ -578,8 +588,9 @@ struct ata_device { acpi_handle acpi_handle; union acpi_object *gtf_cache; #endif - /* n_sector is used as CLEAR_OFFSET, read comment above CLEAR_OFFSET */ + /* n_sector is CLEAR_BEGIN, read comment above CLEAR_BEGIN */ u64 n_sectors; /* size of device, if ATA */ + u64 n_native_sectors; /* native size, if ATA */ unsigned int class; /* ATA_DEV_xxx */ unsigned long unpark_deadline; @@ -603,20 +614,22 @@ struct ata_device { u16 heads; /* Number of heads */ u16 sectors; /* Number of sectors per track */ - /* error history */ - int spdn_cnt; - struct ata_ering ering; - union { u16 id[ATA_ID_WORDS]; /* IDENTIFY xxx DEVICE data */ u32 gscr[SATA_PMP_GSCR_DWORDS]; /* PMP GSCR block */ }; + + /* error history */ + int spdn_cnt; + /* ering is CLEAR_END, read comment above CLEAR_END */ + struct ata_ering ering; }; -/* Offset into struct ata_device. Fields above it are maintained - * acress device init. Fields below are zeroed. +/* Fields between ATA_DEVICE_CLEAR_BEGIN and ATA_DEVICE_CLEAR_END are + * cleared to zero on ata_dev_init(). */ -#define ATA_DEVICE_CLEAR_OFFSET offsetof(struct ata_device, n_sectors) +#define ATA_DEVICE_CLEAR_BEGIN offsetof(struct ata_device, n_sectors) +#define ATA_DEVICE_CLEAR_END offsetof(struct ata_device, ering) struct ata_eh_info { struct ata_device *dev; /* offending device */ @@ -683,7 +696,10 @@ struct ata_port { struct Scsi_Host *scsi_host; /* our co-allocated scsi host */ struct ata_port_operations *ops; spinlock_t *lock; + /* Flags owned by the EH context. Only EH should touch these once the + port is active */ unsigned long flags; /* ATA_FLAG_xxx */ + /* Flags that change dynamically, protected by ap->lock */ unsigned int pflags; /* ATA_PFLAG_xxx */ unsigned int print_id; /* user visible unique port ID */ unsigned int port_no; /* 0 based port no. 
inside the host */ @@ -745,7 +761,8 @@ struct ata_port { acpi_handle acpi_handle; struct ata_acpi_gtm __acpi_init_gtm; /* use ata_acpi_init_gtm() */ #endif - u8 sector_buf[ATA_SECT_SIZE]; /* owned by EH */ + /* owned by EH */ + u8 sector_buf[ATA_SECT_SIZE] ____cacheline_aligned; }; /* The following initializer overrides a method to NULL whether one of @@ -788,6 +805,7 @@ struct ata_port_operations { ata_reset_fn_t pmp_hardreset; ata_postreset_fn_t pmp_postreset; void (*error_handler)(struct ata_port *ap); + void (*lost_interrupt)(struct ata_port *ap); void (*post_internal_cmd)(struct ata_queued_cmd *qc); /* @@ -829,6 +847,8 @@ struct ata_port_operations { void (*bmdma_start)(struct ata_queued_cmd *qc); void (*bmdma_stop)(struct ata_queued_cmd *qc); u8 (*bmdma_status)(struct ata_port *ap); + + void (*drain_fifo)(struct ata_queued_cmd *qc); #endif /* CONFIG_ATA_SFF */ ssize_t (*em_show)(struct ata_port *ap, char *buf); @@ -1001,6 +1021,9 @@ extern int ata_cable_sata(struct ata_port *ap); extern int ata_cable_ignore(struct ata_port *ap); extern int ata_cable_unknown(struct ata_port *ap); +extern void ata_pio_queue_task(struct ata_port *ap, void *data, + unsigned long delay); + /* Timing helpers */ extern unsigned int ata_pio_need_iordy(const struct ata_device *); extern const struct ata_timing *ata_timing_find_mode(u8 xfer_mode); @@ -1565,6 +1588,7 @@ extern bool ata_sff_qc_fill_rtf(struct ata_queued_cmd *qc); extern unsigned int ata_sff_host_intr(struct ata_port *ap, struct ata_queued_cmd *qc); extern irqreturn_t ata_sff_interrupt(int irq, void *dev_instance); +extern void ata_sff_lost_interrupt(struct ata_port *ap); extern void ata_sff_freeze(struct ata_port *ap); extern void ata_sff_thaw(struct ata_port *ap); extern int ata_sff_prereset(struct ata_link *link, unsigned long deadline); @@ -1577,9 +1601,11 @@ extern int ata_sff_softreset(struct ata_link *link, unsigned int *classes, extern int sata_sff_hardreset(struct ata_link *link, unsigned int *class, unsigned long deadline); extern void ata_sff_postreset(struct ata_link *link, unsigned int *classes); +extern void ata_sff_drain_fifo(struct ata_queued_cmd *qc); extern void ata_sff_error_handler(struct ata_port *ap); extern void ata_sff_post_internal_cmd(struct ata_queued_cmd *qc); extern int ata_sff_port_start(struct ata_port *ap); +extern int ata_sff_port_start32(struct ata_port *ap); extern void ata_sff_std_ports(struct ata_ioports *ioaddr); extern unsigned long ata_bmdma_mode_filter(struct ata_device *dev, unsigned long xfer_mask); diff --git a/include/linux/linkage.h b/include/linux/linkage.h index fee9e59649c..691f59171c6 100644 --- a/include/linux/linkage.h +++ b/include/linux/linkage.h @@ -22,6 +22,15 @@ #define __page_aligned_bss __section(.bss.page_aligned) __aligned(PAGE_SIZE) /* + * For assembly routines. + * + * Note when using these that you must specify the appropriate + * alignment directives yourself + */ +#define __PAGE_ALIGNED_DATA .section ".data.page_aligned", "aw" +#define __PAGE_ALIGNED_BSS .section ".bss.page_aligned", "aw" + +/* * This is used by architectures to keep arguments on the stack * untouched by the compiler by keeping them live until the end. 
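Returning to the SFF hooks exported above in libata.h, a hedged sketch of a driver picking them up; inheriting from ata_sff_port_ops is the usual pattern and the explicit assignments are shown only for illustration:

#include <linux/libata.h>

static struct ata_port_operations example_sff_ops = {
	.inherits	= &ata_sff_port_ops,
	.lost_interrupt	= ata_sff_lost_interrupt,
	.drain_fifo	= ata_sff_drain_fifo,
};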
* The argument stack may be owned by the assembly-language diff --git a/include/linux/linux_logo.h b/include/linux/linux_logo.h index 08a92969c76..ca5bd91d12e 100644 --- a/include/linux/linux_logo.h +++ b/include/linux/linux_logo.h @@ -32,6 +32,22 @@ struct linux_logo { const unsigned char *data; }; +extern const struct linux_logo logo_linux_mono; +extern const struct linux_logo logo_linux_vga16; +extern const struct linux_logo logo_linux_clut224; +extern const struct linux_logo logo_blackfin_vga16; +extern const struct linux_logo logo_blackfin_clut224; +extern const struct linux_logo logo_dec_clut224; +extern const struct linux_logo logo_mac_clut224; +extern const struct linux_logo logo_parisc_clut224; +extern const struct linux_logo logo_sgi_clut224; +extern const struct linux_logo logo_sun_clut224; +extern const struct linux_logo logo_superh_mono; +extern const struct linux_logo logo_superh_vga16; +extern const struct linux_logo logo_superh_clut224; +extern const struct linux_logo logo_m32r_clut224; +extern const struct linux_logo logo_spe_clut224; + extern const struct linux_logo *fb_find_logo(int depth); #ifdef CONFIG_FB_LOGO_EXTRA extern void fb_append_extra_logo(const struct linux_logo *logo, diff --git a/include/linux/lis3lv02d.h b/include/linux/lis3lv02d.h new file mode 100644 index 00000000000..ad651f4e45a --- /dev/null +++ b/include/linux/lis3lv02d.h @@ -0,0 +1,39 @@ +#ifndef __LIS3LV02D_H_ +#define __LIS3LV02D_H_ + +struct lis3lv02d_platform_data { + /* please note: the 'click' feature is only supported for + * LIS[32]02DL variants of the chip and will be ignored for + * others */ +#define LIS3_CLICK_SINGLE_X (1 << 0) +#define LIS3_CLICK_DOUBLE_X (1 << 1) +#define LIS3_CLICK_SINGLE_Y (1 << 2) +#define LIS3_CLICK_DOUBLE_Y (1 << 3) +#define LIS3_CLICK_SINGLE_Z (1 << 4) +#define LIS3_CLICK_DOUBLE_Z (1 << 5) + unsigned char click_flags; + unsigned char click_thresh_x; + unsigned char click_thresh_y; + unsigned char click_thresh_z; + unsigned char click_time_limit; + unsigned char click_latency; + unsigned char click_window; + +#define LIS3_IRQ1_DISABLE (0 << 0) +#define LIS3_IRQ1_FF_WU_1 (1 << 0) +#define LIS3_IRQ1_FF_WU_2 (2 << 0) +#define LIS3_IRQ1_FF_WU_12 (3 << 0) +#define LIS3_IRQ1_DATA_READY (4 << 0) +#define LIS3_IRQ1_CLICK (7 << 0) +#define LIS3_IRQ2_DISABLE (0 << 3) +#define LIS3_IRQ2_FF_WU_1 (1 << 3) +#define LIS3_IRQ2_FF_WU_2 (2 << 3) +#define LIS3_IRQ2_FF_WU_12 (3 << 3) +#define LIS3_IRQ2_DATA_READY (4 << 3) +#define LIS3_IRQ2_CLICK (7 << 3) +#define LIS3_IRQ_OPEN_DRAIN (1 << 6) +#define LIS3_IRQ_ACTIVE_HIGH (1 << 7) + unsigned char irq_cfg; +}; + +#endif /* __LIS3LV02D_H_ */ diff --git a/include/linux/list_nulls.h b/include/linux/list_nulls.h index 93150ecf3ea..5d10ae364b5 100644 --- a/include/linux/list_nulls.h +++ b/include/linux/list_nulls.h @@ -56,6 +56,18 @@ static inline int hlist_nulls_empty(const struct hlist_nulls_head *h) return is_a_nulls(h->first); } +static inline void hlist_nulls_add_head(struct hlist_nulls_node *n, + struct hlist_nulls_head *h) +{ + struct hlist_nulls_node *first = h->first; + + n->next = first; + n->pprev = &h->first; + h->first = n; + if (!is_a_nulls(first)) + first->pprev = &n->next; +} + static inline void __hlist_nulls_del(struct hlist_nulls_node *n) { struct hlist_nulls_node *next = n->next; @@ -65,6 +77,12 @@ static inline void __hlist_nulls_del(struct hlist_nulls_node *n) next->pprev = pprev; } +static inline void hlist_nulls_del(struct hlist_nulls_node *n) +{ + __hlist_nulls_del(n); + n->pprev = LIST_POISON2; +} + /** * 
hlist_nulls_for_each_entry - iterate over list of given type * @tpos: the type * to use as a loop cursor. diff --git a/include/linux/lm_interface.h b/include/linux/lm_interface.h deleted file mode 100644 index 2ed8fa1b762..00000000000 --- a/include/linux/lm_interface.h +++ /dev/null @@ -1,277 +0,0 @@ -/* - * Copyright (C) Sistina Software, Inc. 1997-2003 All rights reserved. - * Copyright (C) 2004-2006 Red Hat, Inc. All rights reserved. - * - * This copyrighted material is made available to anyone wishing to use, - * modify, copy, or redistribute it subject to the terms and conditions - * of the GNU General Public License version 2. - */ - -#ifndef __LM_INTERFACE_DOT_H__ -#define __LM_INTERFACE_DOT_H__ - - -typedef void (*lm_callback_t) (void *ptr, unsigned int type, void *data); - -/* - * lm_mount() flags - * - * LM_MFLAG_SPECTATOR - * GFS is asking to join the filesystem's lockspace, but it doesn't want to - * modify the filesystem. The lock module shouldn't assign a journal to the FS - * mount. It shouldn't send recovery callbacks to the FS mount. If the node - * dies or withdraws, all locks can be wiped immediately. - * - * LM_MFLAG_CONV_NODROP - * Do not allow the dlm to internally resolve conversion deadlocks by demoting - * the lock to unlocked and then reacquiring it in the requested mode. Instead, - * it should cancel the request and return LM_OUT_CONV_DEADLK. - */ - -#define LM_MFLAG_SPECTATOR 0x00000001 -#define LM_MFLAG_CONV_NODROP 0x00000002 - -/* - * lm_lockstruct flags - * - * LM_LSFLAG_LOCAL - * The lock_nolock module returns LM_LSFLAG_LOCAL to GFS, indicating that GFS - * can make single-node optimizations. - */ - -#define LM_LSFLAG_LOCAL 0x00000001 - -/* - * lm_lockname types - */ - -#define LM_TYPE_RESERVED 0x00 -#define LM_TYPE_NONDISK 0x01 -#define LM_TYPE_INODE 0x02 -#define LM_TYPE_RGRP 0x03 -#define LM_TYPE_META 0x04 -#define LM_TYPE_IOPEN 0x05 -#define LM_TYPE_FLOCK 0x06 -#define LM_TYPE_PLOCK 0x07 -#define LM_TYPE_QUOTA 0x08 -#define LM_TYPE_JOURNAL 0x09 - -/* - * lm_lock() states - * - * SHARED is compatible with SHARED, not with DEFERRED or EX. - * DEFERRED is compatible with DEFERRED, not with SHARED or EX. - */ - -#define LM_ST_UNLOCKED 0 -#define LM_ST_EXCLUSIVE 1 -#define LM_ST_DEFERRED 2 -#define LM_ST_SHARED 3 - -/* - * lm_lock() flags - * - * LM_FLAG_TRY - * Don't wait to acquire the lock if it can't be granted immediately. - * - * LM_FLAG_TRY_1CB - * Send one blocking callback if TRY is set and the lock is not granted. - * - * LM_FLAG_NOEXP - * GFS sets this flag on lock requests it makes while doing journal recovery. - * These special requests should not be blocked due to the recovery like - * ordinary locks would be. - * - * LM_FLAG_ANY - * A SHARED request may also be granted in DEFERRED, or a DEFERRED request may - * also be granted in SHARED. The preferred state is whichever is compatible - * with other granted locks, or the specified state if no other locks exist. - * - * LM_FLAG_PRIORITY - * Override fairness considerations. Suppose a lock is held in a shared state - * and there is a pending request for the deferred state. A shared lock - * request with the priority flag would be allowed to bypass the deferred - * request and directly join the other shared lock. A shared lock request - * without the priority flag might be forced to wait until the deferred - * requested had acquired and released the lock. 
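As an aside on the list_nulls helpers added just before the lm_interface.h removal, a hedged sketch of plain (non-RCU) usage; the item structure and the nulls marker value are illustrative:

#include <linux/kernel.h>
#include <linux/list_nulls.h>

struct example_item {
	int				key;
	struct hlist_nulls_node		node;
};

static void example_nulls_usage(struct example_item *item)
{
	struct hlist_nulls_head head;
	struct example_item *pos;
	struct hlist_nulls_node *n;

	INIT_HLIST_NULLS_HEAD(&head, 0);	/* 0 is the "nulls" marker value */
	hlist_nulls_add_head(&item->node, &head);

	hlist_nulls_for_each_entry(pos, n, &head, node)
		pr_info("key=%d\n", pos->key);

	hlist_nulls_del(&item->node);		/* poisons ->pprev like hlist_del() */
}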
- */ - -#define LM_FLAG_TRY 0x00000001 -#define LM_FLAG_TRY_1CB 0x00000002 -#define LM_FLAG_NOEXP 0x00000004 -#define LM_FLAG_ANY 0x00000008 -#define LM_FLAG_PRIORITY 0x00000010 - -/* - * lm_lock() and lm_async_cb return flags - * - * LM_OUT_ST_MASK - * Masks the lower two bits of lock state in the returned value. - * - * LM_OUT_CACHEABLE - * The lock hasn't been released so GFS can continue to cache data for it. - * - * LM_OUT_CANCELED - * The lock request was canceled. - * - * LM_OUT_ASYNC - * The result of the request will be returned in an LM_CB_ASYNC callback. - * - * LM_OUT_CONV_DEADLK - * The lock request was canceled do to a conversion deadlock. - */ - -#define LM_OUT_ST_MASK 0x00000003 -#define LM_OUT_CANCELED 0x00000008 -#define LM_OUT_ASYNC 0x00000080 -#define LM_OUT_ERROR 0x00000100 - -/* - * lm_callback_t types - * - * LM_CB_NEED_E LM_CB_NEED_D LM_CB_NEED_S - * Blocking callback, a remote node is requesting the given lock in - * EXCLUSIVE, DEFERRED, or SHARED. - * - * LM_CB_NEED_RECOVERY - * The given journal needs to be recovered. - * - * LM_CB_ASYNC - * The given lock has been granted. - */ - -#define LM_CB_NEED_E 257 -#define LM_CB_NEED_D 258 -#define LM_CB_NEED_S 259 -#define LM_CB_NEED_RECOVERY 260 -#define LM_CB_ASYNC 262 - -/* - * lm_recovery_done() messages - */ - -#define LM_RD_GAVEUP 308 -#define LM_RD_SUCCESS 309 - - -struct lm_lockname { - u64 ln_number; - unsigned int ln_type; -}; - -#define lm_name_equal(name1, name2) \ - (((name1)->ln_number == (name2)->ln_number) && \ - ((name1)->ln_type == (name2)->ln_type)) \ - -struct lm_async_cb { - struct lm_lockname lc_name; - int lc_ret; -}; - -struct lm_lockstruct; - -struct lm_lockops { - const char *lm_proto_name; - - /* - * Mount/Unmount - */ - - int (*lm_mount) (char *table_name, char *host_data, - lm_callback_t cb, void *cb_data, - unsigned int min_lvb_size, int flags, - struct lm_lockstruct *lockstruct, - struct kobject *fskobj); - - void (*lm_others_may_mount) (void *lockspace); - - void (*lm_unmount) (void *lockspace); - - void (*lm_withdraw) (void *lockspace); - - /* - * Lock oriented operations - */ - - int (*lm_get_lock) (void *lockspace, struct lm_lockname *name, void **lockp); - - void (*lm_put_lock) (void *lock); - - unsigned int (*lm_lock) (void *lock, unsigned int cur_state, - unsigned int req_state, unsigned int flags); - - unsigned int (*lm_unlock) (void *lock, unsigned int cur_state); - - void (*lm_cancel) (void *lock); - - int (*lm_hold_lvb) (void *lock, char **lvbp); - void (*lm_unhold_lvb) (void *lock, char *lvb); - - /* - * Posix Lock oriented operations - */ - - int (*lm_plock_get) (void *lockspace, struct lm_lockname *name, - struct file *file, struct file_lock *fl); - - int (*lm_plock) (void *lockspace, struct lm_lockname *name, - struct file *file, int cmd, struct file_lock *fl); - - int (*lm_punlock) (void *lockspace, struct lm_lockname *name, - struct file *file, struct file_lock *fl); - - /* - * Client oriented operations - */ - - void (*lm_recovery_done) (void *lockspace, unsigned int jid, - unsigned int message); - - struct module *lm_owner; -}; - -/* - * lm_mount() return values - * - * ls_jid - the journal ID this node should use - * ls_first - this node is the first to mount the file system - * ls_lvb_size - size in bytes of lock value blocks - * ls_lockspace - lock module's context for this file system - * ls_ops - lock module's functions - * ls_flags - lock module features - */ - -struct lm_lockstruct { - unsigned int ls_jid; - unsigned int ls_first; - unsigned int ls_lvb_size; - 
void *ls_lockspace; - const struct lm_lockops *ls_ops; - int ls_flags; -}; - -/* - * Lock module bottom interface. A lock module makes itself available to GFS - * with these functions. - */ - -int gfs2_register_lockproto(const struct lm_lockops *proto); -void gfs2_unregister_lockproto(const struct lm_lockops *proto); - -/* - * Lock module top interface. GFS calls these functions when mounting or - * unmounting a file system. - */ - -int gfs2_mount_lockproto(char *proto_name, char *table_name, char *host_data, - lm_callback_t cb, void *cb_data, - unsigned int min_lvb_size, int flags, - struct lm_lockstruct *lockstruct, - struct kobject *fskobj); - -void gfs2_unmount_lockproto(struct lm_lockstruct *lockstruct); - -void gfs2_withdraw_lockproto(struct lm_lockstruct *lockstruct); - -#endif /* __LM_INTERFACE_DOT_H__ */ - diff --git a/include/linux/lmb.h b/include/linux/lmb.h index c46c89505da..2442e3f3d03 100644 --- a/include/linux/lmb.h +++ b/include/linux/lmb.h @@ -51,7 +51,7 @@ extern u64 __init lmb_alloc_base(u64 size, extern u64 __init __lmb_alloc_base(u64 size, u64 align, u64 max_addr); extern u64 __init lmb_phys_mem_size(void); -extern u64 __init lmb_end_of_DRAM(void); +extern u64 lmb_end_of_DRAM(void); extern void __init lmb_enforce_memory_limit(u64 memory_limit); extern int __init lmb_is_reserved(u64 addr); extern int lmb_find(struct lmb_property *res); diff --git a/include/linux/lockd/lockd.h b/include/linux/lockd/lockd.h index aa6fe7026de..c325b187966 100644 --- a/include/linux/lockd/lockd.h +++ b/include/linux/lockd/lockd.h @@ -195,7 +195,7 @@ extern struct svc_procedure nlmsvc_procedures4[]; extern int nlmsvc_grace_period; extern unsigned long nlmsvc_timeout; extern int nsm_use_hostnames; -extern int nsm_local_state; +extern u32 nsm_local_state; /* * Lockd client functions @@ -346,6 +346,7 @@ static inline int __nlm_cmp_addr4(const struct sockaddr *sap1, return sin1->sin_addr.s_addr == sin2->sin_addr.s_addr; } +#if defined(CONFIG_IPV6) || defined(CONFIG_IPV6_MODULE) static inline int __nlm_cmp_addr6(const struct sockaddr *sap1, const struct sockaddr *sap2) { @@ -353,6 +354,13 @@ static inline int __nlm_cmp_addr6(const struct sockaddr *sap1, const struct sockaddr_in6 *sin2 = (const struct sockaddr_in6 *)sap2; return ipv6_addr_equal(&sin1->sin6_addr, &sin2->sin6_addr); } +#else /* !(CONFIG_IPV6 || CONFIG_IPV6_MODULE) */ +static inline int __nlm_cmp_addr6(const struct sockaddr *sap1, + const struct sockaddr *sap2) +{ + return 0; +} +#endif /* !(CONFIG_IPV6 || CONFIG_IPV6_MODULE) */ /* * Compare two host addresses diff --git a/include/linux/lockd/xdr.h b/include/linux/lockd/xdr.h index 7dc5b6cb44c..d39ed1cc5fb 100644 --- a/include/linux/lockd/xdr.h +++ b/include/linux/lockd/xdr.h @@ -25,13 +25,13 @@ struct svc_rqst; #define NLM_MAXCOOKIELEN 32 #define NLM_MAXSTRLEN 1024 -#define nlm_granted __constant_htonl(NLM_LCK_GRANTED) -#define nlm_lck_denied __constant_htonl(NLM_LCK_DENIED) -#define nlm_lck_denied_nolocks __constant_htonl(NLM_LCK_DENIED_NOLOCKS) -#define nlm_lck_blocked __constant_htonl(NLM_LCK_BLOCKED) -#define nlm_lck_denied_grace_period __constant_htonl(NLM_LCK_DENIED_GRACE_PERIOD) +#define nlm_granted cpu_to_be32(NLM_LCK_GRANTED) +#define nlm_lck_denied cpu_to_be32(NLM_LCK_DENIED) +#define nlm_lck_denied_nolocks cpu_to_be32(NLM_LCK_DENIED_NOLOCKS) +#define nlm_lck_blocked cpu_to_be32(NLM_LCK_BLOCKED) +#define nlm_lck_denied_grace_period cpu_to_be32(NLM_LCK_DENIED_GRACE_PERIOD) -#define nlm_drop_reply __constant_htonl(30000) +#define nlm_drop_reply cpu_to_be32(30000) /* Lock 
info passed via NLM */ struct nlm_lock { diff --git a/include/linux/lockd/xdr4.h b/include/linux/lockd/xdr4.h index 12bfe09de2b..7353821341e 100644 --- a/include/linux/lockd/xdr4.h +++ b/include/linux/lockd/xdr4.h @@ -15,11 +15,11 @@ #include <linux/lockd/xdr.h> /* error codes new to NLMv4 */ -#define nlm4_deadlock __constant_htonl(NLM_DEADLCK) -#define nlm4_rofs __constant_htonl(NLM_ROFS) -#define nlm4_stale_fh __constant_htonl(NLM_STALE_FH) -#define nlm4_fbig __constant_htonl(NLM_FBIG) -#define nlm4_failed __constant_htonl(NLM_FAILED) +#define nlm4_deadlock cpu_to_be32(NLM_DEADLCK) +#define nlm4_rofs cpu_to_be32(NLM_ROFS) +#define nlm4_stale_fh cpu_to_be32(NLM_STALE_FH) +#define nlm4_fbig cpu_to_be32(NLM_FBIG) +#define nlm4_failed cpu_to_be32(NLM_FAILED) diff --git a/include/linux/lockdep.h b/include/linux/lockdep.h index 23bf02fb124..9ccf0e286b2 100644 --- a/include/linux/lockdep.h +++ b/include/linux/lockdep.h @@ -20,43 +20,10 @@ struct lockdep_map; #include <linux/stacktrace.h> /* - * Lock-class usage-state bits: + * We'd rather not expose kernel/lockdep_states.h this wide, but we do need + * the total number of states... :-( */ -enum lock_usage_bit -{ - LOCK_USED = 0, - LOCK_USED_IN_HARDIRQ, - LOCK_USED_IN_SOFTIRQ, - LOCK_ENABLED_SOFTIRQS, - LOCK_ENABLED_HARDIRQS, - LOCK_USED_IN_HARDIRQ_READ, - LOCK_USED_IN_SOFTIRQ_READ, - LOCK_ENABLED_SOFTIRQS_READ, - LOCK_ENABLED_HARDIRQS_READ, - LOCK_USAGE_STATES -}; - -/* - * Usage-state bitmasks: - */ -#define LOCKF_USED (1 << LOCK_USED) -#define LOCKF_USED_IN_HARDIRQ (1 << LOCK_USED_IN_HARDIRQ) -#define LOCKF_USED_IN_SOFTIRQ (1 << LOCK_USED_IN_SOFTIRQ) -#define LOCKF_ENABLED_HARDIRQS (1 << LOCK_ENABLED_HARDIRQS) -#define LOCKF_ENABLED_SOFTIRQS (1 << LOCK_ENABLED_SOFTIRQS) - -#define LOCKF_ENABLED_IRQS (LOCKF_ENABLED_HARDIRQS | LOCKF_ENABLED_SOFTIRQS) -#define LOCKF_USED_IN_IRQ (LOCKF_USED_IN_HARDIRQ | LOCKF_USED_IN_SOFTIRQ) - -#define LOCKF_USED_IN_HARDIRQ_READ (1 << LOCK_USED_IN_HARDIRQ_READ) -#define LOCKF_USED_IN_SOFTIRQ_READ (1 << LOCK_USED_IN_SOFTIRQ_READ) -#define LOCKF_ENABLED_HARDIRQS_READ (1 << LOCK_ENABLED_HARDIRQS_READ) -#define LOCKF_ENABLED_SOFTIRQS_READ (1 << LOCK_ENABLED_SOFTIRQS_READ) - -#define LOCKF_ENABLED_IRQS_READ \ - (LOCKF_ENABLED_HARDIRQS_READ | LOCKF_ENABLED_SOFTIRQS_READ) -#define LOCKF_USED_IN_IRQ_READ \ - (LOCKF_USED_IN_HARDIRQ_READ | LOCKF_USED_IN_SOFTIRQ_READ) +#define XXX_LOCK_USAGE_STATES (1+3*4) #define MAX_LOCKDEP_SUBCLASSES 8UL @@ -97,7 +64,7 @@ struct lock_class { * IRQ/softirq usage tracking bits: */ unsigned long usage_mask; - struct stack_trace usage_traces[LOCK_USAGE_STATES]; + struct stack_trace usage_traces[XXX_LOCK_USAGE_STATES]; /* * These fields represent a directed graph of lock dependencies, @@ -182,6 +149,12 @@ struct lock_list { struct lock_class *class; struct stack_trace trace; int distance; + + /* + * The parent field is used to implement breadth-first search, and the + * bit 0 is reused to indicate if the lock has been accessed in BFS. 
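The nlm status values above remain compile-time __be32 constants after the cpu_to_be32() conversion, so they can still be returned and compared directly without a runtime byte swap. A hedged, purely illustrative helper:

#include <linux/lockd/xdr.h>

static __be32 example_nlm_status(int granted)
{
	return granted ? nlm_granted : nlm_lck_denied;
}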
+ */ + struct lock_list *parent; }; /* @@ -241,10 +214,12 @@ struct held_lock { * interrupt context: */ unsigned int irq_context:2; /* bit 0 - soft, bit 1 - hard */ - unsigned int trylock:1; + unsigned int trylock:1; /* 16 bits */ + unsigned int read:2; /* see lock_acquire() comment */ unsigned int check:2; /* see lock_acquire() comment */ unsigned int hardirqs_off:1; + unsigned int references:11; /* 32 bits */ }; /* @@ -291,6 +266,16 @@ extern void lockdep_init_map(struct lockdep_map *lock, const char *name, #define lockdep_set_subclass(lock, sub) \ lockdep_init_map(&(lock)->dep_map, #lock, \ (lock)->dep_map.key, sub) +/* + * Compare locking classes + */ +#define lockdep_match_class(lock, key) lockdep_match_key(&(lock)->dep_map, key) + +static inline int lockdep_match_key(struct lockdep_map *lock, + struct lock_class_key *key) +{ + return lock->key == key; +} /* * Acquire a lock. @@ -314,6 +299,10 @@ extern void lock_acquire(struct lockdep_map *lock, unsigned int subclass, extern void lock_release(struct lockdep_map *lock, int nested, unsigned long ip); +#define lockdep_is_held(lock) lock_is_held(&(lock)->dep_map) + +extern int lock_is_held(struct lockdep_map *lock); + extern void lock_set_class(struct lockdep_map *lock, const char *name, struct lock_class_key *key, unsigned int subclass, unsigned long ip); @@ -324,10 +313,16 @@ static inline void lock_set_subclass(struct lockdep_map *lock, lock_set_class(lock, lock->name, lock->key, subclass, ip); } -# define INIT_LOCKDEP .lockdep_recursion = 0, +extern void lockdep_set_current_reclaim_state(gfp_t gfp_mask); +extern void lockdep_clear_current_reclaim_state(void); +extern void lockdep_trace_alloc(gfp_t mask); + +# define INIT_LOCKDEP .lockdep_recursion = 0, .lockdep_reclaim_gfp = 0, #define lockdep_depth(tsk) (debug_locks ? (tsk)->lockdep_depth : 0) +#define lockdep_assert_held(l) WARN_ON(debug_locks && !lockdep_is_held(l)) + #else /* !LOCKDEP */ static inline void lockdep_off(void) @@ -342,6 +337,9 @@ static inline void lockdep_on(void) # define lock_release(l, n, i) do { } while (0) # define lock_set_class(l, n, k, s, i) do { } while (0) # define lock_set_subclass(l, s, i) do { } while (0) +# define lockdep_set_current_reclaim_state(g) do { } while (0) +# define lockdep_clear_current_reclaim_state() do { } while (0) +# define lockdep_trace_alloc(g) do { } while (0) # define lockdep_init() do { } while (0) # define lockdep_info() do { } while (0) # define lockdep_init_map(lock, name, key, sub) \ @@ -352,6 +350,11 @@ static inline void lockdep_on(void) #define lockdep_set_class_and_subclass(lock, key, sub) \ do { (void)(key); } while (0) #define lockdep_set_subclass(lock, sub) do { } while (0) +/* + * We don't define lockdep_match_class() and lockdep_match_key() for !LOCKDEP + * case since the result is not well defined and the caller should rather + * #ifdef the call himself. 
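A hedged sketch of the new lockdep_assert_held() annotation in use; the structure and lock are illustrative:

#include <linux/mutex.h>
#include <linux/lockdep.h>

struct example_cache {
	struct mutex	lock;
	unsigned int	nr_items;
};

static void example_cache_add(struct example_cache *c)
{
	/* warn (when lockdep is enabled) if the caller did not take c->lock */
	lockdep_assert_held(&c->lock);
	c->nr_items++;
}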
+ */ # define INIT_LOCKDEP # define lockdep_reset() do { debug_locks = 1; } while (0) @@ -364,6 +367,8 @@ struct lock_class_key { }; #define lockdep_depth(tsk) (0) +#define lockdep_assert_held(l) do { } while (0) + #endif /* !LOCKDEP */ #ifdef CONFIG_LOCK_STAT @@ -390,6 +395,23 @@ do { \ #endif /* CONFIG_LOCK_STAT */ +#ifdef CONFIG_LOCKDEP + +/* + * On lockdep we dont want the hand-coded irq-enable of + * _raw_*_lock_flags() code, because lockdep assumes + * that interrupts are not re-enabled during lock-acquire: + */ +#define LOCK_CONTENDED_FLAGS(_lock, try, lock, lockfl, flags) \ + LOCK_CONTENDED((_lock), (try), (lock)) + +#else /* CONFIG_LOCKDEP */ + +#define LOCK_CONTENDED_FLAGS(_lock, try, lock, lockfl, flags) \ + lockfl((_lock), (flags)) + +#endif /* CONFIG_LOCKDEP */ + #ifdef CONFIG_GENERIC_HARDIRQS extern void early_init_irq_lock_class(void); #else diff --git a/include/linux/loop.h b/include/linux/loop.h index 6ffd6db5bb0..66c194e2d9b 100644 --- a/include/linux/loop.h +++ b/include/linux/loop.h @@ -56,8 +56,7 @@ struct loop_device { gfp_t old_gfp_mask; spinlock_t lo_lock; - struct bio *lo_bio; - struct bio *lo_biotail; + struct bio_list lo_bio_list; int lo_state; struct mutex lo_ctl_mutex; struct task_struct *lo_thread; @@ -160,5 +159,6 @@ int loop_unregister_transfer(int number); #define LOOP_SET_STATUS64 0x4C04 #define LOOP_GET_STATUS64 0x4C05 #define LOOP_CHANGE_FD 0x4C06 +#define LOOP_SET_CAPACITY 0x4C07 #endif diff --git a/include/linux/lsm_audit.h b/include/linux/lsm_audit.h new file mode 100644 index 00000000000..190c3785487 --- /dev/null +++ b/include/linux/lsm_audit.h @@ -0,0 +1,117 @@ +/* + * Common LSM logging functions + * Heavily borrowed from selinux/avc.h + * + * Author : Etienne BASSET <etienne.basset@ensta.org> + * + * All credits to : Stephen Smalley, <sds@epoch.ncsc.mil> + * All BUGS to : Etienne BASSET <etienne.basset@ensta.org> + */ +#ifndef _LSM_COMMON_LOGGING_ +#define _LSM_COMMON_LOGGING_ + +#include <linux/stddef.h> +#include <linux/errno.h> +#include <linux/kernel.h> +#include <linux/kdev_t.h> +#include <linux/spinlock.h> +#include <linux/init.h> +#include <linux/audit.h> +#include <linux/in6.h> +#include <linux/path.h> +#include <linux/key.h> +#include <linux/skbuff.h> +#include <asm/system.h> + + +/* Auxiliary data to use in generating the audit record. 
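The new LOOP_SET_CAPACITY ioctl lets userspace ask the loop driver to re-read the backing file size and update the block device capacity. A hedged userspace sketch; the device path is illustrative:

#include <fcntl.h>
#include <unistd.h>
#include <sys/ioctl.h>
#include <linux/loop.h>

static int example_refresh_capacity(const char *dev)
{
	int ret, fd = open(dev, O_RDWR);

	if (fd < 0)
		return -1;
	ret = ioctl(fd, LOOP_SET_CAPACITY, 0);	/* argument is unused */
	close(fd);
	return ret;
}

/* e.g. example_refresh_capacity("/dev/loop0") after growing the backing file */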
*/ +struct common_audit_data { + char type; +#define LSM_AUDIT_DATA_FS 1 +#define LSM_AUDIT_DATA_NET 2 +#define LSM_AUDIT_DATA_CAP 3 +#define LSM_AUDIT_DATA_IPC 4 +#define LSM_AUDIT_DATA_TASK 5 +#define LSM_AUDIT_DATA_KEY 6 +#define LSM_AUDIT_NO_AUDIT 7 + struct task_struct *tsk; + union { + struct { + struct path path; + struct inode *inode; + } fs; + struct { + int netif; + struct sock *sk; + u16 family; + __be16 dport; + __be16 sport; + union { + struct { + __be32 daddr; + __be32 saddr; + } v4; + struct { + struct in6_addr daddr; + struct in6_addr saddr; + } v6; + } fam; + } net; + int cap; + int ipc_id; + struct task_struct *tsk; +#ifdef CONFIG_KEYS + struct { + key_serial_t key; + char *key_desc; + } key_struct; +#endif + } u; + /* this union contains LSM specific data */ + union { +#ifdef CONFIG_SECURITY_SMACK + /* SMACK data */ + struct smack_audit_data { + const char *function; + char *subject; + char *object; + char *request; + int result; + } smack_audit_data; +#endif +#ifdef CONFIG_SECURITY_SELINUX + /* SELinux data */ + struct { + u32 ssid; + u32 tsid; + u16 tclass; + u32 requested; + u32 audited; + u32 denied; + struct av_decision *avd; + int result; + } selinux_audit_data; +#endif + }; + /* these callback will be implemented by a specific LSM */ + void (*lsm_pre_audit)(struct audit_buffer *, void *); + void (*lsm_post_audit)(struct audit_buffer *, void *); +}; + +#define v4info fam.v4 +#define v6info fam.v6 + +int ipv4_skb_to_auditdata(struct sk_buff *skb, + struct common_audit_data *ad, u8 *proto); + +int ipv6_skb_to_auditdata(struct sk_buff *skb, + struct common_audit_data *ad, u8 *proto); + +/* Initialize an LSM audit data structure. */ +#define COMMON_AUDIT_DATA_INIT(_d, _t) \ + { memset((_d), 0, sizeof(struct common_audit_data)); \ + (_d)->type = LSM_AUDIT_DATA_##_t; } + +void common_lsm_audit(struct common_audit_data *a); + +#endif diff --git a/include/linux/mISDNdsp.h b/include/linux/mISDNdsp.h index 6b71d2dce50..41d1eeb9b3b 100644 --- a/include/linux/mISDNdsp.h +++ b/include/linux/mISDNdsp.h @@ -12,7 +12,8 @@ struct mISDN_dsp_element { void *(*new)(const char *arg); void (*free)(void *p); void (*process_tx)(void *p, unsigned char *data, int len); - void (*process_rx)(void *p, unsigned char *data, int len); + void (*process_rx)(void *p, unsigned char *data, int len, + unsigned int txlen); int num_args; struct mISDN_dsp_element_arg *args; @@ -24,6 +25,7 @@ extern void mISDN_dsp_element_unregister(struct mISDN_dsp_element *elem); struct dsp_features { int hfc_id; /* unique id to identify the chip (or -1) */ int hfc_dtmf; /* set if HFCmulti card supports dtmf */ + int hfc_conf; /* set if HFCmulti card supports conferences */ int hfc_loops; /* set if card supports tone loops */ int hfc_echocanhw; /* set if card supports echocancelation*/ int pcm_id; /* unique id to identify the pcm bus (or -1) */ diff --git a/include/linux/mISDNhw.h b/include/linux/mISDNhw.h index 97ffdc1d344..4af841408fb 100644 --- a/include/linux/mISDNhw.h +++ b/include/linux/mISDNhw.h @@ -89,11 +89,6 @@ struct dchannel { void (*phfunc) (struct dchannel *); u_int state; void *l1; - /* HW access */ - u_char (*read_reg) (void *, u_char); - void (*write_reg) (void *, u_char, u_char); - void (*read_fifo) (void *, u_char *, int); - void (*write_fifo) (void *, u_char *, int); void *hw; int slot; /* multiport card channel slot */ struct timer_list timer; @@ -151,11 +146,6 @@ struct bchannel { u_long Flags; struct work_struct workq; u_int state; - /* HW access */ - u_char (*read_reg) (void *, u_char); - void 
(*write_reg) (void *, u_char, u_char); - void (*read_fifo) (void *, u_char *, int); - void (*write_fifo) (void *, u_char *, int); void *hw; int slot; /* multiport card channel slot */ struct timer_list timer; @@ -178,6 +168,7 @@ struct bchannel { extern int mISDN_initdchannel(struct dchannel *, int, void *); extern int mISDN_initbchannel(struct bchannel *, int); extern int mISDN_freedchannel(struct dchannel *); +extern void mISDN_clear_bchannel(struct bchannel *); extern int mISDN_freebchannel(struct bchannel *); extern void queue_ch_frame(struct mISDNchannel *, u_int, int, struct sk_buff *); @@ -185,7 +176,7 @@ extern int dchannel_senddata(struct dchannel *, struct sk_buff *); extern int bchannel_senddata(struct bchannel *, struct sk_buff *); extern void recv_Dchannel(struct dchannel *); extern void recv_Echannel(struct dchannel *, struct dchannel *); -extern void recv_Bchannel(struct bchannel *); +extern void recv_Bchannel(struct bchannel *, unsigned int id); extern void recv_Dchannel_skb(struct dchannel *, struct sk_buff *); extern void recv_Bchannel_skb(struct bchannel *, struct sk_buff *); extern void confirm_Bsend(struct bchannel *bch); diff --git a/include/linux/mISDNif.h b/include/linux/mISDNif.h index 5da3d95b27f..536ca12442c 100644 --- a/include/linux/mISDNif.h +++ b/include/linux/mISDNif.h @@ -37,7 +37,7 @@ */ #define MISDN_MAJOR_VERSION 1 #define MISDN_MINOR_VERSION 1 -#define MISDN_RELEASE 20 +#define MISDN_RELEASE 21 /* primitives for information exchange * generell format @@ -153,6 +153,18 @@ #define HFC_VOL_CHANGE_RX 0x2602 #define HFC_SPL_LOOP_ON 0x2603 #define HFC_SPL_LOOP_OFF 0x2604 +/* for T30 FAX and analog modem */ +#define HW_MOD_FRM 0x4000 +#define HW_MOD_FRH 0x4001 +#define HW_MOD_FTM 0x4002 +#define HW_MOD_FTH 0x4003 +#define HW_MOD_FTS 0x4004 +#define HW_MOD_CONNECT 0x4010 +#define HW_MOD_OK 0x4011 +#define HW_MOD_NOCARR 0x4012 +#define HW_MOD_FCERROR 0x4013 +#define HW_MOD_READY 0x4014 +#define HW_MOD_LASTDATA 0x4015 /* DSP_TONE_PATT_ON parameter */ #define TONE_OFF 0x0000 @@ -224,11 +236,14 @@ #define ISDN_P_B_L2DTMF 0x24 #define ISDN_P_B_L2DSP 0x25 #define ISDN_P_B_L2DSPHDLC 0x26 +#define ISDN_P_B_T30_FAX 0x27 +#define ISDN_P_B_MODEM_ASYNC 0x28 #define OPTION_L2_PMX 1 #define OPTION_L2_PTP 2 #define OPTION_L2_FIXEDTEI 3 #define OPTION_L2_CLEANUP 4 +#define OPTION_L1_HOLD 5 /* should be in sync with linux/kobject.h:KOBJ_NAME_LEN */ #define MISDN_MAX_IDLEN 20 @@ -291,19 +306,19 @@ struct mISDN_devrename { /* MPH_INFORMATION_REQ payload */ struct ph_info_ch { - __u32 protocol; - __u64 Flags; + __u32 protocol; + __u64 Flags; }; struct ph_info_dch { - struct ph_info_ch ch; - __u16 state; - __u16 num_bch; + struct ph_info_ch ch; + __u16 state; + __u16 num_bch; }; struct ph_info { - struct ph_info_dch dch; - struct ph_info_ch bch[]; + struct ph_info_dch dch; + struct ph_info_ch bch[]; }; /* timer device ioctl */ @@ -317,6 +332,7 @@ struct ph_info { #define IMCTRLREQ _IOR('I', 69, int) #define IMCLEAR_L2 _IOR('I', 70, int) #define IMSETDEVNAME _IOR('I', 71, struct mISDN_devrename) +#define IMHOLD_L1 _IOR('I', 72, int) static inline int test_channelmap(u_int nr, u_char *map) @@ -362,7 +378,8 @@ clear_channelmap(u_int nr, u_char *map) #define MISDN_CTRL_HFC_RECEIVE_ON 0x4006 #define MISDN_CTRL_HFC_ECHOCAN_ON 0x4007 #define MISDN_CTRL_HFC_ECHOCAN_OFF 0x4008 - +#define MISDN_CTRL_HFC_WD_INIT 0x4009 +#define MISDN_CTRL_HFC_WD_RESET 0x400A /* socket options */ #define MISDN_TIME_STAMP 0x0001 diff --git a/include/linux/magic.h b/include/linux/magic.h index 
0b4df7eba85..1923327b986 100644 --- a/include/linux/magic.h +++ b/include/linux/magic.h @@ -6,9 +6,12 @@ #define AFS_SUPER_MAGIC 0x5346414F #define AUTOFS_SUPER_MAGIC 0x0187 #define CODA_SUPER_MAGIC 0x73757245 +#define CRAMFS_MAGIC 0x28cd3d45 /* some random number */ +#define CRAMFS_MAGIC_WEND 0x453dcd28 /* magic number with the wrong endianess */ #define DEBUGFS_MAGIC 0x64626720 #define SYSFS_MAGIC 0x62656572 #define SECURITYFS_MAGIC 0x73636673 +#define SELINUX_MAGIC 0xf97cff8c #define TMPFS_MAGIC 0x01021994 #define SQUASHFS_MAGIC 0x73717368 #define EFS_SUPER_MAGIC 0x414A53 @@ -49,4 +52,5 @@ #define FUTEXFS_SUPER_MAGIC 0xBAD1DEA #define INOTIFYFS_SUPER_MAGIC 0x2BAD1DEA +#define STACK_END_MAGIC 0x57AC6E9D #endif /* __LINUX_MAGIC_H__ */ diff --git a/include/linux/major.h b/include/linux/major.h index 88249452b93..6a8ca98c9a9 100644 --- a/include/linux/major.h +++ b/include/linux/major.h @@ -145,6 +145,7 @@ #define UNIX98_PTY_MAJOR_COUNT 8 #define UNIX98_PTY_SLAVE_MAJOR (UNIX98_PTY_MASTER_MAJOR+UNIX98_PTY_MAJOR_COUNT) +#define DRBD_MAJOR 147 #define RTF_MAJOR 150 #define RAW_MAJOR 162 @@ -171,5 +172,6 @@ #define VIOTAPE_MAJOR 230 #define BLOCK_EXT_MAJOR 259 +#define SCSI_OSD_MAJOR 260 /* open-osd's OSD scsi device */ #endif diff --git a/include/linux/maple.h b/include/linux/maple.h index c23d3f51ba4..d9a51b9b330 100644 --- a/include/linux/maple.h +++ b/include/linux/maple.h @@ -8,33 +8,49 @@ extern struct bus_type maple_bus_type; /* Maple Bus command and response codes */ enum maple_code { - MAPLE_RESPONSE_FILEERR = -5, - MAPLE_RESPONSE_AGAIN = -4, /* request should be retransmitted */ - MAPLE_RESPONSE_BADCMD = -3, - MAPLE_RESPONSE_BADFUNC = -2, - MAPLE_RESPONSE_NONE = -1, /* unit didn't respond at all */ - MAPLE_COMMAND_DEVINFO = 1, - MAPLE_COMMAND_ALLINFO = 2, - MAPLE_COMMAND_RESET = 3, - MAPLE_COMMAND_KILL = 4, - MAPLE_RESPONSE_DEVINFO = 5, - MAPLE_RESPONSE_ALLINFO = 6, - MAPLE_RESPONSE_OK = 7, - MAPLE_RESPONSE_DATATRF = 8, - MAPLE_COMMAND_GETCOND = 9, - MAPLE_COMMAND_GETMINFO = 10, - MAPLE_COMMAND_BREAD = 11, - MAPLE_COMMAND_BWRITE = 12, - MAPLE_COMMAND_SETCOND = 14 + MAPLE_RESPONSE_FILEERR = -5, + MAPLE_RESPONSE_AGAIN, /* retransmit */ + MAPLE_RESPONSE_BADCMD, + MAPLE_RESPONSE_BADFUNC, + MAPLE_RESPONSE_NONE, /* unit didn't respond*/ + MAPLE_COMMAND_DEVINFO = 1, + MAPLE_COMMAND_ALLINFO, + MAPLE_COMMAND_RESET, + MAPLE_COMMAND_KILL, + MAPLE_RESPONSE_DEVINFO, + MAPLE_RESPONSE_ALLINFO, + MAPLE_RESPONSE_OK, + MAPLE_RESPONSE_DATATRF, + MAPLE_COMMAND_GETCOND, + MAPLE_COMMAND_GETMINFO, + MAPLE_COMMAND_BREAD, + MAPLE_COMMAND_BWRITE, + MAPLE_COMMAND_BSYNC, + MAPLE_COMMAND_SETCOND, + MAPLE_COMMAND_MICCONTROL +}; + +enum maple_file_errors { + MAPLE_FILEERR_INVALID_PARTITION = 0x01000000, + MAPLE_FILEERR_PHASE_ERROR = 0x02000000, + MAPLE_FILEERR_INVALID_BLOCK = 0x04000000, + MAPLE_FILEERR_WRITE_ERROR = 0x08000000, + MAPLE_FILEERR_INVALID_WRITE_LENGTH = 0x10000000, + MAPLE_FILEERR_BAD_CRC = 0x20000000 +}; + +struct maple_buffer { + char bufx[0x400]; + void *buf; }; struct mapleq { struct list_head list; struct maple_device *dev; - void *sendbuf, *recvbuf, *recvbufdcsp; + struct maple_buffer *recvbuf; + void *sendbuf, *recvbuf_p2; unsigned char length; enum maple_code command; - struct mutex mutex; }; struct maple_devinfo { @@ -52,11 +68,15 @@ struct maple_device { struct maple_driver *driver; struct mapleq *mq; void (*callback) (struct mapleq * mq); + void (*fileerr_handler)(struct maple_device *mdev, void *recvbuf); + int (*can_unload)(struct maple_device *mdev); unsigned long when, interval, 
function; struct maple_devinfo devinfo; unsigned char port, unit; char product_name[32]; char product_licence[64]; + atomic_t busy; + wait_queue_head_t maple_wait; struct device dev; }; @@ -72,7 +92,7 @@ void maple_getcond_callback(struct maple_device *dev, int maple_driver_register(struct maple_driver *); void maple_driver_unregister(struct maple_driver *); -int maple_add_packet_sleeps(struct maple_device *mdev, u32 function, +int maple_add_packet(struct maple_device *mdev, u32 function, u32 command, u32 length, void *data); void maple_clear_dev(struct maple_device *mdev); diff --git a/include/linux/matroxfb.h b/include/linux/matroxfb.h index 404f678e734..2203121a43e 100644 --- a/include/linux/matroxfb.h +++ b/include/linux/matroxfb.h @@ -37,7 +37,7 @@ enum matroxfb_ctrl_id { MATROXFB_CID_LAST }; -#define FBIO_WAITFORVSYNC _IOW('F', 0x20, u_int32_t) +#define FBIO_WAITFORVSYNC _IOW('F', 0x20, __u32) #endif diff --git a/include/linux/max17040_battery.h b/include/linux/max17040_battery.h new file mode 100644 index 00000000000..ad97b06cf93 --- /dev/null +++ b/include/linux/max17040_battery.h @@ -0,0 +1,19 @@ +/* + * Copyright (C) 2009 Samsung Electronics + * Minkyu Kang <mk7.kang@samsung.com> + * + * This program is free software; you can redistribute it and/or modify + * it under the terms of the GNU General Public License version 2 as + * published by the Free Software Foundation. + */ + +#ifndef __MAX17040_BATTERY_H_ +#define __MAX17040_BATTERY_H_ + +struct max17040_platform_data { + int (*battery_online)(void); + int (*charger_online)(void); + int (*charger_enable)(void); +}; + +#endif diff --git a/include/linux/mdio.h b/include/linux/mdio.h new file mode 100644 index 00000000000..c779b49a1fd --- /dev/null +++ b/include/linux/mdio.h @@ -0,0 +1,356 @@ +/* + * linux/mdio.h: definitions for MDIO (clause 45) transceivers + * Copyright 2006-2009 Solarflare Communications Inc. + * + * This program is free software; you can redistribute it and/or modify it + * under the terms of the GNU General Public License version 2 as published + * by the Free Software Foundation, incorporated herein by reference. + */ + +#ifndef __LINUX_MDIO_H__ +#define __LINUX_MDIO_H__ + +#include <linux/mii.h> + +/* MDIO Manageable Devices (MMDs). */ +#define MDIO_MMD_PMAPMD 1 /* Physical Medium Attachment/ + * Physical Medium Dependent */ +#define MDIO_MMD_WIS 2 /* WAN Interface Sublayer */ +#define MDIO_MMD_PCS 3 /* Physical Coding Sublayer */ +#define MDIO_MMD_PHYXS 4 /* PHY Extender Sublayer */ +#define MDIO_MMD_DTEXS 5 /* DTE Extender Sublayer */ +#define MDIO_MMD_TC 6 /* Transmission Convergence */ +#define MDIO_MMD_AN 7 /* Auto-Negotiation */ +#define MDIO_MMD_C22EXT 29 /* Clause 22 extension */ +#define MDIO_MMD_VEND1 30 /* Vendor specific 1 */ +#define MDIO_MMD_VEND2 31 /* Vendor specific 2 */ + +/* Generic MDIO registers. 
*/ +#define MDIO_CTRL1 MII_BMCR +#define MDIO_STAT1 MII_BMSR +#define MDIO_DEVID1 MII_PHYSID1 +#define MDIO_DEVID2 MII_PHYSID2 +#define MDIO_SPEED 4 /* Speed ability */ +#define MDIO_DEVS1 5 /* Devices in package */ +#define MDIO_DEVS2 6 +#define MDIO_CTRL2 7 /* 10G control 2 */ +#define MDIO_STAT2 8 /* 10G status 2 */ +#define MDIO_PMA_TXDIS 9 /* 10G PMA/PMD transmit disable */ +#define MDIO_PMA_RXDET 10 /* 10G PMA/PMD receive signal detect */ +#define MDIO_PMA_EXTABLE 11 /* 10G PMA/PMD extended ability */ +#define MDIO_PKGID1 14 /* Package identifier */ +#define MDIO_PKGID2 15 +#define MDIO_AN_ADVERTISE 16 /* AN advertising (base page) */ +#define MDIO_AN_LPA 19 /* AN LP abilities (base page) */ +#define MDIO_PHYXS_LNSTAT 24 /* PHY XGXS lane state */ + +/* Media-dependent registers. */ +#define MDIO_PMA_10GBT_SWAPPOL 130 /* 10GBASE-T pair swap & polarity */ +#define MDIO_PMA_10GBT_TXPWR 131 /* 10GBASE-T TX power control */ +#define MDIO_PMA_10GBT_SNR 133 /* 10GBASE-T SNR margin, lane A. + * Lanes B-D are numbered 134-136. */ +#define MDIO_PMA_10GBR_FECABLE 170 /* 10GBASE-R FEC ability */ +#define MDIO_PCS_10GBX_STAT1 24 /* 10GBASE-X PCS status 1 */ +#define MDIO_PCS_10GBRT_STAT1 32 /* 10GBASE-R/-T PCS status 1 */ +#define MDIO_PCS_10GBRT_STAT2 33 /* 10GBASE-R/-T PCS status 2 */ +#define MDIO_AN_10GBT_CTRL 32 /* 10GBASE-T auto-negotiation control */ +#define MDIO_AN_10GBT_STAT 33 /* 10GBASE-T auto-negotiation status */ + +/* LASI (Link Alarm Status Interrupt) registers, defined by XENPAK MSA. */ +#define MDIO_PMA_LASI_RXCTRL 0x9000 /* RX_ALARM control */ +#define MDIO_PMA_LASI_TXCTRL 0x9001 /* TX_ALARM control */ +#define MDIO_PMA_LASI_CTRL 0x9002 /* LASI control */ +#define MDIO_PMA_LASI_RXSTAT 0x9003 /* RX_ALARM status */ +#define MDIO_PMA_LASI_TXSTAT 0x9004 /* TX_ALARM status */ +#define MDIO_PMA_LASI_STAT 0x9005 /* LASI status */ + +/* Control register 1. */ +/* Enable extended speed selection */ +#define MDIO_CTRL1_SPEEDSELEXT (BMCR_SPEED1000 | BMCR_SPEED100) +/* All speed selection bits */ +#define MDIO_CTRL1_SPEEDSEL (MDIO_CTRL1_SPEEDSELEXT | 0x003c) +#define MDIO_CTRL1_FULLDPLX BMCR_FULLDPLX +#define MDIO_CTRL1_LPOWER BMCR_PDOWN +#define MDIO_CTRL1_RESET BMCR_RESET +#define MDIO_PMA_CTRL1_LOOPBACK 0x0001 +#define MDIO_PMA_CTRL1_SPEED1000 BMCR_SPEED1000 +#define MDIO_PMA_CTRL1_SPEED100 BMCR_SPEED100 +#define MDIO_PCS_CTRL1_LOOPBACK BMCR_LOOPBACK +#define MDIO_PHYXS_CTRL1_LOOPBACK BMCR_LOOPBACK +#define MDIO_AN_CTRL1_RESTART BMCR_ANRESTART +#define MDIO_AN_CTRL1_ENABLE BMCR_ANENABLE +#define MDIO_AN_CTRL1_XNP 0x2000 /* Enable extended next page */ + +/* 10 Gb/s */ +#define MDIO_CTRL1_SPEED10G (MDIO_CTRL1_SPEEDSELEXT | 0x00) +/* 10PASS-TS/2BASE-TL */ +#define MDIO_CTRL1_SPEED10P2B (MDIO_CTRL1_SPEEDSELEXT | 0x04) + +/* Status register 1. */ +#define MDIO_STAT1_LPOWERABLE 0x0002 /* Low-power ability */ +#define MDIO_STAT1_LSTATUS BMSR_LSTATUS +#define MDIO_STAT1_FAULT 0x0080 /* Fault */ +#define MDIO_AN_STAT1_LPABLE 0x0001 /* Link partner AN ability */ +#define MDIO_AN_STAT1_ABLE BMSR_ANEGCAPABLE +#define MDIO_AN_STAT1_RFAULT BMSR_RFAULT +#define MDIO_AN_STAT1_COMPLETE BMSR_ANEGCOMPLETE +#define MDIO_AN_STAT1_PAGE 0x0040 /* Page received */ +#define MDIO_AN_STAT1_XNP 0x0080 /* Extended next page status */ + +/* Speed register. 
*/ +#define MDIO_SPEED_10G 0x0001 /* 10G capable */ +#define MDIO_PMA_SPEED_2B 0x0002 /* 2BASE-TL capable */ +#define MDIO_PMA_SPEED_10P 0x0004 /* 10PASS-TS capable */ +#define MDIO_PMA_SPEED_1000 0x0010 /* 1000M capable */ +#define MDIO_PMA_SPEED_100 0x0020 /* 100M capable */ +#define MDIO_PMA_SPEED_10 0x0040 /* 10M capable */ +#define MDIO_PCS_SPEED_10P2B 0x0002 /* 10PASS-TS/2BASE-TL capable */ + +/* Device present registers. */ +#define MDIO_DEVS_PRESENT(devad) (1 << (devad)) +#define MDIO_DEVS_PMAPMD MDIO_DEVS_PRESENT(MDIO_MMD_PMAPMD) +#define MDIO_DEVS_WIS MDIO_DEVS_PRESENT(MDIO_MMD_WIS) +#define MDIO_DEVS_PCS MDIO_DEVS_PRESENT(MDIO_MMD_PCS) +#define MDIO_DEVS_PHYXS MDIO_DEVS_PRESENT(MDIO_MMD_PHYXS) +#define MDIO_DEVS_DTEXS MDIO_DEVS_PRESENT(MDIO_MMD_DTEXS) +#define MDIO_DEVS_TC MDIO_DEVS_PRESENT(MDIO_MMD_TC) +#define MDIO_DEVS_AN MDIO_DEVS_PRESENT(MDIO_MMD_AN) +#define MDIO_DEVS_C22EXT MDIO_DEVS_PRESENT(MDIO_MMD_C22EXT) + +/* Control register 2. */ +#define MDIO_PMA_CTRL2_TYPE 0x000f /* PMA/PMD type selection */ +#define MDIO_PMA_CTRL2_10GBCX4 0x0000 /* 10GBASE-CX4 type */ +#define MDIO_PMA_CTRL2_10GBEW 0x0001 /* 10GBASE-EW type */ +#define MDIO_PMA_CTRL2_10GBLW 0x0002 /* 10GBASE-LW type */ +#define MDIO_PMA_CTRL2_10GBSW 0x0003 /* 10GBASE-SW type */ +#define MDIO_PMA_CTRL2_10GBLX4 0x0004 /* 10GBASE-LX4 type */ +#define MDIO_PMA_CTRL2_10GBER 0x0005 /* 10GBASE-ER type */ +#define MDIO_PMA_CTRL2_10GBLR 0x0006 /* 10GBASE-LR type */ +#define MDIO_PMA_CTRL2_10GBSR 0x0007 /* 10GBASE-SR type */ +#define MDIO_PMA_CTRL2_10GBLRM 0x0008 /* 10GBASE-LRM type */ +#define MDIO_PMA_CTRL2_10GBT 0x0009 /* 10GBASE-T type */ +#define MDIO_PMA_CTRL2_10GBKX4 0x000a /* 10GBASE-KX4 type */ +#define MDIO_PMA_CTRL2_10GBKR 0x000b /* 10GBASE-KR type */ +#define MDIO_PMA_CTRL2_1000BT 0x000c /* 1000BASE-T type */ +#define MDIO_PMA_CTRL2_1000BKX 0x000d /* 1000BASE-KX type */ +#define MDIO_PMA_CTRL2_100BTX 0x000e /* 100BASE-TX type */ +#define MDIO_PMA_CTRL2_10BT 0x000f /* 10BASE-T type */ +#define MDIO_PCS_CTRL2_TYPE 0x0003 /* PCS type selection */ +#define MDIO_PCS_CTRL2_10GBR 0x0000 /* 10GBASE-R type */ +#define MDIO_PCS_CTRL2_10GBX 0x0001 /* 10GBASE-X type */ +#define MDIO_PCS_CTRL2_10GBW 0x0002 /* 10GBASE-W type */ +#define MDIO_PCS_CTRL2_10GBT 0x0003 /* 10GBASE-T type */ + +/* Status register 2. 
*/ +#define MDIO_STAT2_RXFAULT 0x0400 /* Receive fault */ +#define MDIO_STAT2_TXFAULT 0x0800 /* Transmit fault */ +#define MDIO_STAT2_DEVPRST 0xc000 /* Device present */ +#define MDIO_STAT2_DEVPRST_VAL 0x8000 /* Device present value */ +#define MDIO_PMA_STAT2_LBABLE 0x0001 /* PMA loopback ability */ +#define MDIO_PMA_STAT2_10GBEW 0x0002 /* 10GBASE-EW ability */ +#define MDIO_PMA_STAT2_10GBLW 0x0004 /* 10GBASE-LW ability */ +#define MDIO_PMA_STAT2_10GBSW 0x0008 /* 10GBASE-SW ability */ +#define MDIO_PMA_STAT2_10GBLX4 0x0010 /* 10GBASE-LX4 ability */ +#define MDIO_PMA_STAT2_10GBER 0x0020 /* 10GBASE-ER ability */ +#define MDIO_PMA_STAT2_10GBLR 0x0040 /* 10GBASE-LR ability */ +#define MDIO_PMA_STAT2_10GBSR 0x0080 /* 10GBASE-SR ability */ +#define MDIO_PMD_STAT2_TXDISAB 0x0100 /* PMD TX disable ability */ +#define MDIO_PMA_STAT2_EXTABLE 0x0200 /* Extended abilities */ +#define MDIO_PMA_STAT2_RXFLTABLE 0x1000 /* Receive fault ability */ +#define MDIO_PMA_STAT2_TXFLTABLE 0x2000 /* Transmit fault ability */ +#define MDIO_PCS_STAT2_10GBR 0x0001 /* 10GBASE-R capable */ +#define MDIO_PCS_STAT2_10GBX 0x0002 /* 10GBASE-X capable */ +#define MDIO_PCS_STAT2_10GBW 0x0004 /* 10GBASE-W capable */ +#define MDIO_PCS_STAT2_RXFLTABLE 0x1000 /* Receive fault ability */ +#define MDIO_PCS_STAT2_TXFLTABLE 0x2000 /* Transmit fault ability */ + +/* Transmit disable register. */ +#define MDIO_PMD_TXDIS_GLOBAL 0x0001 /* Global PMD TX disable */ +#define MDIO_PMD_TXDIS_0 0x0002 /* PMD TX disable 0 */ +#define MDIO_PMD_TXDIS_1 0x0004 /* PMD TX disable 1 */ +#define MDIO_PMD_TXDIS_2 0x0008 /* PMD TX disable 2 */ +#define MDIO_PMD_TXDIS_3 0x0010 /* PMD TX disable 3 */ + +/* Receive signal detect register. */ +#define MDIO_PMD_RXDET_GLOBAL 0x0001 /* Global PMD RX signal detect */ +#define MDIO_PMD_RXDET_0 0x0002 /* PMD RX signal detect 0 */ +#define MDIO_PMD_RXDET_1 0x0004 /* PMD RX signal detect 1 */ +#define MDIO_PMD_RXDET_2 0x0008 /* PMD RX signal detect 2 */ +#define MDIO_PMD_RXDET_3 0x0010 /* PMD RX signal detect 3 */ + +/* Extended abilities register. */ +#define MDIO_PMA_EXTABLE_10GCX4 0x0001 /* 10GBASE-CX4 ability */ +#define MDIO_PMA_EXTABLE_10GBLRM 0x0002 /* 10GBASE-LRM ability */ +#define MDIO_PMA_EXTABLE_10GBT 0x0004 /* 10GBASE-T ability */ +#define MDIO_PMA_EXTABLE_10GBKX4 0x0008 /* 10GBASE-KX4 ability */ +#define MDIO_PMA_EXTABLE_10GBKR 0x0010 /* 10GBASE-KR ability */ +#define MDIO_PMA_EXTABLE_1000BT 0x0020 /* 1000BASE-T ability */ +#define MDIO_PMA_EXTABLE_1000BKX 0x0040 /* 1000BASE-KX ability */ +#define MDIO_PMA_EXTABLE_100BTX 0x0080 /* 100BASE-TX ability */ +#define MDIO_PMA_EXTABLE_10BT 0x0100 /* 10BASE-T ability */ + +/* PHY XGXS lane state register. */ +#define MDIO_PHYXS_LNSTAT_SYNC0 0x0001 +#define MDIO_PHYXS_LNSTAT_SYNC1 0x0002 +#define MDIO_PHYXS_LNSTAT_SYNC2 0x0004 +#define MDIO_PHYXS_LNSTAT_SYNC3 0x0008 +#define MDIO_PHYXS_LNSTAT_ALIGN 0x1000 + +/* PMA 10GBASE-T pair swap & polarity */ +#define MDIO_PMA_10GBT_SWAPPOL_ABNX 0x0001 /* Pair A/B uncrossed */ +#define MDIO_PMA_10GBT_SWAPPOL_CDNX 0x0002 /* Pair C/D uncrossed */ +#define MDIO_PMA_10GBT_SWAPPOL_AREV 0x0100 /* Pair A polarity reversed */ +#define MDIO_PMA_10GBT_SWAPPOL_BREV 0x0200 /* Pair B polarity reversed */ +#define MDIO_PMA_10GBT_SWAPPOL_CREV 0x0400 /* Pair C polarity reversed */ +#define MDIO_PMA_10GBT_SWAPPOL_DREV 0x0800 /* Pair D polarity reversed */ + +/* PMA 10GBASE-T TX power register. */ +#define MDIO_PMA_10GBT_TXPWR_SHORT 0x0001 /* Short-reach mode */ + +/* PMA 10GBASE-T SNR registers. 
*/ +/* Value is SNR margin in dB, clamped to range [-127, 127], plus 0x8000. */ +#define MDIO_PMA_10GBT_SNR_BIAS 0x8000 +#define MDIO_PMA_10GBT_SNR_MAX 127 + +/* PMA 10GBASE-R FEC ability register. */ +#define MDIO_PMA_10GBR_FECABLE_ABLE 0x0001 /* FEC ability */ +#define MDIO_PMA_10GBR_FECABLE_ERRABLE 0x0002 /* FEC error indic. ability */ + +/* PCS 10GBASE-R/-T status register 1. */ +#define MDIO_PCS_10GBRT_STAT1_BLKLK 0x0001 /* Block lock attained */ + +/* PCS 10GBASE-R/-T status register 2. */ +#define MDIO_PCS_10GBRT_STAT2_ERR 0x00ff +#define MDIO_PCS_10GBRT_STAT2_BER 0x3f00 + +/* AN 10GBASE-T control register. */ +#define MDIO_AN_10GBT_CTRL_ADV10G 0x1000 /* Advertise 10GBASE-T */ + +/* AN 10GBASE-T status register. */ +#define MDIO_AN_10GBT_STAT_LPTRR 0x0200 /* LP training reset req. */ +#define MDIO_AN_10GBT_STAT_LPLTABLE 0x0400 /* LP loop timing ability */ +#define MDIO_AN_10GBT_STAT_LP10G 0x0800 /* LP is 10GBT capable */ +#define MDIO_AN_10GBT_STAT_REMOK 0x1000 /* Remote OK */ +#define MDIO_AN_10GBT_STAT_LOCOK 0x2000 /* Local OK */ +#define MDIO_AN_10GBT_STAT_MS 0x4000 /* Master/slave config */ +#define MDIO_AN_10GBT_STAT_MSFLT 0x8000 /* Master/slave config fault */ + +/* LASI RX_ALARM control/status registers. */ +#define MDIO_PMA_LASI_RX_PHYXSLFLT 0x0001 /* PHY XS RX local fault */ +#define MDIO_PMA_LASI_RX_PCSLFLT 0x0008 /* PCS RX local fault */ +#define MDIO_PMA_LASI_RX_PMALFLT 0x0010 /* PMA/PMD RX local fault */ +#define MDIO_PMA_LASI_RX_OPTICPOWERFLT 0x0020 /* RX optical power fault */ +#define MDIO_PMA_LASI_RX_WISLFLT 0x0200 /* WIS local fault */ + +/* LASI TX_ALARM control/status registers. */ +#define MDIO_PMA_LASI_TX_PHYXSLFLT 0x0001 /* PHY XS TX local fault */ +#define MDIO_PMA_LASI_TX_PCSLFLT 0x0008 /* PCS TX local fault */ +#define MDIO_PMA_LASI_TX_PMALFLT 0x0010 /* PMA/PMD TX local fault */ +#define MDIO_PMA_LASI_TX_LASERPOWERFLT 0x0080 /* Laser output power fault */ +#define MDIO_PMA_LASI_TX_LASERTEMPFLT 0x0100 /* Laser temperature fault */ +#define MDIO_PMA_LASI_TX_LASERBICURRFLT 0x0200 /* Laser bias current fault */ + +/* LASI control/status registers. */ +#define MDIO_PMA_LASI_LSALARM 0x0001 /* LS_ALARM enable/status */ +#define MDIO_PMA_LASI_TXALARM 0x0002 /* TX_ALARM enable/status */ +#define MDIO_PMA_LASI_RXALARM 0x0004 /* RX_ALARM enable/status */ + +/* Mapping between MDIO PRTAD/DEVAD and mii_ioctl_data::phy_id */ + +#define MDIO_PHY_ID_C45 0x8000 +#define MDIO_PHY_ID_PRTAD 0x03e0 +#define MDIO_PHY_ID_DEVAD 0x001f +#define MDIO_PHY_ID_C45_MASK \ + (MDIO_PHY_ID_C45 | MDIO_PHY_ID_PRTAD | MDIO_PHY_ID_DEVAD) + +static inline __u16 mdio_phy_id_c45(int prtad, int devad) +{ + return MDIO_PHY_ID_C45 | (prtad << 5) | devad; +} + +static inline bool mdio_phy_id_is_c45(int phy_id) +{ + return (phy_id & MDIO_PHY_ID_C45) && !(phy_id & ~MDIO_PHY_ID_C45_MASK); +} + +static inline __u16 mdio_phy_id_prtad(int phy_id) +{ + return (phy_id & MDIO_PHY_ID_PRTAD) >> 5; +} + +static inline __u16 mdio_phy_id_devad(int phy_id) +{ + return phy_id & MDIO_PHY_ID_DEVAD; +} + +#define MDIO_SUPPORTS_C22 1 +#define MDIO_SUPPORTS_C45 2 + +#ifdef __KERNEL__ + +/** + * struct mdio_if_info - Ethernet controller MDIO interface + * @prtad: PRTAD of the PHY (%MDIO_PRTAD_NONE if not present/unknown) + * @mmds: Mask of MMDs expected to be present in the PHY. This must be + * non-zero unless @prtad = %MDIO_PRTAD_NONE. + * @mode_support: MDIO modes supported. If %MDIO_SUPPORTS_C22 is set then + * MII register access will be passed through with @devad = + * %MDIO_DEVAD_NONE. 
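For illustration only, not part of the patch: how the clause 45 PRTAD/DEVAD pair defined just above is packed into mii_ioctl_data::phy_id and recovered again. Port address 5 with the PMA/PMD MMD packs to 0x8000 | (5 << 5) | 1 == 0x80a1; the constants and helpers are from this header, only the wrapper function is invented.

static void example_phy_id_roundtrip(void)
{
        __u16 phy_id = mdio_phy_id_c45(5, MDIO_MMD_PMAPMD);    /* == 0x80a1 */

        if (mdio_phy_id_is_c45(phy_id))
                pr_info("prtad %d devad %d\n",
                        mdio_phy_id_prtad(phy_id),      /* 5 */
                        mdio_phy_id_devad(phy_id));     /* 1, MDIO_MMD_PMAPMD */
}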
If %MDIO_EMULATE_C22 is set then access to + * commonly used clause 22 registers will be translated into + * clause 45 registers. + * @dev: Net device structure + * @mdio_read: Register read function; returns value or negative error code + * @mdio_write: Register write function; returns 0 or negative error code + */ +struct mdio_if_info { + int prtad; + u32 mmds; + unsigned mode_support; + + struct net_device *dev; + int (*mdio_read)(struct net_device *dev, int prtad, int devad, + u16 addr); + int (*mdio_write)(struct net_device *dev, int prtad, int devad, + u16 addr, u16 val); +}; + +#define MDIO_PRTAD_NONE (-1) +#define MDIO_DEVAD_NONE (-1) +#define MDIO_EMULATE_C22 4 + +struct ethtool_cmd; +struct ethtool_pauseparam; +extern int mdio45_probe(struct mdio_if_info *mdio, int prtad); +extern int mdio_set_flag(const struct mdio_if_info *mdio, + int prtad, int devad, u16 addr, int mask, + bool sense); +extern int mdio45_links_ok(const struct mdio_if_info *mdio, u32 mmds); +extern int mdio45_nway_restart(const struct mdio_if_info *mdio); +extern void mdio45_ethtool_gset_npage(const struct mdio_if_info *mdio, + struct ethtool_cmd *ecmd, + u32 npage_adv, u32 npage_lpa); +extern void +mdio45_ethtool_spauseparam_an(const struct mdio_if_info *mdio, + const struct ethtool_pauseparam *ecmd); + +/** + * mdio45_ethtool_gset - get settings for ETHTOOL_GSET + * @mdio: MDIO interface + * @ecmd: Ethtool request structure + * + * Since the CSRs for auto-negotiation using next pages are not fully + * standardised, this function does not attempt to decode them. Use + * mdio45_ethtool_gset_npage() to specify advertisement bits from next + * pages. + */ +static inline void mdio45_ethtool_gset(const struct mdio_if_info *mdio, + struct ethtool_cmd *ecmd) +{ + mdio45_ethtool_gset_npage(mdio, ecmd, 0, 0); +} + +extern int mdio_mii_ioctl(const struct mdio_if_info *mdio, + struct mii_ioctl_data *mii_data, int cmd); + +#endif /* __KERNEL__ */ +#endif /* __LINUX_MDIO_H__ */ diff --git a/include/linux/memcontrol.h b/include/linux/memcontrol.h index 326f45c8653..e46a0734ab6 100644 --- a/include/linux/memcontrol.h +++ b/include/linux/memcontrol.h @@ -56,7 +56,7 @@ extern void mem_cgroup_move_lists(struct page *page, enum lru_list from, enum lru_list to); extern void mem_cgroup_uncharge_page(struct page *page); extern void mem_cgroup_uncharge_cache_page(struct page *page); -extern int mem_cgroup_shrink_usage(struct page *page, +extern int mem_cgroup_shmem_charge_fallback(struct page *page, struct mm_struct *mm, gfp_t gfp_mask); extern unsigned long mem_cgroup_isolate_pages(unsigned long nr_to_scan, @@ -75,7 +75,7 @@ int mm_match_cgroup(const struct mm_struct *mm, const struct mem_cgroup *cgroup) { struct mem_cgroup *mem; rcu_read_lock(); - mem = mem_cgroup_from_task((mm)->owner); + mem = mem_cgroup_from_task(rcu_dereference((mm)->owner)); rcu_read_unlock(); return cgroup == mem; } @@ -88,15 +88,13 @@ extern void mem_cgroup_end_migration(struct mem_cgroup *mem, /* * For memory reclaim. 
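For illustration only, not part of the patch: an ethtool get_settings() hook built on the mdio45_ethtool_gset() helper documented in the mdio.h hunk above, the way a 10G driver embedding a struct mdio_if_info might wire it up. The example_nic structure is invented for the sketch.

struct example_nic {
        struct mdio_if_info mdio;
        /* ... rest of the driver's private state ... */
};

static int example_get_settings(struct net_device *dev, struct ethtool_cmd *ecmd)
{
        struct example_nic *nic = netdev_priv(dev);

        mdio45_ethtool_gset(&nic->mdio, ecmd);  /* no next-page decoding */
        return 0;
}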
*/ -extern int mem_cgroup_calc_mapped_ratio(struct mem_cgroup *mem); -extern long mem_cgroup_reclaim_imbalance(struct mem_cgroup *mem); - extern int mem_cgroup_get_reclaim_priority(struct mem_cgroup *mem); extern void mem_cgroup_note_reclaim_priority(struct mem_cgroup *mem, int priority); extern void mem_cgroup_record_reclaim_priority(struct mem_cgroup *mem, int priority); int mem_cgroup_inactive_anon_is_low(struct mem_cgroup *memcg); +int mem_cgroup_inactive_file_is_low(struct mem_cgroup *memcg); unsigned long mem_cgroup_zone_nr_pages(struct mem_cgroup *memcg, struct zone *zone, enum lru_list lru); @@ -104,6 +102,8 @@ struct zone_reclaim_stat *mem_cgroup_get_reclaim_stat(struct mem_cgroup *memcg, struct zone *zone); struct zone_reclaim_stat* mem_cgroup_get_reclaim_stat_from_page(struct page *page); +extern void mem_cgroup_print_oom_info(struct mem_cgroup *memcg, + struct task_struct *p); #ifdef CONFIG_CGROUP_MEM_RES_CTLR_SWAP extern int do_swap_account; @@ -117,7 +117,7 @@ static inline bool mem_cgroup_disabled(void) } extern bool mem_cgroup_oom_called(struct task_struct *task); - +void mem_cgroup_update_mapped_file_stat(struct page *page, int val); #else /* CONFIG_CGROUP_MEM_RES_CTLR */ struct mem_cgroup; @@ -156,7 +156,7 @@ static inline void mem_cgroup_uncharge_cache_page(struct page *page) { } -static inline int mem_cgroup_shrink_usage(struct page *page, +static inline int mem_cgroup_shmem_charge_fallback(struct page *page, struct mm_struct *mm, gfp_t gfp_mask) { return 0; @@ -209,16 +209,6 @@ static inline void mem_cgroup_end_migration(struct mem_cgroup *mem, { } -static inline int mem_cgroup_calc_mapped_ratio(struct mem_cgroup *mem) -{ - return 0; -} - -static inline int mem_cgroup_reclaim_imbalance(struct mem_cgroup *mem) -{ - return 0; -} - static inline int mem_cgroup_get_reclaim_priority(struct mem_cgroup *mem) { return 0; @@ -250,6 +240,12 @@ mem_cgroup_inactive_anon_is_low(struct mem_cgroup *memcg) return 1; } +static inline int +mem_cgroup_inactive_file_is_low(struct mem_cgroup *memcg) +{ + return 1; +} + static inline unsigned long mem_cgroup_zone_nr_pages(struct mem_cgroup *memcg, struct zone *zone, enum lru_list lru) @@ -270,6 +266,16 @@ mem_cgroup_get_reclaim_stat_from_page(struct page *page) return NULL; } +static inline void +mem_cgroup_print_oom_info(struct mem_cgroup *memcg, struct task_struct *p) +{ +} + +static inline void mem_cgroup_update_mapped_file_stat(struct page *page, + int val) +{ +} + #endif /* CONFIG_CGROUP_MEM_CONT */ #endif /* _LINUX_MEMCONTROL_H */ diff --git a/include/linux/memory.h b/include/linux/memory.h index 3fdc10806d3..37fa19b34ef 100644 --- a/include/linux/memory.h +++ b/include/linux/memory.h @@ -99,4 +99,21 @@ enum mem_add_context { BOOT, HOTPLUG }; #define hotplug_memory_notifier(fn, pri) do { } while (0) #endif +/* + * 'struct memory_accessor' is a generic interface to provide + * in-kernel access to persistent memory such as i2c or SPI EEPROMs + */ +struct memory_accessor { + ssize_t (*read)(struct memory_accessor *, char *buf, off_t offset, + size_t count); + ssize_t (*write)(struct memory_accessor *, const char *buf, + off_t offset, size_t count); +}; + +/* + * Kernel text modification mutex, used for code patching. Users of this lock + * can sleep. 
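For illustration only, not part of the patch: a board callback handed a struct memory_accessor (for example by an EEPROM driver's setup hook) that reads a 6-byte MAC address from the start of the device. The callback name and the offset are assumptions made for this sketch.

static void example_eeprom_setup(struct memory_accessor *mem_acc, void *context)
{
        char mac[6];

        if (mem_acc->read(mem_acc, mac, 0, sizeof(mac)) == sizeof(mac))
                pr_info("EEPROM-provided MAC is %pM\n", mac);
}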
+ */ +extern struct mutex text_mutex; + #endif /* _LINUX_MEMORY_H_ */ diff --git a/include/linux/mfd/ab3100.h b/include/linux/mfd/ab3100.h new file mode 100644 index 00000000000..7a3f316e384 --- /dev/null +++ b/include/linux/mfd/ab3100.h @@ -0,0 +1,103 @@ +/* + * Copyright (C) 2007-2009 ST-Ericsson AB + * License terms: GNU General Public License (GPL) version 2 + * AB3100 core access functions + * Author: Linus Walleij <linus.walleij@stericsson.com> + */ + +#include <linux/device.h> + +#ifndef MFD_AB3100_H +#define MFD_AB3100_H + +#define ABUNKNOWN 0 +#define AB3000 1 +#define AB3100 2 + +/* + * AB3100, EVENTA1, A2 and A3 event register flags + * these are catenated into a single 32-bit flag in the code + * for event notification broadcasts. + */ +#define AB3100_EVENTA1_ONSWA (0x01<<16) +#define AB3100_EVENTA1_ONSWB (0x02<<16) +#define AB3100_EVENTA1_ONSWC (0x04<<16) +#define AB3100_EVENTA1_DCIO (0x08<<16) +#define AB3100_EVENTA1_OVER_TEMP (0x10<<16) +#define AB3100_EVENTA1_SIM_OFF (0x20<<16) +#define AB3100_EVENTA1_VBUS (0x40<<16) +#define AB3100_EVENTA1_VSET_USB (0x80<<16) + +#define AB3100_EVENTA2_READY_TX (0x01<<8) +#define AB3100_EVENTA2_READY_RX (0x02<<8) +#define AB3100_EVENTA2_OVERRUN_ERROR (0x04<<8) +#define AB3100_EVENTA2_FRAMING_ERROR (0x08<<8) +#define AB3100_EVENTA2_CHARG_OVERCURRENT (0x10<<8) +#define AB3100_EVENTA2_MIDR (0x20<<8) +#define AB3100_EVENTA2_BATTERY_REM (0x40<<8) +#define AB3100_EVENTA2_ALARM (0x80<<8) + +#define AB3100_EVENTA3_ADC_TRIG5 (0x01) +#define AB3100_EVENTA3_ADC_TRIG4 (0x02) +#define AB3100_EVENTA3_ADC_TRIG3 (0x04) +#define AB3100_EVENTA3_ADC_TRIG2 (0x08) +#define AB3100_EVENTA3_ADC_TRIGVBAT (0x10) +#define AB3100_EVENTA3_ADC_TRIGVTX (0x20) +#define AB3100_EVENTA3_ADC_TRIG1 (0x40) +#define AB3100_EVENTA3_ADC_TRIG0 (0x80) + +/* AB3100, STR register flags */ +#define AB3100_STR_ONSWA (0x01) +#define AB3100_STR_ONSWB (0x02) +#define AB3100_STR_ONSWC (0x04) +#define AB3100_STR_DCIO (0x08) +#define AB3100_STR_BOOT_MODE (0x10) +#define AB3100_STR_SIM_OFF (0x20) +#define AB3100_STR_BATT_REMOVAL (0x40) +#define AB3100_STR_VBUS (0x80) + +/** + * struct ab3100 + * @access_mutex: lock out concurrent accesses to the AB3100 registers + * @dev: pointer to the containing device + * @i2c_client: I2C client for this chip + * @testreg_client: secondary client for test registers + * @chip_name: name of this chip variant + * @chip_id: 8 bit chip ID for this chip variant + * @work: an event handling worker + * @event_subscribers: event subscribers are listed here + * @startup_events: a copy of the first reading of the event registers + * @startup_events_read: whether the first events have been read + * + * This struct is PRIVATE and devices using it should NOT + * access ANY fields. It is used as a token for calling the + * AB3100 functions. 
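For illustration only, not part of the patch: a subscriber notifier that tests the catenated 32-bit AB3100 event word laid out above (EVENTA1 in bits 23..16, EVENTA2 in bits 15..8, EVENTA3 in bits 7..0). It assumes the core passes that word as the notifier's event argument, and it would be hooked up with the ab3100_event_register() call declared further down; the function name is invented.

static int example_ab3100_event(struct notifier_block *nb,
                                unsigned long event, void *data)
{
        if (event & AB3100_EVENTA2_ALARM)
                pr_info("AB3100: RTC alarm event\n");
        if (event & AB3100_EVENTA1_OVER_TEMP)
                pr_warning("AB3100: over-temperature event\n");
        return NOTIFY_OK;
}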
+ */ +struct ab3100 { + struct mutex access_mutex; + struct device *dev; + struct i2c_client *i2c_client; + struct i2c_client *testreg_client; + char chip_name[32]; + u8 chip_id; + struct work_struct work; + struct blocking_notifier_head event_subscribers; + u32 startup_events; + bool startup_events_read; +}; + +int ab3100_set_register(struct ab3100 *ab3100, u8 reg, u8 regval); +int ab3100_get_register(struct ab3100 *ab3100, u8 reg, u8 *regval); +int ab3100_get_register_page(struct ab3100 *ab3100, + u8 first_reg, u8 *regvals, u8 numregs); +int ab3100_mask_and_set_register(struct ab3100 *ab3100, + u8 reg, u8 andmask, u8 ormask); +u8 ab3100_get_chip_type(struct ab3100 *ab3100); +int ab3100_event_register(struct ab3100 *ab3100, + struct notifier_block *nb); +int ab3100_event_unregister(struct ab3100 *ab3100, + struct notifier_block *nb); +int ab3100_event_registers_startup_state_get(struct ab3100 *ab3100, + u32 *fatevent); + +#endif diff --git a/include/linux/mfd/asic3.h b/include/linux/mfd/asic3.h index 322cd6deb9f..de3c4ad19af 100644 --- a/include/linux/mfd/asic3.h +++ b/include/linux/mfd/asic3.h @@ -30,6 +30,13 @@ struct asic3_platform_data { #define ASIC3_NUM_GPIOS 64 #define ASIC3_NR_IRQS ASIC3_NUM_GPIOS + 6 +#define ASIC3_IRQ_LED0 64 +#define ASIC3_IRQ_LED1 65 +#define ASIC3_IRQ_LED2 66 +#define ASIC3_IRQ_SPI 67 +#define ASIC3_IRQ_SMBUS 68 +#define ASIC3_IRQ_OWM 69 + #define ASIC3_TO_GPIO(gpio) (NR_BUILTIN_GPIO + (gpio)) #define ASIC3_GPIO_BANK_A 0 @@ -227,8 +234,8 @@ struct asic3_platform_data { /* Basic control of the SD ASIC */ -#define ASIC3_SDHWCTRL_Base 0x0E00 -#define ASIC3_SDHWCTRL_SDConf 0x00 +#define ASIC3_SDHWCTRL_BASE 0x0E00 +#define ASIC3_SDHWCTRL_SDCONF 0x00 #define ASIC3_SDHWCTRL_SUSPEND (1 << 0) /* 1=suspend all SD operations */ #define ASIC3_SDHWCTRL_CLKSEL (1 << 1) /* 1=SDICK, 0=HCLK */ @@ -242,10 +249,10 @@ struct asic3_platform_data { /* SD card power supply ctrl 1=enable */ #define ASIC3_SDHWCTRL_SDPWR (1 << 6) -#define ASIC3_EXTCF_Base 0x1100 +#define ASIC3_EXTCF_BASE 0x1100 -#define ASIC3_EXTCF_Select 0x00 -#define ASIC3_EXTCF_Reset 0x04 +#define ASIC3_EXTCF_SELECT 0x00 +#define ASIC3_EXTCF_RESET 0x04 #define ASIC3_EXTCF_SMOD0 (1 << 0) /* slot number of mode 0 */ #define ASIC3_EXTCF_SMOD1 (1 << 1) /* slot number of mode 1 */ @@ -279,222 +286,9 @@ struct asic3_platform_data { * SDIO_CTRL Control registers for SDIO operations * *****************************************************************************/ -#define ASIC3_SD_CONFIG_Base 0x0400 /* Assumes 32 bit addressing */ - -#define ASIC3_SD_CONFIG_Command 0x08 /* R/W: Command */ - -/* [0:8] SD Control Register Base Address */ -#define ASIC3_SD_CONFIG_Addr0 0x20 - -/* [9:31] SD Control Register Base Address */ -#define ASIC3_SD_CONFIG_Addr1 0x24 - -/* R/O: interrupt assigned to pin */ -#define ASIC3_SD_CONFIG_IntPin 0x78 - -/* - * Set to 0x1f to clock SD controller, 0 otherwise. 
- * At 0x82 - Gated Clock Ctrl - */ -#define ASIC3_SD_CONFIG_ClkStop 0x80 - -/* Control clock of SD controller */ -#define ASIC3_SD_CONFIG_ClockMode 0x84 -#define ASIC3_SD_CONFIG_SDHC_PinStatus 0x88 /* R/0: SD pins status */ -#define ASIC3_SD_CONFIG_SDHC_Power1 0x90 /* Power1 - manual pwr ctrl */ - -/* auto power up after card inserted */ -#define ASIC3_SD_CONFIG_SDHC_Power2 0x92 - -/* auto power down when card removed */ -#define ASIC3_SD_CONFIG_SDHC_Power3 0x94 -#define ASIC3_SD_CONFIG_SDHC_CardDetect 0x98 -#define ASIC3_SD_CONFIG_SDHC_Slot 0xA0 /* R/O: support slot number */ -#define ASIC3_SD_CONFIG_SDHC_ExtGateClk1 0x1E0 /* Not used */ -#define ASIC3_SD_CONFIG_SDHC_ExtGateClk2 0x1E2 /* Not used*/ - -/* GPIO Output Reg. , at 0x1EA - GPIO Output Enable Reg. */ -#define ASIC3_SD_CONFIG_SDHC_GPIO_OutAndEnable 0x1E8 -#define ASIC3_SD_CONFIG_SDHC_GPIO_Status 0x1EC /* GPIO Status Reg. */ - -/* Bit 1: double buffer/single buffer */ -#define ASIC3_SD_CONFIG_SDHC_ExtGateClk3 0x1F0 - -/* Memory access enable (set to 1 to access SD Controller) */ -#define SD_CONFIG_COMMAND_MAE (1<<1) - -#define SD_CONFIG_CLK_ENABLE_ALL 0x1f - -#define SD_CONFIG_POWER1_PC_33V 0x0200 /* Set for 3.3 volts */ -#define SD_CONFIG_POWER1_PC_OFF 0x0000 /* Turn off power */ - - /* two bits - number of cycles for card detection */ -#define SD_CONFIG_CARDDETECTMODE_CLK ((x) & 0x3) - - -#define ASIC3_SD_CTRL_Base 0x1000 - -#define ASIC3_SD_CTRL_Cmd 0x00 -#define ASIC3_SD_CTRL_Arg0 0x08 -#define ASIC3_SD_CTRL_Arg1 0x0C -#define ASIC3_SD_CTRL_StopInternal 0x10 -#define ASIC3_SD_CTRL_TransferSectorCount 0x14 -#define ASIC3_SD_CTRL_Response0 0x18 -#define ASIC3_SD_CTRL_Response1 0x1C -#define ASIC3_SD_CTRL_Response2 0x20 -#define ASIC3_SD_CTRL_Response3 0x24 -#define ASIC3_SD_CTRL_Response4 0x28 -#define ASIC3_SD_CTRL_Response5 0x2C -#define ASIC3_SD_CTRL_Response6 0x30 -#define ASIC3_SD_CTRL_Response7 0x34 -#define ASIC3_SD_CTRL_CardStatus 0x38 -#define ASIC3_SD_CTRL_BufferCtrl 0x3C -#define ASIC3_SD_CTRL_IntMaskCard 0x40 -#define ASIC3_SD_CTRL_IntMaskBuffer 0x44 -#define ASIC3_SD_CTRL_CardClockCtrl 0x48 -#define ASIC3_SD_CTRL_MemCardXferDataLen 0x4C -#define ASIC3_SD_CTRL_MemCardOptionSetup 0x50 -#define ASIC3_SD_CTRL_ErrorStatus0 0x58 -#define ASIC3_SD_CTRL_ErrorStatus1 0x5C -#define ASIC3_SD_CTRL_DataPort 0x60 -#define ASIC3_SD_CTRL_TransactionCtrl 0x68 -#define ASIC3_SD_CTRL_SoftwareReset 0x1C0 - -#define SD_CTRL_SOFTWARE_RESET_CLEAR (1<<0) - -#define SD_CTRL_TRANSACTIONCONTROL_SET (1<<8) - -#define SD_CTRL_CARDCLOCKCONTROL_FOR_SD_CARD (1<<15) -#define SD_CTRL_CARDCLOCKCONTROL_ENABLE_CLOCK (1<<8) -#define SD_CTRL_CARDCLOCKCONTROL_CLK_DIV_512 (1<<7) -#define SD_CTRL_CARDCLOCKCONTROL_CLK_DIV_256 (1<<6) -#define SD_CTRL_CARDCLOCKCONTROL_CLK_DIV_128 (1<<5) -#define SD_CTRL_CARDCLOCKCONTROL_CLK_DIV_64 (1<<4) -#define SD_CTRL_CARDCLOCKCONTROL_CLK_DIV_32 (1<<3) -#define SD_CTRL_CARDCLOCKCONTROL_CLK_DIV_16 (1<<2) -#define SD_CTRL_CARDCLOCKCONTROL_CLK_DIV_8 (1<<1) -#define SD_CTRL_CARDCLOCKCONTROL_CLK_DIV_4 (1<<0) -#define SD_CTRL_CARDCLOCKCONTROL_CLK_DIV_2 (0<<0) - -#define MEM_CARD_OPTION_REQUIRED 0x000e -#define MEM_CARD_OPTION_DATA_RESPONSE_TIMEOUT(x) (((x) & 0x0f) << 4) -#define MEM_CARD_OPTION_C2_MODULE_NOT_PRESENT (1<<14) -#define MEM_CARD_OPTION_DATA_XFR_WIDTH_1 (1<<15) -#define MEM_CARD_OPTION_DATA_XFR_WIDTH_4 0 - -#define SD_CTRL_COMMAND_INDEX(x) ((x) & 0x3f) -#define SD_CTRL_COMMAND_TYPE_CMD (0 << 6) -#define SD_CTRL_COMMAND_TYPE_ACMD (1 << 6) -#define SD_CTRL_COMMAND_TYPE_AUTHENTICATION (2 << 6) -#define 
SD_CTRL_COMMAND_RESPONSE_TYPE_NORMAL (0 << 8) -#define SD_CTRL_COMMAND_RESPONSE_TYPE_EXT_R1 (4 << 8) -#define SD_CTRL_COMMAND_RESPONSE_TYPE_EXT_R1B (5 << 8) -#define SD_CTRL_COMMAND_RESPONSE_TYPE_EXT_R2 (6 << 8) -#define SD_CTRL_COMMAND_RESPONSE_TYPE_EXT_R3 (7 << 8) -#define SD_CTRL_COMMAND_DATA_PRESENT (1 << 11) -#define SD_CTRL_COMMAND_TRANSFER_READ (1 << 12) -#define SD_CTRL_COMMAND_TRANSFER_WRITE (0 << 12) -#define SD_CTRL_COMMAND_MULTI_BLOCK (1 << 13) -#define SD_CTRL_COMMAND_SECURITY_CMD (1 << 14) - -#define SD_CTRL_STOP_INTERNAL_ISSSUE_CMD12 (1 << 0) -#define SD_CTRL_STOP_INTERNAL_AUTO_ISSUE_CMD12 (1 << 8) - -#define SD_CTRL_CARDSTATUS_RESPONSE_END (1 << 0) -#define SD_CTRL_CARDSTATUS_RW_END (1 << 2) -#define SD_CTRL_CARDSTATUS_CARD_REMOVED_0 (1 << 3) -#define SD_CTRL_CARDSTATUS_CARD_INSERTED_0 (1 << 4) -#define SD_CTRL_CARDSTATUS_SIGNAL_STATE_PRESENT_0 (1 << 5) -#define SD_CTRL_CARDSTATUS_WRITE_PROTECT (1 << 7) -#define SD_CTRL_CARDSTATUS_CARD_REMOVED_3 (1 << 8) -#define SD_CTRL_CARDSTATUS_CARD_INSERTED_3 (1 << 9) -#define SD_CTRL_CARDSTATUS_SIGNAL_STATE_PRESENT_3 (1 << 10) - -#define SD_CTRL_BUFFERSTATUS_CMD_INDEX_ERROR (1 << 0) -#define SD_CTRL_BUFFERSTATUS_CRC_ERROR (1 << 1) -#define SD_CTRL_BUFFERSTATUS_STOP_BIT_END_ERROR (1 << 2) -#define SD_CTRL_BUFFERSTATUS_DATA_TIMEOUT (1 << 3) -#define SD_CTRL_BUFFERSTATUS_BUFFER_OVERFLOW (1 << 4) -#define SD_CTRL_BUFFERSTATUS_BUFFER_UNDERFLOW (1 << 5) -#define SD_CTRL_BUFFERSTATUS_CMD_TIMEOUT (1 << 6) -#define SD_CTRL_BUFFERSTATUS_UNK7 (1 << 7) -#define SD_CTRL_BUFFERSTATUS_BUFFER_READ_ENABLE (1 << 8) -#define SD_CTRL_BUFFERSTATUS_BUFFER_WRITE_ENABLE (1 << 9) -#define SD_CTRL_BUFFERSTATUS_ILLEGAL_FUNCTION (1 << 13) -#define SD_CTRL_BUFFERSTATUS_CMD_BUSY (1 << 14) -#define SD_CTRL_BUFFERSTATUS_ILLEGAL_ACCESS (1 << 15) - -#define SD_CTRL_INTMASKCARD_RESPONSE_END (1 << 0) -#define SD_CTRL_INTMASKCARD_RW_END (1 << 2) -#define SD_CTRL_INTMASKCARD_CARD_REMOVED_0 (1 << 3) -#define SD_CTRL_INTMASKCARD_CARD_INSERTED_0 (1 << 4) -#define SD_CTRL_INTMASKCARD_SIGNAL_STATE_PRESENT_0 (1 << 5) -#define SD_CTRL_INTMASKCARD_UNK6 (1 << 6) -#define SD_CTRL_INTMASKCARD_WRITE_PROTECT (1 << 7) -#define SD_CTRL_INTMASKCARD_CARD_REMOVED_3 (1 << 8) -#define SD_CTRL_INTMASKCARD_CARD_INSERTED_3 (1 << 9) -#define SD_CTRL_INTMASKCARD_SIGNAL_STATE_PRESENT_3 (1 << 10) - -#define SD_CTRL_INTMASKBUFFER_CMD_INDEX_ERROR (1 << 0) -#define SD_CTRL_INTMASKBUFFER_CRC_ERROR (1 << 1) -#define SD_CTRL_INTMASKBUFFER_STOP_BIT_END_ERROR (1 << 2) -#define SD_CTRL_INTMASKBUFFER_DATA_TIMEOUT (1 << 3) -#define SD_CTRL_INTMASKBUFFER_BUFFER_OVERFLOW (1 << 4) -#define SD_CTRL_INTMASKBUFFER_BUFFER_UNDERFLOW (1 << 5) -#define SD_CTRL_INTMASKBUFFER_CMD_TIMEOUT (1 << 6) -#define SD_CTRL_INTMASKBUFFER_UNK7 (1 << 7) -#define SD_CTRL_INTMASKBUFFER_BUFFER_READ_ENABLE (1 << 8) -#define SD_CTRL_INTMASKBUFFER_BUFFER_WRITE_ENABLE (1 << 9) -#define SD_CTRL_INTMASKBUFFER_ILLEGAL_FUNCTION (1 << 13) -#define SD_CTRL_INTMASKBUFFER_CMD_BUSY (1 << 14) -#define SD_CTRL_INTMASKBUFFER_ILLEGAL_ACCESS (1 << 15) - -#define SD_CTRL_DETAIL0_RESPONSE_CMD_ERROR (1 << 0) -#define SD_CTRL_DETAIL0_END_BIT_ERROR_FOR_RESPONSE_NON_CMD12 (1 << 2) -#define SD_CTRL_DETAIL0_END_BIT_ERROR_FOR_RESPONSE_CMD12 (1 << 3) -#define SD_CTRL_DETAIL0_END_BIT_ERROR_FOR_READ_DATA (1 << 4) -#define SD_CTRL_DETAIL0_END_BIT_ERROR_FOR_WRITE_CRC_STATUS (1 << 5) -#define SD_CTRL_DETAIL0_CRC_ERROR_FOR_RESPONSE_NON_CMD12 (1 << 8) -#define SD_CTRL_DETAIL0_CRC_ERROR_FOR_RESPONSE_CMD12 (1 << 9) -#define SD_CTRL_DETAIL0_CRC_ERROR_FOR_READ_DATA (1 << 10) 
-#define SD_CTRL_DETAIL0_CRC_ERROR_FOR_WRITE_CMD (1 << 11) - -#define SD_CTRL_DETAIL1_NO_CMD_RESPONSE (1 << 0) -#define SD_CTRL_DETAIL1_TIMEOUT_READ_DATA (1 << 4) -#define SD_CTRL_DETAIL1_TIMEOUT_CRS_STATUS (1 << 5) -#define SD_CTRL_DETAIL1_TIMEOUT_CRC_BUSY (1 << 6) - -#define ASIC3_SDIO_CTRL_Base 0x1200 - -#define ASIC3_SDIO_CTRL_Cmd 0x00 -#define ASIC3_SDIO_CTRL_CardPortSel 0x04 -#define ASIC3_SDIO_CTRL_Arg0 0x08 -#define ASIC3_SDIO_CTRL_Arg1 0x0C -#define ASIC3_SDIO_CTRL_TransferBlockCount 0x14 -#define ASIC3_SDIO_CTRL_Response0 0x18 -#define ASIC3_SDIO_CTRL_Response1 0x1C -#define ASIC3_SDIO_CTRL_Response2 0x20 -#define ASIC3_SDIO_CTRL_Response3 0x24 -#define ASIC3_SDIO_CTRL_Response4 0x28 -#define ASIC3_SDIO_CTRL_Response5 0x2C -#define ASIC3_SDIO_CTRL_Response6 0x30 -#define ASIC3_SDIO_CTRL_Response7 0x34 -#define ASIC3_SDIO_CTRL_CardStatus 0x38 -#define ASIC3_SDIO_CTRL_BufferCtrl 0x3C -#define ASIC3_SDIO_CTRL_IntMaskCard 0x40 -#define ASIC3_SDIO_CTRL_IntMaskBuffer 0x44 -#define ASIC3_SDIO_CTRL_CardXferDataLen 0x4C -#define ASIC3_SDIO_CTRL_CardOptionSetup 0x50 -#define ASIC3_SDIO_CTRL_ErrorStatus0 0x54 -#define ASIC3_SDIO_CTRL_ErrorStatus1 0x58 -#define ASIC3_SDIO_CTRL_DataPort 0x60 -#define ASIC3_SDIO_CTRL_TransactionCtrl 0x68 -#define ASIC3_SDIO_CTRL_CardIntCtrl 0x6C -#define ASIC3_SDIO_CTRL_ClocknWaitCtrl 0x70 -#define ASIC3_SDIO_CTRL_HostInformation 0x74 -#define ASIC3_SDIO_CTRL_ErrorCtrl 0x78 -#define ASIC3_SDIO_CTRL_LEDCtrl 0x7C -#define ASIC3_SDIO_CTRL_SoftwareReset 0x1C0 +#define ASIC3_SD_CONFIG_BASE 0x0400 /* Assumes 32 bit addressing */ +#define ASIC3_SD_CTRL_BASE 0x1000 +#define ASIC3_SDIO_CTRL_BASE 0x1200 #define ASIC3_MAP_SIZE_32BIT 0x2000 #define ASIC3_MAP_SIZE_16BIT 0x1000 diff --git a/include/linux/mfd/ds1wm.h b/include/linux/mfd/ds1wm.h new file mode 100644 index 00000000000..be469a357cb --- /dev/null +++ b/include/linux/mfd/ds1wm.h @@ -0,0 +1,6 @@ +/* MFD cell driver data for the DS1WM driver */ + +struct ds1wm_driver_data { + int active_high; + int clock_rate; +}; diff --git a/include/linux/mfd/ezx-pcap.h b/include/linux/mfd/ezx-pcap.h new file mode 100644 index 00000000000..c12c3c0932b --- /dev/null +++ b/include/linux/mfd/ezx-pcap.h @@ -0,0 +1,256 @@ +/* + * Copyright 2009 Daniel Ribeiro <drwyrm@gmail.com> + * + * For further information, please see http://wiki.openezx.org/PCAP2 + */ + +#ifndef EZX_PCAP_H +#define EZX_PCAP_H + +struct pcap_subdev { + int id; + const char *name; + void *platform_data; +}; + +struct pcap_platform_data { + unsigned int irq_base; + unsigned int config; + void (*init) (void *); /* board specific init */ + int num_subdevs; + struct pcap_subdev *subdevs; +}; + +struct pcap_chip; + +int ezx_pcap_write(struct pcap_chip *, u8, u32); +int ezx_pcap_read(struct pcap_chip *, u8, u32 *); +int pcap_to_irq(struct pcap_chip *, int); +int pcap_adc_async(struct pcap_chip *, u8, u32, u8[], void *, void *); +int pcap_adc_sync(struct pcap_chip *, u8, u32, u8[], u16[]); + +#define PCAP_SECOND_PORT 1 +#define PCAP_CS_AH 2 + +#define PCAP_REGISTER_WRITE_OP_BIT 0x80000000 +#define PCAP_REGISTER_READ_OP_BIT 0x00000000 + +#define PCAP_REGISTER_VALUE_MASK 0x01ffffff +#define PCAP_REGISTER_ADDRESS_MASK 0x7c000000 +#define PCAP_REGISTER_ADDRESS_SHIFT 26 +#define PCAP_REGISTER_NUMBER 32 +#define PCAP_CLEAR_INTERRUPT_REGISTER 0x01ffffff +#define PCAP_MASK_ALL_INTERRUPT 0x01ffffff + +/* registers acessible by both pcap ports */ +#define PCAP_REG_ISR 0x0 /* Interrupt Status */ +#define PCAP_REG_MSR 0x1 /* Interrupt Mask */ +#define PCAP_REG_PSTAT 0x2 /* Processor 
Status */ +#define PCAP_REG_VREG2 0x6 /* Regulator Bank 2 Control */ +#define PCAP_REG_AUXVREG 0x7 /* Auxiliary Regulator Control */ +#define PCAP_REG_BATT 0x8 /* Battery Control */ +#define PCAP_REG_ADC 0x9 /* AD Control */ +#define PCAP_REG_ADR 0xa /* AD Result */ +#define PCAP_REG_CODEC 0xb /* Audio Codec Control */ +#define PCAP_REG_RX_AMPS 0xc /* RX Audio Amplifiers Control */ +#define PCAP_REG_ST_DAC 0xd /* Stereo DAC Control */ +#define PCAP_REG_BUSCTRL 0x14 /* Connectivity Control */ +#define PCAP_REG_PERIPH 0x15 /* Peripheral Control */ +#define PCAP_REG_LOWPWR 0x18 /* Regulator Low Power Control */ +#define PCAP_REG_TX_AMPS 0x1a /* TX Audio Amplifiers Control */ +#define PCAP_REG_GP 0x1b /* General Purpose */ +#define PCAP_REG_TEST1 0x1c +#define PCAP_REG_TEST2 0x1d +#define PCAP_REG_VENDOR_TEST1 0x1e +#define PCAP_REG_VENDOR_TEST2 0x1f + +/* registers acessible by pcap port 1 only (a1200, e2 & e6) */ +#define PCAP_REG_INT_SEL 0x3 /* Interrupt Select */ +#define PCAP_REG_SWCTRL 0x4 /* Switching Regulator Control */ +#define PCAP_REG_VREG1 0x5 /* Regulator Bank 1 Control */ +#define PCAP_REG_RTC_TOD 0xe /* RTC Time of Day */ +#define PCAP_REG_RTC_TODA 0xf /* RTC Time of Day Alarm */ +#define PCAP_REG_RTC_DAY 0x10 /* RTC Day */ +#define PCAP_REG_RTC_DAYA 0x11 /* RTC Day Alarm */ +#define PCAP_REG_MTRTMR 0x12 /* AD Monitor Timer */ +#define PCAP_REG_PWR 0x13 /* Power Control */ +#define PCAP_REG_AUXVREG_MASK 0x16 /* Auxiliary Regulator Mask */ +#define PCAP_REG_VENDOR_REV 0x17 +#define PCAP_REG_PERIPH_MASK 0x19 /* Peripheral Mask */ + +/* PCAP2 Interrupts */ +#define PCAP_NIRQS 23 +#define PCAP_IRQ_ADCDONE 0 /* ADC done port 1 */ +#define PCAP_IRQ_TS 1 /* Touch Screen */ +#define PCAP_IRQ_1HZ 2 /* 1HZ timer */ +#define PCAP_IRQ_WH 3 /* ADC above high limit */ +#define PCAP_IRQ_WL 4 /* ADC below low limit */ +#define PCAP_IRQ_TODA 5 /* Time of day alarm */ +#define PCAP_IRQ_USB4V 6 /* USB above 4V */ +#define PCAP_IRQ_ONOFF 7 /* On/Off button */ +#define PCAP_IRQ_ONOFF2 8 /* On/Off button 2 */ +#define PCAP_IRQ_USB1V 9 /* USB above 1V */ +#define PCAP_IRQ_MOBPORT 10 +#define PCAP_IRQ_MIC 11 /* Mic attach/HS button */ +#define PCAP_IRQ_HS 12 /* Headset attach */ +#define PCAP_IRQ_ST 13 +#define PCAP_IRQ_PC 14 /* Power Cut */ +#define PCAP_IRQ_WARM 15 +#define PCAP_IRQ_EOL 16 /* Battery End Of Life */ +#define PCAP_IRQ_CLK 17 +#define PCAP_IRQ_SYSRST 18 /* System Reset */ +#define PCAP_IRQ_DUMMY 19 +#define PCAP_IRQ_ADCDONE2 20 /* ADC done port 2 */ +#define PCAP_IRQ_SOFTRESET 21 +#define PCAP_IRQ_MNEXB 22 + +/* voltage regulators */ +#define V1 0 +#define V2 1 +#define V3 2 +#define V4 3 +#define V5 4 +#define V6 5 +#define V7 6 +#define V8 7 +#define V9 8 +#define V10 9 +#define VAUX1 10 +#define VAUX2 11 +#define VAUX3 12 +#define VAUX4 13 +#define VSIM 14 +#define VSIM2 15 +#define VVIB 16 +#define SW1 17 +#define SW2 18 +#define SW3 19 +#define SW1S 20 +#define SW2S 21 + +#define PCAP_BATT_DAC_MASK 0x000000ff +#define PCAP_BATT_DAC_SHIFT 0 +#define PCAP_BATT_B_FDBK (1 << 8) +#define PCAP_BATT_EXT_ISENSE (1 << 9) +#define PCAP_BATT_V_COIN_MASK 0x00003c00 +#define PCAP_BATT_V_COIN_SHIFT 10 +#define PCAP_BATT_I_COIN (1 << 14) +#define PCAP_BATT_COIN_CH_EN (1 << 15) +#define PCAP_BATT_EOL_SEL_MASK 0x000e0000 +#define PCAP_BATT_EOL_SEL_SHIFT 17 +#define PCAP_BATT_EOL_CMP_EN (1 << 20) +#define PCAP_BATT_BATT_DET_EN (1 << 21) +#define PCAP_BATT_THERMBIAS_CTRL (1 << 22) + +#define PCAP_ADC_ADEN (1 << 0) +#define PCAP_ADC_RAND (1 << 1) +#define PCAP_ADC_AD_SEL1 (1 << 2) +#define 
PCAP_ADC_AD_SEL2 (1 << 3) +#define PCAP_ADC_ADA1_MASK 0x00000070 +#define PCAP_ADC_ADA1_SHIFT 4 +#define PCAP_ADC_ADA2_MASK 0x00000380 +#define PCAP_ADC_ADA2_SHIFT 7 +#define PCAP_ADC_ATO_MASK 0x00003c00 +#define PCAP_ADC_ATO_SHIFT 10 +#define PCAP_ADC_ATOX (1 << 14) +#define PCAP_ADC_MTR1 (1 << 15) +#define PCAP_ADC_MTR2 (1 << 16) +#define PCAP_ADC_TS_M_MASK 0x000e0000 +#define PCAP_ADC_TS_M_SHIFT 17 +#define PCAP_ADC_TS_REF_LOWPWR (1 << 20) +#define PCAP_ADC_TS_REFENB (1 << 21) +#define PCAP_ADC_BATT_I_POLARITY (1 << 22) +#define PCAP_ADC_BATT_I_ADC (1 << 23) + +#define PCAP_ADC_BANK_0 0 +#define PCAP_ADC_BANK_1 1 +/* ADC bank 0 */ +#define PCAP_ADC_CH_COIN 0 +#define PCAP_ADC_CH_BATT 1 +#define PCAP_ADC_CH_BPLUS 2 +#define PCAP_ADC_CH_MOBPORTB 3 +#define PCAP_ADC_CH_TEMPERATURE 4 +#define PCAP_ADC_CH_CHARGER_ID 5 +#define PCAP_ADC_CH_AD6 6 +/* ADC bank 1 */ +#define PCAP_ADC_CH_AD7 0 +#define PCAP_ADC_CH_AD8 1 +#define PCAP_ADC_CH_AD9 2 +#define PCAP_ADC_CH_TS_X1 3 +#define PCAP_ADC_CH_TS_X2 4 +#define PCAP_ADC_CH_TS_Y1 5 +#define PCAP_ADC_CH_TS_Y2 6 + +#define PCAP_ADC_T_NOW 0 +#define PCAP_ADC_T_IN_BURST 1 +#define PCAP_ADC_T_OUT_BURST 2 + +#define PCAP_ADC_ATO_IN_BURST 6 +#define PCAP_ADC_ATO_OUT_BURST 0 + +#define PCAP_ADC_TS_M_XY 1 +#define PCAP_ADC_TS_M_PRESSURE 2 +#define PCAP_ADC_TS_M_PLATE_X 3 +#define PCAP_ADC_TS_M_PLATE_Y 4 +#define PCAP_ADC_TS_M_STANDBY 5 +#define PCAP_ADC_TS_M_NONTS 6 + +#define PCAP_ADR_ADD1_MASK 0x000003ff +#define PCAP_ADR_ADD1_SHIFT 0 +#define PCAP_ADR_ADD2_MASK 0x000ffc00 +#define PCAP_ADR_ADD2_SHIFT 10 +#define PCAP_ADR_ADINC1 (1 << 20) +#define PCAP_ADR_ADINC2 (1 << 21) +#define PCAP_ADR_ASC (1 << 22) +#define PCAP_ADR_ONESHOT (1 << 23) + +#define PCAP_BUSCTRL_FSENB (1 << 0) +#define PCAP_BUSCTRL_USB_SUSPEND (1 << 1) +#define PCAP_BUSCTRL_USB_PU (1 << 2) +#define PCAP_BUSCTRL_USB_PD (1 << 3) +#define PCAP_BUSCTRL_VUSB_EN (1 << 4) +#define PCAP_BUSCTRL_USB_PS (1 << 5) +#define PCAP_BUSCTRL_VUSB_MSTR_EN (1 << 6) +#define PCAP_BUSCTRL_VBUS_PD_ENB (1 << 7) +#define PCAP_BUSCTRL_CURRLIM (1 << 8) +#define PCAP_BUSCTRL_RS232ENB (1 << 9) +#define PCAP_BUSCTRL_RS232_DIR (1 << 10) +#define PCAP_BUSCTRL_SE0_CONN (1 << 11) +#define PCAP_BUSCTRL_USB_PDM (1 << 12) +#define PCAP_BUSCTRL_BUS_PRI_ADJ (1 << 24) + +/* leds */ +#define PCAP_LED0 0 +#define PCAP_LED1 1 +#define PCAP_BL0 2 +#define PCAP_BL1 3 +#define PCAP_VIB 4 +#define PCAP_LED_3MA 0 +#define PCAP_LED_4MA 1 +#define PCAP_LED_5MA 2 +#define PCAP_LED_9MA 3 +#define PCAP_LED_GPIO_VAL_MASK 0x00ffffff +#define PCAP_LED_GPIO_EN 0x01000000 +#define PCAP_LED_GPIO_INVERT 0x02000000 +#define PCAP_LED_T_MASK 0xf +#define PCAP_LED_C_MASK 0x3 +#define PCAP_BL_MASK 0x1f +#define PCAP_BL0_SHIFT 0 +#define PCAP_LED0_EN (1 << 5) +#define PCAP_LED1_EN (1 << 6) +#define PCAP_LED0_T_SHIFT 7 +#define PCAP_LED1_T_SHIFT 11 +#define PCAP_LED0_C_SHIFT 15 +#define PCAP_LED1_C_SHIFT 17 +#define PCAP_BL1_SHIFT 20 +#define PCAP_VIB_MASK 0x3 +#define PCAP_VIB_SHIFT 20 +#define PCAP_VIB_EN (1 << 19) + +/* RTC */ +#define PCAP_RTC_DAY_MASK 0x3fff +#define PCAP_RTC_TOD_MASK 0xffff +#define PCAP_RTC_PC_MASK 0x7 +#define SEC_PER_DAY 86400 + +#endif diff --git a/include/linux/mfd/htc-pasic3.h b/include/linux/mfd/htc-pasic3.h index b4294f12c4f..3d3ed67bd96 100644 --- a/include/linux/mfd/htc-pasic3.h +++ b/include/linux/mfd/htc-pasic3.h @@ -48,7 +48,6 @@ struct pasic3_leds_machinfo { struct pasic3_platform_data { struct pasic3_leds_machinfo *led_pdata; - unsigned int bus_shift; unsigned int clock_rate; }; diff --git 
a/include/linux/mfd/pcf50633/core.h b/include/linux/mfd/pcf50633/core.h index 4455b212d75..c8f51c3c0a7 100644 --- a/include/linux/mfd/pcf50633/core.h +++ b/include/linux/mfd/pcf50633/core.h @@ -29,6 +29,8 @@ struct pcf50633_platform_data { char **batteries; int num_batteries; + int charging_restart_interval; + /* Callbacks */ void (*probe_done)(struct pcf50633 *); void (*mbc_event_callback)(struct pcf50633 *, int); diff --git a/include/linux/mfd/pcf50633/mbc.h b/include/linux/mfd/pcf50633/mbc.h index 6e17619b773..4119579acf2 100644 --- a/include/linux/mfd/pcf50633/mbc.h +++ b/include/linux/mfd/pcf50633/mbc.h @@ -128,7 +128,6 @@ enum pcf50633_reg_mbcs3 { int pcf50633_mbc_usb_curlim_set(struct pcf50633 *pcf, int ma); int pcf50633_mbc_get_status(struct pcf50633 *); -void pcf50633_mbc_set_status(struct pcf50633 *, int what, int status); #endif diff --git a/include/linux/mfd/tmio.h b/include/linux/mfd/tmio.h index 516d955ab8a..6b9c5d06690 100644 --- a/include/linux/mfd/tmio.h +++ b/include/linux/mfd/tmio.h @@ -19,6 +19,13 @@ } while (0) /* + * data for the MMC controller + */ +struct tmio_mmc_data { + const unsigned int hclk; +}; + +/* * data for the NAND controller */ struct tmio_nand_data { diff --git a/include/linux/mfd/wm8350/audio.h b/include/linux/mfd/wm8350/audio.h index af95a1d2f3a..d899dc0223b 100644 --- a/include/linux/mfd/wm8350/audio.h +++ b/include/linux/mfd/wm8350/audio.h @@ -490,6 +490,7 @@ /* * R231 (0xE7) - Jack Status */ +#define WM8350_JACK_L_LVL 0x0800 #define WM8350_JACK_R_LVL 0x0400 /* diff --git a/include/linux/mfd/wm8350/core.h b/include/linux/mfd/wm8350/core.h index 980669d50dc..42cca672f34 100644 --- a/include/linux/mfd/wm8350/core.h +++ b/include/linux/mfd/wm8350/core.h @@ -640,9 +640,11 @@ struct wm8350 { * * @init: Function called during driver initialisation. Should be * used by the platform to configure GPIO functions and similar. + * @irq_high: Set if WM8350 IRQ is active high. */ struct wm8350_platform_data { int (*init)(struct wm8350 *wm8350); + int irq_high; }; diff --git a/include/linux/mfd/wm8400-audio.h b/include/linux/mfd/wm8400-audio.h index b6640e01804..e06ed3eb1d0 100644 --- a/include/linux/mfd/wm8400-audio.h +++ b/include/linux/mfd/wm8400-audio.h @@ -1181,6 +1181,7 @@ #define WM8400_FLL_OUTDIV_SHIFT 0 /* FLL_OUTDIV - [2:0] */ #define WM8400_FLL_OUTDIV_WIDTH 3 /* FLL_OUTDIV - [2:0] */ +struct wm8400; void wm8400_reset_codec_reg_cache(struct wm8400 *wm8400); #endif diff --git a/include/linux/mg_disk.h b/include/linux/mg_disk.h new file mode 100644 index 00000000000..e11f4d9f1c2 --- /dev/null +++ b/include/linux/mg_disk.h @@ -0,0 +1,45 @@ +/* + * include/linux/mg_disk.c + * + * Private data for mflash platform driver + * + * (c) 2008 mGine Co.,LTD + * (c) 2008 unsik Kim <donari75@gmail.com> + * + * This program is free software; you can redistribute it and/or modify + * it under the terms of the GNU General Public License version 2 as + * published by the Free Software Foundation. 
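For illustration only, not part of the patch: board code filling in the extended wm8350_platform_data documented in the wm8350/core.h hunk above. The init hook body and the names are hypothetical; irq_high = 1 simply records that the board wires the WM8350 interrupt line active high.

static int example_board_wm8350_init(struct wm8350 *wm8350)
{
        /* configure WM8350 GPIO functions, regulators, etc. for this board */
        return 0;
}

static struct wm8350_platform_data example_board_wm8350_pdata = {
        .init           = example_board_wm8350_init,
        .irq_high       = 1,
};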
+ */ + +#ifndef __MG_DISK_H__ +#define __MG_DISK_H__ + +/* name for platform device */ +#define MG_DEV_NAME "mg_disk" + +/* names of GPIO resource */ +#define MG_RST_PIN "mg_rst" +/* except MG_BOOT_DEV, reset-out pin should be assigned */ +#define MG_RSTOUT_PIN "mg_rstout" + +/* device attribution */ +/* use mflash as boot device */ +#define MG_BOOT_DEV (1 << 0) +/* use mflash as storage device */ +#define MG_STORAGE_DEV (1 << 1) +/* same as MG_STORAGE_DEV, but bootloader already done reset sequence */ +#define MG_STORAGE_DEV_SKIP_RST (1 << 2) + +/* private driver data */ +struct mg_drv_data { + /* disk resource */ + u32 use_polling; + + /* device attribution */ + u32 dev_attr; + + /* internally used */ + void *host; +}; + +#endif diff --git a/include/linux/mii.h b/include/linux/mii.h index ad748588faf..359fba88027 100644 --- a/include/linux/mii.h +++ b/include/linux/mii.h @@ -240,6 +240,22 @@ static inline unsigned int mii_duplex (unsigned int duplex_lock, } /** + * mii_advertise_flowctrl - get flow control advertisement flags + * @cap: Flow control capabilities (FLOW_CTRL_RX, FLOW_CTRL_TX or both) + */ +static inline u16 mii_advertise_flowctrl(int cap) +{ + u16 adv = 0; + + if (cap & FLOW_CTRL_RX) + adv = ADVERTISE_PAUSE_CAP | ADVERTISE_PAUSE_ASYM; + if (cap & FLOW_CTRL_TX) + adv ^= ADVERTISE_PAUSE_ASYM; + + return adv; +} + +/** * mii_resolve_flowctrl_fdx * @lcladv: value of MII ADVERTISE register * @rmtadv: value of MII LPA register @@ -250,18 +266,12 @@ static inline u8 mii_resolve_flowctrl_fdx(u16 lcladv, u16 rmtadv) { u8 cap = 0; - if (lcladv & ADVERTISE_PAUSE_CAP) { - if (lcladv & ADVERTISE_PAUSE_ASYM) { - if (rmtadv & LPA_PAUSE_CAP) - cap = FLOW_CTRL_TX | FLOW_CTRL_RX; - else if (rmtadv & LPA_PAUSE_ASYM) - cap = FLOW_CTRL_RX; - } else { - if (rmtadv & LPA_PAUSE_CAP) - cap = FLOW_CTRL_TX | FLOW_CTRL_RX; - } - } else if (lcladv & ADVERTISE_PAUSE_ASYM) { - if ((rmtadv & LPA_PAUSE_CAP) && (rmtadv & LPA_PAUSE_ASYM)) + if (lcladv & rmtadv & ADVERTISE_PAUSE_CAP) { + cap = FLOW_CTRL_TX | FLOW_CTRL_RX; + } else if (lcladv & rmtadv & ADVERTISE_PAUSE_ASYM) { + if (lcladv & ADVERTISE_PAUSE_CAP) + cap = FLOW_CTRL_RX; + else if (rmtadv & ADVERTISE_PAUSE_CAP) cap = FLOW_CTRL_TX; } diff --git a/include/linux/minix_fs.h b/include/linux/minix_fs.h index 0e39745f511..13fe09e0576 100644 --- a/include/linux/minix_fs.h +++ b/include/linux/minix_fs.h @@ -1,6 +1,7 @@ #ifndef _LINUX_MINIX_FS_H #define _LINUX_MINIX_FS_H +#include <linux/types.h> #include <linux/magic.h> /* diff --git a/include/linux/miscdevice.h b/include/linux/miscdevice.h index a820f816a49..05211774462 100644 --- a/include/linux/miscdevice.h +++ b/include/linux/miscdevice.h @@ -26,6 +26,7 @@ #define TUN_MINOR 200 #define MWAVE_MINOR 219 /* ACP/Mwave Modem */ #define MPT_MINOR 220 +#define MPT2SAS_MINOR 221 #define HPET_MINOR 228 #define FUSE_MINOR 229 #define KVM_MINOR 232 @@ -40,6 +41,7 @@ struct miscdevice { struct list_head list; struct device *parent; struct device *this_device; + const char *devnode; }; extern int misc_register(struct miscdevice * misc); diff --git a/include/linux/mlx4/cmd.h b/include/linux/mlx4/cmd.h index cf9c679ab38..0f82293a82e 100644 --- a/include/linux/mlx4/cmd.h +++ b/include/linux/mlx4/cmd.h @@ -55,6 +55,7 @@ enum { MLX4_CMD_CLOSE_PORT = 0xa, MLX4_CMD_QUERY_HCA = 0xb, MLX4_CMD_QUERY_PORT = 0x43, + MLX4_CMD_SENSE_PORT = 0x4d, MLX4_CMD_SET_PORT = 0xc, MLX4_CMD_ACCESS_DDR = 0x2e, MLX4_CMD_MAP_ICM = 0xffa, diff --git a/include/linux/mlx4/device.h b/include/linux/mlx4/device.h index 8f659cc2996..ce7cc6c7bcb 
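For illustration only, not part of the patch: what the reworked MII flow control helpers in the mii.h hunk above compute. mii_advertise_flowctrl() maps FLOW_CTRL_RX to ADVERTISE_PAUSE_CAP | ADVERTISE_PAUSE_ASYM, FLOW_CTRL_TX to ADVERTISE_PAUSE_ASYM alone, and both to ADVERTISE_PAUSE_CAP alone; since the LPA pause bits share those values, the same constants can be used for both arguments of mii_resolve_flowctrl_fdx(). Only the wrapper function is invented.

static u8 example_resolve_rx_only_pause(void)
{
        u16 lcl = mii_advertise_flowctrl(FLOW_CTRL_RX); /* CAP | ASYM */
        u16 rmt = ADVERTISE_PAUSE_ASYM;                 /* partner: TX-only pause */

        /* only ASYM matches and we advertise CAP, so the result is FLOW_CTRL_RX */
        return mii_resolve_flowctrl_fdx(lcl, rmt);
}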
100644 --- a/include/linux/mlx4/device.h +++ b/include/linux/mlx4/device.h @@ -155,8 +155,9 @@ enum mlx4_qp_region { }; enum mlx4_port_type { - MLX4_PORT_TYPE_IB = 1 << 0, - MLX4_PORT_TYPE_ETH = 1 << 1, + MLX4_PORT_TYPE_IB = 1, + MLX4_PORT_TYPE_ETH = 2, + MLX4_PORT_TYPE_AUTO = 3 }; enum mlx4_special_vlan_idx { @@ -209,6 +210,7 @@ struct mlx4_caps { int num_comp_vectors; int num_mpts; int num_mtt_segs; + int mtts_per_seg; int fmr_reserved_mtts; int reserved_mtts; int reserved_mrws; @@ -237,6 +239,7 @@ struct mlx4_caps { enum mlx4_port_type port_type[MLX4_MAX_PORTS + 1]; u8 supported_type[MLX4_MAX_PORTS + 1]; u32 port_mask; + enum mlx4_port_type possible_type[MLX4_MAX_PORTS + 1]; }; struct mlx4_buf_list { diff --git a/include/linux/mlx4/qp.h b/include/linux/mlx4/qp.h index bf8f11982da..9f29d86e5dc 100644 --- a/include/linux/mlx4/qp.h +++ b/include/linux/mlx4/qp.h @@ -165,6 +165,7 @@ enum { MLX4_WQE_CTRL_IP_CSUM = 1 << 4, MLX4_WQE_CTRL_TCP_UDP_CSUM = 1 << 5, MLX4_WQE_CTRL_INS_VLAN = 1 << 6, + MLX4_WQE_CTRL_STRONG_ORDER = 1 << 7, }; struct mlx4_wqe_ctrl_seg { diff --git a/include/linux/mm.h b/include/linux/mm.h index e8ddc98b840..9a72cc78e6b 100644 --- a/include/linux/mm.h +++ b/include/linux/mm.h @@ -7,7 +7,6 @@ #include <linux/gfp.h> #include <linux/list.h> -#include <linux/mmdebug.h> #include <linux/mmzone.h> #include <linux/rbtree.h> #include <linux/prio_tree.h> @@ -19,6 +18,7 @@ struct anon_vma; struct file_ra_state; struct user_struct; struct writeback_control; +struct rlimit; #ifndef CONFIG_DISCONTIGMEM /* Don't use mapnrs, do it properly */ extern unsigned long max_mapnr; @@ -34,8 +34,6 @@ extern int sysctl_legacy_va_layout; #define sysctl_legacy_va_layout 0 #endif -extern unsigned long mmap_min_addr; - #include <asm/page.h> #include <asm/pgtable.h> #include <asm/processor.h> @@ -104,6 +102,7 @@ extern unsigned int kobjsize(const void *objp); #define VM_CAN_NONLINEAR 0x08000000 /* Has ->fault & does nonlinear pages */ #define VM_MIXEDMAP 0x10000000 /* Can contain "struct page" and pure PFN pages */ #define VM_SAO 0x20000000 /* Strong Access Ordering (powerpc) */ +#define VM_PFN_AT_MMAP 0x40000000 /* PFNMAP vma that is fully mapped at mmap time */ #ifndef VM_STACK_DEFAULT_FLAGS /* arch can override this */ #define VM_STACK_DEFAULT_FLAGS VM_DATA_DEFAULT_FLAGS @@ -134,6 +133,7 @@ extern pgprot_t protection_map[16]; #define FAULT_FLAG_WRITE 0x01 /* Fault was a write access */ #define FAULT_FLAG_NONLINEAR 0x02 /* Fault was via a nonlinear mapping */ +#define FAULT_FLAG_MKWRITE 0x04 /* Fault was mkwrite of existing pte */ /* * This interface is used by x86 PAT code to identify a pfn mapping that is @@ -145,7 +145,7 @@ extern pgprot_t protection_map[16]; */ static inline int is_linear_pfn_mapping(struct vm_area_struct *vma) { - return ((vma->vm_flags & VM_PFNMAP) && vma->vm_pgoff); + return (vma->vm_flags & VM_PFN_AT_MMAP); } static inline int is_pfn_mapping(struct vm_area_struct *vma) @@ -186,7 +186,7 @@ struct vm_operations_struct { /* notification that a previously read-only page is about to become * writable, if an error is returned it will cause a SIGBUS */ - int (*page_mkwrite)(struct vm_area_struct *vma, struct page *page); + int (*page_mkwrite)(struct vm_area_struct *vma, struct vm_fault *vmf); /* called by access_process_vm when get_user_pages() fails, typically * for use by special VMAs that can switch between memory and hardware @@ -573,21 +573,6 @@ static inline void set_page_links(struct page *page, enum zone_type zone, } /* - * If a hint addr is less than mmap_min_addr change 
hint to be as - * low as possible but still greater than mmap_min_addr - */ -static inline unsigned long round_hint_to_min(unsigned long hint) -{ -#ifdef CONFIG_SECURITY - hint &= PAGE_MASK; - if (((void *)hint != NULL) && - (hint < mmap_min_addr)) - return PAGE_ALIGN(mmap_min_addr); -#endif - return hint; -} - -/* * Some inline functions in vmstat.h depend on page_zone() */ #include <linux/vmstat.h> @@ -724,7 +709,7 @@ static inline int shmem_lock(struct file *file, int lock, return 0; } #endif -struct file *shmem_file_setup(char *name, loff_t size, unsigned long flags); +struct file *shmem_file_setup(const char *name, loff_t size, unsigned long flags); int shmem_zero_setup(struct vm_area_struct *); @@ -792,6 +777,8 @@ int copy_page_range(struct mm_struct *dst, struct mm_struct *src, struct vm_area_struct *vma); void unmap_mapping_range(struct address_space *mapping, loff_t const holebegin, loff_t const holelen, int even_cows); +int follow_pfn(struct vm_area_struct *vma, unsigned long address, + unsigned long *pfn); int follow_phys(struct vm_area_struct *vma, unsigned long address, unsigned int flags, unsigned long *prot, resource_size_t *phys); int generic_access_phys(struct vm_area_struct *vma, unsigned long addr, @@ -808,11 +795,11 @@ extern int vmtruncate_range(struct inode * inode, loff_t offset, loff_t end); #ifdef CONFIG_MMU extern int handle_mm_fault(struct mm_struct *mm, struct vm_area_struct *vma, - unsigned long address, int write_access); + unsigned long address, unsigned int flags); #else static inline int handle_mm_fault(struct mm_struct *mm, struct vm_area_struct *vma, unsigned long address, - int write_access) + unsigned int flags) { /* should never happen if there's no MMU */ BUG(); @@ -823,8 +810,11 @@ static inline int handle_mm_fault(struct mm_struct *mm, extern int make_pages_present(unsigned long addr, unsigned long end); extern int access_process_vm(struct task_struct *tsk, unsigned long addr, void *buf, int len, int write); -int get_user_pages(struct task_struct *tsk, struct mm_struct *mm, unsigned long start, - int len, int write, int force, struct page **pages, struct vm_area_struct **vmas); +int get_user_pages(struct task_struct *tsk, struct mm_struct *mm, + unsigned long start, int nr_pages, int write, int force, + struct page **pages, struct vm_area_struct **vmas); +int get_user_pages_fast(unsigned long start, int nr_pages, int write, + struct page **pages); extern int try_to_release_page(struct page * page, gfp_t gfp_mask); extern void do_invalidatepage(struct page *page, unsigned long offset); @@ -833,6 +823,7 @@ int __set_page_dirty_nobuffers(struct page *page); int __set_page_dirty_no_writeback(struct page *page); int redirty_page_for_writepage(struct writeback_control *wbc, struct page *page); +void account_page_dirtied(struct page *page, struct address_space *mapping); int set_page_dirty(struct page *page); int set_page_dirty_lock(struct page *page); int clear_page_dirty_for_io(struct page *page); @@ -848,17 +839,10 @@ extern int mprotect_fixup(struct vm_area_struct *vma, unsigned long end, unsigned long newflags); /* - * get_user_pages_fast provides equivalent functionality to get_user_pages, - * operating on current and current->mm (force=0 and doesn't return any vmas). - * - * get_user_pages_fast may take mmap_sem and page tables, so no assumptions - * can be made about locking. get_user_pages_fast is to be implemented in a - * way that is advantageous (vs get_user_pages()) when the user memory area is - * already faulted in and present in ptes. 
However if the pages have to be - * faulted in, it may turn out to be slightly slower). + * doesn't attempt to fault and will return short. */ -int get_user_pages_fast(unsigned long start, int nr_pages, int write, - struct page **pages); +int __get_user_pages_fast(unsigned long start, int nr_pages, int write, + struct page **pages); /* * A callback you can register to apply pressure to ageable caches. @@ -1028,8 +1012,6 @@ extern void add_active_range(unsigned int nid, unsigned long start_pfn, unsigned long end_pfn); extern void remove_active_range(unsigned int nid, unsigned long start_pfn, unsigned long end_pfn); -extern void push_node_boundaries(unsigned int nid, unsigned long start_pfn, - unsigned long end_pfn); extern void remove_all_active_ranges(void); extern unsigned long absent_pages_in_range(unsigned long start_pfn, unsigned long end_pfn); @@ -1041,14 +1023,28 @@ extern void free_bootmem_with_active_regions(int nid, typedef int (*work_fn_t)(unsigned long, unsigned long, void *); extern void work_with_active_regions(int nid, work_fn_t work_fn, void *data); extern void sparse_memory_present_with_active_regions(int nid); -#ifndef CONFIG_HAVE_ARCH_EARLY_PFN_TO_NID -extern int early_pfn_to_nid(unsigned long pfn); -#endif /* CONFIG_HAVE_ARCH_EARLY_PFN_TO_NID */ #endif /* CONFIG_ARCH_POPULATES_NODE_MAP */ + +#if !defined(CONFIG_ARCH_POPULATES_NODE_MAP) && \ + !defined(CONFIG_HAVE_ARCH_EARLY_PFN_TO_NID) +static inline int __early_pfn_to_nid(unsigned long pfn) +{ + return 0; +} +#else +/* please see mm/page_alloc.c */ +extern int __meminit early_pfn_to_nid(unsigned long pfn); +#ifdef CONFIG_HAVE_ARCH_EARLY_PFN_TO_NID +/* there is a per-arch backend function. */ +extern int __meminit __early_pfn_to_nid(unsigned long pfn); +#endif /* CONFIG_HAVE_ARCH_EARLY_PFN_TO_NID */ +#endif + extern void set_dma_reserve(unsigned long new_dma_reserve); extern void memmap_init_zone(unsigned long, int, unsigned long, unsigned long, enum memmap_context); -extern void setup_per_zone_pages_min(void); +extern void setup_per_zone_wmarks(void); +extern void calculate_zone_inactive_ratio(struct zone *zone); extern void mem_init(void); extern void __init mmap_init(void); extern void show_mem(void); @@ -1063,7 +1059,7 @@ static inline void setup_per_cpu_pageset(void) {} #endif /* nommu.c */ -extern atomic_t mmap_pages_allocated; +extern atomic_long_t mmap_pages_allocated; /* prio_tree.c */ void vma_prio_tree_add(struct vm_area_struct *, struct vm_area_struct *old); @@ -1129,8 +1125,7 @@ extern unsigned long do_mmap_pgoff(struct file *file, unsigned long addr, unsigned long flag, unsigned long pgoff); extern unsigned long mmap_region(struct file *file, unsigned long addr, unsigned long len, unsigned long flags, - unsigned int vm_flags, unsigned long pgoff, - int accountable); + unsigned int vm_flags, unsigned long pgoff); static inline unsigned long do_mmap(struct file *file, unsigned long addr, unsigned long len, unsigned long prot, @@ -1160,13 +1155,12 @@ extern int filemap_fault(struct vm_area_struct *, struct vm_fault *); /* mm/page-writeback.c */ int write_one_page(struct page *page, int wait); +void task_dirty_inc(struct task_struct *tsk); /* readahead.c */ #define VM_MAX_READAHEAD 128 /* kbytes */ #define VM_MIN_READAHEAD 16 /* kbytes (includes current page) */ -int do_page_cache_readahead(struct address_space *mapping, struct file *filp, - pgoff_t offset, unsigned long nr_to_read); int force_page_cache_readahead(struct address_space *mapping, struct file *filp, pgoff_t offset, unsigned long nr_to_read); @@ 
-1184,6 +1178,9 @@ void page_cache_async_readahead(struct address_space *mapping, unsigned long size); unsigned long max_sane_readahead(unsigned long nr); +unsigned long ra_submit(struct file_ra_state *ra, + struct address_space *mapping, + struct file *filp); /* Do stack extension */ extern int expand_stack(struct vm_area_struct *vma, unsigned long address); @@ -1303,7 +1300,8 @@ int vmemmap_populate_basepages(struct page *start_page, int vmemmap_populate(struct page *start_page, unsigned long pages, int node); void vmemmap_populate_print_last(void); -extern void *alloc_locked_buffer(size_t size); -extern void free_locked_buffer(void *buffer, size_t size); +extern int account_locked_memory(struct mm_struct *mm, struct rlimit *rlim, + size_t size); +extern void refund_locked_memory(struct mm_struct *mm, size_t size); #endif /* __KERNEL__ */ #endif /* _LINUX_MM_H */ diff --git a/include/linux/mm_types.h b/include/linux/mm_types.h index 92915e81443..0042090a4d7 100644 --- a/include/linux/mm_types.h +++ b/include/linux/mm_types.h @@ -11,6 +11,7 @@ #include <linux/rwsem.h> #include <linux/completion.h> #include <linux/cpumask.h> +#include <linux/page-debug-flags.h> #include <asm/page.h> #include <asm/mmu.h> @@ -94,6 +95,17 @@ struct page { void *virtual; /* Kernel virtual address (NULL if not kmapped, ie. highmem) */ #endif /* WANT_PAGE_VIRTUAL */ +#ifdef CONFIG_WANT_PAGE_DEBUG_FLAGS + unsigned long debug_flags; /* Use atomic bitops on this */ +#endif + +#ifdef CONFIG_KMEMCHECK + /* + * kmemcheck wants to track the status of each byte in a page; this + * is a pointer to such a status block. NULL if not tracked. + */ + void *shadow; +#endif }; /* @@ -276,4 +288,7 @@ struct mm_struct { #endif }; +/* Future-safe accessor for struct mm_struct's cpu_vm_mask. */ +#define mm_cpumask(mm) (&(mm)->cpu_vm_mask) + #endif /* _LINUX_MM_TYPES_H */ diff --git a/include/linux/mman.h b/include/linux/mman.h index 30d1073bac3..9872d6ca58a 100644 --- a/include/linux/mman.h +++ b/include/linux/mman.h @@ -12,21 +12,18 @@ #ifdef __KERNEL__ #include <linux/mm.h> +#include <linux/percpu_counter.h> #include <asm/atomic.h> extern int sysctl_overcommit_memory; extern int sysctl_overcommit_ratio; -extern atomic_long_t vm_committed_space; +extern struct percpu_counter vm_committed_as; -#ifdef CONFIG_SMP -extern void vm_acct_memory(long pages); -#else static inline void vm_acct_memory(long pages) { - atomic_long_add(pages, &vm_committed_space); + percpu_counter_add(&vm_committed_as, pages); } -#endif static inline void vm_unacct_memory(long pages) { diff --git a/include/linux/mmc/host.h b/include/linux/mmc/host.h index 4e457256bd3..3e7615e9087 100644 --- a/include/linux/mmc/host.h +++ b/include/linux/mmc/host.h @@ -192,5 +192,10 @@ static inline void mmc_signal_sdio_irq(struct mmc_host *host) wake_up_process(host->sdio_irq_thread); } +struct regulator; + +int mmc_regulator_get_ocrmask(struct regulator *supply); +int mmc_regulator_set_ocr(struct regulator *supply, unsigned short vdd_bit); + #endif diff --git a/include/linux/mmc/sdio_ids.h b/include/linux/mmc/sdio_ids.h index ea1bf5ba092..2dbfb5a0599 100644 --- a/include/linux/mmc/sdio_ids.h +++ b/include/linux/mmc/sdio_ids.h @@ -22,8 +22,24 @@ /* * Vendors and devices. Sort key: vendor first, device next. 
*/ +#define SDIO_VENDOR_ID_INTEL 0x0089 +#define SDIO_DEVICE_ID_INTEL_IWMC3200WIMAX 0x1402 +#define SDIO_DEVICE_ID_INTEL_IWMC3200WIFI 0x1403 +#define SDIO_DEVICE_ID_INTEL_IWMC3200TOP 0x1404 +#define SDIO_DEVICE_ID_INTEL_IWMC3200GPS 0x1405 +#define SDIO_DEVICE_ID_INTEL_IWMC3200BT 0x1406 #define SDIO_VENDOR_ID_MARVELL 0x02df #define SDIO_DEVICE_ID_MARVELL_LIBERTAS 0x9103 +#define SDIO_DEVICE_ID_MARVELL_8688WLAN 0x9104 +#define SDIO_DEVICE_ID_MARVELL_8688BT 0x9105 + +#define SDIO_VENDOR_ID_SIANO 0x039a +#define SDIO_DEVICE_ID_SIANO_NOVA_B0 0x0201 +#define SDIO_DEVICE_ID_SIANO_NICE 0x0202 +#define SDIO_DEVICE_ID_SIANO_VEGA_A0 0x0300 +#define SDIO_DEVICE_ID_SIANO_VENICE 0x0301 +#define SDIO_DEVICE_ID_SIANO_NOVA_A0 0x1100 +#define SDIO_DEVICE_ID_SIANO_STELLAR 0x5347 #endif diff --git a/include/linux/mmiotrace.h b/include/linux/mmiotrace.h index 139d7c88d9c..97491f78b08 100644 --- a/include/linux/mmiotrace.h +++ b/include/linux/mmiotrace.h @@ -1,5 +1,5 @@ -#ifndef MMIOTRACE_H -#define MMIOTRACE_H +#ifndef _LINUX_MMIOTRACE_H +#define _LINUX_MMIOTRACE_H #include <linux/types.h> #include <linux/list.h> @@ -13,28 +13,36 @@ typedef void (*kmmio_post_handler_t)(struct kmmio_probe *, unsigned long condition, struct pt_regs *); struct kmmio_probe { - struct list_head list; /* kmmio internal list */ - unsigned long addr; /* start location of the probe point */ - unsigned long len; /* length of the probe region */ - kmmio_pre_handler_t pre_handler; /* Called before addr is executed. */ - kmmio_post_handler_t post_handler; /* Called after addr is executed */ - void *private; + /* kmmio internal list: */ + struct list_head list; + /* start location of the probe point: */ + unsigned long addr; + /* length of the probe region: */ + unsigned long len; + /* Called before addr is executed: */ + kmmio_pre_handler_t pre_handler; + /* Called after addr is executed: */ + kmmio_post_handler_t post_handler; + void *private; }; +extern unsigned int kmmio_count; + +extern int register_kmmio_probe(struct kmmio_probe *p); +extern void unregister_kmmio_probe(struct kmmio_probe *p); +extern int kmmio_init(void); +extern void kmmio_cleanup(void); + +#ifdef CONFIG_MMIOTRACE /* kmmio is active by some kmmio_probes? */ static inline int is_kmmio_active(void) { - extern unsigned int kmmio_count; return kmmio_count; } -extern int register_kmmio_probe(struct kmmio_probe *p); -extern void unregister_kmmio_probe(struct kmmio_probe *p); - /* Called from page fault handler. */ extern int kmmio_handler(struct pt_regs *regs, unsigned long addr); -#ifdef CONFIG_MMIOTRACE /* Called from ioremap.c */ extern void mmiotrace_ioremap(resource_size_t offset, unsigned long size, void __iomem *addr); @@ -43,7 +51,17 @@ extern void mmiotrace_iounmap(volatile void __iomem *addr); /* For anyone to insert markers. Remember trailing newline. */ extern int mmiotrace_printk(const char *fmt, ...) __attribute__ ((format (printf, 1, 2))); -#else +#else /* !CONFIG_MMIOTRACE: */ +static inline int is_kmmio_active(void) +{ + return 0; +} + +static inline int kmmio_handler(struct pt_regs *regs, unsigned long addr) +{ + return 0; +} + static inline void mmiotrace_ioremap(resource_size_t offset, unsigned long size, void __iomem *addr) { @@ -63,28 +81,28 @@ static inline int mmiotrace_printk(const char *fmt, ...) 
#endif /* CONFIG_MMIOTRACE */ enum mm_io_opcode { - MMIO_READ = 0x1, /* struct mmiotrace_rw */ - MMIO_WRITE = 0x2, /* struct mmiotrace_rw */ - MMIO_PROBE = 0x3, /* struct mmiotrace_map */ - MMIO_UNPROBE = 0x4, /* struct mmiotrace_map */ - MMIO_UNKNOWN_OP = 0x5, /* struct mmiotrace_rw */ + MMIO_READ = 0x1, /* struct mmiotrace_rw */ + MMIO_WRITE = 0x2, /* struct mmiotrace_rw */ + MMIO_PROBE = 0x3, /* struct mmiotrace_map */ + MMIO_UNPROBE = 0x4, /* struct mmiotrace_map */ + MMIO_UNKNOWN_OP = 0x5, /* struct mmiotrace_rw */ }; struct mmiotrace_rw { - resource_size_t phys; /* PCI address of register */ - unsigned long value; - unsigned long pc; /* optional program counter */ - int map_id; - unsigned char opcode; /* one of MMIO_{READ,WRITE,UNKNOWN_OP} */ - unsigned char width; /* size of register access in bytes */ + resource_size_t phys; /* PCI address of register */ + unsigned long value; + unsigned long pc; /* optional program counter */ + int map_id; + unsigned char opcode; /* one of MMIO_{READ,WRITE,UNKNOWN_OP} */ + unsigned char width; /* size of register access in bytes */ }; struct mmiotrace_map { - resource_size_t phys; /* base address in PCI space */ - unsigned long virt; /* base virtual address */ - unsigned long len; /* mapping size */ - int map_id; - unsigned char opcode; /* MMIO_PROBE or MMIO_UNPROBE */ + resource_size_t phys; /* base address in PCI space */ + unsigned long virt; /* base virtual address */ + unsigned long len; /* mapping size */ + int map_id; + unsigned char opcode; /* MMIO_PROBE or MMIO_UNPROBE */ }; /* in kernel/trace/trace_mmiotrace.c */ @@ -94,4 +112,4 @@ extern void mmio_trace_rw(struct mmiotrace_rw *rw); extern void mmio_trace_mapping(struct mmiotrace_map *map); extern int mmio_trace_printk(const char *fmt, va_list args); -#endif /* MMIOTRACE_H */ +#endif /* _LINUX_MMIOTRACE_H */ diff --git a/include/linux/mmzone.h b/include/linux/mmzone.h index 09c14e213b6..88959853737 100644 --- a/include/linux/mmzone.h +++ b/include/linux/mmzone.h @@ -50,9 +50,6 @@ extern int page_group_by_mobility_disabled; static inline int get_pageblock_migratetype(struct page *page) { - if (unlikely(page_group_by_mobility_disabled)) - return MIGRATE_UNMOVABLE; - return get_pageblock_flags_group(page, PB_migrate, PB_migrate_end); } @@ -86,13 +83,8 @@ enum zone_stat_item { NR_ACTIVE_ANON, /* " " " " " */ NR_INACTIVE_FILE, /* " " " " " */ NR_ACTIVE_FILE, /* " " " " " */ -#ifdef CONFIG_UNEVICTABLE_LRU NR_UNEVICTABLE, /* " " " " " */ NR_MLOCK, /* mlock()ed pages found and moved off LRU */ -#else - NR_UNEVICTABLE = NR_ACTIVE_FILE, /* avoid compiler errors in dead code */ - NR_MLOCK = NR_ACTIVE_FILE, -#endif NR_ANON_PAGES, /* Mapped anonymous pages */ NR_FILE_MAPPED, /* pagecache pages mapped into pagetables. 
only modified from process context */ @@ -135,11 +127,7 @@ enum lru_list { LRU_ACTIVE_ANON = LRU_BASE + LRU_ACTIVE, LRU_INACTIVE_FILE = LRU_BASE + LRU_FILE, LRU_ACTIVE_FILE = LRU_BASE + LRU_FILE + LRU_ACTIVE, -#ifdef CONFIG_UNEVICTABLE_LRU LRU_UNEVICTABLE, -#else - LRU_UNEVICTABLE = LRU_ACTIVE_FILE, /* avoid compiler errors in dead code */ -#endif NR_LRU_LISTS }; @@ -159,13 +147,20 @@ static inline int is_active_lru(enum lru_list l) static inline int is_unevictable_lru(enum lru_list l) { -#ifdef CONFIG_UNEVICTABLE_LRU return (l == LRU_UNEVICTABLE); -#else - return 0; -#endif } +enum zone_watermarks { + WMARK_MIN, + WMARK_LOW, + WMARK_HIGH, + NR_WMARK +}; + +#define min_wmark_pages(z) (z->watermark[WMARK_MIN]) +#define low_wmark_pages(z) (z->watermark[WMARK_LOW]) +#define high_wmark_pages(z) (z->watermark[WMARK_HIGH]) + struct per_cpu_pages { int count; /* number of pages in the list */ int high; /* high watermark, emptying needed */ @@ -278,7 +273,10 @@ struct zone_reclaim_stat { struct zone { /* Fields commonly accessed by the page allocator */ - unsigned long pages_min, pages_low, pages_high; + + /* zone watermarks, access with *_wmark_pages(zone) macros */ + unsigned long watermark[NR_WMARK]; + /* * We don't know if the memory that we're going to allocate will be freeable * or/and it will be released eventually, so to avoid totally wasting several @@ -323,9 +321,9 @@ struct zone { /* Fields commonly accessed by the page reclaim scanner */ spinlock_t lru_lock; - struct { + struct zone_lru { struct list_head list; - unsigned long nr_scan; + unsigned long nr_saved_scan; /* accumulated for batching */ } lru[NR_LRU_LISTS]; struct zone_reclaim_stat reclaim_stat; @@ -764,12 +762,6 @@ extern int numa_zonelist_order_handler(struct ctl_table *, int, extern char numa_zonelist_order[]; #define NUMA_ZONELIST_ORDER_LEN 16 /* string buffer size */ -#include <linux/topology.h> -/* Returns the number of the current Node. */ -#ifndef numa_node_id -#define numa_node_id() (cpu_to_node(raw_smp_processor_id())) -#endif - #ifndef CONFIG_NEED_MULTIPLE_NODES extern struct pglist_data contig_page_data; @@ -806,6 +798,14 @@ extern struct zone *next_zone(struct zone *zone); zone; \ zone = next_zone(zone)) +#define for_each_populated_zone(zone) \ + for (zone = (first_online_pgdat())->node_zones; \ + zone; \ + zone = next_zone(zone)) \ + if (!populated_zone(zone)) \ + ; /* do nothing */ \ + else + static inline struct zone *zonelist_zone(struct zoneref *zoneref) { return zoneref->zone; @@ -1071,7 +1071,7 @@ void sparse_init(void); #endif /* CONFIG_SPARSEMEM */ #ifdef CONFIG_NODES_SPAN_OTHER_NODES -#define early_pfn_in_nid(pfn, nid) (early_pfn_to_nid(pfn) == (nid)) +bool early_pfn_in_nid(unsigned long pfn, int nid); #else #define early_pfn_in_nid(pfn, nid) (1) #endif @@ -1095,6 +1095,32 @@ unsigned long __init node_memmap_size_bytes(int, unsigned long, unsigned long); #define pfn_valid_within(pfn) (1) #endif +#ifdef CONFIG_ARCH_HAS_HOLES_MEMORYMODEL +/* + * pfn_valid() is meant to be able to tell if a given PFN has valid memmap + * associated with it or not. In FLATMEM, it is expected that holes always + * have valid memmap as long as there is valid PFNs either side of the hole. + * In SPARSEMEM, it is assumed that a valid section has a memmap for the + * entire section. + * + * However, an ARM, and maybe other embedded architectures in the future + * free memmap backing holes to save memory on the assumption the memmap is + * never used. 
The page_zone linkages are then broken even though pfn_valid() + * returns true. A walker of the full memmap must then do this additional + * check to ensure the memmap they are looking at is sane by making sure + * the zone and PFN linkages are still valid. This is expensive, but walkers + * of the full memmap are extremely rare. + */ +int memmap_valid_within(unsigned long pfn, + struct page *page, struct zone *zone); +#else +static inline int memmap_valid_within(unsigned long pfn, + struct page *page, struct zone *zone) +{ + return 1; +} +#endif /* CONFIG_ARCH_HAS_HOLES_MEMORYMODEL */ + #endif /* !__GENERATING_BOUNDS.H */ #endif /* !__ASSEMBLY__ */ #endif /* _LINUX_MMZONE_H */ diff --git a/include/linux/mnt_namespace.h b/include/linux/mnt_namespace.h index 830bbcd449d..d74785c2393 100644 --- a/include/linux/mnt_namespace.h +++ b/include/linux/mnt_namespace.h @@ -2,10 +2,9 @@ #define _NAMESPACE_H_ #ifdef __KERNEL__ -#include <linux/mount.h> -#include <linux/sched.h> -#include <linux/nsproxy.h> +#include <linux/path.h> #include <linux/seq_file.h> +#include <linux/wait.h> struct mnt_namespace { atomic_t count; @@ -22,24 +21,12 @@ struct proc_mounts { int event; }; +struct fs_struct; + +extern struct mnt_namespace *create_mnt_ns(struct vfsmount *mnt); extern struct mnt_namespace *copy_mnt_ns(unsigned long, struct mnt_namespace *, struct fs_struct *); -extern void __put_mnt_ns(struct mnt_namespace *ns); - -static inline void put_mnt_ns(struct mnt_namespace *ns) -{ - if (atomic_dec_and_lock(&ns->count, &vfsmount_lock)) - /* releases vfsmount_lock */ - __put_mnt_ns(ns); -} - -static inline void exit_mnt_ns(struct task_struct *p) -{ - struct mnt_namespace *ns = p->nsproxy->mnt_ns; - if (ns) - put_mnt_ns(ns); -} - +extern void put_mnt_ns(struct mnt_namespace *ns); static inline void get_mnt_ns(struct mnt_namespace *ns) { atomic_inc(&ns->count); diff --git a/include/linux/mod_devicetable.h b/include/linux/mod_devicetable.h index 97b91d1abb4..1bf5900ffe4 100644 --- a/include/linux/mod_devicetable.h +++ b/include/linux/mod_devicetable.h @@ -443,8 +443,24 @@ struct dmi_system_id { struct dmi_strmatch matches[4]; void *driver_data; }; +/* + * struct dmi_device_id appears during expansion of + * "MODULE_DEVICE_TABLE(dmi, x)". 
Compiler doesn't look inside it + * but this is enough for gcc 3.4.6 to error out: + * error: storage size of '__mod_dmi_device_table' isn't known + */ +#define dmi_device_id dmi_system_id #endif #define DMI_MATCH(a, b) { a, b } +#define PLATFORM_NAME_SIZE 20 +#define PLATFORM_MODULE_PREFIX "platform:" + +struct platform_device_id { + char name[PLATFORM_NAME_SIZE]; + kernel_ulong_t driver_data + __attribute__((aligned(sizeof(kernel_ulong_t)))); +}; + #endif /* LINUX_MOD_DEVICETABLE_H */ diff --git a/include/linux/module.h b/include/linux/module.h index 4f7ea12463d..f8f92d015ef 100644 --- a/include/linux/module.h +++ b/include/linux/module.h @@ -17,10 +17,12 @@ #include <linux/moduleparam.h> #include <linux/marker.h> #include <linux/tracepoint.h> -#include <asm/local.h> +#include <asm/local.h> #include <asm/module.h> +#include <trace/events/module.h> + /* Not Yet Implemented */ #define MODULE_SUPPORTED_DEVICE(name) @@ -77,6 +79,7 @@ search_extable(const struct exception_table_entry *first, void sort_extable(struct exception_table_entry *start, struct exception_table_entry *finish); void sort_main_extable(void); +void trim_init_extable(struct module *m); #ifdef MODULE #define MODULE_GENERIC_TABLE(gtype,name) \ @@ -219,11 +222,6 @@ void *__symbol_get_gpl(const char *symbol); #endif -struct module_ref -{ - local_t count; -} ____cacheline_aligned; - enum module_state { MODULE_STATE_LIVE, @@ -253,6 +251,10 @@ struct module const unsigned long *crcs; unsigned int num_syms; + /* Kernel parameters. */ + struct kernel_param *kp; + unsigned int num_kp; + /* GPL-only exported symbols. */ unsigned int num_gpl_syms; const struct kernel_symbol *gpl_syms; @@ -334,6 +336,19 @@ struct module unsigned int num_tracepoints; #endif +#ifdef CONFIG_TRACING + const char **trace_bprintk_fmt_start; + unsigned int num_trace_bprintk_fmt; +#endif +#ifdef CONFIG_EVENT_TRACING + struct ftrace_event_call *trace_events; + unsigned int num_trace_events; +#endif +#ifdef CONFIG_FTRACE_MCOUNT_RECORD + unsigned long *ftrace_callsites; + unsigned int num_ftrace_callsites; +#endif + #ifdef CONFIG_MODULE_UNLOAD /* What modules depend on me? */ struct list_head modules_which_use_me; @@ -344,14 +359,25 @@ struct module /* Destruction function. */ void (*exit)(void); - /* Reference counts */ - struct module_ref ref[NR_CPUS]; +#ifdef CONFIG_SMP + char *refptr; +#else + local_t ref; +#endif +#endif + +#ifdef CONFIG_CONSTRUCTORS + /* Constructor functions. */ + ctor_fn_t *ctors; + unsigned int num_ctors; #endif }; #ifndef MODULE_ARCH_INIT #define MODULE_ARCH_INIT {} #endif +extern struct mutex module_mutex; + /* FIXME: It'd be nice to isolate modules during init, too, so they aren't used before they (may) fail. But presently too much code (IDE & SCSI) require entry into the module during init.*/ @@ -360,10 +386,10 @@ static inline int module_is_live(struct module *mod) return mod->state != MODULE_STATE_GOING; } -/* Is this address in a module? 
(second is with no locks, for oops) */ -struct module *module_text_address(unsigned long addr); struct module *__module_text_address(unsigned long addr); -int is_module_address(unsigned long addr); +struct module *__module_address(unsigned long addr); +bool is_module_address(unsigned long addr); +bool is_module_text_address(unsigned long addr); static inline int within_module_core(unsigned long addr, struct module *mod) { @@ -377,6 +403,31 @@ static inline int within_module_init(unsigned long addr, struct module *mod) addr < (unsigned long)mod->module_init + mod->init_size; } +/* Search for module by name: must hold module_mutex. */ +struct module *find_module(const char *name); + +struct symsearch { + const struct kernel_symbol *start, *stop; + const unsigned long *crcs; + enum { + NOT_GPL_ONLY, + GPL_ONLY, + WILL_BE_GPL_ONLY, + } licence; + bool unused; +}; + +/* Search for an exported symbol by name. */ +const struct kernel_symbol *find_symbol(const char *name, + struct module **owner, + const unsigned long **crc, + bool gplok, + bool warn); + +/* Walk the exported symbol table */ +bool each_symbol(bool (*fn)(const struct symsearch *arr, struct module *owner, + unsigned int symnum, void *data), void *data); + /* Returns 0 and fills in value, defined and namebuf, or -ERANGE if symnum out of range. */ int module_get_kallsym(unsigned int symnum, unsigned long *value, char *type, @@ -385,6 +436,10 @@ int module_get_kallsym(unsigned int symnum, unsigned long *value, char *type, /* Look for this name: can be of form module:name. */ unsigned long module_kallsyms_lookup_name(const char *name); +int module_kallsyms_on_each_symbol(int (*fn)(void *, const char *, + struct module *, unsigned long), + void *data); + extern void __module_put_and_exit(struct module *mod, long code) __attribute__((noreturn)); #define module_put_and_exit(code) __module_put_and_exit(THIS_MODULE, code); @@ -395,13 +450,24 @@ void __symbol_put(const char *symbol); #define symbol_put(x) __symbol_put(MODULE_SYMBOL_PREFIX #x) void symbol_put_addr(void *addr); +static inline local_t *__module_ref_addr(struct module *mod, int cpu) +{ +#ifdef CONFIG_SMP + return (local_t *) (mod->refptr + per_cpu_offset(cpu)); +#else + return &mod->ref; +#endif +} + /* Sometimes we know we already have a refcount, and it's easier not to handle the error case (which only happens with rmmod --wait). */ static inline void __module_get(struct module *module) { if (module) { - BUG_ON(module_refcount(module) == 0); - local_inc(&module->ref[get_cpu()].count); + unsigned int cpu = get_cpu(); + local_inc(__module_ref_addr(module, cpu)); + trace_module_get(module, _THIS_IP_, + local_read(__module_ref_addr(module, cpu))); put_cpu(); } } @@ -412,8 +478,11 @@ static inline int try_module_get(struct module *module) if (module) { unsigned int cpu = get_cpu(); - if (likely(module_is_live(module))) - local_inc(&module->ref[cpu].count); + if (likely(module_is_live(module))) { + local_inc(__module_ref_addr(module, cpu)); + trace_module_get(module, _THIS_IP_, + local_read(__module_ref_addr(module, cpu))); + } else ret = 0; put_cpu(); @@ -438,6 +507,7 @@ static inline void __module_get(struct module *module) #define symbol_put_addr(p) do { } while(0) #endif /* CONFIG_MODULE_UNLOAD */ +int use_module(struct module *a, struct module *b); /* This is a #define so the string doesn't get put in every .o file */ #define module_name(mod) \ @@ -484,21 +554,24 @@ search_module_extables(unsigned long addr) return NULL; } -/* Is this address in a module? 
*/ -static inline struct module *module_text_address(unsigned long addr) +static inline struct module *__module_address(unsigned long addr) { return NULL; } -/* Is this address in a module? (don't take a lock, we're oopsing) */ static inline struct module *__module_text_address(unsigned long addr) { return NULL; } -static inline int is_module_address(unsigned long addr) +static inline bool is_module_address(unsigned long addr) { - return 0; + return false; +} + +static inline bool is_module_text_address(unsigned long addr) +{ + return false; } /* Get/put a kernel symbol (calls should be symmetric) */ @@ -553,6 +626,14 @@ static inline unsigned long module_kallsyms_lookup_name(const char *name) return 0; } +static inline int module_kallsyms_on_each_symbol(int (*fn)(void *, const char *, + struct module *, + unsigned long), + void *data) +{ + return 0; +} + static inline int register_module_notifier(struct notifier_block * nb) { /* no events will happen anyway, so this can always succeed */ @@ -630,4 +711,21 @@ static inline void module_remove_modinfo_attrs(struct module *mod) #define __MODULE_STRING(x) __stringify(x) + +#ifdef CONFIG_GENERIC_BUG +int module_bug_finalize(const Elf_Ehdr *, const Elf_Shdr *, + struct module *); +void module_bug_cleanup(struct module *); + +#else /* !CONFIG_GENERIC_BUG */ + +static inline int module_bug_finalize(const Elf_Ehdr *hdr, + const Elf_Shdr *sechdrs, + struct module *mod) +{ + return 0; +} +static inline void module_bug_cleanup(struct module *mod) {} +#endif /* CONFIG_GENERIC_BUG */ + #endif /* _LINUX_MODULE_H */ diff --git a/include/linux/moduleparam.h b/include/linux/moduleparam.h index e4af3399ef4..6547c3cdbc4 100644 --- a/include/linux/moduleparam.h +++ b/include/linux/moduleparam.h @@ -36,9 +36,14 @@ typedef int (*param_set_fn)(const char *val, struct kernel_param *kp); /* Returns length written or -errno. Buffer is 4k (ie. be short!) */ typedef int (*param_get_fn)(char *buffer, struct kernel_param *kp); +/* Flag bits for kernel_param.flags */ +#define KPARAM_KMALLOCED 1 +#define KPARAM_ISBOOL 2 + struct kernel_param { const char *name; - unsigned int perm; + u16 perm; + u16 flags; param_set_fn set; param_get_fn get; union { @@ -79,7 +84,7 @@ struct kparam_array parameters. perm sets the visibility in sysfs: 000 means it's not there, read bits mean it's readable, write bits mean it's writable. */ -#define __module_param_call(prefix, name, set, get, arg, perm) \ +#define __module_param_call(prefix, name, set, get, arg, isbool, perm) \ /* Default value instead of permissions? */ \ static int __param_perm_check_##name __attribute__((unused)) = \ BUILD_BUG_ON_ZERO((perm) < 0 || (perm) > 0777 || ((perm) & 2)) \ @@ -88,10 +93,13 @@ struct kparam_array static struct kernel_param __moduleparam_const __param_##name \ __used \ __attribute__ ((unused,__section__ ("__param"),aligned(sizeof(void *)))) \ - = { __param_str_##name, perm, set, get, { arg } } + = { __param_str_##name, perm, isbool ? 
KPARAM_ISBOOL : 0, \ + set, get, { arg } } #define module_param_call(name, set, get, arg, perm) \ - __module_param_call(MODULE_PARAM_PREFIX, name, set, get, arg, perm) + __module_param_call(MODULE_PARAM_PREFIX, \ + name, set, get, arg, \ + __same_type(*(arg), bool), perm) /* Helper functions: type is byte, short, ushort, int, uint, long, ulong, charp, bool or invbool, or XXX if you define param_get_XXX, @@ -120,15 +128,16 @@ struct kparam_array #define core_param(name, var, type, perm) \ param_check_##type(name, &(var)); \ __module_param_call("", name, param_set_##type, param_get_##type, \ - &var, perm) + &var, __same_type(var, bool), perm) #endif /* !MODULE */ /* Actually copy string: maxlen param is usually sizeof(string). */ #define module_param_string(name, string, len, perm) \ static const struct kparam_string __param_string_##name \ = { len, string }; \ - module_param_call(name, param_set_copystring, param_get_string, \ - .str = &__param_string_##name, perm); \ + __module_param_call(MODULE_PARAM_PREFIX, name, \ + param_set_copystring, param_get_string, \ + .str = &__param_string_##name, 0, perm); \ __MODULE_PARM_TYPE(name, "string") /* Called on module insert or kernel boot */ @@ -138,6 +147,16 @@ extern int parse_args(const char *name, unsigned num, int (*unknown)(char *param, char *val)); +/* Called by module remove. */ +#ifdef CONFIG_SYSFS +extern void destroy_params(const struct kernel_param *params, unsigned num); +#else +static inline void destroy_params(const struct kernel_param *params, + unsigned num) +{ +} +#endif /* !CONFIG_SYSFS */ + /* All the helper functions */ /* The macros to do compile-time type checking stolen from Jakub Jelinek, who IIRC came up with this idea for the 2.4 module init code. */ @@ -176,21 +195,30 @@ extern int param_set_charp(const char *val, struct kernel_param *kp); extern int param_get_charp(char *buffer, struct kernel_param *kp); #define param_check_charp(name, p) __param_check(name, p, char *) +/* For historical reasons "bool" parameters can be (unsigned) "int". */ extern int param_set_bool(const char *val, struct kernel_param *kp); extern int param_get_bool(char *buffer, struct kernel_param *kp); -#define param_check_bool(name, p) __param_check(name, p, int) +#define param_check_bool(name, p) \ + static inline void __check_##name(void) \ + { \ + BUILD_BUG_ON(!__same_type(*(p), bool) && \ + !__same_type(*(p), unsigned int) && \ + !__same_type(*(p), int)); \ + } extern int param_set_invbool(const char *val, struct kernel_param *kp); extern int param_get_invbool(char *buffer, struct kernel_param *kp); -#define param_check_invbool(name, p) __param_check(name, p, int) +#define param_check_invbool(name, p) __param_check(name, p, bool) /* Comma-separated array: *nump is set to number they actually specified. 
*/ #define module_param_array_named(name, array, type, nump, perm) \ static const struct kparam_array __param_arr_##name \ = { ARRAY_SIZE(array), nump, param_set_##type, param_get_##type,\ sizeof(array[0]), array }; \ - module_param_call(name, param_array_set, param_array_get, \ - .arr = &__param_arr_##name, perm); \ + __module_param_call(MODULE_PARAM_PREFIX, name, \ + param_array_set, param_array_get, \ + .arr = &__param_arr_##name, \ + __same_type(array[0], bool), perm); \ __MODULE_PARM_TYPE(name, "array of " #type) #define module_param_array(name, type, nump, perm) \ diff --git a/include/linux/mount.h b/include/linux/mount.h index cab2a85e2ee..5d527536486 100644 --- a/include/linux/mount.h +++ b/include/linux/mount.h @@ -27,9 +27,10 @@ struct mnt_namespace; #define MNT_NODIRATIME 0x10 #define MNT_RELATIME 0x20 #define MNT_READONLY 0x40 /* does the user want this to be r/o? */ +#define MNT_STRICTATIME 0x80 #define MNT_SHRINKABLE 0x100 -#define MNT_IMBALANCED_WRITE_COUNT 0x200 /* just for debugging */ +#define MNT_WRITE_HOLD 0x200 #define MNT_SHARED 0x1000 /* if the vfsmount is a shared mount */ #define MNT_UNBINDABLE 0x2000 /* if the vfsmount is a unbindable mount */ @@ -64,13 +65,22 @@ struct vfsmount { int mnt_expiry_mark; /* true if marked for expiry */ int mnt_pinned; int mnt_ghosts; - /* - * This value is not stable unless all of the mnt_writers[] spinlocks - * are held, and all mnt_writer[]s on this mount have 0 as their ->count - */ - atomic_t __mnt_writers; +#ifdef CONFIG_SMP + int *mnt_writers; +#else + int mnt_writers; +#endif }; +static inline int *get_mnt_writers_ptr(struct vfsmount *mnt) +{ +#ifdef CONFIG_SMP + return mnt->mnt_writers; +#else + return &mnt->mnt_writers; +#endif +} + static inline struct vfsmount *mntget(struct vfsmount *mnt) { if (mnt) @@ -78,7 +88,11 @@ static inline struct vfsmount *mntget(struct vfsmount *mnt) return mnt; } +struct file; /* forward dec */ + extern int mnt_want_write(struct vfsmount *mnt); +extern int mnt_want_write_file(struct file *file); +extern int mnt_clone_write(struct vfsmount *mnt); extern void mnt_drop_write(struct vfsmount *mnt); extern void mntput_no_expire(struct vfsmount *mnt); extern void mnt_pin(struct vfsmount *mnt); diff --git a/include/linux/mpage.h b/include/linux/mpage.h index 5c42821da2d..068a0c9946a 100644 --- a/include/linux/mpage.h +++ b/include/linux/mpage.h @@ -11,21 +11,11 @@ */ #ifdef CONFIG_BLOCK -struct mpage_data { - struct bio *bio; - sector_t last_block_in_bio; - get_block_t *get_block; - unsigned use_writepage; -}; - struct writeback_control; -struct bio *mpage_bio_submit(int rw, struct bio *bio); int mpage_readpages(struct address_space *mapping, struct list_head *pages, unsigned nr_pages, get_block_t get_block); int mpage_readpage(struct page *page, get_block_t get_block); -int __mpage_writepage(struct page *page, struct writeback_control *wbc, - void *data); int mpage_writepages(struct address_space *mapping, struct writeback_control *wbc, get_block_t get_block); int mpage_writepage(struct page *page, get_block_t *get_block, diff --git a/include/linux/mroute.h b/include/linux/mroute.h index 8a455694d68..0d45b4e8d36 100644 --- a/include/linux/mroute.h +++ b/include/linux/mroute.h @@ -193,6 +193,9 @@ struct vif_device struct mfc_cache { struct mfc_cache *next; /* Next entry on cache line */ +#ifdef CONFIG_NET_NS + struct net *mfc_net; +#endif __be32 mfc_mcastgrp; /* Group the entry belongs to */ __be32 mfc_origin; /* Source of packet */ vifi_t mfc_parent; /* Source interface */ @@ -215,6 +218,18 @@ struct 
mfc_cache } mfc_un; }; +static inline +struct net *mfc_net(const struct mfc_cache *mfc) +{ + return read_pnet(&mfc->mfc_net); +} + +static inline +void mfc_net_set(struct mfc_cache *mfc, struct net *net) +{ + write_pnet(&mfc->mfc_net, hold_net(net)); +} + #define MFC_STATIC 1 #define MFC_NOTIFY 2 @@ -241,7 +256,8 @@ struct mfc_cache #ifdef __KERNEL__ struct rtmsg; -extern int ipmr_get_route(struct sk_buff *skb, struct rtmsg *rtm, int nowait); +extern int ipmr_get_route(struct net *net, struct sk_buff *skb, + struct rtmsg *rtm, int nowait); #endif #endif diff --git a/include/linux/mroute6.h b/include/linux/mroute6.h index 5375faca1f7..43dc97e3218 100644 --- a/include/linux/mroute6.h +++ b/include/linux/mroute6.h @@ -65,7 +65,7 @@ struct mif6ctl { mifi_t mif6c_mifi; /* Index of MIF */ unsigned char mif6c_flags; /* MIFF_ flags */ unsigned char vifc_threshold; /* ttl limit */ - u_short mif6c_pifi; /* the index of the physical IF */ + __u16 mif6c_pifi; /* the index of the physical IF */ unsigned int vifc_rate_limit; /* Rate limiter values (NI) */ }; diff --git a/include/linux/msdos_fs.h b/include/linux/msdos_fs.h index e0a9b207920..ce38f1caa5e 100644 --- a/include/linux/msdos_fs.h +++ b/include/linux/msdos_fs.h @@ -1,6 +1,7 @@ #ifndef _LINUX_MSDOS_FS_H #define _LINUX_MSDOS_FS_H +#include <linux/types.h> #include <linux/magic.h> #include <asm/byteorder.h> diff --git a/include/linux/msi.h b/include/linux/msi.h index d2b8a1e8ca1..6991ab5b24d 100644 --- a/include/linux/msi.h +++ b/include/linux/msi.h @@ -20,20 +20,23 @@ extern void write_msi_msg(unsigned int irq, struct msi_msg *msg); struct msi_desc { struct { - __u8 type : 5; /* {0: unused, 5h:MSI, 11h:MSI-X} */ + __u8 is_msix : 1; + __u8 multiple: 3; /* log2 number of messages */ __u8 maskbit : 1; /* mask-pending bit supported ? 
*/ - __u8 masked : 1; __u8 is_64 : 1; /* Address size: 0=32bit 1=64bit */ __u8 pos; /* Location of the msi capability */ - __u32 maskbits_mask; /* mask bits mask */ __u16 entry_nr; /* specific enabled entry */ unsigned default_irq; /* default pre-assigned irq */ - }msi_attrib; + } msi_attrib; + u32 masked; /* mask bits */ unsigned int irq; struct list_head list; - void __iomem *mask_base; + union { + void __iomem *mask_base; + u8 mask_pos; + }; struct pci_dev *dev; /* Last set MSI message */ diff --git a/include/linux/mtd/mtd.h b/include/linux/mtd/mtd.h index 3aa5d77c2cd..0f32a9b6ff5 100644 --- a/include/linux/mtd/mtd.h +++ b/include/linux/mtd/mtd.h @@ -11,6 +11,7 @@ #include <linux/module.h> #include <linux/uio.h> #include <linux/notifier.h> +#include <linux/device.h> #include <linux/mtd/compatmac.h> #include <mtd/mtd-abi.h> @@ -162,6 +163,20 @@ struct mtd_info { /* We probably shouldn't allow XIP if the unpoint isn't a NULL */ void (*unpoint) (struct mtd_info *mtd, loff_t from, size_t len); + /* Allow NOMMU mmap() to directly map the device (if not NULL) + * - return the address to which the offset maps + * - return -ENOSYS to indicate refusal to do the mapping + */ + unsigned long (*get_unmapped_area) (struct mtd_info *mtd, + unsigned long len, + unsigned long offset, + unsigned long flags); + + /* Backing device capabilities for this device + * - provides mmap capabilities + */ + struct backing_dev_info *backing_dev_info; + int (*read) (struct mtd_info *mtd, loff_t from, size_t len, size_t *retlen, u_char *buf); int (*write) (struct mtd_info *mtd, loff_t to, size_t len, size_t *retlen, const u_char *buf); @@ -223,6 +238,7 @@ struct mtd_info { void *priv; struct module *owner; + struct device dev; int usecount; /* If the driver is something smart, like UBI, it may need to maintain @@ -233,6 +249,11 @@ struct mtd_info { void (*put_device) (struct mtd_info *mtd); }; +static inline struct mtd_info *dev_to_mtd(struct device *dev) +{ + return dev ? dev_get_drvdata(dev) : NULL; +} + static inline uint32_t mtd_div_by_eb(uint64_t sz, struct mtd_info *mtd) { if (mtd->erasesize_shift) diff --git a/include/linux/mtd/nand.h b/include/linux/mtd/nand.h index db5b63da2a7..4030ebada49 100644 --- a/include/linux/mtd/nand.h +++ b/include/linux/mtd/nand.h @@ -43,8 +43,8 @@ extern void nand_wait_ready(struct mtd_info *mtd); * is supported now. If you add a chip with bigger oobsize/page * adjust this accordingly. */ -#define NAND_MAX_OOBSIZE 64 -#define NAND_MAX_PAGESIZE 2048 +#define NAND_MAX_OOBSIZE 128 +#define NAND_MAX_PAGESIZE 4096 /* * Constants for hardware specific CLE/ALE/NCE function @@ -563,6 +563,7 @@ extern int nand_do_read(struct mtd_info *mtd, loff_t from, size_t len, * @options: Option flags, e.g. 
16bit buswidth * @ecclayout: ecc layout info structure * @part_probe_types: NULL-terminated array of probe types + * @set_parts: platform specific function to set partitions * @priv: hardware controller specific settings */ struct platform_nand_chip { @@ -574,26 +575,41 @@ struct platform_nand_chip { int chip_delay; unsigned int options; const char **part_probe_types; + void (*set_parts)(uint64_t size, + struct platform_nand_chip *chip); void *priv; }; +/* Keep gcc happy */ +struct platform_device; + /** * struct platform_nand_ctrl - controller level device structure + * @probe: platform specific function to probe/setup hardware + * @remove: platform specific function to remove/teardown hardware * @hwcontrol: platform specific hardware control structure * @dev_ready: platform specific function to read ready/busy pin * @select_chip: platform specific chip select function * @cmd_ctrl: platform specific function for controlling * ALE/CLE/nCE. Also used to write command and address + * @write_buf: platform specific function for write buffer + * @read_buf: platform specific function for read buffer * @priv: private data to transport driver specific settings * * All fields are optional and depend on the hardware driver requirements */ struct platform_nand_ctrl { + int (*probe)(struct platform_device *pdev); + void (*remove)(struct platform_device *pdev); void (*hwcontrol)(struct mtd_info *mtd, int cmd); int (*dev_ready)(struct mtd_info *mtd); void (*select_chip)(struct mtd_info *mtd, int chip); void (*cmd_ctrl)(struct mtd_info *mtd, int dat, unsigned int ctrl); + void (*write_buf)(struct mtd_info *mtd, + const uint8_t *buf, int len); + void (*read_buf)(struct mtd_info *mtd, + uint8_t *buf, int len); void *priv; }; diff --git a/include/linux/mtd/onenand.h b/include/linux/mtd/onenand.h index 9aa2a9149b5..8ed87337438 100644 --- a/include/linux/mtd/onenand.h +++ b/include/linux/mtd/onenand.h @@ -17,6 +17,7 @@ #include <linux/mtd/onenand_regs.h> #include <linux/mtd/bbm.h> +#define MAX_DIES 2 #define MAX_BUFFERRAM 2 /* Scan and identify a OneNAND device */ @@ -51,7 +52,12 @@ struct onenand_bufferram { /** * struct onenand_chip - OneNAND Private Flash Chip Data * @base: [BOARDSPECIFIC] address to access OneNAND + * @dies: [INTERN][FLEX-ONENAND] number of dies on chip + * @boundary: [INTERN][FLEX-ONENAND] Boundary of the dies + * @diesize: [INTERN][FLEX-ONENAND] Size of the dies * @chipsize: [INTERN] the size of one chip for multichip arrays + * FIXME For Flex-OneNAND, chipsize holds maximum possible + * device size ie when all blocks are considered MLC * @device_id: [INTERN] device ID * @density_mask: chip density, used for DDP devices * @verstion_id: [INTERN] version ID @@ -68,6 +74,8 @@ struct onenand_bufferram { * @command: [REPLACEABLE] hardware specific function for writing * commands to the chip * @wait: [REPLACEABLE] hardware specific function for wait on ready + * @bbt_wait: [REPLACEABLE] hardware specific function for bbt wait on ready + * @unlock_all: [REPLACEABLE] hardware specific function for unlock all * @read_bufferram: [REPLACEABLE] hardware specific function for BufferRAM Area * @write_bufferram: [REPLACEABLE] hardware specific function for BufferRAM Area * @read_word: [REPLACEABLE] hardware specific function for read @@ -92,9 +100,13 @@ struct onenand_bufferram { */ struct onenand_chip { void __iomem *base; + unsigned dies; + unsigned boundary[MAX_DIES]; + loff_t diesize[MAX_DIES]; unsigned int chipsize; unsigned int device_id; unsigned int version_id; + unsigned int technology; 
unsigned int density_mask; unsigned int options; @@ -108,6 +120,8 @@ struct onenand_chip { int (*command)(struct mtd_info *mtd, int cmd, loff_t address, size_t len); int (*wait)(struct mtd_info *mtd, int state); + int (*bbt_wait)(struct mtd_info *mtd, int state); + void (*unlock_all)(struct mtd_info *mtd); int (*read_bufferram)(struct mtd_info *mtd, int area, unsigned char *buffer, int offset, size_t count); int (*write_bufferram)(struct mtd_info *mtd, int area, @@ -145,6 +159,8 @@ struct onenand_chip { #define ONENAND_SET_BUFFERRAM0(this) (this->bufferram_index = 0) #define ONENAND_SET_BUFFERRAM1(this) (this->bufferram_index = 1) +#define FLEXONENAND(this) \ + (this->device_id & DEVICE_IS_FLEXONENAND) #define ONENAND_GET_SYS_CFG1(this) \ (this->read_word(this->base + ONENAND_REG_SYS_CFG1)) #define ONENAND_SET_SYS_CFG1(v, this) \ @@ -153,6 +169,9 @@ struct onenand_chip { #define ONENAND_IS_DDP(this) \ (this->device_id & ONENAND_DEVICE_IS_DDP) +#define ONENAND_IS_MLC(this) \ + (this->technology & ONENAND_TECHNOLOGY_IS_MLC) + #ifdef CONFIG_MTD_ONENAND_2X_PROGRAM #define ONENAND_IS_2PLANE(this) \ (this->options & ONENAND_HAS_2PLANE) @@ -169,6 +188,7 @@ struct onenand_chip { #define ONENAND_HAS_CONT_LOCK (0x0001) #define ONENAND_HAS_UNLOCK_ALL (0x0002) #define ONENAND_HAS_2PLANE (0x0004) +#define ONENAND_SKIP_UNLOCK_CHECK (0x0100) #define ONENAND_PAGEBUF_ALLOC (0x1000) #define ONENAND_OOBBUF_ALLOC (0x2000) @@ -176,6 +196,7 @@ struct onenand_chip { * OneNAND Flash Manufacturer ID Codes */ #define ONENAND_MFR_SAMSUNG 0xec +#define ONENAND_MFR_NUMONYX 0x20 /** * struct onenand_manufacturers - NAND Flash Manufacturer ID Structure @@ -189,5 +210,8 @@ struct onenand_manufacturers { int onenand_bbt_read_oob(struct mtd_info *mtd, loff_t from, struct mtd_oob_ops *ops); +unsigned onenand_block(struct onenand_chip *this, loff_t addr); +loff_t onenand_addr(struct onenand_chip *this, int block); +int flexonenand_region(struct mtd_info *mtd, loff_t addr); #endif /* __LINUX_MTD_ONENAND_H */ diff --git a/include/linux/mtd/onenand_regs.h b/include/linux/mtd/onenand_regs.h index 0c6bbe28f38..86a6bbef646 100644 --- a/include/linux/mtd/onenand_regs.h +++ b/include/linux/mtd/onenand_regs.h @@ -67,6 +67,9 @@ /* * Device ID Register F001h (R) */ +#define DEVICE_IS_FLEXONENAND (1 << 9) +#define FLEXONENAND_PI_MASK (0x3ff) +#define FLEXONENAND_PI_UNLOCK_SHIFT (14) #define ONENAND_DEVICE_DENSITY_MASK (0xf) #define ONENAND_DEVICE_DENSITY_SHIFT (4) #define ONENAND_DEVICE_IS_DDP (1 << 3) @@ -84,6 +87,11 @@ #define ONENAND_VERSION_PROCESS_SHIFT (8) /* + * Technology Register F006h (R) + */ +#define ONENAND_TECHNOLOGY_IS_MLC (1 << 0) + +/* * Start Address 1 F100h (R/W) & Start Address 2 F101h (R/W) */ #define ONENAND_DDP_SHIFT (15) @@ -93,7 +101,8 @@ /* * Start Address 8 F107h (R/W) */ -#define ONENAND_FPA_MASK (0x3f) +/* Note: It's actually 0x3f in case of SLC */ +#define ONENAND_FPA_MASK (0x7f) #define ONENAND_FPA_SHIFT (2) #define ONENAND_FSA_MASK (0x03) @@ -105,7 +114,8 @@ #define ONENAND_BSA_BOOTRAM (0 << 2) #define ONENAND_BSA_DATARAM0 (2 << 2) #define ONENAND_BSA_DATARAM1 (3 << 2) -#define ONENAND_BSC_MASK (0x03) +/* Note: It's actually 0x03 in case of SLC */ +#define ONENAND_BSC_MASK (0x07) /* * Command Register F220h (R/W) @@ -124,9 +134,13 @@ #define ONENAND_CMD_RESET (0xF0) #define ONENAND_CMD_OTP_ACCESS (0x65) #define ONENAND_CMD_READID (0x90) +#define FLEXONENAND_CMD_PI_UPDATE (0x05) +#define FLEXONENAND_CMD_PI_ACCESS (0x66) +#define FLEXONENAND_CMD_RECOVER_LSB (0x05) /* NOTE: Those are not *REAL* commands */ 
#define ONENAND_CMD_BUFFERRAM (0x1978) +#define FLEXONENAND_CMD_READ_PI (0x1985) /* * System Configuration 1 Register F221h (R, R/W) @@ -192,10 +206,12 @@ #define ONENAND_ECC_1BIT_ALL (0x5555) #define ONENAND_ECC_2BIT (1 << 1) #define ONENAND_ECC_2BIT_ALL (0xAAAA) +#define FLEXONENAND_UNCORRECTABLE_ERROR (0x1010) /* * One-Time Programmable (OTP) */ +#define FLEXONENAND_OTP_LOCK_OFFSET (2048) #define ONENAND_OTP_LOCK_OFFSET (14) #endif /* __ONENAND_REG_H */ diff --git a/include/linux/mtd/partitions.h b/include/linux/mtd/partitions.h index a45dd831b3f..b70313d33ff 100644 --- a/include/linux/mtd/partitions.h +++ b/include/linux/mtd/partitions.h @@ -40,7 +40,6 @@ struct mtd_partition { uint64_t offset; /* offset within the master MTD space */ uint32_t mask_flags; /* master MTD flags to mask out for this partition */ struct nand_ecclayout *ecclayout; /* out of band layout for this partition (NAND only)*/ - struct mtd_info **mtdp; /* pointer to store the MTD object */ }; #define MTDPART_OFS_NXTBLK (-2) @@ -48,6 +47,8 @@ struct mtd_partition { #define MTDPART_SIZ_FULL (0) +struct mtd_info; + int add_mtd_partitions(struct mtd_info *, const struct mtd_partition *, int); int del_mtd_partitions(struct mtd_info *); @@ -76,4 +77,16 @@ int __devinit of_mtd_parse_partitions(struct device *dev, struct device_node *node, struct mtd_partition **pparts); +#ifdef CONFIG_MTD_PARTITIONS +static inline int mtd_has_partitions(void) { return 1; } +#else +static inline int mtd_has_partitions(void) { return 0; } +#endif + +#ifdef CONFIG_MTD_CMDLINE_PARTS +static inline int mtd_has_cmdlinepart(void) { return 1; } +#else +static inline int mtd_has_cmdlinepart(void) { return 0; } +#endif + #endif diff --git a/include/linux/mtd/ubi.h b/include/linux/mtd/ubi.h index 6316fafe5c2..6913b71d9ab 100644 --- a/include/linux/mtd/ubi.h +++ b/include/linux/mtd/ubi.h @@ -132,6 +132,39 @@ struct ubi_device_info { dev_t cdev; }; +/* + * enum - volume notification types. + * @UBI_VOLUME_ADDED: volume has been added + * @UBI_VOLUME_REMOVED: volume has been removed + * @UBI_VOLUME_RESIZED: volume size has been re-sized + * @UBI_VOLUME_RENAMED: volume name has been re-named + * @UBI_VOLUME_UPDATED: volume has been updated + * + * These constants define which type of event has happened when a volume + * notification function is invoked. + */ +enum { + UBI_VOLUME_ADDED, + UBI_VOLUME_REMOVED, + UBI_VOLUME_RESIZED, + UBI_VOLUME_RENAMED, + UBI_VOLUME_UPDATED, +}; + +/* + * struct ubi_notification - UBI notification description structure. + * @di: UBI device description object + * @vi: UBI volume description object + * + * UBI notifiers are called with a pointer to an object of this type. The + * object describes the notification. Namely, it provides a description of the + * UBI device and UBI volume the notification informs about. 
+ */ +struct ubi_notification { + struct ubi_device_info di; + struct ubi_volume_info vi; +}; + /* UBI descriptor given to users when they open UBI volumes */ struct ubi_volume_desc; @@ -141,6 +174,10 @@ void ubi_get_volume_info(struct ubi_volume_desc *desc, struct ubi_volume_desc *ubi_open_volume(int ubi_num, int vol_id, int mode); struct ubi_volume_desc *ubi_open_volume_nm(int ubi_num, const char *name, int mode); +int ubi_register_volume_notifier(struct notifier_block *nb, + int ignore_existing); +int ubi_unregister_volume_notifier(struct notifier_block *nb); + void ubi_close_volume(struct ubi_volume_desc *desc); int ubi_leb_read(struct ubi_volume_desc *desc, int lnum, char *buf, int offset, int len, int check); diff --git a/include/linux/mutex.h b/include/linux/mutex.h index 7a0e5c4f807..878cab4f5fc 100644 --- a/include/linux/mutex.h +++ b/include/linux/mutex.h @@ -50,8 +50,10 @@ struct mutex { atomic_t count; spinlock_t wait_lock; struct list_head wait_list; -#ifdef CONFIG_DEBUG_MUTEXES +#if defined(CONFIG_DEBUG_MUTEXES) || defined(CONFIG_SMP) struct thread_info *owner; +#endif +#ifdef CONFIG_DEBUG_MUTEXES const char *name; void *magic; #endif @@ -68,7 +70,6 @@ struct mutex_waiter { struct list_head list; struct task_struct *task; #ifdef CONFIG_DEBUG_MUTEXES - struct mutex *lock; void *magic; #endif }; @@ -149,5 +150,6 @@ extern int __must_check mutex_lock_killable(struct mutex *lock); */ extern int mutex_trylock(struct mutex *lock); extern void mutex_unlock(struct mutex *lock); +extern int atomic_dec_and_mutex_lock(atomic_t *cnt, struct mutex *lock); #endif diff --git a/include/linux/namei.h b/include/linux/namei.h index fc2e0357987..d870ae2faed 100644 --- a/include/linux/namei.h +++ b/include/linux/namei.h @@ -18,6 +18,7 @@ enum { MAX_NESTED_LINKS = 8 }; struct nameidata { struct path path; struct qstr last; + struct path root; unsigned int flags; int last_type; unsigned depth; @@ -69,7 +70,6 @@ extern int path_lookup(const char *, unsigned, struct nameidata *); extern int vfs_path_lookup(struct dentry *, struct vfsmount *, const char *, unsigned int, struct nameidata *); -extern int path_lookup_open(int dfd, const char *name, unsigned lookup_flags, struct nameidata *, int open_flags); extern struct file *lookup_instantiate_filp(struct nameidata *nd, struct dentry *dentry, int (*open)(struct inode *, struct file *)); extern struct file *nameidata_to_filp(struct nameidata *nd, int flags); @@ -78,8 +78,8 @@ extern void release_open_intent(struct nameidata *); extern struct dentry *lookup_one_len(const char *, struct dentry *, int); extern struct dentry *lookup_one_noperm(const char *, struct dentry *); -extern int follow_down(struct vfsmount **, struct dentry **); -extern int follow_up(struct vfsmount **, struct dentry **); +extern int follow_down(struct path *); +extern int follow_up(struct path *); extern struct dentry *lock_rename(struct dentry *, struct dentry *); extern void unlock_rename(struct dentry *, struct dentry *); diff --git a/include/linux/ncp_fs.h b/include/linux/ncp_fs.h index f69e66d151c..30b06c89394 100644 --- a/include/linux/ncp_fs.h +++ b/include/linux/ncp_fs.h @@ -204,7 +204,7 @@ void ncp_update_inode2(struct inode *, struct ncp_entry_info *); /* linux/fs/ncpfs/dir.c */ extern const struct inode_operations ncp_dir_inode_operations; extern const struct file_operations ncp_dir_operations; -extern struct dentry_operations ncp_root_dentry_operations; +extern const struct dentry_operations ncp_root_dentry_operations; int ncp_conn_logged_in(struct super_block *); int 
ncp_date_dos2unix(__le16 time, __le16 date); void ncp_date_unix2dos(int unix_date, __le16 * time, __le16 * date); diff --git a/include/linux/ncp_no.h b/include/linux/ncp_no.h index f56a696a7cc..cddaa48fb18 100644 --- a/include/linux/ncp_no.h +++ b/include/linux/ncp_no.h @@ -2,18 +2,18 @@ #define _NCP_NO /* these define the attribute byte as seen by NCP */ -#define aRONLY (__constant_cpu_to_le32(1)) -#define aHIDDEN (__constant_cpu_to_le32(2)) -#define aSYSTEM (__constant_cpu_to_le32(4)) -#define aEXECUTE (__constant_cpu_to_le32(8)) -#define aDIR (__constant_cpu_to_le32(0x10)) -#define aARCH (__constant_cpu_to_le32(0x20)) -#define aSHARED (__constant_cpu_to_le32(0x80)) -#define aDONTSUBALLOCATE (__constant_cpu_to_le32(1L<<11)) -#define aTRANSACTIONAL (__constant_cpu_to_le32(1L<<12)) -#define aPURGE (__constant_cpu_to_le32(1L<<16)) -#define aRENAMEINHIBIT (__constant_cpu_to_le32(1L<<17)) -#define aDELETEINHIBIT (__constant_cpu_to_le32(1L<<18)) -#define aDONTCOMPRESS (__constant_cpu_to_le32(1L<<27)) +#define aRONLY (__cpu_to_le32(1)) +#define aHIDDEN (__cpu_to_le32(2)) +#define aSYSTEM (__cpu_to_le32(4)) +#define aEXECUTE (__cpu_to_le32(8)) +#define aDIR (__cpu_to_le32(0x10)) +#define aARCH (__cpu_to_le32(0x20)) +#define aSHARED (__cpu_to_le32(0x80)) +#define aDONTSUBALLOCATE (__cpu_to_le32(1L<<11)) +#define aTRANSACTIONAL (__cpu_to_le32(1L<<12)) +#define aPURGE (__cpu_to_le32(1L<<16)) +#define aRENAMEINHIBIT (__cpu_to_le32(1L<<17)) +#define aDELETEINHIBIT (__cpu_to_le32(1L<<18)) +#define aDONTCOMPRESS (__cpu_to_le32(1L<<27)) #endif /* _NCP_NO */ diff --git a/include/linux/neighbour.h b/include/linux/neighbour.h index bd3bbf668cd..12c9de13845 100644 --- a/include/linux/neighbour.h +++ b/include/linux/neighbour.h @@ -1,6 +1,7 @@ #ifndef __LINUX_NEIGHBOUR_H #define __LINUX_NEIGHBOUR_H +#include <linux/types.h> #include <linux/netlink.h> struct ndmsg @@ -30,6 +31,7 @@ enum * Neighbor Cache Entry Flags */ +#define NTF_USE 0x01 #define NTF_PROXY 0x08 /* == ATF_PUBL */ #define NTF_ROUTER 0x80 diff --git a/include/linux/net.h b/include/linux/net.h index 4515efae4c3..9040a10584f 100644 --- a/include/linux/net.h +++ b/include/linux/net.h @@ -57,6 +57,7 @@ typedef enum { #include <linux/random.h> #include <linux/wait.h> #include <linux/fcntl.h> /* For O_CLOEXEC and O_NONBLOCK */ +#include <linux/kmemcheck.h> struct poll_table_struct; struct pipe_inode_info; @@ -127,13 +128,21 @@ enum sock_shutdown_cmd { */ struct socket { socket_state state; + + kmemcheck_bitfield_begin(type); short type; + kmemcheck_bitfield_end(type); + unsigned long flags; - const struct proto_ops *ops; + /* + * Please keep fasync_list & wait fields in the same cache line + */ struct fasync_struct *fasync_list; + wait_queue_head_t wait; + struct file *file; struct sock *sk; - wait_queue_head_t wait; + const struct proto_ops *ops; }; struct vm_area_struct; diff --git a/include/linux/net_dropmon.h b/include/linux/net_dropmon.h new file mode 100644 index 00000000000..2a739462cae --- /dev/null +++ b/include/linux/net_dropmon.h @@ -0,0 +1,64 @@ +#ifndef __NET_DROPMON_H +#define __NET_DROPMON_H + +#include <linux/types.h> +#include <linux/netlink.h> + +struct net_dm_drop_point { + __u8 pc[8]; + __u32 count; +}; + +#define is_drop_point_hw(x) do {\ + int ____i, ____j = 0;\ + for (____i = 0; ____i < 8; ____i++)\ + ____j |= x[____i];\ + ____j;\ +} while (0) + +#define NET_DM_CFG_VERSION 0 +#define NET_DM_CFG_ALERT_COUNT 1 +#define NET_DM_CFG_ALERT_DELAY 2 +#define NET_DM_CFG_MAX 3 + +struct net_dm_config_entry { + __u32 type; + __u64 data 
__attribute__((aligned(8))); +}; + +struct net_dm_config_msg { + __u32 entries; + struct net_dm_config_entry options[0]; +}; + +struct net_dm_alert_msg { + __u32 entries; + struct net_dm_drop_point points[0]; +}; + +struct net_dm_user_msg { + union { + struct net_dm_config_msg user; + struct net_dm_alert_msg alert; + } u; +}; + + +/* These are the netlink message types for this protocol */ + +enum { + NET_DM_CMD_UNSPEC = 0, + NET_DM_CMD_ALERT, + NET_DM_CMD_CONFIG, + NET_DM_CMD_START, + NET_DM_CMD_STOP, + _NET_DM_CMD_MAX, +}; + +#define NET_DM_CMD_MAX (_NET_DM_CMD_MAX - 1) + +/* + * Our group identifiers + */ +#define NET_DM_GRP_ALERT 1 +#endif diff --git a/include/linux/net_tstamp.h b/include/linux/net_tstamp.h new file mode 100644 index 00000000000..a3b8546354a --- /dev/null +++ b/include/linux/net_tstamp.h @@ -0,0 +1,104 @@ +/* + * Userspace API for hardware time stamping of network packets + * + * Copyright (C) 2008,2009 Intel Corporation + * Author: Patrick Ohly <patrick.ohly@intel.com> + * + */ + +#ifndef _NET_TIMESTAMPING_H +#define _NET_TIMESTAMPING_H + +#include <linux/socket.h> /* for SO_TIMESTAMPING */ + +/* SO_TIMESTAMPING gets an integer bit field comprised of these values */ +enum { + SOF_TIMESTAMPING_TX_HARDWARE = (1<<0), + SOF_TIMESTAMPING_TX_SOFTWARE = (1<<1), + SOF_TIMESTAMPING_RX_HARDWARE = (1<<2), + SOF_TIMESTAMPING_RX_SOFTWARE = (1<<3), + SOF_TIMESTAMPING_SOFTWARE = (1<<4), + SOF_TIMESTAMPING_SYS_HARDWARE = (1<<5), + SOF_TIMESTAMPING_RAW_HARDWARE = (1<<6), + SOF_TIMESTAMPING_MASK = + (SOF_TIMESTAMPING_RAW_HARDWARE - 1) | + SOF_TIMESTAMPING_RAW_HARDWARE +}; + +/** + * struct hwtstamp_config - %SIOCSHWTSTAMP parameter + * + * @flags: no flags defined right now, must be zero + * @tx_type: one of HWTSTAMP_TX_* + * @rx_type: one of one of HWTSTAMP_FILTER_* + * + * %SIOCSHWTSTAMP expects a &struct ifreq with a ifr_data pointer to + * this structure. dev_ifsioc() in the kernel takes care of the + * translation between 32 bit userspace and 64 bit kernel. The + * structure is intentionally chosen so that it has the same layout on + * 32 and 64 bit systems, don't break this! + */ +struct hwtstamp_config { + int flags; + int tx_type; + int rx_filter; +}; + +/* possible values for hwtstamp_config->tx_type */ +enum { + /* + * No outgoing packet will need hardware time stamping; + * should a packet arrive which asks for it, no hardware + * time stamping will be done. + */ + HWTSTAMP_TX_OFF, + + /* + * Enables hardware time stamping for outgoing packets; + * the sender of the packet decides which are to be + * time stamped by setting %SOF_TIMESTAMPING_TX_SOFTWARE + * before sending the packet. 
+ */ + HWTSTAMP_TX_ON, +}; + +/* possible values for hwtstamp_config->rx_filter */ +enum { + /* time stamp no incoming packet at all */ + HWTSTAMP_FILTER_NONE, + + /* time stamp any incoming packet */ + HWTSTAMP_FILTER_ALL, + + /* return value: time stamp all packets requested plus some others */ + HWTSTAMP_FILTER_SOME, + + /* PTP v1, UDP, any kind of event packet */ + HWTSTAMP_FILTER_PTP_V1_L4_EVENT, + /* PTP v1, UDP, Sync packet */ + HWTSTAMP_FILTER_PTP_V1_L4_SYNC, + /* PTP v1, UDP, Delay_req packet */ + HWTSTAMP_FILTER_PTP_V1_L4_DELAY_REQ, + /* PTP v2, UDP, any kind of event packet */ + HWTSTAMP_FILTER_PTP_V2_L4_EVENT, + /* PTP v2, UDP, Sync packet */ + HWTSTAMP_FILTER_PTP_V2_L4_SYNC, + /* PTP v2, UDP, Delay_req packet */ + HWTSTAMP_FILTER_PTP_V2_L4_DELAY_REQ, + + /* 802.AS1, Ethernet, any kind of event packet */ + HWTSTAMP_FILTER_PTP_V2_L2_EVENT, + /* 802.AS1, Ethernet, Sync packet */ + HWTSTAMP_FILTER_PTP_V2_L2_SYNC, + /* 802.AS1, Ethernet, Delay_req packet */ + HWTSTAMP_FILTER_PTP_V2_L2_DELAY_REQ, + + /* PTP v2/802.AS1, any layer, any kind of event packet */ + HWTSTAMP_FILTER_PTP_V2_EVENT, + /* PTP v2/802.AS1, any layer, Sync packet */ + HWTSTAMP_FILTER_PTP_V2_SYNC, + /* PTP v2/802.AS1, any layer, Delay_req packet */ + HWTSTAMP_FILTER_PTP_V2_DELAY_REQ, +}; + +#endif /* _NET_TIMESTAMPING_H */ diff --git a/include/linux/netdevice.h b/include/linux/netdevice.h index ec54785d34f..f46db6c7a73 100644 --- a/include/linux/netdevice.h +++ b/include/linux/netdevice.h @@ -32,15 +32,18 @@ #ifdef __KERNEL__ #include <linux/timer.h> #include <linux/delay.h> +#include <linux/mm.h> #include <asm/atomic.h> #include <asm/cache.h> #include <asm/byteorder.h> #include <linux/device.h> #include <linux/percpu.h> +#include <linux/rculist.h> #include <linux/dmaengine.h> #include <linux/workqueue.h> +#include <linux/ethtool.h> #include <net/net_namespace.h> #include <net/dsa.h> #ifdef CONFIG_DCB @@ -48,7 +51,6 @@ #endif struct vlan_group; -struct ethtool_ops; struct netpoll_info; /* 802.11 specific */ struct wireless_dev; @@ -70,10 +72,6 @@ struct wireless_dev; /* Backlog congestion levels */ #define NET_RX_SUCCESS 0 /* keep 'em coming, baby */ #define NET_RX_DROP 1 /* packet dropped */ -#define NET_RX_CN_LOW 2 /* storm alert, just in case */ -#define NET_RX_CN_MOD 3 /* Storm on its way! */ -#define NET_RX_CN_HIGH 4 /* The storm is here */ -#define NET_RX_BAD 5 /* packet dropped due to kernel error */ /* NET_XMIT_CN is special. It does not guarantee that this packet is lost. It * indicates that the device will soon be dropping packets, or already drops @@ -81,29 +79,31 @@ struct wireless_dev; #define net_xmit_eval(e) ((e) == NET_XMIT_CN? 0 : (e)) #define net_xmit_errno(e) ((e) != NET_XMIT_CN ? -ENOBUFS : 0) +/* Driver transmit return codes */ +enum netdev_tx { + NETDEV_TX_OK = 0, /* driver took care of packet */ + NETDEV_TX_BUSY, /* driver tx path was busy*/ + NETDEV_TX_LOCKED = -1, /* driver tx lock was already taken */ +}; +typedef enum netdev_tx netdev_tx_t; + #endif #define MAX_ADDR_LEN 32 /* Largest hardware address length */ -/* Driver transmit return codes */ -#define NETDEV_TX_OK 0 /* driver took care of packet */ -#define NETDEV_TX_BUSY 1 /* driver tx path was busy*/ -#define NETDEV_TX_LOCKED -1 /* driver tx lock was already taken */ - #ifdef __KERNEL__ - /* * Compute the worst case header length according to the protocols * used. 
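The SIOCSHWTSTAMP ioctl and the SO_TIMESTAMPING socket option defined by net_tstamp.h above are normally used together from userspace: configure the interface once, then opt the socket into the time stamps it should report. A minimal sketch, assuming a hypothetical interface name "eth0" and omitting error handling:

    #include <string.h>
    #include <sys/ioctl.h>
    #include <sys/socket.h>
    #include <net/if.h>
    #include <linux/sockios.h>      /* SIOCSHWTSTAMP */
    #include <linux/net_tstamp.h>   /* struct hwtstamp_config, SOF_* flags */

    /* Enable hardware TX/RX time stamping on one interface, then ask the
     * socket to deliver raw hardware time stamps. */
    static int enable_hw_timestamps(int sock)
    {
        struct ifreq ifr;
        struct hwtstamp_config cfg;
        int flags;

        memset(&cfg, 0, sizeof(cfg));
        cfg.flags = 0;                           /* must be zero */
        cfg.tx_type = HWTSTAMP_TX_ON;
        cfg.rx_filter = HWTSTAMP_FILTER_ALL;

        memset(&ifr, 0, sizeof(ifr));
        strncpy(ifr.ifr_name, "eth0", IFNAMSIZ - 1);  /* hypothetical device */
        ifr.ifr_data = (void *)&cfg;
        if (ioctl(sock, SIOCSHWTSTAMP, &ifr) < 0)
            return -1;

        flags = SOF_TIMESTAMPING_TX_HARDWARE |
                SOF_TIMESTAMPING_RX_HARDWARE |
                SOF_TIMESTAMPING_RAW_HARDWARE;
        return setsockopt(sock, SOL_SOCKET, SO_TIMESTAMPING,
                          &flags, sizeof(flags));
    }
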
*/ - + #if defined(CONFIG_WLAN_80211) || defined(CONFIG_AX25) || defined(CONFIG_AX25_MODULE) # if defined(CONFIG_MAC80211_MESH) # define LL_MAX_HEADER 128 # else # define LL_MAX_HEADER 96 # endif -#elif defined(CONFIG_TR) +#elif defined(CONFIG_TR) || defined(CONFIG_TR_MODULE) # define LL_MAX_HEADER 48 #else # define LL_MAX_HEADER 32 @@ -124,7 +124,7 @@ struct wireless_dev; * Network device statistics. Akin to the 2.0 ether stats but * with byte counters. */ - + struct net_device_stats { unsigned long rx_packets; /* total packets received */ @@ -209,6 +209,24 @@ struct dev_addr_list #define dmi_users da_users #define dmi_gusers da_gusers +struct netdev_hw_addr { + struct list_head list; + unsigned char addr[MAX_ADDR_LEN]; + unsigned char type; +#define NETDEV_HW_ADDR_T_LAN 1 +#define NETDEV_HW_ADDR_T_SAN 2 +#define NETDEV_HW_ADDR_T_SLAVE 3 +#define NETDEV_HW_ADDR_T_UNICAST 4 + int refcount; + bool synced; + struct rcu_head rcu_head; +}; + +struct netdev_hw_addr_list { + struct list_head list; + int count; +}; + struct hh_cache { struct hh_cache *hh_next; /* Next entry */ @@ -285,7 +303,7 @@ enum netdev_state_t /* * This structure holds at boot time configured netdevice settings. They - * are then used in the device probing. + * are then used in the device probing. */ struct netdev_boot_setup { char name[IFNAMSIZ]; @@ -314,6 +332,9 @@ struct napi_struct { spinlock_t poll_lock; int poll_owner; #endif + + unsigned int gro_count; + struct net_device *dev; struct list_head dev_list; struct sk_buff *gro_list; @@ -327,6 +348,14 @@ enum NAPI_STATE_NPSVC, /* Netpoll - don't dequeue from poll_list */ }; +enum { + GRO_MERGED, + GRO_MERGED_FREE, + GRO_HELD, + GRO_NORMAL, + GRO_DROP, +}; + extern void __napi_schedule(struct napi_struct *n); static inline int napi_disable_pending(struct napi_struct *n) @@ -435,12 +464,25 @@ enum netdev_queue_state_t }; struct netdev_queue { +/* + * read mostly part + */ struct net_device *dev; struct Qdisc *qdisc; unsigned long state; - spinlock_t _xmit_lock; - int xmit_lock_owner; struct Qdisc *qdisc_sleeping; +/* + * write mostly part + */ + spinlock_t _xmit_lock ____cacheline_aligned_in_smp; + int xmit_lock_owner; + /* + * please use this field instead of dev->trans_start + */ + unsigned long trans_start; + unsigned long tx_bytes; + unsigned long tx_packets; + unsigned long tx_dropped; } ____cacheline_aligned_in_smp; @@ -467,9 +509,11 @@ struct netdev_queue { * This function is called when network device transistions to the down * state. * - * int (*ndo_start_xmit)(struct sk_buff *skb, struct net_device *dev); + * netdev_tx_t (*ndo_start_xmit)(struct sk_buff *skb, + * struct net_device *dev); * Called when a packet needs to be transmitted. - * Must return NETDEV_TX_OK , NETDEV_TX_BUSY, or NETDEV_TX_LOCKED, + * Must return NETDEV_TX_OK , NETDEV_TX_BUSY. + * (can also return NETDEV_TX_LOCKED iff NETIF_F_LLTX) * Required can not be NULL. * * u16 (*ndo_select_queue)(struct net_device *dev, struct sk_buff *skb); @@ -488,7 +532,7 @@ struct netdev_queue { * * int (*ndo_set_mac_address)(struct net_device *dev, void *addr); * This function is called when the Media Access Control address - * needs to be changed. If not this interface is not defined, the + * needs to be changed. If this interface is not defined, the * mac address can not be changed. 
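With the netdev_tx_t type introduced above, a driver's transmit hook returns the enum rather than a bare int: consume the skb and return NETDEV_TX_OK, or leave it untouched and return NETDEV_TX_BUSY so the core requeues it. A sketch of that contract; the foo_* names and foo_priv structure are hypothetical, not part of this header:

    /* Hypothetical driver transmit routine illustrating the netdev_tx_t
     * return contract documented above. */
    static netdev_tx_t foo_start_xmit(struct sk_buff *skb, struct net_device *dev)
    {
        struct foo_priv *priv = netdev_priv(dev);

        if (!foo_tx_ring_has_room(priv)) {
            netif_stop_queue(dev);
            return NETDEV_TX_BUSY;   /* skb is not freed here */
        }

        foo_hw_queue_skb(priv, skb); /* hardware now owns the skb */
        dev->stats.tx_packets++;
        dev->stats.tx_bytes += skb->len;
        return NETDEV_TX_OK;
    }

    static const struct net_device_ops foo_netdev_ops = {
        .ndo_start_xmit = foo_start_xmit,
        /* ... */
    };
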
* * int (*ndo_validate_addr)(struct net_device *dev); @@ -540,7 +584,7 @@ struct net_device_ops { void (*ndo_uninit)(struct net_device *dev); int (*ndo_open)(struct net_device *dev); int (*ndo_stop)(struct net_device *dev); - int (*ndo_start_xmit) (struct sk_buff *skb, + netdev_tx_t (*ndo_start_xmit) (struct sk_buff *skb, struct net_device *dev); u16 (*ndo_select_queue)(struct net_device *dev, struct sk_buff *skb); @@ -582,6 +626,16 @@ struct net_device_ops { #define HAVE_NETDEV_POLL void (*ndo_poll_controller)(struct net_device *dev); #endif +#if defined(CONFIG_FCOE) || defined(CONFIG_FCOE_MODULE) + int (*ndo_fcoe_enable)(struct net_device *dev); + int (*ndo_fcoe_disable)(struct net_device *dev); + int (*ndo_fcoe_ddp_setup)(struct net_device *dev, + u16 xid, + struct scatterlist *sgl, + unsigned int sgc); + int (*ndo_fcoe_ddp_done)(struct net_device *dev, + u16 xid); +#endif }; /* @@ -650,14 +704,20 @@ struct net_device #define NETIF_F_GRO 16384 /* Generic receive offload */ #define NETIF_F_LRO 32768 /* large receive offload */ +/* the GSO_MASK reserves bits 16 through 23 */ +#define NETIF_F_FCOE_CRC (1 << 24) /* FCoE CRC32 */ +#define NETIF_F_SCTP_CSUM (1 << 25) /* SCTP checksum offload */ +#define NETIF_F_FCOE_MTU (1 << 26) /* Supports max FCoE MTU, 2158 bytes*/ + /* Segmentation offload features */ #define NETIF_F_GSO_SHIFT 16 -#define NETIF_F_GSO_MASK 0xffff0000 +#define NETIF_F_GSO_MASK 0x00ff0000 #define NETIF_F_TSO (SKB_GSO_TCPV4 << NETIF_F_GSO_SHIFT) #define NETIF_F_UFO (SKB_GSO_UDP << NETIF_F_GSO_SHIFT) #define NETIF_F_GSO_ROBUST (SKB_GSO_DODGY << NETIF_F_GSO_SHIFT) #define NETIF_F_TSO_ECN (SKB_GSO_TCP_ECN << NETIF_F_GSO_SHIFT) #define NETIF_F_TSO6 (SKB_GSO_TCPV6 << NETIF_F_GSO_SHIFT) +#define NETIF_F_FSO (SKB_GSO_FCOE << NETIF_F_GSO_SHIFT) /* List of features with software fallbacks. 
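The narrowed NETIF_F_GSO_MASK now reserves only bits 16-23, which is what frees bits 24-26 for the FCoE CRC, SCTP checksum and FCoE MTU flags above, while NETIF_F_FSO still lives inside the GSO window. A hedged sketch of how a driver that supports these offloads might advertise them (foo_set_features is illustrative only):

    /* Illustrative: advertising the new offload bits from a probe path.
     * NETIF_F_FSO sits inside NETIF_F_GSO_MASK; the FCoE CRC/MTU bits
     * live above the mask. */
    static void foo_set_features(struct net_device *netdev)
    {
        netdev->features |= NETIF_F_SG | NETIF_F_HW_CSUM;
        netdev->features |= NETIF_F_FSO | NETIF_F_FCOE_CRC | NETIF_F_FCOE_MTU;

        /* Compile-time sanity check: FSO must stay within the GSO window. */
        BUILD_BUG_ON(NETIF_F_FSO & ~NETIF_F_GSO_MASK);
    }
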
*/ #define NETIF_F_GSO_SOFTWARE (NETIF_F_TSO | NETIF_F_TSO_ECN | NETIF_F_TSO6) @@ -724,10 +784,10 @@ struct net_device unsigned char addr_len; /* hardware address length */ unsigned short dev_id; /* for shared network cards */ - spinlock_t addr_list_lock; - struct dev_addr_list *uc_list; /* Secondary unicast mac addresses */ - int uc_count; /* Number of installed ucasts */ + struct netdev_hw_addr_list uc; /* Secondary unicast + mac addresses */ int uc_promisc; + spinlock_t addr_list_lock; struct dev_addr_list *mc_list; /* Multicast mac addresses */ int mc_count; /* Number of installed mcasts */ unsigned int promiscuity; @@ -740,7 +800,7 @@ struct net_device void *dsa_ptr; /* dsa specific data */ #endif void *atalk_ptr; /* AppleTalk link */ - void *ip_ptr; /* IPv4 specific data */ + void *ip_ptr; /* IPv4 specific data */ void *dn_ptr; /* DECnet specific data */ void *ip6_ptr; /* IPv6 specific data */ void *ec_ptr; /* Econet specific data */ @@ -753,8 +813,12 @@ struct net_device */ unsigned long last_rx; /* Time of last Rx */ /* Interface address info used in eth_type_trans() */ - unsigned char dev_addr[MAX_ADDR_LEN]; /* hw address, (before bcast - because most packets are unicast) */ + unsigned char *dev_addr; /* hw address, (before bcast + because most packets are + unicast) */ + + struct netdev_hw_addr_list dev_addrs; /* list of device + hw addresses */ unsigned char broadcast[MAX_ADDR_LEN]; /* hw bcast add */ @@ -768,12 +832,20 @@ struct net_device /* Number of TX queues currently active in device */ unsigned int real_num_tx_queues; + /* root qdisc from userspace point of view */ + struct Qdisc *qdisc; + unsigned long tx_queue_len; /* Max frames per queue allowed */ spinlock_t tx_global_lock; /* * One part is mostly used on xmit path (device) */ /* These may be needed for future network-power-down code. */ + + /* + * trans_start here is expensive for high speed devices on SMP, + * please use netdev_queue->trans_start instead. 
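The note above redirects drivers from the shared dev->trans_start word to the per-queue field; the dev_trans_start() accessor and txq_trans_update() helper added further down in this header are the intended interface. A sketch of the pattern, with foo_* names hypothetical:

    /* In a multiqueue transmit path, stamp the queue that carried the skb
     * instead of the shared dev->trans_start word. */
    static netdev_tx_t foo_xmit(struct sk_buff *skb, struct net_device *dev)
    {
        struct netdev_queue *txq =
            netdev_get_tx_queue(dev, skb_get_queue_mapping(skb));

        /* ... hand the skb to hardware ... */
        txq->trans_start = jiffies;
        return NETDEV_TX_OK;
    }

    /* A watchdog-style check can still look at the device as a whole: */
    static bool foo_tx_stalled(struct net_device *dev)
    {
        return time_after(jiffies,
                          dev_trans_start(dev) + dev->watchdog_timeo);
    }
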
+ */ unsigned long trans_start; /* Time (in jiffies) of last Tx */ int watchdog_timeo; /* used by dev_watchdog() */ @@ -840,48 +912,14 @@ struct net_device struct dcbnl_rtnl_ops *dcbnl_ops; #endif -#ifdef CONFIG_COMPAT_NET_DEV_OPS - struct { - int (*init)(struct net_device *dev); - void (*uninit)(struct net_device *dev); - int (*open)(struct net_device *dev); - int (*stop)(struct net_device *dev); - int (*hard_start_xmit) (struct sk_buff *skb, - struct net_device *dev); - u16 (*select_queue)(struct net_device *dev, - struct sk_buff *skb); - void (*change_rx_flags)(struct net_device *dev, - int flags); - void (*set_rx_mode)(struct net_device *dev); - void (*set_multicast_list)(struct net_device *dev); - int (*set_mac_address)(struct net_device *dev, - void *addr); - int (*validate_addr)(struct net_device *dev); - int (*do_ioctl)(struct net_device *dev, - struct ifreq *ifr, int cmd); - int (*set_config)(struct net_device *dev, - struct ifmap *map); - int (*change_mtu)(struct net_device *dev, int new_mtu); - int (*neigh_setup)(struct net_device *dev, - struct neigh_parms *); - void (*tx_timeout) (struct net_device *dev); - struct net_device_stats* (*get_stats)(struct net_device *dev); - void (*vlan_rx_register)(struct net_device *dev, - struct vlan_group *grp); - void (*vlan_rx_add_vid)(struct net_device *dev, - unsigned short vid); - void (*vlan_rx_kill_vid)(struct net_device *dev, - unsigned short vid); -#ifdef CONFIG_NET_POLL_CONTROLLER - void (*poll_controller)(struct net_device *dev); -#endif - }; +#if defined(CONFIG_FCOE) || defined(CONFIG_FCOE_MODULE) + /* max exchange id for FCoE LRO by ddp */ + unsigned int fcoe_ddp_xid; #endif }; #define to_net_dev(d) container_of(d, struct net_device, dev) #define NETDEV_ALIGN 32 -#define NETDEV_ALIGN_CONST (NETDEV_ALIGN - 1) static inline struct netdev_queue *netdev_get_tx_queue(const struct net_device *dev, @@ -952,9 +990,7 @@ static inline bool netdev_uses_trailer_tags(struct net_device *dev) */ static inline void *netdev_priv(const struct net_device *dev) { - return (char *)dev + ((sizeof(struct net_device) - + NETDEV_ALIGN_CONST) - & ~NETDEV_ALIGN_CONST); + return (char *)dev + ALIGN(sizeof(struct net_device), NETDEV_ALIGN); } /* Set the sysfs physical device reference for the network logical device @@ -962,6 +998,12 @@ static inline void *netdev_priv(const struct net_device *dev) */ #define SET_NETDEV_DEV(net, pdev) ((net)->dev.parent = (pdev)) +/* Set the sysfs device type for the network logical device to allow + * fin grained indentification of different network device types. For + * example Ethernet, Wirelss LAN, Bluetooth, WiMAX etc. + */ +#define SET_NETDEV_DEVTYPE(net, devtype) ((net)->dev.type = (devtype)) + /** * netif_napi_add - initialize a napi context * @dev: network device @@ -984,6 +1026,15 @@ void netif_napi_add(struct net_device *dev, struct napi_struct *napi, void netif_napi_del(struct napi_struct *napi); struct napi_gro_cb { + /* Virtual address of skb_shinfo(skb)->frags[0].page + offset. */ + void *frag0; + + /* Length of frag0. */ + unsigned int frag0_len; + + /* This indicates where we are processing relative to skb->data. */ + int data_offset; + /* This is non-zero if the packet may be of the same flow. 
*/ int same_flow; @@ -1016,14 +1067,6 @@ struct packet_type { struct list_head list; }; -struct napi_gro_fraginfo { - skb_frag_t frags[MAX_SKB_FRAGS]; - unsigned int nr_frags; - unsigned int ip_summed; - unsigned int len; - __wsum csum; -}; - #include <linux/interrupt.h> #include <linux/notifier.h> @@ -1079,6 +1122,7 @@ extern void synchronize_net(void); extern int register_netdevice_notifier(struct notifier_block *nb); extern int unregister_netdevice_notifier(struct notifier_block *nb); extern int init_dummy_netdev(struct net_device *dev); +extern void netdev_resync_ops(struct net_device *dev); extern int call_netdevice_notifiers(unsigned long val, struct net_device *dev); extern struct net_device *dev_get_by_index(struct net *net, int ifindex); @@ -1087,6 +1131,54 @@ extern int dev_restart(struct net_device *dev); #ifdef CONFIG_NETPOLL_TRAP extern int netpoll_trap(void); #endif +extern int skb_gro_receive(struct sk_buff **head, + struct sk_buff *skb); +extern void skb_gro_reset_offset(struct sk_buff *skb); + +static inline unsigned int skb_gro_offset(const struct sk_buff *skb) +{ + return NAPI_GRO_CB(skb)->data_offset; +} + +static inline unsigned int skb_gro_len(const struct sk_buff *skb) +{ + return skb->len - NAPI_GRO_CB(skb)->data_offset; +} + +static inline void skb_gro_pull(struct sk_buff *skb, unsigned int len) +{ + NAPI_GRO_CB(skb)->data_offset += len; +} + +static inline void *skb_gro_header_fast(struct sk_buff *skb, + unsigned int offset) +{ + return NAPI_GRO_CB(skb)->frag0 + offset; +} + +static inline int skb_gro_header_hard(struct sk_buff *skb, unsigned int hlen) +{ + return NAPI_GRO_CB(skb)->frag0_len < hlen; +} + +static inline void *skb_gro_header_slow(struct sk_buff *skb, unsigned int hlen, + unsigned int offset) +{ + NAPI_GRO_CB(skb)->frag0 = NULL; + NAPI_GRO_CB(skb)->frag0_len = 0; + return pskb_may_pull(skb, hlen) ? 
skb->data + offset : NULL; +} + +static inline void *skb_gro_mac_header(struct sk_buff *skb) +{ + return NAPI_GRO_CB(skb)->frag0 ?: skb_mac_header(skb); +} + +static inline void *skb_gro_network_header(struct sk_buff *skb) +{ + return (NAPI_GRO_CB(skb)->frag0 ?: skb->data) + + skb_network_offset(skb); +} static inline int dev_hard_header(struct sk_buff *skb, struct net_device *dev, unsigned short type, @@ -1180,7 +1272,7 @@ static inline void netif_tx_wake_queue(struct netdev_queue *dev_queue) { #ifdef CONFIG_NETPOLL_TRAP if (netpoll_trap()) { - clear_bit(__QUEUE_STATE_XOFF, &dev_queue->state); + netif_tx_start_queue(dev_queue); return; } #endif @@ -1286,7 +1378,8 @@ static inline int netif_running(const struct net_device *dev) static inline void netif_start_subqueue(struct net_device *dev, u16 queue_index) { struct netdev_queue *txq = netdev_get_tx_queue(dev, queue_index); - clear_bit(__QUEUE_STATE_XOFF, &txq->state); + + netif_tx_start_queue(txq); } /** @@ -1303,7 +1396,7 @@ static inline void netif_stop_subqueue(struct net_device *dev, u16 queue_index) if (netpoll_trap()) return; #endif - set_bit(__QUEUE_STATE_XOFF, &txq->state); + netif_tx_stop_queue(txq); } /** @@ -1317,7 +1410,8 @@ static inline int __netif_subqueue_stopped(const struct net_device *dev, u16 queue_index) { struct netdev_queue *txq = netdev_get_tx_queue(dev, queue_index); - return test_bit(__QUEUE_STATE_XOFF, &txq->state); + + return netif_tx_queue_stopped(txq); } static inline int netif_subqueue_stopped(const struct net_device *dev, @@ -1375,14 +1469,23 @@ extern int netif_receive_skb(struct sk_buff *skb); extern void napi_gro_flush(struct napi_struct *napi); extern int dev_gro_receive(struct napi_struct *napi, struct sk_buff *skb); +extern int napi_skb_finish(int ret, struct sk_buff *skb); extern int napi_gro_receive(struct napi_struct *napi, struct sk_buff *skb); extern void napi_reuse_skb(struct napi_struct *napi, struct sk_buff *skb); -extern struct sk_buff * napi_fraginfo_skb(struct napi_struct *napi, - struct napi_gro_fraginfo *info); -extern int napi_gro_frags(struct napi_struct *napi, - struct napi_gro_fraginfo *info); +extern struct sk_buff * napi_get_frags(struct napi_struct *napi); +extern int napi_frags_finish(struct napi_struct *napi, + struct sk_buff *skb, int ret); +extern struct sk_buff * napi_frags_skb(struct napi_struct *napi); +extern int napi_gro_frags(struct napi_struct *napi); + +static inline void napi_free_frags(struct napi_struct *napi) +{ + kfree_skb(napi->skb); + napi->skb = NULL; +} + extern void netif_nit_deliver(struct sk_buff *skb); extern int dev_valid_name(const char *name); extern int dev_ioctl(struct net *net, unsigned int cmd, void __user *); @@ -1449,6 +1552,8 @@ static inline int netif_carrier_ok(const struct net_device *dev) return !test_bit(__LINK_STATE_NOCARRIER, &dev->state); } +extern unsigned long dev_trans_start(struct net_device *dev); + extern void __netdev_watchdog_up(struct net_device *dev); extern void netif_carrier_on(struct net_device *dev); @@ -1574,56 +1679,6 @@ static inline u32 netif_msg_init(int debug_value, int default_msg_enable_bits) return (1 << debug_value) - 1; } -/* Test if receive needs to be scheduled but only if up */ -static inline int netif_rx_schedule_prep(struct napi_struct *napi) -{ - return napi_schedule_prep(napi); -} - -/* Add interface to tail of rx poll list. This assumes that _prep has - * already been called and returned 1. 
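The skb_gro_header_fast()/skb_gro_header_hard()/skb_gro_header_slow() trio added above is meant to be used in exactly that fast-path/slow-path order by protocol gro_receive handlers. A condensed sketch of the pattern; the foo_gro_receive handler and struct foohdr are hypothetical, and flow matching is elided:

    /* Typical header access from a gro_receive handler: try the frag0
     * fast path first, fall back to pulling linear data only if needed. */
    static struct sk_buff **foo_gro_receive(struct sk_buff **head,
                                            struct sk_buff *skb)
    {
        struct foohdr *fh;
        unsigned int hlen, off;

        off = skb_gro_offset(skb);
        hlen = off + sizeof(*fh);
        fh = skb_gro_header_fast(skb, off);
        if (skb_gro_header_hard(skb, hlen)) {
            fh = skb_gro_header_slow(skb, hlen, off);
            if (unlikely(!fh))
                return NULL;          /* header not available */
        }

        skb_gro_pull(skb, sizeof(*fh));   /* advance past our header */
        /* ... flow matching against 'head' would go here ... */
        return NULL;
    }
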
- */ -static inline void __netif_rx_schedule(struct napi_struct *napi) -{ - __napi_schedule(napi); -} - -/* Try to reschedule poll. Called by irq handler. */ - -static inline void netif_rx_schedule(struct napi_struct *napi) -{ - if (netif_rx_schedule_prep(napi)) - __netif_rx_schedule(napi); -} - -/* Try to reschedule poll. Called by dev->poll() after netif_rx_complete(). */ -static inline int netif_rx_reschedule(struct napi_struct *napi) -{ - if (napi_schedule_prep(napi)) { - __netif_rx_schedule(napi); - return 1; - } - return 0; -} - -/* same as netif_rx_complete, except that local_irq_save(flags) - * has already been issued - */ -static inline void __netif_rx_complete(struct napi_struct *napi) -{ - __napi_complete(napi); -} - -/* Remove interface from poll list: it must be in the poll list - * on current cpu. This primitive is called by dev->poll(), when - * it completes the work. The device cannot be out of poll list at this - * moment, it is BUG(). - */ -static inline void netif_rx_complete(struct napi_struct *napi) -{ - napi_complete(napi); -} - static inline void __netif_tx_lock(struct netdev_queue *txq, int cpu) { spin_lock(&txq->_xmit_lock); @@ -1656,6 +1711,12 @@ static inline void __netif_tx_unlock_bh(struct netdev_queue *txq) spin_unlock_bh(&txq->_xmit_lock); } +static inline void txq_trans_update(struct netdev_queue *txq) +{ + if (txq->xmit_lock_owner != -1) + txq->trans_start = jiffies; +} + /** * netif_tx_lock - grab network device transmit lock * @dev: network device @@ -1702,8 +1763,7 @@ static inline void netif_tx_unlock(struct net_device *dev) * force a schedule. */ clear_bit(__QUEUE_STATE_FROZEN, &txq->state); - if (!test_bit(__QUEUE_STATE_XOFF, &txq->state)) - __netif_schedule(txq->qdisc); + netif_schedule_queue(txq); } spin_unlock(&dev->tx_global_lock); } @@ -1763,6 +1823,13 @@ static inline void netif_addr_unlock_bh(struct net_device *dev) spin_unlock_bh(&dev->addr_list_lock); } +/* + * dev_addrs walker. Should be used only for read access. Call with + * rcu_read_lock held. 
+ */ +#define for_each_dev_addr(dev, ha) \ + list_for_each_entry_rcu(ha, &dev->dev_addrs.list, list) + /* These functions live elsewhere (drivers/net/net_init.c, but related) */ extern void ether_setup(struct net_device *dev); @@ -1775,11 +1842,24 @@ extern struct net_device *alloc_netdev_mq(int sizeof_priv, const char *name, alloc_netdev_mq(sizeof_priv, name, setup, 1) extern int register_netdev(struct net_device *dev); extern void unregister_netdev(struct net_device *dev); + +/* Functions used for device addresses handling */ +extern int dev_addr_add(struct net_device *dev, unsigned char *addr, + unsigned char addr_type); +extern int dev_addr_del(struct net_device *dev, unsigned char *addr, + unsigned char addr_type); +extern int dev_addr_add_multiple(struct net_device *to_dev, + struct net_device *from_dev, + unsigned char addr_type); +extern int dev_addr_del_multiple(struct net_device *to_dev, + struct net_device *from_dev, + unsigned char addr_type); + /* Functions used for secondary unicast and multicast support */ extern void dev_set_rx_mode(struct net_device *dev); extern void __dev_set_rx_mode(struct net_device *dev); -extern int dev_unicast_delete(struct net_device *dev, void *addr, int alen); -extern int dev_unicast_add(struct net_device *dev, void *addr, int alen); +extern int dev_unicast_delete(struct net_device *dev, void *addr); +extern int dev_unicast_add(struct net_device *dev, void *addr); extern int dev_unicast_sync(struct net_device *to, struct net_device *from); extern void dev_unicast_unsync(struct net_device *to, struct net_device *from); extern int dev_mc_delete(struct net_device *dev, void *addr, int alen, int all); @@ -1793,7 +1873,8 @@ extern void __dev_addr_unsync(struct dev_addr_list **to, int *to_count, struct extern int dev_set_promiscuity(struct net_device *dev, int inc); extern int dev_set_allmulti(struct net_device *dev, int inc); extern void netdev_state_change(struct net_device *dev); -extern void netdev_bonding_change(struct net_device *dev); +extern void netdev_bonding_change(struct net_device *dev, + unsigned long event); extern void netdev_features_change(struct net_device *dev); /* Load a device via the kmod */ extern void dev_load(struct net *net, const char *name); @@ -1841,15 +1922,14 @@ static inline int net_gso_ok(int features, int gso_type) static inline int skb_gso_ok(struct sk_buff *skb, int features) { - return net_gso_ok(features, skb_shinfo(skb)->gso_type); + return net_gso_ok(features, skb_shinfo(skb)->gso_type) && + (!skb_has_frags(skb) || (features & NETIF_F_FRAGLIST)); } static inline int netif_needs_gso(struct net_device *dev, struct sk_buff *skb) { return skb_is_gso(skb) && (!skb_gso_ok(skb, dev->features) || - (skb_shinfo(skb)->frag_list && - !(dev->features & NETIF_F_FRAGLIST)) || unlikely(skb->ip_summed != CHECKSUM_PARTIAL)); } @@ -1859,6 +1939,16 @@ static inline void netif_set_gso_max_size(struct net_device *dev, dev->gso_max_size = size; } +static inline void skb_bond_set_mac_by_master(struct sk_buff *skb, + struct net_device *master) +{ + if (skb->pkt_type == PACKET_HOST) { + u16 *dest = (u16 *) eth_hdr(skb)->h_dest; + + memcpy(dest, master->dev_addr, ETH_ALEN); + } +} + /* On bonding slaves other than the currently active slave, suppress * duplicates except for 802.3ad ETH_P_SLOW, alb non-mcast/bcast, and * ARP on active-backup slaves with arp_validate enabled. 
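The new dev_addrs list and the for_each_dev_addr() walker above are read-only under RCU; additions and deletions go through dev_addr_add()/dev_addr_del(), which in practice are called with RTNL held. A short sketch (foo_* names are illustrative):

    /* Walk the device's hardware address list; read side only, under RCU. */
    static void foo_dump_dev_addrs(struct net_device *dev)
    {
        struct netdev_hw_addr *ha;

        rcu_read_lock();
        for_each_dev_addr(dev, ha)
            pr_debug("%s: addr type %d\n", dev->name, ha->type);
        rcu_read_unlock();
    }

    /* Adding a secondary address takes the normal writer path instead. */
    static int foo_add_san_mac(struct net_device *dev, unsigned char *mac)
    {
        return dev_addr_add(dev, mac, NETDEV_HW_ADDR_T_SAN);
    }
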
@@ -1872,9 +1962,17 @@ static inline int skb_bond_should_drop(struct sk_buff *skb) if (master->priv_flags & IFF_MASTER_ARPMON) dev->last_rx = jiffies; + if ((master->priv_flags & IFF_MASTER_ALB) && master->br_port) { + /* Do address unmangle. The local destination address + * will be always the one master has. Provides the right + * functionality in a bridge. + */ + skb_bond_set_mac_by_master(skb, master); + } + if (dev->priv_flags & IFF_SLAVE_INACTIVE) { if ((dev->priv_flags & IFF_SLAVE_NEEDARP) && - skb->protocol == __constant_htons(ETH_P_ARP)) + skb->protocol == __cpu_to_be16(ETH_P_ARP)) return 0; if (master->priv_flags & IFF_MASTER_ALB) { @@ -1883,7 +1981,7 @@ static inline int skb_bond_should_drop(struct sk_buff *skb) return 0; } if (master->priv_flags & IFF_MASTER_8023AD && - skb->protocol == __constant_htons(ETH_P_SLOW)) + skb->protocol == __cpu_to_be16(ETH_P_SLOW)) return 0; return 1; @@ -1893,6 +1991,28 @@ static inline int skb_bond_should_drop(struct sk_buff *skb) } extern struct pernet_operations __net_initdata loopback_net_ops; + +static inline int dev_ethtool_get_settings(struct net_device *dev, + struct ethtool_cmd *cmd) +{ + if (!dev->ethtool_ops || !dev->ethtool_ops->get_settings) + return -EOPNOTSUPP; + return dev->ethtool_ops->get_settings(dev, cmd); +} + +static inline u32 dev_ethtool_get_rx_csum(struct net_device *dev) +{ + if (!dev->ethtool_ops || !dev->ethtool_ops->get_rx_csum) + return 0; + return dev->ethtool_ops->get_rx_csum(dev); +} + +static inline u32 dev_ethtool_get_flags(struct net_device *dev) +{ + if (!dev->ethtool_ops || !dev->ethtool_ops->get_flags) + return 0; + return dev->ethtool_ops->get_flags(dev); +} #endif /* __KERNEL__ */ -#endif /* _LINUX_DEV_H */ +#endif /* _LINUX_NETDEVICE_H */ diff --git a/include/linux/netfilter/Kbuild b/include/linux/netfilter/Kbuild index 5a8af875bce..2aea50399c0 100644 --- a/include/linux/netfilter/Kbuild +++ b/include/linux/netfilter/Kbuild @@ -7,16 +7,21 @@ header-y += xt_CLASSIFY.h header-y += xt_CONNMARK.h header-y += xt_CONNSECMARK.h header-y += xt_DSCP.h +header-y += xt_LED.h header-y += xt_MARK.h header-y += xt_NFLOG.h header-y += xt_NFQUEUE.h header-y += xt_RATEEST.h header-y += xt_SECMARK.h header-y += xt_TCPMSS.h +header-y += xt_TCPOPTSTRIP.h +header-y += xt_TPROXY.h header-y += xt_comment.h header-y += xt_connbytes.h +header-y += xt_connlimit.h header-y += xt_connmark.h header-y += xt_conntrack.h +header-y += xt_cluster.h header-y += xt_dccp.h header-y += xt_dscp.h header-y += xt_esp.h @@ -28,8 +33,10 @@ header-y += xt_limit.h header-y += xt_mac.h header-y += xt_mark.h header-y += xt_multiport.h +header-y += xt_osf.h header-y += xt_owner.h header-y += xt_pkttype.h +header-y += xt_quota.h header-y += xt_rateest.h header-y += xt_realm.h header-y += xt_recent.h @@ -39,6 +46,8 @@ header-y += xt_statistic.h header-y += xt_string.h header-y += xt_tcpmss.h header-y += xt_tcpudp.h +header-y += xt_time.h +header-y += xt_u32.h unifdef-y += nf_conntrack_common.h unifdef-y += nf_conntrack_ftp.h diff --git a/include/linux/netfilter/nf_conntrack_common.h b/include/linux/netfilter/nf_conntrack_common.h index 885cbe28226..a8248ee422b 100644 --- a/include/linux/netfilter/nf_conntrack_common.h +++ b/include/linux/netfilter/nf_conntrack_common.h @@ -75,75 +75,6 @@ enum ip_conntrack_status { IPS_FIXED_TIMEOUT = (1 << IPS_FIXED_TIMEOUT_BIT), }; -/* Connection tracking event bits */ -enum ip_conntrack_events -{ - /* New conntrack */ - IPCT_NEW_BIT = 0, - IPCT_NEW = (1 << IPCT_NEW_BIT), - - /* Expected connection */ - IPCT_RELATED_BIT 
= 1, - IPCT_RELATED = (1 << IPCT_RELATED_BIT), - - /* Destroyed conntrack */ - IPCT_DESTROY_BIT = 2, - IPCT_DESTROY = (1 << IPCT_DESTROY_BIT), - - /* Timer has been refreshed */ - IPCT_REFRESH_BIT = 3, - IPCT_REFRESH = (1 << IPCT_REFRESH_BIT), - - /* Status has changed */ - IPCT_STATUS_BIT = 4, - IPCT_STATUS = (1 << IPCT_STATUS_BIT), - - /* Update of protocol info */ - IPCT_PROTOINFO_BIT = 5, - IPCT_PROTOINFO = (1 << IPCT_PROTOINFO_BIT), - - /* Volatile protocol info */ - IPCT_PROTOINFO_VOLATILE_BIT = 6, - IPCT_PROTOINFO_VOLATILE = (1 << IPCT_PROTOINFO_VOLATILE_BIT), - - /* New helper for conntrack */ - IPCT_HELPER_BIT = 7, - IPCT_HELPER = (1 << IPCT_HELPER_BIT), - - /* Update of helper info */ - IPCT_HELPINFO_BIT = 8, - IPCT_HELPINFO = (1 << IPCT_HELPINFO_BIT), - - /* Volatile helper info */ - IPCT_HELPINFO_VOLATILE_BIT = 9, - IPCT_HELPINFO_VOLATILE = (1 << IPCT_HELPINFO_VOLATILE_BIT), - - /* NAT info */ - IPCT_NATINFO_BIT = 10, - IPCT_NATINFO = (1 << IPCT_NATINFO_BIT), - - /* Counter highest bit has been set, unused */ - IPCT_COUNTER_FILLING_BIT = 11, - IPCT_COUNTER_FILLING = (1 << IPCT_COUNTER_FILLING_BIT), - - /* Mark is set */ - IPCT_MARK_BIT = 12, - IPCT_MARK = (1 << IPCT_MARK_BIT), - - /* NAT sequence adjustment */ - IPCT_NATSEQADJ_BIT = 13, - IPCT_NATSEQADJ = (1 << IPCT_NATSEQADJ_BIT), - - /* Secmark is set */ - IPCT_SECMARK_BIT = 14, - IPCT_SECMARK = (1 << IPCT_SECMARK_BIT), -}; - -enum ip_conntrack_expect_events { - IPEXP_NEW_BIT = 0, - IPEXP_NEW = (1 << IPEXP_NEW_BIT), -}; - #ifdef __KERNEL__ struct ip_conntrack_stat { diff --git a/include/linux/netfilter/nf_conntrack_tcp.h b/include/linux/netfilter/nf_conntrack_tcp.h index a049df4f223..4352feed237 100644 --- a/include/linux/netfilter/nf_conntrack_tcp.h +++ b/include/linux/netfilter/nf_conntrack_tcp.h @@ -2,6 +2,8 @@ #define _NF_CONNTRACK_TCP_H /* TCP tracking. */ +#include <linux/types.h> + /* This is exposed to userspace (ctnetlink) */ enum tcp_conntrack { TCP_CONNTRACK_NONE, @@ -13,7 +15,8 @@ enum tcp_conntrack { TCP_CONNTRACK_LAST_ACK, TCP_CONNTRACK_TIME_WAIT, TCP_CONNTRACK_CLOSE, - TCP_CONNTRACK_LISTEN, + TCP_CONNTRACK_LISTEN, /* obsolete */ +#define TCP_CONNTRACK_SYN_SENT2 TCP_CONNTRACK_LISTEN TCP_CONNTRACK_MAX, TCP_CONNTRACK_IGNORE }; @@ -33,9 +36,12 @@ enum tcp_conntrack { /* Has unacknowledged data */ #define IP_CT_TCP_FLAG_DATA_UNACKNOWLEDGED 0x10 +/* The field td_maxack has been set */ +#define IP_CT_TCP_FLAG_MAXACK_SET 0x20 + struct nf_ct_tcp_flags { - u_int8_t flags; - u_int8_t mask; + __u8 flags; + __u8 mask; }; #ifdef __KERNEL__ @@ -44,6 +50,7 @@ struct ip_ct_tcp_state { u_int32_t td_end; /* max of seq + len */ u_int32_t td_maxend; /* max of ack + max(win, 1) */ u_int32_t td_maxwin; /* max(win) */ + u_int32_t td_maxack; /* max of ack */ u_int8_t td_scale; /* window scale factor */ u_int8_t flags; /* per direction options */ }; diff --git a/include/linux/netfilter/nfnetlink.h b/include/linux/netfilter/nfnetlink.h index 7d8e0455cca..9f00da287f2 100644 --- a/include/linux/netfilter/nfnetlink.h +++ b/include/linux/netfilter/nfnetlink.h @@ -25,8 +25,8 @@ enum nfnetlink_groups { /* General form of address family dependent message. 
*/ struct nfgenmsg { - u_int8_t nfgen_family; /* AF_xxx */ - u_int8_t version; /* nfnetlink version */ + __u8 nfgen_family; /* AF_xxx */ + __u8 version; /* nfnetlink version */ __be16 res_id; /* resource id */ }; @@ -46,7 +46,8 @@ struct nfgenmsg { #define NFNL_SUBSYS_CTNETLINK_EXP 2 #define NFNL_SUBSYS_QUEUE 3 #define NFNL_SUBSYS_ULOG 4 -#define NFNL_SUBSYS_COUNT 5 +#define NFNL_SUBSYS_OSF 5 +#define NFNL_SUBSYS_COUNT 6 #ifdef __KERNEL__ @@ -57,7 +58,8 @@ struct nfgenmsg { struct nfnl_callback { int (*call)(struct sock *nl, struct sk_buff *skb, - struct nlmsghdr *nlh, struct nlattr *cda[]); + const struct nlmsghdr *nlh, + const struct nlattr * const cda[]); const struct nla_policy *policy; /* netlink attribute policy */ const u_int16_t attr_count; /* number of nlattr's */ }; @@ -75,7 +77,8 @@ extern int nfnetlink_subsys_unregister(const struct nfnetlink_subsystem *n); extern int nfnetlink_has_listeners(unsigned int group); extern int nfnetlink_send(struct sk_buff *skb, u32 pid, unsigned group, - int echo); + int echo, gfp_t flags); +extern void nfnetlink_set_err(u32 pid, u32 group, int error); extern int nfnetlink_unicast(struct sk_buff *skb, u_int32_t pid, int flags); extern void nfnl_lock(void); diff --git a/include/linux/netfilter/nfnetlink_compat.h b/include/linux/netfilter/nfnetlink_compat.h index e1451760c9c..eda55cabcee 100644 --- a/include/linux/netfilter/nfnetlink_compat.h +++ b/include/linux/netfilter/nfnetlink_compat.h @@ -1,5 +1,8 @@ #ifndef _NFNETLINK_COMPAT_H #define _NFNETLINK_COMPAT_H + +#include <linux/types.h> + #ifndef __KERNEL__ /* Old nfnetlink macros for userspace */ @@ -20,8 +23,8 @@ struct nfattr { - u_int16_t nfa_len; - u_int16_t nfa_type; /* we use 15 bits for the type, and the highest + __u16 nfa_len; + __u16 nfa_type; /* we use 15 bits for the type, and the highest * bit to indicate whether the payload is nested */ }; diff --git a/include/linux/netfilter/nfnetlink_conntrack.h b/include/linux/netfilter/nfnetlink_conntrack.h index 29fe9ea1d34..ed4ef8d0b11 100644 --- a/include/linux/netfilter/nfnetlink_conntrack.h +++ b/include/linux/netfilter/nfnetlink_conntrack.h @@ -100,6 +100,8 @@ enum ctattr_protoinfo_tcp { enum ctattr_protoinfo_dccp { CTA_PROTOINFO_DCCP_UNSPEC, CTA_PROTOINFO_DCCP_STATE, + CTA_PROTOINFO_DCCP_ROLE, + CTA_PROTOINFO_DCCP_HANDSHAKE_SEQ, __CTA_PROTOINFO_DCCP_MAX, }; #define CTA_PROTOINFO_DCCP_MAX (__CTA_PROTOINFO_DCCP_MAX - 1) diff --git a/include/linux/netfilter/nfnetlink_log.h b/include/linux/netfilter/nfnetlink_log.h index f661731f3cb..d3bab7a2c9b 100644 --- a/include/linux/netfilter/nfnetlink_log.h +++ b/include/linux/netfilter/nfnetlink_log.h @@ -17,14 +17,14 @@ enum nfulnl_msg_types { struct nfulnl_msg_packet_hdr { __be16 hw_protocol; /* hw protocol (network order) */ - u_int8_t hook; /* netfilter hook */ - u_int8_t _pad; + __u8 hook; /* netfilter hook */ + __u8 _pad; }; struct nfulnl_msg_packet_hw { __be16 hw_addrlen; - u_int16_t _pad; - u_int8_t hw_addr[8]; + __u16 _pad; + __u8 hw_addr[8]; }; struct nfulnl_msg_packet_timestamp { @@ -35,12 +35,12 @@ struct nfulnl_msg_packet_timestamp { enum nfulnl_attr_type { NFULA_UNSPEC, NFULA_PACKET_HDR, - NFULA_MARK, /* u_int32_t nfmark */ + NFULA_MARK, /* __u32 nfmark */ NFULA_TIMESTAMP, /* nfulnl_msg_packet_timestamp */ - NFULA_IFINDEX_INDEV, /* u_int32_t ifindex */ - NFULA_IFINDEX_OUTDEV, /* u_int32_t ifindex */ - NFULA_IFINDEX_PHYSINDEV, /* u_int32_t ifindex */ - NFULA_IFINDEX_PHYSOUTDEV, /* u_int32_t ifindex */ + NFULA_IFINDEX_INDEV, /* __u32 ifindex */ + NFULA_IFINDEX_OUTDEV, /* __u32 ifindex */ + 
NFULA_IFINDEX_PHYSINDEV, /* __u32 ifindex */ + NFULA_IFINDEX_PHYSOUTDEV, /* __u32 ifindex */ NFULA_HWADDR, /* nfulnl_msg_packet_hw */ NFULA_PAYLOAD, /* opaque data payload */ NFULA_PREFIX, /* string prefix */ @@ -65,23 +65,23 @@ enum nfulnl_msg_config_cmds { }; struct nfulnl_msg_config_cmd { - u_int8_t command; /* nfulnl_msg_config_cmds */ + __u8 command; /* nfulnl_msg_config_cmds */ } __attribute__ ((packed)); struct nfulnl_msg_config_mode { __be32 copy_range; - u_int8_t copy_mode; - u_int8_t _pad; + __u8 copy_mode; + __u8 _pad; } __attribute__ ((packed)); enum nfulnl_attr_config { NFULA_CFG_UNSPEC, NFULA_CFG_CMD, /* nfulnl_msg_config_cmd */ NFULA_CFG_MODE, /* nfulnl_msg_config_mode */ - NFULA_CFG_NLBUFSIZ, /* u_int32_t buffer size */ - NFULA_CFG_TIMEOUT, /* u_int32_t in 1/100 s */ - NFULA_CFG_QTHRESH, /* u_int32_t */ - NFULA_CFG_FLAGS, /* u_int16_t */ + NFULA_CFG_NLBUFSIZ, /* __u32 buffer size */ + NFULA_CFG_TIMEOUT, /* __u32 in 1/100 s */ + NFULA_CFG_QTHRESH, /* __u32 */ + NFULA_CFG_FLAGS, /* __u16 */ __NFULA_CFG_MAX }; #define NFULA_CFG_MAX (__NFULA_CFG_MAX -1) diff --git a/include/linux/netfilter/nfnetlink_queue.h b/include/linux/netfilter/nfnetlink_queue.h index 83e789633e3..2455fe5f4e0 100644 --- a/include/linux/netfilter/nfnetlink_queue.h +++ b/include/linux/netfilter/nfnetlink_queue.h @@ -15,13 +15,13 @@ enum nfqnl_msg_types { struct nfqnl_msg_packet_hdr { __be32 packet_id; /* unique ID of packet in queue */ __be16 hw_protocol; /* hw protocol (network order) */ - u_int8_t hook; /* netfilter hook */ + __u8 hook; /* netfilter hook */ } __attribute__ ((packed)); struct nfqnl_msg_packet_hw { __be16 hw_addrlen; - u_int16_t _pad; - u_int8_t hw_addr[8]; + __u16 _pad; + __u8 hw_addr[8]; }; struct nfqnl_msg_packet_timestamp { @@ -33,12 +33,12 @@ enum nfqnl_attr_type { NFQA_UNSPEC, NFQA_PACKET_HDR, NFQA_VERDICT_HDR, /* nfqnl_msg_verdict_hrd */ - NFQA_MARK, /* u_int32_t nfmark */ + NFQA_MARK, /* __u32 nfmark */ NFQA_TIMESTAMP, /* nfqnl_msg_packet_timestamp */ - NFQA_IFINDEX_INDEV, /* u_int32_t ifindex */ - NFQA_IFINDEX_OUTDEV, /* u_int32_t ifindex */ - NFQA_IFINDEX_PHYSINDEV, /* u_int32_t ifindex */ - NFQA_IFINDEX_PHYSOUTDEV, /* u_int32_t ifindex */ + NFQA_IFINDEX_INDEV, /* __u32 ifindex */ + NFQA_IFINDEX_OUTDEV, /* __u32 ifindex */ + NFQA_IFINDEX_PHYSINDEV, /* __u32 ifindex */ + NFQA_IFINDEX_PHYSOUTDEV, /* __u32 ifindex */ NFQA_HWADDR, /* nfqnl_msg_packet_hw */ NFQA_PAYLOAD, /* opaque data payload */ @@ -61,8 +61,8 @@ enum nfqnl_msg_config_cmds { }; struct nfqnl_msg_config_cmd { - u_int8_t command; /* nfqnl_msg_config_cmds */ - u_int8_t _pad; + __u8 command; /* nfqnl_msg_config_cmds */ + __u8 _pad; __be16 pf; /* AF_xxx for PF_[UN]BIND */ }; @@ -74,7 +74,7 @@ enum nfqnl_config_mode { struct nfqnl_msg_config_params { __be32 copy_range; - u_int8_t copy_mode; /* enum nfqnl_config_mode */ + __u8 copy_mode; /* enum nfqnl_config_mode */ } __attribute__ ((packed)); @@ -82,7 +82,7 @@ enum nfqnl_attr_config { NFQA_CFG_UNSPEC, NFQA_CFG_CMD, /* nfqnl_msg_config_cmd */ NFQA_CFG_PARAMS, /* nfqnl_msg_config_params */ - NFQA_CFG_QUEUE_MAXLEN, /* u_int32_t */ + NFQA_CFG_QUEUE_MAXLEN, /* __u32 */ __NFQA_CFG_MAX }; #define NFQA_CFG_MAX (__NFQA_CFG_MAX-1) diff --git a/include/linux/netfilter/x_tables.h b/include/linux/netfilter/x_tables.h index c7ee8744d26..812cb153cab 100644 --- a/include/linux/netfilter/x_tables.h +++ b/include/linux/netfilter/x_tables.h @@ -1,6 +1,8 @@ #ifndef _X_TABLES_H #define _X_TABLES_H +#include <linux/types.h> + #define XT_FUNCTION_MAXNAMELEN 30 #define XT_TABLE_MAXNAMELEN 32 @@ 
-8,22 +10,22 @@ struct xt_entry_match { union { struct { - u_int16_t match_size; + __u16 match_size; /* Used by userspace */ char name[XT_FUNCTION_MAXNAMELEN-1]; - u_int8_t revision; + __u8 revision; } user; struct { - u_int16_t match_size; + __u16 match_size; /* Used inside the kernel */ struct xt_match *match; } kernel; /* Total length */ - u_int16_t match_size; + __u16 match_size; } u; unsigned char data[0]; @@ -33,22 +35,22 @@ struct xt_entry_target { union { struct { - u_int16_t target_size; + __u16 target_size; /* Used by userspace */ char name[XT_FUNCTION_MAXNAMELEN-1]; - u_int8_t revision; + __u8 revision; } user; struct { - u_int16_t target_size; + __u16 target_size; /* Used inside the kernel */ struct xt_target *target; } kernel; /* Total length */ - u_int16_t target_size; + __u16 target_size; } u; unsigned char data[0]; @@ -74,7 +76,7 @@ struct xt_get_revision { char name[XT_FUNCTION_MAXNAMELEN-1]; - u_int8_t revision; + __u8 revision; }; /* CONTINUE verdict for targets */ @@ -90,10 +92,10 @@ struct xt_get_revision */ struct _xt_align { - u_int8_t u8; - u_int16_t u16; - u_int32_t u32; - u_int64_t u64; + __u8 u8; + __u16 u16; + __u32 u32; + __u64 u64; }; #define XT_ALIGN(s) (((s) + (__alignof__(struct _xt_align)-1)) \ @@ -109,7 +111,7 @@ struct _xt_align struct xt_counters { - u_int64_t pcnt, bcnt; /* Packet and byte counters */ + __u64 pcnt, bcnt; /* Packet and byte counters */ }; /* The argument to IPT_SO_ADD_COUNTERS. */ @@ -182,9 +184,10 @@ struct xt_counters_info * @matchinfo: per-match data * @fragoff: packet is a fragment, this is the data offset * @thoff: position of transport header relative to skb->data - * @hotdrop: drop packet if we had inspection problems + * @hook: hook number given packet came from * @family: Actual NFPROTO_* through which the function is invoked * (helpful when match->family == NFPROTO_UNSPEC) + * @hotdrop: drop packet if we had inspection problems */ struct xt_match_param { const struct net_device *in, *out; @@ -192,8 +195,9 @@ struct xt_match_param { const void *matchinfo; int fragoff; unsigned int thoff; - bool *hotdrop; + unsigned int hooknum; u_int8_t family; + bool *hotdrop; }; /** @@ -234,9 +238,9 @@ struct xt_mtdtor_param { */ struct xt_target_param { const struct net_device *in, *out; - unsigned int hooknum; const struct xt_target *target; const void *targinfo; + unsigned int hooknum; u_int8_t family; }; @@ -349,23 +353,19 @@ struct xt_table { struct list_head list; - /* A unique name... */ - const char name[XT_TABLE_MAXNAMELEN]; - /* What hooks you will enter on */ unsigned int valid_hooks; - /* Lock for the curtain */ - rwlock_t lock; - /* Man behind the curtain... */ - //struct ip6t_table_info *private; - void *private; + struct xt_table_info *private; /* Set this to THIS_MODULE if you are a module, otherwise NULL */ struct module *me; u_int8_t af; /* address/protocol family */ + + /* A unique name... 
*/ + const char name[XT_TABLE_MAXNAMELEN]; }; #include <linux/netfilter_ipv4.h> @@ -386,7 +386,7 @@ struct xt_table_info /* ipt_entry tables: one per CPU */ /* Note : this field MUST be the last one, see XT_TABLE_INFO_SZ */ - char *entries[1]; + void *entries[1]; }; #define XT_TABLE_INFO_SZ (offsetof(struct xt_table_info, entries) \ @@ -407,7 +407,7 @@ extern int xt_check_target(struct xt_tgchk_param *, unsigned int size, u_int8_t proto, bool inv_proto); extern struct xt_table *xt_register_table(struct net *net, - struct xt_table *table, + const struct xt_table *table, struct xt_table_info *bootstrap, struct xt_table_info *newinfo); extern void *xt_unregister_table(struct xt_table *table); @@ -434,6 +434,97 @@ extern void xt_proto_fini(struct net *net, u_int8_t af); extern struct xt_table_info *xt_alloc_table_info(unsigned int size); extern void xt_free_table_info(struct xt_table_info *info); +/* + * Per-CPU spinlock associated with per-cpu table entries, and + * with a counter for the "reading" side that allows a recursive + * reader to avoid taking the lock and deadlocking. + * + * "reading" is used by ip/arp/ip6 tables rule processing which runs per-cpu. + * It needs to ensure that the rules are not being changed while the packet + * is being processed. In some cases, the read lock will be acquired + * twice on the same CPU; this is okay because of the count. + * + * "writing" is used when reading counters. + * During replace any readers that are using the old tables have to complete + * before freeing the old table. This is handled by the write locking + * necessary for reading the counters. + */ +struct xt_info_lock { + spinlock_t lock; + unsigned char readers; +}; +DECLARE_PER_CPU(struct xt_info_lock, xt_info_locks); + +/* + * Note: we need to ensure that preemption is disabled before acquiring + * the per-cpu-variable, so we do it as a two step process rather than + * using "spin_lock_bh()". + * + * We _also_ need to disable bottom half processing before updating our + * nesting count, to make sure that the only kind of re-entrancy is this + * code being called by itself: since the count+lock is not an atomic + * operation, we can allow no races. + * + * _Only_ that special combination of being per-cpu and never getting + * re-entered asynchronously means that the count is safe. + */ +static inline void xt_info_rdlock_bh(void) +{ + struct xt_info_lock *lock; + + local_bh_disable(); + lock = &__get_cpu_var(xt_info_locks); + if (likely(!lock->readers++)) + spin_lock(&lock->lock); +} + +static inline void xt_info_rdunlock_bh(void) +{ + struct xt_info_lock *lock = &__get_cpu_var(xt_info_locks); + + if (likely(!--lock->readers)) + spin_unlock(&lock->lock); + local_bh_enable(); +} + +/* + * The "writer" side needs to get exclusive access to the lock, + * regardless of readers. This must be called with bottom half + * processing (and thus also preemption) disabled. 
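The recursion-safe reader/writer scheme described in the comments above is used from rule traversal (reader side, per packet) and counter collection (writer side), with the xt_info_wrlock()/xt_info_wrunlock() helpers defined just below. A condensed sketch of both call sites; foo_* names are placeholders and the rule walk itself is elided:

    /* Reader side: per-cpu, recursion-safe, runs in the packet path. */
    static unsigned int foo_do_table(struct sk_buff *skb,
                                     const struct xt_table *table)
    {
        const struct xt_table_info *private;
        unsigned int verdict = NF_ACCEPT;   /* placeholder verdict */

        xt_info_rdlock_bh();
        private = table->private;
        /* ... walk private->entries[smp_processor_id()] rules ... */
        xt_info_rdunlock_bh();

        return verdict;
    }

    /* Writer side: exclusive per-cpu access while summing counters. */
    static void foo_get_counters(const struct xt_table_info *t)
    {
        unsigned int cpu;

        local_bh_disable();
        for_each_possible_cpu(cpu) {
            xt_info_wrlock(cpu);
            /* ... accumulate counters from t->entries[cpu] ... */
            xt_info_wrunlock(cpu);
        }
        local_bh_enable();
    }
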
+ */ +static inline void xt_info_wrlock(unsigned int cpu) +{ + spin_lock(&per_cpu(xt_info_locks, cpu).lock); +} + +static inline void xt_info_wrunlock(unsigned int cpu) +{ + spin_unlock(&per_cpu(xt_info_locks, cpu).lock); +} + +/* + * This helper is performance critical and must be inlined + */ +static inline unsigned long ifname_compare_aligned(const char *_a, + const char *_b, + const char *_mask) +{ + const unsigned long *a = (const unsigned long *)_a; + const unsigned long *b = (const unsigned long *)_b; + const unsigned long *mask = (const unsigned long *)_mask; + unsigned long ret; + + ret = (a[0] ^ b[0]) & mask[0]; + if (IFNAMSIZ > sizeof(unsigned long)) + ret |= (a[1] ^ b[1]) & mask[1]; + if (IFNAMSIZ > 2 * sizeof(unsigned long)) + ret |= (a[2] ^ b[2]) & mask[2]; + if (IFNAMSIZ > 3 * sizeof(unsigned long)) + ret |= (a[3] ^ b[3]) & mask[3]; + BUILD_BUG_ON(IFNAMSIZ > 4 * sizeof(unsigned long)); + return ret; +} + #ifdef CONFIG_COMPAT #include <net/compat.h> diff --git a/include/linux/netfilter/xt_CLASSIFY.h b/include/linux/netfilter/xt_CLASSIFY.h index 58111355255..a813bf14dd6 100644 --- a/include/linux/netfilter/xt_CLASSIFY.h +++ b/include/linux/netfilter/xt_CLASSIFY.h @@ -1,8 +1,10 @@ #ifndef _XT_CLASSIFY_H #define _XT_CLASSIFY_H +#include <linux/types.h> + struct xt_classify_target_info { - u_int32_t priority; + __u32 priority; }; #endif /*_XT_CLASSIFY_H */ diff --git a/include/linux/netfilter/xt_CONNMARK.h b/include/linux/netfilter/xt_CONNMARK.h index 4e58ba43c28..0a854586675 100644 --- a/include/linux/netfilter/xt_CONNMARK.h +++ b/include/linux/netfilter/xt_CONNMARK.h @@ -1,6 +1,8 @@ #ifndef _XT_CONNMARK_H_target #define _XT_CONNMARK_H_target +#include <linux/types.h> + /* Copyright (C) 2002,2004 MARA Systems AB <http://www.marasystems.com> * by Henrik Nordstrom <hno@marasystems.com> * @@ -16,15 +18,9 @@ enum { XT_CONNMARK_RESTORE }; -struct xt_connmark_target_info { - unsigned long mark; - unsigned long mask; - u_int8_t mode; -}; - struct xt_connmark_tginfo1 { - u_int32_t ctmark, ctmask, nfmask; - u_int8_t mode; + __u32 ctmark, ctmask, nfmask; + __u8 mode; }; #endif /*_XT_CONNMARK_H_target*/ diff --git a/include/linux/netfilter/xt_CONNSECMARK.h b/include/linux/netfilter/xt_CONNSECMARK.h index c6bd75469ba..b973ff80fa1 100644 --- a/include/linux/netfilter/xt_CONNSECMARK.h +++ b/include/linux/netfilter/xt_CONNSECMARK.h @@ -1,13 +1,15 @@ #ifndef _XT_CONNSECMARK_H_target #define _XT_CONNSECMARK_H_target +#include <linux/types.h> + enum { CONNSECMARK_SAVE = 1, CONNSECMARK_RESTORE, }; struct xt_connsecmark_target_info { - u_int8_t mode; + __u8 mode; }; #endif /*_XT_CONNSECMARK_H_target */ diff --git a/include/linux/netfilter/xt_DSCP.h b/include/linux/netfilter/xt_DSCP.h index 14da1968e2c..648e0b3bed2 100644 --- a/include/linux/netfilter/xt_DSCP.h +++ b/include/linux/netfilter/xt_DSCP.h @@ -11,15 +11,16 @@ #ifndef _XT_DSCP_TARGET_H #define _XT_DSCP_TARGET_H #include <linux/netfilter/xt_dscp.h> +#include <linux/types.h> /* target info */ struct xt_DSCP_info { - u_int8_t dscp; + __u8 dscp; }; struct xt_tos_target_info { - u_int8_t tos_value; - u_int8_t tos_mask; + __u8 tos_value; + __u8 tos_mask; }; #endif /* _XT_DSCP_TARGET_H */ diff --git a/include/linux/netfilter/xt_LED.h b/include/linux/netfilter/xt_LED.h new file mode 100644 index 00000000000..f5509e7524d --- /dev/null +++ b/include/linux/netfilter/xt_LED.h @@ -0,0 +1,15 @@ +#ifndef _XT_LED_H +#define _XT_LED_H + +#include <linux/types.h> + +struct xt_led_info { + char id[27]; /* Unique ID for this trigger in the LED class */ + __u8 
always_blink; /* Blink even if the LED is already on */ + __u32 delay; /* Delay until LED is switched off after trigger */ + + /* Kernel data used in the module */ + void *internal_data __attribute__((aligned(8))); +}; + +#endif /* _XT_LED_H */ diff --git a/include/linux/netfilter/xt_MARK.h b/include/linux/netfilter/xt_MARK.h index 778b278fd9f..bc9561bdef7 100644 --- a/include/linux/netfilter/xt_MARK.h +++ b/include/linux/netfilter/xt_MARK.h @@ -1,25 +1,10 @@ #ifndef _XT_MARK_H_target #define _XT_MARK_H_target -/* Version 0 */ -struct xt_mark_target_info { - unsigned long mark; -}; - -/* Version 1 */ -enum { - XT_MARK_SET=0, - XT_MARK_AND, - XT_MARK_OR, -}; - -struct xt_mark_target_info_v1 { - unsigned long mark; - u_int8_t mode; -}; +#include <linux/types.h> struct xt_mark_tginfo2 { - u_int32_t mark, mask; + __u32 mark, mask; }; #endif /*_XT_MARK_H_target */ diff --git a/include/linux/netfilter/xt_NFLOG.h b/include/linux/netfilter/xt_NFLOG.h index cdcd0ed58f7..87b58311ce6 100644 --- a/include/linux/netfilter/xt_NFLOG.h +++ b/include/linux/netfilter/xt_NFLOG.h @@ -1,17 +1,19 @@ #ifndef _XT_NFLOG_TARGET #define _XT_NFLOG_TARGET +#include <linux/types.h> + #define XT_NFLOG_DEFAULT_GROUP 0x1 -#define XT_NFLOG_DEFAULT_THRESHOLD 1 +#define XT_NFLOG_DEFAULT_THRESHOLD 0 #define XT_NFLOG_MASK 0x0 struct xt_nflog_info { - u_int32_t len; - u_int16_t group; - u_int16_t threshold; - u_int16_t flags; - u_int16_t pad; + __u32 len; + __u16 group; + __u16 threshold; + __u16 flags; + __u16 pad; char prefix[64]; }; diff --git a/include/linux/netfilter/xt_NFQUEUE.h b/include/linux/netfilter/xt_NFQUEUE.h index 9a9af79f74d..2584f4a777d 100644 --- a/include/linux/netfilter/xt_NFQUEUE.h +++ b/include/linux/netfilter/xt_NFQUEUE.h @@ -8,9 +8,16 @@ #ifndef _XT_NFQ_TARGET_H #define _XT_NFQ_TARGET_H +#include <linux/types.h> + /* target info */ struct xt_NFQ_info { - u_int16_t queuenum; + __u16 queuenum; +}; + +struct xt_NFQ_info_v1 { + __u16 queuenum; + __u16 queues_total; }; #endif /* _XT_NFQ_TARGET_H */ diff --git a/include/linux/netfilter/xt_RATEEST.h b/include/linux/netfilter/xt_RATEEST.h index f79e3133cbe..6605e20ad8c 100644 --- a/include/linux/netfilter/xt_RATEEST.h +++ b/include/linux/netfilter/xt_RATEEST.h @@ -1,10 +1,12 @@ #ifndef _XT_RATEEST_TARGET_H #define _XT_RATEEST_TARGET_H +#include <linux/types.h> + struct xt_rateest_target_info { char name[IFNAMSIZ]; - int8_t interval; - u_int8_t ewma_log; + __s8 interval; + __u8 ewma_log; /* Used internally by the kernel */ struct xt_rateest *est __attribute__((aligned(8))); diff --git a/include/linux/netfilter/xt_SECMARK.h b/include/linux/netfilter/xt_SECMARK.h index c53fbffa997..6fcd3448b18 100644 --- a/include/linux/netfilter/xt_SECMARK.h +++ b/include/linux/netfilter/xt_SECMARK.h @@ -1,6 +1,8 @@ #ifndef _XT_SECMARK_H_target #define _XT_SECMARK_H_target +#include <linux/types.h> + /* * This is intended for use by various security subsystems (but not * at the same time). 
@@ -12,12 +14,12 @@ #define SECMARK_SELCTX_MAX 256 struct xt_secmark_target_selinux_info { - u_int32_t selsid; + __u32 selsid; char selctx[SECMARK_SELCTX_MAX]; }; struct xt_secmark_target_info { - u_int8_t mode; + __u8 mode; union { struct xt_secmark_target_selinux_info sel; } u; diff --git a/include/linux/netfilter/xt_TCPMSS.h b/include/linux/netfilter/xt_TCPMSS.h index 53a292cd47f..9a6960afc13 100644 --- a/include/linux/netfilter/xt_TCPMSS.h +++ b/include/linux/netfilter/xt_TCPMSS.h @@ -1,8 +1,10 @@ #ifndef _XT_TCPMSS_H #define _XT_TCPMSS_H +#include <linux/types.h> + struct xt_tcpmss_info { - u_int16_t mss; + __u16 mss; }; #define XT_TCPMSS_CLAMP_PMTU 0xffff diff --git a/include/linux/netfilter/xt_cluster.h b/include/linux/netfilter/xt_cluster.h new file mode 100644 index 00000000000..886682656f0 --- /dev/null +++ b/include/linux/netfilter/xt_cluster.h @@ -0,0 +1,17 @@ +#ifndef _XT_CLUSTER_MATCH_H +#define _XT_CLUSTER_MATCH_H + +enum xt_cluster_flags { + XT_CLUSTER_F_INV = (1 << 0) +}; + +struct xt_cluster_match_info { + u_int32_t total_nodes; + u_int32_t node_mask; + u_int32_t hash_seed; + u_int32_t flags; +}; + +#define XT_CLUSTER_NODES_MAX 32 + +#endif /* _XT_CLUSTER_MATCH_H */ diff --git a/include/linux/netfilter/xt_connbytes.h b/include/linux/netfilter/xt_connbytes.h index c022c989754..52bd6153b99 100644 --- a/include/linux/netfilter/xt_connbytes.h +++ b/include/linux/netfilter/xt_connbytes.h @@ -1,6 +1,8 @@ #ifndef _XT_CONNBYTES_H #define _XT_CONNBYTES_H +#include <linux/types.h> + enum xt_connbytes_what { XT_CONNBYTES_PKTS, XT_CONNBYTES_BYTES, @@ -19,7 +21,7 @@ struct xt_connbytes_info aligned_u64 from; /* count to be matched */ aligned_u64 to; /* count to be matched */ } count; - u_int8_t what; /* ipt_connbytes_what */ - u_int8_t direction; /* ipt_connbytes_direction */ + __u8 what; /* ipt_connbytes_what */ + __u8 direction; /* ipt_connbytes_direction */ }; #endif diff --git a/include/linux/netfilter/xt_connmark.h b/include/linux/netfilter/xt_connmark.h index 359ef86918d..619e47cde01 100644 --- a/include/linux/netfilter/xt_connmark.h +++ b/include/linux/netfilter/xt_connmark.h @@ -1,6 +1,8 @@ #ifndef _XT_CONNMARK_H #define _XT_CONNMARK_H +#include <linux/types.h> + /* Copyright (C) 2002,2004 MARA Systems AB <http://www.marasystems.com> * by Henrik Nordstrom <hno@marasystems.com> * @@ -10,14 +12,9 @@ * (at your option) any later version. */ -struct xt_connmark_info { - unsigned long mark, mask; - u_int8_t invert; -}; - struct xt_connmark_mtinfo1 { - u_int32_t mark, mask; - u_int8_t invert; + __u32 mark, mask; + __u8 invert; }; #endif /*_XT_CONNMARK_H*/ diff --git a/include/linux/netfilter/xt_conntrack.h b/include/linux/netfilter/xt_conntrack.h index f3fd83e46ba..54f47a2f615 100644 --- a/include/linux/netfilter/xt_conntrack.h +++ b/include/linux/netfilter/xt_conntrack.h @@ -5,6 +5,7 @@ #ifndef _XT_CONNTRACK_H #define _XT_CONNTRACK_H +#include <linux/types.h> #include <linux/netfilter/nf_conntrack_tuple_common.h> #define XT_CONNTRACK_STATE_BIT(ctinfo) (1 << ((ctinfo)%IP_CT_IS_REPLY+1)) @@ -31,53 +32,30 @@ enum { XT_CONNTRACK_DIRECTION = 1 << 12, }; -/* This is exposed to userspace, so remains frozen in time. */ -struct ip_conntrack_old_tuple -{ - struct { - __be32 ip; - union { - __u16 all; - } u; - } src; - - struct { - __be32 ip; - union { - __u16 all; - } u; - - /* The protocol. 
*/ - __u16 protonum; - } dst; -}; - -struct xt_conntrack_info -{ - unsigned int statemask, statusmask; - - struct ip_conntrack_old_tuple tuple[IP_CT_DIR_MAX]; - struct in_addr sipmsk[IP_CT_DIR_MAX], dipmsk[IP_CT_DIR_MAX]; - - unsigned long expires_min, expires_max; - - /* Flags word */ - u_int8_t flags; - /* Inverse flags */ - u_int8_t invflags; +struct xt_conntrack_mtinfo1 { + union nf_inet_addr origsrc_addr, origsrc_mask; + union nf_inet_addr origdst_addr, origdst_mask; + union nf_inet_addr replsrc_addr, replsrc_mask; + union nf_inet_addr repldst_addr, repldst_mask; + __u32 expires_min, expires_max; + __u16 l4proto; + __be16 origsrc_port, origdst_port; + __be16 replsrc_port, repldst_port; + __u16 match_flags, invert_flags; + __u8 state_mask, status_mask; }; -struct xt_conntrack_mtinfo1 { +struct xt_conntrack_mtinfo2 { union nf_inet_addr origsrc_addr, origsrc_mask; union nf_inet_addr origdst_addr, origdst_mask; union nf_inet_addr replsrc_addr, replsrc_mask; union nf_inet_addr repldst_addr, repldst_mask; - u_int32_t expires_min, expires_max; - u_int16_t l4proto; + __u32 expires_min, expires_max; + __u16 l4proto; __be16 origsrc_port, origdst_port; __be16 replsrc_port, repldst_port; - u_int16_t match_flags, invert_flags; - u_int8_t state_mask, status_mask; + __u16 match_flags, invert_flags; + __u16 state_mask, status_mask; }; #endif /*_XT_CONNTRACK_H*/ diff --git a/include/linux/netfilter/xt_dccp.h b/include/linux/netfilter/xt_dccp.h index e0221b9d32c..a579e1b6f04 100644 --- a/include/linux/netfilter/xt_dccp.h +++ b/include/linux/netfilter/xt_dccp.h @@ -1,6 +1,8 @@ #ifndef _XT_DCCP_H_ #define _XT_DCCP_H_ +#include <linux/types.h> + #define XT_DCCP_SRC_PORTS 0x01 #define XT_DCCP_DEST_PORTS 0x02 #define XT_DCCP_TYPE 0x04 @@ -9,14 +11,14 @@ #define XT_DCCP_VALID_FLAGS 0x0f struct xt_dccp_info { - u_int16_t dpts[2]; /* Min, Max */ - u_int16_t spts[2]; /* Min, Max */ + __u16 dpts[2]; /* Min, Max */ + __u16 spts[2]; /* Min, Max */ - u_int16_t flags; - u_int16_t invflags; + __u16 flags; + __u16 invflags; - u_int16_t typemask; - u_int8_t option; + __u16 typemask; + __u8 option; }; #endif /* _XT_DCCP_H_ */ diff --git a/include/linux/netfilter/xt_dscp.h b/include/linux/netfilter/xt_dscp.h index f49bc1a648d..15f8932ad5c 100644 --- a/include/linux/netfilter/xt_dscp.h +++ b/include/linux/netfilter/xt_dscp.h @@ -10,20 +10,22 @@ #ifndef _XT_DSCP_H #define _XT_DSCP_H +#include <linux/types.h> + #define XT_DSCP_MASK 0xfc /* 11111100 */ #define XT_DSCP_SHIFT 2 #define XT_DSCP_MAX 0x3f /* 00111111 */ /* match info */ struct xt_dscp_info { - u_int8_t dscp; - u_int8_t invert; + __u8 dscp; + __u8 invert; }; struct xt_tos_match_info { - u_int8_t tos_mask; - u_int8_t tos_value; - u_int8_t invert; + __u8 tos_mask; + __u8 tos_value; + __u8 invert; }; #endif /* _XT_DSCP_H */ diff --git a/include/linux/netfilter/xt_esp.h b/include/linux/netfilter/xt_esp.h index 9380fb1c27d..ef6fa4747d0 100644 --- a/include/linux/netfilter/xt_esp.h +++ b/include/linux/netfilter/xt_esp.h @@ -1,10 +1,12 @@ #ifndef _XT_ESP_H #define _XT_ESP_H +#include <linux/types.h> + struct xt_esp { - u_int32_t spis[2]; /* Security Parameter Index */ - u_int8_t invflags; /* Inverse flags */ + __u32 spis[2]; /* Security Parameter Index */ + __u8 invflags; /* Inverse flags */ }; /* Values for "invflags" field in struct xt_esp. 
*/ diff --git a/include/linux/netfilter/xt_hashlimit.h b/include/linux/netfilter/xt_hashlimit.h index 51b18d83b47..b1925b5925e 100644 --- a/include/linux/netfilter/xt_hashlimit.h +++ b/include/linux/netfilter/xt_hashlimit.h @@ -1,6 +1,8 @@ #ifndef _XT_HASHLIMIT_H #define _XT_HASHLIMIT_H +#include <linux/types.h> + /* timings are in milliseconds. */ #define XT_HASHLIMIT_SCALE 10000 /* 1/10,000 sec period => max of 10,000/sec. Min rate is then 429490 @@ -18,15 +20,15 @@ enum { }; struct hashlimit_cfg { - u_int32_t mode; /* bitmask of XT_HASHLIMIT_HASH_* */ - u_int32_t avg; /* Average secs between packets * scale */ - u_int32_t burst; /* Period multiplier for upper limit. */ + __u32 mode; /* bitmask of XT_HASHLIMIT_HASH_* */ + __u32 avg; /* Average secs between packets * scale */ + __u32 burst; /* Period multiplier for upper limit. */ /* user specified */ - u_int32_t size; /* how many buckets */ - u_int32_t max; /* max number of entries */ - u_int32_t gc_interval; /* gc interval */ - u_int32_t expire; /* when do entries expire? */ + __u32 size; /* how many buckets */ + __u32 max; /* max number of entries */ + __u32 gc_interval; /* gc interval */ + __u32 expire; /* when do entries expire? */ }; struct xt_hashlimit_info { @@ -42,17 +44,17 @@ struct xt_hashlimit_info { }; struct hashlimit_cfg1 { - u_int32_t mode; /* bitmask of XT_HASHLIMIT_HASH_* */ - u_int32_t avg; /* Average secs between packets * scale */ - u_int32_t burst; /* Period multiplier for upper limit. */ + __u32 mode; /* bitmask of XT_HASHLIMIT_HASH_* */ + __u32 avg; /* Average secs between packets * scale */ + __u32 burst; /* Period multiplier for upper limit. */ /* user specified */ - u_int32_t size; /* how many buckets */ - u_int32_t max; /* max number of entries */ - u_int32_t gc_interval; /* gc interval */ - u_int32_t expire; /* when do entries expire? */ + __u32 size; /* how many buckets */ + __u32 max; /* max number of entries */ + __u32 gc_interval; /* gc interval */ + __u32 expire; /* when do entries expire? */ - u_int8_t srcmask, dstmask; + __u8 srcmask, dstmask; }; struct xt_hashlimit_mtinfo1 { diff --git a/include/linux/netfilter/xt_iprange.h b/include/linux/netfilter/xt_iprange.h index a4299c7d368..c1f21a779a4 100644 --- a/include/linux/netfilter/xt_iprange.h +++ b/include/linux/netfilter/xt_iprange.h @@ -1,6 +1,8 @@ #ifndef _LINUX_NETFILTER_XT_IPRANGE_H #define _LINUX_NETFILTER_XT_IPRANGE_H 1 +#include <linux/types.h> + enum { IPRANGE_SRC = 1 << 0, /* match source IP address */ IPRANGE_DST = 1 << 1, /* match destination IP address */ @@ -11,7 +13,7 @@ enum { struct xt_iprange_mtinfo { union nf_inet_addr src_min, src_max; union nf_inet_addr dst_min, dst_max; - u_int8_t flags; + __u8 flags; }; #endif /* _LINUX_NETFILTER_XT_IPRANGE_H */ diff --git a/include/linux/netfilter/xt_length.h b/include/linux/netfilter/xt_length.h index 7c2b439f73f..b82ed7c4b1e 100644 --- a/include/linux/netfilter/xt_length.h +++ b/include/linux/netfilter/xt_length.h @@ -1,9 +1,11 @@ #ifndef _XT_LENGTH_H #define _XT_LENGTH_H +#include <linux/types.h> + struct xt_length_info { - u_int16_t min, max; - u_int8_t invert; + __u16 min, max; + __u8 invert; }; #endif /*_XT_LENGTH_H*/ diff --git a/include/linux/netfilter/xt_limit.h b/include/linux/netfilter/xt_limit.h index b3ce65375ec..bb47fc4d2ad 100644 --- a/include/linux/netfilter/xt_limit.h +++ b/include/linux/netfilter/xt_limit.h @@ -1,21 +1,24 @@ #ifndef _XT_RATE_H #define _XT_RATE_H +#include <linux/types.h> + /* timings are in milliseconds. 
*/ #define XT_LIMIT_SCALE 10000 +struct xt_limit_priv; + /* 1/10,000 sec period => max of 10,000/sec. Min rate is then 429490 seconds, or one every 59 hours. */ struct xt_rateinfo { - u_int32_t avg; /* Average secs between packets * scale */ - u_int32_t burst; /* Period multiplier for upper limit. */ + __u32 avg; /* Average secs between packets * scale */ + __u32 burst; /* Period multiplier for upper limit. */ /* Used internally by the kernel */ - unsigned long prev; - u_int32_t credit; - u_int32_t credit_cap, cost; + unsigned long prev; /* moved to xt_limit_priv */ + __u32 credit; /* moved to xt_limit_priv */ + __u32 credit_cap, cost; - /* Ugly, ugly fucker. */ - struct xt_rateinfo *master; + struct xt_limit_priv *master; }; #endif /*_XT_RATE_H*/ diff --git a/include/linux/netfilter/xt_mark.h b/include/linux/netfilter/xt_mark.h index fae74bc3f34..6607c8f38ea 100644 --- a/include/linux/netfilter/xt_mark.h +++ b/include/linux/netfilter/xt_mark.h @@ -1,14 +1,11 @@ #ifndef _XT_MARK_H #define _XT_MARK_H -struct xt_mark_info { - unsigned long mark, mask; - u_int8_t invert; -}; +#include <linux/types.h> struct xt_mark_mtinfo1 { - u_int32_t mark, mask; - u_int8_t invert; + __u32 mark, mask; + __u8 invert; }; #endif /*_XT_MARK_H*/ diff --git a/include/linux/netfilter/xt_multiport.h b/include/linux/netfilter/xt_multiport.h index d49ee418371..185db499fcb 100644 --- a/include/linux/netfilter/xt_multiport.h +++ b/include/linux/netfilter/xt_multiport.h @@ -1,6 +1,8 @@ #ifndef _XT_MULTIPORT_H #define _XT_MULTIPORT_H +#include <linux/types.h> + enum xt_multiport_flags { XT_MULTIPORT_SOURCE, @@ -13,18 +15,18 @@ enum xt_multiport_flags /* Must fit inside union xt_matchinfo: 16 bytes */ struct xt_multiport { - u_int8_t flags; /* Type of comparison */ - u_int8_t count; /* Number of ports */ - u_int16_t ports[XT_MULTI_PORTS]; /* Ports */ + __u8 flags; /* Type of comparison */ + __u8 count; /* Number of ports */ + __u16 ports[XT_MULTI_PORTS]; /* Ports */ }; struct xt_multiport_v1 { - u_int8_t flags; /* Type of comparison */ - u_int8_t count; /* Number of ports */ - u_int16_t ports[XT_MULTI_PORTS]; /* Ports */ - u_int8_t pflags[XT_MULTI_PORTS]; /* Port flags */ - u_int8_t invert; /* Invert flag */ + __u8 flags; /* Type of comparison */ + __u8 count; /* Number of ports */ + __u16 ports[XT_MULTI_PORTS]; /* Ports */ + __u8 pflags[XT_MULTI_PORTS]; /* Port flags */ + __u8 invert; /* Invert flag */ }; #endif /*_XT_MULTIPORT_H*/ diff --git a/include/linux/netfilter/xt_osf.h b/include/linux/netfilter/xt_osf.h new file mode 100644 index 00000000000..18afa495f97 --- /dev/null +++ b/include/linux/netfilter/xt_osf.h @@ -0,0 +1,135 @@ +/* + * Copyright (c) 2003+ Evgeniy Polyakov <johnpol@2ka.mxt.ru> + * + * + * This program is free software; you can redistribute it and/or modify + * it under the terms of the GNU General Public License as published by + * the Free Software Foundation; either version 2 of the License, or + * (at your option) any later version. + * + * This program is distributed in the hope that it will be useful, + * but WITHOUT ANY WARRANTY; without even the implied warranty of + * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the + * GNU General Public License for more details. 
+ * + * You should have received a copy of the GNU General Public License + * along with this program; if not, write to the Free Software + * Foundation, Inc., 59 Temple Place, Suite 330, Boston, MA 02111-1307 USA + */ + +#ifndef _XT_OSF_H +#define _XT_OSF_H + +#include <linux/types.h> + +#define MAXGENRELEN 32 + +#define XT_OSF_GENRE (1<<0) +#define XT_OSF_TTL (1<<1) +#define XT_OSF_LOG (1<<2) +#define XT_OSF_INVERT (1<<3) + +#define XT_OSF_LOGLEVEL_ALL 0 /* log all matched fingerprints */ +#define XT_OSF_LOGLEVEL_FIRST 1 /* log only the first matced fingerprint */ +#define XT_OSF_LOGLEVEL_ALL_KNOWN 2 /* do not log unknown packets */ + +#define XT_OSF_TTL_TRUE 0 /* True ip and fingerprint TTL comparison */ +#define XT_OSF_TTL_LESS 1 /* Check if ip TTL is less than fingerprint one */ +#define XT_OSF_TTL_NOCHECK 2 /* Do not compare ip and fingerprint TTL at all */ + +struct xt_osf_info { + char genre[MAXGENRELEN]; + __u32 len; + __u32 flags; + __u32 loglevel; + __u32 ttl; +}; + +/* + * Wildcard MSS (kind of). + * It is used to implement a state machine for the different wildcard values + * of the MSS and window sizes. + */ +struct xt_osf_wc { + __u32 wc; + __u32 val; +}; + +/* + * This struct represents IANA options + * http://www.iana.org/assignments/tcp-parameters + */ +struct xt_osf_opt { + __u16 kind, length; + struct xt_osf_wc wc; +}; + +struct xt_osf_user_finger { + struct xt_osf_wc wss; + + __u8 ttl, df; + __u16 ss, mss; + __u16 opt_num; + + char genre[MAXGENRELEN]; + char version[MAXGENRELEN]; + char subtype[MAXGENRELEN]; + + /* MAX_IPOPTLEN is maximum if all options are NOPs or EOLs */ + struct xt_osf_opt opt[MAX_IPOPTLEN]; +}; + +struct xt_osf_nlmsg { + struct xt_osf_user_finger f; + struct iphdr ip; + struct tcphdr tcp; +}; + +/* Defines for IANA option kinds */ + +enum iana_options { + OSFOPT_EOL = 0, /* End of options */ + OSFOPT_NOP, /* NOP */ + OSFOPT_MSS, /* Maximum segment size */ + OSFOPT_WSO, /* Window scale option */ + OSFOPT_SACKP, /* SACK permitted */ + OSFOPT_SACK, /* SACK */ + OSFOPT_ECHO, + OSFOPT_ECHOREPLY, + OSFOPT_TS, /* Timestamp option */ + OSFOPT_POCP, /* Partial Order Connection Permitted */ + OSFOPT_POSP, /* Partial Order Service Profile */ + + /* Others are not used in the current OSF */ + OSFOPT_EMPTY = 255, +}; + +/* + * Initial window size option state machine: multiple of mss, mtu or + * plain numeric value. Can also be made as plain numeric value which + * is not a multiple of specified value. + */ +enum xt_osf_window_size_options { + OSF_WSS_PLAIN = 0, + OSF_WSS_MSS, + OSF_WSS_MTU, + OSF_WSS_MODULO, + OSF_WSS_MAX, +}; + +/* + * Add/remove fingerprint from the kernel. 
+ */ +enum xt_osf_msg_types { + OSF_MSG_ADD, + OSF_MSG_REMOVE, + OSF_MSG_MAX, +}; + +enum xt_osf_attr_type { + OSF_ATTR_UNSPEC, + OSF_ATTR_FINGER, + OSF_ATTR_MAX, +}; + +#endif /* _XT_OSF_H */ diff --git a/include/linux/netfilter/xt_owner.h b/include/linux/netfilter/xt_owner.h index c84e52cfe41..2081761714b 100644 --- a/include/linux/netfilter/xt_owner.h +++ b/include/linux/netfilter/xt_owner.h @@ -1,6 +1,8 @@ #ifndef _XT_OWNER_MATCH_H #define _XT_OWNER_MATCH_H +#include <linux/types.h> + enum { XT_OWNER_UID = 1 << 0, XT_OWNER_GID = 1 << 1, @@ -8,9 +10,9 @@ enum { }; struct xt_owner_match_info { - u_int32_t uid_min, uid_max; - u_int32_t gid_min, gid_max; - u_int8_t match, invert; + __u32 uid_min, uid_max; + __u32 gid_min, gid_max; + __u8 match, invert; }; #endif /* _XT_OWNER_MATCH_H */ diff --git a/include/linux/netfilter/xt_physdev.h b/include/linux/netfilter/xt_physdev.h index 25a7a1815b5..8555e399886 100644 --- a/include/linux/netfilter/xt_physdev.h +++ b/include/linux/netfilter/xt_physdev.h @@ -1,6 +1,8 @@ #ifndef _XT_PHYSDEV_H #define _XT_PHYSDEV_H +#include <linux/types.h> + #ifdef __KERNEL__ #include <linux/if.h> #endif @@ -17,8 +19,8 @@ struct xt_physdev_info { char in_mask[IFNAMSIZ]; char physoutdev[IFNAMSIZ]; char out_mask[IFNAMSIZ]; - u_int8_t invert; - u_int8_t bitmask; + __u8 invert; + __u8 bitmask; }; #endif /*_XT_PHYSDEV_H*/ diff --git a/include/linux/netfilter/xt_policy.h b/include/linux/netfilter/xt_policy.h index 053d8cc6546..7bb64e7c853 100644 --- a/include/linux/netfilter/xt_policy.h +++ b/include/linux/netfilter/xt_policy.h @@ -1,6 +1,8 @@ #ifndef _XT_POLICY_H #define _XT_POLICY_H +#include <linux/types.h> + #define XT_POLICY_MAX_ELEM 4 enum xt_policy_flags @@ -19,7 +21,7 @@ enum xt_policy_modes struct xt_policy_spec { - u_int8_t saddr:1, + __u8 saddr:1, daddr:1, proto:1, mode:1, @@ -55,9 +57,9 @@ struct xt_policy_elem #endif }; __be32 spi; - u_int32_t reqid; - u_int8_t proto; - u_int8_t mode; + __u32 reqid; + __u8 proto; + __u8 mode; struct xt_policy_spec match; struct xt_policy_spec invert; @@ -66,8 +68,8 @@ struct xt_policy_elem struct xt_policy_info { struct xt_policy_elem pol[XT_POLICY_MAX_ELEM]; - u_int16_t flags; - u_int16_t len; + __u16 flags; + __u16 len; }; #endif /* _XT_POLICY_H */ diff --git a/include/linux/netfilter/xt_quota.h b/include/linux/netfilter/xt_quota.h index 4c8368d781e..8dc89dfc136 100644 --- a/include/linux/netfilter/xt_quota.h +++ b/include/linux/netfilter/xt_quota.h @@ -6,13 +6,15 @@ enum xt_quota_flags { }; #define XT_QUOTA_MASK 0x1 +struct xt_quota_priv; + struct xt_quota_info { u_int32_t flags; u_int32_t pad; /* Used internally by the kernel */ aligned_u64 quota; - struct xt_quota_info *master; + struct xt_quota_priv *master; }; #endif /* _XT_QUOTA_H */ diff --git a/include/linux/netfilter/xt_rateest.h b/include/linux/netfilter/xt_rateest.h index 2010cb74250..d40a6196842 100644 --- a/include/linux/netfilter/xt_rateest.h +++ b/include/linux/netfilter/xt_rateest.h @@ -1,6 +1,8 @@ #ifndef _XT_RATEEST_MATCH_H #define _XT_RATEEST_MATCH_H +#include <linux/types.h> + enum xt_rateest_match_flags { XT_RATEEST_MATCH_INVERT = 1<<0, XT_RATEEST_MATCH_ABS = 1<<1, @@ -20,12 +22,12 @@ enum xt_rateest_match_mode { struct xt_rateest_match_info { char name1[IFNAMSIZ]; char name2[IFNAMSIZ]; - u_int16_t flags; - u_int16_t mode; - u_int32_t bps1; - u_int32_t pps1; - u_int32_t bps2; - u_int32_t pps2; + __u16 flags; + __u16 mode; + __u32 bps1; + __u32 pps1; + __u32 bps2; + __u32 pps2; /* Used internally by the kernel */ struct xt_rateest *est1 
__attribute__((aligned(8))); diff --git a/include/linux/netfilter/xt_realm.h b/include/linux/netfilter/xt_realm.h index 220e8724571..d4a82ee56a0 100644 --- a/include/linux/netfilter/xt_realm.h +++ b/include/linux/netfilter/xt_realm.h @@ -1,10 +1,12 @@ #ifndef _XT_REALM_H #define _XT_REALM_H +#include <linux/types.h> + struct xt_realm_info { - u_int32_t id; - u_int32_t mask; - u_int8_t invert; + __u32 id; + __u32 mask; + __u8 invert; }; #endif /* _XT_REALM_H */ diff --git a/include/linux/netfilter/xt_recent.h b/include/linux/netfilter/xt_recent.h index 5cfeb81c679..d2c27660992 100644 --- a/include/linux/netfilter/xt_recent.h +++ b/include/linux/netfilter/xt_recent.h @@ -1,6 +1,8 @@ #ifndef _LINUX_NETFILTER_XT_RECENT_H #define _LINUX_NETFILTER_XT_RECENT_H 1 +#include <linux/types.h> + enum { XT_RECENT_CHECK = 1 << 0, XT_RECENT_SET = 1 << 1, @@ -15,12 +17,12 @@ enum { }; struct xt_recent_mtinfo { - u_int32_t seconds; - u_int32_t hit_count; - u_int8_t check_set; - u_int8_t invert; + __u32 seconds; + __u32 hit_count; + __u8 check_set; + __u8 invert; char name[XT_RECENT_NAME_LEN]; - u_int8_t side; + __u8 side; }; #endif /* _LINUX_NETFILTER_XT_RECENT_H */ diff --git a/include/linux/netfilter/xt_sctp.h b/include/linux/netfilter/xt_sctp.h index 32000ba6ece..29287be696a 100644 --- a/include/linux/netfilter/xt_sctp.h +++ b/include/linux/netfilter/xt_sctp.h @@ -1,6 +1,8 @@ #ifndef _XT_SCTP_H_ #define _XT_SCTP_H_ +#include <linux/types.h> + #define XT_SCTP_SRC_PORTS 0x01 #define XT_SCTP_DEST_PORTS 0x02 #define XT_SCTP_CHUNK_TYPES 0x04 @@ -8,49 +10,49 @@ #define XT_SCTP_VALID_FLAGS 0x07 struct xt_sctp_flag_info { - u_int8_t chunktype; - u_int8_t flag; - u_int8_t flag_mask; + __u8 chunktype; + __u8 flag; + __u8 flag_mask; }; #define XT_NUM_SCTP_FLAGS 4 struct xt_sctp_info { - u_int16_t dpts[2]; /* Min, Max */ - u_int16_t spts[2]; /* Min, Max */ + __u16 dpts[2]; /* Min, Max */ + __u16 spts[2]; /* Min, Max */ - u_int32_t chunkmap[256 / sizeof (u_int32_t)]; /* Bit mask of chunks to be matched according to RFC 2960 */ + __u32 chunkmap[256 / sizeof (__u32)]; /* Bit mask of chunks to be matched according to RFC 2960 */ #define SCTP_CHUNK_MATCH_ANY 0x01 /* Match if any of the chunk types are present */ #define SCTP_CHUNK_MATCH_ALL 0x02 /* Match if all of the chunk types are present */ #define SCTP_CHUNK_MATCH_ONLY 0x04 /* Match if these are the only chunk types present */ - u_int32_t chunk_match_type; + __u32 chunk_match_type; struct xt_sctp_flag_info flag_info[XT_NUM_SCTP_FLAGS]; int flag_count; - u_int32_t flags; - u_int32_t invflags; + __u32 flags; + __u32 invflags; }; #define bytes(type) (sizeof(type) * 8) #define SCTP_CHUNKMAP_SET(chunkmap, type) \ do { \ - (chunkmap)[type / bytes(u_int32_t)] |= \ - 1 << (type % bytes(u_int32_t)); \ + (chunkmap)[type / bytes(__u32)] |= \ + 1 << (type % bytes(__u32)); \ } while (0) #define SCTP_CHUNKMAP_CLEAR(chunkmap, type) \ do { \ - (chunkmap)[type / bytes(u_int32_t)] &= \ - ~(1 << (type % bytes(u_int32_t))); \ + (chunkmap)[type / bytes(__u32)] &= \ + ~(1 << (type % bytes(__u32))); \ } while (0) #define SCTP_CHUNKMAP_IS_SET(chunkmap, type) \ ({ \ - ((chunkmap)[type / bytes (u_int32_t)] & \ - (1 << (type % bytes (u_int32_t)))) ? 1: 0; \ + ((chunkmap)[type / bytes (__u32)] & \ + (1 << (type % bytes (__u32)))) ? 
1: 0; \ }) #define SCTP_CHUNKMAP_RESET(chunkmap) \ @@ -65,7 +67,7 @@ struct xt_sctp_info { #define SCTP_CHUNKMAP_IS_CLEAR(chunkmap) \ __sctp_chunkmap_is_clear((chunkmap), ARRAY_SIZE(chunkmap)) static inline bool -__sctp_chunkmap_is_clear(const u_int32_t *chunkmap, unsigned int n) +__sctp_chunkmap_is_clear(const __u32 *chunkmap, unsigned int n) { unsigned int i; for (i = 0; i < n; ++i) @@ -77,7 +79,7 @@ __sctp_chunkmap_is_clear(const u_int32_t *chunkmap, unsigned int n) #define SCTP_CHUNKMAP_IS_ALL_SET(chunkmap) \ __sctp_chunkmap_is_all_set((chunkmap), ARRAY_SIZE(chunkmap)) static inline bool -__sctp_chunkmap_is_all_set(const u_int32_t *chunkmap, unsigned int n) +__sctp_chunkmap_is_all_set(const __u32 *chunkmap, unsigned int n) { unsigned int i; for (i = 0; i < n; ++i) diff --git a/include/linux/netfilter/xt_socket.h b/include/linux/netfilter/xt_socket.h new file mode 100644 index 00000000000..6f475b8ff34 --- /dev/null +++ b/include/linux/netfilter/xt_socket.h @@ -0,0 +1,12 @@ +#ifndef _XT_SOCKET_H +#define _XT_SOCKET_H + +enum { + XT_SOCKET_TRANSPARENT = 1 << 0, +}; + +struct xt_socket_mtinfo1 { + __u8 flags; +}; + +#endif /* _XT_SOCKET_H */ diff --git a/include/linux/netfilter/xt_statistic.h b/include/linux/netfilter/xt_statistic.h index 3d38bc97504..4e983ef0c96 100644 --- a/include/linux/netfilter/xt_statistic.h +++ b/include/linux/netfilter/xt_statistic.h @@ -1,6 +1,8 @@ #ifndef _XT_STATISTIC_H #define _XT_STATISTIC_H +#include <linux/types.h> + enum xt_statistic_mode { XT_STATISTIC_MODE_RANDOM, XT_STATISTIC_MODE_NTH, @@ -13,21 +15,22 @@ enum xt_statistic_flags { }; #define XT_STATISTIC_MASK 0x1 +struct xt_statistic_priv; + struct xt_statistic_info { - u_int16_t mode; - u_int16_t flags; + __u16 mode; + __u16 flags; union { struct { - u_int32_t probability; + __u32 probability; } random; struct { - u_int32_t every; - u_int32_t packet; - /* Used internally by the kernel */ - u_int32_t count; + __u32 every; + __u32 packet; + __u32 count; /* unused */ } nth; } u; - struct xt_statistic_info *master __attribute__((aligned(8))); + struct xt_statistic_priv *master __attribute__((aligned(8))); }; #endif /* _XT_STATISTIC_H */ diff --git a/include/linux/netfilter/xt_string.h b/include/linux/netfilter/xt_string.h index 8a6ba7bbef9..ecbb95fc89e 100644 --- a/include/linux/netfilter/xt_string.h +++ b/include/linux/netfilter/xt_string.h @@ -1,6 +1,8 @@ #ifndef _XT_STRING_H #define _XT_STRING_H +#include <linux/types.h> + #define XT_STRING_MAX_PATTERN_SIZE 128 #define XT_STRING_MAX_ALGO_NAME_SIZE 16 @@ -11,18 +13,18 @@ enum { struct xt_string_info { - u_int16_t from_offset; - u_int16_t to_offset; + __u16 from_offset; + __u16 to_offset; char algo[XT_STRING_MAX_ALGO_NAME_SIZE]; char pattern[XT_STRING_MAX_PATTERN_SIZE]; - u_int8_t patlen; + __u8 patlen; union { struct { - u_int8_t invert; + __u8 invert; } v0; struct { - u_int8_t flags; + __u8 flags; } v1; } u; diff --git a/include/linux/netfilter/xt_tcpmss.h b/include/linux/netfilter/xt_tcpmss.h index e03274c4c79..fbac56b9e66 100644 --- a/include/linux/netfilter/xt_tcpmss.h +++ b/include/linux/netfilter/xt_tcpmss.h @@ -1,9 +1,11 @@ #ifndef _XT_TCPMSS_MATCH_H #define _XT_TCPMSS_MATCH_H +#include <linux/types.h> + struct xt_tcpmss_match_info { - u_int16_t mss_min, mss_max; - u_int8_t invert; + __u16 mss_min, mss_max; + __u8 invert; }; #endif /*_XT_TCPMSS_MATCH_H*/ diff --git a/include/linux/netfilter/xt_tcpudp.h b/include/linux/netfilter/xt_tcpudp.h index 78bc65f11ad..a490a0bc1d2 100644 --- a/include/linux/netfilter/xt_tcpudp.h +++ 
b/include/linux/netfilter/xt_tcpudp.h @@ -1,15 +1,17 @@ #ifndef _XT_TCPUDP_H #define _XT_TCPUDP_H +#include <linux/types.h> + /* TCP matching stuff */ struct xt_tcp { - u_int16_t spts[2]; /* Source port range. */ - u_int16_t dpts[2]; /* Destination port range. */ - u_int8_t option; /* TCP Option iff non-zero*/ - u_int8_t flg_mask; /* TCP flags mask byte */ - u_int8_t flg_cmp; /* TCP flags compare byte */ - u_int8_t invflags; /* Inverse flags */ + __u16 spts[2]; /* Source port range. */ + __u16 dpts[2]; /* Destination port range. */ + __u8 option; /* TCP Option iff non-zero*/ + __u8 flg_mask; /* TCP flags mask byte */ + __u8 flg_cmp; /* TCP flags compare byte */ + __u8 invflags; /* Inverse flags */ }; /* Values for "inv" field in struct ipt_tcp. */ @@ -22,9 +24,9 @@ struct xt_tcp /* UDP matching stuff */ struct xt_udp { - u_int16_t spts[2]; /* Source port range. */ - u_int16_t dpts[2]; /* Destination port range. */ - u_int8_t invflags; /* Inverse flags */ + __u16 spts[2]; /* Source port range. */ + __u16 dpts[2]; /* Destination port range. */ + __u8 invflags; /* Inverse flags */ }; /* Values for "invflags" field in struct ipt_udp. */ diff --git a/include/linux/netfilter_arp/arp_tables.h b/include/linux/netfilter_arp/arp_tables.h index 590ac3d6d5d..6fe3e6aa10d 100644 --- a/include/linux/netfilter_arp/arp_tables.h +++ b/include/linux/netfilter_arp/arp_tables.h @@ -265,7 +265,7 @@ struct arpt_error } extern struct xt_table *arpt_register_table(struct net *net, - struct xt_table *table, + const struct xt_table *table, const struct arpt_replace *repl); extern void arpt_unregister_table(struct xt_table *table); extern unsigned int arpt_do_table(struct sk_buff *skb, diff --git a/include/linux/netfilter_bridge.h b/include/linux/netfilter_bridge.h index 499aa937590..f8105e54716 100644 --- a/include/linux/netfilter_bridge.h +++ b/include/linux/netfilter_bridge.h @@ -59,9 +59,9 @@ static inline int nf_bridge_maybe_copy_header(struct sk_buff *skb) static inline unsigned int nf_bridge_encap_header_len(const struct sk_buff *skb) { switch (skb->protocol) { - case __constant_htons(ETH_P_8021Q): + case __cpu_to_be16(ETH_P_8021Q): return VLAN_HLEN; - case __constant_htons(ETH_P_PPP_SES): + case __cpu_to_be16(ETH_P_PPP_SES): return PPPOE_SES_HLEN; default: return 0; diff --git a/include/linux/netfilter_bridge/ebtables.h b/include/linux/netfilter_bridge/ebtables.h index e40ddb94b1a..ea281e6a204 100644 --- a/include/linux/netfilter_bridge/ebtables.h +++ b/include/linux/netfilter_bridge/ebtables.h @@ -301,7 +301,7 @@ struct ebt_table #define EBT_ALIGN(s) (((s) + (__alignof__(struct ebt_replace)-1)) & \ ~(__alignof__(struct ebt_replace)-1)) extern struct ebt_table *ebt_register_table(struct net *net, - struct ebt_table *table); + const struct ebt_table *table); extern void ebt_unregister_table(struct ebt_table *table); extern unsigned int ebt_do_table(unsigned int hook, struct sk_buff *skb, const struct net_device *in, const struct net_device *out, diff --git a/include/linux/netfilter_ipv4/Kbuild b/include/linux/netfilter_ipv4/Kbuild index 3a7105bb8f3..431b4076192 100644 --- a/include/linux/netfilter_ipv4/Kbuild +++ b/include/linux/netfilter_ipv4/Kbuild @@ -1,46 +1,14 @@ -header-y += ipt_CLASSIFY.h header-y += ipt_CLUSTERIP.h -header-y += ipt_CONNMARK.h -header-y += ipt_DSCP.h header-y += ipt_ECN.h header-y += ipt_LOG.h -header-y += ipt_MARK.h -header-y += ipt_NFQUEUE.h header-y += ipt_REJECT.h header-y += ipt_SAME.h -header-y += ipt_TCPMSS.h -header-y += ipt_TOS.h header-y += ipt_TTL.h header-y += ipt_ULOG.h 
header-y += ipt_addrtype.h header-y += ipt_ah.h -header-y += ipt_comment.h -header-y += ipt_connbytes.h -header-y += ipt_connmark.h -header-y += ipt_conntrack.h -header-y += ipt_dccp.h -header-y += ipt_dscp.h header-y += ipt_ecn.h -header-y += ipt_esp.h -header-y += ipt_hashlimit.h -header-y += ipt_helper.h -header-y += ipt_iprange.h -header-y += ipt_length.h -header-y += ipt_limit.h -header-y += ipt_mac.h -header-y += ipt_mark.h -header-y += ipt_multiport.h -header-y += ipt_owner.h -header-y += ipt_physdev.h -header-y += ipt_pkttype.h -header-y += ipt_policy.h header-y += ipt_realm.h -header-y += ipt_recent.h -header-y += ipt_sctp.h -header-y += ipt_state.h -header-y += ipt_string.h -header-y += ipt_tcpmss.h -header-y += ipt_tos.h header-y += ipt_ttl.h unifdef-y += ip_queue.h diff --git a/include/linux/netfilter_ipv4/ip_tables.h b/include/linux/netfilter_ipv4/ip_tables.h index 092bd50581a..61fafc868a7 100644 --- a/include/linux/netfilter_ipv4/ip_tables.h +++ b/include/linux/netfilter_ipv4/ip_tables.h @@ -245,7 +245,7 @@ ipt_get_target(struct ipt_entry *e) extern void ipt_init(void) __init; extern struct xt_table *ipt_register_table(struct net *net, - struct xt_table *table, + const struct xt_table *table, const struct ipt_replace *repl); extern void ipt_unregister_table(struct xt_table *table); diff --git a/include/linux/netfilter_ipv4/ipt_CLASSIFY.h b/include/linux/netfilter_ipv4/ipt_CLASSIFY.h deleted file mode 100644 index a46d511b5c3..00000000000 --- a/include/linux/netfilter_ipv4/ipt_CLASSIFY.h +++ /dev/null @@ -1,7 +0,0 @@ -#ifndef _IPT_CLASSIFY_H -#define _IPT_CLASSIFY_H - -#include <linux/netfilter/xt_CLASSIFY.h> -#define ipt_classify_target_info xt_classify_target_info - -#endif /*_IPT_CLASSIFY_H */ diff --git a/include/linux/netfilter_ipv4/ipt_CONNMARK.h b/include/linux/netfilter_ipv4/ipt_CONNMARK.h deleted file mode 100644 index 9ecfee0a9e3..00000000000 --- a/include/linux/netfilter_ipv4/ipt_CONNMARK.h +++ /dev/null @@ -1,19 +0,0 @@ -#ifndef _IPT_CONNMARK_H_target -#define _IPT_CONNMARK_H_target - -/* Copyright (C) 2002,2004 MARA Systems AB <http://www.marasystems.com> - * by Henrik Nordstrom <hno@marasystems.com> - * - * This program is free software; you can redistribute it and/or modify - * it under the terms of the GNU General Public License as published by - * the Free Software Foundation; either version 2 of the License, or - * (at your option) any later version. - */ -#include <linux/netfilter/xt_CONNMARK.h> -#define IPT_CONNMARK_SET XT_CONNMARK_SET -#define IPT_CONNMARK_SAVE XT_CONNMARK_SAVE -#define IPT_CONNMARK_RESTORE XT_CONNMARK_RESTORE - -#define ipt_connmark_target_info xt_connmark_target_info - -#endif /*_IPT_CONNMARK_H_target*/ diff --git a/include/linux/netfilter_ipv4/ipt_DSCP.h b/include/linux/netfilter_ipv4/ipt_DSCP.h deleted file mode 100644 index 3491e524d5e..00000000000 --- a/include/linux/netfilter_ipv4/ipt_DSCP.h +++ /dev/null @@ -1,18 +0,0 @@ -/* iptables module for setting the IPv4 DSCP field - * - * (C) 2002 Harald Welte <laforge@gnumonks.org> - * based on ipt_FTOS.c (C) 2000 by Matthew G. Marsh <mgm@paktronix.com> - * This software is distributed under GNU GPL v2, 1991 - * - * See RFC2474 for a description of the DSCP field within the IP Header. 
- * - * ipt_DSCP.h,v 1.7 2002/03/14 12:03:13 laforge Exp -*/ -#ifndef _IPT_DSCP_TARGET_H -#define _IPT_DSCP_TARGET_H -#include <linux/netfilter_ipv4/ipt_dscp.h> -#include <linux/netfilter/xt_DSCP.h> - -#define ipt_DSCP_info xt_DSCP_info - -#endif /* _IPT_DSCP_TARGET_H */ diff --git a/include/linux/netfilter_ipv4/ipt_ECN.h b/include/linux/netfilter_ipv4/ipt_ECN.h index 94e0d986646..7ca45918ab8 100644 --- a/include/linux/netfilter_ipv4/ipt_ECN.h +++ b/include/linux/netfilter_ipv4/ipt_ECN.h @@ -8,9 +8,9 @@ */ #ifndef _IPT_ECN_TARGET_H #define _IPT_ECN_TARGET_H -#include <linux/netfilter_ipv4/ipt_DSCP.h> +#include <linux/netfilter/xt_DSCP.h> -#define IPT_ECN_IP_MASK (~IPT_DSCP_MASK) +#define IPT_ECN_IP_MASK (~XT_DSCP_MASK) #define IPT_ECN_OP_SET_IP 0x01 /* set ECN bits of IPv4 header */ #define IPT_ECN_OP_SET_ECE 0x10 /* set ECE bit of TCP header */ diff --git a/include/linux/netfilter_ipv4/ipt_MARK.h b/include/linux/netfilter_ipv4/ipt_MARK.h deleted file mode 100644 index 697a486a96d..00000000000 --- a/include/linux/netfilter_ipv4/ipt_MARK.h +++ /dev/null @@ -1,18 +0,0 @@ -#ifndef _IPT_MARK_H_target -#define _IPT_MARK_H_target - -/* Backwards compatibility for old userspace */ - -#include <linux/netfilter/xt_MARK.h> - -/* Version 0 */ -#define ipt_mark_target_info xt_mark_target_info - -/* Version 1 */ -#define IPT_MARK_SET XT_MARK_SET -#define IPT_MARK_AND XT_MARK_AND -#define IPT_MARK_OR XT_MARK_OR - -#define ipt_mark_target_info_v1 xt_mark_target_info_v1 - -#endif /*_IPT_MARK_H_target*/ diff --git a/include/linux/netfilter_ipv4/ipt_NFQUEUE.h b/include/linux/netfilter_ipv4/ipt_NFQUEUE.h deleted file mode 100644 index 97a2a7557cb..00000000000 --- a/include/linux/netfilter_ipv4/ipt_NFQUEUE.h +++ /dev/null @@ -1,16 +0,0 @@ -/* iptables module for using NFQUEUE mechanism - * - * (C) 2005 Harald Welte <laforge@netfilter.org> - * - * This software is distributed under GNU GPL v2, 1991 - * -*/ -#ifndef _IPT_NFQ_TARGET_H -#define _IPT_NFQ_TARGET_H - -/* Backwards compatibility for old userspace */ -#include <linux/netfilter/xt_NFQUEUE.h> - -#define ipt_NFQ_info xt_NFQ_info - -#endif /* _IPT_DSCP_TARGET_H */ diff --git a/include/linux/netfilter_ipv4/ipt_TCPMSS.h b/include/linux/netfilter_ipv4/ipt_TCPMSS.h deleted file mode 100644 index 7a850f94582..00000000000 --- a/include/linux/netfilter_ipv4/ipt_TCPMSS.h +++ /dev/null @@ -1,9 +0,0 @@ -#ifndef _IPT_TCPMSS_H -#define _IPT_TCPMSS_H - -#include <linux/netfilter/xt_TCPMSS.h> - -#define ipt_tcpmss_info xt_tcpmss_info -#define IPT_TCPMSS_CLAMP_PMTU XT_TCPMSS_CLAMP_PMTU - -#endif /*_IPT_TCPMSS_H*/ diff --git a/include/linux/netfilter_ipv4/ipt_TOS.h b/include/linux/netfilter_ipv4/ipt_TOS.h deleted file mode 100644 index 6bf9e1fdfd8..00000000000 --- a/include/linux/netfilter_ipv4/ipt_TOS.h +++ /dev/null @@ -1,12 +0,0 @@ -#ifndef _IPT_TOS_H_target -#define _IPT_TOS_H_target - -#ifndef IPTOS_NORMALSVC -#define IPTOS_NORMALSVC 0 -#endif - -struct ipt_tos_target_info { - u_int8_t tos; -}; - -#endif /*_IPT_TOS_H_target*/ diff --git a/include/linux/netfilter_ipv4/ipt_comment.h b/include/linux/netfilter_ipv4/ipt_comment.h deleted file mode 100644 index ae2afc2f748..00000000000 --- a/include/linux/netfilter_ipv4/ipt_comment.h +++ /dev/null @@ -1,10 +0,0 @@ -#ifndef _IPT_COMMENT_H -#define _IPT_COMMENT_H - -#include <linux/netfilter/xt_comment.h> - -#define IPT_MAX_COMMENT_LEN XT_MAX_COMMENT_LEN - -#define ipt_comment_info xt_comment_info - -#endif /* _IPT_COMMENT_H */ diff --git a/include/linux/netfilter_ipv4/ipt_connbytes.h 
b/include/linux/netfilter_ipv4/ipt_connbytes.h deleted file mode 100644 index f63e6ee9111..00000000000 --- a/include/linux/netfilter_ipv4/ipt_connbytes.h +++ /dev/null @@ -1,18 +0,0 @@ -#ifndef _IPT_CONNBYTES_H -#define _IPT_CONNBYTES_H - -#include <linux/netfilter/xt_connbytes.h> -#define ipt_connbytes_what xt_connbytes_what - -#define IPT_CONNBYTES_PKTS XT_CONNBYTES_PKTS -#define IPT_CONNBYTES_BYTES XT_CONNBYTES_BYTES -#define IPT_CONNBYTES_AVGPKT XT_CONNBYTES_AVGPKT - -#define ipt_connbytes_direction xt_connbytes_direction -#define IPT_CONNBYTES_DIR_ORIGINAL XT_CONNBYTES_DIR_ORIGINAL -#define IPT_CONNBYTES_DIR_REPLY XT_CONNBYTES_DIR_REPLY -#define IPT_CONNBYTES_DIR_BOTH XT_CONNBYTES_DIR_BOTH - -#define ipt_connbytes_info xt_connbytes_info - -#endif diff --git a/include/linux/netfilter_ipv4/ipt_connmark.h b/include/linux/netfilter_ipv4/ipt_connmark.h deleted file mode 100644 index c7ba6560d44..00000000000 --- a/include/linux/netfilter_ipv4/ipt_connmark.h +++ /dev/null @@ -1,7 +0,0 @@ -#ifndef _IPT_CONNMARK_H -#define _IPT_CONNMARK_H - -#include <linux/netfilter/xt_connmark.h> -#define ipt_connmark_info xt_connmark_info - -#endif /*_IPT_CONNMARK_H*/ diff --git a/include/linux/netfilter_ipv4/ipt_conntrack.h b/include/linux/netfilter_ipv4/ipt_conntrack.h deleted file mode 100644 index cde6762949c..00000000000 --- a/include/linux/netfilter_ipv4/ipt_conntrack.h +++ /dev/null @@ -1,28 +0,0 @@ -/* Header file for kernel module to match connection tracking information. - * GPL (C) 2001 Marc Boucher (marc@mbsi.ca). - */ - -#ifndef _IPT_CONNTRACK_H -#define _IPT_CONNTRACK_H - -#include <linux/netfilter/xt_conntrack.h> - -#define IPT_CONNTRACK_STATE_BIT(ctinfo) XT_CONNTRACK_STATE_BIT(ctinfo) -#define IPT_CONNTRACK_STATE_INVALID XT_CONNTRACK_STATE_INVALID - -#define IPT_CONNTRACK_STATE_SNAT XT_CONNTRACK_STATE_SNAT -#define IPT_CONNTRACK_STATE_DNAT XT_CONNTRACK_STATE_DNAT -#define IPT_CONNTRACK_STATE_UNTRACKED XT_CONNTRACK_STATE_UNTRACKED - -/* flags, invflags: */ -#define IPT_CONNTRACK_STATE XT_CONNTRACK_STATE -#define IPT_CONNTRACK_PROTO XT_CONNTRACK_PROTO -#define IPT_CONNTRACK_ORIGSRC XT_CONNTRACK_ORIGSRC -#define IPT_CONNTRACK_ORIGDST XT_CONNTRACK_ORIGDST -#define IPT_CONNTRACK_REPLSRC XT_CONNTRACK_REPLSRC -#define IPT_CONNTRACK_REPLDST XT_CONNTRACK_REPLDST -#define IPT_CONNTRACK_STATUS XT_CONNTRACK_STATUS -#define IPT_CONNTRACK_EXPIRES XT_CONNTRACK_EXPIRES - -#define ipt_conntrack_info xt_conntrack_info -#endif /*_IPT_CONNTRACK_H*/ diff --git a/include/linux/netfilter_ipv4/ipt_dccp.h b/include/linux/netfilter_ipv4/ipt_dccp.h deleted file mode 100644 index e70d11e1f53..00000000000 --- a/include/linux/netfilter_ipv4/ipt_dccp.h +++ /dev/null @@ -1,15 +0,0 @@ -#ifndef _IPT_DCCP_H_ -#define _IPT_DCCP_H_ - -#include <linux/netfilter/xt_dccp.h> -#define IPT_DCCP_SRC_PORTS XT_DCCP_SRC_PORTS -#define IPT_DCCP_DEST_PORTS XT_DCCP_DEST_PORTS -#define IPT_DCCP_TYPE XT_DCCP_TYPE -#define IPT_DCCP_OPTION XT_DCCP_OPTION - -#define IPT_DCCP_VALID_FLAGS XT_DCCP_VALID_FLAGS - -#define ipt_dccp_info xt_dccp_info - -#endif /* _IPT_DCCP_H_ */ - diff --git a/include/linux/netfilter_ipv4/ipt_dscp.h b/include/linux/netfilter_ipv4/ipt_dscp.h deleted file mode 100644 index 4b82ca912b0..00000000000 --- a/include/linux/netfilter_ipv4/ipt_dscp.h +++ /dev/null @@ -1,21 +0,0 @@ -/* iptables module for matching the IPv4 DSCP field - * - * (C) 2002 Harald Welte <laforge@gnumonks.org> - * This software is distributed under GNU GPL v2, 1991 - * - * See RFC2474 for a description of the DSCP field within the IP Header. 
- * - * ipt_dscp.h,v 1.3 2002/08/05 19:00:21 laforge Exp -*/ -#ifndef _IPT_DSCP_H -#define _IPT_DSCP_H - -#include <linux/netfilter/xt_dscp.h> - -#define IPT_DSCP_MASK XT_DSCP_MASK -#define IPT_DSCP_SHIFT XT_DSCP_SHIFT -#define IPT_DSCP_MAX XT_DSCP_MAX - -#define ipt_dscp_info xt_dscp_info - -#endif /* _IPT_DSCP_H */ diff --git a/include/linux/netfilter_ipv4/ipt_ecn.h b/include/linux/netfilter_ipv4/ipt_ecn.h index 1f0d9a4d337..9945baa4ccd 100644 --- a/include/linux/netfilter_ipv4/ipt_ecn.h +++ b/include/linux/netfilter_ipv4/ipt_ecn.h @@ -8,9 +8,9 @@ */ #ifndef _IPT_ECN_H #define _IPT_ECN_H -#include <linux/netfilter_ipv4/ipt_dscp.h> +#include <linux/netfilter/xt_dscp.h> -#define IPT_ECN_IP_MASK (~IPT_DSCP_MASK) +#define IPT_ECN_IP_MASK (~XT_DSCP_MASK) #define IPT_ECN_OP_MATCH_IP 0x01 #define IPT_ECN_OP_MATCH_ECE 0x10 diff --git a/include/linux/netfilter_ipv4/ipt_esp.h b/include/linux/netfilter_ipv4/ipt_esp.h deleted file mode 100644 index 78296e7eeff..00000000000 --- a/include/linux/netfilter_ipv4/ipt_esp.h +++ /dev/null @@ -1,10 +0,0 @@ -#ifndef _IPT_ESP_H -#define _IPT_ESP_H - -#include <linux/netfilter/xt_esp.h> - -#define ipt_esp xt_esp -#define IPT_ESP_INV_SPI XT_ESP_INV_SPI -#define IPT_ESP_INV_MASK XT_ESP_INV_MASK - -#endif /*_IPT_ESP_H*/ diff --git a/include/linux/netfilter_ipv4/ipt_hashlimit.h b/include/linux/netfilter_ipv4/ipt_hashlimit.h deleted file mode 100644 index 5662120a3d7..00000000000 --- a/include/linux/netfilter_ipv4/ipt_hashlimit.h +++ /dev/null @@ -1,14 +0,0 @@ -#ifndef _IPT_HASHLIMIT_H -#define _IPT_HASHLIMIT_H - -#include <linux/netfilter/xt_hashlimit.h> - -#define IPT_HASHLIMIT_SCALE XT_HASHLIMIT_SCALE -#define IPT_HASHLIMIT_HASH_DIP XT_HASHLIMIT_HASH_DIP -#define IPT_HASHLIMIT_HASH_DPT XT_HASHLIMIT_HASH_DPT -#define IPT_HASHLIMIT_HASH_SIP XT_HASHLIMIT_HASH_SIP -#define IPT_HASHLIMIT_HASH_SPT XT_HASHLIMIT_HASH_SPT - -#define ipt_hashlimit_info xt_hashlimit_info - -#endif /* _IPT_HASHLIMIT_H */ diff --git a/include/linux/netfilter_ipv4/ipt_helper.h b/include/linux/netfilter_ipv4/ipt_helper.h deleted file mode 100644 index 80452c21855..00000000000 --- a/include/linux/netfilter_ipv4/ipt_helper.h +++ /dev/null @@ -1,7 +0,0 @@ -#ifndef _IPT_HELPER_H -#define _IPT_HELPER_H - -#include <linux/netfilter/xt_helper.h> -#define ipt_helper_info xt_helper_info - -#endif /* _IPT_HELPER_H */ diff --git a/include/linux/netfilter_ipv4/ipt_iprange.h b/include/linux/netfilter_ipv4/ipt_iprange.h deleted file mode 100644 index 5f1aebde4d2..00000000000 --- a/include/linux/netfilter_ipv4/ipt_iprange.h +++ /dev/null @@ -1,21 +0,0 @@ -#ifndef _IPT_IPRANGE_H -#define _IPT_IPRANGE_H - -#include <linux/types.h> -#include <linux/netfilter/xt_iprange.h> - -struct ipt_iprange { - /* Inclusive: network order. 
*/ - __be32 min_ip, max_ip; -}; - -struct ipt_iprange_info -{ - struct ipt_iprange src; - struct ipt_iprange dst; - - /* Flags from above */ - u_int8_t flags; -}; - -#endif /* _IPT_IPRANGE_H */ diff --git a/include/linux/netfilter_ipv4/ipt_length.h b/include/linux/netfilter_ipv4/ipt_length.h deleted file mode 100644 index 9b45206ffce..00000000000 --- a/include/linux/netfilter_ipv4/ipt_length.h +++ /dev/null @@ -1,7 +0,0 @@ -#ifndef _IPT_LENGTH_H -#define _IPT_LENGTH_H - -#include <linux/netfilter/xt_length.h> -#define ipt_length_info xt_length_info - -#endif /*_IPT_LENGTH_H*/ diff --git a/include/linux/netfilter_ipv4/ipt_limit.h b/include/linux/netfilter_ipv4/ipt_limit.h deleted file mode 100644 index 92f5cd07bbc..00000000000 --- a/include/linux/netfilter_ipv4/ipt_limit.h +++ /dev/null @@ -1,8 +0,0 @@ -#ifndef _IPT_RATE_H -#define _IPT_RATE_H - -#include <linux/netfilter/xt_limit.h> -#define IPT_LIMIT_SCALE XT_LIMIT_SCALE -#define ipt_rateinfo xt_rateinfo - -#endif /*_IPT_RATE_H*/ diff --git a/include/linux/netfilter_ipv4/ipt_mac.h b/include/linux/netfilter_ipv4/ipt_mac.h deleted file mode 100644 index b186008a3c4..00000000000 --- a/include/linux/netfilter_ipv4/ipt_mac.h +++ /dev/null @@ -1,7 +0,0 @@ -#ifndef _IPT_MAC_H -#define _IPT_MAC_H - -#include <linux/netfilter/xt_mac.h> -#define ipt_mac_info xt_mac_info - -#endif /*_IPT_MAC_H*/ diff --git a/include/linux/netfilter_ipv4/ipt_mark.h b/include/linux/netfilter_ipv4/ipt_mark.h deleted file mode 100644 index bfde67c6122..00000000000 --- a/include/linux/netfilter_ipv4/ipt_mark.h +++ /dev/null @@ -1,9 +0,0 @@ -#ifndef _IPT_MARK_H -#define _IPT_MARK_H - -/* Backwards compatibility for old userspace */ -#include <linux/netfilter/xt_mark.h> - -#define ipt_mark_info xt_mark_info - -#endif /*_IPT_MARK_H*/ diff --git a/include/linux/netfilter_ipv4/ipt_multiport.h b/include/linux/netfilter_ipv4/ipt_multiport.h deleted file mode 100644 index 55fe85eca88..00000000000 --- a/include/linux/netfilter_ipv4/ipt_multiport.h +++ /dev/null @@ -1,15 +0,0 @@ -#ifndef _IPT_MULTIPORT_H -#define _IPT_MULTIPORT_H - -#include <linux/netfilter/xt_multiport.h> - -#define IPT_MULTIPORT_SOURCE XT_MULTIPORT_SOURCE -#define IPT_MULTIPORT_DESTINATION XT_MULTIPORT_DESTINATION -#define IPT_MULTIPORT_EITHER XT_MULTIPORT_EITHER - -#define IPT_MULTI_PORTS XT_MULTI_PORTS - -#define ipt_multiport xt_multiport -#define ipt_multiport_v1 xt_multiport_v1 - -#endif /*_IPT_MULTIPORT_H*/ diff --git a/include/linux/netfilter_ipv4/ipt_owner.h b/include/linux/netfilter_ipv4/ipt_owner.h deleted file mode 100644 index 92f4bdac54e..00000000000 --- a/include/linux/netfilter_ipv4/ipt_owner.h +++ /dev/null @@ -1,20 +0,0 @@ -#ifndef _IPT_OWNER_H -#define _IPT_OWNER_H - -/* match and invert flags */ -#define IPT_OWNER_UID 0x01 -#define IPT_OWNER_GID 0x02 -#define IPT_OWNER_PID 0x04 -#define IPT_OWNER_SID 0x08 -#define IPT_OWNER_COMM 0x10 - -struct ipt_owner_info { - uid_t uid; - gid_t gid; - pid_t pid; - pid_t sid; - char comm[16]; - u_int8_t match, invert; /* flags */ -}; - -#endif /*_IPT_OWNER_H*/ diff --git a/include/linux/netfilter_ipv4/ipt_physdev.h b/include/linux/netfilter_ipv4/ipt_physdev.h deleted file mode 100644 index 2400e7140f2..00000000000 --- a/include/linux/netfilter_ipv4/ipt_physdev.h +++ /dev/null @@ -1,17 +0,0 @@ -#ifndef _IPT_PHYSDEV_H -#define _IPT_PHYSDEV_H - -/* Backwards compatibility for old userspace */ - -#include <linux/netfilter/xt_physdev.h> - -#define IPT_PHYSDEV_OP_IN XT_PHYSDEV_OP_IN -#define IPT_PHYSDEV_OP_OUT XT_PHYSDEV_OP_OUT -#define IPT_PHYSDEV_OP_BRIDGED 
XT_PHYSDEV_OP_BRIDGED -#define IPT_PHYSDEV_OP_ISIN XT_PHYSDEV_OP_ISIN -#define IPT_PHYSDEV_OP_ISOUT XT_PHYSDEV_OP_ISOUT -#define IPT_PHYSDEV_OP_MASK XT_PHYSDEV_OP_MASK - -#define ipt_physdev_info xt_physdev_info - -#endif /*_IPT_PHYSDEV_H*/ diff --git a/include/linux/netfilter_ipv4/ipt_pkttype.h b/include/linux/netfilter_ipv4/ipt_pkttype.h deleted file mode 100644 index ff1fbc949a0..00000000000 --- a/include/linux/netfilter_ipv4/ipt_pkttype.h +++ /dev/null @@ -1,7 +0,0 @@ -#ifndef _IPT_PKTTYPE_H -#define _IPT_PKTTYPE_H - -#include <linux/netfilter/xt_pkttype.h> -#define ipt_pkttype_info xt_pkttype_info - -#endif /*_IPT_PKTTYPE_H*/ diff --git a/include/linux/netfilter_ipv4/ipt_policy.h b/include/linux/netfilter_ipv4/ipt_policy.h deleted file mode 100644 index 1037fb2cd20..00000000000 --- a/include/linux/netfilter_ipv4/ipt_policy.h +++ /dev/null @@ -1,23 +0,0 @@ -#ifndef _IPT_POLICY_H -#define _IPT_POLICY_H - -#include <linux/netfilter/xt_policy.h> - -#define IPT_POLICY_MAX_ELEM XT_POLICY_MAX_ELEM - -/* ipt_policy_flags */ -#define IPT_POLICY_MATCH_IN XT_POLICY_MATCH_IN -#define IPT_POLICY_MATCH_OUT XT_POLICY_MATCH_OUT -#define IPT_POLICY_MATCH_NONE XT_POLICY_MATCH_NONE -#define IPT_POLICY_MATCH_STRICT XT_POLICY_MATCH_STRICT - -/* ipt_policy_modes */ -#define IPT_POLICY_MODE_TRANSPORT XT_POLICY_MODE_TRANSPORT -#define IPT_POLICY_MODE_TUNNEL XT_POLICY_MODE_TUNNEL - -#define ipt_policy_spec xt_policy_spec -#define ipt_policy_addr xt_policy_addr -#define ipt_policy_elem xt_policy_elem -#define ipt_policy_info xt_policy_info - -#endif /* _IPT_POLICY_H */ diff --git a/include/linux/netfilter_ipv4/ipt_recent.h b/include/linux/netfilter_ipv4/ipt_recent.h deleted file mode 100644 index d636cca133c..00000000000 --- a/include/linux/netfilter_ipv4/ipt_recent.h +++ /dev/null @@ -1,21 +0,0 @@ -#ifndef _IPT_RECENT_H -#define _IPT_RECENT_H - -#include <linux/netfilter/xt_recent.h> - -#define ipt_recent_info xt_recent_mtinfo - -enum { - IPT_RECENT_CHECK = XT_RECENT_CHECK, - IPT_RECENT_SET = XT_RECENT_SET, - IPT_RECENT_UPDATE = XT_RECENT_UPDATE, - IPT_RECENT_REMOVE = XT_RECENT_REMOVE, - IPT_RECENT_TTL = XT_RECENT_TTL, - - IPT_RECENT_SOURCE = XT_RECENT_SOURCE, - IPT_RECENT_DEST = XT_RECENT_DEST, - - IPT_RECENT_NAME_LEN = XT_RECENT_NAME_LEN, -}; - -#endif /*_IPT_RECENT_H*/ diff --git a/include/linux/netfilter_ipv4/ipt_sctp.h b/include/linux/netfilter_ipv4/ipt_sctp.h deleted file mode 100644 index 80b3dbacd19..00000000000 --- a/include/linux/netfilter_ipv4/ipt_sctp.h +++ /dev/null @@ -1,105 +0,0 @@ -#ifndef _IPT_SCTP_H_ -#define _IPT_SCTP_H_ - -#define IPT_SCTP_SRC_PORTS 0x01 -#define IPT_SCTP_DEST_PORTS 0x02 -#define IPT_SCTP_CHUNK_TYPES 0x04 - -#define IPT_SCTP_VALID_FLAGS 0x07 - - -struct ipt_sctp_flag_info { - u_int8_t chunktype; - u_int8_t flag; - u_int8_t flag_mask; -}; - -#define IPT_NUM_SCTP_FLAGS 4 - -struct ipt_sctp_info { - u_int16_t dpts[2]; /* Min, Max */ - u_int16_t spts[2]; /* Min, Max */ - - u_int32_t chunkmap[256 / sizeof (u_int32_t)]; /* Bit mask of chunks to be matched according to RFC 2960 */ - -#define SCTP_CHUNK_MATCH_ANY 0x01 /* Match if any of the chunk types are present */ -#define SCTP_CHUNK_MATCH_ALL 0x02 /* Match if all of the chunk types are present */ -#define SCTP_CHUNK_MATCH_ONLY 0x04 /* Match if these are the only chunk types present */ - - u_int32_t chunk_match_type; - struct ipt_sctp_flag_info flag_info[IPT_NUM_SCTP_FLAGS]; - int flag_count; - - u_int32_t flags; - u_int32_t invflags; -}; - -#define bytes(type) (sizeof(type) * 8) - -#define SCTP_CHUNKMAP_SET(chunkmap, type) \ - 
do { \ - chunkmap[type / bytes(u_int32_t)] |= \ - 1 << (type % bytes(u_int32_t)); \ - } while (0) - -#define SCTP_CHUNKMAP_CLEAR(chunkmap, type) \ - do { \ - chunkmap[type / bytes(u_int32_t)] &= \ - ~(1 << (type % bytes(u_int32_t))); \ - } while (0) - -#define SCTP_CHUNKMAP_IS_SET(chunkmap, type) \ -({ \ - (chunkmap[type / bytes (u_int32_t)] & \ - (1 << (type % bytes (u_int32_t)))) ? 1: 0; \ -}) - -#define SCTP_CHUNKMAP_RESET(chunkmap) \ - do { \ - int i; \ - for (i = 0; i < ARRAY_SIZE(chunkmap); i++) \ - chunkmap[i] = 0; \ - } while (0) - -#define SCTP_CHUNKMAP_SET_ALL(chunkmap) \ - do { \ - int i; \ - for (i = 0; i < ARRAY_SIZE(chunkmap); i++) \ - chunkmap[i] = ~0; \ - } while (0) - -#define SCTP_CHUNKMAP_COPY(destmap, srcmap) \ - do { \ - int i; \ - for (i = 0; i < ARRAY_SIZE(chunkmap); i++) \ - destmap[i] = srcmap[i]; \ - } while (0) - -#define SCTP_CHUNKMAP_IS_CLEAR(chunkmap) \ -({ \ - int i; \ - int flag = 1; \ - for (i = 0; i < ARRAY_SIZE(chunkmap); i++) { \ - if (chunkmap[i]) { \ - flag = 0; \ - break; \ - } \ - } \ - flag; \ -}) - -#define SCTP_CHUNKMAP_IS_ALL_SET(chunkmap) \ -({ \ - int i; \ - int flag = 1; \ - for (i = 0; i < ARRAY_SIZE(chunkmap); i++) { \ - if (chunkmap[i] != ~0) { \ - flag = 0; \ - break; \ - } \ - } \ - flag; \ -}) - -#endif /* _IPT_SCTP_H_ */ - diff --git a/include/linux/netfilter_ipv4/ipt_state.h b/include/linux/netfilter_ipv4/ipt_state.h deleted file mode 100644 index a44a99cc28c..00000000000 --- a/include/linux/netfilter_ipv4/ipt_state.h +++ /dev/null @@ -1,15 +0,0 @@ -#ifndef _IPT_STATE_H -#define _IPT_STATE_H - -/* Backwards compatibility for old userspace */ - -#include <linux/netfilter/xt_state.h> - -#define IPT_STATE_BIT XT_STATE_BIT -#define IPT_STATE_INVALID XT_STATE_INVALID - -#define IPT_STATE_UNTRACKED XT_STATE_UNTRACKED - -#define ipt_state_info xt_state_info - -#endif /*_IPT_STATE_H*/ diff --git a/include/linux/netfilter_ipv4/ipt_string.h b/include/linux/netfilter_ipv4/ipt_string.h deleted file mode 100644 index c26de305990..00000000000 --- a/include/linux/netfilter_ipv4/ipt_string.h +++ /dev/null @@ -1,10 +0,0 @@ -#ifndef _IPT_STRING_H -#define _IPT_STRING_H - -#include <linux/netfilter/xt_string.h> - -#define IPT_STRING_MAX_PATTERN_SIZE XT_STRING_MAX_PATTERN_SIZE -#define IPT_STRING_MAX_ALGO_NAME_SIZE XT_STRING_MAX_ALGO_NAME_SIZE -#define ipt_string_info xt_string_info - -#endif /*_IPT_STRING_H*/ diff --git a/include/linux/netfilter_ipv4/ipt_tcpmss.h b/include/linux/netfilter_ipv4/ipt_tcpmss.h deleted file mode 100644 index 18bbc8e8e00..00000000000 --- a/include/linux/netfilter_ipv4/ipt_tcpmss.h +++ /dev/null @@ -1,7 +0,0 @@ -#ifndef _IPT_TCPMSS_MATCH_H -#define _IPT_TCPMSS_MATCH_H - -#include <linux/netfilter/xt_tcpmss.h> -#define ipt_tcpmss_match_info xt_tcpmss_match_info - -#endif /*_IPT_TCPMSS_MATCH_H*/ diff --git a/include/linux/netfilter_ipv4/ipt_tos.h b/include/linux/netfilter_ipv4/ipt_tos.h deleted file mode 100644 index a21f5df23c5..00000000000 --- a/include/linux/netfilter_ipv4/ipt_tos.h +++ /dev/null @@ -1,13 +0,0 @@ -#ifndef _IPT_TOS_H -#define _IPT_TOS_H - -struct ipt_tos_info { - u_int8_t tos; - u_int8_t invert; -}; - -#ifndef IPTOS_NORMALSVC -#define IPTOS_NORMALSVC 0 -#endif - -#endif /*_IPT_TOS_H*/ diff --git a/include/linux/netfilter_ipv6/Kbuild b/include/linux/netfilter_ipv6/Kbuild index 8887a5fcd1d..e864eaee9e5 100644 --- a/include/linux/netfilter_ipv6/Kbuild +++ b/include/linux/netfilter_ipv6/Kbuild @@ -1,21 +1,12 @@ header-y += ip6t_HL.h header-y += ip6t_LOG.h -header-y += ip6t_MARK.h header-y += ip6t_REJECT.h header-y 
+= ip6t_ah.h -header-y += ip6t_esp.h header-y += ip6t_frag.h -header-y += ip6t_hl.h header-y += ip6t_ipv6header.h -header-y += ip6t_length.h -header-y += ip6t_limit.h -header-y += ip6t_mac.h -header-y += ip6t_mark.h -header-y += ip6t_multiport.h +header-y += ip6t_hl.h +header-y += ip6t_mh.h header-y += ip6t_opts.h -header-y += ip6t_owner.h -header-y += ip6t_physdev.h -header-y += ip6t_policy.h header-y += ip6t_rt.h unifdef-y += ip6_tables.h diff --git a/include/linux/netfilter_ipv6/ip6_tables.h b/include/linux/netfilter_ipv6/ip6_tables.h index 1089e33cf63..a64e1451ac3 100644 --- a/include/linux/netfilter_ipv6/ip6_tables.h +++ b/include/linux/netfilter_ipv6/ip6_tables.h @@ -306,7 +306,7 @@ ip6t_get_target(struct ip6t_entry *e) extern void ip6t_init(void) __init; extern struct xt_table *ip6t_register_table(struct net *net, - struct xt_table *table, + const struct xt_table *table, const struct ip6t_replace *repl); extern void ip6t_unregister_table(struct xt_table *table); extern unsigned int ip6t_do_table(struct sk_buff *skb, diff --git a/include/linux/netfilter_ipv6/ip6t_MARK.h b/include/linux/netfilter_ipv6/ip6t_MARK.h deleted file mode 100644 index 7cf629a8ab9..00000000000 --- a/include/linux/netfilter_ipv6/ip6t_MARK.h +++ /dev/null @@ -1,9 +0,0 @@ -#ifndef _IP6T_MARK_H_target -#define _IP6T_MARK_H_target - -/* Backwards compatibility for old userspace */ -#include <linux/netfilter/xt_MARK.h> - -#define ip6t_mark_target_info xt_mark_target_info - -#endif /*_IP6T_MARK_H_target*/ diff --git a/include/linux/netfilter_ipv6/ip6t_esp.h b/include/linux/netfilter_ipv6/ip6t_esp.h deleted file mode 100644 index f62eaf53c16..00000000000 --- a/include/linux/netfilter_ipv6/ip6t_esp.h +++ /dev/null @@ -1,10 +0,0 @@ -#ifndef _IP6T_ESP_H -#define _IP6T_ESP_H - -#include <linux/netfilter/xt_esp.h> - -#define ip6t_esp xt_esp -#define IP6T_ESP_INV_SPI XT_ESP_INV_SPI -#define IP6T_ESP_INV_MASK XT_ESP_INV_MASK - -#endif /*_IP6T_ESP_H*/ diff --git a/include/linux/netfilter_ipv6/ip6t_length.h b/include/linux/netfilter_ipv6/ip6t_length.h deleted file mode 100644 index 9e9689d03ed..00000000000 --- a/include/linux/netfilter_ipv6/ip6t_length.h +++ /dev/null @@ -1,8 +0,0 @@ -#ifndef _IP6T_LENGTH_H -#define _IP6T_LENGTH_H - -#include <linux/netfilter/xt_length.h> -#define ip6t_length_info xt_length_info - -#endif /*_IP6T_LENGTH_H*/ - diff --git a/include/linux/netfilter_ipv6/ip6t_limit.h b/include/linux/netfilter_ipv6/ip6t_limit.h deleted file mode 100644 index 487e5ea342c..00000000000 --- a/include/linux/netfilter_ipv6/ip6t_limit.h +++ /dev/null @@ -1,8 +0,0 @@ -#ifndef _IP6T_RATE_H -#define _IP6T_RATE_H - -#include <linux/netfilter/xt_limit.h> -#define IP6T_LIMIT_SCALE XT_LIMIT_SCALE -#define ip6t_rateinfo xt_rateinfo - -#endif /*_IP6T_RATE_H*/ diff --git a/include/linux/netfilter_ipv6/ip6t_mac.h b/include/linux/netfilter_ipv6/ip6t_mac.h deleted file mode 100644 index ac58e83e942..00000000000 --- a/include/linux/netfilter_ipv6/ip6t_mac.h +++ /dev/null @@ -1,7 +0,0 @@ -#ifndef _IP6T_MAC_H -#define _IP6T_MAC_H - -#include <linux/netfilter/xt_mac.h> -#define ip6t_mac_info xt_mac_info - -#endif /*_IP6T_MAC_H*/ diff --git a/include/linux/netfilter_ipv6/ip6t_mark.h b/include/linux/netfilter_ipv6/ip6t_mark.h deleted file mode 100644 index ff204951ddc..00000000000 --- a/include/linux/netfilter_ipv6/ip6t_mark.h +++ /dev/null @@ -1,9 +0,0 @@ -#ifndef _IP6T_MARK_H -#define _IP6T_MARK_H - -/* Backwards compatibility for old userspace */ -#include <linux/netfilter/xt_mark.h> - -#define ip6t_mark_info xt_mark_info - -#endif 
/*_IPT_MARK_H*/ diff --git a/include/linux/netfilter_ipv6/ip6t_multiport.h b/include/linux/netfilter_ipv6/ip6t_multiport.h deleted file mode 100644 index 042c92661ce..00000000000 --- a/include/linux/netfilter_ipv6/ip6t_multiport.h +++ /dev/null @@ -1,14 +0,0 @@ -#ifndef _IP6T_MULTIPORT_H -#define _IP6T_MULTIPORT_H - -#include <linux/netfilter/xt_multiport.h> - -#define IP6T_MULTIPORT_SOURCE XT_MULTIPORT_SOURCE -#define IP6T_MULTIPORT_DESTINATION XT_MULTIPORT_DESTINATION -#define IP6T_MULTIPORT_EITHER XT_MULTIPORT_EITHER - -#define IP6T_MULTI_PORTS XT_MULTI_PORTS - -#define ip6t_multiport xt_multiport - -#endif /*_IP6T_MULTIPORT_H*/ diff --git a/include/linux/netfilter_ipv6/ip6t_owner.h b/include/linux/netfilter_ipv6/ip6t_owner.h deleted file mode 100644 index 19937da3d10..00000000000 --- a/include/linux/netfilter_ipv6/ip6t_owner.h +++ /dev/null @@ -1,18 +0,0 @@ -#ifndef _IP6T_OWNER_H -#define _IP6T_OWNER_H - -/* match and invert flags */ -#define IP6T_OWNER_UID 0x01 -#define IP6T_OWNER_GID 0x02 -#define IP6T_OWNER_PID 0x04 -#define IP6T_OWNER_SID 0x08 - -struct ip6t_owner_info { - uid_t uid; - gid_t gid; - pid_t pid; - pid_t sid; - u_int8_t match, invert; /* flags */ -}; - -#endif /*_IPT_OWNER_H*/ diff --git a/include/linux/netfilter_ipv6/ip6t_physdev.h b/include/linux/netfilter_ipv6/ip6t_physdev.h deleted file mode 100644 index c161c0a81b5..00000000000 --- a/include/linux/netfilter_ipv6/ip6t_physdev.h +++ /dev/null @@ -1,17 +0,0 @@ -#ifndef _IP6T_PHYSDEV_H -#define _IP6T_PHYSDEV_H - -/* Backwards compatibility for old userspace */ - -#include <linux/netfilter/xt_physdev.h> - -#define IP6T_PHYSDEV_OP_IN XT_PHYSDEV_OP_IN -#define IP6T_PHYSDEV_OP_OUT XT_PHYSDEV_OP_OUT -#define IP6T_PHYSDEV_OP_BRIDGED XT_PHYSDEV_OP_BRIDGED -#define IP6T_PHYSDEV_OP_ISIN XT_PHYSDEV_OP_ISIN -#define IP6T_PHYSDEV_OP_ISOUT XT_PHYSDEV_OP_ISOUT -#define IP6T_PHYSDEV_OP_MASK XT_PHYSDEV_OP_MASK - -#define ip6t_physdev_info xt_physdev_info - -#endif /*_IP6T_PHYSDEV_H*/ diff --git a/include/linux/netfilter_ipv6/ip6t_policy.h b/include/linux/netfilter_ipv6/ip6t_policy.h deleted file mode 100644 index b1c449d7ec8..00000000000 --- a/include/linux/netfilter_ipv6/ip6t_policy.h +++ /dev/null @@ -1,23 +0,0 @@ -#ifndef _IP6T_POLICY_H -#define _IP6T_POLICY_H - -#include <linux/netfilter/xt_policy.h> - -#define IP6T_POLICY_MAX_ELEM XT_POLICY_MAX_ELEM - -/* ip6t_policy_flags */ -#define IP6T_POLICY_MATCH_IN XT_POLICY_MATCH_IN -#define IP6T_POLICY_MATCH_OUT XT_POLICY_MATCH_OUT -#define IP6T_POLICY_MATCH_NONE XT_POLICY_MATCH_NONE -#define IP6T_POLICY_MATCH_STRICT XT_POLICY_MATCH_STRICT - -/* ip6t_policy_modes */ -#define IP6T_POLICY_MODE_TRANSPORT XT_POLICY_MODE_TRANSPORT -#define IP6T_POLICY_MODE_TUNNEL XT_POLICY_MODE_TUNNEL - -#define ip6t_policy_spec xt_policy_spec -#define ip6t_policy_addr xt_policy_addr -#define ip6t_policy_elem xt_policy_elem -#define ip6t_policy_info xt_policy_info - -#endif /* _IP6T_POLICY_H */ diff --git a/include/linux/netlink.h b/include/linux/netlink.h index 51b09a1f46c..080f6ba9e73 100644 --- a/include/linux/netlink.h +++ b/include/linux/netlink.h @@ -103,6 +103,8 @@ struct nlmsgerr #define NETLINK_ADD_MEMBERSHIP 1 #define NETLINK_DROP_MEMBERSHIP 2 #define NETLINK_PKTINFO 3 +#define NETLINK_BROADCAST_ERROR 4 +#define NETLINK_NO_ENOBUFS 5 struct nl_pktinfo { @@ -174,12 +176,16 @@ struct netlink_skb_parms #define NETLINK_CREDS(skb) (&NETLINK_CB((skb)).creds) +extern void netlink_table_grab(void); +extern void netlink_table_ungrab(void); + extern struct sock *netlink_kernel_create(struct net *net, int 
unit,unsigned int groups, void (*input)(struct sk_buff *skb), struct mutex *cb_mutex, struct module *module); extern void netlink_kernel_release(struct sock *sk); +extern int __netlink_change_ngroups(struct sock *sk, unsigned int groups); extern int netlink_change_ngroups(struct sock *sk, unsigned int groups); extern void netlink_clear_multicast_users(struct sock *sk, unsigned int group); extern void netlink_ack(struct sk_buff *in_skb, struct nlmsghdr *nlh, int err); @@ -215,12 +221,13 @@ int netlink_sendskb(struct sock *sk, struct sk_buff *skb); struct netlink_callback { - struct sk_buff *skb; - struct nlmsghdr *nlh; - int (*dump)(struct sk_buff * skb, struct netlink_callback *cb); - int (*done)(struct netlink_callback *cb); - int family; - long args[6]; + struct sk_buff *skb; + const struct nlmsghdr *nlh; + int (*dump)(struct sk_buff * skb, + struct netlink_callback *cb); + int (*done)(struct netlink_callback *cb); + int family; + long args[6]; }; struct netlink_notify @@ -256,7 +263,7 @@ __nlmsg_put(struct sk_buff *skb, u32 pid, u32 seq, int type, int len, int flags) NLMSG_NEW(skb, pid, seq, type, len, 0) extern int netlink_dump_start(struct sock *ssk, struct sk_buff *skb, - struct nlmsghdr *nlh, + const struct nlmsghdr *nlh, int (*dump)(struct sk_buff *skb, struct netlink_callback*), int (*done)(struct netlink_callback*)); diff --git a/include/linux/netpoll.h b/include/linux/netpoll.h index e38d3c9dccd..2524267210d 100644 --- a/include/linux/netpoll.h +++ b/include/linux/netpoll.h @@ -18,7 +18,7 @@ struct netpoll { const char *name; void (*rx_hook)(struct netpoll *, int, char *, int); - u32 local_ip, remote_ip; + __be32 local_ip, remote_ip; u16 local_port, remote_port; u8 remote_mac[ETH_ALEN]; }; @@ -63,6 +63,13 @@ static inline int netpoll_rx(struct sk_buff *skb) return ret; } +static inline int netpoll_rx_on(struct sk_buff *skb) +{ + struct netpoll_info *npinfo = skb->dev->npinfo; + + return npinfo && (npinfo->rx_np || npinfo->rx_flags); +} + static inline int netpoll_receive_skb(struct sk_buff *skb) { if (!list_empty(&skb->dev->napi_list)) @@ -99,6 +106,10 @@ static inline int netpoll_rx(struct sk_buff *skb) { return 0; } +static inline int netpoll_rx_on(struct sk_buff *skb) +{ + return 0; +} static inline int netpoll_receive_skb(struct sk_buff *skb) { return 0; diff --git a/include/linux/nfs.h b/include/linux/nfs.h index 54af92c1c70..f387919bbc5 100644 --- a/include/linux/nfs.h +++ b/include/linux/nfs.h @@ -25,8 +25,9 @@ #define NFSMODE_SOCK 0140000 #define NFSMODE_FIFO 0010000 -#define NFS_MNT_PROGRAM 100005 -#define NFS_MNT_PORT 627 +#define NFS_MNT_PROGRAM 100005 +#define NFS_MNT_VERSION 1 +#define NFS_MNT3_VERSION 3 /* * NFS stats. 
The good thing with these values is that NFSv3 errors are @@ -109,7 +110,6 @@ NFSERR_FILE_OPEN = 10046, /* v4 */ NFSERR_ADMIN_REVOKED = 10047, /* v4 */ NFSERR_CB_PATH_DOWN = 10048, /* v4 */ - NFSERR_REPLAY_ME = 10049 /* v4 */ }; /* NFSv2 file types - beware, these are not the same in NFSv3 */ diff --git a/include/linux/nfs2.h b/include/linux/nfs2.h index 0ed9517138f..fde24b30cc9 100644 --- a/include/linux/nfs2.h +++ b/include/linux/nfs2.h @@ -64,11 +64,4 @@ struct nfs2_fh { #define NFSPROC_READDIR 16 #define NFSPROC_STATFS 17 -#define NFS_MNT_PROGRAM 100005 -#define NFS_MNT_VERSION 1 -#define MNTPROC_NULL 0 -#define MNTPROC_MNT 1 -#define MNTPROC_UMNT 3 -#define MNTPROC_UMNTALL 4 - #endif /* _LINUX_NFS2_H */ diff --git a/include/linux/nfs3.h b/include/linux/nfs3.h index 539f3b550ea..ac33806ec7f 100644 --- a/include/linux/nfs3.h +++ b/include/linux/nfs3.h @@ -88,12 +88,7 @@ struct nfs3_fh { #define NFS3PROC_PATHCONF 20 #define NFS3PROC_COMMIT 21 -#define NFS_MNT3_PROGRAM 100005 #define NFS_MNT3_VERSION 3 -#define MOUNTPROC3_NULL 0 -#define MOUNTPROC3_MNT 1 -#define MOUNTPROC3_UMNT 3 -#define MOUNTPROC3_UMNTALL 4 #if defined(__KERNEL__) diff --git a/include/linux/nfs4.h b/include/linux/nfs4.h index b912311a56b..33b283601f6 100644 --- a/include/linux/nfs4.h +++ b/include/linux/nfs4.h @@ -21,6 +21,8 @@ #define NFS4_FHSIZE 128 #define NFS4_MAXPATHLEN PATH_MAX #define NFS4_MAXNAMLEN NAME_MAX +#define NFS4_OPAQUE_LIMIT 1024 +#define NFS4_MAX_SESSIONID_LEN 16 #define NFS4_ACCESS_READ 0x0001 #define NFS4_ACCESS_LOOKUP 0x0002 @@ -38,6 +40,7 @@ #define NFS4_OPEN_RESULT_CONFIRM 0x0002 #define NFS4_OPEN_RESULT_LOCKTYPE_POSIX 0x0004 +#define NFS4_SHARE_ACCESS_MASK 0x000F #define NFS4_SHARE_ACCESS_READ 0x0001 #define NFS4_SHARE_ACCESS_WRITE 0x0002 #define NFS4_SHARE_ACCESS_BOTH 0x0003 @@ -45,6 +48,19 @@ #define NFS4_SHARE_DENY_WRITE 0x0002 #define NFS4_SHARE_DENY_BOTH 0x0003 +/* nfs41 */ +#define NFS4_SHARE_WANT_MASK 0xFF00 +#define NFS4_SHARE_WANT_NO_PREFERENCE 0x0000 +#define NFS4_SHARE_WANT_READ_DELEG 0x0100 +#define NFS4_SHARE_WANT_WRITE_DELEG 0x0200 +#define NFS4_SHARE_WANT_ANY_DELEG 0x0300 +#define NFS4_SHARE_WANT_NO_DELEG 0x0400 +#define NFS4_SHARE_WANT_CANCEL 0x0500 + +#define NFS4_SHARE_WHEN_MASK 0xF0000 +#define NFS4_SHARE_SIGNAL_DELEG_WHEN_RESRC_AVAIL 0x10000 +#define NFS4_SHARE_PUSH_DELEG_WHEN_UNCONTENDED 0x20000 + #define NFS4_SET_TO_SERVER_TIME 0 #define NFS4_SET_TO_CLIENT_TIME 1 @@ -88,8 +104,43 @@ #define NFS4_ACE_GENERIC_EXECUTE 0x001200A0 #define NFS4_ACE_MASK_ALL 0x001F01FF +#define EXCHGID4_FLAG_SUPP_MOVED_REFER 0x00000001 +#define EXCHGID4_FLAG_SUPP_MOVED_MIGR 0x00000002 +#define EXCHGID4_FLAG_USE_NON_PNFS 0x00010000 +#define EXCHGID4_FLAG_USE_PNFS_MDS 0x00020000 +#define EXCHGID4_FLAG_USE_PNFS_DS 0x00040000 +#define EXCHGID4_FLAG_UPD_CONFIRMED_REC_A 0x40000000 +#define EXCHGID4_FLAG_CONFIRMED_R 0x80000000 +/* + * Since the validity of these bits depends on whether + * they're set in the argument or response, have separate + * invalid flag masks for arg (_A) and resp (_R). 
+ */ +#define EXCHGID4_FLAG_MASK_A 0x40070003 +#define EXCHGID4_FLAG_MASK_R 0x80070003 + +#define SEQ4_STATUS_CB_PATH_DOWN 0x00000001 +#define SEQ4_STATUS_CB_GSS_CONTEXTS_EXPIRING 0x00000002 +#define SEQ4_STATUS_CB_GSS_CONTEXTS_EXPIRED 0x00000004 +#define SEQ4_STATUS_EXPIRED_ALL_STATE_REVOKED 0x00000008 +#define SEQ4_STATUS_EXPIRED_SOME_STATE_REVOKED 0x00000010 +#define SEQ4_STATUS_ADMIN_STATE_REVOKED 0x00000020 +#define SEQ4_STATUS_RECALLABLE_STATE_REVOKED 0x00000040 +#define SEQ4_STATUS_LEASE_MOVED 0x00000080 +#define SEQ4_STATUS_RESTART_RECLAIM_NEEDED 0x00000100 + #define NFS4_MAX_UINT64 (~(u64)0) +/* An NFS4 sessions server must support at least NFS4_MAX_OPS operations. + * If a compound requires more operations, adjust NFS4_MAX_OPS accordingly. + */ +#define NFS4_MAX_OPS 8 + +/* Our NFS4 client back channel server only wants the cb_sequene and the + * actual operation per compound + */ +#define NFS4_MAX_BACK_CHANNEL_OPS 2 + enum nfs4_acl_whotype { NFS4_ACL_WHO_NAMED = 0, NFS4_ACL_WHO_OWNER, @@ -154,6 +205,28 @@ enum nfs_opnum4 { OP_VERIFY = 37, OP_WRITE = 38, OP_RELEASE_LOCKOWNER = 39, + + /* nfs41 */ + OP_BACKCHANNEL_CTL = 40, + OP_BIND_CONN_TO_SESSION = 41, + OP_EXCHANGE_ID = 42, + OP_CREATE_SESSION = 43, + OP_DESTROY_SESSION = 44, + OP_FREE_STATEID = 45, + OP_GET_DIR_DELEGATION = 46, + OP_GETDEVICEINFO = 47, + OP_GETDEVICELIST = 48, + OP_LAYOUTCOMMIT = 49, + OP_LAYOUTGET = 50, + OP_LAYOUTRETURN = 51, + OP_SECINFO_NO_NAME = 52, + OP_SEQUENCE = 53, + OP_SET_SSV = 54, + OP_TEST_STATEID = 55, + OP_WANT_DELEGATION = 56, + OP_DESTROY_CLIENTID = 57, + OP_RECLAIM_COMPLETE = 58, + OP_ILLEGAL = 10044, }; @@ -230,7 +303,48 @@ enum nfsstat4 { NFS4ERR_DEADLOCK = 10045, NFS4ERR_FILE_OPEN = 10046, NFS4ERR_ADMIN_REVOKED = 10047, - NFS4ERR_CB_PATH_DOWN = 10048 + NFS4ERR_CB_PATH_DOWN = 10048, + + /* nfs41 */ + NFS4ERR_BADIOMODE = 10049, + NFS4ERR_BADLAYOUT = 10050, + NFS4ERR_BAD_SESSION_DIGEST = 10051, + NFS4ERR_BADSESSION = 10052, + NFS4ERR_BADSLOT = 10053, + NFS4ERR_COMPLETE_ALREADY = 10054, + NFS4ERR_CONN_NOT_BOUND_TO_SESSION = 10055, + NFS4ERR_DELEG_ALREADY_WANTED = 10056, + NFS4ERR_BACK_CHAN_BUSY = 10057, /* backchan reqs outstanding */ + NFS4ERR_LAYOUTTRYLATER = 10058, + NFS4ERR_LAYOUTUNAVAILABLE = 10059, + NFS4ERR_NOMATCHING_LAYOUT = 10060, + NFS4ERR_RECALLCONFLICT = 10061, + NFS4ERR_UNKNOWN_LAYOUTTYPE = 10062, + NFS4ERR_SEQ_MISORDERED = 10063, /* unexpected seq.id in req */ + NFS4ERR_SEQUENCE_POS = 10064, /* [CB_]SEQ. op not 1st op */ + NFS4ERR_REQ_TOO_BIG = 10065, /* request too big */ + NFS4ERR_REP_TOO_BIG = 10066, /* reply too big */ + NFS4ERR_REP_TOO_BIG_TO_CACHE = 10067, /* rep. not all cached */ + NFS4ERR_RETRY_UNCACHED_REP = 10068, /* retry & rep. uncached */ + NFS4ERR_UNSAFE_COMPOUND = 10069, /* retry/recovery too hard */ + NFS4ERR_TOO_MANY_OPS = 10070, /* too many ops in [CB_]COMP */ + NFS4ERR_OP_NOT_IN_SESSION = 10071, /* op needs [CB_]SEQ. op */ + NFS4ERR_HASH_ALG_UNSUPP = 10072, /* hash alg. not supp. */ + /* Error 10073 is unused. 
*/ + NFS4ERR_CLIENTID_BUSY = 10074, /* clientid has state */ + NFS4ERR_PNFS_IO_HOLE = 10075, /* IO to _SPARSE file hole */ + NFS4ERR_SEQ_FALSE_RETRY = 10076, /* retry not origional */ + NFS4ERR_BAD_HIGH_SLOT = 10077, /* sequence arg bad */ + NFS4ERR_DEADSESSION = 10078, /* persistent session dead */ + NFS4ERR_ENCR_ALG_UNSUPP = 10079, /* SSV alg mismatch */ + NFS4ERR_PNFS_NO_LAYOUT = 10080, /* direct I/O with no layout */ + NFS4ERR_NOT_ONLY_OP = 10081, /* bad compound */ + NFS4ERR_WRONG_CRED = 10082, /* permissions:state change */ + NFS4ERR_WRONG_TYPE = 10083, /* current operation mismatch */ + NFS4ERR_DIRDELEG_UNAVAIL = 10084, /* no directory delegation */ + NFS4ERR_REJECT_DELEG = 10085, /* on callback */ + NFS4ERR_RETURNCONFLICT = 10086, /* outstanding layoutreturn */ + NFS4ERR_DELEG_REVOKED = 10087, /* deleg./layout revoked */ }; /* @@ -265,7 +379,13 @@ enum opentype4 { enum createmode4 { NFS4_CREATE_UNCHECKED = 0, NFS4_CREATE_GUARDED = 1, - NFS4_CREATE_EXCLUSIVE = 2 + NFS4_CREATE_EXCLUSIVE = 2, + /* + * New to NFSv4.1. If session is persistent, + * GUARDED4 MUST be used. Otherwise, use + * EXCLUSIVE4_1 instead of EXCLUSIVE4. + */ + NFS4_CREATE_EXCLUSIVE4_1 = 3 }; enum limit_by4 { @@ -301,6 +421,8 @@ enum lock_type4 { #define FATTR4_WORD0_UNIQUE_HANDLES (1UL << 9) #define FATTR4_WORD0_LEASE_TIME (1UL << 10) #define FATTR4_WORD0_RDATTR_ERROR (1UL << 11) +/* Mandatory in NFSv4.1 */ +#define FATTR4_WORD2_SUPPATTR_EXCLCREAT (1UL << 11) /* Recommended Attributes */ #define FATTR4_WORD0_ACL (1UL << 12) @@ -350,7 +472,15 @@ enum lock_type4 { #define NFSPROC4_NULL 0 #define NFSPROC4_COMPOUND 1 +#define NFS4_VERSION 4 #define NFS4_MINOR_VERSION 0 + +#if defined(CONFIG_NFS_V4_1) +#define NFS4_MAX_MINOR_VERSION 1 +#else +#define NFS4_MAX_MINOR_VERSION 0 +#endif /* CONFIG_NFS_V4_1 */ + #define NFS4_DEBUG 1 /* Index of predefined Linux client operations */ @@ -391,6 +521,29 @@ enum { NFSPROC4_CLNT_GETACL, NFSPROC4_CLNT_SETACL, NFSPROC4_CLNT_FS_LOCATIONS, + + /* nfs41 */ + NFSPROC4_CLNT_EXCHANGE_ID, + NFSPROC4_CLNT_CREATE_SESSION, + NFSPROC4_CLNT_DESTROY_SESSION, + NFSPROC4_CLNT_SEQUENCE, + NFSPROC4_CLNT_GET_LEASE_TIME, +}; + +/* nfs41 types */ +struct nfs4_sessionid { + unsigned char data[NFS4_MAX_SESSIONID_LEN]; +}; + +/* Create Session Flags */ +#define SESSION4_PERSIST 0x001 +#define SESSION4_BACK_CHAN 0x002 +#define SESSION4_RDMA 0x004 + +enum state_protect_how4 { + SP4_NONE = 0, + SP4_MACH_CRED = 1, + SP4_SSV = 2 }; #endif diff --git a/include/linux/nfs_fs.h b/include/linux/nfs_fs.h index db867b04ac3..f6b90240dd4 100644 --- a/include/linux/nfs_fs.h +++ b/include/linux/nfs_fs.h @@ -166,8 +166,7 @@ struct nfs_inode { */ struct radix_tree_root nfs_page_tree; - unsigned long ncommit, - npages; + unsigned long npages; /* Open contexts for shared mmap writes */ struct list_head open_files; @@ -186,6 +185,9 @@ struct nfs_inode { fmode_t delegation_state; struct rw_semaphore rwsem; #endif /* CONFIG_NFS_V4*/ +#ifdef CONFIG_NFS_FSCACHE + struct fscache_cookie *fscache; +#endif struct inode vfs_inode; }; @@ -207,6 +209,9 @@ struct nfs_inode { #define NFS_INO_STALE (1) /* possible stale inode */ #define NFS_INO_ACL_LRU_SET (2) /* Inode is on the LRU list */ #define NFS_INO_MOUNTPOINT (3) /* inode is remote mountpoint */ +#define NFS_INO_FLUSHING (4) /* inode is flushing out data */ +#define NFS_INO_FSCACHE (5) /* inode can be cached by FS-Cache */ +#define NFS_INO_FSCACHE_LOCK (6) /* FS-Cache cookie management lock */ static inline struct nfs_inode *NFS_I(const struct inode *inode) { @@ -260,6 +265,11 @@ 
static inline int NFS_STALE(const struct inode *inode) return test_bit(NFS_INO_STALE, &NFS_I(inode)->flags); } +static inline int NFS_FSCACHE(const struct inode *inode) +{ + return test_bit(NFS_INO_FSCACHE, &NFS_I(inode)->flags); +} + static inline __u64 NFS_FILEID(const struct inode *inode) { return NFS_I(inode)->fileid; @@ -415,7 +425,7 @@ extern const struct inode_operations nfs_dir_inode_operations; extern const struct inode_operations nfs3_dir_inode_operations; #endif /* CONFIG_NFS_V3 */ extern const struct file_operations nfs_dir_operations; -extern struct dentry_operations nfs_dentry_operations; +extern const struct dentry_operations nfs_dentry_operations; extern void nfs_force_lookup_revalidate(struct inode *dir); extern int nfs_instantiate(struct dentry *dentry, struct nfs_fh *fh, struct nfs_fattr *fattr); @@ -463,7 +473,6 @@ extern int nfs_writepages(struct address_space *, struct writeback_control *); extern int nfs_flush_incompatible(struct file *file, struct page *page); extern int nfs_updatepage(struct file *, struct page *, unsigned int, unsigned int); extern int nfs_writeback_done(struct rpc_task *, struct nfs_write_data *); -extern void nfs_writedata_release(void *); /* * Try to write back everything synchronously (but check the @@ -478,7 +487,6 @@ extern int nfs_wb_page_cancel(struct inode *inode, struct page* page); extern int nfs_commit_inode(struct inode *, int); extern struct nfs_write_data *nfs_commitdata_alloc(void); extern void nfs_commit_free(struct nfs_write_data *wdata); -extern void nfs_commitdata_release(void *wdata); #else static inline int nfs_commit_inode(struct inode *inode, int how) @@ -497,6 +505,7 @@ nfs_have_writebacks(struct inode *inode) * Allocate nfs_write_data structures */ extern struct nfs_write_data *nfs_writedata_alloc(unsigned int npages); +extern void nfs_writedata_free(struct nfs_write_data *); /* * linux/fs/nfs/read.c @@ -505,12 +514,14 @@ extern int nfs_readpage(struct file *, struct page *); extern int nfs_readpages(struct file *, struct address_space *, struct list_head *, unsigned); extern int nfs_readpage_result(struct rpc_task *, struct nfs_read_data *); -extern void nfs_readdata_release(void *data); +extern int nfs_readpage_async(struct nfs_open_context *, struct inode *, + struct page *); /* * Allocate nfs_read_data structures */ extern struct nfs_read_data *nfs_readdata_alloc(unsigned int npages); +extern void nfs_readdata_free(struct nfs_read_data *); /* * linux/fs/nfs3proc.c @@ -583,6 +594,7 @@ extern void * nfs_root_data(void); #define NFSDBG_CALLBACK 0x0100 #define NFSDBG_CLIENT 0x0200 #define NFSDBG_MOUNT 0x0400 +#define NFSDBG_FSCACHE 0x0800 #define NFSDBG_ALL 0xFFFF #ifdef __KERNEL__ diff --git a/include/linux/nfs_fs_sb.h b/include/linux/nfs_fs_sb.h index 9bb81aec91c..320569eabe3 100644 --- a/include/linux/nfs_fs_sb.h +++ b/include/linux/nfs_fs_sb.h @@ -4,11 +4,17 @@ #include <linux/list.h> #include <linux/backing-dev.h> #include <linux/wait.h> +#include <linux/nfs_xdr.h> +#include <linux/sunrpc/xprt.h> #include <asm/atomic.h> +struct nfs4_session; struct nfs_iostats; struct nlm_host; +struct nfs4_sequence_args; +struct nfs4_sequence_res; +struct nfs_server; /* * The nfs_client identifies our client state to the server. 
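A minimal sketch, not taken from the patch itself, of how the NFS_FSCACHE() predicate and the new NFS_INO_FSCACHE inode bit introduced above might be consulted on a read path; the function name and the -ENOBUFS fallback convention are illustrative assumptions.

#include <linux/errno.h>
#include <linux/nfs_fs.h>

/* Hypothetical helper: skip the local cache entirely when the inode has not
 * been marked cacheable via the NFS_INO_FSCACHE flag bit. */
static int nfs_try_fscache_read(struct inode *inode, struct page *page)
{
	if (!NFS_FSCACHE(inode))
		return -ENOBUFS;	/* caller falls back to reading from the server */

	/* ... hand the page over to FS-Cache here ... */
	return 0;
}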
@@ -18,6 +24,7 @@ struct nfs_client { int cl_cons_state; /* current construction state (-ve: init error) */ #define NFS_CS_READY 0 /* ready to be used */ #define NFS_CS_INITING 1 /* busy initialising */ +#define NFS_CS_SESSION_INITING 2 /* busy initialising session */ unsigned long cl_res_state; /* NFS resources state */ #define NFS_CS_CALLBACK 1 /* - callback started */ #define NFS_CS_IDMAP 2 /* - idmap started */ @@ -32,6 +39,7 @@ struct nfs_client { const struct nfs_rpc_ops *rpc_ops; /* NFS protocol vector */ int cl_proto; /* Network transport protocol */ + u32 cl_minorversion;/* NFSv4 minorversion */ struct rpc_cred *cl_machine_cred; #ifdef CONFIG_NFS_V4 @@ -63,6 +71,25 @@ struct nfs_client { */ char cl_ipaddr[48]; unsigned char cl_id_uniquifier; + int (* cl_call_sync)(struct nfs_server *server, + struct rpc_message *msg, + struct nfs4_sequence_args *args, + struct nfs4_sequence_res *res, + int cache_reply); +#endif /* CONFIG_NFS_V4 */ + +#ifdef CONFIG_NFS_V4_1 + /* clientid returned from EXCHANGE_ID, used by session operations */ + u64 cl_ex_clid; + /* The sequence id to use for the next CREATE_SESSION */ + u32 cl_seqid; + /* The flags used for obtaining the clientid during EXCHANGE_ID */ + u32 cl_exchange_flags; + struct nfs4_session *cl_session; /* sharred session */ +#endif /* CONFIG_NFS_V4_1 */ + +#ifdef CONFIG_NFS_FSCACHE + struct fscache_cookie *fscache; /* client index cache cookie */ #endif }; @@ -96,16 +123,28 @@ struct nfs_server { unsigned int acdirmin; unsigned int acdirmax; unsigned int namelen; + unsigned int options; /* extra options enabled by mount */ +#define NFS_OPTION_FSCACHE 0x00000001 /* - local caching enabled */ struct nfs_fsid fsid; __u64 maxfilesize; /* maximum file size */ unsigned long mount_time; /* when this fs was mounted */ dev_t s_dev; /* superblock dev numbers */ +#ifdef CONFIG_NFS_FSCACHE + struct nfs_fscache_key *fscache_key; /* unique key for superblock */ + struct fscache_cookie *fscache; /* superblock cookie */ +#endif + #ifdef CONFIG_NFS_V4 u32 attr_bitmask[2];/* V4 bitmask representing the set of attributes supported on this filesystem */ + u32 cache_consistency_bitmask[2]; + /* V4 bitmask representing the subset + of change attribute, size, ctime + and mtime attributes supported by + the server */ u32 acl_bitmask; /* V4 bitmask representing the ACEs that are supported on this filesystem */ @@ -128,5 +167,56 @@ struct nfs_server { #define NFS_CAP_SYMLINKS (1U << 2) #define NFS_CAP_ACLS (1U << 3) #define NFS_CAP_ATOMIC_OPEN (1U << 4) +#define NFS_CAP_CHANGE_ATTR (1U << 5) +#define NFS_CAP_FILEID (1U << 6) +#define NFS_CAP_MODE (1U << 7) +#define NFS_CAP_NLINK (1U << 8) +#define NFS_CAP_OWNER (1U << 9) +#define NFS_CAP_OWNER_GROUP (1U << 10) +#define NFS_CAP_ATIME (1U << 11) +#define NFS_CAP_CTIME (1U << 12) +#define NFS_CAP_MTIME (1U << 13) + + +/* maximum number of slots to use */ +#define NFS4_MAX_SLOT_TABLE RPC_MAX_SLOT_TABLE + +#if defined(CONFIG_NFS_V4_1) + +/* Sessions */ +#define SLOT_TABLE_SZ (NFS4_MAX_SLOT_TABLE/(8*sizeof(long))) +struct nfs4_slot_table { + struct nfs4_slot *slots; /* seqid per slot */ + unsigned long used_slots[SLOT_TABLE_SZ]; /* used/unused bitmap */ + spinlock_t slot_tbl_lock; + struct rpc_wait_queue slot_tbl_waitq; /* allocators may wait here */ + int max_slots; /* # slots in table */ + int highest_used_slotid; /* sent to server on each SEQ. 
+ * op for dynamic resizing */ +}; + +static inline int slot_idx(struct nfs4_slot_table *tbl, struct nfs4_slot *sp) +{ + return sp - tbl->slots; +} + +/* + * Session related parameters + */ +struct nfs4_session { + struct nfs4_sessionid sess_id; + u32 flags; + unsigned long session_state; + u32 hash_alg; + u32 ssv_len; + + /* The fore and back channel */ + struct nfs4_channel_attrs fc_attrs; + struct nfs4_slot_table fc_slot_table; + struct nfs4_channel_attrs bc_attrs; + struct nfs4_slot_table bc_slot_table; + struct nfs_client *clp; +}; +#endif /* CONFIG_NFS_V4_1 */ #endif diff --git a/include/linux/nfs_idmap.h b/include/linux/nfs_idmap.h index 15a9f3b7289..91a1c24e0cb 100644 --- a/include/linux/nfs_idmap.h +++ b/include/linux/nfs_idmap.h @@ -37,6 +37,8 @@ #ifndef NFS_IDMAP_H #define NFS_IDMAP_H +#include <linux/types.h> + /* XXX from bits/utmp.h */ #define IDMAP_NAMESZ 128 diff --git a/include/linux/nfs_iostat.h b/include/linux/nfs_iostat.h index 1cb9a3fed2b..68b10f5f890 100644 --- a/include/linux/nfs_iostat.h +++ b/include/linux/nfs_iostat.h @@ -116,4 +116,16 @@ enum nfs_stat_eventcounters { __NFSIOS_COUNTSMAX, }; +/* + * NFS local caching servicing counters + */ +enum nfs_stat_fscachecounters { + NFSIOS_FSCACHE_PAGES_READ_OK, + NFSIOS_FSCACHE_PAGES_READ_FAIL, + NFSIOS_FSCACHE_PAGES_WRITTEN_OK, + NFSIOS_FSCACHE_PAGES_WRITTEN_FAIL, + NFSIOS_FSCACHE_PAGES_UNCACHED, + __NFSIOS_FSCACHEMAX, +}; + #endif /* _LINUX_NFS_IOSTAT */ diff --git a/include/linux/nfs_xdr.h b/include/linux/nfs_xdr.h index a550b528319..62f63fb0c4c 100644 --- a/include/linux/nfs_xdr.h +++ b/include/linux/nfs_xdr.h @@ -27,12 +27,8 @@ static inline int nfs_fsid_equal(const struct nfs_fsid *a, const struct nfs_fsid } struct nfs_fattr { - unsigned short valid; /* which fields are valid */ - __u64 pre_size; /* pre_op_attr.size */ - struct timespec pre_mtime; /* pre_op_attr.mtime */ - struct timespec pre_ctime; /* pre_op_attr.ctime */ - enum nfs_ftype type; /* always use NFSv2 types */ - __u32 mode; + unsigned int valid; /* which fields are valid */ + umode_t mode; __u32 nlink; __u32 uid; __u32 gid; @@ -52,19 +48,55 @@ struct nfs_fattr { struct timespec atime; struct timespec mtime; struct timespec ctime; - __u32 bitmap[2]; /* NFSv4 returned attribute bitmap */ __u64 change_attr; /* NFSv4 change attribute */ __u64 pre_change_attr;/* pre-op NFSv4 change attribute */ + __u64 pre_size; /* pre_op_attr.size */ + struct timespec pre_mtime; /* pre_op_attr.mtime */ + struct timespec pre_ctime; /* pre_op_attr.ctime */ unsigned long time_start; unsigned long gencount; }; -#define NFS_ATTR_WCC 0x0001 /* pre-op WCC data */ -#define NFS_ATTR_FATTR 0x0002 /* post-op attributes */ -#define NFS_ATTR_FATTR_V3 0x0004 /* NFSv3 attributes */ -#define NFS_ATTR_FATTR_V4 0x0008 /* NFSv4 change attribute */ -#define NFS_ATTR_WCC_V4 0x0010 /* pre-op change attribute */ -#define NFS_ATTR_FATTR_V4_REFERRAL 0x0020 /* NFSv4 referral */ +#define NFS_ATTR_FATTR_TYPE (1U << 0) +#define NFS_ATTR_FATTR_MODE (1U << 1) +#define NFS_ATTR_FATTR_NLINK (1U << 2) +#define NFS_ATTR_FATTR_OWNER (1U << 3) +#define NFS_ATTR_FATTR_GROUP (1U << 4) +#define NFS_ATTR_FATTR_RDEV (1U << 5) +#define NFS_ATTR_FATTR_SIZE (1U << 6) +#define NFS_ATTR_FATTR_PRESIZE (1U << 7) +#define NFS_ATTR_FATTR_BLOCKS_USED (1U << 8) +#define NFS_ATTR_FATTR_SPACE_USED (1U << 9) +#define NFS_ATTR_FATTR_FSID (1U << 10) +#define NFS_ATTR_FATTR_FILEID (1U << 11) +#define NFS_ATTR_FATTR_ATIME (1U << 12) +#define NFS_ATTR_FATTR_MTIME (1U << 13) +#define NFS_ATTR_FATTR_CTIME (1U << 14) +#define 
NFS_ATTR_FATTR_PREMTIME (1U << 15) +#define NFS_ATTR_FATTR_PRECTIME (1U << 16) +#define NFS_ATTR_FATTR_CHANGE (1U << 17) +#define NFS_ATTR_FATTR_PRECHANGE (1U << 18) +#define NFS_ATTR_FATTR_V4_REFERRAL (1U << 19) /* NFSv4 referral */ + +#define NFS_ATTR_FATTR (NFS_ATTR_FATTR_TYPE \ + | NFS_ATTR_FATTR_MODE \ + | NFS_ATTR_FATTR_NLINK \ + | NFS_ATTR_FATTR_OWNER \ + | NFS_ATTR_FATTR_GROUP \ + | NFS_ATTR_FATTR_RDEV \ + | NFS_ATTR_FATTR_SIZE \ + | NFS_ATTR_FATTR_FSID \ + | NFS_ATTR_FATTR_FILEID \ + | NFS_ATTR_FATTR_ATIME \ + | NFS_ATTR_FATTR_MTIME \ + | NFS_ATTR_FATTR_CTIME) +#define NFS_ATTR_FATTR_V2 (NFS_ATTR_FATTR \ + | NFS_ATTR_FATTR_BLOCKS_USED) +#define NFS_ATTR_FATTR_V3 (NFS_ATTR_FATTR \ + | NFS_ATTR_FATTR_SPACE_USED) +#define NFS_ATTR_FATTR_V4 (NFS_ATTR_FATTR \ + | NFS_ATTR_FATTR_SPACE_USED \ + | NFS_ATTR_FATTR_CHANGE) /* * Info on the file system @@ -113,6 +145,44 @@ struct nfs4_change_info { }; struct nfs_seqid; + +/* nfs41 sessions channel attributes */ +struct nfs4_channel_attrs { + u32 headerpadsz; + u32 max_rqst_sz; + u32 max_resp_sz; + u32 max_resp_sz_cached; + u32 max_ops; + u32 max_reqs; +}; + +/* nfs41 sessions slot seqid */ +struct nfs4_slot { + u32 seq_nr; +}; + +struct nfs4_sequence_args { + struct nfs4_session *sa_session; + u8 sa_slotid; + u8 sa_cache_this; +}; + +struct nfs4_sequence_res { + struct nfs4_session *sr_session; + u8 sr_slotid; /* slot used to send request */ + unsigned long sr_renewal_time; + int sr_status; /* sequence operation status */ +}; + +struct nfs4_get_lease_time_args { + struct nfs4_sequence_args la_seq_args; +}; + +struct nfs4_get_lease_time_res { + struct nfs_fsinfo *lr_fsinfo; + struct nfs4_sequence_res lr_seq_res; +}; + /* * Arguments to the open call. */ @@ -133,6 +203,7 @@ struct nfs_openargs { const struct nfs_server *server; /* Needed for ID mapping */ const u32 * bitmask; __u32 claim; + struct nfs4_sequence_args seq_args; }; struct nfs_openres { @@ -149,6 +220,7 @@ struct nfs_openres { __u32 do_recall; __u64 maxsize; __u32 attrset[NFS4_BITMAP_SIZE]; + struct nfs4_sequence_res seq_res; }; /* @@ -174,6 +246,7 @@ struct nfs_closeargs { struct nfs_seqid * seqid; fmode_t fmode; const u32 * bitmask; + struct nfs4_sequence_args seq_args; }; struct nfs_closeres { @@ -181,6 +254,7 @@ struct nfs_closeres { struct nfs_fattr * fattr; struct nfs_seqid * seqid; const struct nfs_server *server; + struct nfs4_sequence_res seq_res; }; /* * * Arguments to the lock,lockt, and locku call. 
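A short sketch, not part of the patch, of what the reworked nfs_fattr in this hunk means for callers: 'valid' is now a bitmap of per-field NFS_ATTR_FATTR_* flags rather than the old coarse validity codes, so each field (or a composite such as NFS_ATTR_FATTR_V4) is tested before it is trusted. The helper names below are illustrative.

#include <linux/types.h>
#include <linux/nfs_xdr.h>

/* Illustrative only: test a single attribute's validity bit ... */
static bool fattr_mtime_is_valid(const struct nfs_fattr *fattr)
{
	return (fattr->valid & NFS_ATTR_FATTR_MTIME) != 0;
}

/* ... or require everything an NFSv4 reply is expected to carry
 * (the common NFS_ATTR_FATTR set plus space used and the change attribute). */
static bool fattr_is_complete_v4(const struct nfs_fattr *fattr)
{
	return (fattr->valid & NFS_ATTR_FATTR_V4) == NFS_ATTR_FATTR_V4;
}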
@@ -201,12 +275,14 @@ struct nfs_lock_args { unsigned char block : 1; unsigned char reclaim : 1; unsigned char new_lock_owner : 1; + struct nfs4_sequence_args seq_args; }; struct nfs_lock_res { nfs4_stateid stateid; struct nfs_seqid * lock_seqid; struct nfs_seqid * open_seqid; + struct nfs4_sequence_res seq_res; }; struct nfs_locku_args { @@ -214,32 +290,38 @@ struct nfs_locku_args { struct file_lock * fl; struct nfs_seqid * seqid; nfs4_stateid * stateid; + struct nfs4_sequence_args seq_args; }; struct nfs_locku_res { nfs4_stateid stateid; struct nfs_seqid * seqid; + struct nfs4_sequence_res seq_res; }; struct nfs_lockt_args { struct nfs_fh * fh; struct file_lock * fl; struct nfs_lowner lock_owner; + struct nfs4_sequence_args seq_args; }; struct nfs_lockt_res { struct file_lock * denied; /* LOCK, LOCKT failed */ + struct nfs4_sequence_res seq_res; }; struct nfs4_delegreturnargs { const struct nfs_fh *fhandle; const nfs4_stateid *stateid; const u32 * bitmask; + struct nfs4_sequence_args seq_args; }; struct nfs4_delegreturnres { struct nfs_fattr * fattr; const struct nfs_server *server; + struct nfs4_sequence_res seq_res; }; /* @@ -252,12 +334,14 @@ struct nfs_readargs { __u32 count; unsigned int pgbase; struct page ** pages; + struct nfs4_sequence_args seq_args; }; struct nfs_readres { struct nfs_fattr * fattr; __u32 count; int eof; + struct nfs4_sequence_res seq_res; }; /* @@ -272,6 +356,7 @@ struct nfs_writeargs { unsigned int pgbase; struct page ** pages; const u32 * bitmask; + struct nfs4_sequence_args seq_args; }; struct nfs_writeverf { @@ -284,6 +369,7 @@ struct nfs_writeres { struct nfs_writeverf * verf; __u32 count; const struct nfs_server *server; + struct nfs4_sequence_res seq_res; }; /* @@ -293,12 +379,14 @@ struct nfs_removeargs { const struct nfs_fh *fh; struct qstr name; const u32 * bitmask; + struct nfs4_sequence_args seq_args; }; struct nfs_removeres { const struct nfs_server *server; struct nfs4_change_info cinfo; struct nfs_fattr dir_attr; + struct nfs4_sequence_res seq_res; }; /* @@ -351,6 +439,7 @@ struct nfs_setattrargs { struct iattr * iap; const struct nfs_server * server; /* Needed for name mapping */ const u32 * bitmask; + struct nfs4_sequence_args seq_args; }; struct nfs_setaclargs { @@ -358,6 +447,11 @@ struct nfs_setaclargs { size_t acl_len; unsigned int acl_pgbase; struct page ** acl_pages; + struct nfs4_sequence_args seq_args; +}; + +struct nfs_setaclres { + struct nfs4_sequence_res seq_res; }; struct nfs_getaclargs { @@ -365,11 +459,18 @@ struct nfs_getaclargs { size_t acl_len; unsigned int acl_pgbase; struct page ** acl_pages; + struct nfs4_sequence_args seq_args; +}; + +struct nfs_getaclres { + size_t acl_len; + struct nfs4_sequence_res seq_res; }; struct nfs_setattrres { struct nfs_fattr * fattr; const struct nfs_server * server; + struct nfs4_sequence_res seq_res; }; struct nfs_linkargs { @@ -406,6 +507,8 @@ struct nfs3_setaclargs { int mask; struct posix_acl * acl_access; struct posix_acl * acl_default; + size_t len; + unsigned int npages; struct page ** pages; }; @@ -549,6 +652,7 @@ struct nfs4_accessargs { const struct nfs_fh * fh; const u32 * bitmask; u32 access; + struct nfs4_sequence_args seq_args; }; struct nfs4_accessres { @@ -556,6 +660,7 @@ struct nfs4_accessres { struct nfs_fattr * fattr; u32 supported; u32 access; + struct nfs4_sequence_res seq_res; }; struct nfs4_create_arg { @@ -575,6 +680,7 @@ struct nfs4_create_arg { const struct iattr * attrs; const struct nfs_fh * dir_fh; const u32 * bitmask; + struct nfs4_sequence_args seq_args; }; 
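A hypothetical helper, not from the patch, illustrating the pattern behind the seq_args/seq_res members added throughout these hunks: every per-operation argument and result structure embeds an nfs4_sequence_args/nfs4_sequence_res pair so that, on an NFSv4.1 mount, a SEQUENCE operation can ride along in the same compound. The helper name and the use of NFS4_MAX_SLOT_TABLE as an "unallocated slot" marker are assumptions.

#include <linux/nfs_xdr.h>
#include <linux/nfs_fs_sb.h>

/* Sketch: prime the embedded sequence argument/result pair before a slot has
 * been allocated from the session's fore channel slot table. */
static void nfs41_init_sequence(struct nfs4_sequence_args *args,
				struct nfs4_sequence_res *res,
				int cache_this)
{
	args->sa_session = NULL;		/* set once a session slot is picked */
	args->sa_slotid = NFS4_MAX_SLOT_TABLE;	/* assumed "no slot yet" marker */
	args->sa_cache_this = cache_this;

	res->sr_session = NULL;
	res->sr_slotid = NFS4_MAX_SLOT_TABLE;
}

Callers would pass, e.g., &closeargs->seq_args and &closeres->seq_res into such a helper before issuing the RPC.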
struct nfs4_create_res { @@ -583,21 +689,30 @@ struct nfs4_create_res { struct nfs_fattr * fattr; struct nfs4_change_info dir_cinfo; struct nfs_fattr * dir_fattr; + struct nfs4_sequence_res seq_res; }; struct nfs4_fsinfo_arg { const struct nfs_fh * fh; const u32 * bitmask; + struct nfs4_sequence_args seq_args; +}; + +struct nfs4_fsinfo_res { + struct nfs_fsinfo *fsinfo; + struct nfs4_sequence_res seq_res; }; struct nfs4_getattr_arg { const struct nfs_fh * fh; const u32 * bitmask; + struct nfs4_sequence_args seq_args; }; struct nfs4_getattr_res { const struct nfs_server * server; struct nfs_fattr * fattr; + struct nfs4_sequence_res seq_res; }; struct nfs4_link_arg { @@ -605,6 +720,7 @@ struct nfs4_link_arg { const struct nfs_fh * dir_fh; const struct qstr * name; const u32 * bitmask; + struct nfs4_sequence_args seq_args; }; struct nfs4_link_res { @@ -612,6 +728,7 @@ struct nfs4_link_res { struct nfs_fattr * fattr; struct nfs4_change_info cinfo; struct nfs_fattr * dir_attr; + struct nfs4_sequence_res seq_res; }; @@ -619,21 +736,30 @@ struct nfs4_lookup_arg { const struct nfs_fh * dir_fh; const struct qstr * name; const u32 * bitmask; + struct nfs4_sequence_args seq_args; }; struct nfs4_lookup_res { const struct nfs_server * server; struct nfs_fattr * fattr; struct nfs_fh * fh; + struct nfs4_sequence_res seq_res; }; struct nfs4_lookup_root_arg { const u32 * bitmask; + struct nfs4_sequence_args seq_args; }; struct nfs4_pathconf_arg { const struct nfs_fh * fh; const u32 * bitmask; + struct nfs4_sequence_args seq_args; +}; + +struct nfs4_pathconf_res { + struct nfs_pathconf *pathconf; + struct nfs4_sequence_res seq_res; }; struct nfs4_readdir_arg { @@ -644,11 +770,13 @@ struct nfs4_readdir_arg { struct page ** pages; /* zero-copy data */ unsigned int pgbase; /* zero-copy data */ const u32 * bitmask; + struct nfs4_sequence_args seq_args; }; struct nfs4_readdir_res { nfs4_verifier verifier; unsigned int pgbase; + struct nfs4_sequence_res seq_res; }; struct nfs4_readlink { @@ -656,6 +784,11 @@ struct nfs4_readlink { unsigned int pgbase; unsigned int pglen; /* zero-copy data */ struct page ** pages; /* zero-copy data */ + struct nfs4_sequence_args seq_args; +}; + +struct nfs4_readlink_res { + struct nfs4_sequence_res seq_res; }; struct nfs4_rename_arg { @@ -664,6 +797,7 @@ struct nfs4_rename_arg { const struct qstr * old_name; const struct qstr * new_name; const u32 * bitmask; + struct nfs4_sequence_args seq_args; }; struct nfs4_rename_res { @@ -672,6 +806,7 @@ struct nfs4_rename_res { struct nfs_fattr * old_fattr; struct nfs4_change_info new_cinfo; struct nfs_fattr * new_fattr; + struct nfs4_sequence_res seq_res; }; #define NFS4_SETCLIENTID_NAMELEN (127) @@ -690,6 +825,17 @@ struct nfs4_setclientid { struct nfs4_statfs_arg { const struct nfs_fh * fh; const u32 * bitmask; + struct nfs4_sequence_args seq_args; +}; + +struct nfs4_statfs_res { + struct nfs_fsstat *fsstat; + struct nfs4_sequence_res seq_res; +}; + +struct nfs4_server_caps_arg { + struct nfs_fh *fhandle; + struct nfs4_sequence_args seq_args; }; struct nfs4_server_caps_res { @@ -697,6 +843,7 @@ struct nfs4_server_caps_res { u32 acl_bitmask; u32 has_links; u32 has_symlinks; + struct nfs4_sequence_res seq_res; }; struct nfs4_string { @@ -731,10 +878,68 @@ struct nfs4_fs_locations_arg { const struct qstr *name; struct page *page; const u32 *bitmask; + struct nfs4_sequence_args seq_args; +}; + +struct nfs4_fs_locations_res { + struct nfs4_fs_locations *fs_locations; + struct nfs4_sequence_res seq_res; }; #endif /* CONFIG_NFS_V4 */ +struct 
nfstime4 { + u64 seconds; + u32 nseconds; +}; + +#ifdef CONFIG_NFS_V4_1 +struct nfs_impl_id4 { + u32 domain_len; + char *domain; + u32 name_len; + char *name; + struct nfstime4 date; +}; + +#define NFS4_EXCHANGE_ID_LEN (48) +struct nfs41_exchange_id_args { + struct nfs_client *client; + nfs4_verifier *verifier; + unsigned int id_len; + char id[NFS4_EXCHANGE_ID_LEN]; + u32 flags; +}; + +struct server_owner { + uint64_t minor_id; + uint32_t major_id_sz; + char major_id[NFS4_OPAQUE_LIMIT]; +}; + +struct server_scope { + uint32_t server_scope_sz; + char server_scope[NFS4_OPAQUE_LIMIT]; +}; + +struct nfs41_exchange_id_res { + struct nfs_client *client; + u32 flags; +}; + +struct nfs41_create_session_args { + struct nfs_client *client; + uint32_t flags; + uint32_t cb_program; + struct nfs4_channel_attrs fc_attrs; /* Fore Channel */ + struct nfs4_channel_attrs bc_attrs; /* Back Channel */ +}; + +struct nfs41_create_session_res { + struct nfs_client *client; +}; +#endif /* CONFIG_NFS_V4_1 */ + struct nfs_page; #define NFS_PAGEVEC_SIZE (8U) @@ -783,7 +988,7 @@ struct nfs_access_entry; */ struct nfs_rpc_ops { u32 version; /* Protocol version */ - struct dentry_operations *dentry_ops; + const struct dentry_operations *dentry_ops; const struct inode_operations *dir_inode_ops; const struct inode_operations *file_inode_ops; @@ -834,6 +1039,7 @@ struct nfs_rpc_ops { int (*lock)(struct file *, int, struct file_lock *); int (*lock_check_bounds)(const struct file_lock *); void (*clear_acl_cache)(struct inode *); + void (*close_context)(struct nfs_open_context *ctx, int); }; /* diff --git a/include/linux/nfsacl.h b/include/linux/nfsacl.h index 54487a99beb..43011b69297 100644 --- a/include/linux/nfsacl.h +++ b/include/linux/nfsacl.h @@ -37,6 +37,9 @@ #define NFSACL_MAXPAGES ((2*(8+12*NFS_ACL_MAX_ENTRIES) + PAGE_SIZE-1) \ >> PAGE_SHIFT) +#define NFS_ACL_MAX_ENTRIES_INLINE (5) +#define NFS_ACL_INLINE_BUFSIZE ((2*(2+3*NFS_ACL_MAX_ENTRIES_INLINE)) << 2) + static inline unsigned int nfsacl_size(struct posix_acl *acl_access, struct posix_acl *acl_default) { diff --git a/include/linux/nfsd/cache.h b/include/linux/nfsd/cache.h index 04b355c801d..3a3f58934f5 100644 --- a/include/linux/nfsd/cache.h +++ b/include/linux/nfsd/cache.h @@ -14,8 +14,7 @@ #include <linux/uio.h> /* - * Representation of a reply cache entry. The first two members *must* - * be hash_next and hash_prev. + * Representation of a reply cache entry. 
*/ struct svc_cacherep { struct hlist_node c_hash; @@ -76,4 +75,12 @@ void nfsd_reply_cache_shutdown(void); int nfsd_cache_lookup(struct svc_rqst *, int); void nfsd_cache_update(struct svc_rqst *, int, __be32 *); +#ifdef CONFIG_NFSD_V4 +void nfsd4_set_statp(struct svc_rqst *rqstp, __be32 *statp); +#else /* CONFIG_NFSD_V4 */ +static inline void nfsd4_set_statp(struct svc_rqst *rqstp, __be32 *statp) +{ +} +#endif /* CONFIG_NFSD_V4 */ + #endif /* NFSCACHE_H */ diff --git a/include/linux/nfsd/export.h b/include/linux/nfsd/export.h index 5431512b275..a6d9ef2bb34 100644 --- a/include/linux/nfsd/export.h +++ b/include/linux/nfsd/export.h @@ -10,9 +10,8 @@ #ifndef NFSD_EXPORT_H #define NFSD_EXPORT_H -#include <asm/types.h> -#ifdef __KERNEL__ # include <linux/types.h> +#ifdef __KERNEL__ # include <linux/in.h> #endif @@ -126,11 +125,9 @@ void nfsd_export_flush(void); void exp_readlock(void); void exp_readunlock(void); struct svc_export * rqst_exp_get_by_name(struct svc_rqst *, - struct vfsmount *, - struct dentry *); + struct path *); struct svc_export * rqst_exp_parent(struct svc_rqst *, - struct vfsmount *mnt, - struct dentry *dentry); + struct path *); int exp_rootfh(struct auth_domain *, char *path, struct knfsd_fh *, int maxsize); __be32 exp_pseudoroot(struct svc_rqst *, struct svc_fh *); diff --git a/include/linux/nfsd/nfsd.h b/include/linux/nfsd/nfsd.h index e19f45991b2..2b49d676d0c 100644 --- a/include/linux/nfsd/nfsd.h +++ b/include/linux/nfsd/nfsd.h @@ -23,7 +23,7 @@ /* * nfsd version */ -#define NFSD_SUPPORTED_MINOR_VERSION 0 +#define NFSD_SUPPORTED_MINOR_VERSION 1 /* * Flags for nfsd_permission @@ -53,6 +53,7 @@ typedef int (*nfsd_dirop_t)(struct inode *, struct dentry *, int, int); extern struct svc_program nfsd_program; extern struct svc_version nfsd_version2, nfsd_version3, nfsd_version4; +extern u32 nfsd_supported_minorversion; extern struct mutex nfsd_mutex; extern struct svc_serv *nfsd_serv; @@ -105,7 +106,7 @@ void nfsd_close(struct file *); __be32 nfsd_read(struct svc_rqst *, struct svc_fh *, struct file *, loff_t, struct kvec *, int, unsigned long *); __be32 nfsd_write(struct svc_rqst *, struct svc_fh *,struct file *, - loff_t, struct kvec *,int, unsigned long, int *); + loff_t, struct kvec *,int, unsigned long *, int *); __be32 nfsd_readlink(struct svc_rqst *, struct svc_fh *, char *, int *); __be32 nfsd_symlink(struct svc_rqst *, struct svc_fh *, @@ -149,6 +150,7 @@ int nfsd_set_posix_acl(struct svc_fh *, int, struct posix_acl *); enum vers_op {NFSD_SET, NFSD_CLEAR, NFSD_TEST, NFSD_AVAIL }; int nfsd_vers(int vers, enum vers_op change); +int nfsd_minorversion(u32 minorversion, enum vers_op change); void nfsd_reset_versions(void); int nfsd_create_serv(void); @@ -186,78 +188,119 @@ void nfsd_lockd_shutdown(void); /* * These macros provide pre-xdr'ed values for faster operation. 
*/ -#define nfs_ok __constant_htonl(NFS_OK) -#define nfserr_perm __constant_htonl(NFSERR_PERM) -#define nfserr_noent __constant_htonl(NFSERR_NOENT) -#define nfserr_io __constant_htonl(NFSERR_IO) -#define nfserr_nxio __constant_htonl(NFSERR_NXIO) -#define nfserr_eagain __constant_htonl(NFSERR_EAGAIN) -#define nfserr_acces __constant_htonl(NFSERR_ACCES) -#define nfserr_exist __constant_htonl(NFSERR_EXIST) -#define nfserr_xdev __constant_htonl(NFSERR_XDEV) -#define nfserr_nodev __constant_htonl(NFSERR_NODEV) -#define nfserr_notdir __constant_htonl(NFSERR_NOTDIR) -#define nfserr_isdir __constant_htonl(NFSERR_ISDIR) -#define nfserr_inval __constant_htonl(NFSERR_INVAL) -#define nfserr_fbig __constant_htonl(NFSERR_FBIG) -#define nfserr_nospc __constant_htonl(NFSERR_NOSPC) -#define nfserr_rofs __constant_htonl(NFSERR_ROFS) -#define nfserr_mlink __constant_htonl(NFSERR_MLINK) -#define nfserr_opnotsupp __constant_htonl(NFSERR_OPNOTSUPP) -#define nfserr_nametoolong __constant_htonl(NFSERR_NAMETOOLONG) -#define nfserr_notempty __constant_htonl(NFSERR_NOTEMPTY) -#define nfserr_dquot __constant_htonl(NFSERR_DQUOT) -#define nfserr_stale __constant_htonl(NFSERR_STALE) -#define nfserr_remote __constant_htonl(NFSERR_REMOTE) -#define nfserr_wflush __constant_htonl(NFSERR_WFLUSH) -#define nfserr_badhandle __constant_htonl(NFSERR_BADHANDLE) -#define nfserr_notsync __constant_htonl(NFSERR_NOT_SYNC) -#define nfserr_badcookie __constant_htonl(NFSERR_BAD_COOKIE) -#define nfserr_notsupp __constant_htonl(NFSERR_NOTSUPP) -#define nfserr_toosmall __constant_htonl(NFSERR_TOOSMALL) -#define nfserr_serverfault __constant_htonl(NFSERR_SERVERFAULT) -#define nfserr_badtype __constant_htonl(NFSERR_BADTYPE) -#define nfserr_jukebox __constant_htonl(NFSERR_JUKEBOX) -#define nfserr_denied __constant_htonl(NFSERR_DENIED) -#define nfserr_deadlock __constant_htonl(NFSERR_DEADLOCK) -#define nfserr_expired __constant_htonl(NFSERR_EXPIRED) -#define nfserr_bad_cookie __constant_htonl(NFSERR_BAD_COOKIE) -#define nfserr_same __constant_htonl(NFSERR_SAME) -#define nfserr_clid_inuse __constant_htonl(NFSERR_CLID_INUSE) -#define nfserr_stale_clientid __constant_htonl(NFSERR_STALE_CLIENTID) -#define nfserr_resource __constant_htonl(NFSERR_RESOURCE) -#define nfserr_moved __constant_htonl(NFSERR_MOVED) -#define nfserr_nofilehandle __constant_htonl(NFSERR_NOFILEHANDLE) -#define nfserr_minor_vers_mismatch __constant_htonl(NFSERR_MINOR_VERS_MISMATCH) -#define nfserr_share_denied __constant_htonl(NFSERR_SHARE_DENIED) -#define nfserr_stale_stateid __constant_htonl(NFSERR_STALE_STATEID) -#define nfserr_old_stateid __constant_htonl(NFSERR_OLD_STATEID) -#define nfserr_bad_stateid __constant_htonl(NFSERR_BAD_STATEID) -#define nfserr_bad_seqid __constant_htonl(NFSERR_BAD_SEQID) -#define nfserr_symlink __constant_htonl(NFSERR_SYMLINK) -#define nfserr_not_same __constant_htonl(NFSERR_NOT_SAME) -#define nfserr_restorefh __constant_htonl(NFSERR_RESTOREFH) -#define nfserr_attrnotsupp __constant_htonl(NFSERR_ATTRNOTSUPP) -#define nfserr_bad_xdr __constant_htonl(NFSERR_BAD_XDR) -#define nfserr_openmode __constant_htonl(NFSERR_OPENMODE) -#define nfserr_locks_held __constant_htonl(NFSERR_LOCKS_HELD) -#define nfserr_op_illegal __constant_htonl(NFSERR_OP_ILLEGAL) -#define nfserr_grace __constant_htonl(NFSERR_GRACE) -#define nfserr_no_grace __constant_htonl(NFSERR_NO_GRACE) -#define nfserr_reclaim_bad __constant_htonl(NFSERR_RECLAIM_BAD) -#define nfserr_badname __constant_htonl(NFSERR_BADNAME) -#define nfserr_cb_path_down __constant_htonl(NFSERR_CB_PATH_DOWN) 
-#define nfserr_locked __constant_htonl(NFSERR_LOCKED) -#define nfserr_wrongsec __constant_htonl(NFSERR_WRONGSEC) -#define nfserr_replay_me __constant_htonl(NFSERR_REPLAY_ME) +#define nfs_ok cpu_to_be32(NFS_OK) +#define nfserr_perm cpu_to_be32(NFSERR_PERM) +#define nfserr_noent cpu_to_be32(NFSERR_NOENT) +#define nfserr_io cpu_to_be32(NFSERR_IO) +#define nfserr_nxio cpu_to_be32(NFSERR_NXIO) +#define nfserr_eagain cpu_to_be32(NFSERR_EAGAIN) +#define nfserr_acces cpu_to_be32(NFSERR_ACCES) +#define nfserr_exist cpu_to_be32(NFSERR_EXIST) +#define nfserr_xdev cpu_to_be32(NFSERR_XDEV) +#define nfserr_nodev cpu_to_be32(NFSERR_NODEV) +#define nfserr_notdir cpu_to_be32(NFSERR_NOTDIR) +#define nfserr_isdir cpu_to_be32(NFSERR_ISDIR) +#define nfserr_inval cpu_to_be32(NFSERR_INVAL) +#define nfserr_fbig cpu_to_be32(NFSERR_FBIG) +#define nfserr_nospc cpu_to_be32(NFSERR_NOSPC) +#define nfserr_rofs cpu_to_be32(NFSERR_ROFS) +#define nfserr_mlink cpu_to_be32(NFSERR_MLINK) +#define nfserr_opnotsupp cpu_to_be32(NFSERR_OPNOTSUPP) +#define nfserr_nametoolong cpu_to_be32(NFSERR_NAMETOOLONG) +#define nfserr_notempty cpu_to_be32(NFSERR_NOTEMPTY) +#define nfserr_dquot cpu_to_be32(NFSERR_DQUOT) +#define nfserr_stale cpu_to_be32(NFSERR_STALE) +#define nfserr_remote cpu_to_be32(NFSERR_REMOTE) +#define nfserr_wflush cpu_to_be32(NFSERR_WFLUSH) +#define nfserr_badhandle cpu_to_be32(NFSERR_BADHANDLE) +#define nfserr_notsync cpu_to_be32(NFSERR_NOT_SYNC) +#define nfserr_badcookie cpu_to_be32(NFSERR_BAD_COOKIE) +#define nfserr_notsupp cpu_to_be32(NFSERR_NOTSUPP) +#define nfserr_toosmall cpu_to_be32(NFSERR_TOOSMALL) +#define nfserr_serverfault cpu_to_be32(NFSERR_SERVERFAULT) +#define nfserr_badtype cpu_to_be32(NFSERR_BADTYPE) +#define nfserr_jukebox cpu_to_be32(NFSERR_JUKEBOX) +#define nfserr_denied cpu_to_be32(NFSERR_DENIED) +#define nfserr_deadlock cpu_to_be32(NFSERR_DEADLOCK) +#define nfserr_expired cpu_to_be32(NFSERR_EXPIRED) +#define nfserr_bad_cookie cpu_to_be32(NFSERR_BAD_COOKIE) +#define nfserr_same cpu_to_be32(NFSERR_SAME) +#define nfserr_clid_inuse cpu_to_be32(NFSERR_CLID_INUSE) +#define nfserr_stale_clientid cpu_to_be32(NFSERR_STALE_CLIENTID) +#define nfserr_resource cpu_to_be32(NFSERR_RESOURCE) +#define nfserr_moved cpu_to_be32(NFSERR_MOVED) +#define nfserr_nofilehandle cpu_to_be32(NFSERR_NOFILEHANDLE) +#define nfserr_minor_vers_mismatch cpu_to_be32(NFSERR_MINOR_VERS_MISMATCH) +#define nfserr_share_denied cpu_to_be32(NFSERR_SHARE_DENIED) +#define nfserr_stale_stateid cpu_to_be32(NFSERR_STALE_STATEID) +#define nfserr_old_stateid cpu_to_be32(NFSERR_OLD_STATEID) +#define nfserr_bad_stateid cpu_to_be32(NFSERR_BAD_STATEID) +#define nfserr_bad_seqid cpu_to_be32(NFSERR_BAD_SEQID) +#define nfserr_symlink cpu_to_be32(NFSERR_SYMLINK) +#define nfserr_not_same cpu_to_be32(NFSERR_NOT_SAME) +#define nfserr_restorefh cpu_to_be32(NFSERR_RESTOREFH) +#define nfserr_attrnotsupp cpu_to_be32(NFSERR_ATTRNOTSUPP) +#define nfserr_bad_xdr cpu_to_be32(NFSERR_BAD_XDR) +#define nfserr_openmode cpu_to_be32(NFSERR_OPENMODE) +#define nfserr_locks_held cpu_to_be32(NFSERR_LOCKS_HELD) +#define nfserr_op_illegal cpu_to_be32(NFSERR_OP_ILLEGAL) +#define nfserr_grace cpu_to_be32(NFSERR_GRACE) +#define nfserr_no_grace cpu_to_be32(NFSERR_NO_GRACE) +#define nfserr_reclaim_bad cpu_to_be32(NFSERR_RECLAIM_BAD) +#define nfserr_badname cpu_to_be32(NFSERR_BADNAME) +#define nfserr_cb_path_down cpu_to_be32(NFSERR_CB_PATH_DOWN) +#define nfserr_locked cpu_to_be32(NFSERR_LOCKED) +#define nfserr_wrongsec cpu_to_be32(NFSERR_WRONGSEC) +#define nfserr_badiomode 
cpu_to_be32(NFS4ERR_BADIOMODE) +#define nfserr_badlayout cpu_to_be32(NFS4ERR_BADLAYOUT) +#define nfserr_bad_session_digest cpu_to_be32(NFS4ERR_BAD_SESSION_DIGEST) +#define nfserr_badsession cpu_to_be32(NFS4ERR_BADSESSION) +#define nfserr_badslot cpu_to_be32(NFS4ERR_BADSLOT) +#define nfserr_complete_already cpu_to_be32(NFS4ERR_COMPLETE_ALREADY) +#define nfserr_conn_not_bound_to_session cpu_to_be32(NFS4ERR_CONN_NOT_BOUND_TO_SESSION) +#define nfserr_deleg_already_wanted cpu_to_be32(NFS4ERR_DELEG_ALREADY_WANTED) +#define nfserr_back_chan_busy cpu_to_be32(NFS4ERR_BACK_CHAN_BUSY) +#define nfserr_layouttrylater cpu_to_be32(NFS4ERR_LAYOUTTRYLATER) +#define nfserr_layoutunavailable cpu_to_be32(NFS4ERR_LAYOUTUNAVAILABLE) +#define nfserr_nomatching_layout cpu_to_be32(NFS4ERR_NOMATCHING_LAYOUT) +#define nfserr_recallconflict cpu_to_be32(NFS4ERR_RECALLCONFLICT) +#define nfserr_unknown_layouttype cpu_to_be32(NFS4ERR_UNKNOWN_LAYOUTTYPE) +#define nfserr_seq_misordered cpu_to_be32(NFS4ERR_SEQ_MISORDERED) +#define nfserr_sequence_pos cpu_to_be32(NFS4ERR_SEQUENCE_POS) +#define nfserr_req_too_big cpu_to_be32(NFS4ERR_REQ_TOO_BIG) +#define nfserr_rep_too_big cpu_to_be32(NFS4ERR_REP_TOO_BIG) +#define nfserr_rep_too_big_to_cache cpu_to_be32(NFS4ERR_REP_TOO_BIG_TO_CACHE) +#define nfserr_retry_uncached_rep cpu_to_be32(NFS4ERR_RETRY_UNCACHED_REP) +#define nfserr_unsafe_compound cpu_to_be32(NFS4ERR_UNSAFE_COMPOUND) +#define nfserr_too_many_ops cpu_to_be32(NFS4ERR_TOO_MANY_OPS) +#define nfserr_op_not_in_session cpu_to_be32(NFS4ERR_OP_NOT_IN_SESSION) +#define nfserr_hash_alg_unsupp cpu_to_be32(NFS4ERR_HASH_ALG_UNSUPP) +#define nfserr_clientid_busy cpu_to_be32(NFS4ERR_CLIENTID_BUSY) +#define nfserr_pnfs_io_hole cpu_to_be32(NFS4ERR_PNFS_IO_HOLE) +#define nfserr_seq_false_retry cpu_to_be32(NFS4ERR_SEQ_FALSE_RETRY) +#define nfserr_bad_high_slot cpu_to_be32(NFS4ERR_BAD_HIGH_SLOT) +#define nfserr_deadsession cpu_to_be32(NFS4ERR_DEADSESSION) +#define nfserr_encr_alg_unsupp cpu_to_be32(NFS4ERR_ENCR_ALG_UNSUPP) +#define nfserr_pnfs_no_layout cpu_to_be32(NFS4ERR_PNFS_NO_LAYOUT) +#define nfserr_not_only_op cpu_to_be32(NFS4ERR_NOT_ONLY_OP) +#define nfserr_wrong_cred cpu_to_be32(NFS4ERR_WRONG_CRED) +#define nfserr_wrong_type cpu_to_be32(NFS4ERR_WRONG_TYPE) +#define nfserr_dirdeleg_unavail cpu_to_be32(NFS4ERR_DIRDELEG_UNAVAIL) +#define nfserr_reject_deleg cpu_to_be32(NFS4ERR_REJECT_DELEG) +#define nfserr_returnconflict cpu_to_be32(NFS4ERR_RETURNCONFLICT) +#define nfserr_deleg_revoked cpu_to_be32(NFS4ERR_DELEG_REVOKED) /* error codes for internal use */ /* if a request fails due to kmalloc failure, it gets dropped. * Client should resend eventually */ -#define nfserr_dropit __constant_htonl(30000) +#define nfserr_dropit cpu_to_be32(30000) /* end-of-file indicator in readdir */ -#define nfserr_eof __constant_htonl(30001) +#define nfserr_eof cpu_to_be32(30001) +/* replay detected */ +#define nfserr_replay_me cpu_to_be32(11001) +/* nfs41 replay detected */ +#define nfserr_replay_cache cpu_to_be32(11002) /* Check for dir entries '.' and '..' */ #define isdotent(n, l) (l < 3 && n[0] == '.' 
&& (l == 1 || n[1] == '.')) @@ -300,7 +343,7 @@ extern struct timeval nfssvc_boot; * TIME_BACKUP (unlikely to be supported any time soon) * TIME_CREATE (unlikely to be supported any time soon) */ -#define NFSD_SUPPORTED_ATTRS_WORD0 \ +#define NFSD4_SUPPORTED_ATTRS_WORD0 \ (FATTR4_WORD0_SUPPORTED_ATTRS | FATTR4_WORD0_TYPE | FATTR4_WORD0_FH_EXPIRE_TYPE \ | FATTR4_WORD0_CHANGE | FATTR4_WORD0_SIZE | FATTR4_WORD0_LINK_SUPPORT \ | FATTR4_WORD0_SYMLINK_SUPPORT | FATTR4_WORD0_NAMED_ATTR | FATTR4_WORD0_FSID \ @@ -312,7 +355,7 @@ extern struct timeval nfssvc_boot; | FATTR4_WORD0_MAXFILESIZE | FATTR4_WORD0_MAXLINK | FATTR4_WORD0_MAXNAME \ | FATTR4_WORD0_MAXREAD | FATTR4_WORD0_MAXWRITE | FATTR4_WORD0_ACL) -#define NFSD_SUPPORTED_ATTRS_WORD1 \ +#define NFSD4_SUPPORTED_ATTRS_WORD1 \ (FATTR4_WORD1_MODE | FATTR4_WORD1_NO_TRUNC | FATTR4_WORD1_NUMLINKS \ | FATTR4_WORD1_OWNER | FATTR4_WORD1_OWNER_GROUP | FATTR4_WORD1_RAWDEV \ | FATTR4_WORD1_SPACE_AVAIL | FATTR4_WORD1_SPACE_FREE | FATTR4_WORD1_SPACE_TOTAL \ @@ -320,6 +363,35 @@ extern struct timeval nfssvc_boot; | FATTR4_WORD1_TIME_DELTA | FATTR4_WORD1_TIME_METADATA \ | FATTR4_WORD1_TIME_MODIFY | FATTR4_WORD1_TIME_MODIFY_SET | FATTR4_WORD1_MOUNTED_ON_FILEID) +#define NFSD4_SUPPORTED_ATTRS_WORD2 0 + +#define NFSD4_1_SUPPORTED_ATTRS_WORD0 \ + NFSD4_SUPPORTED_ATTRS_WORD0 + +#define NFSD4_1_SUPPORTED_ATTRS_WORD1 \ + NFSD4_SUPPORTED_ATTRS_WORD1 + +#define NFSD4_1_SUPPORTED_ATTRS_WORD2 \ + (NFSD4_SUPPORTED_ATTRS_WORD2 | FATTR4_WORD2_SUPPATTR_EXCLCREAT) + +static inline u32 nfsd_suppattrs0(u32 minorversion) +{ + return minorversion ? NFSD4_1_SUPPORTED_ATTRS_WORD0 + : NFSD4_SUPPORTED_ATTRS_WORD0; +} + +static inline u32 nfsd_suppattrs1(u32 minorversion) +{ + return minorversion ? NFSD4_1_SUPPORTED_ATTRS_WORD1 + : NFSD4_SUPPORTED_ATTRS_WORD1; +} + +static inline u32 nfsd_suppattrs2(u32 minorversion) +{ + return minorversion ? NFSD4_1_SUPPORTED_ATTRS_WORD2 + : NFSD4_SUPPORTED_ATTRS_WORD2; +} + /* These will return ERR_INVAL if specified in GETATTR or READDIR. */ #define NFSD_WRITEONLY_ATTRS_WORD1 \ (FATTR4_WORD1_TIME_ACCESS_SET | FATTR4_WORD1_TIME_MODIFY_SET) @@ -330,6 +402,19 @@ extern struct timeval nfssvc_boot; #define NFSD_WRITEABLE_ATTRS_WORD1 \ (FATTR4_WORD1_MODE | FATTR4_WORD1_OWNER | FATTR4_WORD1_OWNER_GROUP \ | FATTR4_WORD1_TIME_ACCESS_SET | FATTR4_WORD1_TIME_MODIFY_SET) +#define NFSD_WRITEABLE_ATTRS_WORD2 0 + +#define NFSD_SUPPATTR_EXCLCREAT_WORD0 \ + NFSD_WRITEABLE_ATTRS_WORD0 +/* + * we currently store the exclusive create verifier in the v_{a,m}time + * attributes so the client can't set these at create time using EXCLUSIVE4_1 + */ +#define NFSD_SUPPATTR_EXCLCREAT_WORD1 \ + (NFSD_WRITEABLE_ATTRS_WORD1 & \ + ~(FATTR4_WORD1_TIME_ACCESS_SET | FATTR4_WORD1_TIME_MODIFY_SET)) +#define NFSD_SUPPATTR_EXCLCREAT_WORD2 \ + NFSD_WRITEABLE_ATTRS_WORD2 #endif /* CONFIG_NFSD_V4 */ diff --git a/include/linux/nfsd/nfsfh.h b/include/linux/nfsd/nfsfh.h index b2e093870bc..8f641c90845 100644 --- a/include/linux/nfsd/nfsfh.h +++ b/include/linux/nfsd/nfsfh.h @@ -14,9 +14,8 @@ #ifndef _LINUX_NFSD_FH_H #define _LINUX_NFSD_FH_H -#include <asm/types.h> -#ifdef __KERNEL__ # include <linux/types.h> +#ifdef __KERNEL__ # include <linux/string.h> # include <linux/fs.h> #endif @@ -152,9 +151,15 @@ typedef struct svc_fh { __u64 fh_pre_size; /* size before operation */ struct timespec fh_pre_mtime; /* mtime before oper */ struct timespec fh_pre_ctime; /* ctime before oper */ + /* + * pre-op nfsv4 change attr: note must check IS_I_VERSION(inode) + * to find out if it is valid. 
+ */ + u64 fh_pre_change; /* Post-op attributes saved in fh_unlock */ struct kstat fh_post_attr; /* full attrs after operation */ + u64 fh_post_change; /* nfsv4 change; see above */ #endif /* CONFIG_NFSD_V3 */ } svc_fh; @@ -270,6 +275,13 @@ fh_copy(struct svc_fh *dst, struct svc_fh *src) return dst; } +static inline void +fh_copy_shallow(struct knfsd_fh *dst, struct knfsd_fh *src) +{ + dst->fh_size = src->fh_size; + memcpy(&dst->fh_base, &src->fh_base, src->fh_size); +} + static __inline__ struct svc_fh * fh_init(struct svc_fh *fhp, int maxsize) { @@ -292,6 +304,7 @@ fill_pre_wcc(struct svc_fh *fhp) fhp->fh_pre_mtime = inode->i_mtime; fhp->fh_pre_ctime = inode->i_ctime; fhp->fh_pre_size = inode->i_size; + fhp->fh_pre_change = inode->i_version; fhp->fh_pre_saved = 1; } } diff --git a/include/linux/nfsd/state.h b/include/linux/nfsd/state.h index 128298c0362..57ab2ed0845 100644 --- a/include/linux/nfsd/state.h +++ b/include/linux/nfsd/state.h @@ -41,7 +41,6 @@ #include <linux/kref.h> #include <linux/sunrpc/clnt.h> -#define NFS4_OPAQUE_LIMIT 1024 typedef struct { u32 cl_boot; u32 cl_id; @@ -61,16 +60,6 @@ typedef struct { #define si_stateownerid si_opaque.so_stateownerid #define si_fileid si_opaque.so_fileid - -struct nfs4_cb_recall { - u32 cbr_ident; - int cbr_trunc; - stateid_t cbr_stateid; - u32 cbr_fhlen; - char cbr_fhval[NFS4_FHSIZE]; - struct nfs4_delegation *cbr_dp; -}; - struct nfs4_delegation { struct list_head dl_perfile; struct list_head dl_perclnt; @@ -82,23 +71,91 @@ struct nfs4_delegation { struct file *dl_vfs_file; u32 dl_type; time_t dl_time; - struct nfs4_cb_recall dl_recall; +/* For recall: */ + u32 dl_ident; + stateid_t dl_stateid; + struct knfsd_fh dl_fh; + int dl_retries; }; -#define dl_stateid dl_recall.cbr_stateid -#define dl_fhlen dl_recall.cbr_fhlen -#define dl_fhval dl_recall.cbr_fhval - /* client delegation callback info */ -struct nfs4_callback { +struct nfs4_cb_conn { /* SETCLIENTID info */ u32 cb_addr; unsigned short cb_port; u32 cb_prog; - u32 cb_ident; + u32 cb_minorversion; + u32 cb_ident; /* minorversion 0 only */ /* RPC client info */ atomic_t cb_set; /* successful CB_NULL call */ struct rpc_clnt * cb_client; + struct rpc_cred * cb_cred; +}; + +/* Maximum number of slots per session. 
128 is useful for long haul TCP */ +#define NFSD_MAX_SLOTS_PER_SESSION 128 +/* Maximum number of pages per slot cache entry */ +#define NFSD_PAGES_PER_SLOT 1 +/* Maximum number of operations per session compound */ +#define NFSD_MAX_OPS_PER_COMPOUND 16 + +struct nfsd4_cache_entry { + __be32 ce_status; + struct kvec ce_datav; /* encoded NFSv4.1 data in rq_res.head[0] */ + struct page *ce_respages[NFSD_PAGES_PER_SLOT + 1]; + int ce_cachethis; + short ce_resused; + int ce_opcnt; + int ce_rpchdrlen; +}; + +struct nfsd4_slot { + bool sl_inuse; + u32 sl_seqid; + struct nfsd4_cache_entry sl_cache_entry; +}; + +struct nfsd4_channel_attrs { + u32 headerpadsz; + u32 maxreq_sz; + u32 maxresp_sz; + u32 maxresp_cached; + u32 maxops; + u32 maxreqs; + u32 nr_rdma_attrs; + u32 rdma_attrs; +}; + +struct nfsd4_session { + struct kref se_ref; + struct list_head se_hash; /* hash by sessionid */ + struct list_head se_perclnt; + u32 se_flags; + struct nfs4_client *se_client; /* for expire_client */ + struct nfs4_sessionid se_sessionid; + struct nfsd4_channel_attrs se_fchannel; + struct nfsd4_channel_attrs se_bchannel; + struct nfsd4_slot se_slots[]; /* forward channel slots */ +}; + +static inline void +nfsd4_put_session(struct nfsd4_session *ses) +{ + extern void free_session(struct kref *kref); + kref_put(&ses->se_ref, free_session); +} + +static inline void +nfsd4_get_session(struct nfsd4_session *ses) +{ + kref_get(&ses->se_ref); +} + +/* formatted contents of nfs4_sessionid */ +struct nfsd4_sessionid { + clientid_t clientid; + u32 sequence; + u32 reserved; }; #define HEXDIR_LEN 33 /* hex version of 16 byte md5 of cl_name plus '\0' */ @@ -129,9 +186,15 @@ struct nfs4_client { struct svc_cred cl_cred; /* setclientid principal */ clientid_t cl_clientid; /* generated by server */ nfs4_verifier cl_confirm; /* generated by server */ - struct nfs4_callback cl_callback; /* callback info */ + struct nfs4_cb_conn cl_cb_conn; /* callback info */ atomic_t cl_count; /* ref count */ u32 cl_firststate; /* recovery dir creation */ + + /* for nfs41 */ + struct list_head cl_sessions; + struct nfsd4_slot cl_slot; /* create_session slot */ + u32 cl_exchange_flags; + struct nfs4_sessionid cl_sessionid; }; /* struct nfs4_client_reset @@ -168,8 +231,7 @@ struct nfs4_replay { unsigned int rp_buflen; char *rp_buf; unsigned intrp_allocated; - int rp_openfh_len; - char rp_openfh[NFS4_FHSIZE]; + struct knfsd_fh rp_openfh; char rp_ibuf[NFSD4_REPLAY_ISIZE]; }; @@ -217,7 +279,7 @@ struct nfs4_stateowner { * share_acces, share_deny on the file. 
*/ struct nfs4_file { - struct kref fi_ref; + atomic_t fi_ref; struct list_head fi_hash; /* hash by "struct inode *" */ struct list_head fi_stateids; struct list_head fi_delegations; @@ -259,14 +321,13 @@ struct nfs4_stateid { }; /* flags for preprocess_seqid_op() */ -#define CHECK_FH 0x00000001 +#define HAS_SESSION 0x00000001 #define CONFIRM 0x00000002 #define OPEN_STATE 0x00000004 #define LOCK_STATE 0x00000008 #define RD_STATE 0x00000010 #define WR_STATE 0x00000020 #define CLOSE_STATE 0x00000040 -#define DELEG_RET 0x00000080 #define seqid_mutating_err(err) \ (((err) != nfserr_stale_clientid) && \ @@ -274,7 +335,9 @@ struct nfs4_stateid { ((err) != nfserr_stale_stateid) && \ ((err) != nfserr_bad_stateid)) -extern __be32 nfs4_preprocess_stateid_op(struct svc_fh *current_fh, +struct nfsd4_compound_state; + +extern __be32 nfs4_preprocess_stateid_op(struct nfsd4_compound_state *cstate, stateid_t *stateid, int flags, struct file **filp); extern void nfs4_lock_state(void); extern void nfs4_unlock_state(void); @@ -290,7 +353,7 @@ extern void nfsd4_init_recdir(char *recdir_name); extern int nfsd4_recdir_load(void); extern void nfsd4_shutdown_recdir(void); extern int nfs4_client_to_reclaim(const char *name); -extern int nfs4_has_reclaimed_state(const char *name); +extern int nfs4_has_reclaimed_state(const char *name, bool use_exchange_id); extern void nfsd4_recdir_purge_old(void); extern int nfsd4_create_clid_dir(struct nfs4_client *clp); extern void nfsd4_remove_clid_dir(struct nfs4_client *clp); diff --git a/include/linux/nfsd/stats.h b/include/linux/nfsd/stats.h index 7678cfbe996..2693ef647df 100644 --- a/include/linux/nfsd/stats.h +++ b/include/linux/nfsd/stats.h @@ -11,6 +11,11 @@ #include <linux/nfs4.h> +/* thread usage wraps very million seconds (approx one fortnight) */ +#define NFSD_USAGE_WRAP (HZ*1000000) + +#ifdef __KERNEL__ + struct nfsd_stats { unsigned int rchits; /* repcache hits */ unsigned int rcmisses; /* repcache hits */ @@ -35,10 +40,6 @@ struct nfsd_stats { }; -/* thread usage wraps very million seconds (approx one fortnight) */ -#define NFSD_USAGE_WRAP (HZ*1000000) - -#ifdef __KERNEL__ extern struct nfsd_stats nfsdstats; extern struct svc_stat nfsd_svcstats; diff --git a/include/linux/nfsd/syscall.h b/include/linux/nfsd/syscall.h index 4e439765b70..7a3b565b898 100644 --- a/include/linux/nfsd/syscall.h +++ b/include/linux/nfsd/syscall.h @@ -9,9 +9,8 @@ #ifndef NFSD_SYSCALL_H #define NFSD_SYSCALL_H -#include <asm/types.h> -#ifdef __KERNEL__ # include <linux/types.h> +#ifdef __KERNEL__ # include <linux/in.h> #endif #include <linux/posix_types.h> diff --git a/include/linux/nfsd/xdr4.h b/include/linux/nfsd/xdr4.h index 27bd3e38ec5..2bacf753506 100644 --- a/include/linux/nfsd/xdr4.h +++ b/include/linux/nfsd/xdr4.h @@ -45,17 +45,32 @@ #define XDR_LEN(n) (((n) + 3) & ~3) struct nfsd4_compound_state { - struct svc_fh current_fh; - struct svc_fh save_fh; - struct nfs4_stateowner *replay_owner; -}; + struct svc_fh current_fh; + struct svc_fh save_fh; + struct nfs4_stateowner *replay_owner; + /* For sessions DRC */ + struct nfsd4_session *session; + struct nfsd4_slot *slot; + __be32 *statp; + size_t iovlen; + u32 minorversion; + u32 status; +}; + +static inline bool nfsd4_has_session(struct nfsd4_compound_state *cs) +{ + return cs->slot != NULL; +} struct nfsd4_change_info { u32 atomic; + bool change_supported; u32 before_ctime_sec; u32 before_ctime_nsec; + u64 before_change; u32 after_ctime_sec; u32 after_ctime_nsec; + u64 after_change; }; struct nfsd4_access { @@ -90,7 +105,7 @@ struct 
nfsd4_create { u32 specdata2; } dev; /* NF4BLK, NF4CHR */ } u; - u32 cr_bmval[2]; /* request */ + u32 cr_bmval[3]; /* request */ struct iattr cr_iattr; /* request */ struct nfsd4_change_info cr_cinfo; /* response */ struct nfs4_acl *cr_acl; @@ -105,7 +120,7 @@ struct nfsd4_delegreturn { }; struct nfsd4_getattr { - u32 ga_bmval[2]; /* request */ + u32 ga_bmval[3]; /* request */ struct svc_fh *ga_fhp; /* response */ }; @@ -206,11 +221,9 @@ struct nfsd4_open { stateid_t op_delegate_stateid; /* request - response */ u32 op_create; /* request */ u32 op_createmode; /* request */ - u32 op_bmval[2]; /* request */ - union { /* request */ - struct iattr iattr; /* UNCHECKED4,GUARDED4 */ - nfs4_verifier verf; /* EXCLUSIVE4 */ - } u; + u32 op_bmval[3]; /* request */ + struct iattr iattr; /* UNCHECKED4, GUARDED4, EXCLUSIVE4_1 */ + nfs4_verifier verf; /* EXCLUSIVE4 */ clientid_t op_clientid; /* request */ struct xdr_netobj op_owner; /* request */ u32 op_seqid; /* request */ @@ -224,8 +237,8 @@ struct nfsd4_open { struct nfs4_stateowner *op_stateowner; /* used during processing */ struct nfs4_acl *op_acl; }; -#define op_iattr u.iattr -#define op_verf u.verf +#define op_iattr iattr +#define op_verf verf struct nfsd4_open_confirm { stateid_t oc_req_stateid /* request */; @@ -259,7 +272,7 @@ struct nfsd4_readdir { nfs4_verifier rd_verf; /* request */ u32 rd_dircount; /* request */ u32 rd_maxcount; /* request */ - u32 rd_bmval[2]; /* request */ + u32 rd_bmval[3]; /* request */ struct svc_rqst *rd_rqstp; /* response */ struct svc_fh * rd_fhp; /* response */ @@ -301,7 +314,7 @@ struct nfsd4_secinfo { struct nfsd4_setattr { stateid_t sa_stateid; /* request */ - u32 sa_bmval[2]; /* request */ + u32 sa_bmval[3]; /* request */ struct iattr sa_iattr; /* request */ struct nfs4_acl *sa_acl; }; @@ -327,7 +340,7 @@ struct nfsd4_setclientid_confirm { /* also used for NVERIFY */ struct nfsd4_verify { - u32 ve_bmval[2]; /* request */ + u32 ve_bmval[3]; /* request */ u32 ve_attrlen; /* request */ char * ve_attrval; /* request */ }; @@ -344,6 +357,43 @@ struct nfsd4_write { nfs4_verifier wr_verifier; /* response */ }; +struct nfsd4_exchange_id { + nfs4_verifier verifier; + struct xdr_netobj clname; + u32 flags; + clientid_t clientid; + u32 seqid; + int spa_how; +}; + +struct nfsd4_create_session { + clientid_t clientid; + struct nfs4_sessionid sessionid; + u32 seqid; + u32 flags; + struct nfsd4_channel_attrs fore_channel; + struct nfsd4_channel_attrs back_channel; + u32 callback_prog; + u32 uid; + u32 gid; +}; + +struct nfsd4_sequence { + struct nfs4_sessionid sessionid; /* request/response */ + u32 seqid; /* request/response */ + u32 slotid; /* request/response */ + u32 maxslots; /* request/response */ + u32 cachethis; /* request */ +#if 0 + u32 target_maxslots; /* response */ + u32 status_flags; /* response */ +#endif /* not yet */ +}; + +struct nfsd4_destroy_session { + struct nfs4_sessionid sessionid; +}; + struct nfsd4_op { int opnum; __be32 status; @@ -378,6 +428,12 @@ struct nfsd4_op { struct nfsd4_verify verify; struct nfsd4_write write; struct nfsd4_release_lockowner release_lockowner; + + /* NFSv4.1 */ + struct nfsd4_exchange_id exchange_id; + struct nfsd4_create_session create_session; + struct nfsd4_destroy_session destroy_session; + struct nfsd4_sequence sequence; } u; struct nfs4_replay * replay; }; @@ -416,9 +472,22 @@ struct nfsd4_compoundres { u32 taglen; char * tag; u32 opcnt; - __be32 * tagp; /* where to encode tag and opcount */ + __be32 * tagp; /* tag, opcount encode location */ + struct 
nfsd4_compound_state cstate; }; +static inline bool nfsd4_is_solo_sequence(struct nfsd4_compoundres *resp) +{ + struct nfsd4_compoundargs *args = resp->rqstp->rq_argp; + return args->opcnt == 1; +} + +static inline bool nfsd4_not_cached(struct nfsd4_compoundres *resp) +{ + return !resp->cstate.slot->sl_cache_entry.ce_cachethis || + nfsd4_is_solo_sequence(resp); +} + #define NFS4_SVC_XDRSIZE sizeof(struct nfsd4_compoundargs) static inline void @@ -426,10 +495,16 @@ set_change_info(struct nfsd4_change_info *cinfo, struct svc_fh *fhp) { BUG_ON(!fhp->fh_pre_saved || !fhp->fh_post_saved); cinfo->atomic = 1; - cinfo->before_ctime_sec = fhp->fh_pre_ctime.tv_sec; - cinfo->before_ctime_nsec = fhp->fh_pre_ctime.tv_nsec; - cinfo->after_ctime_sec = fhp->fh_post_attr.ctime.tv_sec; - cinfo->after_ctime_nsec = fhp->fh_post_attr.ctime.tv_nsec; + cinfo->change_supported = IS_I_VERSION(fhp->fh_dentry->d_inode); + if (cinfo->change_supported) { + cinfo->before_change = fhp->fh_pre_change; + cinfo->after_change = fhp->fh_post_change; + } else { + cinfo->before_ctime_sec = fhp->fh_pre_ctime.tv_sec; + cinfo->before_ctime_nsec = fhp->fh_pre_ctime.tv_nsec; + cinfo->after_ctime_sec = fhp->fh_post_attr.ctime.tv_sec; + cinfo->after_ctime_nsec = fhp->fh_post_attr.ctime.tv_nsec; + } } int nfs4svc_encode_voidres(struct svc_rqst *, __be32 *, void *); @@ -448,7 +523,23 @@ extern __be32 nfsd4_setclientid(struct svc_rqst *rqstp, extern __be32 nfsd4_setclientid_confirm(struct svc_rqst *rqstp, struct nfsd4_compound_state *, struct nfsd4_setclientid_confirm *setclientid_confirm); -extern __be32 nfsd4_process_open1(struct nfsd4_open *open); +extern void nfsd4_store_cache_entry(struct nfsd4_compoundres *resp); +extern __be32 nfsd4_replay_cache_entry(struct nfsd4_compoundres *resp, + struct nfsd4_sequence *seq); +extern __be32 nfsd4_exchange_id(struct svc_rqst *rqstp, + struct nfsd4_compound_state *, +struct nfsd4_exchange_id *); + extern __be32 nfsd4_create_session(struct svc_rqst *, + struct nfsd4_compound_state *, + struct nfsd4_create_session *); +extern __be32 nfsd4_sequence(struct svc_rqst *, + struct nfsd4_compound_state *, + struct nfsd4_sequence *); +extern __be32 nfsd4_destroy_session(struct svc_rqst *, + struct nfsd4_compound_state *, + struct nfsd4_destroy_session *); +extern __be32 nfsd4_process_open1(struct nfsd4_compound_state *, + struct nfsd4_open *open); extern __be32 nfsd4_process_open2(struct svc_rqst *rqstp, struct svc_fh *current_fh, struct nfsd4_open *open); extern __be32 nfsd4_open_confirm(struct svc_rqst *rqstp, diff --git a/include/linux/nilfs2_fs.h b/include/linux/nilfs2_fs.h new file mode 100644 index 00000000000..79fec6af3f9 --- /dev/null +++ b/include/linux/nilfs2_fs.h @@ -0,0 +1,801 @@ +/* + * nilfs2_fs.h - NILFS2 on-disk structures and common declarations. + * + * Copyright (C) 2005-2008 Nippon Telegraph and Telephone Corporation. + * + * This program is free software; you can redistribute it and/or modify + * it under the terms of the GNU General Public License as published by + * the Free Software Foundation; either version 2 of the License, or + * (at your option) any later version. + * + * This program is distributed in the hope that it will be useful, + * but WITHOUT ANY WARRANTY; without even the implied warranty of + * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the + * GNU General Public License for more details. 
+ * + * You should have received a copy of the GNU General Public License + * along with this program; if not, write to the Free Software + * Foundation, Inc., 51 Franklin St, Fifth Floor, Boston, MA 02110-1301 USA + * + * Written by Koji Sato <koji@osrg.net> + * Ryusuke Konishi <ryusuke@osrg.net> + */ +/* + * linux/include/linux/ext2_fs.h + * + * Copyright (C) 1992, 1993, 1994, 1995 + * Remy Card (card@masi.ibp.fr) + * Laboratoire MASI - Institut Blaise Pascal + * Universite Pierre et Marie Curie (Paris VI) + * + * from + * + * linux/include/linux/minix_fs.h + * + * Copyright (C) 1991, 1992 Linus Torvalds + */ + +#ifndef _LINUX_NILFS_FS_H +#define _LINUX_NILFS_FS_H + +#include <linux/types.h> +#include <linux/ioctl.h> + +/* + * Inode flags stored in nilfs_inode and on-memory nilfs inode + * + * We define these flags based on ext2-fs because of the + * compatibility reason; to avoid problems in chattr(1) + */ +#define NILFS_SECRM_FL 0x00000001 /* Secure deletion */ +#define NILFS_UNRM_FL 0x00000002 /* Undelete */ +#define NILFS_SYNC_FL 0x00000008 /* Synchronous updates */ +#define NILFS_IMMUTABLE_FL 0x00000010 /* Immutable file */ +#define NILFS_APPEND_FL 0x00000020 /* writes to file may only append */ +#define NILFS_NODUMP_FL 0x00000040 /* do not dump file */ +#define NILFS_NOATIME_FL 0x00000080 /* do not update atime */ +/* Reserved for compression usage... */ +#define NILFS_NOTAIL_FL 0x00008000 /* file tail should not be merged */ +#define NILFS_DIRSYNC_FL 0x00010000 /* dirsync behaviour */ + +#define NILFS_FL_USER_VISIBLE 0x0003DFFF /* User visible flags */ +#define NILFS_FL_USER_MODIFIABLE 0x000380FF /* User modifiable flags */ + + +#define NILFS_INODE_BMAP_SIZE 7 +/** + * struct nilfs_inode - structure of an inode on disk + * @i_blocks: blocks count + * @i_size: size in bytes + * @i_ctime: creation time (seconds) + * @i_mtime: modification time (seconds) + * @i_ctime_nsec: creation time (nano seconds) + * @i_mtime_nsec: modification time (nano seconds) + * @i_uid: user id + * @i_gid: group id + * @i_mode: file mode + * @i_links_count: links count + * @i_flags: file flags + * @i_bmap: block mapping + * @i_xattr: extended attributes + * @i_generation: file generation (for NFS) + * @i_pad: padding + */ +struct nilfs_inode { + __le64 i_blocks; + __le64 i_size; + __le64 i_ctime; + __le64 i_mtime; + __le32 i_ctime_nsec; + __le32 i_mtime_nsec; + __le32 i_uid; + __le32 i_gid; + __le16 i_mode; + __le16 i_links_count; + __le32 i_flags; + __le64 i_bmap[NILFS_INODE_BMAP_SIZE]; +#define i_device_code i_bmap[0] + __le64 i_xattr; + __le32 i_generation; + __le32 i_pad; +}; + +/** + * struct nilfs_super_root - structure of super root + * @sr_sum: check sum + * @sr_bytes: byte count of the structure + * @sr_flags: flags (reserved) + * @sr_nongc_ctime: write time of the last segment not for cleaner operation + * @sr_dat: DAT file inode + * @sr_cpfile: checkpoint file inode + * @sr_sufile: segment usage file inode + */ +struct nilfs_super_root { + __le32 sr_sum; + __le16 sr_bytes; + __le16 sr_flags; + __le64 sr_nongc_ctime; + struct nilfs_inode sr_dat; + struct nilfs_inode sr_cpfile; + struct nilfs_inode sr_sufile; +}; + +#define NILFS_SR_MDT_OFFSET(inode_size, i) \ + ((unsigned long)&((struct nilfs_super_root *)0)->sr_dat + \ + (inode_size) * (i)) +#define NILFS_SR_DAT_OFFSET(inode_size) NILFS_SR_MDT_OFFSET(inode_size, 0) +#define NILFS_SR_CPFILE_OFFSET(inode_size) NILFS_SR_MDT_OFFSET(inode_size, 1) +#define NILFS_SR_SUFILE_OFFSET(inode_size) NILFS_SR_MDT_OFFSET(inode_size, 2) +#define NILFS_SR_BYTES 
(sizeof(struct nilfs_super_root)) + +/* + * Maximal mount counts + */ +#define NILFS_DFL_MAX_MNT_COUNT 50 /* 50 mounts */ + +/* + * File system states (sbp->s_state, nilfs->ns_mount_state) + */ +#define NILFS_VALID_FS 0x0001 /* Unmounted cleanly */ +#define NILFS_ERROR_FS 0x0002 /* Errors detected */ +#define NILFS_RESIZE_FS 0x0004 /* Resize required */ + +/* + * Mount flags (sbi->s_mount_opt) + */ +#define NILFS_MOUNT_ERROR_MODE 0x0070 /* Error mode mask */ +#define NILFS_MOUNT_ERRORS_CONT 0x0010 /* Continue on errors */ +#define NILFS_MOUNT_ERRORS_RO 0x0020 /* Remount fs ro on errors */ +#define NILFS_MOUNT_ERRORS_PANIC 0x0040 /* Panic on errors */ +#define NILFS_MOUNT_SNAPSHOT 0x0080 /* Snapshot flag */ +#define NILFS_MOUNT_BARRIER 0x1000 /* Use block barriers */ +#define NILFS_MOUNT_STRICT_ORDER 0x2000 /* Apply strict in-order + semantics also for data */ + + +/** + * struct nilfs_super_block - structure of super block on disk + */ +struct nilfs_super_block { + __le32 s_rev_level; /* Revision level */ + __le16 s_minor_rev_level; /* minor revision level */ + __le16 s_magic; /* Magic signature */ + + __le16 s_bytes; /* Bytes count of CRC calculation + for this structure. s_reserved + is excluded. */ + __le16 s_flags; /* flags */ + __le32 s_crc_seed; /* Seed value of CRC calculation */ + __le32 s_sum; /* Check sum of super block */ + + __le32 s_log_block_size; /* Block size represented as follows + blocksize = + 1 << (s_log_block_size + 10) */ + __le64 s_nsegments; /* Number of segments in filesystem */ + __le64 s_dev_size; /* block device size in bytes */ + __le64 s_first_data_block; /* 1st seg disk block number */ + __le32 s_blocks_per_segment; /* number of blocks per full segment */ + __le32 s_r_segments_percentage; /* Reserved segments percentage */ + + __le64 s_last_cno; /* Last checkpoint number */ + __le64 s_last_pseg; /* disk block addr pseg written last */ + __le64 s_last_seq; /* seq. number of seg written last */ + __le64 s_free_blocks_count; /* Free blocks count */ + + __le64 s_ctime; /* Creation time (execution time of + newfs) */ + __le64 s_mtime; /* Mount time */ + __le64 s_wtime; /* Write time */ + __le16 s_mnt_count; /* Mount count */ + __le16 s_max_mnt_count; /* Maximal mount count */ + __le16 s_state; /* File system state */ + __le16 s_errors; /* Behaviour when detecting errors */ + __le64 s_lastcheck; /* time of last check */ + + __le32 s_checkinterval; /* max. 
time between checks */ + __le32 s_creator_os; /* OS */ + __le16 s_def_resuid; /* Default uid for reserved blocks */ + __le16 s_def_resgid; /* Default gid for reserved blocks */ + __le32 s_first_ino; /* First non-reserved inode */ + + __le16 s_inode_size; /* Size of an inode */ + __le16 s_dat_entry_size; /* Size of a dat entry */ + __le16 s_checkpoint_size; /* Size of a checkpoint */ + __le16 s_segment_usage_size; /* Size of a segment usage */ + + __u8 s_uuid[16]; /* 128-bit uuid for volume */ + char s_volume_name[16]; /* volume name */ + char s_last_mounted[64]; /* directory where last mounted */ + + __le32 s_c_interval; /* Commit interval of segment */ + __le32 s_c_block_max; /* Threshold of data amount for + the segment construction */ + __u32 s_reserved[192]; /* padding to the end of the block */ +}; + +/* + * Codes for operating systems + */ +#define NILFS_OS_LINUX 0 +/* Codes from 1 to 4 are reserved to keep compatibility with ext2 creator-OS */ + +/* + * Revision levels + */ +#define NILFS_CURRENT_REV 2 /* current major revision */ +#define NILFS_MINOR_REV 0 /* minor revision */ + +/* + * Bytes count of super_block for CRC-calculation + */ +#define NILFS_SB_BYTES \ + ((long)&((struct nilfs_super_block *)0)->s_reserved) + +/* + * Special inode number + */ +#define NILFS_ROOT_INO 2 /* Root file inode */ +#define NILFS_DAT_INO 3 /* DAT file */ +#define NILFS_CPFILE_INO 4 /* checkpoint file */ +#define NILFS_SUFILE_INO 5 /* segment usage file */ +#define NILFS_IFILE_INO 6 /* ifile */ +#define NILFS_ATIME_INO 7 /* Atime file (reserved) */ +#define NILFS_XATTR_INO 8 /* Xattribute file (reserved) */ +#define NILFS_SKETCH_INO 10 /* Sketch file */ +#define NILFS_USER_INO 11 /* Fisrt user's file inode number */ + +#define NILFS_SB_OFFSET_BYTES 1024 /* byte offset of nilfs superblock */ +#define NILFS_SUPER_MAGIC 0x3434 /* NILFS filesystem magic number */ + +#define NILFS_SEG_MIN_BLOCKS 16 /* Minimum number of blocks in + a full segment */ +#define NILFS_PSEG_MIN_BLOCKS 2 /* Minimum number of blocks in + a partial segment */ +#define NILFS_MIN_NRSVSEGS 8 /* Minimum number of reserved + segments */ + +/* + * bytes offset of secondary super block + */ +#define NILFS_SB2_OFFSET_BYTES(devsize) ((((devsize) >> 12) - 1) << 12) + +/* + * Maximal count of links to a file + */ +#define NILFS_LINK_MAX 32000 + +/* + * Structure of a directory entry + * (Same as ext2) + */ + +#define NILFS_NAME_LEN 255 + +/* + * The new version of the directory entry. Since V0 structures are + * stored in intel byte order, and the name_len field could never be + * bigger than 255 chars, it's safe to reclaim the extra byte for the + * file_type field. + */ +struct nilfs_dir_entry { + __le64 inode; /* Inode number */ + __le16 rec_len; /* Directory entry length */ + __u8 name_len; /* Name length */ + __u8 file_type; + char name[NILFS_NAME_LEN]; /* File name */ + char pad; +}; + +/* + * NILFS directory file types. Only the low 3 bits are used. The + * other bits are reserved for now. 
+ */ +enum { + NILFS_FT_UNKNOWN, + NILFS_FT_REG_FILE, + NILFS_FT_DIR, + NILFS_FT_CHRDEV, + NILFS_FT_BLKDEV, + NILFS_FT_FIFO, + NILFS_FT_SOCK, + NILFS_FT_SYMLINK, + NILFS_FT_MAX +}; + +/* + * NILFS_DIR_PAD defines the directory entries boundaries + * + * NOTE: It must be a multiple of 8 + */ +#define NILFS_DIR_PAD 8 +#define NILFS_DIR_ROUND (NILFS_DIR_PAD - 1) +#define NILFS_DIR_REC_LEN(name_len) (((name_len) + 12 + NILFS_DIR_ROUND) & \ + ~NILFS_DIR_ROUND) + + +/** + * struct nilfs_finfo - file information + * @fi_ino: inode number + * @fi_cno: checkpoint number + * @fi_nblocks: number of blocks (including intermediate blocks) + * @fi_ndatablk: number of file data blocks + */ +struct nilfs_finfo { + __le64 fi_ino; + __le64 fi_cno; + __le32 fi_nblocks; + __le32 fi_ndatablk; + /* array of virtual block numbers */ +}; + +/** + * struct nilfs_binfo_v - information for the block to which a virtual block number is assigned + * @bi_vblocknr: virtual block number + * @bi_blkoff: block offset + */ +struct nilfs_binfo_v { + __le64 bi_vblocknr; + __le64 bi_blkoff; +}; + +/** + * struct nilfs_binfo_dat - information for the block which belongs to the DAT file + * @bi_blkoff: block offset + * @bi_level: level + * @bi_pad: padding + */ +struct nilfs_binfo_dat { + __le64 bi_blkoff; + __u8 bi_level; + __u8 bi_pad[7]; +}; + +/** + * union nilfs_binfo: block information + * @bi_v: nilfs_binfo_v structure + * @bi_dat: nilfs_binfo_dat structure + */ +union nilfs_binfo { + struct nilfs_binfo_v bi_v; + struct nilfs_binfo_dat bi_dat; +}; + +/** + * struct nilfs_segment_summary - segment summary + * @ss_datasum: checksum of data + * @ss_sumsum: checksum of segment summary + * @ss_magic: magic number + * @ss_bytes: size of this structure in bytes + * @ss_flags: flags + * @ss_seq: sequence number + * @ss_create: creation timestamp + * @ss_next: next segment + * @ss_nblocks: number of blocks + * @ss_nfinfo: number of finfo structures + * @ss_sumbytes: total size of segment summary in bytes + * @ss_pad: padding + */ +struct nilfs_segment_summary { + __le32 ss_datasum; + __le32 ss_sumsum; + __le32 ss_magic; + __le16 ss_bytes; + __le16 ss_flags; + __le64 ss_seq; + __le64 ss_create; + __le64 ss_next; + __le32 ss_nblocks; + __le32 ss_nfinfo; + __le32 ss_sumbytes; + __le32 ss_pad; + /* array of finfo structures */ +}; + +#define NILFS_SEGSUM_MAGIC 0x1eaffa11 /* segment summary magic number */ + +/* + * Segment summary flags + */ +#define NILFS_SS_LOGBGN 0x0001 /* begins a logical segment */ +#define NILFS_SS_LOGEND 0x0002 /* ends a logical segment */ +#define NILFS_SS_SR 0x0004 /* has super root */ +#define NILFS_SS_SYNDT 0x0008 /* includes data only updates */ +#define NILFS_SS_GC 0x0010 /* segment written for cleaner operation */ + +/** + * struct nilfs_palloc_group_desc - block group descriptor + * @pg_nfrees: number of free entries in block group + */ +struct nilfs_palloc_group_desc { + __le32 pg_nfrees; +}; + +/** + * struct nilfs_dat_entry - disk address translation entry + * @dt_blocknr: block number + * @dt_start: start checkpoint number + * @dt_end: end checkpoint number + * @dt_rsv: reserved for future use + */ +struct nilfs_dat_entry { + __le64 de_blocknr; + __le64 de_start; + __le64 de_end; + __le64 de_rsv; +}; + +/** + * struct nilfs_dat_group_desc - block group descriptor + * @dg_nfrees: number of free virtual block numbers in block group + */ +struct nilfs_dat_group_desc { + __le32 dg_nfrees; +}; + + +/** + * struct nilfs_snapshot_list - snapshot list + * @ssl_next: next checkpoint number on snapshot list + 
* @ssl_prev: previous checkpoint number on snapshot list + */ +struct nilfs_snapshot_list { + __le64 ssl_next; + __le64 ssl_prev; +}; + +/** + * struct nilfs_checkpoint - checkpoint structure + * @cp_flags: flags + * @cp_checkpoints_count: checkpoints count in a block + * @cp_snapshot_list: snapshot list + * @cp_cno: checkpoint number + * @cp_create: creation timestamp + * @cp_nblk_inc: number of blocks incremented by this checkpoint + * @cp_inodes_count: inodes count + * @cp_blocks_count: blocks count + * @cp_ifile_inode: inode of ifile + */ +struct nilfs_checkpoint { + __le32 cp_flags; + __le32 cp_checkpoints_count; + struct nilfs_snapshot_list cp_snapshot_list; + __le64 cp_cno; + __le64 cp_create; + __le64 cp_nblk_inc; + __le64 cp_inodes_count; + __le64 cp_blocks_count; /* Reserved (might be deleted) */ + + /* Do not change the byte offset of ifile inode. + To keep the compatibility of the disk format, + additional fields should be added behind cp_ifile_inode. */ + struct nilfs_inode cp_ifile_inode; +}; + +/* checkpoint flags */ +enum { + NILFS_CHECKPOINT_SNAPSHOT, + NILFS_CHECKPOINT_INVALID, + NILFS_CHECKPOINT_SKETCH, + NILFS_CHECKPOINT_MINOR, +}; + +#define NILFS_CHECKPOINT_FNS(flag, name) \ +static inline void \ +nilfs_checkpoint_set_##name(struct nilfs_checkpoint *cp) \ +{ \ + cp->cp_flags = cpu_to_le32(le32_to_cpu(cp->cp_flags) | \ + (1UL << NILFS_CHECKPOINT_##flag)); \ +} \ +static inline void \ +nilfs_checkpoint_clear_##name(struct nilfs_checkpoint *cp) \ +{ \ + cp->cp_flags = cpu_to_le32(le32_to_cpu(cp->cp_flags) & \ + ~(1UL << NILFS_CHECKPOINT_##flag)); \ +} \ +static inline int \ +nilfs_checkpoint_##name(const struct nilfs_checkpoint *cp) \ +{ \ + return !!(le32_to_cpu(cp->cp_flags) & \ + (1UL << NILFS_CHECKPOINT_##flag)); \ +} + +NILFS_CHECKPOINT_FNS(SNAPSHOT, snapshot) +NILFS_CHECKPOINT_FNS(INVALID, invalid) +NILFS_CHECKPOINT_FNS(MINOR, minor) + +/** + * struct nilfs_cpinfo - checkpoint information + * @ci_flags: flags + * @ci_pad: padding + * @ci_cno: checkpoint number + * @ci_create: creation timestamp + * @ci_nblk_inc: number of blocks incremented by this checkpoint + * @ci_inodes_count: inodes count + * @ci_blocks_count: blocks count + * @ci_next: next checkpoint number in snapshot list + */ +struct nilfs_cpinfo { + __u32 ci_flags; + __u32 ci_pad; + __u64 ci_cno; + __u64 ci_create; + __u64 ci_nblk_inc; + __u64 ci_inodes_count; + __u64 ci_blocks_count; + __u64 ci_next; +}; + +#define NILFS_CPINFO_FNS(flag, name) \ +static inline int \ +nilfs_cpinfo_##name(const struct nilfs_cpinfo *cpinfo) \ +{ \ + return !!(cpinfo->ci_flags & (1UL << NILFS_CHECKPOINT_##flag)); \ +} + +NILFS_CPINFO_FNS(SNAPSHOT, snapshot) +NILFS_CPINFO_FNS(INVALID, invalid) +NILFS_CPINFO_FNS(MINOR, minor) + + +/** + * struct nilfs_cpfile_header - checkpoint file header + * @ch_ncheckpoints: number of checkpoints + * @ch_nsnapshots: number of snapshots + * @ch_snapshot_list: snapshot list + */ +struct nilfs_cpfile_header { + __le64 ch_ncheckpoints; + __le64 ch_nsnapshots; + struct nilfs_snapshot_list ch_snapshot_list; +}; + +#define NILFS_CPFILE_FIRST_CHECKPOINT_OFFSET \ + ((sizeof(struct nilfs_cpfile_header) + \ + sizeof(struct nilfs_checkpoint) - 1) / \ + sizeof(struct nilfs_checkpoint)) + +/** + * struct nilfs_segment_usage - segment usage + * @su_lastmod: last modified timestamp + * @su_nblocks: number of blocks in segment + * @su_flags: flags + */ +struct nilfs_segment_usage { + __le64 su_lastmod; + __le32 su_nblocks; + __le32 su_flags; +}; + +/* segment usage flag */ +enum { + 
NILFS_SEGMENT_USAGE_ACTIVE, + NILFS_SEGMENT_USAGE_DIRTY, + NILFS_SEGMENT_USAGE_ERROR, + + /* ... */ +}; + +#define NILFS_SEGMENT_USAGE_FNS(flag, name) \ +static inline void \ +nilfs_segment_usage_set_##name(struct nilfs_segment_usage *su) \ +{ \ + su->su_flags = cpu_to_le32(le32_to_cpu(su->su_flags) | \ + (1UL << NILFS_SEGMENT_USAGE_##flag));\ +} \ +static inline void \ +nilfs_segment_usage_clear_##name(struct nilfs_segment_usage *su) \ +{ \ + su->su_flags = \ + cpu_to_le32(le32_to_cpu(su->su_flags) & \ + ~(1UL << NILFS_SEGMENT_USAGE_##flag)); \ +} \ +static inline int \ +nilfs_segment_usage_##name(const struct nilfs_segment_usage *su) \ +{ \ + return !!(le32_to_cpu(su->su_flags) & \ + (1UL << NILFS_SEGMENT_USAGE_##flag)); \ +} + +NILFS_SEGMENT_USAGE_FNS(ACTIVE, active) +NILFS_SEGMENT_USAGE_FNS(DIRTY, dirty) +NILFS_SEGMENT_USAGE_FNS(ERROR, error) + +static inline void +nilfs_segment_usage_set_clean(struct nilfs_segment_usage *su) +{ + su->su_lastmod = cpu_to_le64(0); + su->su_nblocks = cpu_to_le32(0); + su->su_flags = cpu_to_le32(0); +} + +static inline int +nilfs_segment_usage_clean(const struct nilfs_segment_usage *su) +{ + return !le32_to_cpu(su->su_flags); +} + +/** + * struct nilfs_sufile_header - segment usage file header + * @sh_ncleansegs: number of clean segments + * @sh_ndirtysegs: number of dirty segments + * @sh_last_alloc: last allocated segment number + */ +struct nilfs_sufile_header { + __le64 sh_ncleansegs; + __le64 sh_ndirtysegs; + __le64 sh_last_alloc; + /* ... */ +}; + +#define NILFS_SUFILE_FIRST_SEGMENT_USAGE_OFFSET \ + ((sizeof(struct nilfs_sufile_header) + \ + sizeof(struct nilfs_segment_usage) - 1) / \ + sizeof(struct nilfs_segment_usage)) + +/** + * nilfs_suinfo - segment usage information + * @sui_lastmod: + * @sui_nblocks: + * @sui_flags: + */ +struct nilfs_suinfo { + __u64 sui_lastmod; + __u32 sui_nblocks; + __u32 sui_flags; +}; + +#define NILFS_SUINFO_FNS(flag, name) \ +static inline int \ +nilfs_suinfo_##name(const struct nilfs_suinfo *si) \ +{ \ + return si->sui_flags & (1UL << NILFS_SEGMENT_USAGE_##flag); \ +} + +NILFS_SUINFO_FNS(ACTIVE, active) +NILFS_SUINFO_FNS(DIRTY, dirty) +NILFS_SUINFO_FNS(ERROR, error) + +static inline int nilfs_suinfo_clean(const struct nilfs_suinfo *si) +{ + return !si->sui_flags; +} + +/* ioctl */ +enum { + NILFS_CHECKPOINT, + NILFS_SNAPSHOT, +}; + +/** + * struct nilfs_cpmode - + * @cc_cno: + * @cc_mode: + */ +struct nilfs_cpmode { + __u64 cm_cno; + __u32 cm_mode; + __u32 cm_pad; +}; + +/** + * struct nilfs_argv - argument vector + * @v_base: + * @v_nmembs: + * @v_size: + * @v_flags: + * @v_index: + */ +struct nilfs_argv { + __u64 v_base; + __u32 v_nmembs; /* number of members */ + __u16 v_size; /* size of members */ + __u16 v_flags; + __u64 v_index; +}; + +/** + * struct nilfs_period - + * @p_start: + * @p_end: + */ +struct nilfs_period { + __u64 p_start; + __u64 p_end; +}; + +/** + * struct nilfs_cpstat - + * @cs_cno: checkpoint number + * @cs_ncps: number of checkpoints + * @cs_nsss: number of snapshots + */ +struct nilfs_cpstat { + __u64 cs_cno; + __u64 cs_ncps; + __u64 cs_nsss; +}; + +/** + * struct nilfs_sustat - + * @ss_nsegs: number of segments + * @ss_ncleansegs: number of clean segments + * @ss_ndirtysegs: number of dirty segments + * @ss_ctime: creation time of the last segment + * @ss_nongc_ctime: creation time of the last segment not for GC + * @ss_prot_seq: least sequence number of segments which must not be reclaimed + */ +struct nilfs_sustat { + __u64 ss_nsegs; + __u64 ss_ncleansegs; + __u64 ss_ndirtysegs; + __u64 
ss_ctime; + __u64 ss_nongc_ctime; + __u64 ss_prot_seq; +}; + +/** + * struct nilfs_vinfo - virtual block number information + * @vi_vblocknr: + * @vi_start: + * @vi_end: + * @vi_blocknr: + */ +struct nilfs_vinfo { + __u64 vi_vblocknr; + __u64 vi_start; + __u64 vi_end; + __u64 vi_blocknr; +}; + +/** + * struct nilfs_vdesc - + */ +struct nilfs_vdesc { + __u64 vd_ino; + __u64 vd_cno; + __u64 vd_vblocknr; + struct nilfs_period vd_period; + __u64 vd_blocknr; + __u64 vd_offset; + __u32 vd_flags; + __u32 vd_pad; +}; + +/** + * struct nilfs_bdesc - + */ +struct nilfs_bdesc { + __u64 bd_ino; + __u64 bd_oblocknr; + __u64 bd_blocknr; + __u64 bd_offset; + __u32 bd_level; + __u32 bd_pad; +}; + +#define NILFS_IOCTL_IDENT 'n' + +#define NILFS_IOCTL_CHANGE_CPMODE \ + _IOW(NILFS_IOCTL_IDENT, 0x80, struct nilfs_cpmode) +#define NILFS_IOCTL_DELETE_CHECKPOINT \ + _IOW(NILFS_IOCTL_IDENT, 0x81, __u64) +#define NILFS_IOCTL_GET_CPINFO \ + _IOR(NILFS_IOCTL_IDENT, 0x82, struct nilfs_argv) +#define NILFS_IOCTL_GET_CPSTAT \ + _IOR(NILFS_IOCTL_IDENT, 0x83, struct nilfs_cpstat) +#define NILFS_IOCTL_GET_SUINFO \ + _IOR(NILFS_IOCTL_IDENT, 0x84, struct nilfs_argv) +#define NILFS_IOCTL_GET_SUSTAT \ + _IOR(NILFS_IOCTL_IDENT, 0x85, struct nilfs_sustat) +#define NILFS_IOCTL_GET_VINFO \ + _IOWR(NILFS_IOCTL_IDENT, 0x86, struct nilfs_argv) +#define NILFS_IOCTL_GET_BDESCS \ + _IOWR(NILFS_IOCTL_IDENT, 0x87, struct nilfs_argv) +#define NILFS_IOCTL_CLEAN_SEGMENTS \ + _IOW(NILFS_IOCTL_IDENT, 0x88, struct nilfs_argv[5]) +#define NILFS_IOCTL_SYNC \ + _IOR(NILFS_IOCTL_IDENT, 0x8A, __u64) +#define NILFS_IOCTL_RESIZE \ + _IOW(NILFS_IOCTL_IDENT, 0x8B, __u64) + +#endif /* _LINUX_NILFS_FS_H */ diff --git a/include/linux/nl80211.h b/include/linux/nl80211.h index e86ed59f9ad..a8d71ed43a0 100644 --- a/include/linux/nl80211.h +++ b/include/linux/nl80211.h @@ -7,7 +7,7 @@ * Copyright 2008 Michael Wu <flamingice@sourmilk.net> * Copyright 2008 Luis Carlos Cobo <luisca@cozybit.com> * Copyright 2008 Michael Buesch <mb@bu3sch.de> - * Copyright 2008 Luis R. Rodriguez <lrodriguez@atheros.com> + * Copyright 2008, 2009 Luis R. Rodriguez <lrodriguez@atheros.com> * Copyright 2008 Jouni Malinen <jouni.malinen@atheros.com> * Copyright 2008 Colin McCabe <colin@cozybit.com> * @@ -25,6 +25,8 @@ * */ +#include <linux/types.h> + /** * DOC: Station handling * @@ -46,8 +48,10 @@ * to get a list of all present wiphys. * @NL80211_CMD_SET_WIPHY: set wiphy parameters, needs %NL80211_ATTR_WIPHY or * %NL80211_ATTR_IFINDEX; can be used to set %NL80211_ATTR_WIPHY_NAME, - * %NL80211_ATTR_WIPHY_TXQ_PARAMS, %NL80211_ATTR_WIPHY_FREQ, and/or - * %NL80211_ATTR_WIPHY_SEC_CHAN_OFFSET. + * %NL80211_ATTR_WIPHY_TXQ_PARAMS, %NL80211_ATTR_WIPHY_FREQ, + * %NL80211_ATTR_WIPHY_CHANNEL_TYPE, %NL80211_ATTR_WIPHY_RETRY_SHORT, + * %NL80211_ATTR_WIPHY_RETRY_LONG, %NL80211_ATTR_WIPHY_FRAG_THRESHOLD, + * and/or %NL80211_ATTR_WIPHY_RTS_THRESHOLD. * @NL80211_CMD_NEW_WIPHY: Newly created wiphy, response to get request * or rename notification. Has attributes %NL80211_ATTR_WIPHY and * %NL80211_ATTR_WIPHY_NAME. @@ -72,11 +76,11 @@ * * @NL80211_CMD_GET_KEY: Get sequence counter information for a key specified * by %NL80211_ATTR_KEY_IDX and/or %NL80211_ATTR_MAC. - * @NL80211_CMD_SET_KEY: Set key attributes %NL80211_ATTR_KEY_DEFAULT or - * %NL80211_ATTR_KEY_THRESHOLD. + * @NL80211_CMD_SET_KEY: Set key attributes %NL80211_ATTR_KEY_DEFAULT, + * %NL80211_ATTR_KEY_DEFAULT_MGMT, or %NL80211_ATTR_KEY_THRESHOLD. 
* @NL80211_CMD_NEW_KEY: add a key with given %NL80211_ATTR_KEY_DATA, - * %NL80211_ATTR_KEY_IDX, %NL80211_ATTR_MAC and %NL80211_ATTR_KEY_CIPHER - * attributes. + * %NL80211_ATTR_KEY_IDX, %NL80211_ATTR_MAC, %NL80211_ATTR_KEY_CIPHER, + * and %NL80211_ATTR_KEY_SEQ attributes. * @NL80211_CMD_DEL_KEY: delete a key identified by %NL80211_ATTR_KEY_IDX * or %NL80211_ATTR_MAC. * @@ -84,7 +88,7 @@ * %NL80222_CMD_NEW_BEACON message) * @NL80211_CMD_SET_BEACON: set the beacon on an access point interface * using the %NL80211_ATTR_BEACON_INTERVAL, %NL80211_ATTR_DTIM_PERIOD, - * %NL80211_BEACON_HEAD and %NL80211_BEACON_TAIL attributes. + * %NL80211_ATTR_BEACON_HEAD and %NL80211_ATTR_BEACON_TAIL attributes. * @NL80211_CMD_NEW_BEACON: add a new beacon to an access point interface, * parameters are like for %NL80211_CMD_SET_BEACON. * @NL80211_CMD_DEL_BEACON: remove the beacon, stop sending it @@ -113,6 +117,8 @@ * @NL80211_CMD_SET_BSS: Set BSS attributes for BSS identified by * %NL80211_ATTR_IFINDEX. * + * @NL80211_CMD_GET_REG: ask the wireless core to send us its currently set + * regulatory domain. * @NL80211_CMD_SET_REG: Set current regulatory domain. CRDA sends this command * after being queried by the kernel. CRDA replies by sending a regulatory * domain structure which consists of %NL80211_ATTR_REG_ALPHA set to our @@ -133,6 +139,132 @@ * @NL80211_CMD_SET_MESH_PARAMS: Set mesh networking properties for the * interface identified by %NL80211_ATTR_IFINDEX * + * @NL80211_CMD_SET_MGMT_EXTRA_IE: Set extra IEs for management frames. The + * interface is identified with %NL80211_ATTR_IFINDEX and the management + * frame subtype with %NL80211_ATTR_MGMT_SUBTYPE. The extra IE data to be + * added to the end of the specified management frame is specified with + * %NL80211_ATTR_IE. If the command succeeds, the requested data will be + * added to all specified management frames generated by + * kernel/firmware/driver. + * Note: This command has been removed and it is only reserved at this + * point to avoid re-using existing command number. The functionality this + * command was planned for has been provided with cleaner design with the + * option to specify additional IEs in NL80211_CMD_TRIGGER_SCAN, + * NL80211_CMD_AUTHENTICATE, NL80211_CMD_ASSOCIATE, + * NL80211_CMD_DEAUTHENTICATE, and NL80211_CMD_DISASSOCIATE. + * + * @NL80211_CMD_GET_SCAN: get scan results + * @NL80211_CMD_TRIGGER_SCAN: trigger a new scan with the given parameters + * @NL80211_CMD_NEW_SCAN_RESULTS: scan notification (as a reply to + * NL80211_CMD_GET_SCAN and on the "scan" multicast group) + * @NL80211_CMD_SCAN_ABORTED: scan was aborted, for unspecified reasons, + * partial scan results may be available + * + * @NL80211_CMD_REG_CHANGE: indicates to userspace the regulatory domain + * has been changed and provides details of the request information + * that caused the change such as who initiated the regulatory request + * (%NL80211_ATTR_REG_INITIATOR), the wiphy_idx + * (%NL80211_ATTR_REG_ALPHA2) on which the request was made from if + * the initiator was %NL80211_REGDOM_SET_BY_COUNTRY_IE or + * %NL80211_REGDOM_SET_BY_DRIVER, the type of regulatory domain + * set (%NL80211_ATTR_REG_TYPE), if the type of regulatory domain is + * %NL80211_REG_TYPE_COUNTRY the alpha2 to which we have moved on + * to (%NL80211_ATTR_REG_ALPHA2). 
+ * @NL80211_CMD_REG_BEACON_HINT: indicates to userspace that an AP beacon + * has been found while world roaming thus enabling active scan or + * any mode of operation that initiates TX (beacons) on a channel + * where we would not have been able to do either before. As an example + * if you are world roaming (regulatory domain set to world or if your + * driver is using a custom world roaming regulatory domain) and while + * doing a passive scan on the 5 GHz band you find an AP there (if not + * on a DFS channel) you will now be able to actively scan for that AP + * or use AP mode on your card on that same channel. Note that this will + * never be used for channels 1-11 on the 2 GHz band as they are always + * enabled world wide. This beacon hint is only sent if your device had + * either disabled active scanning or beaconing on a channel. We send to + * userspace the wiphy on which we removed a restriction from + * (%NL80211_ATTR_WIPHY) and the channel on which this occurred + * before (%NL80211_ATTR_FREQ_BEFORE) and after (%NL80211_ATTR_FREQ_AFTER) + * the beacon hint was processed. + * + * @NL80211_CMD_AUTHENTICATE: authentication request and notification. + * This command is used both as a command (request to authenticate) and + * as an event on the "mlme" multicast group indicating completion of the + * authentication process. + * When used as a command, %NL80211_ATTR_IFINDEX is used to identify the + * interface. %NL80211_ATTR_MAC is used to specify PeerSTAAddress (and + * BSSID in case of station mode). %NL80211_ATTR_SSID is used to specify + * the SSID (mainly for association, but is included in authentication + * request, too, to help BSS selection. %NL80211_ATTR_WIPHY_FREQ is used + * to specify the frequence of the channel in MHz. %NL80211_ATTR_AUTH_TYPE + * is used to specify the authentication type. %NL80211_ATTR_IE is used to + * define IEs (VendorSpecificInfo, but also including RSN IE and FT IEs) + * to be added to the frame. + * When used as an event, this reports reception of an Authentication + * frame in station and IBSS modes when the local MLME processed the + * frame, i.e., it was for the local STA and was received in correct + * state. This is similar to MLME-AUTHENTICATE.confirm primitive in the + * MLME SAP interface (kernel providing MLME, userspace SME). The + * included %NL80211_ATTR_FRAME attribute contains the management frame + * (including both the header and frame body, but not FCS). This event is + * also used to indicate if the authentication attempt timed out. In that + * case the %NL80211_ATTR_FRAME attribute is replaced with a + * %NL80211_ATTR_TIMED_OUT flag (and %NL80211_ATTR_MAC to indicate which + * pending authentication timed out). + * @NL80211_CMD_ASSOCIATE: association request and notification; like + * NL80211_CMD_AUTHENTICATE but for Association and Reassociation + * (similar to MLME-ASSOCIATE.request, MLME-REASSOCIATE.request, + * MLME-ASSOCIATE.confirm or MLME-REASSOCIATE.confirm primitives). + * @NL80211_CMD_DEAUTHENTICATE: deauthentication request and notification; like + * NL80211_CMD_AUTHENTICATE but for Deauthentication frames (similar to + * MLME-DEAUTHENTICATION.request and MLME-DEAUTHENTICATE.indication + * primitives). + * @NL80211_CMD_DISASSOCIATE: disassociation request and notification; like + * NL80211_CMD_AUTHENTICATE but for Disassociation frames (similar to + * MLME-DISASSOCIATE.request and MLME-DISASSOCIATE.indication primitives). 
+ * + * @NL80211_CMD_MICHAEL_MIC_FAILURE: notification of a locally detected Michael + * MIC (part of TKIP) failure; sent on the "mlme" multicast group; the + * event includes %NL80211_ATTR_MAC to describe the source MAC address of + * the frame with invalid MIC, %NL80211_ATTR_KEY_TYPE to show the key + * type, %NL80211_ATTR_KEY_IDX to indicate the key identifier, and + * %NL80211_ATTR_KEY_SEQ to indicate the TSC value of the frame; this + * event matches with MLME-MICHAELMICFAILURE.indication() primitive + * + * @NL80211_CMD_JOIN_IBSS: Join a new IBSS -- given at least an SSID and a + * FREQ attribute (for the initial frequency if no peer can be found) + * and optionally a MAC (as BSSID) and FREQ_FIXED attribute if those + * should be fixed rather than automatically determined. Can only be + * executed on a network interface that is UP, and fixed BSSID/FREQ + * may be rejected. Another optional parameter is the beacon interval, + * given in the %NL80211_ATTR_BEACON_INTERVAL attribute, which if not + * given defaults to 100 TU (102.4ms). + * @NL80211_CMD_LEAVE_IBSS: Leave the IBSS -- no special arguments, the IBSS is + * determined by the network interface. + * + * @NL80211_CMD_TESTMODE: testmode command, takes a wiphy (or ifindex) attribute + * to identify the device, and the TESTDATA blob attribute to pass through + * to the driver. + * + * @NL80211_CMD_CONNECT: connection request and notification; this command + * requests to connect to a specified network but without separating + * auth and assoc steps. For this, you need to specify the SSID in a + * %NL80211_ATTR_SSID attribute, and can optionally specify the association + * IEs in %NL80211_ATTR_IE, %NL80211_ATTR_AUTH_TYPE, %NL80211_ATTR_MAC, + * %NL80211_ATTR_WIPHY_FREQ and %NL80211_ATTR_CONTROL_PORT. + * It is also sent as an event, with the BSSID and response IEs when the + * connection is established or failed to be established. This can be + * determined by the STATUS_CODE attribute. + * @NL80211_CMD_ROAM: request that the card roam (currently not implemented), + * sent as an event when the card/driver roamed by itself. + * @NL80211_CMD_DISCONNECT: drop a given connection; also used to notify + * userspace that a connection was dropped by the AP or due to other + * reasons, for this the %NL80211_ATTR_DISCONNECTED_BY_AP and + * %NL80211_ATTR_REASON_CODE attributes are used. + * + * @NL80211_CMD_SET_WIPHY_NETNS: Set a wiphy's netns. Note that all devices + * associated with this wiphy must be down and will follow. 
+ * * @NL80211_CMD_MAX: highest used command number * @__NL80211_CMD_AFTER_LAST: internal use */ @@ -178,6 +310,37 @@ enum nl80211_commands { NL80211_CMD_GET_MESH_PARAMS, NL80211_CMD_SET_MESH_PARAMS, + NL80211_CMD_SET_MGMT_EXTRA_IE /* reserved; not used */, + + NL80211_CMD_GET_REG, + + NL80211_CMD_GET_SCAN, + NL80211_CMD_TRIGGER_SCAN, + NL80211_CMD_NEW_SCAN_RESULTS, + NL80211_CMD_SCAN_ABORTED, + + NL80211_CMD_REG_CHANGE, + + NL80211_CMD_AUTHENTICATE, + NL80211_CMD_ASSOCIATE, + NL80211_CMD_DEAUTHENTICATE, + NL80211_CMD_DISASSOCIATE, + + NL80211_CMD_MICHAEL_MIC_FAILURE, + + NL80211_CMD_REG_BEACON_HINT, + + NL80211_CMD_JOIN_IBSS, + NL80211_CMD_LEAVE_IBSS, + + NL80211_CMD_TESTMODE, + + NL80211_CMD_CONNECT, + NL80211_CMD_ROAM, + NL80211_CMD_DISCONNECT, + + NL80211_CMD_SET_WIPHY_NETNS, + /* add new commands above here */ /* used to define NL80211_CMD_MAX below */ @@ -190,6 +353,13 @@ enum nl80211_commands { * here */ #define NL80211_CMD_SET_BSS NL80211_CMD_SET_BSS +#define NL80211_CMD_SET_MGMT_EXTRA_IE NL80211_CMD_SET_MGMT_EXTRA_IE +#define NL80211_CMD_REG_CHANGE NL80211_CMD_REG_CHANGE +#define NL80211_CMD_AUTHENTICATE NL80211_CMD_AUTHENTICATE +#define NL80211_CMD_ASSOCIATE NL80211_CMD_ASSOCIATE +#define NL80211_CMD_DEAUTHENTICATE NL80211_CMD_DEAUTHENTICATE +#define NL80211_CMD_DISASSOCIATE NL80211_CMD_DISASSOCIATE +#define NL80211_CMD_REG_BEACON_HINT NL80211_CMD_REG_BEACON_HINT /** * enum nl80211_attrs - nl80211 netlink attributes @@ -208,6 +378,18 @@ enum nl80211_commands { * NL80211_CHAN_HT20 = HT20 only * NL80211_CHAN_HT40MINUS = secondary channel is below the primary channel * NL80211_CHAN_HT40PLUS = secondary channel is above the primary channel + * @NL80211_ATTR_WIPHY_RETRY_SHORT: TX retry limit for frames whose length is + * less than or equal to the RTS threshold; allowed range: 1..255; + * dot11ShortRetryLimit; u8 + * @NL80211_ATTR_WIPHY_RETRY_LONG: TX retry limit for frames whose length is + * greater than the RTS threshold; allowed range: 1..255; + * dot11ShortLongLimit; u8 + * @NL80211_ATTR_WIPHY_FRAG_THRESHOLD: fragmentation threshold, i.e., maximum + * length in octets for frames; allowed range: 256..8000, disable + * fragmentation with (u32)-1; dot11FragmentationThreshold; u32 + * @NL80211_ATTR_WIPHY_RTS_THRESHOLD: RTS threshold (TX frames with length + * larger than or equal to this use RTS/CTS handshake); allowed range: + * 0..65536, disable with (u32)-1; dot11RTSThreshold; u32 * * @NL80211_ATTR_IFINDEX: network interface index of the device to operate on * @NL80211_ATTR_IFNAME: network interface name @@ -231,7 +413,7 @@ enum nl80211_commands { * * @NL80211_ATTR_STA_AID: Association ID for the station (u16) * @NL80211_ATTR_STA_FLAGS: flags, nested element with NLA_FLAG attributes of - * &enum nl80211_sta_flags. + * &enum nl80211_sta_flags (deprecated, use %NL80211_ATTR_STA_FLAGS2) * @NL80211_ATTR_STA_LISTEN_INTERVAL: listen interval as defined by * IEEE 802.11 7.3.1.6 (u16). * @NL80211_ATTR_STA_SUPPORTED_RATES: supported rates, array of supported @@ -284,6 +466,124 @@ enum nl80211_commands { * supported interface types, each a flag attribute with the number * of the interface mode. * + * @NL80211_ATTR_MGMT_SUBTYPE: Management frame subtype for + * %NL80211_CMD_SET_MGMT_EXTRA_IE. + * + * @NL80211_ATTR_IE: Information element(s) data (used, e.g., with + * %NL80211_CMD_SET_MGMT_EXTRA_IE). + * + * @NL80211_ATTR_MAX_NUM_SCAN_SSIDS: number of SSIDs you can scan with + * a single scan request, a wiphy attribute. 
+ * @NL80211_ATTR_MAX_SCAN_IE_LEN: maximum length of information elements + * that can be added to a scan request + * + * @NL80211_ATTR_SCAN_FREQUENCIES: nested attribute with frequencies (in MHz) + * @NL80211_ATTR_SCAN_SSIDS: nested attribute with SSIDs, leave out for passive + * scanning and include a zero-length SSID (wildcard) for wildcard scan + * @NL80211_ATTR_BSS: scan result BSS + * + * @NL80211_ATTR_REG_INITIATOR: indicates who requested the regulatory domain + * currently in effect. This could be any of the %NL80211_REGDOM_SET_BY_* + * @NL80211_ATTR_REG_TYPE: indicates the type of the regulatory domain currently + * set. This can be one of the nl80211_reg_type (%NL80211_REGDOM_TYPE_*) + * + * @NL80211_ATTR_SUPPORTED_COMMANDS: wiphy attribute that specifies + * an array of command numbers (i.e. a mapping index to command number) + * that the driver for the given wiphy supports. + * + * @NL80211_ATTR_FRAME: frame data (binary attribute), including frame header + * and body, but not FCS; used, e.g., with NL80211_CMD_AUTHENTICATE and + * NL80211_CMD_ASSOCIATE events + * @NL80211_ATTR_SSID: SSID (binary attribute, 0..32 octets) + * @NL80211_ATTR_AUTH_TYPE: AuthenticationType, see &enum nl80211_auth_type, + * represented as a u32 + * @NL80211_ATTR_REASON_CODE: ReasonCode for %NL80211_CMD_DEAUTHENTICATE and + * %NL80211_CMD_DISASSOCIATE, u16 + * + * @NL80211_ATTR_KEY_TYPE: Key Type, see &enum nl80211_key_type, represented as + * a u32 + * + * @NL80211_ATTR_FREQ_BEFORE: A channel which has suffered a regulatory change + * due to considerations from a beacon hint. This attribute reflects + * the state of the channel _before_ the beacon hint processing. This + * attributes consists of a nested attribute containing + * NL80211_FREQUENCY_ATTR_* + * @NL80211_ATTR_FREQ_AFTER: A channel which has suffered a regulatory change + * due to considerations from a beacon hint. This attribute reflects + * the state of the channel _after_ the beacon hint processing. This + * attributes consists of a nested attribute containing + * NL80211_FREQUENCY_ATTR_* + * + * @NL80211_ATTR_CIPHER_SUITES: a set of u32 values indicating the supported + * cipher suites + * + * @NL80211_ATTR_FREQ_FIXED: a flag indicating the IBSS should not try to look + * for other networks on different channels + * + * @NL80211_ATTR_TIMED_OUT: a flag indicating than an operation timed out; this + * is used, e.g., with %NL80211_CMD_AUTHENTICATE event + * + * @NL80211_ATTR_USE_MFP: Whether management frame protection (IEEE 802.11w) is + * used for the association (&enum nl80211_mfp, represented as a u32); + * this attribute can be used + * with %NL80211_CMD_ASSOCIATE request + * + * @NL80211_ATTR_STA_FLAGS2: Attribute containing a + * &struct nl80211_sta_flag_update. + * + * @NL80211_ATTR_CONTROL_PORT: A flag indicating whether user space controls + * IEEE 802.1X port, i.e., sets/clears %NL80211_STA_FLAG_AUTHORIZED, in + * station mode. If the flag is included in %NL80211_CMD_ASSOCIATE + * request, the driver will assume that the port is unauthorized until + * authorized by user space. Otherwise, port is marked authorized by + * default in station mode. + * + * @NL80211_ATTR_TESTDATA: Testmode data blob, passed through to the driver. + * We recommend using nested, driver-specific attributes within this. + * + * @NL80211_ATTR_DISCONNECTED_BY_AP: A flag indicating that the DISCONNECT + * event was due to the AP disconnecting the station, and not due to + * a local disconnect request. 
+ * @NL80211_ATTR_STATUS_CODE: StatusCode for the %NL80211_CMD_CONNECT + * event (u16) + * @NL80211_ATTR_PRIVACY: Flag attribute, used with connect(), indicating + * that protected APs should be used. + * + * @NL80211_ATTR_CIPHERS_PAIRWISE: Used with CONNECT and ASSOCIATE to + * indicate which unicast key ciphers will be used with the connection + * (an array of u32). + * @NL80211_ATTR_CIPHER_GROUP: Used with CONNECT and ASSOCIATE to indicate + * which group key cipher will be used with the connection (a u32). + * @NL80211_ATTR_WPA_VERSIONS: Used with CONNECT and ASSOCIATE to indicate + * which WPA version(s) the AP we want to associate with is using + * (a u32 with flags from &enum nl80211_wpa_versions). + * @NL80211_ATTR_AKM_SUITES: Used with CONNECT and ASSOCIATE to indicate + * which key management algorithm(s) to use (an array of u32). + * + * @NL80211_ATTR_REQ_IE: (Re)association request information elements as + * sent out by the card, for ROAM and successful CONNECT events. + * @NL80211_ATTR_RESP_IE: (Re)association response information elements as + * sent by peer, for ROAM and successful CONNECT events. + * + * @NL80211_ATTR_PREV_BSSID: previous BSSID, to be used by in ASSOCIATE + * commands to specify using a reassociate frame + * + * @NL80211_ATTR_KEY: key information in a nested attribute with + * %NL80211_KEY_* sub-attributes + * @NL80211_ATTR_KEYS: array of keys for static WEP keys for connect() + * and join_ibss(), key information is in a nested attribute each + * with %NL80211_KEY_* sub-attributes + * + * @NL80211_ATTR_PID: Process ID of a network namespace. + * + * @NL80211_ATTR_GENERATION: Used to indicate consistent snapshots for + * dumps. This number increases whenever the object list being + * dumped changes, and as such userspace can verify that it has + * obtained a complete and consistent snapshot by verifying that + * all dump messages contain the same generation number. If it + * changed then the list changed and the dump should be repeated + * completely from scratch. 
+ * * @NL80211_ATTR_MAX: highest attribute number currently defined * @__NL80211_ATTR_AFTER_LAST: internal use */ @@ -346,21 +646,107 @@ enum nl80211_attrs { NL80211_ATTR_WIPHY_FREQ, NL80211_ATTR_WIPHY_CHANNEL_TYPE, + NL80211_ATTR_KEY_DEFAULT_MGMT, + + NL80211_ATTR_MGMT_SUBTYPE, + NL80211_ATTR_IE, + + NL80211_ATTR_MAX_NUM_SCAN_SSIDS, + + NL80211_ATTR_SCAN_FREQUENCIES, + NL80211_ATTR_SCAN_SSIDS, + NL80211_ATTR_GENERATION, /* replaces old SCAN_GENERATION */ + NL80211_ATTR_BSS, + + NL80211_ATTR_REG_INITIATOR, + NL80211_ATTR_REG_TYPE, + + NL80211_ATTR_SUPPORTED_COMMANDS, + + NL80211_ATTR_FRAME, + NL80211_ATTR_SSID, + NL80211_ATTR_AUTH_TYPE, + NL80211_ATTR_REASON_CODE, + + NL80211_ATTR_KEY_TYPE, + + NL80211_ATTR_MAX_SCAN_IE_LEN, + NL80211_ATTR_CIPHER_SUITES, + + NL80211_ATTR_FREQ_BEFORE, + NL80211_ATTR_FREQ_AFTER, + + NL80211_ATTR_FREQ_FIXED, + + + NL80211_ATTR_WIPHY_RETRY_SHORT, + NL80211_ATTR_WIPHY_RETRY_LONG, + NL80211_ATTR_WIPHY_FRAG_THRESHOLD, + NL80211_ATTR_WIPHY_RTS_THRESHOLD, + + NL80211_ATTR_TIMED_OUT, + + NL80211_ATTR_USE_MFP, + + NL80211_ATTR_STA_FLAGS2, + + NL80211_ATTR_CONTROL_PORT, + + NL80211_ATTR_TESTDATA, + + NL80211_ATTR_PRIVACY, + + NL80211_ATTR_DISCONNECTED_BY_AP, + NL80211_ATTR_STATUS_CODE, + + NL80211_ATTR_CIPHER_SUITES_PAIRWISE, + NL80211_ATTR_CIPHER_SUITE_GROUP, + NL80211_ATTR_WPA_VERSIONS, + NL80211_ATTR_AKM_SUITES, + + NL80211_ATTR_REQ_IE, + NL80211_ATTR_RESP_IE, + + NL80211_ATTR_PREV_BSSID, + + NL80211_ATTR_KEY, + NL80211_ATTR_KEYS, + + NL80211_ATTR_PID, + /* add attributes here, update the policy in nl80211.c */ __NL80211_ATTR_AFTER_LAST, NL80211_ATTR_MAX = __NL80211_ATTR_AFTER_LAST - 1 }; +/* source-level API compatibility */ +#define NL80211_ATTR_SCAN_GENERATION NL80211_ATTR_GENERATION + /* * Allow user space programs to use #ifdef on new attributes by defining them * here */ +#define NL80211_CMD_CONNECT NL80211_CMD_CONNECT #define NL80211_ATTR_HT_CAPABILITY NL80211_ATTR_HT_CAPABILITY #define NL80211_ATTR_BSS_BASIC_RATES NL80211_ATTR_BSS_BASIC_RATES #define NL80211_ATTR_WIPHY_TXQ_PARAMS NL80211_ATTR_WIPHY_TXQ_PARAMS #define NL80211_ATTR_WIPHY_FREQ NL80211_ATTR_WIPHY_FREQ -#define NL80211_ATTR_WIPHY_SEC_CHAN_OFFSET NL80211_ATTR_WIPHY_SEC_CHAN_OFFSET +#define NL80211_ATTR_WIPHY_CHANNEL_TYPE NL80211_ATTR_WIPHY_CHANNEL_TYPE +#define NL80211_ATTR_MGMT_SUBTYPE NL80211_ATTR_MGMT_SUBTYPE +#define NL80211_ATTR_IE NL80211_ATTR_IE +#define NL80211_ATTR_REG_INITIATOR NL80211_ATTR_REG_INITIATOR +#define NL80211_ATTR_REG_TYPE NL80211_ATTR_REG_TYPE +#define NL80211_ATTR_FRAME NL80211_ATTR_FRAME +#define NL80211_ATTR_SSID NL80211_ATTR_SSID +#define NL80211_ATTR_AUTH_TYPE NL80211_ATTR_AUTH_TYPE +#define NL80211_ATTR_REASON_CODE NL80211_ATTR_REASON_CODE +#define NL80211_ATTR_CIPHER_SUITES_PAIRWISE NL80211_ATTR_CIPHER_SUITES_PAIRWISE +#define NL80211_ATTR_CIPHER_SUITE_GROUP NL80211_ATTR_CIPHER_SUITE_GROUP +#define NL80211_ATTR_WPA_VERSIONS NL80211_ATTR_WPA_VERSIONS +#define NL80211_ATTR_AKM_SUITES NL80211_ATTR_AKM_SUITES +#define NL80211_ATTR_KEY NL80211_ATTR_KEY +#define NL80211_ATTR_KEYS NL80211_ATTR_KEYS #define NL80211_MAX_SUPP_RATES 32 #define NL80211_MAX_SUPP_REG_RULES 32 @@ -369,6 +755,9 @@ enum nl80211_attrs { #define NL80211_TKIP_DATA_OFFSET_RX_MIC_KEY 24 #define NL80211_HT_CAPABILITY_LEN 26 +#define NL80211_MAX_NR_CIPHER_SUITES 5 +#define NL80211_MAX_NR_AKM_SUITES 2 + /** * enum nl80211_iftype - (virtual) interface types * @@ -412,12 +801,14 @@ enum nl80211_iftype { * @NL80211_STA_FLAG_SHORT_PREAMBLE: station is capable of receiving frames * with short barker preamble * 
@NL80211_STA_FLAG_WME: station is WME/QoS capable + * @NL80211_STA_FLAG_MFP: station uses management frame protection */ enum nl80211_sta_flags { __NL80211_STA_FLAG_INVALID, NL80211_STA_FLAG_AUTHORIZED, NL80211_STA_FLAG_SHORT_PREAMBLE, NL80211_STA_FLAG_WME, + NL80211_STA_FLAG_MFP, /* keep last */ __NL80211_STA_FLAG_AFTER_LAST, @@ -425,6 +816,18 @@ enum nl80211_sta_flags { }; /** + * struct nl80211_sta_flag_update - station flags mask/set + * @mask: mask of station flags to set + * @set: which values to set them to + * + * Both mask and set contain bits as per &enum nl80211_sta_flags. + */ +struct nl80211_sta_flag_update { + __u32 mask; + __u32 set; +} __attribute__((packed)); + +/** * enum nl80211_rate_info - bitrate information * * These attribute types are used with %NL80211_STA_INFO_TXRATE @@ -465,6 +868,9 @@ enum nl80211_rate_info { * @NL80211_STA_INFO_SIGNAL: signal strength of last received PPDU (u8, dBm) * @NL80211_STA_INFO_TX_BITRATE: current unicast tx rate, nested attribute * containing info as possible, see &enum nl80211_sta_info_txrate. + * @NL80211_STA_INFO_RX_PACKETS: total received packet (u32, from this station) + * @NL80211_STA_INFO_TX_PACKETS: total transmitted packets (u32, to this + * station) */ enum nl80211_sta_info { __NL80211_STA_INFO_INVALID, @@ -476,6 +882,8 @@ enum nl80211_sta_info { NL80211_STA_INFO_PLINK_STATE, NL80211_STA_INFO_SIGNAL, NL80211_STA_INFO_TX_BITRATE, + NL80211_STA_INFO_RX_PACKETS, + NL80211_STA_INFO_TX_PACKETS, /* keep last */ __NL80211_STA_INFO_AFTER_LAST, @@ -607,6 +1015,48 @@ enum nl80211_bitrate_attr { }; /** + * enum nl80211_initiator - Indicates the initiator of a reg domain request + * @NL80211_REGDOM_SET_BY_CORE: Core queried CRDA for a dynamic world + * regulatory domain. + * @NL80211_REGDOM_SET_BY_USER: User asked the wireless core to set the + * regulatory domain. + * @NL80211_REGDOM_SET_BY_DRIVER: a wireless drivers has hinted to the + * wireless core it thinks its knows the regulatory domain we should be in. + * @NL80211_REGDOM_SET_BY_COUNTRY_IE: the wireless core has received an + * 802.11 country information element with regulatory information it + * thinks we should consider. + */ +enum nl80211_reg_initiator { + NL80211_REGDOM_SET_BY_CORE, + NL80211_REGDOM_SET_BY_USER, + NL80211_REGDOM_SET_BY_DRIVER, + NL80211_REGDOM_SET_BY_COUNTRY_IE, +}; + +/** + * enum nl80211_reg_type - specifies the type of regulatory domain + * @NL80211_REGDOM_TYPE_COUNTRY: the regulatory domain set is one that pertains + * to a specific country. When this is set you can count on the + * ISO / IEC 3166 alpha2 country code being valid. + * @NL80211_REGDOM_TYPE_WORLD: the regulatory set domain is the world regulatory + * domain. + * @NL80211_REGDOM_TYPE_CUSTOM_WORLD: the regulatory domain set is a custom + * driver specific world regulatory domain. These do not apply system-wide + * and are only applicable to the individual devices which have requested + * them to be applied. + * @NL80211_REGDOM_TYPE_INTERSECTION: the regulatory domain set is the product + * of an intersection between two regulatory domains -- the previously + * set regulatory domain on the system and the last accepted regulatory + * domain request to be processed. 
+ */ +enum nl80211_reg_type { + NL80211_REGDOM_TYPE_COUNTRY, + NL80211_REGDOM_TYPE_WORLD, + NL80211_REGDOM_TYPE_CUSTOM_WORLD, + NL80211_REGDOM_TYPE_INTERSECTION, +}; + +/** * enum nl80211_reg_rule_attr - regulatory rule attributes * @NL80211_ATTR_REG_RULE_FLAGS: a set of flags which specify additional * considerations for a given frequency range. These are the @@ -811,4 +1261,131 @@ enum nl80211_channel_type { NL80211_CHAN_HT40MINUS, NL80211_CHAN_HT40PLUS }; + +/** + * enum nl80211_bss - netlink attributes for a BSS + * + * @__NL80211_BSS_INVALID: invalid + * @NL80211_BSS_FREQUENCY: frequency in MHz (u32) + * @NL80211_BSS_TSF: TSF of the received probe response/beacon (u64) + * @NL80211_BSS_BEACON_INTERVAL: beacon interval of the (I)BSS (u16) + * @NL80211_BSS_CAPABILITY: capability field (CPU order, u16) + * @NL80211_BSS_INFORMATION_ELEMENTS: binary attribute containing the + * raw information elements from the probe response/beacon (bin) + * @NL80211_BSS_SIGNAL_MBM: signal strength of probe response/beacon + * in mBm (100 * dBm) (s32) + * @NL80211_BSS_SIGNAL_UNSPEC: signal strength of the probe response/beacon + * in unspecified units, scaled to 0..100 (u8) + * @NL80211_BSS_STATUS: status, if this BSS is "used" + * @__NL80211_BSS_AFTER_LAST: internal + * @NL80211_BSS_MAX: highest BSS attribute + */ +enum nl80211_bss { + __NL80211_BSS_INVALID, + NL80211_BSS_BSSID, + NL80211_BSS_FREQUENCY, + NL80211_BSS_TSF, + NL80211_BSS_BEACON_INTERVAL, + NL80211_BSS_CAPABILITY, + NL80211_BSS_INFORMATION_ELEMENTS, + NL80211_BSS_SIGNAL_MBM, + NL80211_BSS_SIGNAL_UNSPEC, + NL80211_BSS_STATUS, + + /* keep last */ + __NL80211_BSS_AFTER_LAST, + NL80211_BSS_MAX = __NL80211_BSS_AFTER_LAST - 1 +}; + +/** + * enum nl80211_bss_status - BSS "status" + */ +enum nl80211_bss_status { + NL80211_BSS_STATUS_AUTHENTICATED, + NL80211_BSS_STATUS_ASSOCIATED, + NL80211_BSS_STATUS_IBSS_JOINED, +}; + +/** + * enum nl80211_auth_type - AuthenticationType + * + * @NL80211_AUTHTYPE_OPEN_SYSTEM: Open System authentication + * @NL80211_AUTHTYPE_SHARED_KEY: Shared Key authentication (WEP only) + * @NL80211_AUTHTYPE_FT: Fast BSS Transition (IEEE 802.11r) + * @NL80211_AUTHTYPE_NETWORK_EAP: Network EAP (some Cisco APs and mainly LEAP) + * @__NL80211_AUTHTYPE_NUM: internal + * @NL80211_AUTHTYPE_MAX: maximum valid auth algorithm + * @NL80211_AUTHTYPE_AUTOMATIC: determine automatically (if necessary by + * trying multiple times); this is invalid in netlink -- leave out + * the attribute for this on CONNECT commands. 
+ */ +enum nl80211_auth_type { + NL80211_AUTHTYPE_OPEN_SYSTEM, + NL80211_AUTHTYPE_SHARED_KEY, + NL80211_AUTHTYPE_FT, + NL80211_AUTHTYPE_NETWORK_EAP, + + /* keep last */ + __NL80211_AUTHTYPE_NUM, + NL80211_AUTHTYPE_MAX = __NL80211_AUTHTYPE_NUM - 1, + NL80211_AUTHTYPE_AUTOMATIC +}; + +/** + * enum nl80211_key_type - Key Type + * @NL80211_KEYTYPE_GROUP: Group (broadcast/multicast) key + * @NL80211_KEYTYPE_PAIRWISE: Pairwise (unicast/individual) key + * @NL80211_KEYTYPE_PEERKEY: PeerKey (DLS) + */ +enum nl80211_key_type { + NL80211_KEYTYPE_GROUP, + NL80211_KEYTYPE_PAIRWISE, + NL80211_KEYTYPE_PEERKEY, +}; + +/** + * enum nl80211_mfp - Management frame protection state + * @NL80211_MFP_NO: Management frame protection not used + * @NL80211_MFP_REQUIRED: Management frame protection required + */ +enum nl80211_mfp { + NL80211_MFP_NO, + NL80211_MFP_REQUIRED, +}; + +enum nl80211_wpa_versions { + NL80211_WPA_VERSION_1 = 1 << 0, + NL80211_WPA_VERSION_2 = 1 << 1, +}; + +/** + * enum nl80211_key_attributes - key attributes + * @__NL80211_KEY_INVALID: invalid + * @NL80211_KEY_DATA: (temporal) key data; for TKIP this consists of + * 16 bytes encryption key followed by 8 bytes each for TX and RX MIC + * keys + * @NL80211_KEY_IDX: key ID (u8, 0-3) + * @NL80211_KEY_CIPHER: key cipher suite (u32, as defined by IEEE 802.11 + * section 7.3.2.25.1, e.g. 0x000FAC04) + * @NL80211_KEY_SEQ: transmit key sequence number (IV/PN) for TKIP and + * CCMP keys, each six bytes in little endian + * @NL80211_KEY_DEFAULT: flag indicating default key + * @NL80211_KEY_DEFAULT_MGMT: flag indicating default management key + * @__NL80211_KEY_AFTER_LAST: internal + * @NL80211_KEY_MAX: highest key attribute + */ +enum nl80211_key_attributes { + __NL80211_KEY_INVALID, + NL80211_KEY_DATA, + NL80211_KEY_IDX, + NL80211_KEY_CIPHER, + NL80211_KEY_SEQ, + NL80211_KEY_DEFAULT, + NL80211_KEY_DEFAULT_MGMT, + + /* keep last */ + __NL80211_KEY_AFTER_LAST, + NL80211_KEY_MAX = __NL80211_KEY_AFTER_LAST - 1 +}; + #endif /* __LINUX_NL80211_H */ diff --git a/include/linux/nl802154.h b/include/linux/nl802154.h new file mode 100644 index 00000000000..b7d9435d5a9 --- /dev/null +++ b/include/linux/nl802154.h @@ -0,0 +1,123 @@ +/* + * nl802154.h + * + * Copyright (C) 2007, 2008, 2009 Siemens AG + * + * This program is free software; you can redistribute it and/or modify + * it under the terms of the GNU General Public License version 2 + * as published by the Free Software Foundation. + * + * This program is distributed in the hope that it will be useful, + * but WITHOUT ANY WARRANTY; without even the implied warranty of + * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the + * GNU General Public License for more details. + * + * You should have received a copy of the GNU General Public License along + * with this program; if not, write to the Free Software Foundation, Inc., + * 51 Franklin Street, Fifth Floor, Boston, MA 02110-1301 USA. 
+ * + */ + +#ifndef NL802154_H +#define NL802154_H + +#define IEEE802154_NL_NAME "802.15.4 MAC" +#define IEEE802154_MCAST_COORD_NAME "coordinator" +#define IEEE802154_MCAST_BEACON_NAME "beacon" + +enum { + __IEEE802154_ATTR_INVALID, + + IEEE802154_ATTR_DEV_NAME, + IEEE802154_ATTR_DEV_INDEX, + + IEEE802154_ATTR_STATUS, + + IEEE802154_ATTR_SHORT_ADDR, + IEEE802154_ATTR_HW_ADDR, + IEEE802154_ATTR_PAN_ID, + + IEEE802154_ATTR_CHANNEL, + + IEEE802154_ATTR_COORD_SHORT_ADDR, + IEEE802154_ATTR_COORD_HW_ADDR, + IEEE802154_ATTR_COORD_PAN_ID, + + IEEE802154_ATTR_SRC_SHORT_ADDR, + IEEE802154_ATTR_SRC_HW_ADDR, + IEEE802154_ATTR_SRC_PAN_ID, + + IEEE802154_ATTR_DEST_SHORT_ADDR, + IEEE802154_ATTR_DEST_HW_ADDR, + IEEE802154_ATTR_DEST_PAN_ID, + + IEEE802154_ATTR_CAPABILITY, + IEEE802154_ATTR_REASON, + IEEE802154_ATTR_SCAN_TYPE, + IEEE802154_ATTR_CHANNELS, + IEEE802154_ATTR_DURATION, + IEEE802154_ATTR_ED_LIST, + IEEE802154_ATTR_BCN_ORD, + IEEE802154_ATTR_SF_ORD, + IEEE802154_ATTR_PAN_COORD, + IEEE802154_ATTR_BAT_EXT, + IEEE802154_ATTR_COORD_REALIGN, + IEEE802154_ATTR_SEC, + + IEEE802154_ATTR_PAGE, + + __IEEE802154_ATTR_MAX, +}; + +#define IEEE802154_ATTR_MAX (__IEEE802154_ATTR_MAX - 1) + +extern const struct nla_policy ieee802154_policy[]; + +/* commands */ +/* REQ should be responded with CONF + * and INDIC with RESP + */ +enum { + __IEEE802154_COMMAND_INVALID, + + IEEE802154_ASSOCIATE_REQ, + IEEE802154_ASSOCIATE_CONF, + IEEE802154_DISASSOCIATE_REQ, + IEEE802154_DISASSOCIATE_CONF, + IEEE802154_GET_REQ, + IEEE802154_GET_CONF, + IEEE802154_RESET_REQ, + IEEE802154_RESET_CONF, + IEEE802154_SCAN_REQ, + IEEE802154_SCAN_CONF, + IEEE802154_SET_REQ, + IEEE802154_SET_CONF, + IEEE802154_START_REQ, + IEEE802154_START_CONF, + IEEE802154_SYNC_REQ, + IEEE802154_POLL_REQ, + IEEE802154_POLL_CONF, + + IEEE802154_ASSOCIATE_INDIC, + IEEE802154_ASSOCIATE_RESP, + IEEE802154_DISASSOCIATE_INDIC, + IEEE802154_BEACON_NOTIFY_INDIC, + IEEE802154_ORPHAN_INDIC, + IEEE802154_ORPHAN_RESP, + IEEE802154_COMM_STATUS_INDIC, + IEEE802154_SYNC_LOSS_INDIC, + + IEEE802154_GTS_REQ, /* Not supported yet */ + IEEE802154_GTS_INDIC, /* Not supported yet */ + IEEE802154_GTS_CONF, /* Not supported yet */ + IEEE802154_RX_ENABLE_REQ, /* Not supported yet */ + IEEE802154_RX_ENABLE_CONF, /* Not supported yet */ + + IEEE802154_LIST_IFACE, + + __IEEE802154_CMD_MAX, +}; + +#define IEEE802154_CMD_MAX (__IEEE802154_CMD_MAX - 1) + +#endif diff --git a/include/linux/nls.h b/include/linux/nls.h index 6a882208301..d47beef08df 100644 --- a/include/linux/nls.h +++ b/include/linux/nls.h @@ -3,8 +3,23 @@ #include <linux/init.h> -/* unicode character */ -typedef __u16 wchar_t; +/* Unicode has changed over the years. Unicode code points no longer + * fit into 16 bits; as of Unicode 5 valid code points range from 0 + * to 0x10ffff (17 planes, where each plane holds 65536 code points). + * + * The original decision to represent Unicode characters as 16-bit + * wchar_t values is now outdated. But plane 0 still includes the + * most commonly used characters, so we will retain it. The newer + * 32-bit unicode_t type can be used when it is necessary to + * represent the full Unicode character set. 
+ */ + +/* Plane-0 Unicode character */ +typedef u16 wchar_t; +#define MAX_WCHAR_T 0xffff + +/* Arbitrary Unicode character */ +typedef u32 unicode_t; struct nls_table { const char *charset; @@ -21,6 +36,13 @@ struct nls_table { /* this value hold the maximum octet of charset */ #define NLS_MAX_CHARSET_SIZE 6 /* for UTF-8 */ +/* Byte order for UTF-16 strings */ +enum utf16_endian { + UTF16_HOST_ENDIAN, + UTF16_LITTLE_ENDIAN, + UTF16_BIG_ENDIAN +}; + /* nls.c */ extern int register_nls(struct nls_table *); extern int unregister_nls(struct nls_table *); @@ -28,10 +50,11 @@ extern struct nls_table *load_nls(char *); extern void unload_nls(struct nls_table *); extern struct nls_table *load_nls_default(void); -extern int utf8_mbtowc(wchar_t *, const __u8 *, int); -extern int utf8_mbstowcs(wchar_t *, const __u8 *, int); -extern int utf8_wctomb(__u8 *, wchar_t, int); -extern int utf8_wcstombs(__u8 *, const wchar_t *, int); +extern int utf8_to_utf32(const u8 *s, int len, unicode_t *pu); +extern int utf32_to_utf8(unicode_t u, u8 *s, int maxlen); +extern int utf8s_to_utf16s(const u8 *s, int len, wchar_t *pwcs); +extern int utf16s_to_utf8s(const wchar_t *pwcs, int len, + enum utf16_endian endian, u8 *s, int maxlen); static inline unsigned char nls_tolower(struct nls_table *t, unsigned char c) { @@ -58,6 +81,25 @@ static inline int nls_strnicmp(struct nls_table *t, const unsigned char *s1, return 0; } +/* + * nls_nullsize - return length of null character for codepage + * @codepage - codepage for which to return length of NULL terminator + * + * Since we can't guarantee that the null terminator will be a particular + * length, we have to check against the codepage. If there's a problem + * determining it, assume a single-byte NULL terminator. + */ +static inline int +nls_nullsize(const struct nls_table *codepage) +{ + int charlen; + char tmp[NLS_MAX_CHARSET_SIZE]; + + charlen = codepage->uni2char(0, tmp, NLS_MAX_CHARSET_SIZE); + + return charlen > 0 ? charlen : 1; +} + #define MODULE_ALIAS_NLS(name) MODULE_ALIAS("nls_" __stringify(name)) #endif /* _LINUX_NLS_H */ diff --git a/include/linux/nmi.h b/include/linux/nmi.h index 29af2d5df09..b752e807add 100644 --- a/include/linux/nmi.h +++ b/include/linux/nmi.h @@ -28,8 +28,23 @@ static inline void acpi_nmi_disable(void) { } static inline void acpi_nmi_enable(void) { } #endif -#ifndef trigger_all_cpu_backtrace -#define trigger_all_cpu_backtrace() do { } while (0) +/* + * Create trigger_all_cpu_backtrace() out of the arch-provided + * base function. Return whether such support was available, + * to allow calling code to fall back to some other mechanism: + */ +#ifdef arch_trigger_all_cpu_backtrace +static inline bool trigger_all_cpu_backtrace(void) +{ + arch_trigger_all_cpu_backtrace(); + + return true; +} +#else +static inline bool trigger_all_cpu_backtrace(void) +{ + return false; +} #endif #endif diff --git a/include/linux/nodemask.h b/include/linux/nodemask.h index 848025cd708..b359c4a9ec9 100644 --- a/include/linux/nodemask.h +++ b/include/linux/nodemask.h @@ -82,6 +82,12 @@ * to generate slightly worse code. So use a simple one-line #define * for node_isset(), instead of wrapping an inline inside a macro, the * way we do the other calls. + * + * NODEMASK_SCRATCH + * When doing above logical AND, OR, XOR, Remap operations the callers tend to + * need temporary nodemask_t's on the stack. But if NODES_SHIFT is large, + * nodemask_t's consume too much stack space. NODEMASK_SCRATCH is a helper + * for such situations. See below and CPUMASK_ALLOC also. 
*/ #include <linux/kernel.h> @@ -408,6 +414,19 @@ static inline int num_node_state(enum node_states state) #define next_online_node(nid) next_node((nid), node_states[N_ONLINE]) extern int nr_node_ids; +extern int nr_online_nodes; + +static inline void node_set_online(int nid) +{ + node_set_state(nid, N_ONLINE); + nr_online_nodes = num_node_state(N_ONLINE); +} + +static inline void node_set_offline(int nid) +{ + node_clear_state(nid, N_ONLINE); + nr_online_nodes = num_node_state(N_ONLINE); +} #else static inline int node_state(int node, enum node_states state) @@ -434,7 +453,10 @@ static inline int num_node_state(enum node_states state) #define first_online_node 0 #define next_online_node(nid) (MAX_NUMNODES) #define nr_node_ids 1 +#define nr_online_nodes 1 +#define node_set_online(node) node_set_state((node), N_ONLINE) +#define node_set_offline(node) node_clear_state((node), N_ONLINE) #endif #define node_online_map node_states[N_ONLINE] @@ -454,10 +476,29 @@ static inline int num_node_state(enum node_states state) #define node_online(node) node_state((node), N_ONLINE) #define node_possible(node) node_state((node), N_POSSIBLE) -#define node_set_online(node) node_set_state((node), N_ONLINE) -#define node_set_offline(node) node_clear_state((node), N_ONLINE) - #define for_each_node(node) for_each_node_state(node, N_POSSIBLE) #define for_each_online_node(node) for_each_node_state(node, N_ONLINE) +/* + * For nodemask scrach area.(See CPUMASK_ALLOC() in cpumask.h) + */ + +#if NODES_SHIFT > 8 /* nodemask_t > 64 bytes */ +#define NODEMASK_ALLOC(x, m) struct x *m = kmalloc(sizeof(*m), GFP_KERNEL) +#define NODEMASK_FREE(m) kfree(m) +#else +#define NODEMASK_ALLOC(x, m) struct x _m, *m = &_m +#define NODEMASK_FREE(m) +#endif + +/* A example struture for using NODEMASK_ALLOC, used in mempolicy. */ +struct nodemask_scratch { + nodemask_t mask1; + nodemask_t mask2; +}; + +#define NODEMASK_SCRATCH(x) NODEMASK_ALLOC(nodemask_scratch, x) +#define NODEMASK_SCRATCH_FREE(x) NODEMASK_FREE(x) + + #endif /* __LINUX_NODEMASK_H */ diff --git a/include/linux/notifier.h b/include/linux/notifier.h index b86fa2ffca0..44428d247db 100644 --- a/include/linux/notifier.h +++ b/include/linux/notifier.h @@ -198,6 +198,9 @@ static inline int notifier_to_errno(int ret) #define NETDEV_CHANGENAME 0x000A #define NETDEV_FEAT_CHANGE 0x000B #define NETDEV_BONDING_FAILOVER 0x000C +#define NETDEV_PRE_UP 0x000D +#define NETDEV_BONDING_OLDTYPE 0x000E +#define NETDEV_BONDING_NEWTYPE 0x000F #define SYS_DOWN 0x0001 /* Notify of system down */ #define SYS_RESTART SYS_DOWN diff --git a/include/linux/nsproxy.h b/include/linux/nsproxy.h index afad7dec1b3..7b370c7cfef 100644 --- a/include/linux/nsproxy.h +++ b/include/linux/nsproxy.h @@ -8,6 +8,7 @@ struct mnt_namespace; struct uts_namespace; struct ipc_namespace; struct pid_namespace; +struct fs_struct; /* * A structure to contain pointers to all per-process diff --git a/include/linux/nubus.h b/include/linux/nubus.h index c4355076d1a..e137b3c486a 100644 --- a/include/linux/nubus.h +++ b/include/linux/nubus.h @@ -12,6 +12,7 @@ #ifndef LINUX_NUBUS_H #define LINUX_NUBUS_H +#include <linux/types.h> #ifdef __KERNEL__ #include <asm/nubus.h> #endif @@ -236,6 +237,7 @@ struct nubus_dirent int mask; }; +#ifdef __KERNEL__ struct nubus_board { struct nubus_board* next; struct nubus_dev* first_dev; @@ -350,6 +352,7 @@ void nubus_get_rsrc_mem(void* dest, void nubus_get_rsrc_str(void* dest, const struct nubus_dirent *dirent, int maxlen); +#endif /* __KERNEL__ */ /* We'd like to get rid of this eventually. 
Only daynaport.c uses it now. */ static inline void *nubus_slot_addr(int slot) diff --git a/include/linux/of.h b/include/linux/of.h index 6a7efa242f5..7be2d1043c1 100644 --- a/include/linux/of.h +++ b/include/linux/of.h @@ -77,6 +77,9 @@ extern int of_n_size_cells(struct device_node *np); extern const struct of_device_id *of_match_node( const struct of_device_id *matches, const struct device_node *node); extern int of_modalias_node(struct device_node *node, char *modalias, int len); +extern struct device_node *of_parse_phandle(struct device_node *np, + const char *phandle_name, + int index); extern int of_parse_phandles_with_args(struct device_node *np, const char *list_name, const char *cells_name, int index, struct device_node **out_node, const void **out_args); diff --git a/include/linux/of_mdio.h b/include/linux/of_mdio.h new file mode 100644 index 00000000000..53b94e025c7 --- /dev/null +++ b/include/linux/of_mdio.h @@ -0,0 +1,25 @@ +/* + * OF helpers for the MDIO (Ethernet PHY) API + * + * Copyright (c) 2009 Secret Lab Technologies, Ltd. + * + * This file is released under the GPLv2 + */ + +#ifndef __LINUX_OF_MDIO_H +#define __LINUX_OF_MDIO_H + +#include <linux/phy.h> +#include <linux/of.h> + +extern int of_mdiobus_register(struct mii_bus *mdio, struct device_node *np); +extern struct phy_device *of_phy_find_device(struct device_node *phy_np); +extern struct phy_device *of_phy_connect(struct net_device *dev, + struct device_node *phy_np, + void (*hndlr)(struct net_device *), + u32 flags, phy_interface_t iface); +extern struct phy_device *of_phy_connect_fixed_link(struct net_device *dev, + void (*hndlr)(struct net_device *), + phy_interface_t iface); + +#endif /* __LINUX_OF_MDIO_H */ diff --git a/include/linux/of_platform.h b/include/linux/of_platform.h index 3d327b67d7e..90840665133 100644 --- a/include/linux/of_platform.h +++ b/include/linux/of_platform.h @@ -51,6 +51,16 @@ extern int of_register_driver(struct of_platform_driver *drv, struct bus_type *bus); extern void of_unregister_driver(struct of_platform_driver *drv); +/* Platform drivers register/unregister */ +static inline int of_register_platform_driver(struct of_platform_driver *drv) +{ + return of_register_driver(drv, &of_platform_bus_type); +} +static inline void of_unregister_platform_driver(struct of_platform_driver *drv) +{ + of_unregister_driver(drv); +} + #include <asm/of_platform.h> extern struct of_device *of_find_device_by_node(struct device_node *np); diff --git a/include/linux/oprofile.h b/include/linux/oprofile.h index 1d9518bc4c5..5171639ecf0 100644 --- a/include/linux/oprofile.h +++ b/include/linux/oprofile.h @@ -67,6 +67,9 @@ struct oprofile_operations { /* Initiate a stack backtrace. Optional. */ void (*backtrace)(struct pt_regs * const regs, unsigned int depth); + + /* Multiplex between different events. Optional. */ + int (*switch_events)(void); /* CPU identification string. 
*/ char * cpu_type; }; @@ -171,7 +174,6 @@ struct op_sample; struct op_entry { struct ring_buffer_event *event; struct op_sample *sample; - unsigned long irq_flags; unsigned long size; unsigned long *data; }; @@ -180,6 +182,7 @@ void oprofile_write_reserve(struct op_entry *entry, struct pt_regs * const regs, unsigned long pc, int code, int size); int oprofile_add_data(struct op_entry *entry, unsigned long val); +int oprofile_add_data64(struct op_entry *entry, u64 val); int oprofile_write_commit(struct op_entry *entry); #endif /* OPROFILE_H */ diff --git a/include/linux/page-debug-flags.h b/include/linux/page-debug-flags.h new file mode 100644 index 00000000000..b0638fd91e9 --- /dev/null +++ b/include/linux/page-debug-flags.h @@ -0,0 +1,30 @@ +#ifndef LINUX_PAGE_DEBUG_FLAGS_H +#define LINUX_PAGE_DEBUG_FLAGS_H + +/* + * page->debug_flags bits: + * + * PAGE_DEBUG_FLAG_POISON is set for poisoned pages. This is used to + * implement generic debug pagealloc feature. The pages are filled with + * poison patterns and set this flag after free_pages(). The poisoned + * pages are verified whether the patterns are not corrupted and clear + * the flag before alloc_pages(). + */ + +enum page_debug_flags { + PAGE_DEBUG_FLAG_POISON, /* Page is poisoned */ +}; + +/* + * Ensure that CONFIG_WANT_PAGE_DEBUG_FLAGS reliably + * gets turned off when no debug features are enabling it! + */ + +#ifdef CONFIG_WANT_PAGE_DEBUG_FLAGS +#if !defined(CONFIG_PAGE_POISONING) \ +/* && !defined(CONFIG_PAGE_DEBUG_SOMETHING_ELSE) && ... */ +#error WANT_PAGE_DEBUG_FLAGS is turned on with no debug features! +#endif +#endif /* CONFIG_WANT_PAGE_DEBUG_FLAGS */ + +#endif /* LINUX_PAGE_DEBUG_FLAGS_H */ diff --git a/include/linux/page-flags.h b/include/linux/page-flags.h index 219a523ecdb..e2e5ce54359 100644 --- a/include/linux/page-flags.h +++ b/include/linux/page-flags.h @@ -82,6 +82,7 @@ enum pageflags { PG_arch_1, PG_reserved, PG_private, /* If pagecache, has fs-private data */ + PG_private_2, /* If pagecache, has fs aux data */ PG_writeback, /* Page is under writeback */ #ifdef CONFIG_PAGEFLAGS_EXTENDED PG_head, /* A head page */ @@ -94,8 +95,8 @@ enum pageflags { PG_reclaim, /* To be reclaimed asap */ PG_buddy, /* Page is free, on buddy lists */ PG_swapbacked, /* Page is backed by RAM/swap */ -#ifdef CONFIG_UNEVICTABLE_LRU PG_unevictable, /* Page is "unevictable" */ +#ifdef CONFIG_HAVE_MLOCKED_PAGE_BIT PG_mlocked, /* Page is vma mlocked */ #endif #ifdef CONFIG_IA64_UNCACHED_ALLOCATOR @@ -106,12 +107,17 @@ enum pageflags { /* Filesystems */ PG_checked = PG_owner_priv_1, + /* Two page bits are conscripted by FS-Cache to maintain local caching + * state. These bits are set on pages belonging to the netfs's inodes + * when those inodes are being locally cached. 
+ */ + PG_fscache = PG_private_2, /* page backed by cache */ + /* XEN */ PG_pinned = PG_owner_priv_1, PG_savepinned = PG_dirty, /* SLOB */ - PG_slob_page = PG_active, PG_slob_free = PG_private, /* SLUB */ @@ -180,7 +186,7 @@ static inline int TestClearPage##uname(struct page *page) { return 0; } struct page; /* forward declaration */ -TESTPAGEFLAG(Locked, locked) +TESTPAGEFLAG(Locked, locked) TESTSETFLAG(Locked, locked) PAGEFLAG(Error, error) PAGEFLAG(Referenced, referenced) TESTCLEARFLAG(Referenced, referenced) PAGEFLAG(Dirty, dirty) TESTSCFLAG(Dirty, dirty) __CLEARPAGEFLAG(Dirty, dirty) @@ -192,17 +198,24 @@ PAGEFLAG(Checked, checked) /* Used by some filesystems */ PAGEFLAG(Pinned, pinned) TESTSCFLAG(Pinned, pinned) /* Xen */ PAGEFLAG(SavePinned, savepinned); /* Xen */ PAGEFLAG(Reserved, reserved) __CLEARPAGEFLAG(Reserved, reserved) -PAGEFLAG(Private, private) __CLEARPAGEFLAG(Private, private) - __SETPAGEFLAG(Private, private) PAGEFLAG(SwapBacked, swapbacked) __CLEARPAGEFLAG(SwapBacked, swapbacked) -__PAGEFLAG(SlobPage, slob_page) __PAGEFLAG(SlobFree, slob_free) __PAGEFLAG(SlubFrozen, slub_frozen) __PAGEFLAG(SlubDebug, slub_debug) /* + * Private page markings that may be used by the filesystem that owns the page + * for its own purposes. + * - PG_private and PG_private_2 cause releasepage() and co to be invoked + */ +PAGEFLAG(Private, private) __SETPAGEFLAG(Private, private) + __CLEARPAGEFLAG(Private, private) +PAGEFLAG(Private2, private_2) TESTSCFLAG(Private2, private_2) +PAGEFLAG(OwnerPriv1, owner_priv_1) TESTCLEARFLAG(OwnerPriv1, owner_priv_1) + +/* * Only test-and-set exist for PG_writeback. The unconditional operators are * risky: they bypass page accounting. */ @@ -231,23 +244,17 @@ PAGEFLAG_FALSE(SwapCache) SETPAGEFLAG_NOOP(SwapCache) CLEARPAGEFLAG_NOOP(SwapCache) #endif -#ifdef CONFIG_UNEVICTABLE_LRU PAGEFLAG(Unevictable, unevictable) __CLEARPAGEFLAG(Unevictable, unevictable) TESTCLEARFLAG(Unevictable, unevictable) +#ifdef CONFIG_HAVE_MLOCKED_PAGE_BIT #define MLOCK_PAGES 1 PAGEFLAG(Mlocked, mlocked) __CLEARPAGEFLAG(Mlocked, mlocked) TESTSCFLAG(Mlocked, mlocked) - #else - #define MLOCK_PAGES 0 PAGEFLAG_FALSE(Mlocked) SETPAGEFLAG_NOOP(Mlocked) TESTCLEARFLAG_FALSE(Mlocked) - -PAGEFLAG_FALSE(Unevictable) TESTCLEARFLAG_FALSE(Unevictable) - SETPAGEFLAG_NOOP(Unevictable) CLEARPAGEFLAG_NOOP(Unevictable) - __CLEARPAGEFLAG_NOOP(Unevictable) #endif #ifdef CONFIG_IA64_UNCACHED_ALLOCATOR @@ -365,11 +372,9 @@ static inline void __ClearPageTail(struct page *page) #endif /* !PAGEFLAGS_EXTENDED */ -#ifdef CONFIG_UNEVICTABLE_LRU -#define __PG_UNEVICTABLE (1 << PG_unevictable) +#ifdef CONFIG_HAVE_MLOCKED_PAGE_BIT #define __PG_MLOCKED (1 << PG_mlocked) #else -#define __PG_UNEVICTABLE 0 #define __PG_MLOCKED 0 #endif @@ -378,10 +383,11 @@ static inline void __ClearPageTail(struct page *page) * these flags set. It they are, there is a problem. */ #define PAGE_FLAGS_CHECK_AT_FREE \ - (1 << PG_lru | 1 << PG_private | 1 << PG_locked | \ - 1 << PG_buddy | 1 << PG_writeback | 1 << PG_reserved | \ - 1 << PG_slab | 1 << PG_swapcache | 1 << PG_active | \ - __PG_UNEVICTABLE | __PG_MLOCKED) + (1 << PG_lru | 1 << PG_locked | \ + 1 << PG_private | 1 << PG_private_2 | \ + 1 << PG_buddy | 1 << PG_writeback | 1 << PG_reserved | \ + 1 << PG_slab | 1 << PG_swapcache | 1 << PG_active | \ + 1 << PG_unevictable | __PG_MLOCKED) /* * Flags checked when a page is prepped for return by the page allocator. 
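A minimal usage sketch, assuming only what the PAGEFLAG(Private2, private_2) line above generates (PagePrivate2() and friends): a network filesystem's releasepage() hook could refuse to drop a page while the local cache still pins it via PG_private_2/PG_fscache. The name example_releasepage is hypothetical, not taken from this patch.

#include <linux/mm.h>
#include <linux/pagemap.h>

static int example_releasepage(struct page *page, gfp_t gfp)
{
	/*
	 * PG_private_2 (aliased to PG_fscache above) still set: the local
	 * cache holds a reference, so the page cannot be released yet.
	 */
	if (PagePrivate2(page))
		return 0;

	/* no filesystem-private state left on the page */
	return 1;
}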
@@ -391,4 +397,16 @@ static inline void __ClearPageTail(struct page *page) #define PAGE_FLAGS_CHECK_AT_PREP ((1 << NR_PAGEFLAGS) - 1) #endif /* !__GENERATING_BOUNDS_H */ + +/** + * page_has_private - Determine if page has private stuff + * @page: The page to be checked + * + * Determine if a page has private stuff, indicating that release routines + * should be invoked upon it. + */ +#define page_has_private(page) \ + ((page)->flags & ((1 << PG_private) | \ + (1 << PG_private_2))) + #endif /* PAGE_FLAGS_H */ diff --git a/include/linux/page_cgroup.h b/include/linux/page_cgroup.h index 602cc1fdee9..13f126c89ae 100644 --- a/include/linux/page_cgroup.h +++ b/include/linux/page_cgroup.h @@ -18,7 +18,19 @@ struct page_cgroup { }; void __meminit pgdat_page_cgroup_init(struct pglist_data *pgdat); -void __init page_cgroup_init(void); + +#ifdef CONFIG_SPARSEMEM +static inline void __init page_cgroup_init_flatmem(void) +{ +} +extern void __init page_cgroup_init(void); +#else +void __init page_cgroup_init_flatmem(void); +static inline void __init page_cgroup_init(void) +{ +} +#endif + struct page_cgroup *lookup_page_cgroup(struct page *page); enum { @@ -87,28 +99,31 @@ static inline void page_cgroup_init(void) { } +static inline void __init page_cgroup_init_flatmem(void) +{ +} + #endif #ifdef CONFIG_CGROUP_MEM_RES_CTLR_SWAP #include <linux/swap.h> -extern struct mem_cgroup * -swap_cgroup_record(swp_entry_t ent, struct mem_cgroup *mem); -extern struct mem_cgroup *lookup_swap_cgroup(swp_entry_t ent); +extern unsigned short swap_cgroup_record(swp_entry_t ent, unsigned short id); +extern unsigned short lookup_swap_cgroup(swp_entry_t ent); extern int swap_cgroup_swapon(int type, unsigned long max_pages); extern void swap_cgroup_swapoff(int type); #else #include <linux/swap.h> static inline -struct mem_cgroup *swap_cgroup_record(swp_entry_t ent, struct mem_cgroup *mem) +unsigned short swap_cgroup_record(swp_entry_t ent, unsigned short id) { - return NULL; + return 0; } static inline -struct mem_cgroup *lookup_swap_cgroup(swp_entry_t ent) +unsigned short lookup_swap_cgroup(swp_entry_t ent) { - return NULL; + return 0; } static inline int diff --git a/include/linux/pagemap.h b/include/linux/pagemap.h index 01ca0856caf..ed5d7501e18 100644 --- a/include/linux/pagemap.h +++ b/include/linux/pagemap.h @@ -18,9 +18,12 @@ * Bits in mapping->flags. The lower __GFP_BITS_SHIFT bits are the page * allocation mode flags. 
*/ -#define AS_EIO (__GFP_BITS_SHIFT + 0) /* IO error on async write */ -#define AS_ENOSPC (__GFP_BITS_SHIFT + 1) /* ENOSPC on async write */ -#define AS_MM_ALL_LOCKS (__GFP_BITS_SHIFT + 2) /* under mm_take_all_locks() */ +enum mapping_flags { + AS_EIO = __GFP_BITS_SHIFT + 0, /* IO error on async write */ + AS_ENOSPC = __GFP_BITS_SHIFT + 1, /* ENOSPC on async write */ + AS_MM_ALL_LOCKS = __GFP_BITS_SHIFT + 2, /* under mm_take_all_locks() */ + AS_UNEVICTABLE = __GFP_BITS_SHIFT + 3, /* e.g., ramdisk, SHM_LOCK */ +}; static inline void mapping_set_error(struct address_space *mapping, int error) { @@ -32,9 +35,6 @@ static inline void mapping_set_error(struct address_space *mapping, int error) } } -#ifdef CONFIG_UNEVICTABLE_LRU -#define AS_UNEVICTABLE (__GFP_BITS_SHIFT + 2) /* e.g., ramdisk, SHM_LOCK */ - static inline void mapping_set_unevictable(struct address_space *mapping) { set_bit(AS_UNEVICTABLE, &mapping->flags); @@ -51,14 +51,6 @@ static inline int mapping_unevictable(struct address_space *mapping) return test_bit(AS_UNEVICTABLE, &mapping->flags); return !!mapping; } -#else -static inline void mapping_set_unevictable(struct address_space *mapping) { } -static inline void mapping_clear_unevictable(struct address_space *mapping) { } -static inline int mapping_unevictable(struct address_space *mapping) -{ - return 0; -} -#endif static inline gfp_t mapping_gfp_mask(struct address_space * mapping) { @@ -140,7 +132,7 @@ static inline int page_cache_get_speculative(struct page *page) { VM_BUG_ON(in_interrupt()); -#if !defined(CONFIG_SMP) && defined(CONFIG_CLASSIC_RCU) +#if !defined(CONFIG_SMP) && defined(CONFIG_TREE_RCU) # ifdef CONFIG_PREEMPT VM_BUG_ON(!in_atomic()); # endif @@ -178,7 +170,7 @@ static inline int page_cache_add_speculative(struct page *page, int count) { VM_BUG_ON(in_interrupt()); -#if !defined(CONFIG_SMP) && defined(CONFIG_CLASSIC_RCU) +#if !defined(CONFIG_SMP) && defined(CONFIG_TREE_RCU) # ifdef CONFIG_PREEMPT VM_BUG_ON(!in_atomic()); # endif @@ -380,6 +372,11 @@ static inline void wait_on_page_writeback(struct page *page) extern void end_page_writeback(struct page *page); /* + * Add an arbitrary waiter to a page's wait queue + */ +extern void add_page_wait_queue(struct page *page, wait_queue_t *waiter); + +/* * Fault a userspace page into pagetables. Return non-zero on a fault. * * This assumes that two userspace pages are always sufficient. 
That's diff --git a/include/linux/pagevec.h b/include/linux/pagevec.h index 7b2886fa7fd..bab82f4c571 100644 --- a/include/linux/pagevec.h +++ b/include/linux/pagevec.h @@ -24,7 +24,6 @@ void __pagevec_release(struct pagevec *pvec); void __pagevec_free(struct pagevec *pvec); void ____pagevec_lru_add(struct pagevec *pvec, enum lru_list lru); void pagevec_strip(struct pagevec *pvec); -void pagevec_swap_free(struct pagevec *pvec); unsigned pagevec_lookup(struct pagevec *pvec, struct address_space *mapping, pgoff_t start, unsigned nr_pages); unsigned pagevec_lookup_tag(struct pagevec *pvec, diff --git a/include/linux/parport.h b/include/linux/parport.h index e1f83c5065c..38a423ed3c0 100644 --- a/include/linux/parport.h +++ b/include/linux/parport.h @@ -324,6 +324,10 @@ struct parport { int spintime; atomic_t ref_count; + unsigned long devflags; +#define PARPORT_DEVPROC_REGISTERED 0 + struct pardevice *proc_device; /* Currently register proc device */ + struct list_head full_list; struct parport *slaves[3]; }; diff --git a/include/linux/parport_pc.h b/include/linux/parport_pc.h index ea8c6d84996..cc1767f5cca 100644 --- a/include/linux/parport_pc.h +++ b/include/linux/parport_pc.h @@ -228,10 +228,11 @@ extern void parport_pc_release_resources(struct parport *p); extern int parport_pc_claim_resources(struct parport *p); /* PCMCIA code will want to get us to look at a port. Provide a mechanism. */ -extern struct parport *parport_pc_probe_port (unsigned long base, - unsigned long base_hi, - int irq, int dma, - struct device *dev); -extern void parport_pc_unregister_port (struct parport *p); +extern struct parport *parport_pc_probe_port(unsigned long base, + unsigned long base_hi, + int irq, int dma, + struct device *dev, + int irqflags); +extern void parport_pc_unregister_port(struct parport *p); #endif diff --git a/include/linux/pci-acpi.h b/include/linux/pci-acpi.h index 042c166f65d..93a7c08f869 100644 --- a/include/linux/pci-acpi.h +++ b/include/linux/pci-acpi.h @@ -10,72 +10,25 @@ #include <linux/acpi.h> -#define OSC_QUERY_TYPE 0 -#define OSC_SUPPORT_TYPE 1 -#define OSC_CONTROL_TYPE 2 -#define OSC_SUPPORT_MASKS 0x1f - -/* - * _OSC DW0 Definition - */ -#define OSC_QUERY_ENABLE 1 -#define OSC_REQUEST_ERROR 2 -#define OSC_INVALID_UUID_ERROR 4 -#define OSC_INVALID_REVISION_ERROR 8 -#define OSC_CAPABILITIES_MASK_ERROR 16 - -/* - * _OSC DW1 Definition (OS Support Fields) - */ -#define OSC_EXT_PCI_CONFIG_SUPPORT 1 -#define OSC_ACTIVE_STATE_PWR_SUPPORT 2 -#define OSC_CLOCK_PWR_CAPABILITY_SUPPORT 4 -#define OSC_PCI_SEGMENT_GROUPS_SUPPORT 8 -#define OSC_MSI_SUPPORT 16 - -/* - * _OSC DW1 Definition (OS Control Fields) - */ -#define OSC_PCI_EXPRESS_NATIVE_HP_CONTROL 1 -#define OSC_SHPC_NATIVE_HP_CONTROL 2 -#define OSC_PCI_EXPRESS_PME_CONTROL 4 -#define OSC_PCI_EXPRESS_AER_CONTROL 8 -#define OSC_PCI_EXPRESS_CAP_STRUCTURE_CONTROL 16 - -#define OSC_CONTROL_MASKS (OSC_PCI_EXPRESS_NATIVE_HP_CONTROL | \ - OSC_SHPC_NATIVE_HP_CONTROL | \ - OSC_PCI_EXPRESS_PME_CONTROL | \ - OSC_PCI_EXPRESS_AER_CONTROL | \ - OSC_PCI_EXPRESS_CAP_STRUCTURE_CONTROL) - #ifdef CONFIG_ACPI -extern acpi_status pci_osc_control_set(acpi_handle handle, u32 flags); -int pci_acpi_osc_support(acpi_handle handle, u32 flags); static inline acpi_handle acpi_find_root_bridge_handle(struct pci_dev *pdev) { - /* Find root host bridge */ - while (pdev->bus->self) - pdev = pdev->bus->self; - - return acpi_get_pci_rootbridge_handle(pci_domain_nr(pdev->bus), - pdev->bus->number); + struct pci_bus *pbus = pdev->bus; + /* Find a PCI root bus */ + while 
(!pci_is_root_bus(pbus)) + pbus = pbus->parent; + return acpi_get_pci_rootbridge_handle(pci_domain_nr(pbus), + pbus->number); } static inline acpi_handle acpi_pci_get_bridge_handle(struct pci_bus *pbus) { - int seg = pci_domain_nr(pbus), busnr = pbus->number; - struct pci_dev *bridge = pbus->self; - if (bridge) - return DEVICE_ACPI_HANDLE(&(bridge->dev)); - return acpi_get_pci_rootbridge_handle(seg, busnr); + if (!pci_is_root_bus(pbus)) + return DEVICE_ACPI_HANDLE(&(pbus->self->dev)); + return acpi_get_pci_rootbridge_handle(pci_domain_nr(pbus), + pbus->number); } #else -#if !defined(AE_ERROR) -typedef u32 acpi_status; -#define AE_ERROR (acpi_status) (0x0001) -#endif -static inline acpi_status pci_osc_control_set(acpi_handle handle, u32 flags) -{return AE_ERROR;} static inline acpi_handle acpi_find_root_bridge_handle(struct pci_dev *pdev) { return NULL; } #endif diff --git a/include/linux/pci.h b/include/linux/pci.h index 80f8b8b65fd..115fb7ba508 100644 --- a/include/linux/pci.h +++ b/include/linux/pci.h @@ -52,6 +52,7 @@ #include <asm/atomic.h> #include <linux/device.h> #include <linux/io.h> +#include <linux/irqreturn.h> /* Include the ID list */ #include <linux/pci_ids.h> @@ -93,6 +94,12 @@ enum { /* #6: expansion ROM resource */ PCI_ROM_RESOURCE, + /* device specific resources */ +#ifdef CONFIG_PCI_IOV + PCI_IOV_RESOURCES, + PCI_IOV_RESOURCE_END = PCI_IOV_RESOURCES + PCI_SRIOV_NUM_BARS - 1, +#endif + /* resources assigned to buses behind the bridge */ #define PCI_BRIDGE_RESOURCE_NUM 4 @@ -117,6 +124,18 @@ typedef int __bitwise pci_power_t; #define PCI_UNKNOWN ((pci_power_t __force) 5) #define PCI_POWER_ERROR ((pci_power_t __force) -1) +/* Remember to update this when the list above changes! */ +extern const char *pci_power_names[]; + +static inline const char *pci_power_name(pci_power_t state) +{ + return pci_power_names[1 + (int) state]; +} + +#define PCI_PM_D2_DELAY 200 +#define PCI_PM_D3_WAIT 10 +#define PCI_PM_BUS_WAIT 50 + /** The pci_channel state describes connectivity between the CPU and * the pci device. If some PCI bus between here and the pci device * has crashed or locked up, this info is reflected here. @@ -176,6 +195,8 @@ struct pci_cap_saved_state { struct pcie_link_state; struct pci_vpd; +struct pci_sriov; +struct pci_ats; /* * The pci_dev structure is used to describe PCI devices. 
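A brief sketch (not part of the patch) of the pci_power_name() helper added above; example_report_power_state is an invented name, and the device's current_state field is read purely for illustration.

#include <linux/pci.h>

static void example_report_power_state(struct pci_dev *pdev)
{
	/* pci_power_name() maps a pci_power_t onto pci_power_names[] */
	dev_info(&pdev->dev, "power state: %s\n",
		 pci_power_name(pdev->current_state));
}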
@@ -252,6 +273,9 @@ struct pci_dev { unsigned int ari_enabled:1; /* ARI forwarding */ unsigned int is_managed:1; unsigned int is_pcie:1; + unsigned int state_saved:1; + unsigned int is_physfn:1; + unsigned int is_virtfn:1; pci_dev_flags_t dev_flags; atomic_t enable_cnt; /* pci_enable_device has been called */ @@ -265,6 +289,13 @@ struct pci_dev { struct list_head msi_list; #endif struct pci_vpd *vpd; +#ifdef CONFIG_PCI_IOV + union { + struct pci_sriov *sriov; /* SR-IOV capability related */ + struct pci_dev *physfn; /* the PF this VF is associated with */ + }; + struct pci_ats *ats; /* Address Translation Service */ +#endif }; extern struct pci_dev *alloc_pci_dev(void); @@ -336,6 +367,15 @@ struct pci_bus { #define pci_bus_b(n) list_entry(n, struct pci_bus, node) #define to_pci_bus(n) container_of(n, struct pci_bus, dev) +/* + * Returns true if the pci bus is root (behind host-pci bridge), + * false otherwise + */ +static inline bool pci_is_root_bus(struct pci_bus *pbus) +{ + return !(pbus->parent); +} + #ifdef CONFIG_PCI_MSI static inline bool pci_dev_msi_enabled(struct pci_dev *pci_dev) { @@ -523,7 +563,7 @@ void pcibios_update_irq(struct pci_dev *, int irq); /* Generic PCI functions used internally */ extern struct pci_bus *pci_find_bus(int domain, int busnr); -void pci_bus_add_devices(struct pci_bus *bus); +void pci_bus_add_devices(const struct pci_bus *bus); struct pci_bus *pci_scan_bus_parented(struct device *parent, int bus, struct pci_ops *ops, void *sysdata); static inline struct pci_bus * __devinit pci_scan_bus(int bus, struct pci_ops *ops, @@ -569,8 +609,6 @@ extern void pci_sort_breadthfirst(void); struct pci_dev __deprecated *pci_find_device(unsigned int vendor, unsigned int device, struct pci_dev *from); -struct pci_dev __deprecated *pci_find_slot(unsigned int bus, - unsigned int devfn); #endif /* CONFIG_PCI_LEGACY */ enum pci_lost_interrupt_reason { @@ -609,6 +647,7 @@ int pci_bus_write_config_word(struct pci_bus *bus, unsigned int devfn, int where, u16 val); int pci_bus_write_config_dword(struct pci_bus *bus, unsigned int devfn, int where, u32 val); +struct pci_ops *pci_bus_set_ops(struct pci_bus *bus, struct pci_ops *ops); static inline int pci_read_config_byte(struct pci_dev *dev, int where, u8 *val) { @@ -644,6 +683,11 @@ int __must_check pci_reenable_device(struct pci_dev *); int __must_check pcim_enable_device(struct pci_dev *pdev); void pcim_pin_device(struct pci_dev *pdev); +static inline int pci_is_enabled(struct pci_dev *pdev) +{ + return (atomic_read(&pdev->enable_cnt) > 0); +} + static inline int pci_is_managed(struct pci_dev *pdev) { return pdev->is_managed; @@ -668,8 +712,8 @@ int pcix_get_mmrbc(struct pci_dev *dev); int pcix_set_mmrbc(struct pci_dev *dev, int mmrbc); int pcie_get_readrq(struct pci_dev *dev); int pcie_set_readrq(struct pci_dev *dev, int rq); +int __pci_reset_function(struct pci_dev *dev); int pci_reset_function(struct pci_dev *dev); -int pci_execute_reset_function(struct pci_dev *dev); void pci_update_resource(struct pci_dev *dev, int resno); int __must_check pci_assign_resource(struct pci_dev *dev, int i); int pci_select_bars(struct pci_dev *dev, unsigned long flags); @@ -679,16 +723,17 @@ int pci_enable_rom(struct pci_dev *pdev); void pci_disable_rom(struct pci_dev *pdev); void __iomem __must_check *pci_map_rom(struct pci_dev *pdev, size_t *size); void pci_unmap_rom(struct pci_dev *pdev, void __iomem *rom); -size_t pci_get_rom_size(void __iomem *rom, size_t size); +size_t pci_get_rom_size(struct pci_dev *pdev, void __iomem *rom, size_t size); 
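/*
 * Editorial sketch, not part of this hunk: the pci_is_root_bus() and
 * pci_is_enabled() helpers introduced above could be combined as below.
 * example_dev_on_root_bus is an invented name; <linux/pci.h> is assumed
 * to be included.
 */
static bool example_dev_on_root_bus(struct pci_dev *pdev)
{
	struct pci_bus *bus = pdev->bus;

	/* climb towards the host bridge; only the root bus has no parent */
	while (!pci_is_root_bus(bus))
		bus = bus->parent;

	/* true only for an enabled device sitting directly on the root bus */
	return pci_is_enabled(pdev) && bus == pdev->bus;
}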
/* Power management related routines */ int pci_save_state(struct pci_dev *dev); int pci_restore_state(struct pci_dev *dev); +int __pci_complete_power_transition(struct pci_dev *dev, pci_power_t state); int pci_set_power_state(struct pci_dev *dev, pci_power_t state); pci_power_t pci_choose_state(struct pci_dev *dev, pm_message_t state); bool pci_pme_capable(struct pci_dev *dev, pci_power_t state); void pci_pme_active(struct pci_dev *dev, bool enable); -int pci_enable_wake(struct pci_dev *dev, pci_power_t state, int enable); +int pci_enable_wake(struct pci_dev *dev, pci_power_t state, bool enable); int pci_wake_from_d3(struct pci_dev *dev, bool enable); pci_power_t pci_target_state(struct pci_dev *dev); int pci_prepare_to_sleep(struct pci_dev *dev); @@ -696,6 +741,9 @@ int pci_back_from_sleep(struct pci_dev *dev); /* Functions for PCI Hotplug drivers to use */ int pci_bus_find_capability(struct pci_bus *bus, unsigned int devfn, int cap); +#ifdef CONFIG_HOTPLUG +unsigned int pci_rescan_bus(struct pci_bus *bus); +#endif /* Vital product data routines */ ssize_t pci_read_vpd(struct pci_dev *dev, loff_t pos, size_t count, void *buf); @@ -703,7 +751,7 @@ ssize_t pci_write_vpd(struct pci_dev *dev, loff_t pos, size_t count, const void int pci_vpd_truncate(struct pci_dev *dev, size_t size); /* Helper functions for low-level code (drivers/pci/setup-[bus,res].c) */ -void pci_bus_assign_resources(struct pci_bus *bus); +void pci_bus_assign_resources(const struct pci_bus *bus); void pci_bus_size_bridges(struct pci_bus *bus); int pci_claim_resource(struct pci_dev *, int); void pci_assign_unassigned_resources(void); @@ -751,7 +799,7 @@ const struct pci_device_id *pci_match_id(const struct pci_device_id *ids, int pci_scan_bridge(struct pci_bus *bus, struct pci_dev *dev, int max, int pass); -void pci_walk_bus(struct pci_bus *top, void (*cb)(struct pci_dev *, void *), +void pci_walk_bus(struct pci_bus *top, int (*cb)(struct pci_dev *, void *), void *userdata); int pci_cfg_space_size_ext(struct pci_dev *dev); int pci_cfg_space_size(struct pci_dev *dev); @@ -784,7 +832,7 @@ struct msix_entry { #ifndef CONFIG_PCI_MSI -static inline int pci_enable_msi(struct pci_dev *dev) +static inline int pci_enable_msi_block(struct pci_dev *dev, unsigned int nvec) { return -1; } @@ -794,6 +842,10 @@ static inline void pci_msi_shutdown(struct pci_dev *dev) static inline void pci_disable_msi(struct pci_dev *dev) { } +static inline int pci_msix_table_size(struct pci_dev *dev) +{ + return 0; +} static inline int pci_enable_msix(struct pci_dev *dev, struct msix_entry *entries, int nvec) { @@ -815,9 +867,10 @@ static inline int pci_msi_enabled(void) return 0; } #else -extern int pci_enable_msi(struct pci_dev *dev); +extern int pci_enable_msi_block(struct pci_dev *dev, unsigned int nvec); extern void pci_msi_shutdown(struct pci_dev *dev); extern void pci_disable_msi(struct pci_dev *dev); +extern int pci_msix_table_size(struct pci_dev *dev); extern int pci_enable_msix(struct pci_dev *dev, struct msix_entry *entries, int nvec); extern void pci_msix_shutdown(struct pci_dev *dev); @@ -836,6 +889,19 @@ static inline int pcie_aspm_enabled(void) extern int pcie_aspm_enabled(void); #endif +#ifndef CONFIG_PCIE_ECRC +static inline void pcie_set_ecrc_checking(struct pci_dev *dev) +{ + return; +} +static inline void pcie_ecrc_get_policy(char *str) {}; +#else +extern void pcie_set_ecrc_checking(struct pci_dev *dev); +extern void pcie_ecrc_get_policy(char *str); +#endif + +#define pci_enable_msi(pdev) pci_enable_msi_block(pdev, 1) + #ifdef 
CONFIG_HT_IRQ /* The functions a driver should call */ int ht_create_irq(struct pci_dev *dev, int idx); @@ -890,12 +956,6 @@ static inline struct pci_dev *pci_find_device(unsigned int vendor, return NULL; } -static inline struct pci_dev *pci_find_slot(unsigned int bus, - unsigned int devfn) -{ - return NULL; -} - static inline struct pci_dev *pci_get_device(unsigned int vendor, unsigned int device, struct pci_dev *from) @@ -1051,6 +1111,10 @@ static inline struct pci_dev *pci_get_bus_and_slot(unsigned int bus, #include <asm/pci.h> +#ifndef PCIBIOS_MAX_MEM_32 +#define PCIBIOS_MAX_MEM_32 (-1) +#endif + /* these helpers provide future and backwards compatibility * for accessing popular PCI BAR info */ #define pci_resource_start(dev, bar) ((dev)->resource[(bar)].start) @@ -1081,7 +1145,7 @@ static inline void pci_set_drvdata(struct pci_dev *pdev, void *data) /* If you want to know what to call your pci_dev, ask this function. * Again, it's a wrapper around the generic device. */ -static inline const char *pci_name(struct pci_dev *pdev) +static inline const char *pci_name(const struct pci_dev *pdev) { return dev_name(&pdev->dev); } @@ -1189,5 +1253,28 @@ int pci_ext_cfg_avail(struct pci_dev *dev); void __iomem *pci_ioremap_bar(struct pci_dev *pdev, int bar); +#ifdef CONFIG_PCI_IOV +extern int pci_enable_sriov(struct pci_dev *dev, int nr_virtfn); +extern void pci_disable_sriov(struct pci_dev *dev); +extern irqreturn_t pci_sriov_migration(struct pci_dev *dev); +#else +static inline int pci_enable_sriov(struct pci_dev *dev, int nr_virtfn) +{ + return -ENODEV; +} +static inline void pci_disable_sriov(struct pci_dev *dev) +{ +} +static inline irqreturn_t pci_sriov_migration(struct pci_dev *dev) +{ + return IRQ_NONE; +} +#endif + +#if defined(CONFIG_HOTPLUG_PCI) || defined(CONFIG_HOTPLUG_PCI_MODULE) +extern void pci_hp_create_module_link(struct pci_slot *pci_slot); +extern void pci_hp_remove_module_link(struct pci_slot *pci_slot); +#endif + #endif /* __KERNEL__ */ #endif /* LINUX_PCI_H */ diff --git a/include/linux/pci_hotplug.h b/include/linux/pci_hotplug.h index 20998746518..4391741b99d 100644 --- a/include/linux/pci_hotplug.h +++ b/include/linux/pci_hotplug.h @@ -66,17 +66,10 @@ enum pcie_link_speed { PCIE_LNK_SPEED_UNKNOWN = 0xFF, }; -struct hotplug_slot; -struct hotplug_slot_attribute { - struct attribute attr; - ssize_t (*show)(struct hotplug_slot *, char *); - ssize_t (*store)(struct hotplug_slot *, const char *, size_t); -}; -#define to_hotplug_attr(n) container_of(n, struct hotplug_slot_attribute, attr); - /** * struct hotplug_slot_ops -the callbacks that the hotplug pci core can use * @owner: The module owner of this structure + * @mod_name: The module name (KBUILD_MODNAME) of this structure * @enable_slot: Called when the user wants to enable a specific pci slot * @disable_slot: Called when the user wants to disable a specific pci slot * @set_attention_status: Called to set the specific slot's attention LED to @@ -109,6 +102,7 @@ struct hotplug_slot_attribute { */ struct hotplug_slot_ops { struct module *owner; + const char *mod_name; int (*enable_slot) (struct hotplug_slot *slot); int (*disable_slot) (struct hotplug_slot *slot); int (*set_attention_status) (struct hotplug_slot *slot, u8 value); @@ -167,12 +161,21 @@ static inline const char *hotplug_slot_name(const struct hotplug_slot *slot) return pci_slot_name(slot->pci_slot); } -extern int pci_hp_register(struct hotplug_slot *, struct pci_bus *, int nr, - const char *name); +extern int __pci_hp_register(struct hotplug_slot *slot, 
struct pci_bus *pbus, + int nr, const char *name, + struct module *owner, const char *mod_name); extern int pci_hp_deregister(struct hotplug_slot *slot); extern int __must_check pci_hp_change_slot_info (struct hotplug_slot *slot, struct hotplug_slot_info *info); +static inline int pci_hp_register(struct hotplug_slot *slot, + struct pci_bus *pbus, + int devnr, const char *name) +{ + return __pci_hp_register(slot, pbus, devnr, name, + THIS_MODULE, KBUILD_MODNAME); +} + /* PCI Setting Record (Type 0) */ struct hpp_type0 { u32 revision; @@ -226,7 +229,6 @@ struct hotplug_params { extern acpi_status acpi_get_hp_params_from_firmware(struct pci_bus *bus, struct hotplug_params *hpp); int acpi_get_hp_hw_control_from_firmware(struct pci_dev *dev, u32 flags); -int acpi_root_bridge(acpi_handle handle); int acpi_pci_check_ejectable(struct pci_bus *pbus, acpi_handle handle); int acpi_pci_detect_ejectable(struct pci_bus *pbus); #endif diff --git a/include/linux/pci_ids.h b/include/linux/pci_ids.h index d56ad9c21c0..555a8262fbc 100644 --- a/include/linux/pci_ids.h +++ b/include/linux/pci_ids.h @@ -104,6 +104,7 @@ #define PCI_CLASS_SERIAL_USB_UHCI 0x0c0300 #define PCI_CLASS_SERIAL_USB_OHCI 0x0c0310 #define PCI_CLASS_SERIAL_USB_EHCI 0x0c0320 +#define PCI_CLASS_SERIAL_USB_XHCI 0x0c0330 #define PCI_CLASS_SERIAL_FIBER 0x0c04 #define PCI_CLASS_SERIAL_SMBUS 0x0c05 @@ -375,6 +376,9 @@ #define PCI_DEVICE_ID_ATI_IXP600_IDE 0x438c #define PCI_DEVICE_ID_ATI_IXP700_SATA 0x4390 #define PCI_DEVICE_ID_ATI_IXP700_IDE 0x439c +/* AMD SB Chipset */ +#define PCI_DEVICE_ID_AMD_SB900_IDE 0x780c +#define PCI_DEVICE_ID_AMD_SB900_SATA_IDE 0x7800 #define PCI_VENDOR_ID_VLSI 0x1004 #define PCI_DEVICE_ID_VLSI_82C592 0x0005 @@ -526,6 +530,7 @@ #define PCI_DEVICE_ID_AMD_OPUS_7443 0x7443 #define PCI_DEVICE_ID_AMD_VIPER_7443 0x7443 #define PCI_DEVICE_ID_AMD_OPUS_7445 0x7445 +#define PCI_DEVICE_ID_AMD_8111_PCI 0x7460 #define PCI_DEVICE_ID_AMD_8111_LPC 0x7468 #define PCI_DEVICE_ID_AMD_8111_IDE 0x7469 #define PCI_DEVICE_ID_AMD_8111_SMBUS2 0x746a @@ -535,6 +540,7 @@ #define PCI_DEVICE_ID_AMD_8131_BRIDGE 0x7450 #define PCI_DEVICE_ID_AMD_8131_APIC 0x7451 #define PCI_DEVICE_ID_AMD_8132_BRIDGE 0x7458 +#define PCI_DEVICE_ID_AMD_CS5535_IDE 0x208F #define PCI_DEVICE_ID_AMD_CS5536_ISA 0x2090 #define PCI_DEVICE_ID_AMD_CS5536_FLASH 0x2091 #define PCI_DEVICE_ID_AMD_CS5536_AUDIO 0x2093 @@ -834,6 +840,8 @@ #define PCI_DEVICE_ID_PROMISE_20276 0x5275 #define PCI_DEVICE_ID_PROMISE_20277 0x7275 +#define PCI_VENDOR_ID_FOXCONN 0x105b + #define PCI_VENDOR_ID_UMC 0x1060 #define PCI_DEVICE_ID_UMC_UM8673F 0x0101 #define PCI_DEVICE_ID_UMC_UM8886BF 0x673a @@ -941,6 +949,32 @@ #define PCI_DEVICE_ID_SUN_TOMATILLO 0xa801 #define PCI_DEVICE_ID_SUN_CASSINI 0xabba +#define PCI_VENDOR_ID_NI 0x1093 +#define PCI_DEVICE_ID_NI_PCI2322 0xd130 +#define PCI_DEVICE_ID_NI_PCI2324 0xd140 +#define PCI_DEVICE_ID_NI_PCI2328 0xd150 +#define PCI_DEVICE_ID_NI_PXI8422_2322 0xd190 +#define PCI_DEVICE_ID_NI_PXI8422_2324 0xd1a0 +#define PCI_DEVICE_ID_NI_PXI8420_2322 0xd1d0 +#define PCI_DEVICE_ID_NI_PXI8420_2324 0xd1e0 +#define PCI_DEVICE_ID_NI_PXI8420_2328 0xd1f0 +#define PCI_DEVICE_ID_NI_PXI8420_23216 0xd1f1 +#define PCI_DEVICE_ID_NI_PCI2322I 0xd250 +#define PCI_DEVICE_ID_NI_PCI2324I 0xd270 +#define PCI_DEVICE_ID_NI_PCI23216 0xd2b0 +#define PCI_DEVICE_ID_NI_PXI8430_2322 0x7080 +#define PCI_DEVICE_ID_NI_PCI8430_2322 0x70db +#define PCI_DEVICE_ID_NI_PXI8430_2324 0x70dd +#define PCI_DEVICE_ID_NI_PCI8430_2324 0x70df +#define PCI_DEVICE_ID_NI_PXI8430_2328 0x70e2 +#define 
PCI_DEVICE_ID_NI_PCI8430_2328 0x70e4 +#define PCI_DEVICE_ID_NI_PXI8430_23216 0x70e6 +#define PCI_DEVICE_ID_NI_PCI8430_23216 0x70e7 +#define PCI_DEVICE_ID_NI_PXI8432_2322 0x70e8 +#define PCI_DEVICE_ID_NI_PCI8432_2322 0x70ea +#define PCI_DEVICE_ID_NI_PXI8432_2324 0x70ec +#define PCI_DEVICE_ID_NI_PCI8432_2324 0x70ee + #define PCI_VENDOR_ID_CMD 0x1095 #define PCI_DEVICE_ID_CMD_643 0x0643 #define PCI_DEVICE_ID_CMD_646 0x0646 @@ -976,6 +1010,7 @@ #define PCI_DEVICE_ID_PLX_PCI200SYN 0x3196 #define PCI_DEVICE_ID_PLX_9030 0x9030 #define PCI_DEVICE_ID_PLX_9050 0x9050 +#define PCI_DEVICE_ID_PLX_9056 0x9056 #define PCI_DEVICE_ID_PLX_9080 0x9080 #define PCI_DEVICE_ID_PLX_GTEK_SERIAL2 0xa001 @@ -1037,8 +1072,6 @@ #define PCI_DEVICE_ID_NVIDIA_NFORCE_MCP04_SMBUS 0x0034 #define PCI_DEVICE_ID_NVIDIA_NFORCE_MCP04_IDE 0x0035 #define PCI_DEVICE_ID_NVIDIA_NFORCE_MCP04_SATA 0x0036 -#define PCI_DEVICE_ID_NVIDIA_NVENET_10 0x0037 -#define PCI_DEVICE_ID_NVIDIA_NVENET_11 0x0038 #define PCI_DEVICE_ID_NVIDIA_NFORCE_MCP04_SATA2 0x003e #define PCI_DEVICE_ID_NVIDIA_GEFORCE_6800_ULTRA 0x0040 #define PCI_DEVICE_ID_NVIDIA_GEFORCE_6800 0x0041 @@ -1049,21 +1082,16 @@ #define PCI_DEVICE_ID_NVIDIA_NFORCE_CK804_IDE 0x0053 #define PCI_DEVICE_ID_NVIDIA_NFORCE_CK804_SATA 0x0054 #define PCI_DEVICE_ID_NVIDIA_NFORCE_CK804_SATA2 0x0055 -#define PCI_DEVICE_ID_NVIDIA_NVENET_8 0x0056 -#define PCI_DEVICE_ID_NVIDIA_NVENET_9 0x0057 #define PCI_DEVICE_ID_NVIDIA_CK804_AUDIO 0x0059 #define PCI_DEVICE_ID_NVIDIA_CK804_PCIE 0x005d #define PCI_DEVICE_ID_NVIDIA_NFORCE2_SMBUS 0x0064 #define PCI_DEVICE_ID_NVIDIA_NFORCE2_IDE 0x0065 -#define PCI_DEVICE_ID_NVIDIA_NVENET_2 0x0066 #define PCI_DEVICE_ID_NVIDIA_MCP2_MODEM 0x0069 #define PCI_DEVICE_ID_NVIDIA_MCP2_AUDIO 0x006a #define PCI_DEVICE_ID_NVIDIA_NFORCE2S_SMBUS 0x0084 #define PCI_DEVICE_ID_NVIDIA_NFORCE2S_IDE 0x0085 -#define PCI_DEVICE_ID_NVIDIA_NVENET_4 0x0086 #define PCI_DEVICE_ID_NVIDIA_MCP2S_MODEM 0x0089 #define PCI_DEVICE_ID_NVIDIA_CK8_AUDIO 0x008a -#define PCI_DEVICE_ID_NVIDIA_NVENET_5 0x008c #define PCI_DEVICE_ID_NVIDIA_NFORCE2S_SATA 0x008e #define PCI_DEVICE_ID_NVIDIA_GEFORCE_7800_GT 0x0090 #define PCI_DEVICE_ID_NVIDIA_GEFORCE_7800_GTX 0x0091 @@ -1079,15 +1107,12 @@ #define PCI_DEVICE_ID_NVIDIA_NFORCE3 0x00d1 #define PCI_DEVICE_ID_NVIDIA_NFORCE3_SMBUS 0x00d4 #define PCI_DEVICE_ID_NVIDIA_NFORCE3_IDE 0x00d5 -#define PCI_DEVICE_ID_NVIDIA_NVENET_3 0x00d6 #define PCI_DEVICE_ID_NVIDIA_MCP3_MODEM 0x00d9 #define PCI_DEVICE_ID_NVIDIA_MCP3_AUDIO 0x00da -#define PCI_DEVICE_ID_NVIDIA_NVENET_7 0x00df #define PCI_DEVICE_ID_NVIDIA_NFORCE3S 0x00e1 #define PCI_DEVICE_ID_NVIDIA_NFORCE3S_SATA 0x00e3 #define PCI_DEVICE_ID_NVIDIA_NFORCE3S_SMBUS 0x00e4 #define PCI_DEVICE_ID_NVIDIA_NFORCE3S_IDE 0x00e5 -#define PCI_DEVICE_ID_NVIDIA_NVENET_6 0x00e6 #define PCI_DEVICE_ID_NVIDIA_CK8S_AUDIO 0x00ea #define PCI_DEVICE_ID_NVIDIA_NFORCE3S_SATA2 0x00ee #define PCIE_DEVICE_ID_NVIDIA_GEFORCE_6800_ALT1 0x00f0 @@ -1147,7 +1172,6 @@ #define PCI_DEVICE_ID_NVIDIA_NFORCE_SMBUS 0x01b4 #define PCI_DEVICE_ID_NVIDIA_NFORCE_IDE 0x01bc #define PCI_DEVICE_ID_NVIDIA_MCP1_MODEM 0x01c1 -#define PCI_DEVICE_ID_NVIDIA_NVENET_1 0x01c3 #define PCI_DEVICE_ID_NVIDIA_NFORCE2 0x01e0 #define PCI_DEVICE_ID_NVIDIA_GEFORCE3 0x0200 #define PCI_DEVICE_ID_NVIDIA_GEFORCE3_1 0x0201 @@ -1170,8 +1194,6 @@ #define PCI_DEVICE_ID_NVIDIA_NFORCE_MCP55_IDE 0x036E #define PCI_DEVICE_ID_NVIDIA_NFORCE_MCP55_SATA 0x037E #define PCI_DEVICE_ID_NVIDIA_NFORCE_MCP55_SATA2 0x037F -#define PCI_DEVICE_ID_NVIDIA_NVENET_12 0x0268 -#define PCI_DEVICE_ID_NVIDIA_NVENET_13 0x0269 #define 
PCI_DEVICE_ID_NVIDIA_GEFORCE4_TI_4800 0x0280 #define PCI_DEVICE_ID_NVIDIA_GEFORCE4_TI_4800_8X 0x0281 #define PCI_DEVICE_ID_NVIDIA_GEFORCE4_TI_4800SE 0x0282 @@ -1218,42 +1240,21 @@ #define PCI_DEVICE_ID_NVIDIA_GEFORCE_FX_GO5700_2 0x0348 #define PCI_DEVICE_ID_NVIDIA_QUADRO_FX_GO1000 0x034C #define PCI_DEVICE_ID_NVIDIA_QUADRO_FX_1100 0x034E -#define PCI_DEVICE_ID_NVIDIA_NVENET_14 0x0372 #define PCI_DEVICE_ID_NVIDIA_NVENET_15 0x0373 -#define PCI_DEVICE_ID_NVIDIA_NVENET_16 0x03E5 -#define PCI_DEVICE_ID_NVIDIA_NVENET_17 0x03E6 #define PCI_DEVICE_ID_NVIDIA_NFORCE_MCP61_SATA 0x03E7 #define PCI_DEVICE_ID_NVIDIA_NFORCE_MCP61_SMBUS 0x03EB #define PCI_DEVICE_ID_NVIDIA_NFORCE_MCP61_IDE 0x03EC -#define PCI_DEVICE_ID_NVIDIA_NVENET_18 0x03EE -#define PCI_DEVICE_ID_NVIDIA_NVENET_19 0x03EF #define PCI_DEVICE_ID_NVIDIA_NFORCE_MCP61_SATA2 0x03F6 #define PCI_DEVICE_ID_NVIDIA_NFORCE_MCP61_SATA3 0x03F7 #define PCI_DEVICE_ID_NVIDIA_NFORCE_MCP65_SMBUS 0x0446 #define PCI_DEVICE_ID_NVIDIA_NFORCE_MCP65_IDE 0x0448 -#define PCI_DEVICE_ID_NVIDIA_NVENET_20 0x0450 -#define PCI_DEVICE_ID_NVIDIA_NVENET_21 0x0451 -#define PCI_DEVICE_ID_NVIDIA_NVENET_22 0x0452 -#define PCI_DEVICE_ID_NVIDIA_NVENET_23 0x0453 -#define PCI_DEVICE_ID_NVIDIA_NVENET_24 0x054C -#define PCI_DEVICE_ID_NVIDIA_NVENET_25 0x054D -#define PCI_DEVICE_ID_NVIDIA_NVENET_26 0x054E -#define PCI_DEVICE_ID_NVIDIA_NVENET_27 0x054F -#define PCI_DEVICE_ID_NVIDIA_NVENET_28 0x07DC -#define PCI_DEVICE_ID_NVIDIA_NVENET_29 0x07DD -#define PCI_DEVICE_ID_NVIDIA_NVENET_30 0x07DE -#define PCI_DEVICE_ID_NVIDIA_NVENET_31 0x07DF +#define PCI_DEVICE_ID_NVIDIA_NFORCE_MCP67_SMBUS 0x0542 #define PCI_DEVICE_ID_NVIDIA_NFORCE_MCP67_IDE 0x0560 #define PCI_DEVICE_ID_NVIDIA_NFORCE_MCP73_IDE 0x056C +#define PCI_DEVICE_ID_NVIDIA_NFORCE_MCP78S_SMBUS 0x0752 #define PCI_DEVICE_ID_NVIDIA_NFORCE_MCP77_IDE 0x0759 -#define PCI_DEVICE_ID_NVIDIA_NVENET_32 0x0760 -#define PCI_DEVICE_ID_NVIDIA_NVENET_33 0x0761 -#define PCI_DEVICE_ID_NVIDIA_NVENET_34 0x0762 -#define PCI_DEVICE_ID_NVIDIA_NVENET_35 0x0763 -#define PCI_DEVICE_ID_NVIDIA_NVENET_36 0x0AB0 -#define PCI_DEVICE_ID_NVIDIA_NVENET_37 0x0AB1 -#define PCI_DEVICE_ID_NVIDIA_NVENET_38 0x0AB2 -#define PCI_DEVICE_ID_NVIDIA_NVENET_39 0x0AB3 +#define PCI_DEVICE_ID_NVIDIA_NFORCE_MCP73_SMBUS 0x07D8 +#define PCI_DEVICE_ID_NVIDIA_NFORCE_MCP79_SMBUS 0x0AA2 #define PCI_VENDOR_ID_IMS 0x10e0 #define PCI_DEVICE_ID_IMS_TT128 0x9128 @@ -1281,6 +1282,13 @@ #define PCI_VENDOR_ID_CREATIVE 0x1102 /* duplicate: ECTIVA */ #define PCI_DEVICE_ID_CREATIVE_EMU10K1 0x0002 +#define PCI_DEVICE_ID_CREATIVE_20K1 0x0005 +#define PCI_DEVICE_ID_CREATIVE_20K2 0x000b +#define PCI_SUBDEVICE_ID_CREATIVE_SB0760 0x0024 +#define PCI_SUBDEVICE_ID_CREATIVE_SB08801 0x0041 +#define PCI_SUBDEVICE_ID_CREATIVE_SB08802 0x0042 +#define PCI_SUBDEVICE_ID_CREATIVE_SB08803 0x0043 +#define PCI_SUBDEVICE_ID_CREATIVE_HENDRIX 0x6000 #define PCI_VENDOR_ID_ECTIVA 0x1102 /* duplicate: CREATIVE */ #define PCI_DEVICE_ID_ECTIVA_EV1938 0x8938 @@ -1312,6 +1320,7 @@ #define PCI_DEVICE_ID_VIA_VT3351 0x0351 #define PCI_DEVICE_ID_VIA_VT3364 0x0364 #define PCI_DEVICE_ID_VIA_8371_0 0x0391 +#define PCI_DEVICE_ID_VIA_6415 0x0415 #define PCI_DEVICE_ID_VIA_8501_0 0x0501 #define PCI_DEVICE_ID_VIA_82C561 0x0561 #define PCI_DEVICE_ID_VIA_82C586_1 0x0571 @@ -1357,6 +1366,7 @@ #define PCI_DEVICE_ID_VIA_8783_0 0x3208 #define PCI_DEVICE_ID_VIA_8237 0x3227 #define PCI_DEVICE_ID_VIA_8251 0x3287 +#define PCI_DEVICE_ID_VIA_8261 0x3402 #define PCI_DEVICE_ID_VIA_8237A 0x3337 #define PCI_DEVICE_ID_VIA_8237S 0x3372 #define 
PCI_DEVICE_ID_VIA_SATA_EIDE 0x5324 @@ -1366,10 +1376,13 @@ #define PCI_DEVICE_ID_VIA_CX700 0x8324 #define PCI_DEVICE_ID_VIA_CX700_IDE 0x0581 #define PCI_DEVICE_ID_VIA_VX800 0x8353 +#define PCI_DEVICE_ID_VIA_VX855 0x8409 #define PCI_DEVICE_ID_VIA_8371_1 0x8391 #define PCI_DEVICE_ID_VIA_82C598_1 0x8598 #define PCI_DEVICE_ID_VIA_838X_1 0xB188 #define PCI_DEVICE_ID_VIA_83_87XX_1 0xB198 +#define PCI_DEVICE_ID_VIA_VX855_IDE 0xC409 +#define PCI_DEVICE_ID_VIA_ANON 0xFFFF #define PCI_VENDOR_ID_SIEMENS 0x110A #define PCI_DEVICE_ID_SIEMENS_DSCC4 0x2102 @@ -1440,6 +1453,7 @@ #define PCI_DEVICE_ID_DIGI_DF_M_E 0x0071 #define PCI_DEVICE_ID_DIGI_DF_M_IOM2_A 0x0072 #define PCI_DEVICE_ID_DIGI_DF_M_A 0x0073 +#define PCI_DEVICE_ID_DIGI_NEO_8 0x00B1 #define PCI_DEVICE_ID_NEO_2DB9 0x00C8 #define PCI_DEVICE_ID_NEO_2DB9PRI 0x00C9 #define PCI_DEVICE_ID_NEO_2RJ45 0x00CA @@ -1467,6 +1481,7 @@ #define PCI_DEVICE_ID_SERVERWORKS_HT1000IDE 0x0214 #define PCI_DEVICE_ID_SERVERWORKS_CSB6IDE2 0x0217 #define PCI_DEVICE_ID_SERVERWORKS_CSB6LPC 0x0227 +#define PCI_DEVICE_ID_SERVERWORKS_HT1100LD 0x0408 #define PCI_VENDOR_ID_SBE 0x1176 #define PCI_DEVICE_ID_SBE_WANXL100 0x0301 @@ -1807,6 +1822,10 @@ #define PCI_SUBDEVICE_ID_HYPERCOPE_METRO 0x0107 #define PCI_SUBDEVICE_ID_HYPERCOPE_CHAMP2 0x0108 +#define PCI_VENDOR_ID_DIGIGRAM 0x1369 +#define PCI_SUBDEVICE_ID_DIGIGRAM_LX6464ES_SERIAL_SUBSYSTEM 0xc001 +#define PCI_SUBDEVICE_ID_DIGIGRAM_LX6464ES_CAE_SERIAL_SUBSYSTEM 0xc002 + #define PCI_VENDOR_ID_KAWASAKI 0x136b #define PCI_DEVICE_ID_MCHIP_KL5A72002 0xff01 @@ -1874,6 +1893,8 @@ #define PCI_SUBDEVICE_ID_CCD_SWYX4S 0xB540 #define PCI_SUBDEVICE_ID_CCD_JH4S20 0xB550 #define PCI_SUBDEVICE_ID_CCD_IOB8ST_1 0xB552 +#define PCI_SUBDEVICE_ID_CCD_JHSE1 0xB553 +#define PCI_SUBDEVICE_ID_CCD_JH8S 0xB55B #define PCI_SUBDEVICE_ID_CCD_BN4S 0xB560 #define PCI_SUBDEVICE_ID_CCD_BN8S 0xB562 #define PCI_SUBDEVICE_ID_CCD_BNE1 0xB563 @@ -1956,15 +1977,21 @@ #define PCI_DEVICE_ID_OXSEMI_PCIe952_1_U 0xC118 #define PCI_DEVICE_ID_OXSEMI_PCIe952_1_GU 0xC11C #define PCI_DEVICE_ID_OXSEMI_16PCI954 0x9501 +#define PCI_DEVICE_ID_OXSEMI_C950 0x950B #define PCI_DEVICE_ID_OXSEMI_16PCI95N 0x9511 #define PCI_DEVICE_ID_OXSEMI_16PCI954PP 0x9513 #define PCI_DEVICE_ID_OXSEMI_16PCI952 0x9521 #define PCI_DEVICE_ID_OXSEMI_16PCI952PP 0x9523 +#define PCI_SUBDEVICE_ID_OXSEMI_C950 0x0001 #define PCI_VENDOR_ID_CHELSIO 0x1425 #define PCI_VENDOR_ID_SAMSUNG 0x144d +#define PCI_VENDOR_ID_GIGABYTE 0x1458 + +#define PCI_VENDOR_ID_AMBIT 0x1468 + #define PCI_VENDOR_ID_MYRICOM 0x14c1 #define PCI_VENDOR_ID_TITAN 0x14D2 @@ -2041,7 +2068,6 @@ #define PCI_DEVICE_ID_TIGON3_5787M 0x1693 #define PCI_DEVICE_ID_TIGON3_5782 0x1696 #define PCI_DEVICE_ID_TIGON3_5784 0x1698 -#define PCI_DEVICE_ID_TIGON3_5785 0x1699 #define PCI_DEVICE_ID_TIGON3_5786 0x169a #define PCI_DEVICE_ID_TIGON3_5787 0x169b #define PCI_DEVICE_ID_TIGON3_5788 0x169c @@ -2071,6 +2097,7 @@ #define PCI_VENDOR_ID_MAINPINE 0x1522 #define PCI_DEVICE_ID_MAINPINE_PBRIDGE 0x0100 #define PCI_VENDOR_ID_ENE 0x1524 +#define PCI_DEVICE_ID_ENE_CB710_FLASH 0x0510 #define PCI_DEVICE_ID_ENE_CB712_SD 0x0550 #define PCI_DEVICE_ID_ENE_CB712_SD_2 0x0551 #define PCI_DEVICE_ID_ENE_CB714_SD 0x0750 @@ -2106,6 +2133,8 @@ #define PCI_DEVICE_ID_MELLANOX_SINAI_OLD 0x5e8c #define PCI_DEVICE_ID_MELLANOX_SINAI 0x6274 +#define PCI_VENDOR_ID_DFI 0x15bd + #define PCI_VENDOR_ID_QUICKNET 0x15e2 #define PCI_DEVICE_ID_QUICKNET_XJ 0x0500 @@ -2209,10 +2238,20 @@ #define PCI_VENDOR_ID_TOPSPIN 0x1867 +#define PCI_VENDOR_ID_SILAN 0x1904 + #define PCI_VENDOR_ID_TDI 0x192E #define 
PCI_DEVICE_ID_TDI_EHCI 0x0101 #define PCI_VENDOR_ID_FREESCALE 0x1957 +#define PCI_DEVICE_ID_MPC8315E 0x00b4 +#define PCI_DEVICE_ID_MPC8315 0x00b5 +#define PCI_DEVICE_ID_MPC8314E 0x00b6 +#define PCI_DEVICE_ID_MPC8314 0x00b7 +#define PCI_DEVICE_ID_MPC8378E 0x00c4 +#define PCI_DEVICE_ID_MPC8378 0x00c5 +#define PCI_DEVICE_ID_MPC8377E 0x00c6 +#define PCI_DEVICE_ID_MPC8377 0x00c7 #define PCI_DEVICE_ID_MPC8548E 0x0012 #define PCI_DEVICE_ID_MPC8548 0x0013 #define PCI_DEVICE_ID_MPC8543E 0x0014 @@ -2220,6 +2259,8 @@ #define PCI_DEVICE_ID_MPC8547E 0x0018 #define PCI_DEVICE_ID_MPC8545E 0x0019 #define PCI_DEVICE_ID_MPC8545 0x001a +#define PCI_DEVICE_ID_MPC8569E 0x0061 +#define PCI_DEVICE_ID_MPC8569 0x0060 #define PCI_DEVICE_ID_MPC8568E 0x0020 #define PCI_DEVICE_ID_MPC8568 0x0021 #define PCI_DEVICE_ID_MPC8567E 0x0022 @@ -2232,6 +2273,8 @@ #define PCI_DEVICE_ID_MPC8572 0x0041 #define PCI_DEVICE_ID_MPC8536E 0x0050 #define PCI_DEVICE_ID_MPC8536 0x0051 +#define PCI_DEVICE_ID_P2020E 0x0070 +#define PCI_DEVICE_ID_P2020 0x0071 #define PCI_DEVICE_ID_MPC8641 0x7010 #define PCI_DEVICE_ID_MPC8641D 0x7011 #define PCI_DEVICE_ID_MPC8610 0x7018 @@ -2257,6 +2300,10 @@ #define PCI_DEVICE_ID_KORENIX_JETCARDF0 0x1600 #define PCI_DEVICE_ID_KORENIX_JETCARDF1 0x16ff +#define PCI_VENDOR_ID_QMI 0x1a32 + +#define PCI_VENDOR_ID_AZWAVE 0x1a3b + #define PCI_VENDOR_ID_TEKRAM 0x1de1 #define PCI_DEVICE_ID_TEKRAM_DC290 0xdc29 @@ -2317,6 +2364,9 @@ #define PCI_DEVICE_ID_INTEL_82378 0x0484 #define PCI_DEVICE_ID_INTEL_I960 0x0960 #define PCI_DEVICE_ID_INTEL_I960RM 0x0962 +#define PCI_DEVICE_ID_INTEL_8257X_SOL 0x1062 +#define PCI_DEVICE_ID_INTEL_82573E_SOL 0x1085 +#define PCI_DEVICE_ID_INTEL_82573L_SOL 0x108F #define PCI_DEVICE_ID_INTEL_82815_MC 0x1130 #define PCI_DEVICE_ID_INTEL_82815_CGC 0x1132 #define PCI_DEVICE_ID_INTEL_82092AA_0 0x1221 @@ -2364,6 +2414,7 @@ #define PCI_DEVICE_ID_INTEL_82801CA_12 0x248c #define PCI_DEVICE_ID_INTEL_82801DB_0 0x24c0 #define PCI_DEVICE_ID_INTEL_82801DB_1 0x24c1 +#define PCI_DEVICE_ID_INTEL_82801DB_2 0x24c2 #define PCI_DEVICE_ID_INTEL_82801DB_3 0x24c3 #define PCI_DEVICE_ID_INTEL_82801DB_5 0x24c5 #define PCI_DEVICE_ID_INTEL_82801DB_6 0x24c6 @@ -2421,6 +2472,7 @@ #define PCI_DEVICE_ID_INTEL_ICH7_0 0x27b8 #define PCI_DEVICE_ID_INTEL_ICH7_1 0x27b9 #define PCI_DEVICE_ID_INTEL_ICH7_30 0x27b0 +#define PCI_DEVICE_ID_INTEL_TGP_LPC 0x27bc #define PCI_DEVICE_ID_INTEL_ICH7_31 0x27bd #define PCI_DEVICE_ID_INTEL_ICH7_17 0x27da #define PCI_DEVICE_ID_INTEL_ICH7_19 0x27dd @@ -2453,6 +2505,8 @@ #define PCI_DEVICE_ID_INTEL_IOAT_TBG3 0x3433 #define PCI_DEVICE_ID_INTEL_82830_HB 0x3575 #define PCI_DEVICE_ID_INTEL_82830_CGC 0x3577 +#define PCI_DEVICE_ID_INTEL_82854_HB 0x358c +#define PCI_DEVICE_ID_INTEL_82854_IG 0x358e #define PCI_DEVICE_ID_INTEL_82855GM_HB 0x3580 #define PCI_DEVICE_ID_INTEL_82855GM_IG 0x3582 #define PCI_DEVICE_ID_INTEL_E7520_MCH 0x3590 @@ -2596,6 +2650,7 @@ #define PCI_DEVICE_ID_NETMOS_9835 0x9835 #define PCI_DEVICE_ID_NETMOS_9845 0x9845 #define PCI_DEVICE_ID_NETMOS_9855 0x9855 +#define PCI_DEVICE_ID_NETMOS_9901 0x9901 #define PCI_VENDOR_ID_3COM_2 0xa727 diff --git a/include/linux/pci_regs.h b/include/linux/pci_regs.h index 027815b4635..fcaee42c7ac 100644 --- a/include/linux/pci_regs.h +++ b/include/linux/pci_regs.h @@ -235,7 +235,7 @@ #define PCI_PM_CAP_PME_SHIFT 11 /* Start of the PME Mask in PMC */ #define PCI_PM_CTRL 4 /* PM control and status register */ #define PCI_PM_CTRL_STATE_MASK 0x0003 /* Current power state (D0 to D3) */ -#define PCI_PM_CTRL_NO_SOFT_RESET 0x0004 /* No reset for D3hot->D0 */ 
+#define PCI_PM_CTRL_NO_SOFT_RESET 0x0008 /* No reset for D3hot->D0 */ #define PCI_PM_CTRL_PME_ENABLE 0x0100 /* PME pin enable */ #define PCI_PM_CTRL_DATA_SEL_MASK 0x1e00 /* Data select (??) */ #define PCI_PM_CTRL_DATA_SCALE_MASK 0x6000 /* Data scale (??) */ @@ -295,8 +295,9 @@ #define PCI_MSI_ADDRESS_LO 4 /* Lower 32 bits */ #define PCI_MSI_ADDRESS_HI 8 /* Upper 32 bits (if PCI_MSI_FLAGS_64BIT set) */ #define PCI_MSI_DATA_32 8 /* 16 bits of data for 32-bit devices */ +#define PCI_MSI_MASK_32 12 /* Mask bits register for 32-bit devices */ #define PCI_MSI_DATA_64 12 /* 16 bits of data for 64-bit devices */ -#define PCI_MSI_MASK_BIT 16 /* Mask bits register */ +#define PCI_MSI_MASK_64 16 /* Mask bits register for 64-bit devices */ /* MSI-X registers (these are at offset PCI_MSIX_FLAGS) */ #define PCI_MSIX_FLAGS 2 @@ -304,7 +305,6 @@ #define PCI_MSIX_FLAGS_ENABLE (1 << 15) #define PCI_MSIX_FLAGS_MASKALL (1 << 14) #define PCI_MSIX_FLAGS_BIRMASK (7 << 0) -#define PCI_MSIX_FLAGS_BITMASK (1 << 0) /* CompactPCI Hotswap Register */ @@ -375,6 +375,8 @@ #define PCI_EXP_TYPE_UPSTREAM 0x5 /* Upstream Port */ #define PCI_EXP_TYPE_DOWNSTREAM 0x6 /* Downstream Port */ #define PCI_EXP_TYPE_PCI_BRIDGE 0x7 /* PCI/PCI-X Bridge */ +#define PCI_EXP_TYPE_RC_END 0x9 /* Root Complex Integrated Endpoint */ +#define PCI_EXP_TYPE_RC_EC 0x10 /* Root Complex Event Collector */ #define PCI_EXP_FLAGS_SLOT 0x0100 /* Slot implemented */ #define PCI_EXP_FLAGS_IRQ 0x3e00 /* Interrupt message number */ #define PCI_EXP_DEVCAP 4 /* Device capabilities */ @@ -487,6 +489,8 @@ #define PCI_EXP_DEVCAP2_ARI 0x20 /* Alternative Routing-ID */ #define PCI_EXP_DEVCTL2 40 /* Device Control 2 */ #define PCI_EXP_DEVCTL2_ARI 0x20 /* Alternative Routing-ID */ +#define PCI_EXP_LNKCTL2 48 /* Link Control 2 */ +#define PCI_EXP_SLTCTL2 56 /* Slot Control 2 */ /* Extended Capabilities (PCI-X 2.0 and Express) */ #define PCI_EXT_CAP_ID(header) (header & 0x0000ffff) @@ -498,6 +502,8 @@ #define PCI_EXT_CAP_ID_DSN 3 #define PCI_EXT_CAP_ID_PWR 4 #define PCI_EXT_CAP_ID_ARI 14 +#define PCI_EXT_CAP_ID_ATS 15 +#define PCI_EXT_CAP_ID_SRIOV 16 /* Advanced Error Reporting */ #define PCI_ERR_UNCOR_STATUS 4 /* Uncorrectable Error Status */ @@ -615,4 +621,44 @@ #define PCI_ARI_CTRL_ACS 0x0002 /* ACS Function Groups Enable */ #define PCI_ARI_CTRL_FG(x) (((x) >> 4) & 7) /* Function Group */ +/* Address Translation Service */ +#define PCI_ATS_CAP 0x04 /* ATS Capability Register */ +#define PCI_ATS_CAP_QDEP(x) ((x) & 0x1f) /* Invalidate Queue Depth */ +#define PCI_ATS_MAX_QDEP 32 /* Max Invalidate Queue Depth */ +#define PCI_ATS_CTRL 0x06 /* ATS Control Register */ +#define PCI_ATS_CTRL_ENABLE 0x8000 /* ATS Enable */ +#define PCI_ATS_CTRL_STU(x) ((x) & 0x1f) /* Smallest Translation Unit */ +#define PCI_ATS_MIN_STU 12 /* shift of minimum STU block */ + +/* Single Root I/O Virtualization */ +#define PCI_SRIOV_CAP 0x04 /* SR-IOV Capabilities */ +#define PCI_SRIOV_CAP_VFM 0x01 /* VF Migration Capable */ +#define PCI_SRIOV_CAP_INTR(x) ((x) >> 21) /* Interrupt Message Number */ +#define PCI_SRIOV_CTRL 0x08 /* SR-IOV Control */ +#define PCI_SRIOV_CTRL_VFE 0x01 /* VF Enable */ +#define PCI_SRIOV_CTRL_VFM 0x02 /* VF Migration Enable */ +#define PCI_SRIOV_CTRL_INTR 0x04 /* VF Migration Interrupt Enable */ +#define PCI_SRIOV_CTRL_MSE 0x08 /* VF Memory Space Enable */ +#define PCI_SRIOV_CTRL_ARI 0x10 /* ARI Capable Hierarchy */ +#define PCI_SRIOV_STATUS 0x0a /* SR-IOV Status */ +#define PCI_SRIOV_STATUS_VFM 0x01 /* VF Migration Status */ +#define PCI_SRIOV_INITIAL_VF 0x0c /* 
Initial VFs */ +#define PCI_SRIOV_TOTAL_VF 0x0e /* Total VFs */ +#define PCI_SRIOV_NUM_VF 0x10 /* Number of VFs */ +#define PCI_SRIOV_FUNC_LINK 0x12 /* Function Dependency Link */ +#define PCI_SRIOV_VF_OFFSET 0x14 /* First VF Offset */ +#define PCI_SRIOV_VF_STRIDE 0x16 /* Following VF Stride */ +#define PCI_SRIOV_VF_DID 0x1a /* VF Device ID */ +#define PCI_SRIOV_SUP_PGSIZE 0x1c /* Supported Page Sizes */ +#define PCI_SRIOV_SYS_PGSIZE 0x20 /* System Page Size */ +#define PCI_SRIOV_BAR 0x24 /* VF BAR0 */ +#define PCI_SRIOV_NUM_BARS 6 /* Number of VF BARs */ +#define PCI_SRIOV_VFM 0x3c /* VF Migration State Array Offset*/ +#define PCI_SRIOV_VFM_BIR(x) ((x) & 7) /* State BIR */ +#define PCI_SRIOV_VFM_OFFSET(x) ((x) & ~7) /* State Offset */ +#define PCI_SRIOV_VFM_UA 0x0 /* Inactive.Unavailable */ +#define PCI_SRIOV_VFM_MI 0x1 /* Dormant.MigrateIn */ +#define PCI_SRIOV_VFM_MO 0x2 /* Active.MigrateOut */ +#define PCI_SRIOV_VFM_AV 0x3 /* Active.Available */ + #endif /* LINUX_PCI_REGS_H */ diff --git a/include/linux/pcieport_if.h b/include/linux/pcieport_if.h index 6cd91e3f982..b4c79545330 100644 --- a/include/linux/pcieport_if.h +++ b/include/linux/pcieport_if.h @@ -16,29 +16,30 @@ #define PCIE_ANY_PORT 7 /* Service Type */ -#define PCIE_PORT_SERVICE_PME 1 /* Power Management Event */ -#define PCIE_PORT_SERVICE_AER 2 /* Advanced Error Reporting */ -#define PCIE_PORT_SERVICE_HP 4 /* Native Hotplug */ -#define PCIE_PORT_SERVICE_VC 8 /* Virtual Channel */ +#define PCIE_PORT_SERVICE_PME_SHIFT 0 /* Power Management Event */ +#define PCIE_PORT_SERVICE_PME (1 << PCIE_PORT_SERVICE_PME_SHIFT) +#define PCIE_PORT_SERVICE_AER_SHIFT 1 /* Advanced Error Reporting */ +#define PCIE_PORT_SERVICE_AER (1 << PCIE_PORT_SERVICE_AER_SHIFT) +#define PCIE_PORT_SERVICE_HP_SHIFT 2 /* Native Hotplug */ +#define PCIE_PORT_SERVICE_HP (1 << PCIE_PORT_SERVICE_HP_SHIFT) +#define PCIE_PORT_SERVICE_VC_SHIFT 3 /* Virtual Channel */ +#define PCIE_PORT_SERVICE_VC (1 << PCIE_PORT_SERVICE_VC_SHIFT) /* Root/Upstream/Downstream Port's Interrupt Mode */ +#define PCIE_PORT_NO_IRQ (-1) #define PCIE_PORT_INTx_MODE 0 #define PCIE_PORT_MSI_MODE 1 #define PCIE_PORT_MSIX_MODE 2 -struct pcie_port_service_id { - __u32 vendor, device; /* Vendor and device ID or PCI_ANY_ID*/ - __u32 subvendor, subdevice; /* Subsystem ID's or PCI_ANY_ID */ - __u32 class, class_mask; /* (class,subclass,prog-if) triplet */ - __u32 port_type, service_type; /* Port Entity */ - kernel_ulong_t driver_data; +struct pcie_port_data { + int port_type; /* Type of the port */ + int port_irq_mode; /* [0:INTx | 1:MSI | 2:MSI-X] */ }; struct pcie_device { int irq; /* Service IRQ/MSI/MSI-X Vector */ - int interrupt_mode; /* [0:INTx | 1:MSI | 2:MSI-X] */ - struct pcie_port_service_id id; /* Service ID */ - struct pci_dev *port; /* Root/Upstream/Downstream Port */ + struct pci_dev *port; /* Root/Upstream/Downstream Port */ + u32 service; /* Port service this device represents */ void *priv_data; /* Service Private Data */ struct device device; /* Generic Device Interface */ }; @@ -56,10 +57,9 @@ static inline void* get_service_data(struct pcie_device *dev) struct pcie_port_service_driver { const char *name; - int (*probe) (struct pcie_device *dev, - const struct pcie_port_service_id *id); + int (*probe) (struct pcie_device *dev); void (*remove) (struct pcie_device *dev); - int (*suspend) (struct pcie_device *dev, pm_message_t state); + int (*suspend) (struct pcie_device *dev); int (*resume) (struct pcie_device *dev); /* Service Error Recovery Handler */ @@ -68,7 +68,9 @@ struct 
pcie_port_service_driver { /* Link Reset Capability - AER service driver specific */ pci_ers_result_t (*reset_link) (struct pci_dev *dev); - const struct pcie_port_service_id *id_table; + int port_type; /* Type of the port this driver can handle */ + u32 service; /* Port service this device represents */ + struct device_driver driver; }; #define to_service_driver(d) \ diff --git a/include/linux/pda_power.h b/include/linux/pda_power.h index cb7d10f3076..d4cf7a2ceb3 100644 --- a/include/linux/pda_power.h +++ b/include/linux/pda_power.h @@ -31,6 +31,8 @@ struct pda_power_pdata { unsigned int wait_for_status; /* msecs, default is 500 */ unsigned int wait_for_charger; /* msecs, default is 500 */ unsigned int polling_interval; /* msecs, default is 2000 */ + + unsigned long ac_max_uA; /* current to draw when on AC */ }; #endif /* __PDA_POWER_H__ */ diff --git a/include/linux/percpu-defs.h b/include/linux/percpu-defs.h new file mode 100644 index 00000000000..0761491b3ee --- /dev/null +++ b/include/linux/percpu-defs.h @@ -0,0 +1,95 @@ +#ifndef _LINUX_PERCPU_DEFS_H +#define _LINUX_PERCPU_DEFS_H + +/* + * Determine the real variable name from the name visible in the + * kernel sources. + */ +#define per_cpu_var(var) per_cpu__##var + +/* + * Base implementations of per-CPU variable declarations and definitions, where + * the section in which the variable is to be placed is provided by the + * 'section' argument. This may be used to affect the parameters governing the + * variable's storage. + * + * NOTE! The sections for the DECLARE and for the DEFINE must match, lest + * linkage errors occur due the compiler generating the wrong code to access + * that section. + */ +#define DECLARE_PER_CPU_SECTION(type, name, section) \ + extern \ + __attribute__((__section__(PER_CPU_BASE_SECTION section))) \ + PER_CPU_ATTRIBUTES __typeof__(type) per_cpu__##name + +#define DEFINE_PER_CPU_SECTION(type, name, section) \ + __attribute__((__section__(PER_CPU_BASE_SECTION section))) \ + PER_CPU_ATTRIBUTES PER_CPU_DEF_ATTRIBUTES \ + __typeof__(type) per_cpu__##name + +/* + * Variant on the per-CPU variable declaration/definition theme used for + * ordinary per-CPU variables. + */ +#define DECLARE_PER_CPU(type, name) \ + DECLARE_PER_CPU_SECTION(type, name, "") + +#define DEFINE_PER_CPU(type, name) \ + DEFINE_PER_CPU_SECTION(type, name, "") + +/* + * Declaration/definition used for per-CPU variables that must come first in + * the set of variables. + */ +#define DECLARE_PER_CPU_FIRST(type, name) \ + DECLARE_PER_CPU_SECTION(type, name, PER_CPU_FIRST_SECTION) + +#define DEFINE_PER_CPU_FIRST(type, name) \ + DEFINE_PER_CPU_SECTION(type, name, PER_CPU_FIRST_SECTION) + +/* + * Declaration/definition used for per-CPU variables that must be cacheline + * aligned under SMP conditions so that, whilst a particular instance of the + * data corresponds to a particular CPU, inefficiencies due to direct access by + * other CPUs are reduced by preventing the data from unnecessarily spanning + * cachelines. + * + * An example of this would be statistical data, where each CPU's set of data + * is updated by that CPU alone, but the data from across all CPUs is collated + * by a CPU processing a read from a proc file. 
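Stepping back to the reworked pcieport_if.h interface above: service drivers now declare their port_type and service bit directly and get a plain probe(dev) callback instead of matching on a pcie_port_service_id table. A minimal sketch of a service driver under the new scheme follows; the names are invented, and the registration call pcie_port_service_register() is assumed from the rest of that header rather than shown in this hunk.

#include <linux/pcieport_if.h>

/* hypothetical service driver bound to the AER service on any port type */
static int my_service_probe(struct pcie_device *dev)
{
        /* dev->port is the PCIe port device, dev->irq the assigned vector,
         * dev->service the service bit this instance represents */
        return 0;
}

static void my_service_remove(struct pcie_device *dev)
{
}

static struct pcie_port_service_driver my_service_driver = {
        .name      = "my_service",
        .port_type = PCIE_ANY_PORT,
        .service   = PCIE_PORT_SERVICE_AER,
        .probe     = my_service_probe,
        .remove    = my_service_remove,
};

/* registered via pcie_port_service_register(&my_service_driver); the
 * declaration is assumed to live elsewhere in pcieport_if.h */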
+ */ +#define DECLARE_PER_CPU_SHARED_ALIGNED(type, name) \ + DECLARE_PER_CPU_SECTION(type, name, PER_CPU_SHARED_ALIGNED_SECTION) \ + ____cacheline_aligned_in_smp + +#define DEFINE_PER_CPU_SHARED_ALIGNED(type, name) \ + DEFINE_PER_CPU_SECTION(type, name, PER_CPU_SHARED_ALIGNED_SECTION) \ + ____cacheline_aligned_in_smp + +#define DECLARE_PER_CPU_ALIGNED(type, name) \ + DECLARE_PER_CPU_SECTION(type, name, PER_CPU_ALIGNED_SECTION) \ + ____cacheline_aligned + +#define DEFINE_PER_CPU_ALIGNED(type, name) \ + DEFINE_PER_CPU_SECTION(type, name, PER_CPU_ALIGNED_SECTION) \ + ____cacheline_aligned + +/* + * Declaration/definition used for per-CPU variables that must be page aligned. + */ +#define DECLARE_PER_CPU_PAGE_ALIGNED(type, name) \ + DECLARE_PER_CPU_SECTION(type, name, ".page_aligned") \ + __aligned(PAGE_SIZE) + +#define DEFINE_PER_CPU_PAGE_ALIGNED(type, name) \ + DEFINE_PER_CPU_SECTION(type, name, ".page_aligned") \ + __aligned(PAGE_SIZE) + +/* + * Intermodule exports for per-CPU variables. + */ +#define EXPORT_PER_CPU_SYMBOL(var) EXPORT_SYMBOL(per_cpu__##var) +#define EXPORT_PER_CPU_SYMBOL_GPL(var) EXPORT_SYMBOL_GPL(per_cpu__##var) + + +#endif /* _LINUX_PERCPU_DEFS_H */ diff --git a/include/linux/percpu.h b/include/linux/percpu.h index 9f2a3751873..26fd9d12f05 100644 --- a/include/linux/percpu.h +++ b/include/linux/percpu.h @@ -5,53 +5,22 @@ #include <linux/slab.h> /* For kmalloc() */ #include <linux/smp.h> #include <linux/cpumask.h> +#include <linux/pfn.h> #include <asm/percpu.h> -#ifdef CONFIG_SMP -#define DEFINE_PER_CPU(type, name) \ - __attribute__((__section__(".data.percpu"))) \ - PER_CPU_ATTRIBUTES __typeof__(type) per_cpu__##name - -#ifdef MODULE -#define SHARED_ALIGNED_SECTION ".data.percpu" -#else -#define SHARED_ALIGNED_SECTION ".data.percpu.shared_aligned" -#endif - -#define DEFINE_PER_CPU_SHARED_ALIGNED(type, name) \ - __attribute__((__section__(SHARED_ALIGNED_SECTION))) \ - PER_CPU_ATTRIBUTES __typeof__(type) per_cpu__##name \ - ____cacheline_aligned_in_smp - -#define DEFINE_PER_CPU_PAGE_ALIGNED(type, name) \ - __attribute__((__section__(".data.percpu.page_aligned"))) \ - PER_CPU_ATTRIBUTES __typeof__(type) per_cpu__##name -#else -#define DEFINE_PER_CPU(type, name) \ - PER_CPU_ATTRIBUTES __typeof__(type) per_cpu__##name - -#define DEFINE_PER_CPU_SHARED_ALIGNED(type, name) \ - DEFINE_PER_CPU(type, name) - -#define DEFINE_PER_CPU_PAGE_ALIGNED(type, name) \ - DEFINE_PER_CPU(type, name) -#endif - -#define EXPORT_PER_CPU_SYMBOL(var) EXPORT_SYMBOL(per_cpu__##var) -#define EXPORT_PER_CPU_SYMBOL_GPL(var) EXPORT_SYMBOL_GPL(per_cpu__##var) - -/* Enough to cover all DEFINE_PER_CPUs in kernel, including modules. */ -#ifndef PERCPU_ENOUGH_ROOM +/* enough to cover all DEFINE_PER_CPUs in modules */ #ifdef CONFIG_MODULES -#define PERCPU_MODULE_RESERVE 8192 +#define PERCPU_MODULE_RESERVE (8 << 10) #else -#define PERCPU_MODULE_RESERVE 0 +#define PERCPU_MODULE_RESERVE 0 #endif +#ifndef PERCPU_ENOUGH_ROOM #define PERCPU_ENOUGH_ROOM \ - (__per_cpu_end - __per_cpu_start + PERCPU_MODULE_RESERVE) -#endif /* PERCPU_ENOUGH_ROOM */ + (ALIGN(__per_cpu_end - __per_cpu_start, SMP_CACHE_BYTES) + \ + PERCPU_MODULE_RESERVE) +#endif /* * Must be an lvalue. 
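The statistics pattern described in the DEFINE_PER_CPU_SHARED_ALIGNED comment above (each CPU bumps only its own copy, a reader sums every copy) looks roughly like this in practice; a minimal sketch with invented names:

#include <linux/percpu.h>
#include <linux/cpumask.h>

static DEFINE_PER_CPU_SHARED_ALIGNED(unsigned long, pkt_count);

static void pkt_count_inc(void)                 /* fast path, local CPU only */
{
        get_cpu_var(pkt_count)++;
        put_cpu_var(pkt_count);
}

static unsigned long pkt_count_total(void)      /* e.g. backing a /proc read */
{
        unsigned long sum = 0;
        int cpu;

        for_each_possible_cpu(cpu)
                sum += per_cpu(pkt_count, cpu);
        return sum;
}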
Since @var must be a simple identifier, @@ -65,52 +34,151 @@ #ifdef CONFIG_SMP +#ifdef CONFIG_HAVE_DYNAMIC_PER_CPU_AREA + +/* minimum unit size, also is the maximum supported allocation size */ +#define PCPU_MIN_UNIT_SIZE PFN_ALIGN(64 << 10) + +/* + * PERCPU_DYNAMIC_RESERVE indicates the amount of free area to piggy + * back on the first chunk for dynamic percpu allocation if arch is + * manually allocating and mapping it for faster access (as a part of + * large page mapping for example). + * + * The following values give between one and two pages of free space + * after typical minimal boot (2-way SMP, single disk and NIC) with + * both defconfig and a distro config on x86_64 and 32. More + * intelligent way to determine this would be nice. + */ +#if BITS_PER_LONG > 32 +#define PERCPU_DYNAMIC_RESERVE (20 << 10) +#else +#define PERCPU_DYNAMIC_RESERVE (12 << 10) +#endif + +extern void *pcpu_base_addr; + +typedef struct page * (*pcpu_get_page_fn_t)(unsigned int cpu, int pageno); +typedef void (*pcpu_populate_pte_fn_t)(unsigned long addr); + +extern size_t __init pcpu_setup_first_chunk(pcpu_get_page_fn_t get_page_fn, + size_t static_size, size_t reserved_size, + ssize_t dyn_size, ssize_t unit_size, + void *base_addr, + pcpu_populate_pte_fn_t populate_pte_fn); + +extern ssize_t __init pcpu_embed_first_chunk( + size_t static_size, size_t reserved_size, + ssize_t dyn_size, ssize_t unit_size); + +/* + * Use this to get to a cpu's version of the per-cpu object + * dynamically allocated. Non-atomic access to the current CPU's + * version should probably be combined with get_cpu()/put_cpu(). + */ +#define per_cpu_ptr(ptr, cpu) SHIFT_PERCPU_PTR((ptr), per_cpu_offset((cpu))) + +extern void *__alloc_reserved_percpu(size_t size, size_t align); + +#else /* CONFIG_HAVE_DYNAMIC_PER_CPU_AREA */ + struct percpu_data { void *ptrs[1]; }; +/* pointer disguising messes up the kmemleak objects tracking */ +#ifndef CONFIG_DEBUG_KMEMLEAK #define __percpu_disguise(pdata) (struct percpu_data *)~(unsigned long)(pdata) -/* - * Use this to get to a cpu's version of the per-cpu object dynamically - * allocated. Non-atomic access to the current CPU's version should - * probably be combined with get_cpu()/put_cpu(). - */ -#define percpu_ptr(ptr, cpu) \ -({ \ - struct percpu_data *__p = __percpu_disguise(ptr); \ - (__typeof__(ptr))__p->ptrs[(cpu)]; \ +#else +#define __percpu_disguise(pdata) (struct percpu_data *)(pdata) +#endif + +#define per_cpu_ptr(ptr, cpu) \ +({ \ + struct percpu_data *__p = __percpu_disguise(ptr); \ + (__typeof__(ptr))__p->ptrs[(cpu)]; \ }) -extern void *__percpu_alloc_mask(size_t size, gfp_t gfp, cpumask_t *mask); -extern void percpu_free(void *__pdata); +#endif /* CONFIG_HAVE_DYNAMIC_PER_CPU_AREA */ + +extern void *__alloc_percpu(size_t size, size_t align); +extern void free_percpu(void *__pdata); #else /* CONFIG_SMP */ -#define percpu_ptr(ptr, cpu) ({ (void)(cpu); (ptr); }) +#define per_cpu_ptr(ptr, cpu) ({ (void)(cpu); (ptr); }) -static __always_inline void *__percpu_alloc_mask(size_t size, gfp_t gfp, cpumask_t *mask) +static inline void *__alloc_percpu(size_t size, size_t align) { - return kzalloc(size, gfp); + /* + * Can't easily make larger alignment work with kmalloc. WARN + * on it. Larger alignment should only be used for module + * percpu sections on SMP for which this path isn't used. 
+ */ + WARN_ON_ONCE(align > SMP_CACHE_BYTES); + return kzalloc(size, GFP_KERNEL); } -static inline void percpu_free(void *__pdata) +static inline void free_percpu(void *p) { - kfree(__pdata); + kfree(p); } #endif /* CONFIG_SMP */ -#define percpu_alloc_mask(size, gfp, mask) \ - __percpu_alloc_mask((size), (gfp), &(mask)) +#define alloc_percpu(type) (type *)__alloc_percpu(sizeof(type), \ + __alignof__(type)) + +/* + * Optional methods for optimized non-lvalue per-cpu variable access. + * + * @var can be a percpu variable or a field of it and its size should + * equal char, int or long. percpu_read() evaluates to a lvalue and + * all others to void. + * + * These operations are guaranteed to be atomic w.r.t. preemption. + * The generic versions use plain get/put_cpu_var(). Archs are + * encouraged to implement single-instruction alternatives which don't + * require preemption protection. + */ +#ifndef percpu_read +# define percpu_read(var) \ + ({ \ + typeof(per_cpu_var(var)) __tmp_var__; \ + __tmp_var__ = get_cpu_var(var); \ + put_cpu_var(var); \ + __tmp_var__; \ + }) +#endif + +#define __percpu_generic_to_op(var, val, op) \ +do { \ + get_cpu_var(var) op val; \ + put_cpu_var(var); \ +} while (0) + +#ifndef percpu_write +# define percpu_write(var, val) __percpu_generic_to_op(var, (val), =) +#endif + +#ifndef percpu_add +# define percpu_add(var, val) __percpu_generic_to_op(var, (val), +=) +#endif + +#ifndef percpu_sub +# define percpu_sub(var, val) __percpu_generic_to_op(var, (val), -=) +#endif -#define percpu_alloc(size, gfp) percpu_alloc_mask((size), (gfp), cpu_online_map) +#ifndef percpu_and +# define percpu_and(var, val) __percpu_generic_to_op(var, (val), &=) +#endif -/* (legacy) interface for use without CPU hotplug handling */ +#ifndef percpu_or +# define percpu_or(var, val) __percpu_generic_to_op(var, (val), |=) +#endif -#define __alloc_percpu(size) percpu_alloc_mask((size), GFP_KERNEL, \ - cpu_possible_map) -#define alloc_percpu(type) (type *)__alloc_percpu(sizeof(type)) -#define free_percpu(ptr) percpu_free((ptr)) -#define per_cpu_ptr(ptr, cpu) percpu_ptr((ptr), (cpu)) +#ifndef percpu_xor +# define percpu_xor(var, val) __percpu_generic_to_op(var, (val), ^=) +#endif #endif /* __LINUX_PERCPU_H */ diff --git a/include/linux/perf_counter.h b/include/linux/perf_counter.h new file mode 100644 index 00000000000..972f90d7a32 --- /dev/null +++ b/include/linux/perf_counter.h @@ -0,0 +1,807 @@ +/* + * Performance counters: + * + * Copyright (C) 2008-2009, Thomas Gleixner <tglx@linutronix.de> + * Copyright (C) 2008-2009, Red Hat, Inc., Ingo Molnar + * Copyright (C) 2008-2009, Red Hat, Inc., Peter Zijlstra + * + * Data type definitions, declarations, prototypes. 
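The allocation side of the rewritten percpu.h boils down to alloc_percpu(), per_cpu_ptr() and free_percpu(); a minimal sketch of a driver-style user, with struct my_stats and the function names invented for illustration:

#include <linux/types.h>
#include <linux/percpu.h>
#include <linux/smp.h>
#include <linux/errno.h>

struct my_stats {
        u64     rx_bytes;
        u64     rx_packets;
};

static struct my_stats *stats;          /* one instance per possible CPU */

static int my_stats_init(void)
{
        stats = alloc_percpu(struct my_stats);
        return stats ? 0 : -ENOMEM;
}

static void my_stats_account_rx(unsigned int len)
{
        struct my_stats *s = per_cpu_ptr(stats, get_cpu());

        s->rx_bytes   += len;           /* non-atomic: this CPU's copy only */
        s->rx_packets += 1;
        put_cpu();
}

static void my_stats_exit(void)
{
        free_percpu(stats);
}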
+ * + * Started by: Thomas Gleixner and Ingo Molnar + * + * For licencing details see kernel-base/COPYING + */ +#ifndef _LINUX_PERF_COUNTER_H +#define _LINUX_PERF_COUNTER_H + +#include <linux/types.h> +#include <linux/ioctl.h> +#include <asm/byteorder.h> + +/* + * User-space ABI bits: + */ + +/* + * attr.type + */ +enum perf_type_id { + PERF_TYPE_HARDWARE = 0, + PERF_TYPE_SOFTWARE = 1, + PERF_TYPE_TRACEPOINT = 2, + PERF_TYPE_HW_CACHE = 3, + PERF_TYPE_RAW = 4, + + PERF_TYPE_MAX, /* non-ABI */ +}; + +/* + * Generalized performance counter event types, used by the + * attr.event_id parameter of the sys_perf_counter_open() + * syscall: + */ +enum perf_hw_id { + /* + * Common hardware events, generalized by the kernel: + */ + PERF_COUNT_HW_CPU_CYCLES = 0, + PERF_COUNT_HW_INSTRUCTIONS = 1, + PERF_COUNT_HW_CACHE_REFERENCES = 2, + PERF_COUNT_HW_CACHE_MISSES = 3, + PERF_COUNT_HW_BRANCH_INSTRUCTIONS = 4, + PERF_COUNT_HW_BRANCH_MISSES = 5, + PERF_COUNT_HW_BUS_CYCLES = 6, + + PERF_COUNT_HW_MAX, /* non-ABI */ +}; + +/* + * Generalized hardware cache counters: + * + * { L1-D, L1-I, LLC, ITLB, DTLB, BPU } x + * { read, write, prefetch } x + * { accesses, misses } + */ +enum perf_hw_cache_id { + PERF_COUNT_HW_CACHE_L1D = 0, + PERF_COUNT_HW_CACHE_L1I = 1, + PERF_COUNT_HW_CACHE_LL = 2, + PERF_COUNT_HW_CACHE_DTLB = 3, + PERF_COUNT_HW_CACHE_ITLB = 4, + PERF_COUNT_HW_CACHE_BPU = 5, + + PERF_COUNT_HW_CACHE_MAX, /* non-ABI */ +}; + +enum perf_hw_cache_op_id { + PERF_COUNT_HW_CACHE_OP_READ = 0, + PERF_COUNT_HW_CACHE_OP_WRITE = 1, + PERF_COUNT_HW_CACHE_OP_PREFETCH = 2, + + PERF_COUNT_HW_CACHE_OP_MAX, /* non-ABI */ +}; + +enum perf_hw_cache_op_result_id { + PERF_COUNT_HW_CACHE_RESULT_ACCESS = 0, + PERF_COUNT_HW_CACHE_RESULT_MISS = 1, + + PERF_COUNT_HW_CACHE_RESULT_MAX, /* non-ABI */ +}; + +/* + * Special "software" counters provided by the kernel, even if the hardware + * does not support performance counters. These counters measure various + * physical and sw events of the kernel (and allow the profiling of them as + * well): + */ +enum perf_sw_ids { + PERF_COUNT_SW_CPU_CLOCK = 0, + PERF_COUNT_SW_TASK_CLOCK = 1, + PERF_COUNT_SW_PAGE_FAULTS = 2, + PERF_COUNT_SW_CONTEXT_SWITCHES = 3, + PERF_COUNT_SW_CPU_MIGRATIONS = 4, + PERF_COUNT_SW_PAGE_FAULTS_MIN = 5, + PERF_COUNT_SW_PAGE_FAULTS_MAJ = 6, + + PERF_COUNT_SW_MAX, /* non-ABI */ +}; + +/* + * Bits that can be set in attr.sample_type to request information + * in the overflow packets. 
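For PERF_TYPE_HW_CACHE the three enums above are combined into attr.config. One common packing, and the one used by the perf tools built against this ABI, is (cache_id) | (op_id << 8) | (result_id << 16); the excerpt itself does not spell the layout out, so treat it as an assumption to verify against the kernel's event parsing.

/* hypothetical helper: build attr.config for a generalized cache event */
#define HW_CACHE_CONFIG(cache, op, result) \
        ((cache) | ((op) << 8) | ((result) << 16))

/*
 * attr.type   = PERF_TYPE_HW_CACHE;
 * attr.config = HW_CACHE_CONFIG(PERF_COUNT_HW_CACHE_L1D,
 *                               PERF_COUNT_HW_CACHE_OP_READ,
 *                               PERF_COUNT_HW_CACHE_RESULT_MISS);
 */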
+ */ +enum perf_counter_sample_format { + PERF_SAMPLE_IP = 1U << 0, + PERF_SAMPLE_TID = 1U << 1, + PERF_SAMPLE_TIME = 1U << 2, + PERF_SAMPLE_ADDR = 1U << 3, + PERF_SAMPLE_READ = 1U << 4, + PERF_SAMPLE_CALLCHAIN = 1U << 5, + PERF_SAMPLE_ID = 1U << 6, + PERF_SAMPLE_CPU = 1U << 7, + PERF_SAMPLE_PERIOD = 1U << 8, + PERF_SAMPLE_STREAM_ID = 1U << 9, + PERF_SAMPLE_RAW = 1U << 10, + + PERF_SAMPLE_MAX = 1U << 11, /* non-ABI */ +}; + +/* + * The format of the data returned by read() on a perf counter fd, + * as specified by attr.read_format: + * + * struct read_format { + * { u64 value; + * { u64 time_enabled; } && PERF_FORMAT_ENABLED + * { u64 time_running; } && PERF_FORMAT_RUNNING + * { u64 id; } && PERF_FORMAT_ID + * } && !PERF_FORMAT_GROUP + * + * { u64 nr; + * { u64 time_enabled; } && PERF_FORMAT_ENABLED + * { u64 time_running; } && PERF_FORMAT_RUNNING + * { u64 value; + * { u64 id; } && PERF_FORMAT_ID + * } cntr[nr]; + * } && PERF_FORMAT_GROUP + * }; + */ +enum perf_counter_read_format { + PERF_FORMAT_TOTAL_TIME_ENABLED = 1U << 0, + PERF_FORMAT_TOTAL_TIME_RUNNING = 1U << 1, + PERF_FORMAT_ID = 1U << 2, + PERF_FORMAT_GROUP = 1U << 3, + + PERF_FORMAT_MAX = 1U << 4, /* non-ABI */ +}; + +#define PERF_ATTR_SIZE_VER0 64 /* sizeof first published struct */ + +/* + * Hardware event to monitor via a performance monitoring counter: + */ +struct perf_counter_attr { + + /* + * Major type: hardware/software/tracepoint/etc. + */ + __u32 type; + + /* + * Size of the attr structure, for fwd/bwd compat. + */ + __u32 size; + + /* + * Type specific configuration information. + */ + __u64 config; + + union { + __u64 sample_period; + __u64 sample_freq; + }; + + __u64 sample_type; + __u64 read_format; + + __u64 disabled : 1, /* off by default */ + inherit : 1, /* children inherit it */ + pinned : 1, /* must always be on PMU */ + exclusive : 1, /* only group on PMU */ + exclude_user : 1, /* don't count user */ + exclude_kernel : 1, /* ditto kernel */ + exclude_hv : 1, /* ditto hypervisor */ + exclude_idle : 1, /* don't count when idle */ + mmap : 1, /* include mmap data */ + comm : 1, /* include comm data */ + freq : 1, /* use freq, not period */ + inherit_stat : 1, /* per task counts */ + enable_on_exec : 1, /* next exec enables */ + task : 1, /* trace fork/exit */ + + __reserved_1 : 50; + + __u32 wakeup_events; /* wakeup every n events */ + __u32 __reserved_2; + + __u64 __reserved_3; +}; + +/* + * Ioctls that can be done on a perf counter fd: + */ +#define PERF_COUNTER_IOC_ENABLE _IO ('$', 0) +#define PERF_COUNTER_IOC_DISABLE _IO ('$', 1) +#define PERF_COUNTER_IOC_REFRESH _IO ('$', 2) +#define PERF_COUNTER_IOC_RESET _IO ('$', 3) +#define PERF_COUNTER_IOC_PERIOD _IOW('$', 4, u64) +#define PERF_COUNTER_IOC_SET_OUTPUT _IO ('$', 5) + +enum perf_counter_ioc_flags { + PERF_IOC_FLAG_GROUP = 1U << 0, +}; + +/* + * Structure of the page that can be mapped via mmap + */ +struct perf_counter_mmap_page { + __u32 version; /* version number of this structure */ + __u32 compat_version; /* lowest version this is compat with */ + + /* + * Bits needed to read the hw counters in user-space. + * + * u32 seq; + * s64 count; + * + * do { + * seq = pc->lock; + * + * barrier() + * if (pc->index) { + * count = pmc_read(pc->index - 1); + * count += pc->offset; + * } else + * goto regular_read; + * + * barrier(); + * } while (pc->lock != seq); + * + * NOTE: for obvious reason this only works on self-monitoring + * processes. 
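Spelled out as C, the self-monitoring read loop from the comment above becomes something like the routine below. The lock, index and offset fields it uses are defined on the mmap page just below; pmc_read() stands in for the architecture's user-level counter read (RDPMC on x86) and is only an assumed helper here.

/* compiler barrier, as in the pseudo-code above */
#define barrier()       asm volatile("" ::: "memory")

/* assumed arch-specific user-level counter read, e.g. RDPMC on x86 */
extern __u64 pmc_read(unsigned int idx);

static __s64 mmap_read_self(volatile struct perf_counter_mmap_page *pc)
{
        __u32 seq, idx;
        __s64 count = 0;

        do {
                seq = pc->lock;
                barrier();

                idx = pc->index;
                if (idx)
                        count = pmc_read(idx - 1) + pc->offset;

                barrier();
        } while (pc->lock != seq);

        if (!idx)
                return -1;      /* not on the PMU: fall back to read(2) on the fd */
        return count;
}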
+ */ + __u32 lock; /* seqlock for synchronization */ + __u32 index; /* hardware counter identifier */ + __s64 offset; /* add to hardware counter value */ + __u64 time_enabled; /* time counter active */ + __u64 time_running; /* time counter on cpu */ + + /* + * Hole for extension of the self monitor capabilities + */ + + __u64 __reserved[123]; /* align to 1k */ + + /* + * Control data for the mmap() data buffer. + * + * User-space reading the @data_head value should issue an rmb(), on + * SMP capable platforms, after reading this value -- see + * perf_counter_wakeup(). + * + * When the mapping is PROT_WRITE the @data_tail value should be + * written by userspace to reflect the last read data. In this case + * the kernel will not over-write unread data. + */ + __u64 data_head; /* head in the data section */ + __u64 data_tail; /* user-space written tail */ +}; + +#define PERF_EVENT_MISC_CPUMODE_MASK (3 << 0) +#define PERF_EVENT_MISC_CPUMODE_UNKNOWN (0 << 0) +#define PERF_EVENT_MISC_KERNEL (1 << 0) +#define PERF_EVENT_MISC_USER (2 << 0) +#define PERF_EVENT_MISC_HYPERVISOR (3 << 0) + +struct perf_event_header { + __u32 type; + __u16 misc; + __u16 size; +}; + +enum perf_event_type { + + /* + * The MMAP events record the PROT_EXEC mappings so that we can + * correlate userspace IPs to code. They have the following structure: + * + * struct { + * struct perf_event_header header; + * + * u32 pid, tid; + * u64 addr; + * u64 len; + * u64 pgoff; + * char filename[]; + * }; + */ + PERF_EVENT_MMAP = 1, + + /* + * struct { + * struct perf_event_header header; + * u64 id; + * u64 lost; + * }; + */ + PERF_EVENT_LOST = 2, + + /* + * struct { + * struct perf_event_header header; + * + * u32 pid, tid; + * char comm[]; + * }; + */ + PERF_EVENT_COMM = 3, + + /* + * struct { + * struct perf_event_header header; + * u32 pid, ppid; + * u32 tid, ptid; + * }; + */ + PERF_EVENT_EXIT = 4, + + /* + * struct { + * struct perf_event_header header; + * u64 time; + * u64 id; + * u64 stream_id; + * }; + */ + PERF_EVENT_THROTTLE = 5, + PERF_EVENT_UNTHROTTLE = 6, + + /* + * struct { + * struct perf_event_header header; + * u32 pid, ppid; + * u32 tid, ptid; + * }; + */ + PERF_EVENT_FORK = 7, + + /* + * struct { + * struct perf_event_header header; + * u32 pid, tid; + * + * struct read_format values; + * }; + */ + PERF_EVENT_READ = 8, + + /* + * struct { + * struct perf_event_header header; + * + * { u64 ip; } && PERF_SAMPLE_IP + * { u32 pid, tid; } && PERF_SAMPLE_TID + * { u64 time; } && PERF_SAMPLE_TIME + * { u64 addr; } && PERF_SAMPLE_ADDR + * { u64 id; } && PERF_SAMPLE_ID + * { u64 stream_id;} && PERF_SAMPLE_STREAM_ID + * { u32 cpu, res; } && PERF_SAMPLE_CPU + * { u64 period; } && PERF_SAMPLE_PERIOD + * + * { struct read_format values; } && PERF_SAMPLE_READ + * + * { u64 nr, + * u64 ips[nr]; } && PERF_SAMPLE_CALLCHAIN + * + * # + * # The RAW record below is opaque data wrt the ABI + * # + * # That is, the ABI doesn't make any promises wrt to + * # the stability of its content, it may vary depending + * # on event, hardware, kernel version and phase of + * # the moon. + * # + * # In other words, PERF_SAMPLE_RAW contents are not an ABI. 
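The data_head/data_tail rules above translate into a reader loop along the following lines. This is a simplified sketch: it assumes the usual layout of one metadata page followed by a power-of-two-sized data area, ignores records that wrap the end of the buffer, and leaves rmb() and the record consumer as assumed helpers.

extern void consume_record(struct perf_event_header *hdr);     /* hypothetical */

static void drain_ring(struct perf_counter_mmap_page *pc,
                       unsigned char *data, __u64 data_size)
{
        __u64 head = pc->data_head;
        __u64 tail = pc->data_tail;

        rmb();                          /* per the rule documented for data_head */

        while (tail != head) {
                struct perf_event_header *hdr =
                        (void *)(data + (tail & (data_size - 1)));

                /* hdr->type selects one of the perf_event_type record layouts */
                consume_record(hdr);
                tail += hdr->size;
        }

        pc->data_tail = tail;           /* only honoured on PROT_WRITE mappings */
}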
+ * # + * + * { u32 size; + * char data[size];}&& PERF_SAMPLE_RAW + * }; + */ + PERF_EVENT_SAMPLE = 9, + + PERF_EVENT_MAX, /* non-ABI */ +}; + +enum perf_callchain_context { + PERF_CONTEXT_HV = (__u64)-32, + PERF_CONTEXT_KERNEL = (__u64)-128, + PERF_CONTEXT_USER = (__u64)-512, + + PERF_CONTEXT_GUEST = (__u64)-2048, + PERF_CONTEXT_GUEST_KERNEL = (__u64)-2176, + PERF_CONTEXT_GUEST_USER = (__u64)-2560, + + PERF_CONTEXT_MAX = (__u64)-4095, +}; + +#define PERF_FLAG_FD_NO_GROUP (1U << 0) +#define PERF_FLAG_FD_OUTPUT (1U << 1) + +#ifdef __KERNEL__ +/* + * Kernel-internal data types and definitions: + */ + +#ifdef CONFIG_PERF_COUNTERS +# include <asm/perf_counter.h> +#endif + +#include <linux/list.h> +#include <linux/mutex.h> +#include <linux/rculist.h> +#include <linux/rcupdate.h> +#include <linux/spinlock.h> +#include <linux/hrtimer.h> +#include <linux/fs.h> +#include <linux/pid_namespace.h> +#include <asm/atomic.h> + +#define PERF_MAX_STACK_DEPTH 255 + +struct perf_callchain_entry { + __u64 nr; + __u64 ip[PERF_MAX_STACK_DEPTH]; +}; + +struct perf_raw_record { + u32 size; + void *data; +}; + +struct task_struct; + +/** + * struct hw_perf_counter - performance counter hardware details: + */ +struct hw_perf_counter { +#ifdef CONFIG_PERF_COUNTERS + union { + struct { /* hardware */ + u64 config; + unsigned long config_base; + unsigned long counter_base; + int idx; + }; + union { /* software */ + atomic64_t count; + struct hrtimer hrtimer; + }; + }; + atomic64_t prev_count; + u64 sample_period; + u64 last_period; + atomic64_t period_left; + u64 interrupts; + + u64 freq_count; + u64 freq_interrupts; + u64 freq_stamp; +#endif +}; + +struct perf_counter; + +/** + * struct pmu - generic performance monitoring unit + */ +struct pmu { + int (*enable) (struct perf_counter *counter); + void (*disable) (struct perf_counter *counter); + void (*read) (struct perf_counter *counter); + void (*unthrottle) (struct perf_counter *counter); +}; + +/** + * enum perf_counter_active_state - the states of a counter + */ +enum perf_counter_active_state { + PERF_COUNTER_STATE_ERROR = -2, + PERF_COUNTER_STATE_OFF = -1, + PERF_COUNTER_STATE_INACTIVE = 0, + PERF_COUNTER_STATE_ACTIVE = 1, +}; + +struct file; + +struct perf_mmap_data { + struct rcu_head rcu_head; + int nr_pages; /* nr of data pages */ + int writable; /* are we writable */ + int nr_locked; /* nr pages mlocked */ + + atomic_t poll; /* POLL_ for wakeups */ + atomic_t events; /* event limit */ + + atomic_long_t head; /* write position */ + atomic_long_t done_head; /* completed head */ + + atomic_t lock; /* concurrent writes */ + atomic_t wakeup; /* needs a wakeup */ + atomic_t lost; /* nr records lost */ + + struct perf_counter_mmap_page *user_page; + void *data_pages[0]; +}; + +struct perf_pending_entry { + struct perf_pending_entry *next; + void (*func)(struct perf_pending_entry *); +}; + +/** + * struct perf_counter - performance counter kernel representation: + */ +struct perf_counter { +#ifdef CONFIG_PERF_COUNTERS + struct list_head list_entry; + struct list_head event_entry; + struct list_head sibling_list; + int nr_siblings; + struct perf_counter *group_leader; + struct perf_counter *output; + const struct pmu *pmu; + + enum perf_counter_active_state state; + atomic64_t count; + + /* + * These are the total time in nanoseconds that the counter + * has been enabled (i.e. eligible to run, and the task has + * been scheduled in, if this is a per-task counter) + * and running (scheduled onto the CPU), respectively. 
+ * + * They are computed from tstamp_enabled, tstamp_running and + * tstamp_stopped when the counter is in INACTIVE or ACTIVE state. + */ + u64 total_time_enabled; + u64 total_time_running; + + /* + * These are timestamps used for computing total_time_enabled + * and total_time_running when the counter is in INACTIVE or + * ACTIVE state, measured in nanoseconds from an arbitrary point + * in time. + * tstamp_enabled: the notional time when the counter was enabled + * tstamp_running: the notional time when the counter was scheduled on + * tstamp_stopped: in INACTIVE state, the notional time when the + * counter was scheduled off. + */ + u64 tstamp_enabled; + u64 tstamp_running; + u64 tstamp_stopped; + + struct perf_counter_attr attr; + struct hw_perf_counter hw; + + struct perf_counter_context *ctx; + struct file *filp; + + /* + * These accumulate total time (in nanoseconds) that children + * counters have been enabled and running, respectively. + */ + atomic64_t child_total_time_enabled; + atomic64_t child_total_time_running; + + /* + * Protect attach/detach and child_list: + */ + struct mutex child_mutex; + struct list_head child_list; + struct perf_counter *parent; + + int oncpu; + int cpu; + + struct list_head owner_entry; + struct task_struct *owner; + + /* mmap bits */ + struct mutex mmap_mutex; + atomic_t mmap_count; + struct perf_mmap_data *data; + + /* poll related */ + wait_queue_head_t waitq; + struct fasync_struct *fasync; + + /* delayed work for NMIs and such */ + int pending_wakeup; + int pending_kill; + int pending_disable; + struct perf_pending_entry pending; + + atomic_t event_limit; + + void (*destroy)(struct perf_counter *); + struct rcu_head rcu_head; + + struct pid_namespace *ns; + u64 id; +#endif +}; + +/** + * struct perf_counter_context - counter context structure + * + * Used as a container for task counters and CPU counters as well: + */ +struct perf_counter_context { + /* + * Protect the states of the counters in the list, + * nr_active, and the list: + */ + spinlock_t lock; + /* + * Protect the list of counters. Locking either mutex or lock + * is sufficient to ensure the list doesn't change; to change + * the list you need to lock both the mutex and the spinlock. + */ + struct mutex mutex; + + struct list_head counter_list; + struct list_head event_list; + int nr_counters; + int nr_active; + int is_active; + int nr_stat; + atomic_t refcount; + struct task_struct *task; + + /* + * Context clock, runs when context enabled. + */ + u64 time; + u64 timestamp; + + /* + * These fields let us detect when two contexts have both + * been cloned (inherited) from a common ancestor. 
+ */ + struct perf_counter_context *parent_ctx; + u64 parent_gen; + u64 generation; + int pin_count; + struct rcu_head rcu_head; +}; + +/** + * struct perf_counter_cpu_context - per cpu counter context structure + */ +struct perf_cpu_context { + struct perf_counter_context ctx; + struct perf_counter_context *task_ctx; + int active_oncpu; + int max_pertask; + int exclusive; + + /* + * Recursion avoidance: + * + * task, softirq, irq, nmi context + */ + int recursion[4]; +}; + +#ifdef CONFIG_PERF_COUNTERS + +/* + * Set by architecture code: + */ +extern int perf_max_counters; + +extern const struct pmu *hw_perf_counter_init(struct perf_counter *counter); + +extern void perf_counter_task_sched_in(struct task_struct *task, int cpu); +extern void perf_counter_task_sched_out(struct task_struct *task, + struct task_struct *next, int cpu); +extern void perf_counter_task_tick(struct task_struct *task, int cpu); +extern int perf_counter_init_task(struct task_struct *child); +extern void perf_counter_exit_task(struct task_struct *child); +extern void perf_counter_free_task(struct task_struct *task); +extern void set_perf_counter_pending(void); +extern void perf_counter_do_pending(void); +extern void perf_counter_print_debug(void); +extern void __perf_disable(void); +extern bool __perf_enable(void); +extern void perf_disable(void); +extern void perf_enable(void); +extern int perf_counter_task_disable(void); +extern int perf_counter_task_enable(void); +extern int hw_perf_group_sched_in(struct perf_counter *group_leader, + struct perf_cpu_context *cpuctx, + struct perf_counter_context *ctx, int cpu); +extern void perf_counter_update_userpage(struct perf_counter *counter); + +struct perf_sample_data { + struct pt_regs *regs; + u64 addr; + u64 period; + struct perf_raw_record *raw; +}; + +extern int perf_counter_overflow(struct perf_counter *counter, int nmi, + struct perf_sample_data *data); +extern void perf_counter_output(struct perf_counter *counter, int nmi, + struct perf_sample_data *data); + +/* + * Return 1 for a software counter, 0 for a hardware counter + */ +static inline int is_software_counter(struct perf_counter *counter) +{ + return (counter->attr.type != PERF_TYPE_RAW) && + (counter->attr.type != PERF_TYPE_HARDWARE) && + (counter->attr.type != PERF_TYPE_HW_CACHE); +} + +extern atomic_t perf_swcounter_enabled[PERF_COUNT_SW_MAX]; + +extern void __perf_swcounter_event(u32, u64, int, struct pt_regs *, u64); + +static inline void +perf_swcounter_event(u32 event, u64 nr, int nmi, struct pt_regs *regs, u64 addr) +{ + if (atomic_read(&perf_swcounter_enabled[event])) + __perf_swcounter_event(event, nr, nmi, regs, addr); +} + +extern void __perf_counter_mmap(struct vm_area_struct *vma); + +static inline void perf_counter_mmap(struct vm_area_struct *vma) +{ + if (vma->vm_flags & VM_EXEC) + __perf_counter_mmap(vma); +} + +extern void perf_counter_comm(struct task_struct *tsk); +extern void perf_counter_fork(struct task_struct *tsk); + +extern struct perf_callchain_entry *perf_callchain(struct pt_regs *regs); + +extern int sysctl_perf_counter_paranoid; +extern int sysctl_perf_counter_mlock; +extern int sysctl_perf_counter_sample_rate; + +extern void perf_counter_init(void); +extern void perf_tpcounter_event(int event_id, u64 addr, u64 count, + void *record, int entry_size); + +#ifndef perf_misc_flags +#define perf_misc_flags(regs) (user_mode(regs) ? 
PERF_EVENT_MISC_USER : \ + PERF_EVENT_MISC_KERNEL) +#define perf_instruction_pointer(regs) instruction_pointer(regs) +#endif + +#else +static inline void +perf_counter_task_sched_in(struct task_struct *task, int cpu) { } +static inline void +perf_counter_task_sched_out(struct task_struct *task, + struct task_struct *next, int cpu) { } +static inline void +perf_counter_task_tick(struct task_struct *task, int cpu) { } +static inline int perf_counter_init_task(struct task_struct *child) { return 0; } +static inline void perf_counter_exit_task(struct task_struct *child) { } +static inline void perf_counter_free_task(struct task_struct *task) { } +static inline void perf_counter_do_pending(void) { } +static inline void perf_counter_print_debug(void) { } +static inline void perf_disable(void) { } +static inline void perf_enable(void) { } +static inline int perf_counter_task_disable(void) { return -EINVAL; } +static inline int perf_counter_task_enable(void) { return -EINVAL; } + +static inline void +perf_swcounter_event(u32 event, u64 nr, int nmi, + struct pt_regs *regs, u64 addr) { } + +static inline void perf_counter_mmap(struct vm_area_struct *vma) { } +static inline void perf_counter_comm(struct task_struct *tsk) { } +static inline void perf_counter_fork(struct task_struct *tsk) { } +static inline void perf_counter_init(void) { } +#endif + +#endif /* __KERNEL__ */ +#endif /* _LINUX_PERF_COUNTER_H */ diff --git a/include/linux/personality.h b/include/linux/personality.h index a84e9ff9b27..126120819a0 100644 --- a/include/linux/personality.h +++ b/include/linux/personality.h @@ -40,7 +40,10 @@ enum { * Security-relevant compatibility flags that must be * cleared upon setuid or setgid exec: */ -#define PER_CLEAR_ON_SETID (READ_IMPLIES_EXEC|ADDR_NO_RANDOMIZE) +#define PER_CLEAR_ON_SETID (READ_IMPLIES_EXEC | \ + ADDR_NO_RANDOMIZE | \ + ADDR_COMPAT_LAYOUT | \ + MMAP_PAGE_ZERO) /* * Personality types. 
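Putting the user-space ABI together, a minimal self-profiling program might look like the sketch below. The header documents the attr layout, the read_format layout and the ioctls; the syscall itself is not part of it, so the (attr, pid, cpu, group_fd, flags) argument order and the __NR_perf_counter_open number are assumptions to be checked against the architecture's syscall table.

#include <stdio.h>
#include <string.h>
#include <unistd.h>
#include <sys/ioctl.h>
#include <sys/syscall.h>
#include <linux/perf_counter.h>

static long perf_counter_open(struct perf_counter_attr *attr, pid_t pid,
                              int cpu, int group_fd, unsigned long flags)
{
        /* __NR_perf_counter_open is arch-specific and assumed to exist here */
        return syscall(__NR_perf_counter_open, attr, pid, cpu, group_fd, flags);
}

int main(void)
{
        struct perf_counter_attr attr;
        struct { __u64 value, enabled, running; } res; /* per read_format docs */
        int fd;

        memset(&attr, 0, sizeof(attr));
        attr.size           = sizeof(attr);
        attr.type           = PERF_TYPE_HARDWARE;
        attr.config         = PERF_COUNT_HW_INSTRUCTIONS;
        attr.disabled       = 1;
        attr.exclude_kernel = 1;
        attr.read_format    = PERF_FORMAT_TOTAL_TIME_ENABLED |
                              PERF_FORMAT_TOTAL_TIME_RUNNING;

        fd = perf_counter_open(&attr, 0, -1, -1, 0);    /* this task, any CPU */
        if (fd < 0)
                return 1;

        ioctl(fd, PERF_COUNTER_IOC_ENABLE);
        /* ... run the workload being measured ... */
        ioctl(fd, PERF_COUNTER_IOC_DISABLE);

        if (read(fd, &res, sizeof(res)) == sizeof(res) && res.running)
                /* compensate for time-multiplexing: count * enabled / running */
                printf("%llu instructions (scaled)\n",
                       (unsigned long long)(res.value * res.enabled / res.running));

        close(fd);
        return 0;
}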
diff --git a/include/linux/pfkeyv2.h b/include/linux/pfkeyv2.h index 01b262959f2..228b0b6306b 100644 --- a/include/linux/pfkeyv2.h +++ b/include/linux/pfkeyv2.h @@ -12,187 +12,187 @@ #define PFKEYV2_REVISION 199806L struct sadb_msg { - uint8_t sadb_msg_version; - uint8_t sadb_msg_type; - uint8_t sadb_msg_errno; - uint8_t sadb_msg_satype; - uint16_t sadb_msg_len; - uint16_t sadb_msg_reserved; - uint32_t sadb_msg_seq; - uint32_t sadb_msg_pid; + __u8 sadb_msg_version; + __u8 sadb_msg_type; + __u8 sadb_msg_errno; + __u8 sadb_msg_satype; + __u16 sadb_msg_len; + __u16 sadb_msg_reserved; + __u32 sadb_msg_seq; + __u32 sadb_msg_pid; } __attribute__((packed)); /* sizeof(struct sadb_msg) == 16 */ struct sadb_ext { - uint16_t sadb_ext_len; - uint16_t sadb_ext_type; + __u16 sadb_ext_len; + __u16 sadb_ext_type; } __attribute__((packed)); /* sizeof(struct sadb_ext) == 4 */ struct sadb_sa { - uint16_t sadb_sa_len; - uint16_t sadb_sa_exttype; + __u16 sadb_sa_len; + __u16 sadb_sa_exttype; __be32 sadb_sa_spi; - uint8_t sadb_sa_replay; - uint8_t sadb_sa_state; - uint8_t sadb_sa_auth; - uint8_t sadb_sa_encrypt; - uint32_t sadb_sa_flags; + __u8 sadb_sa_replay; + __u8 sadb_sa_state; + __u8 sadb_sa_auth; + __u8 sadb_sa_encrypt; + __u32 sadb_sa_flags; } __attribute__((packed)); /* sizeof(struct sadb_sa) == 16 */ struct sadb_lifetime { - uint16_t sadb_lifetime_len; - uint16_t sadb_lifetime_exttype; - uint32_t sadb_lifetime_allocations; - uint64_t sadb_lifetime_bytes; - uint64_t sadb_lifetime_addtime; - uint64_t sadb_lifetime_usetime; + __u16 sadb_lifetime_len; + __u16 sadb_lifetime_exttype; + __u32 sadb_lifetime_allocations; + __u64 sadb_lifetime_bytes; + __u64 sadb_lifetime_addtime; + __u64 sadb_lifetime_usetime; } __attribute__((packed)); /* sizeof(struct sadb_lifetime) == 32 */ struct sadb_address { - uint16_t sadb_address_len; - uint16_t sadb_address_exttype; - uint8_t sadb_address_proto; - uint8_t sadb_address_prefixlen; - uint16_t sadb_address_reserved; + __u16 sadb_address_len; + __u16 sadb_address_exttype; + __u8 sadb_address_proto; + __u8 sadb_address_prefixlen; + __u16 sadb_address_reserved; } __attribute__((packed)); /* sizeof(struct sadb_address) == 8 */ struct sadb_key { - uint16_t sadb_key_len; - uint16_t sadb_key_exttype; - uint16_t sadb_key_bits; - uint16_t sadb_key_reserved; + __u16 sadb_key_len; + __u16 sadb_key_exttype; + __u16 sadb_key_bits; + __u16 sadb_key_reserved; } __attribute__((packed)); /* sizeof(struct sadb_key) == 8 */ struct sadb_ident { - uint16_t sadb_ident_len; - uint16_t sadb_ident_exttype; - uint16_t sadb_ident_type; - uint16_t sadb_ident_reserved; - uint64_t sadb_ident_id; + __u16 sadb_ident_len; + __u16 sadb_ident_exttype; + __u16 sadb_ident_type; + __u16 sadb_ident_reserved; + __u64 sadb_ident_id; } __attribute__((packed)); /* sizeof(struct sadb_ident) == 16 */ struct sadb_sens { - uint16_t sadb_sens_len; - uint16_t sadb_sens_exttype; - uint32_t sadb_sens_dpd; - uint8_t sadb_sens_sens_level; - uint8_t sadb_sens_sens_len; - uint8_t sadb_sens_integ_level; - uint8_t sadb_sens_integ_len; - uint32_t sadb_sens_reserved; + __u16 sadb_sens_len; + __u16 sadb_sens_exttype; + __u32 sadb_sens_dpd; + __u8 sadb_sens_sens_level; + __u8 sadb_sens_sens_len; + __u8 sadb_sens_integ_level; + __u8 sadb_sens_integ_len; + __u32 sadb_sens_reserved; } __attribute__((packed)); /* sizeof(struct sadb_sens) == 16 */ /* followed by: - uint64_t sadb_sens_bitmap[sens_len]; - uint64_t sadb_integ_bitmap[integ_len]; */ + __u64 sadb_sens_bitmap[sens_len]; + __u64 sadb_integ_bitmap[integ_len]; */ struct sadb_prop 
{ - uint16_t sadb_prop_len; - uint16_t sadb_prop_exttype; - uint8_t sadb_prop_replay; - uint8_t sadb_prop_reserved[3]; + __u16 sadb_prop_len; + __u16 sadb_prop_exttype; + __u8 sadb_prop_replay; + __u8 sadb_prop_reserved[3]; } __attribute__((packed)); /* sizeof(struct sadb_prop) == 8 */ /* followed by: struct sadb_comb sadb_combs[(sadb_prop_len + - sizeof(uint64_t) - sizeof(struct sadb_prop)) / + sizeof(__u64) - sizeof(struct sadb_prop)) / sizeof(struct sadb_comb)]; */ struct sadb_comb { - uint8_t sadb_comb_auth; - uint8_t sadb_comb_encrypt; - uint16_t sadb_comb_flags; - uint16_t sadb_comb_auth_minbits; - uint16_t sadb_comb_auth_maxbits; - uint16_t sadb_comb_encrypt_minbits; - uint16_t sadb_comb_encrypt_maxbits; - uint32_t sadb_comb_reserved; - uint32_t sadb_comb_soft_allocations; - uint32_t sadb_comb_hard_allocations; - uint64_t sadb_comb_soft_bytes; - uint64_t sadb_comb_hard_bytes; - uint64_t sadb_comb_soft_addtime; - uint64_t sadb_comb_hard_addtime; - uint64_t sadb_comb_soft_usetime; - uint64_t sadb_comb_hard_usetime; + __u8 sadb_comb_auth; + __u8 sadb_comb_encrypt; + __u16 sadb_comb_flags; + __u16 sadb_comb_auth_minbits; + __u16 sadb_comb_auth_maxbits; + __u16 sadb_comb_encrypt_minbits; + __u16 sadb_comb_encrypt_maxbits; + __u32 sadb_comb_reserved; + __u32 sadb_comb_soft_allocations; + __u32 sadb_comb_hard_allocations; + __u64 sadb_comb_soft_bytes; + __u64 sadb_comb_hard_bytes; + __u64 sadb_comb_soft_addtime; + __u64 sadb_comb_hard_addtime; + __u64 sadb_comb_soft_usetime; + __u64 sadb_comb_hard_usetime; } __attribute__((packed)); /* sizeof(struct sadb_comb) == 72 */ struct sadb_supported { - uint16_t sadb_supported_len; - uint16_t sadb_supported_exttype; - uint32_t sadb_supported_reserved; + __u16 sadb_supported_len; + __u16 sadb_supported_exttype; + __u32 sadb_supported_reserved; } __attribute__((packed)); /* sizeof(struct sadb_supported) == 8 */ /* followed by: struct sadb_alg sadb_algs[(sadb_supported_len + - sizeof(uint64_t) - sizeof(struct sadb_supported)) / + sizeof(__u64) - sizeof(struct sadb_supported)) / sizeof(struct sadb_alg)]; */ struct sadb_alg { - uint8_t sadb_alg_id; - uint8_t sadb_alg_ivlen; - uint16_t sadb_alg_minbits; - uint16_t sadb_alg_maxbits; - uint16_t sadb_alg_reserved; + __u8 sadb_alg_id; + __u8 sadb_alg_ivlen; + __u16 sadb_alg_minbits; + __u16 sadb_alg_maxbits; + __u16 sadb_alg_reserved; } __attribute__((packed)); /* sizeof(struct sadb_alg) == 8 */ struct sadb_spirange { - uint16_t sadb_spirange_len; - uint16_t sadb_spirange_exttype; - uint32_t sadb_spirange_min; - uint32_t sadb_spirange_max; - uint32_t sadb_spirange_reserved; + __u16 sadb_spirange_len; + __u16 sadb_spirange_exttype; + __u32 sadb_spirange_min; + __u32 sadb_spirange_max; + __u32 sadb_spirange_reserved; } __attribute__((packed)); /* sizeof(struct sadb_spirange) == 16 */ struct sadb_x_kmprivate { - uint16_t sadb_x_kmprivate_len; - uint16_t sadb_x_kmprivate_exttype; - uint32_t sadb_x_kmprivate_reserved; + __u16 sadb_x_kmprivate_len; + __u16 sadb_x_kmprivate_exttype; + __u32 sadb_x_kmprivate_reserved; } __attribute__((packed)); /* sizeof(struct sadb_x_kmprivate) == 8 */ struct sadb_x_sa2 { - uint16_t sadb_x_sa2_len; - uint16_t sadb_x_sa2_exttype; - uint8_t sadb_x_sa2_mode; - uint8_t sadb_x_sa2_reserved1; - uint16_t sadb_x_sa2_reserved2; - uint32_t sadb_x_sa2_sequence; - uint32_t sadb_x_sa2_reqid; + __u16 sadb_x_sa2_len; + __u16 sadb_x_sa2_exttype; + __u8 sadb_x_sa2_mode; + __u8 sadb_x_sa2_reserved1; + __u16 sadb_x_sa2_reserved2; + __u32 sadb_x_sa2_sequence; + __u32 sadb_x_sa2_reqid; } 
__attribute__((packed)); /* sizeof(struct sadb_x_sa2) == 16 */ struct sadb_x_policy { - uint16_t sadb_x_policy_len; - uint16_t sadb_x_policy_exttype; - uint16_t sadb_x_policy_type; - uint8_t sadb_x_policy_dir; - uint8_t sadb_x_policy_reserved; - uint32_t sadb_x_policy_id; - uint32_t sadb_x_policy_priority; + __u16 sadb_x_policy_len; + __u16 sadb_x_policy_exttype; + __u16 sadb_x_policy_type; + __u8 sadb_x_policy_dir; + __u8 sadb_x_policy_reserved; + __u32 sadb_x_policy_id; + __u32 sadb_x_policy_priority; } __attribute__((packed)); /* sizeof(struct sadb_x_policy) == 16 */ struct sadb_x_ipsecrequest { - uint16_t sadb_x_ipsecrequest_len; - uint16_t sadb_x_ipsecrequest_proto; - uint8_t sadb_x_ipsecrequest_mode; - uint8_t sadb_x_ipsecrequest_level; - uint16_t sadb_x_ipsecrequest_reserved1; - uint32_t sadb_x_ipsecrequest_reqid; - uint32_t sadb_x_ipsecrequest_reserved2; + __u16 sadb_x_ipsecrequest_len; + __u16 sadb_x_ipsecrequest_proto; + __u8 sadb_x_ipsecrequest_mode; + __u8 sadb_x_ipsecrequest_level; + __u16 sadb_x_ipsecrequest_reserved1; + __u32 sadb_x_ipsecrequest_reqid; + __u32 sadb_x_ipsecrequest_reserved2; } __attribute__((packed)); /* sizeof(struct sadb_x_ipsecrequest) == 16 */ @@ -200,38 +200,38 @@ struct sadb_x_ipsecrequest { * type of NAT-T is supported, draft-ietf-ipsec-udp-encaps-06 */ struct sadb_x_nat_t_type { - uint16_t sadb_x_nat_t_type_len; - uint16_t sadb_x_nat_t_type_exttype; - uint8_t sadb_x_nat_t_type_type; - uint8_t sadb_x_nat_t_type_reserved[3]; + __u16 sadb_x_nat_t_type_len; + __u16 sadb_x_nat_t_type_exttype; + __u8 sadb_x_nat_t_type_type; + __u8 sadb_x_nat_t_type_reserved[3]; } __attribute__((packed)); /* sizeof(struct sadb_x_nat_t_type) == 8 */ /* Pass a NAT Traversal port (Source or Dest port) */ struct sadb_x_nat_t_port { - uint16_t sadb_x_nat_t_port_len; - uint16_t sadb_x_nat_t_port_exttype; + __u16 sadb_x_nat_t_port_len; + __u16 sadb_x_nat_t_port_exttype; __be16 sadb_x_nat_t_port_port; - uint16_t sadb_x_nat_t_port_reserved; + __u16 sadb_x_nat_t_port_reserved; } __attribute__((packed)); /* sizeof(struct sadb_x_nat_t_port) == 8 */ /* Generic LSM security context */ struct sadb_x_sec_ctx { - uint16_t sadb_x_sec_len; - uint16_t sadb_x_sec_exttype; - uint8_t sadb_x_ctx_alg; /* LSMs: e.g., selinux == 1 */ - uint8_t sadb_x_ctx_doi; - uint16_t sadb_x_ctx_len; + __u16 sadb_x_sec_len; + __u16 sadb_x_sec_exttype; + __u8 sadb_x_ctx_alg; /* LSMs: e.g., selinux == 1 */ + __u8 sadb_x_ctx_doi; + __u16 sadb_x_ctx_len; } __attribute__((packed)); /* sizeof(struct sadb_sec_ctx) = 8 */ /* Used by MIGRATE to pass addresses IKE will use to perform * negotiation with the peer */ struct sadb_x_kmaddress { - uint16_t sadb_x_kmaddress_len; - uint16_t sadb_x_kmaddress_exttype; - uint32_t sadb_x_kmaddress_reserved; + __u16 sadb_x_kmaddress_len; + __u16 sadb_x_kmaddress_exttype; + __u32 sadb_x_kmaddress_reserved; } __attribute__((packed)); /* sizeof(struct sadb_x_kmaddress) == 8 */ diff --git a/include/linux/phonet.h b/include/linux/phonet.h index 4157faa857b..1ef5a078183 100644 --- a/include/linux/phonet.h +++ b/include/linux/phonet.h @@ -23,6 +23,8 @@ #ifndef LINUX_PHONET_H #define LINUX_PHONET_H +#include <linux/types.h> + /* Automatic protocol selection */ #define PN_PROTO_TRANSPORT 0 /* Phonet datagram socket */ @@ -97,6 +99,9 @@ struct sockaddr_pn { __u8 spn_zero[sizeof(struct sockaddr) - sizeof(sa_family_t) - 3]; } __attribute__ ((packed)); +/* Well known address */ +#define PN_DEV_PC 0x10 + static inline __u16 pn_object(__u8 addr, __u16 port) { return (addr << 8) | (port & 0x3ff); @@ 
-168,4 +173,21 @@ static inline __u8 pn_sockaddr_get_resource(const struct sockaddr_pn *spn) return spn->spn_resource; } +/* Phonet device ioctl requests */ +#ifdef __KERNEL__ +#define SIOCPNGAUTOCONF (SIOCDEVPRIVATE + 0) + +struct if_phonet_autoconf { + uint8_t device; +}; + +struct if_phonet_req { + char ifr_phonet_name[16]; + union { + struct if_phonet_autoconf ifru_phonet_autoconf; + } ifr_ifru; +}; +#define ifr_phonet_autoconf ifr_ifru.ifru_phonet_autoconf +#endif /* __KERNEL__ */ + #endif diff --git a/include/linux/phy.h b/include/linux/phy.h index d7e54d98869..b1368b8f657 100644 --- a/include/linux/phy.h +++ b/include/linux/phy.h @@ -79,7 +79,7 @@ typedef enum { * Need to be a little smaller than phydev->dev.bus_id to leave room * for the ":%02x" */ -#define MII_BUS_ID_SIZE (BUS_ID_SIZE - 3) +#define MII_BUS_ID_SIZE (20 - 3) /* * The Bus class for PHYs. Devices which provide access to @@ -315,8 +315,7 @@ struct phy_device { /* Interrupt and Polling infrastructure */ struct work_struct phy_queue; - struct work_struct state_queue; - struct timer_list phy_timer; + struct delayed_work state_queue; atomic_t irq_disable; struct mutex lock; @@ -389,6 +388,12 @@ struct phy_driver { /* Enables or disables interrupts */ int (*config_intr)(struct phy_device *phydev); + /* + * Checks if the PHY generated an interrupt. + * For multi-PHY devices with shared PHY interrupt pin + */ + int (*did_interrupt)(struct phy_device *phydev); + /* Clears up any memory if needed */ void (*remove)(struct phy_device *phydev); @@ -402,7 +407,7 @@ struct phy_driver { /* A Structure for boards to register fixups with the PHY Lib */ struct phy_fixup { struct list_head list; - char bus_id[BUS_ID_SIZE]; + char bus_id[20]; u32 phy_uid; u32 phy_uid_mask; int (*run)(struct phy_device *phydev); @@ -439,10 +444,16 @@ static inline int phy_write(struct phy_device *phydev, u16 regnum, u16 val) int get_phy_id(struct mii_bus *bus, int addr, u32 *phy_id); struct phy_device* get_phy_device(struct mii_bus *bus, int addr); +int phy_device_register(struct phy_device *phy); int phy_clear_interrupt(struct phy_device *phydev); int phy_config_interrupt(struct phy_device *phydev, u32 interrupts); +int phy_attach_direct(struct net_device *dev, struct phy_device *phydev, + u32 flags, phy_interface_t interface); struct phy_device * phy_attach(struct net_device *dev, const char *bus_id, u32 flags, phy_interface_t interface); +int phy_connect_direct(struct net_device *dev, struct phy_device *phydev, + void (*handler)(struct net_device *), u32 flags, + phy_interface_t interface); struct phy_device * phy_connect(struct net_device *dev, const char *bus_id, void (*handler)(struct net_device *), u32 flags, phy_interface_t interface); diff --git a/include/linux/pim.h b/include/linux/pim.h index 1ba0661561a..252bf6644c5 100644 --- a/include/linux/pim.h +++ b/include/linux/pim.h @@ -4,14 +4,14 @@ #include <asm/byteorder.h> /* Message types - V1 */ -#define PIM_V1_VERSION __constant_htonl(0x10000000) +#define PIM_V1_VERSION cpu_to_be32(0x10000000) #define PIM_V1_REGISTER 1 /* Message types - V2 */ #define PIM_VERSION 2 #define PIM_REGISTER 1 -#define PIM_NULL_REGISTER __constant_htonl(0x40000000) +#define PIM_NULL_REGISTER cpu_to_be32(0x40000000) /* PIMv2 register message header layout (ietf-draft-idmr-pimvsm-v2-00.ps */ struct pimreghdr diff --git a/include/linux/pipe_fs_i.h b/include/linux/pipe_fs_i.h index 8e4120285f7..b43a9e03905 100644 --- a/include/linux/pipe_fs_i.h +++ b/include/linux/pipe_fs_i.h @@ -134,6 +134,11 @@ struct pipe_buf_operations 
{ memory allocation, whereas PIPE_BUF makes atomicity guarantees. */ #define PIPE_SIZE PAGE_SIZE +/* Pipe lock and unlock operations */ +void pipe_lock(struct pipe_inode_info *); +void pipe_unlock(struct pipe_inode_info *); +void pipe_double_lock(struct pipe_inode_info *, struct pipe_inode_info *); + /* Drop the inode semaphore and wait for a pipe event, atomically */ void pipe_wait(struct pipe_inode_info *pipe); @@ -147,5 +152,6 @@ void generic_pipe_buf_unmap(struct pipe_inode_info *, struct pipe_buffer *, void void generic_pipe_buf_get(struct pipe_inode_info *, struct pipe_buffer *); int generic_pipe_buf_confirm(struct pipe_inode_info *, struct pipe_buffer *); int generic_pipe_buf_steal(struct pipe_inode_info *, struct pipe_buffer *); +void generic_pipe_buf_release(struct pipe_inode_info *, struct pipe_buffer *); #endif diff --git a/include/linux/pkt_cls.h b/include/linux/pkt_cls.h index e6aa8482ad7..3c842edff38 100644 --- a/include/linux/pkt_cls.h +++ b/include/linux/pkt_cls.h @@ -1,6 +1,7 @@ #ifndef __LINUX_PKT_CLS_H #define __LINUX_PKT_CLS_H +#include <linux/types.h> #include <linux/pkt_sched.h> /* I think i could have done better macros ; for now this is stolen from diff --git a/include/linux/pkt_sched.h b/include/linux/pkt_sched.h index e3f133adba7..d51a2b3e221 100644 --- a/include/linux/pkt_sched.h +++ b/include/linux/pkt_sched.h @@ -1,6 +1,8 @@ #ifndef __LINUX_PKT_SCHED_H #define __LINUX_PKT_SCHED_H +#include <linux/types.h> + /* Logical priority bands not depending on specific packet scheduler. Every scheduler will map them to real traffic classes, if it has no more precise mechanism to classify packets. @@ -513,7 +515,7 @@ enum struct tc_drr_stats { - u32 deficit; + __u32 deficit; }; #endif diff --git a/include/linux/pktcdvd.h b/include/linux/pktcdvd.h index 04b4d7330e6..d745f5b6c7b 100644 --- a/include/linux/pktcdvd.h +++ b/include/linux/pktcdvd.h @@ -113,6 +113,7 @@ struct pkt_ctrl_command { #include <linux/cdrom.h> #include <linux/kobject.h> #include <linux/sysfs.h> +#include <linux/mempool.h> /* default bio write queue congestion marks */ #define PKT_WRITE_CONGESTION_ON 10000 diff --git a/include/linux/platform_device.h b/include/linux/platform_device.h index 9a342699c60..8dc5123b630 100644 --- a/include/linux/platform_device.h +++ b/include/linux/platform_device.h @@ -12,6 +12,7 @@ #define _PLATFORM_DEVICE_H_ #include <linux/device.h> +#include <linux/mod_devicetable.h> struct platform_device { const char * name; @@ -19,8 +20,12 @@ struct platform_device { struct device dev; u32 num_resources; struct resource * resource; + + struct platform_device_id *id_entry; }; +#define platform_get_device_id(pdev) ((pdev)->id_entry) + #define to_platform_device(x) container_of((x), struct platform_device, dev) extern int platform_device_register(struct platform_device *); @@ -31,8 +36,8 @@ extern struct device platform_bus; extern struct resource *platform_get_resource(struct platform_device *, unsigned int, unsigned int); extern int platform_get_irq(struct platform_device *, unsigned int); -extern struct resource *platform_get_resource_byname(struct platform_device *, unsigned int, char *); -extern int platform_get_irq_byname(struct platform_device *, char *); +extern struct resource *platform_get_resource_byname(struct platform_device *, unsigned int, const char *); +extern int platform_get_irq_byname(struct platform_device *, const char *); extern int platform_add_devices(struct platform_device **, int); extern struct platform_device *platform_device_register_simple(const char *, int 
id, @@ -56,6 +61,7 @@ struct platform_driver { int (*resume_early)(struct platform_device *); int (*resume)(struct platform_device *); struct device_driver driver; + struct platform_device_id *id_table; }; extern int platform_driver_register(struct platform_driver *); @@ -70,4 +76,46 @@ extern int platform_driver_probe(struct platform_driver *driver, #define platform_get_drvdata(_dev) dev_get_drvdata(&(_dev)->dev) #define platform_set_drvdata(_dev,data) dev_set_drvdata(&(_dev)->dev, (data)) +/* early platform driver interface */ +struct early_platform_driver { + const char *class_str; + struct platform_driver *pdrv; + struct list_head list; + int requested_id; +}; + +#define EARLY_PLATFORM_ID_UNSET -2 +#define EARLY_PLATFORM_ID_ERROR -3 + +extern int early_platform_driver_register(struct early_platform_driver *epdrv, + char *buf); +extern void early_platform_add_devices(struct platform_device **devs, int num); + +static inline int is_early_platform_device(struct platform_device *pdev) +{ + return !pdev->dev.driver; +} + +extern void early_platform_driver_register_all(char *class_str); +extern int early_platform_driver_probe(char *class_str, + int nr_probe, int user_only); +extern void early_platform_cleanup(void); + + +#ifndef MODULE +#define early_platform_init(class_string, platform_driver) \ +static __initdata struct early_platform_driver early_driver = { \ + .class_str = class_string, \ + .pdrv = platform_driver, \ + .requested_id = EARLY_PLATFORM_ID_UNSET, \ +}; \ +static int __init early_platform_driver_setup_func(char *buf) \ +{ \ + return early_platform_driver_register(&early_driver, buf); \ +} \ +early_param(class_string, early_platform_driver_setup_func) +#else /* MODULE */ +#define early_platform_init(class_string, platform_driver) +#endif /* MODULE */ + #endif /* _PLATFORM_DEVICE_H_ */ diff --git a/include/linux/plist.h b/include/linux/plist.h index 85de2f05587..45926d77d6a 100644 --- a/include/linux/plist.h +++ b/include/linux/plist.h @@ -96,6 +96,10 @@ struct plist_node { # define PLIST_HEAD_LOCK_INIT(_lock) #endif +#define _PLIST_HEAD_INIT(head) \ + .prio_list = LIST_HEAD_INIT((head).prio_list), \ + .node_list = LIST_HEAD_INIT((head).node_list) + /** * PLIST_HEAD_INIT - static struct plist_head initializer * @head: struct plist_head variable name @@ -103,8 +107,7 @@ struct plist_node { */ #define PLIST_HEAD_INIT(head, _lock) \ { \ - .prio_list = LIST_HEAD_INIT((head).prio_list), \ - .node_list = LIST_HEAD_INIT((head).node_list), \ + _PLIST_HEAD_INIT(head), \ PLIST_HEAD_LOCK_INIT(&(_lock)) \ } @@ -116,7 +119,7 @@ struct plist_node { #define PLIST_NODE_INIT(node, __prio) \ { \ .prio = (__prio), \ - .plist = PLIST_HEAD_INIT((node).plist, NULL), \ + .plist = { _PLIST_HEAD_INIT((node).plist) }, \ } /** diff --git a/include/linux/pm.h b/include/linux/pm.h index de2e0a8f672..b3f74764a58 100644 --- a/include/linux/pm.h +++ b/include/linux/pm.h @@ -381,13 +381,14 @@ struct dev_pm_info { #ifdef CONFIG_PM_SLEEP extern void device_pm_lock(void); -extern void device_power_up(pm_message_t state); -extern void device_resume(pm_message_t state); +extern int sysdev_resume(void); +extern void dpm_resume_noirq(pm_message_t state); +extern void dpm_resume_end(pm_message_t state); extern void device_pm_unlock(void); -extern int device_power_down(pm_message_t state); -extern int device_suspend(pm_message_t state); -extern int device_prepare_suspend(pm_message_t state); +extern int sysdev_suspend(pm_message_t state); +extern int dpm_suspend_noirq(pm_message_t state); +extern int 
dpm_suspend_start(pm_message_t state); extern void __suspend_report_result(const char *function, void *fn, int ret); @@ -398,7 +399,10 @@ extern void __suspend_report_result(const char *function, void *fn, int ret); #else /* !CONFIG_PM_SLEEP */ -static inline int device_suspend(pm_message_t state) +#define device_pm_lock() do {} while (0) +#define device_pm_unlock() do {} while (0) + +static inline int dpm_suspend_start(pm_message_t state) { return 0; } @@ -407,6 +411,14 @@ static inline int device_suspend(pm_message_t state) #endif /* !CONFIG_PM_SLEEP */ +/* How to reorder dpm_list after device_move() */ +enum dpm_order { + DPM_ORDER_NONE, + DPM_ORDER_DEV_AFTER_PARENT, + DPM_ORDER_PARENT_BEFORE_DEV, + DPM_ORDER_DEV_LAST, +}; + /* * Global Power Management flags * Used to keep APM and ACPI from both being active diff --git a/include/linux/pnp.h b/include/linux/pnp.h index ca3c8877302..b063c7328ba 100644 --- a/include/linux/pnp.h +++ b/include/linux/pnp.h @@ -446,6 +446,7 @@ int pnp_start_dev(struct pnp_dev *dev); int pnp_stop_dev(struct pnp_dev *dev); int pnp_activate_dev(struct pnp_dev *dev); int pnp_disable_dev(struct pnp_dev *dev); +int pnp_range_reserved(resource_size_t start, resource_size_t end); /* protocol helpers */ int pnp_is_active(struct pnp_dev *dev); @@ -476,6 +477,7 @@ static inline int pnp_start_dev(struct pnp_dev *dev) { return -ENODEV; } static inline int pnp_stop_dev(struct pnp_dev *dev) { return -ENODEV; } static inline int pnp_activate_dev(struct pnp_dev *dev) { return -ENODEV; } static inline int pnp_disable_dev(struct pnp_dev *dev) { return -ENODEV; } +static inline int pnp_range_reserved(resource_size_t start, resource_size_t end) { return 0;} /* protocol helpers */ static inline int pnp_is_active(struct pnp_dev *dev) { return 0; } diff --git a/include/linux/poison.h b/include/linux/poison.h index 9f31683728f..6729f7dcd60 100644 --- a/include/linux/poison.h +++ b/include/linux/poison.h @@ -17,6 +17,9 @@ */ #define TIMER_ENTRY_STATIC ((void *) 0x74737461) +/********** mm/debug-pagealloc.c **********/ +#define PAGE_POISON 0xaa + /********** mm/slab.c **********/ /* * Magic nums for obj red zoning. 
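For context on the platform_device_id/id_table hooks introduced in the platform_device.h hunk above: a single platform_driver can now carry a table of device names and recover which entry matched from probe() via platform_get_device_id(). The following is a minimal, hypothetical sketch of that usage (the "foo" driver, device names, and driver_data values are illustrative only and are not part of this patch):

#include <linux/module.h>
#include <linux/init.h>
#include <linux/platform_device.h>

static struct platform_device_id foo_ids[] = {
	{ .name = "foo-v1", .driver_data = 0 },
	{ .name = "foo-v2", .driver_data = 1 },
	{ }	/* terminating entry */
};

static int foo_probe(struct platform_device *pdev)
{
	struct platform_device_id *id = platform_get_device_id(pdev);

	/* driver_data tells the shared probe routine which variant matched */
	dev_info(&pdev->dev, "bound to variant %lu\n",
		 (unsigned long)id->driver_data);
	return 0;
}

static struct platform_driver foo_driver = {
	.probe		= foo_probe,
	.driver		= {
		.name	= "foo",
		.owner	= THIS_MODULE,
	},
	.id_table	= foo_ids,
};

static int __init foo_init(void)
{
	return platform_driver_register(&foo_driver);
}
module_init(foo_init);

static void __exit foo_exit(void)
{
	platform_driver_unregister(&foo_driver);
}
module_exit(foo_exit);

When an id_table is supplied, bus matching is typically done against the table entries rather than against driver.name alone, so one driver can bind several board-specific device names.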
diff --git a/include/linux/poll.h b/include/linux/poll.h index 8c24ef8d997..fa287f25138 100644 --- a/include/linux/poll.h +++ b/include/linux/poll.h @@ -32,6 +32,7 @@ typedef void (*poll_queue_proc)(struct file *, wait_queue_head_t *, struct poll_ typedef struct poll_table_struct { poll_queue_proc qproc; + unsigned long key; } poll_table; static inline void poll_wait(struct file * filp, wait_queue_head_t * wait_address, poll_table *p) @@ -43,10 +44,12 @@ static inline void poll_wait(struct file * filp, wait_queue_head_t * wait_addres static inline void init_poll_funcptr(poll_table *pt, poll_queue_proc qproc) { pt->qproc = qproc; + pt->key = ~0UL; /* all events enabled */ } struct poll_table_entry { struct file *filp; + unsigned long key; wait_queue_t wait; wait_queue_head_t *wait_address; }; diff --git a/include/linux/posix_acl.h b/include/linux/posix_acl.h index 4bc241290c2..065a3652a3e 100644 --- a/include/linux/posix_acl.h +++ b/include/linux/posix_acl.h @@ -83,4 +83,78 @@ extern int posix_acl_chmod_masq(struct posix_acl *, mode_t); extern struct posix_acl *get_posix_acl(struct inode *, int); extern int set_posix_acl(struct inode *, int, struct posix_acl *); +#ifdef CONFIG_FS_POSIX_ACL +static inline struct posix_acl *get_cached_acl(struct inode *inode, int type) +{ + struct posix_acl **p, *acl; + switch (type) { + case ACL_TYPE_ACCESS: + p = &inode->i_acl; + break; + case ACL_TYPE_DEFAULT: + p = &inode->i_default_acl; + break; + default: + return ERR_PTR(-EINVAL); + } + acl = ACCESS_ONCE(*p); + if (acl) { + spin_lock(&inode->i_lock); + acl = *p; + if (acl != ACL_NOT_CACHED) + acl = posix_acl_dup(acl); + spin_unlock(&inode->i_lock); + } + return acl; +} + +static inline void set_cached_acl(struct inode *inode, + int type, + struct posix_acl *acl) +{ + struct posix_acl *old = NULL; + spin_lock(&inode->i_lock); + switch (type) { + case ACL_TYPE_ACCESS: + old = inode->i_acl; + inode->i_acl = posix_acl_dup(acl); + break; + case ACL_TYPE_DEFAULT: + old = inode->i_default_acl; + inode->i_default_acl = posix_acl_dup(acl); + break; + } + spin_unlock(&inode->i_lock); + if (old != ACL_NOT_CACHED) + posix_acl_release(old); +} + +static inline void forget_cached_acl(struct inode *inode, int type) +{ + struct posix_acl *old = NULL; + spin_lock(&inode->i_lock); + switch (type) { + case ACL_TYPE_ACCESS: + old = inode->i_acl; + inode->i_acl = ACL_NOT_CACHED; + break; + case ACL_TYPE_DEFAULT: + old = inode->i_default_acl; + inode->i_default_acl = ACL_NOT_CACHED; + break; + } + spin_unlock(&inode->i_lock); + if (old != ACL_NOT_CACHED) + posix_acl_release(old); +} +#endif + +static inline void cache_no_acl(struct inode *inode) +{ +#ifdef CONFIG_FS_POSIX_ACL + inode->i_acl = NULL; + inode->i_default_acl = NULL; +#endif +} + #endif /* __LINUX_POSIX_ACL_H */ diff --git a/include/linux/power_supply.h b/include/linux/power_supply.h index 8ff25e0e7f7..594c494ac3f 100644 --- a/include/linux/power_supply.h +++ b/include/linux/power_supply.h @@ -73,6 +73,8 @@ enum power_supply_property { POWER_SUPPLY_PROP_VOLTAGE_AVG, POWER_SUPPLY_PROP_CURRENT_NOW, POWER_SUPPLY_PROP_CURRENT_AVG, + POWER_SUPPLY_PROP_POWER_NOW, + POWER_SUPPLY_PROP_POWER_AVG, POWER_SUPPLY_PROP_CHARGE_FULL_DESIGN, POWER_SUPPLY_PROP_CHARGE_EMPTY_DESIGN, POWER_SUPPLY_PROP_CHARGE_FULL, diff --git a/include/linux/ppp_channel.h b/include/linux/ppp_channel.h index a942892d6df..0d3fa63e90e 100644 --- a/include/linux/ppp_channel.h +++ b/include/linux/ppp_channel.h @@ -22,6 +22,7 @@ #include <linux/list.h> #include <linux/skbuff.h> #include <linux/poll.h> 
+#include <net/net_namespace.h> struct ppp_channel; @@ -39,8 +40,8 @@ struct ppp_channel { int mtu; /* max transmit packet size */ int hdrlen; /* amount of headroom channel needs */ void *ppp; /* opaque to channel */ - /* the following are not used at present */ int speed; /* transfer rate (bytes/second) */ + /* the following is not used at present */ int latency; /* overhead time in milliseconds */ }; @@ -56,6 +57,9 @@ extern void ppp_input(struct ppp_channel *, struct sk_buff *); that we may have missed a packet. */ extern void ppp_input_error(struct ppp_channel *, int code); +/* Attach a channel to a given PPP unit in specified net. */ +extern int ppp_register_net_channel(struct net *, struct ppp_channel *); + /* Attach a channel to a given PPP unit. */ extern int ppp_register_channel(struct ppp_channel *); diff --git a/include/linux/ppp_defs.h b/include/linux/ppp_defs.h index 6e8adc77522..0f93ed6b4a8 100644 --- a/include/linux/ppp_defs.h +++ b/include/linux/ppp_defs.h @@ -25,6 +25,8 @@ * OR MODIFICATIONS. */ +#include <linux/types.h> + /* * ==FILEVERSION 20000114== * @@ -175,8 +177,8 @@ struct ppp_comp_stats { * the last NP packet was sent or received. */ struct ppp_idle { - time_t xmit_idle; /* time since last NP packet sent */ - time_t recv_idle; /* time since last NP packet received */ + __kernel_time_t xmit_idle; /* time since last NP packet sent */ + __kernel_time_t recv_idle; /* time since last NP packet received */ }; #endif /* _PPP_DEFS_H_ */ diff --git a/include/linux/pps.h b/include/linux/pps.h new file mode 100644 index 00000000000..0194ab06177 --- /dev/null +++ b/include/linux/pps.h @@ -0,0 +1,124 @@ +/* + * PPS API header + * + * Copyright (C) 2005-2009 Rodolfo Giometti <giometti@linux.it> + * + * This program is free software; you can redistribute it and/or modify + * it under the terms of the GNU General Public License as published by + * the Free Software Foundation; either version 2 of the License, or + * (at your option) any later version. + * + * This program is distributed in the hope that it will be useful, + * but WITHOUT ANY WARRANTY; without even the implied warranty of + * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the + * GNU General Public License for more details. + * + * You should have received a copy of the GNU General Public License + * along with this program; if not, write to the Free Software + * Foundation, Inc., 675 Mass Ave, Cambridge, MA 02139, USA. + */ + + +#ifndef _PPS_H_ +#define _PPS_H_ + +#include <linux/types.h> + +#define PPS_VERSION "5.3.6" +#define PPS_MAX_SOURCES 16 /* should be enough... */ + +/* Implementation note: the logical states ``assert'' and ``clear'' + * are implemented in terms of the chip register, i.e. ``assert'' + * means the bit is set. */ + +/* + * 3.2 New data structures + */ + +#define PPS_API_VERS_1 1 +#define PPS_API_VERS PPS_API_VERS_1 /* we use API version 1 */ +#define PPS_MAX_NAME_LEN 32 + +/* 32-bit vs. 64-bit compatibility. + * + * 0n i386, the alignment of a uint64_t is only 4 bytes, while on most other + * architectures it's 8 bytes. On i386, there will be no padding between the + * two consecutive 'struct pps_ktime' members of struct pps_kinfo and struct + * pps_kparams. But on most platforms there will be padding to ensure correct + * alignment. + * + * The simple fix is probably to add an explicit padding. 
+ * [David Woodhouse] + */ +struct pps_ktime { + __s64 sec; + __s32 nsec; + __u32 flags; +}; +#define PPS_TIME_INVALID (1<<0) /* used to specify timeout==NULL */ + +struct pps_kinfo { + __u32 assert_sequence; /* seq. num. of assert event */ + __u32 clear_sequence; /* seq. num. of clear event */ + struct pps_ktime assert_tu; /* time of assert event */ + struct pps_ktime clear_tu; /* time of clear event */ + int current_mode; /* current mode bits */ +}; + +struct pps_kparams { + int api_version; /* API version # */ + int mode; /* mode bits */ + struct pps_ktime assert_off_tu; /* offset compensation for assert */ + struct pps_ktime clear_off_tu; /* offset compensation for clear */ +}; + +/* + * 3.3 Mode bit definitions + */ + +/* Device/implementation parameters */ +#define PPS_CAPTUREASSERT 0x01 /* capture assert events */ +#define PPS_CAPTURECLEAR 0x02 /* capture clear events */ +#define PPS_CAPTUREBOTH 0x03 /* capture assert and clear events */ + +#define PPS_OFFSETASSERT 0x10 /* apply compensation for assert ev. */ +#define PPS_OFFSETCLEAR 0x20 /* apply compensation for clear ev. */ + +#define PPS_CANWAIT 0x100 /* can we wait for an event? */ +#define PPS_CANPOLL 0x200 /* bit reserved for future use */ + +/* Kernel actions */ +#define PPS_ECHOASSERT 0x40 /* feed back assert event to output */ +#define PPS_ECHOCLEAR 0x80 /* feed back clear event to output */ + +/* Timestamp formats */ +#define PPS_TSFMT_TSPEC 0x1000 /* select timespec format */ +#define PPS_TSFMT_NTPFP 0x2000 /* select NTP format */ + +/* + * 3.4.4 New functions: disciplining the kernel timebase + */ + +/* Kernel consumers */ +#define PPS_KC_HARDPPS 0 /* hardpps() (or equivalent) */ +#define PPS_KC_HARDPPS_PLL 1 /* hardpps() constrained to + use a phase-locked loop */ +#define PPS_KC_HARDPPS_FLL 2 /* hardpps() constrained to + use a frequency-locked loop */ +/* + * Here begins the implementation-specific part! + */ + +struct pps_fdata { + struct pps_kinfo info; + struct pps_ktime timeout; +}; + +#include <linux/ioctl.h> + +#define PPS_GETPARAMS _IOR('p', 0xa1, struct pps_kparams *) +#define PPS_SETPARAMS _IOW('p', 0xa2, struct pps_kparams *) +#define PPS_GETCAP _IOR('p', 0xa3, int *) +#define PPS_FETCH _IOWR('p', 0xa4, struct pps_fdata *) + +#endif /* _PPS_H_ */ diff --git a/include/linux/pps_kernel.h b/include/linux/pps_kernel.h new file mode 100644 index 00000000000..e0a193f830e --- /dev/null +++ b/include/linux/pps_kernel.h @@ -0,0 +1,89 @@ +/* + * PPS API kernel header + * + * Copyright (C) 2009 Rodolfo Giometti <giometti@linux.it> + * + * This program is free software; you can redistribute it and/or modify + * it under the terms of the GNU General Public License as published by + * the Free Software Foundation; either version 2 of the License, or + * (at your option) any later version. + * + * This program is distributed in the hope that it will be useful, + * but WITHOUT ANY WARRANTY; without even the implied warranty of + * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the + * GNU General Public License for more details. + * + * You should have received a copy of the GNU General Public License + * along with this program; if not, write to the Free Software + * Foundation, Inc., 675 Mass Ave, Cambridge, MA 02139, USA. 
+ */ + +#include <linux/pps.h> + +#include <linux/cdev.h> +#include <linux/device.h> +#include <linux/time.h> + +/* + * Global defines + */ + +/* The specific PPS source info */ +struct pps_source_info { + char name[PPS_MAX_NAME_LEN]; /* simbolic name */ + char path[PPS_MAX_NAME_LEN]; /* path of connected device */ + int mode; /* PPS's allowed mode */ + + void (*echo)(int source, int event, void *data); /* PPS echo function */ + + struct module *owner; + struct device *dev; +}; + +/* The main struct */ +struct pps_device { + struct pps_source_info info; /* PSS source info */ + + struct pps_kparams params; /* PPS's current params */ + + __u32 assert_sequence; /* PPS' assert event seq # */ + __u32 clear_sequence; /* PPS' clear event seq # */ + struct pps_ktime assert_tu; + struct pps_ktime clear_tu; + int current_mode; /* PPS mode at event time */ + + int go; /* PPS event is arrived? */ + wait_queue_head_t queue; /* PPS event queue */ + + unsigned int id; /* PPS source unique ID */ + struct cdev cdev; + struct device *dev; + int devno; + struct fasync_struct *async_queue; /* fasync method */ + spinlock_t lock; + + atomic_t usage; /* usage count */ +}; + +/* + * Global variables + */ + +extern spinlock_t pps_idr_lock; +extern struct idr pps_idr; +extern struct timespec pps_irq_ts[]; + +extern struct device_attribute pps_attrs[]; + +/* + * Exported functions + */ + +struct pps_device *pps_get_source(int source); +extern void pps_put_source(struct pps_device *pps); +extern int pps_register_source(struct pps_source_info *info, + int default_params); +extern void pps_unregister_source(int source); +extern int pps_register_cdev(struct pps_device *pps); +extern void pps_unregister_cdev(struct pps_device *pps); +extern void pps_event(int source, struct pps_ktime *ts, int event, void *data); diff --git a/include/linux/prctl.h b/include/linux/prctl.h index 48d887e3c6e..b00df4c79c6 100644 --- a/include/linux/prctl.h +++ b/include/linux/prctl.h @@ -85,4 +85,7 @@ #define PR_SET_TIMERSLACK 29 #define PR_GET_TIMERSLACK 30 +#define PR_TASK_PERF_COUNTERS_DISABLE 31 +#define PR_TASK_PERF_COUNTERS_ENABLE 32 + #endif /* _LINUX_PRCTL_H */ diff --git a/include/linux/proc_fs.h b/include/linux/proc_fs.h index b8bdb96eff7..e6e77d31c41 100644 --- a/include/linux/proc_fs.h +++ b/include/linux/proc_fs.h @@ -41,9 +41,6 @@ enum { * while parent/subdir create the directory structure (every * /proc file has a parent, but "subdir" is NULL for all * non-directory entries). - * - * "owner" is used to protect module - * from unloading while proc_dir_entry is in use */ typedef int (read_proc_t)(char *page, char **start, off_t off, @@ -70,7 +67,6 @@ struct proc_dir_entry { * somewhere. 
*/ const struct file_operations *proc_fops; - struct module *owner; struct proc_dir_entry *next, *parent, *subdir; void *data; read_proc_t *read_proc; @@ -97,20 +93,9 @@ struct vmcore { #ifdef CONFIG_PROC_FS -extern spinlock_t proc_subdir_lock; - extern void proc_root_init(void); void proc_flush_task(struct task_struct *task); -struct dentry *proc_pid_lookup(struct inode *dir, struct dentry * dentry, struct nameidata *); -int proc_pid_readdir(struct file * filp, void * dirent, filldir_t filldir); -unsigned long task_vsize(struct mm_struct *); -int task_statm(struct mm_struct *, int *, int *, int *, int *); -void task_mem(struct seq_file *, struct mm_struct *); -void clear_refs_smap(struct mm_struct *mm); - -struct proc_dir_entry *de_get(struct proc_dir_entry *de); -void de_put(struct proc_dir_entry *de); extern struct proc_dir_entry *create_proc_entry(const char *name, mode_t mode, struct proc_dir_entry *parent); @@ -120,20 +105,7 @@ struct proc_dir_entry *proc_create_data(const char *name, mode_t mode, void *data); extern void remove_proc_entry(const char *name, struct proc_dir_entry *parent); -extern struct vfsmount *proc_mnt; struct pid_namespace; -extern int proc_fill_super(struct super_block *); -extern struct inode *proc_get_inode(struct super_block *, unsigned int, struct proc_dir_entry *); - -/* - * These are generic /proc routines that use the internal - * "struct proc_dir_entry" tree to traverse the filesystem. - * - * The /proc root directory has extended versions to take care - * of the /proc/<pid> subdirectories. - */ -extern int proc_readdir(struct file *, void *, filldir_t); -extern struct dentry *proc_lookup(struct inode *, struct dentry *, struct nameidata *); extern int pid_ns_prepare_proc(struct pid_namespace *ns); extern void pid_ns_release_proc(struct pid_namespace *ns); diff --git a/include/linux/ptrace.h b/include/linux/ptrace.h index 98b93ca4db0..7456d7d87a1 100644 --- a/include/linux/ptrace.h +++ b/include/linux/ptrace.h @@ -81,7 +81,6 @@ extern long arch_ptrace(struct task_struct *child, long request, long addr, long data); -extern struct task_struct *ptrace_get_task_struct(pid_t pid); extern int ptrace_traceme(void); extern int ptrace_readdata(struct task_struct *tsk, unsigned long src, char __user *dst, int len); extern int ptrace_writedata(struct task_struct *tsk, char __user *src, unsigned long dst, int len); @@ -94,7 +93,7 @@ extern void ptrace_notify(int exit_code); extern void __ptrace_link(struct task_struct *child, struct task_struct *new_parent); extern void __ptrace_unlink(struct task_struct *child); -extern void ptrace_fork(struct task_struct *task, unsigned long clone_flags); +extern void exit_ptrace(struct task_struct *tracer); #define PTRACE_MODE_READ 1 #define PTRACE_MODE_ATTACH 2 /* Returns 0 on success, -errno on denial. */ @@ -326,15 +325,6 @@ static inline void user_enable_block_step(struct task_struct *task) #define arch_ptrace_untrace(task) do { } while (0) #endif -#ifndef arch_ptrace_fork -/* - * Do machine-specific work to initialize a new task. - * - * This is called from copy_process(). 
- */ -#define arch_ptrace_fork(child, clone_flags) do { } while (0) -#endif - extern int task_current_syscall(struct task_struct *target, long *callno, unsigned long args[6], unsigned int maxargs, unsigned long *sp, unsigned long *pc); diff --git a/include/linux/pwm.h b/include/linux/pwm.h index 3945f803d51..7c775751392 100644 --- a/include/linux/pwm.h +++ b/include/linux/pwm.h @@ -28,4 +28,4 @@ int pwm_enable(struct pwm_device *pwm); */ void pwm_disable(struct pwm_device *pwm); -#endif /* __ASM_ARCH_PWM_H */ +#endif /* __LINUX_PWM_H */ diff --git a/include/linux/qnx4_fs.h b/include/linux/qnx4_fs.h index 787d19ea9f4..8b9aee1a9ce 100644 --- a/include/linux/qnx4_fs.h +++ b/include/linux/qnx4_fs.h @@ -85,65 +85,4 @@ struct qnx4_super_block { struct qnx4_inode_entry AltBoot; }; -#ifdef __KERNEL__ - -#define QNX4_DEBUG 0 - -#if QNX4_DEBUG -#define QNX4DEBUG(X) printk X -#else -#define QNX4DEBUG(X) (void) 0 -#endif - -struct qnx4_sb_info { - struct buffer_head *sb_buf; /* superblock buffer */ - struct qnx4_super_block *sb; /* our superblock */ - unsigned int Version; /* may be useful */ - struct qnx4_inode_entry *BitMap; /* useful */ -}; - -struct qnx4_inode_info { - struct qnx4_inode_entry raw; - loff_t mmu_private; - struct inode vfs_inode; -}; - -extern struct inode *qnx4_iget(struct super_block *, unsigned long); -extern struct dentry *qnx4_lookup(struct inode *dir, struct dentry *dentry, struct nameidata *nd); -extern unsigned long qnx4_count_free_blocks(struct super_block *sb); -extern unsigned long qnx4_block_map(struct inode *inode, long iblock); - -extern struct buffer_head *qnx4_bread(struct inode *, int, int); - -extern const struct inode_operations qnx4_file_inode_operations; -extern const struct inode_operations qnx4_dir_inode_operations; -extern const struct file_operations qnx4_file_operations; -extern const struct file_operations qnx4_dir_operations; -extern int qnx4_is_free(struct super_block *sb, long block); -extern int qnx4_set_bitmap(struct super_block *sb, long block, int busy); -extern int qnx4_create(struct inode *inode, struct dentry *dentry, int mode, struct nameidata *nd); -extern void qnx4_truncate(struct inode *inode); -extern void qnx4_free_inode(struct inode *inode); -extern int qnx4_unlink(struct inode *dir, struct dentry *dentry); -extern int qnx4_rmdir(struct inode *dir, struct dentry *dentry); -extern int qnx4_sync_file(struct file *file, struct dentry *dentry, int); -extern int qnx4_sync_inode(struct inode *inode); - -static inline struct qnx4_sb_info *qnx4_sb(struct super_block *sb) -{ - return sb->s_fs_info; -} - -static inline struct qnx4_inode_info *qnx4_i(struct inode *inode) -{ - return container_of(inode, struct qnx4_inode_info, vfs_inode); -} - -static inline struct qnx4_inode_entry *qnx4_raw_inode(struct inode *inode) -{ - return &qnx4_i(inode)->raw; -} - -#endif /* __KERNEL__ */ - #endif diff --git a/include/linux/quota.h b/include/linux/quota.h index d72d5d84fde..78c48895b12 100644 --- a/include/linux/quota.h +++ b/include/linux/quota.h @@ -198,6 +198,7 @@ struct mem_dqblk { qsize_t dqb_bhardlimit; /* absolute limit on disk blks alloc */ qsize_t dqb_bsoftlimit; /* preferred limit on disk blks */ qsize_t dqb_curspace; /* current used space */ + qsize_t dqb_rsvspace; /* current reserved space for delalloc*/ qsize_t dqb_ihardlimit; /* absolute limit on allocated inodes */ qsize_t dqb_isoftlimit; /* preferred inode limit */ qsize_t dqb_curinodes; /* current # allocated inodes */ @@ -276,8 +277,6 @@ struct dquot { struct mem_dqblk dq_dqb; /* Diskquota 
usage */ }; -#define NODQUOT (struct dquot *)NULL - #define QUOTA_OK 0 #define NO_QUOTA 1 @@ -308,6 +307,14 @@ struct dquot_operations { int (*release_dquot) (struct dquot *); /* Quota is going to be deleted from disk */ int (*mark_dirty) (struct dquot *); /* Dquot is marked dirty */ int (*write_info) (struct super_block *, int); /* Write of quota "superblock" */ + /* reserve quota for delayed block allocation */ + int (*reserve_space) (struct inode *, qsize_t, int); + /* claim reserved quota for delayed alloc */ + int (*claim_space) (struct inode *, qsize_t); + /* release rsved quota for delayed alloc */ + void (*release_rsv) (struct inode *, qsize_t); + /* get reserved quota for delayed alloc */ + qsize_t (*get_reserved_space) (struct inode *); }; /* Operations handling requests from userspace */ diff --git a/include/linux/quotaops.h b/include/linux/quotaops.h index 21b781a3350..26361c4c037 100644 --- a/include/linux/quotaops.h +++ b/include/linux/quotaops.h @@ -7,7 +7,6 @@ #ifndef _LINUX_QUOTAOPS_ #define _LINUX_QUOTAOPS_ -#include <linux/smp_lock.h> #include <linux/fs.h> static inline struct quota_info *sb_dqopt(struct super_block *sb) @@ -20,14 +19,17 @@ static inline struct quota_info *sb_dqopt(struct super_block *sb) /* * declaration of quota_function calls in kernel. */ -void sync_dquots(struct super_block *sb, int type); +void sync_quota_sb(struct super_block *sb, int type); +static inline void writeout_quota_sb(struct super_block *sb, int type) +{ + if (sb->s_qcop->quota_sync) + sb->s_qcop->quota_sync(sb, type); +} int dquot_initialize(struct inode *inode, int type); int dquot_drop(struct inode *inode); -int dquot_drop_locked(struct inode *inode); struct dquot *dqget(struct super_block *sb, unsigned int id, int type); void dqput(struct dquot *dquot); -int dquot_is_cached(struct super_block *sb, unsigned int id, int type); int dquot_scan_active(struct super_block *sb, int (*fn)(struct dquot *dquot, unsigned long priv), unsigned long priv); @@ -37,6 +39,11 @@ void dquot_destroy(struct dquot *dquot); int dquot_alloc_space(struct inode *inode, qsize_t number, int prealloc); int dquot_alloc_inode(const struct inode *inode, qsize_t number); +int dquot_reserve_space(struct inode *inode, qsize_t number, int prealloc); +int dquot_claim_space(struct inode *inode, qsize_t number); +void dquot_release_reserved_space(struct inode *inode, qsize_t number); +qsize_t dquot_get_reserved_space(struct inode *inode); + int dquot_free_space(struct inode *inode, qsize_t number); int dquot_free_inode(const struct inode *inode, qsize_t number); @@ -185,6 +192,16 @@ static inline int vfs_dq_alloc_space(struct inode *inode, qsize_t nr) return ret; } +static inline int vfs_dq_reserve_space(struct inode *inode, qsize_t nr) +{ + if (sb_any_quota_active(inode->i_sb)) { + /* Used space is updated in alloc_space() */ + if (inode->i_sb->dq_op->reserve_space(inode, nr, 0) == NO_QUOTA) + return 1; + } + return 0; +} + static inline int vfs_dq_alloc_inode(struct inode *inode) { if (sb_any_quota_active(inode->i_sb)) { @@ -195,6 +212,31 @@ static inline int vfs_dq_alloc_inode(struct inode *inode) return 0; } +/* + * Convert in-memory reserved quotas to real consumed quotas + */ +static inline int vfs_dq_claim_space(struct inode *inode, qsize_t nr) +{ + if (sb_any_quota_active(inode->i_sb)) { + if (inode->i_sb->dq_op->claim_space(inode, nr) == NO_QUOTA) + return 1; + } else + inode_add_bytes(inode, nr); + + mark_inode_dirty(inode); + return 0; +} + +/* + * Release reserved (in-memory) quotas + */ +static inline +void 
vfs_dq_release_reservation_space(struct inode *inode, qsize_t nr) +{ + if (sb_any_quota_active(inode->i_sb)) + inode->i_sb->dq_op->release_rsv(inode, nr); +} + static inline void vfs_dq_free_space_nodirty(struct inode *inode, qsize_t nr) { if (sb_any_quota_active(inode->i_sb)) @@ -215,12 +257,7 @@ static inline void vfs_dq_free_inode(struct inode *inode) inode->i_sb->dq_op->free_inode(inode, 1); } -/* The following two functions cannot be called inside a transaction */ -static inline void vfs_dq_sync(struct super_block *sb) -{ - sync_dquots(sb, -1); -} - +/* Cannot be called inside a transaction */ static inline int vfs_dq_off(struct super_block *sb, int remount) { int ret = -ENOSYS; @@ -296,7 +333,11 @@ static inline void vfs_dq_free_inode(struct inode *inode) { } -static inline void vfs_dq_sync(struct super_block *sb) +static inline void sync_quota_sb(struct super_block *sb, int type) +{ +} + +static inline void writeout_quota_sb(struct super_block *sb, int type) { } @@ -341,6 +382,22 @@ static inline int vfs_dq_alloc_space(struct inode *inode, qsize_t nr) return 0; } +static inline int vfs_dq_reserve_space(struct inode *inode, qsize_t nr) +{ + return 0; +} + +static inline int vfs_dq_claim_space(struct inode *inode, qsize_t nr) +{ + return vfs_dq_alloc_space(inode, nr); +} + +static inline +int vfs_dq_release_reservation_space(struct inode *inode, qsize_t nr) +{ + return 0; +} + static inline void vfs_dq_free_space_nodirty(struct inode *inode, qsize_t nr) { inode_sub_bytes(inode, nr); @@ -356,67 +413,48 @@ static inline void vfs_dq_free_space(struct inode *inode, qsize_t nr) static inline int vfs_dq_prealloc_block_nodirty(struct inode *inode, qsize_t nr) { - return vfs_dq_prealloc_space_nodirty(inode, - nr << inode->i_sb->s_blocksize_bits); + return vfs_dq_prealloc_space_nodirty(inode, nr << inode->i_blkbits); } static inline int vfs_dq_prealloc_block(struct inode *inode, qsize_t nr) { - return vfs_dq_prealloc_space(inode, - nr << inode->i_sb->s_blocksize_bits); + return vfs_dq_prealloc_space(inode, nr << inode->i_blkbits); } static inline int vfs_dq_alloc_block_nodirty(struct inode *inode, qsize_t nr) { - return vfs_dq_alloc_space_nodirty(inode, - nr << inode->i_sb->s_blocksize_bits); + return vfs_dq_alloc_space_nodirty(inode, nr << inode->i_blkbits); } static inline int vfs_dq_alloc_block(struct inode *inode, qsize_t nr) { - return vfs_dq_alloc_space(inode, - nr << inode->i_sb->s_blocksize_bits); + return vfs_dq_alloc_space(inode, nr << inode->i_blkbits); +} + +static inline int vfs_dq_reserve_block(struct inode *inode, qsize_t nr) +{ + return vfs_dq_reserve_space(inode, nr << inode->i_blkbits); +} + +static inline int vfs_dq_claim_block(struct inode *inode, qsize_t nr) +{ + return vfs_dq_claim_space(inode, nr << inode->i_blkbits); +} + +static inline +void vfs_dq_release_reservation_block(struct inode *inode, qsize_t nr) +{ + vfs_dq_release_reservation_space(inode, nr << inode->i_blkbits); } static inline void vfs_dq_free_block_nodirty(struct inode *inode, qsize_t nr) { - vfs_dq_free_space_nodirty(inode, nr << inode->i_sb->s_blocksize_bits); + vfs_dq_free_space_nodirty(inode, nr << inode->i_blkbits); } static inline void vfs_dq_free_block(struct inode *inode, qsize_t nr) { - vfs_dq_free_space(inode, nr << inode->i_sb->s_blocksize_bits); + vfs_dq_free_space(inode, nr << inode->i_blkbits); } -/* - * Define uppercase equivalents for compatibility with old function names - * Can go away when we think all users have been converted (15/04/2008) - */ -#define DQUOT_INIT(inode) 
vfs_dq_init(inode) -#define DQUOT_DROP(inode) vfs_dq_drop(inode) -#define DQUOT_PREALLOC_SPACE_NODIRTY(inode, nr) \ - vfs_dq_prealloc_space_nodirty(inode, nr) -#define DQUOT_PREALLOC_SPACE(inode, nr) vfs_dq_prealloc_space(inode, nr) -#define DQUOT_ALLOC_SPACE_NODIRTY(inode, nr) \ - vfs_dq_alloc_space_nodirty(inode, nr) -#define DQUOT_ALLOC_SPACE(inode, nr) vfs_dq_alloc_space(inode, nr) -#define DQUOT_PREALLOC_BLOCK_NODIRTY(inode, nr) \ - vfs_dq_prealloc_block_nodirty(inode, nr) -#define DQUOT_PREALLOC_BLOCK(inode, nr) vfs_dq_prealloc_block(inode, nr) -#define DQUOT_ALLOC_BLOCK_NODIRTY(inode, nr) \ - vfs_dq_alloc_block_nodirty(inode, nr) -#define DQUOT_ALLOC_BLOCK(inode, nr) vfs_dq_alloc_block(inode, nr) -#define DQUOT_ALLOC_INODE(inode) vfs_dq_alloc_inode(inode) -#define DQUOT_FREE_SPACE_NODIRTY(inode, nr) \ - vfs_dq_free_space_nodirty(inode, nr) -#define DQUOT_FREE_SPACE(inode, nr) vfs_dq_free_space(inode, nr) -#define DQUOT_FREE_BLOCK_NODIRTY(inode, nr) \ - vfs_dq_free_block_nodirty(inode, nr) -#define DQUOT_FREE_BLOCK(inode, nr) vfs_dq_free_block(inode, nr) -#define DQUOT_FREE_INODE(inode) vfs_dq_free_inode(inode) -#define DQUOT_TRANSFER(inode, iattr) vfs_dq_transfer(inode, iattr) -#define DQUOT_SYNC(sb) vfs_dq_sync(sb) -#define DQUOT_OFF(sb, remount) vfs_dq_off(sb, remount) -#define DQUOT_ON_REMOUNT(sb) vfs_dq_quota_on_remount(sb) - #endif /* _LINUX_QUOTAOPS_ */ diff --git a/include/linux/radix-tree.h b/include/linux/radix-tree.h index 355f6e80db0..c5da7491809 100644 --- a/include/linux/radix-tree.h +++ b/include/linux/radix-tree.h @@ -167,6 +167,8 @@ radix_tree_gang_lookup_slot(struct radix_tree_root *root, void ***results, unsigned long first_index, unsigned int max_items); unsigned long radix_tree_next_hole(struct radix_tree_root *root, unsigned long index, unsigned long max_scan); +unsigned long radix_tree_prev_hole(struct radix_tree_root *root, + unsigned long index, unsigned long max_scan); int radix_tree_preload(gfp_t gfp_mask); void radix_tree_init(void); void *radix_tree_tag_set(struct radix_tree_root *root, diff --git a/include/linux/raid/bitmap.h b/include/linux/raid/bitmap.h deleted file mode 100644 index e98900671ca..00000000000 --- a/include/linux/raid/bitmap.h +++ /dev/null @@ -1,288 +0,0 @@ -/* - * bitmap.h: Copyright (C) Peter T. Breuer (ptb@ot.uc3m.es) 2003 - * - * additions: Copyright (C) 2003-2004, Paul Clements, SteelEye Technology, Inc. - */ -#ifndef BITMAP_H -#define BITMAP_H 1 - -#define BITMAP_MAJOR_LO 3 -/* version 4 insists the bitmap is in little-endian order - * with version 3, it is host-endian which is non-portable - */ -#define BITMAP_MAJOR_HI 4 -#define BITMAP_MAJOR_HOSTENDIAN 3 - -#define BITMAP_MINOR 39 - -/* - * in-memory bitmap: - * - * Use 16 bit block counters to track pending writes to each "chunk". - * The 2 high order bits are special-purpose, the first is a flag indicating - * whether a resync is needed. The second is a flag indicating whether a - * resync is active. - * This means that the counter is actually 14 bits: - * - * +--------+--------+------------------------------------------------+ - * | resync | resync | counter | - * | needed | active | | - * | (0-1) | (0-1) | (0-16383) | - * +--------+--------+------------------------------------------------+ - * - * The "resync needed" bit is set when: - * a '1' bit is read from storage at startup. 
- * a write request fails on some drives - * a resync is aborted on a chunk with 'resync active' set - * It is cleared (and resync-active set) when a resync starts across all drives - * of the chunk. - * - * - * The "resync active" bit is set when: - * a resync is started on all drives, and resync_needed is set. - * resync_needed will be cleared (as long as resync_active wasn't already set). - * It is cleared when a resync completes. - * - * The counter counts pending write requests, plus the on-disk bit. - * When the counter is '1' and the resync bits are clear, the on-disk - * bit can be cleared aswell, thus setting the counter to 0. - * When we set a bit, or in the counter (to start a write), if the fields is - * 0, we first set the disk bit and set the counter to 1. - * - * If the counter is 0, the on-disk bit is clear and the stipe is clean - * Anything that dirties the stipe pushes the counter to 2 (at least) - * and sets the on-disk bit (lazily). - * If a periodic sweep find the counter at 2, it is decremented to 1. - * If the sweep find the counter at 1, the on-disk bit is cleared and the - * counter goes to zero. - * - * Also, we'll hijack the "map" pointer itself and use it as two 16 bit block - * counters as a fallback when "page" memory cannot be allocated: - * - * Normal case (page memory allocated): - * - * page pointer (32-bit) - * - * [ ] ------+ - * | - * +-------> [ ][ ]..[ ] (4096 byte page == 2048 counters) - * c1 c2 c2048 - * - * Hijacked case (page memory allocation failed): - * - * hijacked page pointer (32-bit) - * - * [ ][ ] (no page memory allocated) - * counter #1 (16-bit) counter #2 (16-bit) - * - */ - -#ifdef __KERNEL__ - -#define PAGE_BITS (PAGE_SIZE << 3) -#define PAGE_BIT_SHIFT (PAGE_SHIFT + 3) - -typedef __u16 bitmap_counter_t; -#define COUNTER_BITS 16 -#define COUNTER_BIT_SHIFT 4 -#define COUNTER_BYTE_RATIO (COUNTER_BITS / 8) -#define COUNTER_BYTE_SHIFT (COUNTER_BIT_SHIFT - 3) - -#define NEEDED_MASK ((bitmap_counter_t) (1 << (COUNTER_BITS - 1))) -#define RESYNC_MASK ((bitmap_counter_t) (1 << (COUNTER_BITS - 2))) -#define COUNTER_MAX ((bitmap_counter_t) RESYNC_MASK - 1) -#define NEEDED(x) (((bitmap_counter_t) x) & NEEDED_MASK) -#define RESYNC(x) (((bitmap_counter_t) x) & RESYNC_MASK) -#define COUNTER(x) (((bitmap_counter_t) x) & COUNTER_MAX) - -/* how many counters per page? */ -#define PAGE_COUNTER_RATIO (PAGE_BITS / COUNTER_BITS) -/* same, except a shift value for more efficient bitops */ -#define PAGE_COUNTER_SHIFT (PAGE_BIT_SHIFT - COUNTER_BIT_SHIFT) -/* same, except a mask value for more efficient bitops */ -#define PAGE_COUNTER_MASK (PAGE_COUNTER_RATIO - 1) - -#define BITMAP_BLOCK_SIZE 512 -#define BITMAP_BLOCK_SHIFT 9 - -/* how many blocks per chunk? (this is variable) */ -#define CHUNK_BLOCK_RATIO(bitmap) ((bitmap)->chunksize >> BITMAP_BLOCK_SHIFT) -#define CHUNK_BLOCK_SHIFT(bitmap) ((bitmap)->chunkshift - BITMAP_BLOCK_SHIFT) -#define CHUNK_BLOCK_MASK(bitmap) (CHUNK_BLOCK_RATIO(bitmap) - 1) - -/* when hijacked, the counters and bits represent even larger "chunks" */ -/* there will be 1024 chunks represented by each counter in the page pointers */ -#define PAGEPTR_BLOCK_RATIO(bitmap) \ - (CHUNK_BLOCK_RATIO(bitmap) << PAGE_COUNTER_SHIFT >> 1) -#define PAGEPTR_BLOCK_SHIFT(bitmap) \ - (CHUNK_BLOCK_SHIFT(bitmap) + PAGE_COUNTER_SHIFT - 1) -#define PAGEPTR_BLOCK_MASK(bitmap) (PAGEPTR_BLOCK_RATIO(bitmap) - 1) - -/* - * on-disk bitmap: - * - * Use one bit per "chunk" (block set). We do the disk I/O on the bitmap - * file a page at a time. 
There's a superblock at the start of the file. - */ - -/* map chunks (bits) to file pages - offset by the size of the superblock */ -#define CHUNK_BIT_OFFSET(chunk) ((chunk) + (sizeof(bitmap_super_t) << 3)) - -#endif - -/* - * bitmap structures: - */ - -#define BITMAP_MAGIC 0x6d746962 - -/* use these for bitmap->flags and bitmap->sb->state bit-fields */ -enum bitmap_state { - BITMAP_STALE = 0x002, /* the bitmap file is out of date or had -EIO */ - BITMAP_WRITE_ERROR = 0x004, /* A write error has occurred */ - BITMAP_HOSTENDIAN = 0x8000, -}; - -/* the superblock at the front of the bitmap file -- little endian */ -typedef struct bitmap_super_s { - __le32 magic; /* 0 BITMAP_MAGIC */ - __le32 version; /* 4 the bitmap major for now, could change... */ - __u8 uuid[16]; /* 8 128 bit uuid - must match md device uuid */ - __le64 events; /* 24 event counter for the bitmap (1)*/ - __le64 events_cleared;/*32 event counter when last bit cleared (2) */ - __le64 sync_size; /* 40 the size of the md device's sync range(3) */ - __le32 state; /* 48 bitmap state information */ - __le32 chunksize; /* 52 the bitmap chunk size in bytes */ - __le32 daemon_sleep; /* 56 seconds between disk flushes */ - __le32 write_behind; /* 60 number of outstanding write-behind writes */ - - __u8 pad[256 - 64]; /* set to zero */ -} bitmap_super_t; - -/* notes: - * (1) This event counter is updated before the eventcounter in the md superblock - * When a bitmap is loaded, it is only accepted if this event counter is equal - * to, or one greater than, the event counter in the superblock. - * (2) This event counter is updated when the other one is *if*and*only*if* the - * array is not degraded. As bits are not cleared when the array is degraded, - * this represents the last time that any bits were cleared. - * If a device is being added that has an event count with this value or - * higher, it is accepted as conforming to the bitmap. - * (3)This is the number of sectors represented by the bitmap, and is the range that - * resync happens across. For raid1 and raid5/6 it is the size of individual - * devices. For raid10 it is the size of the array. - */ - -#ifdef __KERNEL__ - -/* the in-memory bitmap is represented by bitmap_pages */ -struct bitmap_page { - /* - * map points to the actual memory page - */ - char *map; - /* - * in emergencies (when map cannot be alloced), hijack the map - * pointer and use it as two counters itself - */ - unsigned int hijacked:1; - /* - * count of dirty bits on the page - */ - unsigned int count:31; -}; - -/* keep track of bitmap file pages that have pending writes on them */ -struct page_list { - struct list_head list; - struct page *page; -}; - -/* the main bitmap structure - one per mddev */ -struct bitmap { - struct bitmap_page *bp; - unsigned long pages; /* total number of pages in the bitmap */ - unsigned long missing_pages; /* number of pages not yet allocated */ - - mddev_t *mddev; /* the md device that the bitmap is for */ - - int counter_bits; /* how many bits per block counter */ - - /* bitmap chunksize -- how much data does each bit represent? */ - unsigned long chunksize; - unsigned long chunkshift; /* chunksize = 2^chunkshift (for bitops) */ - unsigned long chunks; /* total number of data chunks for the array */ - - /* We hold a count on the chunk currently being synced, and drop - * it when the last block is started. If the resync is aborted - * midway, we need to be able to drop that count, so we remember - * the counted chunk.. 
- */ - unsigned long syncchunk; - - __u64 events_cleared; - int need_sync; - - /* bitmap spinlock */ - spinlock_t lock; - - long offset; /* offset from superblock if file is NULL */ - struct file *file; /* backing disk file */ - struct page *sb_page; /* cached copy of the bitmap file superblock */ - struct page **filemap; /* list of cache pages for the file */ - unsigned long *filemap_attr; /* attributes associated w/ filemap pages */ - unsigned long file_pages; /* number of pages in the file */ - int last_page_size; /* bytes in the last page */ - - unsigned long flags; - - int allclean; - - unsigned long max_write_behind; /* write-behind mode */ - atomic_t behind_writes; - - /* - * the bitmap daemon - periodically wakes up and sweeps the bitmap - * file, cleaning up bits and flushing out pages to disk as necessary - */ - unsigned long daemon_lastrun; /* jiffies of last run */ - unsigned long daemon_sleep; /* how many seconds between updates? */ - unsigned long last_end_sync; /* when we lasted called end_sync to - * update bitmap with resync progress */ - - atomic_t pending_writes; /* pending writes to the bitmap file */ - wait_queue_head_t write_wait; - wait_queue_head_t overflow_wait; - -}; - -/* the bitmap API */ - -/* these are used only by md/bitmap */ -int bitmap_create(mddev_t *mddev); -void bitmap_flush(mddev_t *mddev); -void bitmap_destroy(mddev_t *mddev); - -void bitmap_print_sb(struct bitmap *bitmap); -void bitmap_update_sb(struct bitmap *bitmap); - -int bitmap_setallbits(struct bitmap *bitmap); -void bitmap_write_all(struct bitmap *bitmap); - -void bitmap_dirty_bits(struct bitmap *bitmap, unsigned long s, unsigned long e); - -/* these are exported */ -int bitmap_startwrite(struct bitmap *bitmap, sector_t offset, - unsigned long sectors, int behind); -void bitmap_endwrite(struct bitmap *bitmap, sector_t offset, - unsigned long sectors, int success, int behind); -int bitmap_start_sync(struct bitmap *bitmap, sector_t offset, int *blocks, int degraded); -void bitmap_end_sync(struct bitmap *bitmap, sector_t offset, int *blocks, int aborted); -void bitmap_close_sync(struct bitmap *bitmap); -void bitmap_cond_end_sync(struct bitmap *bitmap, sector_t sector); - -void bitmap_unplug(struct bitmap *bitmap); -void bitmap_daemon_work(struct bitmap *bitmap); -#endif - -#endif diff --git a/include/linux/raid/linear.h b/include/linux/raid/linear.h deleted file mode 100644 index f38b9c586af..00000000000 --- a/include/linux/raid/linear.h +++ /dev/null @@ -1,31 +0,0 @@ -#ifndef _LINEAR_H -#define _LINEAR_H - -#include <linux/raid/md.h> - -struct dev_info { - mdk_rdev_t *rdev; - sector_t num_sectors; - sector_t start_sector; -}; - -typedef struct dev_info dev_info_t; - -struct linear_private_data -{ - struct linear_private_data *prev; /* earlier version */ - dev_info_t **hash_table; - sector_t spacing; - sector_t array_sectors; - int sector_shift; /* shift before dividing - * by spacing - */ - dev_info_t disks[0]; -}; - - -typedef struct linear_private_data linear_conf_t; - -#define mddev_to_conf(mddev) ((linear_conf_t *) mddev->private) - -#endif diff --git a/include/linux/raid/md.h b/include/linux/raid/md.h deleted file mode 100644 index 82bea14cae1..00000000000 --- a/include/linux/raid/md.h +++ /dev/null @@ -1,81 +0,0 @@ -/* - md.h : Multiple Devices driver for Linux - Copyright (C) 1996-98 Ingo Molnar, Gadi Oxman - Copyright (C) 1994-96 Marc ZYNGIER - <zyngier@ufr-info-p7.ibp.fr> or - <maz@gloups.fdn.fr> - - This program is free software; you can redistribute it and/or modify - it under the 
terms of the GNU General Public License as published by - the Free Software Foundation; either version 2, or (at your option) - any later version. - - You should have received a copy of the GNU General Public License - (for example /usr/src/linux/COPYING); if not, write to the Free - Software Foundation, Inc., 675 Mass Ave, Cambridge, MA 02139, USA. -*/ - -#ifndef _MD_H -#define _MD_H - -#include <linux/blkdev.h> -#include <linux/seq_file.h> - -/* - * 'md_p.h' holds the 'physical' layout of RAID devices - * 'md_u.h' holds the user <=> kernel API - * - * 'md_k.h' holds kernel internal definitions - */ - -#include <linux/raid/md_p.h> -#include <linux/raid/md_u.h> -#include <linux/raid/md_k.h> - -#ifdef CONFIG_MD - -/* - * Different major versions are not compatible. - * Different minor versions are only downward compatible. - * Different patchlevel versions are downward and upward compatible. - */ -#define MD_MAJOR_VERSION 0 -#define MD_MINOR_VERSION 90 -/* - * MD_PATCHLEVEL_VERSION indicates kernel functionality. - * >=1 means different superblock formats are selectable using SET_ARRAY_INFO - * and major_version/minor_version accordingly - * >=2 means that Internal bitmaps are supported by setting MD_SB_BITMAP_PRESENT - * in the super status byte - * >=3 means that bitmap superblock version 4 is supported, which uses - * little-ending representation rather than host-endian - */ -#define MD_PATCHLEVEL_VERSION 3 - -extern int mdp_major; - -extern int register_md_personality(struct mdk_personality *p); -extern int unregister_md_personality(struct mdk_personality *p); -extern mdk_thread_t * md_register_thread(void (*run) (mddev_t *mddev), - mddev_t *mddev, const char *name); -extern void md_unregister_thread(mdk_thread_t *thread); -extern void md_wakeup_thread(mdk_thread_t *thread); -extern void md_check_recovery(mddev_t *mddev); -extern void md_write_start(mddev_t *mddev, struct bio *bi); -extern void md_write_end(mddev_t *mddev); -extern void md_done_sync(mddev_t *mddev, int blocks, int ok); -extern void md_error(mddev_t *mddev, mdk_rdev_t *rdev); - -extern void md_super_write(mddev_t *mddev, mdk_rdev_t *rdev, - sector_t sector, int size, struct page *page); -extern void md_super_wait(mddev_t *mddev); -extern int sync_page_io(struct block_device *bdev, sector_t sector, int size, - struct page *page, int rw); -extern void md_do_sync(mddev_t *mddev); -extern void md_new_event(mddev_t *mddev); -extern int md_allow_write(mddev_t *mddev); -extern void md_wait_for_blocked_rdev(mdk_rdev_t *rdev, mddev_t *mddev); - -#endif /* CONFIG_MD */ -#endif - diff --git a/include/linux/raid/md_k.h b/include/linux/raid/md_k.h deleted file mode 100644 index 9743e4dbc91..00000000000 --- a/include/linux/raid/md_k.h +++ /dev/null @@ -1,402 +0,0 @@ -/* - md_k.h : kernel internal structure of the Linux MD driver - Copyright (C) 1996-98 Ingo Molnar, Gadi Oxman - - This program is free software; you can redistribute it and/or modify - it under the terms of the GNU General Public License as published by - the Free Software Foundation; either version 2, or (at your option) - any later version. - - You should have received a copy of the GNU General Public License - (for example /usr/src/linux/COPYING); if not, write to the Free - Software Foundation, Inc., 675 Mass Ave, Cambridge, MA 02139, USA. -*/ - -#ifndef _MD_K_H -#define _MD_K_H - -/* and dm-bio-list.h is not under include/linux because.... ??? 
*/ -#include "../../../drivers/md/dm-bio-list.h" - -#ifdef CONFIG_BLOCK - -#define LEVEL_MULTIPATH (-4) -#define LEVEL_LINEAR (-1) -#define LEVEL_FAULTY (-5) - -/* we need a value for 'no level specified' and 0 - * means 'raid0', so we need something else. This is - * for internal use only - */ -#define LEVEL_NONE (-1000000) - -#define MaxSector (~(sector_t)0) - -typedef struct mddev_s mddev_t; -typedef struct mdk_rdev_s mdk_rdev_t; - -/* - * options passed in raidrun: - */ - -/* Currently this must fit in an 'int' */ -#define MAX_CHUNK_SIZE (1<<30) - -/* - * MD's 'extended' device - */ -struct mdk_rdev_s -{ - struct list_head same_set; /* RAID devices within the same set */ - - sector_t size; /* Device size (in blocks) */ - mddev_t *mddev; /* RAID array if running */ - long last_events; /* IO event timestamp */ - - struct block_device *bdev; /* block device handle */ - - struct page *sb_page; - int sb_loaded; - __u64 sb_events; - sector_t data_offset; /* start of data in array */ - sector_t sb_start; /* offset of the super block (in 512byte sectors) */ - int sb_size; /* bytes in the superblock */ - int preferred_minor; /* autorun support */ - - struct kobject kobj; - - /* A device can be in one of three states based on two flags: - * Not working: faulty==1 in_sync==0 - * Fully working: faulty==0 in_sync==1 - * Working, but not - * in sync with array - * faulty==0 in_sync==0 - * - * It can never have faulty==1, in_sync==1 - * This reduces the burden of testing multiple flags in many cases - */ - - unsigned long flags; -#define Faulty 1 /* device is known to have a fault */ -#define In_sync 2 /* device is in_sync with rest of array */ -#define WriteMostly 4 /* Avoid reading if at all possible */ -#define BarriersNotsupp 5 /* BIO_RW_BARRIER is not supported */ -#define AllReserved 6 /* If whole device is reserved for - * one array */ -#define AutoDetected 7 /* added by auto-detect */ -#define Blocked 8 /* An error occured on an externally - * managed array, don't allow writes - * until it is cleared */ -#define StateChanged 9 /* Faulty or Blocked has changed during - * interrupt, so it needs to be - * notified by the thread */ - wait_queue_head_t blocked_wait; - - int desc_nr; /* descriptor index in the superblock */ - int raid_disk; /* role of device in array */ - int saved_raid_disk; /* role that device used to have in the - * array and could again if we did a partial - * resync from the bitmap - */ - sector_t recovery_offset;/* If this device has been partially - * recovered, this is where we were - * up to. - */ - - atomic_t nr_pending; /* number of pending requests. - * only maintained for arrays that - * support hot removal - */ - atomic_t read_errors; /* number of consecutive read errors that - * we have tried to ignore. - */ - atomic_t corrected_errors; /* number of corrected read errors, - * for reporting to userspace and storing - * in superblock. 
- */ - struct work_struct del_work; /* used for delayed sysfs removal */ - - struct sysfs_dirent *sysfs_state; /* handle for 'state' - * sysfs entry */ -}; - -struct mddev_s -{ - void *private; - struct mdk_personality *pers; - dev_t unit; - int md_minor; - struct list_head disks; - unsigned long flags; -#define MD_CHANGE_DEVS 0 /* Some device status has changed */ -#define MD_CHANGE_CLEAN 1 /* transition to or from 'clean' */ -#define MD_CHANGE_PENDING 2 /* superblock update in progress */ - - int ro; - - struct gendisk *gendisk; - - struct kobject kobj; - int hold_active; -#define UNTIL_IOCTL 1 -#define UNTIL_STOP 2 - - /* Superblock information */ - int major_version, - minor_version, - patch_version; - int persistent; - int external; /* metadata is - * managed externally */ - char metadata_type[17]; /* externally set*/ - int chunk_size; - time_t ctime, utime; - int level, layout; - char clevel[16]; - int raid_disks; - int max_disks; - sector_t size; /* used size of component devices */ - sector_t array_sectors; /* exported array size */ - __u64 events; - - char uuid[16]; - - /* If the array is being reshaped, we need to record the - * new shape and an indication of where we are up to. - * This is written to the superblock. - * If reshape_position is MaxSector, then no reshape is happening (yet). - */ - sector_t reshape_position; - int delta_disks, new_level, new_layout, new_chunk; - - struct mdk_thread_s *thread; /* management thread */ - struct mdk_thread_s *sync_thread; /* doing resync or reconstruct */ - sector_t curr_resync; /* last block scheduled */ - unsigned long resync_mark; /* a recent timestamp */ - sector_t resync_mark_cnt;/* blocks written at resync_mark */ - sector_t curr_mark_cnt; /* blocks scheduled now */ - - sector_t resync_max_sectors; /* may be set by personality */ - - sector_t resync_mismatches; /* count of sectors where - * parity/replica mismatch found - */ - - /* allow user-space to request suspension of IO to regions of the array */ - sector_t suspend_lo; - sector_t suspend_hi; - /* if zero, use the system-wide default */ - int sync_speed_min; - int sync_speed_max; - - /* resync even though the same disks are shared among md-devices */ - int parallel_resync; - - int ok_start_degraded; - /* recovery/resync flags - * NEEDED: we might need to start a resync/recover - * RUNNING: a thread is running, or about to be started - * SYNC: actually doing a resync, not a recovery - * RECOVER: doing recovery, or need to try it. - * INTR: resync needs to be aborted for some reason - * DONE: thread is done and is waiting to be reaped - * REQUEST: user-space has requested a sync (used with SYNC) - * CHECK: user-space request for for check-only, no repair - * RESHAPE: A reshape is happening - * - * If neither SYNC or RESHAPE are set, then it is a recovery. 
- */ -#define MD_RECOVERY_RUNNING 0 -#define MD_RECOVERY_SYNC 1 -#define MD_RECOVERY_RECOVER 2 -#define MD_RECOVERY_INTR 3 -#define MD_RECOVERY_DONE 4 -#define MD_RECOVERY_NEEDED 5 -#define MD_RECOVERY_REQUESTED 6 -#define MD_RECOVERY_CHECK 7 -#define MD_RECOVERY_RESHAPE 8 -#define MD_RECOVERY_FROZEN 9 - - unsigned long recovery; - int recovery_disabled; /* if we detect that recovery - * will always fail, set this - * so we don't loop trying */ - - int in_sync; /* know to not need resync */ - struct mutex reconfig_mutex; - atomic_t active; /* general refcount */ - atomic_t openers; /* number of active opens */ - - int changed; /* true if we might need to reread partition info */ - int degraded; /* whether md should consider - * adding a spare - */ - int barriers_work; /* initialised to true, cleared as soon - * as a barrier request to slave - * fails. Only supported - */ - struct bio *biolist; /* bios that need to be retried - * because BIO_RW_BARRIER is not supported - */ - - atomic_t recovery_active; /* blocks scheduled, but not written */ - wait_queue_head_t recovery_wait; - sector_t recovery_cp; - sector_t resync_min; /* user requested sync - * starts here */ - sector_t resync_max; /* resync should pause - * when it gets here */ - - struct sysfs_dirent *sysfs_state; /* handle for 'array_state' - * file in sysfs. - */ - struct sysfs_dirent *sysfs_action; /* handle for 'sync_action' */ - - struct work_struct del_work; /* used for delayed sysfs removal */ - - spinlock_t write_lock; - wait_queue_head_t sb_wait; /* for waiting on superblock updates */ - atomic_t pending_writes; /* number of active superblock writes */ - - unsigned int safemode; /* if set, update "clean" superblock - * when no writes pending. - */ - unsigned int safemode_delay; - struct timer_list safemode_timer; - atomic_t writes_pending; - struct request_queue *queue; /* for plugging ... */ - - atomic_t write_behind; /* outstanding async IO */ - unsigned int max_write_behind; /* 0 = sync */ - - struct bitmap *bitmap; /* the bitmap for the device */ - struct file *bitmap_file; /* the bitmap file */ - long bitmap_offset; /* offset from superblock of - * start of bitmap. May be - * negative, but not '0' - */ - long default_bitmap_offset; /* this is the offset to use when - * hot-adding a bitmap. It should - * eventually be settable by sysfs. 
- */ - - struct list_head all_mddevs; -}; - - -static inline void rdev_dec_pending(mdk_rdev_t *rdev, mddev_t *mddev) -{ - int faulty = test_bit(Faulty, &rdev->flags); - if (atomic_dec_and_test(&rdev->nr_pending) && faulty) - set_bit(MD_RECOVERY_NEEDED, &mddev->recovery); -} - -static inline void md_sync_acct(struct block_device *bdev, unsigned long nr_sectors) -{ - atomic_add(nr_sectors, &bdev->bd_contains->bd_disk->sync_io); -} - -struct mdk_personality -{ - char *name; - int level; - struct list_head list; - struct module *owner; - int (*make_request)(struct request_queue *q, struct bio *bio); - int (*run)(mddev_t *mddev); - int (*stop)(mddev_t *mddev); - void (*status)(struct seq_file *seq, mddev_t *mddev); - /* error_handler must set ->faulty and clear ->in_sync - * if appropriate, and should abort recovery if needed - */ - void (*error_handler)(mddev_t *mddev, mdk_rdev_t *rdev); - int (*hot_add_disk) (mddev_t *mddev, mdk_rdev_t *rdev); - int (*hot_remove_disk) (mddev_t *mddev, int number); - int (*spare_active) (mddev_t *mddev); - sector_t (*sync_request)(mddev_t *mddev, sector_t sector_nr, int *skipped, int go_faster); - int (*resize) (mddev_t *mddev, sector_t sectors); - int (*check_reshape) (mddev_t *mddev); - int (*start_reshape) (mddev_t *mddev); - int (*reconfig) (mddev_t *mddev, int layout, int chunk_size); - /* quiesce moves between quiescence states - * 0 - fully active - * 1 - no new requests allowed - * others - reserved - */ - void (*quiesce) (mddev_t *mddev, int state); -}; - - -struct md_sysfs_entry { - struct attribute attr; - ssize_t (*show)(mddev_t *, char *); - ssize_t (*store)(mddev_t *, const char *, size_t); -}; - - -static inline char * mdname (mddev_t * mddev) -{ - return mddev->gendisk ? mddev->gendisk->disk_name : "mdX"; -} - -/* - * iterates through some rdev ringlist. It's safe to remove the - * current 'rdev'. Dont touch 'tmp' though. - */ -#define rdev_for_each_list(rdev, tmp, head) \ - list_for_each_entry_safe(rdev, tmp, head, same_set) - -/* - * iterates through the 'same array disks' ringlist - */ -#define rdev_for_each(rdev, tmp, mddev) \ - list_for_each_entry_safe(rdev, tmp, &((mddev)->disks), same_set) - -#define rdev_for_each_rcu(rdev, mddev) \ - list_for_each_entry_rcu(rdev, &((mddev)->disks), same_set) - -typedef struct mdk_thread_s { - void (*run) (mddev_t *mddev); - mddev_t *mddev; - wait_queue_head_t wqueue; - unsigned long flags; - struct task_struct *tsk; - unsigned long timeout; -} mdk_thread_t; - -#define THREAD_WAKEUP 0 - -#define __wait_event_lock_irq(wq, condition, lock, cmd) \ -do { \ - wait_queue_t __wait; \ - init_waitqueue_entry(&__wait, current); \ - \ - add_wait_queue(&wq, &__wait); \ - for (;;) { \ - set_current_state(TASK_UNINTERRUPTIBLE); \ - if (condition) \ - break; \ - spin_unlock_irq(&lock); \ - cmd; \ - schedule(); \ - spin_lock_irq(&lock); \ - } \ - current->state = TASK_RUNNING; \ - remove_wait_queue(&wq, &__wait); \ -} while (0) - -#define wait_event_lock_irq(wq, condition, lock, cmd) \ -do { \ - if (condition) \ - break; \ - __wait_event_lock_irq(wq, condition, lock, cmd); \ -} while (0) - -static inline void safe_put_page(struct page *p) -{ - if (p) put_page(p); -} - -#endif /* CONFIG_BLOCK */ -#endif - diff --git a/include/linux/raid/md_p.h b/include/linux/raid/md_p.h index 9491026afe6..ffa2efbbe38 100644 --- a/include/linux/raid/md_p.h +++ b/include/linux/raid/md_p.h @@ -15,6 +15,8 @@ #ifndef _MD_P_H #define _MD_P_H +#include <linux/types.h> + /* * RAID superblock. 
* @@ -230,7 +232,7 @@ struct mdp_superblock_1 { __le64 reshape_position; /* next address in array-space for reshape */ __le32 delta_disks; /* change in number of raid_disks */ __le32 new_layout; /* new layout */ - __le32 new_chunk; /* new chunk size (bytes) */ + __le32 new_chunk; /* new chunk size (512byte sectors) */ __u8 pad1[128-124]; /* set to 0 when written */ /* constant this-device information - 64 bytes */ diff --git a/include/linux/raid/md_u.h b/include/linux/raid/md_u.h index 7192035fc4b..fb1abb3367e 100644 --- a/include/linux/raid/md_u.h +++ b/include/linux/raid/md_u.h @@ -15,6 +15,24 @@ #ifndef _MD_U_H #define _MD_U_H +/* + * Different major versions are not compatible. + * Different minor versions are only downward compatible. + * Different patchlevel versions are downward and upward compatible. + */ +#define MD_MAJOR_VERSION 0 +#define MD_MINOR_VERSION 90 +/* + * MD_PATCHLEVEL_VERSION indicates kernel functionality. + * >=1 means different superblock formats are selectable using SET_ARRAY_INFO + * and major_version/minor_version accordingly + * >=2 means that Internal bitmaps are supported by setting MD_SB_BITMAP_PRESENT + * in the super status byte + * >=3 means that bitmap superblock version 4 is supported, which uses + * little-ending representation rather than host-endian + */ +#define MD_PATCHLEVEL_VERSION 3 + /* ioctls */ /* status */ @@ -46,6 +64,12 @@ #define STOP_ARRAY_RO _IO (MD_MAJOR, 0x33) #define RESTART_ARRAY_RW _IO (MD_MAJOR, 0x34) +/* 63 partitions with the alternate major number (mdp) */ +#define MdpMinorShift 6 +#ifdef __KERNEL__ +extern int mdp_major; +#endif + typedef struct mdu_version_s { int major; int minor; @@ -85,6 +109,17 @@ typedef struct mdu_array_info_s { } mdu_array_info_t; +/* non-obvious values for 'level' */ +#define LEVEL_MULTIPATH (-4) +#define LEVEL_LINEAR (-1) +#define LEVEL_FAULTY (-5) + +/* we need a value for 'no level specified' and 0 + * means 'raid0', so we need something else. This is + * for internal use only + */ +#define LEVEL_NONE (-1000000) + typedef struct mdu_disk_info_s { /* * configuration/status of one particular disk diff --git a/include/linux/raid/multipath.h b/include/linux/raid/multipath.h deleted file mode 100644 index 6f53fc177a4..00000000000 --- a/include/linux/raid/multipath.h +++ /dev/null @@ -1,42 +0,0 @@ -#ifndef _MULTIPATH_H -#define _MULTIPATH_H - -#include <linux/raid/md.h> - -struct multipath_info { - mdk_rdev_t *rdev; -}; - -struct multipath_private_data { - mddev_t *mddev; - struct multipath_info *multipaths; - int raid_disks; - int working_disks; - spinlock_t device_lock; - struct list_head retry_list; - - mempool_t *pool; -}; - -typedef struct multipath_private_data multipath_conf_t; - -/* - * this is the only point in the RAID code where we violate - * C type safety. mddev->private is an 'opaque' pointer. - */ -#define mddev_to_conf(mddev) ((multipath_conf_t *) mddev->private) - -/* - * this is our 'private' 'collective' MULTIPATH buffer head. - * it contains information about what kind of IO operations were started - * for this MULTIPATH operation, and about their status: - */ - -struct multipath_bh { - mddev_t *mddev; - struct bio *master_bio; - struct bio bio; - int path; - struct list_head retry_list; -}; -#endif diff --git a/include/linux/raid/pq.h b/include/linux/raid/pq.h new file mode 100644 index 00000000000..d92480f8285 --- /dev/null +++ b/include/linux/raid/pq.h @@ -0,0 +1,132 @@ +/* -*- linux-c -*- ------------------------------------------------------- * + * + * Copyright 2003 H. 
Peter Anvin - All Rights Reserved + * + * This program is free software; you can redistribute it and/or modify + * it under the terms of the GNU General Public License as published by + * the Free Software Foundation, Inc., 53 Temple Place Ste 330, + * Boston MA 02111-1307, USA; either version 2 of the License, or + * (at your option) any later version; incorporated herein by reference. + * + * ----------------------------------------------------------------------- */ + +#ifndef LINUX_RAID_RAID6_H +#define LINUX_RAID_RAID6_H + +#ifdef __KERNEL__ + +/* Set to 1 to use kernel-wide empty_zero_page */ +#define RAID6_USE_EMPTY_ZERO_PAGE 0 +#include <linux/blkdev.h> + +/* We need a pre-zeroed page... if we don't want to use the kernel-provided + one define it here */ +#if RAID6_USE_EMPTY_ZERO_PAGE +# define raid6_empty_zero_page empty_zero_page +#else +extern const char raid6_empty_zero_page[PAGE_SIZE]; +#endif + +#else /* ! __KERNEL__ */ +/* Used for testing in user space */ + +#include <errno.h> +#include <inttypes.h> +#include <limits.h> +#include <stddef.h> +#include <sys/mman.h> +#include <sys/types.h> + +/* Not standard, but glibc defines it */ +#define BITS_PER_LONG __WORDSIZE + +typedef uint8_t u8; +typedef uint16_t u16; +typedef uint32_t u32; +typedef uint64_t u64; + +#ifndef PAGE_SIZE +# define PAGE_SIZE 4096 +#endif +extern const char raid6_empty_zero_page[PAGE_SIZE]; + +#define __init +#define __exit +#define __attribute_const__ __attribute__((const)) +#define noinline __attribute__((noinline)) + +#define preempt_enable() +#define preempt_disable() +#define cpu_has_feature(x) 1 +#define enable_kernel_altivec() +#define disable_kernel_altivec() + +#define EXPORT_SYMBOL(sym) +#define MODULE_LICENSE(licence) +#define subsys_initcall(x) +#define module_exit(x) +#endif /* __KERNEL__ */ + +/* Routine choices */ +struct raid6_calls { + void (*gen_syndrome)(int, size_t, void **); + int (*valid)(void); /* Returns 1 if this routine set is usable */ + const char *name; /* Name of this routine set */ + int prefer; /* Has special performance attribute */ +}; + +/* Selected algorithm */ +extern struct raid6_calls raid6_call; + +/* Algorithm list */ +extern const struct raid6_calls * const raid6_algos[]; +int raid6_select_algo(void); + +/* Return values from chk_syndrome */ +#define RAID6_OK 0 +#define RAID6_P_BAD 1 +#define RAID6_Q_BAD 2 +#define RAID6_PQ_BAD 3 + +/* Galois field tables */ +extern const u8 raid6_gfmul[256][256] __attribute__((aligned(256))); +extern const u8 raid6_gfexp[256] __attribute__((aligned(256))); +extern const u8 raid6_gfinv[256] __attribute__((aligned(256))); +extern const u8 raid6_gfexi[256] __attribute__((aligned(256))); + +/* Recovery routines */ +void raid6_2data_recov(int disks, size_t bytes, int faila, int failb, + void **ptrs); +void raid6_datap_recov(int disks, size_t bytes, int faila, void **ptrs); +void raid6_dual_recov(int disks, size_t bytes, int faila, int failb, + void **ptrs); + +/* Some definitions to allow code to be compiled for testing in userspace */ +#ifndef __KERNEL__ + +# define jiffies raid6_jiffies() +# define printk printf +# define GFP_KERNEL 0 +# define __get_free_pages(x, y) ((unsigned long)mmap(NULL, PAGE_SIZE << (y), \ + PROT_READ|PROT_WRITE, \ + MAP_PRIVATE|MAP_ANONYMOUS,\ + 0, 0)) +# define free_pages(x, y) munmap((void *)(x), (y)*PAGE_SIZE) + +static inline void cpu_relax(void) +{ + /* Nothing */ +} + +#undef HZ +#define HZ 1000 +static inline uint32_t raid6_jiffies(void) +{ + struct timeval tv; + gettimeofday(&tv, NULL); + return 
tv.tv_sec*1000 + tv.tv_usec/1000; +} + +#endif /* ! __KERNEL__ */ + +#endif /* LINUX_RAID_RAID6_H */ diff --git a/include/linux/raid/raid0.h b/include/linux/raid/raid0.h deleted file mode 100644 index fd42aa87c39..00000000000 --- a/include/linux/raid/raid0.h +++ /dev/null @@ -1,30 +0,0 @@ -#ifndef _RAID0_H -#define _RAID0_H - -#include <linux/raid/md.h> - -struct strip_zone -{ - sector_t zone_start; /* Zone offset in md_dev (in sectors) */ - sector_t dev_start; /* Zone offset in real dev (in sectors) */ - sector_t sectors; /* Zone size in sectors */ - int nb_dev; /* # of devices attached to the zone */ - mdk_rdev_t **dev; /* Devices attached to the zone */ -}; - -struct raid0_private_data -{ - struct strip_zone **hash_table; /* Table of indexes into strip_zone */ - struct strip_zone *strip_zone; - mdk_rdev_t **devlist; /* lists of rdevs, pointed to by strip_zone->dev */ - int nr_strip_zones; - - sector_t spacing; - int sector_shift; /* shift this before divide by spacing */ -}; - -typedef struct raid0_private_data raid0_conf_t; - -#define mddev_to_conf(mddev) ((raid0_conf_t *) mddev->private) - -#endif diff --git a/include/linux/raid/raid1.h b/include/linux/raid/raid1.h deleted file mode 100644 index 0a9ba7c3302..00000000000 --- a/include/linux/raid/raid1.h +++ /dev/null @@ -1,134 +0,0 @@ -#ifndef _RAID1_H -#define _RAID1_H - -#include <linux/raid/md.h> - -typedef struct mirror_info mirror_info_t; - -struct mirror_info { - mdk_rdev_t *rdev; - sector_t head_position; -}; - -/* - * memory pools need a pointer to the mddev, so they can force an unplug - * when memory is tight, and a count of the number of drives that the - * pool was allocated for, so they know how much to allocate and free. - * mddev->raid_disks cannot be used, as it can change while a pool is active - * These two datums are stored in a kmalloced struct. - */ - -struct pool_info { - mddev_t *mddev; - int raid_disks; -}; - - -typedef struct r1bio_s r1bio_t; - -struct r1_private_data_s { - mddev_t *mddev; - mirror_info_t *mirrors; - int raid_disks; - int last_used; - sector_t next_seq_sect; - spinlock_t device_lock; - - struct list_head retry_list; - /* queue pending writes and submit them on unplug */ - struct bio_list pending_bio_list; - /* queue of writes that have been unplugged */ - struct bio_list flushing_bio_list; - - /* for use when syncing mirrors: */ - - spinlock_t resync_lock; - int nr_pending; - int nr_waiting; - int nr_queued; - int barrier; - sector_t next_resync; - int fullsync; /* set to 1 if a full sync is needed, - * (fresh device added). - * Cleared when a sync completes. - */ - - wait_queue_head_t wait_barrier; - - struct pool_info *poolinfo; - - struct page *tmppage; - - mempool_t *r1bio_pool; - mempool_t *r1buf_pool; -}; - -typedef struct r1_private_data_s conf_t; - -/* - * this is the only point in the RAID code where we violate - * C type safety. mddev->private is an 'opaque' pointer. - */ -#define mddev_to_conf(mddev) ((conf_t *) mddev->private) - -/* - * this is our 'private' RAID1 bio. 
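As a minimal sketch of how the new <linux/raid/pq.h> entry points above fit together (written the way the header's user-space test support suggests): only raid6_select_algo(), raid6_call.gen_syndrome() and raid6_2data_recov() come from the header; the NDISKS value, the buffer setup and the assumption that the last two pointers carry P and Q are illustrative.

#include <string.h>
#include <linux/raid/pq.h>	/* include path as used by the user-space test build */

#define NDISKS	6		/* 4 data blocks + P + Q (illustrative) */

static char blocks[NDISKS][PAGE_SIZE] __attribute__((aligned(256)));

static void pq_sketch(void)
{
	void *ptrs[NDISKS];
	int i;

	for (i = 0; i < NDISKS; i++)
		ptrs[i] = blocks[i];

	/* Pick the fastest usable routine set; fills in raid6_call. */
	raid6_select_algo();

	/* Compute P and Q over the four data blocks. */
	raid6_call.gen_syndrome(NDISKS, PAGE_SIZE, ptrs);

	/* Pretend data disks 1 and 3 were lost, then rebuild them. */
	memset(blocks[1], 0, PAGE_SIZE);
	memset(blocks[3], 0, PAGE_SIZE);
	raid6_2data_recov(NDISKS, PAGE_SIZE, 1, 3, ptrs);
}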
- * - * it contains information about what kind of IO operations were started - * for this RAID1 operation, and about their status: - */ - -struct r1bio_s { - atomic_t remaining; /* 'have we finished' count, - * used from IRQ handlers - */ - atomic_t behind_remaining; /* number of write-behind ios remaining - * in this BehindIO request - */ - sector_t sector; - int sectors; - unsigned long state; - mddev_t *mddev; - /* - * original bio going to /dev/mdx - */ - struct bio *master_bio; - /* - * if the IO is in READ direction, then this is where we read - */ - int read_disk; - - struct list_head retry_list; - struct bitmap_update *bitmap_update; - /* - * if the IO is in WRITE direction, then multiple bios are used. - * We choose the number when they are allocated. - */ - struct bio *bios[0]; - /* DO NOT PUT ANY NEW FIELDS HERE - bios array is contiguously alloced*/ -}; - -/* when we get a read error on a read-only array, we redirect to another - * device without failing the first device, or trying to over-write to - * correct the read error. To keep track of bad blocks on a per-bio - * level, we store IO_BLOCKED in the appropriate 'bios' pointer - */ -#define IO_BLOCKED ((struct bio*)1) - -/* bits for r1bio.state */ -#define R1BIO_Uptodate 0 -#define R1BIO_IsSync 1 -#define R1BIO_Degraded 2 -#define R1BIO_BehindIO 3 -#define R1BIO_Barrier 4 -#define R1BIO_BarrierRetry 5 -/* For write-behind requests, we call bi_end_io when - * the last non-write-behind device completes, providing - * any write was successful. Otherwise we call when - * any write-behind write succeeds, otherwise we call - * with failure when last write completes (and all failed). - * Record that bi_end_io was called with this flag... - */ -#define R1BIO_Returned 6 - -#endif diff --git a/include/linux/raid/raid10.h b/include/linux/raid/raid10.h deleted file mode 100644 index e9091cfeb28..00000000000 --- a/include/linux/raid/raid10.h +++ /dev/null @@ -1,123 +0,0 @@ -#ifndef _RAID10_H -#define _RAID10_H - -#include <linux/raid/md.h> - -typedef struct mirror_info mirror_info_t; - -struct mirror_info { - mdk_rdev_t *rdev; - sector_t head_position; -}; - -typedef struct r10bio_s r10bio_t; - -struct r10_private_data_s { - mddev_t *mddev; - mirror_info_t *mirrors; - int raid_disks; - spinlock_t device_lock; - - /* geometry */ - int near_copies; /* number of copies layed out raid0 style */ - int far_copies; /* number of copies layed out - * at large strides across drives - */ - int far_offset; /* far_copies are offset by 1 stripe - * instead of many - */ - int copies; /* near_copies * far_copies. - * must be <= raid_disks - */ - sector_t stride; /* distance between far copies. - * This is size / far_copies unless - * far_offset, in which case it is - * 1 stripe. - */ - - int chunk_shift; /* shift from chunks to sectors */ - sector_t chunk_mask; - - struct list_head retry_list; - /* queue pending writes and submit them on unplug */ - struct bio_list pending_bio_list; - - - spinlock_t resync_lock; - int nr_pending; - int nr_waiting; - int nr_queued; - int barrier; - sector_t next_resync; - int fullsync; /* set to 1 if a full sync is needed, - * (fresh device added). - * Cleared when a sync completes. - */ - - wait_queue_head_t wait_barrier; - - mempool_t *r10bio_pool; - mempool_t *r10buf_pool; - struct page *tmppage; -}; - -typedef struct r10_private_data_s conf_t; - -/* - * this is the only point in the RAID code where we violate - * C type safety. mddev->private is an 'opaque' pointer. 
- */ -#define mddev_to_conf(mddev) ((conf_t *) mddev->private) - -/* - * this is our 'private' RAID10 bio. - * - * it contains information about what kind of IO operations were started - * for this RAID10 operation, and about their status: - */ - -struct r10bio_s { - atomic_t remaining; /* 'have we finished' count, - * used from IRQ handlers - */ - sector_t sector; /* virtual sector number */ - int sectors; - unsigned long state; - mddev_t *mddev; - /* - * original bio going to /dev/mdx - */ - struct bio *master_bio; - /* - * if the IO is in READ direction, then this is where we read - */ - int read_slot; - - struct list_head retry_list; - /* - * if the IO is in WRITE direction, then multiple bios are used, - * one for each copy. - * When resyncing we also use one for each copy. - * When reconstructing, we use 2 bios, one for read, one for write. - * We choose the number when they are allocated. - */ - struct { - struct bio *bio; - sector_t addr; - int devnum; - } devs[0]; -}; - -/* when we get a read error on a read-only array, we redirect to another - * device without failing the first device, or trying to over-write to - * correct the read error. To keep track of bad blocks on a per-bio - * level, we store IO_BLOCKED in the appropriate 'bios' pointer - */ -#define IO_BLOCKED ((struct bio*)1) - -/* bits for r10bio.state */ -#define R10BIO_Uptodate 0 -#define R10BIO_IsSync 1 -#define R10BIO_IsRecover 2 -#define R10BIO_Degraded 3 -#endif diff --git a/include/linux/raid/raid5.h b/include/linux/raid/raid5.h deleted file mode 100644 index 3b267279245..00000000000 --- a/include/linux/raid/raid5.h +++ /dev/null @@ -1,402 +0,0 @@ -#ifndef _RAID5_H -#define _RAID5_H - -#include <linux/raid/md.h> -#include <linux/raid/xor.h> - -/* - * - * Each stripe contains one buffer per disc. Each buffer can be in - * one of a number of states stored in "flags". Changes between - * these states happen *almost* exclusively under a per-stripe - * spinlock. Some very specific changes can happen in bi_end_io, and - * these are not protected by the spin lock. - * - * The flag bits that are used to represent these states are: - * R5_UPTODATE and R5_LOCKED - * - * State Empty == !UPTODATE, !LOCK - * We have no data, and there is no active request - * State Want == !UPTODATE, LOCK - * A read request is being submitted for this block - * State Dirty == UPTODATE, LOCK - * Some new data is in this buffer, and it is being written out - * State Clean == UPTODATE, !LOCK - * We have valid data which is the same as on disc - * - * The possible state transitions are: - * - * Empty -> Want - on read or write to get old data for parity calc - * Empty -> Dirty - on compute_parity to satisfy write/sync request.(RECONSTRUCT_WRITE) - * Empty -> Clean - on compute_block when computing a block for failed drive - * Want -> Empty - on failed read - * Want -> Clean - on successful completion of read request - * Dirty -> Clean - on successful completion of write request - * Dirty -> Clean - on failed write - * Clean -> Dirty - on compute_parity to satisfy write/sync (RECONSTRUCT or RMW) - * - * The Want->Empty, Want->Clean, Dirty->Clean, transitions - * all happen in b_end_io at interrupt time. - * Each sets the Uptodate bit before releasing the Lock bit. - * This leaves one multi-stage transition: - * Want->Dirty->Clean - * This is safe because thinking that a Clean buffer is actually dirty - * will at worst delay some action, and the stripe will be scheduled - * for attention after the transition is complete. 
- * - * There is one possibility that is not covered by these states. That - * is if one drive has failed and there is a spare being rebuilt. We - * can't distinguish between a clean block that has been generated - * from parity calculations, and a clean block that has been - * successfully written to the spare ( or to parity when resyncing). - * To distingush these states we have a stripe bit STRIPE_INSYNC that - * is set whenever a write is scheduled to the spare, or to the parity - * disc if there is no spare. A sync request clears this bit, and - * when we find it set with no buffers locked, we know the sync is - * complete. - * - * Buffers for the md device that arrive via make_request are attached - * to the appropriate stripe in one of two lists linked on b_reqnext. - * One list (bh_read) for read requests, one (bh_write) for write. - * There should never be more than one buffer on the two lists - * together, but we are not guaranteed of that so we allow for more. - * - * If a buffer is on the read list when the associated cache buffer is - * Uptodate, the data is copied into the read buffer and it's b_end_io - * routine is called. This may happen in the end_request routine only - * if the buffer has just successfully been read. end_request should - * remove the buffers from the list and then set the Uptodate bit on - * the buffer. Other threads may do this only if they first check - * that the Uptodate bit is set. Once they have checked that they may - * take buffers off the read queue. - * - * When a buffer on the write list is committed for write it is copied - * into the cache buffer, which is then marked dirty, and moved onto a - * third list, the written list (bh_written). Once both the parity - * block and the cached buffer are successfully written, any buffer on - * a written list can be returned with b_end_io. - * - * The write list and read list both act as fifos. The read list is - * protected by the device_lock. The write and written lists are - * protected by the stripe lock. The device_lock, which can be - * claimed while the stipe lock is held, is only for list - * manipulations and will only be held for a very short time. It can - * be claimed from interrupts. - * - * - * Stripes in the stripe cache can be on one of two lists (or on - * neither). The "inactive_list" contains stripes which are not - * currently being used for any request. They can freely be reused - * for another stripe. The "handle_list" contains stripes that need - * to be handled in some way. Both of these are fifo queues. Each - * stripe is also (potentially) linked to a hash bucket in the hash - * table so that it can be found by sector number. Stripes that are - * not hashed must be on the inactive_list, and will normally be at - * the front. All stripes start life this way. - * - * The inactive_list, handle_list and hash bucket lists are all protected by the - * device_lock. - * - stripes on the inactive_list never have their stripe_lock held. - * - stripes have a reference counter. If count==0, they are on a list. - * - If a stripe might need handling, STRIPE_HANDLE is set. - * - When refcount reaches zero, then if STRIPE_HANDLE it is put on - * handle_list else inactive_list - * - * This, combined with the fact that STRIPE_HANDLE is only ever - * cleared while a stripe has a non-zero count means that if the - * refcount is 0 and STRIPE_HANDLE is set, then it is on the - * handle_list and if recount is 0 and STRIPE_HANDLE is not set, then - * the stripe is on inactive_list. 
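The refcount/list rule just described is easy to misread, so a hypothetical restatement as code may help. It uses the stripe_head and raid5_conf_t fields defined further down in this header; the helper name and the plain spin_lock are illustrative only, and the real release path also handles waiters and preread accounting.

/* Sketch only: count == 0 means "on exactly one list"; STRIPE_HANDLE picks which. */
static void release_stripe_sketch(raid5_conf_t *conf, struct stripe_head *sh)
{
	spin_lock(&conf->device_lock);
	if (atomic_dec_and_test(&sh->count)) {
		if (test_bit(STRIPE_HANDLE, &sh->state))
			list_add_tail(&sh->lru, &conf->handle_list);
		else
			list_add_tail(&sh->lru, &conf->inactive_list);
	}
	spin_unlock(&conf->device_lock);
}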
- * - * The possible transitions are: - * activate an unhashed/inactive stripe (get_active_stripe()) - * lockdev check-hash unlink-stripe cnt++ clean-stripe hash-stripe unlockdev - * activate a hashed, possibly active stripe (get_active_stripe()) - * lockdev check-hash if(!cnt++)unlink-stripe unlockdev - * attach a request to an active stripe (add_stripe_bh()) - * lockdev attach-buffer unlockdev - * handle a stripe (handle_stripe()) - * lockstripe clrSTRIPE_HANDLE ... - * (lockdev check-buffers unlockdev) .. - * change-state .. - * record io/ops needed unlockstripe schedule io/ops - * release an active stripe (release_stripe()) - * lockdev if (!--cnt) { if STRIPE_HANDLE, add to handle_list else add to inactive-list } unlockdev - * - * The refcount counts each thread that have activated the stripe, - * plus raid5d if it is handling it, plus one for each active request - * on a cached buffer, and plus one if the stripe is undergoing stripe - * operations. - * - * Stripe operations are performed outside the stripe lock, - * the stripe operations are: - * -copying data between the stripe cache and user application buffers - * -computing blocks to save a disk access, or to recover a missing block - * -updating the parity on a write operation (reconstruct write and - * read-modify-write) - * -checking parity correctness - * -running i/o to disk - * These operations are carried out by raid5_run_ops which uses the async_tx - * api to (optionally) offload operations to dedicated hardware engines. - * When requesting an operation handle_stripe sets the pending bit for the - * operation and increments the count. raid5_run_ops is then run whenever - * the count is non-zero. - * There are some critical dependencies between the operations that prevent some - * from being requested while another is in flight. - * 1/ Parity check operations destroy the in cache version of the parity block, - * so we prevent parity dependent operations like writes and compute_blocks - * from starting while a check is in progress. Some dma engines can perform - * the check without damaging the parity block, in these cases the parity - * block is re-marked up to date (assuming the check was successful) and is - * not re-read from disk. - * 2/ When a write operation is requested we immediately lock the affected - * blocks, and mark them as not up to date. This causes new read requests - * to be held off, as well as parity checks and compute block operations. - * 3/ Once a compute block operation has been requested handle_stripe treats - * that block as if it is up to date. raid5_run_ops guaruntees that any - * operation that is dependent on the compute block result is initiated after - * the compute block completes. - */ - -/* - * Operations state - intermediate states that are visible outside of sh->lock - * In general _idle indicates nothing is running, _run indicates a data - * processing operation is active, and _result means the data processing result - * is stable and can be acted upon. 
For simple operations like biofill and - * compute that only have an _idle and _run state they are indicated with - * sh->state flags (STRIPE_BIOFILL_RUN and STRIPE_COMPUTE_RUN) - */ -/** - * enum check_states - handles syncing / repairing a stripe - * @check_state_idle - check operations are quiesced - * @check_state_run - check operation is running - * @check_state_result - set outside lock when check result is valid - * @check_state_compute_run - check failed and we are repairing - * @check_state_compute_result - set outside lock when compute result is valid - */ -enum check_states { - check_state_idle = 0, - check_state_run, /* parity check */ - check_state_check_result, - check_state_compute_run, /* parity repair */ - check_state_compute_result, -}; - -/** - * enum reconstruct_states - handles writing or expanding a stripe - */ -enum reconstruct_states { - reconstruct_state_idle = 0, - reconstruct_state_prexor_drain_run, /* prexor-write */ - reconstruct_state_drain_run, /* write */ - reconstruct_state_run, /* expand */ - reconstruct_state_prexor_drain_result, - reconstruct_state_drain_result, - reconstruct_state_result, -}; - -struct stripe_head { - struct hlist_node hash; - struct list_head lru; /* inactive_list or handle_list */ - struct raid5_private_data *raid_conf; - sector_t sector; /* sector of this row */ - int pd_idx; /* parity disk index */ - unsigned long state; /* state flags */ - atomic_t count; /* nr of active thread/requests */ - spinlock_t lock; - int bm_seq; /* sequence number for bitmap flushes */ - int disks; /* disks in stripe */ - enum check_states check_state; - enum reconstruct_states reconstruct_state; - /* stripe_operations - * @target - STRIPE_OP_COMPUTE_BLK target - */ - struct stripe_operations { - int target; - u32 zero_sum_result; - } ops; - struct r5dev { - struct bio req; - struct bio_vec vec; - struct page *page; - struct bio *toread, *read, *towrite, *written; - sector_t sector; /* sector of this page */ - unsigned long flags; - } dev[1]; /* allocated with extra space depending of RAID geometry */ -}; - -/* stripe_head_state - collects and tracks the dynamic state of a stripe_head - * for handle_stripe. 
It is only valid under spin_lock(sh->lock); - */ -struct stripe_head_state { - int syncing, expanding, expanded; - int locked, uptodate, to_read, to_write, failed, written; - int to_fill, compute, req_compute, non_overwrite; - int failed_num; - unsigned long ops_request; -}; - -/* r6_state - extra state data only relevant to r6 */ -struct r6_state { - int p_failed, q_failed, qd_idx, failed_num[2]; -}; - -/* Flags */ -#define R5_UPTODATE 0 /* page contains current data */ -#define R5_LOCKED 1 /* IO has been submitted on "req" */ -#define R5_OVERWRITE 2 /* towrite covers whole page */ -/* and some that are internal to handle_stripe */ -#define R5_Insync 3 /* rdev && rdev->in_sync at start */ -#define R5_Wantread 4 /* want to schedule a read */ -#define R5_Wantwrite 5 -#define R5_Overlap 7 /* There is a pending overlapping request on this block */ -#define R5_ReadError 8 /* seen a read error here recently */ -#define R5_ReWrite 9 /* have tried to over-write the readerror */ - -#define R5_Expanded 10 /* This block now has post-expand data */ -#define R5_Wantcompute 11 /* compute_block in progress treat as - * uptodate - */ -#define R5_Wantfill 12 /* dev->toread contains a bio that needs - * filling - */ -#define R5_Wantdrain 13 /* dev->towrite needs to be drained */ -/* - * Write method - */ -#define RECONSTRUCT_WRITE 1 -#define READ_MODIFY_WRITE 2 -/* not a write method, but a compute_parity mode */ -#define CHECK_PARITY 3 - -/* - * Stripe state - */ -#define STRIPE_HANDLE 2 -#define STRIPE_SYNCING 3 -#define STRIPE_INSYNC 4 -#define STRIPE_PREREAD_ACTIVE 5 -#define STRIPE_DELAYED 6 -#define STRIPE_DEGRADED 7 -#define STRIPE_BIT_DELAY 8 -#define STRIPE_EXPANDING 9 -#define STRIPE_EXPAND_SOURCE 10 -#define STRIPE_EXPAND_READY 11 -#define STRIPE_IO_STARTED 12 /* do not count towards 'bypass_count' */ -#define STRIPE_FULL_WRITE 13 /* all blocks are set to be overwritten */ -#define STRIPE_BIOFILL_RUN 14 -#define STRIPE_COMPUTE_RUN 15 -/* - * Operation request flags - */ -#define STRIPE_OP_BIOFILL 0 -#define STRIPE_OP_COMPUTE_BLK 1 -#define STRIPE_OP_PREXOR 2 -#define STRIPE_OP_BIODRAIN 3 -#define STRIPE_OP_POSTXOR 4 -#define STRIPE_OP_CHECK 5 - -/* - * Plugging: - * - * To improve write throughput, we need to delay the handling of some - * stripes until there has been a chance that several write requests - * for the one stripe have all been collected. - * In particular, any write request that would require pre-reading - * is put on a "delayed" queue until there are no stripes currently - * in a pre-read phase. Further, if the "delayed" queue is empty when - * a stripe is put on it then we "plug" the queue and do not process it - * until an unplug call is made. (the unplug_io_fn() is called). - * - * When preread is initiated on a stripe, we set PREREAD_ACTIVE and add - * it to the count of prereading stripes. - * When write is initiated, or the stripe refcnt == 0 (just in case) we - * clear the PREREAD_ACTIVE flag and decrement the count - * Whenever the 'handle' queue is empty and the device is not plugged, we - * move any strips from delayed to handle and clear the DELAYED flag and set - * PREREAD_ACTIVE. - * In stripe_handle, if we find pre-reading is necessary, we do it if - * PREREAD_ACTIVE is set, else we set DELAYED which will send it to the delayed queue. - * HANDLE gets cleared if stripe_handle leave nothing locked. 
- */ - - -struct disk_info { - mdk_rdev_t *rdev; -}; - -struct raid5_private_data { - struct hlist_head *stripe_hashtbl; - mddev_t *mddev; - struct disk_info *spare; - int chunk_size, level, algorithm; - int max_degraded; - int raid_disks; - int max_nr_stripes; - - /* used during an expand */ - sector_t expand_progress; /* MaxSector when no expand happening */ - sector_t expand_lo; /* from here up to expand_progress it out-of-bounds - * as we haven't flushed the metadata yet - */ - int previous_raid_disks; - - struct list_head handle_list; /* stripes needing handling */ - struct list_head hold_list; /* preread ready stripes */ - struct list_head delayed_list; /* stripes that have plugged requests */ - struct list_head bitmap_list; /* stripes delaying awaiting bitmap update */ - struct bio *retry_read_aligned; /* currently retrying aligned bios */ - struct bio *retry_read_aligned_list; /* aligned bios retry list */ - atomic_t preread_active_stripes; /* stripes with scheduled io */ - atomic_t active_aligned_reads; - atomic_t pending_full_writes; /* full write backlog */ - int bypass_count; /* bypassed prereads */ - int bypass_threshold; /* preread nice */ - struct list_head *last_hold; /* detect hold_list promotions */ - - atomic_t reshape_stripes; /* stripes with pending writes for reshape */ - /* unfortunately we need two cache names as we temporarily have - * two caches. - */ - int active_name; - char cache_name[2][20]; - struct kmem_cache *slab_cache; /* for allocating stripes */ - - int seq_flush, seq_write; - int quiesce; - - int fullsync; /* set to 1 if a full sync is needed, - * (fresh device added). - * Cleared when a sync completes. - */ - - struct page *spare_page; /* Used when checking P/Q in raid6 */ - - /* - * Free stripes pool - */ - atomic_t active_stripes; - struct list_head inactive_list; - wait_queue_head_t wait_for_stripe; - wait_queue_head_t wait_for_overlap; - int inactive_blocked; /* release of inactive stripes blocked, - * waiting for 25% to be free - */ - int pool_size; /* number of disks in stripeheads in pool */ - spinlock_t device_lock; - struct disk_info *disks; -}; - -typedef struct raid5_private_data raid5_conf_t; - -#define mddev_to_conf(mddev) ((raid5_conf_t *) mddev->private) - -/* - * Our supported algorithms - */ -#define ALGORITHM_LEFT_ASYMMETRIC 0 -#define ALGORITHM_RIGHT_ASYMMETRIC 1 -#define ALGORITHM_LEFT_SYMMETRIC 2 -#define ALGORITHM_RIGHT_SYMMETRIC 3 - -#endif diff --git a/include/linux/raid/xor.h b/include/linux/raid/xor.h index 3e120587ead..5a210959e3f 100644 --- a/include/linux/raid/xor.h +++ b/include/linux/raid/xor.h @@ -1,8 +1,6 @@ #ifndef _XOR_H #define _XOR_H -#include <linux/raid/md.h> - #define MAX_XOR_BLOCKS 4 extern void xor_blocks(unsigned int count, unsigned int bytes, diff --git a/include/linux/random.h b/include/linux/random.h index 407ea3646f8..25d02fe5c9b 100644 --- a/include/linux/random.h +++ b/include/linux/random.h @@ -7,6 +7,7 @@ #ifndef _LINUX_RANDOM_H #define _LINUX_RANDOM_H +#include <linux/types.h> #include <linux/ioctl.h> #include <linux/irqnr.h> diff --git a/include/linux/rational.h b/include/linux/rational.h new file mode 100644 index 00000000000..4f532fcd9ee --- /dev/null +++ b/include/linux/rational.h @@ -0,0 +1,19 @@ +/* + * rational fractions + * + * Copyright (C) 2009 emlix GmbH, Oskar Schirmer <os@emlix.com> + * + * helper functions when coping with rational numbers, + * e.g. 
when calculating optimum numerator/denominator pairs for + * pll configuration taking into account restricted register size + */ + +#ifndef _LINUX_RATIONAL_H +#define _LINUX_RATIONAL_H + +void rational_best_approximation( + unsigned long given_numerator, unsigned long given_denominator, + unsigned long max_numerator, unsigned long max_denominator, + unsigned long *best_numerator, unsigned long *best_denominator); + +#endif /* _LINUX_RATIONAL_H */ diff --git a/include/linux/rcuclassic.h b/include/linux/rcuclassic.h deleted file mode 100644 index f3f697df1d7..00000000000 --- a/include/linux/rcuclassic.h +++ /dev/null @@ -1,184 +0,0 @@ -/* - * Read-Copy Update mechanism for mutual exclusion (classic version) - * - * This program is free software; you can redistribute it and/or modify - * it under the terms of the GNU General Public License as published by - * the Free Software Foundation; either version 2 of the License, or - * (at your option) any later version. - * - * This program is distributed in the hope that it will be useful, - * but WITHOUT ANY WARRANTY; without even the implied warranty of - * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the - * GNU General Public License for more details. - * - * You should have received a copy of the GNU General Public License - * along with this program; if not, write to the Free Software - * Foundation, Inc., 59 Temple Place - Suite 330, Boston, MA 02111-1307, USA. - * - * Copyright IBM Corporation, 2001 - * - * Author: Dipankar Sarma <dipankar@in.ibm.com> - * - * Based on the original work by Paul McKenney <paulmck@us.ibm.com> - * and inputs from Rusty Russell, Andrea Arcangeli and Andi Kleen. - * Papers: - * http://www.rdrop.com/users/paulmck/paper/rclockpdcsproof.pdf - * http://lse.sourceforge.net/locking/rclock_OLS.2001.05.01c.sc.pdf (OLS2001) - * - * For detailed explanation of Read-Copy Update mechanism see - - * Documentation/RCU - * - */ - -#ifndef __LINUX_RCUCLASSIC_H -#define __LINUX_RCUCLASSIC_H - -#include <linux/cache.h> -#include <linux/spinlock.h> -#include <linux/threads.h> -#include <linux/percpu.h> -#include <linux/cpumask.h> -#include <linux/seqlock.h> - -#ifdef CONFIG_RCU_CPU_STALL_DETECTOR -#define RCU_SECONDS_TILL_STALL_CHECK (10 * HZ) /* for rcp->jiffies_stall */ -#define RCU_SECONDS_TILL_STALL_RECHECK (30 * HZ) /* for rcp->jiffies_stall */ -#endif /* #ifdef CONFIG_RCU_CPU_STALL_DETECTOR */ - -/* Global control variables for rcupdate callback mechanism. */ -struct rcu_ctrlblk { - long cur; /* Current batch number. */ - long completed; /* Number of the last completed batch */ - long pending; /* Number of the last pending batch */ -#ifdef CONFIG_RCU_CPU_STALL_DETECTOR - unsigned long gp_start; /* Time at which GP started in jiffies. */ - unsigned long jiffies_stall; - /* Time at which to check for CPU stalls. */ -#endif /* #ifdef CONFIG_RCU_CPU_STALL_DETECTOR */ - - int signaled; - - spinlock_t lock ____cacheline_internodealigned_in_smp; - DECLARE_BITMAP(cpumask, NR_CPUS); /* CPUs that need to switch for */ - /* current batch to proceed. */ -} ____cacheline_internodealigned_in_smp; - -/* Is batch a before batch b ? */ -static inline int rcu_batch_before(long a, long b) -{ - return (a - b) < 0; -} - -/* Is batch a after batch b ? */ -static inline int rcu_batch_after(long a, long b) -{ - return (a - b) > 0; -} - -/* Per-CPU data for Read-Copy UPdate. */ -struct rcu_data { - /* 1) quiescent state handling : */ - long quiescbatch; /* Batch # for grace period */ - int passed_quiesc; /* User-mode/idle loop etc. 
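Stepping back to the new <linux/rational.h> helper declared above, a short hypothetical use: choosing numerator/denominator register values for a fractional clock divider. Only the function signature comes from the header; the clock figures and the 16-bit register width are made up.

#include <linux/rational.h>

static void pick_pll_ratio(void)
{
	unsigned long num, den;

	/* Approximate 48 MHz / 33.333333 MHz with 16-bit num/den fields. */
	rational_best_approximation(48000000, 33333333,
				    (1 << 16) - 1, (1 << 16) - 1,
				    &num, &den);

	/* num/den now holds the closest ratio the registers can express. */
}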
*/ - int qs_pending; /* core waits for quiesc state */ - - /* 2) batch handling */ - /* - * if nxtlist is not NULL, then: - * batch: - * The batch # for the last entry of nxtlist - * [*nxttail[1], NULL = *nxttail[2]): - * Entries that batch # <= batch - * [*nxttail[0], *nxttail[1]): - * Entries that batch # <= batch - 1 - * [nxtlist, *nxttail[0]): - * Entries that batch # <= batch - 2 - * The grace period for these entries has completed, and - * the other grace-period-completed entries may be moved - * here temporarily in rcu_process_callbacks(). - */ - long batch; - struct rcu_head *nxtlist; - struct rcu_head **nxttail[3]; - long qlen; /* # of queued callbacks */ - struct rcu_head *donelist; - struct rcu_head **donetail; - long blimit; /* Upper limit on a processed batch */ - int cpu; - struct rcu_head barrier; -}; - -DECLARE_PER_CPU(struct rcu_data, rcu_data); -DECLARE_PER_CPU(struct rcu_data, rcu_bh_data); - -/* - * Increment the quiescent state counter. - * The counter is a bit degenerated: We do not need to know - * how many quiescent states passed, just if there was at least - * one since the start of the grace period. Thus just a flag. - */ -static inline void rcu_qsctr_inc(int cpu) -{ - struct rcu_data *rdp = &per_cpu(rcu_data, cpu); - rdp->passed_quiesc = 1; -} -static inline void rcu_bh_qsctr_inc(int cpu) -{ - struct rcu_data *rdp = &per_cpu(rcu_bh_data, cpu); - rdp->passed_quiesc = 1; -} - -extern int rcu_pending(int cpu); -extern int rcu_needs_cpu(int cpu); - -#ifdef CONFIG_DEBUG_LOCK_ALLOC -extern struct lockdep_map rcu_lock_map; -# define rcu_read_acquire() \ - lock_acquire(&rcu_lock_map, 0, 0, 2, 1, NULL, _THIS_IP_) -# define rcu_read_release() lock_release(&rcu_lock_map, 1, _THIS_IP_) -#else -# define rcu_read_acquire() do { } while (0) -# define rcu_read_release() do { } while (0) -#endif - -#define __rcu_read_lock() \ - do { \ - preempt_disable(); \ - __acquire(RCU); \ - rcu_read_acquire(); \ - } while (0) -#define __rcu_read_unlock() \ - do { \ - rcu_read_release(); \ - __release(RCU); \ - preempt_enable(); \ - } while (0) -#define __rcu_read_lock_bh() \ - do { \ - local_bh_disable(); \ - __acquire(RCU_BH); \ - rcu_read_acquire(); \ - } while (0) -#define __rcu_read_unlock_bh() \ - do { \ - rcu_read_release(); \ - __release(RCU_BH); \ - local_bh_enable(); \ - } while (0) - -#define __synchronize_sched() synchronize_rcu() - -#define call_rcu_sched(head, func) call_rcu(head, func) - -extern void __rcu_init(void); -#define rcu_init_sched() do { } while (0) -extern void rcu_check_callbacks(int cpu, int user); -extern void rcu_restart_cpu(int cpu); - -extern long rcu_batches_completed(void); -extern long rcu_batches_completed_bh(void); - -#define rcu_enter_nohz() do { } while (0) -#define rcu_exit_nohz() do { } while (0) - -#endif /* __LINUX_RCUCLASSIC_H */ diff --git a/include/linux/rculist.h b/include/linux/rculist.h index e649bd3f2c9..5710f43bbc9 100644 --- a/include/linux/rculist.h +++ b/include/linux/rculist.h @@ -198,6 +198,32 @@ static inline void list_splice_init_rcu(struct list_head *list, at->prev = last; } +/** + * list_entry_rcu - get the struct for this entry + * @ptr: the &struct list_head pointer. + * @type: the type of the struct this is embedded in. + * @member: the name of the list_struct within the struct. + * + * This primitive may safely run concurrently with the _rcu list-mutation + * primitives such as list_add_rcu() as long as it's guarded by rcu_read_lock(). 
+ */ +#define list_entry_rcu(ptr, type, member) \ + container_of(rcu_dereference(ptr), type, member) + +/** + * list_first_entry_rcu - get the first element from a list + * @ptr: the list head to take the element from. + * @type: the type of the struct this is embedded in. + * @member: the name of the list_struct within the struct. + * + * Note, that list is expected to be not empty. + * + * This primitive may safely run concurrently with the _rcu list-mutation + * primitives such as list_add_rcu() as long as it's guarded by rcu_read_lock(). + */ +#define list_first_entry_rcu(ptr, type, member) \ + list_entry_rcu((ptr)->next, type, member) + #define __list_for_each_rcu(pos, head) \ for (pos = rcu_dereference((head)->next); \ pos != (head); \ @@ -214,9 +240,9 @@ static inline void list_splice_init_rcu(struct list_head *list, * as long as the traversal is guarded by rcu_read_lock(). */ #define list_for_each_entry_rcu(pos, head, member) \ - for (pos = list_entry(rcu_dereference((head)->next), typeof(*pos), member); \ + for (pos = list_entry_rcu((head)->next, typeof(*pos), member); \ prefetch(pos->member.next), &pos->member != (head); \ - pos = list_entry(rcu_dereference(pos->member.next), typeof(*pos), member)) + pos = list_entry_rcu(pos->member.next, typeof(*pos), member)) /** diff --git a/include/linux/rcupdate.h b/include/linux/rcupdate.h index 921340a7b71..95e0615f4d7 100644 --- a/include/linux/rcupdate.h +++ b/include/linux/rcupdate.h @@ -36,7 +36,6 @@ #include <linux/cache.h> #include <linux/spinlock.h> #include <linux/threads.h> -#include <linux/percpu.h> #include <linux/cpumask.h> #include <linux/seqlock.h> #include <linux/lockdep.h> @@ -52,15 +51,26 @@ struct rcu_head { void (*func)(struct rcu_head *head); }; -#if defined(CONFIG_CLASSIC_RCU) -#include <linux/rcuclassic.h> -#elif defined(CONFIG_TREE_RCU) +/* Exported common interfaces */ +extern void synchronize_rcu(void); +extern void synchronize_rcu_bh(void); +extern void rcu_barrier(void); +extern void rcu_barrier_bh(void); +extern void rcu_barrier_sched(void); +extern void synchronize_sched_expedited(void); +extern int sched_expedited_torture_stats(char *page); + +/* Internal to kernel */ +extern void rcu_init(void); +extern void rcu_scheduler_starting(void); +extern int rcu_needs_cpu(int cpu); +extern int rcu_scheduler_active; + +#if defined(CONFIG_TREE_RCU) || defined(CONFIG_TREE_PREEMPT_RCU) #include <linux/rcutree.h> -#elif defined(CONFIG_PREEMPT_RCU) -#include <linux/rcupreempt.h> #else #error "Unknown RCU implementation specified to kernel configuration" -#endif /* #else #if defined(CONFIG_CLASSIC_RCU) */ +#endif #define RCU_HEAD_INIT { .next = NULL, .func = NULL } #define RCU_HEAD(head) struct rcu_head head = RCU_HEAD_INIT @@ -68,6 +78,16 @@ struct rcu_head { (ptr)->next = NULL; (ptr)->func = NULL; \ } while (0) +#ifdef CONFIG_DEBUG_LOCK_ALLOC +extern struct lockdep_map rcu_lock_map; +# define rcu_read_acquire() \ + lock_acquire(&rcu_lock_map, 0, 0, 2, 1, NULL, _THIS_IP_) +# define rcu_read_release() lock_release(&rcu_lock_map, 1, _THIS_IP_) +#else +# define rcu_read_acquire() do { } while (0) +# define rcu_read_release() do { } while (0) +#endif + /** * rcu_read_lock - mark the beginning of an RCU read-side critical section. * @@ -97,7 +117,12 @@ struct rcu_head { * * It is illegal to block while in an RCU read-side critical section. 
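A brief hypothetical example tying the list_entry_rcu()/list_for_each_entry_rcu() forms above to the read-side markers documented here. The demo_node structure and list are invented for illustration, and updates are assumed to be serialised by a lock that is not shown.

#include <linux/rculist.h>
#include <linux/rcupdate.h>
#include <linux/slab.h>

struct demo_node {
	int value;
	struct list_head link;
	struct rcu_head rcu;
};

static LIST_HEAD(demo_list);

/* Reader: may run concurrently with list_add_rcu()/list_del_rcu(). */
static int demo_find(int value)
{
	struct demo_node *n;
	int found = 0;

	rcu_read_lock();
	list_for_each_entry_rcu(n, &demo_list, link) {
		if (n->value == value) {
			found = 1;
			break;
		}
	}
	rcu_read_unlock();
	return found;
}

/* Updater: unlink first, free only after a grace period has elapsed. */
static void demo_free_rcu(struct rcu_head *head)
{
	kfree(container_of(head, struct demo_node, rcu));
}

static void demo_del(struct demo_node *n)
{
	list_del_rcu(&n->link);
	call_rcu(&n->rcu, demo_free_rcu);
}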
*/ -#define rcu_read_lock() __rcu_read_lock() +static inline void rcu_read_lock(void) +{ + __rcu_read_lock(); + __acquire(RCU); + rcu_read_acquire(); +} /** * rcu_read_unlock - marks the end of an RCU read-side critical section. @@ -114,7 +139,12 @@ struct rcu_head { * used as well. RCU does not care how the writers keep out of each * others' way, as long as they do so. */ -#define rcu_read_unlock() __rcu_read_unlock() +static inline void rcu_read_unlock(void) +{ + rcu_read_release(); + __release(RCU); + __rcu_read_unlock(); +} /** * rcu_read_lock_bh - mark the beginning of a softirq-only RCU critical section @@ -127,14 +157,24 @@ struct rcu_head { * can use just rcu_read_lock(). * */ -#define rcu_read_lock_bh() __rcu_read_lock_bh() +static inline void rcu_read_lock_bh(void) +{ + __rcu_read_lock_bh(); + __acquire(RCU_BH); + rcu_read_acquire(); +} /* * rcu_read_unlock_bh - marks the end of a softirq-only RCU critical section * * See rcu_read_lock_bh() for more information. */ -#define rcu_read_unlock_bh() __rcu_read_unlock_bh() +static inline void rcu_read_unlock_bh(void) +{ + rcu_read_release(); + __release(RCU_BH); + __rcu_read_unlock_bh(); +} /** * rcu_read_lock_sched - mark the beginning of a RCU-classic critical section @@ -145,17 +185,34 @@ struct rcu_head { * - call_rcu_sched() and rcu_barrier_sched() * on the write-side to insure proper synchronization. */ -#define rcu_read_lock_sched() preempt_disable() -#define rcu_read_lock_sched_notrace() preempt_disable_notrace() +static inline void rcu_read_lock_sched(void) +{ + preempt_disable(); + __acquire(RCU_SCHED); + rcu_read_acquire(); +} +static inline notrace void rcu_read_lock_sched_notrace(void) +{ + preempt_disable_notrace(); + __acquire(RCU_SCHED); +} /* * rcu_read_unlock_sched - marks the end of a RCU-classic critical section * * See rcu_read_lock_sched for more information. */ -#define rcu_read_unlock_sched() preempt_enable() -#define rcu_read_unlock_sched_notrace() preempt_enable_notrace() - +static inline void rcu_read_unlock_sched(void) +{ + rcu_read_release(); + __release(RCU_SCHED); + preempt_enable(); +} +static inline notrace void rcu_read_unlock_sched_notrace(void) +{ + __release(RCU_SCHED); + preempt_enable_notrace(); +} /** @@ -257,14 +314,4 @@ extern void call_rcu(struct rcu_head *head, extern void call_rcu_bh(struct rcu_head *head, void (*func)(struct rcu_head *head)); -/* Exported common interfaces */ -extern void synchronize_rcu(void); -extern void rcu_barrier(void); -extern void rcu_barrier_bh(void); -extern void rcu_barrier_sched(void); - -/* Internal to kernel */ -extern void rcu_init(void); -extern int rcu_needs_cpu(int cpu); - #endif /* __LINUX_RCUPDATE_H */ diff --git a/include/linux/rcupreempt.h b/include/linux/rcupreempt.h deleted file mode 100644 index 3e05c09b54a..00000000000 --- a/include/linux/rcupreempt.h +++ /dev/null @@ -1,145 +0,0 @@ -/* - * Read-Copy Update mechanism for mutual exclusion (RT implementation) - * - * This program is free software; you can redistribute it and/or modify - * it under the terms of the GNU General Public License as published by - * the Free Software Foundation; either version 2 of the License, or - * (at your option) any later version. - * - * This program is distributed in the hope that it will be useful, - * but WITHOUT ANY WARRANTY; without even the implied warranty of - * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the - * GNU General Public License for more details. 
- * - * You should have received a copy of the GNU General Public License - * along with this program; if not, write to the Free Software - * Foundation, Inc., 59 Temple Place - Suite 330, Boston, MA 02111-1307, USA. - * - * Copyright (C) IBM Corporation, 2006 - * - * Author: Paul McKenney <paulmck@us.ibm.com> - * - * Based on the original work by Paul McKenney <paul.mckenney@us.ibm.com> - * and inputs from Rusty Russell, Andrea Arcangeli and Andi Kleen. - * Papers: - * http://www.rdrop.com/users/paulmck/paper/rclockpdcsproof.pdf - * http://lse.sourceforge.net/locking/rclock_OLS.2001.05.01c.sc.pdf (OLS2001) - * - * For detailed explanation of Read-Copy Update mechanism see - - * Documentation/RCU - * - */ - -#ifndef __LINUX_RCUPREEMPT_H -#define __LINUX_RCUPREEMPT_H - -#include <linux/cache.h> -#include <linux/spinlock.h> -#include <linux/threads.h> -#include <linux/percpu.h> -#include <linux/cpumask.h> -#include <linux/seqlock.h> - -struct rcu_dyntick_sched { - int dynticks; - int dynticks_snap; - int sched_qs; - int sched_qs_snap; - int sched_dynticks_snap; -}; - -DECLARE_PER_CPU(struct rcu_dyntick_sched, rcu_dyntick_sched); - -static inline void rcu_qsctr_inc(int cpu) -{ - struct rcu_dyntick_sched *rdssp = &per_cpu(rcu_dyntick_sched, cpu); - - rdssp->sched_qs++; -} -#define rcu_bh_qsctr_inc(cpu) - -/* - * Someone might want to pass call_rcu_bh as a function pointer. - * So this needs to just be a rename and not a macro function. - * (no parentheses) - */ -#define call_rcu_bh call_rcu - -/** - * call_rcu_sched - Queue RCU callback for invocation after sched grace period. - * @head: structure to be used for queueing the RCU updates. - * @func: actual update function to be invoked after the grace period - * - * The update function will be invoked some time after a full - * synchronize_sched()-style grace period elapses, in other words after - * all currently executing preempt-disabled sections of code (including - * hardirq handlers, NMI handlers, and local_irq_save() blocks) have - * completed. - */ -extern void call_rcu_sched(struct rcu_head *head, - void (*func)(struct rcu_head *head)); - -extern void __rcu_read_lock(void) __acquires(RCU); -extern void __rcu_read_unlock(void) __releases(RCU); -extern int rcu_pending(int cpu); -extern int rcu_needs_cpu(int cpu); - -#define __rcu_read_lock_bh() { rcu_read_lock(); local_bh_disable(); } -#define __rcu_read_unlock_bh() { local_bh_enable(); rcu_read_unlock(); } - -extern void __synchronize_sched(void); - -extern void __rcu_init(void); -extern void rcu_init_sched(void); -extern void rcu_check_callbacks(int cpu, int user); -extern void rcu_restart_cpu(int cpu); -extern long rcu_batches_completed(void); - -/* - * Return the number of RCU batches processed thus far. Useful for debug - * and statistic. 
The _bh variant is identifcal to straight RCU - */ -static inline long rcu_batches_completed_bh(void) -{ - return rcu_batches_completed(); -} - -#ifdef CONFIG_RCU_TRACE -struct rcupreempt_trace; -extern long *rcupreempt_flipctr(int cpu); -extern long rcupreempt_data_completed(void); -extern int rcupreempt_flip_flag(int cpu); -extern int rcupreempt_mb_flag(int cpu); -extern char *rcupreempt_try_flip_state_name(void); -extern struct rcupreempt_trace *rcupreempt_trace_cpu(int cpu); -#endif - -struct softirq_action; - -#ifdef CONFIG_NO_HZ - -static inline void rcu_enter_nohz(void) -{ - static DEFINE_RATELIMIT_STATE(rs, 10 * HZ, 1); - - smp_mb(); /* CPUs seeing ++ must see prior RCU read-side crit sects */ - __get_cpu_var(rcu_dyntick_sched).dynticks++; - WARN_ON_RATELIMIT(__get_cpu_var(rcu_dyntick_sched).dynticks & 0x1, &rs); -} - -static inline void rcu_exit_nohz(void) -{ - static DEFINE_RATELIMIT_STATE(rs, 10 * HZ, 1); - - __get_cpu_var(rcu_dyntick_sched).dynticks++; - smp_mb(); /* CPUs seeing ++ must see later RCU read-side crit sects */ - WARN_ON_RATELIMIT(!(__get_cpu_var(rcu_dyntick_sched).dynticks & 0x1), - &rs); -} - -#else /* CONFIG_NO_HZ */ -#define rcu_enter_nohz() do { } while (0) -#define rcu_exit_nohz() do { } while (0) -#endif /* CONFIG_NO_HZ */ - -#endif /* __LINUX_RCUPREEMPT_H */ diff --git a/include/linux/rcupreempt_trace.h b/include/linux/rcupreempt_trace.h deleted file mode 100644 index b99ae073192..00000000000 --- a/include/linux/rcupreempt_trace.h +++ /dev/null @@ -1,97 +0,0 @@ -/* - * Read-Copy Update mechanism for mutual exclusion (RT implementation) - * - * This program is free software; you can redistribute it and/or modify - * it under the terms of the GNU General Public License as published by - * the Free Software Foundation; either version 2 of the License, or - * (at your option) any later version. - * - * This program is distributed in the hope that it will be useful, - * but WITHOUT ANY WARRANTY; without even the implied warranty of - * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the - * GNU General Public License for more details. - * - * You should have received a copy of the GNU General Public License - * along with this program; if not, write to the Free Software - * Foundation, Inc., 59 Temple Place - Suite 330, Boston, MA 02111-1307, USA. - * - * Copyright (C) IBM Corporation, 2006 - * - * Author: Paul McKenney <paulmck@us.ibm.com> - * - * Based on the original work by Paul McKenney <paul.mckenney@us.ibm.com> - * and inputs from Rusty Russell, Andrea Arcangeli and Andi Kleen. - * Papers: - * http://www.rdrop.com/users/paulmck/paper/rclockpdcsproof.pdf - * http://lse.sourceforge.net/locking/rclock_OLS.2001.05.01c.sc.pdf (OLS2001) - * - * For detailed explanation of the Preemptible Read-Copy Update mechanism see - - * http://lwn.net/Articles/253651/ - */ - -#ifndef __LINUX_RCUPREEMPT_TRACE_H -#define __LINUX_RCUPREEMPT_TRACE_H - -#include <linux/types.h> -#include <linux/kernel.h> - -#include <asm/atomic.h> - -/* - * PREEMPT_RCU data structures. 
- */ - -struct rcupreempt_trace { - long next_length; - long next_add; - long wait_length; - long wait_add; - long done_length; - long done_add; - long done_remove; - atomic_t done_invoked; - long rcu_check_callbacks; - atomic_t rcu_try_flip_1; - atomic_t rcu_try_flip_e1; - long rcu_try_flip_i1; - long rcu_try_flip_ie1; - long rcu_try_flip_g1; - long rcu_try_flip_a1; - long rcu_try_flip_ae1; - long rcu_try_flip_a2; - long rcu_try_flip_z1; - long rcu_try_flip_ze1; - long rcu_try_flip_z2; - long rcu_try_flip_m1; - long rcu_try_flip_me1; - long rcu_try_flip_m2; -}; - -#ifdef CONFIG_RCU_TRACE -#define RCU_TRACE(fn, arg) fn(arg); -#else -#define RCU_TRACE(fn, arg) -#endif - -extern void rcupreempt_trace_move2done(struct rcupreempt_trace *trace); -extern void rcupreempt_trace_move2wait(struct rcupreempt_trace *trace); -extern void rcupreempt_trace_try_flip_1(struct rcupreempt_trace *trace); -extern void rcupreempt_trace_try_flip_e1(struct rcupreempt_trace *trace); -extern void rcupreempt_trace_try_flip_i1(struct rcupreempt_trace *trace); -extern void rcupreempt_trace_try_flip_ie1(struct rcupreempt_trace *trace); -extern void rcupreempt_trace_try_flip_g1(struct rcupreempt_trace *trace); -extern void rcupreempt_trace_try_flip_a1(struct rcupreempt_trace *trace); -extern void rcupreempt_trace_try_flip_ae1(struct rcupreempt_trace *trace); -extern void rcupreempt_trace_try_flip_a2(struct rcupreempt_trace *trace); -extern void rcupreempt_trace_try_flip_z1(struct rcupreempt_trace *trace); -extern void rcupreempt_trace_try_flip_ze1(struct rcupreempt_trace *trace); -extern void rcupreempt_trace_try_flip_z2(struct rcupreempt_trace *trace); -extern void rcupreempt_trace_try_flip_m1(struct rcupreempt_trace *trace); -extern void rcupreempt_trace_try_flip_me1(struct rcupreempt_trace *trace); -extern void rcupreempt_trace_try_flip_m2(struct rcupreempt_trace *trace); -extern void rcupreempt_trace_check_callbacks(struct rcupreempt_trace *trace); -extern void rcupreempt_trace_done_remove(struct rcupreempt_trace *trace); -extern void rcupreempt_trace_invoke(struct rcupreempt_trace *trace); -extern void rcupreempt_trace_next_add(struct rcupreempt_trace *trace); - -#endif /* __LINUX_RCUPREEMPT_TRACE_H */ diff --git a/include/linux/rcutree.h b/include/linux/rcutree.h index d4368b7975c..a8930771782 100644 --- a/include/linux/rcutree.h +++ b/include/linux/rcutree.h @@ -30,281 +30,57 @@ #ifndef __LINUX_RCUTREE_H #define __LINUX_RCUTREE_H -#include <linux/cache.h> -#include <linux/spinlock.h> -#include <linux/threads.h> -#include <linux/percpu.h> -#include <linux/cpumask.h> -#include <linux/seqlock.h> +extern void rcu_sched_qs(int cpu); +extern void rcu_bh_qs(int cpu); -/* - * Define shape of hierarchy based on NR_CPUS and CONFIG_RCU_FANOUT. - * In theory, it should be possible to add more levels straightforwardly. - * In practice, this has not been tested, so there is probably some - * bug somewhere. 
- */ -#define MAX_RCU_LVLS 3 -#define RCU_FANOUT (CONFIG_RCU_FANOUT) -#define RCU_FANOUT_SQ (RCU_FANOUT * RCU_FANOUT) -#define RCU_FANOUT_CUBE (RCU_FANOUT_SQ * RCU_FANOUT) - -#if NR_CPUS <= RCU_FANOUT -# define NUM_RCU_LVLS 1 -# define NUM_RCU_LVL_0 1 -# define NUM_RCU_LVL_1 (NR_CPUS) -# define NUM_RCU_LVL_2 0 -# define NUM_RCU_LVL_3 0 -#elif NR_CPUS <= RCU_FANOUT_SQ -# define NUM_RCU_LVLS 2 -# define NUM_RCU_LVL_0 1 -# define NUM_RCU_LVL_1 (((NR_CPUS) + RCU_FANOUT - 1) / RCU_FANOUT) -# define NUM_RCU_LVL_2 (NR_CPUS) -# define NUM_RCU_LVL_3 0 -#elif NR_CPUS <= RCU_FANOUT_CUBE -# define NUM_RCU_LVLS 3 -# define NUM_RCU_LVL_0 1 -# define NUM_RCU_LVL_1 (((NR_CPUS) + RCU_FANOUT_SQ - 1) / RCU_FANOUT_SQ) -# define NUM_RCU_LVL_2 (((NR_CPUS) + (RCU_FANOUT) - 1) / (RCU_FANOUT)) -# define NUM_RCU_LVL_3 NR_CPUS -#else -# error "CONFIG_RCU_FANOUT insufficient for NR_CPUS" -#endif /* #if (NR_CPUS) <= RCU_FANOUT */ - -#define RCU_SUM (NUM_RCU_LVL_0 + NUM_RCU_LVL_1 + NUM_RCU_LVL_2 + NUM_RCU_LVL_3) -#define NUM_RCU_NODES (RCU_SUM - NR_CPUS) - -/* - * Dynticks per-CPU state. - */ -struct rcu_dynticks { - int dynticks_nesting; /* Track nesting level, sort of. */ - int dynticks; /* Even value for dynticks-idle, else odd. */ - int dynticks_nmi; /* Even value for either dynticks-idle or */ - /* not in nmi handler, else odd. So this */ - /* remains even for nmi from irq handler. */ -}; - -/* - * Definition for node within the RCU grace-period-detection hierarchy. - */ -struct rcu_node { - spinlock_t lock; - unsigned long qsmask; /* CPUs or groups that need to switch in */ - /* order for current grace period to proceed.*/ - unsigned long qsmaskinit; - /* Per-GP initialization for qsmask. */ - unsigned long grpmask; /* Mask to apply to parent qsmask. */ - int grplo; /* lowest-numbered CPU or group here. */ - int grphi; /* highest-numbered CPU or group here. */ - u8 grpnum; /* CPU/group number for next level up. */ - u8 level; /* root is at level 0. */ - struct rcu_node *parent; -} ____cacheline_internodealigned_in_smp; - -/* Index values for nxttail array in struct rcu_data. */ -#define RCU_DONE_TAIL 0 /* Also RCU_WAIT head. */ -#define RCU_WAIT_TAIL 1 /* Also RCU_NEXT_READY head. */ -#define RCU_NEXT_READY_TAIL 2 /* Also RCU_NEXT head. */ -#define RCU_NEXT_TAIL 3 -#define RCU_NEXT_SIZE 4 - -/* Per-CPU data for read-copy update. */ -struct rcu_data { - /* 1) quiescent-state and grace-period handling : */ - long completed; /* Track rsp->completed gp number */ - /* in order to detect GP end. */ - long gpnum; /* Highest gp number that this CPU */ - /* is aware of having started. */ - long passed_quiesc_completed; - /* Value of completed at time of qs. */ - bool passed_quiesc; /* User-mode/idle loop etc. */ - bool qs_pending; /* Core waits for quiesc state. */ - bool beenonline; /* CPU online at least once. */ - struct rcu_node *mynode; /* This CPU's leaf of hierarchy */ - unsigned long grpmask; /* Mask to apply to leaf qsmask. */ - - /* 2) batch handling */ - /* - * If nxtlist is not NULL, it is partitioned as follows. - * Any of the partitions might be empty, in which case the - * pointer to that partition will be equal to the pointer for - * the following partition. When the list is empty, all of - * the nxttail elements point to nxtlist, which is NULL. 
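The sizing macros removed above (they move into the rcutree implementation) derive the shape of the rcu_node hierarchy from NR_CPUS and CONFIG_RCU_FANOUT. A worked sketch for a hypothetical NR_CPUS=128, CONFIG_RCU_FANOUT=16 build, using EXAMPLE_ names so as not to clash with the real macros:

#define EXAMPLE_RCU_FANOUT	16
#define EXAMPLE_RCU_FANOUT_SQ	(EXAMPLE_RCU_FANOUT * EXAMPLE_RCU_FANOUT)	/* 256 */
#define EXAMPLE_NR_CPUS		128

/* 128 <= 256, so two levels are enough (NUM_RCU_LVLS == 2). */
#define EXAMPLE_NUM_RCU_LVL_0	1						/* the root rcu_node */
#define EXAMPLE_NUM_RCU_LVL_1	\
	((EXAMPLE_NR_CPUS + EXAMPLE_RCU_FANOUT - 1) / EXAMPLE_RCU_FANOUT)	/* 8 leaf nodes */
#define EXAMPLE_NUM_RCU_LVL_2	EXAMPLE_NR_CPUS					/* per-CPU rcu_data fan-in */

/*
 * RCU_SUM - NR_CPUS = (1 + 8 + 128) - 128 = 9 rcu_node structures in total,
 * each leaf covering at most 16 CPUs.
 */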
- * - * [*nxttail[RCU_NEXT_READY_TAIL], NULL = *nxttail[RCU_NEXT_TAIL]): - * Entries that might have arrived after current GP ended - * [*nxttail[RCU_WAIT_TAIL], *nxttail[RCU_NEXT_READY_TAIL]): - * Entries known to have arrived before current GP ended - * [*nxttail[RCU_DONE_TAIL], *nxttail[RCU_WAIT_TAIL]): - * Entries that batch # <= ->completed - 1: waiting for current GP - * [nxtlist, *nxttail[RCU_DONE_TAIL]): - * Entries that batch # <= ->completed - * The grace period for these entries has completed, and - * the other grace-period-completed entries may be moved - * here temporarily in rcu_process_callbacks(). - */ - struct rcu_head *nxtlist; - struct rcu_head **nxttail[RCU_NEXT_SIZE]; - long qlen; /* # of queued callbacks */ - long blimit; /* Upper limit on a processed batch */ - -#ifdef CONFIG_NO_HZ - /* 3) dynticks interface. */ - struct rcu_dynticks *dynticks; /* Shared per-CPU dynticks state. */ - int dynticks_snap; /* Per-GP tracking for dynticks. */ - int dynticks_nmi_snap; /* Per-GP tracking for dynticks_nmi. */ -#endif /* #ifdef CONFIG_NO_HZ */ - - /* 4) reasons this CPU needed to be kicked by force_quiescent_state */ -#ifdef CONFIG_NO_HZ - unsigned long dynticks_fqs; /* Kicked due to dynticks idle. */ -#endif /* #ifdef CONFIG_NO_HZ */ - unsigned long offline_fqs; /* Kicked due to being offline. */ - unsigned long resched_ipi; /* Sent a resched IPI. */ - - /* 5) state to allow this CPU to force_quiescent_state on others */ - long n_rcu_pending; /* rcu_pending() calls since boot. */ - long n_rcu_pending_force_qs; /* when to force quiescent states. */ - - int cpu; -}; - -/* Values for signaled field in struct rcu_state. */ -#define RCU_GP_INIT 0 /* Grace period being initialized. */ -#define RCU_SAVE_DYNTICK 1 /* Need to scan dyntick state. */ -#define RCU_FORCE_QS 2 /* Need to force quiescent state. */ -#ifdef CONFIG_NO_HZ -#define RCU_SIGNAL_INIT RCU_SAVE_DYNTICK -#else /* #ifdef CONFIG_NO_HZ */ -#define RCU_SIGNAL_INIT RCU_FORCE_QS -#endif /* #else #ifdef CONFIG_NO_HZ */ - -#define RCU_JIFFIES_TILL_FORCE_QS 3 /* for rsp->jiffies_force_qs */ -#ifdef CONFIG_RCU_CPU_STALL_DETECTOR -#define RCU_SECONDS_TILL_STALL_CHECK (10 * HZ) /* for rsp->jiffies_stall */ -#define RCU_SECONDS_TILL_STALL_RECHECK (30 * HZ) /* for rsp->jiffies_stall */ -#define RCU_STALL_RAT_DELAY 2 /* Allow other CPUs time */ - /* to take at least one */ - /* scheduling clock irq */ - /* before ratting on them. */ - -#endif /* #ifdef CONFIG_RCU_CPU_STALL_DETECTOR */ - -/* - * RCU global state, including node hierarchy. This hierarchy is - * represented in "heap" form in a dense array. The root (first level) - * of the hierarchy is in ->node[0] (referenced by ->level[0]), the second - * level in ->node[1] through ->node[m] (->node[1] referenced by ->level[1]), - * and the third level in ->node[m+1] and following (->node[m+1] referenced - * by ->level[2]). The number of levels is determined by the number of - * CPUs and by CONFIG_RCU_FANOUT. Small systems will have a "hierarchy" - * consisting of a single rcu_node. - */ -struct rcu_state { - struct rcu_node node[NUM_RCU_NODES]; /* Hierarchy. */ - struct rcu_node *level[NUM_RCU_LVLS]; /* Hierarchy levels. */ - u32 levelcnt[MAX_RCU_LVLS + 1]; /* # nodes in each level. */ - u8 levelspread[NUM_RCU_LVLS]; /* kids/node in each level. */ - struct rcu_data *rda[NR_CPUS]; /* array of rdp pointers. */ - - /* The following fields are guarded by the root rcu_node's lock. */ - - u8 signaled ____cacheline_internodealigned_in_smp; - /* Force QS state. 
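A minimal sketch (not code from the patch) of how a new callback lands on the segmented list described above: it is always appended to the RCU_NEXT segment, and the nxttail[] tail pointers let later grace-period processing advance whole segments without walking the list.

static void example_enqueue_callback(struct rcu_data *rdp, struct rcu_head *head,
				     void (*func)(struct rcu_head *))
{
	head->func = func;
	head->next = NULL;
	*rdp->nxttail[RCU_NEXT_TAIL] = head;		/* append at the current tail */
	rdp->nxttail[RCU_NEXT_TAIL] = &head->next;	/* tail now points past the new entry */
	rdp->qlen++;
}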
*/ - long gpnum; /* Current gp number. */ - long completed; /* # of last completed gp. */ - spinlock_t onofflock; /* exclude on/offline and */ - /* starting new GP. */ - spinlock_t fqslock; /* Only one task forcing */ - /* quiescent states. */ - unsigned long jiffies_force_qs; /* Time at which to invoke */ - /* force_quiescent_state(). */ - unsigned long n_force_qs; /* Number of calls to */ - /* force_quiescent_state(). */ - unsigned long n_force_qs_lh; /* ~Number of calls leaving */ - /* due to lock unavailable. */ - unsigned long n_force_qs_ngp; /* Number of calls leaving */ - /* due to no GP active. */ -#ifdef CONFIG_RCU_CPU_STALL_DETECTOR - unsigned long gp_start; /* Time at which GP started, */ - /* but in jiffies. */ - unsigned long jiffies_stall; /* Time at which to check */ - /* for CPU stalls. */ -#endif /* #ifdef CONFIG_RCU_CPU_STALL_DETECTOR */ -#ifdef CONFIG_NO_HZ - long dynticks_completed; /* Value of completed @ snap. */ -#endif /* #ifdef CONFIG_NO_HZ */ -}; - -extern struct rcu_state rcu_state; -DECLARE_PER_CPU(struct rcu_data, rcu_data); - -extern struct rcu_state rcu_bh_state; -DECLARE_PER_CPU(struct rcu_data, rcu_bh_data); +extern int rcu_needs_cpu(int cpu); -/* - * Increment the quiescent state counter. - * The counter is a bit degenerated: We do not need to know - * how many quiescent states passed, just if there was at least - * one since the start of the grace period. Thus just a flag. - */ -static inline void rcu_qsctr_inc(int cpu) -{ - struct rcu_data *rdp = &per_cpu(rcu_data, cpu); - rdp->passed_quiesc = 1; - rdp->passed_quiesc_completed = rdp->completed; -} -static inline void rcu_bh_qsctr_inc(int cpu) -{ - struct rcu_data *rdp = &per_cpu(rcu_bh_data, cpu); - rdp->passed_quiesc = 1; - rdp->passed_quiesc_completed = rdp->completed; -} +#ifdef CONFIG_TREE_PREEMPT_RCU -extern int rcu_pending(int cpu); -extern int rcu_needs_cpu(int cpu); +extern void __rcu_read_lock(void); +extern void __rcu_read_unlock(void); +extern void exit_rcu(void); -#ifdef CONFIG_DEBUG_LOCK_ALLOC -extern struct lockdep_map rcu_lock_map; -# define rcu_read_acquire() \ - lock_acquire(&rcu_lock_map, 0, 0, 2, 1, NULL, _THIS_IP_) -# define rcu_read_release() lock_release(&rcu_lock_map, 1, _THIS_IP_) -#else -# define rcu_read_acquire() do { } while (0) -# define rcu_read_release() do { } while (0) -#endif +#else /* #ifdef CONFIG_TREE_PREEMPT_RCU */ static inline void __rcu_read_lock(void) { preempt_disable(); - __acquire(RCU); - rcu_read_acquire(); } + static inline void __rcu_read_unlock(void) { - rcu_read_release(); - __release(RCU); preempt_enable(); } + +static inline void exit_rcu(void) +{ +} + +#endif /* #else #ifdef CONFIG_TREE_PREEMPT_RCU */ + static inline void __rcu_read_lock_bh(void) { local_bh_disable(); - __acquire(RCU_BH); - rcu_read_acquire(); } static inline void __rcu_read_unlock_bh(void) { - rcu_read_release(); - __release(RCU_BH); local_bh_enable(); } #define __synchronize_sched() synchronize_rcu() -#define call_rcu_sched(head, func) call_rcu(head, func) +extern void call_rcu_sched(struct rcu_head *head, + void (*func)(struct rcu_head *rcu)); -static inline void rcu_init_sched(void) +static inline void synchronize_rcu_expedited(void) +{ + synchronize_sched_expedited(); +} + +static inline void synchronize_rcu_bh_expedited(void) { + synchronize_sched_expedited(); } extern void __rcu_init(void); @@ -313,6 +89,11 @@ extern void rcu_restart_cpu(int cpu); extern long rcu_batches_completed(void); extern long rcu_batches_completed_bh(void); +extern long rcu_batches_completed_sched(void); 
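A minimal usage sketch against the interfaces declared here; struct foo and its helpers are hypothetical. The reader relies on rcu_read_lock(), which in the non-preemptible build above reduces to preempt_disable(), while the updater defers its free through call_rcu_sched(), now a real function rather than an alias for call_rcu().

#include <linux/rcupdate.h>
#include <linux/slab.h>

struct foo {
	int		value;
	struct rcu_head	rcu;
};

static struct foo *global_foo;		/* updaters serialized by the caller */

static int foo_read_value(void)
{
	struct foo *p;
	int val = -1;

	rcu_read_lock();
	p = rcu_dereference(global_foo);
	if (p)
		val = p->value;
	rcu_read_unlock();
	return val;
}

static void foo_reclaim(struct rcu_head *head)
{
	kfree(container_of(head, struct foo, rcu));
}

static void foo_replace(struct foo *new)
{
	struct foo *old = global_foo;

	rcu_assign_pointer(global_foo, new);
	if (old)
		call_rcu_sched(&old->rcu, foo_reclaim);	/* freed once every CPU passes a quiescent state */
}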
+ +static inline void rcu_init_sched(void) +{ +} #ifdef CONFIG_NO_HZ void rcu_enter_nohz(void); @@ -326,4 +107,10 @@ static inline void rcu_exit_nohz(void) } #endif /* CONFIG_NO_HZ */ +/* A context switch is a grace period for rcutree. */ +static inline int rcu_blocking_is_gp(void) +{ + return num_online_cpus() == 1; +} + #endif /* __LINUX_RCUTREE_H */ diff --git a/include/linux/rds.h b/include/linux/rds.h new file mode 100644 index 00000000000..89d46e1afbb --- /dev/null +++ b/include/linux/rds.h @@ -0,0 +1,262 @@ +/* + * Copyright (c) 2008 Oracle. All rights reserved. + * + * This software is available to you under a choice of one of two + * licenses. You may choose to be licensed under the terms of the GNU + * General Public License (GPL) Version 2, available from the file + * COPYING in the main directory of this source tree, or the + * OpenIB.org BSD license below: + * + * Redistribution and use in source and binary forms, with or + * without modification, are permitted provided that the following + * conditions are met: + * + * - Redistributions of source code must retain the above + * copyright notice, this list of conditions and the following + * disclaimer. + * + * - Redistributions in binary form must reproduce the above + * copyright notice, this list of conditions and the following + * disclaimer in the documentation and/or other materials + * provided with the distribution. + * + * THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, + * EXPRESS OR IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF + * MERCHANTABILITY, FITNESS FOR A PARTICULAR PURPOSE AND + * NONINFRINGEMENT. IN NO EVENT SHALL THE AUTHORS OR COPYRIGHT HOLDERS + * BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER LIABILITY, WHETHER IN AN + * ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING FROM, OUT OF OR IN + * CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER DEALINGS IN THE + * SOFTWARE. + * + */ + +#ifndef _LINUX_RDS_H +#define _LINUX_RDS_H + +#include <linux/types.h> + +/* These sparse annotated types shouldn't be in any user + * visible header file. We should clean this up rather + * than kludging around them. */ +#ifndef __KERNEL__ +#define __be16 u_int16_t +#define __be32 u_int32_t +#define __be64 u_int64_t +#endif + +#define RDS_IB_ABI_VERSION 0x301 + +/* + * setsockopt/getsockopt for SOL_RDS + */ +#define RDS_CANCEL_SENT_TO 1 +#define RDS_GET_MR 2 +#define RDS_FREE_MR 3 +/* deprecated: RDS_BARRIER 4 */ +#define RDS_RECVERR 5 +#define RDS_CONG_MONITOR 6 + +/* + * Control message types for SOL_RDS. + * + * CMSG_RDMA_ARGS (sendmsg) + * Request a RDMA transfer to/from the specified + * memory ranges. + * The cmsg_data is a struct rds_rdma_args. + * RDS_CMSG_RDMA_DEST (recvmsg, sendmsg) + * Kernel informs application about intended + * source/destination of a RDMA transfer + * RDS_CMSG_RDMA_MAP (sendmsg) + * Application asks kernel to map the given + * memory range into a IB MR, and send the + * R_Key along in an RDS extension header. + * The cmsg_data is a struct rds_get_mr_args, + * the same as for the GET_MR setsockopt. + * RDS_CMSG_RDMA_STATUS (recvmsg) + * Returns the status of a completed RDMA operation. 
+ */ +#define RDS_CMSG_RDMA_ARGS 1 +#define RDS_CMSG_RDMA_DEST 2 +#define RDS_CMSG_RDMA_MAP 3 +#define RDS_CMSG_RDMA_STATUS 4 +#define RDS_CMSG_CONG_UPDATE 5 + +#define RDS_INFO_FIRST 10000 +#define RDS_INFO_COUNTERS 10000 +#define RDS_INFO_CONNECTIONS 10001 +/* 10002 aka RDS_INFO_FLOWS is deprecated */ +#define RDS_INFO_SEND_MESSAGES 10003 +#define RDS_INFO_RETRANS_MESSAGES 10004 +#define RDS_INFO_RECV_MESSAGES 10005 +#define RDS_INFO_SOCKETS 10006 +#define RDS_INFO_TCP_SOCKETS 10007 +#define RDS_INFO_IB_CONNECTIONS 10008 +#define RDS_INFO_CONNECTION_STATS 10009 +#define RDS_INFO_IWARP_CONNECTIONS 10010 +#define RDS_INFO_LAST 10010 + +struct rds_info_counter { + u_int8_t name[32]; + u_int64_t value; +} __attribute__((packed)); + +#define RDS_INFO_CONNECTION_FLAG_SENDING 0x01 +#define RDS_INFO_CONNECTION_FLAG_CONNECTING 0x02 +#define RDS_INFO_CONNECTION_FLAG_CONNECTED 0x04 + +#define TRANSNAMSIZ 16 + +struct rds_info_connection { + u_int64_t next_tx_seq; + u_int64_t next_rx_seq; + __be32 laddr; + __be32 faddr; + u_int8_t transport[TRANSNAMSIZ]; /* null term ascii */ + u_int8_t flags; +} __attribute__((packed)); + +struct rds_info_flow { + __be32 laddr; + __be32 faddr; + u_int32_t bytes; + __be16 lport; + __be16 fport; +} __attribute__((packed)); + +#define RDS_INFO_MESSAGE_FLAG_ACK 0x01 +#define RDS_INFO_MESSAGE_FLAG_FAST_ACK 0x02 + +struct rds_info_message { + u_int64_t seq; + u_int32_t len; + __be32 laddr; + __be32 faddr; + __be16 lport; + __be16 fport; + u_int8_t flags; +} __attribute__((packed)); + +struct rds_info_socket { + u_int32_t sndbuf; + __be32 bound_addr; + __be32 connected_addr; + __be16 bound_port; + __be16 connected_port; + u_int32_t rcvbuf; + u_int64_t inum; +} __attribute__((packed)); + +struct rds_info_tcp_socket { + __be32 local_addr; + __be16 local_port; + __be32 peer_addr; + __be16 peer_port; + u_int64_t hdr_rem; + u_int64_t data_rem; + u_int32_t last_sent_nxt; + u_int32_t last_expected_una; + u_int32_t last_seen_una; +} __attribute__((packed)); + +#define RDS_IB_GID_LEN 16 +struct rds_info_rdma_connection { + __be32 src_addr; + __be32 dst_addr; + uint8_t src_gid[RDS_IB_GID_LEN]; + uint8_t dst_gid[RDS_IB_GID_LEN]; + + uint32_t max_send_wr; + uint32_t max_recv_wr; + uint32_t max_send_sge; + uint32_t rdma_mr_max; + uint32_t rdma_mr_size; +}; + +/* + * Congestion monitoring. + * Congestion control in RDS happens at the host connection + * level by exchanging a bitmap marking congested ports. + * By default, a process sleeping in poll() is always woken + * up when the congestion map is updated. + * With explicit monitoring, an application can have more + * fine-grained control. + * The application installs a 64bit mask value in the socket, + * where each bit corresponds to a group of ports. + * When a congestion update arrives, RDS checks the set of + * ports that are now uncongested against the list bit mask + * installed in the socket, and if they overlap, we queue a + * cong_notification on the socket. + * + * To install the congestion monitor bitmask, use RDS_CONG_MONITOR + * with the 64bit mask. + * Congestion updates are received via RDS_CMSG_CONG_UPDATE + * control messages. + * + * The correspondence between bits and ports is + * 1 << (portnum % 64) + */ +#define RDS_CONG_MONITOR_SIZE 64 +#define RDS_CONG_MONITOR_BIT(port) (((unsigned int) port) % RDS_CONG_MONITOR_SIZE) +#define RDS_CONG_MONITOR_MASK(port) (1ULL << RDS_CONG_MONITOR_BIT(port)) + +/* + * RDMA related types + */ + +/* + * This encapsulates a remote memory location. 
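A userspace sketch of the congestion-monitor interface described above. SOL_RDS is not defined in this header, so the value used below (276, per the RDS documentation) is an assumption; rds_fd is an already-created RDS socket and error handling is omitted.

#include <stdint.h>
#include <sys/socket.h>
#include <linux/rds.h>

#ifndef SOL_RDS
#define SOL_RDS 276	/* assumed value; see Documentation/networking/rds.txt */
#endif

static int rds_monitor_port(int rds_fd, unsigned int port)
{
	uint64_t mask = RDS_CONG_MONITOR_MASK(port);	/* 1ULL << (port % 64) */

	/* later updates arrive as RDS_CMSG_CONG_UPDATE control messages */
	return setsockopt(rds_fd, SOL_RDS, RDS_CONG_MONITOR, &mask, sizeof(mask));
}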
+ * In the current implementation, it contains the R_Key + * of the remote memory region, and the offset into it + * (so that the application does not have to worry about + * alignment). + */ +typedef u_int64_t rds_rdma_cookie_t; + +struct rds_iovec { + u_int64_t addr; + u_int64_t bytes; +}; + +struct rds_get_mr_args { + struct rds_iovec vec; + u_int64_t cookie_addr; + uint64_t flags; +}; + +struct rds_free_mr_args { + rds_rdma_cookie_t cookie; + u_int64_t flags; +}; + +struct rds_rdma_args { + rds_rdma_cookie_t cookie; + struct rds_iovec remote_vec; + u_int64_t local_vec_addr; + u_int64_t nr_local; + u_int64_t flags; + u_int64_t user_token; +}; + +struct rds_rdma_notify { + u_int64_t user_token; + int32_t status; +}; + +#define RDS_RDMA_SUCCESS 0 +#define RDS_RDMA_REMOTE_ERROR 1 +#define RDS_RDMA_CANCELED 2 +#define RDS_RDMA_DROPPED 3 +#define RDS_RDMA_OTHER_ERROR 4 + +/* + * Common set of flags for all RDMA related structs + */ +#define RDS_RDMA_READWRITE 0x0001 +#define RDS_RDMA_FENCE 0x0002 /* use FENCE for immediate send */ +#define RDS_RDMA_INVALIDATE 0x0004 /* invalidate R_Key after freeing MR */ +#define RDS_RDMA_USE_ONCE 0x0008 /* free MR after use */ +#define RDS_RDMA_DONTWAIT 0x0010 /* Don't wait in SET_BARRIER */ +#define RDS_RDMA_NOTIFY_ME 0x0020 /* Notify when operation completes */ + +#endif /* IB_RDS_H */ diff --git a/include/linux/regulator/bq24022.h b/include/linux/regulator/bq24022.h index e84b0a9feda..a6d014005d4 100644 --- a/include/linux/regulator/bq24022.h +++ b/include/linux/regulator/bq24022.h @@ -10,6 +10,8 @@ * */ +struct regulator_init_data; + /** * bq24022_mach_info - platform data for bq24022 * @gpio_nce: GPIO line connected to the nCE pin, used to enable / disable charging @@ -18,4 +20,5 @@ struct bq24022_mach_info { int gpio_nce; int gpio_iset2; + struct regulator_init_data *init_data; }; diff --git a/include/linux/regulator/consumer.h b/include/linux/regulator/consumer.h index 801bf77ff4e..277f4b964df 100644 --- a/include/linux/regulator/consumer.h +++ b/include/linux/regulator/consumer.h @@ -3,7 +3,7 @@ * * Copyright (C) 2007, 2008 Wolfson Microelectronics PLC. * - * Author: Liam Girdwood <lg@opensource.wolfsonmicro.com> + * Author: Liam Girdwood <lrg@slimlogic.co.uk> * * This program is free software; you can redistribute it and/or modify * it under the terms of the GNU General Public License version 2 as @@ -88,6 +88,7 @@ * FAIL Regulator output has failed. * OVER_TEMP Regulator over temp. * FORCE_DISABLE Regulator shut down by software. + * VOLTAGE_CHANGE Regulator voltage changed. * * NOTE: These events can be OR'ed together when passed into handler. 
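A userspace sketch of registering a buffer through the RDS_GET_MR socket option; the kernel is expected to write the resulting rds_rdma_cookie_t to cookie_addr. The SOL_RDS value and the flag choice are assumptions, and error handling is omitted.

#include <stddef.h>
#include <stdint.h>
#include <sys/socket.h>
#include <linux/rds.h>

#ifndef SOL_RDS
#define SOL_RDS 276	/* assumed, as in the previous sketch */
#endif

static int rds_register_mr(int rds_fd, void *buf, size_t len,
			   rds_rdma_cookie_t *cookie)
{
	struct rds_get_mr_args args = {
		.vec = {
			.addr	= (uint64_t)(unsigned long)buf,
			.bytes	= len,
		},
		.cookie_addr	= (uint64_t)(unsigned long)cookie,
		.flags		= RDS_RDMA_USE_ONCE,	/* free the MR after a single use */
	};

	return setsockopt(rds_fd, SOL_RDS, RDS_GET_MR, &args, sizeof(args));
}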
*/ @@ -98,6 +99,7 @@ #define REGULATOR_EVENT_FAIL 0x08 #define REGULATOR_EVENT_OVER_TEMP 0x10 #define REGULATOR_EVENT_FORCE_DISABLE 0x20 +#define REGULATOR_EVENT_VOLTAGE_CHANGE 0x40 struct regulator; @@ -140,6 +142,8 @@ int regulator_bulk_disable(int num_consumers, void regulator_bulk_free(int num_consumers, struct regulator_bulk_data *consumers); +int regulator_count_voltages(struct regulator *regulator); +int regulator_list_voltage(struct regulator *regulator, unsigned selector); int regulator_set_voltage(struct regulator *regulator, int min_uV, int max_uV); int regulator_get_voltage(struct regulator *regulator); int regulator_set_current_limit(struct regulator *regulator, diff --git a/include/linux/regulator/driver.h b/include/linux/regulator/driver.h index 2dae05705f1..225f733e753 100644 --- a/include/linux/regulator/driver.h +++ b/include/linux/regulator/driver.h @@ -3,7 +3,7 @@ * * Copyright (C) 2007, 2008 Wolfson Microelectronics PLC. * - * Author: Liam Girdwood <lg@opensource.wolfsonmicro.com> + * Author: Liam Girdwood <lrg@slimlogic.co.uk> * * This program is free software; you can redistribute it and/or modify * it under the terms of the GNU General Public License version 2 as @@ -21,25 +21,39 @@ struct regulator_dev; struct regulator_init_data; +enum regulator_status { + REGULATOR_STATUS_OFF, + REGULATOR_STATUS_ON, + REGULATOR_STATUS_ERROR, + /* fast/normal/idle/standby are flavors of "on" */ + REGULATOR_STATUS_FAST, + REGULATOR_STATUS_NORMAL, + REGULATOR_STATUS_IDLE, + REGULATOR_STATUS_STANDBY, +}; + /** * struct regulator_ops - regulator operations. * - * This struct describes regulator operations which can be implemented by - * regulator chip drivers. - * - * @enable: Enable the regulator. - * @disable: Disable the regulator. + * @enable: Configure the regulator as enabled. + * @disable: Configure the regulator as disabled. * @is_enabled: Return 1 if the regulator is enabled, 0 otherwise. * * @set_voltage: Set the voltage for the regulator within the range specified. * The driver should select the voltage closest to min_uV. * @get_voltage: Return the currently configured voltage for the regulator. + * @list_voltage: Return one of the supported voltages, in microvolts; zero + * if the selector indicates a voltage that is unusable on this system; + * or negative errno. Selectors range from zero to one less than + * regulator_desc.n_voltages. Voltages may be reported in any order. * * @set_current_limit: Configure a limit for a current-limited regulator. - * @get_current_limit: Get the limit for a current-limited regulator. + * @get_current_limit: Get the configured limit for a current-limited regulator. * - * @set_mode: Set the operating mode for the regulator. - * @get_mode: Get the current operating mode for the regulator. + * @set_mode: Set the configured operating mode for the regulator. + * @get_mode: Get the configured operating mode for the regulator. + * @get_status: Return actual (not as-configured) status of regulator, as a + * REGULATOR_STATUS value (or negative errno) * @get_optimum_mode: Get the most efficient operating mode for the regulator * when running with the specified parameters. * @@ -51,9 +65,15 @@ struct regulator_init_data; * suspended. * @set_suspend_mode: Set the operating mode for the regulator when the * system is suspended. + * + * This struct describes regulator operations which can be implemented by + * regulator chip drivers. 
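A consumer-side sketch of the two calls added above; the "vcc_core" supply name and the surrounding driver are hypothetical. regulator_list_voltage() returns zero for selectors that are unusable on this system and a negative errno on error, so only an exact positive match is accepted.

#include <linux/err.h>
#include <linux/regulator/consumer.h>

static int example_pick_voltage(struct device *dev, int target_uV)
{
	struct regulator *reg = regulator_get(dev, "vcc_core");
	int i, count, uV, ret = -EINVAL;

	if (IS_ERR(reg))
		return PTR_ERR(reg);

	count = regulator_count_voltages(reg);
	for (i = 0; i < count; i++) {
		uV = regulator_list_voltage(reg, i);
		if (uV == target_uV) {
			ret = regulator_set_voltage(reg, uV, uV);
			break;
		}
	}

	regulator_put(reg);
	return ret;
}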
*/ struct regulator_ops { + /* enumerate supported voltages */ + int (*list_voltage) (struct regulator_dev *, unsigned selector); + /* get/set regulator voltage */ int (*set_voltage) (struct regulator_dev *, int min_uV, int max_uV); int (*get_voltage) (struct regulator_dev *); @@ -72,6 +92,13 @@ struct regulator_ops { int (*set_mode) (struct regulator_dev *, unsigned int mode); unsigned int (*get_mode) (struct regulator_dev *); + /* report regulator status ... most other accessors report + * control inputs, this reports results of combining inputs + * from Linux (and other sources) with the actual load. + * returns REGULATOR_STATUS_* or negative errno. + */ + int (*get_status)(struct regulator_dev *); + /* get most efficient regulator operating mode for load */ unsigned int (*get_optimum_mode) (struct regulator_dev *, int input_uV, int output_uV, int load_uA); @@ -106,6 +133,7 @@ enum regulator_type { * * @name: Identifying name for the regulator. * @id: Numerical identifier for the regulator. + * @n_voltages: Number of selectors available for ops.list_voltage(). * @ops: Regulator operations table. * @irq: Interrupt number for the regulator. * @type: Indicates if the regulator is a voltage or current regulator. @@ -114,14 +142,48 @@ enum regulator_type { struct regulator_desc { const char *name; int id; + unsigned n_voltages; struct regulator_ops *ops; int irq; enum regulator_type type; struct module *owner; }; +/* + * struct regulator_dev + * + * Voltage / Current regulator class device. One for each + * regulator. + * + * This should *not* be used directly by anything except the regulator + * core and notification injection (which should take the mutex and do + * no other direct access). + */ +struct regulator_dev { + struct regulator_desc *desc; + int use_count; + + /* lists we belong to */ + struct list_head list; /* list of all regulators */ + struct list_head slist; /* list of supplied regulators */ + + /* lists we own */ + struct list_head consumer_list; /* consumers we supply */ + struct list_head supply_list; /* regulators we supply */ + + struct blocking_notifier_head notifier; + struct mutex mutex; /* consumer lock */ + struct module *owner; + struct device dev; + struct regulation_constraints *constraints; + struct regulator_dev *supply; /* for tree */ + + void *reg_data; /* regulator_dev data */ +}; + struct regulator_dev *regulator_register(struct regulator_desc *regulator_desc, - struct device *dev, void *driver_data); + struct device *dev, struct regulator_init_data *init_data, + void *driver_data); void regulator_unregister(struct regulator_dev *rdev); int regulator_notifier_call_chain(struct regulator_dev *rdev, diff --git a/include/linux/regulator/fixed.h b/include/linux/regulator/fixed.h index 1387a5d2190..91b4da31f1b 100644 --- a/include/linux/regulator/fixed.h +++ b/include/linux/regulator/fixed.h @@ -14,9 +14,12 @@ #ifndef __REGULATOR_FIXED_H #define __REGULATOR_FIXED_H +struct regulator_init_data; + struct fixed_voltage_config { const char *supply_name; int microvolts; + struct regulator_init_data *init_data; }; #endif diff --git a/include/linux/regulator/lp3971.h b/include/linux/regulator/lp3971.h new file mode 100644 index 00000000000..61401649fe7 --- /dev/null +++ b/include/linux/regulator/lp3971.h @@ -0,0 +1,51 @@ +/* + * National Semiconductors LP3971 PMIC chip client interface + * + * Copyright (C) 2009 Samsung Electronics + * Author: Marek Szyprowski <m.szyprowski@samsung.com> + * + * Based on wm8400.h + * + * This program is free software; you can 
redistribute it and/or modify + * it under the terms of the GNU General Public License as published by + * the Free Software Foundation; either version 2 of the License, or + * (at your option) any later version. + * + * This program is distributed in the hope that it will be useful, + * but WITHOUT ANY WARRANTY; without even the implied warranty of + * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the + * GNU General Public License for more details. + * + * You should have received a copy of the GNU General Public License + * along with this program; if not, write to the Free Software + * Foundation, Inc., 675 Mass Ave, Cambridge, MA 02139, USA. + */ + +#ifndef __LINUX_REGULATOR_LP3971_H +#define __LINUX_REGULATOR_LP3971_H + +#include <linux/regulator/machine.h> + +#define LP3971_LDO1 0 +#define LP3971_LDO2 1 +#define LP3971_LDO3 2 +#define LP3971_LDO4 3 +#define LP3971_LDO5 4 + +#define LP3971_DCDC1 5 +#define LP3971_DCDC2 6 +#define LP3971_DCDC3 7 + +#define LP3971_NUM_REGULATORS 8 + +struct lp3971_regulator_subdev { + int id; + struct regulator_init_data *initdata; +}; + +struct lp3971_platform_data { + int num_regulators; + struct lp3971_regulator_subdev *regulators; +}; + +#endif diff --git a/include/linux/regulator/machine.h b/include/linux/regulator/machine.h index 3794773b23d..bac64fa390f 100644 --- a/include/linux/regulator/machine.h +++ b/include/linux/regulator/machine.h @@ -3,7 +3,7 @@ * * Copyright (C) 2007, 2008 Wolfson Microelectronics PLC. * - * Author: Liam Girdwood <lg@opensource.wolfsonmicro.com> + * Author: Liam Girdwood <lrg@slimlogic.co.uk> * * This program is free software; you can redistribute it and/or modify * it under the terms of the GNU General Public License version 2 as @@ -73,7 +73,9 @@ struct regulator_state { * * @always_on: Set if the regulator should never be disabled. * @boot_on: Set if the regulator is enabled when the system is initially - * started. + * started. If the regulator is not enabled by the hardware or + * bootloader then it will be enabled when the constraints are + * applied. * @apply_uV: Apply the voltage constraint when initialising. * * @input_uV: Input voltage for regulator when supplied by another regulator. @@ -83,6 +85,7 @@ struct regulator_state { * @state_standby: State for regulator when system is suspended in standby * mode. * @initial_state: Suspend state to set by default. + * @initial_mode: Mode to set at startup. 
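A driver-side sketch of the regulator_ops.list_voltage, regulator_desc.n_voltages and four-argument regulator_register() changes above. The chip, its voltage table and the probe wiring are hypothetical; register I/O and the remaining ops are stubbed out.

#include <linux/err.h>
#include <linux/module.h>
#include <linux/platform_device.h>
#include <linux/regulator/driver.h>

static const int example_voltages[] = { 850000, 1000000, 1200000, 1800000 };

static int example_list_voltage(struct regulator_dev *rdev, unsigned selector)
{
	if (selector >= ARRAY_SIZE(example_voltages))
		return -EINVAL;
	return example_voltages[selector];	/* microvolts */
}

static struct regulator_ops example_ops = {
	.list_voltage	= example_list_voltage,
	/* .get_voltage, .set_voltage, .enable, ... as the hardware allows */
};

static struct regulator_desc example_desc = {
	.name		= "example-ldo1",
	.id		= 0,
	.n_voltages	= ARRAY_SIZE(example_voltages),
	.ops		= &example_ops,
	.type		= REGULATOR_VOLTAGE,
	.owner		= THIS_MODULE,
};

static int example_probe(struct platform_device *pdev)
{
	struct regulator_dev *rdev;

	/* init_data normally comes from board code via platform data */
	rdev = regulator_register(&example_desc, &pdev->dev,
				  pdev->dev.platform_data, NULL);
	return IS_ERR(rdev) ? PTR_ERR(rdev) : 0;
}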
*/ struct regulation_constraints { @@ -111,6 +114,9 @@ struct regulation_constraints { struct regulator_state state_standby; suspend_state_t initial_state; /* suspend state to set at init */ + /* mode to set on startup */ + unsigned int initial_mode; + /* constriant flags */ unsigned always_on:1; /* regulator never off when system is on */ unsigned boot_on:1; /* bootloader/firmware enabled regulator */ @@ -160,4 +166,6 @@ struct regulator_init_data { int regulator_suspend_prepare(suspend_state_t state); +void regulator_has_full_constraints(void); + #endif diff --git a/include/linux/regulator/max1586.h b/include/linux/regulator/max1586.h new file mode 100644 index 00000000000..44563192bf1 --- /dev/null +++ b/include/linux/regulator/max1586.h @@ -0,0 +1,63 @@ +/* + * max1586.h -- Voltage regulation for the Maxim 1586 + * + * Copyright (C) 2008 Robert Jarzmik + * + * This program is free software; you can redistribute it and/or modify + * it under the terms of the GNU General Public License as published by + * the Free Software Foundation; either version 2 of the License, or + * (at your option) any later version. + * + * This program is distributed in the hope that it will be useful, + * but WITHOUT ANY WARRANTY; without even the implied warranty of + * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the + * GNU General Public License for more details. + * + * You should have received a copy of the GNU General Public License + * along with this program; if not, write to the Free Software + * Foundation, Inc., 59 Temple Place, Suite 330, Boston, MA 02111-1307 USA + */ + +#ifndef REGULATOR_MAX1586 +#define REGULATOR_MAX1586 + +#include <linux/regulator/machine.h> + +#define MAX1586_V3 0 +#define MAX1586_V6 1 + +/* precalculated values for v3_gain */ +#define MAX1586_GAIN_NO_R24 1000000 /* 700000 .. 1475000 mV */ +#define MAX1586_GAIN_R24_3k32 1051098 /* 735768 .. 1550369 mV */ +#define MAX1586_GAIN_R24_5k11 1078648 /* 755053 .. 1591005 mV */ +#define MAX1586_GAIN_R24_7k5 1115432 /* 780802 .. 1645262 mV */ + +/** + * max1586_subdev_data - regulator data + * @id: regulator Id (either MAX1586_V3 or MAX1586_V6) + * @name: regulator cute name (example for V3: "vcc_core") + * @platform_data: regulator init data (contraints, supplies, ...) + */ +struct max1586_subdev_data { + int id; + char *name; + struct regulator_init_data *platform_data; +}; + +/** + * max1586_platform_data - platform data for max1586 + * @num_subdevs: number of regultors used (may be 1 or 2) + * @subdevs: regulator used + * At most, there will be a regulator for V3 and one for V6 voltages. + * @v3_gain: gain on the V3 voltage output multiplied by 1e6. + * This can be calculated as ((1 + R24/R25 + R24/185.5kOhm) * 1e6) + * for an external resistor configuration as described in the + * data sheet (R25=100kOhm). + */ +struct max1586_platform_data { + int num_subdevs; + struct max1586_subdev_data *subdevs; + int v3_gain; +}; + +#endif diff --git a/include/linux/regulator/userspace-consumer.h b/include/linux/regulator/userspace-consumer.h new file mode 100644 index 00000000000..b4554ce9d4b --- /dev/null +++ b/include/linux/regulator/userspace-consumer.h @@ -0,0 +1,25 @@ +#ifndef __REGULATOR_PLATFORM_CONSUMER_H_ +#define __REGULATOR_PLATFORM_CONSUMER_H_ + +struct regulator_consumer_supply; + +/** + * struct regulator_userspace_consumer_data - line consumer + * initialisation data. + * + * @name: Name for the consumer line + * @num_supplies: Number of supplies feeding the line + * @supplies: Supplies configuration. 
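A board-code sketch tying the pieces above together: regulation constraints using boot_on and the new initial_mode, wired into max1586_platform_data. The gain constant can be checked against the formula in the header: (1 + 3320/100000 + 3320/185500) * 1e6 rounds to 1051098, i.e. MAX1586_GAIN_R24_3k32. The board, supply name and voltage limits are made up.

#include <linux/kernel.h>
#include <linux/regulator/machine.h>
#include <linux/regulator/max1586.h>

static struct regulator_init_data example_v3_init_data = {
	.constraints = {
		.min_uV		= 735768,
		.max_uV		= 1550369,
		.valid_ops_mask	= REGULATOR_CHANGE_VOLTAGE,
		.boot_on	= 1,			/* left enabled by the bootloader */
		.initial_mode	= REGULATOR_MODE_NORMAL,
	},
};

static struct max1586_subdev_data example_max1586_subdevs[] = {
	{
		.id		= MAX1586_V3,
		.name		= "vcc_core",
		.platform_data	= &example_v3_init_data,
	},
};

static struct max1586_platform_data example_max1586_info = {
	.num_subdevs	= ARRAY_SIZE(example_max1586_subdevs),
	.subdevs	= example_max1586_subdevs,
	.v3_gain	= MAX1586_GAIN_R24_3k32,	/* R24 = 3.32k, R25 = 100k */
};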
+ * @init_on: Set if the regulators supplying the line should be + * enabled during initialisation + */ +struct regulator_userspace_consumer_data { + const char *name; + + int num_supplies; + struct regulator_bulk_data *supplies; + + bool init_on; +}; + +#endif /* __REGULATOR_PLATFORM_CONSUMER_H_ */ diff --git a/include/linux/reiserfs_acl.h b/include/linux/reiserfs_acl.h index fe00f781a62..b4448853900 100644 --- a/include/linux/reiserfs_acl.h +++ b/include/linux/reiserfs_acl.h @@ -49,23 +49,13 @@ static inline int reiserfs_acl_count(size_t size) #ifdef CONFIG_REISERFS_FS_POSIX_ACL struct posix_acl *reiserfs_get_acl(struct inode *inode, int type); int reiserfs_acl_chmod(struct inode *inode); -int reiserfs_inherit_default_acl(struct inode *dir, struct dentry *dentry, +int reiserfs_inherit_default_acl(struct reiserfs_transaction_handle *th, + struct inode *dir, struct dentry *dentry, struct inode *inode); int reiserfs_cache_default_acl(struct inode *dir); -extern int reiserfs_xattr_posix_acl_init(void) __init; -extern int reiserfs_xattr_posix_acl_exit(void); -extern struct reiserfs_xattr_handler posix_acl_default_handler; -extern struct reiserfs_xattr_handler posix_acl_access_handler; +extern struct xattr_handler reiserfs_posix_acl_default_handler; +extern struct xattr_handler reiserfs_posix_acl_access_handler; -static inline void reiserfs_init_acl_access(struct inode *inode) -{ - REISERFS_I(inode)->i_acl_access = NULL; -} - -static inline void reiserfs_init_acl_default(struct inode *inode) -{ - REISERFS_I(inode)->i_acl_default = NULL; -} #else #define reiserfs_cache_default_acl(inode) 0 @@ -75,33 +65,16 @@ static inline struct posix_acl *reiserfs_get_acl(struct inode *inode, int type) return NULL; } -static inline int reiserfs_xattr_posix_acl_init(void) -{ - return 0; -} - -static inline int reiserfs_xattr_posix_acl_exit(void) -{ - return 0; -} - static inline int reiserfs_acl_chmod(struct inode *inode) { return 0; } static inline int -reiserfs_inherit_default_acl(const struct inode *dir, struct dentry *dentry, +reiserfs_inherit_default_acl(struct reiserfs_transaction_handle *th, + const struct inode *dir, struct dentry *dentry, struct inode *inode) { return 0; } - -static inline void reiserfs_init_acl_access(struct inode *inode) -{ -} - -static inline void reiserfs_init_acl_default(struct inode *inode) -{ -} #endif diff --git a/include/linux/reiserfs_fs.h b/include/linux/reiserfs_fs.h index bc5114d35e9..dd31e7bae35 100644 --- a/include/linux/reiserfs_fs.h +++ b/include/linux/reiserfs_fs.h @@ -28,8 +28,6 @@ #include <linux/reiserfs_fs_sb.h> #endif -struct fid; - /* * include/linux/reiser_fs.h * @@ -37,6 +35,31 @@ struct fid; * */ +/* ioctl's command */ +#define REISERFS_IOC_UNPACK _IOW(0xCD,1,long) +/* define following flags to be the same as in ext2, so that chattr(1), + lsattr(1) will work with us. 
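A board-code sketch for the userspace-consumer platform data defined above. The supply names are invented, and the "reg-userspace-consumer" platform-device name is an assumption that should be checked against the matching driver.

#include <linux/kernel.h>
#include <linux/platform_device.h>
#include <linux/regulator/consumer.h>
#include <linux/regulator/userspace-consumer.h>

static struct regulator_bulk_data example_cam_supplies[] = {
	{ .supply = "vcc_cam_io"   },
	{ .supply = "vcc_cam_core" },
};

static struct regulator_userspace_consumer_data example_cam_data = {
	.name		= "camera",
	.num_supplies	= ARRAY_SIZE(example_cam_supplies),
	.supplies	= example_cam_supplies,
	.init_on	= false,	/* leave the supplies off until userspace asks */
};

static struct platform_device example_cam_consumer_dev = {
	.name	= "reg-userspace-consumer",	/* assumed driver name */
	.id	= 0,
	.dev	= { .platform_data = &example_cam_data },
};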
*/ +#define REISERFS_IOC_GETFLAGS FS_IOC_GETFLAGS +#define REISERFS_IOC_SETFLAGS FS_IOC_SETFLAGS +#define REISERFS_IOC_GETVERSION FS_IOC_GETVERSION +#define REISERFS_IOC_SETVERSION FS_IOC_SETVERSION + +#ifdef __KERNEL__ +/* the 32 bit compat definitions with int argument */ +#define REISERFS_IOC32_UNPACK _IOW(0xCD, 1, int) +#define REISERFS_IOC32_GETFLAGS FS_IOC32_GETFLAGS +#define REISERFS_IOC32_SETFLAGS FS_IOC32_SETFLAGS +#define REISERFS_IOC32_GETVERSION FS_IOC32_GETVERSION +#define REISERFS_IOC32_SETVERSION FS_IOC32_SETVERSION + +/* Locking primitives */ +/* Right now we are still falling back to (un)lock_kernel, but eventually that + would evolve into real per-fs locks */ +#define reiserfs_write_lock( sb ) lock_kernel() +#define reiserfs_write_unlock( sb ) unlock_kernel() + +struct fid; + /* in reading the #defines, it may help to understand that they employ the following abbreviations: @@ -79,15 +102,21 @@ struct fid; */ #define REISERFS_DEBUG_CODE 5 /* extra messages to help find/debug errors */ -void reiserfs_warning(struct super_block *s, const char *fmt, ...); +void __reiserfs_warning(struct super_block *s, const char *id, + const char *func, const char *fmt, ...); +#define reiserfs_warning(s, id, fmt, args...) \ + __reiserfs_warning(s, id, __func__, fmt, ##args) /* assertions handling */ /** always check a condition and panic if it's false. */ -#define __RASSERT( cond, scond, format, args... ) \ -if( !( cond ) ) \ - reiserfs_panic( NULL, "reiserfs[%i]: assertion " scond " failed at " \ - __FILE__ ":%i:%s: " format "\n", \ - in_interrupt() ? -1 : task_pid_nr(current), __LINE__ , __func__ , ##args ) +#define __RASSERT(cond, scond, format, args...) \ +do { \ + if (!(cond)) \ + reiserfs_panic(NULL, "assertion failure", "(" #cond ") at " \ + __FILE__ ":%i:%s: " format "\n", \ + in_interrupt() ? -1 : task_pid_nr(current), \ + __LINE__, __func__ , ##args); \ +} while (0) #define RASSERT(cond, format, args...) __RASSERT(cond, #cond, format, ##args) @@ -171,7 +200,11 @@ struct reiserfs_super_block { __le32 s_flags; /* Right now used only by inode-attributes, if enabled */ unsigned char s_uuid[16]; /* filesystem unique identifier */ unsigned char s_label[16]; /* filesystem volume label */ - char s_unused[88]; /* zero filled by mkreiserfs and + __le16 s_mnt_count; /* Count of mounts since last fsck */ + __le16 s_max_mnt_count; /* Maximum mounts before check */ + __le32 s_lastcheck; /* Timestamp of last fsck */ + __le32 s_check_interval; /* Interval between checks */ + char s_unused[76]; /* zero filled by mkreiserfs and * reiserfs_convert_objectid_map_v1() * so any additions must be updated * there as well. 
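A userspace sketch of the relocated REISERFS_IOC_UNPACK ioctl: ask reiserfs to unpack a file's packed tail so its blocks can be mapped directly, as boot loaders have traditionally done. The long argument value is arbitrary here and error handling is minimal.

#include <fcntl.h>
#include <unistd.h>
#include <sys/ioctl.h>
#include <linux/reiserfs_fs.h>

static int reiserfs_unpack_file(const char *path)
{
	int fd = open(path, O_RDWR);
	int ret;

	if (fd < 0)
		return -1;
	ret = ioctl(fd, REISERFS_IOC_UNPACK, 1L);	/* long argument per _IOW(0xCD, 1, long) */
	close(fd);
	return ret;
}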
*/ @@ -553,10 +586,8 @@ static inline int uniqueness2type(__u32 uniqueness) return TYPE_DIRECT; case V1_DIRENTRY_UNIQUENESS: return TYPE_DIRENTRY; - default: - reiserfs_warning(NULL, "vs-500: unknown uniqueness %d", - uniqueness); case V1_ANY_UNIQUENESS: + default: return TYPE_ANY; } } @@ -573,9 +604,8 @@ static inline __u32 type2uniqueness(int type) return V1_DIRECT_UNIQUENESS; case TYPE_DIRENTRY: return V1_DIRENTRY_UNIQUENESS; - default: - reiserfs_warning(NULL, "vs-501: unknown type %d", type); case TYPE_ANY: + default: return V1_ANY_UNIQUENESS; } } @@ -630,23 +660,54 @@ static inline void set_le_key_k_type(int version, struct reiserfs_key *key, cpu_to_le32(type2uniqueness(type))) : (void)(set_offset_v2_k_type(&(key->u.k_offset_v2), type)); } + static inline void set_le_ih_k_type(struct item_head *ih, int type) { set_le_key_k_type(ih_version(ih), &(ih->ih_key), type); } -#define is_direntry_le_key(version,key) (le_key_k_type (version, key) == TYPE_DIRENTRY) -#define is_direct_le_key(version,key) (le_key_k_type (version, key) == TYPE_DIRECT) -#define is_indirect_le_key(version,key) (le_key_k_type (version, key) == TYPE_INDIRECT) -#define is_statdata_le_key(version,key) (le_key_k_type (version, key) == TYPE_STAT_DATA) +static inline int is_direntry_le_key(int version, struct reiserfs_key *key) +{ + return le_key_k_type(version, key) == TYPE_DIRENTRY; +} + +static inline int is_direct_le_key(int version, struct reiserfs_key *key) +{ + return le_key_k_type(version, key) == TYPE_DIRECT; +} + +static inline int is_indirect_le_key(int version, struct reiserfs_key *key) +{ + return le_key_k_type(version, key) == TYPE_INDIRECT; +} + +static inline int is_statdata_le_key(int version, struct reiserfs_key *key) +{ + return le_key_k_type(version, key) == TYPE_STAT_DATA; +} // // item header has version. // -#define is_direntry_le_ih(ih) is_direntry_le_key (ih_version (ih), &((ih)->ih_key)) -#define is_direct_le_ih(ih) is_direct_le_key (ih_version (ih), &((ih)->ih_key)) -#define is_indirect_le_ih(ih) is_indirect_le_key (ih_version(ih), &((ih)->ih_key)) -#define is_statdata_le_ih(ih) is_statdata_le_key (ih_version (ih), &((ih)->ih_key)) +static inline int is_direntry_le_ih(struct item_head *ih) +{ + return is_direntry_le_key(ih_version(ih), &ih->ih_key); +} + +static inline int is_direct_le_ih(struct item_head *ih) +{ + return is_direct_le_key(ih_version(ih), &ih->ih_key); +} + +static inline int is_indirect_le_ih(struct item_head *ih) +{ + return is_indirect_le_key(ih_version(ih), &ih->ih_key); +} + +static inline int is_statdata_le_ih(struct item_head *ih) +{ + return is_statdata_le_key(ih_version(ih), &ih->ih_key); +} // // key is pointer to cpu key, result is cpu @@ -687,9 +748,9 @@ static inline void cpu_key_k_offset_dec(struct cpu_key *key) #define is_indirect_cpu_ih(ih) (is_indirect_cpu_key (&((ih)->ih_key))) #define is_statdata_cpu_ih(ih) (is_statdata_cpu_key (&((ih)->ih_key))) -#define I_K_KEY_IN_ITEM(p_s_ih, p_s_key, n_blocksize) \ - ( ! 
COMP_SHORT_KEYS(p_s_ih, p_s_key) && \ - I_OFF_BYTE_IN_ITEM(p_s_ih, k_offset (p_s_key), n_blocksize) ) +#define I_K_KEY_IN_ITEM(ih, key, n_blocksize) \ + (!COMP_SHORT_KEYS(ih, key) && \ + I_OFF_BYTE_IN_ITEM(ih, k_offset(key), n_blocksize)) /* maximal length of item */ #define MAX_ITEM_LEN(block_size) (block_size - BLKH_SIZE - IH_SIZE) @@ -698,6 +759,7 @@ static inline void cpu_key_k_offset_dec(struct cpu_key *key) /* object identifier for root dir */ #define REISERFS_ROOT_OBJECTID 2 #define REISERFS_ROOT_PARENT_OBJECTID 1 + extern struct reiserfs_key root_key; /* @@ -744,25 +806,25 @@ struct block_head { #define DISK_LEAF_NODE_LEVEL 1 /* Leaf node level. */ /* Given the buffer head of a formatted node, resolve to the block head of that node. */ -#define B_BLK_HEAD(p_s_bh) ((struct block_head *)((p_s_bh)->b_data)) +#define B_BLK_HEAD(bh) ((struct block_head *)((bh)->b_data)) /* Number of items that are in buffer. */ -#define B_NR_ITEMS(p_s_bh) (blkh_nr_item(B_BLK_HEAD(p_s_bh))) -#define B_LEVEL(p_s_bh) (blkh_level(B_BLK_HEAD(p_s_bh))) -#define B_FREE_SPACE(p_s_bh) (blkh_free_space(B_BLK_HEAD(p_s_bh))) +#define B_NR_ITEMS(bh) (blkh_nr_item(B_BLK_HEAD(bh))) +#define B_LEVEL(bh) (blkh_level(B_BLK_HEAD(bh))) +#define B_FREE_SPACE(bh) (blkh_free_space(B_BLK_HEAD(bh))) -#define PUT_B_NR_ITEMS(p_s_bh,val) do { set_blkh_nr_item(B_BLK_HEAD(p_s_bh),val); } while (0) -#define PUT_B_LEVEL(p_s_bh,val) do { set_blkh_level(B_BLK_HEAD(p_s_bh),val); } while (0) -#define PUT_B_FREE_SPACE(p_s_bh,val) do { set_blkh_free_space(B_BLK_HEAD(p_s_bh),val); } while (0) +#define PUT_B_NR_ITEMS(bh, val) do { set_blkh_nr_item(B_BLK_HEAD(bh), val); } while (0) +#define PUT_B_LEVEL(bh, val) do { set_blkh_level(B_BLK_HEAD(bh), val); } while (0) +#define PUT_B_FREE_SPACE(bh, val) do { set_blkh_free_space(B_BLK_HEAD(bh), val); } while (0) /* Get right delimiting key. -- little endian */ -#define B_PRIGHT_DELIM_KEY(p_s_bh) (&(blk_right_delim_key(B_BLK_HEAD(p_s_bh)))) +#define B_PRIGHT_DELIM_KEY(bh) (&(blk_right_delim_key(B_BLK_HEAD(bh)))) /* Does the buffer contain a disk leaf. */ -#define B_IS_ITEMS_LEVEL(p_s_bh) (B_LEVEL(p_s_bh) == DISK_LEAF_NODE_LEVEL) +#define B_IS_ITEMS_LEVEL(bh) (B_LEVEL(bh) == DISK_LEAF_NODE_LEVEL) /* Does the buffer contain a disk internal node */ -#define B_IS_KEYS_LEVEL(p_s_bh) (B_LEVEL(p_s_bh) > DISK_LEAF_NODE_LEVEL \ - && B_LEVEL(p_s_bh) <= MAX_HEIGHT) +#define B_IS_KEYS_LEVEL(bh) (B_LEVEL(bh) > DISK_LEAF_NODE_LEVEL \ + && B_LEVEL(bh) <= MAX_HEIGHT) /***************************************************************************/ /* STAT DATA */ @@ -1112,12 +1174,13 @@ struct disk_child { #define put_dc_size(dc_p, val) do { (dc_p)->dc_size = cpu_to_le16(val); } while(0) /* Get disk child by buffer header and position in the tree node. */ -#define B_N_CHILD(p_s_bh,n_pos) ((struct disk_child *)\ -((p_s_bh)->b_data+BLKH_SIZE+B_NR_ITEMS(p_s_bh)*KEY_SIZE+DC_SIZE*(n_pos))) +#define B_N_CHILD(bh, n_pos) ((struct disk_child *)\ +((bh)->b_data + BLKH_SIZE + B_NR_ITEMS(bh) * KEY_SIZE + DC_SIZE * (n_pos))) /* Get disk child number by buffer header and position in the tree node. 
*/ -#define B_N_CHILD_NUM(p_s_bh,n_pos) (dc_block_number(B_N_CHILD(p_s_bh,n_pos))) -#define PUT_B_N_CHILD_NUM(p_s_bh,n_pos, val) (put_dc_block_number(B_N_CHILD(p_s_bh,n_pos), val )) +#define B_N_CHILD_NUM(bh, n_pos) (dc_block_number(B_N_CHILD(bh, n_pos))) +#define PUT_B_N_CHILD_NUM(bh, n_pos, val) \ + (put_dc_block_number(B_N_CHILD(bh, n_pos), val)) /* maximal value of field child_size in structure disk_child */ /* child size is the combined size of all items and their headers */ @@ -1188,33 +1251,33 @@ struct treepath { struct treepath var = {.path_length = ILLEGAL_PATH_ELEMENT_OFFSET, .reada = 0,} /* Get path element by path and path position. */ -#define PATH_OFFSET_PELEMENT(p_s_path,n_offset) ((p_s_path)->path_elements +(n_offset)) +#define PATH_OFFSET_PELEMENT(path, n_offset) ((path)->path_elements + (n_offset)) /* Get buffer header at the path by path and path position. */ -#define PATH_OFFSET_PBUFFER(p_s_path,n_offset) (PATH_OFFSET_PELEMENT(p_s_path,n_offset)->pe_buffer) +#define PATH_OFFSET_PBUFFER(path, n_offset) (PATH_OFFSET_PELEMENT(path, n_offset)->pe_buffer) /* Get position in the element at the path by path and path position. */ -#define PATH_OFFSET_POSITION(p_s_path,n_offset) (PATH_OFFSET_PELEMENT(p_s_path,n_offset)->pe_position) +#define PATH_OFFSET_POSITION(path, n_offset) (PATH_OFFSET_PELEMENT(path, n_offset)->pe_position) -#define PATH_PLAST_BUFFER(p_s_path) (PATH_OFFSET_PBUFFER((p_s_path), (p_s_path)->path_length)) +#define PATH_PLAST_BUFFER(path) (PATH_OFFSET_PBUFFER((path), (path)->path_length)) /* you know, to the person who didn't write this the macro name does not at first suggest what it does. Maybe POSITION_FROM_PATH_END? Or maybe we should just focus on dumping paths... -Hans */ -#define PATH_LAST_POSITION(p_s_path) (PATH_OFFSET_POSITION((p_s_path), (p_s_path)->path_length)) +#define PATH_LAST_POSITION(path) (PATH_OFFSET_POSITION((path), (path)->path_length)) -#define PATH_PITEM_HEAD(p_s_path) B_N_PITEM_HEAD(PATH_PLAST_BUFFER(p_s_path),PATH_LAST_POSITION(p_s_path)) +#define PATH_PITEM_HEAD(path) B_N_PITEM_HEAD(PATH_PLAST_BUFFER(path), PATH_LAST_POSITION(path)) /* in do_balance leaf has h == 0 in contrast with path structure, where root has level == 0. That is why we need these defines */ -#define PATH_H_PBUFFER(p_s_path, h) PATH_OFFSET_PBUFFER (p_s_path, p_s_path->path_length - (h)) /* tb->S[h] */ +#define PATH_H_PBUFFER(path, h) PATH_OFFSET_PBUFFER (path, path->path_length - (h)) /* tb->S[h] */ #define PATH_H_PPARENT(path, h) PATH_H_PBUFFER (path, (h) + 1) /* tb->F[h] or tb->S[0]->b_parent */ #define PATH_H_POSITION(path, h) PATH_OFFSET_POSITION (path, path->path_length - (h)) #define PATH_H_B_ITEM_ORDER(path, h) PATH_H_POSITION(path, h + 1) /* tb->S[h]->b_item_order */ -#define PATH_H_PATH_OFFSET(p_s_path, n_h) ((p_s_path)->path_length - (n_h)) +#define PATH_H_PATH_OFFSET(path, n_h) ((path)->path_length - (n_h)) #define get_last_bh(path) PATH_PLAST_BUFFER(path) #define get_ih(path) PATH_PITEM_HEAD(path) @@ -1444,6 +1507,16 @@ struct buffer_info { int bi_position; }; +static inline struct super_block *sb_from_tb(struct tree_balance *tb) +{ + return tb ? tb->tb_sb : NULL; +} + +static inline struct super_block *sb_from_bi(struct buffer_info *bi) +{ + return bi ? sb_from_tb(bi->tb) : NULL; +} + /* there are 4 types of items: stat data, directory item, indirect, direct. +-------------------+------------+--------------+------------+ | | k_offset | k_uniqueness | mergeable? 
| @@ -1494,7 +1567,7 @@ extern struct item_operations *item_ops[TYPE_ANY + 1]; #define COMP_SHORT_KEYS comp_short_keys /* number of blocks pointed to by the indirect item */ -#define I_UNFM_NUM(p_s_ih) ( ih_item_len(p_s_ih) / UNFM_P_SIZE ) +#define I_UNFM_NUM(ih) (ih_item_len(ih) / UNFM_P_SIZE) /* the used space within the unformatted node corresponding to pos within the item pointed to by ih */ #define I_POS_UNFM_SIZE(ih,pos,size) (((pos) == I_UNFM_NUM(ih) - 1 ) ? (size) - ih_free_space(ih) : (size)) @@ -1540,7 +1613,6 @@ struct reiserfs_iget_args { /* FUNCTION DECLARATIONS */ /***************************************************************************/ -/*#ifdef __KERNEL__*/ #define get_journal_desc_magic(bh) (bh->b_data + bh->b_size - 12) #define journal_trans_half(blocksize) \ @@ -1598,6 +1670,10 @@ struct reiserfs_journal_header { #define JOURNAL_MAX_COMMIT_AGE 30 #define JOURNAL_MAX_TRANS_AGE 30 #define JOURNAL_PER_BALANCE_CNT (3 * (MAX_HEIGHT-2) + 9) +#define JOURNAL_BLOCKS_PER_OBJECT(sb) (JOURNAL_PER_BALANCE_CNT * 3 + \ + 2 * (REISERFS_QUOTA_INIT_BLOCKS(sb) + \ + REISERFS_QUOTA_TRANS_BLOCKS(sb))) + #ifdef CONFIG_QUOTA /* We need to update data and inode (atime) */ #define REISERFS_QUOTA_TRANS_BLOCKS(s) (REISERFS_SB(s)->s_mount_opt & (1<<REISERFS_QUOTA) ? 2 : 0) @@ -1672,7 +1748,7 @@ struct reiserfs_transaction_handle { int t_refcount; int t_blocks_logged; /* number of blocks this writer has logged */ int t_blocks_allocated; /* number of blocks this writer allocated */ - unsigned long t_trans_id; /* sanity check, equals the current trans id */ + unsigned int t_trans_id; /* sanity check, equals the current trans id */ void *t_handle_save; /* save existing current->journal_info */ unsigned displace_new_blocks:1; /* if new block allocation occurres, that block should be displaced from others */ @@ -1748,13 +1824,13 @@ int journal_end_sync(struct reiserfs_transaction_handle *, struct super_block *, int journal_mark_freed(struct reiserfs_transaction_handle *, struct super_block *, b_blocknr_t blocknr); int journal_transaction_should_end(struct reiserfs_transaction_handle *, int); -int reiserfs_in_journal(struct super_block *p_s_sb, unsigned int bmap_nr, - int bit_nr, int searchall, b_blocknr_t *next); +int reiserfs_in_journal(struct super_block *sb, unsigned int bmap_nr, + int bit_nr, int searchall, b_blocknr_t *next); int journal_begin(struct reiserfs_transaction_handle *, - struct super_block *p_s_sb, unsigned long); + struct super_block *sb, unsigned long); int journal_join_abort(struct reiserfs_transaction_handle *, - struct super_block *p_s_sb, unsigned long); -void reiserfs_journal_abort(struct super_block *sb, int errno); + struct super_block *sb, unsigned long); +void reiserfs_abort_journal(struct super_block *sb, int errno); void reiserfs_abort(struct super_block *sb, int errno, const char *fmt, ...); int reiserfs_allocate_list_bitmaps(struct super_block *s, struct reiserfs_list_bitmap *, unsigned int); @@ -1771,8 +1847,8 @@ int reiserfs_convert_objectid_map_v1(struct super_block *); /* stree.c */ int B_IS_IN_TREE(const struct buffer_head *); -extern void copy_item_head(struct item_head *p_v_to, - const struct item_head *p_v_from); +extern void copy_item_head(struct item_head *to, + const struct item_head *from); // first key is in cpu form, second - le extern int comp_short_keys(const struct reiserfs_key *le_key, @@ -1807,20 +1883,20 @@ static inline void copy_key(struct reiserfs_key *to, memcpy(to, from, KEY_SIZE); } -int comp_items(const struct item_head *stored_ih, const 
struct treepath *p_s_path); -const struct reiserfs_key *get_rkey(const struct treepath *p_s_chk_path, - const struct super_block *p_s_sb); +int comp_items(const struct item_head *stored_ih, const struct treepath *path); +const struct reiserfs_key *get_rkey(const struct treepath *chk_path, + const struct super_block *sb); int search_by_key(struct super_block *, const struct cpu_key *, struct treepath *, int); #define search_item(s,key,path) search_by_key (s, key, path, DISK_LEAF_NODE_LEVEL) -int search_for_position_by_key(struct super_block *p_s_sb, - const struct cpu_key *p_s_cpu_key, - struct treepath *p_s_search_path); -extern void decrement_bcount(struct buffer_head *p_s_bh); -void decrement_counters_in_path(struct treepath *p_s_search_path); -void pathrelse(struct treepath *p_s_search_path); +int search_for_position_by_key(struct super_block *sb, + const struct cpu_key *cpu_key, + struct treepath *search_path); +extern void decrement_bcount(struct buffer_head *bh); +void decrement_counters_in_path(struct treepath *search_path); +void pathrelse(struct treepath *search_path); int reiserfs_check_path(struct treepath *p); -void pathrelse_and_restore(struct super_block *s, struct treepath *p_s_search_path); +void pathrelse_and_restore(struct super_block *s, struct treepath *search_path); int reiserfs_insert_item(struct reiserfs_transaction_handle *th, struct treepath *path, @@ -1843,14 +1919,14 @@ int reiserfs_cut_from_item(struct reiserfs_transaction_handle *th, int reiserfs_delete_item(struct reiserfs_transaction_handle *th, struct treepath *path, const struct cpu_key *key, - struct inode *inode, struct buffer_head *p_s_un_bh); + struct inode *inode, struct buffer_head *un_bh); void reiserfs_delete_solid_item(struct reiserfs_transaction_handle *th, struct inode *inode, struct reiserfs_key *key); int reiserfs_delete_object(struct reiserfs_transaction_handle *th, - struct inode *p_s_inode); + struct inode *inode); int reiserfs_do_truncate(struct reiserfs_transaction_handle *th, - struct inode *p_s_inode, struct page *, + struct inode *inode, struct page *, int update_timestamps); #define i_block_size(inode) ((inode)->i_sb->s_blocksize) @@ -1894,10 +1970,12 @@ void make_le_item_head(struct item_head *ih, const struct cpu_key *key, loff_t offset, int type, int length, int entry_count); struct inode *reiserfs_iget(struct super_block *s, const struct cpu_key *key); +struct reiserfs_security_handle; int reiserfs_new_inode(struct reiserfs_transaction_handle *th, struct inode *dir, int mode, const char *symname, loff_t i_size, - struct dentry *dentry, struct inode *inode); + struct dentry *dentry, struct inode *inode, + struct reiserfs_security_handle *security); void reiserfs_update_sd_size(struct reiserfs_transaction_handle *th, struct inode *inode, loff_t size); @@ -1955,7 +2033,7 @@ int reiserfs_global_version_in_proc(char *buffer, char **start, off_t offset, #define PROC_INFO_MAX( sb, field, value ) VOID_V #define PROC_INFO_INC( sb, field ) VOID_V #define PROC_INFO_ADD( sb, field, val ) VOID_V -#define PROC_INFO_BH_STAT( p_s_sb, p_s_bh, n_node_level ) VOID_V +#define PROC_INFO_BH_STAT(sb, bh, n_node_level) VOID_V #endif /* dir.c */ @@ -1963,6 +2041,7 @@ extern const struct inode_operations reiserfs_dir_inode_operations; extern const struct inode_operations reiserfs_symlink_inode_operations; extern const struct inode_operations reiserfs_special_inode_operations; extern const struct file_operations reiserfs_dir_operations; +int reiserfs_readdir_dentry(struct dentry *, void *, filldir_t, loff_t 
*); /* tail_conversion.c */ int direct2indirect(struct reiserfs_transaction_handle *, struct inode *, @@ -1979,13 +2058,20 @@ extern const struct address_space_operations reiserfs_address_space_operations; /* fix_nodes.c */ -int fix_nodes(int n_op_mode, struct tree_balance *p_s_tb, - struct item_head *p_s_ins_ih, const void *); +int fix_nodes(int n_op_mode, struct tree_balance *tb, + struct item_head *ins_ih, const void *); void unfix_nodes(struct tree_balance *); /* prints.c */ -void reiserfs_panic(struct super_block *s, const char *fmt, ...) +void __reiserfs_panic(struct super_block *s, const char *id, + const char *function, const char *fmt, ...) __attribute__ ((noreturn)); +#define reiserfs_panic(s, id, fmt, args...) \ + __reiserfs_panic(s, id, __func__, fmt, ##args) +void __reiserfs_error(struct super_block *s, const char *id, + const char *function, const char *fmt, ...); +#define reiserfs_error(s, id, fmt, args...) \ + __reiserfs_error(s, id, __func__, fmt, ##args) void reiserfs_info(struct super_block *s, const char *fmt, ...); void reiserfs_debug(struct super_block *s, int level, const char *fmt, ...); void print_indirect_item(struct buffer_head *bh, int item_num); @@ -2022,7 +2108,7 @@ void leaf_paste_in_buffer(struct buffer_info *bi, int pasted_item_num, int zeros_number); void leaf_cut_from_buffer(struct buffer_info *bi, int cut_item_num, int pos_in_item, int cut_size); -void leaf_paste_entries(struct buffer_head *bh, int item_num, int before, +void leaf_paste_entries(struct buffer_info *bi, int item_num, int before, int new_entry_count, struct reiserfs_de_head *new_dehs, const char *records, int paste_size); /* ibalance.c */ @@ -2178,29 +2264,6 @@ long reiserfs_compat_ioctl(struct file *filp, unsigned int cmd, unsigned long arg); int reiserfs_unpack(struct inode *inode, struct file *filp); -/* ioctl's command */ -#define REISERFS_IOC_UNPACK _IOW(0xCD,1,long) -/* define following flags to be the same as in ext2, so that chattr(1), - lsattr(1) will work with us. 
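A usage sketch for the reworked message helpers declared above: each call now passes a message id alongside the format string, and the wrapper macros supply __func__ automatically. The ids and messages below are illustrative only.

#include <linux/reiserfs_fs.h>

static void example_report(struct super_block *sb, unsigned int blocknr, int err)
{
	reiserfs_warning(sb, "example-100", "block %u is out of range", blocknr);
	if (err)
		reiserfs_error(sb, "example-200", "I/O error %d reading block %u",
			       err, blocknr);
	/* reiserfs_panic(sb, "example-300", "...") would halt and not return */
}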
*/ -#define REISERFS_IOC_GETFLAGS FS_IOC_GETFLAGS -#define REISERFS_IOC_SETFLAGS FS_IOC_SETFLAGS -#define REISERFS_IOC_GETVERSION FS_IOC_GETVERSION -#define REISERFS_IOC_SETVERSION FS_IOC_SETVERSION - -/* the 32 bit compat definitions with int argument */ -#define REISERFS_IOC32_UNPACK _IOW(0xCD, 1, int) -#define REISERFS_IOC32_GETFLAGS FS_IOC32_GETFLAGS -#define REISERFS_IOC32_SETFLAGS FS_IOC32_SETFLAGS -#define REISERFS_IOC32_GETVERSION FS_IOC32_GETVERSION -#define REISERFS_IOC32_SETVERSION FS_IOC32_SETVERSION - -/* Locking primitives */ -/* Right now we are still falling back to (un)lock_kernel, but eventually that - would evolve into real per-fs locks */ -#define reiserfs_write_lock( sb ) lock_kernel() -#define reiserfs_write_unlock( sb ) unlock_kernel() - -/* xattr stuff */ -#define REISERFS_XATTR_DIR_SEM(s) (REISERFS_SB(s)->xattr_dir_sem) +#endif /* __KERNEL__ */ #endif /* _LINUX_REISER_FS_H */ diff --git a/include/linux/reiserfs_fs_i.h b/include/linux/reiserfs_fs_i.h index ce3663fb010..89f4d3abbf5 100644 --- a/include/linux/reiserfs_fs_i.h +++ b/include/linux/reiserfs_fs_i.h @@ -51,15 +51,11 @@ struct reiserfs_inode_info { /* we use these for fsync or O_SYNC to decide which transaction ** needs to be committed in order for this inode to be properly ** flushed */ - unsigned long i_trans_id; + unsigned int i_trans_id; struct reiserfs_journal_list *i_jl; struct mutex i_mmap; -#ifdef CONFIG_REISERFS_FS_POSIX_ACL - struct posix_acl *i_acl_access; - struct posix_acl *i_acl_default; -#endif #ifdef CONFIG_REISERFS_FS_XATTR - struct rw_semaphore xattr_sem; + struct rw_semaphore i_xattr_sem; #endif struct inode vfs_inode; }; diff --git a/include/linux/reiserfs_fs_sb.h b/include/linux/reiserfs_fs_sb.h index bda6b562a1e..dab68bbed67 100644 --- a/include/linux/reiserfs_fs_sb.h +++ b/include/linux/reiserfs_fs_sb.h @@ -14,7 +14,7 @@ typedef enum { } reiserfs_super_block_flags; /* struct reiserfs_super_block accessors/mutators - * since this is a disk structure, it will always be in + * since this is a disk structure, it will always be in * little endian format. */ #define sb_block_count(sbp) (le32_to_cpu((sbp)->s_v1.s_block_count)) #define set_sb_block_count(sbp,v) ((sbp)->s_v1.s_block_count = cpu_to_le32(v)) @@ -73,6 +73,9 @@ typedef enum { #define sb_version(sbp) (le16_to_cpu((sbp)->s_v1.s_version)) #define set_sb_version(sbp,v) ((sbp)->s_v1.s_version = cpu_to_le16(v)) +#define sb_mnt_count(sbp) (le16_to_cpu((sbp)->s_mnt_count)) +#define set_sb_mnt_count(sbp, v) ((sbp)->s_mnt_count = cpu_to_le16(v)) + #define sb_reserved_for_journal(sbp) \ (le16_to_cpu((sbp)->s_v1.s_reserved_for_journal)) #define set_sb_reserved_for_journal(sbp,v) \ @@ -80,16 +83,16 @@ typedef enum { /* LOGGING -- */ -/* These all interelate for performance. +/* These all interelate for performance. ** -** If the journal block count is smaller than n transactions, you lose speed. +** If the journal block count is smaller than n transactions, you lose speed. ** I don't know what n is yet, I'm guessing 8-16. ** ** typical transaction size depends on the application, how often fsync is -** called, and how many metadata blocks you dirty in a 30 second period. +** called, and how many metadata blocks you dirty in a 30 second period. ** The more small files (<16k) you use, the larger your transactions will ** be. -** +** ** If your journal fills faster than dirty buffers get flushed to disk, it must flush them before allowing the journal ** to wrap, which slows things down. 
If you need high speed meta data updates, the journal should be big enough ** to prevent wrapping before dirty meta blocks get to disk. @@ -153,7 +156,7 @@ struct reiserfs_journal_list { atomic_t j_commit_left; atomic_t j_older_commits_done; /* all commits older than this on disk */ struct mutex j_commit_mutex; - unsigned long j_trans_id; + unsigned int j_trans_id; time_t j_timestamp; struct reiserfs_list_bitmap *j_list_bitmap; struct buffer_head *j_commit_bh; /* commit buffer head */ @@ -182,7 +185,7 @@ struct reiserfs_journal { int j_1st_reserved_block; /* first block on s_dev of reserved area journal */ unsigned long j_state; - unsigned long j_trans_id; + unsigned int j_trans_id; unsigned long j_mount_id; unsigned long j_start; /* start of current waiting commit (index into j_ap_blocks) */ unsigned long j_len; /* length of current waiting commit */ @@ -190,7 +193,7 @@ struct reiserfs_journal { atomic_t j_wcount; /* count of writers for current commit */ unsigned long j_bcount; /* batch count. allows turning X transactions into 1 */ unsigned long j_first_unflushed_offset; /* first unflushed transactions offset */ - unsigned long j_last_flush_trans_id; /* last fully flushed journal timestamp */ + unsigned j_last_flush_trans_id; /* last fully flushed journal timestamp */ struct buffer_head *j_header_bh; time_t j_trans_start_time; /* time this transaction started */ @@ -223,10 +226,10 @@ struct reiserfs_journal { int j_num_work_lists; /* number that need attention from kreiserfsd */ /* debugging to make sure things are flushed in order */ - int j_last_flush_id; + unsigned int j_last_flush_id; /* debugging to make sure things are committed in order */ - int j_last_commit_id; + unsigned int j_last_commit_id; struct list_head j_bitmap_nodes; struct list_head j_dirty_buffers; @@ -239,7 +242,7 @@ struct reiserfs_journal { struct reiserfs_list_bitmap j_list_bitmap[JOURNAL_NUM_BITMAPS]; /* array of bitmaps to record the deleted blocks */ struct reiserfs_journal_cnode *j_hash_table[JOURNAL_HASH_SIZE]; /* hash table for real buffer heads in current trans */ - struct reiserfs_journal_cnode *j_list_hash_table[JOURNAL_HASH_SIZE]; /* hash table for all the real buffer heads in all + struct reiserfs_journal_cnode *j_list_hash_table[JOURNAL_HASH_SIZE]; /* hash table for all the real buffer heads in all the transactions */ struct list_head j_prealloc_list; /* list of inodes which have preallocated blocks */ int j_persistent_trans; @@ -399,10 +402,7 @@ struct reiserfs_sb_info { int reserved_blocks; /* amount of blocks reserved for further allocations */ spinlock_t bitmap_lock; /* this lock on now only used to protect reserved_blocks variable */ struct dentry *priv_root; /* root of /.reiserfs_priv */ -#ifdef CONFIG_REISERFS_FS_XATTR - struct dentry *xattr_root; /* root of /.reiserfs_priv/.xa */ - struct rw_semaphore xattr_dir_sem; -#endif + struct dentry *xattr_root; /* root of /.reiserfs_priv/xattrs */ int j_errno; #ifdef CONFIG_QUOTA char *s_qf_names[MAXQUOTAS]; @@ -426,7 +426,7 @@ enum reiserfs_mount_options { partition will be dealt with in a manner of 3.5.x */ -/* -o hash={tea, rupasov, r5, detect} is meant for properly mounting +/* -o hash={tea, rupasov, r5, detect} is meant for properly mounting ** reiserfs disks from 3.5.19 or earlier. 99% of the time, this option ** is not required. 
If the normal autodection code can't determine which ** hash to use (because both hashes had the same value for a file) @@ -451,9 +451,9 @@ enum reiserfs_mount_options { REISERFS_NO_UNHASHED_RELOCATION, REISERFS_HASHED_RELOCATION, REISERFS_ATTRS, - REISERFS_XATTRS, REISERFS_XATTRS_USER, REISERFS_POSIXACL, + REISERFS_EXPOSE_PRIVROOT, REISERFS_BARRIER_NONE, REISERFS_BARRIER_FLUSH, @@ -489,9 +489,9 @@ enum reiserfs_mount_options { #define reiserfs_data_log(s) (REISERFS_SB(s)->s_mount_opt & (1 << REISERFS_DATA_LOG)) #define reiserfs_data_ordered(s) (REISERFS_SB(s)->s_mount_opt & (1 << REISERFS_DATA_ORDERED)) #define reiserfs_data_writeback(s) (REISERFS_SB(s)->s_mount_opt & (1 << REISERFS_DATA_WRITEBACK)) -#define reiserfs_xattrs(s) (REISERFS_SB(s)->s_mount_opt & (1 << REISERFS_XATTRS)) #define reiserfs_xattrs_user(s) (REISERFS_SB(s)->s_mount_opt & (1 << REISERFS_XATTRS_USER)) #define reiserfs_posixacl(s) (REISERFS_SB(s)->s_mount_opt & (1 << REISERFS_POSIXACL)) +#define reiserfs_expose_privroot(s) (REISERFS_SB(s)->s_mount_opt & (1 << REISERFS_EXPOSE_PRIVROOT)) #define reiserfs_xattrs_optional(s) (reiserfs_xattrs_user(s) || reiserfs_posixacl(s)) #define reiserfs_barrier_none(s) (REISERFS_SB(s)->s_mount_opt & (1 << REISERFS_BARRIER_NONE)) #define reiserfs_barrier_flush(s) (REISERFS_SB(s)->s_mount_opt & (1 << REISERFS_BARRIER_FLUSH)) diff --git a/include/linux/reiserfs_xattr.h b/include/linux/reiserfs_xattr.h index af135ae895d..99928dce37e 100644 --- a/include/linux/reiserfs_xattr.h +++ b/include/linux/reiserfs_xattr.h @@ -15,6 +15,12 @@ struct reiserfs_xattr_header { __le32 h_hash; /* hash of the value */ }; +struct reiserfs_security_handle { + char *name; + void *value; + size_t length; +}; + #ifdef __KERNEL__ #include <linux/init.h> @@ -29,22 +35,15 @@ struct iattr; struct super_block; struct nameidata; -struct reiserfs_xattr_handler { - char *prefix; - int (*init) (void); - void (*exit) (void); - int (*get) (struct inode * inode, const char *name, void *buffer, - size_t size); - int (*set) (struct inode * inode, const char *name, const void *buffer, - size_t size, int flags); - int (*del) (struct inode * inode, const char *name); - int (*list) (struct inode * inode, const char *name, int namelen, - char *out); - struct list_head handlers; -}; +int reiserfs_xattr_register_handlers(void) __init; +void reiserfs_xattr_unregister_handlers(void); +int reiserfs_xattr_init(struct super_block *sb, int mount_flags); +int reiserfs_lookup_privroot(struct super_block *sb); +int reiserfs_delete_xattrs(struct inode *inode); +int reiserfs_chown_xattrs(struct inode *inode, struct iattr *attrs); +int reiserfs_permission(struct inode *inode, int mask); #ifdef CONFIG_REISERFS_FS_XATTR -#define is_reiserfs_priv_object(inode) IS_PRIVATE(inode) #define has_xattr_dir(inode) (REISERFS_I(inode)->i_flags & i_has_xattr_dir) ssize_t reiserfs_getxattr(struct dentry *dentry, const char *name, void *buffer, size_t size); @@ -52,104 +51,94 @@ int reiserfs_setxattr(struct dentry *dentry, const char *name, const void *value, size_t size, int flags); ssize_t reiserfs_listxattr(struct dentry *dentry, char *buffer, size_t size); int reiserfs_removexattr(struct dentry *dentry, const char *name); -int reiserfs_delete_xattrs(struct inode *inode); -int reiserfs_chown_xattrs(struct inode *inode, struct iattr *attrs); -int reiserfs_xattr_init(struct super_block *sb, int mount_flags); -int reiserfs_permission(struct inode *inode, int mask); -int reiserfs_xattr_del(struct inode *, const char *); -int reiserfs_xattr_get(const struct inode 
*, const char *, void *, size_t); +int reiserfs_xattr_get(struct inode *, const char *, void *, size_t); int reiserfs_xattr_set(struct inode *, const char *, const void *, size_t, int); - -extern struct reiserfs_xattr_handler user_handler; -extern struct reiserfs_xattr_handler trusted_handler; -extern struct reiserfs_xattr_handler security_handler; - -int reiserfs_xattr_register_handlers(void) __init; -void reiserfs_xattr_unregister_handlers(void); - -static inline void reiserfs_write_lock_xattrs(struct super_block *sb) +int reiserfs_xattr_set_handle(struct reiserfs_transaction_handle *, + struct inode *, const char *, const void *, + size_t, int); + +extern struct xattr_handler reiserfs_xattr_user_handler; +extern struct xattr_handler reiserfs_xattr_trusted_handler; +extern struct xattr_handler reiserfs_xattr_security_handler; +#ifdef CONFIG_REISERFS_FS_SECURITY +int reiserfs_security_init(struct inode *dir, struct inode *inode, + struct reiserfs_security_handle *sec); +int reiserfs_security_write(struct reiserfs_transaction_handle *th, + struct inode *inode, + struct reiserfs_security_handle *sec); +void reiserfs_security_free(struct reiserfs_security_handle *sec); +#endif + +#define xattr_size(size) ((size) + sizeof(struct reiserfs_xattr_header)) +static inline loff_t reiserfs_xattr_nblocks(struct inode *inode, loff_t size) { - down_write(&REISERFS_XATTR_DIR_SEM(sb)); -} -static inline void reiserfs_write_unlock_xattrs(struct super_block *sb) -{ - up_write(&REISERFS_XATTR_DIR_SEM(sb)); -} -static inline void reiserfs_read_lock_xattrs(struct super_block *sb) -{ - down_read(&REISERFS_XATTR_DIR_SEM(sb)); + loff_t ret = 0; + if (reiserfs_file_data_log(inode)) { + ret = _ROUND_UP(xattr_size(size), inode->i_sb->s_blocksize); + ret >>= inode->i_sb->s_blocksize_bits; + } + return ret; } -static inline void reiserfs_read_unlock_xattrs(struct super_block *sb) +/* We may have to create up to 3 objects: xattr root, xattr dir, xattr file. + * Let's try to be smart about it. + * xattr root: We cache it. If it's not cached, we may need to create it. + * xattr dir: If anything has been loaded for this inode, we can set a flag + * saying so. + * xattr file: Since we don't cache xattrs, we can't tell. We always include + * blocks for it. + * + * However, since root and dir can be created between calls - YOU MUST SAVE + * THIS VALUE. 
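reiserfs_xattr_nblocks() above estimates how many filesystem blocks an xattr value will occupy when the file's data is journaled: header plus value, rounded up to the block size, then shifted down to a block count. A rough user-space illustration of the same arithmetic; the 8-byte header size and the helper names here are assumptions for the demo, not taken from the kernel.

#include <stdio.h>

/*
 * DEMO_XATTR_HEADER_SIZE stands in for sizeof(struct reiserfs_xattr_header);
 * ROUND_UP plays the role of the kernel's _ROUND_UP macro.
 */
#define DEMO_XATTR_HEADER_SIZE  8
#define ROUND_UP(x, sz)         ((((x) + (sz) - 1) / (sz)) * (sz))

static unsigned long xattr_nblocks(unsigned long value_size,
                                   unsigned long blocksize)
{
    unsigned long bytes = ROUND_UP(value_size + DEMO_XATTR_HEADER_SIZE,
                                   blocksize);
    return bytes / blocksize;
}

int main(void)
{
    /* A 5000-byte value on a 4096-byte-block filesystem needs 2 blocks. */
    printf("%lu\n", xattr_nblocks(5000, 4096));
    return 0;
}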
+ */ +static inline size_t reiserfs_xattr_jcreate_nblocks(struct inode *inode) { - up_read(&REISERFS_XATTR_DIR_SEM(sb)); -} + size_t nblocks = JOURNAL_BLOCKS_PER_OBJECT(inode->i_sb); -static inline void reiserfs_write_lock_xattr_i(struct inode *inode) -{ - down_write(&REISERFS_I(inode)->xattr_sem); -} -static inline void reiserfs_write_unlock_xattr_i(struct inode *inode) -{ - up_write(&REISERFS_I(inode)->xattr_sem); -} -static inline void reiserfs_read_lock_xattr_i(struct inode *inode) -{ - down_read(&REISERFS_I(inode)->xattr_sem); -} - -static inline void reiserfs_read_unlock_xattr_i(struct inode *inode) -{ - up_read(&REISERFS_I(inode)->xattr_sem); -} + if ((REISERFS_I(inode)->i_flags & i_has_xattr_dir) == 0) { + nblocks += JOURNAL_BLOCKS_PER_OBJECT(inode->i_sb); + if (!REISERFS_SB(inode->i_sb)->xattr_root->d_inode) + nblocks += JOURNAL_BLOCKS_PER_OBJECT(inode->i_sb); + } -static inline void reiserfs_mark_inode_private(struct inode *inode) -{ - inode->i_flags |= S_PRIVATE; + return nblocks; } static inline void reiserfs_init_xattr_rwsem(struct inode *inode) { - init_rwsem(&REISERFS_I(inode)->xattr_sem); + init_rwsem(&REISERFS_I(inode)->i_xattr_sem); } #else -#define is_reiserfs_priv_object(inode) 0 -#define reiserfs_mark_inode_private(inode) do {;} while(0) #define reiserfs_getxattr NULL #define reiserfs_setxattr NULL #define reiserfs_listxattr NULL #define reiserfs_removexattr NULL -#define reiserfs_write_lock_xattrs(sb) do {;} while(0) -#define reiserfs_write_unlock_xattrs(sb) do {;} while(0) -#define reiserfs_read_lock_xattrs(sb) -#define reiserfs_read_unlock_xattrs(sb) -#define reiserfs_permission NULL - -#define reiserfs_xattr_register_handlers() 0 -#define reiserfs_xattr_unregister_handlers() - -static inline int reiserfs_delete_xattrs(struct inode *inode) +static inline void reiserfs_init_xattr_rwsem(struct inode *inode) { - return 0; -}; -static inline int reiserfs_chown_xattrs(struct inode *inode, - struct iattr *attrs) +} +#endif /* CONFIG_REISERFS_FS_XATTR */ + +#ifndef CONFIG_REISERFS_FS_SECURITY +static inline int reiserfs_security_init(struct inode *dir, + struct inode *inode, + struct reiserfs_security_handle *sec) { return 0; -}; -static inline int reiserfs_xattr_init(struct super_block *sb, int mount_flags) +} +static inline int +reiserfs_security_write(struct reiserfs_transaction_handle *th, + struct inode *inode, + struct reiserfs_security_handle *sec) { - sb->s_flags = (sb->s_flags & ~MS_POSIXACL); /* to be sure */ return 0; -}; -static inline void reiserfs_init_xattr_rwsem(struct inode *inode) -{ } -#endif /* CONFIG_REISERFS_FS_XATTR */ +static inline void reiserfs_security_free(struct reiserfs_security_handle *sec) +{} +#endif #endif /* __KERNEL__ */ diff --git a/include/linux/res_counter.h b/include/linux/res_counter.h index 4c5bcf6ca7e..511f42fc681 100644 --- a/include/linux/res_counter.h +++ b/include/linux/res_counter.h @@ -49,6 +49,8 @@ struct res_counter { struct res_counter *parent; }; +#define RESOURCE_MAX (unsigned long long)LLONG_MAX + /** * Helpers to interact with userspace * res_counter_read_u64() - returns the value of the specified member. 
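When CONFIG_REISERFS_FS_XATTR or CONFIG_REISERFS_FS_SECURITY is disabled, the header substitutes empty static inline stubs so callers compile unchanged. A compact sketch of that configuration-stub idiom with a hypothetical CONFIG_DEMO_FEATURE option.

#include <stdio.h>

/*
 * With the option off, the same function name resolves to an empty
 * static inline that reports success, so callers need no #ifdefs.
 * CONFIG_DEMO_FEATURE stands in for options such as
 * CONFIG_REISERFS_FS_SECURITY.
 */
/* #define CONFIG_DEMO_FEATURE 1 */

#ifdef CONFIG_DEMO_FEATURE
static int demo_security_init(const char *name)
{
    printf("initialising security xattr for %s\n", name);
    return 0;
}
#else
static inline int demo_security_init(const char *name)
{
    (void)name;
    return 0;                        /* feature compiled out: succeed, do nothing */
}
#endif

int main(void)
{
    return demo_security_init("inode-42");
}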
diff --git a/include/linux/rfkill.h b/include/linux/rfkill.h index 164332cbb77..3392c59d270 100644 --- a/include/linux/rfkill.h +++ b/include/linux/rfkill.h @@ -4,149 +4,384 @@ /* * Copyright (C) 2006 - 2007 Ivo van Doorn * Copyright (C) 2007 Dmitry Torokhov + * Copyright 2009 Johannes Berg <johannes@sipsolutions.net> * - * This program is free software; you can redistribute it and/or modify - * it under the terms of the GNU General Public License as published by - * the Free Software Foundation; either version 2 of the License, or - * (at your option) any later version. + * Permission to use, copy, modify, and/or distribute this software for any + * purpose with or without fee is hereby granted, provided that the above + * copyright notice and this permission notice appear in all copies. * - * This program is distributed in the hope that it will be useful, - * but WITHOUT ANY WARRANTY; without even the implied warranty of - * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the - * GNU General Public License for more details. - * - * You should have received a copy of the GNU General Public License - * along with this program; if not, write to the - * Free Software Foundation, Inc., - * 59 Temple Place - Suite 330, Boston, MA 02111-1307, USA. + * THE SOFTWARE IS PROVIDED "AS IS" AND THE AUTHOR DISCLAIMS ALL WARRANTIES + * WITH REGARD TO THIS SOFTWARE INCLUDING ALL IMPLIED WARRANTIES OF + * MERCHANTABILITY AND FITNESS. IN NO EVENT SHALL THE AUTHOR BE LIABLE FOR + * ANY SPECIAL, DIRECT, INDIRECT, OR CONSEQUENTIAL DAMAGES OR ANY DAMAGES + * WHATSOEVER RESULTING FROM LOSS OF USE, DATA OR PROFITS, WHETHER IN AN + * ACTION OF CONTRACT, NEGLIGENCE OR OTHER TORTIOUS ACTION, ARISING OUT OF + * OR IN CONNECTION WITH THE USE OR PERFORMANCE OF THIS SOFTWARE. */ #include <linux/types.h> -#include <linux/kernel.h> -#include <linux/list.h> -#include <linux/mutex.h> -#include <linux/device.h> -#include <linux/leds.h> + +/* define userspace visible states */ +#define RFKILL_STATE_SOFT_BLOCKED 0 +#define RFKILL_STATE_UNBLOCKED 1 +#define RFKILL_STATE_HARD_BLOCKED 2 /** * enum rfkill_type - type of rfkill switch. - * RFKILL_TYPE_WLAN: switch is on a 802.11 wireless network device. - * RFKILL_TYPE_BLUETOOTH: switch is on a bluetooth device. - * RFKILL_TYPE_UWB: switch is on a ultra wideband device. - * RFKILL_TYPE_WIMAX: switch is on a WiMAX device. - * RFKILL_TYPE_WWAN: switch is on a wireless WAN device. + * + * @RFKILL_TYPE_ALL: toggles all switches (userspace only) + * @RFKILL_TYPE_WLAN: switch is on a 802.11 wireless network device. + * @RFKILL_TYPE_BLUETOOTH: switch is on a bluetooth device. + * @RFKILL_TYPE_UWB: switch is on a ultra wideband device. + * @RFKILL_TYPE_WIMAX: switch is on a WiMAX device. + * @RFKILL_TYPE_WWAN: switch is on a wireless WAN device. 
+ * @NUM_RFKILL_TYPES: number of defined rfkill types */ enum rfkill_type { - RFKILL_TYPE_WLAN , + RFKILL_TYPE_ALL = 0, + RFKILL_TYPE_WLAN, RFKILL_TYPE_BLUETOOTH, RFKILL_TYPE_UWB, RFKILL_TYPE_WIMAX, RFKILL_TYPE_WWAN, - RFKILL_TYPE_MAX, + RFKILL_TYPE_GPS, + NUM_RFKILL_TYPES, }; -enum rfkill_state { - RFKILL_STATE_SOFT_BLOCKED = 0, /* Radio output blocked */ - RFKILL_STATE_UNBLOCKED = 1, /* Radio output allowed */ - RFKILL_STATE_HARD_BLOCKED = 2, /* Output blocked, non-overrideable */ - RFKILL_STATE_MAX, /* marker for last valid state */ +/** + * enum rfkill_operation - operation types + * @RFKILL_OP_ADD: a device was added + * @RFKILL_OP_DEL: a device was removed + * @RFKILL_OP_CHANGE: a device's state changed -- userspace changes one device + * @RFKILL_OP_CHANGE_ALL: userspace changes all devices (of a type, or all) + */ +enum rfkill_operation { + RFKILL_OP_ADD = 0, + RFKILL_OP_DEL, + RFKILL_OP_CHANGE, + RFKILL_OP_CHANGE_ALL, }; +/** + * struct rfkill_event - events for userspace on /dev/rfkill + * @idx: index of dev rfkill + * @type: type of the rfkill struct + * @op: operation code + * @hard: hard state (0/1) + * @soft: soft state (0/1) + * + * Structure used for userspace communication on /dev/rfkill, + * used for events from the kernel and control to the kernel. + */ +struct rfkill_event { + __u32 idx; + __u8 type; + __u8 op; + __u8 soft, hard; +} __packed; + /* - * These are DEPRECATED, drivers using them should be verified to - * comply with the rfkill usage guidelines in Documentation/rfkill.txt - * and then converted to use the new names for rfkill_state - */ -#define RFKILL_STATE_OFF RFKILL_STATE_SOFT_BLOCKED -#define RFKILL_STATE_ON RFKILL_STATE_UNBLOCKED - -/** - * struct rfkill - rfkill control structure. - * @name: Name of the switch. - * @type: Radio type which the button controls, the value stored - * here should be a value from enum rfkill_type. - * @state: State of the switch, "UNBLOCKED" means radio can operate. - * @user_claim_unsupported: Whether the hardware supports exclusive - * RF-kill control by userspace. Set this before registering. - * @user_claim: Set when the switch is controlled exlusively by userspace. - * @mutex: Guards switch state transitions. It serializes callbacks - * and also protects the state. - * @data: Pointer to the RF button drivers private data which will be - * passed along when toggling radio state. - * @toggle_radio(): Mandatory handler to control state of the radio. - * only RFKILL_STATE_SOFT_BLOCKED and RFKILL_STATE_UNBLOCKED are - * valid parameters. - * @get_state(): handler to read current radio state from hardware, - * may be called from atomic context, should return 0 on success. - * Either this handler OR judicious use of rfkill_force_state() is - * MANDATORY for any driver capable of RFKILL_STATE_HARD_BLOCKED. - * @led_trigger: A LED trigger for this button's LED. - * @dev: Device structure integrating the switch into device tree. - * @node: Used to place switch into list of all switches known to the - * the system. - * - * This structure represents a RF switch located on a network device. 
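struct rfkill_event is the new userspace ABI on /dev/rfkill: the kernel emits one RFKILL_OP_ADD event per existing switch and further events as states change, and userspace can write events back to request changes. A minimal monitor sketch under those assumptions; it redeclares the structure locally so it builds without the patched header, and error handling is kept short.

#include <fcntl.h>
#include <stdint.h>
#include <stdio.h>
#include <unistd.h>

/* Local copy of the event layout shown above (8 bytes, packed). */
struct rfkill_event {
    uint32_t idx;
    uint8_t  type;
    uint8_t  op;
    uint8_t  soft, hard;
} __attribute__((packed));

int main(void)
{
    struct rfkill_event ev;
    int fd = open("/dev/rfkill", O_RDONLY);

    if (fd < 0) {
        perror("open /dev/rfkill");
        return 1;
    }

    /* One RFKILL_OP_ADD per existing switch, then change events as they happen. */
    while (read(fd, &ev, sizeof(ev)) >= 8 /* RFKILL_EVENT_SIZE_V1 */) {
        printf("idx=%u type=%u op=%u soft=%u hard=%u\n",
               ev.idx, ev.type, ev.op, ev.soft, ev.hard);
    }

    close(fd);
    return 0;
}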
- */ -struct rfkill { - const char *name; - enum rfkill_type type; - - bool user_claim_unsupported; - bool user_claim; - - /* the mutex serializes callbacks and also protects - * the state */ - struct mutex mutex; - enum rfkill_state state; - void *data; - int (*toggle_radio)(void *data, enum rfkill_state state); - int (*get_state)(void *data, enum rfkill_state *state); + * We are planning to be backward and forward compatible with changes + * to the event struct, by adding new, optional, members at the end. + * When reading an event (whether the kernel from userspace or vice + * versa) we need to accept anything that's at least as large as the + * version 1 event size, but might be able to accept other sizes in + * the future. + * + * One exception is the kernel -- we already have two event sizes in + * that we've made the 'hard' member optional since our only option + * is to ignore it anyway. + */ +#define RFKILL_EVENT_SIZE_V1 8 -#ifdef CONFIG_RFKILL_LEDS - struct led_trigger led_trigger; -#endif +/* ioctl for turning off rfkill-input (if present) */ +#define RFKILL_IOC_MAGIC 'R' +#define RFKILL_IOC_NOINPUT 1 +#define RFKILL_IOCTL_NOINPUT _IO(RFKILL_IOC_MAGIC, RFKILL_IOC_NOINPUT) - struct device dev; - struct list_head node; - enum rfkill_state state_for_resume; +/* and that's all userspace gets */ +#ifdef __KERNEL__ +/* don't allow anyone to use these in the kernel */ +enum rfkill_user_states { + RFKILL_USER_STATE_SOFT_BLOCKED = RFKILL_STATE_SOFT_BLOCKED, + RFKILL_USER_STATE_UNBLOCKED = RFKILL_STATE_UNBLOCKED, + RFKILL_USER_STATE_HARD_BLOCKED = RFKILL_STATE_HARD_BLOCKED, }; -#define to_rfkill(d) container_of(d, struct rfkill, dev) +#undef RFKILL_STATE_SOFT_BLOCKED +#undef RFKILL_STATE_UNBLOCKED +#undef RFKILL_STATE_HARD_BLOCKED + +#include <linux/kernel.h> +#include <linux/list.h> +#include <linux/mutex.h> +#include <linux/device.h> +#include <linux/leds.h> +#include <linux/err.h> + +/* this is opaque */ +struct rfkill; + +/** + * struct rfkill_ops - rfkill driver methods + * + * @poll: poll the rfkill block state(s) -- only assign this method + * when you need polling. When called, simply call one of the + * rfkill_set{,_hw,_sw}_state family of functions. If the hw + * is getting unblocked you need to take into account the return + * value of those functions to make sure the software block is + * properly used. + * @query: query the rfkill block state(s) and call exactly one of the + * rfkill_set{,_hw,_sw}_state family of functions. Assign this + * method if input events can cause hardware state changes to make + * the rfkill core query your driver before setting a requested + * block. + * @set_block: turn the transmitter on (blocked == false) or off + * (blocked == true) -- ignore and return 0 when hard blocked. + * This callback must be assigned. + */ +struct rfkill_ops { + void (*poll)(struct rfkill *rfkill, void *data); + void (*query)(struct rfkill *rfkill, void *data); + int (*set_block)(void *data, bool blocked); +}; + +#if defined(CONFIG_RFKILL) || defined(CONFIG_RFKILL_MODULE) +/** + * rfkill_alloc - allocate rfkill structure + * @name: name of the struct -- the string is not copied internally + * @parent: device that has rf switch on it + * @type: type of the switch (RFKILL_TYPE_*) + * @ops: rfkill methods + * @ops_data: data passed to each method + * + * This function should be called by the transmitter driver to allocate an + * rfkill structure. Returns %NULL on failure. 
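For drivers, the new flow is rfkill_alloc() with a struct rfkill_ops followed by rfkill_register(), and rfkill_unregister()/rfkill_destroy() on teardown. A hypothetical module sketch of that sequence; the "demo_radio" name, the WLAN type, the NULL parent and the empty set_block() body are placeholders, not a real device.

#include <linux/module.h>
#include <linux/rfkill.h>

static int demo_set_block(void *data, bool blocked)
{
    /* Program the hardware transmitter on (blocked == false) or off here. */
    return 0;
}

static const struct rfkill_ops demo_rfkill_ops = {
    .set_block = demo_set_block,
};

static struct rfkill *demo_rfkill;

static int __init demo_rfkill_init(void)
{
    int err;

    demo_rfkill = rfkill_alloc("demo_radio", NULL /* parent device */,
                               RFKILL_TYPE_WLAN, &demo_rfkill_ops, NULL);
    if (!demo_rfkill)
        return -ENOMEM;

    err = rfkill_register(demo_rfkill);
    if (err)
        rfkill_destroy(demo_rfkill);
    return err;
}

static void __exit demo_rfkill_exit(void)
{
    rfkill_unregister(demo_rfkill);
    rfkill_destroy(demo_rfkill);
}

module_init(demo_rfkill_init);
module_exit(demo_rfkill_exit);
MODULE_LICENSE("GPL");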
+ */ +struct rfkill * __must_check rfkill_alloc(const char *name, + struct device *parent, + const enum rfkill_type type, + const struct rfkill_ops *ops, + void *ops_data); -struct rfkill * __must_check rfkill_allocate(struct device *parent, - enum rfkill_type type); -void rfkill_free(struct rfkill *rfkill); +/** + * rfkill_register - Register a rfkill structure. + * @rfkill: rfkill structure to be registered + * + * This function should be called by the transmitter driver to register + * the rfkill structure. Before calling this function the driver needs + * to be ready to service method calls from rfkill. + * + * If rfkill_init_sw_state() is not called before registration, + * set_block() will be called to initialize the software blocked state + * to a default value. + * + * If the hardware blocked state is not set before registration, + * it is assumed to be unblocked. + */ int __must_check rfkill_register(struct rfkill *rfkill); + +/** + * rfkill_pause_polling(struct rfkill *rfkill) + * + * Pause polling -- say transmitter is off for other reasons. + * NOTE: not necessary for suspend/resume -- in that case the + * core stops polling anyway + */ +void rfkill_pause_polling(struct rfkill *rfkill); + +/** + * rfkill_resume_polling(struct rfkill *rfkill) + * + * Pause polling -- say transmitter is off for other reasons. + * NOTE: not necessary for suspend/resume -- in that case the + * core stops polling anyway + */ +void rfkill_resume_polling(struct rfkill *rfkill); + + +/** + * rfkill_unregister - Unregister a rfkill structure. + * @rfkill: rfkill structure to be unregistered + * + * This function should be called by the network driver during device + * teardown to destroy rfkill structure. Until it returns, the driver + * needs to be able to service method calls. + */ void rfkill_unregister(struct rfkill *rfkill); -int rfkill_force_state(struct rfkill *rfkill, enum rfkill_state state); -int rfkill_set_default(enum rfkill_type type, enum rfkill_state state); +/** + * rfkill_destroy - free rfkill structure + * @rfkill: rfkill structure to be destroyed + * + * Destroys the rfkill structure. + */ +void rfkill_destroy(struct rfkill *rfkill); /** - * rfkill_state_complement - return complementar state - * @state: state to return the complement of + * rfkill_set_hw_state - Set the internal rfkill hardware block state + * @rfkill: pointer to the rfkill class to modify. + * @state: the current hardware block state to set * - * Returns RFKILL_STATE_SOFT_BLOCKED if @state is RFKILL_STATE_UNBLOCKED, - * returns RFKILL_STATE_UNBLOCKED otherwise. + * rfkill drivers that get events when the hard-blocked state changes + * use this function to notify the rfkill core (and through that also + * userspace) of the current state. They should also use this after + * resume if the state could have changed. + * + * You need not (but may) call this function if poll_state is assigned. + * + * This function can be called in any context, even from within rfkill + * callbacks. + * + * The function returns the combined block state (true if transmitter + * should be blocked) so that drivers need not keep track of the soft + * block state -- which they might not be able to. */ -static inline enum rfkill_state rfkill_state_complement(enum rfkill_state state) +bool rfkill_set_hw_state(struct rfkill *rfkill, bool blocked); + +/** + * rfkill_set_sw_state - Set the internal rfkill software block state + * @rfkill: pointer to the rfkill class to modify. 
+ * @state: the current software block state to set + * + * rfkill drivers that get events when the soft-blocked state changes + * (yes, some platforms directly act on input but allow changing again) + * use this function to notify the rfkill core (and through that also + * userspace) of the current state. + * + * Drivers should also call this function after resume if the state has + * been changed by the user. This only makes sense for "persistent" + * devices (see rfkill_init_sw_state()). + * + * This function can be called in any context, even from within rfkill + * callbacks. + * + * The function returns the combined block state (true if transmitter + * should be blocked). + */ +bool rfkill_set_sw_state(struct rfkill *rfkill, bool blocked); + +/** + * rfkill_init_sw_state - Initialize persistent software block state + * @rfkill: pointer to the rfkill class to modify. + * @state: the current software block state to set + * + * rfkill drivers that preserve their software block state over power off + * use this function to notify the rfkill core (and through that also + * userspace) of their initial state. It should only be used before + * registration. + * + * In addition, it marks the device as "persistent", an attribute which + * can be read by userspace. Persistent devices are expected to preserve + * their own state when suspended. + */ +void rfkill_init_sw_state(struct rfkill *rfkill, bool blocked); + +/** + * rfkill_set_states - Set the internal rfkill block states + * @rfkill: pointer to the rfkill class to modify. + * @sw: the current software block state to set + * @hw: the current hardware block state to set + * + * This function can be called in any context, even from within rfkill + * callbacks. + */ +void rfkill_set_states(struct rfkill *rfkill, bool sw, bool hw); + +/** + * rfkill_blocked - query rfkill block + * + * @rfkill: rfkill struct to query + */ +bool rfkill_blocked(struct rfkill *rfkill); +#else /* !RFKILL */ +static inline struct rfkill * __must_check +rfkill_alloc(const char *name, + struct device *parent, + const enum rfkill_type type, + const struct rfkill_ops *ops, + void *ops_data) +{ + return ERR_PTR(-ENODEV); +} + +static inline int __must_check rfkill_register(struct rfkill *rfkill) +{ + if (rfkill == ERR_PTR(-ENODEV)) + return 0; + return -EINVAL; +} + +static inline void rfkill_pause_polling(struct rfkill *rfkill) +{ +} + +static inline void rfkill_resume_polling(struct rfkill *rfkill) +{ +} + +static inline void rfkill_unregister(struct rfkill *rfkill) +{ +} + +static inline void rfkill_destroy(struct rfkill *rfkill) { - return (state == RFKILL_STATE_UNBLOCKED) ? - RFKILL_STATE_SOFT_BLOCKED : RFKILL_STATE_UNBLOCKED; } +static inline bool rfkill_set_hw_state(struct rfkill *rfkill, bool blocked) +{ + return blocked; +} + +static inline bool rfkill_set_sw_state(struct rfkill *rfkill, bool blocked) +{ + return blocked; +} + +static inline void rfkill_init_sw_state(struct rfkill *rfkill, bool blocked) +{ +} + +static inline void rfkill_set_states(struct rfkill *rfkill, bool sw, bool hw) +{ +} + +static inline bool rfkill_blocked(struct rfkill *rfkill) +{ + return false; +} +#endif /* RFKILL || RFKILL_MODULE */ + + +#ifdef CONFIG_RFKILL_LEDS /** - * rfkill_get_led_name - Get the LED trigger name for the button's LED. + * rfkill_get_led_trigger_name - Get the LED trigger name for the button's LED. * This function might return a NULL pointer if registering of the - * LED trigger failed. - * Use this as "default_trigger" for the LED. + * LED trigger failed. 
Use this as "default_trigger" for the LED. */ -static inline char *rfkill_get_led_name(struct rfkill *rfkill) -{ -#ifdef CONFIG_RFKILL_LEDS - return (char *)(rfkill->led_trigger.name); +const char *rfkill_get_led_trigger_name(struct rfkill *rfkill); + +/** + * rfkill_set_led_trigger_name -- set the LED trigger name + * @rfkill: rfkill struct + * @name: LED trigger name + * + * This function sets the LED trigger name of the radio LED + * trigger that rfkill creates. It is optional, but if called + * must be called before rfkill_register() to be effective. + */ +void rfkill_set_led_trigger_name(struct rfkill *rfkill, const char *name); #else +static inline const char *rfkill_get_led_trigger_name(struct rfkill *rfkill) +{ return NULL; -#endif } +static inline void +rfkill_set_led_trigger_name(struct rfkill *rfkill, const char *name) +{ +} +#endif + +#endif /* __KERNEL__ */ + #endif /* RFKILL_H */ diff --git a/include/linux/ring_buffer.h b/include/linux/ring_buffer.h index b3b35966008..5fcc31ed577 100644 --- a/include/linux/ring_buffer.h +++ b/include/linux/ring_buffer.h @@ -1,6 +1,7 @@ #ifndef _LINUX_RING_BUFFER_H #define _LINUX_RING_BUFFER_H +#include <linux/kmemcheck.h> #include <linux/mm.h> #include <linux/seq_file.h> @@ -8,20 +9,27 @@ struct ring_buffer; struct ring_buffer_iter; /* - * Don't reference this struct directly, use functions below. + * Don't refer to this struct directly, use functions below. */ struct ring_buffer_event { - u32 type:2, len:3, time_delta:27; + kmemcheck_bitfield_begin(bitfield); + u32 type_len:5, time_delta:27; + kmemcheck_bitfield_end(bitfield); + u32 array[]; }; /** * enum ring_buffer_type - internal ring buffer types * - * @RINGBUF_TYPE_PADDING: Left over page padding - * array is ignored - * size is variable depending on how much + * @RINGBUF_TYPE_PADDING: Left over page padding or discarded event + * If time_delta is 0: + * array is ignored + * size is variable depending on how much * padding is needed + * If time_delta is non zero: + * array[0] holds the actual length + * size = 4 + length (bytes) * * @RINGBUF_TYPE_TIME_EXTEND: Extend the time delta * array[0] = time delta (28 .. 59) @@ -32,22 +40,23 @@ struct ring_buffer_event { * array[1..2] = tv_sec * size = 16 bytes * - * @RINGBUF_TYPE_DATA: Data record - * If len is zero: + * <= @RINGBUF_TYPE_DATA_TYPE_LEN_MAX: + * Data record + * If type_len is zero: * array[0] holds the actual length * array[1..(length+3)/4] holds data - * size = 4 + 4 + length (bytes) + * size = 4 + length (bytes) * else - * length = len << 2 + * length = type_len << 2 * array[0..(length+3)/4-1] holds data * size = 4 + length (bytes) */ enum ring_buffer_type { + RINGBUF_TYPE_DATA_TYPE_LEN_MAX = 28, RINGBUF_TYPE_PADDING, RINGBUF_TYPE_TIME_EXTEND, /* FIXME: RINGBUF_TYPE_TIME_STAMP not implemented */ RINGBUF_TYPE_TIME_STAMP, - RINGBUF_TYPE_DATA, }; unsigned ring_buffer_event_length(struct ring_buffer_event *event); @@ -66,21 +75,47 @@ ring_buffer_event_time_delta(struct ring_buffer_event *event) } /* + * ring_buffer_discard_commit will remove an event that has not + * ben committed yet. If this is used, then ring_buffer_unlock_commit + * must not be called on the discarded event. This function + * will try to remove the event from the ring buffer completely + * if another event has not been written after it. 
+ * + * Example use: + * + * if (some_condition) + * ring_buffer_discard_commit(buffer, event); + * else + * ring_buffer_unlock_commit(buffer, event); + */ +void ring_buffer_discard_commit(struct ring_buffer *buffer, + struct ring_buffer_event *event); + +/* * size is in bytes for each per CPU buffer. */ struct ring_buffer * -ring_buffer_alloc(unsigned long size, unsigned flags); +__ring_buffer_alloc(unsigned long size, unsigned flags, struct lock_class_key *key); + +/* + * Because the ring buffer is generic, if other users of the ring buffer get + * traced by ftrace, it can produce lockdep warnings. We need to keep each + * ring buffer's lock class separate. + */ +#define ring_buffer_alloc(size, flags) \ +({ \ + static struct lock_class_key __key; \ + __ring_buffer_alloc((size), (flags), &__key); \ +}) + void ring_buffer_free(struct ring_buffer *buffer); int ring_buffer_resize(struct ring_buffer *buffer, unsigned long size); -struct ring_buffer_event * -ring_buffer_lock_reserve(struct ring_buffer *buffer, - unsigned long length, - unsigned long *flags); +struct ring_buffer_event *ring_buffer_lock_reserve(struct ring_buffer *buffer, + unsigned long length); int ring_buffer_unlock_commit(struct ring_buffer *buffer, - struct ring_buffer_event *event, - unsigned long flags); + struct ring_buffer_event *event); int ring_buffer_write(struct ring_buffer *buffer, unsigned long length, void *data); @@ -105,8 +140,17 @@ unsigned long ring_buffer_size(struct ring_buffer *buffer); void ring_buffer_reset_cpu(struct ring_buffer *buffer, int cpu); void ring_buffer_reset(struct ring_buffer *buffer); +#ifdef CONFIG_RING_BUFFER_ALLOW_SWAP int ring_buffer_swap_cpu(struct ring_buffer *buffer_a, struct ring_buffer *buffer_b, int cpu); +#else +static inline int +ring_buffer_swap_cpu(struct ring_buffer *buffer_a, + struct ring_buffer *buffer_b, int cpu) +{ + return -ENODEV; +} +#endif int ring_buffer_empty(struct ring_buffer *buffer); int ring_buffer_empty_cpu(struct ring_buffer *buffer, int cpu); @@ -120,18 +164,26 @@ unsigned long ring_buffer_entries(struct ring_buffer *buffer); unsigned long ring_buffer_overruns(struct ring_buffer *buffer); unsigned long ring_buffer_entries_cpu(struct ring_buffer *buffer, int cpu); unsigned long ring_buffer_overrun_cpu(struct ring_buffer *buffer, int cpu); +unsigned long ring_buffer_commit_overrun_cpu(struct ring_buffer *buffer, int cpu); + +u64 ring_buffer_time_stamp(struct ring_buffer *buffer, int cpu); +void ring_buffer_normalize_time_stamp(struct ring_buffer *buffer, + int cpu, u64 *ts); +void ring_buffer_set_clock(struct ring_buffer *buffer, + u64 (*clock)(void)); -u64 ring_buffer_time_stamp(int cpu); -void ring_buffer_normalize_time_stamp(int cpu, u64 *ts); +size_t ring_buffer_page_len(void *page); -void tracing_on(void); -void tracing_off(void); -void tracing_off_permanent(void); void *ring_buffer_alloc_read_page(struct ring_buffer *buffer); void ring_buffer_free_read_page(struct ring_buffer *buffer, void *data); -int ring_buffer_read_page(struct ring_buffer *buffer, - void **data_page, int cpu, int full); +int ring_buffer_read_page(struct ring_buffer *buffer, void **data_page, + size_t len, int cpu, int full); + +struct trace_seq; + +int ring_buffer_print_entry_header(struct trace_seq *s); +int ring_buffer_print_page_header(struct trace_seq *s); enum ring_buffer_flags { RB_FL_OVERWRITE = 1 << 0, diff --git a/include/linux/rmap.h b/include/linux/rmap.h index b35bc0e19cd..bf116d0dbf2 100644 --- a/include/linux/rmap.h +++ b/include/linux/rmap.h @@ -83,7 +83,8 @@ static 
inline void page_dup_rmap(struct page *page, struct vm_area_struct *vma, /* * Called from mm/vmscan.c to handle paging out */ -int page_referenced(struct page *, int is_locked, struct mem_cgroup *cnt); +int page_referenced(struct page *, int is_locked, + struct mem_cgroup *cnt, unsigned long *vm_flags); int try_to_unmap(struct page *, int ignore_refs); /* @@ -105,18 +106,11 @@ unsigned long page_address_in_vma(struct page *, struct vm_area_struct *); */ int page_mkclean(struct page *); -#ifdef CONFIG_UNEVICTABLE_LRU /* * called in munlock()/munmap() path to check for other vmas holding * the page mlocked. */ int try_to_munlock(struct page *); -#else -static inline int try_to_munlock(struct page *page) -{ - return 0; /* a.k.a. SWAP_SUCCESS */ -} -#endif #else /* !CONFIG_MMU */ @@ -124,7 +118,14 @@ static inline int try_to_munlock(struct page *page) #define anon_vma_prepare(vma) (0) #define anon_vma_link(vma) do {} while (0) -#define page_referenced(page,l,cnt) TestClearPageReferenced(page) +static inline int page_referenced(struct page *page, int is_locked, + struct mem_cgroup *cnt, + unsigned long *vm_flags) +{ + *vm_flags = 0; + return TestClearPageReferenced(page); +} + #define try_to_unmap(page, refs) SWAP_FAIL static inline int page_mkclean(struct page *page) diff --git a/include/linux/romfs_fs.h b/include/linux/romfs_fs.h index e20bbf9eb36..c490fbc43fe 100644 --- a/include/linux/romfs_fs.h +++ b/include/linux/romfs_fs.h @@ -53,9 +53,4 @@ struct romfs_inode { #define ROMFH_PAD (ROMFH_SIZE-1) #define ROMFH_MASK (~ROMFH_PAD) -#ifdef __KERNEL__ - -/* Not much now */ - -#endif /* __KERNEL__ */ #endif diff --git a/include/linux/rotary_encoder.h b/include/linux/rotary_encoder.h new file mode 100644 index 00000000000..215278b8df2 --- /dev/null +++ b/include/linux/rotary_encoder.h @@ -0,0 +1,15 @@ +#ifndef __ROTARY_ENCODER_H__ +#define __ROTARY_ENCODER_H__ + +struct rotary_encoder_platform_data { + unsigned int steps; + unsigned int axis; + unsigned int gpio_a; + unsigned int gpio_b; + unsigned int inverted_a; + unsigned int inverted_b; + bool relative_axis; + bool rollover; +}; + +#endif /* __ROTARY_ENCODER_H__ */ diff --git a/include/linux/rtc-v3020.h b/include/linux/rtc-v3020.h index bf74e63c98f..8ba646e610d 100644 --- a/include/linux/rtc-v3020.h +++ b/include/linux/rtc-v3020.h @@ -14,6 +14,12 @@ * is used depends on the board. 
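struct rotary_encoder_platform_data above is filled in by board code and handed to the input driver through a platform device. A hypothetical board-file sketch under that assumption; the GPIO numbers, step count and the "rotary-encoder" device name are illustrative values, not taken from this patch.

#include <linux/init.h>
#include <linux/input.h>
#include <linux/platform_device.h>
#include <linux/rotary_encoder.h>

static struct rotary_encoder_platform_data demo_encoder_data = {
    .steps          = 24,
    .axis           = REL_X,        /* report relative X movement */
    .gpio_a         = 17,
    .gpio_b         = 18,
    .inverted_a     = 0,
    .inverted_b     = 0,
    .relative_axis  = true,
    .rollover       = false,
};

static struct platform_device demo_encoder_device = {
    .name = "rotary-encoder",       /* must match the driver's name */
    .id   = 0,
    .dev  = {
        .platform_data = &demo_encoder_data,
    },
};

static int __init demo_board_init(void)
{
    return platform_device_register(&demo_encoder_device);
}
arch_initcall(demo_board_init);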
*/ struct v3020_platform_data { int leftshift; /* (1<<(leftshift)) & readl() */ + + int use_gpio:1; + unsigned int gpio_cs; + unsigned int gpio_wr; + unsigned int gpio_rd; + unsigned int gpio_io; }; #define V3020_STATUS_0 0x00 diff --git a/include/linux/rtc.h b/include/linux/rtc.h index 4046b75563c..60f88a7fb13 100644 --- a/include/linux/rtc.h +++ b/include/linux/rtc.h @@ -99,6 +99,7 @@ struct rtc_pll_info { #ifdef __KERNEL__ +#include <linux/types.h> #include <linux/interrupt.h> extern int rtc_month_days(unsigned int month, unsigned int year); @@ -232,6 +233,11 @@ int rtc_register(rtc_task_t *task); int rtc_unregister(rtc_task_t *task); int rtc_control(rtc_task_t *t, unsigned int cmd, unsigned long arg); +static inline bool is_leap_year(unsigned int year) +{ + return (!(year % 4) && (year % 100)) || !(year % 400); +} + #endif /* __KERNEL__ */ #endif /* _LINUX_RTC_H_ */ diff --git a/include/linux/rtnetlink.h b/include/linux/rtnetlink.h index e88f7058b3a..adf2068d12b 100644 --- a/include/linux/rtnetlink.h +++ b/include/linux/rtnetlink.h @@ -1,6 +1,7 @@ #ifndef __LINUX_RTNETLINK_H #define __LINUX_RTNETLINK_H +#include <linux/types.h> #include <linux/netlink.h> #include <linux/if_link.h> #include <linux/if_addr.h> @@ -103,7 +104,7 @@ enum { RTM_NEWADDRLABEL = 72, #define RTM_NEWADDRLABEL RTM_NEWADDRLABEL RTM_DELADDRLABEL, -#define RTM_NEWADDRLABEL RTM_NEWADDRLABEL +#define RTM_DELADDRLABEL RTM_DELADDRLABEL RTM_GETADDRLABEL, #define RTM_GETADDRLABEL RTM_GETADDRLABEL @@ -216,6 +217,7 @@ enum #define RTPROT_DNROUTED 13 /* DECnet routing daemon */ #define RTPROT_XORP 14 /* XORP */ #define RTPROT_NTK 15 /* Netsukuku */ +#define RTPROT_DHCP 16 /* DHCP client */ /* rtm_scope @@ -621,8 +623,8 @@ static __inline__ int rtattr_strcmp(const struct rtattr *rta, const char *str) extern int rtnetlink_send(struct sk_buff *skb, struct net *net, u32 pid, u32 group, int echo); extern int rtnl_unicast(struct sk_buff *skb, struct net *net, u32 pid); -extern int rtnl_notify(struct sk_buff *skb, struct net *net, u32 pid, u32 group, - struct nlmsghdr *nlh, gfp_t flags); +extern void rtnl_notify(struct sk_buff *skb, struct net *net, u32 pid, + u32 group, struct nlmsghdr *nlh, gfp_t flags); extern void rtnl_set_sk_err(struct net *net, u32 group, int error); extern int rtnetlink_put_metrics(struct sk_buff *skb, u32 *metrics); extern int rtnl_put_cacheinfo(struct sk_buff *skb, struct dst_entry *dst, diff --git a/include/linux/rxrpc.h b/include/linux/rxrpc.h index f7b826b565c..a53915cd558 100644 --- a/include/linux/rxrpc.h +++ b/include/linux/rxrpc.h @@ -58,5 +58,12 @@ struct sockaddr_rxrpc { #define RXRPC_SECURITY_AUTH 1 /* authenticated packets */ #define RXRPC_SECURITY_ENCRYPT 2 /* encrypted packets */ +/* + * RxRPC security indices + */ +#define RXRPC_SECURITY_NONE 0 /* no security protocol */ +#define RXRPC_SECURITY_RXKAD 2 /* kaserver or kerberos 4 */ +#define RXRPC_SECURITY_RXGK 4 /* gssapi-based */ +#define RXRPC_SECURITY_RXK5 5 /* kerberos 5 */ #endif /* _LINUX_RXRPC_H */ diff --git a/include/linux/scatterlist.h b/include/linux/scatterlist.h index e5996984ddd..9aaf5bfdad1 100644 --- a/include/linux/scatterlist.h +++ b/include/linux/scatterlist.h @@ -242,6 +242,8 @@ size_t sg_copy_to_buffer(struct scatterlist *sgl, unsigned int nents, */ #define SG_MITER_ATOMIC (1 << 0) /* use kmap_atomic */ +#define SG_MITER_TO_SG (1 << 1) /* flush back to phys on unmap */ +#define SG_MITER_FROM_SG (1 << 2) /* nop */ struct sg_mapping_iter { /* the following three fields can be accessed directly */ diff --git 
a/include/linux/sched.h b/include/linux/sched.h index 4cae9b81a1f..f3d74bd04d1 100644 --- a/include/linux/sched.h +++ b/include/linux/sched.h @@ -38,6 +38,8 @@ #define SCHED_BATCH 3 /* SCHED_ISO: reserved but not implemented yet */ #define SCHED_IDLE 5 +/* Can be ORed in to make sure the process is reverted back to SCHED_NORMAL on fork */ +#define SCHED_RESET_ON_FORK 0x40000000 #ifdef __KERNEL__ @@ -68,7 +70,7 @@ struct sched_param { #include <linux/smp.h> #include <linux/sem.h> #include <linux/signal.h> -#include <linux/fs_struct.h> +#include <linux/path.h> #include <linux/compiler.h> #include <linux/completion.h> #include <linux/pid.h> @@ -77,6 +79,7 @@ struct sched_param { #include <linux/proportions.h> #include <linux/seccomp.h> #include <linux/rcupdate.h> +#include <linux/rculist.h> #include <linux/rtmutex.h> #include <linux/time.h> @@ -91,12 +94,13 @@ struct sched_param { #include <asm/processor.h> -struct mem_cgroup; struct exec_domain; struct futex_pi_state; struct robust_list_head; struct bio; -struct bts_tracer; +struct fs_struct; +struct bts_context; +struct perf_counter_context; /* * List of flags we want to share for kernel threads, @@ -115,6 +119,7 @@ struct bts_tracer; * 11 bit fractions. */ extern unsigned long avenrun[]; /* Load averages */ +extern void get_avenrun(unsigned long *loads, unsigned long offset, int shift); #define FSHIFT 11 /* nr of bits of precision */ #define FIXED_1 (1<<FSHIFT) /* 1.0 as fixed-point */ @@ -134,8 +139,11 @@ DECLARE_PER_CPU(unsigned long, process_counts); extern int nr_processes(void); extern unsigned long nr_running(void); extern unsigned long nr_uninterruptible(void); -extern unsigned long nr_active(void); extern unsigned long nr_iowait(void); +extern void calc_global_load(void); +extern u64 cpu_nr_migrations(int cpu); + +extern unsigned long get_parent_ip(unsigned long addr); struct seq_file; struct cfs_rq; @@ -202,7 +210,8 @@ extern unsigned long long time_sync_thresh; #define task_is_stopped_or_traced(task) \ ((task->state & (__TASK_STOPPED | __TASK_TRACED)) != 0) #define task_contributes_to_load(task) \ - ((task->state & TASK_UNINTERRUPTIBLE) != 0) + ((task->state & TASK_UNINTERRUPTIBLE) != 0 && \ + (task->flags & PF_FREEZING) == 0) #define __set_task_state(tsk, state_value) \ do { (tsk)->state = (state_value); } while (0) @@ -253,6 +262,7 @@ extern void task_rq_unlock_wait(struct task_struct *p); extern cpumask_var_t nohz_cpu_mask; #if defined(CONFIG_SMP) && defined(CONFIG_NO_HZ) extern int select_nohz_load_balancer(int cpu); +extern int get_nohz_load_balancer(void); #else static inline int select_nohz_load_balancer(int cpu) { @@ -293,18 +303,15 @@ extern void sched_show_task(struct task_struct *p); extern void softlockup_tick(void); extern void touch_softlockup_watchdog(void); extern void touch_all_softlockup_watchdogs(void); +extern int proc_dosoftlockup_thresh(struct ctl_table *table, int write, + struct file *filp, void __user *buffer, + size_t *lenp, loff_t *ppos); extern unsigned int softlockup_panic; -extern unsigned long sysctl_hung_task_check_count; -extern unsigned long sysctl_hung_task_timeout_secs; -extern unsigned long sysctl_hung_task_warnings; extern int softlockup_thresh; #else static inline void softlockup_tick(void) { } -static inline void spawn_softlockup_task(void) -{ -} static inline void touch_softlockup_watchdog(void) { } @@ -313,6 +320,15 @@ static inline void touch_all_softlockup_watchdogs(void) } #endif +#ifdef CONFIG_DETECT_HUNG_TASK +extern unsigned int sysctl_hung_task_panic; +extern unsigned long 
sysctl_hung_task_check_count; +extern unsigned long sysctl_hung_task_timeout_secs; +extern unsigned long sysctl_hung_task_warnings; +extern int proc_dohung_task_timeout_secs(struct ctl_table *table, int write, + struct file *filp, void __user *buffer, + size_t *lenp, loff_t *ppos); +#endif /* Attach to any functions which should be ignored in wchan output. */ #define __sched __attribute__((__section__(".sched.text"))) @@ -328,13 +344,27 @@ extern signed long schedule_timeout(signed long timeout); extern signed long schedule_timeout_interruptible(signed long timeout); extern signed long schedule_timeout_killable(signed long timeout); extern signed long schedule_timeout_uninterruptible(signed long timeout); +asmlinkage void __schedule(void); asmlinkage void schedule(void); +extern int mutex_spin_on_owner(struct mutex *lock, struct thread_info *owner); struct nsproxy; struct user_namespace; -/* Maximum number of active map areas.. This is a random (large) number */ -#define DEFAULT_MAX_MAP_COUNT 65536 +/* + * Default maximum number of active map areas, this limits the number of vmas + * per mm struct. Users can overwrite this number by sysctl but there is a + * problem. + * + * When a program's coredump is generated as ELF format, a section is created + * per a vma. In ELF, the number of sections is represented in unsigned short. + * This means the number of sections should be smaller than 65535 at coredump. + * Because the kernel adds some informative sections to a image of program at + * generating coredump, we need some margin. The number of extra sections is + * 1-3 now and depends on arch. We use "5" as safe margin, here. + */ +#define MAPCOUNT_ELF_CORE_MARGIN (5) +#define DEFAULT_MAX_MAP_COUNT (USHORT_MAX - MAPCOUNT_ELF_CORE_MARGIN) extern int sysctl_max_map_count; @@ -386,8 +416,15 @@ extern void arch_unmap_area_topdown(struct mm_struct *, unsigned long); (mm)->hiwater_vm = (mm)->total_vm; \ } while (0) -#define get_mm_hiwater_rss(mm) max((mm)->hiwater_rss, get_mm_rss(mm)) -#define get_mm_hiwater_vm(mm) max((mm)->hiwater_vm, (mm)->total_vm) +static inline unsigned long get_mm_hiwater_rss(struct mm_struct *mm) +{ + return max(mm->hiwater_rss, get_mm_rss(mm)); +} + +static inline unsigned long get_mm_hiwater_vm(struct mm_struct *mm) +{ + return max(mm->hiwater_vm, mm->total_vm); +} extern void set_dumpable(struct mm_struct *mm, int value); extern int get_dumpable(struct mm_struct *mm); @@ -456,16 +493,36 @@ struct task_cputime { #define virt_exp utime #define sched_exp sum_exec_runtime +#define INIT_CPUTIME \ + (struct task_cputime) { \ + .utime = cputime_zero, \ + .stime = cputime_zero, \ + .sum_exec_runtime = 0, \ + } + +/* + * Disable preemption until the scheduler is running. + * Reset by start_kernel()->sched_init()->init_idle(). + * + * We include PREEMPT_ACTIVE to avoid cond_resched() from working + * before the scheduler is active -- see should_resched(). + */ +#define INIT_PREEMPT_COUNT (1 + PREEMPT_ACTIVE) + /** - * struct thread_group_cputime - thread group interval timer counts - * @totals: thread group interval timers; substructure for - * uniprocessor kernel, per-cpu for SMP kernel. + * struct thread_group_cputimer - thread group interval timer counts + * @cputime: thread group interval timers. + * @running: non-zero when there are timers running and + * @cputime receives updates. + * @lock: lock for fields in this struct. * * This structure contains the version of task_cputime, above, that is - * used for thread group CPU clock calculations. 
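The SCHED_RESET_ON_FORK bit added to sched.h above can be ORed into the policy from userspace so that children of a real-time task revert to SCHED_NORMAL. A small sketch of that usage; it assumes a kernel carrying this patch and enough privilege (CAP_SYS_NICE) to switch to SCHED_FIFO.

#include <sched.h>
#include <stdio.h>
#include <sys/wait.h>
#include <unistd.h>

#ifndef SCHED_RESET_ON_FORK
#define SCHED_RESET_ON_FORK 0x40000000   /* value from the header change above */
#endif

int main(void)
{
    struct sched_param param = { .sched_priority = 10 };

    if (sched_setscheduler(0, SCHED_FIFO | SCHED_RESET_ON_FORK, &param)) {
        perror("sched_setscheduler");
        return 1;
    }

    if (fork() == 0) {
        /* Child: policy has been reverted to SCHED_NORMAL (0). */
        printf("child policy:  %d\n", sched_getscheduler(0));
        _exit(0);
    }

    wait(NULL);
    printf("parent policy: %d\n", sched_getscheduler(0));
    return 0;
}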
+ * used for thread group CPU timer calculations. */ -struct thread_group_cputime { - struct task_cputime *totals; +struct thread_group_cputimer { + struct task_cputime cputime; + int running; + spinlock_t lock; }; /* @@ -514,35 +571,18 @@ struct signal_struct { cputime_t it_prof_incr, it_virt_incr; /* - * Thread group totals for process CPU clocks. - * See thread_group_cputime(), et al, for details. + * Thread group totals for process CPU timers. + * See thread_group_cputimer(), et al, for details. */ - struct thread_group_cputime cputime; + struct thread_group_cputimer cputimer; /* Earliest-expiration cache. */ struct task_cputime cputime_expires; struct list_head cpu_timers[3]; - /* job control IDs */ - - /* - * pgrp and session fields are deprecated. - * use the task_session_Xnr and task_pgrp_Xnr routines below - */ - - union { - pid_t pgrp __deprecated; - pid_t __pgrp; - }; - struct pid *tty_old_pgrp; - union { - pid_t session __deprecated; - pid_t __session; - }; - /* boolean value for session group leader */ int leader; @@ -554,7 +594,7 @@ struct signal_struct { * Live threads maintain their own counters and add to these * in __exit_signal, except for the group leader. */ - cputime_t cutime, cstime; + cputime_t utime, stime, cutime, cstime; cputime_t gtime; cputime_t cgtime; unsigned long nvcsw, nivcsw, cnvcsw, cnivcsw; @@ -563,6 +603,14 @@ struct signal_struct { struct task_io_accounting ioac; /* + * Cumulative ns of schedule CPU time fo dead threads in the + * group, not including a zombie group leader, (This only differs + * from jiffies_to_ns(utime + stime) if sched_clock uses something + * other than jiffies.) + */ + unsigned long long sum_sched_runtime; + + /* * We don't bother to synchronize most readers of this at all, * because there is no reader checking a limit that actually needs * to get both rlim_cur and rlim_max atomically, and either one @@ -626,7 +674,6 @@ struct user_struct { atomic_t inotify_devs; /* How many inotify devs does this user have opened? */ #endif #ifdef CONFIG_EPOLL - atomic_t epoll_devs; /* The number of epoll descriptors currently open */ atomic_t epoll_watches; /* The number of file descriptors currently watched */ #endif #ifdef CONFIG_POSIX_MQUEUE @@ -649,9 +696,13 @@ struct user_struct { struct task_group *tg; #ifdef CONFIG_SYSFS struct kobject kobj; - struct work_struct work; + struct delayed_work work; #endif #endif + +#ifdef CONFIG_PERF_COUNTERS + atomic_long_t locked_vm; +#endif }; extern int uids_sysfs_init(void); @@ -747,18 +798,19 @@ enum cpu_idle_type { #define SCHED_LOAD_SCALE_FUZZ SCHED_LOAD_SCALE #ifdef CONFIG_SMP -#define SD_LOAD_BALANCE 1 /* Do load balancing on this domain. */ -#define SD_BALANCE_NEWIDLE 2 /* Balance when about to become idle */ -#define SD_BALANCE_EXEC 4 /* Balance on exec */ -#define SD_BALANCE_FORK 8 /* Balance on fork, clone */ -#define SD_WAKE_IDLE 16 /* Wake to idle CPU on task wakeup */ -#define SD_WAKE_AFFINE 32 /* Wake task to waking CPU */ -#define SD_WAKE_BALANCE 64 /* Perform balancing at task wakeup */ -#define SD_SHARE_CPUPOWER 128 /* Domain members share cpu power */ -#define SD_POWERSAVINGS_BALANCE 256 /* Balance for power savings */ -#define SD_SHARE_PKG_RESOURCES 512 /* Domain members share cpu pkg resources */ -#define SD_SERIALIZE 1024 /* Only a single load balancing instance */ -#define SD_WAKE_IDLE_FAR 2048 /* Gain latency sacrificing cache hit */ +#define SD_LOAD_BALANCE 0x0001 /* Do load balancing on this domain. 
*/ +#define SD_BALANCE_NEWIDLE 0x0002 /* Balance when about to become idle */ +#define SD_BALANCE_EXEC 0x0004 /* Balance on exec */ +#define SD_BALANCE_FORK 0x0008 /* Balance on fork, clone */ +#define SD_WAKE_IDLE 0x0010 /* Wake to idle CPU on task wakeup */ +#define SD_WAKE_AFFINE 0x0020 /* Wake task to waking CPU */ +#define SD_WAKE_BALANCE 0x0040 /* Perform balancing at task wakeup */ +#define SD_SHARE_CPUPOWER 0x0080 /* Domain members share cpu power */ +#define SD_POWERSAVINGS_BALANCE 0x0100 /* Balance for power savings */ +#define SD_SHARE_PKG_RESOURCES 0x0200 /* Domain members share cpu pkg resources */ +#define SD_SERIALIZE 0x0400 /* Only a single load balancing instance */ +#define SD_WAKE_IDLE_FAR 0x0800 /* Gain latency sacrificing cache hit */ +#define SD_PREFER_SIBLING 0x1000 /* Prefer to place tasks in a sibling domain */ enum powersavings_balance_level { POWERSAVINGS_BALANCE_NONE = 0, /* No power saving load balance */ @@ -778,7 +830,7 @@ static inline int sd_balance_for_mc_power(void) if (sched_smt_power_savings) return SD_POWERSAVINGS_BALANCE; - return 0; + return SD_PREFER_SIBLING; } static inline int sd_balance_for_package_power(void) @@ -786,7 +838,7 @@ static inline int sd_balance_for_package_power(void) if (sched_mc_power_savings | sched_smt_power_savings) return SD_POWERSAVINGS_BALANCE; - return 0; + return SD_PREFER_SIBLING; } /* @@ -808,17 +860,21 @@ struct sched_group { /* * CPU power of this group, SCHED_LOAD_SCALE being max power for a - * single CPU. This is read only (except for setup, hotplug CPU). - * Note : Never change cpu_power without recompute its reciprocal + * single CPU. */ - unsigned int __cpu_power; + unsigned int cpu_power; + /* - * reciprocal value of cpu_power to avoid expensive divides - * (see include/linux/reciprocal_div.h) + * The CPUs this group covers. + * + * NOTE: this field is variable length. (Allocated dynamically + * by attaching extra space to the end of the structure, + * depending on how many CPUs the kernel has booted up with) + * + * It is also be embedded into static data structures at build + * time. (See 'struct static_sched_group' in kernel/sched.c) */ - u32 reciprocal_cpu_power; - - unsigned long cpumask[]; + unsigned long cpumask[0]; }; static inline struct cpumask *sched_group_cpus(struct sched_group *sg) @@ -859,6 +915,7 @@ struct sched_domain { unsigned int newidle_idx; unsigned int wake_idx; unsigned int forkexec_idx; + unsigned int smt_gain; int flags; /* See SD_* */ enum sched_domain_level level; @@ -904,8 +961,17 @@ struct sched_domain { char *name; #endif - /* span of all CPUs in this domain */ - unsigned long span[]; + /* + * Span of all CPUs in this domain. + * + * NOTE: this field is variable length. (Allocated dynamically + * by attaching extra space to the end of the structure, + * depending on how many CPUs the kernel has booted up with) + * + * It is also be embedded into static data structures at build + * time. 
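The sched_group/sched_domain comments above describe the "extra space attached to the end of the structure" layout now used for cpumask[] and span[]. A stand-alone sketch of that variable-length-struct pattern with a made-up demo_group type; it only illustrates the allocation idiom, not the scheduler's actual sizing.

#include <stdio.h>
#include <stdlib.h>

struct demo_group {
    unsigned int  cpu_power;
    unsigned long cpumask[0];        /* variable length, sized at allocation */
};

int main(void)
{
    size_t mask_words = 2;           /* enough words for this "machine" */
    struct demo_group *sg;

    sg = calloc(1, sizeof(*sg) + mask_words * sizeof(unsigned long));
    if (!sg)
        return 1;

    sg->cpu_power   = 1024;          /* e.g. SCHED_LOAD_SCALE for one CPU */
    sg->cpumask[0] |= 1UL << 3;      /* mark CPU 3 as a member */

    printf("power=%u cpu3=%lu\n", sg->cpu_power, (sg->cpumask[0] >> 3) & 1UL);
    free(sg);
    return 0;
}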
(See 'struct static_sched_domain' in kernel/sched.c) + */ + unsigned long span[0]; }; static inline struct cpumask *sched_domain_span(struct sched_domain *sd) @@ -1031,11 +1097,18 @@ struct sched_entity { u64 last_wakeup; u64 avg_overlap; + u64 nr_migrations; + + u64 start_runtime; + u64 avg_wakeup; + #ifdef CONFIG_SCHEDSTATS u64 wait_start; u64 wait_max; u64 wait_count; u64 wait_sum; + u64 iowait_count; + u64 iowait_sum; u64 sleep_start; u64 sleep_max; @@ -1046,7 +1119,6 @@ struct sched_entity { u64 exec_max; u64 slice_max; - u64 nr_migrations; u64 nr_migrations_cold; u64 nr_failed_migrations_affine; u64 nr_failed_migrations_running; @@ -1090,6 +1162,8 @@ struct sched_rt_entity { #endif }; +struct rcu_node; + struct task_struct { volatile long state; /* -1 unrunnable, 0 runnable, >0 stopped */ void *stack; @@ -1133,16 +1207,19 @@ struct task_struct { unsigned int policy; cpumask_t cpus_allowed; -#ifdef CONFIG_PREEMPT_RCU +#ifdef CONFIG_TREE_PREEMPT_RCU int rcu_read_lock_nesting; - int rcu_flipctr_idx; -#endif /* #ifdef CONFIG_PREEMPT_RCU */ + char rcu_read_unlock_special; + struct rcu_node *rcu_blocked_node; + struct list_head rcu_node_entry; +#endif /* #ifdef CONFIG_TREE_PREEMPT_RCU */ #if defined(CONFIG_SCHEDSTATS) || defined(CONFIG_TASK_DELAY_ACCT) struct sched_info sched_info; #endif struct list_head tasks; + struct plist_node pushable_tasks; struct mm_struct *mm, *active_mm; @@ -1154,6 +1231,14 @@ struct task_struct { /* ??? */ unsigned int personality; unsigned did_exec:1; + unsigned in_execve:1; /* Tell the LSMs that the process is doing an + * execve */ + unsigned in_iowait:1; + + + /* Revert to default priority/policy when forking */ + unsigned sched_reset_on_fork:1; + pid_t pid; pid_t tgid; @@ -1161,6 +1246,7 @@ struct task_struct { /* Canary value for the -fstack-protector gcc feature */ unsigned long stack_canary; #endif + /* * pointers to (original) parent process, youngest child, younger sibling, * older sibling, respectively. (p->father can be replaced with @@ -1183,18 +1269,11 @@ struct task_struct { struct list_head ptraced; struct list_head ptrace_entry; -#ifdef CONFIG_X86_PTRACE_BTS /* * This is the tracer handle for the ptrace BTS extension. * This field actually belongs to the ptracer task. */ - struct bts_tracer *bts; - /* - * The buffer to hold the BTS data. - */ - void *bts_buffer; - size_t bts_size; -#endif /* CONFIG_X86_PTRACE_BTS */ + struct bts_context *bts; /* PID/PID hash table linkage. */ struct pid_link pids[PIDTYPE_MAX]; @@ -1221,7 +1300,10 @@ struct task_struct { * credentials (COW) */ const struct cred *cred; /* effective (overridable) subjective task * credentials (COW) */ - struct mutex cred_exec_mutex; /* execve vs ptrace cred calculation mutex */ + struct mutex cred_guard_mutex; /* guard against foreign influences on + * credential calculations + * (notably. 
ptrace) */ + struct cred *replacement_session_keyring; /* for KEYCTL_SESSION_TO_PARENT */ char comm[TASK_COMM_LEN]; /* executable name excluding path - access with [gs]et_task_comm (which lock @@ -1233,9 +1315,8 @@ struct task_struct { /* ipc stuff */ struct sysv_sem sysvsem; #endif -#ifdef CONFIG_DETECT_SOFTLOCKUP +#ifdef CONFIG_DETECT_HUNG_TASK /* hung task detection */ - unsigned long last_switch_timestamp; unsigned long last_switch_count; #endif /* CPU-specific state of this task */ @@ -1269,9 +1350,15 @@ struct task_struct { /* Thread group tracking */ u32 parent_exec_id; u32 self_exec_id; -/* Protection of (de-)allocation: mm, files, fs, tty, keyrings */ +/* Protection of (de-)allocation: mm, files, fs, tty, keyrings, mems_allowed, + * mempolicy */ spinlock_t alloc_lock; +#ifdef CONFIG_GENERIC_HARDIRQS + /* IRQ handler threads */ + struct irqaction *irqaction; +#endif + /* Protection of the PI data structures: */ spinlock_t pi_lock; @@ -1307,6 +1394,7 @@ struct task_struct { int lockdep_depth; unsigned int lockdep_recursion; struct held_lock held_locks[MAX_LOCK_DEPTH]; + gfp_t lockdep_reclaim_gfp; #endif /* journalling filesystem info */ @@ -1331,8 +1419,7 @@ struct task_struct { cputime_t acct_timexpd; /* stime + utime since last update */ #endif #ifdef CONFIG_CPUSETS - nodemask_t mems_allowed; - int cpuset_mems_generation; + nodemask_t mems_allowed; /* Protected by alloc_lock */ int cpuset_mem_spread_rotor; #endif #ifdef CONFIG_CGROUPS @@ -1349,8 +1436,13 @@ struct task_struct { struct list_head pi_state_list; struct futex_pi_state *pi_state_cache; #endif +#ifdef CONFIG_PERF_COUNTERS + struct perf_counter_context *perf_counter_ctxp; + struct mutex perf_counter_mutex; + struct list_head perf_counter_list; +#endif #ifdef CONFIG_NUMA - struct mempolicy *mempolicy; + struct mempolicy *mempolicy; /* Protected by alloc_lock */ short il_next; #endif atomic_t fs_excl; /* holding fs exclusive resources */ @@ -1384,6 +1476,8 @@ struct task_struct { int curr_ret_stack; /* Stack of return addresses for return function tracing */ struct ftrace_ret_stack *ret_stack; + /* time stamp for last schedule */ + unsigned long long ftrace_timestamp; /* * Number of functions that haven't been traced * because of depth overrun. @@ -1395,9 +1489,14 @@ struct task_struct { #ifdef CONFIG_TRACING /* state flags for use by tracers */ unsigned long trace; -#endif + /* bitmask of trace recursion */ + unsigned long trace_recursion; +#endif /* CONFIG_TRACING */ }; +/* Future-safe accessor for struct task_struct's cpus_allowed. */ +#define tsk_cpumask(tsk) (&(tsk)->cpus_allowed) + /* * Priority of a process goes from 0..MAX_PRIO-1, valid RT * priority is 0..MAX_RT_PRIO-1, and SCHED_NORMAL/SCHED_BATCH @@ -1429,16 +1528,6 @@ static inline int rt_task(struct task_struct *p) return rt_prio(p->prio); } -static inline void set_task_session(struct task_struct *tsk, pid_t session) -{ - tsk->signal->__session = session; -} - -static inline void set_task_pgrp(struct task_struct *tsk, pid_t pgrp) -{ - tsk->signal->__pgrp = pgrp; -} - static inline struct pid *task_pid(struct task_struct *task) { return task->pids[PIDTYPE_PID].pid; @@ -1449,6 +1538,11 @@ static inline struct pid *task_tgid(struct task_struct *task) return task->group_leader->pids[PIDTYPE_PID].pid; } +/* + * Without tasklist or rcu lock it is not safe to dereference + * the result of task_pgrp/task_session even if task == current, + * we can race with another thread doing sys_setsid/sys_setpgid. 
+ */ static inline struct pid *task_pgrp(struct task_struct *task) { return task->group_leader->pids[PIDTYPE_PGID].pid; @@ -1474,17 +1568,23 @@ struct pid_namespace; * * see also pid_nr() etc in include/linux/pid.h */ +pid_t __task_pid_nr_ns(struct task_struct *task, enum pid_type type, + struct pid_namespace *ns); static inline pid_t task_pid_nr(struct task_struct *tsk) { return tsk->pid; } -pid_t task_pid_nr_ns(struct task_struct *tsk, struct pid_namespace *ns); +static inline pid_t task_pid_nr_ns(struct task_struct *tsk, + struct pid_namespace *ns) +{ + return __task_pid_nr_ns(tsk, PIDTYPE_PID, ns); +} static inline pid_t task_pid_vnr(struct task_struct *tsk) { - return pid_vnr(task_pid(tsk)); + return __task_pid_nr_ns(tsk, PIDTYPE_PID, NULL); } @@ -1501,31 +1601,34 @@ static inline pid_t task_tgid_vnr(struct task_struct *tsk) } -static inline pid_t task_pgrp_nr(struct task_struct *tsk) +static inline pid_t task_pgrp_nr_ns(struct task_struct *tsk, + struct pid_namespace *ns) { - return tsk->signal->__pgrp; + return __task_pid_nr_ns(tsk, PIDTYPE_PGID, ns); } -pid_t task_pgrp_nr_ns(struct task_struct *tsk, struct pid_namespace *ns); - static inline pid_t task_pgrp_vnr(struct task_struct *tsk) { - return pid_vnr(task_pgrp(tsk)); + return __task_pid_nr_ns(tsk, PIDTYPE_PGID, NULL); } -static inline pid_t task_session_nr(struct task_struct *tsk) +static inline pid_t task_session_nr_ns(struct task_struct *tsk, + struct pid_namespace *ns) { - return tsk->signal->__session; + return __task_pid_nr_ns(tsk, PIDTYPE_SID, ns); } -pid_t task_session_nr_ns(struct task_struct *tsk, struct pid_namespace *ns); - static inline pid_t task_session_vnr(struct task_struct *tsk) { - return pid_vnr(task_session(tsk)); + return __task_pid_nr_ns(tsk, PIDTYPE_SID, NULL); } +/* obsolete, do not use */ +static inline pid_t task_pgrp_nr(struct task_struct *tsk) +{ + return task_pgrp_nr_ns(tsk, &init_pid_ns); +} /** * pid_alive - check that a task structure is not stale @@ -1590,6 +1693,7 @@ extern cputime_t task_gtime(struct task_struct *p); #define PF_MEMALLOC 0x00000800 /* Allocating memory */ #define PF_FLUSHER 0x00001000 /* responsible for disk writeback */ #define PF_USED_MATH 0x00002000 /* if unset the fpu must be initialized before use */ +#define PF_FREEZING 0x00004000 /* freeze in progress. do not account to load */ #define PF_NOFREEZE 0x00008000 /* this thread should not be frozen */ #define PF_FROZEN 0x00010000 /* frozen for system suspend */ #define PF_FSTRANS 0x00020000 /* inside a filesystem transaction */ @@ -1632,6 +1736,28 @@ extern cputime_t task_gtime(struct task_struct *p); #define tsk_used_math(p) ((p)->flags & PF_USED_MATH) #define used_math() tsk_used_math(current) +#ifdef CONFIG_TREE_PREEMPT_RCU + +#define RCU_READ_UNLOCK_BLOCKED (1 << 0) /* blocked while in RCU read-side. */ +#define RCU_READ_UNLOCK_NEED_QS (1 << 1) /* RCU core needs CPU response. */ +#define RCU_READ_UNLOCK_GOT_QS (1 << 2) /* CPU has responded to RCU core. 
*/ + +static inline void rcu_copy_process(struct task_struct *p) +{ + p->rcu_read_lock_nesting = 0; + p->rcu_read_unlock_special = 0; + p->rcu_blocked_node = NULL; + INIT_LIST_HEAD(&p->rcu_node_entry); +} + +#else + +static inline void rcu_copy_process(struct task_struct *p) +{ +} + +#endif + #ifdef CONFIG_SMP extern int set_cpus_allowed_ptr(struct task_struct *p, const struct cpumask *new_mask); @@ -1649,6 +1775,16 @@ static inline int set_cpus_allowed(struct task_struct *p, cpumask_t new_mask) return set_cpus_allowed_ptr(p, &new_mask); } +/* + * Architectures can set this to 1 if they have specified + * CONFIG_HAVE_UNSTABLE_SCHED_CLOCK in their arch Kconfig, + * but then during bootup it turns out that sched_clock() + * is reliable after all: + */ +#ifdef CONFIG_HAVE_UNSTABLE_SCHED_CLOCK +extern int sched_clock_stable; +#endif + extern unsigned long long sched_clock(void); extern void sched_clock_init(void); @@ -1711,16 +1847,29 @@ extern unsigned int sysctl_sched_min_granularity; extern unsigned int sysctl_sched_wakeup_granularity; extern unsigned int sysctl_sched_shares_ratelimit; extern unsigned int sysctl_sched_shares_thresh; -#ifdef CONFIG_SCHED_DEBUG extern unsigned int sysctl_sched_child_runs_first; +#ifdef CONFIG_SCHED_DEBUG extern unsigned int sysctl_sched_features; extern unsigned int sysctl_sched_migration_cost; extern unsigned int sysctl_sched_nr_migrate; +extern unsigned int sysctl_sched_time_avg; +extern unsigned int sysctl_timer_migration; int sched_nr_latency_handler(struct ctl_table *table, int write, struct file *file, void __user *buffer, size_t *length, loff_t *ppos); #endif +#ifdef CONFIG_SCHED_DEBUG +static inline unsigned int get_sysctl_timer_migration(void) +{ + return sysctl_timer_migration; +} +#else +static inline unsigned int get_sysctl_timer_migration(void) +{ + return 1; +} +#endif extern unsigned int sysctl_sched_rt_period; extern int sysctl_sched_rt_runtime; @@ -1787,9 +1936,6 @@ extern struct pid_namespace init_pid_ns; /* * find a task by one of its numerical ids * - * find_task_by_pid_type_ns(): - * it is the most generic call - it finds a task by all id, - * type and namespace specified * find_task_by_pid_ns(): * finds a task by its pid in the specified namespace * find_task_by_vpid(): @@ -1798,9 +1944,6 @@ extern struct pid_namespace init_pid_ns; * see also find_vpid() etc in include/linux/pid.h */ -extern struct task_struct *find_task_by_pid_type_ns(int type, int pid, - struct pid_namespace *ns); - extern struct task_struct *find_task_by_vpid(pid_t nr); extern struct task_struct *find_task_by_pid_ns(pid_t nr, struct pid_namespace *ns); @@ -1835,6 +1978,7 @@ extern void sched_dead(struct task_struct *p); extern void proc_caches_init(void); extern void flush_signals(struct task_struct *); +extern void __flush_signals(struct task_struct *); extern void ignore_signals(struct task_struct *); extern void flush_signal_handlers(struct task_struct *, int force_default); extern int dequeue_signal(struct task_struct *tsk, sigset_t *mask, siginfo_t *info); @@ -1925,7 +2069,8 @@ extern void mm_release(struct task_struct *, struct mm_struct *); /* Allocate a new mm structure and copy contents from tsk->mm */ extern struct mm_struct *dup_mm(struct task_struct *tsk); -extern int copy_thread(int, unsigned long, unsigned long, unsigned long, struct task_struct *, struct pt_regs *); +extern int copy_thread(unsigned long, unsigned long, unsigned long, + struct task_struct *, struct pt_regs *); extern void flush_thread(void); extern void exit_thread(void); @@ -1950,8 
+2095,10 @@ extern void set_task_comm(struct task_struct *tsk, char *from); extern char *get_task_comm(char *to, struct task_struct *tsk); #ifdef CONFIG_SMP +extern void wait_task_context_switch(struct task_struct *p); extern unsigned long wait_task_inactive(struct task_struct *, long match_state); #else +static inline void wait_task_context_switch(struct task_struct *p) {} static inline unsigned long wait_task_inactive(struct task_struct *p, long match_state) { @@ -1959,12 +2106,13 @@ static inline unsigned long wait_task_inactive(struct task_struct *p, } #endif -#define next_task(p) list_entry(rcu_dereference((p)->tasks.next), struct task_struct, tasks) +#define next_task(p) \ + list_entry_rcu((p)->tasks.next, struct task_struct, tasks) #define for_each_process(p) \ for (p = &init_task ; (p = next_task(p)) != &init_task ; ) -extern bool is_single_threaded(struct task_struct *); +extern bool current_is_single_threaded(void); /* * Careful: do_each_thread/while_each_thread is a double loop so @@ -1998,8 +2146,8 @@ int same_thread_group(struct task_struct *p1, struct task_struct *p2) static inline struct task_struct *next_thread(const struct task_struct *p) { - return list_entry(rcu_dereference(p->thread_group.next), - struct task_struct, thread_group); + return list_entry_rcu(p->thread_group.next, + struct task_struct, thread_group); } static inline int thread_group_empty(struct task_struct *p) @@ -2010,6 +2158,11 @@ static inline int thread_group_empty(struct task_struct *p) #define delay_group_leader(p) \ (thread_group_leader(p) && !thread_group_empty(p)) +static inline int task_detached(struct task_struct *p) +{ + return p->exit_signal == -1; +} + /* * Protects ->fs, ->files, ->mm, ->group_info, ->comm, keyring * subscriptions and synchronises with wait4(). Also used in procfs. Also @@ -2066,6 +2219,19 @@ static inline int object_is_on_stack(void *obj) extern void thread_info_cache_init(void); +#ifdef CONFIG_DEBUG_STACK_USAGE +static inline unsigned long stack_not_used(struct task_struct *p) +{ + unsigned long *n = end_of_stack(p); + + do { /* Skip over canary */ + n++; + } while (!*n); + + return (unsigned long)n - (unsigned long)end_of_stack(p); +} +#endif + /* set thread flags in other task's structures * - see asm/thread_info.h for TIF_xxxx flags available */ @@ -2109,6 +2275,12 @@ static inline int test_tsk_need_resched(struct task_struct *tsk) return unlikely(test_tsk_thread_flag(tsk,TIF_NEED_RESCHED)); } +static inline int restart_syscall(void) +{ + set_tsk_thread_flag(current, TIF_SIGPENDING); + return -ERESTARTNOINTR; +} + static inline int signal_pending(struct task_struct *p) { return unlikely(test_tsk_thread_flag(p,TIF_SIGPENDING)); @@ -2144,23 +2316,31 @@ static inline int need_resched(void) * cond_resched_softirq() will enable bhs before scheduling. 
*/ extern int _cond_resched(void); -#ifdef CONFIG_PREEMPT_BKL -static inline int cond_resched(void) -{ - return 0; -} + +#define cond_resched() ({ \ + __might_sleep(__FILE__, __LINE__, 0); \ + _cond_resched(); \ +}) + +extern int __cond_resched_lock(spinlock_t *lock); + +#ifdef CONFIG_PREEMPT +#define PREEMPT_LOCK_OFFSET PREEMPT_OFFSET #else -static inline int cond_resched(void) -{ - return _cond_resched(); -} +#define PREEMPT_LOCK_OFFSET 0 #endif -extern int cond_resched_lock(spinlock_t * lock); -extern int cond_resched_softirq(void); -static inline int cond_resched_bkl(void) -{ - return _cond_resched(); -} + +#define cond_resched_lock(lock) ({ \ + __might_sleep(__FILE__, __LINE__, PREEMPT_LOCK_OFFSET); \ + __cond_resched_lock(lock); \ +}) + +extern int __cond_resched_softirq(void); + +#define cond_resched_softirq() ({ \ + __might_sleep(__FILE__, __LINE__, SOFTIRQ_OFFSET); \ + __cond_resched_softirq(); \ +}) /* * Does a critical section need to be broken due to another @@ -2179,25 +2359,18 @@ static inline int spin_needbreak(spinlock_t *lock) /* * Thread group CPU time accounting. */ - -extern int thread_group_cputime_alloc(struct task_struct *); -extern void thread_group_cputime(struct task_struct *, struct task_cputime *); +void thread_group_cputime(struct task_struct *tsk, struct task_cputime *times); +void thread_group_cputimer(struct task_struct *tsk, struct task_cputime *times); static inline void thread_group_cputime_init(struct signal_struct *sig) { - sig->cputime.totals = NULL; -} - -static inline int thread_group_cputime_clone_thread(struct task_struct *curr) -{ - if (curr->signal->cputime.totals) - return 0; - return thread_group_cputime_alloc(curr); + sig->cputimer.cputime = INIT_CPUTIME; + spin_lock_init(&sig->cputimer.lock); + sig->cputimer.running = 0; } static inline void thread_group_cputime_free(struct signal_struct *sig) { - free_percpu(sig->cputime.totals); } /* @@ -2277,9 +2450,13 @@ extern long sched_group_rt_runtime(struct task_group *tg); extern int sched_group_set_rt_period(struct task_group *tg, long rt_period_us); extern long sched_group_rt_period(struct task_group *tg); +extern int sched_rt_can_attach(struct task_group *tg, struct task_struct *tsk); #endif #endif +extern int task_can_switch_user(struct user_struct *up, + struct task_struct *tsk); + #ifdef CONFIG_TASK_XACCT static inline void add_rchar(struct task_struct *tsk, ssize_t amt) { @@ -2322,6 +2499,13 @@ static inline void inc_syscw(struct task_struct *tsk) #define TASK_SIZE_OF(tsk) TASK_SIZE #endif +/* + * Call the function if the target task is executing on a CPU right now: + */ +extern void task_oncpu_function_call(struct task_struct *p, + void (*func) (void *info), void *info); + + #ifdef CONFIG_MM_OWNER extern void mm_update_next_owner(struct mm_struct *mm); extern void mm_init_owner(struct mm_struct *mm, struct task_struct *p); diff --git a/include/linux/sctp.h b/include/linux/sctp.h index 8ba1c320f97..b464b9d3d24 100644 --- a/include/linux/sctp.h +++ b/include/linux/sctp.h @@ -60,7 +60,7 @@ typedef struct sctphdr { __be16 source; __be16 dest; __be32 vtag; - __be32 checksum; + __le32 checksum; } __attribute__((packed)) sctp_sctphdr_t; #ifdef __KERNEL__ @@ -172,35 +172,35 @@ typedef struct sctp_paramhdr { typedef enum { /* RFC 2960 Section 3.3.5 */ - SCTP_PARAM_HEARTBEAT_INFO = __constant_htons(1), + SCTP_PARAM_HEARTBEAT_INFO = cpu_to_be16(1), /* RFC 2960 Section 3.3.2.1 */ - SCTP_PARAM_IPV4_ADDRESS = __constant_htons(5), - SCTP_PARAM_IPV6_ADDRESS = __constant_htons(6), - SCTP_PARAM_STATE_COOKIE = 
__constant_htons(7), - SCTP_PARAM_UNRECOGNIZED_PARAMETERS = __constant_htons(8), - SCTP_PARAM_COOKIE_PRESERVATIVE = __constant_htons(9), - SCTP_PARAM_HOST_NAME_ADDRESS = __constant_htons(11), - SCTP_PARAM_SUPPORTED_ADDRESS_TYPES = __constant_htons(12), - SCTP_PARAM_ECN_CAPABLE = __constant_htons(0x8000), + SCTP_PARAM_IPV4_ADDRESS = cpu_to_be16(5), + SCTP_PARAM_IPV6_ADDRESS = cpu_to_be16(6), + SCTP_PARAM_STATE_COOKIE = cpu_to_be16(7), + SCTP_PARAM_UNRECOGNIZED_PARAMETERS = cpu_to_be16(8), + SCTP_PARAM_COOKIE_PRESERVATIVE = cpu_to_be16(9), + SCTP_PARAM_HOST_NAME_ADDRESS = cpu_to_be16(11), + SCTP_PARAM_SUPPORTED_ADDRESS_TYPES = cpu_to_be16(12), + SCTP_PARAM_ECN_CAPABLE = cpu_to_be16(0x8000), /* AUTH Extension Section 3 */ - SCTP_PARAM_RANDOM = __constant_htons(0x8002), - SCTP_PARAM_CHUNKS = __constant_htons(0x8003), - SCTP_PARAM_HMAC_ALGO = __constant_htons(0x8004), + SCTP_PARAM_RANDOM = cpu_to_be16(0x8002), + SCTP_PARAM_CHUNKS = cpu_to_be16(0x8003), + SCTP_PARAM_HMAC_ALGO = cpu_to_be16(0x8004), /* Add-IP: Supported Extensions, Section 4.2 */ - SCTP_PARAM_SUPPORTED_EXT = __constant_htons(0x8008), + SCTP_PARAM_SUPPORTED_EXT = cpu_to_be16(0x8008), /* PR-SCTP Sec 3.1 */ - SCTP_PARAM_FWD_TSN_SUPPORT = __constant_htons(0xc000), + SCTP_PARAM_FWD_TSN_SUPPORT = cpu_to_be16(0xc000), /* Add-IP Extension. Section 3.2 */ - SCTP_PARAM_ADD_IP = __constant_htons(0xc001), - SCTP_PARAM_DEL_IP = __constant_htons(0xc002), - SCTP_PARAM_ERR_CAUSE = __constant_htons(0xc003), - SCTP_PARAM_SET_PRIMARY = __constant_htons(0xc004), - SCTP_PARAM_SUCCESS_REPORT = __constant_htons(0xc005), - SCTP_PARAM_ADAPTATION_LAYER_IND = __constant_htons(0xc006), + SCTP_PARAM_ADD_IP = cpu_to_be16(0xc001), + SCTP_PARAM_DEL_IP = cpu_to_be16(0xc002), + SCTP_PARAM_ERR_CAUSE = cpu_to_be16(0xc003), + SCTP_PARAM_SET_PRIMARY = cpu_to_be16(0xc004), + SCTP_PARAM_SUCCESS_REPORT = cpu_to_be16(0xc005), + SCTP_PARAM_ADAPTATION_LAYER_IND = cpu_to_be16(0xc006), } sctp_param_t; /* enum */ @@ -212,13 +212,13 @@ typedef enum { * */ typedef enum { - SCTP_PARAM_ACTION_DISCARD = __constant_htons(0x0000), - SCTP_PARAM_ACTION_DISCARD_ERR = __constant_htons(0x4000), - SCTP_PARAM_ACTION_SKIP = __constant_htons(0x8000), - SCTP_PARAM_ACTION_SKIP_ERR = __constant_htons(0xc000), + SCTP_PARAM_ACTION_DISCARD = cpu_to_be16(0x0000), + SCTP_PARAM_ACTION_DISCARD_ERR = cpu_to_be16(0x4000), + SCTP_PARAM_ACTION_SKIP = cpu_to_be16(0x8000), + SCTP_PARAM_ACTION_SKIP_ERR = cpu_to_be16(0xc000), } sctp_param_action_t; -enum { SCTP_PARAM_ACTION_MASK = __constant_htons(0xc000), }; +enum { SCTP_PARAM_ACTION_MASK = cpu_to_be16(0xc000), }; /* RFC 2960 Section 3.3.1 Payload Data (DATA) (0) */ @@ -457,17 +457,17 @@ typedef struct sctp_operr_chunk { */ typedef enum { - SCTP_ERROR_NO_ERROR = __constant_htons(0x00), - SCTP_ERROR_INV_STRM = __constant_htons(0x01), - SCTP_ERROR_MISS_PARAM = __constant_htons(0x02), - SCTP_ERROR_STALE_COOKIE = __constant_htons(0x03), - SCTP_ERROR_NO_RESOURCE = __constant_htons(0x04), - SCTP_ERROR_DNS_FAILED = __constant_htons(0x05), - SCTP_ERROR_UNKNOWN_CHUNK = __constant_htons(0x06), - SCTP_ERROR_INV_PARAM = __constant_htons(0x07), - SCTP_ERROR_UNKNOWN_PARAM = __constant_htons(0x08), - SCTP_ERROR_NO_DATA = __constant_htons(0x09), - SCTP_ERROR_COOKIE_IN_SHUTDOWN = __constant_htons(0x0a), + SCTP_ERROR_NO_ERROR = cpu_to_be16(0x00), + SCTP_ERROR_INV_STRM = cpu_to_be16(0x01), + SCTP_ERROR_MISS_PARAM = cpu_to_be16(0x02), + SCTP_ERROR_STALE_COOKIE = cpu_to_be16(0x03), + SCTP_ERROR_NO_RESOURCE = cpu_to_be16(0x04), + SCTP_ERROR_DNS_FAILED = cpu_to_be16(0x05), + 
SCTP_ERROR_UNKNOWN_CHUNK = cpu_to_be16(0x06), + SCTP_ERROR_INV_PARAM = cpu_to_be16(0x07), + SCTP_ERROR_UNKNOWN_PARAM = cpu_to_be16(0x08), + SCTP_ERROR_NO_DATA = cpu_to_be16(0x09), + SCTP_ERROR_COOKIE_IN_SHUTDOWN = cpu_to_be16(0x0a), /* SCTP Implementation Guide: @@ -476,9 +476,9 @@ typedef enum { * 13 Protocol Violation */ - SCTP_ERROR_RESTART = __constant_htons(0x0b), - SCTP_ERROR_USER_ABORT = __constant_htons(0x0c), - SCTP_ERROR_PROTO_VIOLATION = __constant_htons(0x0d), + SCTP_ERROR_RESTART = cpu_to_be16(0x0b), + SCTP_ERROR_USER_ABORT = cpu_to_be16(0x0c), + SCTP_ERROR_PROTO_VIOLATION = cpu_to_be16(0x0d), /* ADDIP Section 3.3 New Error Causes * @@ -487,17 +487,17 @@ typedef enum { * * Value Cause Code * --------- ---------------- - * 0x0100 Request to Delete Last Remaining IP Address. - * 0x0101 Operation Refused Due to Resource Shortage. - * 0x0102 Request to Delete Source IP Address. - * 0x0103 Association Aborted due to illegal ASCONF-ACK - * 0x0104 Request refused - no authorization. + * 0x00A0 Request to Delete Last Remaining IP Address. + * 0x00A1 Operation Refused Due to Resource Shortage. + * 0x00A2 Request to Delete Source IP Address. + * 0x00A3 Association Aborted due to illegal ASCONF-ACK + * 0x00A4 Request refused - no authorization. */ - SCTP_ERROR_DEL_LAST_IP = __constant_htons(0x0100), - SCTP_ERROR_RSRC_LOW = __constant_htons(0x0101), - SCTP_ERROR_DEL_SRC_IP = __constant_htons(0x0102), - SCTP_ERROR_ASCONF_ACK = __constant_htons(0x0103), - SCTP_ERROR_REQ_REFUSED = __constant_htons(0x0104), + SCTP_ERROR_DEL_LAST_IP = cpu_to_be16(0x00A0), + SCTP_ERROR_RSRC_LOW = cpu_to_be16(0x00A1), + SCTP_ERROR_DEL_SRC_IP = cpu_to_be16(0x00A2), + SCTP_ERROR_ASCONF_ACK = cpu_to_be16(0x00A3), + SCTP_ERROR_REQ_REFUSED = cpu_to_be16(0x00A4), /* AUTH Section 4. 
New Error Cause * @@ -509,7 +509,7 @@ typedef enum { * -------------------------------------------------------------- * 0x0105 Unsupported HMAC Identifier */ - SCTP_ERROR_UNSUP_HMAC = __constant_htons(0x0105) + SCTP_ERROR_UNSUP_HMAC = cpu_to_be16(0x0105) } sctp_error_t; diff --git a/include/linux/seccomp.h b/include/linux/seccomp.h index 262a8dccfa8..167c33361d9 100644 --- a/include/linux/seccomp.h +++ b/include/linux/seccomp.h @@ -21,6 +21,8 @@ extern long prctl_set_seccomp(unsigned long); #else /* CONFIG_SECCOMP */ +#include <linux/errno.h> + typedef struct { } seccomp_t; #define secure_computing(x) do { } while (0) diff --git a/include/linux/security.h b/include/linux/security.h index 1f2ab6353c0..d050b66ab9e 100644 --- a/include/linux/security.h +++ b/include/linux/security.h @@ -28,10 +28,12 @@ #include <linux/resource.h> #include <linux/sem.h> #include <linux/shm.h> +#include <linux/mm.h> /* PAGE_ALIGN */ #include <linux/msg.h> #include <linux/sched.h> #include <linux/key.h> #include <linux/xfrm.h> +#include <linux/gfp.h> #include <net/flow.h> /* Maximum number of letters for an LSM name string */ @@ -51,7 +53,7 @@ struct audit_krule; extern int cap_capable(struct task_struct *tsk, const struct cred *cred, int cap, int audit); extern int cap_settime(struct timespec *ts, struct timezone *tz); -extern int cap_ptrace_may_access(struct task_struct *child, unsigned int mode); +extern int cap_ptrace_access_check(struct task_struct *child, unsigned int mode); extern int cap_ptrace_traceme(struct task_struct *parent); extern int cap_capget(struct task_struct *target, kernel_cap_t *effective, kernel_cap_t *inheritable, kernel_cap_t *permitted); extern int cap_capset(struct cred *new, const struct cred *old, @@ -65,6 +67,9 @@ extern int cap_inode_setxattr(struct dentry *dentry, const char *name, extern int cap_inode_removexattr(struct dentry *dentry, const char *name); extern int cap_inode_need_killpriv(struct dentry *dentry); extern int cap_inode_killpriv(struct dentry *dentry); +extern int cap_file_mmap(struct file *file, unsigned long reqprot, + unsigned long prot, unsigned long flags, + unsigned long addr, unsigned long addr_only); extern int cap_task_fix_setuid(struct cred *new, const struct cred *old, int flags); extern int cap_task_prctl(int option, unsigned long arg2, unsigned long arg3, unsigned long arg4, unsigned long arg5); @@ -91,6 +96,7 @@ extern int cap_netlink_send(struct sock *sk, struct sk_buff *skb); extern int cap_netlink_recv(struct sk_buff *skb, int cap); extern unsigned long mmap_min_addr; +extern unsigned long dac_mmap_min_addr; /* * Values used in the task_security_ops calls */ @@ -115,6 +121,21 @@ struct request_sock; #define LSM_UNSAFE_PTRACE 2 #define LSM_UNSAFE_PTRACE_CAP 4 +/* + * If a hint addr is less than mmap_min_addr change hint to be as + * low as possible but still greater than mmap_min_addr + */ +static inline unsigned long round_hint_to_min(unsigned long hint) +{ + hint &= PAGE_MASK; + if (((void *)hint != NULL) && + (hint < mmap_min_addr)) + return PAGE_ALIGN(mmap_min_addr); + return hint; +} +extern int mmap_min_addr_handler(struct ctl_table *table, int write, struct file *filp, + void __user *buffer, size_t *lenp, loff_t *ppos); + #ifdef CONFIG_SECURITY struct security_mnt_opts { @@ -632,6 +653,11 @@ static inline void security_free_mnt_opts(struct security_mnt_opts *opts) * manual page for definitions of the @clone_flags. * @clone_flags contains the flags indicating what should be shared. * Return 0 if permission is granted. 
+ * @cred_alloc_blank: + * @cred points to the credentials. + * @gfp indicates the atomicity of any memory allocations. + * Only allocate sufficient memory and attach to @cred such that + * cred_transfer() will not get ENOMEM. * @cred_free: * @cred points to the credentials. * Deallocate and clear the cred->security field in a set of credentials. @@ -644,6 +670,10 @@ static inline void security_free_mnt_opts(struct security_mnt_opts *opts) * @new points to the new credentials. * @old points to the original credentials. * Install a new set of credentials. + * @cred_transfer: + * @new points to the new credentials. + * @old points to the original credentials. + * Transfer data from original creds to new creds * @kernel_act_as: * Set the credentials for a kernel service to act as (subjective context). * @new points to the credentials to be modified. @@ -657,6 +687,10 @@ static inline void security_free_mnt_opts(struct security_mnt_opts *opts) * @inode points to the inode to use as a reference. * The current task must be the one that nominated @inode. * Return 0 if successful. + * @kernel_module_request: + * Ability to trigger the kernel to automatically upcall to userspace for + * userspace to load a kernel module with the given name. + * Return 0 if successful. * @task_setuid: * Check permission before setting one or more of the user identity * attributes of the current process. The @flags parameter indicates @@ -880,11 +914,6 @@ static inline void security_free_mnt_opts(struct security_mnt_opts *opts) * @sock contains the listening socket structure. * @newsock contains the newly created server socket for connection. * Return 0 if permission is granted. - * @socket_post_accept: - * This hook allows a security module to copy security - * information into the newly created socket's inode. - * @sock contains the listening socket structure. - * @newsock contains the newly created server socket for connection. * @socket_sendmsg: * Check permission before transmitting a message to another socket. * @sock contains the socket structure. @@ -978,6 +1007,17 @@ static inline void security_free_mnt_opts(struct security_mnt_opts *opts) * Sets the connection's peersid to the secmark on skb. * @req_classify_flow: * Sets the flow's sid to the openreq sid. + * @tun_dev_create: + * Check permissions prior to creating a new TUN device. + * @tun_dev_post_create: + * This hook allows a module to update or allocate a per-socket security + * structure. + * @sk contains the newly created sock structure. + * @tun_dev_attach: + * Check permissions prior to attaching to a persistent TUN device. This + * hook can also be used by the module to update any security state + * associated with the TUN device's sock structure. + * @sk contains the existing sock structure. * * Security hooks for XFRM operations. * @@ -1072,6 +1112,13 @@ static inline void security_free_mnt_opts(struct security_mnt_opts *opts) * Return the length of the string (including terminating NUL) or -ve if * an error. * May also return 0 (and a NULL buffer pointer) if there is no label. + * @key_session_to_parent: + * Forcibly assign the session keyring from a process to its parent + * process. + * @cred: Pointer to process's credentials + * @parent_cred: Pointer to parent process's credentials + * @keyring: Proposed new session keyring + * Return 0 if permission is granted, -ve error otherwise. * * Security hooks affecting all System V IPC operations. 
* @@ -1213,7 +1260,7 @@ static inline void security_free_mnt_opts(struct security_mnt_opts *opts) * @alter contains the flag indicating whether changes are to be made. * Return 0 if permission is granted. * - * @ptrace_may_access: + * @ptrace_access_check: * Check permission before allowing the current process to trace the * @child process. * Security modules may also want to perform a process tracing check @@ -1228,7 +1275,7 @@ static inline void security_free_mnt_opts(struct security_mnt_opts *opts) * Check that the @parent process has sufficient permission to trace the * current process before allowing the current process to present itself * to the @parent process for tracing. - * The parent process will still have to undergo the ptrace_may_access + * The parent process will still have to undergo the ptrace_access_check * checks before it is allowed to trace this one. * @parent contains the task_struct structure for debugger process. * Return 0 if permission is granted. @@ -1335,12 +1382,47 @@ static inline void security_free_mnt_opts(struct security_mnt_opts *opts) * audit_rule_init. * @rule contains the allocated rule * + * @inode_notifysecctx: + * Notify the security module of what the security context of an inode + * should be. Initializes the incore security context managed by the + * security module for this inode. Example usage: NFS client invokes + * this hook to initialize the security context in its incore inode to the + * value provided by the server for the file when the server returned the + * file's attributes to the client. + * + * Must be called with inode->i_mutex locked. + * + * @inode we wish to set the security context of. + * @ctx contains the string which we wish to set in the inode. + * @ctxlen contains the length of @ctx. + * + * @inode_setsecctx: + * Change the security context of an inode. Updates the + * incore security context managed by the security module and invokes the + * fs code as needed (via __vfs_setxattr_noperm) to update any backing + * xattrs that represent the context. Example usage: NFS server invokes + * this hook to change the security context in its incore inode and on the + * backing filesystem to a value provided by the client on a SETATTR + * operation. + * + * Must be called with inode->i_mutex locked. + * + * @dentry contains the inode we wish to set the security context of. + * @ctx contains the string which we wish to set in the inode. + * @ctxlen contains the length of @ctx. + * + * @inode_getsecctx: + * Returns a string containing all relavent security context information + * + * @inode we wish to set the security context of. + * @ctx is a pointer in which to place the allocated security context. + * @ctxlen points to the place to put the length of @ctx. * This is the main security structure. 
*/ struct security_operations { char name[SECURITY_NAME_MAX + 1]; - int (*ptrace_may_access) (struct task_struct *child, unsigned int mode); + int (*ptrace_access_check) (struct task_struct *child, unsigned int mode); int (*ptrace_traceme) (struct task_struct *parent); int (*capget) (struct task_struct *target, kernel_cap_t *effective, @@ -1467,12 +1549,15 @@ struct security_operations { int (*dentry_open) (struct file *file, const struct cred *cred); int (*task_create) (unsigned long clone_flags); + int (*cred_alloc_blank) (struct cred *cred, gfp_t gfp); void (*cred_free) (struct cred *cred); int (*cred_prepare)(struct cred *new, const struct cred *old, gfp_t gfp); void (*cred_commit)(struct cred *new, const struct cred *old); + void (*cred_transfer)(struct cred *new, const struct cred *old); int (*kernel_act_as)(struct cred *new, u32 secid); int (*kernel_create_files_as)(struct cred *new, struct inode *inode); + int (*kernel_module_request)(void); int (*task_setuid) (uid_t id0, uid_t id1, uid_t id2, int flags); int (*task_fix_setuid) (struct cred *new, const struct cred *old, int flags); @@ -1540,6 +1625,10 @@ struct security_operations { int (*secctx_to_secid) (const char *secdata, u32 seclen, u32 *secid); void (*release_secctx) (char *secdata, u32 seclen); + int (*inode_notifysecctx)(struct inode *inode, void *ctx, u32 ctxlen); + int (*inode_setsecctx)(struct dentry *dentry, void *ctx, u32 ctxlen); + int (*inode_getsecctx)(struct inode *inode, void **ctx, u32 *ctxlen); + #ifdef CONFIG_SECURITY_NETWORK int (*unix_stream_connect) (struct socket *sock, struct socket *other, struct sock *newsk); @@ -1554,8 +1643,6 @@ struct security_operations { struct sockaddr *address, int addrlen); int (*socket_listen) (struct socket *sock, int backlog); int (*socket_accept) (struct socket *sock, struct socket *newsock); - void (*socket_post_accept) (struct socket *sock, - struct socket *newsock); int (*socket_sendmsg) (struct socket *sock, struct msghdr *msg, int size); int (*socket_recvmsg) (struct socket *sock, @@ -1578,6 +1665,9 @@ struct security_operations { void (*inet_csk_clone) (struct sock *newsk, const struct request_sock *req); void (*inet_conn_established) (struct sock *sk, struct sk_buff *skb); void (*req_classify_flow) (const struct request_sock *req, struct flowi *fl); + int (*tun_dev_create)(void); + void (*tun_dev_post_create)(struct sock *sk); + int (*tun_dev_attach)(struct sock *sk); #endif /* CONFIG_SECURITY_NETWORK */ #ifdef CONFIG_SECURITY_NETWORK_XFRM @@ -1606,6 +1696,9 @@ struct security_operations { const struct cred *cred, key_perm_t perm); int (*key_getsecurity)(struct key *key, char **_buffer); + int (*key_session_to_parent)(const struct cred *cred, + const struct cred *parent_cred, + struct key *key); #endif /* CONFIG_KEYS */ #ifdef CONFIG_AUDIT @@ -1623,7 +1716,7 @@ extern int security_module_enable(struct security_operations *ops); extern int register_security(struct security_operations *ops); /* Security operations */ -int security_ptrace_may_access(struct task_struct *child, unsigned int mode); +int security_ptrace_access_check(struct task_struct *child, unsigned int mode); int security_ptrace_traceme(struct task_struct *parent); int security_capget(struct task_struct *target, kernel_cap_t *effective, @@ -1722,11 +1815,14 @@ int security_file_send_sigiotask(struct task_struct *tsk, int security_file_receive(struct file *file); int security_dentry_open(struct file *file, const struct cred *cred); int security_task_create(unsigned long clone_flags); +int 
security_cred_alloc_blank(struct cred *cred, gfp_t gfp); void security_cred_free(struct cred *cred); int security_prepare_creds(struct cred *new, const struct cred *old, gfp_t gfp); void security_commit_creds(struct cred *new, const struct cred *old); +void security_transfer_creds(struct cred *new, const struct cred *old); int security_kernel_act_as(struct cred *new, u32 secid); int security_kernel_create_files_as(struct cred *new, struct inode *inode); +int security_kernel_module_request(void); int security_task_setuid(uid_t id0, uid_t id1, uid_t id2, int flags); int security_task_fix_setuid(struct cred *new, const struct cred *old, int flags); @@ -1782,6 +1878,9 @@ int security_secid_to_secctx(u32 secid, char **secdata, u32 *seclen); int security_secctx_to_secid(const char *secdata, u32 seclen, u32 *secid); void security_release_secctx(char *secdata, u32 seclen); +int security_inode_notifysecctx(struct inode *inode, void *ctx, u32 ctxlen); +int security_inode_setsecctx(struct dentry *dentry, void *ctx, u32 ctxlen); +int security_inode_getsecctx(struct inode *inode, void **ctx, u32 *ctxlen); #else /* CONFIG_SECURITY */ struct security_mnt_opts { }; @@ -1804,10 +1903,10 @@ static inline int security_init(void) return 0; } -static inline int security_ptrace_may_access(struct task_struct *child, +static inline int security_ptrace_access_check(struct task_struct *child, unsigned int mode) { - return cap_ptrace_may_access(child, mode); + return cap_ptrace_access_check(child, mode); } static inline int security_ptrace_traceme(struct task_struct *parent) @@ -2203,7 +2302,7 @@ static inline int security_file_mmap(struct file *file, unsigned long reqprot, unsigned long addr, unsigned long addr_only) { - return 0; + return cap_file_mmap(file, reqprot, prot, flags, addr, addr_only); } static inline int security_file_mprotect(struct vm_area_struct *vma, @@ -2252,6 +2351,11 @@ static inline int security_task_create(unsigned long clone_flags) return 0; } +static inline int security_cred_alloc_blank(struct cred *cred, gfp_t gfp) +{ + return 0; +} + static inline void security_cred_free(struct cred *cred) { } @@ -2267,6 +2371,11 @@ static inline void security_commit_creds(struct cred *new, { } +static inline void security_transfer_creds(struct cred *new, + const struct cred *old) +{ +} + static inline int security_kernel_act_as(struct cred *cred, u32 secid) { return 0; @@ -2278,6 +2387,11 @@ static inline int security_kernel_create_files_as(struct cred *cred, return 0; } +static inline int security_kernel_module_request(void) +{ + return 0; +} + static inline int security_task_setuid(uid_t id0, uid_t id1, uid_t id2, int flags) { @@ -2523,6 +2637,19 @@ static inline int security_secctx_to_secid(const char *secdata, static inline void security_release_secctx(char *secdata, u32 seclen) { } + +static inline int security_inode_notifysecctx(struct inode *inode, void *ctx, u32 ctxlen) +{ + return -EOPNOTSUPP; +} +static inline int security_inode_setsecctx(struct dentry *dentry, void *ctx, u32 ctxlen) +{ + return -EOPNOTSUPP; +} +static inline int security_inode_getsecctx(struct inode *inode, void **ctx, u32 *ctxlen) +{ + return -EOPNOTSUPP; +} #endif /* CONFIG_SECURITY */ #ifdef CONFIG_SECURITY_NETWORK @@ -2537,7 +2664,6 @@ int security_socket_bind(struct socket *sock, struct sockaddr *address, int addr int security_socket_connect(struct socket *sock, struct sockaddr *address, int addrlen); int security_socket_listen(struct socket *sock, int backlog); int security_socket_accept(struct socket *sock, struct 
socket *newsock); -void security_socket_post_accept(struct socket *sock, struct socket *newsock); int security_socket_sendmsg(struct socket *sock, struct msghdr *msg, int size); int security_socket_recvmsg(struct socket *sock, struct msghdr *msg, int size, int flags); @@ -2562,6 +2688,9 @@ void security_inet_csk_clone(struct sock *newsk, const struct request_sock *req); void security_inet_conn_established(struct sock *sk, struct sk_buff *skb); +int security_tun_dev_create(void); +void security_tun_dev_post_create(struct sock *sk); +int security_tun_dev_attach(struct sock *sk); #else /* CONFIG_SECURITY_NETWORK */ static inline int security_unix_stream_connect(struct socket *sock, @@ -2616,11 +2745,6 @@ static inline int security_socket_accept(struct socket *sock, return 0; } -static inline void security_socket_post_accept(struct socket *sock, - struct socket *newsock) -{ -} - static inline int security_socket_sendmsg(struct socket *sock, struct msghdr *msg, int size) { @@ -2717,6 +2841,20 @@ static inline void security_inet_conn_established(struct sock *sk, struct sk_buff *skb) { } + +static inline int security_tun_dev_create(void) +{ + return 0; +} + +static inline void security_tun_dev_post_create(struct sock *sk) +{ +} + +static inline int security_tun_dev_attach(struct sock *sk) +{ + return 0; +} #endif /* CONFIG_SECURITY_NETWORK */ #ifdef CONFIG_SECURITY_NETWORK_XFRM @@ -2873,6 +3011,9 @@ void security_key_free(struct key *key); int security_key_permission(key_ref_t key_ref, const struct cred *cred, key_perm_t perm); int security_key_getsecurity(struct key *key, char **_buffer); +int security_key_session_to_parent(const struct cred *cred, + const struct cred *parent_cred, + struct key *key); #else @@ -2900,6 +3041,13 @@ static inline int security_key_getsecurity(struct key *key, char **_buffer) return 0; } +static inline int security_key_session_to_parent(const struct cred *cred, + const struct cred *parent_cred, + struct key *key) +{ + return 0; +} + #endif #endif /* CONFIG_KEYS */ @@ -2966,5 +3114,28 @@ static inline void securityfs_remove(struct dentry *dentry) #endif +#ifdef CONFIG_SECURITY + +static inline char *alloc_secdata(void) +{ + return (char *)get_zeroed_page(GFP_KERNEL); +} + +static inline void free_secdata(void *secdata) +{ + free_page((unsigned long)secdata); +} + +#else + +static inline char *alloc_secdata(void) +{ + return (char *)1; +} + +static inline void free_secdata(void *secdata) +{ } +#endif /* CONFIG_SECURITY */ + #endif /* ! __LINUX_SECURITY_H */ diff --git a/include/linux/selinux_netlink.h b/include/linux/selinux_netlink.h index bbf489decd8..d239797785c 100644 --- a/include/linux/selinux_netlink.h +++ b/include/linux/selinux_netlink.h @@ -12,6 +12,8 @@ #ifndef _LINUX_SELINUX_NETLINK_H #define _LINUX_SELINUX_NETLINK_H +#include <linux/types.h> + /* Message types. 
*/ #define SELNL_MSG_BASE 0x10 enum { @@ -38,11 +40,11 @@ enum selinux_nlgroups { /* Message structures */ struct selnl_msg_setenforce { - int32_t val; + __s32 val; }; struct selnl_msg_policyload { - u_int32_t seqno; + __u32 seqno; }; #endif /* _LINUX_SELINUX_NETLINK_H */ diff --git a/include/linux/seq_file.h b/include/linux/seq_file.h index 40ea5058c2e..0c6a86b7959 100644 --- a/include/linux/seq_file.h +++ b/include/linux/seq_file.h @@ -19,6 +19,7 @@ struct seq_file { size_t from; size_t count; loff_t index; + loff_t read_pos; u64 version; struct mutex lock; const struct seq_operations *op; @@ -42,6 +43,7 @@ int seq_release(struct inode *, struct file *); int seq_escape(struct seq_file *, const char *, const char *); int seq_putc(struct seq_file *m, char c); int seq_puts(struct seq_file *m, const char *s); +int seq_write(struct seq_file *seq, const void *data, size_t len); int seq_printf(struct seq_file *, const char *, ...) __attribute__ ((format (printf,2,3))); @@ -54,7 +56,7 @@ int seq_bitmap(struct seq_file *m, const unsigned long *bits, unsigned int nr_bits); static inline int seq_cpumask(struct seq_file *m, const struct cpumask *mask) { - return seq_bitmap(m, mask->bits, nr_cpu_ids); + return seq_bitmap(m, cpumask_bits(mask), nr_cpu_ids); } static inline int seq_nodemask(struct seq_file *m, nodemask_t *mask) @@ -62,12 +64,13 @@ static inline int seq_nodemask(struct seq_file *m, nodemask_t *mask) return seq_bitmap(m, mask->bits, MAX_NUMNODES); } -int seq_bitmap_list(struct seq_file *m, unsigned long *bits, +int seq_bitmap_list(struct seq_file *m, const unsigned long *bits, unsigned int nr_bits); -static inline int seq_cpumask_list(struct seq_file *m, cpumask_t *mask) +static inline int seq_cpumask_list(struct seq_file *m, + const struct cpumask *mask) { - return seq_bitmap_list(m, mask->bits, NR_CPUS); + return seq_bitmap_list(m, cpumask_bits(mask), nr_cpu_ids); } static inline int seq_nodemask_list(struct seq_file *m, nodemask_t *mask) diff --git a/include/linux/serial.h b/include/linux/serial.h index 9136cc5608c..e5bb75a6380 100644 --- a/include/linux/serial.h +++ b/include/linux/serial.h @@ -96,54 +96,76 @@ struct serial_uart_config { /* * Definitions for async_struct (and serial_struct) flags field + * + * Define ASYNCB_* for convenient use with {test,set,clear}_bit. 
*/ -#define ASYNC_HUP_NOTIFY 0x0001 /* Notify getty on hangups and closes - on the callout port */ -#define ASYNC_FOURPORT 0x0002 /* Set OU1, OUT2 per AST Fourport settings */ -#define ASYNC_SAK 0x0004 /* Secure Attention Key (Orange book) */ -#define ASYNC_SPLIT_TERMIOS 0x0008 /* Separate termios for dialin/callout */ - -#define ASYNC_SPD_MASK 0x1030 -#define ASYNC_SPD_HI 0x0010 /* Use 56000 instead of 38400 bps */ - -#define ASYNC_SPD_VHI 0x0020 /* Use 115200 instead of 38400 bps */ -#define ASYNC_SPD_CUST 0x0030 /* Use user-specified divisor */ - -#define ASYNC_SKIP_TEST 0x0040 /* Skip UART test during autoconfiguration */ -#define ASYNC_AUTO_IRQ 0x0080 /* Do automatic IRQ during autoconfiguration */ -#define ASYNC_SESSION_LOCKOUT 0x0100 /* Lock out cua opens based on session */ -#define ASYNC_PGRP_LOCKOUT 0x0200 /* Lock out cua opens based on pgrp */ -#define ASYNC_CALLOUT_NOHUP 0x0400 /* Don't do hangups for cua device */ - -#define ASYNC_HARDPPS_CD 0x0800 /* Call hardpps when CD goes high */ - -#define ASYNC_SPD_SHI 0x1000 /* Use 230400 instead of 38400 bps */ -#define ASYNC_SPD_WARP 0x1010 /* Use 460800 instead of 38400 bps */ - -#define ASYNC_LOW_LATENCY 0x2000 /* Request low latency behaviour */ - -#define ASYNC_BUGGY_UART 0x4000 /* This is a buggy UART, skip some safety - * checks. Note: can be dangerous! */ - -#define ASYNC_AUTOPROBE 0x8000 /* Port was autoprobed by PCI or PNP code */ - -#define ASYNC_FLAGS 0x7FFF /* Possible legal async flags */ -#define ASYNC_USR_MASK 0x3430 /* Legal flags that non-privileged - * users can set or reset */ - -/* Internal flags used only by kernel/chr_drv/serial.c */ -#define ASYNC_INITIALIZED 0x80000000 /* Serial port was initialized */ -#define ASYNC_NORMAL_ACTIVE 0x20000000 /* Normal device is active */ -#define ASYNC_BOOT_AUTOCONF 0x10000000 /* Autoconfigure port on bootup */ -#define ASYNC_CLOSING 0x08000000 /* Serial port is closing */ -#define ASYNC_CTS_FLOW 0x04000000 /* Do CTS flow control */ -#define ASYNC_CHECK_CD 0x02000000 /* i.e., CLOCAL */ -#define ASYNC_SHARE_IRQ 0x01000000 /* for multifunction cards - --- no longer used */ -#define ASYNC_CONS_FLOW 0x00800000 /* flow control for console */ - -#define ASYNC_BOOT_ONLYMCA 0x00400000 /* Probe only if MCA bus */ -#define ASYNC_INTERNAL_FLAGS 0xFFC00000 /* Internal flags */ +#define ASYNCB_HUP_NOTIFY 0 /* Notify getty on hangups and closes + * on the callout port */ +#define ASYNCB_FOURPORT 1 /* Set OU1, OUT2 per AST Fourport settings */ +#define ASYNCB_SAK 2 /* Secure Attention Key (Orange book) */ +#define ASYNCB_SPLIT_TERMIOS 3 /* Separate termios for dialin/callout */ +#define ASYNCB_SPD_HI 4 /* Use 56000 instead of 38400 bps */ +#define ASYNCB_SPD_VHI 5 /* Use 115200 instead of 38400 bps */ +#define ASYNCB_SKIP_TEST 6 /* Skip UART test during autoconfiguration */ +#define ASYNCB_AUTO_IRQ 7 /* Do automatic IRQ during + * autoconfiguration */ +#define ASYNCB_SESSION_LOCKOUT 8 /* Lock out cua opens based on session */ +#define ASYNCB_PGRP_LOCKOUT 9 /* Lock out cua opens based on pgrp */ +#define ASYNCB_CALLOUT_NOHUP 10 /* Don't do hangups for cua device */ +#define ASYNCB_HARDPPS_CD 11 /* Call hardpps when CD goes high */ +#define ASYNCB_SPD_SHI 12 /* Use 230400 instead of 38400 bps */ +#define ASYNCB_LOW_LATENCY 13 /* Request low latency behaviour */ +#define ASYNCB_BUGGY_UART 14 /* This is a buggy UART, skip some safety + * checks. Note: can be dangerous! 
*/ +#define ASYNCB_AUTOPROBE 15 /* Port was autoprobed by PCI or PNP code */ +#define ASYNCB_LAST_USER 15 + +/* Internal flags used only by kernel */ +#define ASYNCB_INITIALIZED 31 /* Serial port was initialized */ +#define ASYNCB_NORMAL_ACTIVE 29 /* Normal device is active */ +#define ASYNCB_BOOT_AUTOCONF 28 /* Autoconfigure port on bootup */ +#define ASYNCB_CLOSING 27 /* Serial port is closing */ +#define ASYNCB_CTS_FLOW 26 /* Do CTS flow control */ +#define ASYNCB_CHECK_CD 25 /* i.e., CLOCAL */ +#define ASYNCB_SHARE_IRQ 24 /* for multifunction cards, no longer used */ +#define ASYNCB_CONS_FLOW 23 /* flow control for console */ +#define ASYNCB_BOOT_ONLYMCA 22 /* Probe only if MCA bus */ +#define ASYNCB_FIRST_KERNEL 22 + +#define ASYNC_HUP_NOTIFY (1U << ASYNCB_HUP_NOTIFY) +#define ASYNC_FOURPORT (1U << ASYNCB_FOURPORT) +#define ASYNC_SAK (1U << ASYNCB_SAK) +#define ASYNC_SPLIT_TERMIOS (1U << ASYNCB_SPLIT_TERMIOS) +#define ASYNC_SPD_HI (1U << ASYNCB_SPD_HI) +#define ASYNC_SPD_VHI (1U << ASYNCB_SPD_VHI) +#define ASYNC_SKIP_TEST (1U << ASYNCB_SKIP_TEST) +#define ASYNC_AUTO_IRQ (1U << ASYNCB_AUTO_IRQ) +#define ASYNC_SESSION_LOCKOUT (1U << ASYNCB_SESSION_LOCKOUT) +#define ASYNC_PGRP_LOCKOUT (1U << ASYNCB_PGRP_LOCKOUT) +#define ASYNC_CALLOUT_NOHUP (1U << ASYNCB_CALLOUT_NOHUP) +#define ASYNC_HARDPPS_CD (1U << ASYNCB_HARDPPS_CD) +#define ASYNC_SPD_SHI (1U << ASYNCB_SPD_SHI) +#define ASYNC_LOW_LATENCY (1U << ASYNCB_LOW_LATENCY) +#define ASYNC_BUGGY_UART (1U << ASYNCB_BUGGY_UART) +#define ASYNC_AUTOPROBE (1U << ASYNCB_AUTOPROBE) + +#define ASYNC_FLAGS ((1U << ASYNCB_LAST_USER) - 1) +#define ASYNC_USR_MASK (ASYNC_SPD_HI|ASYNC_SPD_VHI| \ + ASYNC_CALLOUT_NOHUP|ASYNC_SPD_SHI|ASYNC_LOW_LATENCY) +#define ASYNC_SPD_CUST (ASYNC_SPD_HI|ASYNC_SPD_VHI) +#define ASYNC_SPD_WARP (ASYNC_SPD_HI|ASYNC_SPD_SHI) +#define ASYNC_SPD_MASK (ASYNC_SPD_HI|ASYNC_SPD_VHI|ASYNC_SPD_SHI) + +#define ASYNC_INITIALIZED (1U << ASYNCB_INITIALIZED) +#define ASYNC_NORMAL_ACTIVE (1U << ASYNCB_NORMAL_ACTIVE) +#define ASYNC_BOOT_AUTOCONF (1U << ASYNCB_BOOT_AUTOCONF) +#define ASYNC_CLOSING (1U << ASYNCB_CLOSING) +#define ASYNC_CTS_FLOW (1U << ASYNCB_CTS_FLOW) +#define ASYNC_CHECK_CD (1U << ASYNCB_CHECK_CD) +#define ASYNC_SHARE_IRQ (1U << ASYNCB_SHARE_IRQ) +#define ASYNC_CONS_FLOW (1U << ASYNCB_CONS_FLOW) +#define ASYNC_BOOT_ONLYMCA (1U << ASYNCB_BOOT_ONLYMCA) +#define ASYNC_INTERNAL_FLAGS (~((1U << ASYNCB_FIRST_KERNEL) - 1)) /* * Multiport serial configuration structure --- external structure diff --git a/include/linux/serial_core.h b/include/linux/serial_core.h index 90bbbf0b116..23d2fb051f9 100644 --- a/include/linux/serial_core.h +++ b/include/linux/serial_core.h @@ -41,7 +41,8 @@ #define PORT_XSCALE 15 #define PORT_RM9000 16 /* PMC-Sierra RM9xxx internal UART */ #define PORT_OCTEON 17 /* Cavium OCTEON internal UART */ -#define PORT_MAX_8250 17 /* max port ID */ +#define PORT_AR7 18 /* Texas Instruments AR7 internal UART */ +#define PORT_MAX_8250 18 /* max port ID */ /* * ARM specific type numbers. 
These are not currently guaranteed @@ -164,6 +165,15 @@ /* NWPSERIAL */ #define PORT_NWPSERIAL 85 +/* MAX3100 */ +#define PORT_MAX3100 86 + +/* Timberdale UART */ +#define PORT_TIMBUART 87 + +/* Qualcomm MSM SoCs */ +#define PORT_MSM 88 + #ifdef __KERNEL__ #include <linux/compiler.h> @@ -277,7 +287,7 @@ struct uart_port { struct uart_icount icount; /* statistics */ struct console *cons; /* struct console, if any */ -#ifdef CONFIG_SERIAL_CORE_CONSOLE +#if defined(CONFIG_SERIAL_CORE_CONSOLE) || defined(SUPPORT_SYSRQ) unsigned long sysrq; /* sysrq timeout */ #endif @@ -296,6 +306,7 @@ struct uart_port { #define UPF_HARDPPS_CD ((__force upf_t) (1 << 11)) #define UPF_LOW_LATENCY ((__force upf_t) (1 << 13)) #define UPF_BUGGY_UART ((__force upf_t) (1 << 14)) +#define UPF_NO_TXEN_TEST ((__force upf_t) (1 << 15)) #define UPF_MAGIC_MULTIPLIER ((__force upf_t) (1 << 16)) #define UPF_CONS_FLOW ((__force upf_t) (1 << 23)) #define UPF_SHARE_IRQ ((__force upf_t) (1 << 24)) diff --git a/include/linux/serial_max3100.h b/include/linux/serial_max3100.h new file mode 100644 index 00000000000..4976befb6ae --- /dev/null +++ b/include/linux/serial_max3100.h @@ -0,0 +1,52 @@ +/* + * + * Copyright (C) 2007 Christian Pellegrin + * + * This program is free software; you can redistribute it and/or modify + * it under the terms of the GNU General Public License as published by + * the Free Software Foundation; either version 2 of the License, or + * (at your option) any later version. + */ + + +#ifndef _LINUX_SERIAL_MAX3100_H +#define _LINUX_SERIAL_MAX3100_H 1 + + +/** + * struct plat_max3100 - MAX3100 SPI UART platform data + * @loopback: force MAX3100 in loopback + * @crystal: 1 for 3.6864 Mhz, 0 for 1.8432 + * @max3100_hw_suspend: MAX3100 has a shutdown pin. This is a hook + * called on suspend and resume to activate it. + * @poll_time: poll time for CTS signal in ms, 0 disables (so no hw + * flow ctrl is possible but you have less CPU usage) + * + * You should use this structure in your machine description to specify + * how the MAX3100 is connected. 
Example: + * + * static struct plat_max3100 max3100_plat_data = { + * .loopback = 0, + * .crystal = 0, + * .poll_time = 100, + * }; + * + * static struct spi_board_info spi_board_info[] = { + * { + * .modalias = "max3100", + * .platform_data = &max3100_plat_data, + * .irq = IRQ_EINT12, + * .max_speed_hz = 5*1000*1000, + * .chip_select = 0, + * }, + * }; + * + **/ +struct plat_max3100 { + int loopback; + int crystal; + void (*max3100_hw_suspend) (int suspend); + int poll_time; +}; + +#endif diff --git a/include/linux/serial_reg.h b/include/linux/serial_reg.h index 96c0d93fc2c..850db2e8051 100644 --- a/include/linux/serial_reg.h +++ b/include/linux/serial_reg.h @@ -323,6 +323,7 @@ #define UART_OMAP_MVER 0x14 /* Module version register */ #define UART_OMAP_SYSC 0x15 /* System configuration register */ #define UART_OMAP_SYSS 0x16 /* System status register */ +#define UART_OMAP_WER 0x17 /* Wake-up enable register */ #endif /* _LINUX_SERIAL_REG_H */ diff --git a/include/linux/serial_sci.h b/include/linux/serial_sci.h index 893cc53486b..1c297ddc9d5 100644 --- a/include/linux/serial_sci.h +++ b/include/linux/serial_sci.h @@ -25,8 +25,7 @@ struct plat_sci_port { unsigned int irqs[SCIx_NR_IRQS]; /* ERI, RXI, TXI, BRI */ unsigned int type; /* SCI / SCIF / IRDA */ upf_t flags; /* UPF_* flags */ + char *clk; /* clock string */ }; -int early_sci_setup(struct uart_port *port); - #endif /* __LINUX_SERIAL_SCI_H */ diff --git a/include/linux/serio.h b/include/linux/serio.h index 1bcb357a01a..126d24c9eaa 100644 --- a/include/linux/serio.h +++ b/include/linux/serio.h @@ -15,6 +15,7 @@ #ifdef __KERNEL__ +#include <linux/types.h> #include <linux/interrupt.h> #include <linux/list.h> #include <linux/spinlock.h> @@ -28,7 +29,10 @@ struct serio { char name[32]; char phys[32]; - unsigned int manual_bind; + bool manual_bind; + bool registered; /* port has been fully registered with driver core */ + bool suspended; /* port is suspended */ + struct serio_device_id id; @@ -47,7 +51,6 @@ struct serio { struct mutex drv_mutex; /* protects serio->drv so attributes can pin driver */ struct device dev; - unsigned int registered; /* port has been fully registered with driver core */ struct list_head node; }; @@ -58,7 +61,7 @@ struct serio_driver { char *description; struct serio_device_id *id_table; - unsigned int manual_bind; + bool manual_bind; void (*write_wakeup)(struct serio *); irqreturn_t (*interrupt)(struct serio *, unsigned char, unsigned int); @@ -212,7 +215,7 @@ static inline void serio_unpin_driver(struct serio *serio) #define SERIO_FUJITSU 0x35 #define SERIO_ZHENHUA 0x36 #define SERIO_INEXIO 0x37 -#define SERIO_TOUCHIT213 0x37 +#define SERIO_TOUCHIT213 0x38 #define SERIO_W8001 0x39 #endif diff --git a/include/linux/sh_intc.h b/include/linux/sh_intc.h index 68e212ff9dd..eb1423a0078 100644 --- a/include/linux/sh_intc.h +++ b/include/linux/sh_intc.h @@ -85,6 +85,7 @@ struct intc_desc symbol __initdata = { \ } #endif +unsigned int intc_evt2irq(unsigned int vector); void __init register_intc_controller(struct intc_desc *desc); int intc_set_priority(unsigned int irq, unsigned int prio); diff --git a/include/linux/sh_timer.h b/include/linux/sh_timer.h new file mode 100644 index 00000000000..864bd56bd3b --- /dev/null +++ b/include/linux/sh_timer.h @@ -0,0 +1,13 @@ +#ifndef __SH_TIMER_H__ +#define __SH_TIMER_H__ + +struct sh_timer_config { + char *name; + long channel_offset; + int timer_bit; + char *clk; + unsigned long clockevent_rating; + unsigned long clocksource_rating; +}; + +#endif /* __SH_TIMER_H__ */ diff --git 
a/include/linux/shmem_fs.h b/include/linux/shmem_fs.h index fd83f2584b1..6d3f2f449ea 100644 --- a/include/linux/shmem_fs.h +++ b/include/linux/shmem_fs.h @@ -19,10 +19,6 @@ struct shmem_inode_info { swp_entry_t i_direct[SHMEM_NR_DIRECT]; /* first blocks */ struct list_head swaplist; /* chain of maybes on swap */ struct inode vfs_inode; -#ifdef CONFIG_TMPFS_POSIX_ACL - struct posix_acl *i_acl; - struct posix_acl *i_default_acl; -#endif }; struct shmem_sb_info { @@ -43,9 +39,8 @@ static inline struct shmem_inode_info *SHMEM_I(struct inode *inode) } #ifdef CONFIG_TMPFS_POSIX_ACL -int shmem_permission(struct inode *, int); +int shmem_check_acl(struct inode *, int); int shmem_acl_init(struct inode *, struct inode *); -void shmem_acl_destroy_inode(struct inode *); extern struct xattr_handler shmem_xattr_acl_access_handler; extern struct xattr_handler shmem_xattr_acl_default_handler; @@ -57,9 +52,6 @@ static inline int shmem_acl_init(struct inode *inode, struct inode *dir) { return 0; } -static inline void shmem_acl_destroy_inode(struct inode *inode) -{ -} #endif /* CONFIG_TMPFS_POSIX_ACL */ #endif diff --git a/include/linux/sht15.h b/include/linux/sht15.h new file mode 100644 index 00000000000..046bce05eca --- /dev/null +++ b/include/linux/sht15.h @@ -0,0 +1,24 @@ +/* + * sht15.h - support for the SHT15 Temperature and Humidity Sensor + * + * Copyright (c) 2009 Jonathan Cameron + * + * Copyright (c) 2007 Wouter Horre + * + * This program is free software; you can redistribute it and/or modify + * it under the terms of the GNU General Public License version 2 as + * published by the Free Software Foundation. + */ + +/** + * struct sht15_platform_data - sht15 connectivity info + * @gpio_data: no. of gpio to which bidirectional data line is connected + * @gpio_sck: no. of gpio to which the data clock is connected. + * @supply_mv: supply voltage in mv. Overridden by regulator if available. 
+ **/ +struct sht15_platform_data { + int gpio_data; + int gpio_sck; + int supply_mv; +}; + diff --git a/include/linux/signal.h b/include/linux/signal.h index 84f997f8aa5..c7552836bd9 100644 --- a/include/linux/signal.h +++ b/include/linux/signal.h @@ -235,6 +235,8 @@ static inline int valid_signal(unsigned long sig) extern int next_signal(struct sigpending *pending, sigset_t *mask); extern int group_send_sig_info(int sig, struct siginfo *info, struct task_struct *p); extern int __group_send_sig_info(int, struct siginfo *, struct task_struct *); +extern long do_rt_tgsigqueueinfo(pid_t tgid, pid_t pid, int sig, + siginfo_t *info); extern long do_sigpending(void __user *, unsigned long); extern int sigprocmask(int, sigset_t *, sigset_t *); extern int show_unhandled_signals; diff --git a/include/linux/signalfd.h b/include/linux/signalfd.h index bef0c46d471..b363b916c90 100644 --- a/include/linux/signalfd.h +++ b/include/linux/signalfd.h @@ -8,6 +8,7 @@ #ifndef _LINUX_SIGNALFD_H #define _LINUX_SIGNALFD_H +#include <linux/types.h> /* For O_CLOEXEC and O_NONBLOCK */ #include <linux/fcntl.h> diff --git a/include/linux/skbuff.h b/include/linux/skbuff.h index cf2cb50f77d..df7b23ac66e 100644 --- a/include/linux/skbuff.h +++ b/include/linux/skbuff.h @@ -15,6 +15,7 @@ #define _LINUX_SKBUFF_H #include <linux/kernel.h> +#include <linux/kmemcheck.h> #include <linux/compiler.h> #include <linux/time.h> #include <linux/cache.h> @@ -29,9 +30,6 @@ #include <linux/dmaengine.h> #include <linux/hrtimer.h> -#define HAVE_ALLOC_SKB /* For the drivers to know */ -#define HAVE_ALIGNABLE_SKB /* Ditto 8) */ - /* Don't change this without changing skb_csum_unnecessary! */ #define CHECKSUM_NONE 0 #define CHECKSUM_UNNECESSARY 1 @@ -135,6 +133,56 @@ struct skb_frag_struct { __u32 size; }; +#define HAVE_HW_TIME_STAMP + +/** + * struct skb_shared_hwtstamps - hardware time stamps + * @hwtstamp: hardware time stamp transformed into duration + * since arbitrary point in time + * @syststamp: hwtstamp transformed to system time base + * + * Software time stamps generated by ktime_get_real() are stored in + * skb->tstamp. The relation between the different kinds of time + * stamps is as follows: + * + * syststamp and tstamp can be compared against each other in + * arbitrary combinations. The accuracy of a + * syststamp/tstamp/"syststamp from other device" comparison is + * limited by the accuracy of the transformation into system time + * base. This depends on the device driver and its underlying + * hardware. + * + * hwtstamps can only be compared against other hwtstamps from + * the same device. + * + * This structure is attached to packets as part of the + * &skb_shared_info. Use skb_hwtstamps() to get a pointer. + */ +struct skb_shared_hwtstamps { + ktime_t hwtstamp; + ktime_t syststamp; +}; + +/** + * struct skb_shared_tx - instructions for time stamping of outgoing packets + * @hardware: generate hardware time stamp + * @software: generate software time stamp + * @in_progress: device driver is going to provide + * hardware time stamp + * @flags: all shared_tx flags + * + * These flags are attached to packets as part of the + * &skb_shared_info. Use skb_tx() to get a pointer. + */ +union skb_shared_tx { + struct { + __u8 hardware:1, + software:1, + in_progress:1; + }; + __u8 flags; +}; + /* This data is invariant across clones and lives at * the end of the header data, ie. at skb->end. 
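As a rough, non-authoritative sketch of how a driver is expected to consume the skb_shared_tx flags above (the skb_tx() accessor and skb_tstamp_tx() are added further down in this same patch; the function name and the hardware comments are hypothetical):

/* Transmit side: if the socket asked for a hardware time stamp, note that
 * one will follow before handing the frame to the NIC.
 */
static int example_start_xmit(struct sk_buff *skb, struct net_device *dev)
{
	union skb_shared_tx *shtx = skb_tx(skb);

	if (shtx->hardware)
		shtx->in_progress = 1;	/* skb_tstamp_tx() will be called later */

	/* ... fill DMA descriptors and kick the hardware ... */
	return 0;	/* NETDEV_TX_OK */
}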
*/ @@ -142,18 +190,23 @@ struct skb_shared_info { atomic_t dataref; unsigned short nr_frags; unsigned short gso_size; +#ifdef CONFIG_HAS_DMA + dma_addr_t dma_head; +#endif /* Warning: this field is not always filled in (UFO)! */ unsigned short gso_segs; unsigned short gso_type; __be32 ip6_frag_id; -#ifdef CONFIG_HAS_DMA - unsigned int num_dma_maps; -#endif + union skb_shared_tx tx_flags; struct sk_buff *frag_list; + struct skb_shared_hwtstamps hwtstamps; skb_frag_t frags[MAX_SKB_FRAGS]; #ifdef CONFIG_HAS_DMA - dma_addr_t dma_maps[MAX_SKB_FRAGS + 1]; + dma_addr_t dma_maps[MAX_SKB_FRAGS]; #endif + /* Intermediate layers must ensure that destructor_arg + * remains valid until skb destructor */ + void * destructor_arg; }; /* We divide dataref into two halves. The higher 16 bits hold references @@ -188,6 +241,8 @@ enum { SKB_GSO_TCP_ECN = 1 << 3, SKB_GSO_TCPV6 = 1 << 4, + + SKB_GSO_FCOE = 1 << 5, }; #if BITS_PER_LONG > 32 @@ -210,7 +265,7 @@ typedef unsigned char *sk_buff_data_t; * @transport_header: Transport layer header * @network_header: Network layer header * @mac_header: Link layer header - * @dst: destination entry + * @_skb_dst: destination entry * @sp: the security path, used for xfrm * @cb: Control buffer. Free for use by every layer. Put private vars here * @len: Length of actual data @@ -249,10 +304,6 @@ typedef unsigned char *sk_buff_data_t; * @tc_index: Traffic control index * @tc_verd: traffic control verdict * @ndisc_nodetype: router type (from link layer) - * @do_not_encrypt: set to prevent encryption of this frame - * @requeue: set to indicate that the wireless core should attempt - * a software retry on this frame if we failed to - * receive an ACK for it * @dma_cookie: a cookie to one of several possible DMA operations * done by skb DMA functions * @secmark: security marking @@ -268,10 +319,7 @@ struct sk_buff { ktime_t tstamp; struct net_device *dev; - union { - struct dst_entry *dst; - struct rtable *rtable; - }; + unsigned long _skb_dst; #ifdef CONFIG_XFRM struct sec_path *sp; #endif @@ -295,6 +343,7 @@ struct sk_buff { }; }; __u32 priority; + kmemcheck_bitfield_begin(flags1); __u8 local_df:1, cloned:1, ip_summed:2, @@ -305,6 +354,7 @@ struct sk_buff { ipvs_property:1, peeked:1, nf_trace:1; + kmemcheck_bitfield_end(flags1); __be16 protocol; void (*destructor)(struct sk_buff *skb); @@ -324,14 +374,14 @@ struct sk_buff { __u16 tc_verd; /* traffic control verdict */ #endif #endif + + kmemcheck_bitfield_begin(flags2); #ifdef CONFIG_IPV6_NDISC_NODETYPE __u8 ndisc_nodetype:2; #endif -#if defined(CONFIG_MAC80211) || defined(CONFIG_MAC80211_MODULE) - __u8 do_not_encrypt:1; - __u8 requeue:1; -#endif - /* 0/13/14 bit hole */ + kmemcheck_bitfield_end(flags2); + + /* 0/14 bit hole */ #ifdef CONFIG_NET_DMA dma_cookie_t dma_cookie; @@ -372,7 +422,23 @@ extern void skb_dma_unmap(struct device *dev, struct sk_buff *skb, enum dma_data_direction dir); #endif +static inline struct dst_entry *skb_dst(const struct sk_buff *skb) +{ + return (struct dst_entry *)skb->_skb_dst; +} + +static inline void skb_dst_set(struct sk_buff *skb, struct dst_entry *dst) +{ + skb->_skb_dst = (unsigned long)dst; +} + +static inline struct rtable *skb_rtable(const struct sk_buff *skb) +{ + return (struct rtable *)skb_dst(skb); +} + extern void kfree_skb(struct sk_buff *skb); +extern void consume_skb(struct sk_buff *skb); extern void __kfree_skb(struct sk_buff *skb); extern struct sk_buff *__alloc_skb(unsigned int size, gfp_t priority, int fclone, int node); @@ -411,20 +477,12 @@ extern int skb_to_sgvec(struct 
sk_buff *skb, extern int skb_cow_data(struct sk_buff *skb, int tailbits, struct sk_buff **trailer); extern int skb_pad(struct sk_buff *skb, int pad); -#define dev_kfree_skb(a) kfree_skb(a) +#define dev_kfree_skb(a) consume_skb(a) +#define dev_consume_skb(a) kfree_skb_clean(a) extern void skb_over_panic(struct sk_buff *skb, int len, void *here); extern void skb_under_panic(struct sk_buff *skb, int len, void *here); -extern void skb_truesize_bug(struct sk_buff *skb); - -static inline void skb_truesize_check(struct sk_buff *skb) -{ - int len = sizeof(struct sk_buff) + skb->len; - - if (unlikely((int)skb->truesize < len)) - skb_truesize_bug(skb); -} extern int skb_append_datato_frags(struct sock *sk, struct sk_buff *skb, int getfrag(void *from, char *to, int offset, @@ -468,6 +526,16 @@ static inline unsigned char *skb_end_pointer(const struct sk_buff *skb) /* Internal */ #define skb_shinfo(SKB) ((struct skb_shared_info *)(skb_end_pointer(SKB))) +static inline struct skb_shared_hwtstamps *skb_hwtstamps(struct sk_buff *skb) +{ + return &skb_shinfo(skb)->hwtstamps; +} + +static inline union skb_shared_tx *skb_tx(struct sk_buff *skb) +{ + return &skb_shinfo(skb)->tx_flags; +} + /** * skb_queue_empty - check if a queue is empty * @list: queue head @@ -1008,7 +1076,7 @@ extern void skb_add_rx_frag(struct sk_buff *skb, int i, struct page *page, int off, int size); #define SKB_PAGE_ASSERT(skb) BUG_ON(skb_shinfo(skb)->nr_frags) -#define SKB_FRAG_ASSERT(skb) BUG_ON(skb_shinfo(skb)->frag_list) +#define SKB_FRAG_ASSERT(skb) BUG_ON(skb_has_frags(skb)) #define SKB_LINEAR_ASSERT(skb) BUG_ON(skb_is_nonlinear(skb)) #ifdef NET_SKBUFF_DATA_USES_OFFSET @@ -1270,12 +1338,12 @@ static inline int skb_network_offset(const struct sk_buff *skb) * shifting the start of the packet by 2 bytes. Drivers should do this * with: * - * skb_reserve(NET_IP_ALIGN); + * skb_reserve(skb, NET_IP_ALIGN); * * The downside to this alignment of the IP header is that the DMA is now * unaligned. On some architectures the cost of an unaligned DMA is high * and this cost outweighs the gains made by aligning the IP header. - * + * * Since this trade off varies between architectures, we allow NET_IP_ALIGN * to be overridden. */ @@ -1287,7 +1355,7 @@ static inline int skb_network_offset(const struct sk_buff *skb) * The networking layer reserves some headroom in skb data (via * dev_alloc_skb). This is used to avoid having to reallocate skb data when * the header has to grow. In the default case, if the header has to grow - * 16 bytes or less we avoid the reallocation. + * 32 bytes or less we avoid the reallocation. * * Unfortunately this headroom changes the DMA alignment of the resulting * network packet. As for NET_IP_ALIGN, this unaligned DMA is expensive @@ -1295,11 +1363,11 @@ static inline int skb_network_offset(const struct sk_buff *skb) * perhaps setting it to a cacheline in size (since that will maintain * cacheline alignment of the DMA). It must be a power of 2. * - * Various parts of the networking layer expect at least 16 bytes of + * Various parts of the networking layer expect at least 32 bytes of * headroom, you should not reduce this. 
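To make the headroom discussion concrete, the usual receive-buffer allocation pattern looks like the sketch below; only the helper name is invented, and dev_alloc_skb() already reserves NET_SKB_PAD bytes of headroom on top of this.

/* Hypothetical RX allocation: reserve NET_IP_ALIGN so the IP header lands
 * on a 4-byte boundary after the 14-byte Ethernet header.
 */
static struct sk_buff *example_rx_alloc(unsigned int frame_len)
{
	struct sk_buff *skb = dev_alloc_skb(frame_len + NET_IP_ALIGN);

	if (!skb)
		return NULL;

	skb_reserve(skb, NET_IP_ALIGN);
	return skb;
}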
*/ #ifndef NET_SKB_PAD -#define NET_SKB_PAD 16 +#define NET_SKB_PAD 32 #endif extern int ___pskb_trim(struct sk_buff *skb, unsigned int len); @@ -1647,6 +1715,25 @@ static inline int pskb_trim_rcsum(struct sk_buff *skb, unsigned int len) skb = skb->prev) +static inline bool skb_has_frags(const struct sk_buff *skb) +{ + return skb_shinfo(skb)->frag_list != NULL; +} + +static inline void skb_frag_list_init(struct sk_buff *skb) +{ + skb_shinfo(skb)->frag_list = NULL; +} + +static inline void skb_frag_add_head(struct sk_buff *skb, struct sk_buff *frag) +{ + frag->next = skb_shinfo(skb)->frag_list; + skb_shinfo(skb)->frag_list = frag; +} + +#define skb_walk_frags(skb, iter) \ + for (iter = skb_shinfo(skb)->frag_list; iter; iter = iter->next) + extern struct sk_buff *__skb_recv_datagram(struct sock *sk, unsigned flags, int *peeked, int *err); extern struct sk_buff *skb_recv_datagram(struct sock *sk, unsigned flags, @@ -1661,8 +1748,14 @@ extern int skb_copy_and_csum_datagram_iovec(struct sk_buff *skb, struct iovec *iov); extern int skb_copy_datagram_from_iovec(struct sk_buff *skb, int offset, - struct iovec *from, + const struct iovec *from, + int from_offset, int len); +extern int skb_copy_datagram_const_iovec(const struct sk_buff *from, + int offset, + const struct iovec *to, + int to_offset, + int size); extern void skb_free_datagram(struct sock *sk, struct sk_buff *skb); extern int skb_kill_datagram(struct sock *sk, struct sk_buff *skb, unsigned int flags); @@ -1687,8 +1780,6 @@ extern int skb_shift(struct sk_buff *tgt, struct sk_buff *skb, int shiftlen); extern struct sk_buff *skb_segment(struct sk_buff *skb, int features); -extern int skb_gro_receive(struct sk_buff **head, - struct sk_buff *skb); static inline void *skb_header_pointer(const struct sk_buff *skb, int offset, int len, void *buffer) @@ -1735,6 +1826,11 @@ static inline void skb_copy_to_linear_data_offset(struct sk_buff *skb, extern void skb_init(void); +static inline ktime_t skb_get_ktime(const struct sk_buff *skb) +{ + return skb->tstamp; +} + /** * skb_get_timestamp - get timestamp from a skb * @skb: skb to get stamp from @@ -1744,11 +1840,18 @@ extern void skb_init(void); * This function converts the offset back to a struct timeval and stores * it in stamp. */ -static inline void skb_get_timestamp(const struct sk_buff *skb, struct timeval *stamp) +static inline void skb_get_timestamp(const struct sk_buff *skb, + struct timeval *stamp) { *stamp = ktime_to_timeval(skb->tstamp); } +static inline void skb_get_timestampns(const struct sk_buff *skb, + struct timespec *stamp) +{ + *stamp = ktime_to_timespec(skb->tstamp); +} + static inline void __net_timestamp(struct sk_buff *skb) { skb->tstamp = ktime_get_real(); @@ -1764,6 +1867,20 @@ static inline ktime_t net_invalid_timestamp(void) return ktime_set(0, 0); } +/** + * skb_tstamp_tx - queue clone of skb with send time stamps + * @orig_skb: the original outgoing packet + * @hwtstamps: hardware time stamps, may be NULL if not available + * + * If the skb has a socket associated, then this function clones the + * skb (thus sharing the actual data and optional structures), stores + * the optional hardware time stamping information (if non NULL) or + * generates a software time stamp (otherwise), then queues the clone + * to the error queue of the socket. Errors are silently ignored. 
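A minimal sketch of the completion side described by this kerneldoc; the handler name is hypothetical, and the hardware counter is assumed to have been converted to ktime_t already:

/* TX-completion path: report the hardware stamp for a frame whose
 * skb_shared_tx.in_progress bit was set at transmit time.
 */
static void example_tx_hwtstamp(struct sk_buff *skb, ktime_t stamp)
{
	struct skb_shared_hwtstamps hwts = {
		.hwtstamp = stamp,
		/* .syststamp is optional; fill it in if the driver can convert */
	};

	skb_tstamp_tx(skb, &hwts);	/* clones skb, queues it on the error queue */
}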
+ */ +extern void skb_tstamp_tx(struct sk_buff *orig_skb, + struct skb_shared_hwtstamps *hwtstamps); + extern __sum16 __skb_checksum_complete_head(struct sk_buff *skb, int len); extern __sum16 __skb_checksum_complete(struct sk_buff *skb); @@ -1894,7 +2011,7 @@ static inline void skb_set_queue_mapping(struct sk_buff *skb, u16 queue_mapping) skb->queue_mapping = queue_mapping; } -static inline u16 skb_get_queue_mapping(struct sk_buff *skb) +static inline u16 skb_get_queue_mapping(const struct sk_buff *skb) { return skb->queue_mapping; } @@ -1904,6 +2021,24 @@ static inline void skb_copy_queue_mapping(struct sk_buff *to, const struct sk_bu to->queue_mapping = from->queue_mapping; } +static inline void skb_record_rx_queue(struct sk_buff *skb, u16 rx_queue) +{ + skb->queue_mapping = rx_queue + 1; +} + +static inline u16 skb_get_rx_queue(const struct sk_buff *skb) +{ + return skb->queue_mapping - 1; +} + +static inline bool skb_rx_queue_recorded(const struct sk_buff *skb) +{ + return (skb->queue_mapping != 0); +} + +extern u16 skb_tx_hash(const struct net_device *dev, + const struct sk_buff *skb); + #ifdef CONFIG_XFRM static inline struct sec_path *skb_sec_path(struct sk_buff *skb) { diff --git a/include/linux/slab.h b/include/linux/slab.h index f96d13c281e..2da8372519f 100644 --- a/include/linux/slab.h +++ b/include/linux/slab.h @@ -62,6 +62,15 @@ # define SLAB_DEBUG_OBJECTS 0x00000000UL #endif +#define SLAB_NOLEAKTRACE 0x00800000UL /* Avoid kmemleak tracing */ + +/* Don't track use of uninitialized memory */ +#ifdef CONFIG_KMEMCHECK +# define SLAB_NOTRACK 0x01000000UL +#else +# define SLAB_NOTRACK 0x00000000UL +#endif + /* The following flags affect the page allocator grouping pages by mobility */ #define SLAB_RECLAIM_ACCOUNT 0x00020000UL /* Objects are reclaimable */ #define SLAB_TEMPORARY SLAB_RECLAIM_ACCOUNT /* Objects are short-lived */ @@ -127,6 +136,7 @@ int kmem_ptr_validate(struct kmem_cache *cachep, const void *ptr); void * __must_check __krealloc(const void *, size_t, gfp_t); void * __must_check krealloc(const void *, size_t, gfp_t); void kfree(const void *); +void kzfree(const void *); size_t ksize(const void *); /* @@ -316,4 +326,6 @@ static inline void *kzalloc_node(size_t size, gfp_t flags, int node) return kmalloc_node(size, flags | __GFP_ZERO, node); } +void __init kmem_cache_init_late(void); + #endif /* _LINUX_SLAB_H */ diff --git a/include/linux/slab_def.h b/include/linux/slab_def.h index 39c3a5eb8eb..850d057500d 100644 --- a/include/linux/slab_def.h +++ b/include/linux/slab_def.h @@ -14,6 +14,88 @@ #include <asm/page.h> /* kmalloc_sizes.h needs PAGE_SIZE */ #include <asm/cache.h> /* kmalloc_sizes.h needs L1_CACHE_BYTES */ #include <linux/compiler.h> +#include <linux/kmemtrace.h> + +/* + * struct kmem_cache + * + * manages a cache. + */ + +struct kmem_cache { +/* 1) per-cpu data, touched during every alloc/free */ + struct array_cache *array[NR_CPUS]; +/* 2) Cache tunables. Protected by cache_chain_mutex */ + unsigned int batchcount; + unsigned int limit; + unsigned int shared; + + unsigned int buffer_size; + u32 reciprocal_buffer_size; +/* 3) touched by every alloc & free from the backend */ + + unsigned int flags; /* constant flags */ + unsigned int num; /* # of objs per slab */ + +/* 4) cache_grow/shrink */ + /* order of pgs per slab (2^n) */ + unsigned int gfporder; + + /* force GFP flags, e.g. 
GFP_DMA */ + gfp_t gfpflags; + + size_t colour; /* cache colouring range */ + unsigned int colour_off; /* colour offset */ + struct kmem_cache *slabp_cache; + unsigned int slab_size; + unsigned int dflags; /* dynamic flags */ + + /* constructor func */ + void (*ctor)(void *obj); + +/* 5) cache creation/removal */ + const char *name; + struct list_head next; + +/* 6) statistics */ +#ifdef CONFIG_DEBUG_SLAB + unsigned long num_active; + unsigned long num_allocations; + unsigned long high_mark; + unsigned long grown; + unsigned long reaped; + unsigned long errors; + unsigned long max_freeable; + unsigned long node_allocs; + unsigned long node_frees; + unsigned long node_overflow; + atomic_t allochit; + atomic_t allocmiss; + atomic_t freehit; + atomic_t freemiss; + + /* + * If debugging is enabled, then the allocator can add additional + * fields and/or padding to every object. buffer_size contains the total + * object size including these internal fields, the following two + * variables contain the offset to the user object and its size. + */ + int obj_offset; + int obj_size; +#endif /* CONFIG_DEBUG_SLAB */ + + /* + * We put nodelists[] at the end of kmem_cache, because we want to size + * this array to nr_node_ids slots instead of MAX_NUMNODES + * (see kmem_cache_init()) + * We still use [MAX_NUMNODES] and not [1] or [0] because cache_cache + * is statically defined, so we reserve the max number of nodes. + */ + struct kmem_list3 *nodelists[MAX_NUMNODES]; + /* + * Do not add fields after nodelists[] + */ +}; /* Size description struct for general caches. */ struct cache_sizes { @@ -28,8 +110,26 @@ extern struct cache_sizes malloc_sizes[]; void *kmem_cache_alloc(struct kmem_cache *, gfp_t); void *__kmalloc(size_t size, gfp_t flags); -static inline void *kmalloc(size_t size, gfp_t flags) +#ifdef CONFIG_KMEMTRACE +extern void *kmem_cache_alloc_notrace(struct kmem_cache *cachep, gfp_t flags); +extern size_t slab_buffer_size(struct kmem_cache *cachep); +#else +static __always_inline void * +kmem_cache_alloc_notrace(struct kmem_cache *cachep, gfp_t flags) +{ + return kmem_cache_alloc(cachep, flags); +} +static inline size_t slab_buffer_size(struct kmem_cache *cachep) +{ + return 0; +} +#endif + +static __always_inline void *kmalloc(size_t size, gfp_t flags) { + struct kmem_cache *cachep; + void *ret; + if (__builtin_constant_p(size)) { int i = 0; @@ -43,17 +143,21 @@ static inline void *kmalloc(size_t size, gfp_t flags) i++; #include <linux/kmalloc_sizes.h> #undef CACHE - { - extern void __you_cannot_kmalloc_that_much(void); - __you_cannot_kmalloc_that_much(); - } + return NULL; found: #ifdef CONFIG_ZONE_DMA if (flags & GFP_DMA) - return kmem_cache_alloc(malloc_sizes[i].cs_dmacachep, - flags); + cachep = malloc_sizes[i].cs_dmacachep; + else #endif - return kmem_cache_alloc(malloc_sizes[i].cs_cachep, flags); + cachep = malloc_sizes[i].cs_cachep; + + ret = kmem_cache_alloc_notrace(cachep, flags); + + trace_kmalloc(_THIS_IP_, ret, + size, slab_buffer_size(cachep), flags); + + return ret; } return __kmalloc(size, flags); } @@ -62,8 +166,25 @@ found: extern void *__kmalloc_node(size_t size, gfp_t flags, int node); extern void *kmem_cache_alloc_node(struct kmem_cache *, gfp_t flags, int node); -static inline void *kmalloc_node(size_t size, gfp_t flags, int node) +#ifdef CONFIG_KMEMTRACE +extern void *kmem_cache_alloc_node_notrace(struct kmem_cache *cachep, + gfp_t flags, + int nodeid); +#else +static __always_inline void * +kmem_cache_alloc_node_notrace(struct kmem_cache *cachep, + gfp_t flags, + int 
nodeid) { + return kmem_cache_alloc_node(cachep, flags, nodeid); +} +#endif + +static __always_inline void *kmalloc_node(size_t size, gfp_t flags, int node) +{ + struct kmem_cache *cachep; + void *ret; + if (__builtin_constant_p(size)) { int i = 0; @@ -77,18 +198,22 @@ static inline void *kmalloc_node(size_t size, gfp_t flags, int node) i++; #include <linux/kmalloc_sizes.h> #undef CACHE - { - extern void __you_cannot_kmalloc_that_much(void); - __you_cannot_kmalloc_that_much(); - } + return NULL; found: #ifdef CONFIG_ZONE_DMA if (flags & GFP_DMA) - return kmem_cache_alloc_node(malloc_sizes[i].cs_dmacachep, - flags, node); + cachep = malloc_sizes[i].cs_dmacachep; + else #endif - return kmem_cache_alloc_node(malloc_sizes[i].cs_cachep, - flags, node); + cachep = malloc_sizes[i].cs_cachep; + + ret = kmem_cache_alloc_node_notrace(cachep, flags, node); + + trace_kmalloc_node(_THIS_IP_, ret, + size, slab_buffer_size(cachep), + flags, node); + + return ret; } return __kmalloc_node(size, flags, node); } diff --git a/include/linux/slob_def.h b/include/linux/slob_def.h index 59a3fa476ab..bb5368df4be 100644 --- a/include/linux/slob_def.h +++ b/include/linux/slob_def.h @@ -3,14 +3,15 @@ void *kmem_cache_alloc_node(struct kmem_cache *, gfp_t flags, int node); -static inline void *kmem_cache_alloc(struct kmem_cache *cachep, gfp_t flags) +static __always_inline void *kmem_cache_alloc(struct kmem_cache *cachep, + gfp_t flags) { return kmem_cache_alloc_node(cachep, flags, -1); } void *__kmalloc_node(size_t size, gfp_t flags, int node); -static inline void *kmalloc_node(size_t size, gfp_t flags, int node) +static __always_inline void *kmalloc_node(size_t size, gfp_t flags, int node) { return __kmalloc_node(size, flags, node); } @@ -23,14 +24,19 @@ static inline void *kmalloc_node(size_t size, gfp_t flags, int node) * kmalloc is the normal method of allocating memory * in the kernel. */ -static inline void *kmalloc(size_t size, gfp_t flags) +static __always_inline void *kmalloc(size_t size, gfp_t flags) { return __kmalloc_node(size, flags, -1); } -static inline void *__kmalloc(size_t size, gfp_t flags) +static __always_inline void *__kmalloc(size_t size, gfp_t flags) { return kmalloc(size, flags); } +static inline void kmem_cache_init_late(void) +{ + /* Nothing to do */ +} + #endif /* __LINUX_SLOB_DEF_H */ diff --git a/include/linux/slow-work.h b/include/linux/slow-work.h new file mode 100644 index 00000000000..b65c8881f07 --- /dev/null +++ b/include/linux/slow-work.h @@ -0,0 +1,95 @@ +/* Worker thread pool for slow items, such as filesystem lookups or mkdirs + * + * Copyright (C) 2008 Red Hat, Inc. All Rights Reserved. + * Written by David Howells (dhowells@redhat.com) + * + * This program is free software; you can redistribute it and/or + * modify it under the terms of the GNU General Public Licence + * as published by the Free Software Foundation; either version + * 2 of the Licence, or (at your option) any later version. 
+ * + * See Documentation/slow-work.txt + */ + +#ifndef _LINUX_SLOW_WORK_H +#define _LINUX_SLOW_WORK_H + +#ifdef CONFIG_SLOW_WORK + +#include <linux/sysctl.h> + +struct slow_work; + +/* + * The operations used to support slow work items + */ +struct slow_work_ops { + /* get a ref on a work item + * - return 0 if successful, -ve if not + */ + int (*get_ref)(struct slow_work *work); + + /* discard a ref to a work item */ + void (*put_ref)(struct slow_work *work); + + /* execute a work item */ + void (*execute)(struct slow_work *work); +}; + +/* + * A slow work item + * - A reference is held on the parent object by the thread pool when it is + * queued + */ +struct slow_work { + unsigned long flags; +#define SLOW_WORK_PENDING 0 /* item pending (further) execution */ +#define SLOW_WORK_EXECUTING 1 /* item currently executing */ +#define SLOW_WORK_ENQ_DEFERRED 2 /* item enqueue deferred */ +#define SLOW_WORK_VERY_SLOW 3 /* item is very slow */ + const struct slow_work_ops *ops; /* operations table for this item */ + struct list_head link; /* link in queue */ +}; + +/** + * slow_work_init - Initialise a slow work item + * @work: The work item to initialise + * @ops: The operations to use to handle the slow work item + * + * Initialise a slow work item. + */ +static inline void slow_work_init(struct slow_work *work, + const struct slow_work_ops *ops) +{ + work->flags = 0; + work->ops = ops; + INIT_LIST_HEAD(&work->link); +} + +/** + * vslow_work_init - Initialise a very slow work item + * @work: The work item to initialise + * @ops: The operations to use to handle the slow work item + * + * Initialise a very slow work item. This item will be restricted such that + * only a certain number of the pool threads will be able to execute items of + * this type. + */ +static inline void vslow_work_init(struct slow_work *work, + const struct slow_work_ops *ops) +{ + work->flags = 1 << SLOW_WORK_VERY_SLOW; + work->ops = ops; + INIT_LIST_HEAD(&work->link); +} + +extern int slow_work_enqueue(struct slow_work *work); +extern int slow_work_register_user(void); +extern void slow_work_unregister_user(void); + +#ifdef CONFIG_SYSCTL +extern ctl_table slow_work_sysctls[]; +#endif + +#endif /* CONFIG_SLOW_WORK */ +#endif /* _LINUX_SLOW_WORK_H */ diff --git a/include/linux/slub_def.h b/include/linux/slub_def.h index 2f5c16b1aac..c1c862b1d01 100644 --- a/include/linux/slub_def.h +++ b/include/linux/slub_def.h @@ -10,6 +10,8 @@ #include <linux/gfp.h> #include <linux/workqueue.h> #include <linux/kobject.h> +#include <linux/kmemtrace.h> +#include <linux/kmemleak.h> enum stat_item { ALLOC_FASTPATH, /* Allocation from cpu slab */ @@ -46,7 +48,6 @@ struct kmem_cache_cpu { struct kmem_cache_node { spinlock_t list_lock; /* Protect partial list and nr_partial */ unsigned long nr_partial; - unsigned long min_partial; struct list_head partial; #ifdef CONFIG_SLUB_DEBUG atomic_long_t nr_slabs; @@ -89,6 +90,7 @@ struct kmem_cache { void (*ctor)(void *); int inuse; /* Offset to metadata */ int align; /* Alignment */ + unsigned long min_partial; const char *name; /* Name (only for display!) */ struct list_head list; /* List of slab caches */ #ifdef CONFIG_SLUB_DEBUG @@ -121,10 +123,23 @@ struct kmem_cache { #define KMALLOC_SHIFT_LOW ilog2(KMALLOC_MIN_SIZE) /* + * Maximum kmalloc object size handled by SLUB. Larger object allocations + * are passed through to the page allocator. 
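Stepping back to <linux/slow-work.h> introduced just above: a minimal, hypothetical user of that API (names are invented, reference counting is stubbed out, and slow_work_register_user() is assumed to have been called at module init):

#include <linux/kernel.h>
#include <linux/slow-work.h>

struct example_item {
	struct slow_work work;
	/* ... payload ... */
};

static int example_get_ref(struct slow_work *work)
{
	/* pin the containing object; return 0 on success, -ve on failure */
	return 0;
}

static void example_put_ref(struct slow_work *work)
{
	/* drop the reference taken in example_get_ref() */
}

static void example_execute(struct slow_work *work)
{
	struct example_item *item =
		container_of(work, struct example_item, work);

	/* the actual slow operation; runs in a pool thread and may sleep */
	(void)item;
}

static const struct slow_work_ops example_slow_work_ops = {
	.get_ref	= example_get_ref,
	.put_ref	= example_put_ref,
	.execute	= example_execute,
};

static void example_queue(struct example_item *item)
{
	slow_work_init(&item->work, &example_slow_work_ops);
	slow_work_enqueue(&item->work);
}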
The page allocator "fastpath" + * is relatively slow so we need this value sufficiently high so that + * performance critical objects are allocated through the SLUB fastpath. + * + * This should be dropped to PAGE_SIZE / 2 once the page allocator + * "fastpath" becomes competitive with the slab allocator fastpaths. + */ +#define SLUB_MAX_SIZE (2 * PAGE_SIZE) + +#define SLUB_PAGE_SHIFT (PAGE_SHIFT + 2) + +/* * We keep the general caches in an array of slab caches that are used for * 2^x bytes of allocations. */ -extern struct kmem_cache kmalloc_caches[PAGE_SHIFT + 1]; +extern struct kmem_cache kmalloc_caches[SLUB_PAGE_SHIFT]; /* * Sorry that the following has to be that ugly but some versions of GCC @@ -204,15 +219,33 @@ static __always_inline struct kmem_cache *kmalloc_slab(size_t size) void *kmem_cache_alloc(struct kmem_cache *, gfp_t); void *__kmalloc(size_t size, gfp_t flags); +#ifdef CONFIG_KMEMTRACE +extern void *kmem_cache_alloc_notrace(struct kmem_cache *s, gfp_t gfpflags); +#else +static __always_inline void * +kmem_cache_alloc_notrace(struct kmem_cache *s, gfp_t gfpflags) +{ + return kmem_cache_alloc(s, gfpflags); +} +#endif + static __always_inline void *kmalloc_large(size_t size, gfp_t flags) { - return (void *)__get_free_pages(flags | __GFP_COMP, get_order(size)); + unsigned int order = get_order(size); + void *ret = (void *) __get_free_pages(flags | __GFP_COMP, order); + + kmemleak_alloc(ret, size, 1, flags); + trace_kmalloc(_THIS_IP_, ret, size, PAGE_SIZE << order, flags); + + return ret; } static __always_inline void *kmalloc(size_t size, gfp_t flags) { + void *ret; + if (__builtin_constant_p(size)) { - if (size > PAGE_SIZE) + if (size > SLUB_MAX_SIZE) return kmalloc_large(size, flags); if (!(flags & SLUB_DMA)) { @@ -221,7 +254,11 @@ static __always_inline void *kmalloc(size_t size, gfp_t flags) if (!s) return ZERO_SIZE_PTR; - return kmem_cache_alloc(s, flags); + ret = kmem_cache_alloc_notrace(s, flags); + + trace_kmalloc(_THIS_IP_, ret, size, s->size, flags); + + return ret; } } return __kmalloc(size, flags); @@ -231,19 +268,42 @@ static __always_inline void *kmalloc(size_t size, gfp_t flags) void *__kmalloc_node(size_t size, gfp_t flags, int node); void *kmem_cache_alloc_node(struct kmem_cache *, gfp_t flags, int node); +#ifdef CONFIG_KMEMTRACE +extern void *kmem_cache_alloc_node_notrace(struct kmem_cache *s, + gfp_t gfpflags, + int node); +#else +static __always_inline void * +kmem_cache_alloc_node_notrace(struct kmem_cache *s, + gfp_t gfpflags, + int node) +{ + return kmem_cache_alloc_node(s, gfpflags, node); +} +#endif + static __always_inline void *kmalloc_node(size_t size, gfp_t flags, int node) { + void *ret; + if (__builtin_constant_p(size) && - size <= PAGE_SIZE && !(flags & SLUB_DMA)) { + size <= SLUB_MAX_SIZE && !(flags & SLUB_DMA)) { struct kmem_cache *s = kmalloc_slab(size); if (!s) return ZERO_SIZE_PTR; - return kmem_cache_alloc_node(s, flags, node); + ret = kmem_cache_alloc_node_notrace(s, flags, node); + + trace_kmalloc_node(_THIS_IP_, ret, + size, s->size, flags, node); + + return ret; } return __kmalloc_node(size, flags, node); } #endif +void __init kmem_cache_init_late(void); + #endif /* _LINUX_SLUB_DEF_H */ diff --git a/include/linux/smp.h b/include/linux/smp.h index 715196b09d6..9e3d8af0920 100644 --- a/include/linux/smp.h +++ b/include/linux/smp.h @@ -38,7 +38,7 @@ int smp_call_function_single(int cpuid, void (*func) (void *info), void *info, /* * main cross-CPU interfaces, handles INIT, TLB flush, STOP, etc. 
* (defined in asm header): - */ + */ /* * stops all CPUs but the current one: @@ -82,7 +82,8 @@ smp_call_function_mask(cpumask_t mask, void(*func)(void *info), void *info, return 0; } -void __smp_call_function_single(int cpuid, struct call_single_data *data); +void __smp_call_function_single(int cpuid, struct call_single_data *data, + int wait); /* * Generic and arch helpers @@ -121,6 +122,8 @@ extern unsigned int setup_max_cpus; #else /* !SMP */ +static inline void smp_send_stop(void) { } + /* * These macros fold the SMP functionality into a single CPU system */ @@ -174,7 +177,12 @@ static inline void init_call_single_data(void) #define get_cpu() ({ preempt_disable(); smp_processor_id(); }) #define put_cpu() preempt_enable() -#define put_cpu_no_resched() preempt_enable_no_resched() + +/* + * Callback to arch code if there's nosmp or maxcpus=0 on the + * boot command line: + */ +extern void arch_disable_smp_support(void); void smp_setup_processor_id(void); diff --git a/include/linux/smsc911x.h b/include/linux/smsc911x.h index 1cbf0313add..5241e4fb4ec 100644 --- a/include/linux/smsc911x.h +++ b/include/linux/smsc911x.h @@ -43,5 +43,18 @@ struct smsc911x_platform_config { /* Constants for flags */ #define SMSC911X_USE_16BIT (BIT(0)) #define SMSC911X_USE_32BIT (BIT(1)) +#define SMSC911X_FORCE_INTERNAL_PHY (BIT(2)) +#define SMSC911X_FORCE_EXTERNAL_PHY (BIT(3)) +#define SMSC911X_SAVE_MAC_ADDRESS (BIT(4)) + +/* + * SMSC911X_SWAP_FIFO: + * Enables software byte swap for fifo data. Should only be used as a + * "last resort" in the case of big endian mode on boards with incorrectly + * routed data bus to older devices such as LAN9118. Newer devices such as + * LAN9221 can handle this in hardware, there are registers to control + * this swapping but the driver doesn't currently use them. 
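To show how the new smsc911x flag bits combine, here is a hypothetical board configuration; only .flags is filled in, and the "smsc911x" platform-device name this would attach to is an assumption rather than something shown in this hunk.

#include <linux/smsc911x.h>

/* Board says: 16-bit bus, use the internal PHY, keep the bootloader MAC. */
static struct smsc911x_platform_config example_smsc911x_config = {
	.flags = SMSC911X_USE_16BIT |
		 SMSC911X_FORCE_INTERNAL_PHY |
		 SMSC911X_SAVE_MAC_ADDRESS,
};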
+ */ +#define SMSC911X_SWAP_FIFO (BIT(5)) #endif /* __LINUX_SMSC911X_H__ */ diff --git a/include/linux/snmp.h b/include/linux/snmp.h index aee3f1e1d1c..0f953fe4041 100644 --- a/include/linux/snmp.h +++ b/include/linux/snmp.h @@ -18,7 +18,7 @@ enum { IPSTATS_MIB_NUM = 0, - IPSTATS_MIB_INRECEIVES, /* InReceives */ + IPSTATS_MIB_INPKTS, /* InReceives */ IPSTATS_MIB_INHDRERRORS, /* InHdrErrors */ IPSTATS_MIB_INTOOBIGERRORS, /* InTooBigErrors */ IPSTATS_MIB_INNOROUTES, /* InNoRoutes */ @@ -28,7 +28,7 @@ enum IPSTATS_MIB_INDISCARDS, /* InDiscards */ IPSTATS_MIB_INDELIVERS, /* InDelivers */ IPSTATS_MIB_OUTFORWDATAGRAMS, /* OutForwDatagrams */ - IPSTATS_MIB_OUTREQUESTS, /* OutRequests */ + IPSTATS_MIB_OUTPKTS, /* OutRequests */ IPSTATS_MIB_OUTDISCARDS, /* OutDiscards */ IPSTATS_MIB_OUTNOROUTES, /* OutNoRoutes */ IPSTATS_MIB_REASMTIMEOUT, /* ReasmTimeout */ @@ -42,6 +42,12 @@ enum IPSTATS_MIB_OUTMCASTPKTS, /* OutMcastPkts */ IPSTATS_MIB_INBCASTPKTS, /* InBcastPkts */ IPSTATS_MIB_OUTBCASTPKTS, /* OutBcastPkts */ + IPSTATS_MIB_INOCTETS, /* InOctets */ + IPSTATS_MIB_OUTOCTETS, /* OutOctets */ + IPSTATS_MIB_INMCASTOCTETS, /* InMcastOctets */ + IPSTATS_MIB_OUTMCASTOCTETS, /* OutMcastOctets */ + IPSTATS_MIB_INBCASTOCTETS, /* InBcastOctets */ + IPSTATS_MIB_OUTBCASTOCTETS, /* OutBcastOctets */ __IPSTATS_MIB_MAX }; diff --git a/include/linux/socket.h b/include/linux/socket.h index 20fc4bbfca4..3b461dffe24 100644 --- a/include/linux/socket.h +++ b/include/linux/socket.h @@ -24,10 +24,12 @@ struct __kernel_sockaddr_storage { #include <linux/types.h> /* pid_t */ #include <linux/compiler.h> /* __user */ -#ifdef CONFIG_PROC_FS +#ifdef __KERNEL__ +# ifdef CONFIG_PROC_FS struct seq_file; extern void socket_seq_show(struct seq_file *seq); -#endif +# endif +#endif /* __KERNEL__ */ typedef unsigned short sa_family_t; @@ -179,6 +181,7 @@ struct ucred { #define AF_ASH 18 /* Ash */ #define AF_ECONET 19 /* Acorn Econet */ #define AF_ATMSVC 20 /* ATM SVCs */ +#define AF_RDS 21 /* RDS sockets */ #define AF_SNA 22 /* Linux SNA Project (nutters!) */ #define AF_IRDA 23 /* IRDA sockets */ #define AF_PPPOX 24 /* PPPoX sockets */ @@ -191,7 +194,8 @@ struct ucred { #define AF_RXRPC 33 /* RxRPC sockets */ #define AF_ISDN 34 /* mISDN sockets */ #define AF_PHONET 35 /* Phonet sockets */ -#define AF_MAX 36 /* For now.. */ +#define AF_IEEE802154 36 /* IEEE802154 sockets */ +#define AF_MAX 37 /* For now.. */ /* Protocol families, same as address families. */ #define PF_UNSPEC AF_UNSPEC @@ -217,6 +221,7 @@ struct ucred { #define PF_ASH AF_ASH #define PF_ECONET AF_ECONET #define PF_ATMSVC AF_ATMSVC +#define PF_RDS AF_RDS #define PF_SNA AF_SNA #define PF_IRDA AF_IRDA #define PF_PPPOX AF_PPPOX @@ -229,6 +234,7 @@ struct ucred { #define PF_RXRPC AF_RXRPC #define PF_ISDN AF_ISDN #define PF_PHONET AF_PHONET +#define PF_IEEE802154 AF_IEEE802154 #define PF_MAX AF_MAX /* Maximum queue length specifiable by listen. 
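The new SIOCSHWTSTAMP ioctl is driven from user space with struct hwtstamp_config from <linux/net_tstamp.h>, which is not part of this hunk; the sketch below assumes that header's HWTSTAMP_TX_ON / HWTSTAMP_FILTER_ALL constants and trims error handling.

#include <string.h>
#include <sys/ioctl.h>
#include <sys/socket.h>
#include <net/if.h>
#include <linux/sockios.h>
#include <linux/net_tstamp.h>

/* Ask the NIC to time stamp every transmitted and received packet. */
static int example_enable_hwtstamp(int sock, const char *ifname)
{
	struct hwtstamp_config cfg;
	struct ifreq ifr;

	memset(&cfg, 0, sizeof(cfg));
	cfg.tx_type = HWTSTAMP_TX_ON;
	cfg.rx_filter = HWTSTAMP_FILTER_ALL;

	memset(&ifr, 0, sizeof(ifr));
	strncpy(ifr.ifr_name, ifname, IFNAMSIZ - 1);
	ifr.ifr_data = (char *)&cfg;

	return ioctl(sock, SIOCSHWTSTAMP, &ifr);
}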
*/ @@ -298,14 +304,16 @@ struct ucred { #define SOL_PPPOL2TP 273 #define SOL_BLUETOOTH 274 #define SOL_PNPIPE 275 +#define SOL_RDS 276 +#define SOL_IUCV 277 /* IPX options */ #define IPX_TYPE 1 #ifdef __KERNEL__ extern int memcpy_fromiovec(unsigned char *kdata, struct iovec *iov, int len); -extern int memcpy_fromiovecend(unsigned char *kdata, struct iovec *iov, - int offset, int len); +extern int memcpy_fromiovecend(unsigned char *kdata, const struct iovec *iov, + int offset, int len); extern int csum_partial_copy_fromiovecend(unsigned char *kdata, struct iovec *iov, int offset, @@ -313,6 +321,8 @@ extern int csum_partial_copy_fromiovecend(unsigned char *kdata, extern int verify_iovec(struct msghdr *m, struct iovec *iov, struct sockaddr *address, int mode); extern int memcpy_toiovec(struct iovec *v, unsigned char *kdata, int len); +extern int memcpy_toiovecend(const struct iovec *v, unsigned char *kdata, + int offset, int len); extern int move_addr_to_user(struct sockaddr *kaddr, int klen, void __user *uaddr, int __user *ulen); extern int move_addr_to_kernel(void __user *uaddr, int ulen, struct sockaddr *kaddr); extern int put_cmsg(struct msghdr*, int level, int type, int len, void *data); diff --git a/include/linux/sockios.h b/include/linux/sockios.h index abef7596655..241f179347d 100644 --- a/include/linux/sockios.h +++ b/include/linux/sockios.h @@ -122,6 +122,9 @@ #define SIOCBRADDIF 0x89a2 /* add interface to bridge */ #define SIOCBRDELIF 0x89a3 /* remove interface from bridge */ +/* hardware time stamping: parameters in linux/net_tstamp.h */ +#define SIOCSHWTSTAMP 0x89b0 + /* Device private ioctl calls */ /* diff --git a/include/linux/sonypi.h b/include/linux/sonypi.h index f41ffd7c2dd..34c4475ac4a 100644 --- a/include/linux/sonypi.h +++ b/include/linux/sonypi.h @@ -103,6 +103,14 @@ #define SONYPI_EVENT_WIRELESS_OFF 61 #define SONYPI_EVENT_ZOOM_IN_PRESSED 62 #define SONYPI_EVENT_ZOOM_OUT_PRESSED 63 +#define SONYPI_EVENT_CD_EJECT_PRESSED 64 +#define SONYPI_EVENT_MODEKEY_PRESSED 65 +#define SONYPI_EVENT_PKEY_P4 66 +#define SONYPI_EVENT_PKEY_P5 67 +#define SONYPI_EVENT_SETTINGKEY_PRESSED 68 +#define SONYPI_EVENT_VOLUME_INC_PRESSED 69 +#define SONYPI_EVENT_VOLUME_DEC_PRESSED 70 +#define SONYPI_EVENT_BRIGHTNESS_PRESSED 71 /* get/set brightness */ #define SONYPI_IOCGBRT _IOR('v', 0, __u8) diff --git a/include/linux/sound.h b/include/linux/sound.h index 9e2a94feed6..44dcf057043 100644 --- a/include/linux/sound.h +++ b/include/linux/sound.h @@ -25,6 +25,7 @@ #define SND_DEV_AMIDI 13 /* Like /dev/midi (obsolete) */ #define SND_DEV_ADMMIDI 14 /* Like /dev/dmmidi (onsolete) */ +#ifdef __KERNEL__ /* * Sound core interface functions */ @@ -40,3 +41,4 @@ extern void unregister_sound_special(int unit); extern void unregister_sound_mixer(int unit); extern void unregister_sound_midi(int unit); extern void unregister_sound_dsp(int unit); +#endif /* __KERNEL__ */ diff --git a/include/linux/soundcard.h b/include/linux/soundcard.h index 523d069c862..1904afedb82 100644 --- a/include/linux/soundcard.h +++ b/include/linux/soundcard.h @@ -1045,50 +1045,36 @@ typedef struct mixer_vol_table { */ #define LOCL_STARTAUDIO 1 -#if (!defined(__KERNEL__) && !defined(KERNEL) && !defined(INKERNEL) && !defined(_KERNEL)) || defined(USE_SEQ_MACROS) +#if !defined(__KERNEL__) || defined(USE_SEQ_MACROS) /* * Some convenience macros to simplify programming of the * /dev/sequencer interface * - * These macros define the API which should be used when possible. 
+ * This is a legacy interface for applications written against + * the OSSlib-3.8 style interface. It is no longer possible + * to actually link against OSSlib with this header, but we + * still provide these macros for programs using them. + * + * If you want to use OSSlib, it is recommended that you get + * the GPL version of OSS-4.x and build against that version + * of the header. + * + * We redefine the extern keyword so that make headers_check + * does not complain about SEQ_USE_EXTBUF. */ #define SEQ_DECLAREBUF() SEQ_USE_EXTBUF() void seqbuf_dump(void); /* This function must be provided by programs */ -extern int OSS_init(int seqfd, int buflen); -extern void OSS_seqbuf_dump(int fd, unsigned char *buf, int buflen); -extern void OSS_seq_advbuf(int len, int fd, unsigned char *buf, int buflen); -extern void OSS_seq_needbuf(int len, int fd, unsigned char *buf, int buflen); -extern void OSS_patch_caching(int dev, int chn, int patch, - int fd, unsigned char *buf, int buflen); -extern void OSS_drum_caching(int dev, int chn, int patch, - int fd, unsigned char *buf, int buflen); -extern void OSS_write_patch(int fd, unsigned char *buf, int len); -extern int OSS_write_patch2(int fd, unsigned char *buf, int len); - #define SEQ_PM_DEFINES int __foo_bar___ -#ifdef OSSLIB -# define SEQ_USE_EXTBUF() \ - extern unsigned char *_seqbuf; \ - extern int _seqbuflen;extern int _seqbufptr -# define SEQ_DEFINEBUF(len) SEQ_USE_EXTBUF();static int _requested_seqbuflen=len -# define _SEQ_ADVBUF(len) OSS_seq_advbuf(len, seqfd, _seqbuf, _seqbuflen) -# define _SEQ_NEEDBUF(len) OSS_seq_needbuf(len, seqfd, _seqbuf, _seqbuflen) -# define SEQ_DUMPBUF() OSS_seqbuf_dump(seqfd, _seqbuf, _seqbuflen) - -# define SEQ_LOAD_GMINSTR(dev, instr) \ - OSS_patch_caching(dev, -1, instr, seqfd, _seqbuf, _seqbuflen) -# define SEQ_LOAD_GMDRUM(dev, drum) \ - OSS_drum_caching(dev, -1, drum, seqfd, _seqbuf, _seqbuflen) -#else /* !OSSLIB */ - -# define SEQ_LOAD_GMINSTR(dev, instr) -# define SEQ_LOAD_GMDRUM(dev, drum) - -# define SEQ_USE_EXTBUF() \ - extern unsigned char _seqbuf[]; \ - extern int _seqbuflen;extern int _seqbufptr + +#define SEQ_LOAD_GMINSTR(dev, instr) +#define SEQ_LOAD_GMDRUM(dev, drum) + +#define _SEQ_EXTERN extern +#define SEQ_USE_EXTBUF() \ + _SEQ_EXTERN unsigned char _seqbuf[]; \ + _SEQ_EXTERN int _seqbuflen; _SEQ_EXTERN int _seqbufptr #ifndef USE_SIMPLE_MACROS /* Sample seqbuf_dump() implementation: @@ -1131,7 +1117,6 @@ extern int OSS_write_patch2(int fd, unsigned char *buf, int len); */ #define _SEQ_NEEDBUF(len) /* empty */ #endif -#endif /* !OSSLIB */ #define SEQ_VOLUME_MODE(dev, mode) {_SEQ_NEEDBUF(8);\ _seqbuf[_seqbufptr] = SEQ_EXTENDED;\ @@ -1215,14 +1200,8 @@ extern int OSS_write_patch2(int fd, unsigned char *buf, int len); _CHN_COMMON(dev, MIDI_CHN_PRESSURE, chn, pressure, 0, 0) #define SEQ_SET_PATCH SEQ_PGM_CHANGE -#ifdef OSSLIB -# define SEQ_PGM_CHANGE(dev, chn, patch) \ - {OSS_patch_caching(dev, chn, patch, seqfd, _seqbuf, _seqbuflen); \ - _CHN_COMMON(dev, MIDI_PGM_CHANGE, chn, patch, 0, 0);} -#else -# define SEQ_PGM_CHANGE(dev, chn, patch) \ +#define SEQ_PGM_CHANGE(dev, chn, patch) \ _CHN_COMMON(dev, MIDI_PGM_CHANGE, chn, patch, 0, 0) -#endif #define SEQ_CONTROL(dev, chn, controller, value) \ _CHN_COMMON(dev, MIDI_CTL_CHANGE, chn, controller, 0, value) @@ -1300,19 +1279,12 @@ extern int OSS_write_patch2(int fd, unsigned char *buf, int len); /* * Patch loading. 
*/ -#ifdef OSSLIB -# define SEQ_WRPATCH(patchx, len) \ - OSS_write_patch(seqfd, (char*)(patchx), len) -# define SEQ_WRPATCH2(patchx, len) \ - OSS_write_patch2(seqfd, (char*)(patchx), len) -#else -# define SEQ_WRPATCH(patchx, len) \ +#define SEQ_WRPATCH(patchx, len) \ {if (_seqbufptr) SEQ_DUMPBUF();\ if (write(seqfd, (char*)(patchx), len)==-1) \ perror("Write patch: /dev/sequencer");} -# define SEQ_WRPATCH2(patchx, len) \ +#define SEQ_WRPATCH2(patchx, len) \ (SEQ_DUMPBUF(), write(seqfd, (char*)(patchx), len)) -#endif #endif #endif diff --git a/include/linux/spi/ad7879.h b/include/linux/spi/ad7879.h new file mode 100644 index 00000000000..4231104c9af --- /dev/null +++ b/include/linux/spi/ad7879.h @@ -0,0 +1,35 @@ +/* linux/spi/ad7879.h */ + +/* Touchscreen characteristics vary between boards and models. The + * platform_data for the device's "struct device" holds this information. + * + * It's OK if the min/max values are zero. + */ +struct ad7879_platform_data { + u16 model; /* 7879 */ + u16 x_plate_ohms; + u16 x_min, x_max; + u16 y_min, y_max; + u16 pressure_min, pressure_max; + + /* [0..255] 0=OFF Starts at 1=550us and goes + * all the way to 9.440ms in steps of 35us. + */ + u8 pen_down_acc_interval; + /* [0..15] Starts at 0=128us and goes all the + * way to 4.096ms in steps of 128us. + */ + u8 first_conversion_delay; + /* [0..3] 0 = 2us, 1 = 4us, 2 = 8us, 3 = 16us */ + u8 acquisition_time; + /* [0..3] Average X middle samples 0 = 2, 1 = 4, 2 = 8, 3 = 16 */ + u8 averaging; + /* [0..3] Perform X measurements 0 = OFF, + * 1 = 4, 2 = 8, 3 = 16 (median > averaging) + */ + u8 median; + /* 1 = AUX/VBAT/GPIO set to GPIO Output */ + u8 gpio_output; + /* Initial GPIO pin state (valid if gpio_output = 1) */ + u8 gpio_default; +}; diff --git a/include/linux/spi/ads7846.h b/include/linux/spi/ads7846.h index 05eab2f11e6..51948eb6927 100644 --- a/include/linux/spi/ads7846.h +++ b/include/linux/spi/ads7846.h @@ -17,6 +17,7 @@ struct ads7846_platform_data { u16 vref_mv; /* external vref value, milliVolts */ bool keep_vref_on; /* set to keep vref on for differential * measurements as well */ + bool swap_xy; /* swap x and y axes */ /* Settling time of the analog signals; a function of Vcc and the * capacitance on the X/Y drivers. If set to non-zero, two samples @@ -51,5 +52,6 @@ struct ads7846_platform_data { void **filter_data); int (*filter) (void *filter_data, int data_idx, int *val); void (*filter_cleanup)(void *filter_data); + void (*wait_for_sync)(void); }; diff --git a/include/linux/spi/eeprom.h b/include/linux/spi/eeprom.h index 1085212c446..306e7b1c69e 100644 --- a/include/linux/spi/eeprom.h +++ b/include/linux/spi/eeprom.h @@ -1,6 +1,8 @@ #ifndef __LINUX_SPI_EEPROM_H #define __LINUX_SPI_EEPROM_H +#include <linux/memory.h> + /* * Put one of these structures in platform_data for SPI EEPROMS handled * by the "at25" driver. On SPI, most EEPROMS understand the same core @@ -17,6 +19,10 @@ struct spi_eeprom { #define EE_ADDR2 0x0002 /* 16 bit addrs */ #define EE_ADDR3 0x0004 /* 24 bit addrs */ #define EE_READONLY 0x0008 /* disallow writes */ + + /* for exporting this chip's data to other kernel code */ + void (*setup)(struct memory_accessor *mem, void *context); + void *context; }; #endif /* __LINUX_SPI_EEPROM_H */ diff --git a/include/linux/spi/libertas_spi.h b/include/linux/spi/libertas_spi.h new file mode 100644 index 00000000000..1b5d5384fcd --- /dev/null +++ b/include/linux/spi/libertas_spi.h @@ -0,0 +1,29 @@ +/* + * board-specific data for the libertas_spi driver. 
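The new setup() hook in struct spi_eeprom lets board code consume the EEPROM contents through the memory_accessor it is handed. A loose sketch follows; the spi_eeprom size/name fields and the memory_accessor read() signature are assumptions based on <linux/spi/eeprom.h> and <linux/memory.h>, and example_set_mac() is purely hypothetical.

#include <linux/memory.h>
#include <linux/spi/eeprom.h>

extern void example_set_mac(const char *mac);	/* hypothetical consumer */

/* Called by the at25 driver once the EEPROM is accessible. */
static void example_eeprom_ready(struct memory_accessor *mem, void *context)
{
	char mac[6];

	/* board stores the MAC address at a (hypothetical) fixed offset */
	if (mem->read(mem, mac, 0x90, sizeof(mac)) == sizeof(mac))
		example_set_mac(mac);
}

static struct spi_eeprom example_at25_pdata = {
	.name		= "at25256",
	.byte_len	= 32768,
	.page_size	= 64,
	.flags		= EE_ADDR2,		/* 16-bit addressing */
	.setup		= example_eeprom_ready,
};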
+ * + * Copyright 2008 Analog Devices Inc. + * + * This program is free software; you can redistribute it and/or modify + * it under the terms of the GNU General Public License as published by + * the Free Software Foundation; either version 2 of the License, or (at + * your option) any later version. + */ +#ifndef _LIBERTAS_SPI_H_ +#define _LIBERTAS_SPI_H_ + +struct spi_device; + +struct libertas_spi_platform_data { + /* There are two ways to read data from the WLAN module's SPI + * interface. Setting 0 or 1 here controls which one is used. + * + * Usually you want to set use_dummy_writes = 1. + * However, if that doesn't work or if you are using a slow SPI clock + * speed, you may want to use 0 here. */ + u16 use_dummy_writes; + + /* Board specific setup/teardown */ + int (*setup)(struct spi_device *spi); + int (*teardown)(struct spi_device *spi); +}; +#endif diff --git a/include/linux/spi/spi.h b/include/linux/spi/spi.h index 68bb1c501d0..c47c4b4da97 100644 --- a/include/linux/spi/spi.h +++ b/include/linux/spi/spi.h @@ -80,6 +80,8 @@ struct spi_device { #define SPI_LSB_FIRST 0x08 /* per-word bits-on-wire */ #define SPI_3WIRE 0x10 /* SI/SO signals shared */ #define SPI_LOOP 0x20 /* loopback mode */ +#define SPI_NO_CS 0x40 /* 1 dev/bus, no chipselect */ +#define SPI_READY 0x80 /* slave pulls low to pause */ u8 bits_per_word; int irq; void *controller_state; @@ -204,6 +206,7 @@ static inline void spi_unregister_driver(struct spi_driver *sdrv) * SPI slaves, and are numbered from zero to num_chipselects. * each slave has a chipselect signal, but it's common that not * every chipselect is connected to a slave. + * @dma_alignment: SPI controller constraint on DMA buffers alignment. * @setup: updates the device mode and clocking records used by a * device's SPI controller; protocol code may call this. This * must fail if an unrecognized or unsupported mode is requested. @@ -239,7 +242,24 @@ struct spi_master { */ u16 num_chipselect; - /* setup mode and clock, etc (spi driver may call many times) */ + /* some SPI controllers pose alignment requirements on DMAable + * buffers; let protocol drivers know about these requirements. + */ + u16 dma_alignment; + + /* spi_device.mode flags understood by this controller driver */ + u16 mode_bits; + + /* other constraints relevant to this driver */ + u16 flags; +#define SPI_MASTER_HALF_DUPLEX BIT(0) /* can't do full duplex */ + + /* Setup mode and clock, etc (spi driver may call many times). + * + * IMPORTANT: this may be called when transfers to another + * device are active. DO NOT UPDATE SHARED REGISTERS in ways + * which could break those transfers. + */ int (*setup)(struct spi_device *spi); /* bidirectional bulk transfers @@ -512,30 +532,7 @@ static inline void spi_message_free(struct spi_message *m) kfree(m); } -/** - * spi_setup - setup SPI mode and clock rate - * @spi: the device whose settings are being modified - * Context: can sleep, and no requests are queued to the device - * - * SPI protocol drivers may need to update the transfer mode if the - * device doesn't work with its default. They may likewise need - * to update clock rates or word sizes from initial values. This function - * changes those settings, and must be called from a context that can sleep. - * Except for SPI_CS_HIGH, which takes effect immediately, the changes take - * effect the next time the device is selected and data is transferred to - * or from it. When this function returns, the spi device is deselected. 
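With spi_setup() now an out-of-line function that validates the requested mode against the master's mode_bits, a protocol driver's probe() commonly follows the pattern sketched here; the device parameters below are invented.

/* Hypothetical SPI protocol driver probe. */
static int example_spi_probe(struct spi_device *spi)
{
	int ret;

	spi->mode = SPI_MODE_3;			/* CPOL=1, CPHA=1 */
	spi->bits_per_word = 16;
	spi->max_speed_hz = 2 * 1000 * 1000;

	ret = spi_setup(spi);
	if (ret < 0)
		return ret;	/* controller driver rejected the mode */

	/* ... allocate state and register with the subsystem ... */
	return 0;
}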
- * - * Note that this call will fail if the protocol driver specifies an option - * that the underlying controller or its driver does not support. For - * example, not all hardware supports wire transfers using nine bit words, - * LSB-first wire encoding, or active-high chipselects. - */ -static inline int -spi_setup(struct spi_device *spi) -{ - return spi->master->setup(spi); -} - +extern int spi_setup(struct spi_device *spi); /** * spi_async - asynchronous SPI transfer diff --git a/include/linux/spi/spi_bitbang.h b/include/linux/spi/spi_bitbang.h index bf8de281b4e..eed4254bd50 100644 --- a/include/linux/spi/spi_bitbang.h +++ b/include/linux/spi/spi_bitbang.h @@ -83,6 +83,13 @@ extern int spi_bitbang_stop(struct spi_bitbang *spi); * int getmiso(struct spi_device *); * void spidelay(unsigned); * + * setsck()'s is_on parameter is a zero/nonzero boolean. + * + * setmosi()'s is_on parameter is a zero/nonzero boolean. + * + * getmiso() is required to return 0 or 1 only. Any other value is invalid + * and will result in improper operation. + * * A non-inlined routine would call bitbang_txrx_*() routines. The * main loop could easily compile down to a handful of instructions, * especially if the delay is a NOP (to run at peak speed). diff --git a/include/linux/spi/spi_gpio.h b/include/linux/spi/spi_gpio.h index 0f01a0f1f40..ca6782ee4b9 100644 --- a/include/linux/spi/spi_gpio.h +++ b/include/linux/spi/spi_gpio.h @@ -25,10 +25,16 @@ * ... * }; * + * If chipselect is not used (there's only one device on the bus), assign + * SPI_GPIO_NO_CHIPSELECT to the controller_data: + * .controller_data = (void *) SPI_GPIO_NO_CHIPSELECT; + * * If the bitbanged bus is later switched to a "native" controller, * that platform_device and controller_data should be removed. */ +#define SPI_GPIO_NO_CHIPSELECT ((unsigned long)-1l) + /** * struct spi_gpio_platform_data - parameter for bitbanged SPI master * @sck: number of the GPIO used for clock output diff --git a/include/linux/spi/spidev.h b/include/linux/spi/spidev.h index c93ef9d42a0..bf0570a84f7 100644 --- a/include/linux/spi/spidev.h +++ b/include/linux/spi/spidev.h @@ -22,6 +22,7 @@ #ifndef SPIDEV_H #define SPIDEV_H +#include <linux/types.h> /* User space versions of kernel symbols for SPI clocking modes, * matching <linux/spi/spi.h> @@ -39,6 +40,8 @@ #define SPI_LSB_FIRST 0x08 #define SPI_3WIRE 0x10 #define SPI_LOOP 0x20 +#define SPI_NO_CS 0x40 +#define SPI_READY 0x80 /*---------------------------------------------------------------------------*/ diff --git a/include/linux/spi/wl12xx.h b/include/linux/spi/wl12xx.h new file mode 100644 index 00000000000..11430cab2aa --- /dev/null +++ b/include/linux/spi/wl12xx.h @@ -0,0 +1,31 @@ +/* + * This file is part of wl12xx + * + * Copyright (C) 2009 Nokia Corporation + * + * Contact: Kalle Valo <kalle.valo@nokia.com> + * + * This program is free software; you can redistribute it and/or + * modify it under the terms of the GNU General Public License + * version 2 as published by the Free Software Foundation. + * + * This program is distributed in the hope that it will be useful, but + * WITHOUT ANY WARRANTY; without even the implied warranty of + * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the GNU + * General Public License for more details. 
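Putting the spi_gpio pieces together, a single-slave bitbanged bus with no chip-select line could be declared as below; only SPI_GPIO_NO_CHIPSELECT comes from the hunk shown here, while the "spi_gpio" device name, the spi_gpio_platform_data field names, the GPIO/bus numbers and the "example-dev" modalias are assumptions for illustration.

#include <linux/platform_device.h>
#include <linux/spi/spi.h>
#include <linux/spi/spi_gpio.h>

#define EXAMPLE_SPI_BUS		2

static struct spi_gpio_platform_data example_spi_gpio_pdata = {
	.sck		= 16,
	.mosi		= 17,
	.miso		= 18,
	.num_chipselect	= 1,
};

static struct platform_device example_spi_gpio_device = {
	.name	= "spi_gpio",
	.id	= EXAMPLE_SPI_BUS,
	.dev	= {
		.platform_data = &example_spi_gpio_pdata,
	},
};

static struct spi_board_info example_spi_devices[] __initdata = {
	{
		.modalias	 = "example-dev",
		.bus_num	 = EXAMPLE_SPI_BUS,
		.chip_select	 = 0,
		.max_speed_hz	 = 1000000,
		.controller_data = (void *)SPI_GPIO_NO_CHIPSELECT,
	},
};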
+ * + * You should have received a copy of the GNU General Public License + * along with this program; if not, write to the Free Software + * Foundation, Inc., 51 Franklin St, Fifth Floor, Boston, MA + * 02110-1301 USA + * + */ + +#ifndef _LINUX_SPI_WL12XX_H +#define _LINUX_SPI_WL12XX_H + +struct wl12xx_platform_data { + void (*set_power)(bool enable); +}; + +#endif diff --git a/include/linux/spinlock.h b/include/linux/spinlock.h index e0c0fccced4..f0ca7a7a175 100644 --- a/include/linux/spinlock.h +++ b/include/linux/spinlock.h @@ -124,7 +124,17 @@ do { \ #ifdef CONFIG_GENERIC_LOCKBREAK #define spin_is_contended(lock) ((lock)->break_lock) #else + +#ifdef __raw_spin_is_contended #define spin_is_contended(lock) __raw_spin_is_contended(&(lock)->raw_lock) +#else +#define spin_is_contended(lock) (((void)(lock), 0)) +#endif /*__raw_spin_is_contended*/ +#endif + +/* The lock does not imply full memory barrier. */ +#ifndef ARCH_HAS_SMP_MB_AFTER_LOCK +static inline void smp_mb__after_lock(void) { smp_mb(); } #endif /** @@ -133,24 +143,17 @@ do { \ */ #define spin_unlock_wait(lock) __raw_spin_unlock_wait(&(lock)->raw_lock) -/* - * Pull the _spin_*()/_read_*()/_write_*() functions/declarations: - */ -#if defined(CONFIG_SMP) || defined(CONFIG_DEBUG_SPINLOCK) -# include <linux/spinlock_api_smp.h> -#else -# include <linux/spinlock_api_up.h> -#endif - #ifdef CONFIG_DEBUG_SPINLOCK extern void _raw_spin_lock(spinlock_t *lock); #define _raw_spin_lock_flags(lock, flags) _raw_spin_lock(lock) extern int _raw_spin_trylock(spinlock_t *lock); extern void _raw_spin_unlock(spinlock_t *lock); extern void _raw_read_lock(rwlock_t *lock); +#define _raw_read_lock_flags(lock, flags) _raw_read_lock(lock) extern int _raw_read_trylock(rwlock_t *lock); extern void _raw_read_unlock(rwlock_t *lock); extern void _raw_write_lock(rwlock_t *lock); +#define _raw_write_lock_flags(lock, flags) _raw_write_lock(lock) extern int _raw_write_trylock(rwlock_t *lock); extern void _raw_write_unlock(rwlock_t *lock); #else @@ -160,9 +163,13 @@ do { \ # define _raw_spin_trylock(lock) __raw_spin_trylock(&(lock)->raw_lock) # define _raw_spin_unlock(lock) __raw_spin_unlock(&(lock)->raw_lock) # define _raw_read_lock(rwlock) __raw_read_lock(&(rwlock)->raw_lock) +# define _raw_read_lock_flags(lock, flags) \ + __raw_read_lock_flags(&(lock)->raw_lock, *(flags)) # define _raw_read_trylock(rwlock) __raw_read_trylock(&(rwlock)->raw_lock) # define _raw_read_unlock(rwlock) __raw_read_unlock(&(rwlock)->raw_lock) # define _raw_write_lock(rwlock) __raw_write_lock(&(rwlock)->raw_lock) +# define _raw_write_lock_flags(lock, flags) \ + __raw_write_lock_flags(&(lock)->raw_lock, *(flags)) # define _raw_write_trylock(rwlock) __raw_write_trylock(&(rwlock)->raw_lock) # define _raw_write_unlock(rwlock) __raw_write_unlock(&(rwlock)->raw_lock) #endif @@ -252,50 +259,16 @@ do { \ #define spin_lock_irq(lock) _spin_lock_irq(lock) #define spin_lock_bh(lock) _spin_lock_bh(lock) - #define read_lock_irq(lock) _read_lock_irq(lock) #define read_lock_bh(lock) _read_lock_bh(lock) - #define write_lock_irq(lock) _write_lock_irq(lock) #define write_lock_bh(lock) _write_lock_bh(lock) - -/* - * We inline the unlock functions in the nondebug case: - */ -#if defined(CONFIG_DEBUG_SPINLOCK) || defined(CONFIG_PREEMPT) || \ - !defined(CONFIG_SMP) -# define spin_unlock(lock) _spin_unlock(lock) -# define read_unlock(lock) _read_unlock(lock) -# define write_unlock(lock) _write_unlock(lock) -# define spin_unlock_irq(lock) _spin_unlock_irq(lock) -# define read_unlock_irq(lock) 
_read_unlock_irq(lock) -# define write_unlock_irq(lock) _write_unlock_irq(lock) -#else -# define spin_unlock(lock) \ - do {__raw_spin_unlock(&(lock)->raw_lock); __release(lock); } while (0) -# define read_unlock(lock) \ - do {__raw_read_unlock(&(lock)->raw_lock); __release(lock); } while (0) -# define write_unlock(lock) \ - do {__raw_write_unlock(&(lock)->raw_lock); __release(lock); } while (0) -# define spin_unlock_irq(lock) \ -do { \ - __raw_spin_unlock(&(lock)->raw_lock); \ - __release(lock); \ - local_irq_enable(); \ -} while (0) -# define read_unlock_irq(lock) \ -do { \ - __raw_read_unlock(&(lock)->raw_lock); \ - __release(lock); \ - local_irq_enable(); \ -} while (0) -# define write_unlock_irq(lock) \ -do { \ - __raw_write_unlock(&(lock)->raw_lock); \ - __release(lock); \ - local_irq_enable(); \ -} while (0) -#endif +#define spin_unlock(lock) _spin_unlock(lock) +#define read_unlock(lock) _read_unlock(lock) +#define write_unlock(lock) _write_unlock(lock) +#define spin_unlock_irq(lock) _spin_unlock_irq(lock) +#define read_unlock_irq(lock) _read_unlock_irq(lock) +#define write_unlock_irq(lock) _write_unlock_irq(lock) #define spin_unlock_irqrestore(lock, flags) \ do { \ @@ -364,4 +337,13 @@ extern int _atomic_dec_and_lock(atomic_t *atomic, spinlock_t *lock); */ #define spin_can_lock(lock) (!spin_is_locked(lock)) +/* + * Pull the _spin_*()/_read_*()/_write_*() functions/declarations: + */ +#if defined(CONFIG_SMP) || defined(CONFIG_DEBUG_SPINLOCK) +# include <linux/spinlock_api_smp.h> +#else +# include <linux/spinlock_api_up.h> +#endif + #endif /* __LINUX_SPINLOCK_H */ diff --git a/include/linux/spinlock_api_smp.h b/include/linux/spinlock_api_smp.h index d79845d034b..7a7e18fc241 100644 --- a/include/linux/spinlock_api_smp.h +++ b/include/linux/spinlock_api_smp.h @@ -60,4 +60,398 @@ void __lockfunc _read_unlock_irqrestore(rwlock_t *lock, unsigned long flags) void __lockfunc _write_unlock_irqrestore(rwlock_t *lock, unsigned long flags) __releases(lock); +/* + * We inline the unlock functions in the nondebug case: + */ +#if !defined(CONFIG_DEBUG_SPINLOCK) && !defined(CONFIG_PREEMPT) +#define __always_inline__spin_unlock +#define __always_inline__read_unlock +#define __always_inline__write_unlock +#define __always_inline__spin_unlock_irq +#define __always_inline__read_unlock_irq +#define __always_inline__write_unlock_irq +#endif + +#ifndef CONFIG_DEBUG_SPINLOCK +#ifndef CONFIG_GENERIC_LOCKBREAK + +#ifdef __always_inline__spin_lock +#define _spin_lock(lock) __spin_lock(lock) +#endif + +#ifdef __always_inline__read_lock +#define _read_lock(lock) __read_lock(lock) +#endif + +#ifdef __always_inline__write_lock +#define _write_lock(lock) __write_lock(lock) +#endif + +#ifdef __always_inline__spin_lock_bh +#define _spin_lock_bh(lock) __spin_lock_bh(lock) +#endif + +#ifdef __always_inline__read_lock_bh +#define _read_lock_bh(lock) __read_lock_bh(lock) +#endif + +#ifdef __always_inline__write_lock_bh +#define _write_lock_bh(lock) __write_lock_bh(lock) +#endif + +#ifdef __always_inline__spin_lock_irq +#define _spin_lock_irq(lock) __spin_lock_irq(lock) +#endif + +#ifdef __always_inline__read_lock_irq +#define _read_lock_irq(lock) __read_lock_irq(lock) +#endif + +#ifdef __always_inline__write_lock_irq +#define _write_lock_irq(lock) __write_lock_irq(lock) +#endif + +#ifdef __always_inline__spin_lock_irqsave +#define _spin_lock_irqsave(lock) __spin_lock_irqsave(lock) +#endif + +#ifdef __always_inline__read_lock_irqsave +#define _read_lock_irqsave(lock) __read_lock_irqsave(lock) +#endif + +#ifdef 
__always_inline__write_lock_irqsave +#define _write_lock_irqsave(lock) __write_lock_irqsave(lock) +#endif + +#endif /* !CONFIG_GENERIC_LOCKBREAK */ + +#ifdef __always_inline__spin_trylock +#define _spin_trylock(lock) __spin_trylock(lock) +#endif + +#ifdef __always_inline__read_trylock +#define _read_trylock(lock) __read_trylock(lock) +#endif + +#ifdef __always_inline__write_trylock +#define _write_trylock(lock) __write_trylock(lock) +#endif + +#ifdef __always_inline__spin_trylock_bh +#define _spin_trylock_bh(lock) __spin_trylock_bh(lock) +#endif + +#ifdef __always_inline__spin_unlock +#define _spin_unlock(lock) __spin_unlock(lock) +#endif + +#ifdef __always_inline__read_unlock +#define _read_unlock(lock) __read_unlock(lock) +#endif + +#ifdef __always_inline__write_unlock +#define _write_unlock(lock) __write_unlock(lock) +#endif + +#ifdef __always_inline__spin_unlock_bh +#define _spin_unlock_bh(lock) __spin_unlock_bh(lock) +#endif + +#ifdef __always_inline__read_unlock_bh +#define _read_unlock_bh(lock) __read_unlock_bh(lock) +#endif + +#ifdef __always_inline__write_unlock_bh +#define _write_unlock_bh(lock) __write_unlock_bh(lock) +#endif + +#ifdef __always_inline__spin_unlock_irq +#define _spin_unlock_irq(lock) __spin_unlock_irq(lock) +#endif + +#ifdef __always_inline__read_unlock_irq +#define _read_unlock_irq(lock) __read_unlock_irq(lock) +#endif + +#ifdef __always_inline__write_unlock_irq +#define _write_unlock_irq(lock) __write_unlock_irq(lock) +#endif + +#ifdef __always_inline__spin_unlock_irqrestore +#define _spin_unlock_irqrestore(lock, flags) __spin_unlock_irqrestore(lock, flags) +#endif + +#ifdef __always_inline__read_unlock_irqrestore +#define _read_unlock_irqrestore(lock, flags) __read_unlock_irqrestore(lock, flags) +#endif + +#ifdef __always_inline__write_unlock_irqrestore +#define _write_unlock_irqrestore(lock, flags) __write_unlock_irqrestore(lock, flags) +#endif + +#endif /* CONFIG_DEBUG_SPINLOCK */ + +static inline int __spin_trylock(spinlock_t *lock) +{ + preempt_disable(); + if (_raw_spin_trylock(lock)) { + spin_acquire(&lock->dep_map, 0, 1, _RET_IP_); + return 1; + } + preempt_enable(); + return 0; +} + +static inline int __read_trylock(rwlock_t *lock) +{ + preempt_disable(); + if (_raw_read_trylock(lock)) { + rwlock_acquire_read(&lock->dep_map, 0, 1, _RET_IP_); + return 1; + } + preempt_enable(); + return 0; +} + +static inline int __write_trylock(rwlock_t *lock) +{ + preempt_disable(); + if (_raw_write_trylock(lock)) { + rwlock_acquire(&lock->dep_map, 0, 1, _RET_IP_); + return 1; + } + preempt_enable(); + return 0; +} + +/* + * If lockdep is enabled then we use the non-preemption spin-ops + * even on CONFIG_PREEMPT, because lockdep assumes that interrupts are + * not re-enabled during lock-acquire (which the preempt-spin-ops do): + */ +#if !defined(CONFIG_GENERIC_LOCKBREAK) || defined(CONFIG_DEBUG_LOCK_ALLOC) + +static inline void __read_lock(rwlock_t *lock) +{ + preempt_disable(); + rwlock_acquire_read(&lock->dep_map, 0, 0, _RET_IP_); + LOCK_CONTENDED(lock, _raw_read_trylock, _raw_read_lock); +} + +static inline unsigned long __spin_lock_irqsave(spinlock_t *lock) +{ + unsigned long flags; + + local_irq_save(flags); + preempt_disable(); + spin_acquire(&lock->dep_map, 0, 0, _RET_IP_); + /* + * On lockdep we dont want the hand-coded irq-enable of + * _raw_spin_lock_flags() code, because lockdep assumes + * that interrupts are not re-enabled during lock-acquire: + */ +#ifdef CONFIG_LOCKDEP + LOCK_CONTENDED(lock, _raw_spin_trylock, _raw_spin_lock); +#else + 
_raw_spin_lock_flags(lock, &flags); +#endif + return flags; +} + +static inline void __spin_lock_irq(spinlock_t *lock) +{ + local_irq_disable(); + preempt_disable(); + spin_acquire(&lock->dep_map, 0, 0, _RET_IP_); + LOCK_CONTENDED(lock, _raw_spin_trylock, _raw_spin_lock); +} + +static inline void __spin_lock_bh(spinlock_t *lock) +{ + local_bh_disable(); + preempt_disable(); + spin_acquire(&lock->dep_map, 0, 0, _RET_IP_); + LOCK_CONTENDED(lock, _raw_spin_trylock, _raw_spin_lock); +} + +static inline unsigned long __read_lock_irqsave(rwlock_t *lock) +{ + unsigned long flags; + + local_irq_save(flags); + preempt_disable(); + rwlock_acquire_read(&lock->dep_map, 0, 0, _RET_IP_); + LOCK_CONTENDED_FLAGS(lock, _raw_read_trylock, _raw_read_lock, + _raw_read_lock_flags, &flags); + return flags; +} + +static inline void __read_lock_irq(rwlock_t *lock) +{ + local_irq_disable(); + preempt_disable(); + rwlock_acquire_read(&lock->dep_map, 0, 0, _RET_IP_); + LOCK_CONTENDED(lock, _raw_read_trylock, _raw_read_lock); +} + +static inline void __read_lock_bh(rwlock_t *lock) +{ + local_bh_disable(); + preempt_disable(); + rwlock_acquire_read(&lock->dep_map, 0, 0, _RET_IP_); + LOCK_CONTENDED(lock, _raw_read_trylock, _raw_read_lock); +} + +static inline unsigned long __write_lock_irqsave(rwlock_t *lock) +{ + unsigned long flags; + + local_irq_save(flags); + preempt_disable(); + rwlock_acquire(&lock->dep_map, 0, 0, _RET_IP_); + LOCK_CONTENDED_FLAGS(lock, _raw_write_trylock, _raw_write_lock, + _raw_write_lock_flags, &flags); + return flags; +} + +static inline void __write_lock_irq(rwlock_t *lock) +{ + local_irq_disable(); + preempt_disable(); + rwlock_acquire(&lock->dep_map, 0, 0, _RET_IP_); + LOCK_CONTENDED(lock, _raw_write_trylock, _raw_write_lock); +} + +static inline void __write_lock_bh(rwlock_t *lock) +{ + local_bh_disable(); + preempt_disable(); + rwlock_acquire(&lock->dep_map, 0, 0, _RET_IP_); + LOCK_CONTENDED(lock, _raw_write_trylock, _raw_write_lock); +} + +static inline void __spin_lock(spinlock_t *lock) +{ + preempt_disable(); + spin_acquire(&lock->dep_map, 0, 0, _RET_IP_); + LOCK_CONTENDED(lock, _raw_spin_trylock, _raw_spin_lock); +} + +static inline void __write_lock(rwlock_t *lock) +{ + preempt_disable(); + rwlock_acquire(&lock->dep_map, 0, 0, _RET_IP_); + LOCK_CONTENDED(lock, _raw_write_trylock, _raw_write_lock); +} + +#endif /* CONFIG_PREEMPT */ + +static inline void __spin_unlock(spinlock_t *lock) +{ + spin_release(&lock->dep_map, 1, _RET_IP_); + _raw_spin_unlock(lock); + preempt_enable(); +} + +static inline void __write_unlock(rwlock_t *lock) +{ + rwlock_release(&lock->dep_map, 1, _RET_IP_); + _raw_write_unlock(lock); + preempt_enable(); +} + +static inline void __read_unlock(rwlock_t *lock) +{ + rwlock_release(&lock->dep_map, 1, _RET_IP_); + _raw_read_unlock(lock); + preempt_enable(); +} + +static inline void __spin_unlock_irqrestore(spinlock_t *lock, + unsigned long flags) +{ + spin_release(&lock->dep_map, 1, _RET_IP_); + _raw_spin_unlock(lock); + local_irq_restore(flags); + preempt_enable(); +} + +static inline void __spin_unlock_irq(spinlock_t *lock) +{ + spin_release(&lock->dep_map, 1, _RET_IP_); + _raw_spin_unlock(lock); + local_irq_enable(); + preempt_enable(); +} + +static inline void __spin_unlock_bh(spinlock_t *lock) +{ + spin_release(&lock->dep_map, 1, _RET_IP_); + _raw_spin_unlock(lock); + preempt_enable_no_resched(); + local_bh_enable_ip((unsigned long)__builtin_return_address(0)); +} + +static inline void __read_unlock_irqrestore(rwlock_t *lock, unsigned long flags) +{ + 
rwlock_release(&lock->dep_map, 1, _RET_IP_); + _raw_read_unlock(lock); + local_irq_restore(flags); + preempt_enable(); +} + +static inline void __read_unlock_irq(rwlock_t *lock) +{ + rwlock_release(&lock->dep_map, 1, _RET_IP_); + _raw_read_unlock(lock); + local_irq_enable(); + preempt_enable(); +} + +static inline void __read_unlock_bh(rwlock_t *lock) +{ + rwlock_release(&lock->dep_map, 1, _RET_IP_); + _raw_read_unlock(lock); + preempt_enable_no_resched(); + local_bh_enable_ip((unsigned long)__builtin_return_address(0)); +} + +static inline void __write_unlock_irqrestore(rwlock_t *lock, + unsigned long flags) +{ + rwlock_release(&lock->dep_map, 1, _RET_IP_); + _raw_write_unlock(lock); + local_irq_restore(flags); + preempt_enable(); +} + +static inline void __write_unlock_irq(rwlock_t *lock) +{ + rwlock_release(&lock->dep_map, 1, _RET_IP_); + _raw_write_unlock(lock); + local_irq_enable(); + preempt_enable(); +} + +static inline void __write_unlock_bh(rwlock_t *lock) +{ + rwlock_release(&lock->dep_map, 1, _RET_IP_); + _raw_write_unlock(lock); + preempt_enable_no_resched(); + local_bh_enable_ip((unsigned long)__builtin_return_address(0)); +} + +static inline int __spin_trylock_bh(spinlock_t *lock) +{ + local_bh_disable(); + preempt_disable(); + if (_raw_spin_trylock(lock)) { + spin_acquire(&lock->dep_map, 0, 1, _RET_IP_); + return 1; + } + preempt_enable_no_resched(); + local_bh_enable_ip((unsigned long)__builtin_return_address(0)); + return 0; +} + #endif /* __LINUX_SPINLOCK_API_SMP_H */ diff --git a/include/linux/spinlock_up.h b/include/linux/spinlock_up.h index 938234c4a99..d4841ed8215 100644 --- a/include/linux/spinlock_up.h +++ b/include/linux/spinlock_up.h @@ -60,6 +60,7 @@ static inline void __raw_spin_unlock(raw_spinlock_t *lock) #define __raw_spin_is_locked(lock) ((void)(lock), 0) /* for sched.c and kernel_lock.c: */ # define __raw_spin_lock(lock) do { (void)(lock); } while (0) +# define __raw_spin_lock_flags(lock, flags) do { (void)(lock); } while (0) # define __raw_spin_unlock(lock) do { (void)(lock); } while (0) # define __raw_spin_trylock(lock) ({ (void)(lock); 1; }) #endif /* DEBUG_SPINLOCK */ diff --git a/include/linux/splice.h b/include/linux/splice.h index 528dcb93c2f..18e7c7c0cae 100644 --- a/include/linux/splice.h +++ b/include/linux/splice.h @@ -11,8 +11,7 @@ #include <linux/pipe_fs_i.h> /* - * splice is tied to pipes as a transport (at least for now), so we'll just - * add the splice flags here. 
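To make the intent of the smp_mb__after_lock() hook introduced in spinlock.h above concrete: acquiring a spinlock only has acquire semantics, so a store issued before the lock can still be reordered past a load issued inside the critical section. A minimal, hypothetical sketch of the pattern the hook is meant for (none of the names below come from this patch):

#include <linux/spinlock.h>

/*
 * Illustrative only: "ready", "completed" and the wakeup step are invented.
 * Without the full barrier, the store to w->ready could become visible
 * after the load of completed, and the two sides could miss each other.
 */
struct example_waiter {
	int ready;
};

static DEFINE_SPINLOCK(example_lock);
static int completed;

static int example_register_and_check(struct example_waiter *w)
{
	int done;

	w->ready = 1;			/* advertise ourselves first */
	spin_lock(&example_lock);
	smp_mb__after_lock();		/* order the store above vs. the load below */
	done = completed;
	spin_unlock(&example_lock);

	return done;
}

static void example_complete(void)
{
	completed = 1;
	smp_mb();			/* pairs with smp_mb__after_lock() */
	/* if (w->ready) wake the waiter here */
}

An architecture whose lock operation already acts as a full barrier can define ARCH_HAS_SMP_MB_AFTER_LOCK and supply a no-op version, which is the point of routing this through a dedicated helper instead of a bare smp_mb().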
+ * Flags passed in from splice/tee/vmsplice */ #define SPLICE_F_MOVE (0x01) /* move pages instead of copying */ #define SPLICE_F_NONBLOCK (0x02) /* don't block on the pipe splicing (but */ @@ -36,6 +35,8 @@ struct splice_desc { void *data; /* cookie */ } u; loff_t pos; /* file position */ + size_t num_spliced; /* number of bytes already spliced */ + bool need_wakeup; /* need to wake up writer */ }; struct partial_page { @@ -66,6 +67,16 @@ extern ssize_t splice_from_pipe(struct pipe_inode_info *, struct file *, splice_actor *); extern ssize_t __splice_from_pipe(struct pipe_inode_info *, struct splice_desc *, splice_actor *); +extern int splice_from_pipe_feed(struct pipe_inode_info *, struct splice_desc *, + splice_actor *); +extern int splice_from_pipe_next(struct pipe_inode_info *, + struct splice_desc *); +extern void splice_from_pipe_begin(struct splice_desc *); +extern void splice_from_pipe_end(struct pipe_inode_info *, + struct splice_desc *); +extern int pipe_to_file(struct pipe_inode_info *, struct pipe_buffer *, + struct splice_desc *); + extern ssize_t splice_to_pipe(struct pipe_inode_info *, struct splice_pipe_desc *); extern ssize_t splice_direct_to_actor(struct file *, struct splice_desc *, diff --git a/include/linux/ssb/ssb.h b/include/linux/ssb/ssb.h index 17d9b58f637..3d0a9ff24f0 100644 --- a/include/linux/ssb/ssb.h +++ b/include/linux/ssb/ssb.h @@ -27,24 +27,54 @@ struct ssb_sprom { u8 et1mdcport; /* MDIO for enet1 */ u8 board_rev; /* Board revision number from SPROM. */ u8 country_code; /* Country Code */ - u8 ant_available_a; /* A-PHY antenna available bits (up to 4) */ - u8 ant_available_bg; /* B/G-PHY antenna available bits (up to 4) */ + u8 ant_available_a; /* 2GHz antenna available bits (up to 4) */ + u8 ant_available_bg; /* 5GHz antenna available bits (up to 4) */ u16 pa0b0; u16 pa0b1; u16 pa0b2; u16 pa1b0; u16 pa1b1; u16 pa1b2; + u16 pa1lob0; + u16 pa1lob1; + u16 pa1lob2; + u16 pa1hib0; + u16 pa1hib1; + u16 pa1hib2; u8 gpio0; /* GPIO pin 0 */ u8 gpio1; /* GPIO pin 1 */ u8 gpio2; /* GPIO pin 2 */ u8 gpio3; /* GPIO pin 3 */ - u16 maxpwr_a; /* A-PHY Amplifier Max Power (in dBm Q5.2) */ - u16 maxpwr_bg; /* B/G-PHY Amplifier Max Power (in dBm Q5.2) */ + u16 maxpwr_bg; /* 2.4GHz Amplifier Max Power (in dBm Q5.2) */ + u16 maxpwr_al; /* 5.2GHz Amplifier Max Power (in dBm Q5.2) */ + u16 maxpwr_a; /* 5.3GHz Amplifier Max Power (in dBm Q5.2) */ + u16 maxpwr_ah; /* 5.8GHz Amplifier Max Power (in dBm Q5.2) */ u8 itssi_a; /* Idle TSSI Target for A-PHY */ u8 itssi_bg; /* Idle TSSI Target for B/G-PHY */ - u16 boardflags_lo; /* Boardflags (low 16 bits) */ - u16 boardflags_hi; /* Boardflags (high 16 bits) */ + u8 tri2g; /* 2.4GHz TX isolation */ + u8 tri5gl; /* 5.2GHz TX isolation */ + u8 tri5g; /* 5.3GHz TX isolation */ + u8 tri5gh; /* 5.8GHz TX isolation */ + u8 rxpo2g; /* 2GHz RX power offset */ + u8 rxpo5g; /* 5GHz RX power offset */ + u8 rssisav2g; /* 2GHz RSSI params */ + u8 rssismc2g; + u8 rssismf2g; + u8 bxa2g; /* 2GHz BX arch */ + u8 rssisav5g; /* 5GHz RSSI params */ + u8 rssismc5g; + u8 rssismf5g; + u8 bxa5g; /* 5GHz BX arch */ + u16 cck2gpo; /* CCK power offset */ + u32 ofdm2gpo; /* 2.4GHz OFDM power offset */ + u32 ofdm5glpo; /* 5.2GHz OFDM power offset */ + u32 ofdm5gpo; /* 5.3GHz OFDM power offset */ + u32 ofdm5ghpo; /* 5.8GHz OFDM power offset */ + u16 boardflags_lo; /* Board flags (bits 0-15) */ + u16 boardflags_hi; /* Board flags (bits 16-31) */ + u16 boardflags2_lo; /* Board flags (bits 32-47) */ + u16 boardflags2_hi; /* Board flags (bits 48-63) */ + /* TODO store 
board flags in a single u64 */ /* Antenna gain values for up to 4 antennas * on each band. Values in dBm/4 (Q5.2). Negative gain means the @@ -58,7 +88,7 @@ struct ssb_sprom { } ghz5; /* 5GHz band */ } antenna_gain; - /* TODO - add any parameters needed from rev 2, 3, or 4 SPROMs */ + /* TODO - add any parameters needed from rev 2, 3, 4, 5 or 8 SPROMs */ }; /* Information about the PCB the circuitry is soldered on. */ @@ -208,6 +238,7 @@ enum ssb_bustype { SSB_BUSTYPE_SSB, /* This SSB bus is the system bus */ SSB_BUSTYPE_PCI, /* SSB is connected to PCI bus */ SSB_BUSTYPE_PCMCIA, /* SSB is connected to PCMCIA bus */ + SSB_BUSTYPE_SDIO, /* SSB is connected to SDIO bus */ }; /* board_vendor */ @@ -240,8 +271,12 @@ struct ssb_bus { /* The core in the basic address register window. (PCI bus only) */ struct ssb_device *mapped_device; - /* Currently mapped PCMCIA segment. (bustype == SSB_BUSTYPE_PCMCIA only) */ - u8 mapped_pcmcia_seg; + union { + /* Currently mapped PCMCIA segment. (bustype == SSB_BUSTYPE_PCMCIA only) */ + u8 mapped_pcmcia_seg; + /* Current SSB base address window for SDIO. */ + u32 sdio_sbaddr; + }; /* Lock for core and segment switching. * On PCMCIA-host busses this is used to protect the whole MMIO access. */ spinlock_t bar_lock; @@ -252,6 +287,11 @@ struct ssb_bus { struct pci_dev *host_pci; /* Pointer to the PCMCIA device (only if bustype == SSB_BUSTYPE_PCMCIA). */ struct pcmcia_device *host_pcmcia; + /* Pointer to the SDIO device (only if bustype == SSB_BUSTYPE_SDIO). */ + struct sdio_func *host_sdio; + + /* See enum ssb_quirks */ + unsigned int quirks; #ifdef CONFIG_SSB_SPROM /* Mutex to protect the SPROM writing. */ @@ -306,6 +346,11 @@ struct ssb_bus { #endif /* DEBUG */ }; +enum ssb_quirks { + /* SDIO connected card requires performing a read after writing a 32-bit value */ + SSB_QUIRK_SDIO_READ_AFTER_WRITE32 = (1 << 0), +}; + /* The initialization-invariants. */ struct ssb_init_invariants { /* Versioning information about the PCB. */ @@ -336,9 +381,19 @@ extern int ssb_bus_pcmciabus_register(struct ssb_bus *bus, struct pcmcia_device *pcmcia_dev, unsigned long baseaddr); #endif /* CONFIG_SSB_PCMCIAHOST */ +#ifdef CONFIG_SSB_SDIOHOST +extern int ssb_bus_sdiobus_register(struct ssb_bus *bus, + struct sdio_func *sdio_func, + unsigned int quirks); +#endif /* CONFIG_SSB_SDIOHOST */ + extern void ssb_bus_unregister(struct ssb_bus *bus); +/* Set a fallback SPROM. + * See kdoc at the function definition for complete documentation. */ +extern int ssb_arch_set_fallback_sprom(const struct ssb_sprom *sprom); + /* Suspend a SSB bus. * Call this from the parent bus suspend routine. 
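The ssb_arch_set_fallback_sprom() hook declared above is aimed at board or architecture setup code: it registers a template that the SSB core can fall back to when a device carries no usable SPROM. A hypothetical sketch with entirely made-up values, touching only fields visible in this hunk (real board code would also fill the MAC and revision fields declared elsewhere in struct ssb_sprom):

#include <linux/init.h>
#include <linux/kernel.h>
#include <linux/ssb/ssb.h>

/* Every value below is invented for illustration. */
static const struct ssb_sprom board_fallback_sprom = {
	.board_rev	  = 0x42,
	.country_code	  = 0x00,
	.ant_available_bg = 0x3,
	.maxpwr_bg	  = 0x4c,
	.itssi_bg	  = 0x3e,
	.boardflags_lo	  = 0x0248,
	.boardflags_hi	  = 0x0000,
};

static int __init board_sprom_init(void)
{
	int err;

	/* Must run before the SSB bus that needs it is registered. */
	err = ssb_arch_set_fallback_sprom(&board_fallback_sprom);
	if (err)
		printk(KERN_WARNING "SSB: could not register fallback SPROM: %d\n", err);
	return err;
}
fs_initcall(board_sprom_init);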
*/ extern int ssb_bus_suspend(struct ssb_bus *bus); diff --git a/include/linux/ssb/ssb_driver_chipcommon.h b/include/linux/ssb/ssb_driver_chipcommon.h index 7d7e03dcf77..4e27acf0a92 100644 --- a/include/linux/ssb/ssb_driver_chipcommon.h +++ b/include/linux/ssb/ssb_driver_chipcommon.h @@ -181,6 +181,16 @@ #define SSB_CHIPCO_PROG_WAITCNT 0x0124 #define SSB_CHIPCO_FLASH_CFG 0x0128 #define SSB_CHIPCO_FLASH_WAITCNT 0x012C +#define SSB_CHIPCO_CLKCTLST 0x01E0 /* Clock control and status (rev >= 20) */ +#define SSB_CHIPCO_CLKCTLST_FORCEALP 0x00000001 /* Force ALP request */ +#define SSB_CHIPCO_CLKCTLST_FORCEHT 0x00000002 /* Force HT request */ +#define SSB_CHIPCO_CLKCTLST_FORCEILP 0x00000004 /* Force ILP request */ +#define SSB_CHIPCO_CLKCTLST_HAVEALPREQ 0x00000008 /* ALP available request */ +#define SSB_CHIPCO_CLKCTLST_HAVEHTREQ 0x00000010 /* HT available request */ +#define SSB_CHIPCO_CLKCTLST_HWCROFF 0x00000020 /* Force HW clock request off */ +#define SSB_CHIPCO_CLKCTLST_HAVEHT 0x00010000 /* HT available */ +#define SSB_CHIPCO_CLKCTLST_HAVEALP 0x00020000 /* APL available */ +#define SSB_CHIPCO_HW_WORKAROUND 0x01E4 /* Hardware workaround (rev >= 20) */ #define SSB_CHIPCO_UART0_DATA 0x0300 #define SSB_CHIPCO_UART0_IMR 0x0304 #define SSB_CHIPCO_UART0_FCR 0x0308 @@ -197,6 +207,196 @@ #define SSB_CHIPCO_UART1_LSR 0x0414 #define SSB_CHIPCO_UART1_MSR 0x0418 #define SSB_CHIPCO_UART1_SCRATCH 0x041C +/* PMU registers (rev >= 20) */ +#define SSB_CHIPCO_PMU_CTL 0x0600 /* PMU control */ +#define SSB_CHIPCO_PMU_CTL_ILP_DIV 0xFFFF0000 /* ILP div mask */ +#define SSB_CHIPCO_PMU_CTL_ILP_DIV_SHIFT 16 +#define SSB_CHIPCO_PMU_CTL_NOILPONW 0x00000200 /* No ILP on wait */ +#define SSB_CHIPCO_PMU_CTL_HTREQEN 0x00000100 /* HT req enable */ +#define SSB_CHIPCO_PMU_CTL_ALPREQEN 0x00000080 /* ALP req enable */ +#define SSB_CHIPCO_PMU_CTL_XTALFREQ 0x0000007C /* Crystal freq */ +#define SSB_CHIPCO_PMU_CTL_XTALFREQ_SHIFT 2 +#define SSB_CHIPCO_PMU_CTL_ILPDIVEN 0x00000002 /* ILP div enable */ +#define SSB_CHIPCO_PMU_CTL_LPOSEL 0x00000001 /* LPO sel */ +#define SSB_CHIPCO_PMU_CAP 0x0604 /* PMU capabilities */ +#define SSB_CHIPCO_PMU_CAP_REVISION 0x000000FF /* Revision mask */ +#define SSB_CHIPCO_PMU_STAT 0x0608 /* PMU status */ +#define SSB_CHIPCO_PMU_STAT_INTPEND 0x00000040 /* Interrupt pending */ +#define SSB_CHIPCO_PMU_STAT_SBCLKST 0x00000030 /* Backplane clock status? 
*/ +#define SSB_CHIPCO_PMU_STAT_HAVEALP 0x00000008 /* ALP available */ +#define SSB_CHIPCO_PMU_STAT_HAVEHT 0x00000004 /* HT available */ +#define SSB_CHIPCO_PMU_STAT_RESINIT 0x00000003 /* Res init */ +#define SSB_CHIPCO_PMU_RES_STAT 0x060C /* PMU res status */ +#define SSB_CHIPCO_PMU_RES_PEND 0x0610 /* PMU res pending */ +#define SSB_CHIPCO_PMU_TIMER 0x0614 /* PMU timer */ +#define SSB_CHIPCO_PMU_MINRES_MSK 0x0618 /* PMU min res mask */ +#define SSB_CHIPCO_PMU_MAXRES_MSK 0x061C /* PMU max res mask */ +#define SSB_CHIPCO_PMU_RES_TABSEL 0x0620 /* PMU res table sel */ +#define SSB_CHIPCO_PMU_RES_DEPMSK 0x0624 /* PMU res dep mask */ +#define SSB_CHIPCO_PMU_RES_UPDNTM 0x0628 /* PMU res updown timer */ +#define SSB_CHIPCO_PMU_RES_TIMER 0x062C /* PMU res timer */ +#define SSB_CHIPCO_PMU_CLKSTRETCH 0x0630 /* PMU clockstretch */ +#define SSB_CHIPCO_PMU_WATCHDOG 0x0634 /* PMU watchdog */ +#define SSB_CHIPCO_PMU_RES_REQTS 0x0640 /* PMU res req timer sel */ +#define SSB_CHIPCO_PMU_RES_REQT 0x0644 /* PMU res req timer */ +#define SSB_CHIPCO_PMU_RES_REQM 0x0648 /* PMU res req mask */ +#define SSB_CHIPCO_CHIPCTL_ADDR 0x0650 +#define SSB_CHIPCO_CHIPCTL_DATA 0x0654 +#define SSB_CHIPCO_REGCTL_ADDR 0x0658 +#define SSB_CHIPCO_REGCTL_DATA 0x065C +#define SSB_CHIPCO_PLLCTL_ADDR 0x0660 +#define SSB_CHIPCO_PLLCTL_DATA 0x0664 + + + +/** PMU PLL registers */ + +/* PMU rev 0 PLL registers */ +#define SSB_PMU0_PLLCTL0 0 +#define SSB_PMU0_PLLCTL0_PDIV_MSK 0x00000001 +#define SSB_PMU0_PLLCTL0_PDIV_FREQ 25000 /* kHz */ +#define SSB_PMU0_PLLCTL1 1 +#define SSB_PMU0_PLLCTL1_WILD_IMSK 0xF0000000 /* Wild int mask (low nibble) */ +#define SSB_PMU0_PLLCTL1_WILD_IMSK_SHIFT 28 +#define SSB_PMU0_PLLCTL1_WILD_FMSK 0x0FFFFF00 /* Wild frac mask */ +#define SSB_PMU0_PLLCTL1_WILD_FMSK_SHIFT 8 +#define SSB_PMU0_PLLCTL1_STOPMOD 0x00000040 /* Stop mod */ +#define SSB_PMU0_PLLCTL2 2 +#define SSB_PMU0_PLLCTL2_WILD_IMSKHI 0x0000000F /* Wild int mask (high nibble) */ +#define SSB_PMU0_PLLCTL2_WILD_IMSKHI_SHIFT 0 + +/* PMU rev 1 PLL registers */ +#define SSB_PMU1_PLLCTL0 0 +#define SSB_PMU1_PLLCTL0_P1DIV 0x00F00000 /* P1 div */ +#define SSB_PMU1_PLLCTL0_P1DIV_SHIFT 20 +#define SSB_PMU1_PLLCTL0_P2DIV 0x0F000000 /* P2 div */ +#define SSB_PMU1_PLLCTL0_P2DIV_SHIFT 24 +#define SSB_PMU1_PLLCTL1 1 +#define SSB_PMU1_PLLCTL1_M1DIV 0x000000FF /* M1 div */ +#define SSB_PMU1_PLLCTL1_M1DIV_SHIFT 0 +#define SSB_PMU1_PLLCTL1_M2DIV 0x0000FF00 /* M2 div */ +#define SSB_PMU1_PLLCTL1_M2DIV_SHIFT 8 +#define SSB_PMU1_PLLCTL1_M3DIV 0x00FF0000 /* M3 div */ +#define SSB_PMU1_PLLCTL1_M3DIV_SHIFT 16 +#define SSB_PMU1_PLLCTL1_M4DIV 0xFF000000 /* M4 div */ +#define SSB_PMU1_PLLCTL1_M4DIV_SHIFT 24 +#define SSB_PMU1_PLLCTL2 2 +#define SSB_PMU1_PLLCTL2_M5DIV 0x000000FF /* M5 div */ +#define SSB_PMU1_PLLCTL2_M5DIV_SHIFT 0 +#define SSB_PMU1_PLLCTL2_M6DIV 0x0000FF00 /* M6 div */ +#define SSB_PMU1_PLLCTL2_M6DIV_SHIFT 8 +#define SSB_PMU1_PLLCTL2_NDIVMODE 0x000E0000 /* NDIV mode */ +#define SSB_PMU1_PLLCTL2_NDIVMODE_SHIFT 17 +#define SSB_PMU1_PLLCTL2_NDIVINT 0x1FF00000 /* NDIV int */ +#define SSB_PMU1_PLLCTL2_NDIVINT_SHIFT 20 +#define SSB_PMU1_PLLCTL3 3 +#define SSB_PMU1_PLLCTL3_NDIVFRAC 0x00FFFFFF /* NDIV frac */ +#define SSB_PMU1_PLLCTL3_NDIVFRAC_SHIFT 0 +#define SSB_PMU1_PLLCTL4 4 +#define SSB_PMU1_PLLCTL5 5 +#define SSB_PMU1_PLLCTL5_CLKDRV 0xFFFFFF00 /* clk drv */ +#define SSB_PMU1_PLLCTL5_CLKDRV_SHIFT 8 + +/* BCM4312 PLL resource numbers. 
*/ +#define SSB_PMURES_4312_SWITCHER_BURST 0 +#define SSB_PMURES_4312_SWITCHER_PWM 1 +#define SSB_PMURES_4312_PA_REF_LDO 2 +#define SSB_PMURES_4312_CORE_LDO_BURST 3 +#define SSB_PMURES_4312_CORE_LDO_PWM 4 +#define SSB_PMURES_4312_RADIO_LDO 5 +#define SSB_PMURES_4312_ILP_REQUEST 6 +#define SSB_PMURES_4312_BG_FILTBYP 7 +#define SSB_PMURES_4312_TX_FILTBYP 8 +#define SSB_PMURES_4312_RX_FILTBYP 9 +#define SSB_PMURES_4312_XTAL_PU 10 +#define SSB_PMURES_4312_ALP_AVAIL 11 +#define SSB_PMURES_4312_BB_PLL_FILTBYP 12 +#define SSB_PMURES_4312_RF_PLL_FILTBYP 13 +#define SSB_PMURES_4312_HT_AVAIL 14 + +/* BCM4325 PLL resource numbers. */ +#define SSB_PMURES_4325_BUCK_BOOST_BURST 0 +#define SSB_PMURES_4325_CBUCK_BURST 1 +#define SSB_PMURES_4325_CBUCK_PWM 2 +#define SSB_PMURES_4325_CLDO_CBUCK_BURST 3 +#define SSB_PMURES_4325_CLDO_CBUCK_PWM 4 +#define SSB_PMURES_4325_BUCK_BOOST_PWM 5 +#define SSB_PMURES_4325_ILP_REQUEST 6 +#define SSB_PMURES_4325_ABUCK_BURST 7 +#define SSB_PMURES_4325_ABUCK_PWM 8 +#define SSB_PMURES_4325_LNLDO1_PU 9 +#define SSB_PMURES_4325_LNLDO2_PU 10 +#define SSB_PMURES_4325_LNLDO3_PU 11 +#define SSB_PMURES_4325_LNLDO4_PU 12 +#define SSB_PMURES_4325_XTAL_PU 13 +#define SSB_PMURES_4325_ALP_AVAIL 14 +#define SSB_PMURES_4325_RX_PWRSW_PU 15 +#define SSB_PMURES_4325_TX_PWRSW_PU 16 +#define SSB_PMURES_4325_RFPLL_PWRSW_PU 17 +#define SSB_PMURES_4325_LOGEN_PWRSW_PU 18 +#define SSB_PMURES_4325_AFE_PWRSW_PU 19 +#define SSB_PMURES_4325_BBPLL_PWRSW_PU 20 +#define SSB_PMURES_4325_HT_AVAIL 21 + +/* BCM4328 PLL resource numbers. */ +#define SSB_PMURES_4328_EXT_SWITCHER_PWM 0 +#define SSB_PMURES_4328_BB_SWITCHER_PWM 1 +#define SSB_PMURES_4328_BB_SWITCHER_BURST 2 +#define SSB_PMURES_4328_BB_EXT_SWITCHER_BURST 3 +#define SSB_PMURES_4328_ILP_REQUEST 4 +#define SSB_PMURES_4328_RADIO_SWITCHER_PWM 5 +#define SSB_PMURES_4328_RADIO_SWITCHER_BURST 6 +#define SSB_PMURES_4328_ROM_SWITCH 7 +#define SSB_PMURES_4328_PA_REF_LDO 8 +#define SSB_PMURES_4328_RADIO_LDO 9 +#define SSB_PMURES_4328_AFE_LDO 10 +#define SSB_PMURES_4328_PLL_LDO 11 +#define SSB_PMURES_4328_BG_FILTBYP 12 +#define SSB_PMURES_4328_TX_FILTBYP 13 +#define SSB_PMURES_4328_RX_FILTBYP 14 +#define SSB_PMURES_4328_XTAL_PU 15 +#define SSB_PMURES_4328_XTAL_EN 16 +#define SSB_PMURES_4328_BB_PLL_FILTBYP 17 +#define SSB_PMURES_4328_RF_PLL_FILTBYP 18 +#define SSB_PMURES_4328_BB_PLL_PU 19 + +/* BCM5354 PLL resource numbers. */ +#define SSB_PMURES_5354_EXT_SWITCHER_PWM 0 +#define SSB_PMURES_5354_BB_SWITCHER_PWM 1 +#define SSB_PMURES_5354_BB_SWITCHER_BURST 2 +#define SSB_PMURES_5354_BB_EXT_SWITCHER_BURST 3 +#define SSB_PMURES_5354_ILP_REQUEST 4 +#define SSB_PMURES_5354_RADIO_SWITCHER_PWM 5 +#define SSB_PMURES_5354_RADIO_SWITCHER_BURST 6 +#define SSB_PMURES_5354_ROM_SWITCH 7 +#define SSB_PMURES_5354_PA_REF_LDO 8 +#define SSB_PMURES_5354_RADIO_LDO 9 +#define SSB_PMURES_5354_AFE_LDO 10 +#define SSB_PMURES_5354_PLL_LDO 11 +#define SSB_PMURES_5354_BG_FILTBYP 12 +#define SSB_PMURES_5354_TX_FILTBYP 13 +#define SSB_PMURES_5354_RX_FILTBYP 14 +#define SSB_PMURES_5354_XTAL_PU 15 +#define SSB_PMURES_5354_XTAL_EN 16 +#define SSB_PMURES_5354_BB_PLL_FILTBYP 17 +#define SSB_PMURES_5354_RF_PLL_FILTBYP 18 +#define SSB_PMURES_5354_BB_PLL_PU 19 + + + +/** Chip specific Chip-Status register contents. */ +#define SSB_CHIPCO_CHST_4325_SPROM_OTP_SEL 0x00000003 +#define SSB_CHIPCO_CHST_4325_DEFCIS_SEL 0 /* OTP is powered up, use def. 
CIS, no SPROM */ +#define SSB_CHIPCO_CHST_4325_SPROM_SEL 1 /* OTP is powered up, SPROM is present */ +#define SSB_CHIPCO_CHST_4325_OTP_SEL 2 /* OTP is powered up, no SPROM */ +#define SSB_CHIPCO_CHST_4325_OTP_PWRDN 3 /* OTP is powered down, SPROM is present */ +#define SSB_CHIPCO_CHST_4325_SDIO_USB_MODE 0x00000004 +#define SSB_CHIPCO_CHST_4325_SDIO_USB_MODE_SHIFT 2 +#define SSB_CHIPCO_CHST_4325_RCAL_VALID 0x00000008 +#define SSB_CHIPCO_CHST_4325_RCAL_VALID_SHIFT 3 +#define SSB_CHIPCO_CHST_4325_RCAL_VALUE 0x000001F0 +#define SSB_CHIPCO_CHST_4325_RCAL_VALUE_SHIFT 4 +#define SSB_CHIPCO_CHST_4325_PMUTOP_2B 0x00000200 /* 1 for 2b, 0 for to 2a */ @@ -353,11 +553,20 @@ struct ssb_device; struct ssb_serial_port; +/* Data for the PMU, if available. + * Check availability with ((struct ssb_chipcommon)->capabilities & SSB_CHIPCO_CAP_PMU) + */ +struct ssb_chipcommon_pmu { + u8 rev; /* PMU revision */ + u32 crystalfreq; /* The active crystal frequency (in kHz) */ +}; + struct ssb_chipcommon { struct ssb_device *dev; u32 capabilities; /* Fast Powerup Delay constant */ u16 fast_pwrup_delay; + struct ssb_chipcommon_pmu pmu; }; static inline bool ssb_chipco_available(struct ssb_chipcommon *cc) @@ -365,6 +574,17 @@ static inline bool ssb_chipco_available(struct ssb_chipcommon *cc) return (cc->dev != NULL); } +/* Register access */ +#define chipco_read32(cc, offset) ssb_read32((cc)->dev, offset) +#define chipco_write32(cc, offset, val) ssb_write32((cc)->dev, offset, val) + +#define chipco_mask32(cc, offset, mask) \ + chipco_write32(cc, offset, chipco_read32(cc, offset) & (mask)) +#define chipco_set32(cc, offset, set) \ + chipco_write32(cc, offset, chipco_read32(cc, offset) | (set)) +#define chipco_maskset32(cc, offset, mask, set) \ + chipco_write32(cc, offset, (chipco_read32(cc, offset) & (mask)) | (set)) + extern void ssb_chipcommon_init(struct ssb_chipcommon *cc); extern void ssb_chipco_suspend(struct ssb_chipcommon *cc); @@ -406,4 +626,18 @@ extern int ssb_chipco_serial_init(struct ssb_chipcommon *cc, struct ssb_serial_port *ports); #endif /* CONFIG_SSB_SERIAL */ +/* PMU support */ +extern void ssb_pmu_init(struct ssb_chipcommon *cc); + +enum ssb_pmu_ldo_volt_id { + LDO_PAREF = 0, + LDO_VOLT1, + LDO_VOLT2, + LDO_VOLT3, +}; + +void ssb_pmu_set_ldo_voltage(struct ssb_chipcommon *cc, + enum ssb_pmu_ldo_volt_id id, u32 voltage); +void ssb_pmu_set_ldo_paref(struct ssb_chipcommon *cc, bool on); + #endif /* LINUX_SSB_CHIPCO_H_ */ diff --git a/include/linux/ssb/ssb_regs.h b/include/linux/ssb/ssb_regs.h index 99a0f991e85..9ae9082eaeb 100644 --- a/include/linux/ssb/ssb_regs.h +++ b/include/linux/ssb/ssb_regs.h @@ -162,7 +162,7 @@ /* SPROM shadow area. If not otherwise noted, fields are * two bytes wide. Note that the SPROM can _only_ be read - * in two-byte quantinies. + * in two-byte quantities. 
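The chipco_*32() accessors defined above are thin wrappers around ssb_read32()/ssb_write32() on the ChipCommon core, which keeps read-modify-write sequences on registers such as the new CLKCTLST word short. Purely as an illustration (this helper is not part of the patch), a driver could request the HT clock on a rev >= 20 ChipCommon and poll until the core reports it available:

#include <linux/delay.h>
#include <linux/errno.h>
#include <linux/ssb/ssb.h>
#include <linux/ssb/ssb_driver_chipcommon.h>

/* Hypothetical helper; the ~10ms total timeout is an arbitrary choice. */
static int example_force_ht_clock(struct ssb_chipcommon *cc)
{
	int i;

	chipco_set32(cc, SSB_CHIPCO_CLKCTLST, SSB_CHIPCO_CLKCTLST_FORCEHT);
	for (i = 0; i < 1000; i++) {
		if (chipco_read32(cc, SSB_CHIPCO_CLKCTLST) &
		    SSB_CHIPCO_CLKCTLST_HAVEHT)
			return 0;
		udelay(10);
	}
	return -ETIMEDOUT;
}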
*/ #define SSB_SPROMSIZE_WORDS 64 #define SSB_SPROMSIZE_BYTES (SSB_SPROMSIZE_WORDS * sizeof(u16)) @@ -326,6 +326,94 @@ #define SSB_SPROM5_GPIOB_P3 0xFF00 /* Pin 3 */ #define SSB_SPROM5_GPIOB_P3_SHIFT 8 +/* SPROM Revision 8 */ +#define SSB_SPROM8_BOARDREV 0x1082 /* Board revision */ +#define SSB_SPROM8_BFLLO 0x1084 /* Board flags (bits 0-15) */ +#define SSB_SPROM8_BFLHI 0x1086 /* Board flags (bits 16-31) */ +#define SSB_SPROM8_BFL2LO 0x1088 /* Board flags (bits 32-47) */ +#define SSB_SPROM8_BFL2HI 0x108A /* Board flags (bits 48-63) */ +#define SSB_SPROM8_IL0MAC 0x108C /* 6 byte MAC address */ +#define SSB_SPROM8_CCODE 0x1092 /* 2 byte country code */ +#define SSB_SPROM8_ANTAVAIL 0x109C /* Antenna available bitfields*/ +#define SSB_SPROM8_ANTAVAIL_A 0xFF00 /* A-PHY bitfield */ +#define SSB_SPROM8_ANTAVAIL_A_SHIFT 8 +#define SSB_SPROM8_ANTAVAIL_BG 0x00FF /* B-PHY and G-PHY bitfield */ +#define SSB_SPROM8_ANTAVAIL_BG_SHIFT 0 +#define SSB_SPROM8_AGAIN01 0x109E /* Antenna Gain (in dBm Q5.2) */ +#define SSB_SPROM8_AGAIN0 0x00FF /* Antenna 0 */ +#define SSB_SPROM8_AGAIN0_SHIFT 0 +#define SSB_SPROM8_AGAIN1 0xFF00 /* Antenna 1 */ +#define SSB_SPROM8_AGAIN1_SHIFT 8 +#define SSB_SPROM8_AGAIN23 0x10A0 +#define SSB_SPROM8_AGAIN2 0x00FF /* Antenna 2 */ +#define SSB_SPROM8_AGAIN2_SHIFT 0 +#define SSB_SPROM8_AGAIN3 0xFF00 /* Antenna 3 */ +#define SSB_SPROM8_AGAIN3_SHIFT 8 +#define SSB_SPROM8_GPIOA 0x1096 /*Gen. Purpose IO # 0 and 1 */ +#define SSB_SPROM8_GPIOA_P0 0x00FF /* Pin 0 */ +#define SSB_SPROM8_GPIOA_P1 0xFF00 /* Pin 1 */ +#define SSB_SPROM8_GPIOA_P1_SHIFT 8 +#define SSB_SPROM8_GPIOB 0x1098 /* Gen. Purpose IO # 2 and 3 */ +#define SSB_SPROM8_GPIOB_P2 0x00FF /* Pin 2 */ +#define SSB_SPROM8_GPIOB_P3 0xFF00 /* Pin 3 */ +#define SSB_SPROM8_GPIOB_P3_SHIFT 8 +#define SSB_SPROM8_RSSIPARM2G 0x10A4 /* RSSI params for 2GHz */ +#define SSB_SPROM8_RSSISMF2G 0x000F +#define SSB_SPROM8_RSSISMC2G 0x00F0 +#define SSB_SPROM8_RSSISMC2G_SHIFT 4 +#define SSB_SPROM8_RSSISAV2G 0x0700 +#define SSB_SPROM8_RSSISAV2G_SHIFT 8 +#define SSB_SPROM8_BXA2G 0x1800 +#define SSB_SPROM8_BXA2G_SHIFT 11 +#define SSB_SPROM8_RSSIPARM5G 0x10A6 /* RSSI params for 5GHz */ +#define SSB_SPROM8_RSSISMF5G 0x000F +#define SSB_SPROM8_RSSISMC5G 0x00F0 +#define SSB_SPROM8_RSSISMC5G_SHIFT 4 +#define SSB_SPROM8_RSSISAV5G 0x0700 +#define SSB_SPROM8_RSSISAV5G_SHIFT 8 +#define SSB_SPROM8_BXA5G 0x1800 +#define SSB_SPROM8_BXA5G_SHIFT 11 +#define SSB_SPROM8_TRI25G 0x10A8 /* TX isolation 2.4&5.3GHz */ +#define SSB_SPROM8_TRI2G 0x00FF /* TX isolation 2.4GHz */ +#define SSB_SPROM8_TRI5G 0xFF00 /* TX isolation 5.3GHz */ +#define SSB_SPROM8_TRI5G_SHIFT 8 +#define SSB_SPROM8_TRI5GHL 0x10AA /* TX isolation 5.2/5.8GHz */ +#define SSB_SPROM8_TRI5GL 0x00FF /* TX isolation 5.2GHz */ +#define SSB_SPROM8_TRI5GH 0xFF00 /* TX isolation 5.8GHz */ +#define SSB_SPROM8_TRI5GH_SHIFT 8 +#define SSB_SPROM8_RXPO 0x10AC /* RX power offsets */ +#define SSB_SPROM8_RXPO2G 0x00FF /* 2GHz RX power offset */ +#define SSB_SPROM8_RXPO5G 0xFF00 /* 5GHz RX power offset */ +#define SSB_SPROM8_RXPO5G_SHIFT 8 +#define SSB_SPROM8_MAXP_BG 0x10C0 /* Max Power 2GHz in path 1 */ +#define SSB_SPROM8_MAXP_BG_MASK 0x00FF /* Mask for Max Power 2GHz */ +#define SSB_SPROM8_ITSSI_BG 0xFF00 /* Mask for path 1 itssi_bg */ +#define SSB_SPROM8_ITSSI_BG_SHIFT 8 +#define SSB_SPROM8_PA0B0 0x10C2 /* 2GHz power amp settings */ +#define SSB_SPROM8_PA0B1 0x10C4 +#define SSB_SPROM8_PA0B2 0x10C6 +#define SSB_SPROM8_MAXP_A 0x10C8 /* Max Power 5.3GHz */ +#define SSB_SPROM8_MAXP_A_MASK 0x00FF /* Mask for Max Power 5.3GHz 
*/ +#define SSB_SPROM8_ITSSI_A 0xFF00 /* Mask for path 1 itssi_a */ +#define SSB_SPROM8_ITSSI_A_SHIFT 8 +#define SSB_SPROM8_MAXP_AHL 0x10CA /* Max Power 5.2/5.8GHz */ +#define SSB_SPROM8_MAXP_AH_MASK 0x00FF /* Mask for Max Power 5.8GHz */ +#define SSB_SPROM8_MAXP_AL_MASK 0xFF00 /* Mask for Max Power 5.2GHz */ +#define SSB_SPROM8_MAXP_AL_SHIFT 8 +#define SSB_SPROM8_PA1B0 0x10CC /* 5.3GHz power amp settings */ +#define SSB_SPROM8_PA1B1 0x10CE +#define SSB_SPROM8_PA1B2 0x10D0 +#define SSB_SPROM8_PA1LOB0 0x10D2 /* 5.2GHz power amp settings */ +#define SSB_SPROM8_PA1LOB1 0x10D4 +#define SSB_SPROM8_PA1LOB2 0x10D6 +#define SSB_SPROM8_PA1HIB0 0x10D8 /* 5.8GHz power amp settings */ +#define SSB_SPROM8_PA1HIB1 0x10DA +#define SSB_SPROM8_PA1HIB2 0x10DC +#define SSB_SPROM8_CCK2GPO 0x1140 /* CCK power offset */ +#define SSB_SPROM8_OFDM2GPO 0x1142 /* 2.4GHz OFDM power offset */ +#define SSB_SPROM8_OFDM5GPO 0x1146 /* 5.3GHz OFDM power offset */ +#define SSB_SPROM8_OFDM5GLPO 0x114A /* 5.2GHz OFDM power offset */ +#define SSB_SPROM8_OFDM5GHPO 0x114E /* 5.8GHz OFDM power offset */ /* Values for SSB_SPROM1_BINF_CCODE */ enum { diff --git a/include/linux/stackprotector.h b/include/linux/stackprotector.h new file mode 100644 index 00000000000..6f3e54c704c --- /dev/null +++ b/include/linux/stackprotector.h @@ -0,0 +1,16 @@ +#ifndef _LINUX_STACKPROTECTOR_H +#define _LINUX_STACKPROTECTOR_H 1 + +#include <linux/compiler.h> +#include <linux/sched.h> +#include <linux/random.h> + +#ifdef CONFIG_CC_STACKPROTECTOR +# include <asm/stackprotector.h> +#else +static inline void boot_init_stack_canary(void) +{ +} +#endif + +#endif diff --git a/include/linux/stacktrace.h b/include/linux/stacktrace.h index 1a8cecc4f38..51efbef38fb 100644 --- a/include/linux/stacktrace.h +++ b/include/linux/stacktrace.h @@ -4,6 +4,8 @@ struct task_struct; #ifdef CONFIG_STACKTRACE +struct task_struct; + struct stack_trace { unsigned int nr_entries, max_entries; unsigned long *entries; @@ -11,6 +13,7 @@ struct stack_trace { }; extern void save_stack_trace(struct stack_trace *trace); +extern void save_stack_trace_bp(struct stack_trace *trace, unsigned long bp); extern void save_stack_trace_tsk(struct task_struct *tsk, struct stack_trace *trace); diff --git a/include/linux/string.h b/include/linux/string.h index d18fc198aa2..489019ef169 100644 --- a/include/linux/string.h +++ b/include/linux/string.h @@ -10,8 +10,10 @@ #include <linux/compiler.h> /* for inline */ #include <linux/types.h> /* for size_t */ #include <linux/stddef.h> /* for NULL */ +#include <stdarg.h> extern char *strndup_user(const char __user *, long); +extern void *memdup_user(const void __user *, size_t); /* * Include machine specific inline routines @@ -111,8 +113,23 @@ extern void argv_free(char **argv); extern bool sysfs_streq(const char *s1, const char *s2); +#ifdef CONFIG_BINARY_PRINTF +int vbin_printf(u32 *bin_buf, size_t size, const char *fmt, va_list args); +int bstr_printf(char *buf, size_t size, const char *fmt, const u32 *bin_buf); +int bprintf(u32 *bin_buf, size_t size, const char *fmt, ...) __printf(3, 4); +#endif + extern ssize_t memory_read_from_buffer(void *to, size_t count, loff_t *ppos, const void *from, size_t available); +/** + * strstarts - does @str start with @prefix? + * @str: string to examine + * @prefix: prefix to look for. 
+ */ +static inline bool strstarts(const char *str, const char *prefix) +{ + return strncmp(str, prefix, strlen(prefix)) == 0; +} #endif #endif /* _LINUX_STRING_H_ */ diff --git a/include/linux/stringify.h b/include/linux/stringify.h index 0b4388356c8..841cec8ed52 100644 --- a/include/linux/stringify.h +++ b/include/linux/stringify.h @@ -6,7 +6,7 @@ * converts to "bar". */ -#define __stringify_1(x) #x -#define __stringify(x) __stringify_1(x) +#define __stringify_1(x...) #x +#define __stringify(x...) __stringify_1(x) #endif /* !__LINUX_STRINGIFY_H */ diff --git a/include/linux/sunrpc/bc_xprt.h b/include/linux/sunrpc/bc_xprt.h new file mode 100644 index 00000000000..6508f0dc0ef --- /dev/null +++ b/include/linux/sunrpc/bc_xprt.h @@ -0,0 +1,49 @@ +/****************************************************************************** + +(c) 2008 NetApp. All Rights Reserved. + +NetApp provides this source code under the GPL v2 License. +The GPL v2 license is available at +http://opensource.org/licenses/gpl-license.php. + +THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS +"AS IS" AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT +LIMITED TO, THE IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR +A PARTICULAR PURPOSE ARE DISCLAIMED. IN NO EVENT SHALL THE COPYRIGHT OWNER OR +CONTRIBUTORS BE LIABLE FOR ANY DIRECT, INDIRECT, INCIDENTAL, SPECIAL, +EXEMPLARY, OR CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT LIMITED TO, +PROCUREMENT OF SUBSTITUTE GOODS OR SERVICES; LOSS OF USE, DATA, OR +PROFITS; OR BUSINESS INTERRUPTION) HOWEVER CAUSED AND ON ANY THEORY OF +LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY, OR TORT (INCLUDING +NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE OF THIS +SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE. 
+ +******************************************************************************/ + +/* + * Functions to create and manage the backchannel + */ + +#ifndef _LINUX_SUNRPC_BC_XPRT_H +#define _LINUX_SUNRPC_BC_XPRT_H + +#include <linux/sunrpc/svcsock.h> +#include <linux/sunrpc/xprt.h> +#include <linux/sunrpc/sched.h> + +#ifdef CONFIG_NFS_V4_1 +struct rpc_rqst *xprt_alloc_bc_request(struct rpc_xprt *xprt); +void xprt_free_bc_request(struct rpc_rqst *req); +int xprt_setup_backchannel(struct rpc_xprt *, unsigned int min_reqs); +void xprt_destroy_backchannel(struct rpc_xprt *, int max_reqs); +void bc_release_request(struct rpc_task *); +int bc_send(struct rpc_rqst *req); +#else /* CONFIG_NFS_V4_1 */ +static inline int xprt_setup_backchannel(struct rpc_xprt *xprt, + unsigned int min_reqs) +{ + return 0; +} +#endif /* CONFIG_NFS_V4_1 */ +#endif /* _LINUX_SUNRPC_BC_XPRT_H */ + diff --git a/include/linux/sunrpc/cache.h b/include/linux/sunrpc/cache.h index 2d8b211b932..6f52b4d7c44 100644 --- a/include/linux/sunrpc/cache.h +++ b/include/linux/sunrpc/cache.h @@ -59,6 +59,15 @@ struct cache_head { #define CACHE_NEW_EXPIRY 120 /* keep new things pending confirmation for 120 seconds */ +struct cache_detail_procfs { + struct proc_dir_entry *proc_ent; + struct proc_dir_entry *flush_ent, *channel_ent, *content_ent; +}; + +struct cache_detail_pipefs { + struct dentry *dir; +}; + struct cache_detail { struct module * owner; int hash_size; @@ -70,15 +79,17 @@ struct cache_detail { char *name; void (*cache_put)(struct kref *); - void (*cache_request)(struct cache_detail *cd, - struct cache_head *h, - char **bpp, int *blen); + int (*cache_upcall)(struct cache_detail *, + struct cache_head *); + int (*cache_parse)(struct cache_detail *, char *buf, int len); int (*cache_show)(struct seq_file *m, struct cache_detail *cd, struct cache_head *h); + void (*warn_no_listener)(struct cache_detail *cd, + int has_died); struct cache_head * (*alloc)(void); int (*match)(struct cache_head *orig, struct cache_head *new); @@ -96,13 +107,15 @@ struct cache_detail { /* fields for communication over channel */ struct list_head queue; - struct proc_dir_entry *proc_ent; - struct proc_dir_entry *flush_ent, *channel_ent, *content_ent; atomic_t readers; /* how many time is /chennel open */ time_t last_close; /* if no readers, when did last close */ time_t last_warn; /* when we last warned about no readers */ - void (*warn_no_listener)(struct cache_detail *cd); + + union { + struct cache_detail_procfs procfs; + struct cache_detail_pipefs pipefs; + } u; }; @@ -127,6 +140,10 @@ struct cache_deferred_req { }; +extern const struct file_operations cache_file_operations_pipefs; +extern const struct file_operations content_file_operations_pipefs; +extern const struct file_operations cache_flush_operations_pipefs; + extern struct cache_head * sunrpc_cache_lookup(struct cache_detail *detail, struct cache_head *key, int hash); @@ -134,6 +151,13 @@ extern struct cache_head * sunrpc_cache_update(struct cache_detail *detail, struct cache_head *new, struct cache_head *old, int hash); +extern int +sunrpc_cache_pipe_upcall(struct cache_detail *detail, struct cache_head *h, + void (*cache_request)(struct cache_detail *, + struct cache_head *, + char **, + int *)); + extern void cache_clean_deferred(void *owner); @@ -171,6 +195,10 @@ extern void cache_purge(struct cache_detail *detail); extern int cache_register(struct cache_detail *cd); extern void cache_unregister(struct cache_detail *cd); +extern int sunrpc_cache_register_pipefs(struct dentry *parent, 
const char *, + mode_t, struct cache_detail *); +extern void sunrpc_cache_unregister_pipefs(struct cache_detail *); + extern void qword_add(char **bpp, int *lp, char *str); extern void qword_addhex(char **bpp, int *lp, char *buf, int blen); extern int qword_get(char **bpp, char *dest, int bufsize); diff --git a/include/linux/sunrpc/clnt.h b/include/linux/sunrpc/clnt.h index c39a21040dc..ab3f6e90caa 100644 --- a/include/linux/sunrpc/clnt.h +++ b/include/linux/sunrpc/clnt.h @@ -9,6 +9,10 @@ #ifndef _LINUX_SUNRPC_CLNT_H #define _LINUX_SUNRPC_CLNT_H +#include <linux/socket.h> +#include <linux/in.h> +#include <linux/in6.h> + #include <linux/sunrpc/msg_prot.h> #include <linux/sunrpc/sched.h> #include <linux/sunrpc/xprt.h> @@ -17,6 +21,7 @@ #include <linux/sunrpc/xdr.h> #include <linux/sunrpc/timer.h> #include <asm/signal.h> +#include <linux/path.h> struct rpc_inode; @@ -50,9 +55,7 @@ struct rpc_clnt { int cl_nodelen; /* nodename length */ char cl_nodename[UNX_MAXNODENAME]; - char cl_pathname[30];/* Path in rpc_pipe_fs */ - struct vfsmount * cl_vfsmnt; - struct dentry * cl_dentry; /* inode */ + struct path cl_path; struct rpc_clnt * cl_parent; /* Points to parent of clones */ struct rpc_rtt cl_rtt_default; struct rpc_timeout cl_timeout_default; @@ -143,6 +146,7 @@ int rpc_call_sync(struct rpc_clnt *clnt, const struct rpc_message *msg, int flags); struct rpc_task *rpc_call_null(struct rpc_clnt *clnt, struct rpc_cred *cred, int flags); +void rpc_restart_call_prepare(struct rpc_task *); void rpc_restart_call(struct rpc_task *); void rpc_setbufsize(struct rpc_clnt *, unsigned int, unsigned int); size_t rpc_max_payload(struct rpc_clnt *); @@ -150,5 +154,39 @@ void rpc_force_rebind(struct rpc_clnt *); size_t rpc_peeraddr(struct rpc_clnt *, struct sockaddr *, size_t); const char *rpc_peeraddr2str(struct rpc_clnt *, enum rpc_display_format_t); +size_t rpc_ntop(const struct sockaddr *, char *, const size_t); +size_t rpc_pton(const char *, const size_t, + struct sockaddr *, const size_t); +char * rpc_sockaddr2uaddr(const struct sockaddr *); +size_t rpc_uaddr2sockaddr(const char *, const size_t, + struct sockaddr *, const size_t); + +static inline unsigned short rpc_get_port(const struct sockaddr *sap) +{ + switch (sap->sa_family) { + case AF_INET: + return ntohs(((struct sockaddr_in *)sap)->sin_port); + case AF_INET6: + return ntohs(((struct sockaddr_in6 *)sap)->sin6_port); + } + return 0; +} + +static inline void rpc_set_port(struct sockaddr *sap, + const unsigned short port) +{ + switch (sap->sa_family) { + case AF_INET: + ((struct sockaddr_in *)sap)->sin_port = htons(port); + break; + case AF_INET6: + ((struct sockaddr_in6 *)sap)->sin6_port = htons(port); + break; + } +} + +#define IPV6_SCOPE_DELIMITER '%' +#define IPV6_SCOPE_ID_LEN sizeof("%nnnnnnnnnn") + #endif /* __KERNEL__ */ #endif /* _LINUX_SUNRPC_CLNT_H */ diff --git a/include/linux/sunrpc/msg_prot.h b/include/linux/sunrpc/msg_prot.h index 70df4f1d884..77e62488339 100644 --- a/include/linux/sunrpc/msg_prot.h +++ b/include/linux/sunrpc/msg_prot.h @@ -189,7 +189,22 @@ typedef __be32 rpc_fraghdr; * Additionally, the two alternative forms specified in Section 2.2 of * [RFC2373] are also acceptable. 
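The address helpers added to clnt.h above (rpc_pton(), rpc_ntop(), rpc_get_port()/rpc_set_port() and the uaddr converters) cover the common manipulations behind these universal-address rules. A hypothetical sketch, assuming, as the naming suggests, that rpc_pton() returns 0 on failure:

#include <linux/errno.h>
#include <linux/socket.h>
#include <linux/string.h>
#include <linux/sunrpc/clnt.h>

/* Build a sockaddr for a made-up server address and the usual NFS port. */
static int example_fill_server_addr(struct sockaddr_storage *ss)
{
	struct sockaddr *sap = (struct sockaddr *)ss;
	const char *server = "192.168.1.1";	/* illustrative address */

	if (rpc_pton(server, strlen(server), sap, sizeof(*ss)) == 0)
		return -EINVAL;
	rpc_set_port(sap, 2049);		/* conventional NFS port */
	return 0;
}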
*/ -#define RPCBIND_MAXUADDRLEN (56u) + +#include <linux/inet.h> + +/* Maximum size of the port number part of a universal address */ +#define RPCBIND_MAXUADDRPLEN sizeof(".255.255") + +/* Maximum size of an IPv4 universal address */ +#define RPCBIND_MAXUADDR4LEN \ + (INET_ADDRSTRLEN + RPCBIND_MAXUADDRPLEN) + +/* Maximum size of an IPv6 universal address */ +#define RPCBIND_MAXUADDR6LEN \ + (INET6_ADDRSTRLEN + RPCBIND_MAXUADDRPLEN) + +/* Assume INET6_ADDRSTRLEN will always be larger than INET_ADDRSTRLEN... */ +#define RPCBIND_MAXUADDRLEN RPCBIND_MAXUADDR6LEN #endif /* __KERNEL__ */ #endif /* _LINUX_SUNRPC_MSGPROT_H_ */ diff --git a/include/linux/sunrpc/rpc_pipe_fs.h b/include/linux/sunrpc/rpc_pipe_fs.h index cea764c2359..cf14db975da 100644 --- a/include/linux/sunrpc/rpc_pipe_fs.h +++ b/include/linux/sunrpc/rpc_pipe_fs.h @@ -3,6 +3,8 @@ #ifdef __KERNEL__ +#include <linux/workqueue.h> + struct rpc_pipe_msg { struct list_head list; void *data; @@ -32,8 +34,8 @@ struct rpc_inode { wait_queue_head_t waitq; #define RPC_PIPE_WAIT_FOR_OPEN 1 int flags; - struct rpc_pipe_ops *ops; struct delayed_work queue_timeout; + const struct rpc_pipe_ops *ops; }; static inline struct rpc_inode * @@ -44,9 +46,19 @@ RPC_I(struct inode *inode) extern int rpc_queue_upcall(struct inode *, struct rpc_pipe_msg *); -extern struct dentry *rpc_mkdir(char *, struct rpc_clnt *); -extern int rpc_rmdir(struct dentry *); -extern struct dentry *rpc_mkpipe(struct dentry *, const char *, void *, struct rpc_pipe_ops *, int flags); +struct rpc_clnt; +extern struct dentry *rpc_create_client_dir(struct dentry *, struct qstr *, struct rpc_clnt *); +extern int rpc_remove_client_dir(struct dentry *); + +struct cache_detail; +extern struct dentry *rpc_create_cache_dir(struct dentry *, + struct qstr *, + mode_t umode, + struct cache_detail *); +extern void rpc_remove_cache_dir(struct dentry *); + +extern struct dentry *rpc_mkpipe(struct dentry *, const char *, void *, + const struct rpc_pipe_ops *, int flags); extern int rpc_unlink(struct dentry *); extern struct vfsmount *rpc_get_mount(void); extern void rpc_put_mount(void); diff --git a/include/linux/sunrpc/sched.h b/include/linux/sunrpc/sched.h index 64981a2f1ca..401097781fc 100644 --- a/include/linux/sunrpc/sched.h +++ b/include/linux/sunrpc/sched.h @@ -210,6 +210,8 @@ struct rpc_wait_queue { */ struct rpc_task *rpc_new_task(const struct rpc_task_setup *); struct rpc_task *rpc_run_task(const struct rpc_task_setup *); +struct rpc_task *rpc_run_bc_task(struct rpc_rqst *req, + const struct rpc_call_ops *ops); void rpc_put_task(struct rpc_task *); void rpc_exit_task(struct rpc_task *); void rpc_release_calldata(const struct rpc_call_ops *, void *); @@ -237,6 +239,7 @@ void rpc_show_tasks(void); int rpc_init_mempool(void); void rpc_destroy_mempool(void); extern struct workqueue_struct *rpciod_workqueue; +void rpc_prepare_task(struct rpc_task *task); static inline void rpc_exit(struct rpc_task *task, int status) { diff --git a/include/linux/sunrpc/svc.h b/include/linux/sunrpc/svc.h index 3435d24bfe5..ea8009695c6 100644 --- a/include/linux/sunrpc/svc.h +++ b/include/linux/sunrpc/svc.h @@ -24,6 +24,15 @@ */ typedef int (*svc_thread_fn)(void *); +/* statistics for svc_pool structures */ +struct svc_pool_stats { + unsigned long packets; + unsigned long sockets_queued; + unsigned long threads_woken; + unsigned long overloads_avoided; + unsigned long threads_timedout; +}; + /* * * RPC service thread pool. 
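The new svc_pool_stats counters are plain unsigned longs, so exporting them is mostly a formatting exercise; the patch wires the real file up through svc_pool_stats_open() further down. A minimal sketch of what a show routine could look like (the function name and column layout are illustrative, not the format the patch actually emits):

#include <linux/seq_file.h>
#include <linux/sunrpc/svc.h>

/* Print one line of counters for a single pool. */
static void example_show_pool_stats(struct seq_file *m, int poolid,
				    const struct svc_pool_stats *st)
{
	seq_printf(m, "pool %d: %lu %lu %lu %lu %lu\n", poolid,
		   st->packets, st->sockets_queued, st->threads_woken,
		   st->overloads_avoided, st->threads_timedout);
}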
@@ -41,6 +50,8 @@ struct svc_pool { struct list_head sp_sockets; /* pending sockets */ unsigned int sp_nrthreads; /* # of threads in pool */ struct list_head sp_all_threads; /* all server threads */ + int sp_nwaking; /* number of threads woken but not yet active */ + struct svc_pool_stats sp_stats; /* statistics on pool operation */ } ____cacheline_aligned_in_smp; /* @@ -69,7 +80,6 @@ struct svc_serv { struct list_head sv_tempsocks; /* all temporary sockets */ int sv_tmpcnt; /* count of temporary sockets */ struct timer_list sv_temptimer; /* timer for aging temporary sockets */ - sa_family_t sv_family; /* listener's address family */ char * sv_name; /* service name */ @@ -84,6 +94,17 @@ struct svc_serv { struct module * sv_module; /* optional module to count when * adding threads */ svc_thread_fn sv_function; /* main function for threads */ + unsigned int sv_drc_max_pages; /* Total pages for DRC */ + unsigned int sv_drc_pages_used;/* DRC pages used */ +#if defined(CONFIG_NFS_V4_1) + struct list_head sv_cb_list; /* queue for callback requests + * that arrive over the same + * connection */ + spinlock_t sv_cb_lock; /* protects the svc_cb_list */ + wait_queue_head_t sv_cb_waitq; /* sleep here if there are no + * entries in the svc_cb_list */ + struct svc_xprt *bc_xprt; +#endif /* CONFIG_NFS_V4_1 */ }; /* @@ -219,6 +240,7 @@ struct svc_rqst { struct svc_cred rq_cred; /* auth info */ void * rq_xprt_ctxt; /* transport specific context ptr */ struct svc_deferred_req*rq_deferred; /* deferred request we are replaying */ + int rq_usedeferral; /* use deferral */ size_t rq_xprt_hlen; /* xprt header len */ struct xdr_buf rq_arg; @@ -264,6 +286,7 @@ struct svc_rqst { * cache pages */ wait_queue_head_t rq_wait; /* synchronization */ struct task_struct *rq_task; /* service thread */ + int rq_waking; /* 1 if thread is being woken */ }; /* @@ -385,19 +408,22 @@ struct svc_procedure { /* * Function prototypes. 
*/ -struct svc_serv *svc_create(struct svc_program *, unsigned int, sa_family_t, +struct svc_serv *svc_create(struct svc_program *, unsigned int, void (*shutdown)(struct svc_serv *)); struct svc_rqst *svc_prepare_thread(struct svc_serv *serv, struct svc_pool *pool); void svc_exit_thread(struct svc_rqst *); struct svc_serv * svc_create_pooled(struct svc_program *, unsigned int, - sa_family_t, void (*shutdown)(struct svc_serv *), + void (*shutdown)(struct svc_serv *), svc_thread_fn, struct module *); int svc_set_num_threads(struct svc_serv *, struct svc_pool *, int); +int svc_pool_stats_open(struct svc_serv *serv, struct file *file); void svc_destroy(struct svc_serv *); int svc_process(struct svc_rqst *); -int svc_register(const struct svc_serv *, const unsigned short, - const unsigned short); +int bc_svc_process(struct svc_serv *, struct rpc_rqst *, + struct svc_rqst *); +int svc_register(const struct svc_serv *, const int, + const unsigned short, const unsigned short); void svc_wake_up(struct svc_serv *); void svc_reserve(struct svc_rqst *rqstp, int space); diff --git a/include/linux/sunrpc/svc_xprt.h b/include/linux/sunrpc/svc_xprt.h index 0127daca435..2223ae0b5ed 100644 --- a/include/linux/sunrpc/svc_xprt.h +++ b/include/linux/sunrpc/svc_xprt.h @@ -71,7 +71,8 @@ int svc_reg_xprt_class(struct svc_xprt_class *); void svc_unreg_xprt_class(struct svc_xprt_class *); void svc_xprt_init(struct svc_xprt_class *, struct svc_xprt *, struct svc_serv *); -int svc_create_xprt(struct svc_serv *, char *, unsigned short, int); +int svc_create_xprt(struct svc_serv *, const char *, const int, + const unsigned short, int); void svc_xprt_enqueue(struct svc_xprt *xprt); void svc_xprt_received(struct svc_xprt *); void svc_xprt_put(struct svc_xprt *xprt); @@ -80,40 +81,44 @@ void svc_close_xprt(struct svc_xprt *xprt); void svc_delete_xprt(struct svc_xprt *xprt); int svc_port_is_privileged(struct sockaddr *sin); int svc_print_xprts(char *buf, int maxlen); -struct svc_xprt *svc_find_xprt(struct svc_serv *, char *, int, int); -int svc_xprt_names(struct svc_serv *serv, char *buf, int buflen); +struct svc_xprt *svc_find_xprt(struct svc_serv *serv, const char *xcl_name, + const sa_family_t af, const unsigned short port); +int svc_xprt_names(struct svc_serv *serv, char *buf, const int buflen); static inline void svc_xprt_get(struct svc_xprt *xprt) { kref_get(&xprt->xpt_ref); } static inline void svc_xprt_set_local(struct svc_xprt *xprt, - struct sockaddr *sa, int salen) + const struct sockaddr *sa, + const size_t salen) { memcpy(&xprt->xpt_local, sa, salen); xprt->xpt_locallen = salen; } static inline void svc_xprt_set_remote(struct svc_xprt *xprt, - struct sockaddr *sa, int salen) + const struct sockaddr *sa, + const size_t salen) { memcpy(&xprt->xpt_remote, sa, salen); xprt->xpt_remotelen = salen; } -static inline unsigned short svc_addr_port(struct sockaddr *sa) +static inline unsigned short svc_addr_port(const struct sockaddr *sa) { - unsigned short ret = 0; + const struct sockaddr_in *sin = (const struct sockaddr_in *)sa; + const struct sockaddr_in6 *sin6 = (const struct sockaddr_in6 *)sa; + switch (sa->sa_family) { case AF_INET: - ret = ntohs(((struct sockaddr_in *)sa)->sin_port); - break; + return ntohs(sin->sin_port); case AF_INET6: - ret = ntohs(((struct sockaddr_in6 *)sa)->sin6_port); - break; + return ntohs(sin6->sin6_port); } - return ret; + + return 0; } -static inline size_t svc_addr_len(struct sockaddr *sa) +static inline size_t svc_addr_len(const struct sockaddr *sa) { switch (sa->sa_family) { case 
AF_INET: @@ -121,39 +126,43 @@ static inline size_t svc_addr_len(struct sockaddr *sa) case AF_INET6: return sizeof(struct sockaddr_in6); } - return -EAFNOSUPPORT; + + return 0; } -static inline unsigned short svc_xprt_local_port(struct svc_xprt *xprt) +static inline unsigned short svc_xprt_local_port(const struct svc_xprt *xprt) { - return svc_addr_port((struct sockaddr *)&xprt->xpt_local); + return svc_addr_port((const struct sockaddr *)&xprt->xpt_local); } -static inline unsigned short svc_xprt_remote_port(struct svc_xprt *xprt) +static inline unsigned short svc_xprt_remote_port(const struct svc_xprt *xprt) { - return svc_addr_port((struct sockaddr *)&xprt->xpt_remote); + return svc_addr_port((const struct sockaddr *)&xprt->xpt_remote); } -static inline char *__svc_print_addr(struct sockaddr *addr, - char *buf, size_t len) +static inline char *__svc_print_addr(const struct sockaddr *addr, + char *buf, const size_t len) { + const struct sockaddr_in *sin = (const struct sockaddr_in *)addr; + const struct sockaddr_in6 *sin6 = (const struct sockaddr_in6 *)addr; + switch (addr->sa_family) { case AF_INET: - snprintf(buf, len, "%pI4, port=%u", - &((struct sockaddr_in *)addr)->sin_addr, - ntohs(((struct sockaddr_in *) addr)->sin_port)); + snprintf(buf, len, "%pI4, port=%u", &sin->sin_addr, + ntohs(sin->sin_port)); break; case AF_INET6: snprintf(buf, len, "%pI6, port=%u", - &((struct sockaddr_in6 *)addr)->sin6_addr, - ntohs(((struct sockaddr_in6 *) addr)->sin6_port)); + &sin6->sin6_addr, + ntohs(sin6->sin6_port)); break; default: snprintf(buf, len, "unknown address type: %d", addr->sa_family); break; } + return buf; } #endif /* SUNRPC_SVC_XPRT_H */ diff --git a/include/linux/sunrpc/svcsock.h b/include/linux/sunrpc/svcsock.h index 483e10380aa..04dba23c59f 100644 --- a/include/linux/sunrpc/svcsock.h +++ b/include/linux/sunrpc/svcsock.h @@ -38,10 +38,15 @@ int svc_recv(struct svc_rqst *, long); int svc_send(struct svc_rqst *); void svc_drop(struct svc_rqst *); void svc_sock_update_bufs(struct svc_serv *serv); -int svc_sock_names(char *buf, struct svc_serv *serv, char *toclose); -int svc_addsock(struct svc_serv *serv, int fd, char *name_return); +int svc_sock_names(struct svc_serv *serv, char *buf, + const size_t buflen, + const char *toclose); +int svc_addsock(struct svc_serv *serv, const int fd, + char *name_return, const size_t len); void svc_init_xprt_sock(void); void svc_cleanup_xprt_sock(void); +struct svc_xprt *svc_sock_create(struct svc_serv *serv, int prot); +void svc_sock_destroy(struct svc_xprt *); /* * svc_makesock socket characteristics diff --git a/include/linux/sunrpc/xdr.h b/include/linux/sunrpc/xdr.h index 49e1eb45446..7da466ba4b0 100644 --- a/include/linux/sunrpc/xdr.h +++ b/include/linux/sunrpc/xdr.h @@ -12,7 +12,6 @@ #include <linux/uio.h> #include <asm/byteorder.h> #include <linux/scatterlist.h> -#include <linux/smp_lock.h> /* * Buffer adjustment @@ -69,27 +68,27 @@ struct xdr_buf { * pre-xdr'ed macros. 
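The const-qualified svc_xprt address helpers above mean callers no longer need to open-code the AF_INET/AF_INET6 switch when logging a peer. A small illustrative sketch (the function name and 64-byte buffer are invented; 64 bytes comfortably holds an IPv6 address plus ", port=65535"):

#include <linux/kernel.h>
#include <linux/sunrpc/svc_xprt.h>

static void example_log_peer(const struct svc_xprt *xprt)
{
	char buf[64];

	printk(KERN_INFO "request from %s\n",
	       __svc_print_addr((const struct sockaddr *)&xprt->xpt_remote,
				buf, sizeof(buf)));
}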
*/ -#define xdr_zero __constant_htonl(0) -#define xdr_one __constant_htonl(1) -#define xdr_two __constant_htonl(2) - -#define rpc_success __constant_htonl(RPC_SUCCESS) -#define rpc_prog_unavail __constant_htonl(RPC_PROG_UNAVAIL) -#define rpc_prog_mismatch __constant_htonl(RPC_PROG_MISMATCH) -#define rpc_proc_unavail __constant_htonl(RPC_PROC_UNAVAIL) -#define rpc_garbage_args __constant_htonl(RPC_GARBAGE_ARGS) -#define rpc_system_err __constant_htonl(RPC_SYSTEM_ERR) -#define rpc_drop_reply __constant_htonl(RPC_DROP_REPLY) - -#define rpc_auth_ok __constant_htonl(RPC_AUTH_OK) -#define rpc_autherr_badcred __constant_htonl(RPC_AUTH_BADCRED) -#define rpc_autherr_rejectedcred __constant_htonl(RPC_AUTH_REJECTEDCRED) -#define rpc_autherr_badverf __constant_htonl(RPC_AUTH_BADVERF) -#define rpc_autherr_rejectedverf __constant_htonl(RPC_AUTH_REJECTEDVERF) -#define rpc_autherr_tooweak __constant_htonl(RPC_AUTH_TOOWEAK) -#define rpcsec_gsserr_credproblem __constant_htonl(RPCSEC_GSS_CREDPROBLEM) -#define rpcsec_gsserr_ctxproblem __constant_htonl(RPCSEC_GSS_CTXPROBLEM) -#define rpc_autherr_oldseqnum __constant_htonl(101) +#define xdr_zero cpu_to_be32(0) +#define xdr_one cpu_to_be32(1) +#define xdr_two cpu_to_be32(2) + +#define rpc_success cpu_to_be32(RPC_SUCCESS) +#define rpc_prog_unavail cpu_to_be32(RPC_PROG_UNAVAIL) +#define rpc_prog_mismatch cpu_to_be32(RPC_PROG_MISMATCH) +#define rpc_proc_unavail cpu_to_be32(RPC_PROC_UNAVAIL) +#define rpc_garbage_args cpu_to_be32(RPC_GARBAGE_ARGS) +#define rpc_system_err cpu_to_be32(RPC_SYSTEM_ERR) +#define rpc_drop_reply cpu_to_be32(RPC_DROP_REPLY) + +#define rpc_auth_ok cpu_to_be32(RPC_AUTH_OK) +#define rpc_autherr_badcred cpu_to_be32(RPC_AUTH_BADCRED) +#define rpc_autherr_rejectedcred cpu_to_be32(RPC_AUTH_REJECTEDCRED) +#define rpc_autherr_badverf cpu_to_be32(RPC_AUTH_BADVERF) +#define rpc_autherr_rejectedverf cpu_to_be32(RPC_AUTH_REJECTEDVERF) +#define rpc_autherr_tooweak cpu_to_be32(RPC_AUTH_TOOWEAK) +#define rpcsec_gsserr_credproblem cpu_to_be32(RPCSEC_GSS_CREDPROBLEM) +#define rpcsec_gsserr_ctxproblem cpu_to_be32(RPCSEC_GSS_CTXPROBLEM) +#define rpc_autherr_oldseqnum cpu_to_be32(101) /* * Miscellaneous XDR helper functions @@ -118,17 +117,15 @@ static inline __be32 *xdr_encode_array(__be32 *p, const void *s, unsigned int le static inline __be32 * xdr_encode_hyper(__be32 *p, __u64 val) { - *p++ = htonl(val >> 32); - *p++ = htonl(val & 0xFFFFFFFF); - return p; + *(__be64 *)p = cpu_to_be64(val); + return p + 2; } static inline __be32 * xdr_decode_hyper(__be32 *p, __u64 *valp) { - *valp = ((__u64) ntohl(*p++)) << 32; - *valp |= ntohl(*p++); - return p; + *valp = be64_to_cpup((__be64 *)p); + return p + 2; } /* diff --git a/include/linux/sunrpc/xprt.h b/include/linux/sunrpc/xprt.h index 11fc71d50c1..c090df44257 100644 --- a/include/linux/sunrpc/xprt.h +++ b/include/linux/sunrpc/xprt.h @@ -38,10 +38,8 @@ enum rpc_display_format_t { RPC_DISPLAY_ADDR = 0, RPC_DISPLAY_PORT, RPC_DISPLAY_PROTO, - RPC_DISPLAY_ALL, RPC_DISPLAY_HEX_ADDR, RPC_DISPLAY_HEX_PORT, - RPC_DISPLAY_UNIVERSAL_ADDR, RPC_DISPLAY_NETID, RPC_DISPLAY_MAX, }; @@ -67,7 +65,8 @@ struct rpc_rqst { struct rpc_task * rq_task; /* RPC task data */ __be32 rq_xid; /* request XID */ int rq_cong; /* has incremented xprt->cong */ - int rq_received; /* receive completed */ + int rq_reply_bytes_recvd; /* number of reply */ + /* bytes received */ u32 rq_seqno; /* gss seq no. used on req. 
*/ int rq_enc_pages_num; struct page **rq_enc_pages; /* scratch pages for use by @@ -97,6 +96,12 @@ struct rpc_rqst { unsigned long rq_xtime; /* when transmitted */ int rq_ntrans; + +#if defined(CONFIG_NFS_V4_1) + struct list_head rq_bc_list; /* Callback service list */ + unsigned long rq_bc_pa_state; /* Backchannel prealloc state */ + struct list_head rq_bc_pa_list; /* Backchannel prealloc list */ +#endif /* CONFIG_NFS_V4_1 */ }; #define rq_svec rq_snd_buf.head #define rq_slen rq_snd_buf.len @@ -174,6 +179,15 @@ struct rpc_xprt { spinlock_t reserve_lock; /* lock slot table */ u32 xid; /* Next XID value to use */ struct rpc_task * snd_task; /* Task blocked in send */ +#if defined(CONFIG_NFS_V4_1) + struct svc_serv *bc_serv; /* The RPC service which will */ + /* process the callback */ + unsigned int bc_alloc_count; /* Total number of preallocs */ + spinlock_t bc_pa_lock; /* Protects the preallocated + * items */ + struct list_head bc_pa_list; /* List of preallocated + * backchannel rpc_rqst's */ +#endif /* CONFIG_NFS_V4_1 */ struct list_head recv; struct { @@ -192,6 +206,26 @@ struct rpc_xprt { const char *address_strings[RPC_DISPLAY_MAX]; }; +#if defined(CONFIG_NFS_V4_1) +/* + * Backchannel flags + */ +#define RPC_BC_PA_IN_USE 0x0001 /* Preallocated backchannel */ + /* buffer in use */ +#endif /* CONFIG_NFS_V4_1 */ + +#if defined(CONFIG_NFS_V4_1) +static inline int bc_prealloc(struct rpc_rqst *req) +{ + return test_bit(RPC_BC_PA_IN_USE, &req->rq_bc_pa_state); +} +#else +static inline int bc_prealloc(struct rpc_rqst *req) +{ + return 0; +} +#endif /* CONFIG_NFS_V4_1 */ + struct xprt_create { int ident; /* XPRT_TRANSPORT identifier */ struct sockaddr * srcaddr; /* optional local address */ @@ -235,6 +269,7 @@ static inline __be32 *xprt_skip_transport_header(struct rpc_xprt *xprt, __be32 * */ int xprt_register_transport(struct xprt_class *type); int xprt_unregister_transport(struct xprt_class *type); +int xprt_load_transport(const char *); void xprt_set_retrans_timeout_def(struct rpc_task *task); void xprt_set_retrans_timeout_rtt(struct rpc_task *task); void xprt_wake_pending_tasks(struct rpc_xprt *xprt, int status); @@ -259,6 +294,8 @@ void xprt_conditional_disconnect(struct rpc_xprt *xprt, unsigned int cookie); #define XPRT_BOUND (4) #define XPRT_BINDING (5) #define XPRT_CLOSING (6) +#define XPRT_CONNECTION_ABORT (7) +#define XPRT_CONNECTION_CLOSE (8) static inline void xprt_set_connected(struct rpc_xprt *xprt) { diff --git a/include/linux/suspend.h b/include/linux/suspend.h index 2b409c44db8..cd15df6c63c 100644 --- a/include/linux/suspend.h +++ b/include/linux/suspend.h @@ -1,9 +1,6 @@ #ifndef _LINUX_SUSPEND_H #define _LINUX_SUSPEND_H -#if defined(CONFIG_X86) || defined(CONFIG_FRV) || defined(CONFIG_PPC32) || defined(CONFIG_PPC64) -#include <asm/suspend.h> -#endif #include <linux/swap.h> #include <linux/notifier.h> #include <linux/init.h> @@ -61,10 +58,17 @@ typedef int __bitwise suspend_state_t; * by @begin(). * @prepare() is called right after devices have been suspended (ie. the * appropriate .suspend() method has been executed for each device) and - * before the nonboot CPUs are disabled (it is executed with IRQs enabled). - * This callback is optional. It returns 0 on success or a negative - * error code otherwise, in which case the system cannot enter the desired - * sleep state (@enter() and @finish() will not be called in that case). + * before device drivers' late suspend callbacks are executed. 
It returns + * 0 on success or a negative error code otherwise, in which case the + * system cannot enter the desired sleep state (@prepare_late(), @enter(), + * @wake(), and @finish() will not be called in that case). + * + * @prepare_late: Finish preparing the platform for entering the system sleep + * state indicated by @begin(). + * @prepare_late is called before disabling nonboot CPUs and after + * device drivers' late suspend callbacks have been executed. It returns + * 0 on success or a negative error code otherwise, in which case the + * system cannot enter the desired sleep state (@enter() and @wake()). * * @enter: Enter the system sleep state indicated by @begin() or represented by * the argument if @begin() is not implemented. @@ -72,19 +76,26 @@ typedef int __bitwise suspend_state_t; * error code otherwise, in which case the system cannot enter the desired * sleep state. * - * @finish: Called when the system has just left a sleep state, right after - * the nonboot CPUs have been enabled and before devices are resumed (it is - * executed with IRQs enabled). + * @wake: Called when the system has just left a sleep state, right after + * the nonboot CPUs have been enabled and before device drivers' early + * resume callbacks are executed. + * This callback is optional, but should be implemented by the platforms + * that implement @prepare_late(). If implemented, it is always called + * after @enter(), even if @enter() fails. + * + * @finish: Finish wake-up of the platform. + * @finish is called right prior to calling device drivers' regular suspend + * callbacks. * This callback is optional, but should be implemented by the platforms * that implement @prepare(). If implemented, it is always called after - * @enter() (even if @enter() fails). + * @enter() and @wake(), if implemented, even if any of them fails. * * @end: Called by the PM core right after resuming devices, to indicate to * the platform that the system has returned to the working state or * the transition to the sleep state has been aborted. * This callback is optional, but should be implemented by the platforms - * that implement @begin(), but platforms implementing @begin() should - * also provide a @end() which cleans up transitions aborted before + * that implement @begin(). Accordingly, platforms implementing @begin() + * should also provide a @end() which cleans up transitions aborted before * @enter(). * * @recover: Recover the platform from a suspend failure. 
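Read together, the updated kernel-doc above describes this ordering for a suspend/resume cycle: begin() -> prepare() -> drivers' late suspend -> prepare_late() -> nonboot CPUs offline -> enter() -> nonboot CPUs online -> wake() -> drivers' early resume -> finish() -> end(), with wake() always running after enter(), even if enter() fails. The following is a minimal, illustrative sketch of platform glue wired to the two new hooks, assuming the callback signatures shown in the struct hunk below; the example_* names are hypothetical.

#include <linux/suspend.h>

/*
 * Illustrative only: a platform that needs work both before and after the
 * late-suspend phase of device drivers.
 */
static int example_prepare(void)
{
	/* after drivers' regular suspend, before their late suspend */
	return 0;
}

static int example_prepare_late(void)
{
	/* after drivers' late suspend, before nonboot CPUs are disabled */
	return 0;
}

static int example_enter(suspend_state_t state)
{
	/* put the platform into the requested sleep state here */
	return 0;
}

static void example_wake(void)
{
	/* always called after enter(), even if enter() failed */
}

static void example_finish(void)
{
	/* final platform wake-up work */
}

static struct platform_suspend_ops example_suspend_ops = {
	.valid		= suspend_valid_only_mem,
	.prepare	= example_prepare,
	.prepare_late	= example_prepare_late,
	.enter		= example_enter,
	.wake		= example_wake,
	.finish		= example_finish,
};

Such a table would normally be registered from platform init code via suspend_set_ops(&example_suspend_ops).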
@@ -96,7 +107,9 @@ struct platform_suspend_ops { int (*valid)(suspend_state_t state); int (*begin)(suspend_state_t state); int (*prepare)(void); + int (*prepare_late)(void); int (*enter)(suspend_state_t state); + void (*wake)(void); void (*finish)(void); void (*end)(void); void (*recover)(void); @@ -232,11 +245,7 @@ extern unsigned long get_safe_page(gfp_t gfp_mask); extern void hibernation_set_ops(struct platform_hibernation_ops *ops); extern int hibernate(void); -extern int hibernate_nvs_register(unsigned long start, unsigned long size); -extern int hibernate_nvs_alloc(void); -extern void hibernate_nvs_free(void); -extern void hibernate_nvs_save(void); -extern void hibernate_nvs_restore(void); +extern bool system_entering_hibernation(void); #else /* CONFIG_HIBERNATION */ static inline int swsusp_page_is_forbidden(struct page *p) { return 0; } static inline void swsusp_set_page_free(struct page *p) {} @@ -244,6 +253,16 @@ static inline void swsusp_unset_page_free(struct page *p) {} static inline void hibernation_set_ops(struct platform_hibernation_ops *ops) {} static inline int hibernate(void) { return -ENOSYS; } +static inline bool system_entering_hibernation(void) { return false; } +#endif /* CONFIG_HIBERNATION */ + +#ifdef CONFIG_HIBERNATION_NVS +extern int hibernate_nvs_register(unsigned long start, unsigned long size); +extern int hibernate_nvs_alloc(void); +extern void hibernate_nvs_free(void); +extern void hibernate_nvs_save(void); +extern void hibernate_nvs_restore(void); +#else /* CONFIG_HIBERNATION_NVS */ static inline int hibernate_nvs_register(unsigned long a, unsigned long b) { return 0; @@ -252,7 +271,7 @@ static inline int hibernate_nvs_alloc(void) { return 0; } static inline void hibernate_nvs_free(void) {} static inline void hibernate_nvs_save(void) {} static inline void hibernate_nvs_restore(void) {} -#endif /* CONFIG_HIBERNATION */ +#endif /* CONFIG_HIBERNATION_NVS */ #ifdef CONFIG_PM_SLEEP void save_processor_state(void); diff --git a/include/linux/suspend_ioctls.h b/include/linux/suspend_ioctls.h index 2c6faec96bd..0b30382984f 100644 --- a/include/linux/suspend_ioctls.h +++ b/include/linux/suspend_ioctls.h @@ -1,14 +1,15 @@ #ifndef _LINUX_SUSPEND_IOCTLS_H #define _LINUX_SUSPEND_IOCTLS_H +#include <linux/types.h> /* * This structure is used to pass the values needed for the identification * of the resume swap area from a user space to the kernel via the * SNAPSHOT_SET_SWAP_AREA ioctl */ struct resume_swap_area { - loff_t offset; - u_int32_t dev; + __kernel_loff_t offset; + __u32 dev; } __attribute__((packed)); #define SNAPSHOT_IOC_MAGIC '3' @@ -20,13 +21,13 @@ struct resume_swap_area { #define SNAPSHOT_S2RAM _IO(SNAPSHOT_IOC_MAGIC, 11) #define SNAPSHOT_SET_SWAP_AREA _IOW(SNAPSHOT_IOC_MAGIC, 13, \ struct resume_swap_area) -#define SNAPSHOT_GET_IMAGE_SIZE _IOR(SNAPSHOT_IOC_MAGIC, 14, loff_t) +#define SNAPSHOT_GET_IMAGE_SIZE _IOR(SNAPSHOT_IOC_MAGIC, 14, __kernel_loff_t) #define SNAPSHOT_PLATFORM_SUPPORT _IO(SNAPSHOT_IOC_MAGIC, 15) #define SNAPSHOT_POWER_OFF _IO(SNAPSHOT_IOC_MAGIC, 16) #define SNAPSHOT_CREATE_IMAGE _IOW(SNAPSHOT_IOC_MAGIC, 17, int) #define SNAPSHOT_PREF_IMAGE_SIZE _IO(SNAPSHOT_IOC_MAGIC, 18) -#define SNAPSHOT_AVAIL_SWAP_SIZE _IOR(SNAPSHOT_IOC_MAGIC, 19, loff_t) -#define SNAPSHOT_ALLOC_SWAP_PAGE _IOR(SNAPSHOT_IOC_MAGIC, 20, loff_t) +#define SNAPSHOT_AVAIL_SWAP_SIZE _IOR(SNAPSHOT_IOC_MAGIC, 19, __kernel_loff_t) +#define SNAPSHOT_ALLOC_SWAP_PAGE _IOR(SNAPSHOT_IOC_MAGIC, 20, __kernel_loff_t) #define SNAPSHOT_IOC_MAXNR 20 #endif /* _LINUX_SUSPEND_IOCTLS_H */ 
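The suspend_ioctls.h hunk above also makes the header self-contained for user space: it now pulls in <linux/types.h> and uses __u32/__kernel_loff_t rather than the kernel-internal u_int32_t/loff_t. As a hedged illustration of the interface (the /dev/snapshot path and the error handling are mine, not part of the patch), a uswsusp-style tool could query the hibernation image size like this:

#include <stdio.h>
#include <fcntl.h>
#include <unistd.h>
#include <sys/ioctl.h>
#include <linux/suspend_ioctls.h>

int main(void)
{
	__kernel_loff_t size = 0;
	int fd = open("/dev/snapshot", O_RDONLY);	/* snapshot device used by userspace suspend tools */

	if (fd < 0) {
		perror("open /dev/snapshot");
		return 1;
	}
	if (ioctl(fd, SNAPSHOT_GET_IMAGE_SIZE, &size) == 0)
		printf("hibernation image size: %lld bytes\n", (long long)size);
	else
		perror("SNAPSHOT_GET_IMAGE_SIZE");
	close(fd);
	return 0;
}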
diff --git a/include/linux/swap.h b/include/linux/swap.h index d3021557887..7c15334f3ff 100644 --- a/include/linux/swap.h +++ b/include/linux/swap.h @@ -129,9 +129,10 @@ enum { #define SWAP_CLUSTER_MAX 32 -#define SWAP_MAP_MAX 0x7fff -#define SWAP_MAP_BAD 0x8000 - +#define SWAP_MAP_MAX 0x7ffe +#define SWAP_MAP_BAD 0x7fff +#define SWAP_HAS_CACHE 0x8000 /* There is a swap cache of entry. */ +#define SWAP_COUNT_MASK (~SWAP_HAS_CACHE) /* * The in-memory structure used to track swap areas. */ @@ -212,7 +213,7 @@ static inline void lru_cache_add_active_file(struct page *page) /* linux/mm/vmscan.c */ extern unsigned long try_to_free_pages(struct zonelist *zonelist, int order, - gfp_t gfp_mask); + gfp_t gfp_mask, nodemask_t *mask); extern unsigned long try_to_free_mem_cgroup_pages(struct mem_cgroup *mem, gfp_t gfp_mask, bool noswap, unsigned int swappiness); @@ -235,7 +236,6 @@ static inline int zone_reclaim(struct zone *z, gfp_t mask, unsigned int order) } #endif -#ifdef CONFIG_UNEVICTABLE_LRU extern int page_evictable(struct page *page, struct vm_area_struct *vma); extern void scan_mapping_unevictable_pages(struct address_space *); @@ -244,24 +244,6 @@ extern int scan_unevictable_handler(struct ctl_table *, int, struct file *, void __user *, size_t *, loff_t *); extern int scan_unevictable_register_node(struct node *node); extern void scan_unevictable_unregister_node(struct node *node); -#else -static inline int page_evictable(struct page *page, - struct vm_area_struct *vma) -{ - return 1; -} - -static inline void scan_mapping_unevictable_pages(struct address_space *mapping) -{ -} - -static inline int scan_unevictable_register_node(struct node *node) -{ - return 0; -} - -static inline void scan_unevictable_unregister_node(struct node *node) { } -#endif extern int kswapd_run(int nid); @@ -274,7 +256,7 @@ extern void swap_unplug_io_fn(struct backing_dev_info *, struct page *); #ifdef CONFIG_SWAP /* linux/mm/page_io.c */ -extern int swap_readpage(struct file *, struct page *); +extern int swap_readpage(struct page *); extern int swap_writepage(struct page *page, struct writeback_control *wbc); extern void end_swap_bio_read(struct bio *bio, int err); @@ -300,9 +282,11 @@ extern long total_swap_pages; extern void si_swapinfo(struct sysinfo *); extern swp_entry_t get_swap_page(void); extern swp_entry_t get_swap_page_of_type(int); -extern int swap_duplicate(swp_entry_t); +extern void swap_duplicate(swp_entry_t); +extern int swapcache_prepare(swp_entry_t); extern int valid_swaphandles(swp_entry_t, unsigned long *); extern void swap_free(swp_entry_t); +extern void swapcache_free(swp_entry_t, struct page *page); extern int free_swap_and_cache(swp_entry_t); extern int swap_type_of(dev_t, sector_t, struct block_device **); extern unsigned int count_swap_pages(int, int); @@ -314,8 +298,8 @@ extern int try_to_free_swap(struct page *); struct backing_dev_info; /* linux/mm/thrash.c */ -extern struct mm_struct * swap_token_mm; -extern void grab_swap_token(void); +extern struct mm_struct *swap_token_mm; +extern void grab_swap_token(struct mm_struct *); extern void __put_swap_token(struct mm_struct *); static inline int has_swap_token(struct mm_struct *mm) @@ -335,10 +319,11 @@ static inline void disable_swap_token(void) } #ifdef CONFIG_CGROUP_MEM_RES_CTLR -extern void mem_cgroup_uncharge_swapcache(struct page *page, swp_entry_t ent); +extern void +mem_cgroup_uncharge_swapcache(struct page *page, swp_entry_t ent, bool swapout); #else static inline void -mem_cgroup_uncharge_swapcache(struct page *page, swp_entry_t 
ent) +mem_cgroup_uncharge_swapcache(struct page *page, swp_entry_t ent, bool swapout) { } #endif @@ -370,18 +355,31 @@ static inline void show_swap_cache_info(void) } #define free_swap_and_cache(swp) is_migration_entry(swp) -#define swap_duplicate(swp) is_migration_entry(swp) +#define swapcache_prepare(swp) is_migration_entry(swp) + +static inline void swap_duplicate(swp_entry_t swp) +{ +} static inline void swap_free(swp_entry_t swp) { } +static inline void swapcache_free(swp_entry_t swp, struct page *page) +{ +} + static inline struct page *swapin_readahead(swp_entry_t swp, gfp_t gfp_mask, struct vm_area_struct *vma, unsigned long addr) { return NULL; } +static inline int swap_writepage(struct page *p, struct writeback_control *wbc) +{ + return 0; +} + static inline struct page *lookup_swap_cache(swp_entry_t swp) { return NULL; @@ -421,15 +419,14 @@ static inline swp_entry_t get_swap_page(void) } /* linux/mm/thrash.c */ -#define put_swap_token(x) do { } while(0) -#define grab_swap_token() do { } while(0) -#define has_swap_token(x) 0 -#define disable_swap_token() do { } while(0) +#define put_swap_token(mm) do { } while (0) +#define grab_swap_token(mm) do { } while (0) +#define has_swap_token(mm) 0 +#define disable_swap_token() do { } while (0) -static inline int mem_cgroup_cache_charge_swapin(struct page *page, - struct mm_struct *mm, gfp_t mask, bool locked) +static inline void +mem_cgroup_uncharge_swapcache(struct page *page, swp_entry_t ent) { - return 0; } #endif /* CONFIG_SWAP */ diff --git a/include/linux/swiotlb.h b/include/linux/swiotlb.h index dedd3c0cfe3..73b1f1cec42 100644 --- a/include/linux/swiotlb.h +++ b/include/linux/swiotlb.h @@ -14,7 +14,6 @@ struct scatterlist; */ #define IO_TLB_SEGSIZE 128 - /* * log of the size of each IO TLB slab. The number of slabs is command line * controllable. 
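The swiotlb.h changes continuing below retire the old swiotlb_map_single*() entry points and the plain int direction arguments in favour of swiotlb_map_page()/swiotlb_unmap_page() and enum dma_data_direction throughout. As a rough sketch of how an architecture might consume the new interface, assuming it uses the generic struct dma_map_ops table of this kernel series (the table name is illustrative, and fields unrelated to swiotlb are omitted):

#include <linux/dma-mapping.h>
#include <linux/swiotlb.h>

/*
 * Illustrative arch glue: streaming DMA routed through the page-based
 * swiotlb entry points, all of which now take enum dma_data_direction.
 */
static struct dma_map_ops example_swiotlb_dma_ops = {
	.alloc_coherent		= swiotlb_alloc_coherent,
	.free_coherent		= swiotlb_free_coherent,
	.map_page		= swiotlb_map_page,
	.unmap_page		= swiotlb_unmap_page,
	.map_sg			= swiotlb_map_sg_attrs,
	.unmap_sg		= swiotlb_unmap_sg_attrs,
	.sync_single_for_cpu	= swiotlb_sync_single_for_cpu,
	.sync_single_for_device	= swiotlb_sync_single_for_device,
	.sync_sg_for_cpu	= swiotlb_sync_sg_for_cpu,
	.sync_sg_for_device	= swiotlb_sync_sg_for_device,
	.mapping_error		= swiotlb_dma_mapping_error,
};

Because the new prototypes line up with the generic dma_map_ops signatures, this kind of direct wiring needs no per-architecture wrapper functions.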
@@ -24,15 +23,6 @@ struct scatterlist; extern void swiotlb_init(void); -extern void *swiotlb_alloc_boot(size_t bytes, unsigned long nslabs); -extern void *swiotlb_alloc(unsigned order, unsigned long nslabs); - -extern dma_addr_t swiotlb_phys_to_bus(struct device *hwdev, - phys_addr_t address); -extern phys_addr_t swiotlb_bus_to_phys(dma_addr_t address); - -extern int swiotlb_arch_range_needs_mapping(void *ptr, size_t size); - extern void *swiotlb_alloc_coherent(struct device *hwdev, size_t size, dma_addr_t *dma_handle, gfp_t flags); @@ -41,20 +31,13 @@ extern void swiotlb_free_coherent(struct device *hwdev, size_t size, void *vaddr, dma_addr_t dma_handle); -extern dma_addr_t -swiotlb_map_single(struct device *hwdev, void *ptr, size_t size, int dir); - -extern void -swiotlb_unmap_single(struct device *hwdev, dma_addr_t dev_addr, - size_t size, int dir); - -extern dma_addr_t -swiotlb_map_single_attrs(struct device *hwdev, void *ptr, size_t size, - int dir, struct dma_attrs *attrs); - -extern void -swiotlb_unmap_single_attrs(struct device *hwdev, dma_addr_t dev_addr, - size_t size, int dir, struct dma_attrs *attrs); +extern dma_addr_t swiotlb_map_page(struct device *dev, struct page *page, + unsigned long offset, size_t size, + enum dma_data_direction dir, + struct dma_attrs *attrs); +extern void swiotlb_unmap_page(struct device *hwdev, dma_addr_t dev_addr, + size_t size, enum dma_data_direction dir, + struct dma_attrs *attrs); extern int swiotlb_map_sg(struct device *hwdev, struct scatterlist *sg, int nents, @@ -66,36 +49,38 @@ swiotlb_unmap_sg(struct device *hwdev, struct scatterlist *sg, int nents, extern int swiotlb_map_sg_attrs(struct device *hwdev, struct scatterlist *sgl, int nelems, - int dir, struct dma_attrs *attrs); + enum dma_data_direction dir, struct dma_attrs *attrs); extern void swiotlb_unmap_sg_attrs(struct device *hwdev, struct scatterlist *sgl, - int nelems, int dir, struct dma_attrs *attrs); + int nelems, enum dma_data_direction dir, + struct dma_attrs *attrs); extern void swiotlb_sync_single_for_cpu(struct device *hwdev, dma_addr_t dev_addr, - size_t size, int dir); + size_t size, enum dma_data_direction dir); extern void swiotlb_sync_sg_for_cpu(struct device *hwdev, struct scatterlist *sg, - int nelems, int dir); + int nelems, enum dma_data_direction dir); extern void swiotlb_sync_single_for_device(struct device *hwdev, dma_addr_t dev_addr, - size_t size, int dir); + size_t size, enum dma_data_direction dir); extern void swiotlb_sync_sg_for_device(struct device *hwdev, struct scatterlist *sg, - int nelems, int dir); + int nelems, enum dma_data_direction dir); extern void swiotlb_sync_single_range_for_cpu(struct device *hwdev, dma_addr_t dev_addr, - unsigned long offset, size_t size, int dir); + unsigned long offset, size_t size, + enum dma_data_direction dir); extern void swiotlb_sync_single_range_for_device(struct device *hwdev, dma_addr_t dev_addr, unsigned long offset, size_t size, - int dir); + enum dma_data_direction dir); extern int swiotlb_dma_mapping_error(struct device *hwdev, dma_addr_t dma_addr); diff --git a/include/linux/synclink.h b/include/linux/synclink.h index c844a229acc..0ff2779c44d 100644 --- a/include/linux/synclink.h +++ b/include/linux/synclink.h @@ -13,6 +13,8 @@ #define _SYNCLINK_H_ #define SYNCLINK_H_VERSION 3.6 +#include <linux/types.h> + #define BIT0 0x0001 #define BIT1 0x0002 #define BIT2 0x0004 @@ -123,6 +125,7 @@ #define MGSL_MODE_MONOSYNC 3 #define MGSL_MODE_BISYNC 4 #define MGSL_MODE_RAW 6 +#define MGSL_MODE_BASE_CLOCK 7 #define 
MGSL_BUS_TYPE_ISA 1 #define MGSL_BUS_TYPE_EISA 2 diff --git a/include/linux/syscalls.h b/include/linux/syscalls.h index 16875f89e6a..a8e37821cc6 100644 --- a/include/linux/syscalls.h +++ b/include/linux/syscalls.h @@ -55,6 +55,7 @@ struct compat_timeval; struct robust_list_head; struct getcpu_cache; struct old_linux_dirent; +struct perf_counter_attr; #include <linux/types.h> #include <linux/aio_abi.h> @@ -63,8 +64,10 @@ struct old_linux_dirent; #include <linux/sem.h> #include <asm/siginfo.h> #include <asm/signal.h> +#include <linux/unistd.h> #include <linux/quota.h> #include <linux/key.h> +#include <trace/syscall.h> #define __SC_DECL1(t1, a1) t1 a1 #define __SC_DECL2(t2, a2, ...) t2 a2, __SC_DECL1(__VA_ARGS__) @@ -95,42 +98,228 @@ struct old_linux_dirent; #define __SC_TEST5(t5, a5, ...) __SC_TEST(t5); __SC_TEST4(__VA_ARGS__) #define __SC_TEST6(t6, a6, ...) __SC_TEST(t6); __SC_TEST5(__VA_ARGS__) -#define SYSCALL_DEFINE0(name) asmlinkage long sys_##name(void) -#define SYSCALL_DEFINE1(...) SYSCALL_DEFINEx(1, __VA_ARGS__) -#define SYSCALL_DEFINE2(...) SYSCALL_DEFINEx(2, __VA_ARGS__) -#define SYSCALL_DEFINE3(...) SYSCALL_DEFINEx(3, __VA_ARGS__) -#define SYSCALL_DEFINE4(...) SYSCALL_DEFINEx(4, __VA_ARGS__) -#define SYSCALL_DEFINE5(...) SYSCALL_DEFINEx(5, __VA_ARGS__) -#define SYSCALL_DEFINE6(...) SYSCALL_DEFINEx(6, __VA_ARGS__) +#ifdef CONFIG_EVENT_PROFILE +#define TRACE_SYS_ENTER_PROFILE(sname) \ +static int prof_sysenter_enable_##sname(struct ftrace_event_call *event_call) \ +{ \ + int ret = 0; \ + if (!atomic_inc_return(&event_enter_##sname.profile_count)) \ + ret = reg_prof_syscall_enter("sys"#sname); \ + return ret; \ +} \ + \ +static void prof_sysenter_disable_##sname(struct ftrace_event_call *event_call)\ +{ \ + if (atomic_add_negative(-1, &event_enter_##sname.profile_count)) \ + unreg_prof_syscall_enter("sys"#sname); \ +} + +#define TRACE_SYS_EXIT_PROFILE(sname) \ +static int prof_sysexit_enable_##sname(struct ftrace_event_call *event_call) \ +{ \ + int ret = 0; \ + if (!atomic_inc_return(&event_exit_##sname.profile_count)) \ + ret = reg_prof_syscall_exit("sys"#sname); \ + return ret; \ +} \ + \ +static void prof_sysexit_disable_##sname(struct ftrace_event_call *event_call) \ +{ \ + if (atomic_add_negative(-1, &event_exit_##sname.profile_count)) \ + unreg_prof_syscall_exit("sys"#sname); \ +} + +#define TRACE_SYS_ENTER_PROFILE_INIT(sname) \ + .profile_count = ATOMIC_INIT(-1), \ + .profile_enable = prof_sysenter_enable_##sname, \ + .profile_disable = prof_sysenter_disable_##sname, + +#define TRACE_SYS_EXIT_PROFILE_INIT(sname) \ + .profile_count = ATOMIC_INIT(-1), \ + .profile_enable = prof_sysexit_enable_##sname, \ + .profile_disable = prof_sysexit_disable_##sname, +#else +#define TRACE_SYS_ENTER_PROFILE(sname) +#define TRACE_SYS_ENTER_PROFILE_INIT(sname) +#define TRACE_SYS_EXIT_PROFILE(sname) +#define TRACE_SYS_EXIT_PROFILE_INIT(sname) +#endif + +#ifdef CONFIG_FTRACE_SYSCALLS +#define __SC_STR_ADECL1(t, a) #a +#define __SC_STR_ADECL2(t, a, ...) #a, __SC_STR_ADECL1(__VA_ARGS__) +#define __SC_STR_ADECL3(t, a, ...) #a, __SC_STR_ADECL2(__VA_ARGS__) +#define __SC_STR_ADECL4(t, a, ...) #a, __SC_STR_ADECL3(__VA_ARGS__) +#define __SC_STR_ADECL5(t, a, ...) #a, __SC_STR_ADECL4(__VA_ARGS__) +#define __SC_STR_ADECL6(t, a, ...) #a, __SC_STR_ADECL5(__VA_ARGS__) + +#define __SC_STR_TDECL1(t, a) #t +#define __SC_STR_TDECL2(t, a, ...) #t, __SC_STR_TDECL1(__VA_ARGS__) +#define __SC_STR_TDECL3(t, a, ...) #t, __SC_STR_TDECL2(__VA_ARGS__) +#define __SC_STR_TDECL4(t, a, ...) 
#t, __SC_STR_TDECL3(__VA_ARGS__) +#define __SC_STR_TDECL5(t, a, ...) #t, __SC_STR_TDECL4(__VA_ARGS__) +#define __SC_STR_TDECL6(t, a, ...) #t, __SC_STR_TDECL5(__VA_ARGS__) + +#define SYSCALL_TRACE_ENTER_EVENT(sname) \ + static struct ftrace_event_call event_enter_##sname; \ + struct trace_event enter_syscall_print_##sname = { \ + .trace = print_syscall_enter, \ + }; \ + static int init_enter_##sname(void) \ + { \ + int num, id; \ + num = syscall_name_to_nr("sys"#sname); \ + if (num < 0) \ + return -ENOSYS; \ + id = register_ftrace_event(&enter_syscall_print_##sname);\ + if (!id) \ + return -ENODEV; \ + event_enter_##sname.id = id; \ + set_syscall_enter_id(num, id); \ + INIT_LIST_HEAD(&event_enter_##sname.fields); \ + return 0; \ + } \ + TRACE_SYS_ENTER_PROFILE(sname); \ + static struct ftrace_event_call __used \ + __attribute__((__aligned__(4))) \ + __attribute__((section("_ftrace_events"))) \ + event_enter_##sname = { \ + .name = "sys_enter"#sname, \ + .system = "syscalls", \ + .event = &event_syscall_enter, \ + .raw_init = init_enter_##sname, \ + .show_format = syscall_enter_format, \ + .define_fields = syscall_enter_define_fields, \ + .regfunc = reg_event_syscall_enter, \ + .unregfunc = unreg_event_syscall_enter, \ + .data = "sys"#sname, \ + TRACE_SYS_ENTER_PROFILE_INIT(sname) \ + } + +#define SYSCALL_TRACE_EXIT_EVENT(sname) \ + static struct ftrace_event_call event_exit_##sname; \ + struct trace_event exit_syscall_print_##sname = { \ + .trace = print_syscall_exit, \ + }; \ + static int init_exit_##sname(void) \ + { \ + int num, id; \ + num = syscall_name_to_nr("sys"#sname); \ + if (num < 0) \ + return -ENOSYS; \ + id = register_ftrace_event(&exit_syscall_print_##sname);\ + if (!id) \ + return -ENODEV; \ + event_exit_##sname.id = id; \ + set_syscall_exit_id(num, id); \ + INIT_LIST_HEAD(&event_exit_##sname.fields); \ + return 0; \ + } \ + TRACE_SYS_EXIT_PROFILE(sname); \ + static struct ftrace_event_call __used \ + __attribute__((__aligned__(4))) \ + __attribute__((section("_ftrace_events"))) \ + event_exit_##sname = { \ + .name = "sys_exit"#sname, \ + .system = "syscalls", \ + .event = &event_syscall_exit, \ + .raw_init = init_exit_##sname, \ + .show_format = syscall_exit_format, \ + .define_fields = syscall_exit_define_fields, \ + .regfunc = reg_event_syscall_exit, \ + .unregfunc = unreg_event_syscall_exit, \ + .data = "sys"#sname, \ + TRACE_SYS_EXIT_PROFILE_INIT(sname) \ + } + +#define SYSCALL_METADATA(sname, nb) \ + SYSCALL_TRACE_ENTER_EVENT(sname); \ + SYSCALL_TRACE_EXIT_EVENT(sname); \ + static const struct syscall_metadata __used \ + __attribute__((__aligned__(4))) \ + __attribute__((section("__syscalls_metadata"))) \ + __syscall_meta_##sname = { \ + .name = "sys"#sname, \ + .nb_args = nb, \ + .types = types_##sname, \ + .args = args_##sname, \ + .enter_event = &event_enter_##sname, \ + .exit_event = &event_exit_##sname, \ + }; + +#define SYSCALL_DEFINE0(sname) \ + SYSCALL_TRACE_ENTER_EVENT(_##sname); \ + SYSCALL_TRACE_EXIT_EVENT(_##sname); \ + static const struct syscall_metadata __used \ + __attribute__((__aligned__(4))) \ + __attribute__((section("__syscalls_metadata"))) \ + __syscall_meta_##sname = { \ + .name = "sys_"#sname, \ + .nb_args = 0, \ + .enter_event = &event_enter__##sname, \ + .exit_event = &event_exit__##sname, \ + }; \ + asmlinkage long sys_##sname(void) +#else +#define SYSCALL_DEFINE0(name) asmlinkage long sys_##name(void) +#endif + +#define SYSCALL_DEFINE1(name, ...) SYSCALL_DEFINEx(1, _##name, __VA_ARGS__) +#define SYSCALL_DEFINE2(name, ...) 
SYSCALL_DEFINEx(2, _##name, __VA_ARGS__) +#define SYSCALL_DEFINE3(name, ...) SYSCALL_DEFINEx(3, _##name, __VA_ARGS__) +#define SYSCALL_DEFINE4(name, ...) SYSCALL_DEFINEx(4, _##name, __VA_ARGS__) +#define SYSCALL_DEFINE5(name, ...) SYSCALL_DEFINEx(5, _##name, __VA_ARGS__) +#define SYSCALL_DEFINE6(name, ...) SYSCALL_DEFINEx(6, _##name, __VA_ARGS__) #ifdef CONFIG_PPC64 #define SYSCALL_ALIAS(alias, name) \ asm ("\t.globl " #alias "\n\t.set " #alias ", " #name "\n" \ "\t.globl ." #alias "\n\t.set ." #alias ", ." #name) #else +#if defined(CONFIG_ALPHA) || defined(CONFIG_MIPS) +#define SYSCALL_ALIAS(alias, name) \ + asm ( #alias " = " #name "\n\t.globl " #alias) +#else #define SYSCALL_ALIAS(alias, name) \ asm ("\t.globl " #alias "\n\t.set " #alias ", " #name) #endif +#endif + +#ifdef CONFIG_FTRACE_SYSCALLS +#define SYSCALL_DEFINEx(x, sname, ...) \ + static const char *types_##sname[] = { \ + __SC_STR_TDECL##x(__VA_ARGS__) \ + }; \ + static const char *args_##sname[] = { \ + __SC_STR_ADECL##x(__VA_ARGS__) \ + }; \ + SYSCALL_METADATA(sname, x); \ + __SYSCALL_DEFINEx(x, sname, __VA_ARGS__) +#else +#define SYSCALL_DEFINEx(x, sname, ...) \ + __SYSCALL_DEFINEx(x, sname, __VA_ARGS__) +#endif #ifdef CONFIG_HAVE_SYSCALL_WRAPPERS #define SYSCALL_DEFINE(name) static inline long SYSC_##name -#define SYSCALL_DEFINEx(x, name, ...) \ - asmlinkage long sys_##name(__SC_DECL##x(__VA_ARGS__)); \ - static inline long SYSC_##name(__SC_DECL##x(__VA_ARGS__)); \ - asmlinkage long SyS_##name(__SC_LONG##x(__VA_ARGS__)) \ + +#define __SYSCALL_DEFINEx(x, name, ...) \ + asmlinkage long sys##name(__SC_DECL##x(__VA_ARGS__)); \ + static inline long SYSC##name(__SC_DECL##x(__VA_ARGS__)); \ + asmlinkage long SyS##name(__SC_LONG##x(__VA_ARGS__)) \ { \ __SC_TEST##x(__VA_ARGS__); \ - return (long) SYSC_##name(__SC_CAST##x(__VA_ARGS__)); \ + return (long) SYSC##name(__SC_CAST##x(__VA_ARGS__)); \ } \ - SYSCALL_ALIAS(sys_##name, SyS_##name); \ - static inline long SYSC_##name(__SC_DECL##x(__VA_ARGS__)) + SYSCALL_ALIAS(sys##name, SyS##name); \ + static inline long SYSC##name(__SC_DECL##x(__VA_ARGS__)) #else /* CONFIG_HAVE_SYSCALL_WRAPPERS */ #define SYSCALL_DEFINE(name) asmlinkage long sys_##name -#define SYSCALL_DEFINEx(x, name, ...) \ - asmlinkage long sys_##name(__SC_DECL##x(__VA_ARGS__)) +#define __SYSCALL_DEFINEx(x, name, ...) 
\ + asmlinkage long sys##name(__SC_DECL##x(__VA_ARGS__)) #endif /* CONFIG_HAVE_SYSCALL_WRAPPERS */ @@ -259,6 +448,8 @@ asmlinkage long sys_rt_sigtimedwait(const sigset_t __user *uthese, siginfo_t __user *uinfo, const struct timespec __user *uts, size_t sigsetsize); +asmlinkage long sys_rt_tgsigqueueinfo(pid_t tgid, pid_t pid, int sig, + siginfo_t __user *uinfo); asmlinkage long sys_kill(int pid, int sig); asmlinkage long sys_tgkill(int tgid, int pid, int sig); asmlinkage long sys_tkill(int pid, int sig); @@ -372,6 +563,8 @@ asmlinkage long sys_fcntl(unsigned int fd, unsigned int cmd, unsigned long arg); asmlinkage long sys_fcntl64(unsigned int fd, unsigned int cmd, unsigned long arg); #endif +asmlinkage long sys_pipe(int __user *fildes); +asmlinkage long sys_pipe2(int __user *fildes, int flags); asmlinkage long sys_dup(unsigned int fildes); asmlinkage long sys_dup2(unsigned int oldfd, unsigned int newfd); asmlinkage long sys_dup3(unsigned int oldfd, unsigned int newfd, int flags); @@ -456,6 +649,10 @@ asmlinkage long sys_pread64(unsigned int fd, char __user *buf, size_t count, loff_t pos); asmlinkage long sys_pwrite64(unsigned int fd, const char __user *buf, size_t count, loff_t pos); +asmlinkage long sys_preadv(unsigned long fd, const struct iovec __user *vec, + unsigned long vlen, unsigned long pos_l, unsigned long pos_h); +asmlinkage long sys_pwritev(unsigned long fd, const struct iovec __user *vec, + unsigned long vlen, unsigned long pos_l, unsigned long pos_h); asmlinkage long sys_getcwd(char __user *buf, unsigned long size); asmlinkage long sys_mkdir(const char __user *pathname, int mode); asmlinkage long sys_chdir(const char __user *filename); @@ -684,9 +881,11 @@ asmlinkage long sys_pselect6(int, fd_set __user *, fd_set __user *, asmlinkage long sys_ppoll(struct pollfd __user *, unsigned int, struct timespec __user *, const sigset_t __user *, size_t); -asmlinkage long sys_pipe2(int __user *, int); -asmlinkage long sys_pipe(int __user *); int kernel_execve(const char *filename, char *const argv[], char *const envp[]); + +asmlinkage long sys_perf_counter_open( + struct perf_counter_attr __user *attr_uptr, + pid_t pid, int cpu, int group_fd, unsigned long flags); #endif diff --git a/include/linux/sysctl.h b/include/linux/sysctl.h index 39d471d1163..e76d3b22a46 100644 --- a/include/linux/sysctl.h +++ b/include/linux/sysctl.h @@ -490,6 +490,7 @@ enum NET_IPV4_CONF_ARP_IGNORE=19, NET_IPV4_CONF_PROMOTE_SECONDARIES=20, NET_IPV4_CONF_ARP_ACCEPT=21, + NET_IPV4_CONF_ARP_NOTIFY=22, __NET_IPV4_CONF_MAX }; diff --git a/include/linux/sysrq.h b/include/linux/sysrq.h index 98a1d8cfb73..99adcdc0d3c 100644 --- a/include/linux/sysrq.h +++ b/include/linux/sysrq.h @@ -14,6 +14,8 @@ #ifndef _LINUX_SYSRQ_H #define _LINUX_SYSRQ_H +#include <linux/errno.h> + struct pt_regs; struct tty_struct; diff --git a/include/linux/taskstats.h b/include/linux/taskstats.h index 18269e956a7..341dddb5509 100644 --- a/include/linux/taskstats.h +++ b/include/linux/taskstats.h @@ -16,6 +16,8 @@ #ifndef _LINUX_TASKSTATS_H #define _LINUX_TASKSTATS_H +#include <linux/types.h> + /* Format for per-task data returned to userland when * - a task exits * - listener requests stats for a task diff --git a/include/linux/tc_act/tc_gact.h b/include/linux/tc_act/tc_gact.h index 23a03eb630d..e895c0a3962 100644 --- a/include/linux/tc_act/tc_gact.h +++ b/include/linux/tc_act/tc_gact.h @@ -1,6 +1,7 @@ #ifndef __LINUX_TC_GACT_H #define __LINUX_TC_GACT_H +#include <linux/types.h> #include <linux/pkt_cls.h> #define TCA_ACT_GACT 5 diff --git 
a/include/linux/tc_act/tc_mirred.h b/include/linux/tc_act/tc_mirred.h index 71d63409d56..0a99ab60d61 100644 --- a/include/linux/tc_act/tc_mirred.h +++ b/include/linux/tc_act/tc_mirred.h @@ -1,6 +1,7 @@ #ifndef __LINUX_TC_MIR_H #define __LINUX_TC_MIR_H +#include <linux/types.h> #include <linux/pkt_cls.h> #define TCA_ACT_MIRRED 8 diff --git a/include/linux/tc_act/tc_pedit.h b/include/linux/tc_act/tc_pedit.h index 83e56e32e8e..54ce9064115 100644 --- a/include/linux/tc_act/tc_pedit.h +++ b/include/linux/tc_act/tc_pedit.h @@ -1,6 +1,7 @@ #ifndef __LINUX_TC_PED_H #define __LINUX_TC_PED_H +#include <linux/types.h> #include <linux/pkt_cls.h> #define TCA_ACT_PEDIT 7 diff --git a/include/linux/tc_ematch/tc_em_cmp.h b/include/linux/tc_ematch/tc_em_cmp.h index c7f4d43618f..38e7f7b25ec 100644 --- a/include/linux/tc_ematch/tc_em_cmp.h +++ b/include/linux/tc_ematch/tc_em_cmp.h @@ -1,6 +1,7 @@ #ifndef __LINUX_TC_EM_CMP_H #define __LINUX_TC_EM_CMP_H +#include <linux/types.h> #include <linux/pkt_cls.h> struct tcf_em_cmp diff --git a/include/linux/tc_ematch/tc_em_meta.h b/include/linux/tc_ematch/tc_em_meta.h index c50d2ba5caf..dcfb733fa1f 100644 --- a/include/linux/tc_ematch/tc_em_meta.h +++ b/include/linux/tc_ematch/tc_em_meta.h @@ -1,6 +1,7 @@ #ifndef __LINUX_TC_EM_META_H #define __LINUX_TC_EM_META_H +#include <linux/types.h> #include <linux/pkt_cls.h> enum diff --git a/include/linux/tc_ematch/tc_em_nbyte.h b/include/linux/tc_ematch/tc_em_nbyte.h index f19d1f58ec9..9ed8c2e5848 100644 --- a/include/linux/tc_ematch/tc_em_nbyte.h +++ b/include/linux/tc_ematch/tc_em_nbyte.h @@ -1,6 +1,7 @@ #ifndef __LINUX_TC_EM_NBYTE_H #define __LINUX_TC_EM_NBYTE_H +#include <linux/types.h> #include <linux/pkt_cls.h> struct tcf_em_nbyte diff --git a/include/linux/tc_ematch/tc_em_text.h b/include/linux/tc_ematch/tc_em_text.h index 7cd43e99c7f..d12a73a225f 100644 --- a/include/linux/tc_ematch/tc_em_text.h +++ b/include/linux/tc_ematch/tc_em_text.h @@ -1,6 +1,7 @@ #ifndef __LINUX_TC_EM_TEXT_H #define __LINUX_TC_EM_TEXT_H +#include <linux/types.h> #include <linux/pkt_cls.h> #define TC_EM_TEXT_ALGOSIZ 16 diff --git a/include/linux/tcp.h b/include/linux/tcp.h index fe77e1499ab..61723a7c21f 100644 --- a/include/linux/tcp.h +++ b/include/linux/tcp.h @@ -69,16 +69,16 @@ union tcp_word_hdr { #define tcp_flag_word(tp) ( ((union tcp_word_hdr *)(tp))->words [3]) enum { - TCP_FLAG_CWR = __constant_htonl(0x00800000), - TCP_FLAG_ECE = __constant_htonl(0x00400000), - TCP_FLAG_URG = __constant_htonl(0x00200000), - TCP_FLAG_ACK = __constant_htonl(0x00100000), - TCP_FLAG_PSH = __constant_htonl(0x00080000), - TCP_FLAG_RST = __constant_htonl(0x00040000), - TCP_FLAG_SYN = __constant_htonl(0x00020000), - TCP_FLAG_FIN = __constant_htonl(0x00010000), - TCP_RESERVED_BITS = __constant_htonl(0x0F000000), - TCP_DATA_OFFSET = __constant_htonl(0xF0000000) + TCP_FLAG_CWR = __cpu_to_be32(0x00800000), + TCP_FLAG_ECE = __cpu_to_be32(0x00400000), + TCP_FLAG_URG = __cpu_to_be32(0x00200000), + TCP_FLAG_ACK = __cpu_to_be32(0x00100000), + TCP_FLAG_PSH = __cpu_to_be32(0x00080000), + TCP_FLAG_RST = __cpu_to_be32(0x00040000), + TCP_FLAG_SYN = __cpu_to_be32(0x00020000), + TCP_FLAG_FIN = __cpu_to_be32(0x00010000), + TCP_RESERVED_BITS = __cpu_to_be32(0x0F000000), + TCP_DATA_OFFSET = __cpu_to_be32(0xF0000000) }; /* TCP socket options */ @@ -218,7 +218,6 @@ struct tcp_options_received { snd_wscale : 4, /* Window scaling received from sender */ rcv_wscale : 4; /* Window scaling to send to receiver */ /* SACKs data */ - u8 eff_sacks; /* Size of SACK array to send with next 
packet */ u8 num_sacks; /* Number of SACK blocks */ u16 user_mss; /* mss requested by user in ioctl */ u16 mss_clamp; /* Maximal mss, negotiated at connection setup */ @@ -234,7 +233,7 @@ struct tcp_request_sock { struct inet_request_sock req; #ifdef CONFIG_TCP_MD5SIG /* Only used by TCP MD5 Signature so far. */ - struct tcp_request_sock_ops *af_specific; + const struct tcp_request_sock_ops *af_specific; #endif u32 rcv_isn; u32 snt_isn; @@ -249,7 +248,7 @@ struct tcp_sock { /* inet_connection_sock has to be the first member of tcp_sock */ struct inet_connection_sock inet_conn; u16 tcp_header_len; /* Bytes of tcp header to send */ - u16 xmit_size_goal; /* Goal for segmenting output packets */ + u16 xmit_size_goal_segs; /* Goal for segmenting output packets */ /* * Header prediction flags @@ -378,7 +377,7 @@ struct tcp_sock { unsigned int keepalive_time; /* time before keep alive takes place */ unsigned int keepalive_intvl; /* time interval between keep alive probes */ - unsigned long last_synq_overflow; + int linger2; /* Receiver side RTT estimation */ struct { @@ -402,13 +401,11 @@ struct tcp_sock { #ifdef CONFIG_TCP_MD5SIG /* TCP AF-Specific parts; only used by MD5 Signature support so far */ - struct tcp_sock_af_ops *af_specific; + const struct tcp_sock_af_ops *af_specific; -/* TCP MD5 Signagure Option information */ +/* TCP MD5 Signature Option information */ struct tcp_md5sig_info *md5sig_info; #endif - - int linger2; }; static inline struct tcp_sock *tcp_sk(const struct sock *sk) diff --git a/include/linux/thermal.h b/include/linux/thermal.h index 917707e6151..1de8b9eb841 100644 --- a/include/linux/thermal.h +++ b/include/linux/thermal.h @@ -27,27 +27,46 @@ #include <linux/idr.h> #include <linux/device.h> +#include <linux/workqueue.h> struct thermal_zone_device; struct thermal_cooling_device; +enum thermal_device_mode { + THERMAL_DEVICE_DISABLED = 0, + THERMAL_DEVICE_ENABLED, +}; + +enum thermal_trip_type { + THERMAL_TRIP_ACTIVE = 0, + THERMAL_TRIP_PASSIVE, + THERMAL_TRIP_HOT, + THERMAL_TRIP_CRITICAL, +}; + struct thermal_zone_device_ops { int (*bind) (struct thermal_zone_device *, struct thermal_cooling_device *); int (*unbind) (struct thermal_zone_device *, struct thermal_cooling_device *); - int (*get_temp) (struct thermal_zone_device *, char *); - int (*get_mode) (struct thermal_zone_device *, char *); - int (*set_mode) (struct thermal_zone_device *, const char *); - int (*get_trip_type) (struct thermal_zone_device *, int, char *); - int (*get_trip_temp) (struct thermal_zone_device *, int, char *); + int (*get_temp) (struct thermal_zone_device *, unsigned long *); + int (*get_mode) (struct thermal_zone_device *, + enum thermal_device_mode *); + int (*set_mode) (struct thermal_zone_device *, + enum thermal_device_mode); + int (*get_trip_type) (struct thermal_zone_device *, int, + enum thermal_trip_type *); + int (*get_trip_temp) (struct thermal_zone_device *, int, + unsigned long *); int (*get_crit_temp) (struct thermal_zone_device *, unsigned long *); + int (*notify) (struct thermal_zone_device *, int, + enum thermal_trip_type); }; struct thermal_cooling_device_ops { - int (*get_max_state) (struct thermal_cooling_device *, char *); - int (*get_cur_state) (struct thermal_cooling_device *, char *); - int (*set_cur_state) (struct thermal_cooling_device *, unsigned int); + int (*get_max_state) (struct thermal_cooling_device *, unsigned long *); + int (*get_cur_state) (struct thermal_cooling_device *, unsigned long *); + int (*set_cur_state) (struct thermal_cooling_device *, unsigned 
long); }; #define THERMAL_TRIPS_NONE -1 @@ -88,11 +107,19 @@ struct thermal_zone_device { struct device device; void *devdata; int trips; + int tc1; + int tc2; + int passive_delay; + int polling_delay; + int last_temperature; + bool passive; + unsigned int forced_passive; struct thermal_zone_device_ops *ops; struct list_head cooling_devices; struct idr idr; struct mutex lock; /* protect cooling devices list */ struct list_head node; + struct delayed_work poll_queue; #if defined(CONFIG_THERMAL_HWMON) struct list_head hwmon_node; struct thermal_hwmon_device *hwmon; @@ -104,13 +131,16 @@ struct thermal_zone_device { struct thermal_zone_device *thermal_zone_device_register(char *, int, void *, struct thermal_zone_device_ops - *); + *, int tc1, int tc2, + int passive_freq, + int polling_freq); void thermal_zone_device_unregister(struct thermal_zone_device *); int thermal_zone_bind_cooling_device(struct thermal_zone_device *, int, struct thermal_cooling_device *); int thermal_zone_unbind_cooling_device(struct thermal_zone_device *, int, struct thermal_cooling_device *); +void thermal_zone_device_update(struct thermal_zone_device *); struct thermal_cooling_device *thermal_cooling_device_register(char *, void *, struct thermal_cooling_device_ops diff --git a/include/linux/thread_info.h b/include/linux/thread_info.h index e6b820f8b56..a8cc4e13434 100644 --- a/include/linux/thread_info.h +++ b/include/linux/thread_info.h @@ -21,13 +21,14 @@ struct restart_block { struct { unsigned long arg0, arg1, arg2, arg3; }; - /* For futex_wait */ + /* For futex_wait and futex_wait_requeue_pi */ struct { u32 *uaddr; u32 val; u32 flags; u32 bitset; u64 time; + u32 *uaddr2; } futex; /* For nanosleep */ struct { diff --git a/include/linux/tick.h b/include/linux/tick.h index 469b82d88b3..0482229c07d 100644 --- a/include/linux/tick.h +++ b/include/linux/tick.h @@ -97,10 +97,12 @@ extern void tick_clock_notify(void); extern int tick_check_oneshot_change(int allow_nohz); extern struct tick_sched *tick_get_tick_sched(int cpu); extern void tick_check_idle(int cpu); +extern int tick_oneshot_mode_active(void); # else static inline void tick_clock_notify(void) { } static inline int tick_check_oneshot_change(int allow_nohz) { return 0; } static inline void tick_check_idle(int cpu) { } +static inline int tick_oneshot_mode_active(void) { return 0; } # endif #else /* CONFIG_GENERIC_CLOCKEVENTS */ @@ -109,6 +111,7 @@ static inline void tick_cancel_sched_timer(int cpu) { } static inline void tick_clock_notify(void) { } static inline int tick_check_oneshot_change(int allow_nohz) { return 0; } static inline void tick_check_idle(int cpu) { } +static inline int tick_oneshot_mode_active(void) { return 0; } #endif /* !CONFIG_GENERIC_CLOCKEVENTS */ # ifdef CONFIG_NO_HZ diff --git a/include/linux/time.h b/include/linux/time.h index fbbd2a1c92b..ea16c1a01d5 100644 --- a/include/linux/time.h +++ b/include/linux/time.h @@ -12,14 +12,14 @@ #ifndef _STRUCT_TIMESPEC #define _STRUCT_TIMESPEC struct timespec { - time_t tv_sec; /* seconds */ - long tv_nsec; /* nanoseconds */ + __kernel_time_t tv_sec; /* seconds */ + long tv_nsec; /* nanoseconds */ }; #endif struct timeval { - time_t tv_sec; /* seconds */ - suseconds_t tv_usec; /* microseconds */ + __kernel_time_t tv_sec; /* seconds */ + __kernel_suseconds_t tv_usec; /* microseconds */ }; struct timezone { @@ -113,6 +113,21 @@ struct timespec current_kernel_time(void); #define CURRENT_TIME (current_kernel_time()) #define CURRENT_TIME_SEC ((struct timespec) { get_seconds(), 0 }) +/* Some 
architectures do not supply their own clocksource. + * This is mainly the case in architectures that get their + * inter-tick times by reading the counter on their interval + * timer. Since these timers wrap every tick, they're not really + * useful as clocksources. Wrapping them to act like one is possible + * but not very efficient. So we provide a callout these arches + * can implement for use with the jiffies clocksource to provide + * finer than tick granular time. + */ +#ifdef CONFIG_ARCH_USES_GETTIMEOFFSET +extern u32 arch_gettimeoffset(void); +#else +static inline u32 arch_gettimeoffset(void) { return 0; } +#endif + extern void do_gettimeofday(struct timeval *tv); extern int do_settimeofday(struct timespec *tv); extern int do_sys_settimeofday(struct timespec *tv, struct timezone *tz); diff --git a/include/linux/timecompare.h b/include/linux/timecompare.h new file mode 100644 index 00000000000..546e2234e4b --- /dev/null +++ b/include/linux/timecompare.h @@ -0,0 +1,125 @@ +/* + * Utility code which helps transforming between two different time + * bases, called "source" and "target" time in this code. + * + * Source time has to be provided via the timecounter API while target + * time is accessed via a function callback whose prototype + * intentionally matches ktime_get() and ktime_get_real(). These + * interfaces were chosen like this so that the code serves its + * initial purpose without additional glue code. + * + * This purpose is synchronizing a hardware clock in a NIC with system + * time, in order to implement the Precision Time Protocol (PTP, + * IEEE1588) with more accurate hardware assisted time stamping. In + * that context only synchronization against system time (= + * ktime_get_real()) is currently needed. But this utility code might + * become useful in other situations, which is why it was written as + * general purpose utility code. + * + * The source timecounter is assumed to return monotonically + * increasing time (but this code does its best to compensate if that + * is not the case) whereas target time may jump. + * + * The target time corresponding to a source time is determined by + * reading target time, reading source time, reading target time + * again, then assuming that average target time corresponds to source + * time. In other words, the assumption is that reading the source + * time is slow and involves equal time for sending the request and + * receiving the reply, whereas reading target time is assumed to be + * fast. + * + * Copyright (C) 2009 Intel Corporation. + * Author: Patrick Ohly <patrick.ohly@intel.com> + * + * This program is free software; you can redistribute it and/or modify it + * under the terms and conditions of the GNU General Public License, + * version 2, as published by the Free Software Foundation. + * + * This program is distributed in the hope it will be useful, but WITHOUT + * ANY WARRANTY; without even the implied warranty of MERCHANTABILITY or + * FITNESS FOR A PARTICULAR PURPOSE. See the GNU General Public License for + * more details. + * + * You should have received a copy of the GNU General Public License along with + * this program; if not, write to the Free Software Foundation, Inc., + * 51 Franklin St - Fifth Floor, Boston, MA 02110-1301 USA. + */ +#ifndef _LINUX_TIMECOMPARE_H +#define _LINUX_TIMECOMPARE_H + +#include <linux/clocksource.h> +#include <linux/ktime.h> + +/** + * struct timecompare - stores state and configuration for the two clocks + * + * Initialize to zero, then set source/target/num_samples.
+ * + * Transformation between source time and target time is done with: + * target_time = source_time + offset + + * (source_time - last_update) * skew / + * TIMECOMPARE_SKEW_RESOLUTION + * + * @source: used to get source time stamps via timecounter_read() + * @target: function returning target time (for example, ktime_get + * for monotonic time, or ktime_get_real for wall clock) + * @num_samples: number of times that source time and target time are to + * be compared when determining their offset + * @offset: (target time - source time) at the time of the last update + * @skew: average (target time - source time) / delta source time * + * TIMECOMPARE_SKEW_RESOLUTION + * @last_update: last source time stamp when time offset was measured + */ +struct timecompare { + struct timecounter *source; + ktime_t (*target)(void); + int num_samples; + + s64 offset; + s64 skew; + u64 last_update; +}; + +/** + * timecompare_transform - transform source time stamp into target time base + * @sync: context for time sync + * @source_tstamp: the result of timecounter_read() or + * timecounter_cyc2time() + */ +extern ktime_t timecompare_transform(struct timecompare *sync, + u64 source_tstamp); + +/** + * timecompare_offset - measure current (target time - source time) offset + * @sync: context for time sync + * @offset: average offset during sample period returned here + * @source_tstamp: average source time during sample period returned here + * + * Returns number of samples used. Might be zero (= no result) in the + * unlikely case that target time was monotonically decreasing for all + * samples (= broken). + */ +extern int timecompare_offset(struct timecompare *sync, + s64 *offset, + u64 *source_tstamp); + +extern void __timecompare_update(struct timecompare *sync, + u64 source_tstamp); + +/** + * timecompare_update - update offset and skew by measuring current offset + * @sync: context for time sync + * @source_tstamp: the result of timecounter_read() or + * timecounter_cyc2time(), pass zero to force update + * + * Updates are only done at most once per second. + */ +static inline void timecompare_update(struct timecompare *sync, + u64 source_tstamp) +{ + if (!source_tstamp || + (s64)(source_tstamp - sync->last_update) >= NSEC_PER_SEC) + __timecompare_update(sync, source_tstamp); +} + +#endif /* _LINUX_TIMECOMPARE_H */ diff --git a/include/linux/timer.h b/include/linux/timer.h index daf9685b861..be62ec2ebea 100644 --- a/include/linux/timer.h +++ b/include/linux/timer.h @@ -5,6 +5,7 @@ #include <linux/ktime.h> #include <linux/stddef.h> #include <linux/debugobjects.h> +#include <linux/stringify.h> struct tvec_base; @@ -21,52 +22,126 @@ struct timer_list { char start_comm[16]; int start_pid; #endif +#ifdef CONFIG_LOCKDEP + struct lockdep_map lockdep_map; +#endif }; extern struct tvec_base boot_tvec_bases; +#ifdef CONFIG_LOCKDEP +/* + * NB: because we have to copy the lockdep_map, setting the lockdep_map key + * (second argument) here is required, otherwise it could be initialised to + * the copy of the lockdep_map later! We use the pointer to and the string + * "<file>:<line>" as the key resp. the name of the lockdep_map. 
+ */ +#define __TIMER_LOCKDEP_MAP_INITIALIZER(_kn) \ + .lockdep_map = STATIC_LOCKDEP_MAP_INIT(_kn, &_kn), +#else +#define __TIMER_LOCKDEP_MAP_INITIALIZER(_kn) +#endif + #define TIMER_INITIALIZER(_function, _expires, _data) { \ .entry = { .prev = TIMER_ENTRY_STATIC }, \ .function = (_function), \ .expires = (_expires), \ .data = (_data), \ .base = &boot_tvec_bases, \ + __TIMER_LOCKDEP_MAP_INITIALIZER( \ + __FILE__ ":" __stringify(__LINE__)) \ } #define DEFINE_TIMER(_name, _function, _expires, _data) \ struct timer_list _name = \ TIMER_INITIALIZER(_function, _expires, _data) -void init_timer(struct timer_list *timer); -void init_timer_deferrable(struct timer_list *timer); +void init_timer_key(struct timer_list *timer, + const char *name, + struct lock_class_key *key); +void init_timer_deferrable_key(struct timer_list *timer, + const char *name, + struct lock_class_key *key); + +#ifdef CONFIG_LOCKDEP +#define init_timer(timer) \ + do { \ + static struct lock_class_key __key; \ + init_timer_key((timer), #timer, &__key); \ + } while (0) + +#define init_timer_deferrable(timer) \ + do { \ + static struct lock_class_key __key; \ + init_timer_deferrable_key((timer), #timer, &__key); \ + } while (0) + +#define init_timer_on_stack(timer) \ + do { \ + static struct lock_class_key __key; \ + init_timer_on_stack_key((timer), #timer, &__key); \ + } while (0) + +#define setup_timer(timer, fn, data) \ + do { \ + static struct lock_class_key __key; \ + setup_timer_key((timer), #timer, &__key, (fn), (data));\ + } while (0) + +#define setup_timer_on_stack(timer, fn, data) \ + do { \ + static struct lock_class_key __key; \ + setup_timer_on_stack_key((timer), #timer, &__key, \ + (fn), (data)); \ + } while (0) +#else +#define init_timer(timer)\ + init_timer_key((timer), NULL, NULL) +#define init_timer_deferrable(timer)\ + init_timer_deferrable_key((timer), NULL, NULL) +#define init_timer_on_stack(timer)\ + init_timer_on_stack_key((timer), NULL, NULL) +#define setup_timer(timer, fn, data)\ + setup_timer_key((timer), NULL, NULL, (fn), (data)) +#define setup_timer_on_stack(timer, fn, data)\ + setup_timer_on_stack_key((timer), NULL, NULL, (fn), (data)) +#endif #ifdef CONFIG_DEBUG_OBJECTS_TIMERS -extern void init_timer_on_stack(struct timer_list *timer); +extern void init_timer_on_stack_key(struct timer_list *timer, + const char *name, + struct lock_class_key *key); extern void destroy_timer_on_stack(struct timer_list *timer); #else static inline void destroy_timer_on_stack(struct timer_list *timer) { } -static inline void init_timer_on_stack(struct timer_list *timer) +static inline void init_timer_on_stack_key(struct timer_list *timer, + const char *name, + struct lock_class_key *key) { - init_timer(timer); + init_timer_key(timer, name, key); } #endif -static inline void setup_timer(struct timer_list * timer, +static inline void setup_timer_key(struct timer_list * timer, + const char *name, + struct lock_class_key *key, void (*function)(unsigned long), unsigned long data) { timer->function = function; timer->data = data; - init_timer(timer); + init_timer_key(timer, name, key); } -static inline void setup_timer_on_stack(struct timer_list *timer, +static inline void setup_timer_on_stack_key(struct timer_list *timer, + const char *name, + struct lock_class_key *key, void (*function)(unsigned long), unsigned long data) { timer->function = function; timer->data = data; - init_timer_on_stack(timer); + init_timer_on_stack_key(timer, name, key); } /** @@ -86,9 +161,12 @@ static inline int timer_pending(const struct 
timer_list * timer) extern void add_timer_on(struct timer_list *timer, int cpu); extern int del_timer(struct timer_list * timer); -extern int __mod_timer(struct timer_list *timer, unsigned long expires); extern int mod_timer(struct timer_list *timer, unsigned long expires); +extern int mod_timer_pending(struct timer_list *timer, unsigned long expires); +extern int mod_timer_pinned(struct timer_list *timer, unsigned long expires); +#define TIMER_NOT_PINNED 0 +#define TIMER_PINNED 1 /* * The jiffies value which is added to now, when there is no timer * in the timer wheel: @@ -112,6 +190,8 @@ extern unsigned long get_next_timer_interrupt(unsigned long now); */ #ifdef CONFIG_TIMER_STATS +extern int timer_stats_active; + #define TIMER_STATS_FLAG_DEFERRABLE 0x1 extern void init_timer_stats(void); @@ -125,6 +205,8 @@ extern void __timer_stats_timer_set_start_info(struct timer_list *timer, static inline void timer_stats_timer_set_start_info(struct timer_list *timer) { + if (likely(!timer_stats_active)) + return; __timer_stats_timer_set_start_info(timer, __builtin_return_address(0)); } @@ -146,25 +228,7 @@ static inline void timer_stats_timer_clear_start_info(struct timer_list *timer) } #endif -/** - * add_timer - start a timer - * @timer: the timer to be added - * - * The kernel will do a ->function(->data) callback from the - * timer interrupt at the ->expires point in the future. The - * current time is 'jiffies'. - * - * The timer's ->expires, ->function (and if the handler uses it, ->data) - * fields must be set prior calling this function. - * - * Timers with an ->expires field in the past will be executed in the next - * timer tick. - */ -static inline void add_timer(struct timer_list *timer) -{ - BUG_ON(timer_pending(timer)); - __mod_timer(timer, timer->expires); -} +extern void add_timer(struct timer_list *timer); #ifdef CONFIG_SMP extern int try_to_del_timer_sync(struct timer_list *timer); diff --git a/include/linux/timerfd.h b/include/linux/timerfd.h index 86cb0501d3e..2d0792983f8 100644 --- a/include/linux/timerfd.h +++ b/include/linux/timerfd.h @@ -11,13 +11,21 @@ /* For O_CLOEXEC and O_NONBLOCK */ #include <linux/fcntl.h> -/* Flags for timerfd_settime. */ +/* + * CAREFUL: Check include/asm-generic/fcntl.h when defining + * new flags, since they might collide with O_* ones. We want + * to re-use O_* flags that couldn't possibly have a meaning + * from eventfd, in order to leave a free define-space for + * shared O_* flags. + */ #define TFD_TIMER_ABSTIME (1 << 0) - -/* Flags for timerfd_create. */ #define TFD_CLOEXEC O_CLOEXEC #define TFD_NONBLOCK O_NONBLOCK +#define TFD_SHARED_FCNTL_FLAGS (TFD_CLOEXEC | TFD_NONBLOCK) +/* Flags for timerfd_create. */ +#define TFD_CREATE_FLAGS TFD_SHARED_FCNTL_FLAGS +/* Flags for timerfd_settime. */ +#define TFD_SETTIME_FLAGS TFD_TIMER_ABSTIME #endif /* _LINUX_TIMERFD_H */ - diff --git a/include/linux/timeriomem-rng.h b/include/linux/timeriomem-rng.h new file mode 100644 index 00000000000..3e08a1c8683 --- /dev/null +++ b/include/linux/timeriomem-rng.h @@ -0,0 +1,21 @@ +/* + * linux/include/linux/timeriomem-rng.h + * + * Copyright (c) 2009 Alexander Clouter <alex@digriz.org.uk> + * + * This program is free software; you can redistribute it and/or modify + * it under the terms of the GNU General Public License version 2 as + * published by the Free Software Foundation. 
+ */ + +#include <linux/completion.h> + +struct timeriomem_rng_data { + struct completion completion; + unsigned int present:1; + + void __iomem *address; + + /* measures in usecs */ + unsigned int period; +}; diff --git a/include/linux/times.h b/include/linux/times.h index e2d3020742a..87b62615ced 100644 --- a/include/linux/times.h +++ b/include/linux/times.h @@ -4,10 +4,10 @@ #include <linux/types.h> struct tms { - clock_t tms_utime; - clock_t tms_stime; - clock_t tms_cutime; - clock_t tms_cstime; + __kernel_clock_t tms_utime; + __kernel_clock_t tms_stime; + __kernel_clock_t tms_cutime; + __kernel_clock_t tms_cstime; }; #endif diff --git a/include/linux/timex.h b/include/linux/timex.h index 998a55d80ac..e6967d10d9e 100644 --- a/include/linux/timex.h +++ b/include/linux/timex.h @@ -170,17 +170,37 @@ struct timex { #include <asm/timex.h> /* - * SHIFT_KG and SHIFT_KF establish the damping of the PLL and are chosen - * for a slightly underdamped convergence characteristic. SHIFT_KH - * establishes the damping of the FLL and is chosen by wisdom and black - * art. + * SHIFT_PLL is used as a dampening factor to define how much we + * adjust the frequency correction for a given offset in PLL mode. + * It also used in dampening the offset correction, to define how + * much of the current value in time_offset we correct for each + * second. Changing this value changes the stiffness of the ntp + * adjustment code. A lower value makes it more flexible, reducing + * NTP convergence time. A higher value makes it stiffer, increasing + * convergence time, but making the clock more stable. * - * MAXTC establishes the maximum time constant of the PLL. With the - * SHIFT_KG and SHIFT_KF values given and a time constant range from - * zero to MAXTC, the PLL will converge in 15 minutes to 16 hours, - * respectively. + * In David Mills' nanokernel reference implementation SHIFT_PLL is 4. + * However this seems to increase convergence time much too long. + * + * https://lists.ntp.org/pipermail/hackers/2008-January/003487.html + * + * In the above mailing list discussion, it seems the value of 4 + * was appropriate for other Unix systems with HZ=100, and that + * SHIFT_PLL should be decreased as HZ increases. However, Linux's + * clock steering implementation is HZ independent. + * + * Through experimentation, a SHIFT_PLL value of 2 was found to allow + * for fast convergence (very similar to the NTPv3 code used prior to + * v2.6.19), with good clock stability. + * + * + * SHIFT_FLL is used as a dampening factor to define how much we + * adjust the frequency correction for a given offset in FLL mode. + * In David Mills' nanokernel reference implementation SHIFT_FLL is 2. + * + * MAXTC establishes the maximum time constant of the PLL. */ -#define SHIFT_PLL 4 /* PLL frequency factor (shift) */ +#define SHIFT_PLL 2 /* PLL frequency factor (shift) */ #define SHIFT_FLL 2 /* FLL frequency factor (shift) */ #define MAXTC 10 /* maximum time constant (shift) */ @@ -190,12 +210,12 @@ struct timex { * offset and maximum frequency tolerance. 
*/ #define SHIFT_USEC 16 /* frequency offset scale (shift) */ -#define PPM_SCALE (NSEC_PER_USEC << (NTP_SCALE_SHIFT - SHIFT_USEC)) +#define PPM_SCALE ((s64)NSEC_PER_USEC << (NTP_SCALE_SHIFT - SHIFT_USEC)) #define PPM_SCALE_INV_SHIFT 19 -#define PPM_SCALE_INV ((1ll << (PPM_SCALE_INV_SHIFT + NTP_SCALE_SHIFT)) / \ +#define PPM_SCALE_INV ((1LL << (PPM_SCALE_INV_SHIFT + NTP_SCALE_SHIFT)) / \ PPM_SCALE + 1) -#define MAXPHASE 500000000l /* max phase error (ns) */ +#define MAXPHASE 500000000L /* max phase error (ns) */ #define MAXFREQ 500000 /* max frequency error (ns/s) */ #define MAXFREQ_SCALED ((s64)MAXFREQ << NTP_SCALE_SHIFT) #define MINSEC 256 /* min interval between updates (s) */ @@ -260,6 +280,9 @@ extern int do_adjtimex(struct timex *); int read_current_timer(unsigned long *timer_val); +/* The clock frequency of the i8253/i8254 PIT */ +#define PIT_TICK_RATE 1193182ul + #endif /* KERNEL */ #endif /* LINUX_TIMEX_H */ diff --git a/include/linux/tipc.h b/include/linux/tipc.h index bea469455a0..3d92396639d 100644 --- a/include/linux/tipc.h +++ b/include/linux/tipc.h @@ -209,5 +209,7 @@ struct sockaddr_tipc { #define TIPC_SRC_DROPPABLE 128 /* Default: 0 (resend congested msg) */ #define TIPC_DEST_DROPPABLE 129 /* Default: based on socket type */ #define TIPC_CONN_TIMEOUT 130 /* Default: 8000 (ms) */ +#define TIPC_NODE_RECVQ_DEPTH 131 /* Default: none (read only) */ +#define TIPC_SOCK_RECVQ_DEPTH 132 /* Default: none (read only) */ #endif diff --git a/include/linux/topology.h b/include/linux/topology.h index e632d29f054..85e8cf7d393 100644 --- a/include/linux/topology.h +++ b/include/linux/topology.h @@ -38,11 +38,7 @@ #endif #ifndef nr_cpus_node -#define nr_cpus_node(node) \ - ({ \ - node_to_cpumask_ptr(__tmp__, node); \ - cpus_weight(*__tmp__); \ - }) +#define nr_cpus_node(node) cpumask_weight(cpumask_of_node(node)) #endif #define for_each_node_with_cpus(node) \ @@ -89,20 +85,29 @@ int arch_update_cpu_topology(void); #define ARCH_HAS_SCHED_WAKE_IDLE /* Common values for SMT siblings */ #ifndef SD_SIBLING_INIT -#define SD_SIBLING_INIT (struct sched_domain) { \ - .min_interval = 1, \ - .max_interval = 2, \ - .busy_factor = 64, \ - .imbalance_pct = 110, \ - .flags = SD_LOAD_BALANCE \ - | SD_BALANCE_NEWIDLE \ - | SD_BALANCE_FORK \ - | SD_BALANCE_EXEC \ - | SD_WAKE_AFFINE \ - | SD_WAKE_BALANCE \ - | SD_SHARE_CPUPOWER, \ - .last_balance = jiffies, \ - .balance_interval = 1, \ +#define SD_SIBLING_INIT (struct sched_domain) { \ + .min_interval = 1, \ + .max_interval = 2, \ + .busy_factor = 64, \ + .imbalance_pct = 110, \ + \ + .flags = 1*SD_LOAD_BALANCE \ + | 1*SD_BALANCE_NEWIDLE \ + | 1*SD_BALANCE_EXEC \ + | 1*SD_BALANCE_FORK \ + | 0*SD_WAKE_IDLE \ + | 1*SD_WAKE_AFFINE \ + | 1*SD_WAKE_BALANCE \ + | 1*SD_SHARE_CPUPOWER \ + | 0*SD_POWERSAVINGS_BALANCE \ + | 0*SD_SHARE_PKG_RESOURCES \ + | 0*SD_SERIALIZE \ + | 0*SD_WAKE_IDLE_FAR \ + | 0*SD_PREFER_SIBLING \ + , \ + .last_balance = jiffies, \ + .balance_interval = 1, \ + .smt_gain = 1178, /* 15% */ \ } #endif #endif /* CONFIG_SCHED_SMT */ @@ -110,69 +115,94 @@ int arch_update_cpu_topology(void); #ifdef CONFIG_SCHED_MC /* Common values for MC siblings. 
for now mostly derived from SD_CPU_INIT */ #ifndef SD_MC_INIT -#define SD_MC_INIT (struct sched_domain) { \ - .min_interval = 1, \ - .max_interval = 4, \ - .busy_factor = 64, \ - .imbalance_pct = 125, \ - .cache_nice_tries = 1, \ - .busy_idx = 2, \ - .wake_idx = 1, \ - .forkexec_idx = 1, \ - .flags = SD_LOAD_BALANCE \ - | SD_BALANCE_FORK \ - | SD_BALANCE_EXEC \ - | SD_WAKE_AFFINE \ - | SD_WAKE_BALANCE \ - | SD_SHARE_PKG_RESOURCES\ - | sd_balance_for_mc_power()\ - | sd_power_saving_flags(),\ - .last_balance = jiffies, \ - .balance_interval = 1, \ +#define SD_MC_INIT (struct sched_domain) { \ + .min_interval = 1, \ + .max_interval = 4, \ + .busy_factor = 64, \ + .imbalance_pct = 125, \ + .cache_nice_tries = 1, \ + .busy_idx = 2, \ + .wake_idx = 1, \ + .forkexec_idx = 1, \ + \ + .flags = 1*SD_LOAD_BALANCE \ + | 1*SD_BALANCE_NEWIDLE \ + | 1*SD_BALANCE_EXEC \ + | 1*SD_BALANCE_FORK \ + | 1*SD_WAKE_IDLE \ + | 1*SD_WAKE_AFFINE \ + | 1*SD_WAKE_BALANCE \ + | 0*SD_SHARE_CPUPOWER \ + | 1*SD_SHARE_PKG_RESOURCES \ + | 0*SD_SERIALIZE \ + | 0*SD_WAKE_IDLE_FAR \ + | sd_balance_for_mc_power() \ + | sd_power_saving_flags() \ + , \ + .last_balance = jiffies, \ + .balance_interval = 1, \ } #endif #endif /* CONFIG_SCHED_MC */ /* Common values for CPUs */ #ifndef SD_CPU_INIT -#define SD_CPU_INIT (struct sched_domain) { \ - .min_interval = 1, \ - .max_interval = 4, \ - .busy_factor = 64, \ - .imbalance_pct = 125, \ - .cache_nice_tries = 1, \ - .busy_idx = 2, \ - .idle_idx = 1, \ - .newidle_idx = 2, \ - .wake_idx = 1, \ - .forkexec_idx = 1, \ - .flags = SD_LOAD_BALANCE \ - | SD_BALANCE_EXEC \ - | SD_BALANCE_FORK \ - | SD_WAKE_AFFINE \ - | SD_WAKE_BALANCE \ - | sd_balance_for_package_power()\ - | sd_power_saving_flags(),\ - .last_balance = jiffies, \ - .balance_interval = 1, \ +#define SD_CPU_INIT (struct sched_domain) { \ + .min_interval = 1, \ + .max_interval = 4, \ + .busy_factor = 64, \ + .imbalance_pct = 125, \ + .cache_nice_tries = 1, \ + .busy_idx = 2, \ + .idle_idx = 1, \ + .newidle_idx = 2, \ + .wake_idx = 1, \ + .forkexec_idx = 1, \ + \ + .flags = 1*SD_LOAD_BALANCE \ + | 1*SD_BALANCE_NEWIDLE \ + | 1*SD_BALANCE_EXEC \ + | 1*SD_BALANCE_FORK \ + | 1*SD_WAKE_IDLE \ + | 0*SD_WAKE_AFFINE \ + | 1*SD_WAKE_BALANCE \ + | 0*SD_SHARE_CPUPOWER \ + | 0*SD_SHARE_PKG_RESOURCES \ + | 0*SD_SERIALIZE \ + | 0*SD_WAKE_IDLE_FAR \ + | sd_balance_for_package_power() \ + | sd_power_saving_flags() \ + , \ + .last_balance = jiffies, \ + .balance_interval = 1, \ } #endif /* sched_domains SD_ALLNODES_INIT for NUMA machines */ -#define SD_ALLNODES_INIT (struct sched_domain) { \ - .min_interval = 64, \ - .max_interval = 64*num_online_cpus(), \ - .busy_factor = 128, \ - .imbalance_pct = 133, \ - .cache_nice_tries = 1, \ - .busy_idx = 3, \ - .idle_idx = 3, \ - .flags = SD_LOAD_BALANCE \ - | SD_BALANCE_NEWIDLE \ - | SD_WAKE_AFFINE \ - | SD_SERIALIZE, \ - .last_balance = jiffies, \ - .balance_interval = 64, \ +#define SD_ALLNODES_INIT (struct sched_domain) { \ + .min_interval = 64, \ + .max_interval = 64*num_online_cpus(), \ + .busy_factor = 128, \ + .imbalance_pct = 133, \ + .cache_nice_tries = 1, \ + .busy_idx = 3, \ + .idle_idx = 3, \ + .flags = 1*SD_LOAD_BALANCE \ + | 1*SD_BALANCE_NEWIDLE \ + | 0*SD_BALANCE_EXEC \ + | 0*SD_BALANCE_FORK \ + | 0*SD_WAKE_IDLE \ + | 1*SD_WAKE_AFFINE \ + | 0*SD_WAKE_BALANCE \ + | 0*SD_SHARE_CPUPOWER \ + | 0*SD_POWERSAVINGS_BALANCE \ + | 0*SD_SHARE_PKG_RESOURCES \ + | 1*SD_SERIALIZE \ + | 1*SD_WAKE_IDLE_FAR \ + | 0*SD_PREFER_SIBLING \ + , \ + .last_balance = jiffies, \ + .balance_interval = 64, \ } #ifdef 
CONFIG_NUMA @@ -193,5 +223,16 @@ int arch_update_cpu_topology(void); #ifndef topology_core_siblings #define topology_core_siblings(cpu) cpumask_of_cpu(cpu) #endif +#ifndef topology_thread_cpumask +#define topology_thread_cpumask(cpu) cpumask_of(cpu) +#endif +#ifndef topology_core_cpumask +#define topology_core_cpumask(cpu) cpumask_of(cpu) +#endif + +/* Returns the number of the current Node. */ +#ifndef numa_node_id +#define numa_node_id() (cpu_to_node(raw_smp_processor_id())) +#endif #endif /* _LINUX_TOPOLOGY_H */ diff --git a/include/linux/tpm.h b/include/linux/tpm.h new file mode 100644 index 00000000000..3338b3f5c21 --- /dev/null +++ b/include/linux/tpm.h @@ -0,0 +1,35 @@ +/* + * Copyright (C) 2004,2007,2008 IBM Corporation + * + * Authors: + * Leendert van Doorn <leendert@watson.ibm.com> + * Dave Safford <safford@watson.ibm.com> + * Reiner Sailer <sailer@watson.ibm.com> + * Kylene Hall <kjhall@us.ibm.com> + * Debora Velarde <dvelarde@us.ibm.com> + * + * Maintained by: <tpmdd_devel@lists.sourceforge.net> + * + * Device driver for TCG/TCPA TPM (trusted platform module). + * Specifications at www.trustedcomputinggroup.org + * + * This program is free software; you can redistribute it and/or + * modify it under the terms of the GNU General Public License as + * published by the Free Software Foundation, version 2 of the + * License. + * + */ +#ifndef __LINUX_TPM_H__ +#define __LINUX_TPM_H__ + +/* + * Chip num is this value or a valid tpm idx + */ +#define TPM_ANY_NUM 0xFFFF + +#if defined(CONFIG_TCG_TPM) + +extern int tpm_pcr_read(u32 chip_num, int pcr_idx, u8 *res_buf); +extern int tpm_pcr_extend(u32 chip_num, int pcr_idx, const u8 *hash); +#endif +#endif diff --git a/include/linux/trace_clock.h b/include/linux/trace_clock.h new file mode 100644 index 00000000000..7a813038408 --- /dev/null +++ b/include/linux/trace_clock.h @@ -0,0 +1,19 @@ +#ifndef _LINUX_TRACE_CLOCK_H +#define _LINUX_TRACE_CLOCK_H + +/* + * 3 trace clock variants, with differing scalability/precision + * tradeoffs: + * + * - local: CPU-local trace clock + * - medium: scalable global clock with some jitter + * - global: globally monotonic, serialized clock + */ +#include <linux/compiler.h> +#include <linux/types.h> + +extern u64 notrace trace_clock_local(void); +extern u64 notrace trace_clock(void); +extern u64 notrace trace_clock_global(void); + +#endif /* _LINUX_TRACE_CLOCK_H */ diff --git a/include/linux/trace_seq.h b/include/linux/trace_seq.h new file mode 100644 index 00000000000..c134dd1fe6b --- /dev/null +++ b/include/linux/trace_seq.h @@ -0,0 +1,94 @@ +#ifndef _LINUX_TRACE_SEQ_H +#define _LINUX_TRACE_SEQ_H + +#include <linux/fs.h> + +#include <asm/page.h> + +/* + * Trace sequences are used to allow a function to call several other functions + * to create a string of data to use (up to a max of PAGE_SIZE. + */ + +struct trace_seq { + unsigned char buffer[PAGE_SIZE]; + unsigned int len; + unsigned int readpos; +}; + +static inline void +trace_seq_init(struct trace_seq *s) +{ + s->len = 0; + s->readpos = 0; +} + +/* + * Currently only defined when tracing is enabled. + */ +#ifdef CONFIG_TRACING +extern int trace_seq_printf(struct trace_seq *s, const char *fmt, ...) 
+ __attribute__ ((format (printf, 2, 3))); +extern int trace_seq_vprintf(struct trace_seq *s, const char *fmt, va_list args) + __attribute__ ((format (printf, 2, 0))); +extern int +trace_seq_bprintf(struct trace_seq *s, const char *fmt, const u32 *binary); +extern void trace_print_seq(struct seq_file *m, struct trace_seq *s); +extern ssize_t trace_seq_to_user(struct trace_seq *s, char __user *ubuf, + size_t cnt); +extern int trace_seq_puts(struct trace_seq *s, const char *str); +extern int trace_seq_putc(struct trace_seq *s, unsigned char c); +extern int trace_seq_putmem(struct trace_seq *s, const void *mem, size_t len); +extern int trace_seq_putmem_hex(struct trace_seq *s, const void *mem, + size_t len); +extern void *trace_seq_reserve(struct trace_seq *s, size_t len); +extern int trace_seq_path(struct trace_seq *s, struct path *path); + +#else /* CONFIG_TRACING */ +static inline int trace_seq_printf(struct trace_seq *s, const char *fmt, ...) +{ + return 0; +} +static inline int +trace_seq_bprintf(struct trace_seq *s, const char *fmt, const u32 *binary) +{ + return 0; +} + +static inline void trace_print_seq(struct seq_file *m, struct trace_seq *s) +{ +} +static inline ssize_t trace_seq_to_user(struct trace_seq *s, char __user *ubuf, + size_t cnt) +{ + return 0; +} +static inline int trace_seq_puts(struct trace_seq *s, const char *str) +{ + return 0; +} +static inline int trace_seq_putc(struct trace_seq *s, unsigned char c) +{ + return 0; +} +static inline int +trace_seq_putmem(struct trace_seq *s, const void *mem, size_t len) +{ + return 0; +} +static inline int trace_seq_putmem_hex(struct trace_seq *s, const void *mem, + size_t len) +{ + return 0; +} +static inline void *trace_seq_reserve(struct trace_seq *s, size_t len) +{ + return NULL; +} +static inline int trace_seq_path(struct trace_seq *s, struct path *path) +{ + return 0; +} +#endif /* CONFIG_TRACING */ + +#endif /* _LINUX_TRACE_SEQ_H */ diff --git a/include/linux/tracehook.h b/include/linux/tracehook.h index 6186a789d6c..17ba82efa48 100644 --- a/include/linux/tracehook.h +++ b/include/linux/tracehook.h @@ -143,7 +143,7 @@ static inline void tracehook_report_syscall_exit(struct pt_regs *regs, int step) * * Return %LSM_UNSAFE_* bits applied to an exec because of tracing. * - * Called with task_lock() held on @task. + * @task->cred_guard_mutex is held by the caller through the do_execve(). */ static inline int tracehook_unsafe_exec(struct task_struct *task) { @@ -259,14 +259,12 @@ static inline void tracehook_finish_clone(struct task_struct *child, /** * tracehook_report_clone - in parent, new child is about to start running - * @trace: return value from tracehook_prepare_clone() * @regs: parent's user register state * @clone_flags: flags from parent's system call * @pid: new child's PID in the parent's namespace * @child: new child task * - * Called after a child is set up, but before it has been started - * running. @trace is the value returned by tracehook_prepare_clone(). + * Called after a child is set up, but before it has been started running. * This is not a good place to block, because the child has not started * yet. Suspend the child here if desired, and then block in * tracehook_report_clone_complete(). This must prevent the child from @@ -276,13 +274,14 @@ static inline void tracehook_finish_clone(struct task_struct *child, * * Called with no locks held, but the child cannot run until this returns. 
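Returning to the trace_seq interface added above: the usual pattern is to build up a line with the printf-style helpers and then hand the buffer to trace_print_seq() or trace_seq_to_user(). A rough kernel-side sketch (hypothetical function, illustrative only):

#include <linux/trace_seq.h>

static void example_render_event(struct trace_seq *s, int cpu, u64 stamp, bool running)
{
	trace_seq_init(s);
	trace_seq_printf(s, "cpu=%d stamp=%llu ", cpu, (unsigned long long)stamp);
	trace_seq_puts(s, "state=");
	trace_seq_putc(s, running ? 'R' : 'S');
	trace_seq_putc(s, '\n');
	/* s->buffer now holds at most PAGE_SIZE bytes; s->len is the used length */
}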
*/ -static inline void tracehook_report_clone(int trace, struct pt_regs *regs, +static inline void tracehook_report_clone(struct pt_regs *regs, unsigned long clone_flags, pid_t pid, struct task_struct *child) { - if (unlikely(trace) || unlikely(clone_flags & CLONE_PTRACE)) { + if (unlikely(task_ptrace(child))) { /* - * The child starts up with an immediate SIGSTOP. + * It doesn't matter who attached/attaching to this + * task, the pending SIGSTOP is right in any case. */ sigaddset(&child->pending.signal, SIGSTOP); set_tsk_thread_flag(child, TIF_SIGPENDING); @@ -388,17 +387,14 @@ static inline void tracehook_signal_handler(int sig, siginfo_t *info, * tracehook_consider_ignored_signal - suppress short-circuit of ignored signal * @task: task receiving the signal * @sig: signal number being sent - * @handler: %SIG_IGN or %SIG_DFL * * Return zero iff tracing doesn't care to examine this ignored signal, * so it can short-circuit normal delivery and never even get queued. - * Either @handler is %SIG_DFL and @sig's default is ignore, or it's %SIG_IGN. * * Called with @task->sighand->siglock held. */ static inline int tracehook_consider_ignored_signal(struct task_struct *task, - int sig, - void __user *handler) + int sig) { return (task_ptrace(task) & PT_PTRACED) != 0; } @@ -407,19 +403,17 @@ static inline int tracehook_consider_ignored_signal(struct task_struct *task, * tracehook_consider_fatal_signal - suppress special handling of fatal signal * @task: task receiving the signal * @sig: signal number being sent - * @handler: %SIG_DFL or %SIG_IGN * * Return nonzero to prevent special handling of this termination signal. - * Normally @handler is %SIG_DFL. It can be %SIG_IGN if @sig is ignored, - * in which case force_sig() is about to reset it to %SIG_DFL. + * Normally handler for signal is %SIG_DFL. It can be %SIG_IGN if @sig is + * ignored, in which case force_sig() is about to reset it to %SIG_DFL. * When this returns zero, this signal might cause a quick termination * that does not give the debugger a chance to intercept the signal. * * Called with or without @task->sighand->siglock held. */ static inline int tracehook_consider_fatal_signal(struct task_struct *task, - int sig, - void __user *handler) + int sig) { return (task_ptrace(task) & PT_PTRACED) != 0; } @@ -507,7 +501,7 @@ static inline int tracehook_notify_jctl(int notify, int why) static inline int tracehook_notify_death(struct task_struct *task, void **death_cookie, int group_dead) { - if (task->exit_signal == -1) + if (task_detached(task)) return task->ptrace ? SIGCHLD : DEATH_REAP; /* diff --git a/include/linux/tracepoint.h b/include/linux/tracepoint.h index 75700545836..63a3f7a8058 100644 --- a/include/linux/tracepoint.h +++ b/include/linux/tracepoint.h @@ -23,6 +23,8 @@ struct tracepoint; struct tracepoint { const char *name; /* Tracepoint name */ int state; /* State. */ + void (*regfunc)(void); + void (*unregfunc)(void); void **funcs; } __attribute__((aligned(32))); /* * Aligned on 32 bytes because it is @@ -31,8 +33,10 @@ struct tracepoint { * Keep in sync with vmlinux.lds.h. */ -#define TPPROTO(args...) args -#define TPARGS(args...) args +#ifndef DECLARE_TRACE + +#define TP_PROTO(args...) args +#define TP_ARGS(args...) 
args #ifdef CONFIG_TRACEPOINTS @@ -65,7 +69,7 @@ struct tracepoint { { \ if (unlikely(__tracepoint_##name.state)) \ __DO_TRACE(&__tracepoint_##name, \ - TPPROTO(proto), TPARGS(args)); \ + TP_PROTO(proto), TP_ARGS(args)); \ } \ static inline int register_trace_##name(void (*probe)(proto)) \ { \ @@ -76,12 +80,16 @@ struct tracepoint { return tracepoint_probe_unregister(#name, (void *)probe);\ } -#define DEFINE_TRACE(name) \ + +#define DEFINE_TRACE_FN(name, reg, unreg) \ static const char __tpstrtab_##name[] \ __attribute__((section("__tracepoints_strings"))) = #name; \ struct tracepoint __tracepoint_##name \ __attribute__((section("__tracepoints"), aligned(32))) = \ - { __tpstrtab_##name, 0, NULL } + { __tpstrtab_##name, 0, reg, unreg, NULL } + +#define DEFINE_TRACE(name) \ + DEFINE_TRACE_FN(name, NULL, NULL); #define EXPORT_TRACEPOINT_SYMBOL_GPL(name) \ EXPORT_SYMBOL_GPL(__tracepoint_##name) @@ -106,6 +114,7 @@ extern void tracepoint_update_probe_range(struct tracepoint *begin, return -ENOSYS; \ } +#define DEFINE_TRACE_FN(name, reg, unreg) #define DEFINE_TRACE(name) #define EXPORT_TRACEPOINT_SYMBOL_GPL(name) #define EXPORT_TRACEPOINT_SYMBOL(name) @@ -114,6 +123,7 @@ static inline void tracepoint_update_probe_range(struct tracepoint *begin, struct tracepoint *end) { } #endif /* CONFIG_TRACEPOINTS */ +#endif /* DECLARE_TRACE */ /* * Connect a probe to a tracepoint. @@ -153,4 +163,127 @@ static inline void tracepoint_synchronize_unregister(void) synchronize_sched(); } -#endif +#define PARAMS(args...) args + +#endif /* _LINUX_TRACEPOINT_H */ + +/* + * Note: we keep the TRACE_EVENT outside the include file ifdef protection. + * This is due to the way trace events work. If a file includes two + * trace event headers under one "CREATE_TRACE_POINTS" the first include + * will override the TRACE_EVENT and break the second include. + */ + +#ifndef TRACE_EVENT +/* + * For use with the TRACE_EVENT macro: + * + * We define a tracepoint, its arguments, its printk format + * and its 'fast binary record' layout. + * + * Firstly, name your tracepoint via TRACE_EVENT(name, ...): the + * 'subsystem_event' notation is fine. + * + * Think about this whole construct as the + * 'trace_sched_switch() function' from now on. + * + * + * TRACE_EVENT(sched_switch, + * + * * + * * A function has a regular function-arguments + * * prototype; declare it via TP_PROTO(): + * * + * + * TP_PROTO(struct rq *rq, struct task_struct *prev, + * struct task_struct *next), + * + * * + * * Define the call signature of the 'function'. + * * (Design sidenote: we use this instead of a + * * TP_PROTO1/TP_PROTO2/TP_PROTO3 ugliness.) + * * + * + * TP_ARGS(rq, prev, next), + * + * * + * * Fast binary tracing: define the trace record via + * * TP_STRUCT__entry(). You can think about it like a + * * regular C structure local variable definition. + * * + * * This is how the trace record is structured and will + * * be saved into the ring buffer. These are the fields + * * that will be exposed to user-space in + * * /sys/kernel/debug/tracing/events/<*>/format.
+ * * + * * The declared 'local variable' is called '__entry' + * * + * * __field(pid_t, prev_pid) is equivalent to a standard declaration: + * * + * * pid_t prev_pid; + * * + * * __array(char, prev_comm, TASK_COMM_LEN) is equivalent to: + * * + * * char prev_comm[TASK_COMM_LEN]; + * * + * + * TP_STRUCT__entry( + * __array( char, prev_comm, TASK_COMM_LEN ) + * __field( pid_t, prev_pid ) + * __field( int, prev_prio ) + * __array( char, next_comm, TASK_COMM_LEN ) + * __field( pid_t, next_pid ) + * __field( int, next_prio ) + * ), + * + * * + * * Assign the entry into the trace record, by embedding + * * a full C statement block into TP_fast_assign(). You + * * can refer to the trace record as '__entry' - + * * otherwise you can put arbitrary C code in here. + * * + * * Note: this C code will execute every time a trace event + * * happens, on an active tracepoint. + * * + * + * TP_fast_assign( + * memcpy(__entry->next_comm, next->comm, TASK_COMM_LEN); + * __entry->prev_pid = prev->pid; + * __entry->prev_prio = prev->prio; + * memcpy(__entry->prev_comm, prev->comm, TASK_COMM_LEN); + * __entry->next_pid = next->pid; + * __entry->next_prio = next->prio; + * ) + * + * * + * * Formatted output of a trace record via TP_printk(). + * * This is how the tracepoint will appear under ftrace + * * plugins that make use of this tracepoint. + * * + * * (raw-binary tracing won't actually perform this step.) + * * + * + * TP_printk("task %s:%d [%d] ==> %s:%d [%d]", + * __entry->prev_comm, __entry->prev_pid, __entry->prev_prio, + * __entry->next_comm, __entry->next_pid, __entry->next_prio), + * + * ); + * + * This macro construct is thus used for the regular printk format + * tracing setup; it is used to construct a function pointer based + * tracepoint callback (this is used by programmatic plugins and + * can also be used by generic instrumentation like SystemTap), and + * it is also used to expose a structured trace record in + * /sys/kernel/debug/tracing/events/. + * + * A set of (un)registration functions can be passed to the variant + * TRACE_EVENT_FN to perform any (un)registration work.
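For comparison with the TRACE_EVENT construct documented above, the bare tracepoint API it builds on looks roughly like this (hypothetical tracepoint and probe names, illustrative only):

#include <linux/init.h>
#include <linux/tracepoint.h>

/* Header side: declaration, using the renamed TP_PROTO()/TP_ARGS(). */
DECLARE_TRACE(example_io_done,
	TP_PROTO(int error, unsigned int bytes),
	TP_ARGS(error, bytes));

/* Exactly one compilation unit provides the definition. */
DEFINE_TRACE(example_io_done);

/* Instrumented code path: a disabled tracepoint costs a single test. */
static void example_complete_request(int error, unsigned int bytes)
{
	trace_example_io_done(error, bytes);
}

/* A tracer attaches and detaches probes via the generated helpers. */
static void example_probe(int error, unsigned int bytes)
{
	/* keep this short: it runs in the traced context */
}

static int __init example_tracer_init(void)
{
	return register_trace_example_io_done(example_probe);
}

static void __exit example_tracer_exit(void)
{
	unregister_trace_example_io_done(example_probe);
	tracepoint_synchronize_unregister();
}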
+ */ + +#define TRACE_EVENT(name, proto, args, struct, assign, print) \ + DECLARE_TRACE(name, PARAMS(proto), PARAMS(args)) +#define TRACE_EVENT_FN(name, proto, args, struct, \ + assign, print, reg, unreg) \ + DECLARE_TRACE(name, PARAMS(proto), PARAMS(args)) + +#endif /* ifdef TRACE_EVENT (see note above) */ diff --git a/include/linux/tty.h b/include/linux/tty.h index fc39db95499..0d3974f59c5 100644 --- a/include/linux/tty.h +++ b/include/linux/tty.h @@ -23,7 +23,7 @@ */ #define NR_UNIX98_PTY_DEFAULT 4096 /* Default maximum for Unix98 ptys */ #define NR_UNIX98_PTY_MAX (1 << MINORBITS) /* Absolute limit */ -#define NR_LDISCS 19 +#define NR_LDISCS 20 /* line disciplines */ #define N_TTY 0 @@ -47,6 +47,8 @@ #define N_SLCAN 17 /* Serial / USB serial CAN Adaptors */ #define N_PPS 18 /* Pulse per Second */ +#define N_V253 19 /* Codec control over voice modem */ + /* * This character is the same as _POSIX_VDISABLE: it cannot be used as * a c_cc[] character, but indicates that a particular special character @@ -185,7 +187,7 @@ struct tty_port; struct tty_port_operations { /* Return 1 if the carrier is raised */ int (*carrier_raised)(struct tty_port *port); - void (*raise_dtr_rts)(struct tty_port *port); + void (*dtr_rts)(struct tty_port *port, int raise); }; struct tty_port { @@ -201,6 +203,9 @@ struct tty_port { unsigned char *xmit_buf; /* Optional buffer */ int close_delay; /* Close port delay */ int closing_wait; /* Delay for output */ + int drain_delay; /* Set to zero if no pure time + based drain is needed else + set to size of fifo */ }; /* @@ -223,8 +228,11 @@ struct tty_struct { struct tty_driver *driver; const struct tty_operations *ops; int index; - /* The ldisc objects are protected by tty_ldisc_lock at the moment */ - struct tty_ldisc ldisc; + + /* Protects ldisc changes: Lock tty not pty */ + struct mutex ldisc_mutex; + struct tty_ldisc *ldisc; + struct mutex termios_mutex; spinlock_t ctrl_lock; /* Termios values are protected by the termios mutex */ @@ -311,6 +319,7 @@ struct tty_struct { #define TTY_CLOSING 7 /* ->close() in progress */ #define TTY_LDISC 9 /* Line discipline attached */ #define TTY_LDISC_CHANGING 10 /* Line discipline changing */ +#define TTY_LDISC_OPEN 11 /* Line discipline is open */ #define TTY_HW_COOK_OUT 14 /* Hardware can do output cooking */ #define TTY_HW_COOK_IN 15 /* Hardware can do input cooking */ #define TTY_PTY_LOCK 16 /* pty private */ @@ -387,6 +396,7 @@ extern void __do_SAK(struct tty_struct *tty); extern void disassociate_ctty(int priv); extern void no_tty(void); extern void tty_flip_buffer_push(struct tty_struct *tty); +extern void tty_flush_to_ldisc(struct tty_struct *tty); extern void tty_buffer_free_all(struct tty_struct *tty); extern void tty_buffer_flush(struct tty_struct *tty); extern void tty_buffer_init(struct tty_struct *tty); @@ -403,6 +413,7 @@ extern int tty_termios_hw_change(struct ktermios *a, struct ktermios *b); extern struct tty_ldisc *tty_ldisc_ref(struct tty_struct *); extern void tty_ldisc_deref(struct tty_ldisc *); extern struct tty_ldisc *tty_ldisc_ref_wait(struct tty_struct *); +extern void tty_ldisc_hangup(struct tty_struct *tty); extern const struct file_operations tty_ldiscs_proc_fops; extern void tty_wakeup(struct tty_struct *tty); @@ -425,6 +436,9 @@ extern struct tty_struct *tty_init_dev(struct tty_driver *driver, int idx, extern void tty_release_dev(struct file *filp); extern int tty_init_termios(struct tty_struct *tty); +extern struct tty_struct *tty_pair_get_tty(struct tty_struct *tty); +extern struct tty_struct 
*tty_pair_get_pty(struct tty_struct *tty); + extern struct mutex tty_mutex; extern void tty_write_unlock(struct tty_struct *tty); @@ -438,6 +452,7 @@ extern struct tty_struct *tty_port_tty_get(struct tty_port *port); extern void tty_port_tty_set(struct tty_port *port, struct tty_struct *tty); extern int tty_port_carrier_raised(struct tty_port *port); extern void tty_port_raise_dtr_rts(struct tty_port *port); +extern void tty_port_lower_dtr_rts(struct tty_port *port); extern void tty_port_hangup(struct tty_port *port); extern int tty_port_block_til_ready(struct tty_port *port, struct tty_struct *tty, struct file *filp); diff --git a/include/linux/tty_driver.h b/include/linux/tty_driver.h index 08e088334db..3566129384a 100644 --- a/include/linux/tty_driver.h +++ b/include/linux/tty_driver.h @@ -127,7 +127,8 @@ * the line discipline are close to full, and it should somehow * signal that no more characters should be sent to the tty. * - * Optional: Always invoke via tty_throttle(); + * Optional: Always invoke via tty_throttle(), called under the + * termios lock. * * void (*unthrottle)(struct tty_struct * tty); * @@ -135,7 +136,8 @@ * that characters can now be sent to the tty without fear of * overrunning the input buffers of the line disciplines. * - * Optional: Always invoke via tty_unthrottle(); + * Optional: Always invoke via tty_unthrottle(), called under the + * termios lock. * * void (*stop)(struct tty_struct *tty); * @@ -252,8 +254,6 @@ struct tty_operations { void (*set_ldisc)(struct tty_struct *tty); void (*wait_until_sent)(struct tty_struct *tty, int timeout); void (*send_xchar)(struct tty_struct *tty, char ch); - int (*read_proc)(char *page, char **start, off_t off, - int count, int *eof, void *data); int (*tiocmget)(struct tty_struct *tty, struct file *file); int (*tiocmset)(struct tty_struct *tty, struct file *file, unsigned int set, unsigned int clear); @@ -264,6 +264,7 @@ struct tty_operations { int (*poll_get_char)(struct tty_driver *driver, int line); void (*poll_put_char)(struct tty_driver *driver, int line, char ch); #endif + const struct file_operations *proc_fops; }; struct tty_driver { @@ -310,7 +311,8 @@ extern void tty_set_operations(struct tty_driver *driver, extern struct tty_driver *tty_find_polling_driver(char *name, int *line); extern void tty_driver_kref_put(struct tty_driver *driver); -extern inline struct tty_driver *tty_driver_kref_get(struct tty_driver *d) + +static inline struct tty_driver *tty_driver_kref_get(struct tty_driver *d) { kref_get(&d->kref); return d; diff --git a/include/linux/tty_ldisc.h b/include/linux/tty_ldisc.h index 40f38d89677..0c4ee9b88f8 100644 --- a/include/linux/tty_ldisc.h +++ b/include/linux/tty_ldisc.h @@ -144,7 +144,7 @@ struct tty_ldisc_ops { struct tty_ldisc { struct tty_ldisc_ops *ops; - int refcount; + atomic_t users; }; #define TTY_LDISC_MAGIC 0x5403 diff --git a/include/linux/types.h b/include/linux/types.h index 712ca53bc34..c42724f8c80 100644 --- a/include/linux/types.h +++ b/include/linux/types.h @@ -1,6 +1,9 @@ #ifndef _LINUX_TYPES_H #define _LINUX_TYPES_H +#include <asm/types.h> + +#ifndef __ASSEMBLY__ #ifdef __KERNEL__ #define DECLARE_BITMAP(name,bits) \ @@ -9,9 +12,8 @@ #endif #include <linux/posix_types.h> -#include <asm/types.h> -#ifndef __KERNEL_STRICT_NAMES +#ifdef __KERNEL__ typedef __u32 __kernel_dev_t; @@ -29,7 +31,6 @@ typedef __kernel_timer_t timer_t; typedef __kernel_clockid_t clockid_t; typedef __kernel_mqd_t mqd_t; -#ifdef __KERNEL__ typedef _Bool bool; typedef __kernel_uid32_t uid_t; @@ -45,14 +46,6 @@ 
typedef __kernel_old_uid_t old_uid_t; typedef __kernel_old_gid_t old_gid_t; #endif /* CONFIG_UID16 */ -/* libc5 includes this file to define uid_t, thus uid_t can never change - * when it is included by non-kernel code - */ -#else -typedef __kernel_uid_t uid_t; -typedef __kernel_gid_t gid_t; -#endif /* __KERNEL__ */ - #if defined(__GNUC__) typedef __kernel_loff_t loff_t; #endif @@ -138,7 +131,7 @@ typedef __s64 int64_t; * * blkcnt_t is the type of the inode's block count. */ -#ifdef CONFIG_LBD +#ifdef CONFIG_LBDAF typedef u64 sector_t; typedef u64 blkcnt_t; #else @@ -154,7 +147,7 @@ typedef unsigned long blkcnt_t; #define pgoff_t unsigned long #endif -#endif /* __KERNEL_STRICT_NAMES */ +#endif /* __KERNEL__ */ /* * Below are truly Linux-specific types that should never collide with @@ -212,5 +205,5 @@ struct ustat { }; #endif /* __KERNEL__ */ - +#endif /* __ASSEMBLY__ */ #endif /* _LINUX_TYPES_H */ diff --git a/include/linux/ucb1400.h b/include/linux/ucb1400.h index 970473bf8d5..ae779bb8cc0 100644 --- a/include/linux/ucb1400.h +++ b/include/linux/ucb1400.h @@ -73,6 +73,10 @@ #define UCB_ADC_DATA 0x68 #define UCB_ADC_DAT_VALID (1 << 15) + +#define UCB_FCSR 0x6c +#define UCB_FCSR_AVE (1 << 12) + #define UCB_ADC_DAT_MASK 0x3ff #define UCB_ID 0x7e @@ -134,28 +138,13 @@ static inline void ucb1400_adc_enable(struct snd_ac97 *ac97) ucb1400_reg_write(ac97, UCB_ADC_CR, UCB_ADC_ENA); } -static unsigned int ucb1400_adc_read(struct snd_ac97 *ac97, u16 adc_channel, - int adcsync) -{ - unsigned int val; - - if (adcsync) - adc_channel |= UCB_ADC_SYNC_ENA; - - ucb1400_reg_write(ac97, UCB_ADC_CR, UCB_ADC_ENA | adc_channel); - ucb1400_reg_write(ac97, UCB_ADC_CR, UCB_ADC_ENA | adc_channel | - UCB_ADC_START); - - while (!((val = ucb1400_reg_read(ac97, UCB_ADC_DATA)) - & UCB_ADC_DAT_VALID)) - schedule_timeout_uninterruptible(1); - - return val & UCB_ADC_DAT_MASK; -} - static inline void ucb1400_adc_disable(struct snd_ac97 *ac97) { ucb1400_reg_write(ac97, UCB_ADC_CR, 0); } + +unsigned int ucb1400_adc_read(struct snd_ac97 *ac97, u16 adc_channel, + int adcsync); + #endif diff --git a/include/linux/uio.h b/include/linux/uio.h index b7fe13883bd..98c114323a8 100644 --- a/include/linux/uio.h +++ b/include/linux/uio.h @@ -19,15 +19,6 @@ struct iovec __kernel_size_t iov_len; /* Must be size_t (1003.1g) */ }; -#ifdef __KERNEL__ - -struct kvec { - void *iov_base; /* and that should *never* hold a userland pointer */ - size_t iov_len; -}; - -#endif - /* * UIO_MAXIOV shall be at least 16 1003.1g (5.4.1.1) */ @@ -35,6 +26,13 @@ struct kvec { #define UIO_FASTIOV 8 #define UIO_MAXIOV 1024 +#ifdef __KERNEL__ + +struct kvec { + void *iov_base; /* and that should *never* hold a userland pointer */ + size_t iov_len; +}; + /* * Total number of bytes covered by an iovec. * @@ -53,5 +51,6 @@ static inline size_t iov_length(const struct iovec *iov, unsigned long nr_segs) } unsigned long iov_shorten(struct iovec *iov, unsigned long nr_segs, size_t to); +#endif #endif diff --git a/include/linux/uio_driver.h b/include/linux/uio_driver.h index a0bb6bd2e5c..5dcc9ff72f6 100644 --- a/include/linux/uio_driver.h +++ b/include/linux/uio_driver.h @@ -22,6 +22,7 @@ struct uio_map; /** * struct uio_mem - description of a UIO memory region + * @name: name of the memory region for identification * @addr: address of the device's memory * @size: size of IO * @memtype: type of memory addr points to @@ -29,6 +30,7 @@ struct uio_map; * @map: for use by the UIO core only. 
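The new 'name' members let a UIO driver label each mapping so userspace can tell the regions apart through sysfs. A sketch of how a driver's probe() might fill one in (hypothetical device name and addresses, illustrative only):

#include <linux/platform_device.h>
#include <linux/slab.h>
#include <linux/uio_driver.h>

static int example_uio_probe(struct platform_device *pdev)
{
	struct uio_info *info = kzalloc(sizeof(*info), GFP_KERNEL);
	int ret;

	if (!info)
		return -ENOMEM;

	info->name = "example-uio";
	info->version = "0.1";

	info->mem[0].name    = "registers";	/* exported via the map's sysfs entry */
	info->mem[0].addr    = 0xd0000000;	/* made-up physical address */
	info->mem[0].size    = 0x1000;
	info->mem[0].memtype = UIO_MEM_PHYS;

	ret = uio_register_device(&pdev->dev, info);
	if (ret) {
		kfree(info);
		return ret;
	}
	platform_set_drvdata(pdev, info);
	return 0;
}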
*/ struct uio_mem { + const char *name; unsigned long addr; unsigned long size; int memtype; @@ -42,12 +44,14 @@ struct uio_portio; /** * struct uio_port - description of a UIO port region + * @name: name of the port region for identification * @start: start of port region * @size: size of port region * @porttype: type of port (see UIO_PORT_* below) * @portio: for use by the UIO core only. */ struct uio_port { + const char *name; unsigned long start; unsigned long size; int porttype; diff --git a/include/linux/ultrasound.h b/include/linux/ultrasound.h index 6b7703e75ce..71339dc531c 100644 --- a/include/linux/ultrasound.h +++ b/include/linux/ultrasound.h @@ -34,7 +34,7 @@ * _GUS_VOICEOFF - Stops voice (no parameters) * _GUS_VOICEFADE - Stops the voice smoothly. * _GUS_VOICEMODE - Alters the voice mode, don't start or stop voice (P1=voice mode) - * _GUS_VOICEBALA - Sets voice balence (P1, 0=left, 7=middle and 15=right, default 7) + * _GUS_VOICEBALA - Sets voice balance (P1, 0=left, 7=middle and 15=right, default 7) * _GUS_VOICEFREQ - Sets voice (sample) playback frequency (P1=Hz) * _GUS_VOICEVOL - Sets voice volume (P1=volume, 0xfff=max, 0xeff=half, 0x000=off) * _GUS_VOICEVOL2 - Sets voice volume (P1=volume, 0xfff=max, 0xeff=half, 0x000=off) diff --git a/include/linux/usb.h b/include/linux/usb.h index 85ee9be9361..b1e3c2fbfe1 100644 --- a/include/linux/usb.h +++ b/include/linux/usb.h @@ -36,6 +36,7 @@ struct wusb_dev; * - configs have one (often) or more interfaces; * - interfaces have one (usually) or more settings; * - each interface setting has zero or (usually) more endpoints. + * - a SuperSpeed endpoint has a companion descriptor * * And there might be other descriptors mixed in with those. * @@ -44,6 +45,19 @@ struct wusb_dev; struct ep_device; +/* For SS devices */ +/** + * struct usb_host_ss_ep_comp - Valid for SuperSpeed devices only + * @desc: endpoint companion descriptor, wMaxPacketSize in native byteorder + * @extra: descriptors following this endpoint companion descriptor + * @extralen: how many bytes of "extra" are valid + */ +struct usb_host_ss_ep_comp { + struct usb_ss_ep_comp_descriptor desc; + unsigned char *extra; /* Extra descriptors */ + int extralen; +}; + /** * struct usb_host_endpoint - host-side endpoint descriptor and queue * @desc: descriptor for this endpoint, wMaxPacketSize in native byteorder @@ -51,6 +65,7 @@ struct ep_device; * @hcpriv: for use by HCD; typically holds hardware dma queue head (QH) * with one or more transfer descriptors (TDs) per urb * @ep_dev: ep_device for sysfs info + * @ss_ep_comp: companion descriptor information for this endpoint * @extra: descriptors following this endpoint in the configuration * @extralen: how many bytes of "extra" are valid * @enabled: URBs may be submitted to this endpoint @@ -63,6 +78,7 @@ struct usb_host_endpoint { struct list_head urb_list; void *hcpriv; struct ep_device *ep_dev; /* For sysfs info */ + struct usb_host_ss_ep_comp *ss_ep_comp; /* For SS devices */ unsigned char *extra; /* Extra descriptors */ int extralen; @@ -336,7 +352,6 @@ struct usb_bus { #ifdef CONFIG_USB_DEVICEFS struct dentry *usbfs_dentry; /* usbfs dentry entry for the bus */ #endif - struct device *dev; /* device for this bus */ #if defined(CONFIG_USB_MON) || defined(CONFIG_USB_MON_MODULE) struct mon_bus *mon_bus; /* non-null when associated */ @@ -363,6 +378,7 @@ struct usb_tt; * struct usb_device - kernel's representation of a USB device * @devnum: device number; address on a USB bus * @devpath: device ID string for use in messages (e.g., 
/port/...) + * @route: tree topology hex string for use with xHCI * @state: device state: configured, not attached, etc. * @speed: device speed: high/full/low (or error) * @tt: Transaction Translator info; used with low/full speed dev, highspeed hub @@ -418,6 +434,9 @@ struct usb_tt; * @autosuspend_disabled: autosuspend disabled by the user * @autoresume_disabled: autoresume disabled by the user * @skip_sys_resume: skip the next system resume + * @wusb_dev: if this is a Wireless USB device, link to the WUSB + * specific data for the device. + * @slot_id: Slot ID assigned by xHCI * * Notes: * Usbcore drivers should not set usbdev->state directly. Instead use @@ -426,6 +445,7 @@ struct usb_tt; struct usb_device { int devnum; char devpath [16]; + u32 route; enum usb_device_state state; enum usb_device_speed speed; @@ -501,6 +521,7 @@ struct usb_device { unsigned skip_sys_resume:1; #endif struct wusb_dev *wusb_dev; + int slot_id; }; #define to_usb_device(d) container_of(d, struct usb_device, dev) @@ -641,186 +662,6 @@ static inline int usb_make_path(struct usb_device *dev, char *buf, size_t size) /*-------------------------------------------------------------------------*/ -/** - * usb_endpoint_num - get the endpoint's number - * @epd: endpoint to be checked - * - * Returns @epd's number: 0 to 15. - */ -static inline int usb_endpoint_num(const struct usb_endpoint_descriptor *epd) -{ - return epd->bEndpointAddress & USB_ENDPOINT_NUMBER_MASK; -} - -/** - * usb_endpoint_type - get the endpoint's transfer type - * @epd: endpoint to be checked - * - * Returns one of USB_ENDPOINT_XFER_{CONTROL, ISOC, BULK, INT} according - * to @epd's transfer type. - */ -static inline int usb_endpoint_type(const struct usb_endpoint_descriptor *epd) -{ - return epd->bmAttributes & USB_ENDPOINT_XFERTYPE_MASK; -} - -/** - * usb_endpoint_dir_in - check if the endpoint has IN direction - * @epd: endpoint to be checked - * - * Returns true if the endpoint is of type IN, otherwise it returns false. - */ -static inline int usb_endpoint_dir_in(const struct usb_endpoint_descriptor *epd) -{ - return ((epd->bEndpointAddress & USB_ENDPOINT_DIR_MASK) == USB_DIR_IN); -} - -/** - * usb_endpoint_dir_out - check if the endpoint has OUT direction - * @epd: endpoint to be checked - * - * Returns true if the endpoint is of type OUT, otherwise it returns false. - */ -static inline int usb_endpoint_dir_out( - const struct usb_endpoint_descriptor *epd) -{ - return ((epd->bEndpointAddress & USB_ENDPOINT_DIR_MASK) == USB_DIR_OUT); -} - -/** - * usb_endpoint_xfer_bulk - check if the endpoint has bulk transfer type - * @epd: endpoint to be checked - * - * Returns true if the endpoint is of type bulk, otherwise it returns false. - */ -static inline int usb_endpoint_xfer_bulk( - const struct usb_endpoint_descriptor *epd) -{ - return ((epd->bmAttributes & USB_ENDPOINT_XFERTYPE_MASK) == - USB_ENDPOINT_XFER_BULK); -} - -/** - * usb_endpoint_xfer_control - check if the endpoint has control transfer type - * @epd: endpoint to be checked - * - * Returns true if the endpoint is of type control, otherwise it returns false. - */ -static inline int usb_endpoint_xfer_control( - const struct usb_endpoint_descriptor *epd) -{ - return ((epd->bmAttributes & USB_ENDPOINT_XFERTYPE_MASK) == - USB_ENDPOINT_XFER_CONTROL); -} - -/** - * usb_endpoint_xfer_int - check if the endpoint has interrupt transfer type - * @epd: endpoint to be checked - * - * Returns true if the endpoint is of type interrupt, otherwise it returns - * false. 
- */ -static inline int usb_endpoint_xfer_int( - const struct usb_endpoint_descriptor *epd) -{ - return ((epd->bmAttributes & USB_ENDPOINT_XFERTYPE_MASK) == - USB_ENDPOINT_XFER_INT); -} - -/** - * usb_endpoint_xfer_isoc - check if the endpoint has isochronous transfer type - * @epd: endpoint to be checked - * - * Returns true if the endpoint is of type isochronous, otherwise it returns - * false. - */ -static inline int usb_endpoint_xfer_isoc( - const struct usb_endpoint_descriptor *epd) -{ - return ((epd->bmAttributes & USB_ENDPOINT_XFERTYPE_MASK) == - USB_ENDPOINT_XFER_ISOC); -} - -/** - * usb_endpoint_is_bulk_in - check if the endpoint is bulk IN - * @epd: endpoint to be checked - * - * Returns true if the endpoint has bulk transfer type and IN direction, - * otherwise it returns false. - */ -static inline int usb_endpoint_is_bulk_in( - const struct usb_endpoint_descriptor *epd) -{ - return (usb_endpoint_xfer_bulk(epd) && usb_endpoint_dir_in(epd)); -} - -/** - * usb_endpoint_is_bulk_out - check if the endpoint is bulk OUT - * @epd: endpoint to be checked - * - * Returns true if the endpoint has bulk transfer type and OUT direction, - * otherwise it returns false. - */ -static inline int usb_endpoint_is_bulk_out( - const struct usb_endpoint_descriptor *epd) -{ - return (usb_endpoint_xfer_bulk(epd) && usb_endpoint_dir_out(epd)); -} - -/** - * usb_endpoint_is_int_in - check if the endpoint is interrupt IN - * @epd: endpoint to be checked - * - * Returns true if the endpoint has interrupt transfer type and IN direction, - * otherwise it returns false. - */ -static inline int usb_endpoint_is_int_in( - const struct usb_endpoint_descriptor *epd) -{ - return (usb_endpoint_xfer_int(epd) && usb_endpoint_dir_in(epd)); -} - -/** - * usb_endpoint_is_int_out - check if the endpoint is interrupt OUT - * @epd: endpoint to be checked - * - * Returns true if the endpoint has interrupt transfer type and OUT direction, - * otherwise it returns false. - */ -static inline int usb_endpoint_is_int_out( - const struct usb_endpoint_descriptor *epd) -{ - return (usb_endpoint_xfer_int(epd) && usb_endpoint_dir_out(epd)); -} - -/** - * usb_endpoint_is_isoc_in - check if the endpoint is isochronous IN - * @epd: endpoint to be checked - * - * Returns true if the endpoint has isochronous transfer type and IN direction, - * otherwise it returns false. - */ -static inline int usb_endpoint_is_isoc_in( - const struct usb_endpoint_descriptor *epd) -{ - return (usb_endpoint_xfer_isoc(epd) && usb_endpoint_dir_in(epd)); -} - -/** - * usb_endpoint_is_isoc_out - check if the endpoint is isochronous OUT - * @epd: endpoint to be checked - * - * Returns true if the endpoint has isochronous transfer type and OUT direction, - * otherwise it returns false. - */ -static inline int usb_endpoint_is_isoc_out( - const struct usb_endpoint_descriptor *epd) -{ - return (usb_endpoint_xfer_isoc(epd) && usb_endpoint_dir_out(epd)); -} - -/*-------------------------------------------------------------------------*/ - #define USB_DEVICE_ID_MATCH_DEVICE \ (USB_DEVICE_ID_MATCH_VENDOR | USB_DEVICE_ID_MATCH_PRODUCT) #define USB_DEVICE_ID_MATCH_DEV_RANGE \ @@ -1081,6 +922,8 @@ extern struct bus_type usb_bus_type; /** * struct usb_class_driver - identifies a USB driver that wants to use the USB major number * @name: the usb class device name for this driver. Will show up in sysfs. + * @nodename: Callback to provide a naming hint for a possible + * device node to create. * @fops: pointer to the struct file_operations of this driver. 
* @minor_base: the start of the minor range for this driver. * @@ -1090,6 +933,7 @@ extern struct bus_type usb_bus_type; */ struct usb_class_driver { char *name; + char *(*nodename)(struct device *dev); const struct file_operations *fops; int minor_base; }; @@ -1202,6 +1046,8 @@ typedef void (*usb_complete_t)(struct urb *); * the device driver is saying that it provided this DMA address, * which the host controller driver should use in preference to the * transfer_buffer. + * @sg: scatter gather buffer list + * @num_sgs: number of entries in the sg list * @transfer_buffer_length: How big is transfer_buffer. The transfer may * be broken up into chunks according to the current maximum packet * size for the endpoint, which is a function of the configuration @@ -1219,7 +1065,9 @@ typedef void (*usb_complete_t)(struct urb *); * @setup_dma: For control transfers with URB_NO_SETUP_DMA_MAP set, the * device driver has provided this DMA address for the setup packet. * The host controller driver should use this in preference to - * setup_packet. + * setup_packet, but the HCD may chose to ignore the address if it must + * copy the setup packet into internal structures. Therefore, setup_packet + * must always point to a valid buffer. * @start_frame: Returns the initial frame for isochronous transfers. * @number_of_packets: Lists the number of ISO transfer buffers. * @interval: Specifies the polling interval for interrupt or isochronous @@ -1355,8 +1203,10 @@ struct urb { unsigned int transfer_flags; /* (in) URB_SHORT_NOT_OK | ...*/ void *transfer_buffer; /* (in) associated data buffer */ dma_addr_t transfer_dma; /* (in) dma addr for transfer_buffer */ - int transfer_buffer_length; /* (in) data buffer length */ - int actual_length; /* (return) actual transfer length */ + struct usb_sg_request *sg; /* (in) scatter gather buffer list */ + int num_sgs; /* (in) number of entries in the sg list */ + u32 transfer_buffer_length; /* (in) data buffer length */ + u32 actual_length; /* (return) actual transfer length */ unsigned char *setup_packet; /* (in) setup packet (control only) */ dma_addr_t setup_dma; /* (in) dma addr for setup_packet */ int start_frame; /* (modify) start frame (ISO) */ @@ -1565,6 +1415,7 @@ extern int usb_string(struct usb_device *dev, int index, extern int usb_clear_halt(struct usb_device *dev, int pipe); extern int usb_reset_configuration(struct usb_device *dev); extern int usb_set_interface(struct usb_device *dev, int ifnum, int alternate); +extern void usb_reset_endpoint(struct usb_device *dev, unsigned int epaddr); /* this request isn't really synchronous, but it belongs with the others */ extern int usb_driver_set_configuration(struct usb_device *udev, int config); @@ -1599,8 +1450,8 @@ struct usb_sg_request { int status; size_t bytes; - /* - * members below are private: to usbcore, + /* private: + * members below are private to usbcore, * and are not provided for driver access! */ spinlock_t lock; @@ -1669,14 +1520,6 @@ void usb_sg_wait(struct usb_sg_request *io); #define usb_pipecontrol(pipe) (usb_pipetype((pipe)) == PIPE_CONTROL) #define usb_pipebulk(pipe) (usb_pipetype((pipe)) == PIPE_BULK) -/* The D0/D1 toggle bits ... 
USE WITH CAUTION (they're almost hcd-internal) */ -#define usb_gettoggle(dev, ep, out) (((dev)->toggle[out] >> (ep)) & 1) -#define usb_dotoggle(dev, ep, out) ((dev)->toggle[out] ^= (1 << (ep))) -#define usb_settoggle(dev, ep, out, bit) \ - ((dev)->toggle[out] = ((dev)->toggle[out] & ~(1 << (ep))) | \ - ((bit) << (ep))) - - static inline unsigned int __create_pipe(struct usb_device *dev, unsigned int endpoint) { @@ -1743,6 +1586,9 @@ extern void usb_unregister_notify(struct notifier_block *nb); #define err(format, arg...) printk(KERN_ERR KBUILD_MODNAME ": " \ format "\n" , ## arg) +/* debugfs stuff */ +extern struct dentry *usb_debug_root; + #endif /* __KERNEL__ */ #endif diff --git a/include/linux/usb/audio.h b/include/linux/usb/audio.h index 8cb025fef63..b5744bc218a 100644 --- a/include/linux/usb/audio.h +++ b/include/linux/usb/audio.h @@ -24,10 +24,75 @@ #define USB_SUBCLASS_AUDIOCONTROL 0x01 #define USB_SUBCLASS_AUDIOSTREAMING 0x02 #define USB_SUBCLASS_MIDISTREAMING 0x03 +#define USB_SUBCLASS_VENDOR_SPEC 0xff +/* A.5 Audio Class-Specific AC interface Descriptor Subtypes*/ +#define HEADER 0x01 +#define INPUT_TERMINAL 0x02 +#define OUTPUT_TERMINAL 0x03 +#define MIXER_UNIT 0x04 +#define SELECTOR_UNIT 0x05 +#define FEATURE_UNIT 0x06 +#define PROCESSING_UNIT 0x07 +#define EXTENSION_UNIT 0x08 + +#define AS_GENERAL 0x01 +#define FORMAT_TYPE 0x02 +#define FORMAT_SPECIFIC 0x03 + +#define EP_GENERAL 0x01 + +#define MS_GENERAL 0x01 +#define MIDI_IN_JACK 0x02 +#define MIDI_OUT_JACK 0x03 + +/* endpoint attributes */ +#define EP_ATTR_MASK 0x0c +#define EP_ATTR_ASYNC 0x04 +#define EP_ATTR_ADAPTIVE 0x08 +#define EP_ATTR_SYNC 0x0c + +/* cs endpoint attributes */ +#define EP_CS_ATTR_SAMPLE_RATE 0x01 +#define EP_CS_ATTR_PITCH_CONTROL 0x02 +#define EP_CS_ATTR_FILL_MAX 0x80 + +/* Audio Class specific Request Codes */ +#define USB_AUDIO_SET_INTF 0x21 +#define USB_AUDIO_SET_ENDPOINT 0x22 +#define USB_AUDIO_GET_INTF 0xa1 +#define USB_AUDIO_GET_ENDPOINT 0xa2 + +#define SET_ 0x00 +#define GET_ 0x80 + +#define _CUR 0x1 +#define _MIN 0x2 +#define _MAX 0x3 +#define _RES 0x4 +#define _MEM 0x5 + +#define SET_CUR (SET_ | _CUR) +#define GET_CUR (GET_ | _CUR) +#define SET_MIN (SET_ | _MIN) +#define GET_MIN (GET_ | _MIN) +#define SET_MAX (SET_ | _MAX) +#define GET_MAX (GET_ | _MAX) +#define SET_RES (SET_ | _RES) +#define GET_RES (GET_ | _RES) +#define SET_MEM (SET_ | _MEM) +#define GET_MEM (GET_ | _MEM) + +#define GET_STAT 0xff + +#define USB_AC_TERMINAL_UNDEFINED 0x100 +#define USB_AC_TERMINAL_STREAMING 0x101 +#define USB_AC_TERMINAL_VENDOR_SPEC 0x1FF + +/* Terminal Control Selectors */ /* 4.3.2 Class-Specific AC Interface Descriptor */ struct usb_ac_header_descriptor { - __u8 bLength; /* 8+n */ + __u8 bLength; /* 8 + n */ __u8 bDescriptorType; /* USB_DT_CS_INTERFACE */ __u8 bDescriptorSubtype; /* USB_MS_HEADER */ __le16 bcdADC; /* 0x0100 */ @@ -36,7 +101,7 @@ struct usb_ac_header_descriptor { __u8 baInterfaceNr[]; /* [n] */ } __attribute__ ((packed)); -#define USB_DT_AC_HEADER_SIZE(n) (8+(n)) +#define USB_DT_AC_HEADER_SIZE(n) (8 + (n)) /* As above, but more useful for defining your own descriptors: */ #define DECLARE_USB_AC_HEADER_DESCRIPTOR(n) \ @@ -50,4 +115,200 @@ struct usb_ac_header_descriptor_##n { \ __u8 baInterfaceNr[n]; \ } __attribute__ ((packed)) +/* 4.3.2.1 Input Terminal Descriptor */ +struct usb_input_terminal_descriptor { + __u8 bLength; /* in bytes: 12 */ + __u8 bDescriptorType; /* CS_INTERFACE descriptor type */ + __u8 bDescriptorSubtype; /* INPUT_TERMINAL descriptor subtype */ + __u8 bTerminalID; /* 
Constant uniquely terminal ID */ + __le16 wTerminalType; /* USB Audio Terminal Types */ + __u8 bAssocTerminal; /* ID of the Output Terminal associated */ + __u8 bNrChannels; /* Number of logical output channels */ + __le16 wChannelConfig; + __u8 iChannelNames; + __u8 iTerminal; +} __attribute__ ((packed)); + +#define USB_DT_AC_INPUT_TERMINAL_SIZE 12 + +#define USB_AC_INPUT_TERMINAL_UNDEFINED 0x200 +#define USB_AC_INPUT_TERMINAL_MICROPHONE 0x201 +#define USB_AC_INPUT_TERMINAL_DESKTOP_MICROPHONE 0x202 +#define USB_AC_INPUT_TERMINAL_PERSONAL_MICROPHONE 0x203 +#define USB_AC_INPUT_TERMINAL_OMNI_DIR_MICROPHONE 0x204 +#define USB_AC_INPUT_TERMINAL_MICROPHONE_ARRAY 0x205 +#define USB_AC_INPUT_TERMINAL_PROC_MICROPHONE_ARRAY 0x206 + +/* 4.3.2.2 Output Terminal Descriptor */ +struct usb_output_terminal_descriptor { + __u8 bLength; /* in bytes: 9 */ + __u8 bDescriptorType; /* CS_INTERFACE descriptor type */ + __u8 bDescriptorSubtype; /* OUTPUT_TERMINAL descriptor subtype */ + __u8 bTerminalID; /* Constant uniquely terminal ID */ + __le16 wTerminalType; /* USB Audio Terminal Types */ + __u8 bAssocTerminal; /* ID of the Input Terminal associated */ + __u8 bSourceID; /* ID of the connected Unit or Terminal*/ + __u8 iTerminal; +} __attribute__ ((packed)); + +#define USB_DT_AC_OUTPUT_TERMINAL_SIZE 9 + +#define USB_AC_OUTPUT_TERMINAL_UNDEFINED 0x300 +#define USB_AC_OUTPUT_TERMINAL_SPEAKER 0x301 +#define USB_AC_OUTPUT_TERMINAL_HEADPHONES 0x302 +#define USB_AC_OUTPUT_TERMINAL_HEAD_MOUNTED_DISPLAY_AUDIO 0x303 +#define USB_AC_OUTPUT_TERMINAL_DESKTOP_SPEAKER 0x304 +#define USB_AC_OUTPUT_TERMINAL_ROOM_SPEAKER 0x305 +#define USB_AC_OUTPUT_TERMINAL_COMMUNICATION_SPEAKER 0x306 +#define USB_AC_OUTPUT_TERMINAL_LOW_FREQ_EFFECTS_SPEAKER 0x307 + +/* Set bControlSize = 2 as default setting */ +#define USB_DT_AC_FEATURE_UNIT_SIZE(ch) (7 + ((ch) + 1) * 2) + +/* As above, but more useful for defining your own descriptors: */ +#define DECLARE_USB_AC_FEATURE_UNIT_DESCRIPTOR(ch) \ +struct usb_ac_feature_unit_descriptor_##ch { \ + __u8 bLength; \ + __u8 bDescriptorType; \ + __u8 bDescriptorSubtype; \ + __u8 bUnitID; \ + __u8 bSourceID; \ + __u8 bControlSize; \ + __le16 bmaControls[ch + 1]; \ + __u8 iFeature; \ +} __attribute__ ((packed)) + +/* 4.5.2 Class-Specific AS Interface Descriptor */ +struct usb_as_header_descriptor { + __u8 bLength; /* in bytes: 7 */ + __u8 bDescriptorType; /* USB_DT_CS_INTERFACE */ + __u8 bDescriptorSubtype; /* AS_GENERAL */ + __u8 bTerminalLink; /* Terminal ID of connected Terminal */ + __u8 bDelay; /* Delay introduced by the data path */ + __le16 wFormatTag; /* The Audio Data Format */ +} __attribute__ ((packed)); + +#define USB_DT_AS_HEADER_SIZE 7 + +#define USB_AS_AUDIO_FORMAT_TYPE_I_UNDEFINED 0x0 +#define USB_AS_AUDIO_FORMAT_TYPE_I_PCM 0x1 +#define USB_AS_AUDIO_FORMAT_TYPE_I_PCM8 0x2 +#define USB_AS_AUDIO_FORMAT_TYPE_I_IEEE_FLOAT 0x3 +#define USB_AS_AUDIO_FORMAT_TYPE_I_ALAW 0x4 +#define USB_AS_AUDIO_FORMAT_TYPE_I_MULAW 0x5 + +struct usb_as_format_type_i_continuous_descriptor { + __u8 bLength; /* in bytes: 8 + (ns * 3) */ + __u8 bDescriptorType; /* USB_DT_CS_INTERFACE */ + __u8 bDescriptorSubtype; /* FORMAT_TYPE */ + __u8 bFormatType; /* FORMAT_TYPE_1 */ + __u8 bNrChannels; /* physical channels in the stream */ + __u8 bSubframeSize; /* */ + __u8 bBitResolution; + __u8 bSamFreqType; + __u8 tLowerSamFreq[3]; + __u8 tUpperSamFreq[3]; +} __attribute__ ((packed)); + +#define USB_AS_FORMAT_TYPE_I_CONTINUOUS_DESC_SIZE 14 + +struct usb_as_formate_type_i_discrete_descriptor { + __u8 bLength; /* in bytes: 8 
+ (ns * 3) */ + __u8 bDescriptorType; /* USB_DT_CS_INTERFACE */ + __u8 bDescriptorSubtype; /* FORMAT_TYPE */ + __u8 bFormatType; /* FORMAT_TYPE_1 */ + __u8 bNrChannels; /* physical channels in the stream */ + __u8 bSubframeSize; /* */ + __u8 bBitResolution; + __u8 bSamFreqType; + __u8 tSamFreq[][3]; +} __attribute__ ((packed)); + +#define DECLARE_USB_AS_FORMAT_TYPE_I_DISCRETE_DESC(n) \ +struct usb_as_formate_type_i_discrete_descriptor_##n { \ + __u8 bLength; \ + __u8 bDescriptorType; \ + __u8 bDescriptorSubtype; \ + __u8 bFormatType; \ + __u8 bNrChannels; \ + __u8 bSubframeSize; \ + __u8 bBitResolution; \ + __u8 bSamFreqType; \ + __u8 tSamFreq[n][3]; \ +} __attribute__ ((packed)) + +#define USB_AS_FORMAT_TYPE_I_DISCRETE_DESC_SIZE(n) (8 + (n * 3)) + +#define USB_AS_FORMAT_TYPE_UNDEFINED 0x0 +#define USB_AS_FORMAT_TYPE_I 0x1 +#define USB_AS_FORMAT_TYPE_II 0x2 +#define USB_AS_FORMAT_TYPE_III 0x3 + +#define USB_AS_ENDPOINT_ASYNC (1 << 2) +#define USB_AS_ENDPOINT_ADAPTIVE (2 << 2) +#define USB_AS_ENDPOINT_SYNC (3 << 2) + +struct usb_as_iso_endpoint_descriptor { + __u8 bLength; /* in bytes: 7 */ + __u8 bDescriptorType; /* USB_DT_CS_ENDPOINT */ + __u8 bDescriptorSubtype; /* EP_GENERAL */ + __u8 bmAttributes; + __u8 bLockDelayUnits; + __le16 wLockDelay; +}; +#define USB_AS_ISO_ENDPOINT_DESC_SIZE 7 + +#define FU_CONTROL_UNDEFINED 0x00 +#define MUTE_CONTROL 0x01 +#define VOLUME_CONTROL 0x02 +#define BASS_CONTROL 0x03 +#define MID_CONTROL 0x04 +#define TREBLE_CONTROL 0x05 +#define GRAPHIC_EQUALIZER_CONTROL 0x06 +#define AUTOMATIC_GAIN_CONTROL 0x07 +#define DELAY_CONTROL 0x08 +#define BASS_BOOST_CONTROL 0x09 +#define LOUDNESS_CONTROL 0x0a + +#define FU_MUTE (1 << (MUTE_CONTROL - 1)) +#define FU_VOLUME (1 << (VOLUME_CONTROL - 1)) +#define FU_BASS (1 << (BASS_CONTROL - 1)) +#define FU_MID (1 << (MID_CONTROL - 1)) +#define FU_TREBLE (1 << (TREBLE_CONTROL - 1)) +#define FU_GRAPHIC_EQ (1 << (GRAPHIC_EQUALIZER_CONTROL - 1)) +#define FU_AUTO_GAIN (1 << (AUTOMATIC_GAIN_CONTROL - 1)) +#define FU_DELAY (1 << (DELAY_CONTROL - 1)) +#define FU_BASS_BOOST (1 << (BASS_BOOST_CONTROL - 1)) +#define FU_LOUDNESS (1 << (LOUDNESS_CONTROL - 1)) + +struct usb_audio_control { + struct list_head list; + const char *name; + u8 type; + int data[5]; + int (*set)(struct usb_audio_control *con, u8 cmd, int value); + int (*get)(struct usb_audio_control *con, u8 cmd); +}; + +static inline int generic_set_cmd(struct usb_audio_control *con, u8 cmd, int value) +{ + con->data[cmd] = value; + + return 0; +} + +static inline int generic_get_cmd(struct usb_audio_control *con, u8 cmd) +{ + return con->data[cmd]; +} + +struct usb_audio_control_selector { + struct list_head list; + struct list_head control; + u8 id; + const char *name; + u8 type; + struct usb_descriptor_header *desc; +}; + #endif /* __LINUX_USB_AUDIO_H */ diff --git a/include/linux/usb/cdc.h b/include/linux/usb/cdc.h index 18a729343ff..c24124a42ce 100644 --- a/include/linux/usb/cdc.h +++ b/include/linux/usb/cdc.h @@ -9,12 +9,15 @@ #ifndef __LINUX_USB_CDC_H #define __LINUX_USB_CDC_H +#include <linux/types.h> + #define USB_CDC_SUBCLASS_ACM 0x02 #define USB_CDC_SUBCLASS_ETHERNET 0x06 #define USB_CDC_SUBCLASS_WHCM 0x08 #define USB_CDC_SUBCLASS_DMM 0x09 #define USB_CDC_SUBCLASS_MDLM 0x0a #define USB_CDC_SUBCLASS_OBEX 0x0b +#define USB_CDC_SUBCLASS_EEM 0x0c #define USB_CDC_PROTO_NONE 0 @@ -26,6 +29,8 @@ #define USB_CDC_ACM_PROTO_AT_CDMA 6 #define USB_CDC_ACM_PROTO_VENDOR 0xff +#define USB_CDC_PROTO_EEM 7 + 
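The new EEM subclass and protocol codes are what a CDC Ethernet Emulation Model driver would key its match table on; a minimal sketch (hypothetical table name, illustrative only):

#include <linux/module.h>
#include <linux/usb.h>
#include <linux/usb/cdc.h>

static const struct usb_device_id eem_ids[] = {
	{ USB_INTERFACE_INFO(USB_CLASS_COMM, USB_CDC_SUBCLASS_EEM,
			     USB_CDC_PROTO_EEM) },
	{ }	/* terminating entry */
};
MODULE_DEVICE_TABLE(usb, eem_ids);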
/*-------------------------------------------------------------------------*/ /* diff --git a/include/linux/usb/ch9.h b/include/linux/usb/ch9.h index 9b42baed390..93223638f70 100644 --- a/include/linux/usb/ch9.h +++ b/include/linux/usb/ch9.h @@ -102,7 +102,7 @@ #define USB_REQ_LOOPBACK_DATA_READ 0x16 #define USB_REQ_SET_INTERFACE_DS 0x17 -/* The Link Power Mangement (LPM) ECN defines USB_REQ_TEST_AND_SET command, +/* The Link Power Management (LPM) ECN defines USB_REQ_TEST_AND_SET command, * used by hubs to put ports into a new L1 suspend state, except that it * forgot to define its number ... */ @@ -191,6 +191,8 @@ struct usb_ctrlrequest { #define USB_DT_WIRE_ADAPTER 0x21 #define USB_DT_RPIPE 0x22 #define USB_DT_CS_RADIO_CONTROL 0x23 +/* From the USB 3.0 spec */ +#define USB_DT_SS_ENDPOINT_COMP 0x30 /* Conventional codes for class-specific descriptors. The convention is * defined in the USB "Common Class" Spec (3.11). Individual class specs @@ -353,6 +355,199 @@ struct usb_endpoint_descriptor { #define USB_ENDPOINT_XFER_INT 3 #define USB_ENDPOINT_MAX_ADJUSTABLE 0x80 +/*-------------------------------------------------------------------------*/ + +/** + * usb_endpoint_num - get the endpoint's number + * @epd: endpoint to be checked + * + * Returns @epd's number: 0 to 15. + */ +static inline int usb_endpoint_num(const struct usb_endpoint_descriptor *epd) +{ + return epd->bEndpointAddress & USB_ENDPOINT_NUMBER_MASK; +} + +/** + * usb_endpoint_type - get the endpoint's transfer type + * @epd: endpoint to be checked + * + * Returns one of USB_ENDPOINT_XFER_{CONTROL, ISOC, BULK, INT} according + * to @epd's transfer type. + */ +static inline int usb_endpoint_type(const struct usb_endpoint_descriptor *epd) +{ + return epd->bmAttributes & USB_ENDPOINT_XFERTYPE_MASK; +} + +/** + * usb_endpoint_dir_in - check if the endpoint has IN direction + * @epd: endpoint to be checked + * + * Returns true if the endpoint is of type IN, otherwise it returns false. + */ +static inline int usb_endpoint_dir_in(const struct usb_endpoint_descriptor *epd) +{ + return ((epd->bEndpointAddress & USB_ENDPOINT_DIR_MASK) == USB_DIR_IN); +} + +/** + * usb_endpoint_dir_out - check if the endpoint has OUT direction + * @epd: endpoint to be checked + * + * Returns true if the endpoint is of type OUT, otherwise it returns false. + */ +static inline int usb_endpoint_dir_out( + const struct usb_endpoint_descriptor *epd) +{ + return ((epd->bEndpointAddress & USB_ENDPOINT_DIR_MASK) == USB_DIR_OUT); +} + +/** + * usb_endpoint_xfer_bulk - check if the endpoint has bulk transfer type + * @epd: endpoint to be checked + * + * Returns true if the endpoint is of type bulk, otherwise it returns false. + */ +static inline int usb_endpoint_xfer_bulk( + const struct usb_endpoint_descriptor *epd) +{ + return ((epd->bmAttributes & USB_ENDPOINT_XFERTYPE_MASK) == + USB_ENDPOINT_XFER_BULK); +} + +/** + * usb_endpoint_xfer_control - check if the endpoint has control transfer type + * @epd: endpoint to be checked + * + * Returns true if the endpoint is of type control, otherwise it returns false. + */ +static inline int usb_endpoint_xfer_control( + const struct usb_endpoint_descriptor *epd) +{ + return ((epd->bmAttributes & USB_ENDPOINT_XFERTYPE_MASK) == + USB_ENDPOINT_XFER_CONTROL); +} + +/** + * usb_endpoint_xfer_int - check if the endpoint has interrupt transfer type + * @epd: endpoint to be checked + * + * Returns true if the endpoint is of type interrupt, otherwise it returns + * false. 
+ */ +static inline int usb_endpoint_xfer_int( + const struct usb_endpoint_descriptor *epd) +{ + return ((epd->bmAttributes & USB_ENDPOINT_XFERTYPE_MASK) == + USB_ENDPOINT_XFER_INT); +} + +/** + * usb_endpoint_xfer_isoc - check if the endpoint has isochronous transfer type + * @epd: endpoint to be checked + * + * Returns true if the endpoint is of type isochronous, otherwise it returns + * false. + */ +static inline int usb_endpoint_xfer_isoc( + const struct usb_endpoint_descriptor *epd) +{ + return ((epd->bmAttributes & USB_ENDPOINT_XFERTYPE_MASK) == + USB_ENDPOINT_XFER_ISOC); +} + +/** + * usb_endpoint_is_bulk_in - check if the endpoint is bulk IN + * @epd: endpoint to be checked + * + * Returns true if the endpoint has bulk transfer type and IN direction, + * otherwise it returns false. + */ +static inline int usb_endpoint_is_bulk_in( + const struct usb_endpoint_descriptor *epd) +{ + return (usb_endpoint_xfer_bulk(epd) && usb_endpoint_dir_in(epd)); +} + +/** + * usb_endpoint_is_bulk_out - check if the endpoint is bulk OUT + * @epd: endpoint to be checked + * + * Returns true if the endpoint has bulk transfer type and OUT direction, + * otherwise it returns false. + */ +static inline int usb_endpoint_is_bulk_out( + const struct usb_endpoint_descriptor *epd) +{ + return (usb_endpoint_xfer_bulk(epd) && usb_endpoint_dir_out(epd)); +} + +/** + * usb_endpoint_is_int_in - check if the endpoint is interrupt IN + * @epd: endpoint to be checked + * + * Returns true if the endpoint has interrupt transfer type and IN direction, + * otherwise it returns false. + */ +static inline int usb_endpoint_is_int_in( + const struct usb_endpoint_descriptor *epd) +{ + return (usb_endpoint_xfer_int(epd) && usb_endpoint_dir_in(epd)); +} + +/** + * usb_endpoint_is_int_out - check if the endpoint is interrupt OUT + * @epd: endpoint to be checked + * + * Returns true if the endpoint has interrupt transfer type and OUT direction, + * otherwise it returns false. + */ +static inline int usb_endpoint_is_int_out( + const struct usb_endpoint_descriptor *epd) +{ + return (usb_endpoint_xfer_int(epd) && usb_endpoint_dir_out(epd)); +} + +/** + * usb_endpoint_is_isoc_in - check if the endpoint is isochronous IN + * @epd: endpoint to be checked + * + * Returns true if the endpoint has isochronous transfer type and IN direction, + * otherwise it returns false. + */ +static inline int usb_endpoint_is_isoc_in( + const struct usb_endpoint_descriptor *epd) +{ + return (usb_endpoint_xfer_isoc(epd) && usb_endpoint_dir_in(epd)); +} + +/** + * usb_endpoint_is_isoc_out - check if the endpoint is isochronous OUT + * @epd: endpoint to be checked + * + * Returns true if the endpoint has isochronous transfer type and OUT direction, + * otherwise it returns false. 
+ */ +static inline int usb_endpoint_is_isoc_out( + const struct usb_endpoint_descriptor *epd) +{ + return (usb_endpoint_xfer_isoc(epd) && usb_endpoint_dir_out(epd)); +} + +/*-------------------------------------------------------------------------*/ + +/* USB_DT_SS_ENDPOINT_COMP: SuperSpeed Endpoint Companion descriptor */ +struct usb_ss_ep_comp_descriptor { + __u8 bLength; + __u8 bDescriptorType; + + __u8 bMaxBurst; + __u8 bmAttributes; + __u16 wBytesPerInterval; +} __attribute__ ((packed)); + +#define USB_DT_SS_EP_COMP_SIZE 6 /*-------------------------------------------------------------------------*/ @@ -573,6 +768,7 @@ enum usb_device_speed { USB_SPEED_LOW, USB_SPEED_FULL, /* usb 1.1 */ USB_SPEED_HIGH, /* usb 2.0 */ USB_SPEED_VARIABLE, /* wireless (usb 2.5) */ + USB_SPEED_SUPER, /* usb 3.0 */ }; enum usb_device_state { @@ -584,8 +780,8 @@ enum usb_device_state { /* chapter 9 and authentication (wireless) device states */ USB_STATE_ATTACHED, USB_STATE_POWERED, /* wired */ - USB_STATE_UNAUTHENTICATED, /* auth */ USB_STATE_RECONNECTING, /* auth */ + USB_STATE_UNAUTHENTICATED, /* auth */ USB_STATE_DEFAULT, /* limited function */ USB_STATE_ADDRESS, USB_STATE_CONFIGURED, /* most functions */ diff --git a/include/linux/usb/composite.h b/include/linux/usb/composite.h index 935c380ffe4..4f6bb3d2160 100644 --- a/include/linux/usb/composite.h +++ b/include/linux/usb/composite.h @@ -124,6 +124,7 @@ struct usb_function { void (*suspend)(struct usb_function *); void (*resume)(struct usb_function *); + /* private: */ /* internals */ struct list_head list; }; @@ -219,6 +220,7 @@ struct usb_configuration { struct usb_composite_dev *cdev; + /* private: */ /* internals */ struct list_head list; struct list_head functions; @@ -244,6 +246,10 @@ int usb_add_config(struct usb_composite_dev *, * value; it should return zero on successful initialization. * @unbind: Reverses @bind(); called as a side effect of unregistering * this driver. + * @suspend: Notifies when the host stops sending USB traffic, + * after function notifications + * @resume: Notifies configuration when the host restarts USB traffic, + * before function notifications * * Devices default to reporting self powered operation. Devices which rely * on bus powered operation should report this in their @bind() method. @@ -268,6 +274,10 @@ struct usb_composite_driver { int (*bind)(struct usb_composite_dev *); int (*unbind)(struct usb_composite_dev *); + + /* global suspend hooks */ + void (*suspend)(struct usb_composite_dev *); + void (*resume)(struct usb_composite_dev *); }; extern int usb_composite_register(struct usb_composite_driver *); @@ -313,6 +323,7 @@ struct usb_composite_dev { struct usb_configuration *config; + /* private: */ /* internals */ struct usb_device_descriptor desc; struct list_head configs; diff --git a/include/linux/usb/gadget.h b/include/linux/usb/gadget.h index 0460a746480..bbf45d500b6 100644 --- a/include/linux/usb/gadget.h +++ b/include/linux/usb/gadget.h @@ -598,6 +598,7 @@ static inline int usb_gadget_clear_selfpowered(struct usb_gadget *gadget) /** * usb_gadget_vbus_connect - Notify controller that VBUS is powered * @gadget:The device which now has VBUS power. + * Context: can sleep * * This call is used by a driver for an external transceiver (or GPIO) * that detects a VBUS power session starting. 
Common responses include @@ -636,6 +637,7 @@ static inline int usb_gadget_vbus_draw(struct usb_gadget *gadget, unsigned mA) /** * usb_gadget_vbus_disconnect - notify controller about VBUS session end * @gadget:the device whose VBUS supply is being described + * Context: can sleep * * This call is used by a driver for an external transceiver (or GPIO) * that detects a VBUS power session ending. Common responses include @@ -792,19 +794,20 @@ struct usb_gadget_driver { /** * usb_gadget_register_driver - register a gadget driver * @driver:the driver being registered + * Context: can sleep * * Call this in your gadget driver's module initialization function, * to tell the underlying usb controller driver about your driver. * The driver's bind() function will be called to bind it to a * gadget before this registration call returns. It's expected that * the bind() functions will be in init sections. - * This function must be called in a context that can sleep. */ int usb_gadget_register_driver(struct usb_gadget_driver *driver); /** * usb_gadget_unregister_driver - unregister a gadget driver * @driver:the driver being unregistered + * Context: can sleep * * Call this in your gadget driver's module cleanup function, * to tell the underlying usb controller that your driver is @@ -813,7 +816,6 @@ int usb_gadget_register_driver(struct usb_gadget_driver *driver); * to unbind() and clean up any device state, before this procedure * finally returns. It's expected that the unbind() functions * will in in exit sections, so may not be linked in some kernels. - * This function must be called in a context that can sleep. */ int usb_gadget_unregister_driver(struct usb_gadget_driver *driver); diff --git a/include/linux/usb/gadgetfs.h b/include/linux/usb/gadgetfs.h index ea45f265ec0..612102e4d75 100644 --- a/include/linux/usb/gadgetfs.h +++ b/include/linux/usb/gadgetfs.h @@ -18,7 +18,7 @@ #ifndef __LINUX_USB_GADGETFS_H #define __LINUX_USB_GADGETFS_H -#include <asm/types.h> +#include <linux/types.h> #include <asm/ioctl.h> #include <linux/usb/ch9.h> diff --git a/include/linux/usb/langwell_udc.h b/include/linux/usb/langwell_udc.h new file mode 100644 index 00000000000..c949178a653 --- /dev/null +++ b/include/linux/usb/langwell_udc.h @@ -0,0 +1,310 @@ +/* + * Intel Langwell USB Device Controller driver + * Copyright (C) 2008-2009, Intel Corporation. + * + * This program is free software; you can redistribute it and/or modify it + * under the terms and conditions of the GNU General Public License, + * version 2, as published by the Free Software Foundation. + * + * This program is distributed in the hope it will be useful, but WITHOUT + * ANY WARRANTY; without even the implied warranty of MERCHANTABILITY or + * FITNESS FOR A PARTICULAR PURPOSE. See the GNU General Public License for + * more details. + * + * You should have received a copy of the GNU General Public License along with + * this program; if not, write to the Free Software Foundation, Inc., + * 51 Franklin St - Fifth Floor, Boston, MA 02110-1301 USA. 
+ * + */ + +#ifndef __LANGWELL_UDC_H +#define __LANGWELL_UDC_H + + +/* MACRO defines */ +#define CAP_REG_OFFSET 0x0 +#define OP_REG_OFFSET 0x28 + +#define DMA_ADDR_INVALID (~(dma_addr_t)0) + +#define DQH_ALIGNMENT 2048 +#define DTD_ALIGNMENT 64 +#define DMA_BOUNDARY 4096 + +#define EP0_MAX_PKT_SIZE 64 +#define EP_DIR_IN 1 +#define EP_DIR_OUT 0 + +#define FLUSH_TIMEOUT 1000 +#define RESET_TIMEOUT 1000 +#define SETUPSTAT_TIMEOUT 100 +#define PRIME_TIMEOUT 100 + + +/* device memory space registers */ + +/* Capability Registers, BAR0 + CAP_REG_OFFSET */ +struct langwell_cap_regs { + /* offset: 0x0 */ + u8 caplength; /* offset of Operational Register */ + u8 _reserved3; + u16 hciversion; /* H: BCD encoding of host version */ + u32 hcsparams; /* H: host port steering logic capability */ + u32 hccparams; /* H: host multiple mode control capability */ +#define HCC_LEN BIT(17) /* Link power management (LPM) capability */ + u8 _reserved4[0x20-0xc]; + /* offset: 0x20 */ + u16 dciversion; /* BCD encoding of device version */ + u8 _reserved5[0x24-0x22]; + u32 dccparams; /* overall device controller capability */ +#define HOSTCAP BIT(8) /* host capable */ +#define DEVCAP BIT(7) /* device capable */ +#define DEN(d) \ + (((d)>>0)&0x1f) /* bits 4:0, device endpoint number */ +} __attribute__ ((packed)); + + +/* Operational Registers, BAR0 + OP_REG_OFFSET */ +struct langwell_op_regs { + /* offset: 0x28 */ + u32 extsts; +#define EXTS_TI1 BIT(4) /* general purpose timer interrupt 1 */ +#define EXTS_TI1TI0 BIT(3) /* general purpose timer interrupt 0 */ +#define EXTS_TI1UPI BIT(2) /* USB host periodic interrupt */ +#define EXTS_TI1UAI BIT(1) /* USB host asynchronous interrupt */ +#define EXTS_TI1NAKI BIT(0) /* NAK interrupt */ + u32 extintr; +#define EXTI_TIE1 BIT(4) /* general purpose timer interrupt enable 1 */ +#define EXTI_TIE0 BIT(3) /* general purpose timer interrupt enable 0 */ +#define EXTI_UPIE BIT(2) /* USB host periodic interrupt enable */ +#define EXTI_UAIE BIT(1) /* USB host asynchronous interrupt enable */ +#define EXTI_NAKE BIT(0) /* NAK interrupt enable */ + /* offset: 0x30 */ + u32 usbcmd; +#define CMD_HIRD(u) \ + (((u)>>24)&0xf) /* bits 27:24, host init resume duration */ +#define CMD_ITC(u) \ + (((u)>>16)&0xff) /* bits 23:16, interrupt threshold control */ +#define CMD_PPE BIT(15) /* per-port change events enable */ +#define CMD_ATDTW BIT(14) /* add dTD tripwire */ +#define CMD_SUTW BIT(13) /* setup tripwire */ +#define CMD_ASPE BIT(11) /* asynchronous schedule park mode enable */ +#define CMD_FS2 BIT(10) /* frame list size */ +#define CMD_ASP1 BIT(9) /* asynchronous schedule park mode count */ +#define CMD_ASP0 BIT(8) +#define CMD_LR BIT(7) /* light host/device controller reset */ +#define CMD_IAA BIT(6) /* interrupt on async advance doorbell */ +#define CMD_ASE BIT(5) /* asynchronous schedule enable */ +#define CMD_PSE BIT(4) /* periodic schedule enable */ +#define CMD_FS1 BIT(3) +#define CMD_FS0 BIT(2) +#define CMD_RST BIT(1) /* controller reset */ +#define CMD_RUNSTOP BIT(0) /* run/stop */ + u32 usbsts; +#define STS_PPCI(u) \ + (((u)>>16)&0xffff) /* bits 31:16, port-n change detect */ +#define STS_AS BIT(15) /* asynchronous schedule status */ +#define STS_PS BIT(14) /* periodic schedule status */ +#define STS_RCL BIT(13) /* reclamation */ +#define STS_HCH BIT(12) /* HC halted */ +#define STS_ULPII BIT(10) /* ULPI interrupt */ +#define STS_SLI BIT(8) /* DC suspend */ +#define STS_SRI BIT(7) /* SOF received */ +#define STS_URI BIT(6) /* USB reset received */ +#define STS_AAI BIT(5) /* 
interrupt on async advance */ +#define STS_SEI BIT(4) /* system error */ +#define STS_FRI BIT(3) /* frame list rollover */ +#define STS_PCI BIT(2) /* port change detect */ +#define STS_UEI BIT(1) /* USB error interrupt */ +#define STS_UI BIT(0) /* USB interrupt */ + u32 usbintr; +/* bits 31:16, per-port interrupt enable */ +#define INTR_PPCE(u) (((u)>>16)&0xffff) +#define INTR_ULPIE BIT(10) /* ULPI enable */ +#define INTR_SLE BIT(8) /* DC sleep/suspend enable */ +#define INTR_SRE BIT(7) /* SOF received enable */ +#define INTR_URE BIT(6) /* USB reset enable */ +#define INTR_AAE BIT(5) /* interrupt on async advance enable */ +#define INTR_SEE BIT(4) /* system error enable */ +#define INTR_FRE BIT(3) /* frame list rollover enable */ +#define INTR_PCE BIT(2) /* port change detect enable */ +#define INTR_UEE BIT(1) /* USB error interrupt enable */ +#define INTR_UE BIT(0) /* USB interrupt enable */ + u32 frindex; /* frame index */ +#define FRINDEX_MASK (0x3fff << 0) + u32 ctrldssegment; /* not used */ + u32 deviceaddr; +#define USBADR_SHIFT 25 +#define USBADR(d) \ + (((d)>>25)&0x7f) /* bits 31:25, device address */ +#define USBADR_MASK (0x7f << 25) +#define USBADRA BIT(24) /* device address advance */ + u32 endpointlistaddr;/* endpoint list top memory address */ +/* bits 31:11, endpoint list pointer */ +#define EPBASE(d) (((d)>>11)&0x1fffff) +#define ENDPOINTLISTADDR_MASK (0x1fffff << 11) + u32 ttctrl; /* H: TT operatin, not used */ + /* offset: 0x50 */ + u32 burstsize; /* burst size of data movement */ +#define TXPBURST(b) \ + (((b)>>8)&0xff) /* bits 15:8, TX burst length */ +#define RXPBURST(b) \ + (((b)>>0)&0xff) /* bits 7:0, RX burst length */ + u32 txfilltuning; /* TX tuning */ + u32 txttfilltuning; /* H: TX TT tuning */ + u32 ic_usb; /* control the IC_USB FS/LS transceiver */ + /* offset: 0x60 */ + u32 ulpi_viewport; /* indirect access to ULPI PHY */ +#define ULPIWU BIT(31) /* ULPI wakeup */ +#define ULPIRUN BIT(30) /* ULPI read/write run */ +#define ULPIRW BIT(29) /* ULPI read/write control */ +#define ULPISS BIT(27) /* ULPI sync state */ +#define ULPIPORT(u) \ + (((u)>>24)&7) /* bits 26:24, ULPI port number */ +#define ULPIADDR(u) \ + (((u)>>16)&0xff) /* bits 23:16, ULPI data address */ +#define ULPIDATRD(u) \ + (((u)>>8)&0xff) /* bits 15:8, ULPI data read */ +#define ULPIDATWR(u) \ + (((u)>>0)&0xff) /* bits 7:0, ULPI date write */ + u8 _reserved6[0x70-0x64]; + /* offset: 0x70 */ + u32 configflag; /* H: not used */ + u32 portsc1; /* port status */ +#define DA(p) \ + (((p)>>25)&0x7f) /* bits 31:25, device address */ +#define PORTS_SSTS (BIT(24) | BIT(23)) /* suspend status */ +#define PORTS_WKOC BIT(22) /* wake on over-current enable */ +#define PORTS_WKDS BIT(21) /* wake on disconnect enable */ +#define PORTS_WKCN BIT(20) /* wake on connect enable */ +#define PORTS_PTC(p) (((p)>>16)&0xf) /* bits 19:16, port test control */ +#define PORTS_PIC (BIT(15) | BIT(14)) /* port indicator control */ +#define PORTS_PO BIT(13) /* port owner */ +#define PORTS_PP BIT(12) /* port power */ +#define PORTS_LS (BIT(11) | BIT(10)) /* line status */ +#define PORTS_SLP BIT(9) /* suspend using L1 */ +#define PORTS_PR BIT(8) /* port reset */ +#define PORTS_SUSP BIT(7) /* suspend */ +#define PORTS_FPR BIT(6) /* force port resume */ +#define PORTS_OCC BIT(5) /* over-current change */ +#define PORTS_OCA BIT(4) /* over-current active */ +#define PORTS_PEC BIT(3) /* port enable/disable change */ +#define PORTS_PE BIT(2) /* port enable/disable */ +#define PORTS_CSC BIT(1) /* connect status change */ +#define 
PORTS_CCS BIT(0) /* current connect status */ + u8 _reserved7[0xb4-0x78]; + /* offset: 0xb4 */ + u32 devlc; /* control LPM and each USB port behavior */ +/* bits 31:29, parallel transceiver select */ +#define LPM_PTS(d) (((d)>>29)&7) +#define LPM_STS BIT(28) /* serial transceiver select */ +#define LPM_PTW BIT(27) /* parallel transceiver width */ +#define LPM_PSPD(d) (((d)>>25)&3) /* bits 26:25, port speed */ +#define LPM_PSPD_MASK (BIT(26) | BIT(25)) +#define LPM_SPEED_FULL 0 +#define LPM_SPEED_LOW 1 +#define LPM_SPEED_HIGH 2 +#define LPM_SRT BIT(24) /* shorten reset time */ +#define LPM_PFSC BIT(23) /* port force full speed connect */ +#define LPM_PHCD BIT(22) /* PHY low power suspend clock disable */ +#define LPM_STL BIT(16) /* STALL reply to LPM token */ +#define LPM_BA(d) \ + (((d)>>1)&0x7ff) /* bits 11:1, BmAttributes */ +#define LPM_NYT_ACK BIT(0) /* NYET/ACK reply to LPM token */ + u8 _reserved8[0xf4-0xb8]; + /* offset: 0xf4 */ + u32 otgsc; /* On-The-Go status and control */ +#define OTGSC_DPIE BIT(30) /* data pulse interrupt enable */ +#define OTGSC_MSE BIT(29) /* 1 ms timer interrupt enable */ +#define OTGSC_BSEIE BIT(28) /* B session end interrupt enable */ +#define OTGSC_BSVIE BIT(27) /* B session valid interrupt enable */ +#define OTGSC_ASVIE BIT(26) /* A session valid interrupt enable */ +#define OTGSC_AVVIE BIT(25) /* A VBUS valid interrupt enable */ +#define OTGSC_IDIE BIT(24) /* USB ID interrupt enable */ +#define OTGSC_DPIS BIT(22) /* data pulse interrupt status */ +#define OTGSC_MSS BIT(21) /* 1 ms timer interrupt status */ +#define OTGSC_BSEIS BIT(20) /* B session end interrupt status */ +#define OTGSC_BSVIS BIT(19) /* B session valid interrupt status */ +#define OTGSC_ASVIS BIT(18) /* A session valid interrupt status */ +#define OTGSC_AVVIS BIT(17) /* A VBUS valid interrupt status */ +#define OTGSC_IDIS BIT(16) /* USB ID interrupt status */ +#define OTGSC_DPS BIT(14) /* data bus pulsing status */ +#define OTGSC_MST BIT(13) /* 1 ms timer toggle */ +#define OTGSC_BSE BIT(12) /* B session end */ +#define OTGSC_BSV BIT(11) /* B session valid */ +#define OTGSC_ASV BIT(10) /* A session valid */ +#define OTGSC_AVV BIT(9) /* A VBUS valid */ +#define OTGSC_USBID BIT(8) /* USB ID */ +#define OTGSC_HABA BIT(7) /* hw assist B-disconnect to A-connect */ +#define OTGSC_HADP BIT(6) /* hw assist data pulse */ +#define OTGSC_IDPU BIT(5) /* ID pullup */ +#define OTGSC_DP BIT(4) /* data pulsing */ +#define OTGSC_OT BIT(3) /* OTG termination */ +#define OTGSC_HAAR BIT(2) /* hw assist auto reset */ +#define OTGSC_VC BIT(1) /* VBUS charge */ +#define OTGSC_VD BIT(0) /* VBUS discharge */ + u32 usbmode; +#define MODE_VBPS BIT(5) /* R/W VBUS power select */ +#define MODE_SDIS BIT(4) /* R/W stream disable mode */ +#define MODE_SLOM BIT(3) /* R/W setup lockout mode */ +#define MODE_ENSE BIT(2) /* endian select */ +#define MODE_CM(u) (((u)>>0)&3) /* bits 1:0, controller mode */ +#define MODE_IDLE 0 +#define MODE_DEVICE 2 +#define MODE_HOST 3 + u8 _reserved9[0x100-0xfc]; + /* offset: 0x100 */ + u32 endptnak; +#define EPTN(e) \ + (((e)>>16)&0xffff) /* bits 31:16, TX endpoint NAK */ +#define EPRN(e) \ + (((e)>>0)&0xffff) /* bits 15:0, RX endpoint NAK */ + u32 endptnaken; +#define EPTNE(e) \ + (((e)>>16)&0xffff) /* bits 31:16, TX endpoint NAK enable */ +#define EPRNE(e) \ + (((e)>>0)&0xffff) /* bits 15:0, RX endpoint NAK enable */ + u32 endptsetupstat; +#define SETUPSTAT_MASK (0xffff << 0) /* bits 15:0 */ +#define EP0SETUPSTAT_MASK 1 + u32 endptprime; +/* bits 31:16, prime endpoint transmit buffer */ 
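These register layouts are consumed by reading the memory-mapped operational block. A hedged sketch of how a controller driver might decode the negotiated port speed and the controller mode from the fields defined above follows; the ioremap() of BAR0 + OP_REG_OFFSET is assumed to have happened in probe code that is not shown, and 'op' is a hypothetical mapped pointer.

/* Sketch only: decode port speed and controller mode from the mapped
 * operational registers.  'op' is assumed to come from ioremap() of
 * BAR0 + OP_REG_OFFSET during probe, which is not shown here. */
static void example_dump_state(struct langwell_op_regs __iomem *op)
{
	u32 devlc = readl(&op->devlc);
	u32 mode  = readl(&op->usbmode);

	switch (LPM_PSPD(devlc)) {
	case LPM_SPEED_HIGH:
		pr_info("port running at high speed\n");
		break;
	case LPM_SPEED_FULL:
		pr_info("port running at full speed\n");
		break;
	default:
		pr_info("port running at low speed\n");
		break;
	}

	if (MODE_CM(mode) == MODE_DEVICE)
		pr_info("controller is in device mode\n");
}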
+#define PETB(e) (((e)>>16)&0xffff) +/* bits 15:0, prime endpoint receive buffer */ +#define PERB(e) (((e)>>0)&0xffff) + /* offset: 0x110 */ + u32 endptflush; +/* bits 31:16, flush endpoint transmit buffer */ +#define FETB(e) (((e)>>16)&0xffff) +/* bits 15:0, flush endpoint receive buffer */ +#define FERB(e) (((e)>>0)&0xffff) + u32 endptstat; +/* bits 31:16, endpoint transmit buffer ready */ +#define ETBR(e) (((e)>>16)&0xffff) +/* bits 15:0, endpoint receive buffer ready */ +#define ERBR(e) (((e)>>0)&0xffff) + u32 endptcomplete; +/* bits 31:16, endpoint transmit complete event */ +#define ETCE(e) (((e)>>16)&0xffff) +/* bits 15:0, endpoint receive complete event */ +#define ERCE(e) (((e)>>0)&0xffff) + /* offset: 0x11c */ + u32 endptctrl[16]; +#define EPCTRL_TXE BIT(23) /* TX endpoint enable */ +#define EPCTRL_TXR BIT(22) /* TX data toggle reset */ +#define EPCTRL_TXI BIT(21) /* TX data toggle inhibit */ +#define EPCTRL_TXT(e) (((e)>>18)&3) /* bits 19:18, TX endpoint type */ +#define EPCTRL_TXT_SHIFT 18 +#define EPCTRL_TXD BIT(17) /* TX endpoint data source */ +#define EPCTRL_TXS BIT(16) /* TX endpoint STALL */ +#define EPCTRL_RXE BIT(7) /* RX endpoint enable */ +#define EPCTRL_RXR BIT(6) /* RX data toggle reset */ +#define EPCTRL_RXI BIT(5) /* RX data toggle inhibit */ +#define EPCTRL_RXT(e) (((e)>>2)&3) /* bits 3:2, RX endpoint type */ +#define EPCTRL_RXT_SHIFT 2 /* bits 19:18, TX endpoint type */ +#define EPCTRL_RXD BIT(1) /* RX endpoint data sink */ +#define EPCTRL_RXS BIT(0) /* RX endpoint STALL */ +} __attribute__ ((packed)); + +#endif /* __LANGWELL_UDC_H */ + diff --git a/include/linux/usb/musb.h b/include/linux/usb/musb.h index d6aad0ea603..d4375566926 100644 --- a/include/linux/usb/musb.h +++ b/include/linux/usb/musb.h @@ -7,6 +7,9 @@ * key configuration differences between boards. */ +#ifndef __LINUX_USB_MUSB_H +#define __LINUX_USB_MUSB_H + /* The USB role is defined by the connector used on the board, so long as * standards are being followed. (Developer boards sometimes won't.) */ @@ -101,3 +104,5 @@ extern int __init tusb6010_setup_interface( extern int tusb6010_platform_retime(unsigned is_refclk); #endif /* OMAP2 */ + +#endif /* __LINUX_USB_MUSB_H */ diff --git a/include/linux/usb/otg.h b/include/linux/usb/otg.h index 94df4fe6c6c..2443c0e7a80 100644 --- a/include/linux/usb/otg.h +++ b/include/linux/usb/otg.h @@ -81,11 +81,16 @@ struct otg_transceiver { /* for board-specific init logic */ extern int otg_set_transceiver(struct otg_transceiver *); +/* sometimes transceivers are accessed only through e.g. 
ULPI */ +extern void usb_nop_xceiv_register(void); +extern void usb_nop_xceiv_unregister(void); + /* for usb host and peripheral controller drivers */ extern struct otg_transceiver *otg_get_transceiver(void); extern void otg_put_transceiver(struct otg_transceiver *); +/* Context: can sleep */ static inline int otg_start_hnp(struct otg_transceiver *otg) { @@ -102,6 +107,8 @@ otg_set_host(struct otg_transceiver *otg, struct usb_bus *host) /* for usb peripheral controller drivers */ + +/* Context: can sleep */ static inline int otg_set_peripheral(struct otg_transceiver *otg, struct usb_gadget *periph) { @@ -114,6 +121,7 @@ otg_set_power(struct otg_transceiver *otg, unsigned mA) return otg->set_power(otg, mA); } +/* Context: can sleep */ static inline int otg_set_suspend(struct otg_transceiver *otg, int suspend) { diff --git a/include/linux/usb/quirks.h b/include/linux/usb/quirks.h index 7f6c603db65..2526f3bbd27 100644 --- a/include/linux/usb/quirks.h +++ b/include/linux/usb/quirks.h @@ -16,4 +16,7 @@ /* device can't handle Set-Interface requests */ #define USB_QUIRK_NO_SET_INTF 0x00000004 +/* device can't handle its Configuration or Interface strings */ +#define USB_QUIRK_CONFIG_INTF_STRINGS 0x00000008 + #endif /* __LINUX_USB_QUIRKS_H */ diff --git a/include/linux/usb/r8a66597.h b/include/linux/usb/r8a66597.h new file mode 100644 index 00000000000..e9f0384fa20 --- /dev/null +++ b/include/linux/usb/r8a66597.h @@ -0,0 +1,44 @@ +/* + * R8A66597 driver platform data + * + * Copyright (C) 2009 Renesas Solutions Corp. + * + * Author : Yoshihiro Shimoda <shimoda.yoshihiro@renesas.com> + * + * This program is free software; you can redistribute it and/or modify + * it under the terms of the GNU General Public License as published by + * the Free Software Foundation; version 2 of the License. + * + * This program is distributed in the hope that it will be useful, + * but WITHOUT ANY WARRANTY; without even the implied warranty of + * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the + * GNU General Public License for more details. + * + * You should have received a copy of the GNU General Public License + * along with this program; if not, write to the Free Software + * Foundation, Inc., 51 Franklin St, Fifth Floor, Boston, MA 02110-1301 USA + * + */ + +#ifndef __LINUX_USB_R8A66597_H +#define __LINUX_USB_R8A66597_H + +#define R8A66597_PLATDATA_XTAL_12MHZ 0x01 +#define R8A66597_PLATDATA_XTAL_24MHZ 0x02 +#define R8A66597_PLATDATA_XTAL_48MHZ 0x03 + +struct r8a66597_platdata { + /* This ops can controll port power instead of DVSTCTR register. */ + void (*port_power)(int port, int power); + + /* (external controller only) set R8A66597_PLATDATA_XTAL_nnMHZ */ + unsigned xtal:2; + + /* set one = 3.3V, set zero = 1.5V */ + unsigned vif:1; + + /* set one = big endian, set zero = little endian */ + unsigned endian:1; +}; +#endif + diff --git a/include/linux/usb/rndis_host.h b/include/linux/usb/rndis_host.h index 0a6e6d4b929..1ef1ebc2b04 100644 --- a/include/linux/usb/rndis_host.h +++ b/include/linux/usb/rndis_host.h @@ -49,48 +49,46 @@ struct rndis_msg_hdr { */ #define RNDIS_CONTROL_TIMEOUT_MS (5 * 1000) - -#define ccpu2 __constant_cpu_to_le32 - -#define RNDIS_MSG_COMPLETION ccpu2(0x80000000) +#define RNDIS_MSG_COMPLETION cpu_to_le32(0x80000000) /* codes for "msg_type" field of rndis messages; * only the data channel uses packet messages (maybe batched); * everything else goes on the control channel. 
*/ -#define RNDIS_MSG_PACKET ccpu2(0x00000001) /* 1-N packets */ -#define RNDIS_MSG_INIT ccpu2(0x00000002) +#define RNDIS_MSG_PACKET cpu_to_le32(0x00000001) /* 1-N packets */ +#define RNDIS_MSG_INIT cpu_to_le32(0x00000002) #define RNDIS_MSG_INIT_C (RNDIS_MSG_INIT|RNDIS_MSG_COMPLETION) -#define RNDIS_MSG_HALT ccpu2(0x00000003) -#define RNDIS_MSG_QUERY ccpu2(0x00000004) +#define RNDIS_MSG_HALT cpu_to_le32(0x00000003) +#define RNDIS_MSG_QUERY cpu_to_le32(0x00000004) #define RNDIS_MSG_QUERY_C (RNDIS_MSG_QUERY|RNDIS_MSG_COMPLETION) -#define RNDIS_MSG_SET ccpu2(0x00000005) +#define RNDIS_MSG_SET cpu_to_le32(0x00000005) #define RNDIS_MSG_SET_C (RNDIS_MSG_SET|RNDIS_MSG_COMPLETION) -#define RNDIS_MSG_RESET ccpu2(0x00000006) +#define RNDIS_MSG_RESET cpu_to_le32(0x00000006) #define RNDIS_MSG_RESET_C (RNDIS_MSG_RESET|RNDIS_MSG_COMPLETION) -#define RNDIS_MSG_INDICATE ccpu2(0x00000007) -#define RNDIS_MSG_KEEPALIVE ccpu2(0x00000008) +#define RNDIS_MSG_INDICATE cpu_to_le32(0x00000007) +#define RNDIS_MSG_KEEPALIVE cpu_to_le32(0x00000008) #define RNDIS_MSG_KEEPALIVE_C (RNDIS_MSG_KEEPALIVE|RNDIS_MSG_COMPLETION) /* codes for "status" field of completion messages */ -#define RNDIS_STATUS_SUCCESS ccpu2(0x00000000) -#define RNDIS_STATUS_FAILURE ccpu2(0xc0000001) -#define RNDIS_STATUS_INVALID_DATA ccpu2(0xc0010015) -#define RNDIS_STATUS_NOT_SUPPORTED ccpu2(0xc00000bb) -#define RNDIS_STATUS_MEDIA_CONNECT ccpu2(0x4001000b) -#define RNDIS_STATUS_MEDIA_DISCONNECT ccpu2(0x4001000c) +#define RNDIS_STATUS_SUCCESS cpu_to_le32(0x00000000) +#define RNDIS_STATUS_FAILURE cpu_to_le32(0xc0000001) +#define RNDIS_STATUS_INVALID_DATA cpu_to_le32(0xc0010015) +#define RNDIS_STATUS_NOT_SUPPORTED cpu_to_le32(0xc00000bb) +#define RNDIS_STATUS_MEDIA_CONNECT cpu_to_le32(0x4001000b) +#define RNDIS_STATUS_MEDIA_DISCONNECT cpu_to_le32(0x4001000c) +#define RNDIS_STATUS_MEDIA_SPECIFIC_INDICATION cpu_to_le32(0x40010012) /* codes for OID_GEN_PHYSICAL_MEDIUM */ -#define RNDIS_PHYSICAL_MEDIUM_UNSPECIFIED ccpu2(0x00000000) -#define RNDIS_PHYSICAL_MEDIUM_WIRELESS_LAN ccpu2(0x00000001) -#define RNDIS_PHYSICAL_MEDIUM_CABLE_MODEM ccpu2(0x00000002) -#define RNDIS_PHYSICAL_MEDIUM_PHONE_LINE ccpu2(0x00000003) -#define RNDIS_PHYSICAL_MEDIUM_POWER_LINE ccpu2(0x00000004) -#define RNDIS_PHYSICAL_MEDIUM_DSL ccpu2(0x00000005) -#define RNDIS_PHYSICAL_MEDIUM_FIBRE_CHANNEL ccpu2(0x00000006) -#define RNDIS_PHYSICAL_MEDIUM_1394 ccpu2(0x00000007) -#define RNDIS_PHYSICAL_MEDIUM_WIRELESS_WAN ccpu2(0x00000008) -#define RNDIS_PHYSICAL_MEDIUM_MAX ccpu2(0x00000009) +#define RNDIS_PHYSICAL_MEDIUM_UNSPECIFIED cpu_to_le32(0x00000000) +#define RNDIS_PHYSICAL_MEDIUM_WIRELESS_LAN cpu_to_le32(0x00000001) +#define RNDIS_PHYSICAL_MEDIUM_CABLE_MODEM cpu_to_le32(0x00000002) +#define RNDIS_PHYSICAL_MEDIUM_PHONE_LINE cpu_to_le32(0x00000003) +#define RNDIS_PHYSICAL_MEDIUM_POWER_LINE cpu_to_le32(0x00000004) +#define RNDIS_PHYSICAL_MEDIUM_DSL cpu_to_le32(0x00000005) +#define RNDIS_PHYSICAL_MEDIUM_FIBRE_CHANNEL cpu_to_le32(0x00000006) +#define RNDIS_PHYSICAL_MEDIUM_1394 cpu_to_le32(0x00000007) +#define RNDIS_PHYSICAL_MEDIUM_WIRELESS_WAN cpu_to_le32(0x00000008) +#define RNDIS_PHYSICAL_MEDIUM_MAX cpu_to_le32(0x00000009) struct rndis_data_hdr { __le32 msg_type; /* RNDIS_MSG_PACKET */ @@ -228,24 +226,24 @@ struct rndis_keepalive_c { /* IN (optionally OUT) */ * there are gobs more that may optionally be supported. We'll avoid as much * of that mess as possible. 
*/ -#define OID_802_3_PERMANENT_ADDRESS ccpu2(0x01010101) -#define OID_GEN_MAXIMUM_FRAME_SIZE ccpu2(0x00010106) -#define OID_GEN_CURRENT_PACKET_FILTER ccpu2(0x0001010e) -#define OID_GEN_PHYSICAL_MEDIUM ccpu2(0x00010202) +#define OID_802_3_PERMANENT_ADDRESS cpu_to_le32(0x01010101) +#define OID_GEN_MAXIMUM_FRAME_SIZE cpu_to_le32(0x00010106) +#define OID_GEN_CURRENT_PACKET_FILTER cpu_to_le32(0x0001010e) +#define OID_GEN_PHYSICAL_MEDIUM cpu_to_le32(0x00010202) /* packet filter bits used by OID_GEN_CURRENT_PACKET_FILTER */ -#define RNDIS_PACKET_TYPE_DIRECTED ccpu2(0x00000001) -#define RNDIS_PACKET_TYPE_MULTICAST ccpu2(0x00000002) -#define RNDIS_PACKET_TYPE_ALL_MULTICAST ccpu2(0x00000004) -#define RNDIS_PACKET_TYPE_BROADCAST ccpu2(0x00000008) -#define RNDIS_PACKET_TYPE_SOURCE_ROUTING ccpu2(0x00000010) -#define RNDIS_PACKET_TYPE_PROMISCUOUS ccpu2(0x00000020) -#define RNDIS_PACKET_TYPE_SMT ccpu2(0x00000040) -#define RNDIS_PACKET_TYPE_ALL_LOCAL ccpu2(0x00000080) -#define RNDIS_PACKET_TYPE_GROUP ccpu2(0x00001000) -#define RNDIS_PACKET_TYPE_ALL_FUNCTIONAL ccpu2(0x00002000) -#define RNDIS_PACKET_TYPE_FUNCTIONAL ccpu2(0x00004000) -#define RNDIS_PACKET_TYPE_MAC_FRAME ccpu2(0x00008000) +#define RNDIS_PACKET_TYPE_DIRECTED cpu_to_le32(0x00000001) +#define RNDIS_PACKET_TYPE_MULTICAST cpu_to_le32(0x00000002) +#define RNDIS_PACKET_TYPE_ALL_MULTICAST cpu_to_le32(0x00000004) +#define RNDIS_PACKET_TYPE_BROADCAST cpu_to_le32(0x00000008) +#define RNDIS_PACKET_TYPE_SOURCE_ROUTING cpu_to_le32(0x00000010) +#define RNDIS_PACKET_TYPE_PROMISCUOUS cpu_to_le32(0x00000020) +#define RNDIS_PACKET_TYPE_SMT cpu_to_le32(0x00000040) +#define RNDIS_PACKET_TYPE_ALL_LOCAL cpu_to_le32(0x00000080) +#define RNDIS_PACKET_TYPE_GROUP cpu_to_le32(0x00001000) +#define RNDIS_PACKET_TYPE_ALL_FUNCTIONAL cpu_to_le32(0x00002000) +#define RNDIS_PACKET_TYPE_FUNCTIONAL cpu_to_le32(0x00004000) +#define RNDIS_PACKET_TYPE_MAC_FRAME cpu_to_le32(0x00008000) /* default filter used with RNDIS devices */ #define RNDIS_DEFAULT_FILTER ( \ diff --git a/include/linux/usb/serial.h b/include/linux/usb/serial.h index 0b8617a9176..0ec50ba6213 100644 --- a/include/linux/usb/serial.h +++ b/include/linux/usb/serial.h @@ -15,6 +15,7 @@ #include <linux/kref.h> #include <linux/mutex.h> +#include <linux/sysrq.h> #define SERIAL_TTY_MAJOR 188 /* Nice legal number now */ #define SERIAL_TTY_MINORS 254 /* loads of devices :) */ @@ -26,10 +27,17 @@ /* parity check flag */ #define RELEVANT_IFLAG(iflag) (iflag & (IGNBRK|BRKINT|IGNPAR|PARMRK|INPCK)) +enum port_dev_state { + PORT_UNREGISTERED, + PORT_REGISTERING, + PORT_REGISTERED, + PORT_UNREGISTERING, +}; + /** * usb_serial_port: structure for the specific ports of a device. * @serial: pointer back to the struct usb_serial owner of this port. - * @tty: pointer to the corresponding tty for this port. + * @port: pointer to the corresponding tty_port for this port. * @lock: spinlock to grab when updating portions of this structure. * @mutex: mutex used to synchronize serial_open() and serial_close() * access for this port. @@ -44,19 +52,22 @@ * @interrupt_out_endpointAddress: endpoint address for the interrupt out pipe * for this port. * @bulk_in_buffer: pointer to the bulk in buffer for this port. + * @bulk_in_size: the size of the bulk_in_buffer, in bytes. * @read_urb: pointer to the bulk in struct urb for this port. * @bulk_in_endpointAddress: endpoint address for the bulk in pipe for this * port. * @bulk_out_buffer: pointer to the bulk out buffer for this port. * @bulk_out_size: the size of the bulk_out_buffer, in bytes. 
* @write_urb: pointer to the bulk out struct urb for this port. + * @write_urb_busy: port`s writing status * @bulk_out_endpointAddress: endpoint address for the bulk out pipe for this * port. * @write_wait: a wait_queue_head_t used by the port. * @work: work queue entry for the line discipline waking up. - * @open_count: number of times this port has been opened. * @throttled: nonzero if the read urb is inactive to throttle the device * @throttle_req: nonzero if the tty wants to throttle us + * @console: attached usb serial console + * @dev: pointer to the serial device * * This structure is used by the usb-serial core and drivers for the specific * ports of a device. @@ -88,12 +99,17 @@ struct usb_serial_port { int write_urb_busy; __u8 bulk_out_endpointAddress; + int tx_bytes_flight; + int urbs_in_flight; + wait_queue_head_t write_wait; struct work_struct work; char throttled; char throttle_req; char console; + unsigned long sysrq; /* sysrq timeout */ struct device dev; + enum port_dev_state dev_state; }; #define to_usb_serial_port(d) container_of(d, struct usb_serial_port, dev) @@ -130,7 +146,8 @@ struct usb_serial { struct usb_device *dev; struct usb_serial_driver *type; struct usb_interface *interface; - unsigned char disconnected; + unsigned char disconnected:1; + unsigned char suspending:1; unsigned char minor; unsigned char num_ports; unsigned char num_port_pointers; @@ -177,8 +194,10 @@ static inline void usb_set_serial_data(struct usb_serial *serial, void *data) * This will be called when the struct usb_serial structure is fully set * set up. Do any local initialization of the device, or any private * memory structure allocation at this point in time. - * @shutdown: pointer to the driver's shutdown function. This will be - * called when the device is removed from the system. + * @disconnect: pointer to the driver's disconnect function. This will be + * called when the device is unplugged or unbound from the driver. + * @release: pointer to the driver's release function. This will be called + * when the usb_serial data structure is about to be destroyed. * @usb_driver: pointer to the struct usb_driver that controls this * device. This is necessary to allow dynamic ids to be added to * the driver from sysfs. @@ -203,12 +222,14 @@ struct usb_serial_driver { struct device_driver driver; struct usb_driver *usb_driver; struct usb_dynids dynids; + int max_in_flight_urbs; int (*probe)(struct usb_serial *serial, const struct usb_device_id *id); int (*attach)(struct usb_serial *serial); int (*calc_num_ports) (struct usb_serial *serial); - void (*shutdown)(struct usb_serial *serial); + void (*disconnect)(struct usb_serial *serial); + void (*release)(struct usb_serial *serial); int (*port_probe)(struct usb_serial_port *port); int (*port_remove)(struct usb_serial_port *port); @@ -220,8 +241,7 @@ struct usb_serial_driver { /* Called by console with tty = NULL and by tty */ int (*open)(struct tty_struct *tty, struct usb_serial_port *port, struct file *filp); - void (*close)(struct tty_struct *tty, - struct usb_serial_port *port, struct file *filp); + void (*close)(struct usb_serial_port *port); int (*write)(struct tty_struct *tty, struct usb_serial_port *port, const unsigned char *buf, int count); /* Called only by the tty layer */ @@ -237,6 +257,10 @@ struct usb_serial_driver { int (*tiocmget)(struct tty_struct *tty, struct file *file); int (*tiocmset)(struct tty_struct *tty, struct file *file, unsigned int set, unsigned int clear); + /* Called by the tty layer for port level work. 
There may or may not + be an attached tty at this point */ + void (*dtr_rts)(struct usb_serial_port *port, int on); + int (*carrier_raised)(struct usb_serial_port *port); /* USB events */ void (*read_int_callback)(struct urb *urb); void (*write_int_callback)(struct urb *urb); @@ -279,8 +303,7 @@ extern int usb_serial_generic_open(struct tty_struct *tty, struct usb_serial_port *port, struct file *filp); extern int usb_serial_generic_write(struct tty_struct *tty, struct usb_serial_port *port, const unsigned char *buf, int count); -extern void usb_serial_generic_close(struct tty_struct *tty, - struct usb_serial_port *port, struct file *filp); +extern void usb_serial_generic_close(struct usb_serial_port *port); extern int usb_serial_generic_resume(struct usb_serial *serial); extern int usb_serial_generic_write_room(struct tty_struct *tty); extern int usb_serial_generic_chars_in_buffer(struct tty_struct *tty); @@ -288,9 +311,17 @@ extern void usb_serial_generic_read_bulk_callback(struct urb *urb); extern void usb_serial_generic_write_bulk_callback(struct urb *urb); extern void usb_serial_generic_throttle(struct tty_struct *tty); extern void usb_serial_generic_unthrottle(struct tty_struct *tty); -extern void usb_serial_generic_shutdown(struct usb_serial *serial); +extern void usb_serial_generic_disconnect(struct usb_serial *serial); +extern void usb_serial_generic_release(struct usb_serial *serial); extern int usb_serial_generic_register(int debug); extern void usb_serial_generic_deregister(void); +extern void usb_serial_generic_resubmit_read_urb(struct usb_serial_port *port, + gfp_t mem_flags); +extern int usb_serial_handle_sysrq_char(struct tty_struct *tty, + struct usb_serial_port *port, + unsigned int ch); +extern int usb_serial_handle_break(struct usb_serial_port *port); + extern int usb_serial_bus_register(struct usb_serial_driver *device); extern void usb_serial_bus_deregister(struct usb_serial_driver *device); diff --git a/include/linux/usb/usbnet.h b/include/linux/usb/usbnet.h index ba09fe88add..f8147305205 100644 --- a/include/linux/usb/usbnet.h +++ b/include/linux/usb/usbnet.h @@ -42,7 +42,6 @@ struct usbnet { /* protocol/interface state */ struct net_device *net; - struct net_device_stats stats; int msg_enable; unsigned long data [5]; u32 xid; @@ -54,6 +53,7 @@ struct usbnet { struct sk_buff_head rxq; struct sk_buff_head txq; struct sk_buff_head done; + struct sk_buff_head rxq_pause; struct urb *interrupt; struct tasklet_struct bh; @@ -64,6 +64,7 @@ struct usbnet { # define EVENT_RX_MEMORY 2 # define EVENT_STS_SPLIT 3 # define EVENT_LINK_RESET 4 +# define EVENT_RX_PAUSED 5 }; static inline struct usb_driver *driver_of(struct usb_interface *intf) @@ -87,6 +88,8 @@ struct driver_info { #define FLAG_FRAMING_AX 0x0040 /* AX88772/178 packets */ #define FLAG_WLAN 0x0080 /* use "wlan%d" names */ +#define FLAG_AVOID_UNLINK_URBS 0x0100 /* don't unlink urbs at usbnet_stop() */ +#define FLAG_SEND_ZLP 0x0200 /* hw requires ZLPs are sent */ /* init device ... can sleep, or cause probe() failure */ @@ -98,6 +101,9 @@ struct driver_info { /* reset device ... can sleep */ int (*reset)(struct usbnet *); + /* stop device ... can sleep */ + int (*stop)(struct usbnet *); + /* see if peer is connected ... can sleep */ int (*check_connect)(struct usbnet *); @@ -119,9 +125,8 @@ struct driver_info { * right after minidriver have initialized hardware. 
*/ int (*early_init)(struct usbnet *dev); - /* called by minidriver when link state changes, state: 0=disconnect, - * 1=connect */ - void (*link_change)(struct usbnet *dev, int state); + /* called by minidriver when receiving indication */ + void (*indication)(struct usbnet *dev, void *ind, int indlen); /* for new devices, use the descriptor-reading code instead */ int in; /* rx endpoint */ @@ -176,12 +181,23 @@ struct skb_data { /* skb->cb is one of these */ size_t length; }; +extern int usbnet_open (struct net_device *net); +extern int usbnet_stop (struct net_device *net); +extern netdev_tx_t usbnet_start_xmit (struct sk_buff *skb, + struct net_device *net); +extern void usbnet_tx_timeout (struct net_device *net); +extern int usbnet_change_mtu (struct net_device *net, int new_mtu); extern int usbnet_get_endpoints(struct usbnet *, struct usb_interface *); +extern int usbnet_get_ethernet_addr(struct usbnet *, int); extern void usbnet_defer_kevent (struct usbnet *, int); extern void usbnet_skb_return (struct usbnet *, struct sk_buff *); extern void usbnet_unlink_rx_urbs(struct usbnet *); +extern void usbnet_pause_rx(struct usbnet *); +extern void usbnet_resume_rx(struct usbnet *); +extern void usbnet_purge_paused_rxq(struct usbnet *); + extern int usbnet_get_settings (struct net_device *net, struct ethtool_cmd *cmd); extern int usbnet_set_settings (struct net_device *net, struct ethtool_cmd *cmd); extern u32 usbnet_get_link (struct net_device *net); @@ -197,7 +213,9 @@ extern int usbnet_nway_reset(struct net_device *net); #define devdbg(usbnet, fmt, arg...) \ printk(KERN_DEBUG "%s: " fmt "\n" , (usbnet)->net->name , ## arg) #else -#define devdbg(usbnet, fmt, arg...) do {} while(0) +#define devdbg(usbnet, fmt, arg...) \ + ({ if (0) printk(KERN_DEBUG "%s: " fmt "\n" , (usbnet)->net->name , \ + ## arg); 0; }) #endif #define deverr(usbnet, fmt, arg...) 
\ diff --git a/include/linux/usb/wusb.h b/include/linux/usb/wusb.h index 5f401b644ed..429c631d2aa 100644 --- a/include/linux/usb/wusb.h +++ b/include/linux/usb/wusb.h @@ -80,8 +80,7 @@ struct wusb_ckhdid { u8 data[16]; } __attribute__((packed)); -const static -struct wusb_ckhdid wusb_ckhdid_zero = { .data = { 0 } }; +static const struct wusb_ckhdid wusb_ckhdid_zero = { .data = { 0 } }; #define WUSB_CKHDID_STRSIZE (3 * sizeof(struct wusb_ckhdid) + 1) diff --git a/include/linux/usb_usual.h b/include/linux/usb_usual.h index 1eea1ab68dc..3d15fb9bc11 100644 --- a/include/linux/usb_usual.h +++ b/include/linux/usb_usual.h @@ -96,39 +96,26 @@ enum { US_DO_ALL_FLAGS }; #define US_PR_CBI 0x00 /* Control/Bulk/Interrupt */ #define US_PR_CB 0x01 /* Control/Bulk w/o interrupt */ #define US_PR_BULK 0x50 /* bulk only */ -#ifdef CONFIG_USB_STORAGE_USBAT + #define US_PR_USBAT 0x80 /* SCM-ATAPI bridge */ -#endif -#ifdef CONFIG_USB_STORAGE_SDDR09 #define US_PR_EUSB_SDDR09 0x81 /* SCM-SCSI bridge for SDDR-09 */ -#endif -#ifdef CONFIG_USB_STORAGE_SDDR55 #define US_PR_SDDR55 0x82 /* SDDR-55 (made up) */ -#endif #define US_PR_DPCM_USB 0xf0 /* Combination CB/SDDR09 */ -#ifdef CONFIG_USB_STORAGE_FREECOM #define US_PR_FREECOM 0xf1 /* Freecom */ -#endif -#ifdef CONFIG_USB_STORAGE_DATAFAB #define US_PR_DATAFAB 0xf2 /* Datafab chipsets */ -#endif -#ifdef CONFIG_USB_STORAGE_JUMPSHOT #define US_PR_JUMPSHOT 0xf3 /* Lexar Jumpshot */ -#endif -#ifdef CONFIG_USB_STORAGE_ALAUDA #define US_PR_ALAUDA 0xf4 /* Alauda chipsets */ -#endif -#ifdef CONFIG_USB_STORAGE_KARMA #define US_PR_KARMA 0xf5 /* Rio Karma */ -#endif #define US_PR_DEVICE 0xff /* Use device's value */ /* */ +extern int usb_usual_ignore_device(struct usb_interface *intf); +extern struct usb_device_id usb_storage_usb_ids[]; + #ifdef CONFIG_USB_LIBUSUAL -extern struct usb_device_id storage_usb_ids[]; extern void usb_usual_set_present(int type); extern void usb_usual_clear_present(int type); extern int usb_usual_check_type(const struct usb_device_id *, int type); diff --git a/include/linux/user_namespace.h b/include/linux/user_namespace.h index 315bcd37522..cc4f45361db 100644 --- a/include/linux/user_namespace.h +++ b/include/linux/user_namespace.h @@ -13,6 +13,7 @@ struct user_namespace { struct kref kref; struct hlist_head uidhash_table[UIDHASH_SZ]; struct user_struct *creator; + struct work_struct destroyer; }; extern struct user_namespace init_user_ns; diff --git a/include/linux/utime.h b/include/linux/utime.h index 640be6a1959..5cdf673afbd 100644 --- a/include/linux/utime.h +++ b/include/linux/utime.h @@ -4,8 +4,8 @@ #include <linux/types.h> struct utimbuf { - time_t actime; - time_t modtime; + __kernel_time_t actime; + __kernel_time_t modtime; }; #endif diff --git a/include/linux/utsname.h b/include/linux/utsname.h index 11232676bff..3656b300de3 100644 --- a/include/linux/utsname.h +++ b/include/linux/utsname.h @@ -22,12 +22,12 @@ struct old_utsname { }; struct new_utsname { - char sysname[65]; - char nodename[65]; - char release[65]; - char version[65]; - char machine[65]; - char domainname[65]; + char sysname[__NEW_UTS_LEN + 1]; + char nodename[__NEW_UTS_LEN + 1]; + char release[__NEW_UTS_LEN + 1]; + char version[__NEW_UTS_LEN + 1]; + char machine[__NEW_UTS_LEN + 1]; + char domainname[__NEW_UTS_LEN + 1]; }; #ifdef __KERNEL__ diff --git a/include/linux/video_decoder.h b/include/linux/video_decoder.h deleted file mode 100644 index 121e26da2c1..00000000000 --- a/include/linux/video_decoder.h +++ /dev/null @@ -1,46 +0,0 @@ -#ifndef _LINUX_VIDEO_DECODER_H -#define 
_LINUX_VIDEO_DECODER_H - -#define HAVE_VIDEO_DECODER 1 - -struct video_decoder_capability { /* this name is too long */ - __u32 flags; -#define VIDEO_DECODER_PAL 1 /* can decode PAL signal */ -#define VIDEO_DECODER_NTSC 2 /* can decode NTSC */ -#define VIDEO_DECODER_SECAM 4 /* can decode SECAM */ -#define VIDEO_DECODER_AUTO 8 /* can autosense norm */ -#define VIDEO_DECODER_CCIR 16 /* CCIR-601 pixel rate (720 pixels per line) instead of square pixel rate */ - int inputs; /* number of inputs */ - int outputs; /* number of outputs */ -}; - -/* -DECODER_GET_STATUS returns the following flags. The only one you need is -DECODER_STATUS_GOOD, the others are just nice things to know. -*/ -#define DECODER_STATUS_GOOD 1 /* receiving acceptable input */ -#define DECODER_STATUS_COLOR 2 /* receiving color information */ -#define DECODER_STATUS_PAL 4 /* auto detected */ -#define DECODER_STATUS_NTSC 8 /* auto detected */ -#define DECODER_STATUS_SECAM 16 /* auto detected */ - -struct video_decoder_init { - unsigned char len; - const unsigned char *data; -}; - -#define DECODER_GET_CAPABILITIES _IOR('d', 1, struct video_decoder_capability) -#define DECODER_GET_STATUS _IOR('d', 2, int) -#define DECODER_SET_NORM _IOW('d', 3, int) -#define DECODER_SET_INPUT _IOW('d', 4, int) /* 0 <= input < #inputs */ -#define DECODER_SET_OUTPUT _IOW('d', 5, int) /* 0 <= output < #outputs */ -#define DECODER_ENABLE_OUTPUT _IOW('d', 6, int) /* boolean output enable control */ -#define DECODER_SET_PICTURE _IOW('d', 7, struct video_picture) -#define DECODER_SET_GPIO _IOW('d', 8, int) /* switch general purpose pin */ -#define DECODER_INIT _IOW('d', 9, struct video_decoder_init) /* init internal registers at once */ -#define DECODER_SET_VBI_BYPASS _IOW('d', 10, int) /* switch vbi bypass */ - -#define DECODER_DUMP _IO('d', 192) /* debug hook */ - - -#endif diff --git a/include/linux/video_encoder.h b/include/linux/video_encoder.h deleted file mode 100644 index 4b0e6907a7b..00000000000 --- a/include/linux/video_encoder.h +++ /dev/null @@ -1,21 +0,0 @@ -#ifndef _LINUX_VIDEO_ENCODER_H -#define _LINUX_VIDEO_ENCODER_H - -struct video_encoder_capability { /* this name is too long */ - __u32 flags; -#define VIDEO_ENCODER_PAL 1 /* can encode PAL signal */ -#define VIDEO_ENCODER_NTSC 2 /* can encode NTSC */ -#define VIDEO_ENCODER_SECAM 4 /* can encode SECAM */ -#define VIDEO_ENCODER_CCIR 16 /* CCIR-601 pixel rate (720 pixels per line) instead of square pixel rate */ - int inputs; /* number of inputs */ - int outputs; /* number of outputs */ -}; - -#define ENCODER_GET_CAPABILITIES _IOR('e', 1, struct video_encoder_capability) -#define ENCODER_SET_NORM _IOW('e', 2, int) -#define ENCODER_SET_INPUT _IOW('e', 3, int) /* 0 <= input < #inputs */ -#define ENCODER_SET_OUTPUT _IOW('e', 4, int) /* 0 <= output < #outputs */ -#define ENCODER_ENABLE_OUTPUT _IOW('e', 5, int) /* boolean output enable control */ - - -#endif diff --git a/include/linux/videodev.h b/include/linux/videodev.h index 15a653d4113..b19eab14097 100644 --- a/include/linux/videodev.h +++ b/include/linux/videodev.h @@ -12,9 +12,27 @@ #ifndef __LINUX_VIDEODEV_H #define __LINUX_VIDEODEV_H +#include <linux/types.h> #include <linux/ioctl.h> #include <linux/videodev2.h> +#if defined(__MIN_V4L1) && defined (__KERNEL__) + +/* + * Used by those V4L2 core functions that need a minimum V4L1 support, + * in order to allow V4L1 Compatibilty code compilation. 
+ */ + +struct video_mbuf +{ + int size; /* Total memory to map */ + int frames; /* Frames */ + int offsets[VIDEO_MAX_FRAME]; +}; + +#define VIDIOCGMBUF _IOR('v',20, struct video_mbuf) /* Memory map buffer info */ + +#else #if defined(CONFIG_VIDEO_V4L1_COMPAT) || !defined (__KERNEL__) #define VID_TYPE_CAPTURE 1 /* Can capture */ @@ -311,6 +329,7 @@ struct video_code #define VID_PLAY_END_MARK 14 #endif /* CONFIG_VIDEO_V4L1_COMPAT */ +#endif /* __MIN_V4L1 */ #endif /* __LINUX_VIDEODEV_H */ diff --git a/include/linux/videodev2.h b/include/linux/videodev2.h index 5571dbe1c0a..74f16876f38 100644 --- a/include/linux/videodev2.h +++ b/include/linux/videodev2.h @@ -318,6 +318,8 @@ struct v4l2_pix_format { /* see http://www.siliconimaging.com/RGB%20Bayer.htm */ #define V4L2_PIX_FMT_SBGGR8 v4l2_fourcc('B', 'A', '8', '1') /* 8 BGBG.. GRGR.. */ #define V4L2_PIX_FMT_SGBRG8 v4l2_fourcc('G', 'B', 'R', 'G') /* 8 GBGB.. RGRG.. */ +#define V4L2_PIX_FMT_SGRBG8 v4l2_fourcc('G', 'R', 'B', 'G') /* 8 GRGR.. BGBG.. */ + /* * 10bit raw bayer, expanded to 16 bits * xxxxrrrrrrrrrrxxxxgggggggggg xxxxggggggggggxxxxbbbbbbbbbb... @@ -336,6 +338,7 @@ struct v4l2_pix_format { /* Vendor-specific formats */ #define V4L2_PIX_FMT_WNVA v4l2_fourcc('W', 'N', 'V', 'A') /* Winnov hw compress */ #define V4L2_PIX_FMT_SN9C10X v4l2_fourcc('S', '9', '1', '0') /* SN9C10x compression */ +#define V4L2_PIX_FMT_SN9C20X_I420 v4l2_fourcc('S', '9', '2', '0') /* SN9C20x YUV 4:2:0 */ #define V4L2_PIX_FMT_PWC1 v4l2_fourcc('P', 'W', 'C', '1') /* pwc older webcam */ #define V4L2_PIX_FMT_PWC2 v4l2_fourcc('P', 'W', 'C', '2') /* pwc newer webcam */ #define V4L2_PIX_FMT_ET61X251 v4l2_fourcc('E', '6', '2', '5') /* ET61X251 compression */ @@ -344,8 +347,12 @@ struct v4l2_pix_format { #define V4L2_PIX_FMT_SPCA508 v4l2_fourcc('S', '5', '0', '8') /* YUVY per line */ #define V4L2_PIX_FMT_SPCA561 v4l2_fourcc('S', '5', '6', '1') /* compressed GBRG bayer */ #define V4L2_PIX_FMT_PAC207 v4l2_fourcc('P', '2', '0', '7') /* compressed BGGR bayer */ +#define V4L2_PIX_FMT_MR97310A v4l2_fourcc('M', '3', '1', '0') /* compressed BGGR bayer */ +#define V4L2_PIX_FMT_SQ905C v4l2_fourcc('9', '0', '5', 'C') /* compressed RGGB bayer */ #define V4L2_PIX_FMT_PJPG v4l2_fourcc('P', 'J', 'P', 'G') /* Pixart 73xx JPEG */ -#define V4L2_PIX_FMT_YVYU v4l2_fourcc('Y', 'V', 'Y', 'U') /* 16 YVU 4:2:2 */ +#define V4L2_PIX_FMT_YVYU v4l2_fourcc('Y', 'V', 'Y', 'U') /* 16 YVU 4:2:2 */ +#define V4L2_PIX_FMT_OV511 v4l2_fourcc('O', '5', '1', '1') /* ov511 JPEG */ +#define V4L2_PIX_FMT_OV518 v4l2_fourcc('O', '5', '1', '8') /* ov518 JPEG */ /* * F O R M A T E N U M E R A T I O N @@ -735,6 +742,11 @@ struct v4l2_input { #define V4L2_IN_ST_NO_SIGNAL 0x00000002 #define V4L2_IN_ST_NO_COLOR 0x00000004 +/* field 'status' - sensor orientation */ +/* If sensor is mounted upside down set both bits */ +#define V4L2_IN_ST_HFLIP 0x00000010 /* Frames are flipped horizontally */ +#define V4L2_IN_ST_VFLIP 0x00000020 /* Frames are flipped vertically */ + /* field 'status' - analog */ #define V4L2_IN_ST_NO_H_LOCK 0x00000100 /* No horizontal sync lock */ #define V4L2_IN_ST_COLOR_KILL 0x00000200 /* Color killer is active */ @@ -829,6 +841,7 @@ struct v4l2_querymenu { #define V4L2_CTRL_FLAG_UPDATE 0x0008 #define V4L2_CTRL_FLAG_INACTIVE 0x0010 #define V4L2_CTRL_FLAG_SLIDER 0x0020 +#define V4L2_CTRL_FLAG_WRITE_ONLY 0x0040 /* Query flag, to be ORed with the control ID */ #define V4L2_CTRL_FLAG_NEXT_CTRL 0x80000000 @@ -879,8 +892,16 @@ enum v4l2_power_line_frequency { #define V4L2_CID_BACKLIGHT_COMPENSATION 
(V4L2_CID_BASE+28) #define V4L2_CID_CHROMA_AGC (V4L2_CID_BASE+29) #define V4L2_CID_COLOR_KILLER (V4L2_CID_BASE+30) +#define V4L2_CID_COLORFX (V4L2_CID_BASE+31) +enum v4l2_colorfx { + V4L2_COLORFX_NONE = 0, + V4L2_COLORFX_BW = 1, + V4L2_COLORFX_SEPIA = 2, +}; +#define V4L2_CID_AUTOBRIGHTNESS (V4L2_CID_BASE+32) + /* last CID + 1 */ -#define V4L2_CID_LASTP1 (V4L2_CID_BASE+31) +#define V4L2_CID_LASTP1 (V4L2_CID_BASE+33) /* MPEG-class control IDs defined by V4L2 */ #define V4L2_CID_MPEG_BASE (V4L2_CTRL_CLASS_MPEG | 0x900) @@ -1339,6 +1360,53 @@ struct v4l2_sliced_vbi_data { }; /* + * Sliced VBI data inserted into MPEG Streams + */ + +/* + * V4L2_MPEG_STREAM_VBI_FMT_IVTV: + * + * Structure of payload contained in an MPEG 2 Private Stream 1 PES Packet in an + * MPEG-2 Program Pack that contains V4L2_MPEG_STREAM_VBI_FMT_IVTV Sliced VBI + * data + * + * Note, the MPEG-2 Program Pack and Private Stream 1 PES packet header + * definitions are not included here. See the MPEG-2 specifications for details + * on these headers. + */ + +/* Line type IDs */ +#define V4L2_MPEG_VBI_IVTV_TELETEXT_B (1) +#define V4L2_MPEG_VBI_IVTV_CAPTION_525 (4) +#define V4L2_MPEG_VBI_IVTV_WSS_625 (5) +#define V4L2_MPEG_VBI_IVTV_VPS (7) + +struct v4l2_mpeg_vbi_itv0_line { + __u8 id; /* One of V4L2_MPEG_VBI_IVTV_* above */ + __u8 data[42]; /* Sliced VBI data for the line */ +} __attribute__ ((packed)); + +struct v4l2_mpeg_vbi_itv0 { + __le32 linemask[2]; /* Bitmasks of VBI service lines present */ + struct v4l2_mpeg_vbi_itv0_line line[35]; +} __attribute__ ((packed)); + +struct v4l2_mpeg_vbi_ITV0 { + struct v4l2_mpeg_vbi_itv0_line line[36]; +} __attribute__ ((packed)); + +#define V4L2_MPEG_VBI_IVTV_MAGIC0 "itv0" +#define V4L2_MPEG_VBI_IVTV_MAGIC1 "ITV0" + +struct v4l2_mpeg_vbi_fmt_ivtv { + __u8 magic[4]; + union { + struct v4l2_mpeg_vbi_itv0 itv0; + struct v4l2_mpeg_vbi_ITV0 ITV0; + }; +} __attribute__ ((packed)); + +/* * A G G R E G A T E S T R U C T U R E S */ @@ -1403,14 +1471,6 @@ struct v4l2_dbg_chip_ident { __u32 revision; /* chip revision, chip specific */ } __attribute__ ((packed)); -/* VIDIOC_G_CHIP_IDENT_OLD: Deprecated, do not use */ -struct v4l2_chip_ident_old { - __u32 match_type; /* Match type */ - __u32 match_chip; /* Match this chip, meaning determined by match_type */ - __u32 ident; /* chip identifier as specified in <media/v4l2-chip-ident.h> */ - __u32 revision; /* chip revision, chip specific */ -}; - /* * I O C T L C O D E S F O R V I D E O D E V I C E S * @@ -1488,8 +1548,6 @@ struct v4l2_chip_ident_old { /* Experimental, meant for debugging, testing and internal use. Never use this ioctl in applications! */ #define VIDIOC_DBG_G_CHIP_IDENT _IOWR('V', 81, struct v4l2_dbg_chip_ident) -/* This is deprecated and will go away in 2.6.30 */ -#define VIDIOC_G_CHIP_IDENT_OLD _IOWR('V', 81, struct v4l2_chip_ident_old) #endif #define VIDIOC_S_HW_FREQ_SEEK _IOW('V', 82, struct v4l2_hw_freq_seek) diff --git a/include/linux/virtio.h b/include/linux/virtio.h index 06005fa9e98..4fca4f5440b 100644 --- a/include/linux/virtio.h +++ b/include/linux/virtio.h @@ -10,14 +10,17 @@ /** * virtqueue - a queue to register buffers for sending or receiving. + * @list: the chain of virtqueues for this device * @callback: the function to call when buffers are consumed (can be NULL). + * @name: the name of this virtqueue (mainly for debugging) * @vdev: the virtio device this queue was created for. * @vq_ops: the operations for this virtqueue (see below). * @priv: a pointer for the virtqueue implementation to use. 
*/ -struct virtqueue -{ +struct virtqueue { + struct list_head list; void (*callback)(struct virtqueue *vq); + const char *name; struct virtio_device *vdev; struct virtqueue_ops *vq_ops; void *priv; @@ -76,15 +79,16 @@ struct virtqueue_ops { * @dev: underlying device. * @id: the device type identification (used to match it with a driver). * @config: the configuration ops for this device. + * @vqs: the list of virtqueues for this device. * @features: the features supported by both driver and device. * @priv: private pointer for the driver's use. */ -struct virtio_device -{ +struct virtio_device { int index; struct device dev; struct virtio_device_id id; struct virtio_config_ops *config; + struct list_head vqs; /* Note that this is a Linux set_bit-style bitmap. */ unsigned long features[1]; void *priv; @@ -99,8 +103,7 @@ void unregister_virtio_device(struct virtio_device *dev); * @id_table: the ids serviced by this driver. * @feature_table: an array of feature numbers supported by this device. * @feature_table_size: number of entries in the feature table array. - * @probe: the function to call when a device is found. Returns a token for - * remove, or PTR_ERR(). + * @probe: the function to call when a device is found. Returns 0 or -errno. * @remove: the function when a device is removed. * @config_changed: optional function to call when the device configuration * changes; may be called in interrupt context. diff --git a/include/linux/virtio_blk.h b/include/linux/virtio_blk.h index c1aef85243b..8dab9f2b883 100644 --- a/include/linux/virtio_blk.h +++ b/include/linux/virtio_blk.h @@ -2,6 +2,7 @@ #define _LINUX_VIRTIO_BLK_H /* This header is BSD licensed so anyone can use the definitions to implement * compatible drivers/servers. */ +#include <linux/types.h> #include <linux/virtio_config.h> /* The ID for virtio_block */ @@ -14,9 +15,12 @@ #define VIRTIO_BLK_F_GEOMETRY 4 /* Legacy geometry available */ #define VIRTIO_BLK_F_RO 5 /* Disk is read-only */ #define VIRTIO_BLK_F_BLK_SIZE 6 /* Block size of disk is available*/ +#define VIRTIO_BLK_F_SCSI 7 /* Supports scsi command passthru */ +#define VIRTIO_BLK_F_IDENTIFY 8 /* ATA IDENTIFY supported */ -struct virtio_blk_config -{ +#define VIRTIO_BLK_ID_BYTES (sizeof(__u16[256])) /* IDENTIFY DATA */ + +struct virtio_blk_config { /* The capacity (in 512-byte sectors). */ __u64 capacity; /* The maximum segment size (if VIRTIO_BLK_F_SIZE_MAX) */ @@ -31,6 +35,7 @@ struct virtio_blk_config } geometry; /* block size of device (if VIRTIO_BLK_F_BLK_SIZE) */ __u32 blk_size; + __u8 identify[VIRTIO_BLK_ID_BYTES]; } __attribute__((packed)); /* These two define direction. */ @@ -44,8 +49,7 @@ struct virtio_blk_config #define VIRTIO_BLK_T_BARRIER 0x80000000 /* This is the first element of the read scatter-gather list. */ -struct virtio_blk_outhdr -{ +struct virtio_blk_outhdr { /* VIRTIO_BLK_T* */ __u32 type; /* io priority. */ @@ -54,6 +58,13 @@ struct virtio_blk_outhdr __u64 sector; }; +struct virtio_scsi_inhdr { + __u32 errors; + __u32 data_len; + __u32 sense_len; + __u32 residual; +}; + /* And this is the final byte of the write scatter-gather list. 
*/ #define VIRTIO_BLK_S_OK 0 #define VIRTIO_BLK_S_IOERR 1 diff --git a/include/linux/virtio_config.h b/include/linux/virtio_config.h index bf8ec283b23..e547e3c8ee9 100644 --- a/include/linux/virtio_config.h +++ b/include/linux/virtio_config.h @@ -29,6 +29,7 @@ #define VIRTIO_F_NOTIFY_ON_EMPTY 24 #ifdef __KERNEL__ +#include <linux/err.h> #include <linux/virtio.h> /** @@ -49,15 +50,26 @@ * @set_status: write the status byte * vdev: the virtio_device * status: the new status byte + * @request_vqs: request the specified number of virtqueues + * vdev: the virtio_device + * max_vqs: the max number of virtqueues we want + * If supplied, must call before any virtqueues are instantiated. + * To modify the max number of virtqueues after request_vqs has been + * called, call free_vqs and then request_vqs with a new value. + * @free_vqs: cleanup resources allocated by request_vqs + * vdev: the virtio_device + * If supplied, must call after all virtqueues have been deleted. * @reset: reset the device * vdev: the virtio device * After this, status and feature negotiation must be done again - * @find_vq: find a virtqueue and instantiate it. + * @find_vqs: find virtqueues and instantiate them. * vdev: the virtio_device - * index: the 0-based virtqueue number in case there's more than one. - * callback: the virqtueue callback - * Returns the new virtqueue or ERR_PTR() (eg. -ENOENT). - * @del_vq: free a virtqueue found by find_vq(). + * nvqs: the number of virtqueues to find + * vqs: on success, includes new virtqueues + * callbacks: array of callbacks, for each virtqueue + * names: array of virtqueue names (mainly for debugging) + * Returns 0 on success or error status + * @del_vqs: free virtqueues found by find_vqs(). * @get_features: get the array of feature bits for this device. * vdev: the virtio_device * Returns the first 32 feature bits (all we currently need). @@ -66,8 +78,8 @@ * This gives the final feature bits for the device: it can change * the dev->feature bits if it wants. 
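A minimal sketch of the multi-queue setup described in the find_vqs() documentation above, assuming a device with an rx/tx pair; example_setup_vqs() and the callback names are illustrative, and vq_callback_t is the typedef introduced in the next hunk.

#include <linux/virtio.h>
#include <linux/virtio_config.h>

static void example_rx_done(struct virtqueue *vq) { /* ... */ }
static void example_tx_done(struct virtqueue *vq) { /* ... */ }

/* Illustrative only: grab an rx/tx queue pair in a single call. */
static int example_setup_vqs(struct virtio_device *vdev,
                             struct virtqueue **rx, struct virtqueue **tx)
{
        struct virtqueue *vqs[2];
        vq_callback_t *callbacks[] = { example_rx_done, example_tx_done };
        const char *names[] = { "rx", "tx" };
        int err;

        err = vdev->config->find_vqs(vdev, 2, vqs, callbacks, names);
        if (err)
                return err;
        *rx = vqs[0];
        *tx = vqs[1];
        return 0;
}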
*/ -struct virtio_config_ops -{ +typedef void vq_callback_t(struct virtqueue *); +struct virtio_config_ops { void (*get)(struct virtio_device *vdev, unsigned offset, void *buf, unsigned len); void (*set)(struct virtio_device *vdev, unsigned offset, @@ -75,10 +87,11 @@ struct virtio_config_ops u8 (*get_status)(struct virtio_device *vdev); void (*set_status)(struct virtio_device *vdev, u8 status); void (*reset)(struct virtio_device *vdev); - struct virtqueue *(*find_vq)(struct virtio_device *vdev, - unsigned index, - void (*callback)(struct virtqueue *)); - void (*del_vq)(struct virtqueue *vq); + int (*find_vqs)(struct virtio_device *, unsigned nvqs, + struct virtqueue *vqs[], + vq_callback_t *callbacks[], + const char *names[]); + void (*del_vqs)(struct virtio_device *); u32 (*get_features)(struct virtio_device *vdev); void (*finalize_features)(struct virtio_device *vdev); }; @@ -99,7 +112,9 @@ static inline bool virtio_has_feature(const struct virtio_device *vdev, if (__builtin_constant_p(fbit)) BUILD_BUG_ON(fbit >= 32); - virtio_check_driver_offered_feature(vdev, fbit); + if (fbit < VIRTIO_TRANSPORT_F_START) + virtio_check_driver_offered_feature(vdev, fbit); + return test_bit(fbit, vdev->features); } @@ -126,5 +141,18 @@ static inline int virtio_config_buf(struct virtio_device *vdev, vdev->config->get(vdev, offset, buf, len); return 0; } + +static inline +struct virtqueue *virtio_find_single_vq(struct virtio_device *vdev, + vq_callback_t *c, const char *n) +{ + vq_callback_t *callbacks[] = { c }; + const char *names[] = { n }; + struct virtqueue *vq; + int err = vdev->config->find_vqs(vdev, 1, &vq, callbacks, names); + if (err < 0) + return ERR_PTR(err); + return vq; +} #endif /* __KERNEL__ */ #endif /* _LINUX_VIRTIO_CONFIG_H */ diff --git a/include/linux/virtio_console.h b/include/linux/virtio_console.h index 7615ffcdd55..dc161115ae3 100644 --- a/include/linux/virtio_console.h +++ b/include/linux/virtio_console.h @@ -1,5 +1,6 @@ #ifndef _LINUX_VIRTIO_CONSOLE_H #define _LINUX_VIRTIO_CONSOLE_H +#include <linux/types.h> #include <linux/virtio_config.h> /* This header, excluding the #ifdef __KERNEL__ part, is BSD licensed so * anyone can use the definitions to implement compatible drivers/servers. */ diff --git a/include/linux/virtio_net.h b/include/linux/virtio_net.h index 5cdd0aa8bde..d8dd539c9f4 100644 --- a/include/linux/virtio_net.h +++ b/include/linux/virtio_net.h @@ -2,7 +2,9 @@ #define _LINUX_VIRTIO_NET_H /* This header is BSD licensed so anyone can use the definitions to implement * compatible drivers/servers. */ +#include <linux/types.h> #include <linux/virtio_config.h> +#include <linux/if_ether.h> /* The ID for virtio_net */ #define VIRTIO_ID_NET 1 @@ -21,17 +23,24 @@ #define VIRTIO_NET_F_HOST_ECN 13 /* Host can handle TSO[6] w/ ECN in. */ #define VIRTIO_NET_F_HOST_UFO 14 /* Host can handle UFO in. */ #define VIRTIO_NET_F_MRG_RXBUF 15 /* Host can merge receive buffers. 
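A minimal sketch for the common single-queue case using the virtio_find_single_vq() helper added above; example_init_vq() and example_done() are illustrative names, and "requests" is an arbitrary debug name.

#include <linux/err.h>
#include <linux/virtio.h>
#include <linux/virtio_config.h>

static void example_done(struct virtqueue *vq) { /* ... */ }

/* Illustrative only: one queue, so no callbacks[]/names[] arrays needed. */
static int example_init_vq(struct virtio_device *vdev, struct virtqueue **vq)
{
        *vq = virtio_find_single_vq(vdev, example_done, "requests");
        if (IS_ERR(*vq))
                return PTR_ERR(*vq);
        return 0;
}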
*/ +#define VIRTIO_NET_F_STATUS 16 /* virtio_net_config.status available */ +#define VIRTIO_NET_F_CTRL_VQ 17 /* Control channel available */ +#define VIRTIO_NET_F_CTRL_RX 18 /* Control channel RX mode support */ +#define VIRTIO_NET_F_CTRL_VLAN 19 /* Control channel VLAN filtering */ +#define VIRTIO_NET_F_CTRL_RX_EXTRA 20 /* Extra RX mode control support */ -struct virtio_net_config -{ +#define VIRTIO_NET_S_LINK_UP 1 /* Link is up */ + +struct virtio_net_config { /* The config defining mac address (if VIRTIO_NET_F_MAC) */ __u8 mac[6]; + /* See VIRTIO_NET_F_STATUS and VIRTIO_NET_S_* above */ + __u16 status; } __attribute__((packed)); /* This is the first element of the scatter-gather list. If you don't * specify GSO or CSUM features, you can simply ignore the header. */ -struct virtio_net_hdr -{ +struct virtio_net_hdr { #define VIRTIO_NET_HDR_F_NEEDS_CSUM 1 // Use csum_start, csum_offset __u8 flags; #define VIRTIO_NET_HDR_GSO_NONE 0 // Not a GSO frame @@ -53,4 +62,72 @@ struct virtio_net_hdr_mrg_rxbuf { __u16 num_buffers; /* Number of merged rx buffers */ }; +/* + * Control virtqueue data structures + * + * The control virtqueue expects a header in the first sg entry + * and an ack/status response in the last entry. Data for the + * command goes in between. + */ +struct virtio_net_ctrl_hdr { + __u8 class; + __u8 cmd; +} __attribute__((packed)); + +typedef __u8 virtio_net_ctrl_ack; + +#define VIRTIO_NET_OK 0 +#define VIRTIO_NET_ERR 1 + +/* + * Control the RX mode, ie. promisucous, allmulti, etc... + * All commands require an "out" sg entry containing a 1 byte + * state value, zero = disable, non-zero = enable. Commands + * 0 and 1 are supported with the VIRTIO_NET_F_CTRL_RX feature. + * Commands 2-5 are added with VIRTIO_NET_F_CTRL_RX_EXTRA. + */ +#define VIRTIO_NET_CTRL_RX 0 + #define VIRTIO_NET_CTRL_RX_PROMISC 0 + #define VIRTIO_NET_CTRL_RX_ALLMULTI 1 + #define VIRTIO_NET_CTRL_RX_ALLUNI 2 + #define VIRTIO_NET_CTRL_RX_NOMULTI 3 + #define VIRTIO_NET_CTRL_RX_NOUNI 4 + #define VIRTIO_NET_CTRL_RX_NOBCAST 5 + +/* + * Control the MAC filter table. + * + * The MAC filter table is managed by the hypervisor, the guest should + * assume the size is infinite. Filtering should be considered + * non-perfect, ie. based on hypervisor resources, the guest may + * received packets from sources not specified in the filter list. + * + * In addition to the class/cmd header, the TABLE_SET command requires + * two out scatterlists. Each contains a 4 byte count of entries followed + * by a concatenated byte stream of the ETH_ALEN MAC addresses. The + * first sg list contains unicast addresses, the second is for multicast. + * This functionality is present if the VIRTIO_NET_F_CTRL_RX feature + * is available. + */ +struct virtio_net_ctrl_mac { + __u32 entries; + __u8 macs[][ETH_ALEN]; +} __attribute__((packed)); + +#define VIRTIO_NET_CTRL_MAC 1 + #define VIRTIO_NET_CTRL_MAC_TABLE_SET 0 + +/* + * Control VLAN filtering + * + * The VLAN filter table is controlled via a simple ADD/DEL interface. + * VLAN IDs not added may be filterd by the hypervisor. Del is the + * opposite of add. Both commands expect an out entry containing a 2 + * byte VLAN ID. VLAN filterting is available with the + * VIRTIO_NET_F_CTRL_VLAN feature bit. 
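A minimal sketch of issuing a control-virtqueue command with the header / one-byte state / ack layout described above, assuming an already-instantiated control queue and the era's vq_ops->add_buf()/kick() interface; example_set_promisc() is an illustrative name, and a real driver would wait for the host to consume the buffers before reading the ack.

#include <linux/errno.h>
#include <linux/scatterlist.h>
#include <linux/virtio.h>
#include <linux/virtio_net.h>

/* Illustrative only: header, 1-byte state and ack chained as out/out/in. */
static int example_set_promisc(struct virtqueue *cvq, int on)
{
        struct scatterlist sg[3];
        struct virtio_net_ctrl_hdr hdr = {
                .class = VIRTIO_NET_CTRL_RX,
                .cmd   = VIRTIO_NET_CTRL_RX_PROMISC,
        };
        __u8 state = on ? 1 : 0;
        virtio_net_ctrl_ack ack = VIRTIO_NET_ERR;

        sg_init_table(sg, 3);
        sg_set_buf(&sg[0], &hdr, sizeof(hdr));
        sg_set_buf(&sg[1], &state, sizeof(state));
        sg_set_buf(&sg[2], &ack, sizeof(ack));

        if (cvq->vq_ops->add_buf(cvq, sg, 2, 1, &hdr) < 0)
                return -ENOSPC;
        cvq->vq_ops->kick(cvq);
        /* A real driver waits for the host to consume the buffers here. */
        return ack == VIRTIO_NET_OK ? 0 : -EIO;
}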
+ */ +#define VIRTIO_NET_CTRL_VLAN 2 + #define VIRTIO_NET_CTRL_VLAN_ADD 0 + #define VIRTIO_NET_CTRL_VLAN_DEL 1 + #endif /* _LINUX_VIRTIO_NET_H */ diff --git a/include/linux/virtio_pci.h b/include/linux/virtio_pci.h index cd0fd5d181a..9a3d7c48c62 100644 --- a/include/linux/virtio_pci.h +++ b/include/linux/virtio_pci.h @@ -47,9 +47,17 @@ /* The bit of the ISR which indicates a device configuration change. */ #define VIRTIO_PCI_ISR_CONFIG 0x2 +/* MSI-X registers: only enabled if MSI-X is enabled. */ +/* A 16-bit vector for configuration changes. */ +#define VIRTIO_MSI_CONFIG_VECTOR 20 +/* A 16-bit vector for selected queue notifications. */ +#define VIRTIO_MSI_QUEUE_VECTOR 22 +/* Vector value used to disable MSI for queue */ +#define VIRTIO_MSI_NO_VECTOR 0xffff + /* The remaining space is defined by each driver as the per-driver * configuration space */ -#define VIRTIO_PCI_CONFIG 20 +#define VIRTIO_PCI_CONFIG(dev) ((dev)->msix_enabled ? 24 : 20) /* Virtio ABI version, this must match exactly */ #define VIRTIO_PCI_ABI_VERSION 0 diff --git a/include/linux/virtio_ring.h b/include/linux/virtio_ring.h index 71e03722fb5..e4d144b132b 100644 --- a/include/linux/virtio_ring.h +++ b/include/linux/virtio_ring.h @@ -14,6 +14,8 @@ #define VRING_DESC_F_NEXT 1 /* This marks a buffer as write-only (otherwise read-only). */ #define VRING_DESC_F_WRITE 2 +/* This means the buffer contains a list of buffer descriptors. */ +#define VRING_DESC_F_INDIRECT 4 /* The Host uses this in used->flags to advise the Guest: don't kick me when * you add a buffer. It's unreliable, so it's simply an optimization. Guest @@ -24,9 +26,11 @@ * optimization. */ #define VRING_AVAIL_F_NO_INTERRUPT 1 +/* We support indirect buffer descriptors */ +#define VIRTIO_RING_F_INDIRECT_DESC 28 + /* Virtio ring descriptors: 16 bytes. These can chain together via "next". */ -struct vring_desc -{ +struct vring_desc { /* Address (guest-physical). */ __u64 addr; /* Length. */ @@ -37,24 +41,21 @@ struct vring_desc __u16 next; }; -struct vring_avail -{ +struct vring_avail { __u16 flags; __u16 idx; __u16 ring[]; }; /* u32 is used here for ids for padding reasons. */ -struct vring_used_elem -{ +struct vring_used_elem { /* Index of start of used descriptor chain. */ __u32 id; /* Total length of the descriptor chain which was used (written to) */ __u32 len; }; -struct vring_used -{ +struct vring_used { __u16 flags; __u16 idx; struct vring_used_elem ring[]; @@ -119,7 +120,8 @@ struct virtqueue *vring_new_virtqueue(unsigned int num, struct virtio_device *vdev, void *pages, void (*notify)(struct virtqueue *vq), - void (*callback)(struct virtqueue *vq)); + void (*callback)(struct virtqueue *vq), + const char *name); void vring_del_virtqueue(struct virtqueue *vq); /* Filter out transport-specific feature bits. */ void vring_transport_features(struct virtio_device *vdev); diff --git a/include/linux/vlynq.h b/include/linux/vlynq.h new file mode 100644 index 00000000000..8f6a95882b0 --- /dev/null +++ b/include/linux/vlynq.h @@ -0,0 +1,161 @@ +/* + * Copyright (C) 2006, 2007 Eugene Konev <ejka@openwrt.org> + * + * This program is free software; you can redistribute it and/or modify + * it under the terms of the GNU General Public License as published by + * the Free Software Foundation; either version 2 of the License, or + * (at your option) any later version. + * + * This program is distributed in the hope that it will be useful, + * but WITHOUT ANY WARRANTY; without even the implied warranty of + * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. 
See the + * GNU General Public License for more details. + * + * You should have received a copy of the GNU General Public License + * along with this program; if not, write to the Free Software + * Foundation, Inc., 51 Franklin St, Fifth Floor, Boston, MA 02110-1301 USA + */ + +#ifndef __VLYNQ_H__ +#define __VLYNQ_H__ + +#include <linux/device.h> +#include <linux/module.h> +#include <linux/types.h> + +#define VLYNQ_NUM_IRQS 32 + +struct vlynq_mapping { + u32 size; + u32 offset; +}; + +enum vlynq_divisor { + vlynq_div_auto = 0, + vlynq_ldiv1, + vlynq_ldiv2, + vlynq_ldiv3, + vlynq_ldiv4, + vlynq_ldiv5, + vlynq_ldiv6, + vlynq_ldiv7, + vlynq_ldiv8, + vlynq_rdiv1, + vlynq_rdiv2, + vlynq_rdiv3, + vlynq_rdiv4, + vlynq_rdiv5, + vlynq_rdiv6, + vlynq_rdiv7, + vlynq_rdiv8, + vlynq_div_external +}; + +struct vlynq_device_id { + u32 id; + enum vlynq_divisor divisor; + unsigned long driver_data; +}; + +struct vlynq_regs; +struct vlynq_device { + u32 id, dev_id; + int local_irq; + int remote_irq; + enum vlynq_divisor divisor; + u32 regs_start, regs_end; + u32 mem_start, mem_end; + u32 irq_start, irq_end; + int irq; + int enabled; + struct vlynq_regs *local; + struct vlynq_regs *remote; + struct device dev; +}; + +struct vlynq_driver { + char *name; + struct vlynq_device_id *id_table; + int (*probe)(struct vlynq_device *dev, struct vlynq_device_id *id); + void (*remove)(struct vlynq_device *dev); + struct device_driver driver; +}; + +struct plat_vlynq_ops { + int (*on)(struct vlynq_device *dev); + void (*off)(struct vlynq_device *dev); +}; + +static inline struct vlynq_driver *to_vlynq_driver(struct device_driver *drv) +{ + return container_of(drv, struct vlynq_driver, driver); +} + +static inline struct vlynq_device *to_vlynq_device(struct device *device) +{ + return container_of(device, struct vlynq_device, dev); +} + +extern struct bus_type vlynq_bus_type; + +extern int __vlynq_register_driver(struct vlynq_driver *driver, + struct module *owner); + +static inline int vlynq_register_driver(struct vlynq_driver *driver) +{ + return __vlynq_register_driver(driver, THIS_MODULE); +} + +static inline void *vlynq_get_drvdata(struct vlynq_device *dev) +{ + return dev_get_drvdata(&dev->dev); +} + +static inline void vlynq_set_drvdata(struct vlynq_device *dev, void *data) +{ + dev_set_drvdata(&dev->dev, data); +} + +static inline u32 vlynq_mem_start(struct vlynq_device *dev) +{ + return dev->mem_start; +} + +static inline u32 vlynq_mem_end(struct vlynq_device *dev) +{ + return dev->mem_end; +} + +static inline u32 vlynq_mem_len(struct vlynq_device *dev) +{ + return dev->mem_end - dev->mem_start + 1; +} + +static inline int vlynq_virq_to_irq(struct vlynq_device *dev, int virq) +{ + int irq = dev->irq_start + virq; + if ((irq < dev->irq_start) || (irq > dev->irq_end)) + return -EINVAL; + + return irq; +} + +static inline int vlynq_irq_to_virq(struct vlynq_device *dev, int irq) +{ + if ((irq < dev->irq_start) || (irq > dev->irq_end)) + return -EINVAL; + + return irq - dev->irq_start; +} + +extern void vlynq_unregister_driver(struct vlynq_driver *driver); +extern int vlynq_enable_device(struct vlynq_device *dev); +extern void vlynq_disable_device(struct vlynq_device *dev); +extern int vlynq_set_local_mapping(struct vlynq_device *dev, u32 tx_offset, + struct vlynq_mapping *mapping); +extern int vlynq_set_remote_mapping(struct vlynq_device *dev, u32 tx_offset, + struct vlynq_mapping *mapping); +extern int vlynq_set_local_irq(struct vlynq_device *dev, int virq); +extern int vlynq_set_remote_irq(struct vlynq_device 
*dev, int virq); + +#endif /* __VLYNQ_H__ */ diff --git a/include/linux/vmalloc.h b/include/linux/vmalloc.h index 506e7620a98..a43ebec3a7b 100644 --- a/include/linux/vmalloc.h +++ b/include/linux/vmalloc.h @@ -84,6 +84,10 @@ extern struct vm_struct *get_vm_area_caller(unsigned long size, unsigned long flags, void *caller); extern struct vm_struct *__get_vm_area(unsigned long size, unsigned long flags, unsigned long start, unsigned long end); +extern struct vm_struct *__get_vm_area_caller(unsigned long size, + unsigned long flags, + unsigned long start, unsigned long end, + void *caller); extern struct vm_struct *get_vm_area_node(unsigned long size, unsigned long flags, int node, gfp_t gfp_mask); @@ -91,6 +95,9 @@ extern struct vm_struct *remove_vm_area(const void *addr); extern int map_vm_area(struct vm_struct *area, pgprot_t prot, struct page ***pages); +extern int map_kernel_range_noflush(unsigned long start, unsigned long size, + pgprot_t prot, struct page **pages); +extern void unmap_kernel_range_noflush(unsigned long addr, unsigned long size); extern void unmap_kernel_range(unsigned long addr, unsigned long size); /* Allocate/destroy a 'vmalloc' VM area. */ @@ -106,5 +113,6 @@ extern long vwrite(char *buf, char *addr, unsigned long count); */ extern rwlock_t vmlist_lock; extern struct vm_struct *vmlist; +extern __init void vm_area_register_early(struct vm_struct *vm, size_t align); #endif /* _LINUX_VMALLOC_H */ diff --git a/include/linux/vmstat.h b/include/linux/vmstat.h index 524cd1b28ec..81a97cf8f0a 100644 --- a/include/linux/vmstat.h +++ b/include/linux/vmstat.h @@ -36,12 +36,14 @@ enum vm_event_item { PGPGIN, PGPGOUT, PSWPIN, PSWPOUT, FOR_ALL_ZONES(PGSTEAL), FOR_ALL_ZONES(PGSCAN_KSWAPD), FOR_ALL_ZONES(PGSCAN_DIRECT), +#ifdef CONFIG_NUMA + PGSCAN_ZONE_RECLAIM_FAILED, +#endif PGINODESTEAL, SLABS_SCANNED, KSWAPD_STEAL, KSWAPD_INODESTEAL, PAGEOUTRUN, ALLOCSTALL, PGROTATED, #ifdef CONFIG_HUGETLB_PAGE HTLB_BUDDY_PGALLOC, HTLB_BUDDY_PGALLOC_FAIL, #endif -#ifdef CONFIG_UNEVICTABLE_LRU UNEVICTABLE_PGCULLED, /* culled to noreclaim list */ UNEVICTABLE_PGSCANNED, /* scanned for reclaimability */ UNEVICTABLE_PGRESCUED, /* rescued from noreclaim list */ @@ -50,7 +52,6 @@ enum vm_event_item { PGPGIN, PGPGOUT, PSWPIN, PSWPOUT, UNEVICTABLE_PGCLEARED, /* on COW, page truncate */ UNEVICTABLE_PGSTRANDED, /* unable to isolate on unlock */ UNEVICTABLE_MLOCKFREED, -#endif NR_VM_EVENT_ITEMS }; diff --git a/include/linux/w1-gpio.h b/include/linux/w1-gpio.h index 9797fec7748..3adeff82212 100644 --- a/include/linux/w1-gpio.h +++ b/include/linux/w1-gpio.h @@ -18,6 +18,7 @@ struct w1_gpio_platform_data { unsigned int pin; unsigned int is_open_drain:1; + void (*enable_external_pullup)(int enable); }; #endif /* _LINUX_W1_GPIO_H */ diff --git a/include/linux/wait.h b/include/linux/wait.h index ef609f842fa..cf3c2f5dba5 100644 --- a/include/linux/wait.h +++ b/include/linux/wait.h @@ -77,7 +77,14 @@ struct task_struct; #define __WAIT_BIT_KEY_INITIALIZER(word, bit) \ { .flags = word, .bit_nr = bit, } -extern void init_waitqueue_head(wait_queue_head_t *q); +extern void __init_waitqueue_head(wait_queue_head_t *q, struct lock_class_key *); + +#define init_waitqueue_head(q) \ + do { \ + static struct lock_class_key __key; \ + \ + __init_waitqueue_head((q), &__key); \ + } while (0) #ifdef CONFIG_LOCKDEP # define __WAIT_QUEUE_HEAD_INIT_ONSTACK(name) \ @@ -133,8 +140,11 @@ static inline void __remove_wait_queue(wait_queue_head_t *head, } void __wake_up(wait_queue_head_t *q, unsigned int mode, int nr, void *key); -extern 
void __wake_up_locked(wait_queue_head_t *q, unsigned int mode); -extern void __wake_up_sync(wait_queue_head_t *q, unsigned int mode, int nr); +void __wake_up_locked_key(wait_queue_head_t *q, unsigned int mode, void *key); +void __wake_up_sync_key(wait_queue_head_t *q, unsigned int mode, int nr, + void *key); +void __wake_up_locked(wait_queue_head_t *q, unsigned int mode); +void __wake_up_sync(wait_queue_head_t *q, unsigned int mode, int nr); void __wake_up_bit(wait_queue_head_t *, void *, int); int __wait_on_bit(wait_queue_head_t *, struct wait_bit_queue *, int (*)(void *), unsigned); int __wait_on_bit_lock(wait_queue_head_t *, struct wait_bit_queue *, int (*)(void *), unsigned); @@ -153,21 +163,17 @@ wait_queue_head_t *bit_waitqueue(void *, int); #define wake_up_interruptible_all(x) __wake_up(x, TASK_INTERRUPTIBLE, 0, NULL) #define wake_up_interruptible_sync(x) __wake_up_sync((x), TASK_INTERRUPTIBLE, 1) -#ifdef CONFIG_DEBUG_LOCK_ALLOC /* - * macro to avoid include hell + * Wakeup macros to be used to report events to the targets. */ -#define wake_up_nested(x, s) \ -do { \ - unsigned long flags; \ - \ - spin_lock_irqsave_nested(&(x)->lock, flags, (s)); \ - wake_up_locked(x); \ - spin_unlock_irqrestore(&(x)->lock, flags); \ -} while (0) -#else -#define wake_up_nested(x, s) wake_up(x) -#endif +#define wake_up_poll(x, m) \ + __wake_up(x, TASK_NORMAL, 1, (void *) (m)) +#define wake_up_locked_poll(x, m) \ + __wake_up_locked_key((x), TASK_NORMAL, (void *) (m)) +#define wake_up_interruptible_poll(x, m) \ + __wake_up(x, TASK_INTERRUPTIBLE, 1, (void *) (m)) +#define wake_up_interruptible_sync_poll(x, m) \ + __wake_up_sync_key((x), TASK_INTERRUPTIBLE, 1, (void *) (m)) #define __wait_event(wq, condition) \ do { \ @@ -333,16 +339,19 @@ do { \ for (;;) { \ prepare_to_wait_exclusive(&wq, &__wait, \ TASK_INTERRUPTIBLE); \ - if (condition) \ + if (condition) { \ + finish_wait(&wq, &__wait); \ break; \ + } \ if (!signal_pending(current)) { \ schedule(); \ continue; \ } \ ret = -ERESTARTSYS; \ + abort_exclusive_wait(&wq, &__wait, \ + TASK_INTERRUPTIBLE, NULL); \ break; \ } \ - finish_wait(&wq, &__wait); \ } while (0) #define wait_event_interruptible_exclusive(wq, condition) \ @@ -431,16 +440,20 @@ extern long interruptible_sleep_on_timeout(wait_queue_head_t *q, void prepare_to_wait(wait_queue_head_t *q, wait_queue_t *wait, int state); void prepare_to_wait_exclusive(wait_queue_head_t *q, wait_queue_t *wait, int state); void finish_wait(wait_queue_head_t *q, wait_queue_t *wait); +void abort_exclusive_wait(wait_queue_head_t *q, wait_queue_t *wait, + unsigned int mode, void *key); int autoremove_wake_function(wait_queue_t *wait, unsigned mode, int sync, void *key); int wake_bit_function(wait_queue_t *wait, unsigned mode, int sync, void *key); -#define DEFINE_WAIT(name) \ +#define DEFINE_WAIT_FUNC(name, function) \ wait_queue_t name = { \ .private = current, \ - .func = autoremove_wake_function, \ + .func = function, \ .task_list = LIST_HEAD_INIT((name).task_list), \ } +#define DEFINE_WAIT(name) DEFINE_WAIT_FUNC(name, autoremove_wake_function) + #define DEFINE_WAIT_BIT(name, word, bit) \ struct wait_bit_queue name = { \ .key = __WAIT_BIT_KEY_INITIALIZER(word, bit), \ diff --git a/include/linux/wimax.h b/include/linux/wimax.h index c89de7f4e5b..4fdcc563551 100644 --- a/include/linux/wimax.h +++ b/include/linux/wimax.h @@ -59,7 +59,7 @@ enum { * M - Major: change if removing or modifying an existing call. 
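A minimal sketch of the targeted wakeups added to wait.h above: a driver marking data readable and waking only sleepers interested in POLLIN; struct example_dev and example_data_ready() are illustrative names.

#include <linux/poll.h>
#include <linux/sched.h>
#include <linux/spinlock.h>
#include <linux/types.h>
#include <linux/wait.h>

struct example_dev {
        spinlock_t lock;
        bool have_data;
        wait_queue_head_t read_wait;
};

/* Illustrative only: wake just the waiters polling for readable data. */
static void example_data_ready(struct example_dev *dev)
{
        spin_lock(&dev->lock);
        dev->have_data = true;
        spin_unlock(&dev->lock);
        wake_up_interruptible_poll(&dev->read_wait, POLLIN | POLLRDNORM);
}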
* m - minor: change when adding a new call */ - WIMAX_GNL_VERSION = 00, + WIMAX_GNL_VERSION = 01, /* Generic NetLink attributes */ WIMAX_GNL_ATTR_INVALID = 0x00, WIMAX_GNL_ATTR_MAX = 10, @@ -78,6 +78,7 @@ enum { WIMAX_GNL_OP_RFKILL, /* Run wimax_rfkill() */ WIMAX_GNL_OP_RESET, /* Run wimax_rfkill() */ WIMAX_GNL_RE_STATE_CHANGE, /* Report: status change */ + WIMAX_GNL_OP_STATE_GET, /* Request for current state */ }; @@ -113,6 +114,10 @@ enum { WIMAX_GNL_RESET_IFIDX = 1, }; +/* Atributes for wimax_state_get() */ +enum { + WIMAX_GNL_STGET_IFIDX = 1, +}; /* * Attributes for the Report State Change diff --git a/include/linux/wimax/debug.h b/include/linux/wimax/debug.h index ba0c49399a8..c703e034042 100644 --- a/include/linux/wimax/debug.h +++ b/include/linux/wimax/debug.h @@ -178,7 +178,7 @@ void __d_head(char *head, size_t head_size, WARN_ON(1); } else snprintf(head, head_size, "%s %s: ", - dev_driver_string(dev), dev->bus_id); + dev_driver_string(dev), dev_name(dev)); } diff --git a/include/linux/wimax/i2400m.h b/include/linux/wimax/i2400m.h index 74198f5bb4d..433693ef2bb 100644 --- a/include/linux/wimax/i2400m.h +++ b/include/linux/wimax/i2400m.h @@ -207,6 +207,7 @@ enum i2400m_pt { I2400M_PT_TRACE, /* For device debug */ I2400M_PT_RESET_WARM, /* device reset */ I2400M_PT_RESET_COLD, /* USB[transport] reset, like reconnect */ + I2400M_PT_EDATA, /* Extended RX data */ I2400M_PT_ILLEGAL }; @@ -221,9 +222,51 @@ struct i2400m_pl_data_hdr { } __attribute__((packed)); +/* + * Payload for an extended data packet + * + * New in fw v1.4 + * + * @reorder: if this payload has to be reorder or not (and how) + * @cs: the type of data in the packet, as defined per (802.16e + * T11.13.19.1). Currently only 2 (IPv4 packet) supported. + * + * This is prefixed to each and every INCOMING DATA packet. 
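A minimal sketch of unpacking that reorder word with the I2400M_RO_* masks and shifts defined just below; example_parse_edata() is an illustrative name, and a real consumer would first check I2400M_RO_NEEDED.

#include <asm/byteorder.h>
#include <linux/kernel.h>
#include <linux/wimax/i2400m.h>

/* Illustrative only: split the little-endian reorder word into its fields. */
static void example_parse_edata(const struct i2400m_pl_edata_hdr *hdr,
                                unsigned *ro_type, unsigned *cin, unsigned *sn)
{
        u32 reorder = le32_to_cpu(hdr->reorder);

        *ro_type = (reorder >> I2400M_RO_TYPE_SHIFT) & I2400M_RO_TYPE;
        *cin = (reorder >> I2400M_RO_CIN_SHIFT) & I2400M_RO_CIN;
        *sn = (reorder >> I2400M_RO_SN_SHIFT) & I2400M_RO_SN;
}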
+ */ +struct i2400m_pl_edata_hdr { + __le32 reorder; /* bits defined in i2400m_ro */ + __u8 cs; + __u8 reserved[11]; +} __attribute__((packed)); + +enum i2400m_cs { + I2400M_CS_IPV4_0 = 0, + I2400M_CS_IPV4 = 2, +}; + +enum i2400m_ro { + I2400M_RO_NEEDED = 0x01, + I2400M_RO_TYPE = 0x03, + I2400M_RO_TYPE_SHIFT = 1, + I2400M_RO_CIN = 0x0f, + I2400M_RO_CIN_SHIFT = 4, + I2400M_RO_FBN = 0x07ff, + I2400M_RO_FBN_SHIFT = 8, + I2400M_RO_SN = 0x07ff, + I2400M_RO_SN_SHIFT = 21, +}; + +enum i2400m_ro_type { + I2400M_RO_TYPE_RESET = 0, + I2400M_RO_TYPE_PACKET, + I2400M_RO_TYPE_WS, + I2400M_RO_TYPE_PACKET_WS, +}; + + /* Misc constants */ enum { - I2400M_PL_PAD = 16, /* Payload data size alignment */ + I2400M_PL_ALIGN = 16, /* Payload data size alignment */ I2400M_PL_SIZE_MAX = 0x3EFF, I2400M_MAX_PLS_IN_MSG = 60, /* protocol barkers: sync sequences; for notifications they @@ -381,6 +424,9 @@ enum i2400m_tlv { I2400M_TLV_RF_STATUS = 163, I2400M_TLV_DEVICE_RESET_TYPE = 132, I2400M_TLV_CONFIG_IDLE_PARAMETERS = 601, + I2400M_TLV_CONFIG_IDLE_TIMEOUT = 611, + I2400M_TLV_CONFIG_D2H_DATA_FORMAT = 614, + I2400M_TLV_CONFIG_DL_HOST_REORDER = 615, }; @@ -509,4 +555,27 @@ struct i2400m_tlv_media_status { __le32 media_status; } __attribute__((packed)); + +/* New in v1.4 */ +struct i2400m_tlv_config_idle_timeout { + struct i2400m_tlv_hdr hdr; + __le32 timeout; /* 100 to 300000 ms [5min], 100 increments + * 0 disabled */ +} __attribute__((packed)); + +/* New in v1.4 -- for backward compat, will be removed */ +struct i2400m_tlv_config_d2h_data_format { + struct i2400m_tlv_hdr hdr; + __u8 format; /* 0 old format, 1 enhanced */ + __u8 reserved[3]; +} __attribute__((packed)); + +/* New in v1.4 */ +struct i2400m_tlv_config_dl_host_reorder { + struct i2400m_tlv_hdr hdr; + __u8 reorder; /* 0 disabled, 1 enabled */ + __u8 reserved[3]; +} __attribute__((packed)); + + #endif /* #ifndef __LINUX__WIMAX__I2400M_H__ */ diff --git a/include/linux/wireless.h b/include/linux/wireless.h index d7958f9b52c..5b4c6c772a9 100644 --- a/include/linux/wireless.h +++ b/include/linux/wireless.h @@ -577,18 +577,22 @@ #define IW_AUTH_RX_UNENCRYPTED_EAPOL 8 #define IW_AUTH_ROAMING_CONTROL 9 #define IW_AUTH_PRIVACY_INVOKED 10 +#define IW_AUTH_CIPHER_GROUP_MGMT 11 +#define IW_AUTH_MFP 12 /* IW_AUTH_WPA_VERSION values (bit field) */ #define IW_AUTH_WPA_VERSION_DISABLED 0x00000001 #define IW_AUTH_WPA_VERSION_WPA 0x00000002 #define IW_AUTH_WPA_VERSION_WPA2 0x00000004 -/* IW_AUTH_PAIRWISE_CIPHER and IW_AUTH_GROUP_CIPHER values (bit field) */ +/* IW_AUTH_PAIRWISE_CIPHER, IW_AUTH_GROUP_CIPHER, and IW_AUTH_CIPHER_GROUP_MGMT + * values (bit field) */ #define IW_AUTH_CIPHER_NONE 0x00000001 #define IW_AUTH_CIPHER_WEP40 0x00000002 #define IW_AUTH_CIPHER_TKIP 0x00000004 #define IW_AUTH_CIPHER_CCMP 0x00000008 #define IW_AUTH_CIPHER_WEP104 0x00000010 +#define IW_AUTH_CIPHER_AES_CMAC 0x00000020 /* IW_AUTH_KEY_MGMT values (bit field) */ #define IW_AUTH_KEY_MGMT_802_1X 1 @@ -604,6 +608,11 @@ #define IW_AUTH_ROAMING_DISABLE 1 /* user space program used for roaming * control */ +/* IW_AUTH_MFP (management frame protection) values */ +#define IW_AUTH_MFP_DISABLED 0 /* MFP disabled */ +#define IW_AUTH_MFP_OPTIONAL 1 /* MFP optional */ +#define IW_AUTH_MFP_REQUIRED 2 /* MFP required */ + /* SIOCSIWENCODEEXT definitions */ #define IW_ENCODE_SEQ_MAX_SIZE 8 /* struct iw_encode_ext ->alg */ @@ -612,6 +621,7 @@ #define IW_ENCODE_ALG_TKIP 2 #define IW_ENCODE_ALG_CCMP 3 #define IW_ENCODE_ALG_PMK 4 +#define IW_ENCODE_ALG_AES_CMAC 5 /* struct iw_encode_ext ->ext_flags */ #define 
IW_ENCODE_EXT_TX_SEQ_VALID 0x00000001 #define IW_ENCODE_EXT_RX_SEQ_VALID 0x00000002 @@ -1122,6 +1132,14 @@ struct __compat_iw_event { }; #define IW_EV_COMPAT_LCP_LEN offsetof(struct __compat_iw_event, pointer) #define IW_EV_COMPAT_POINT_OFF offsetof(struct compat_iw_point, length) + +/* Size of the various events for compat */ +#define IW_EV_COMPAT_CHAR_LEN (IW_EV_COMPAT_LCP_LEN + IFNAMSIZ) +#define IW_EV_COMPAT_UINT_LEN (IW_EV_COMPAT_LCP_LEN + sizeof(__u32)) +#define IW_EV_COMPAT_FREQ_LEN (IW_EV_COMPAT_LCP_LEN + sizeof(struct iw_freq)) +#define IW_EV_COMPAT_PARAM_LEN (IW_EV_COMPAT_LCP_LEN + sizeof(struct iw_param)) +#define IW_EV_COMPAT_ADDR_LEN (IW_EV_COMPAT_LCP_LEN + sizeof(struct sockaddr)) +#define IW_EV_COMPAT_QUAL_LEN (IW_EV_COMPAT_LCP_LEN + sizeof(struct iw_quality)) #define IW_EV_COMPAT_POINT_LEN \ (IW_EV_COMPAT_LCP_LEN + sizeof(struct compat_iw_point) - \ IW_EV_COMPAT_POINT_OFF) diff --git a/include/linux/workqueue.h b/include/linux/workqueue.h index b36291130f2..6273fa97b52 100644 --- a/include/linux/workqueue.h +++ b/include/linux/workqueue.h @@ -41,6 +41,11 @@ struct delayed_work { struct timer_list timer; }; +static inline struct delayed_work *to_delayed_work(struct work_struct *work) +{ + return container_of(work, struct delayed_work, work); +} + struct execute_work { struct work_struct work; }; @@ -118,12 +123,24 @@ struct execute_work { init_timer(&(_work)->timer); \ } while (0) +#define INIT_DELAYED_WORK_ON_STACK(_work, _func) \ + do { \ + INIT_WORK(&(_work)->work, (_func)); \ + init_timer_on_stack(&(_work)->timer); \ + } while (0) + #define INIT_DELAYED_WORK_DEFERRABLE(_work, _func) \ do { \ INIT_WORK(&(_work)->work, (_func)); \ init_timer_deferrable(&(_work)->timer); \ } while (0) +#define INIT_DELAYED_WORK_ON_STACK(_work, _func) \ + do { \ + INIT_WORK(&(_work)->work, (_func)); \ + init_timer_on_stack(&(_work)->timer); \ + } while (0) + /** * work_pending - Find out whether a work item is currently pending * @work: The work item in question @@ -223,6 +240,21 @@ static inline int cancel_delayed_work(struct delayed_work *work) return ret; } +/* + * Like above, but uses del_timer() instead of del_timer_sync(). This means, + * if it returns 0 the timer function may be running and the queueing is in + * progress. + */ +static inline int __cancel_delayed_work(struct delayed_work *work) +{ + int ret; + + ret = del_timer(&work->timer); + if (ret) + work_clear_pending(&work->work); + return ret; +} + extern int cancel_delayed_work_sync(struct delayed_work *work); /* Obsolete. use cancel_delayed_work_sync() */ diff --git a/include/linux/writeback.h b/include/linux/writeback.h index 7300ecdc480..d347632f186 100644 --- a/include/linux/writeback.h +++ b/include/linux/writeback.h @@ -14,17 +14,6 @@ extern struct list_head inode_in_use; extern struct list_head inode_unused; /* - * Yes, writeback.h requires sched.h - * No, sched.h is not included from here. 
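A minimal sketch of the to_delayed_work() helper added in the workqueue.h hunk above: a handler recovering its container and re-arming itself; struct example_poller and example_poll_fn() are illustrative names.

#include <linux/jiffies.h>
#include <linux/kernel.h>
#include <linux/workqueue.h>

struct example_poller {
        struct delayed_work work;
        /* ... driver state ... */
};

/* Illustrative only: get back from the work_struct to the delayed_work,
 * then to the enclosing driver structure. */
static void example_poll_fn(struct work_struct *work)
{
        struct delayed_work *dwork = to_delayed_work(work);
        struct example_poller *p =
                container_of(dwork, struct example_poller, work);

        /* ... poll hardware, update p ... */
        schedule_delayed_work(&p->work, HZ);
}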
- */ -static inline int task_is_pdflush(struct task_struct *task) -{ - return task->flags & PF_FLUSHER; -} - -#define current_is_pdflush() task_is_pdflush(current) - -/* * fs/fs-writeback.c */ enum writeback_sync_modes { @@ -40,6 +29,8 @@ enum writeback_sync_modes { struct writeback_control { struct backing_dev_info *bdi; /* If !NULL, only write back this queue */ + struct super_block *sb; /* if !NULL, only write inodes from + this super_block */ enum writeback_sync_modes sync_mode; unsigned long *older_than_this; /* If !NULL, only write back inodes older than this */ @@ -76,10 +67,13 @@ struct writeback_control { /* * fs/fs-writeback.c */ -void writeback_inodes(struct writeback_control *wbc); +struct bdi_writeback; int inode_wait(void *); -void sync_inodes_sb(struct super_block *, int wait); -void sync_inodes(int wait); +long writeback_inodes_sb(struct super_block *); +long sync_inodes_sb(struct super_block *); +void writeback_inodes_wbc(struct writeback_control *wbc); +long wb_do_writeback(struct bdi_writeback *wb, int force_wait); +void wakeup_flusher_threads(long nr_pages); /* writeback.h requires fs.h; it, too, is not included from here. */ static inline void wait_on_inode(struct inode *inode) @@ -99,7 +93,6 @@ static inline void inode_sync_wait(struct inode *inode) /* * mm/page-writeback.c */ -int wakeup_pdflush(long nr_pages); void laptop_io_completion(void); void laptop_sync_completion(void); void throttle_vm_writeout(gfp_t gfp_mask); @@ -109,8 +102,8 @@ extern int dirty_background_ratio; extern unsigned long dirty_background_bytes; extern int vm_dirty_ratio; extern unsigned long vm_dirty_bytes; -extern int dirty_writeback_interval; -extern int dirty_expire_interval; +extern unsigned int dirty_writeback_interval; +extern unsigned int dirty_expire_interval; extern int vm_highmem_is_dirtyable; extern int block_dump; extern int laptop_mode; @@ -151,17 +144,12 @@ balance_dirty_pages_ratelimited(struct address_space *mapping) typedef int (*writepage_t)(struct page *page, struct writeback_control *wbc, void *data); -int pdflush_operation(void (*fn)(unsigned long), unsigned long arg0); int generic_writepages(struct address_space *mapping, struct writeback_control *wbc); int write_cache_pages(struct address_space *mapping, struct writeback_control *wbc, writepage_t writepage, void *data); int do_writepages(struct address_space *mapping, struct writeback_control *wbc); -int sync_page_range(struct inode *inode, struct address_space *mapping, - loff_t pos, loff_t count); -int sync_page_range_nolock(struct inode *inode, struct address_space *mapping, - loff_t pos, loff_t count); void set_page_dirty_balance(struct page *page, int page_mkwrite); void writeback_set_ratelimit(void); diff --git a/include/linux/xattr.h b/include/linux/xattr.h index d131e352cfe..5c84af8c5f6 100644 --- a/include/linux/xattr.h +++ b/include/linux/xattr.h @@ -49,6 +49,7 @@ struct xattr_handler { ssize_t xattr_getsecurity(struct inode *, const char *, void *, size_t); ssize_t vfs_getxattr(struct dentry *, const char *, void *, size_t); ssize_t vfs_listxattr(struct dentry *d, char *list, size_t size); +int __vfs_setxattr_noperm(struct dentry *, const char *, const void *, size_t, int); int vfs_setxattr(struct dentry *, const char *, const void *, size_t, int); int vfs_removexattr(struct dentry *, const char *); diff --git a/include/linux/xfrm.h b/include/linux/xfrm.h index 52f3abd453a..2d4ec15abac 100644 --- a/include/linux/xfrm.h +++ b/include/linux/xfrm.h @@ -58,7 +58,7 @@ struct xfrm_selector __u8 prefixlen_s; __u8 
proto; int ifindex; - uid_t user; + __kernel_uid32_t user; }; #define XFRM_INF (~(__u64)0) |
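A minimal sketch of how a filesystem sync path might drive the new per-superblock writeback entry points from the writeback.h hunk above; example_sync_fs() is an illustrative name and error handling is elided.

#include <linux/fs.h>
#include <linux/writeback.h>

/* Illustrative only: start background writeback for the async case and
 * do a waited, data-integrity sync otherwise. */
static int example_sync_fs(struct super_block *sb, int wait)
{
        if (!wait)
                writeback_inodes_sb(sb);
        else
                sync_inodes_sb(sb);
        return 0;
}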